# generated automatically by aclocal 1.15 -*- Autoconf -*-

# Copyright (C) 1996-2014 Free Software Foundation, Inc.

# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE.

m4_ifndef([AC_CONFIG_MACRO_DIRS], [m4_defun([_AM_CONFIG_MACRO_DIRS], [])m4_defun([AC_CONFIG_MACRO_DIRS], [_AM_CONFIG_MACRO_DIRS($@)])])
m4_ifndef([AC_AUTOCONF_VERSION],
  [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl
m4_if(m4_defn([AC_AUTOCONF_VERSION]), [2.69],,
[m4_warning([this file was generated for autoconf 2.69.
You have another version of autoconf.  It may work, but is not guaranteed to.
If you have problems, you may need to regenerate the build system entirely.
To do so, use the procedure documented by the package, typically 'autoreconf'.])])

# libtool.m4 - Configure libtool for the host system. -*-Autoconf-*-
#
#   Copyright (C) 1996-2001, 2003-2015 Free Software Foundation, Inc.
#   Written by Gordon Matzigkeit, 1996
#
# This file is free software; the Free Software Foundation gives
# unlimited permission to copy and/or distribute it, with or without
# modifications, as long as this notice is preserved.

m4_define([_LT_COPYING], [dnl
# Copyright (C) 2014 Free Software Foundation, Inc.
# This is free software; see the source for copying conditions.  There is NO
# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.

# GNU Libtool is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# As a special exception to the GNU General Public License, if you
# distribute this file as part of a program or library that is built
# using GNU Libtool, you may include this file under the same
# distribution terms that you use for the rest of that program.
#
# GNU Libtool is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
])

# serial 58 LT_INIT


# LT_PREREQ(VERSION)
# ------------------
# Complain and exit if this libtool version is less than VERSION.
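# A minimal, illustrative usage sketch (a hypothetical caller, not part of
# this generated file): a configure.ac would normally invoke this right
# before LT_INIT, e.g.
#
#   LT_PREREQ([2.2])
#   LT_INIT([disable-static])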
m4_defun([LT_PREREQ], [m4_if(m4_version_compare(m4_defn([LT_PACKAGE_VERSION]), [$1]), -1, [m4_default([$3], [m4_fatal([Libtool version $1 or higher is required], 63)])], [$2])]) # _LT_CHECK_BUILDDIR # ------------------ # Complain if the absolute build directory name contains unusual characters m4_defun([_LT_CHECK_BUILDDIR], [case `pwd` in *\ * | *\ *) AC_MSG_WARN([Libtool does not cope well with whitespace in `pwd`]) ;; esac ]) # LT_INIT([OPTIONS]) # ------------------ AC_DEFUN([LT_INIT], [AC_PREREQ([2.62])dnl We use AC_PATH_PROGS_FEATURE_CHECK AC_REQUIRE([AC_CONFIG_AUX_DIR_DEFAULT])dnl AC_BEFORE([$0], [LT_LANG])dnl AC_BEFORE([$0], [LT_OUTPUT])dnl AC_BEFORE([$0], [LTDL_INIT])dnl m4_require([_LT_CHECK_BUILDDIR])dnl dnl Autoconf doesn't catch unexpanded LT_ macros by default: m4_pattern_forbid([^_?LT_[A-Z_]+$])dnl m4_pattern_allow([^(_LT_EOF|LT_DLGLOBAL|LT_DLLAZY_OR_NOW|LT_MULTI_MODULE)$])dnl dnl aclocal doesn't pull ltoptions.m4, ltsugar.m4, or ltversion.m4 dnl unless we require an AC_DEFUNed macro: AC_REQUIRE([LTOPTIONS_VERSION])dnl AC_REQUIRE([LTSUGAR_VERSION])dnl AC_REQUIRE([LTVERSION_VERSION])dnl AC_REQUIRE([LTOBSOLETE_VERSION])dnl m4_require([_LT_PROG_LTMAIN])dnl _LT_SHELL_INIT([SHELL=${CONFIG_SHELL-/bin/sh}]) dnl Parse OPTIONS _LT_SET_OPTIONS([$0], [$1]) # This can be used to rebuild libtool when needed LIBTOOL_DEPS=$ltmain # Always use our own libtool. LIBTOOL='$(SHELL) $(top_builddir)/libtool' AC_SUBST(LIBTOOL)dnl _LT_SETUP # Only expand once: m4_define([LT_INIT]) ])# LT_INIT # Old names: AU_ALIAS([AC_PROG_LIBTOOL], [LT_INIT]) AU_ALIAS([AM_PROG_LIBTOOL], [LT_INIT]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_PROG_LIBTOOL], []) dnl AC_DEFUN([AM_PROG_LIBTOOL], []) # _LT_PREPARE_CC_BASENAME # ----------------------- m4_defun([_LT_PREPARE_CC_BASENAME], [ # Calculate cc_basename. Skip known compiler wrappers and cross-prefix. func_cc_basename () { for cc_temp in @S|@*""; do case $cc_temp in compile | *[[\\/]]compile | ccache | *[[\\/]]ccache ) ;; distcc | *[[\\/]]distcc | purify | *[[\\/]]purify ) ;; \-*) ;; *) break;; esac done func_cc_basename_result=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"` } ])# _LT_PREPARE_CC_BASENAME # _LT_CC_BASENAME(CC) # ------------------- # It would be clearer to call AC_REQUIREs from _LT_PREPARE_CC_BASENAME, # but that macro is also expanded into generated libtool script, which # arranges for $SED and $ECHO to be set by different means. m4_defun([_LT_CC_BASENAME], [m4_require([_LT_PREPARE_CC_BASENAME])dnl AC_REQUIRE([_LT_DECL_SED])dnl AC_REQUIRE([_LT_PROG_ECHO_BACKSLASH])dnl func_cc_basename $1 cc_basename=$func_cc_basename_result ]) # _LT_FILEUTILS_DEFAULTS # ---------------------- # It is okay to use these file commands and assume they have been set # sensibly after 'm4_require([_LT_FILEUTILS_DEFAULTS])'. 
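# For example, _LT_COMPILER_OPTION later in this file first does
# 'm4_require([_LT_FILEUTILS_DEFAULTS])dnl' and can then clean up with
# '$RM conftest*' without checking whether RM was ever set.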
m4_defun([_LT_FILEUTILS_DEFAULTS], [: ${CP="cp -f"} : ${MV="mv -f"} : ${RM="rm -f"} ])# _LT_FILEUTILS_DEFAULTS # _LT_SETUP # --------- m4_defun([_LT_SETUP], [AC_REQUIRE([AC_CANONICAL_HOST])dnl AC_REQUIRE([AC_CANONICAL_BUILD])dnl AC_REQUIRE([_LT_PREPARE_SED_QUOTE_VARS])dnl AC_REQUIRE([_LT_PROG_ECHO_BACKSLASH])dnl _LT_DECL([], [PATH_SEPARATOR], [1], [The PATH separator for the build system])dnl dnl _LT_DECL([], [host_alias], [0], [The host system])dnl _LT_DECL([], [host], [0])dnl _LT_DECL([], [host_os], [0])dnl dnl _LT_DECL([], [build_alias], [0], [The build system])dnl _LT_DECL([], [build], [0])dnl _LT_DECL([], [build_os], [0])dnl dnl AC_REQUIRE([AC_PROG_CC])dnl AC_REQUIRE([LT_PATH_LD])dnl AC_REQUIRE([LT_PATH_NM])dnl dnl AC_REQUIRE([AC_PROG_LN_S])dnl test -z "$LN_S" && LN_S="ln -s" _LT_DECL([], [LN_S], [1], [Whether we need soft or hard links])dnl dnl AC_REQUIRE([LT_CMD_MAX_LEN])dnl _LT_DECL([objext], [ac_objext], [0], [Object file suffix (normally "o")])dnl _LT_DECL([], [exeext], [0], [Executable file suffix (normally "")])dnl dnl m4_require([_LT_FILEUTILS_DEFAULTS])dnl m4_require([_LT_CHECK_SHELL_FEATURES])dnl m4_require([_LT_PATH_CONVERSION_FUNCTIONS])dnl m4_require([_LT_CMD_RELOAD])dnl m4_require([_LT_CHECK_MAGIC_METHOD])dnl m4_require([_LT_CHECK_SHAREDLIB_FROM_LINKLIB])dnl m4_require([_LT_CMD_OLD_ARCHIVE])dnl m4_require([_LT_CMD_GLOBAL_SYMBOLS])dnl m4_require([_LT_WITH_SYSROOT])dnl m4_require([_LT_CMD_TRUNCATE])dnl _LT_CONFIG_LIBTOOL_INIT([ # See if we are running on zsh, and set the options that allow our # commands through without removal of \ escapes INIT. if test -n "\${ZSH_VERSION+set}"; then setopt NO_GLOB_SUBST fi ]) if test -n "${ZSH_VERSION+set}"; then setopt NO_GLOB_SUBST fi _LT_CHECK_OBJDIR m4_require([_LT_TAG_COMPILER])dnl case $host_os in aix3*) # AIX sometimes has problems with the GCC collect2 program. For some # reason, if we set the COLLECT_NAMES environment variable, the problems # vanish in a puff of smoke. if test set != "${COLLECT_NAMES+set}"; then COLLECT_NAMES= export COLLECT_NAMES fi ;; esac # Global variables: ofile=libtool can_build_shared=yes # All known linkers require a '.a' archive for static linking (except MSVC, # which needs '.lib'). libext=a with_gnu_ld=$lt_cv_prog_gnu_ld old_CC=$CC old_CFLAGS=$CFLAGS # Set sane defaults for various variables test -z "$CC" && CC=cc test -z "$LTCC" && LTCC=$CC test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS test -z "$LD" && LD=ld test -z "$ac_objext" && ac_objext=o _LT_CC_BASENAME([$compiler]) # Only perform the check for file, if the check method requires it test -z "$MAGIC_CMD" && MAGIC_CMD=file case $deplibs_check_method in file_magic*) if test "$file_magic_cmd" = '$MAGIC_CMD'; then _LT_PATH_MAGIC fi ;; esac # Use C for the default configuration in the libtool script LT_SUPPORTED_TAG([CC]) _LT_LANG_C_CONFIG _LT_LANG_DEFAULT_CONFIG _LT_CONFIG_COMMANDS ])# _LT_SETUP # _LT_PREPARE_SED_QUOTE_VARS # -------------------------- # Define a few sed substitution that help us do robust quoting. m4_defun([_LT_PREPARE_SED_QUOTE_VARS], [# Backslashify metacharacters that are still active within # double-quoted strings. sed_quote_subst='s/\([["`$\\]]\)/\\\1/g' # Same as above, but do not quote variable references. double_quote_subst='s/\([["`\\]]\)/\\\1/g' # Sed substitution to delay expansion of an escaped shell variable in a # double_quote_subst'ed string. delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g' # Sed substitution to delay expansion of an escaped single quote. 
delay_single_quote_subst='s/'\''/'\'\\\\\\\'\''/g' # Sed substitution to avoid accidental globbing in evaled expressions no_glob_subst='s/\*/\\\*/g' ]) # _LT_PROG_LTMAIN # --------------- # Note that this code is called both from 'configure', and 'config.status' # now that we use AC_CONFIG_COMMANDS to generate libtool. Notably, # 'config.status' has no value for ac_aux_dir unless we are using Automake, # so we pass a copy along to make sure it has a sensible value anyway. m4_defun([_LT_PROG_LTMAIN], [m4_ifdef([AC_REQUIRE_AUX_FILE], [AC_REQUIRE_AUX_FILE([ltmain.sh])])dnl _LT_CONFIG_LIBTOOL_INIT([ac_aux_dir='$ac_aux_dir']) ltmain=$ac_aux_dir/ltmain.sh ])# _LT_PROG_LTMAIN # So that we can recreate a full libtool script including additional # tags, we accumulate the chunks of code to send to AC_CONFIG_COMMANDS # in macros and then make a single call at the end using the 'libtool' # label. # _LT_CONFIG_LIBTOOL_INIT([INIT-COMMANDS]) # ---------------------------------------- # Register INIT-COMMANDS to be passed to AC_CONFIG_COMMANDS later. m4_define([_LT_CONFIG_LIBTOOL_INIT], [m4_ifval([$1], [m4_append([_LT_OUTPUT_LIBTOOL_INIT], [$1 ])])]) # Initialize. m4_define([_LT_OUTPUT_LIBTOOL_INIT]) # _LT_CONFIG_LIBTOOL([COMMANDS]) # ------------------------------ # Register COMMANDS to be passed to AC_CONFIG_COMMANDS later. m4_define([_LT_CONFIG_LIBTOOL], [m4_ifval([$1], [m4_append([_LT_OUTPUT_LIBTOOL_COMMANDS], [$1 ])])]) # Initialize. m4_define([_LT_OUTPUT_LIBTOOL_COMMANDS]) # _LT_CONFIG_SAVE_COMMANDS([COMMANDS], [INIT_COMMANDS]) # ----------------------------------------------------- m4_defun([_LT_CONFIG_SAVE_COMMANDS], [_LT_CONFIG_LIBTOOL([$1]) _LT_CONFIG_LIBTOOL_INIT([$2]) ]) # _LT_FORMAT_COMMENT([COMMENT]) # ----------------------------- # Add leading comment marks to the start of each line, and a trailing # full-stop to the whole comment if one is not present already. m4_define([_LT_FORMAT_COMMENT], [m4_ifval([$1], [ m4_bpatsubst([m4_bpatsubst([$1], [^ *], [# ])], [['`$\]], [\\\&])]m4_bmatch([$1], [[!?.]$], [], [.]) )]) # _LT_DECL([CONFIGNAME], VARNAME, VALUE, [DESCRIPTION], [IS-TAGGED?]) # ------------------------------------------------------------------- # CONFIGNAME is the name given to the value in the libtool script. # VARNAME is the (base) name used in the configure script. # VALUE may be 0, 1 or 2 for a computed quote escaped value based on # VARNAME. Any other value will be used directly. 
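# For instance, later in this file (_LT_PROG_ECHO_BACKSLASH) the shell and
# echo programs are recorded for the generated libtool script with:
#
#   _LT_DECL([], [SHELL], [1], [Shell to use when invoking shell scripts])
#   _LT_DECL([], [ECHO], [1], [An echo program that protects backslashes])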
m4_define([_LT_DECL], [lt_if_append_uniq([lt_decl_varnames], [$2], [, ], [lt_dict_add_subkey([lt_decl_dict], [$2], [libtool_name], [m4_ifval([$1], [$1], [$2])]) lt_dict_add_subkey([lt_decl_dict], [$2], [value], [$3]) m4_ifval([$4], [lt_dict_add_subkey([lt_decl_dict], [$2], [description], [$4])]) lt_dict_add_subkey([lt_decl_dict], [$2], [tagged?], [m4_ifval([$5], [yes], [no])])]) ]) # _LT_TAGDECL([CONFIGNAME], VARNAME, VALUE, [DESCRIPTION]) # -------------------------------------------------------- m4_define([_LT_TAGDECL], [_LT_DECL([$1], [$2], [$3], [$4], [yes])]) # lt_decl_tag_varnames([SEPARATOR], [VARNAME1...]) # ------------------------------------------------ m4_define([lt_decl_tag_varnames], [_lt_decl_filter([tagged?], [yes], $@)]) # _lt_decl_filter(SUBKEY, VALUE, [SEPARATOR], [VARNAME1..]) # --------------------------------------------------------- m4_define([_lt_decl_filter], [m4_case([$#], [0], [m4_fatal([$0: too few arguments: $#])], [1], [m4_fatal([$0: too few arguments: $#: $1])], [2], [lt_dict_filter([lt_decl_dict], [$1], [$2], [], lt_decl_varnames)], [3], [lt_dict_filter([lt_decl_dict], [$1], [$2], [$3], lt_decl_varnames)], [lt_dict_filter([lt_decl_dict], $@)])[]dnl ]) # lt_decl_quote_varnames([SEPARATOR], [VARNAME1...]) # -------------------------------------------------- m4_define([lt_decl_quote_varnames], [_lt_decl_filter([value], [1], $@)]) # lt_decl_dquote_varnames([SEPARATOR], [VARNAME1...]) # --------------------------------------------------- m4_define([lt_decl_dquote_varnames], [_lt_decl_filter([value], [2], $@)]) # lt_decl_varnames_tagged([SEPARATOR], [VARNAME1...]) # --------------------------------------------------- m4_define([lt_decl_varnames_tagged], [m4_assert([$# <= 2])dnl _$0(m4_quote(m4_default([$1], [[, ]])), m4_ifval([$2], [[$2]], [m4_dquote(lt_decl_tag_varnames)]), m4_split(m4_normalize(m4_quote(_LT_TAGS)), [ ]))]) m4_define([_lt_decl_varnames_tagged], [m4_ifval([$3], [lt_combine([$1], [$2], [_], $3)])]) # lt_decl_all_varnames([SEPARATOR], [VARNAME1...]) # ------------------------------------------------ m4_define([lt_decl_all_varnames], [_$0(m4_quote(m4_default([$1], [[, ]])), m4_if([$2], [], m4_quote(lt_decl_varnames), m4_quote(m4_shift($@))))[]dnl ]) m4_define([_lt_decl_all_varnames], [lt_join($@, lt_decl_varnames_tagged([$1], lt_decl_tag_varnames([[, ]], m4_shift($@))))dnl ]) # _LT_CONFIG_STATUS_DECLARE([VARNAME]) # ------------------------------------ # Quote a variable value, and forward it to 'config.status' so that its # declaration there will have the same value as in 'configure'. VARNAME # must have a single quote delimited value for this to work. m4_define([_LT_CONFIG_STATUS_DECLARE], [$1='`$ECHO "$][$1" | $SED "$delay_single_quote_subst"`']) # _LT_CONFIG_STATUS_DECLARATIONS # ------------------------------ # We delimit libtool config variables with single quotes, so when # we write them to config.status, we have to be sure to quote all # embedded single quotes properly. 
In configure, this macro expands # each variable declared with _LT_DECL (and _LT_TAGDECL) into: # # ='`$ECHO "$" | $SED "$delay_single_quote_subst"`' m4_defun([_LT_CONFIG_STATUS_DECLARATIONS], [m4_foreach([_lt_var], m4_quote(lt_decl_all_varnames), [m4_n([_LT_CONFIG_STATUS_DECLARE(_lt_var)])])]) # _LT_LIBTOOL_TAGS # ---------------- # Output comment and list of tags supported by the script m4_defun([_LT_LIBTOOL_TAGS], [_LT_FORMAT_COMMENT([The names of the tagged configurations supported by this script])dnl available_tags='_LT_TAGS'dnl ]) # _LT_LIBTOOL_DECLARE(VARNAME, [TAG]) # ----------------------------------- # Extract the dictionary values for VARNAME (optionally with TAG) and # expand to a commented shell variable setting: # # # Some comment about what VAR is for. # visible_name=$lt_internal_name m4_define([_LT_LIBTOOL_DECLARE], [_LT_FORMAT_COMMENT(m4_quote(lt_dict_fetch([lt_decl_dict], [$1], [description])))[]dnl m4_pushdef([_libtool_name], m4_quote(lt_dict_fetch([lt_decl_dict], [$1], [libtool_name])))[]dnl m4_case(m4_quote(lt_dict_fetch([lt_decl_dict], [$1], [value])), [0], [_libtool_name=[$]$1], [1], [_libtool_name=$lt_[]$1], [2], [_libtool_name=$lt_[]$1], [_libtool_name=lt_dict_fetch([lt_decl_dict], [$1], [value])])[]dnl m4_ifval([$2], [_$2])[]m4_popdef([_libtool_name])[]dnl ]) # _LT_LIBTOOL_CONFIG_VARS # ----------------------- # Produce commented declarations of non-tagged libtool config variables # suitable for insertion in the LIBTOOL CONFIG section of the 'libtool' # script. Tagged libtool config variables (even for the LIBTOOL CONFIG # section) are produced by _LT_LIBTOOL_TAG_VARS. m4_defun([_LT_LIBTOOL_CONFIG_VARS], [m4_foreach([_lt_var], m4_quote(_lt_decl_filter([tagged?], [no], [], lt_decl_varnames)), [m4_n([_LT_LIBTOOL_DECLARE(_lt_var)])])]) # _LT_LIBTOOL_TAG_VARS(TAG) # ------------------------- m4_define([_LT_LIBTOOL_TAG_VARS], [m4_foreach([_lt_var], m4_quote(lt_decl_tag_varnames), [m4_n([_LT_LIBTOOL_DECLARE(_lt_var, [$1])])])]) # _LT_TAGVAR(VARNAME, [TAGNAME]) # ------------------------------ m4_define([_LT_TAGVAR], [m4_ifval([$2], [$1_$2], [$1])]) # _LT_CONFIG_COMMANDS # ------------------- # Send accumulated output to $CONFIG_STATUS. Thanks to the lists of # variables for single and double quote escaping we saved from calls # to _LT_DECL, we can put quote escaped variables declarations # into 'config.status', and then the shell code to quote escape them in # for loops in 'config.status'. Finally, any additional code accumulated # from calls to _LT_CONFIG_LIBTOOL_INIT is expanded. m4_defun([_LT_CONFIG_COMMANDS], [AC_PROVIDE_IFELSE([LT_OUTPUT], dnl If the libtool generation code has been placed in $CONFIG_LT, dnl instead of duplicating it all over again into config.status, dnl then we will have config.status run $CONFIG_LT later, so it dnl needs to know what name is stored there: [AC_CONFIG_COMMANDS([libtool], [$SHELL $CONFIG_LT || AS_EXIT(1)], [CONFIG_LT='$CONFIG_LT'])], dnl If the libtool generation code is destined for config.status, dnl expand the accumulated commands and init code now: [AC_CONFIG_COMMANDS([libtool], [_LT_OUTPUT_LIBTOOL_COMMANDS], [_LT_OUTPUT_LIBTOOL_COMMANDS_INIT])]) ])#_LT_CONFIG_COMMANDS # Initialize. m4_define([_LT_OUTPUT_LIBTOOL_COMMANDS_INIT], [ # The HP-UX ksh and POSIX shell print the target directory to stdout # if CDPATH is set. 
(unset CDPATH) >/dev/null 2>&1 && unset CDPATH sed_quote_subst='$sed_quote_subst' double_quote_subst='$double_quote_subst' delay_variable_subst='$delay_variable_subst' _LT_CONFIG_STATUS_DECLARATIONS LTCC='$LTCC' LTCFLAGS='$LTCFLAGS' compiler='$compiler_DEFAULT' # A function that is used when there is no print builtin or printf. func_fallback_echo () { eval 'cat <<_LTECHO_EOF \$[]1 _LTECHO_EOF' } # Quote evaled strings. for var in lt_decl_all_varnames([[ \ ]], lt_decl_quote_varnames); do case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in *[[\\\\\\\`\\"\\\$]]*) eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED \\"\\\$sed_quote_subst\\"\\\`\\\\\\"" ## exclude from sc_prohibit_nested_quotes ;; *) eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" ;; esac done # Double-quote double-evaled strings. for var in lt_decl_all_varnames([[ \ ]], lt_decl_dquote_varnames); do case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in *[[\\\\\\\`\\"\\\$]]*) eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\"" ## exclude from sc_prohibit_nested_quotes ;; *) eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" ;; esac done _LT_OUTPUT_LIBTOOL_INIT ]) # _LT_GENERATED_FILE_INIT(FILE, [COMMENT]) # ------------------------------------ # Generate a child script FILE with all initialization necessary to # reuse the environment learned by the parent script, and make the # file executable. If COMMENT is supplied, it is inserted after the # '#!' sequence but before initialization text begins. After this # macro, additional text can be appended to FILE to form the body of # the child script. The macro ends with non-zero status if the # file could not be fully written (such as if the disk is full). m4_ifdef([AS_INIT_GENERATED], [m4_defun([_LT_GENERATED_FILE_INIT],[AS_INIT_GENERATED($@)])], [m4_defun([_LT_GENERATED_FILE_INIT], [m4_require([AS_PREPARE])]dnl [m4_pushdef([AS_MESSAGE_LOG_FD])]dnl [lt_write_fail=0 cat >$1 <<_ASEOF || lt_write_fail=1 #! $SHELL # Generated by $as_me. $2 SHELL=\${CONFIG_SHELL-$SHELL} export SHELL _ASEOF cat >>$1 <<\_ASEOF || lt_write_fail=1 AS_SHELL_SANITIZE _AS_PREPARE exec AS_MESSAGE_FD>&1 _ASEOF test 0 = "$lt_write_fail" && chmod +x $1[]dnl m4_popdef([AS_MESSAGE_LOG_FD])])])# _LT_GENERATED_FILE_INIT # LT_OUTPUT # --------- # This macro allows early generation of the libtool script (before # AC_OUTPUT is called), incase it is used in configure for compilation # tests. AC_DEFUN([LT_OUTPUT], [: ${CONFIG_LT=./config.lt} AC_MSG_NOTICE([creating $CONFIG_LT]) _LT_GENERATED_FILE_INIT(["$CONFIG_LT"], [# Run this file to recreate a libtool stub with the current configuration.]) cat >>"$CONFIG_LT" <<\_LTEOF lt_cl_silent=false exec AS_MESSAGE_LOG_FD>>config.log { echo AS_BOX([Running $as_me.]) } >&AS_MESSAGE_LOG_FD lt_cl_help="\ '$as_me' creates a local libtool stub from the current configuration, for use in further configure time tests before the real libtool is generated. Usage: $[0] [[OPTIONS]] -h, --help print this help, then exit -V, --version print version number, then exit -q, --quiet do not print progress messages -d, --debug don't remove temporary files Report bugs to ." lt_cl_version="\ m4_ifset([AC_PACKAGE_NAME], [AC_PACKAGE_NAME ])config.lt[]dnl m4_ifset([AC_PACKAGE_VERSION], [ AC_PACKAGE_VERSION]) configured by $[0], generated by m4_PACKAGE_STRING. Copyright (C) 2011 Free Software Foundation, Inc. 
This config.lt script is free software; the Free Software Foundation
gives unlimited permission to copy, distribute and modify it."

while test 0 != $[#]
do
  case $[1] in
    --version | --v* | -V )
      echo "$lt_cl_version"; exit 0 ;;
    --help | --h* | -h )
      echo "$lt_cl_help"; exit 0 ;;
    --debug | --d* | -d )
      debug=: ;;
    --quiet | --q* | --silent | --s* | -q )
      lt_cl_silent=: ;;

    -*) AC_MSG_ERROR([unrecognized option: $[1]
Try '$[0] --help' for more information.]) ;;

    *) AC_MSG_ERROR([unrecognized argument: $[1]
Try '$[0] --help' for more information.]) ;;
  esac
  shift
done

if $lt_cl_silent; then
  exec AS_MESSAGE_FD>/dev/null
fi
_LTEOF

cat >>"$CONFIG_LT" <<_LTEOF
_LT_OUTPUT_LIBTOOL_COMMANDS_INIT
_LTEOF

cat >>"$CONFIG_LT" <<\_LTEOF
AC_MSG_NOTICE([creating $ofile])
_LT_OUTPUT_LIBTOOL_COMMANDS
AS_EXIT(0)
_LTEOF
chmod +x "$CONFIG_LT"

# configure is writing to config.log, but config.lt does its own redirection,
# appending to config.log, which fails on DOS, as config.log is still kept
# open by configure.  Here we exec the FD to /dev/null, effectively closing
# config.log, so it can be properly (re)opened and appended to by config.lt.
lt_cl_success=:
test yes = "$silent" && lt_config_lt_args="$lt_config_lt_args --quiet"
exec AS_MESSAGE_LOG_FD>/dev/null
$SHELL "$CONFIG_LT" $lt_config_lt_args || lt_cl_success=false
exec AS_MESSAGE_LOG_FD>>config.log
$lt_cl_success || AS_EXIT(1)
])# LT_OUTPUT


# _LT_CONFIG(TAG)
# ---------------
# If TAG is the built-in tag, create an initial libtool script with a
# default configuration from the untagged config vars.  Otherwise add code
# to config.status for appending the configuration named by TAG from the
# matching tagged config vars.
m4_defun([_LT_CONFIG],
[m4_require([_LT_FILEUTILS_DEFAULTS])dnl
_LT_CONFIG_SAVE_COMMANDS([
  m4_define([_LT_TAG], m4_if([$1], [], [C], [$1]))dnl
  m4_if(_LT_TAG, [C], [
    # See if we are running on zsh, and set the options that allow our
    # commands through without removal of \ escapes.
    if test -n "${ZSH_VERSION+set}"; then
      setopt NO_GLOB_SUBST
    fi

    cfgfile=${ofile}T
    trap "$RM \"$cfgfile\"; exit 1" 1 2 15
    $RM "$cfgfile"

    cat <<_LT_EOF >> "$cfgfile"
#! $SHELL
# Generated automatically by $as_me ($PACKAGE) $VERSION
# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`:
# NOTE: Changes made to this file will be lost: look at ltmain.sh.

# Provide generalized library-building support services.
# Written by Gordon Matzigkeit, 1996

_LT_COPYING
_LT_LIBTOOL_TAGS

# Configured defaults for sys_lib_dlsearch_path munging.
: \${LT_SYS_LIBRARY_PATH="$configure_time_lt_sys_library_path"}

# ### BEGIN LIBTOOL CONFIG
_LT_LIBTOOL_CONFIG_VARS
_LT_LIBTOOL_TAG_VARS
# ### END LIBTOOL CONFIG

_LT_EOF

    cat <<'_LT_EOF' >> "$cfgfile"

# ### BEGIN FUNCTIONS SHARED WITH CONFIGURE

_LT_PREPARE_MUNGE_PATH_LIST
_LT_PREPARE_CC_BASENAME

# ### END FUNCTIONS SHARED WITH CONFIGURE

_LT_EOF

  case $host_os in
  aix3*)
    cat <<\_LT_EOF >> "$cfgfile"
# AIX sometimes has problems with the GCC collect2 program.  For some
# reason, if we set the COLLECT_NAMES environment variable, the problems
# vanish in a puff of smoke.
if test set != "${COLLECT_NAMES+set}"; then
  COLLECT_NAMES=
  export COLLECT_NAMES
fi
_LT_EOF
    ;;
  esac

  _LT_PROG_LTMAIN

  # We use sed instead of cat because bash on DJGPP gets confused if
  # it finds mixed CR/LF and LF-only lines.  Since sed operates in
  # text mode, it properly converts lines to CR/LF.  This bash problem
  # is reportedly fixed, but why not run on old versions too?
sed '$q' "$ltmain" >> "$cfgfile" \ || (rm -f "$cfgfile"; exit 1) mv -f "$cfgfile" "$ofile" || (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") chmod +x "$ofile" ], [cat <<_LT_EOF >> "$ofile" dnl Unfortunately we have to use $1 here, since _LT_TAG is not expanded dnl in a comment (ie after a #). # ### BEGIN LIBTOOL TAG CONFIG: $1 _LT_LIBTOOL_TAG_VARS(_LT_TAG) # ### END LIBTOOL TAG CONFIG: $1 _LT_EOF ])dnl /m4_if ], [m4_if([$1], [], [ PACKAGE='$PACKAGE' VERSION='$VERSION' RM='$RM' ofile='$ofile'], []) ])dnl /_LT_CONFIG_SAVE_COMMANDS ])# _LT_CONFIG # LT_SUPPORTED_TAG(TAG) # --------------------- # Trace this macro to discover what tags are supported by the libtool # --tag option, using: # autoconf --trace 'LT_SUPPORTED_TAG:$1' AC_DEFUN([LT_SUPPORTED_TAG], []) # C support is built-in for now m4_define([_LT_LANG_C_enabled], []) m4_define([_LT_TAGS], []) # LT_LANG(LANG) # ------------- # Enable libtool support for the given language if not already enabled. AC_DEFUN([LT_LANG], [AC_BEFORE([$0], [LT_OUTPUT])dnl m4_case([$1], [C], [_LT_LANG(C)], [C++], [_LT_LANG(CXX)], [Go], [_LT_LANG(GO)], [Java], [_LT_LANG(GCJ)], [Fortran 77], [_LT_LANG(F77)], [Fortran], [_LT_LANG(FC)], [Windows Resource], [_LT_LANG(RC)], [m4_ifdef([_LT_LANG_]$1[_CONFIG], [_LT_LANG($1)], [m4_fatal([$0: unsupported language: "$1"])])])dnl ])# LT_LANG # _LT_LANG(LANGNAME) # ------------------ m4_defun([_LT_LANG], [m4_ifdef([_LT_LANG_]$1[_enabled], [], [LT_SUPPORTED_TAG([$1])dnl m4_append([_LT_TAGS], [$1 ])dnl m4_define([_LT_LANG_]$1[_enabled], [])dnl _LT_LANG_$1_CONFIG($1)])dnl ])# _LT_LANG m4_ifndef([AC_PROG_GO], [ # NOTE: This macro has been submitted for inclusion into # # GNU Autoconf as AC_PROG_GO. When it is available in # # a released version of Autoconf we should remove this # # macro and use it instead. # m4_defun([AC_PROG_GO], [AC_LANG_PUSH(Go)dnl AC_ARG_VAR([GOC], [Go compiler command])dnl AC_ARG_VAR([GOFLAGS], [Go compiler flags])dnl _AC_ARG_VAR_LDFLAGS()dnl AC_CHECK_TOOL(GOC, gccgo) if test -z "$GOC"; then if test -n "$ac_tool_prefix"; then AC_CHECK_PROG(GOC, [${ac_tool_prefix}gccgo], [${ac_tool_prefix}gccgo]) fi fi if test -z "$GOC"; then AC_CHECK_PROG(GOC, gccgo, gccgo, false) fi ])#m4_defun ])#m4_ifndef # _LT_LANG_DEFAULT_CONFIG # ----------------------- m4_defun([_LT_LANG_DEFAULT_CONFIG], [AC_PROVIDE_IFELSE([AC_PROG_CXX], [LT_LANG(CXX)], [m4_define([AC_PROG_CXX], defn([AC_PROG_CXX])[LT_LANG(CXX)])]) AC_PROVIDE_IFELSE([AC_PROG_F77], [LT_LANG(F77)], [m4_define([AC_PROG_F77], defn([AC_PROG_F77])[LT_LANG(F77)])]) AC_PROVIDE_IFELSE([AC_PROG_FC], [LT_LANG(FC)], [m4_define([AC_PROG_FC], defn([AC_PROG_FC])[LT_LANG(FC)])]) dnl The call to [A][M_PROG_GCJ] is quoted like that to stop aclocal dnl pulling things in needlessly. 
AC_PROVIDE_IFELSE([AC_PROG_GCJ], [LT_LANG(GCJ)], [AC_PROVIDE_IFELSE([A][M_PROG_GCJ], [LT_LANG(GCJ)], [AC_PROVIDE_IFELSE([LT_PROG_GCJ], [LT_LANG(GCJ)], [m4_ifdef([AC_PROG_GCJ], [m4_define([AC_PROG_GCJ], defn([AC_PROG_GCJ])[LT_LANG(GCJ)])]) m4_ifdef([A][M_PROG_GCJ], [m4_define([A][M_PROG_GCJ], defn([A][M_PROG_GCJ])[LT_LANG(GCJ)])]) m4_ifdef([LT_PROG_GCJ], [m4_define([LT_PROG_GCJ], defn([LT_PROG_GCJ])[LT_LANG(GCJ)])])])])]) AC_PROVIDE_IFELSE([AC_PROG_GO], [LT_LANG(GO)], [m4_define([AC_PROG_GO], defn([AC_PROG_GO])[LT_LANG(GO)])]) AC_PROVIDE_IFELSE([LT_PROG_RC], [LT_LANG(RC)], [m4_define([LT_PROG_RC], defn([LT_PROG_RC])[LT_LANG(RC)])]) ])# _LT_LANG_DEFAULT_CONFIG # Obsolete macros: AU_DEFUN([AC_LIBTOOL_CXX], [LT_LANG(C++)]) AU_DEFUN([AC_LIBTOOL_F77], [LT_LANG(Fortran 77)]) AU_DEFUN([AC_LIBTOOL_FC], [LT_LANG(Fortran)]) AU_DEFUN([AC_LIBTOOL_GCJ], [LT_LANG(Java)]) AU_DEFUN([AC_LIBTOOL_RC], [LT_LANG(Windows Resource)]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_LIBTOOL_CXX], []) dnl AC_DEFUN([AC_LIBTOOL_F77], []) dnl AC_DEFUN([AC_LIBTOOL_FC], []) dnl AC_DEFUN([AC_LIBTOOL_GCJ], []) dnl AC_DEFUN([AC_LIBTOOL_RC], []) # _LT_TAG_COMPILER # ---------------- m4_defun([_LT_TAG_COMPILER], [AC_REQUIRE([AC_PROG_CC])dnl _LT_DECL([LTCC], [CC], [1], [A C compiler])dnl _LT_DECL([LTCFLAGS], [CFLAGS], [1], [LTCC compiler flags])dnl _LT_TAGDECL([CC], [compiler], [1], [A language specific compiler])dnl _LT_TAGDECL([with_gcc], [GCC], [0], [Is the compiler the GNU compiler?])dnl # If no C compiler was specified, use CC. LTCC=${LTCC-"$CC"} # If no C compiler flags were specified, use CFLAGS. LTCFLAGS=${LTCFLAGS-"$CFLAGS"} # Allow CC to be a program name with arguments. compiler=$CC ])# _LT_TAG_COMPILER # _LT_COMPILER_BOILERPLATE # ------------------------ # Check for compiler boilerplate output or warnings with # the simple compiler test code. m4_defun([_LT_COMPILER_BOILERPLATE], [m4_require([_LT_DECL_SED])dnl ac_outfile=conftest.$ac_objext echo "$lt_simple_compile_test_code" >conftest.$ac_ext eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err _lt_compiler_boilerplate=`cat conftest.err` $RM conftest* ])# _LT_COMPILER_BOILERPLATE # _LT_LINKER_BOILERPLATE # ---------------------- # Check for linker boilerplate output or warnings with # the simple link test code. 
m4_defun([_LT_LINKER_BOILERPLATE], [m4_require([_LT_DECL_SED])dnl ac_outfile=conftest.$ac_objext echo "$lt_simple_link_test_code" >conftest.$ac_ext eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err _lt_linker_boilerplate=`cat conftest.err` $RM -r conftest* ])# _LT_LINKER_BOILERPLATE # _LT_REQUIRED_DARWIN_CHECKS # ------------------------- m4_defun_once([_LT_REQUIRED_DARWIN_CHECKS],[ case $host_os in rhapsody* | darwin*) AC_CHECK_TOOL([DSYMUTIL], [dsymutil], [:]) AC_CHECK_TOOL([NMEDIT], [nmedit], [:]) AC_CHECK_TOOL([LIPO], [lipo], [:]) AC_CHECK_TOOL([OTOOL], [otool], [:]) AC_CHECK_TOOL([OTOOL64], [otool64], [:]) _LT_DECL([], [DSYMUTIL], [1], [Tool to manipulate archived DWARF debug symbol files on Mac OS X]) _LT_DECL([], [NMEDIT], [1], [Tool to change global to local symbols on Mac OS X]) _LT_DECL([], [LIPO], [1], [Tool to manipulate fat objects and archives on Mac OS X]) _LT_DECL([], [OTOOL], [1], [ldd/readelf like tool for Mach-O binaries on Mac OS X]) _LT_DECL([], [OTOOL64], [1], [ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4]) AC_CACHE_CHECK([for -single_module linker flag],[lt_cv_apple_cc_single_mod], [lt_cv_apple_cc_single_mod=no if test -z "$LT_MULTI_MODULE"; then # By default we will add the -single_module flag. You can override # by either setting the environment variable LT_MULTI_MODULE # non-empty at configure time, or by adding -multi_module to the # link flags. rm -rf libconftest.dylib* echo "int foo(void){return 1;}" > conftest.c echo "$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ -dynamiclib -Wl,-single_module conftest.c" >&AS_MESSAGE_LOG_FD $LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ -dynamiclib -Wl,-single_module conftest.c 2>conftest.err _lt_result=$? # If there is a non-empty error log, and "single_module" # appears in it, assume the flag caused a linker warning if test -s conftest.err && $GREP single_module conftest.err; then cat conftest.err >&AS_MESSAGE_LOG_FD # Otherwise, if the output was created with a 0 exit code from # the compiler, it worked. elif test -f libconftest.dylib && test 0 = "$_lt_result"; then lt_cv_apple_cc_single_mod=yes else cat conftest.err >&AS_MESSAGE_LOG_FD fi rm -rf libconftest.dylib* rm -f conftest.* fi]) AC_CACHE_CHECK([for -exported_symbols_list linker flag], [lt_cv_ld_exported_symbols_list], [lt_cv_ld_exported_symbols_list=no save_LDFLAGS=$LDFLAGS echo "_main" > conftest.sym LDFLAGS="$LDFLAGS -Wl,-exported_symbols_list,conftest.sym" AC_LINK_IFELSE([AC_LANG_PROGRAM([],[])], [lt_cv_ld_exported_symbols_list=yes], [lt_cv_ld_exported_symbols_list=no]) LDFLAGS=$save_LDFLAGS ]) AC_CACHE_CHECK([for -force_load linker flag],[lt_cv_ld_force_load], [lt_cv_ld_force_load=no cat > conftest.c << _LT_EOF int forced_loaded() { return 2;} _LT_EOF echo "$LTCC $LTCFLAGS -c -o conftest.o conftest.c" >&AS_MESSAGE_LOG_FD $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&AS_MESSAGE_LOG_FD echo "$AR cru libconftest.a conftest.o" >&AS_MESSAGE_LOG_FD $AR cru libconftest.a conftest.o 2>&AS_MESSAGE_LOG_FD echo "$RANLIB libconftest.a" >&AS_MESSAGE_LOG_FD $RANLIB libconftest.a 2>&AS_MESSAGE_LOG_FD cat > conftest.c << _LT_EOF int main() { return 0;} _LT_EOF echo "$LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a" >&AS_MESSAGE_LOG_FD $LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a 2>conftest.err _lt_result=$? 
if test -s conftest.err && $GREP force_load conftest.err; then cat conftest.err >&AS_MESSAGE_LOG_FD elif test -f conftest && test 0 = "$_lt_result" && $GREP forced_load conftest >/dev/null 2>&1; then lt_cv_ld_force_load=yes else cat conftest.err >&AS_MESSAGE_LOG_FD fi rm -f conftest.err libconftest.a conftest conftest.c rm -rf conftest.dSYM ]) case $host_os in rhapsody* | darwin1.[[012]]) _lt_dar_allow_undefined='$wl-undefined ${wl}suppress' ;; darwin1.*) _lt_dar_allow_undefined='$wl-flat_namespace $wl-undefined ${wl}suppress' ;; darwin*) # darwin 5.x on # if running on 10.5 or later, the deployment target defaults # to the OS version, if on x86, and 10.4, the deployment # target defaults to 10.4. Don't you love it? case ${MACOSX_DEPLOYMENT_TARGET-10.0},$host in 10.0,*86*-darwin8*|10.0,*-darwin[[91]]*) _lt_dar_allow_undefined='$wl-undefined ${wl}dynamic_lookup' ;; 10.[[012]][[,.]]*) _lt_dar_allow_undefined='$wl-flat_namespace $wl-undefined ${wl}suppress' ;; 10.*) _lt_dar_allow_undefined='$wl-undefined ${wl}dynamic_lookup' ;; esac ;; esac if test yes = "$lt_cv_apple_cc_single_mod"; then _lt_dar_single_mod='$single_module' fi if test yes = "$lt_cv_ld_exported_symbols_list"; then _lt_dar_export_syms=' $wl-exported_symbols_list,$output_objdir/$libname-symbols.expsym' else _lt_dar_export_syms='~$NMEDIT -s $output_objdir/$libname-symbols.expsym $lib' fi if test : != "$DSYMUTIL" && test no = "$lt_cv_ld_force_load"; then _lt_dsymutil='~$DSYMUTIL $lib || :' else _lt_dsymutil= fi ;; esac ]) # _LT_DARWIN_LINKER_FEATURES([TAG]) # --------------------------------- # Checks for linker and compiler features on darwin m4_defun([_LT_DARWIN_LINKER_FEATURES], [ m4_require([_LT_REQUIRED_DARWIN_CHECKS]) _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_automatic, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported if test yes = "$lt_cv_ld_force_load"; then _LT_TAGVAR(whole_archive_flag_spec, $1)='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience $wl-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`' m4_case([$1], [F77], [_LT_TAGVAR(compiler_needs_object, $1)=yes], [FC], [_LT_TAGVAR(compiler_needs_object, $1)=yes]) else _LT_TAGVAR(whole_archive_flag_spec, $1)='' fi _LT_TAGVAR(link_all_deplibs, $1)=yes _LT_TAGVAR(allow_undefined_flag, $1)=$_lt_dar_allow_undefined case $cc_basename in ifort*|nagfor*) _lt_dar_can_shared=yes ;; *) _lt_dar_can_shared=$GCC ;; esac if test yes = "$_lt_dar_can_shared"; then output_verbose_link_cmd=func_echo_all _LT_TAGVAR(archive_cmds, $1)="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dsymutil" _LT_TAGVAR(module_cmds, $1)="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dsymutil" _LT_TAGVAR(archive_expsym_cmds, $1)="sed 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dar_export_syms$_lt_dsymutil" _LT_TAGVAR(module_expsym_cmds, $1)="sed -e 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dar_export_syms$_lt_dsymutil" m4_if([$1], [CXX], [ if test yes != "$lt_cv_apple_cc_single_mod"; then _LT_TAGVAR(archive_cmds, $1)="\$CC -r -keep_private_externs -nostdlib -o 
\$lib-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$lib-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring$_lt_dsymutil" _LT_TAGVAR(archive_expsym_cmds, $1)="sed 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -r -keep_private_externs -nostdlib -o \$lib-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$lib-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring$_lt_dar_export_syms$_lt_dsymutil" fi ],[]) else _LT_TAGVAR(ld_shlibs, $1)=no fi ]) # _LT_SYS_MODULE_PATH_AIX([TAGNAME]) # ---------------------------------- # Links a minimal program and checks the executable # for the system default hardcoded library path. In most cases, # this is /usr/lib:/lib, but when the MPI compilers are used # the location of the communication and MPI libs are included too. # If we don't find anything, use the default library path according # to the aix ld manual. # Store the results from the different compilers for each TAGNAME. # Allow to override them for all tags through lt_cv_aix_libpath. m4_defun([_LT_SYS_MODULE_PATH_AIX], [m4_require([_LT_DECL_SED])dnl if test set = "${lt_cv_aix_libpath+set}"; then aix_libpath=$lt_cv_aix_libpath else AC_CACHE_VAL([_LT_TAGVAR([lt_cv_aix_libpath_], [$1])], [AC_LINK_IFELSE([AC_LANG_PROGRAM],[ lt_aix_libpath_sed='[ /Import File Strings/,/^$/ { /^0/ { s/^0 *\([^ ]*\) *$/\1/ p } }]' _LT_TAGVAR([lt_cv_aix_libpath_], [$1])=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` # Check for a 64-bit object if we didn't find anything. if test -z "$_LT_TAGVAR([lt_cv_aix_libpath_], [$1])"; then _LT_TAGVAR([lt_cv_aix_libpath_], [$1])=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` fi],[]) if test -z "$_LT_TAGVAR([lt_cv_aix_libpath_], [$1])"; then _LT_TAGVAR([lt_cv_aix_libpath_], [$1])=/usr/lib:/lib fi ]) aix_libpath=$_LT_TAGVAR([lt_cv_aix_libpath_], [$1]) fi ])# _LT_SYS_MODULE_PATH_AIX # _LT_SHELL_INIT(ARG) # ------------------- m4_define([_LT_SHELL_INIT], [m4_divert_text([M4SH-INIT], [$1 ])])# _LT_SHELL_INIT # _LT_PROG_ECHO_BACKSLASH # ----------------------- # Find how we can fake an echo command that does not interpret backslash. # In particular, with Autoconf 2.60 or later we add some code to the start # of the generated configure script that will find a shell with a builtin # printf (that we can use as an echo command). m4_defun([_LT_PROG_ECHO_BACKSLASH], [ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO AC_MSG_CHECKING([how to print strings]) # Test print first, because it will be a builtin if present. if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \ test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then ECHO='print -r --' elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then ECHO='printf %s\n' else # Use this function as a fallback that always works. func_fallback_echo () { eval 'cat <<_LTECHO_EOF $[]1 _LTECHO_EOF' } ECHO='func_fallback_echo' fi # func_echo_all arg... # Invoke $ECHO with all args, space-separated. 
func_echo_all () { $ECHO "$*" } case $ECHO in printf*) AC_MSG_RESULT([printf]) ;; print*) AC_MSG_RESULT([print -r]) ;; *) AC_MSG_RESULT([cat]) ;; esac m4_ifdef([_AS_DETECT_SUGGESTED], [_AS_DETECT_SUGGESTED([ test -n "${ZSH_VERSION+set}${BASH_VERSION+set}" || ( ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO PATH=/empty FPATH=/empty; export PATH FPATH test "X`printf %s $ECHO`" = "X$ECHO" \ || test "X`print -r -- $ECHO`" = "X$ECHO" )])]) _LT_DECL([], [SHELL], [1], [Shell to use when invoking shell scripts]) _LT_DECL([], [ECHO], [1], [An echo program that protects backslashes]) ])# _LT_PROG_ECHO_BACKSLASH # _LT_WITH_SYSROOT # ---------------- AC_DEFUN([_LT_WITH_SYSROOT], [AC_MSG_CHECKING([for sysroot]) AC_ARG_WITH([sysroot], [AS_HELP_STRING([--with-sysroot@<:@=DIR@:>@], [Search for dependent libraries within DIR (or the compiler's sysroot if not specified).])], [], [with_sysroot=no]) dnl lt_sysroot will always be passed unquoted. We quote it here dnl in case the user passed a directory name. lt_sysroot= case $with_sysroot in #( yes) if test yes = "$GCC"; then lt_sysroot=`$CC --print-sysroot 2>/dev/null` fi ;; #( /*) lt_sysroot=`echo "$with_sysroot" | sed -e "$sed_quote_subst"` ;; #( no|'') ;; #( *) AC_MSG_RESULT([$with_sysroot]) AC_MSG_ERROR([The sysroot must be an absolute path.]) ;; esac AC_MSG_RESULT([${lt_sysroot:-no}]) _LT_DECL([], [lt_sysroot], [0], [The root where to search for ]dnl [dependent libraries, and where our libraries should be installed.])]) # _LT_ENABLE_LOCK # --------------- m4_defun([_LT_ENABLE_LOCK], [AC_ARG_ENABLE([libtool-lock], [AS_HELP_STRING([--disable-libtool-lock], [avoid locking (might break parallel builds)])]) test no = "$enable_libtool_lock" || enable_libtool_lock=yes # Some flags need to be propagated to the compiler or linker for good # libtool support. case $host in ia64-*-hpux*) # Find out what ABI is being produced by ac_compile, and set mode # options accordingly. echo 'int i;' > conftest.$ac_ext if AC_TRY_EVAL(ac_compile); then case `/usr/bin/file conftest.$ac_objext` in *ELF-32*) HPUX_IA64_MODE=32 ;; *ELF-64*) HPUX_IA64_MODE=64 ;; esac fi rm -rf conftest* ;; *-*-irix6*) # Find out what ABI is being produced by ac_compile, and set linker # options accordingly. echo '[#]line '$LINENO' "configure"' > conftest.$ac_ext if AC_TRY_EVAL(ac_compile); then if test yes = "$lt_cv_prog_gnu_ld"; then case `/usr/bin/file conftest.$ac_objext` in *32-bit*) LD="${LD-ld} -melf32bsmip" ;; *N32*) LD="${LD-ld} -melf32bmipn32" ;; *64-bit*) LD="${LD-ld} -melf64bmip" ;; esac else case `/usr/bin/file conftest.$ac_objext` in *32-bit*) LD="${LD-ld} -32" ;; *N32*) LD="${LD-ld} -n32" ;; *64-bit*) LD="${LD-ld} -64" ;; esac fi fi rm -rf conftest* ;; mips64*-*linux*) # Find out what ABI is being produced by ac_compile, and set linker # options accordingly. 
echo '[#]line '$LINENO' "configure"' > conftest.$ac_ext if AC_TRY_EVAL(ac_compile); then emul=elf case `/usr/bin/file conftest.$ac_objext` in *32-bit*) emul="${emul}32" ;; *64-bit*) emul="${emul}64" ;; esac case `/usr/bin/file conftest.$ac_objext` in *MSB*) emul="${emul}btsmip" ;; *LSB*) emul="${emul}ltsmip" ;; esac case `/usr/bin/file conftest.$ac_objext` in *N32*) emul="${emul}n32" ;; esac LD="${LD-ld} -m $emul" fi rm -rf conftest* ;; x86_64-*kfreebsd*-gnu|x86_64-*linux*|powerpc*-*linux*| \ s390*-*linux*|s390*-*tpf*|sparc*-*linux*) # Find out what ABI is being produced by ac_compile, and set linker # options accordingly. Note that the listed cases only cover the # situations where additional linker options are needed (such as when # doing 32-bit compilation for a host where ld defaults to 64-bit, or # vice versa); the common cases where no linker options are needed do # not appear in the list. echo 'int i;' > conftest.$ac_ext if AC_TRY_EVAL(ac_compile); then case `/usr/bin/file conftest.o` in *32-bit*) case $host in x86_64-*kfreebsd*-gnu) LD="${LD-ld} -m elf_i386_fbsd" ;; x86_64-*linux*) case `/usr/bin/file conftest.o` in *x86-64*) LD="${LD-ld} -m elf32_x86_64" ;; *) LD="${LD-ld} -m elf_i386" ;; esac ;; powerpc64le-*linux*) LD="${LD-ld} -m elf32lppclinux" ;; powerpc64-*linux*) LD="${LD-ld} -m elf32ppclinux" ;; s390x-*linux*) LD="${LD-ld} -m elf_s390" ;; sparc64-*linux*) LD="${LD-ld} -m elf32_sparc" ;; esac ;; *64-bit*) case $host in x86_64-*kfreebsd*-gnu) LD="${LD-ld} -m elf_x86_64_fbsd" ;; x86_64-*linux*) LD="${LD-ld} -m elf_x86_64" ;; powerpcle-*linux*) LD="${LD-ld} -m elf64lppc" ;; powerpc-*linux*) LD="${LD-ld} -m elf64ppc" ;; s390*-*linux*|s390*-*tpf*) LD="${LD-ld} -m elf64_s390" ;; sparc*-*linux*) LD="${LD-ld} -m elf64_sparc" ;; esac ;; esac fi rm -rf conftest* ;; *-*-sco3.2v5*) # On SCO OpenServer 5, we need -belf to get full-featured binaries. SAVE_CFLAGS=$CFLAGS CFLAGS="$CFLAGS -belf" AC_CACHE_CHECK([whether the C compiler needs -belf], lt_cv_cc_needs_belf, [AC_LANG_PUSH(C) AC_LINK_IFELSE([AC_LANG_PROGRAM([[]],[[]])],[lt_cv_cc_needs_belf=yes],[lt_cv_cc_needs_belf=no]) AC_LANG_POP]) if test yes != "$lt_cv_cc_needs_belf"; then # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf CFLAGS=$SAVE_CFLAGS fi ;; *-*solaris*) # Find out what ABI is being produced by ac_compile, and set linker # options accordingly. echo 'int i;' > conftest.$ac_ext if AC_TRY_EVAL(ac_compile); then case `/usr/bin/file conftest.o` in *64-bit*) case $lt_cv_prog_gnu_ld in yes*) case $host in i?86-*-solaris*|x86_64-*-solaris*) LD="${LD-ld} -m elf_x86_64" ;; sparc*-*-solaris*) LD="${LD-ld} -m elf64_sparc" ;; esac # GNU ld 2.21 introduced _sol2 emulations. Use them if available. if ${LD-ld} -V | grep _sol2 >/dev/null 2>&1; then LD=${LD-ld}_sol2 fi ;; *) if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then LD="${LD-ld} -64" fi ;; esac ;; esac fi rm -rf conftest* ;; esac need_locks=$enable_libtool_lock ])# _LT_ENABLE_LOCK # _LT_PROG_AR # ----------- m4_defun([_LT_PROG_AR], [AC_CHECK_TOOLS(AR, [ar], false) : ${AR=ar} : ${AR_FLAGS=cru} _LT_DECL([], [AR], [1], [The archiver]) _LT_DECL([], [AR_FLAGS], [1], [Flags to create an archive]) AC_CACHE_CHECK([for archiver @FILE support], [lt_cv_ar_at_file], [lt_cv_ar_at_file=no AC_COMPILE_IFELSE([AC_LANG_PROGRAM], [echo conftest.$ac_objext > conftest.lst lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&AS_MESSAGE_LOG_FD' AC_TRY_EVAL([lt_ar_try]) if test 0 -eq "$ac_status"; then # Ensure the archiver fails upon bogus file names. 
rm -f conftest.$ac_objext libconftest.a AC_TRY_EVAL([lt_ar_try]) if test 0 -ne "$ac_status"; then lt_cv_ar_at_file=@ fi fi rm -f conftest.* libconftest.a ]) ]) if test no = "$lt_cv_ar_at_file"; then archiver_list_spec= else archiver_list_spec=$lt_cv_ar_at_file fi _LT_DECL([], [archiver_list_spec], [1], [How to feed a file listing to the archiver]) ])# _LT_PROG_AR # _LT_CMD_OLD_ARCHIVE # ------------------- m4_defun([_LT_CMD_OLD_ARCHIVE], [_LT_PROG_AR AC_CHECK_TOOL(STRIP, strip, :) test -z "$STRIP" && STRIP=: _LT_DECL([], [STRIP], [1], [A symbol stripping program]) AC_CHECK_TOOL(RANLIB, ranlib, :) test -z "$RANLIB" && RANLIB=: _LT_DECL([], [RANLIB], [1], [Commands used to install an old-style archive]) # Determine commands to create old-style static archives. old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs' old_postinstall_cmds='chmod 644 $oldlib' old_postuninstall_cmds= if test -n "$RANLIB"; then case $host_os in bitrig* | openbsd*) old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$tool_oldlib" ;; *) old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$tool_oldlib" ;; esac old_archive_cmds="$old_archive_cmds~\$RANLIB \$tool_oldlib" fi case $host_os in darwin*) lock_old_archive_extraction=yes ;; *) lock_old_archive_extraction=no ;; esac _LT_DECL([], [old_postinstall_cmds], [2]) _LT_DECL([], [old_postuninstall_cmds], [2]) _LT_TAGDECL([], [old_archive_cmds], [2], [Commands used to build an old-style archive]) _LT_DECL([], [lock_old_archive_extraction], [0], [Whether to use a lock for old archive extraction]) ])# _LT_CMD_OLD_ARCHIVE # _LT_COMPILER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS, # [OUTPUT-FILE], [ACTION-SUCCESS], [ACTION-FAILURE]) # ---------------------------------------------------------------- # Check whether the given compiler option works AC_DEFUN([_LT_COMPILER_OPTION], [m4_require([_LT_FILEUTILS_DEFAULTS])dnl m4_require([_LT_DECL_SED])dnl AC_CACHE_CHECK([$1], [$2], [$2=no m4_if([$4], , [ac_outfile=conftest.$ac_objext], [ac_outfile=$4]) echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="$3" ## exclude from sc_useless_quotes_in_assignment # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. # The option is referenced via a variable to avoid confusing sed. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&AS_MESSAGE_LOG_FD) (eval "$lt_compile" 2>conftest.err) ac_status=$? cat conftest.err >&AS_MESSAGE_LOG_FD echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD if (exit $ac_status) && test -s "$ac_outfile"; then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings other than the usual output. $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if test ! 
-s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then $2=yes fi fi $RM conftest* ]) if test yes = "[$]$2"; then m4_if([$5], , :, [$5]) else m4_if([$6], , :, [$6]) fi ])# _LT_COMPILER_OPTION # Old name: AU_ALIAS([AC_LIBTOOL_COMPILER_OPTION], [_LT_COMPILER_OPTION]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_LIBTOOL_COMPILER_OPTION], []) # _LT_LINKER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS, # [ACTION-SUCCESS], [ACTION-FAILURE]) # ---------------------------------------------------- # Check whether the given linker option works AC_DEFUN([_LT_LINKER_OPTION], [m4_require([_LT_FILEUTILS_DEFAULTS])dnl m4_require([_LT_DECL_SED])dnl AC_CACHE_CHECK([$1], [$2], [$2=no save_LDFLAGS=$LDFLAGS LDFLAGS="$LDFLAGS $3" echo "$lt_simple_link_test_code" > conftest.$ac_ext if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then # The linker can only warn and ignore the option if not recognized # So say no if there are warnings if test -s conftest.err; then # Append any errors to the config.log. cat conftest.err 1>&AS_MESSAGE_LOG_FD $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if diff conftest.exp conftest.er2 >/dev/null; then $2=yes fi else $2=yes fi fi $RM -r conftest* LDFLAGS=$save_LDFLAGS ]) if test yes = "[$]$2"; then m4_if([$4], , :, [$4]) else m4_if([$5], , :, [$5]) fi ])# _LT_LINKER_OPTION # Old name: AU_ALIAS([AC_LIBTOOL_LINKER_OPTION], [_LT_LINKER_OPTION]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_LIBTOOL_LINKER_OPTION], []) # LT_CMD_MAX_LEN #--------------- AC_DEFUN([LT_CMD_MAX_LEN], [AC_REQUIRE([AC_CANONICAL_HOST])dnl # find the maximum length of command line arguments AC_MSG_CHECKING([the maximum length of command line arguments]) AC_CACHE_VAL([lt_cv_sys_max_cmd_len], [dnl i=0 teststring=ABCD case $build_os in msdosdjgpp*) # On DJGPP, this test can blow up pretty badly due to problems in libc # (any single argument exceeding 2000 bytes causes a buffer overrun # during glob expansion). Even if it were fixed, the result of this # check would be larger than it should be. lt_cv_sys_max_cmd_len=12288; # 12K is about right ;; gnu*) # Under GNU Hurd, this test is not required because there is # no limit to the length of command line arguments. # Libtool will interpret -1 as no limit whatsoever lt_cv_sys_max_cmd_len=-1; ;; cygwin* | mingw* | cegcc*) # On Win9x/ME, this test blows up -- it succeeds, but takes # about 5 minutes as the teststring grows exponentially. # Worse, since 9x/ME are not pre-emptively multitasking, # you end up with a "frozen" computer, even though with patience # the test eventually succeeds (with a max line length of 256k). # Instead, let's just punt: use the minimum linelength reported by # all of the supported platforms: 8192 (on NT/2K/XP). lt_cv_sys_max_cmd_len=8192; ;; mint*) # On MiNT this can take a long time and run out of memory. lt_cv_sys_max_cmd_len=8192; ;; amigaos*) # On AmigaOS with pdksh, this test takes hours, literally. # So we just punt and use a minimum line length of 8192. lt_cv_sys_max_cmd_len=8192; ;; bitrig* | darwin* | dragonfly* | freebsd* | netbsd* | openbsd*) # This has been around since 386BSD, at least. Likely further. 
if test -x /sbin/sysctl; then lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax` elif test -x /usr/sbin/sysctl; then lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax` else lt_cv_sys_max_cmd_len=65536 # usable default for all BSDs fi # And add a safety zone lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` ;; interix*) # We know the value 262144 and hardcode it with a safety zone (like BSD) lt_cv_sys_max_cmd_len=196608 ;; os2*) # The test takes a long time on OS/2. lt_cv_sys_max_cmd_len=8192 ;; osf*) # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not # nice to cause kernel panics so lets avoid the loop below. # First set a reasonable default. lt_cv_sys_max_cmd_len=16384 # if test -x /sbin/sysconfig; then case `/sbin/sysconfig -q proc exec_disable_arg_limit` in *1*) lt_cv_sys_max_cmd_len=-1 ;; esac fi ;; sco3.2v5*) lt_cv_sys_max_cmd_len=102400 ;; sysv5* | sco5v6* | sysv4.2uw2*) kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null` if test -n "$kargmax"; then lt_cv_sys_max_cmd_len=`echo $kargmax | sed 's/.*[[ ]]//'` else lt_cv_sys_max_cmd_len=32768 fi ;; *) lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null` if test -n "$lt_cv_sys_max_cmd_len" && \ test undefined != "$lt_cv_sys_max_cmd_len"; then lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` else # Make teststring a little bigger before we do anything with it. # a 1K string should be a reasonable start. for i in 1 2 3 4 5 6 7 8; do teststring=$teststring$teststring done SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}} # If test is not a shell built-in, we'll probably end up computing a # maximum length that is only half of the actual maximum length, but # we can't tell. while { test X`env echo "$teststring$teststring" 2>/dev/null` \ = "X$teststring$teststring"; } >/dev/null 2>&1 && test 17 != "$i" # 1/2 MB should be enough do i=`expr $i + 1` teststring=$teststring$teststring done # Only check the string length outside the loop. lt_cv_sys_max_cmd_len=`expr "X$teststring" : ".*" 2>&1` teststring= # Add a significant safety factor because C++ compilers can tack on # massive amounts of additional arguments before passing them to the # linker. It appears as though 1/2 is a usable value. 
      lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2`
    fi
    ;;
  esac
])
if test -n "$lt_cv_sys_max_cmd_len"; then
  AC_MSG_RESULT($lt_cv_sys_max_cmd_len)
else
  AC_MSG_RESULT(none)
fi
max_cmd_len=$lt_cv_sys_max_cmd_len
_LT_DECL([], [max_cmd_len], [0],
    [What is the maximum length of a command?])
])# LT_CMD_MAX_LEN

# Old name:
AU_ALIAS([AC_LIBTOOL_SYS_MAX_CMD_LEN], [LT_CMD_MAX_LEN])
dnl aclocal-1.4 backwards compatibility:
dnl AC_DEFUN([AC_LIBTOOL_SYS_MAX_CMD_LEN], [])


# _LT_HEADER_DLFCN
# ----------------
m4_defun([_LT_HEADER_DLFCN],
[AC_CHECK_HEADERS([dlfcn.h], [], [], [AC_INCLUDES_DEFAULT])dnl
])# _LT_HEADER_DLFCN


# _LT_TRY_DLOPEN_SELF (ACTION-IF-TRUE, ACTION-IF-TRUE-W-USCORE,
#                      ACTION-IF-FALSE, ACTION-IF-CROSS-COMPILING)
# ----------------------------------------------------------------
m4_defun([_LT_TRY_DLOPEN_SELF],
[m4_require([_LT_HEADER_DLFCN])dnl
if test yes = "$cross_compiling"; then :
  [$4]
else
  lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
  lt_status=$lt_dlunknown
  cat > conftest.$ac_ext <<_LT_EOF
[#line $LINENO "configure"
#include "confdefs.h"

#if HAVE_DLFCN_H
#include <dlfcn.h>
#endif

#include <stdio.h>

#ifdef RTLD_GLOBAL
# define LT_DLGLOBAL RTLD_GLOBAL
#else
# ifdef DL_GLOBAL
#  define LT_DLGLOBAL DL_GLOBAL
# else
#  define LT_DLGLOBAL 0
# endif
#endif

/* We may have to define LT_DLLAZY_OR_NOW in the command line if we
   find out it does not work in some platform. */
#ifndef LT_DLLAZY_OR_NOW
# ifdef RTLD_LAZY
#  define LT_DLLAZY_OR_NOW RTLD_LAZY
# else
#  ifdef DL_LAZY
#   define LT_DLLAZY_OR_NOW DL_LAZY
#  else
#   ifdef RTLD_NOW
#    define LT_DLLAZY_OR_NOW RTLD_NOW
#   else
#    ifdef DL_NOW
#     define LT_DLLAZY_OR_NOW DL_NOW
#    else
#     define LT_DLLAZY_OR_NOW 0
#    endif
#   endif
#  endif
# endif
#endif

/* When -fvisibility=hidden is used, assume the code has been annotated
   correspondingly for the symbols needed.  */
#if defined __GNUC__ && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3))
int fnord () __attribute__((visibility("default")));
#endif

int fnord () { return 42; }
int main ()
{
  void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW);
  int status = $lt_dlunknown;

  if (self)
    {
      if (dlsym (self,"fnord")) status = $lt_dlno_uscore;
      else
        {
          if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore;
          else puts (dlerror ());
        }
      /* dlclose (self); */
    }
  else
    puts (dlerror ());

  return status;
}]
_LT_EOF
  if AC_TRY_EVAL(ac_link) && test -s "conftest$ac_exeext" 2>/dev/null; then
    (./conftest; exit; ) >&AS_MESSAGE_LOG_FD 2>/dev/null
    lt_status=$?
    case x$lt_status in
      x$lt_dlno_uscore) $1 ;;
      x$lt_dlneed_uscore) $2 ;;
      x$lt_dlunknown|x*) $3 ;;
    esac
  else :
    # compilation failed
    $3
  fi
fi
rm -fr conftest*
])# _LT_TRY_DLOPEN_SELF


# LT_SYS_DLOPEN_SELF
# ------------------
AC_DEFUN([LT_SYS_DLOPEN_SELF],
[m4_require([_LT_HEADER_DLFCN])dnl
if test yes != "$enable_dlopen"; then
  enable_dlopen=unknown
  enable_dlopen_self=unknown
  enable_dlopen_self_static=unknown
else
  lt_cv_dlopen=no
  lt_cv_dlopen_libs=

  case $host_os in
  beos*)
    lt_cv_dlopen=load_add_on
    lt_cv_dlopen_libs=
    lt_cv_dlopen_self=yes
    ;;

  mingw* | pw32* | cegcc*)
    lt_cv_dlopen=LoadLibrary
    lt_cv_dlopen_libs=
    ;;

  cygwin*)
    lt_cv_dlopen=dlopen
    lt_cv_dlopen_libs=
    ;;

  darwin*)
    # if libdl is installed we need to link against it
    AC_CHECK_LIB([dl], [dlopen],
                [lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-ldl],[
    lt_cv_dlopen=dyld
    lt_cv_dlopen_libs=
    lt_cv_dlopen_self=yes
    ])
    ;;

  tpf*)
    # Don't try to run any link tests for TPF.  We know it's impossible
    # because TPF is a cross-compiler, and we know how we open DSOs.
lt_cv_dlopen=dlopen lt_cv_dlopen_libs= lt_cv_dlopen_self=no ;; *) AC_CHECK_FUNC([shl_load], [lt_cv_dlopen=shl_load], [AC_CHECK_LIB([dld], [shl_load], [lt_cv_dlopen=shl_load lt_cv_dlopen_libs=-ldld], [AC_CHECK_FUNC([dlopen], [lt_cv_dlopen=dlopen], [AC_CHECK_LIB([dl], [dlopen], [lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-ldl], [AC_CHECK_LIB([svld], [dlopen], [lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-lsvld], [AC_CHECK_LIB([dld], [dld_link], [lt_cv_dlopen=dld_link lt_cv_dlopen_libs=-ldld]) ]) ]) ]) ]) ]) ;; esac if test no = "$lt_cv_dlopen"; then enable_dlopen=no else enable_dlopen=yes fi case $lt_cv_dlopen in dlopen) save_CPPFLAGS=$CPPFLAGS test yes = "$ac_cv_header_dlfcn_h" && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H" save_LDFLAGS=$LDFLAGS wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\" save_LIBS=$LIBS LIBS="$lt_cv_dlopen_libs $LIBS" AC_CACHE_CHECK([whether a program can dlopen itself], lt_cv_dlopen_self, [dnl _LT_TRY_DLOPEN_SELF( lt_cv_dlopen_self=yes, lt_cv_dlopen_self=yes, lt_cv_dlopen_self=no, lt_cv_dlopen_self=cross) ]) if test yes = "$lt_cv_dlopen_self"; then wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\" AC_CACHE_CHECK([whether a statically linked program can dlopen itself], lt_cv_dlopen_self_static, [dnl _LT_TRY_DLOPEN_SELF( lt_cv_dlopen_self_static=yes, lt_cv_dlopen_self_static=yes, lt_cv_dlopen_self_static=no, lt_cv_dlopen_self_static=cross) ]) fi CPPFLAGS=$save_CPPFLAGS LDFLAGS=$save_LDFLAGS LIBS=$save_LIBS ;; esac case $lt_cv_dlopen_self in yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;; *) enable_dlopen_self=unknown ;; esac case $lt_cv_dlopen_self_static in yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;; *) enable_dlopen_self_static=unknown ;; esac fi _LT_DECL([dlopen_support], [enable_dlopen], [0], [Whether dlopen is supported]) _LT_DECL([dlopen_self], [enable_dlopen_self], [0], [Whether dlopen of programs is supported]) _LT_DECL([dlopen_self_static], [enable_dlopen_self_static], [0], [Whether dlopen of statically linked programs is supported]) ])# LT_SYS_DLOPEN_SELF # Old name: AU_ALIAS([AC_LIBTOOL_DLOPEN_SELF], [LT_SYS_DLOPEN_SELF]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_LIBTOOL_DLOPEN_SELF], []) # _LT_COMPILER_C_O([TAGNAME]) # --------------------------- # Check to see if options -c and -o are simultaneously supported by compiler. # This macro does not hard code the compiler like AC_PROG_CC_C_O. m4_defun([_LT_COMPILER_C_O], [m4_require([_LT_DECL_SED])dnl m4_require([_LT_FILEUTILS_DEFAULTS])dnl m4_require([_LT_TAG_COMPILER])dnl AC_CACHE_CHECK([if $compiler supports -c -o file.$ac_objext], [_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)], [_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=no $RM -r conftest 2>/dev/null mkdir conftest cd conftest mkdir out echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="-o out/conftest2.$ac_objext" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&AS_MESSAGE_LOG_FD) (eval "$lt_compile" 2>out/conftest.err) ac_status=$? cat out/conftest.err >&AS_MESSAGE_LOG_FD echo "$as_me:$LINENO: \$? 
= $ac_status" >&AS_MESSAGE_LOG_FD if (exit $ac_status) && test -s out/conftest2.$ac_objext then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then _LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes fi fi chmod u+w . 2>&AS_MESSAGE_LOG_FD $RM conftest* # SGI C++ compiler will create directory out/ii_files/ for # template instantiation test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files $RM out/* && rmdir out cd .. $RM -r conftest $RM conftest* ]) _LT_TAGDECL([compiler_c_o], [lt_cv_prog_compiler_c_o], [1], [Does compiler simultaneously support -c and -o options?]) ])# _LT_COMPILER_C_O # _LT_COMPILER_FILE_LOCKS([TAGNAME]) # ---------------------------------- # Check to see if we can do hard links to lock some files if needed m4_defun([_LT_COMPILER_FILE_LOCKS], [m4_require([_LT_ENABLE_LOCK])dnl m4_require([_LT_FILEUTILS_DEFAULTS])dnl _LT_COMPILER_C_O([$1]) hard_links=nottested if test no = "$_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)" && test no != "$need_locks"; then # do not overwrite the value of need_locks provided by the user AC_MSG_CHECKING([if we can lock with hard links]) hard_links=yes $RM conftest* ln conftest.a conftest.b 2>/dev/null && hard_links=no touch conftest.a ln conftest.a conftest.b 2>&5 || hard_links=no ln conftest.a conftest.b 2>/dev/null && hard_links=no AC_MSG_RESULT([$hard_links]) if test no = "$hard_links"; then AC_MSG_WARN(['$CC' does not support '-c -o', so 'make -j' may be unsafe]) need_locks=warn fi else need_locks=no fi _LT_DECL([], [need_locks], [1], [Must we lock files when doing compilation?]) ])# _LT_COMPILER_FILE_LOCKS # _LT_CHECK_OBJDIR # ---------------- m4_defun([_LT_CHECK_OBJDIR], [AC_CACHE_CHECK([for objdir], [lt_cv_objdir], [rm -f .libs 2>/dev/null mkdir .libs 2>/dev/null if test -d .libs; then lt_cv_objdir=.libs else # MS-DOS does not allow filenames that begin with a dot. lt_cv_objdir=_libs fi rmdir .libs 2>/dev/null]) objdir=$lt_cv_objdir _LT_DECL([], [objdir], [0], [The name of the directory that contains temporary libtool files])dnl m4_pattern_allow([LT_OBJDIR])dnl AC_DEFINE_UNQUOTED([LT_OBJDIR], "$lt_cv_objdir/", [Define to the sub-directory where libtool stores uninstalled libraries.]) ])# _LT_CHECK_OBJDIR # _LT_LINKER_HARDCODE_LIBPATH([TAGNAME]) # -------------------------------------- # Check hardcoding attributes. m4_defun([_LT_LINKER_HARDCODE_LIBPATH], [AC_MSG_CHECKING([how to hardcode library paths into programs]) _LT_TAGVAR(hardcode_action, $1)= if test -n "$_LT_TAGVAR(hardcode_libdir_flag_spec, $1)" || test -n "$_LT_TAGVAR(runpath_var, $1)" || test yes = "$_LT_TAGVAR(hardcode_automatic, $1)"; then # We can hardcode non-existent directories. if test no != "$_LT_TAGVAR(hardcode_direct, $1)" && # If the only mechanism to avoid hardcoding is shlibpath_var, we # have to relink, otherwise we might link with an installed library # when we should be linking with a yet-to-be-installed one ## test no != "$_LT_TAGVAR(hardcode_shlibpath_var, $1)" && test no != "$_LT_TAGVAR(hardcode_minus_L, $1)"; then # Linking always hardcodes the temporary library directory. _LT_TAGVAR(hardcode_action, $1)=relink else # We can link without hardcoding, and we can hardcode nonexisting dirs. 
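&AS_MESSAGE_LOG_FD">
# Illustrative note (not part of upstream libtool): on typical ELF/GNU ld
# hosts -rpath may name directories that do not exist yet, so this branch
# usually selects "immediate"; "relink" means install-time relinking is the
# only safe option, and "unsupported" means no path can be embedded at all.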
_LT_TAGVAR(hardcode_action, $1)=immediate fi else # We cannot hardcode anything, or else we can only hardcode existing # directories. _LT_TAGVAR(hardcode_action, $1)=unsupported fi AC_MSG_RESULT([$_LT_TAGVAR(hardcode_action, $1)]) if test relink = "$_LT_TAGVAR(hardcode_action, $1)" || test yes = "$_LT_TAGVAR(inherit_rpath, $1)"; then # Fast installation is not supported enable_fast_install=no elif test yes = "$shlibpath_overrides_runpath" || test no = "$enable_shared"; then # Fast installation is not necessary enable_fast_install=needless fi _LT_TAGDECL([], [hardcode_action], [0], [How to hardcode a shared library path into an executable]) ])# _LT_LINKER_HARDCODE_LIBPATH # _LT_CMD_STRIPLIB # ---------------- m4_defun([_LT_CMD_STRIPLIB], [m4_require([_LT_DECL_EGREP]) striplib= old_striplib= AC_MSG_CHECKING([whether stripping libraries is possible]) if test -n "$STRIP" && $STRIP -V 2>&1 | $GREP "GNU strip" >/dev/null; then test -z "$old_striplib" && old_striplib="$STRIP --strip-debug" test -z "$striplib" && striplib="$STRIP --strip-unneeded" AC_MSG_RESULT([yes]) else # FIXME - insert some real tests, host_os isn't really good enough case $host_os in darwin*) if test -n "$STRIP"; then striplib="$STRIP -x" old_striplib="$STRIP -S" AC_MSG_RESULT([yes]) else AC_MSG_RESULT([no]) fi ;; *) AC_MSG_RESULT([no]) ;; esac fi _LT_DECL([], [old_striplib], [1], [Commands to strip libraries]) _LT_DECL([], [striplib], [1]) ])# _LT_CMD_STRIPLIB # _LT_PREPARE_MUNGE_PATH_LIST # --------------------------- # Make sure func_munge_path_list() is defined correctly. m4_defun([_LT_PREPARE_MUNGE_PATH_LIST], [[# func_munge_path_list VARIABLE PATH # ----------------------------------- # VARIABLE is name of variable containing _space_ separated list of # directories to be munged by the contents of PATH, which is string # having a format: # "DIR[:DIR]:" # string "DIR[ DIR]" will be prepended to VARIABLE # ":DIR[:DIR]" # string "DIR[ DIR]" will be appended to VARIABLE # "DIRP[:DIRP]::[DIRA:]DIRA" # string "DIRP[ DIRP]" will be prepended to VARIABLE and string # "DIRA[ DIRA]" will be appended to VARIABLE # "DIR[:DIR]" # VARIABLE will be replaced by "DIR[ DIR]" func_munge_path_list () { case x@S|@2 in x) ;; *:) eval @S|@1=\"`$ECHO @S|@2 | $SED 's/:/ /g'` \@S|@@S|@1\" ;; x:*) eval @S|@1=\"\@S|@@S|@1 `$ECHO @S|@2 | $SED 's/:/ /g'`\" ;; *::*) eval @S|@1=\"\@S|@@S|@1\ `$ECHO @S|@2 | $SED -e 's/.*:://' -e 's/:/ /g'`\" eval @S|@1=\"`$ECHO @S|@2 | $SED -e 's/::.*//' -e 's/:/ /g'`\ \@S|@@S|@1\" ;; *) eval @S|@1=\"`$ECHO @S|@2 | $SED 's/:/ /g'`\" ;; esac } ]])# _LT_PREPARE_PATH_LIST # _LT_SYS_DYNAMIC_LINKER([TAG]) # ----------------------------- # PORTME Fill in your ld.so characteristics m4_defun([_LT_SYS_DYNAMIC_LINKER], [AC_REQUIRE([AC_CANONICAL_HOST])dnl m4_require([_LT_DECL_EGREP])dnl m4_require([_LT_FILEUTILS_DEFAULTS])dnl m4_require([_LT_DECL_OBJDUMP])dnl m4_require([_LT_DECL_SED])dnl m4_require([_LT_CHECK_SHELL_FEATURES])dnl m4_require([_LT_PREPARE_MUNGE_PATH_LIST])dnl AC_MSG_CHECKING([dynamic linker characteristics]) m4_if([$1], [], [ if test yes = "$GCC"; then case $host_os in darwin*) lt_awk_arg='/^libraries:/,/LR/' ;; *) lt_awk_arg='/^libraries:/' ;; esac case $host_os in mingw* | cegcc*) lt_sed_strip_eq='s|=\([[A-Za-z]]:\)|\1|g' ;; *) lt_sed_strip_eq='s|=/|/|g' ;; esac lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e $lt_sed_strip_eq` case $lt_search_path_spec in *\;*) # if the path contains ";" then we assume it to be the separator # otherwise default to the standard path 
separator (i.e. ":") - it is # assumed that no part of a normal pathname contains ";" but that should # okay in the real world where ";" in dirpaths is itself problematic. lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED 's/;/ /g'` ;; *) lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED "s/$PATH_SEPARATOR/ /g"` ;; esac # Ok, now we have the path, separated by spaces, we can step through it # and add multilib dir if necessary... lt_tmp_lt_search_path_spec= lt_multi_os_dir=/`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null` # ...but if some path component already ends with the multilib dir we assume # that all is fine and trust -print-search-dirs as is (GCC 4.2? or newer). case "$lt_multi_os_dir; $lt_search_path_spec " in "/; "* | "/.; "* | "/./; "* | *"$lt_multi_os_dir "* | *"$lt_multi_os_dir/ "*) lt_multi_os_dir= ;; esac for lt_sys_path in $lt_search_path_spec; do if test -d "$lt_sys_path$lt_multi_os_dir"; then lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path$lt_multi_os_dir" elif test -n "$lt_multi_os_dir"; then test -d "$lt_sys_path" && \ lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path" fi done lt_search_path_spec=`$ECHO "$lt_tmp_lt_search_path_spec" | awk ' BEGIN {RS = " "; FS = "/|\n";} { lt_foo = ""; lt_count = 0; for (lt_i = NF; lt_i > 0; lt_i--) { if ($lt_i != "" && $lt_i != ".") { if ($lt_i == "..") { lt_count++; } else { if (lt_count == 0) { lt_foo = "/" $lt_i lt_foo; } else { lt_count--; } } } } if (lt_foo != "") { lt_freq[[lt_foo]]++; } if (lt_freq[[lt_foo]] == 1) { print lt_foo; } }'` # AWK program above erroneously prepends '/' to C:/dos/paths # for these hosts. case $host_os in mingw* | cegcc*) lt_search_path_spec=`$ECHO "$lt_search_path_spec" |\ $SED 's|/\([[A-Za-z]]:\)|\1|g'` ;; esac sys_lib_search_path_spec=`$ECHO "$lt_search_path_spec" | $lt_NL2SP` else sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" fi]) library_names_spec= libname_spec='lib$name' soname_spec= shrext_cmds=.so postinstall_cmds= postuninstall_cmds= finish_cmds= finish_eval= shlibpath_var= shlibpath_overrides_runpath=unknown version_type=none dynamic_linker="$host_os ld.so" sys_lib_dlsearch_path_spec="/lib /usr/lib" need_lib_prefix=unknown hardcode_into_libs=no # when you set need_version to no, make sure it does not cause -set_version # flags to be left without arguments need_version=unknown AC_ARG_VAR([LT_SYS_LIBRARY_PATH], [User-defined run-time library search path.]) case $host_os in aix3*) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='$libname$release$shared_ext$versuffix $libname.a' shlibpath_var=LIBPATH # AIX 3 has no versioning support, so we append a major version to the name. soname_spec='$libname$release$shared_ext$major' ;; aix[[4-9]]*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no hardcode_into_libs=yes if test ia64 = "$host_cpu"; then # AIX 5 supports IA64 library_names_spec='$libname$release$shared_ext$major $libname$release$shared_ext$versuffix $libname$shared_ext' shlibpath_var=LD_LIBRARY_PATH else # With GCC up to 2.95.x, collect2 would create an import file # for dependence libraries. The import file would start with # the line '#! .'. This would cause the generated library to # depend on '.', always an invalid library. This was fixed in # development snapshots of GCC prior to 3.0. 
case $host_os in aix4 | aix4.[[01]] | aix4.[[01]].*) if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' echo ' yes ' echo '#endif'; } | $CC -E - | $GREP yes > /dev/null; then : else can_build_shared=no fi ;; esac # Using Import Files as archive members, it is possible to support # filename-based versioning of shared library archives on AIX. While # this would work for both with and without runtime linking, it will # prevent static linking of such archives. So we do filename-based # shared library versioning with .so extension only, which is used # when both runtime linking and shared linking is enabled. # Unfortunately, runtime linking may impact performance, so we do # not want this to be the default eventually. Also, we use the # versioned .so libs for executables only if there is the -brtl # linker flag in LDFLAGS as well, or --with-aix-soname=svr4 only. # To allow for filename-based versioning support, we need to create # libNAME.so.V as an archive file, containing: # *) an Import File, referring to the versioned filename of the # archive as well as the shared archive member, telling the # bitwidth (32 or 64) of that shared object, and providing the # list of exported symbols of that shared object, eventually # decorated with the 'weak' keyword # *) the shared object with the F_LOADONLY flag set, to really avoid # it being seen by the linker. # At run time we better use the real file rather than another symlink, # but for link time we create the symlink libNAME.so -> libNAME.so.V case $with_aix_soname,$aix_use_runtimelinking in # AIX (on Power*) has no versioning support, so currently we cannot hardcode correct # soname into executable. Probably we can add versioning support to # collect2, so additional links can be useful in future. aix,yes) # traditional libtool dynamic_linker='AIX unversionable lib.so' # If using run time linking (on AIX 4.2 or later) use lib.so # instead of lib.a to let people know that these are not # typical AIX shared libraries. library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ;; aix,no) # traditional AIX only dynamic_linker='AIX lib.a[(]lib.so.V[)]' # We preserve .a as extension for shared libraries through AIX4.2 # and later when we are not doing run time linking. library_names_spec='$libname$release.a $libname.a' soname_spec='$libname$release$shared_ext$major' ;; svr4,*) # full svr4 only dynamic_linker="AIX lib.so.V[(]$shared_archive_member_spec.o[)]" library_names_spec='$libname$release$shared_ext$major $libname$shared_ext' # We do not specify a path in Import Files, so LIBPATH fires. shlibpath_overrides_runpath=yes ;; *,yes) # both, prefer svr4 dynamic_linker="AIX lib.so.V[(]$shared_archive_member_spec.o[)], lib.a[(]lib.so.V[)]" library_names_spec='$libname$release$shared_ext$major $libname$shared_ext' # unpreferred sharedlib libNAME.a needs extra handling postinstall_cmds='test -n "$linkname" || linkname="$realname"~func_stripname "" ".so" "$linkname"~$install_shared_prog "$dir/$func_stripname_result.$libext" "$destdir/$func_stripname_result.$libext"~test -z "$tstripme" || test -z "$striplib" || $striplib "$destdir/$func_stripname_result.$libext"' postuninstall_cmds='for n in $library_names $old_library; do :; done~func_stripname "" ".so" "$n"~test "$func_stripname_result" = "$n" || func_append rmfiles " $odir/$func_stripname_result.$libext"' # We do not specify a path in Import Files, so LIBPATH fires. 
shlibpath_overrides_runpath=yes ;; *,no) # both, prefer aix dynamic_linker="AIX lib.a[(]lib.so.V[)], lib.so.V[(]$shared_archive_member_spec.o[)]" library_names_spec='$libname$release.a $libname.a' soname_spec='$libname$release$shared_ext$major' # unpreferred sharedlib libNAME.so.V and symlink libNAME.so need extra handling postinstall_cmds='test -z "$dlname" || $install_shared_prog $dir/$dlname $destdir/$dlname~test -z "$tstripme" || test -z "$striplib" || $striplib $destdir/$dlname~test -n "$linkname" || linkname=$realname~func_stripname "" ".a" "$linkname"~(cd "$destdir" && $LN_S -f $dlname $func_stripname_result.so)' postuninstall_cmds='test -z "$dlname" || func_append rmfiles " $odir/$dlname"~for n in $old_library $library_names; do :; done~func_stripname "" ".a" "$n"~func_append rmfiles " $odir/$func_stripname_result.so"' ;; esac shlibpath_var=LIBPATH fi ;; amigaos*) case $host_cpu in powerpc) # Since July 2007 AmigaOS4 officially supports .so libraries. # When compiling the executable, add -use-dynld -Lsobjs: to the compileline. library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ;; m68k) library_names_spec='$libname.ixlibrary $libname.a' # Create ${libname}_ixlibrary.a entries in /sys/libs. finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([[^/]]*\)\.ixlibrary$%\1%'\''`; $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' ;; esac ;; beos*) library_names_spec='$libname$shared_ext' dynamic_linker="$host_os ld.so" shlibpath_var=LIBRARY_PATH ;; bsdi[[45]]*) version_type=linux # correct to gnu/linux during the next big refactor need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' shlibpath_var=LD_LIBRARY_PATH sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" # the default ld.so.conf also contains /usr/contrib/lib and # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow # libtool to hard-code these into programs ;; cygwin* | mingw* | pw32* | cegcc*) version_type=windows shrext_cmds=.dll need_version=no need_lib_prefix=no case $GCC,$cc_basename in yes,*) # gcc library_names_spec='$libname.dll.a' # DLL is installed to $(libdir)/../bin by postinstall_cmds postinstall_cmds='base_file=`basename \$file`~ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~ dldir=$destdir/`dirname \$dlpath`~ test -d \$dldir || mkdir -p \$dldir~ $install_prog $dir/$dlname \$dldir/$dlname~ chmod a+x \$dldir/$dlname~ if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; fi' postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. 
$file; echo \$dlname'\''`~ dlpath=$dir/\$dldll~ $RM \$dlpath' shlibpath_overrides_runpath=yes case $host_os in cygwin*) # Cygwin DLLs use 'cyg' prefix rather than 'lib' soname_spec='`echo $libname | sed -e 's/^lib/cyg/'``echo $release | $SED -e 's/[[.]]/-/g'`$versuffix$shared_ext' m4_if([$1], [],[ sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/lib/w32api"]) ;; mingw* | cegcc*) # MinGW DLLs use traditional 'lib' prefix soname_spec='$libname`echo $release | $SED -e 's/[[.]]/-/g'`$versuffix$shared_ext' ;; pw32*) # pw32 DLLs use 'pw' prefix rather than 'lib' library_names_spec='`echo $libname | sed -e 's/^lib/pw/'``echo $release | $SED -e 's/[[.]]/-/g'`$versuffix$shared_ext' ;; esac dynamic_linker='Win32 ld.exe' ;; *,cl*) # Native MSVC libname_spec='$name' soname_spec='$libname`echo $release | $SED -e 's/[[.]]/-/g'`$versuffix$shared_ext' library_names_spec='$libname.dll.lib' case $build_os in mingw*) sys_lib_search_path_spec= lt_save_ifs=$IFS IFS=';' for lt_path in $LIB do IFS=$lt_save_ifs # Let DOS variable expansion print the short 8.3 style file name. lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" done IFS=$lt_save_ifs # Convert to MSYS style. sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([[a-zA-Z]]\\):| /\\1|g' -e 's|^ ||'` ;; cygwin*) # Convert to unix form, then to dos form, then back to unix form # but this time dos style (no spaces!) so that the unix form looks # like /cygdrive/c/PROGRA~1:/cygdr... sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` ;; *) sys_lib_search_path_spec=$LIB if $ECHO "$sys_lib_search_path_spec" | [$GREP ';[c-zC-Z]:/' >/dev/null]; then # It is most probably a Windows format PATH. sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` else sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` fi # FIXME: find the short name or the path components, as spaces are # common. (e.g. "Program Files" -> "PROGRA~1") ;; esac # DLL is installed to $(libdir)/../bin by postinstall_cmds postinstall_cmds='base_file=`basename \$file`~ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~ dldir=$destdir/`dirname \$dlpath`~ test -d \$dldir || mkdir -p \$dldir~ $install_prog $dir/$dlname \$dldir/$dlname' postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ dlpath=$dir/\$dldll~ $RM \$dlpath' shlibpath_overrides_runpath=yes dynamic_linker='Win32 link.exe' ;; *) # Assume MSVC wrapper library_names_spec='$libname`echo $release | $SED -e 's/[[.]]/-/g'`$versuffix$shared_ext $libname.lib' dynamic_linker='Win32 ld.exe' ;; esac # FIXME: first we should search . 
and the directory the executable is in shlibpath_var=PATH ;; darwin* | rhapsody*) dynamic_linker="$host_os dyld" version_type=darwin need_lib_prefix=no need_version=no library_names_spec='$libname$release$major$shared_ext $libname$shared_ext' soname_spec='$libname$release$major$shared_ext' shlibpath_overrides_runpath=yes shlibpath_var=DYLD_LIBRARY_PATH shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' m4_if([$1], [],[ sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/local/lib"]) sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' ;; dgux*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LD_LIBRARY_PATH ;; freebsd* | dragonfly*) # DragonFly does not have aout. When/if they implement a new # versioning mechanism, adjust this. if test -x /usr/bin/objformat; then objformat=`/usr/bin/objformat` else case $host_os in freebsd[[23]].*) objformat=aout ;; *) objformat=elf ;; esac fi version_type=freebsd-$objformat case $version_type in freebsd-elf*) library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' need_version=no need_lib_prefix=no ;; freebsd-*) library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' need_version=yes ;; esac shlibpath_var=LD_LIBRARY_PATH case $host_os in freebsd2.*) shlibpath_overrides_runpath=yes ;; freebsd3.[[01]]* | freebsdelf3.[[01]]*) shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; freebsd3.[[2-9]]* | freebsdelf3.[[2-9]]* | \ freebsd4.[[0-5]] | freebsdelf4.[[0-5]] | freebsd4.1.1 | freebsdelf4.1.1) shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; *) # from 4.6 on, and DragonFly shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; esac ;; haiku*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no dynamic_linker="$host_os runtime_loader" library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LIBRARY_PATH shlibpath_overrides_runpath=no sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' hardcode_into_libs=yes ;; hpux9* | hpux10* | hpux11*) # Give a soname corresponding to the major version so that dld.sl refuses to # link against other versions. version_type=sunos need_lib_prefix=no need_version=no case $host_cpu in ia64*) shrext_cmds='.so' hardcode_into_libs=yes dynamic_linker="$host_os dld.so" shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' if test 32 = "$HPUX_IA64_MODE"; then sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" sys_lib_dlsearch_path_spec=/usr/lib/hpux32 else sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" sys_lib_dlsearch_path_spec=/usr/lib/hpux64 fi ;; hppa*64*) shrext_cmds='.sl' hardcode_into_libs=yes dynamic_linker="$host_os dld.sl" shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. 
library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ;; *) shrext_cmds='.sl' dynamic_linker="$host_os dld.sl" shlibpath_var=SHLIB_PATH shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' ;; esac # HP-UX runs *really* slowly unless shared libraries are mode 555, ... postinstall_cmds='chmod 555 $lib' # or fails outright, so override atomically: install_override_mode=555 ;; interix[[3-9]]*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; irix5* | irix6* | nonstopux*) case $host_os in nonstopux*) version_type=nonstopux ;; *) if test yes = "$lt_cv_prog_gnu_ld"; then version_type=linux # correct to gnu/linux during the next big refactor else version_type=irix fi ;; esac need_lib_prefix=no need_version=no soname_spec='$libname$release$shared_ext$major' library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$release$shared_ext $libname$shared_ext' case $host_os in irix5* | nonstopux*) libsuff= shlibsuff= ;; *) case $LD in # libtool.m4 will add one of these switches to LD *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") libsuff= shlibsuff= libmagic=32-bit;; *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") libsuff=32 shlibsuff=N32 libmagic=N32;; *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") libsuff=64 shlibsuff=64 libmagic=64-bit;; *) libsuff= shlibsuff= libmagic=never-match;; esac ;; esac shlibpath_var=LD_LIBRARY${shlibsuff}_PATH shlibpath_overrides_runpath=no sys_lib_search_path_spec="/usr/lib$libsuff /lib$libsuff /usr/local/lib$libsuff" sys_lib_dlsearch_path_spec="/usr/lib$libsuff /lib$libsuff" hardcode_into_libs=yes ;; # No shared lib support for Linux oldld, aout, or coff. linux*oldld* | linux*aout* | linux*coff*) dynamic_linker=no ;; linux*android*) version_type=none # Android doesn't support versioned libraries. need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext' soname_spec='$libname$release$shared_ext' finish_cmds= shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes # This implies no fast_install, which is unacceptable. # Some rework will be needed to allow for fast_install # before this can be enabled. hardcode_into_libs=yes dynamic_linker='Android linker' # Don't embed -rpath directories since the linker doesn't support them. _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' ;; # This must be glibc/ELF. 
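# Illustrative expansion (example names, not part of upstream libtool):
# for a library "foo" with major version 1 and versuffix 1.2.3, the
# library_names_spec set below expands to
#   libfoo.so.1.2.3 libfoo.so.1 libfoo.so
# and soname_spec to libfoo.so.1; the first name is the real file, the
# rest are links to it.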
linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no # Some binutils ld are patched to set DT_RUNPATH AC_CACHE_VAL([lt_cv_shlibpath_overrides_runpath], [lt_cv_shlibpath_overrides_runpath=no save_LDFLAGS=$LDFLAGS save_libdir=$libdir eval "libdir=/foo; wl=\"$_LT_TAGVAR(lt_prog_compiler_wl, $1)\"; \ LDFLAGS=\"\$LDFLAGS $_LT_TAGVAR(hardcode_libdir_flag_spec, $1)\"" AC_LINK_IFELSE([AC_LANG_PROGRAM([],[])], [AS_IF([ ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null], [lt_cv_shlibpath_overrides_runpath=yes])]) LDFLAGS=$save_LDFLAGS libdir=$save_libdir ]) shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath # This implies no fast_install, which is unacceptable. # Some rework will be needed to allow for fast_install # before this can be enabled. hardcode_into_libs=yes # Ideally, we could use ldconfig to report *all* directores which are # searched for libraries, however this is still not possible. Aside from not # being certain /sbin/ldconfig is available, command # 'ldconfig -N -X -v | grep ^/' on 64bit Fedora does not report /usr/lib64, # even though it is searched at run-time. Try to do the best guess by # appending ld.so.conf contents (and includes) to the search path. if test -f /etc/ld.so.conf; then lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \[$]2)); skip = 1; } { if (!skip) print \[$]0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '` sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra" fi # We used to test for /lib/ld.so.1 and disable shared libraries on # powerpc, because MkLinux only supported shared libraries with the # GNU dynamic linker. Since this was broken with cross compilers, # most powerpc-linux boxes support dynamic linking these days and # people can always --disable-shared, the test was removed, and we # assume the GNU/Linux dynamic linker is in use. 
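# Illustrative example (typical glibc layout, not guaranteed): an
# /etc/ld.so.conf containing "include /etc/ld.so.conf.d/*.conf" with a
# drop-in file listing /usr/lib64/mysql would cause the awk/sed pipeline
# above to append that directory, e.g.
#   sys_lib_dlsearch_path_spec="/lib /usr/lib /usr/lib64/mysql"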
dynamic_linker='GNU/Linux ld.so' ;; netbsd*) version_type=sunos need_lib_prefix=no need_version=no if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' dynamic_linker='NetBSD (a.out) ld.so' else library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' dynamic_linker='NetBSD ld.elf_so' fi shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; newsos6) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes ;; *nto* | *qnx*) version_type=qnx need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes dynamic_linker='ldqnx.so' ;; openbsd* | bitrig*) version_type=sunos sys_lib_dlsearch_path_spec=/usr/lib need_lib_prefix=no if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then need_version=no else need_version=yes fi library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes ;; os2*) libname_spec='$name' version_type=windows shrext_cmds=.dll need_version=no need_lib_prefix=no # OS/2 can only load a DLL with a base name of 8 characters or less. soname_spec='`test -n "$os2dllname" && libname="$os2dllname"; v=$($ECHO $release$versuffix | tr -d .-); n=$($ECHO $libname | cut -b -$((8 - ${#v})) | tr . _); $ECHO $n$v`$shared_ext' library_names_spec='${libname}_dll.$libext' dynamic_linker='OS/2 ld.exe' shlibpath_var=BEGINLIBPATH sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec postinstall_cmds='base_file=`basename \$file`~ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; $ECHO \$dlname'\''`~ dldir=$destdir/`dirname \$dlpath`~ test -d \$dldir || mkdir -p \$dldir~ $install_prog $dir/$dlname \$dldir/$dlname~ chmod a+x \$dldir/$dlname~ if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; fi' postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. 
$file; $ECHO \$dlname'\''`~ dlpath=$dir/\$dldll~ $RM \$dlpath' ;; osf3* | osf4* | osf5*) version_type=osf need_lib_prefix=no need_version=no soname_spec='$libname$release$shared_ext$major' library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' shlibpath_var=LD_LIBRARY_PATH sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ;; rdos*) dynamic_linker=no ;; solaris*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes # ldd complains unless libraries are executable postinstall_cmds='chmod +x $lib' ;; sunos4*) version_type=sunos library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes if test yes = "$with_gnu_ld"; then need_lib_prefix=no fi need_version=yes ;; sysv4 | sysv4.3*) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LD_LIBRARY_PATH case $host_vendor in sni) shlibpath_overrides_runpath=no need_lib_prefix=no runpath_var=LD_RUN_PATH ;; siemens) need_lib_prefix=no ;; motorola) need_lib_prefix=no need_version=no shlibpath_overrides_runpath=no sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' ;; esac ;; sysv4*MP*) if test -d /usr/nec; then version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='$libname$shared_ext.$versuffix $libname$shared_ext.$major $libname$shared_ext' soname_spec='$libname$shared_ext.$major' shlibpath_var=LD_LIBRARY_PATH fi ;; sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) version_type=sco need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes if test yes = "$with_gnu_ld"; then sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' else sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' case $host_os in sco3.2v5*) sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" ;; esac fi sys_lib_dlsearch_path_spec='/usr/lib' ;; tpf*) # TPF is a cross-target only. Preferred cross-host = GNU/Linux. 
version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; uts4*) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LD_LIBRARY_PATH ;; *) dynamic_linker=no ;; esac AC_MSG_RESULT([$dynamic_linker]) test no = "$dynamic_linker" && can_build_shared=no variables_saved_for_relink="PATH $shlibpath_var $runpath_var" if test yes = "$GCC"; then variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" fi if test set = "${lt_cv_sys_lib_search_path_spec+set}"; then sys_lib_search_path_spec=$lt_cv_sys_lib_search_path_spec fi if test set = "${lt_cv_sys_lib_dlsearch_path_spec+set}"; then sys_lib_dlsearch_path_spec=$lt_cv_sys_lib_dlsearch_path_spec fi # remember unaugmented sys_lib_dlsearch_path content for libtool script decls... configure_time_dlsearch_path=$sys_lib_dlsearch_path_spec # ... but it needs LT_SYS_LIBRARY_PATH munging for other configure-time code func_munge_path_list sys_lib_dlsearch_path_spec "$LT_SYS_LIBRARY_PATH" # to be used as default LT_SYS_LIBRARY_PATH value in generated libtool configure_time_lt_sys_library_path=$LT_SYS_LIBRARY_PATH _LT_DECL([], [variables_saved_for_relink], [1], [Variables whose values should be saved in libtool wrapper scripts and restored at link time]) _LT_DECL([], [need_lib_prefix], [0], [Do we need the "lib" prefix for modules?]) _LT_DECL([], [need_version], [0], [Do we need a version for libraries?]) _LT_DECL([], [version_type], [0], [Library versioning type]) _LT_DECL([], [runpath_var], [0], [Shared library runtime path variable]) _LT_DECL([], [shlibpath_var], [0],[Shared library path variable]) _LT_DECL([], [shlibpath_overrides_runpath], [0], [Is shlibpath searched before the hard-coded library search path?]) _LT_DECL([], [libname_spec], [1], [Format of library name prefix]) _LT_DECL([], [library_names_spec], [1], [[List of archive names. First name is the real one, the rest are links. 
The last name is the one that the linker finds with -lNAME]]) _LT_DECL([], [soname_spec], [1], [[The coded name of the library, if different from the real name]]) _LT_DECL([], [install_override_mode], [1], [Permission mode override for installation of shared libraries]) _LT_DECL([], [postinstall_cmds], [2], [Command to use after installation of a shared archive]) _LT_DECL([], [postuninstall_cmds], [2], [Command to use after uninstallation of a shared archive]) _LT_DECL([], [finish_cmds], [2], [Commands used to finish a libtool library installation in a directory]) _LT_DECL([], [finish_eval], [1], [[As "finish_cmds", except a single script fragment to be evaled but not shown]]) _LT_DECL([], [hardcode_into_libs], [0], [Whether we should hardcode library paths into libraries]) _LT_DECL([], [sys_lib_search_path_spec], [2], [Compile-time system search path for libraries]) _LT_DECL([sys_lib_dlsearch_path_spec], [configure_time_dlsearch_path], [2], [Detected run-time system search path for libraries]) _LT_DECL([], [configure_time_lt_sys_library_path], [2], [Explicit LT_SYS_LIBRARY_PATH set during ./configure time]) ])# _LT_SYS_DYNAMIC_LINKER # _LT_PATH_TOOL_PREFIX(TOOL) # -------------------------- # find a file program that can recognize shared library AC_DEFUN([_LT_PATH_TOOL_PREFIX], [m4_require([_LT_DECL_EGREP])dnl AC_MSG_CHECKING([for $1]) AC_CACHE_VAL(lt_cv_path_MAGIC_CMD, [case $MAGIC_CMD in [[\\/*] | ?:[\\/]*]) lt_cv_path_MAGIC_CMD=$MAGIC_CMD # Let the user override the test with a path. ;; *) lt_save_MAGIC_CMD=$MAGIC_CMD lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR dnl $ac_dummy forces splitting on constant user-supplied paths. dnl POSIX.2 word splitting is done only on the output of word expansions, dnl not every word. This closes a longstanding sh security hole. ac_dummy="m4_if([$2], , $PATH, [$2])" for ac_dir in $ac_dummy; do IFS=$lt_save_ifs test -z "$ac_dir" && ac_dir=. if test -f "$ac_dir/$1"; then lt_cv_path_MAGIC_CMD=$ac_dir/"$1" if test -n "$file_magic_test_file"; then case $deplibs_check_method in "file_magic "*) file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` MAGIC_CMD=$lt_cv_path_MAGIC_CMD if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | $EGREP "$file_magic_regex" > /dev/null; then : else cat <<_LT_EOF 1>&2 *** Warning: the command libtool uses to detect shared libraries, *** $file_magic_cmd, produces output that libtool cannot recognize. *** The result is that libtool may fail to recognize shared libraries *** as such. This will affect the creation of libtool libraries that *** depend on shared libraries, but programs linked with such libtool *** libraries will work regardless of this problem. 
Nevertheless, you *** may want to report the problem to your system manager and/or to *** bug-libtool@gnu.org _LT_EOF fi ;; esac fi break fi done IFS=$lt_save_ifs MAGIC_CMD=$lt_save_MAGIC_CMD ;; esac]) MAGIC_CMD=$lt_cv_path_MAGIC_CMD if test -n "$MAGIC_CMD"; then AC_MSG_RESULT($MAGIC_CMD) else AC_MSG_RESULT(no) fi _LT_DECL([], [MAGIC_CMD], [0], [Used to examine libraries when file_magic_cmd begins with "file"])dnl ])# _LT_PATH_TOOL_PREFIX # Old name: AU_ALIAS([AC_PATH_TOOL_PREFIX], [_LT_PATH_TOOL_PREFIX]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_PATH_TOOL_PREFIX], []) # _LT_PATH_MAGIC # -------------- # find a file program that can recognize a shared library m4_defun([_LT_PATH_MAGIC], [_LT_PATH_TOOL_PREFIX(${ac_tool_prefix}file, /usr/bin$PATH_SEPARATOR$PATH) if test -z "$lt_cv_path_MAGIC_CMD"; then if test -n "$ac_tool_prefix"; then _LT_PATH_TOOL_PREFIX(file, /usr/bin$PATH_SEPARATOR$PATH) else MAGIC_CMD=: fi fi ])# _LT_PATH_MAGIC # LT_PATH_LD # ---------- # find the pathname to the GNU or non-GNU linker AC_DEFUN([LT_PATH_LD], [AC_REQUIRE([AC_PROG_CC])dnl AC_REQUIRE([AC_CANONICAL_HOST])dnl AC_REQUIRE([AC_CANONICAL_BUILD])dnl m4_require([_LT_DECL_SED])dnl m4_require([_LT_DECL_EGREP])dnl m4_require([_LT_PROG_ECHO_BACKSLASH])dnl AC_ARG_WITH([gnu-ld], [AS_HELP_STRING([--with-gnu-ld], [assume the C compiler uses GNU ld @<:@default=no@:>@])], [test no = "$withval" || with_gnu_ld=yes], [with_gnu_ld=no])dnl ac_prog=ld if test yes = "$GCC"; then # Check if gcc -print-prog-name=ld gives a path. AC_MSG_CHECKING([for ld used by $CC]) case $host in *-*-mingw*) # gcc leaves a trailing carriage return, which upsets mingw ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; *) ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; esac case $ac_prog in # Accept absolute paths. [[\\/]]* | ?:[[\\/]]*) re_direlt='/[[^/]][[^/]]*/\.\./' # Canonicalize the pathname of ld ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'` while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"` done test -z "$LD" && LD=$ac_prog ;; "") # If it fails, then pretend we aren't using GCC. ac_prog=ld ;; *) # If it is relative, then search for the first ld in PATH. with_gnu_ld=unknown ;; esac elif test yes = "$with_gnu_ld"; then AC_MSG_CHECKING([for GNU ld]) else AC_MSG_CHECKING([for non-GNU ld]) fi AC_CACHE_VAL(lt_cv_path_LD, [if test -z "$LD"; then lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR for ac_dir in $PATH; do IFS=$lt_save_ifs test -z "$ac_dir" && ac_dir=. if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then lt_cv_path_LD=$ac_dir/$ac_prog # Check to see if the program is GNU ld. I'd rather use --version, # but apparently some variants of GNU ld only accept -v. # Break only if it was the GNU/non-GNU ld that we prefer. 
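# Illustrative note (not part of upstream libtool): GNU ld typically
# reports itself as e.g. "GNU ld (GNU Binutils) 2.25" or "... with BFD ...",
# which the *GNU* | *'with BFD'* patterns below match; vendor linkers such
# as Solaris or AIX ld print nothing of the sort and take the other branch.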
case `"$lt_cv_path_LD" -v 2>&1 &1 conftest.i cat conftest.i conftest.i >conftest2.i : ${lt_DD:=$DD} AC_PATH_PROGS_FEATURE_CHECK([lt_DD], [dd], [if "$ac_path_lt_DD" bs=32 count=1 conftest.out 2>/dev/null; then cmp -s conftest.i conftest.out \ && ac_cv_path_lt_DD="$ac_path_lt_DD" ac_path_lt_DD_found=: fi]) rm -f conftest.i conftest2.i conftest.out]) ])# _LT_PATH_DD # _LT_CMD_TRUNCATE # ---------------- # find command to truncate a binary pipe m4_defun([_LT_CMD_TRUNCATE], [m4_require([_LT_PATH_DD]) AC_CACHE_CHECK([how to truncate binary pipes], [lt_cv_truncate_bin], [printf 0123456789abcdef0123456789abcdef >conftest.i cat conftest.i conftest.i >conftest2.i lt_cv_truncate_bin= if "$ac_cv_path_lt_DD" bs=32 count=1 conftest.out 2>/dev/null; then cmp -s conftest.i conftest.out \ && lt_cv_truncate_bin="$ac_cv_path_lt_DD bs=4096 count=1" fi rm -f conftest.i conftest2.i conftest.out test -z "$lt_cv_truncate_bin" && lt_cv_truncate_bin="$SED -e 4q"]) _LT_DECL([lt_truncate_bin], [lt_cv_truncate_bin], [1], [Command to truncate a binary pipe]) ])# _LT_CMD_TRUNCATE # _LT_CHECK_MAGIC_METHOD # ---------------------- # how to check for library dependencies # -- PORTME fill in with the dynamic library characteristics m4_defun([_LT_CHECK_MAGIC_METHOD], [m4_require([_LT_DECL_EGREP]) m4_require([_LT_DECL_OBJDUMP]) AC_CACHE_CHECK([how to recognize dependent libraries], lt_cv_deplibs_check_method, [lt_cv_file_magic_cmd='$MAGIC_CMD' lt_cv_file_magic_test_file= lt_cv_deplibs_check_method='unknown' # Need to set the preceding variable on all platforms that support # interlibrary dependencies. # 'none' -- dependencies not supported. # 'unknown' -- same as none, but documents that we really don't know. # 'pass_all' -- all dependencies passed with no checks. # 'test_compile' -- check by making test program. # 'file_magic [[regex]]' -- check by looking for files in library path # that responds to the $file_magic_cmd with a given extended regex. # If you have 'file' or equivalent on your system and you're not sure # whether 'pass_all' will *always* work, you probably want this one. case $host_os in aix[[4-9]]*) lt_cv_deplibs_check_method=pass_all ;; beos*) lt_cv_deplibs_check_method=pass_all ;; bsdi[[45]]*) lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (shared object|dynamic lib)' lt_cv_file_magic_cmd='/usr/bin/file -L' lt_cv_file_magic_test_file=/shlib/libc.so ;; cygwin*) # func_win32_libid is a shell function defined in ltmain.sh lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' lt_cv_file_magic_cmd='func_win32_libid' ;; mingw* | pw32*) # Base MSYS/MinGW do not provide the 'file' command needed by # func_win32_libid shell function, so use a weaker test based on 'objdump', # unless we find 'file', for example because we are cross-compiling. if ( file / ) >/dev/null 2>&1; then lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' lt_cv_file_magic_cmd='func_win32_libid' else # Keep this pattern in sync with the one in func_win32_libid. lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' lt_cv_file_magic_cmd='$OBJDUMP -f' fi ;; cegcc*) # use the weaker test based on 'objdump'. See mingw*. lt_cv_deplibs_check_method='file_magic file format pe-arm-.*little(.*architecture: arm)?' 
lt_cv_file_magic_cmd='$OBJDUMP -f' ;; darwin* | rhapsody*) lt_cv_deplibs_check_method=pass_all ;; freebsd* | dragonfly*) if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then case $host_cpu in i*86 ) # Not sure whether the presence of OpenBSD here was a mistake. # Let's accept both of them until this is cleared up. lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[[3-9]]86 (compact )?demand paged shared library' lt_cv_file_magic_cmd=/usr/bin/file lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*` ;; esac else lt_cv_deplibs_check_method=pass_all fi ;; haiku*) lt_cv_deplibs_check_method=pass_all ;; hpux10.20* | hpux11*) lt_cv_file_magic_cmd=/usr/bin/file case $host_cpu in ia64*) lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|ELF-[[0-9]][[0-9]]) shared object file - IA64' lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so ;; hppa*64*) [lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF[ -][0-9][0-9])(-bit)?( [LM]SB)? shared object( file)?[, -]* PA-RISC [0-9]\.[0-9]'] lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl ;; *) lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|PA-RISC[[0-9]]\.[[0-9]]) shared library' lt_cv_file_magic_test_file=/usr/lib/libc.sl ;; esac ;; interix[[3-9]]*) # PIC code is broken on Interix 3.x, that's why |\.a not |_pic\.a here lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so|\.a)$' ;; irix5* | irix6* | nonstopux*) case $LD in *-32|*"-32 ") libmagic=32-bit;; *-n32|*"-n32 ") libmagic=N32;; *-64|*"-64 ") libmagic=64-bit;; *) libmagic=never-match;; esac lt_cv_deplibs_check_method=pass_all ;; # This must be glibc/ELF. linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) lt_cv_deplibs_check_method=pass_all ;; netbsd*) if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$' else lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so|_pic\.a)$' fi ;; newos6*) lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (executable|dynamic lib)' lt_cv_file_magic_cmd=/usr/bin/file lt_cv_file_magic_test_file=/usr/lib/libnls.so ;; *nto* | *qnx*) lt_cv_deplibs_check_method=pass_all ;; openbsd* | bitrig*) if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|\.so|_pic\.a)$' else lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$' fi ;; osf3* | osf4* | osf5*) lt_cv_deplibs_check_method=pass_all ;; rdos*) lt_cv_deplibs_check_method=pass_all ;; solaris*) lt_cv_deplibs_check_method=pass_all ;; sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) lt_cv_deplibs_check_method=pass_all ;; sysv4 | sysv4.3*) case $host_vendor in motorola) lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (shared object|dynamic lib) M[[0-9]][[0-9]]* Version [[0-9]]' lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*` ;; ncr) lt_cv_deplibs_check_method=pass_all ;; sequent) lt_cv_file_magic_cmd='/bin/file' lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB (shared object|dynamic lib )' ;; sni) lt_cv_file_magic_cmd='/bin/file' lt_cv_deplibs_check_method="file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB dynamic lib" lt_cv_file_magic_test_file=/lib/libc.so ;; siemens) lt_cv_deplibs_check_method=pass_all ;; pc) lt_cv_deplibs_check_method=pass_all ;; esac ;; tpf*) lt_cv_deplibs_check_method=pass_all ;; os2*) lt_cv_deplibs_check_method=pass_all ;; esac ]) 
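# Illustrative example (not part of upstream libtool; output varies by
# system): with a 'file_magic ...' method selected above, libtool runs
# something like
#   /usr/bin/file -L /shlib/libc.so
# and treats the dependency as a shared library only if the output
# (e.g. "ELF 32-bit LSB shared object ...") matches the stored regex.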
file_magic_glob= want_nocaseglob=no if test "$build" = "$host"; then case $host_os in mingw* | pw32*) if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then want_nocaseglob=yes else file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[[\1]]\/[[\1]]\/g;/g"` fi ;; esac fi file_magic_cmd=$lt_cv_file_magic_cmd deplibs_check_method=$lt_cv_deplibs_check_method test -z "$deplibs_check_method" && deplibs_check_method=unknown _LT_DECL([], [deplibs_check_method], [1], [Method to check whether dependent libraries are shared objects]) _LT_DECL([], [file_magic_cmd], [1], [Command to use when deplibs_check_method = "file_magic"]) _LT_DECL([], [file_magic_glob], [1], [How to find potential files when deplibs_check_method = "file_magic"]) _LT_DECL([], [want_nocaseglob], [1], [Find potential files using nocaseglob when deplibs_check_method = "file_magic"]) ])# _LT_CHECK_MAGIC_METHOD # LT_PATH_NM # ---------- # find the pathname to a BSD- or MS-compatible name lister AC_DEFUN([LT_PATH_NM], [AC_REQUIRE([AC_PROG_CC])dnl AC_CACHE_CHECK([for BSD- or MS-compatible name lister (nm)], lt_cv_path_NM, [if test -n "$NM"; then # Let the user override the test. lt_cv_path_NM=$NM else lt_nm_to_check=${ac_tool_prefix}nm if test -n "$ac_tool_prefix" && test "$build" = "$host"; then lt_nm_to_check="$lt_nm_to_check nm" fi for lt_tmp_nm in $lt_nm_to_check; do lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR for ac_dir in $PATH /usr/ccs/bin/elf /usr/ccs/bin /usr/ucb /bin; do IFS=$lt_save_ifs test -z "$ac_dir" && ac_dir=. tmp_nm=$ac_dir/$lt_tmp_nm if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext"; then # Check to see if the nm accepts a BSD-compat flag. # Adding the 'sed 1q' prevents false positives on HP-UX, which says: # nm: unknown option "B" ignored # Tru64's nm complains that /dev/null is an invalid object file # MSYS converts /dev/null to NUL, MinGW nm treats NUL as empty case $build_os in mingw*) lt_bad_file=conftest.nm/nofile ;; *) lt_bad_file=/dev/null ;; esac case `"$tmp_nm" -B $lt_bad_file 2>&1 | sed '1q'` in *$lt_bad_file* | *'Invalid file or object type'*) lt_cv_path_NM="$tmp_nm -B" break 2 ;; *) case `"$tmp_nm" -p /dev/null 2>&1 | sed '1q'` in */dev/null*) lt_cv_path_NM="$tmp_nm -p" break 2 ;; *) lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but continue # so that we can try to find one that supports BSD flags ;; esac ;; esac fi done IFS=$lt_save_ifs done : ${lt_cv_path_NM=no} fi]) if test no != "$lt_cv_path_NM"; then NM=$lt_cv_path_NM else # Didn't find any BSD compatible name lister, look for dumpbin. if test -n "$DUMPBIN"; then : # Let the user override the test. 
else AC_CHECK_TOOLS(DUMPBIN, [dumpbin "link -dump"], :) case `$DUMPBIN -symbols -headers /dev/null 2>&1 | sed '1q'` in *COFF*) DUMPBIN="$DUMPBIN -symbols -headers" ;; *) DUMPBIN=: ;; esac fi AC_SUBST([DUMPBIN]) if test : != "$DUMPBIN"; then NM=$DUMPBIN fi fi test -z "$NM" && NM=nm AC_SUBST([NM]) _LT_DECL([], [NM], [1], [A BSD- or MS-compatible name lister])dnl AC_CACHE_CHECK([the name lister ($NM) interface], [lt_cv_nm_interface], [lt_cv_nm_interface="BSD nm" echo "int some_variable = 0;" > conftest.$ac_ext (eval echo "\"\$as_me:$LINENO: $ac_compile\"" >&AS_MESSAGE_LOG_FD) (eval "$ac_compile" 2>conftest.err) cat conftest.err >&AS_MESSAGE_LOG_FD (eval echo "\"\$as_me:$LINENO: $NM \\\"conftest.$ac_objext\\\"\"" >&AS_MESSAGE_LOG_FD) (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out) cat conftest.err >&AS_MESSAGE_LOG_FD (eval echo "\"\$as_me:$LINENO: output\"" >&AS_MESSAGE_LOG_FD) cat conftest.out >&AS_MESSAGE_LOG_FD if $GREP 'External.*some_variable' conftest.out > /dev/null; then lt_cv_nm_interface="MS dumpbin" fi rm -f conftest*]) ])# LT_PATH_NM # Old names: AU_ALIAS([AM_PROG_NM], [LT_PATH_NM]) AU_ALIAS([AC_PROG_NM], [LT_PATH_NM]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AM_PROG_NM], []) dnl AC_DEFUN([AC_PROG_NM], []) # _LT_CHECK_SHAREDLIB_FROM_LINKLIB # -------------------------------- # how to determine the name of the shared library # associated with a specific link library. # -- PORTME fill in with the dynamic library characteristics m4_defun([_LT_CHECK_SHAREDLIB_FROM_LINKLIB], [m4_require([_LT_DECL_EGREP]) m4_require([_LT_DECL_OBJDUMP]) m4_require([_LT_DECL_DLLTOOL]) AC_CACHE_CHECK([how to associate runtime and link libraries], lt_cv_sharedlib_from_linklib_cmd, [lt_cv_sharedlib_from_linklib_cmd='unknown' case $host_os in cygwin* | mingw* | pw32* | cegcc*) # two different shell functions defined in ltmain.sh; # decide which one to use based on capabilities of $DLLTOOL case `$DLLTOOL --help 2>&1` in *--identify-strict*) lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib ;; *) lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback ;; esac ;; *) # fallback: assume linklib IS sharedlib lt_cv_sharedlib_from_linklib_cmd=$ECHO ;; esac ]) sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO _LT_DECL([], [sharedlib_from_linklib_cmd], [1], [Command to associate shared and link libraries]) ])# _LT_CHECK_SHAREDLIB_FROM_LINKLIB # _LT_PATH_MANIFEST_TOOL # ---------------------- # locate the manifest tool m4_defun([_LT_PATH_MANIFEST_TOOL], [AC_CHECK_TOOL(MANIFEST_TOOL, mt, :) test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt AC_CACHE_CHECK([if $MANIFEST_TOOL is a manifest tool], [lt_cv_path_mainfest_tool], [lt_cv_path_mainfest_tool=no echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&AS_MESSAGE_LOG_FD $MANIFEST_TOOL '-?' 2>conftest.err > conftest.out cat conftest.err >&AS_MESSAGE_LOG_FD if $GREP 'Manifest Tool' conftest.out > /dev/null; then lt_cv_path_mainfest_tool=yes fi rm -f conftest*]) if test yes != "$lt_cv_path_mainfest_tool"; then MANIFEST_TOOL=: fi _LT_DECL([], [MANIFEST_TOOL], [1], [Manifest tool])dnl ])# _LT_PATH_MANIFEST_TOOL # _LT_DLL_DEF_P([FILE]) # --------------------- # True iff FILE is a Windows DLL '.def' file. 
# Keep in sync with func_dll_def_p in the libtool script AC_DEFUN([_LT_DLL_DEF_P], [dnl test DEF = "`$SED -n dnl -e '\''s/^[[ ]]*//'\'' dnl Strip leading whitespace -e '\''/^\(;.*\)*$/d'\'' dnl Delete empty lines and comments -e '\''s/^\(EXPORTS\|LIBRARY\)\([[ ]].*\)*$/DEF/p'\'' dnl -e q dnl Only consider the first "real" line $1`" dnl ])# _LT_DLL_DEF_P # LT_LIB_M # -------- # check for math library AC_DEFUN([LT_LIB_M], [AC_REQUIRE([AC_CANONICAL_HOST])dnl LIBM= case $host in *-*-beos* | *-*-cegcc* | *-*-cygwin* | *-*-haiku* | *-*-pw32* | *-*-darwin*) # These system don't have libm, or don't need it ;; *-ncr-sysv4.3*) AC_CHECK_LIB(mw, _mwvalidcheckl, LIBM=-lmw) AC_CHECK_LIB(m, cos, LIBM="$LIBM -lm") ;; *) AC_CHECK_LIB(m, cos, LIBM=-lm) ;; esac AC_SUBST([LIBM]) ])# LT_LIB_M # Old name: AU_ALIAS([AC_CHECK_LIBM], [LT_LIB_M]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_CHECK_LIBM], []) # _LT_COMPILER_NO_RTTI([TAGNAME]) # ------------------------------- m4_defun([_LT_COMPILER_NO_RTTI], [m4_require([_LT_TAG_COMPILER])dnl _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)= if test yes = "$GCC"; then case $cc_basename in nvcc*) _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -Xcompiler -fno-builtin' ;; *) _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin' ;; esac _LT_COMPILER_OPTION([if $compiler supports -fno-rtti -fno-exceptions], lt_cv_prog_compiler_rtti_exceptions, [-fno-rtti -fno-exceptions], [], [_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)="$_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1) -fno-rtti -fno-exceptions"]) fi _LT_TAGDECL([no_builtin_flag], [lt_prog_compiler_no_builtin_flag], [1], [Compiler flag to turn off builtin functions]) ])# _LT_COMPILER_NO_RTTI # _LT_CMD_GLOBAL_SYMBOLS # ---------------------- m4_defun([_LT_CMD_GLOBAL_SYMBOLS], [AC_REQUIRE([AC_CANONICAL_HOST])dnl AC_REQUIRE([AC_PROG_CC])dnl AC_REQUIRE([AC_PROG_AWK])dnl AC_REQUIRE([LT_PATH_NM])dnl AC_REQUIRE([LT_PATH_LD])dnl m4_require([_LT_DECL_SED])dnl m4_require([_LT_DECL_EGREP])dnl m4_require([_LT_TAG_COMPILER])dnl # Check for command to grab the raw symbol name followed by C symbol from nm. AC_MSG_CHECKING([command to parse $NM output from $compiler object]) AC_CACHE_VAL([lt_cv_sys_global_symbol_pipe], [ # These are sane defaults that work on at least a few old systems. # [They come from Ultrix. What could be older than Ultrix?!! ;)] # Character class describing NM global symbol codes. symcode='[[BCDEGRST]]' # Regexp to match symbols that can be accessed directly from C. sympat='\([[_A-Za-z]][[_A-Za-z0-9]]*\)' # Define system-specific variables. case $host_os in aix*) symcode='[[BCDT]]' ;; cygwin* | mingw* | pw32* | cegcc*) symcode='[[ABCDGISTW]]' ;; hpux*) if test ia64 = "$host_cpu"; then symcode='[[ABCDEGRST]]' fi ;; irix* | nonstopux*) symcode='[[BCDEGRST]]' ;; osf*) symcode='[[BCDEGQRST]]' ;; solaris*) symcode='[[BDRT]]' ;; sco3.2v5*) symcode='[[DT]]' ;; sysv4.2uw2*) symcode='[[DT]]' ;; sysv5* | sco5v6* | unixware* | OpenUNIX*) symcode='[[ABDT]]' ;; sysv4) symcode='[[DFNSTU]]' ;; esac # If we're using GNU nm, then use its standard symbol codes. case `$NM -V 2>&1` in *GNU* | *'with BFD'*) symcode='[[ABCDGIRSTW]]' ;; esac if test "$lt_cv_nm_interface" = "MS dumpbin"; then # Gets list of data symbols to import. lt_cv_sys_global_symbol_to_import="sed -n -e 's/^I .* \(.*\)$/\1/p'" # Adjust the below global symbol transforms to fixup imported variables. 
lt_cdecl_hook=" -e 's/^I .* \(.*\)$/extern __declspec(dllimport) char \1;/p'" lt_c_name_hook=" -e 's/^I .* \(.*\)$/ {\"\1\", (void *) 0},/p'" lt_c_name_lib_hook="\ -e 's/^I .* \(lib.*\)$/ {\"\1\", (void *) 0},/p'\ -e 's/^I .* \(.*\)$/ {\"lib\1\", (void *) 0},/p'" else # Disable hooks by default. lt_cv_sys_global_symbol_to_import= lt_cdecl_hook= lt_c_name_hook= lt_c_name_lib_hook= fi # Transform an extracted symbol line into a proper C declaration. # Some systems (esp. on ia64) link data and code symbols differently, # so use this general approach. lt_cv_sys_global_symbol_to_cdecl="sed -n"\ $lt_cdecl_hook\ " -e 's/^T .* \(.*\)$/extern int \1();/p'"\ " -e 's/^$symcode$symcode* .* \(.*\)$/extern char \1;/p'" # Transform an extracted symbol line into symbol name and symbol address lt_cv_sys_global_symbol_to_c_name_address="sed -n"\ $lt_c_name_hook\ " -e 's/^: \(.*\) .*$/ {\"\1\", (void *) 0},/p'"\ " -e 's/^$symcode$symcode* .* \(.*\)$/ {\"\1\", (void *) \&\1},/p'" # Transform an extracted symbol line into symbol name with lib prefix and # symbol address. lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n"\ $lt_c_name_lib_hook\ " -e 's/^: \(.*\) .*$/ {\"\1\", (void *) 0},/p'"\ " -e 's/^$symcode$symcode* .* \(lib.*\)$/ {\"\1\", (void *) \&\1},/p'"\ " -e 's/^$symcode$symcode* .* \(.*\)$/ {\"lib\1\", (void *) \&\1},/p'" # Handle CRLF in mingw tool chain opt_cr= case $build_os in mingw*) opt_cr=`$ECHO 'x\{0,1\}' | tr x '\015'` # option cr in regexp ;; esac # Try without a prefix underscore, then with it. for ac_symprfx in "" "_"; do # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol. symxfrm="\\1 $ac_symprfx\\2 \\2" # Write the raw and C identifiers. if test "$lt_cv_nm_interface" = "MS dumpbin"; then # Fake it for dumpbin and say T for any non-static function, # D for any global variable and I for any imported variable. # Also find C++ and __fastcall symbols from MSVC++, # which start with @ or ?. lt_cv_sys_global_symbol_pipe="$AWK ['"\ " {last_section=section; section=\$ 3};"\ " /^COFF SYMBOL TABLE/{for(i in hide) delete hide[i]};"\ " /Section length .*#relocs.*(pick any)/{hide[last_section]=1};"\ " /^ *Symbol name *: /{split(\$ 0,sn,\":\"); si=substr(sn[2],2)};"\ " /^ *Type *: code/{print \"T\",si,substr(si,length(prfx))};"\ " /^ *Type *: data/{print \"I\",si,substr(si,length(prfx))};"\ " \$ 0!~/External *\|/{next};"\ " / 0+ UNDEF /{next}; / UNDEF \([^|]\)*()/{next};"\ " {if(hide[section]) next};"\ " {f=\"D\"}; \$ 0~/\(\).*\|/{f=\"T\"};"\ " {split(\$ 0,a,/\||\r/); split(a[2],s)};"\ " s[1]~/^[@?]/{print f,s[1],s[1]; next};"\ " s[1]~prfx {split(s[1],t,\"@\"); print f,t[1],substr(t[1],length(prfx))}"\ " ' prfx=^$ac_symprfx]" else lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[[ ]]\($symcode$symcode*\)[[ ]][[ ]]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" fi lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | sed '/ __gnu_lto/d'" # Check to see that the pipe works correctly. pipe_works=no rm -f conftest* cat > conftest.$ac_ext <<_LT_EOF #ifdef __cplusplus extern "C" { #endif char nm_test_var; void nm_test_func(void); void nm_test_func(void){} #ifdef __cplusplus } #endif int main(){nm_test_var='a';nm_test_func();return(0);} _LT_EOF if AC_TRY_EVAL(ac_compile); then # Now try to grab the symbols. nlist=conftest.nm if AC_TRY_EVAL(NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist) && test -s "$nlist"; then # Try sorting and uniquifying the output. 
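# Annotation: at this point each line of $nlist is expected to have the form
# '<symcode> <raw name> <C name>' as produced by $symxfrm above, e.g.
# 'T _nm_test_func nm_test_func' when an underscore prefix is in use.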
if sort "$nlist" | uniq > "$nlist"T; then mv -f "$nlist"T "$nlist" else rm -f "$nlist"T fi # Make sure that we snagged all the symbols we need. if $GREP ' nm_test_var$' "$nlist" >/dev/null; then if $GREP ' nm_test_func$' "$nlist" >/dev/null; then cat <<_LT_EOF > conftest.$ac_ext /* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ #if defined _WIN32 || defined __CYGWIN__ || defined _WIN32_WCE /* DATA imports from DLLs on WIN32 can't be const, because runtime relocations are performed -- see ld's documentation on pseudo-relocs. */ # define LT@&t@_DLSYM_CONST #elif defined __osf__ /* This system does not cope well with relocations in const data. */ # define LT@&t@_DLSYM_CONST #else # define LT@&t@_DLSYM_CONST const #endif #ifdef __cplusplus extern "C" { #endif _LT_EOF # Now generate the symbol file. eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | $GREP -v main >> conftest.$ac_ext' cat <<_LT_EOF >> conftest.$ac_ext /* The mapping between symbol names and symbols. */ LT@&t@_DLSYM_CONST struct { const char *name; void *address; } lt__PROGRAM__LTX_preloaded_symbols[[]] = { { "@PROGRAM@", (void *) 0 }, _LT_EOF $SED "s/^$symcode$symcode* .* \(.*\)$/ {\"\1\", (void *) \&\1},/" < "$nlist" | $GREP -v main >> conftest.$ac_ext cat <<\_LT_EOF >> conftest.$ac_ext {0, (void *) 0} }; /* This works around a problem in FreeBSD linker */ #ifdef FREEBSD_WORKAROUND static const void *lt_preloaded_setup() { return lt__PROGRAM__LTX_preloaded_symbols; } #endif #ifdef __cplusplus } #endif _LT_EOF # Now try linking the two files. mv conftest.$ac_objext conftstm.$ac_objext lt_globsym_save_LIBS=$LIBS lt_globsym_save_CFLAGS=$CFLAGS LIBS=conftstm.$ac_objext CFLAGS="$CFLAGS$_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)" if AC_TRY_EVAL(ac_link) && test -s conftest$ac_exeext; then pipe_works=yes fi LIBS=$lt_globsym_save_LIBS CFLAGS=$lt_globsym_save_CFLAGS else echo "cannot find nm_test_func in $nlist" >&AS_MESSAGE_LOG_FD fi else echo "cannot find nm_test_var in $nlist" >&AS_MESSAGE_LOG_FD fi else echo "cannot run $lt_cv_sys_global_symbol_pipe" >&AS_MESSAGE_LOG_FD fi else echo "$progname: failed program was:" >&AS_MESSAGE_LOG_FD cat conftest.$ac_ext >&5 fi rm -rf conftest* conftst* # Do not use the global_symbol_pipe unless it works. if test yes = "$pipe_works"; then break else lt_cv_sys_global_symbol_pipe= fi done ]) if test -z "$lt_cv_sys_global_symbol_pipe"; then lt_cv_sys_global_symbol_to_cdecl= fi if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then AC_MSG_RESULT(failed) else AC_MSG_RESULT(ok) fi # Response file support. 
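# Annotation: the check below records '@' in nm_file_list_spec when the name
# lister is MS dumpbin or a GNU nm that advertises '@FILE' in its --help
# output; the assumption of this note is that '@' marks a response file
# holding the list of input objects for $NM.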
if test "$lt_cv_nm_interface" = "MS dumpbin"; then nm_file_list_spec='@' elif $NM --help 2>/dev/null | grep '[[@]]FILE' >/dev/null; then nm_file_list_spec='@' fi _LT_DECL([global_symbol_pipe], [lt_cv_sys_global_symbol_pipe], [1], [Take the output of nm and produce a listing of raw symbols and C names]) _LT_DECL([global_symbol_to_cdecl], [lt_cv_sys_global_symbol_to_cdecl], [1], [Transform the output of nm in a proper C declaration]) _LT_DECL([global_symbol_to_import], [lt_cv_sys_global_symbol_to_import], [1], [Transform the output of nm into a list of symbols to manually relocate]) _LT_DECL([global_symbol_to_c_name_address], [lt_cv_sys_global_symbol_to_c_name_address], [1], [Transform the output of nm in a C name address pair]) _LT_DECL([global_symbol_to_c_name_address_lib_prefix], [lt_cv_sys_global_symbol_to_c_name_address_lib_prefix], [1], [Transform the output of nm in a C name address pair when lib prefix is needed]) _LT_DECL([nm_interface], [lt_cv_nm_interface], [1], [The name lister interface]) _LT_DECL([], [nm_file_list_spec], [1], [Specify filename containing input files for $NM]) ]) # _LT_CMD_GLOBAL_SYMBOLS # _LT_COMPILER_PIC([TAGNAME]) # --------------------------- m4_defun([_LT_COMPILER_PIC], [m4_require([_LT_TAG_COMPILER])dnl _LT_TAGVAR(lt_prog_compiler_wl, $1)= _LT_TAGVAR(lt_prog_compiler_pic, $1)= _LT_TAGVAR(lt_prog_compiler_static, $1)= m4_if([$1], [CXX], [ # C++ specific cases for pic, static, wl, etc. if test yes = "$GXX"; then _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' case $host_os in aix*) # All AIX code is PIC. if test ia64 = "$host_cpu"; then # AIX 5 now supports IA64 processor _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' fi _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ;; amigaos*) case $host_cpu in powerpc) # see comment about AmigaOS4 .so support _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ;; m68k) # FIXME: we need at least 68020 code to build shared libraries, but # adding the '-m68020' flag to GCC prevents building anything better, # like '-m68040'. _LT_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4' ;; esac ;; beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) # PIC is the default for these OSes. ;; mingw* | cygwin* | os2* | pw32* | cegcc*) # This hack is so that the source file can tell whether it is being # built for inclusion in a dll (and should export symbols for example). # Although the cygwin gcc ignores -fPIC, still need this for old-style # (--disable-auto-import) libraries m4_if([$1], [GCJ], [], [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) case $host_os in os2*) _LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-static' ;; esac ;; darwin* | rhapsody*) # PIC is the default on this platform # Common symbols not allowed in MH_DYLIB files _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common' ;; *djgpp*) # DJGPP does not support shared libraries at all _LT_TAGVAR(lt_prog_compiler_pic, $1)= ;; haiku*) # PIC is the default for Haiku. # The "-static" flag exists, but is broken. _LT_TAGVAR(lt_prog_compiler_static, $1)= ;; interix[[3-9]]*) # Interix 3.x gcc -fpic/-fPIC options generate broken code. # Instead, we relocate shared libraries at runtime. ;; sysv4*MP*) if test -d /usr/nec; then _LT_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic fi ;; hpux*) # PIC is the default for 64-bit PA HP-UX, but not for 32-bit # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag # sets the default TLS model and affects inlining. 
case $host_cpu in hppa*64*) ;; *) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ;; esac ;; *qnx* | *nto*) # QNX uses GNU C++, but need to define -shared option too, otherwise # it will coredump. _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' ;; *) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ;; esac else case $host_os in aix[[4-9]]*) # All AIX code is PIC. if test ia64 = "$host_cpu"; then # AIX 5 now supports IA64 processor _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' else _LT_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp' fi ;; chorus*) case $cc_basename in cxch68*) # Green Hills C++ Compiler # _LT_TAGVAR(lt_prog_compiler_static, $1)="--no_auto_instantiation -u __main -u __premain -u _abort -r $COOL_DIR/lib/libOrb.a $MVME_DIR/lib/CC/libC.a $MVME_DIR/lib/classix/libcx.s.a" ;; esac ;; mingw* | cygwin* | os2* | pw32* | cegcc*) # This hack is so that the source file can tell whether it is being # built for inclusion in a dll (and should export symbols for example). m4_if([$1], [GCJ], [], [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) ;; dgux*) case $cc_basename in ec++*) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' ;; ghcx*) # Green Hills C++ Compiler _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' ;; *) ;; esac ;; freebsd* | dragonfly*) # FreeBSD uses GNU C++ ;; hpux9* | hpux10* | hpux11*) case $cc_basename in CC*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-a ${wl}archive' if test ia64 != "$host_cpu"; then _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z' fi ;; aCC*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-a ${wl}archive' case $host_cpu in hppa*64*|ia64*) # +Z the default ;; *) _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z' ;; esac ;; *) ;; esac ;; interix*) # This is c89, which is MS Visual C++ (no shared libs) # Anyone wants to do a port? ;; irix5* | irix6* | nonstopux*) case $cc_basename in CC*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' # CC pic flag -KPIC is the default. ;; *) ;; esac ;; linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) case $cc_basename in KCC*) # KAI C++ Compiler _LT_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ;; ecpc* ) # old Intel C++ for x86_64, which still supported -KPIC. _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' ;; icpc* ) # Intel C++, used to be incompatible with GCC. # ICC 10 doesn't accept -KPIC any more. _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' ;; pgCC* | pgcpp*) # Portland Group C++ compiler _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; cxx*) # Compaq C++ # Make sure the PIC flag is empty. It appears that all Alpha # Linux and Compaq Tru64 Unix objects are PIC. 
_LT_TAGVAR(lt_prog_compiler_pic, $1)= _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' ;; xlc* | xlC* | bgxl[[cC]]* | mpixl[[cC]]*) # IBM XL 8.0, 9.0 on PPC and BlueGene _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-qpic' _LT_TAGVAR(lt_prog_compiler_static, $1)='-qstaticlink' ;; *) case `$CC -V 2>&1 | sed 5q` in *Sun\ C*) # Sun C++ 5.9 _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' ;; esac ;; esac ;; lynxos*) ;; m88k*) ;; mvs*) case $cc_basename in cxx*) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-W c,exportall' ;; *) ;; esac ;; netbsd*) ;; *qnx* | *nto*) # QNX uses GNU C++, but need to define -shared option too, otherwise # it will coredump. _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' ;; osf3* | osf4* | osf5*) case $cc_basename in KCC*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,' ;; RCC*) # Rational C++ 2.4.1 _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' ;; cxx*) # Digital/Compaq C++ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' # Make sure the PIC flag is empty. It appears that all Alpha # Linux and Compaq Tru64 Unix objects are PIC. _LT_TAGVAR(lt_prog_compiler_pic, $1)= _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' ;; *) ;; esac ;; psos*) ;; solaris*) case $cc_basename in CC* | sunCC*) # Sun C++ 4.2, 5.x and Centerline C++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' ;; gcx*) # Green Hills C++ Compiler _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' ;; *) ;; esac ;; sunos4*) case $cc_basename in CC*) # Sun C++ 4.x _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; lcc*) # Lucid _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' ;; *) ;; esac ;; sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) case $cc_basename in CC*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; esac ;; tandem*) case $cc_basename in NCC*) # NonStop-UX NCC 3.20 _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' ;; *) ;; esac ;; vxworks*) ;; *) _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no ;; esac fi ], [ if test yes = "$GCC"; then _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' case $host_os in aix*) # All AIX code is PIC. if test ia64 = "$host_cpu"; then # AIX 5 now supports IA64 processor _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' fi _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ;; amigaos*) case $host_cpu in powerpc) # see comment about AmigaOS4 .so support _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ;; m68k) # FIXME: we need at least 68020 code to build shared libraries, but # adding the '-m68020' flag to GCC prevents building anything better, # like '-m68040'. _LT_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4' ;; esac ;; beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) # PIC is the default for these OSes. ;; mingw* | cygwin* | pw32* | os2* | cegcc*) # This hack is so that the source file can tell whether it is being # built for inclusion in a dll (and should export symbols for example). 
# Although the cygwin gcc ignores -fPIC, still need this for old-style # (--disable-auto-import) libraries m4_if([$1], [GCJ], [], [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) case $host_os in os2*) _LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-static' ;; esac ;; darwin* | rhapsody*) # PIC is the default on this platform # Common symbols not allowed in MH_DYLIB files _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common' ;; haiku*) # PIC is the default for Haiku. # The "-static" flag exists, but is broken. _LT_TAGVAR(lt_prog_compiler_static, $1)= ;; hpux*) # PIC is the default for 64-bit PA HP-UX, but not for 32-bit # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag # sets the default TLS model and affects inlining. case $host_cpu in hppa*64*) # +Z the default ;; *) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ;; esac ;; interix[[3-9]]*) # Interix 3.x gcc -fpic/-fPIC options generate broken code. # Instead, we relocate shared libraries at runtime. ;; msdosdjgpp*) # Just because we use GCC doesn't mean we suddenly get shared libraries # on systems that don't support them. _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no enable_shared=no ;; *nto* | *qnx*) # QNX uses GNU C++, but need to define -shared option too, otherwise # it will coredump. _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' ;; sysv4*MP*) if test -d /usr/nec; then _LT_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic fi ;; *) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ;; esac case $cc_basename in nvcc*) # Cuda Compiler Driver 2.2 _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Xlinker ' if test -n "$_LT_TAGVAR(lt_prog_compiler_pic, $1)"; then _LT_TAGVAR(lt_prog_compiler_pic, $1)="-Xcompiler $_LT_TAGVAR(lt_prog_compiler_pic, $1)" fi ;; esac else # PORTME Check for flag to pass linker flags through the system compiler. case $host_os in aix*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' if test ia64 = "$host_cpu"; then # AIX 5 now supports IA64 processor _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' else _LT_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp' fi ;; darwin* | rhapsody*) # PIC is the default on this platform # Common symbols not allowed in MH_DYLIB files _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common' case $cc_basename in nagfor*) # NAG Fortran compiler _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,-Wl,,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; esac ;; mingw* | cygwin* | pw32* | os2* | cegcc*) # This hack is so that the source file can tell whether it is being # built for inclusion in a dll (and should export symbols for example). m4_if([$1], [GCJ], [], [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) case $host_os in os2*) _LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-static' ;; esac ;; hpux9* | hpux10* | hpux11*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but # not for PA HP-UX. case $host_cpu in hppa*64*|ia64*) # +Z the default ;; *) _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z' ;; esac # Is there a better lt_prog_compiler_static that works with the bundled CC? _LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-a ${wl}archive' ;; irix5* | irix6* | nonstopux*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' # PIC (with -KPIC) is the default. _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' ;; linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) case $cc_basename in # old Intel for x86_64, which still supported -KPIC. 
ecc*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' ;; # icc used to be incompatible with GCC. # ICC 10 doesn't accept -KPIC any more. icc* | ifort*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' ;; # Lahey Fortran 8.1. lf95*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='--shared' _LT_TAGVAR(lt_prog_compiler_static, $1)='--static' ;; nagfor*) # NAG Fortran compiler _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,-Wl,,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; tcc*) # Fabrice Bellard et al's Tiny C Compiler _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' ;; pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*) # Portland Group compilers (*not* the Pentium gcc compiler, # which looks to be a dead project) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; ccc*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' # All Alpha code is PIC. _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' ;; xl* | bgxl* | bgf* | mpixl*) # IBM XL C 8.0/Fortran 10.1, 11.1 on PPC and BlueGene _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-qpic' _LT_TAGVAR(lt_prog_compiler_static, $1)='-qstaticlink' ;; *) case `$CC -V 2>&1 | sed 5q` in *Sun\ Ceres\ Fortran* | *Sun*Fortran*\ [[1-7]].* | *Sun*Fortran*\ 8.[[0-3]]*) # Sun Fortran 8.3 passes all unrecognized flags to the linker _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' _LT_TAGVAR(lt_prog_compiler_wl, $1)='' ;; *Sun\ F* | *Sun*Fortran*) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' ;; *Sun\ C*) # Sun C 5.9 _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' ;; *Intel*\ [[CF]]*Compiler*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' ;; *Portland\ Group*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; esac ;; esac ;; newsos6) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; *nto* | *qnx*) # QNX uses GNU C++, but need to define -shared option too, otherwise # it will coredump. _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' ;; osf3* | osf4* | osf5*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' # All OSF/1 code is PIC. 
_LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' ;; rdos*) _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' ;; solaris*) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' case $cc_basename in f77* | f90* | f95* | sunf77* | sunf90* | sunf95*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ';; *) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,';; esac ;; sunos4*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; sysv4 | sysv4.2uw2* | sysv4.3*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; sysv4*MP*) if test -d /usr/nec; then _LT_TAGVAR(lt_prog_compiler_pic, $1)='-Kconform_pic' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' fi ;; sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; unicos*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no ;; uts4*) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; *) _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no ;; esac fi ]) case $host_os in # For platforms that do not support PIC, -DPIC is meaningless: *djgpp*) _LT_TAGVAR(lt_prog_compiler_pic, $1)= ;; *) _LT_TAGVAR(lt_prog_compiler_pic, $1)="$_LT_TAGVAR(lt_prog_compiler_pic, $1)@&t@m4_if([$1],[],[ -DPIC],[m4_if([$1],[CXX],[ -DPIC],[])])" ;; esac AC_CACHE_CHECK([for $compiler option to produce PIC], [_LT_TAGVAR(lt_cv_prog_compiler_pic, $1)], [_LT_TAGVAR(lt_cv_prog_compiler_pic, $1)=$_LT_TAGVAR(lt_prog_compiler_pic, $1)]) _LT_TAGVAR(lt_prog_compiler_pic, $1)=$_LT_TAGVAR(lt_cv_prog_compiler_pic, $1) # # Check to make sure the PIC flag actually works. # if test -n "$_LT_TAGVAR(lt_prog_compiler_pic, $1)"; then _LT_COMPILER_OPTION([if $compiler PIC flag $_LT_TAGVAR(lt_prog_compiler_pic, $1) works], [_LT_TAGVAR(lt_cv_prog_compiler_pic_works, $1)], [$_LT_TAGVAR(lt_prog_compiler_pic, $1)@&t@m4_if([$1],[],[ -DPIC],[m4_if([$1],[CXX],[ -DPIC],[])])], [], [case $_LT_TAGVAR(lt_prog_compiler_pic, $1) in "" | " "*) ;; *) _LT_TAGVAR(lt_prog_compiler_pic, $1)=" $_LT_TAGVAR(lt_prog_compiler_pic, $1)" ;; esac], [_LT_TAGVAR(lt_prog_compiler_pic, $1)= _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no]) fi _LT_TAGDECL([pic_flag], [lt_prog_compiler_pic], [1], [Additional compiler flags for building library objects]) _LT_TAGDECL([wl], [lt_prog_compiler_wl], [1], [How to pass a linker flag through the compiler]) # # Check to make sure the static flag actually works. # wl=$_LT_TAGVAR(lt_prog_compiler_wl, $1) eval lt_tmp_static_flag=\"$_LT_TAGVAR(lt_prog_compiler_static, $1)\" _LT_LINKER_OPTION([if $compiler static flag $lt_tmp_static_flag works], _LT_TAGVAR(lt_cv_prog_compiler_static_works, $1), $lt_tmp_static_flag, [], [_LT_TAGVAR(lt_prog_compiler_static, $1)=]) _LT_TAGDECL([link_static_flag], [lt_prog_compiler_static], [1], [Compiler flag to prevent dynamic linking]) ])# _LT_COMPILER_PIC # _LT_LINKER_SHLIBS([TAGNAME]) # ---------------------------- # See if the linker supports building shared libraries. 
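# Annotation (a rough summary, not part of libtool): the macro below probes
# $LD and fills in per-tag variables such as archive_cmds, archive_expsym_cmds
# and export_symbols_cmds; ld_shlibs ends up as 'no' when no workable way to
# build shared libraries is found, in which case the related flags are cleared.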
m4_defun([_LT_LINKER_SHLIBS], [AC_REQUIRE([LT_PATH_LD])dnl AC_REQUIRE([LT_PATH_NM])dnl m4_require([_LT_PATH_MANIFEST_TOOL])dnl m4_require([_LT_FILEUTILS_DEFAULTS])dnl m4_require([_LT_DECL_EGREP])dnl m4_require([_LT_DECL_SED])dnl m4_require([_LT_CMD_GLOBAL_SYMBOLS])dnl m4_require([_LT_TAG_COMPILER])dnl AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries]) m4_if([$1], [CXX], [ _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' _LT_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*'] case $host_os in aix[[4-9]]*) # If we're using GNU nm, then we don't want the "-C" option. # -C means demangle to GNU nm, but means don't demangle to AIX nm. # Without the "-l" option, or with the "-B" option, AIX nm treats # weak defined symbols like other global defined symbols, whereas # GNU nm marks them as "W". # While the 'weak' keyword is ignored in the Export File, we need # it in the Import File for the 'aix-soname' feature, so we have # to replace the "-B" option with "-P" for AIX nm. if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then _LT_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && ([substr](\$ 3,1,1) != ".")) { if (\$ 2 == "W") { print \$ 3 " weak" } else { print \$ 3 } } }'\'' | sort -u > $export_symbols' else _LT_TAGVAR(export_symbols_cmds, $1)='`func_echo_all $NM | $SED -e '\''s/B\([[^B]]*\)$/P\1/'\''` -PCpgl $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) && ([substr](\$ 1,1,1) != ".")) { if ((\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) { print \$ 1 " weak" } else { print \$ 1 } } }'\'' | sort -u > $export_symbols' fi ;; pw32*) _LT_TAGVAR(export_symbols_cmds, $1)=$ltdll_cmds ;; cygwin* | mingw* | cegcc*) case $cc_basename in cl*) _LT_TAGVAR(exclude_expsyms, $1)='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*' ;; *) _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/;s/^.*[[ ]]__nm__\([[^ ]]*\)[[ ]][[^ ]]*/\1 DATA/;/^I[[ ]]/d;/^[[AITW]][[ ]]/s/.* //'\'' | sort | uniq > $export_symbols' _LT_TAGVAR(exclude_expsyms, $1)=['[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname'] ;; esac ;; *) _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' ;; esac ], [ runpath_var= _LT_TAGVAR(allow_undefined_flag, $1)= _LT_TAGVAR(always_export_symbols, $1)=no _LT_TAGVAR(archive_cmds, $1)= _LT_TAGVAR(archive_expsym_cmds, $1)= _LT_TAGVAR(compiler_needs_object, $1)=no _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no _LT_TAGVAR(export_dynamic_flag_spec, $1)= _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' _LT_TAGVAR(hardcode_automatic, $1)=no _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_direct_absolute, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= _LT_TAGVAR(hardcode_libdir_separator, $1)= _LT_TAGVAR(hardcode_minus_L, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported _LT_TAGVAR(inherit_rpath, $1)=no _LT_TAGVAR(link_all_deplibs, $1)=unknown _LT_TAGVAR(module_cmds, $1)= _LT_TAGVAR(module_expsym_cmds, $1)= _LT_TAGVAR(old_archive_from_new_cmds, $1)= 
_LT_TAGVAR(old_archive_from_expsyms_cmds, $1)= _LT_TAGVAR(thread_safe_flag_spec, $1)= _LT_TAGVAR(whole_archive_flag_spec, $1)= # include_expsyms should be a list of space-separated symbols to be *always* # included in the symbol list _LT_TAGVAR(include_expsyms, $1)= # exclude_expsyms can be an extended regexp of symbols to exclude # it will be wrapped by ' (' and ')$', so one must not match beginning or # end of line. Example: 'a|bc|.*d.*' will exclude the symbols 'a' and 'bc', # as well as any symbol that contains 'd'. _LT_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*'] # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out # platforms (ab)use it in PIC code, but their linkers get confused if # the symbol is explicitly referenced. Since portable code cannot # rely on this symbol name, it's probably fine to never include it in # preloaded symbol tables. # Exclude shared library initialization/finalization symbols. dnl Note also adjust exclude_expsyms for C++ above. extract_expsyms_cmds= case $host_os in cygwin* | mingw* | pw32* | cegcc*) # FIXME: the MSVC++ port hasn't been tested in a loooong time # When not using gcc, we currently assume that we are using # Microsoft Visual C++. if test yes != "$GCC"; then with_gnu_ld=no fi ;; interix*) # we just hope/assume this is gcc and not c89 (= MSVC++) with_gnu_ld=yes ;; openbsd* | bitrig*) with_gnu_ld=no ;; esac _LT_TAGVAR(ld_shlibs, $1)=yes # On some targets, GNU ld is compatible enough with the native linker # that we're better off using the native interface for both. lt_use_gnu_ld_interface=no if test yes = "$with_gnu_ld"; then case $host_os in aix*) # The AIX port of GNU ld has always aspired to compatibility # with the native linker. However, as the warning in the GNU ld # block says, versions before 2.19.5* couldn't really create working # shared libraries, regardless of the interface used. case `$LD -v 2>&1` in *\ \(GNU\ Binutils\)\ 2.19.5*) ;; *\ \(GNU\ Binutils\)\ 2.[[2-9]]*) ;; *\ \(GNU\ Binutils\)\ [[3-9]]*) ;; *) lt_use_gnu_ld_interface=yes ;; esac ;; *) lt_use_gnu_ld_interface=yes ;; esac fi if test yes = "$lt_use_gnu_ld_interface"; then # If archive_cmds runs LD, not CC, wlarc should be empty wlarc='$wl' # Set some defaults for GNU ld with shared library support. These # are reset later if shared libraries are not supported. Putting them # here allows them to be overridden if necessary. runpath_var=LD_RUN_PATH _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic' # ancient GNU ld didn't support --whole-archive et. al. if $LD --help 2>&1 | $GREP 'no-whole-archive' > /dev/null; then _LT_TAGVAR(whole_archive_flag_spec, $1)=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive' else _LT_TAGVAR(whole_archive_flag_spec, $1)= fi supports_anon_versioning=no case `$LD -v | $SED -e 's/([^)]\+)\s\+//' 2>&1` in *GNU\ gold*) supports_anon_versioning=yes ;; *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.10.*) ;; # catch versions < 2.11 *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... *\ 2.11.*) ;; # other 2.11 versions *) supports_anon_versioning=yes ;; esac # See if GNU ld supports shared libraries. 
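# Annotation: where anonymous versioning is available, archive_expsym_cmds
# below wraps the exported symbols in a generated GNU ld version script of
# roughly this shape (symbol names illustrative):
#   { global: foo; bar; local: *; };
# and passes it via -version-script so only the listed symbols stay exported.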
case $host_os in aix[[3-9]]*) # On AIX/PPC, the GNU linker is very broken if test ia64 != "$host_cpu"; then _LT_TAGVAR(ld_shlibs, $1)=no cat <<_LT_EOF 1>&2 *** Warning: the GNU linker, at least up to release 2.19, is reported *** to be unable to reliably create shared libraries on AIX. *** Therefore, libtool is disabling shared libraries support. If you *** really care for shared libraries, you may want to install binutils *** 2.20 or above, or modify your PATH so that a non-GNU linker is found. *** You will then need to restart the configuration process. _LT_EOF fi ;; amigaos*) case $host_cpu in powerpc) # see comment about AmigaOS4 .so support _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='' ;; m68k) _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(hardcode_minus_L, $1)=yes ;; esac ;; beos*) if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then _LT_TAGVAR(allow_undefined_flag, $1)=unsupported # Joseph Beckenbach says some releases of gcc # support --undefined. This deserves some investigation. FIXME _LT_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; cygwin* | mingw* | pw32* | cegcc*) # _LT_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless, # as there is no search path for DLLs. _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-all-symbols' _LT_TAGVAR(allow_undefined_flag, $1)=unsupported _LT_TAGVAR(always_export_symbols, $1)=no _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/;s/^.*[[ ]]__nm__\([[^ ]]*\)[[ ]][[^ ]]*/\1 DATA/;/^I[[ ]]/d;/^[[AITW]][[ ]]/s/.* //'\'' | sort | uniq > $export_symbols' _LT_TAGVAR(exclude_expsyms, $1)=['[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname'] if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' # If the export-symbols file already is a .def file, use it as # is; otherwise, prepend EXPORTS... 
_LT_TAGVAR(archive_expsym_cmds, $1)='if _LT_DLL_DEF_P([$export_symbols]); then cp $export_symbols $output_objdir/$soname.def; else echo EXPORTS > $output_objdir/$soname.def; cat $export_symbols >> $output_objdir/$soname.def; fi~ $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; haiku*) _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' _LT_TAGVAR(link_all_deplibs, $1)=yes ;; os2*) _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(hardcode_minus_L, $1)=yes _LT_TAGVAR(allow_undefined_flag, $1)=unsupported shrext_cmds=.dll _LT_TAGVAR(archive_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ $ECHO EXPORTS >> $output_objdir/$libname.def~ emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~ $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ emximp -o $lib $output_objdir/$libname.def' _LT_TAGVAR(archive_expsym_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ $ECHO EXPORTS >> $output_objdir/$libname.def~ prefix_cmds="$SED"~ if test EXPORTS = "`$SED 1q $export_symbols`"; then prefix_cmds="$prefix_cmds -e 1d"; fi~ prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~ cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~ $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ emximp -o $lib $output_objdir/$libname.def' _LT_TAGVAR(old_archive_From_new_cmds, $1)='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def' _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes ;; interix[[3-9]]*) _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. # Instead, shared libraries are loaded at an image base (0x10000000 by # default) and relocated if they conflict, which is a slow very memory # consuming and fragmenting process. To avoid this, we pick a random, # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link # time. Moving up from 0x10000000 also allows more sbrk(2) space. 
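# Annotation on the arithmetic used below: 1342177280 is 0x50000000 and
# 262144 is 256 KiB, so '${RANDOM-$$} % 4096 / 2 * 262144 + 1342177280'
# yields one of 2048 possible 256 KiB-aligned bases between 0x50000000 and
# 0x6FFC0000, matching the range described above.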
_LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='sed "s|^|_|" $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--retain-symbols-file,$output_objdir/$soname.expsym $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' ;; gnu* | linux* | tpf* | k*bsd*-gnu | kopensolaris*-gnu) tmp_diet=no if test linux-dietlibc = "$host_os"; then case $cc_basename in diet\ *) tmp_diet=yes;; # linux-dietlibc with static linking (!diet-dyn) esac fi if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \ && test no = "$tmp_diet" then tmp_addflag=' $pic_flag' tmp_sharedflag='-shared' case $cc_basename,$host_cpu in pgcc*) # Portland Group C compiler _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' tmp_addflag=' $pic_flag' ;; pgf77* | pgf90* | pgf95* | pgfortran*) # Portland Group f77 and f90 compilers _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' tmp_addflag=' $pic_flag -Mnomain' ;; ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 tmp_addflag=' -i_dynamic' ;; efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64 tmp_addflag=' -i_dynamic -nofor_main' ;; ifc* | ifort*) # Intel Fortran compiler tmp_addflag=' -nofor_main' ;; lf95*) # Lahey Fortran 8.1 _LT_TAGVAR(whole_archive_flag_spec, $1)= tmp_sharedflag='--shared' ;; nagfor*) # NAGFOR 5.3 tmp_sharedflag='-Wl,-shared' ;; xl[[cC]]* | bgxl[[cC]]* | mpixl[[cC]]*) # IBM XL C 8.0 on PPC (deal with xlf below) tmp_sharedflag='-qmkshrobj' tmp_addflag= ;; nvcc*) # Cuda Compiler Driver 2.2 _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' _LT_TAGVAR(compiler_needs_object, $1)=yes ;; esac case `$CC -V 2>&1 | sed 5q` in *Sun\ C*) # Sun C 5.9 _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' _LT_TAGVAR(compiler_needs_object, $1)=yes tmp_sharedflag='-G' ;; *Sun\ F*) # Sun Fortran 8.3 tmp_sharedflag='-G' ;; esac _LT_TAGVAR(archive_cmds, $1)='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' if test yes = "$supports_anon_versioning"; then _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~ cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ echo "local: *; };" >> $output_objdir/$libname.ver~ $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-version-script $wl$output_objdir/$libname.ver -o $lib' fi case $cc_basename in tcc*) _LT_TAGVAR(export_dynamic_flag_spec, $1)='-rdynamic' ;; xlf* | bgf* | bgxlf* | mpixlf*) # IBM XL Fortran 10.1 on PPC cannot create shared libs itself _LT_TAGVAR(whole_archive_flag_spec, $1)='--whole-archive$convenience 
--no-whole-archive' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' _LT_TAGVAR(archive_cmds, $1)='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib' if test yes = "$supports_anon_versioning"; then _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~ cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ echo "local: *; };" >> $output_objdir/$libname.ver~ $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' fi ;; esac else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; netbsd*) if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' wlarc= else _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' fi ;; solaris*) if $LD -v 2>&1 | $GREP 'BFD 2\.8' > /dev/null; then _LT_TAGVAR(ld_shlibs, $1)=no cat <<_LT_EOF 1>&2 *** Warning: The releases 2.8.* of the GNU linker cannot reliably *** create shared libraries on Solaris systems. Therefore, libtool *** is disabling shared libraries support. We urge you to upgrade GNU *** binutils to release 2.9.1 or newer. Another option is to modify *** your PATH or compiler configuration so that the native linker is *** used, and then restart. _LT_EOF elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) case `$LD -v 2>&1` in *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.1[[0-5]].*) _LT_TAGVAR(ld_shlibs, $1)=no cat <<_LT_EOF 1>&2 *** Warning: Releases of the GNU linker prior to 2.16.91.0.3 cannot *** reliably create shared libraries on SCO systems. Therefore, libtool *** is disabling shared libraries support. We urge you to upgrade GNU *** binutils to release 2.16.91.0.3 or newer. Another option is to modify *** your PATH or compiler configuration so that the native linker is *** used, and then restart. _LT_EOF ;; *) # For security reasons, it is highly recommended that you always # use absolute paths for naming shared libraries, and exclude the # DT_RUNPATH tag from executables and libraries. But doing so # requires that you compile everything twice, which is a pain. 
if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; esac ;; sunos4*) _LT_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' wlarc= _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; *) if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; esac if test no = "$_LT_TAGVAR(ld_shlibs, $1)"; then runpath_var= _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= _LT_TAGVAR(export_dynamic_flag_spec, $1)= _LT_TAGVAR(whole_archive_flag_spec, $1)= fi else # PORTME fill in a description of your system's linker (not GNU ld) case $host_os in aix3*) _LT_TAGVAR(allow_undefined_flag, $1)=unsupported _LT_TAGVAR(always_export_symbols, $1)=yes _LT_TAGVAR(archive_expsym_cmds, $1)='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' # Note: this linker hardcodes the directories in LIBPATH if there # are no directories specified by -L. _LT_TAGVAR(hardcode_minus_L, $1)=yes if test yes = "$GCC" && test -z "$lt_prog_compiler_static"; then # Neither direct hardcoding nor static linking is supported with a # broken collect2. _LT_TAGVAR(hardcode_direct, $1)=unsupported fi ;; aix[[4-9]]*) if test ia64 = "$host_cpu"; then # On IA64, the linker does run time linking by default, so we don't # have to do anything special. aix_use_runtimelinking=no exp_sym_flag='-Bexport' no_entry_flag= else # If we're using GNU nm, then we don't want the "-C" option. # -C means demangle to GNU nm, but means don't demangle to AIX nm. # Without the "-l" option, or with the "-B" option, AIX nm treats # weak defined symbols like other global defined symbols, whereas # GNU nm marks them as "W". # While the 'weak' keyword is ignored in the Export File, we need # it in the Import File for the 'aix-soname' feature, so we have # to replace the "-B" option with "-P" for AIX nm. if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then _LT_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && ([substr](\$ 3,1,1) != ".")) { if (\$ 2 == "W") { print \$ 3 " weak" } else { print \$ 3 } } }'\'' | sort -u > $export_symbols' else _LT_TAGVAR(export_symbols_cmds, $1)='`func_echo_all $NM | $SED -e '\''s/B\([[^B]]*\)$/P\1/'\''` -PCpgl $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) && ([substr](\$ 1,1,1) != ".")) { if ((\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) { print \$ 1 " weak" } else { print \$ 1 } } }'\'' | sort -u > $export_symbols' fi aix_use_runtimelinking=no # Test if we are trying to use run time linking or normal # AIX style linking. 
If -brtl is somewhere in LDFLAGS, we # have runtime linking enabled, and use it for executables. # For shared libraries, we enable/disable runtime linking # depending on the kind of the shared library created - # when "with_aix_soname,aix_use_runtimelinking" is: # "aix,no" lib.a(lib.so.V) shared, rtl:no, for executables # "aix,yes" lib.so shared, rtl:yes, for executables # lib.a static archive # "both,no" lib.so.V(shr.o) shared, rtl:yes # lib.a(lib.so.V) shared, rtl:no, for executables # "both,yes" lib.so.V(shr.o) shared, rtl:yes, for executables # lib.a(lib.so.V) shared, rtl:no # "svr4,*" lib.so.V(shr.o) shared, rtl:yes, for executables # lib.a static archive case $host_os in aix4.[[23]]|aix4.[[23]].*|aix[[5-9]]*) for ld_flag in $LDFLAGS; do if (test x-brtl = "x$ld_flag" || test x-Wl,-brtl = "x$ld_flag"); then aix_use_runtimelinking=yes break fi done if test svr4,no = "$with_aix_soname,$aix_use_runtimelinking"; then # With aix-soname=svr4, we create the lib.so.V shared archives only, # so we don't have lib.a shared libs to link our executables. # We have to force runtime linking in this case. aix_use_runtimelinking=yes LDFLAGS="$LDFLAGS -Wl,-brtl" fi ;; esac exp_sym_flag='-bexport' no_entry_flag='-bnoentry' fi # When large executables or shared objects are built, AIX ld can # have problems creating the table of contents. If linking a library # or program results in "error TOC overflow" add -mminimal-toc to # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. _LT_TAGVAR(archive_cmds, $1)='' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_direct_absolute, $1)=yes _LT_TAGVAR(hardcode_libdir_separator, $1)=':' _LT_TAGVAR(link_all_deplibs, $1)=yes _LT_TAGVAR(file_list_spec, $1)='$wl-f,' case $with_aix_soname,$aix_use_runtimelinking in aix,*) ;; # traditional, no import file svr4,* | *,yes) # use import file # The Import File defines what to hardcode. _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_direct_absolute, $1)=no ;; esac if test yes = "$GCC"; then case $host_os in aix4.[[012]]|aix4.[[012]].*) # We only want to do this on AIX 4.2 and lower, the check # below for broken collect2 doesn't work under 4.3+ collect2name=`$CC -print-prog-name=collect2` if test -f "$collect2name" && strings "$collect2name" | $GREP resolve_lib_name >/dev/null then # We have reworked collect2 : else # We have old collect2 _LT_TAGVAR(hardcode_direct, $1)=unsupported # It fails to find uninstalled libraries when the uninstalled # path is not listed in the libpath. Setting hardcode_minus_L # to unsupported forces relinking _LT_TAGVAR(hardcode_minus_L, $1)=yes _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)= fi ;; esac shared_flag='-shared' if test yes = "$aix_use_runtimelinking"; then shared_flag="$shared_flag "'$wl-G' fi # Need to ensure runtime linking is disabled for the traditional # shared library, or the linker may eventually find shared libraries # /with/ Import File - we do not want to mix them. shared_flag_aix='-shared' shared_flag_svr4='-shared $wl-G' else # not using gcc if test ia64 = "$host_cpu"; then # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release # chokes on -Wl,-G. 
The following line is correct: shared_flag='-G' else if test yes = "$aix_use_runtimelinking"; then shared_flag='$wl-G' else shared_flag='$wl-bM:SRE' fi shared_flag_aix='$wl-bM:SRE' shared_flag_svr4='$wl-G' fi fi _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-bexpall' # It seems that -bexpall does not export symbols beginning with # underscore (_), so it is better to generate a list of symbols to export. _LT_TAGVAR(always_export_symbols, $1)=yes if test aix,yes = "$with_aix_soname,$aix_use_runtimelinking"; then # Warning - without using the other runtime loading flags (-brtl), # -berok will link without error, but may produce a broken library. _LT_TAGVAR(allow_undefined_flag, $1)='-berok' # Determine the default libpath from the value encoded in an # empty executable. _LT_SYS_MODULE_PATH_AIX([$1]) _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-blibpath:$libdir:'"$aix_libpath" _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs $wl'$no_entry_flag' $compiler_flags `if test -n "$allow_undefined_flag"; then func_echo_all "$wl$allow_undefined_flag"; else :; fi` $wl'$exp_sym_flag:\$export_symbols' '$shared_flag else if test ia64 = "$host_cpu"; then _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-R $libdir:/usr/lib:/lib' _LT_TAGVAR(allow_undefined_flag, $1)="-z nodefs" _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\$wl$no_entry_flag"' $compiler_flags $wl$allow_undefined_flag '"\$wl$exp_sym_flag:\$export_symbols" else # Determine the default libpath from the value encoded in an # empty executable. _LT_SYS_MODULE_PATH_AIX([$1]) _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-blibpath:$libdir:'"$aix_libpath" # Warning - without using the other run time loading flags, # -berok will link without error, but may produce a broken library. _LT_TAGVAR(no_undefined_flag, $1)=' $wl-bernotok' _LT_TAGVAR(allow_undefined_flag, $1)=' $wl-berok' if test yes = "$with_gnu_ld"; then # We only use this code for GNU lds that support --whole-archive. _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive$convenience $wl--no-whole-archive' else # Exported symbols can be pulled into shared objects from archives _LT_TAGVAR(whole_archive_flag_spec, $1)='$convenience' fi _LT_TAGVAR(archive_cmds_need_lc, $1)=yes _LT_TAGVAR(archive_expsym_cmds, $1)='$RM -r $output_objdir/$realname.d~$MKDIR $output_objdir/$realname.d' # -brtl affects multiple linker settings, -berok does not and is overridden later compiler_flags_filtered='`func_echo_all "$compiler_flags " | $SED -e "s%-brtl\\([[, ]]\\)%-berok\\1%g"`' if test svr4 != "$with_aix_soname"; then # This is similar to how AIX traditionally builds its shared libraries. _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$CC '$shared_flag_aix' -o $output_objdir/$realname.d/$soname $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$realname.d/$soname' fi if test aix != "$with_aix_soname"; then _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$CC '$shared_flag_svr4' -o $output_objdir/$realname.d/$shared_archive_member_spec.o $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$STRIP -e $output_objdir/$realname.d/$shared_archive_member_spec.o~( func_echo_all "#! 
$soname($shared_archive_member_spec.o)"; if test shr_64 = "$shared_archive_member_spec"; then func_echo_all "# 64"; else func_echo_all "# 32"; fi; cat $export_symbols ) > $output_objdir/$realname.d/$shared_archive_member_spec.imp~$AR $AR_FLAGS $output_objdir/$soname $output_objdir/$realname.d/$shared_archive_member_spec.o $output_objdir/$realname.d/$shared_archive_member_spec.imp' else # used by -dlpreopen to get the symbols _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$MV $output_objdir/$realname.d/$soname $output_objdir' fi _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$RM -r $output_objdir/$realname.d' fi fi ;; amigaos*) case $host_cpu in powerpc) # see comment about AmigaOS4 .so support _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='' ;; m68k) _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(hardcode_minus_L, $1)=yes ;; esac ;; bsdi[[45]]*) _LT_TAGVAR(export_dynamic_flag_spec, $1)=-rdynamic ;; cygwin* | mingw* | pw32* | cegcc*) # When not using gcc, we currently assume that we are using # Microsoft Visual C++. # hardcode_libdir_flag_spec is actually meaningless, as there is # no search path for DLLs. case $cc_basename in cl*) # Native MSVC _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' _LT_TAGVAR(allow_undefined_flag, $1)=unsupported _LT_TAGVAR(always_export_symbols, $1)=yes _LT_TAGVAR(file_list_spec, $1)='@' # Tell ltmain to make .lib files, not .a files. libext=lib # Tell ltmain to make .dll files, not .so files. shrext_cmds=.dll # FIXME: Setting linknames here is a bad hack. _LT_TAGVAR(archive_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~linknames=' _LT_TAGVAR(archive_expsym_cmds, $1)='if _LT_DLL_DEF_P([$export_symbols]); then cp "$export_symbols" "$output_objdir/$soname.def"; echo "$tool_output_objdir$soname.def" > "$output_objdir/$soname.exp"; else $SED -e '\''s/^/-link -EXPORT:/'\'' < $export_symbols > $output_objdir/$soname.exp; fi~ $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ linknames=' # The linker will not automatically build a static lib if we build a DLL. 
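# For illustration only (file names below are hypothetical, not produced by
# this macro): when the export list is not already a .def file, the expsym
# command above turns every symbol into a linker option collected in a
# response file before the final link, roughly:
#   $SED -e 's/^/-link -EXPORT:/' < foo.sym > libfoo.exp
#   $CC -o libfoo-1.dll foo.obj "@libfoo.exp" -Wl,-DLL,-IMPLIB:"libfoo.dll.lib"
# If the list already starts with EXPORTS, it is copied verbatim to
# $soname.def and only that .def file name is written into the .exp file.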
# _LT_TAGVAR(old_archive_from_new_cmds, $1)='true' _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes _LT_TAGVAR(exclude_expsyms, $1)='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*' _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1,DATA/'\'' | $SED -e '\''/^[[AITW]][[ ]]/s/.*[[ ]]//'\'' | sort | uniq > $export_symbols' # Don't use ranlib _LT_TAGVAR(old_postinstall_cmds, $1)='chmod 644 $oldlib' _LT_TAGVAR(postlink_cmds, $1)='lt_outputfile="@OUTPUT@"~ lt_tool_outputfile="@TOOL_OUTPUT@"~ case $lt_outputfile in *.exe|*.EXE) ;; *) lt_outputfile=$lt_outputfile.exe lt_tool_outputfile=$lt_tool_outputfile.exe ;; esac~ if test : != "$MANIFEST_TOOL" && test -f "$lt_outputfile.manifest"; then $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; $RM "$lt_outputfile.manifest"; fi' ;; *) # Assume MSVC wrapper _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' _LT_TAGVAR(allow_undefined_flag, $1)=unsupported # Tell ltmain to make .lib files, not .a files. libext=lib # Tell ltmain to make .dll files, not .so files. shrext_cmds=.dll # FIXME: Setting linknames here is a bad hack. _LT_TAGVAR(archive_cmds, $1)='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' # The linker will automatically build a .lib file if we build a DLL. _LT_TAGVAR(old_archive_from_new_cmds, $1)='true' # FIXME: Should let the user specify the lib program. _LT_TAGVAR(old_archive_cmds, $1)='lib -OUT:$oldlib$oldobjs$old_deplibs' _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes ;; esac ;; darwin* | rhapsody*) _LT_DARWIN_LINKER_FEATURES($1) ;; dgux*) _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor # support. Future versions do this automatically, but an explicit c++rt0.o # does not break anything, and helps significantly (at the cost of a little # extra space). freebsd2.2*) _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; # Unfortunately, older versions of FreeBSD 2 do not have this feature. freebsd2.*) _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_minus_L, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; # FreeBSD 3 and greater uses gcc -shared to do shared libraries. 
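# Roughly speaking (illustrative names, assuming a GCC-style driver), the
# archive_cmds set just below reduce to a single driver link such as:
#   cc -shared -fPIC -o libfoo.so.1 foo.o bar.o
# while hardcode_libdir_flag_spec later injects -R$libdir when programs are
# linked against the resulting library.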
freebsd* | dragonfly*) _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; hpux9*) if test yes = "$GCC"; then _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared $pic_flag $wl+b $wl$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' else _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' fi _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl+b $wl$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: _LT_TAGVAR(hardcode_direct, $1)=yes # hardcode_minus_L: Not really in the search PATH, # but as the default location of the library. _LT_TAGVAR(hardcode_minus_L, $1)=yes _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' ;; hpux10*) if test yes,no = "$GCC,$with_gnu_ld"; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags' else _LT_TAGVAR(archive_cmds, $1)='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' fi if test no = "$with_gnu_ld"; then _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl+b $wl$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_direct_absolute, $1)=yes _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' # hardcode_minus_L: Not really in the search PATH, # but as the default location of the library. _LT_TAGVAR(hardcode_minus_L, $1)=yes fi ;; hpux11*) if test yes,no = "$GCC,$with_gnu_ld"; then case $host_cpu in hppa*64*) _LT_TAGVAR(archive_cmds, $1)='$CC -shared $wl+h $wl$soname -o $lib $libobjs $deplibs $compiler_flags' ;; ia64*) _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $wl+h $wl$soname $wl+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' ;; *) _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags' ;; esac else case $host_cpu in hppa*64*) _LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname -o $lib $libobjs $deplibs $compiler_flags' ;; ia64*) _LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname $wl+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' ;; *) m4_if($1, [], [ # Older versions of the 11.00 compiler do not understand -b yet # (HP92453-01 A.11.01.20 doesn't, HP92453-01 B.11.X.35175-35176.GP does) _LT_LINKER_OPTION([if $CC understands -b], _LT_TAGVAR(lt_cv_prog_compiler__b, $1), [-b], [_LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags'], [_LT_TAGVAR(archive_cmds, $1)='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags'])], [_LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags']) ;; esac fi if test no = "$with_gnu_ld"; then _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl+b $wl$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: case $host_cpu in hppa*64*|ia64*) _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; *) _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_direct_absolute, $1)=yes _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' # hardcode_minus_L: 
Not really in the search PATH, # but as the default location of the library. _LT_TAGVAR(hardcode_minus_L, $1)=yes ;; esac fi ;; irix5* | irix6* | nonstopux*) if test yes = "$GCC"; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' # Try to use the -exported_symbol ld option, if it does not # work, assume that -exports_file does not work either and # implicitly export all symbols. # This should be the same for all languages, so no per-tag cache variable. AC_CACHE_CHECK([whether the $host_os linker accepts -exported_symbol], [lt_cv_irix_exported_symbol], [save_LDFLAGS=$LDFLAGS LDFLAGS="$LDFLAGS -shared $wl-exported_symbol ${wl}foo $wl-update_registry $wl/dev/null" AC_LINK_IFELSE( [AC_LANG_SOURCE( [AC_LANG_CASE([C], [[int foo (void) { return 0; }]], [C++], [[int foo (void) { return 0; }]], [Fortran 77], [[ subroutine foo end]], [Fortran], [[ subroutine foo end]])])], [lt_cv_irix_exported_symbol=yes], [lt_cv_irix_exported_symbol=no]) LDFLAGS=$save_LDFLAGS]) if test yes = "$lt_cv_irix_exported_symbol"; then _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations $wl-exports_file $wl$export_symbols -o $lib' fi else _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -exports_file $export_symbols -o $lib' fi _LT_TAGVAR(archive_cmds_need_lc, $1)='no' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: _LT_TAGVAR(inherit_rpath, $1)=yes _LT_TAGVAR(link_all_deplibs, $1)=yes ;; linux*) case $cc_basename in tcc*) # Fabrice Bellard et al's Tiny C Compiler _LT_TAGVAR(ld_shlibs, $1)=yes _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' ;; esac ;; netbsd*) if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out else _LT_TAGVAR(archive_cmds, $1)='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF fi _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; newsos6) _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; *nto* | *qnx*) ;; openbsd* | bitrig*) if test -f /usr/libexec/ld.so; then _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no _LT_TAGVAR(hardcode_direct_absolute, $1)=yes if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags 
$wl-retain-symbols-file,$export_symbols' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' else _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir' fi else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; os2*) _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(hardcode_minus_L, $1)=yes _LT_TAGVAR(allow_undefined_flag, $1)=unsupported shrext_cmds=.dll _LT_TAGVAR(archive_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ $ECHO EXPORTS >> $output_objdir/$libname.def~ emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~ $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ emximp -o $lib $output_objdir/$libname.def' _LT_TAGVAR(archive_expsym_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ $ECHO EXPORTS >> $output_objdir/$libname.def~ prefix_cmds="$SED"~ if test EXPORTS = "`$SED 1q $export_symbols`"; then prefix_cmds="$prefix_cmds -e 1d"; fi~ prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~ cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~ $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ emximp -o $lib $output_objdir/$libname.def' _LT_TAGVAR(old_archive_From_new_cmds, $1)='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def' _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes ;; osf3*) if test yes = "$GCC"; then _LT_TAGVAR(allow_undefined_flag, $1)=' $wl-expect_unresolved $wl\*' _LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' else _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' _LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' fi _LT_TAGVAR(archive_cmds_need_lc, $1)='no' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: ;; osf4* | osf5*) # as osf3* with the addition of -msym flag if test yes = "$GCC"; then _LT_TAGVAR(allow_undefined_flag, $1)=' $wl-expect_unresolved $wl\*' _LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag $pic_flag $libobjs $deplibs $compiler_flags $wl-msym $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' else _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' _LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='for i 
in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; printf "%s\\n" "-hidden">> $lib.exp~ $CC -shared$allow_undefined_flag $wl-input $wl$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib~$RM $lib.exp' # Both c and cxx compiler support -rpath directly _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' fi _LT_TAGVAR(archive_cmds_need_lc, $1)='no' _LT_TAGVAR(hardcode_libdir_separator, $1)=: ;; solaris*) _LT_TAGVAR(no_undefined_flag, $1)=' -z defs' if test yes = "$GCC"; then wlarc='$wl' _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $wl-z ${wl}text $wl-h $wl$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -shared $pic_flag $wl-z ${wl}text $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' else case `$CC -V 2>&1` in *"Compilers 5.0"*) wlarc='' _LT_TAGVAR(archive_cmds, $1)='$LD -G$allow_undefined_flag -h $soname -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $LD -G$allow_undefined_flag -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$RM $lib.exp' ;; *) wlarc='$wl' _LT_TAGVAR(archive_cmds, $1)='$CC -G$allow_undefined_flag -h $soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -G$allow_undefined_flag -M $lib.exp -h $soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' ;; esac fi _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' _LT_TAGVAR(hardcode_shlibpath_var, $1)=no case $host_os in solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; *) # The compiler driver will combine and reorder linker options, # but understands '-z linker_flag'. GCC discards it without '$wl', # but is careful enough not to reorder. # Supported since Solaris 2.6 (maybe 2.5.1?) if test yes = "$GCC"; then _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl-z ${wl}allextract$convenience $wl-z ${wl}defaultextract' else _LT_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract' fi ;; esac _LT_TAGVAR(link_all_deplibs, $1)=yes ;; sunos4*) if test sequent = "$host_vendor"; then # Use $CC to link under sequent, because it throws in some extra .o # files that make .init and .fini sections work. _LT_TAGVAR(archive_cmds, $1)='$CC -G $wl-h $soname -o $lib $libobjs $deplibs $compiler_flags' else _LT_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' fi _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_minus_L, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; sysv4) case $host_vendor in sni) _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(hardcode_direct, $1)=yes # is this really true??? ;; siemens) ## LD is ld it makes a PLAMLIB ## CC just makes a GrossModule. 
_LT_TAGVAR(archive_cmds, $1)='$LD -G -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(reload_cmds, $1)='$CC -r -o $output$reload_objs' _LT_TAGVAR(hardcode_direct, $1)=no ;; motorola) _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(hardcode_direct, $1)=no #Motorola manual says yes, but my tests say they lie ;; esac runpath_var='LD_RUN_PATH' _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; sysv4.3*) _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(hardcode_shlibpath_var, $1)=no _LT_TAGVAR(export_dynamic_flag_spec, $1)='-Bexport' ;; sysv4*MP*) if test -d /usr/nec; then _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(hardcode_shlibpath_var, $1)=no runpath_var=LD_RUN_PATH hardcode_runpath_var=yes _LT_TAGVAR(ld_shlibs, $1)=yes fi ;; sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[[01]].[[10]]* | unixware7* | sco3.2v5.0.[[024]]*) _LT_TAGVAR(no_undefined_flag, $1)='$wl-z,text' _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=no runpath_var='LD_RUN_PATH' if test yes = "$GCC"; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' else _LT_TAGVAR(archive_cmds, $1)='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' fi ;; sysv5* | sco3.2v5* | sco5v6*) # Note: We CANNOT use -z defs as we might desire, because we do not # link with -lc, and that would cause any symbols used from libc to # always be unresolved, which means just about no library would # ever link correctly. If we're not using GNU ld we use -z text # though, which does catch some bad symbols but isn't as heavy-handed # as -z defs. 
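# Put differently: because -lc is never passed here, '-z defs' would flag
# every libc reference as unresolved, whereas '-z text' only rejects
# relocations against read-only (text) segments. A hypothetical non-PIC
# object slipped into the link is therefore still caught at link time, but
# ordinary undefined libc symbols are left for the runtime linker.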
_LT_TAGVAR(no_undefined_flag, $1)='$wl-z,text' _LT_TAGVAR(allow_undefined_flag, $1)='$wl-z,nodefs' _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-R,$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=':' _LT_TAGVAR(link_all_deplibs, $1)=yes _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-Bexport' runpath_var='LD_RUN_PATH' if test yes = "$GCC"; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' else _LT_TAGVAR(archive_cmds, $1)='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' fi ;; uts4*) _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; *) _LT_TAGVAR(ld_shlibs, $1)=no ;; esac if test sni = "$host_vendor"; then case $host in sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-Blargedynsym' ;; esac fi fi ]) AC_MSG_RESULT([$_LT_TAGVAR(ld_shlibs, $1)]) test no = "$_LT_TAGVAR(ld_shlibs, $1)" && can_build_shared=no _LT_TAGVAR(with_gnu_ld, $1)=$with_gnu_ld _LT_DECL([], [libext], [0], [Old archive suffix (normally "a")])dnl _LT_DECL([], [shrext_cmds], [1], [Shared library suffix (normally ".so")])dnl _LT_DECL([], [extract_expsyms_cmds], [2], [The commands to extract the exported symbol list from a shared archive]) # # Do we need to explicitly link libc? # case "x$_LT_TAGVAR(archive_cmds_need_lc, $1)" in x|xyes) # Assume -lc should be added _LT_TAGVAR(archive_cmds_need_lc, $1)=yes if test yes,yes = "$GCC,$enable_shared"; then case $_LT_TAGVAR(archive_cmds, $1) in *'~'*) # FIXME: we may have to deal with multi-command sequences. ;; '$CC '*) # Test whether the compiler implicitly links with -lc since on some # systems, -lgcc has to come before -lc. If gcc already passes -lc # to ld, don't add -lc before -lgcc. AC_CACHE_CHECK([whether -lc should be explicitly linked in], [lt_cv_]_LT_TAGVAR(archive_cmds_need_lc, $1), [$RM conftest* echo "$lt_simple_compile_test_code" > conftest.$ac_ext if AC_TRY_EVAL(ac_compile) 2>conftest.err; then soname=conftest lib=conftest libobjs=conftest.$ac_objext deplibs= wl=$_LT_TAGVAR(lt_prog_compiler_wl, $1) pic_flag=$_LT_TAGVAR(lt_prog_compiler_pic, $1) compiler_flags=-v linker_flags=-v verstring= output_objdir=. 
libname=conftest lt_save_allow_undefined_flag=$_LT_TAGVAR(allow_undefined_flag, $1) _LT_TAGVAR(allow_undefined_flag, $1)= if AC_TRY_EVAL(_LT_TAGVAR(archive_cmds, $1) 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) then lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1)=no else lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1)=yes fi _LT_TAGVAR(allow_undefined_flag, $1)=$lt_save_allow_undefined_flag else cat conftest.err 1>&5 fi $RM conftest* ]) _LT_TAGVAR(archive_cmds_need_lc, $1)=$lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1) ;; esac fi ;; esac _LT_TAGDECL([build_libtool_need_lc], [archive_cmds_need_lc], [0], [Whether or not to add -lc for building shared libraries]) _LT_TAGDECL([allow_libtool_libs_with_static_runtimes], [enable_shared_with_static_runtimes], [0], [Whether or not to disallow shared libs when runtime libs are static]) _LT_TAGDECL([], [export_dynamic_flag_spec], [1], [Compiler flag to allow reflexive dlopens]) _LT_TAGDECL([], [whole_archive_flag_spec], [1], [Compiler flag to generate shared objects directly from archives]) _LT_TAGDECL([], [compiler_needs_object], [1], [Whether the compiler copes with passing no objects directly]) _LT_TAGDECL([], [old_archive_from_new_cmds], [2], [Create an old-style archive from a shared archive]) _LT_TAGDECL([], [old_archive_from_expsyms_cmds], [2], [Create a temporary old-style archive to link instead of a shared archive]) _LT_TAGDECL([], [archive_cmds], [2], [Commands used to build a shared archive]) _LT_TAGDECL([], [archive_expsym_cmds], [2]) _LT_TAGDECL([], [module_cmds], [2], [Commands used to build a loadable module if different from building a shared archive.]) _LT_TAGDECL([], [module_expsym_cmds], [2]) _LT_TAGDECL([], [with_gnu_ld], [1], [Whether we are building with GNU ld or not]) _LT_TAGDECL([], [allow_undefined_flag], [1], [Flag that allows shared libraries with undefined symbols to be built]) _LT_TAGDECL([], [no_undefined_flag], [1], [Flag that enforces no undefined symbols]) _LT_TAGDECL([], [hardcode_libdir_flag_spec], [1], [Flag to hardcode $libdir into a binary during linking. 
This must work even if $libdir does not exist]) _LT_TAGDECL([], [hardcode_libdir_separator], [1], [Whether we need a single "-rpath" flag with a separated argument]) _LT_TAGDECL([], [hardcode_direct], [0], [Set to "yes" if using DIR/libNAME$shared_ext during linking hardcodes DIR into the resulting binary]) _LT_TAGDECL([], [hardcode_direct_absolute], [0], [Set to "yes" if using DIR/libNAME$shared_ext during linking hardcodes DIR into the resulting binary and the resulting library dependency is "absolute", i.e impossible to change by setting $shlibpath_var if the library is relocated]) _LT_TAGDECL([], [hardcode_minus_L], [0], [Set to "yes" if using the -LDIR flag during linking hardcodes DIR into the resulting binary]) _LT_TAGDECL([], [hardcode_shlibpath_var], [0], [Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR into the resulting binary]) _LT_TAGDECL([], [hardcode_automatic], [0], [Set to "yes" if building a shared library automatically hardcodes DIR into the library and all subsequent libraries and executables linked against it]) _LT_TAGDECL([], [inherit_rpath], [0], [Set to yes if linker adds runtime paths of dependent libraries to runtime path list]) _LT_TAGDECL([], [link_all_deplibs], [0], [Whether libtool must link a program against all its dependency libraries]) _LT_TAGDECL([], [always_export_symbols], [0], [Set to "yes" if exported symbols are required]) _LT_TAGDECL([], [export_symbols_cmds], [2], [The commands to list exported symbols]) _LT_TAGDECL([], [exclude_expsyms], [1], [Symbols that should not be listed in the preloaded symbols]) _LT_TAGDECL([], [include_expsyms], [1], [Symbols that must always be exported]) _LT_TAGDECL([], [prelink_cmds], [2], [Commands necessary for linking programs (against libraries) with templates]) _LT_TAGDECL([], [postlink_cmds], [2], [Commands necessary for finishing linking programs]) _LT_TAGDECL([], [file_list_spec], [1], [Specify filename containing input files]) dnl FIXME: Not yet implemented dnl _LT_TAGDECL([], [thread_safe_flag_spec], [1], dnl [Compiler flag to generate thread safe objects]) ])# _LT_LINKER_SHLIBS # _LT_LANG_C_CONFIG([TAG]) # ------------------------ # Ensure that the configuration variables for a C compiler are suitably # defined. These variables are subsequently used by _LT_CONFIG to write # the compiler configuration to 'libtool'. m4_defun([_LT_LANG_C_CONFIG], [m4_require([_LT_DECL_EGREP])dnl lt_save_CC=$CC AC_LANG_PUSH(C) # Source file extension for C test sources. ac_ext=c # Object file extension for compiled C test sources. objext=o _LT_TAGVAR(objext, $1)=$objext # Code to be used in simple compile tests lt_simple_compile_test_code="int some_variable = 0;" # Code to be used in simple link tests lt_simple_link_test_code='int main(){return(0);}' _LT_TAG_COMPILER # Save the default compiler, since it gets overwritten when the other # tags are being tested, and _LT_TAGVAR(compiler, []) is a NOP. 
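# For example, while the CXX tag is being configured later, CC is temporarily
# set to $CXX; without this saved copy the default (C) configuration could
# end up referring to the C++ driver instead of the C driver probed first.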
compiler_DEFAULT=$CC # save warnings/boilerplate of simple test code _LT_COMPILER_BOILERPLATE _LT_LINKER_BOILERPLATE if test -n "$compiler"; then _LT_COMPILER_NO_RTTI($1) _LT_COMPILER_PIC($1) _LT_COMPILER_C_O($1) _LT_COMPILER_FILE_LOCKS($1) _LT_LINKER_SHLIBS($1) _LT_SYS_DYNAMIC_LINKER($1) _LT_LINKER_HARDCODE_LIBPATH($1) LT_SYS_DLOPEN_SELF _LT_CMD_STRIPLIB # Report what library types will actually be built AC_MSG_CHECKING([if libtool supports shared libraries]) AC_MSG_RESULT([$can_build_shared]) AC_MSG_CHECKING([whether to build shared libraries]) test no = "$can_build_shared" && enable_shared=no # On AIX, shared libraries and static libraries use the same namespace, and # are all built from PIC. case $host_os in aix3*) test yes = "$enable_shared" && enable_static=no if test -n "$RANLIB"; then archive_cmds="$archive_cmds~\$RANLIB \$lib" postinstall_cmds='$RANLIB $lib' fi ;; aix[[4-9]]*) if test ia64 != "$host_cpu"; then case $enable_shared,$with_aix_soname,$aix_use_runtimelinking in yes,aix,yes) ;; # shared object as lib.so file only yes,svr4,*) ;; # shared object as lib.so archive member only yes,*) enable_static=no ;; # shared object in lib.a archive as well esac fi ;; esac AC_MSG_RESULT([$enable_shared]) AC_MSG_CHECKING([whether to build static libraries]) # Make sure either enable_shared or enable_static is yes. test yes = "$enable_shared" || enable_static=yes AC_MSG_RESULT([$enable_static]) _LT_CONFIG($1) fi AC_LANG_POP CC=$lt_save_CC ])# _LT_LANG_C_CONFIG # _LT_LANG_CXX_CONFIG([TAG]) # -------------------------- # Ensure that the configuration variables for a C++ compiler are suitably # defined. These variables are subsequently used by _LT_CONFIG to write # the compiler configuration to 'libtool'. m4_defun([_LT_LANG_CXX_CONFIG], [m4_require([_LT_FILEUTILS_DEFAULTS])dnl m4_require([_LT_DECL_EGREP])dnl m4_require([_LT_PATH_MANIFEST_TOOL])dnl if test -n "$CXX" && ( test no != "$CXX" && ( (test g++ = "$CXX" && `g++ -v >/dev/null 2>&1` ) || (test g++ != "$CXX"))); then AC_PROG_CXXCPP else _lt_caught_CXX_error=yes fi AC_LANG_PUSH(C++) _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(allow_undefined_flag, $1)= _LT_TAGVAR(always_export_symbols, $1)=no _LT_TAGVAR(archive_expsym_cmds, $1)= _LT_TAGVAR(compiler_needs_object, $1)=no _LT_TAGVAR(export_dynamic_flag_spec, $1)= _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_direct_absolute, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= _LT_TAGVAR(hardcode_libdir_separator, $1)= _LT_TAGVAR(hardcode_minus_L, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported _LT_TAGVAR(hardcode_automatic, $1)=no _LT_TAGVAR(inherit_rpath, $1)=no _LT_TAGVAR(module_cmds, $1)= _LT_TAGVAR(module_expsym_cmds, $1)= _LT_TAGVAR(link_all_deplibs, $1)=unknown _LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds _LT_TAGVAR(reload_flag, $1)=$reload_flag _LT_TAGVAR(reload_cmds, $1)=$reload_cmds _LT_TAGVAR(no_undefined_flag, $1)= _LT_TAGVAR(whole_archive_flag_spec, $1)= _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no # Source file extension for C++ test sources. ac_ext=cpp # Object file extension for compiled C++ test sources. objext=o _LT_TAGVAR(objext, $1)=$objext # No sense in running all these tests if we already determined that # the CXX compiler isn't working. Some variables (like enable_shared) # are currently assumed to apply to all compilers on this platform, # and will be corrupted by setting them based on a non-working compiler. 
if test yes != "$_lt_caught_CXX_error"; then # Code to be used in simple compile tests lt_simple_compile_test_code="int some_variable = 0;" # Code to be used in simple link tests lt_simple_link_test_code='int main(int, char *[[]]) { return(0); }' # ltmain only uses $CC for tagged configurations so make sure $CC is set. _LT_TAG_COMPILER # save warnings/boilerplate of simple test code _LT_COMPILER_BOILERPLATE _LT_LINKER_BOILERPLATE # Allow CC to be a program name with arguments. lt_save_CC=$CC lt_save_CFLAGS=$CFLAGS lt_save_LD=$LD lt_save_GCC=$GCC GCC=$GXX lt_save_with_gnu_ld=$with_gnu_ld lt_save_path_LD=$lt_cv_path_LD if test -n "${lt_cv_prog_gnu_ldcxx+set}"; then lt_cv_prog_gnu_ld=$lt_cv_prog_gnu_ldcxx else $as_unset lt_cv_prog_gnu_ld fi if test -n "${lt_cv_path_LDCXX+set}"; then lt_cv_path_LD=$lt_cv_path_LDCXX else $as_unset lt_cv_path_LD fi test -z "${LDCXX+set}" || LD=$LDCXX CC=${CXX-"c++"} CFLAGS=$CXXFLAGS compiler=$CC _LT_TAGVAR(compiler, $1)=$CC _LT_CC_BASENAME([$compiler]) if test -n "$compiler"; then # We don't want -fno-exceptions when compiling C++ code, so set the # no_builtin_flag separately if test yes = "$GXX"; then _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin' else _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)= fi if test yes = "$GXX"; then # Set up default GNU C++ configuration LT_PATH_LD # Check if GNU C++ uses GNU ld as the underlying linker, since the # archiving commands below assume that GNU ld is being used. if test yes = "$with_gnu_ld"; then _LT_TAGVAR(archive_cmds, $1)='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic' # If archive_cmds runs LD, not CC, wlarc should be empty # XXX I think wlarc can be eliminated in ltcf-cxx, but I need to # investigate it a little bit more. (MM) wlarc='$wl' # ancient GNU ld didn't support --whole-archive et al. if eval "`$CC -print-prog-name=ld` --help 2>&1" | $GREP 'no-whole-archive' > /dev/null; then _LT_TAGVAR(whole_archive_flag_spec, $1)=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive' else _LT_TAGVAR(whole_archive_flag_spec, $1)= fi else with_gnu_ld=no wlarc= # A generic and very simple default shared library creation # command for GNU C++ for the case where it uses the native # linker, instead of GNU ld. If possible, this setting should be # overridden to take advantage of the native linker features on # the platform it is being used on. _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' fi # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library.
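# As a concrete illustration (path is hypothetical): 'g++ -shared -v
# conftest.o' echoes the underlying collect2/ld command line, including
# entries such as -L/usr/lib/gcc/x86_64-linux-gnu/4.9; the pipeline below
# drops the "Configured with:" banner and keeps only lines containing -L so
# the implicit C++ runtime search directories can be recovered later when
# library dependencies are computed.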
output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' else GXX=no with_gnu_ld=no wlarc= fi # PORTME: fill in a description of your system's C++ link characteristics AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries]) _LT_TAGVAR(ld_shlibs, $1)=yes case $host_os in aix3*) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; aix[[4-9]]*) if test ia64 = "$host_cpu"; then # On IA64, the linker does run time linking by default, so we don't # have to do anything special. aix_use_runtimelinking=no exp_sym_flag='-Bexport' no_entry_flag= else aix_use_runtimelinking=no # Test if we are trying to use run time linking or normal # AIX style linking. If -brtl is somewhere in LDFLAGS, we # have runtime linking enabled, and use it for executables. # For shared libraries, we enable/disable runtime linking # depending on the kind of the shared library created - # when "with_aix_soname,aix_use_runtimelinking" is: # "aix,no" lib.a(lib.so.V) shared, rtl:no, for executables # "aix,yes" lib.so shared, rtl:yes, for executables # lib.a static archive # "both,no" lib.so.V(shr.o) shared, rtl:yes # lib.a(lib.so.V) shared, rtl:no, for executables # "both,yes" lib.so.V(shr.o) shared, rtl:yes, for executables # lib.a(lib.so.V) shared, rtl:no # "svr4,*" lib.so.V(shr.o) shared, rtl:yes, for executables # lib.a static archive case $host_os in aix4.[[23]]|aix4.[[23]].*|aix[[5-9]]*) for ld_flag in $LDFLAGS; do case $ld_flag in *-brtl*) aix_use_runtimelinking=yes break ;; esac done if test svr4,no = "$with_aix_soname,$aix_use_runtimelinking"; then # With aix-soname=svr4, we create the lib.so.V shared archives only, # so we don't have lib.a shared libs to link our executables. # We have to force runtime linking in this case. aix_use_runtimelinking=yes LDFLAGS="$LDFLAGS -Wl,-brtl" fi ;; esac exp_sym_flag='-bexport' no_entry_flag='-bnoentry' fi # When large executables or shared objects are built, AIX ld can # have problems creating the table of contents. If linking a library # or program results in "error TOC overflow" add -mminimal-toc to # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. _LT_TAGVAR(archive_cmds, $1)='' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_direct_absolute, $1)=yes _LT_TAGVAR(hardcode_libdir_separator, $1)=':' _LT_TAGVAR(link_all_deplibs, $1)=yes _LT_TAGVAR(file_list_spec, $1)='$wl-f,' case $with_aix_soname,$aix_use_runtimelinking in aix,*) ;; # no import file svr4,* | *,yes) # use import file # The Import File defines what to hardcode. _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_direct_absolute, $1)=no ;; esac if test yes = "$GXX"; then case $host_os in aix4.[[012]]|aix4.[[012]].*) # We only want to do this on AIX 4.2 and lower, the check # below for broken collect2 doesn't work under 4.3+ collect2name=`$CC -print-prog-name=collect2` if test -f "$collect2name" && strings "$collect2name" | $GREP resolve_lib_name >/dev/null then # We have reworked collect2 : else # We have old collect2 _LT_TAGVAR(hardcode_direct, $1)=unsupported # It fails to find uninstalled libraries when the uninstalled # path is not listed in the libpath. 
Setting hardcode_minus_L # to unsupported forces relinking _LT_TAGVAR(hardcode_minus_L, $1)=yes _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)= fi esac shared_flag='-shared' if test yes = "$aix_use_runtimelinking"; then shared_flag=$shared_flag' $wl-G' fi # Need to ensure runtime linking is disabled for the traditional # shared library, or the linker may eventually find shared libraries # /with/ Import File - we do not want to mix them. shared_flag_aix='-shared' shared_flag_svr4='-shared $wl-G' else # not using gcc if test ia64 = "$host_cpu"; then # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release # chokes on -Wl,-G. The following line is correct: shared_flag='-G' else if test yes = "$aix_use_runtimelinking"; then shared_flag='$wl-G' else shared_flag='$wl-bM:SRE' fi shared_flag_aix='$wl-bM:SRE' shared_flag_svr4='$wl-G' fi fi _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-bexpall' # It seems that -bexpall does not export symbols beginning with # underscore (_), so it is better to generate a list of symbols to # export. _LT_TAGVAR(always_export_symbols, $1)=yes if test aix,yes = "$with_aix_soname,$aix_use_runtimelinking"; then # Warning - without using the other runtime loading flags (-brtl), # -berok will link without error, but may produce a broken library. # The "-G" linker flag allows undefined symbols. _LT_TAGVAR(no_undefined_flag, $1)='-bernotok' # Determine the default libpath from the value encoded in an empty # executable. _LT_SYS_MODULE_PATH_AIX([$1]) _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-blibpath:$libdir:'"$aix_libpath" _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs $wl'$no_entry_flag' $compiler_flags `if test -n "$allow_undefined_flag"; then func_echo_all "$wl$allow_undefined_flag"; else :; fi` $wl'$exp_sym_flag:\$export_symbols' '$shared_flag else if test ia64 = "$host_cpu"; then _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-R $libdir:/usr/lib:/lib' _LT_TAGVAR(allow_undefined_flag, $1)="-z nodefs" _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\$wl$no_entry_flag"' $compiler_flags $wl$allow_undefined_flag '"\$wl$exp_sym_flag:\$export_symbols" else # Determine the default libpath from the value encoded in an # empty executable. _LT_SYS_MODULE_PATH_AIX([$1]) _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-blibpath:$libdir:'"$aix_libpath" # Warning - without using the other run time loading flags, # -berok will link without error, but may produce a broken library. _LT_TAGVAR(no_undefined_flag, $1)=' $wl-bernotok' _LT_TAGVAR(allow_undefined_flag, $1)=' $wl-berok' if test yes = "$with_gnu_ld"; then # We only use this code for GNU lds that support --whole-archive. _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive$convenience $wl--no-whole-archive' else # Exported symbols can be pulled into shared objects from archives _LT_TAGVAR(whole_archive_flag_spec, $1)='$convenience' fi _LT_TAGVAR(archive_cmds_need_lc, $1)=yes _LT_TAGVAR(archive_expsym_cmds, $1)='$RM -r $output_objdir/$realname.d~$MKDIR $output_objdir/$realname.d' # -brtl affects multiple linker settings, -berok does not and is overridden later compiler_flags_filtered='`func_echo_all "$compiler_flags " | $SED -e "s%-brtl\\([[, ]]\\)%-berok\\1%g"`' if test svr4 != "$with_aix_soname"; then # This is similar to how AIX traditionally builds its shared # libraries. Need -bnortl late, we may have -brtl in LDFLAGS. 
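# Sketch of the resulting two-step build (file names illustrative, assuming
# GCC, where shared_flag_aix is plain -shared): the shared object is first
# linked inside a scratch $realname.d directory and then archived, roughly
#   gcc -shared ... -o .libs/libfoo.so.1.d/libfoo.so.1 ...
#   ar cru .libs/libfoo.a .libs/libfoo.so.1.d/libfoo.so.1
# so executables continue to link against the lib.a archive member in the
# traditional AIX fashion.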
_LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$CC '$shared_flag_aix' -o $output_objdir/$realname.d/$soname $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$realname.d/$soname' fi if test aix != "$with_aix_soname"; then _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$CC '$shared_flag_svr4' -o $output_objdir/$realname.d/$shared_archive_member_spec.o $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$STRIP -e $output_objdir/$realname.d/$shared_archive_member_spec.o~( func_echo_all "#! $soname($shared_archive_member_spec.o)"; if test shr_64 = "$shared_archive_member_spec"; then func_echo_all "# 64"; else func_echo_all "# 32"; fi; cat $export_symbols ) > $output_objdir/$realname.d/$shared_archive_member_spec.imp~$AR $AR_FLAGS $output_objdir/$soname $output_objdir/$realname.d/$shared_archive_member_spec.o $output_objdir/$realname.d/$shared_archive_member_spec.imp' else # used by -dlpreopen to get the symbols _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$MV $output_objdir/$realname.d/$soname $output_objdir' fi _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$RM -r $output_objdir/$realname.d' fi fi ;; beos*) if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then _LT_TAGVAR(allow_undefined_flag, $1)=unsupported # Joseph Beckenbach says some releases of gcc # support --undefined. This deserves some investigation. FIXME _LT_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; chorus*) case $cc_basename in *) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; esac ;; cygwin* | mingw* | pw32* | cegcc*) case $GXX,$cc_basename in ,cl* | no,cl*) # Native MSVC # hardcode_libdir_flag_spec is actually meaningless, as there is # no search path for DLLs. _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' _LT_TAGVAR(allow_undefined_flag, $1)=unsupported _LT_TAGVAR(always_export_symbols, $1)=yes _LT_TAGVAR(file_list_spec, $1)='@' # Tell ltmain to make .lib files, not .a files. libext=lib # Tell ltmain to make .dll files, not .so files. shrext_cmds=.dll # FIXME: Setting linknames here is a bad hack. _LT_TAGVAR(archive_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~linknames=' _LT_TAGVAR(archive_expsym_cmds, $1)='if _LT_DLL_DEF_P([$export_symbols]); then cp "$export_symbols" "$output_objdir/$soname.def"; echo "$tool_output_objdir$soname.def" > "$output_objdir/$soname.exp"; else $SED -e '\''s/^/-link -EXPORT:/'\'' < $export_symbols > $output_objdir/$soname.exp; fi~ $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ linknames=' # The linker will not automatically build a static lib if we build a DLL. 
# _LT_TAGVAR(old_archive_from_new_cmds, $1)='true' _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes # Don't use ranlib _LT_TAGVAR(old_postinstall_cmds, $1)='chmod 644 $oldlib' _LT_TAGVAR(postlink_cmds, $1)='lt_outputfile="@OUTPUT@"~ lt_tool_outputfile="@TOOL_OUTPUT@"~ case $lt_outputfile in *.exe|*.EXE) ;; *) lt_outputfile=$lt_outputfile.exe lt_tool_outputfile=$lt_tool_outputfile.exe ;; esac~ func_to_tool_file "$lt_outputfile"~ if test : != "$MANIFEST_TOOL" && test -f "$lt_outputfile.manifest"; then $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; $RM "$lt_outputfile.manifest"; fi' ;; *) # g++ # _LT_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless, # as there is no search path for DLLs. _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-all-symbols' _LT_TAGVAR(allow_undefined_flag, $1)=unsupported _LT_TAGVAR(always_export_symbols, $1)=no _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' # If the export-symbols file already is a .def file, use it as # is; otherwise, prepend EXPORTS... _LT_TAGVAR(archive_expsym_cmds, $1)='if _LT_DLL_DEF_P([$export_symbols]); then cp $export_symbols $output_objdir/$soname.def; else echo EXPORTS > $output_objdir/$soname.def; cat $export_symbols >> $output_objdir/$soname.def; fi~ $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; esac ;; darwin* | rhapsody*) _LT_DARWIN_LINKER_FEATURES($1) ;; os2*) _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(hardcode_minus_L, $1)=yes _LT_TAGVAR(allow_undefined_flag, $1)=unsupported shrext_cmds=.dll _LT_TAGVAR(archive_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ $ECHO EXPORTS >> $output_objdir/$libname.def~ emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~ $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ emximp -o $lib $output_objdir/$libname.def' _LT_TAGVAR(archive_expsym_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ $ECHO EXPORTS >> $output_objdir/$libname.def~ prefix_cmds="$SED"~ if test EXPORTS = "`$SED 1q $export_symbols`"; then prefix_cmds="$prefix_cmds -e 1d"; fi~ prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~ cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~ $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ emximp -o $lib $output_objdir/$libname.def' _LT_TAGVAR(old_archive_From_new_cmds, $1)='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def' _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes ;; dgux*) case $cc_basename in ec++*) # FIXME: insert proper C++ 
library support _LT_TAGVAR(ld_shlibs, $1)=no ;; ghcx*) # Green Hills C++ Compiler # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; *) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; esac ;; freebsd2.*) # C++ shared libraries reported to be fairly broken before # switch to ELF _LT_TAGVAR(ld_shlibs, $1)=no ;; freebsd-elf*) _LT_TAGVAR(archive_cmds_need_lc, $1)=no ;; freebsd* | dragonfly*) # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF # conventions _LT_TAGVAR(ld_shlibs, $1)=yes ;; haiku*) _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' _LT_TAGVAR(link_all_deplibs, $1)=yes ;; hpux9*) _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl+b $wl$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH, # but as the default # location of the library. case $cc_basename in CC*) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; aCC*) _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -b $wl+b $wl$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $EGREP "\-L"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' ;; *) if test yes = "$GXX"; then _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared -nostdlib $pic_flag $wl+b $wl$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' else # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no fi ;; esac ;; hpux10*|hpux11*) if test no = "$with_gnu_ld"; then _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl+b $wl$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: case $host_cpu in hppa*64*|ia64*) ;; *) _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' ;; esac fi case $host_cpu in hppa*64*|ia64*) _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; *) _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_direct_absolute, $1)=yes _LT_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH, # but as the default # location of the library. 
;; esac case $cc_basename in CC*) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; aCC*) case $host_cpu in hppa*64*) _LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; ia64*) _LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname $wl+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; *) _LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; esac # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $GREP "\-L"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' ;; *) if test yes = "$GXX"; then if test no = "$with_gnu_ld"; then case $host_cpu in hppa*64*) _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC $wl+h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; ia64*) _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $pic_flag $wl+h $wl$soname $wl+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; *) _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; esac fi else # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no fi ;; esac ;; interix[[3-9]]*) _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. # Instead, shared libraries are loaded at an image base (0x10000000 by # default) and relocated if they conflict, which is a slow very memory # consuming and fragmenting process. To avoid this, we pick a random, # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link # time. Moving up from 0x10000000 also allows more sbrk(2) space. _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='sed "s|^|_|" $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--retain-symbols-file,$output_objdir/$soname.expsym $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' ;; irix5* | irix6*) case $cc_basename in CC*) # SGI C++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' # Archives containing C++ object files must be created using # "CC -ar", where "CC" is the IRIX C++ compiler. This is # necessary to make sure instantiated templates are included # in the archive. 
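# For instance (object names illustrative), a plain 'ar cru libfoo.a foo.o'
# would miss template instantiations that the SGI compiler keeps in its own
# repository, so old_archive_cmds below drive the compiler instead:
#   CC -ar -WR,-u -o libfoo.a foo.o bar.o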
_LT_TAGVAR(old_archive_cmds, $1)='$CC -ar -WR,-u -o $oldlib $oldobjs' ;; *) if test yes = "$GXX"; then if test no = "$with_gnu_ld"; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' else _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` -o $lib' fi fi _LT_TAGVAR(link_all_deplibs, $1)=yes ;; esac _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: _LT_TAGVAR(inherit_rpath, $1)=yes ;; linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) case $cc_basename in KCC*) # Kuck and Associates, Inc. (KAI) C++ Compiler # KCC will only create a shared library if the output file # ends with ".so" (or ".sl" for HP-UX), so rename the library # to its proper name (with version) after linking. _LT_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib $wl-retain-symbols-file,$export_symbols; mv \$templib $lib' # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | $GREP "ld"`; rm -f libconftest$shared_ext; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic' # Archives containing C++ object files must be created using # "CC -Bstatic", where "CC" is the KAI C++ compiler. _LT_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs' ;; icpc* | ecpc* ) # Intel C++ with_gnu_ld=yes # version 8.0 and above of icpc choke on multiply defined symbols # if we add $predep_objects and $postdep_objects, however 7.1 and # earlier do not add the objects themselves. 
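# The case statement below keys on the banner printed by '$CC -V'; an icpc
# 7.x banner contains a string like "Version 7.1" (example only), which
# selects the commands that still pass $predep_objects/$postdep_objects,
# while any newer banner falls through to the 8.0-and-above branch.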
case `$CC -V 2>&1` in *"Version 7."*) _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' ;; *) # Version 8.0 or newer tmp_idyn= case $host_cpu in ia64*) tmp_idyn=' -i_dynamic';; esac _LT_TAGVAR(archive_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' ;; esac _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic' _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive$convenience $wl--no-whole-archive' ;; pgCC* | pgcpp*) # Portland Group C++ compiler case `$CC -V` in *pgCC\ [[1-5]].* | *pgcpp\ [[1-5]].*) _LT_TAGVAR(prelink_cmds, $1)='tpldir=Template.dir~ rm -rf $tpldir~ $CC --prelink_objects --instantiation_dir $tpldir $objs $libobjs $compile_deplibs~ compile_command="$compile_command `find $tpldir -name \*.o | sort | $NL2SP`"' _LT_TAGVAR(old_archive_cmds, $1)='tpldir=Template.dir~ rm -rf $tpldir~ $CC --prelink_objects --instantiation_dir $tpldir $oldobjs$old_deplibs~ $AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | sort | $NL2SP`~ $RANLIB $oldlib' _LT_TAGVAR(archive_cmds, $1)='tpldir=Template.dir~ rm -rf $tpldir~ $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='tpldir=Template.dir~ rm -rf $tpldir~ $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' ;; *) # Version 6 and above use weak symbols _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' ;; esac _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl--rpath $wl$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic' _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' ;; cxx*) # Compaq C++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib $wl-retain-symbols-file $wl$export_symbols' runpath_var=LD_RUN_PATH _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' 
_LT_TAGVAR(hardcode_libdir_separator, $1)=: # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "X$list" | $Xsed' ;; xl* | mpixl* | bgxl*) # IBM XL 8.0 on PPC, with GNU ld _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic' _LT_TAGVAR(archive_cmds, $1)='$CC -qmkshrobj $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' if test yes = "$supports_anon_versioning"; then _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~ cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ echo "local: *; };" >> $output_objdir/$libname.ver~ $CC -qmkshrobj $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-version-script $wl$output_objdir/$libname.ver -o $lib' fi ;; *) case `$CC -V 2>&1 | sed 5q` in *Sun\ C*) # Sun C++ 5.9 _LT_TAGVAR(no_undefined_flag, $1)=' -zdefs' _LT_TAGVAR(archive_cmds, $1)='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-retain-symbols-file $wl$export_symbols' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' _LT_TAGVAR(compiler_needs_object, $1)=yes # Not sure whether something based on # $CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 # would be better. output_verbose_link_cmd='func_echo_all' # Archives containing C++ object files must be created using # "CC -xar", where "CC" is the Sun C++ compiler. This is # necessary to make sure instantiated templates are included # in the archive. 
_LT_TAGVAR(old_archive_cmds, $1)='$CC -xar -o $oldlib $oldobjs' ;; esac ;; esac ;; lynxos*) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; m88k*) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; mvs*) case $cc_basename in cxx*) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; *) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; esac ;; netbsd*) if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $predep_objects $libobjs $deplibs $postdep_objects $linker_flags' wlarc= _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no fi # Workaround some broken pre-1.5 toolchains output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP conftest.$objext | $SED -e "s:-lgcc -lc -lgcc::"' ;; *nto* | *qnx*) _LT_TAGVAR(ld_shlibs, $1)=yes ;; openbsd* | bitrig*) if test -f /usr/libexec/ld.so; then _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no _LT_TAGVAR(hardcode_direct_absolute, $1)=yes _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir' if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`"; then _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-retain-symbols-file,$export_symbols -o $lib' _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' _LT_TAGVAR(whole_archive_flag_spec, $1)=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive' fi output_verbose_link_cmd=func_echo_all else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; osf3* | osf4* | osf5*) case $cc_basename in KCC*) # Kuck and Associates, Inc. (KAI) C++ Compiler # KCC will only create a shared library if the output file # ends with ".so" (or ".sl" for HP-UX), so rename the library # to its proper name (with version) after linking. _LT_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo "$lib" | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: # Archives containing C++ object files must be created using # the KAI C++ compiler. 
case $host in osf3*) _LT_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs' ;; *) _LT_TAGVAR(old_archive_cmds, $1)='$CC -o $oldlib $oldobjs' ;; esac ;; RCC*) # Rational C++ 2.4.1 # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; cxx*) case $host in osf3*) _LT_TAGVAR(allow_undefined_flag, $1)=' $wl-expect_unresolved $wl\*' _LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $soname `test -n "$verstring" && func_echo_all "$wl-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' ;; *) _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' _LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done~ echo "-hidden">> $lib.exp~ $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname $wl-input $wl$lib.exp `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib~ $RM $lib.exp' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' ;; esac _LT_TAGVAR(hardcode_libdir_separator, $1)=: # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld" | $GREP -v "ld:"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' ;; *) if test yes,no = "$GXX,$with_gnu_ld"; then _LT_TAGVAR(allow_undefined_flag, $1)=' $wl-expect_unresolved $wl\*' case $host in osf3*) _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' ;; *) _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-msym $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' ;; esac _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. 
output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' else # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no fi ;; esac ;; psos*) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; sunos4*) case $cc_basename in CC*) # Sun C++ 4.x # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; lcc*) # Lucid # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; *) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; esac ;; solaris*) case $cc_basename in CC* | sunCC*) # Sun C++ 4.2, 5.x and Centerline C++ _LT_TAGVAR(archive_cmds_need_lc,$1)=yes _LT_TAGVAR(no_undefined_flag, $1)=' -zdefs' _LT_TAGVAR(archive_cmds, $1)='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -G$allow_undefined_flag $wl-M $wl$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' _LT_TAGVAR(hardcode_shlibpath_var, $1)=no case $host_os in solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; *) # The compiler driver will combine and reorder linker options, # but understands '-z linker_flag'. # Supported since Solaris 2.6 (maybe 2.5.1?) _LT_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract' ;; esac _LT_TAGVAR(link_all_deplibs, $1)=yes output_verbose_link_cmd='func_echo_all' # Archives containing C++ object files must be created using # "CC -xar", where "CC" is the Sun C++ compiler. This is # necessary to make sure instantiated templates are included # in the archive. _LT_TAGVAR(old_archive_cmds, $1)='$CC -xar -o $oldlib $oldobjs' ;; gcx*) # Green Hills C++ Compiler _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib' # The C++ compiler must be used to create the archive. _LT_TAGVAR(old_archive_cmds, $1)='$CC $LDFLAGS -archive -o $oldlib $oldobjs' ;; *) # GNU C++ compiler with Solaris linker if test yes,no = "$GXX,$with_gnu_ld"; then _LT_TAGVAR(no_undefined_flag, $1)=' $wl-z ${wl}defs' if $CC --version | $GREP -v '^2\.7' > /dev/null; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -shared $pic_flag -nostdlib $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' else # g++ 2.7 appears to require '-G' NOT '-shared' on this # platform. 
_LT_TAGVAR(archive_cmds, $1)='$CC -G -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -G -nostdlib $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. output_verbose_link_cmd='$CC -G $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' fi _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-R $wl$libdir' case $host_os in solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; *) _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl-z ${wl}allextract$convenience $wl-z ${wl}defaultextract' ;; esac fi ;; esac ;; sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[[01]].[[10]]* | unixware7* | sco3.2v5.0.[[024]]*) _LT_TAGVAR(no_undefined_flag, $1)='$wl-z,text' _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=no runpath_var='LD_RUN_PATH' case $cc_basename in CC*) _LT_TAGVAR(archive_cmds, $1)='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ;; *) _LT_TAGVAR(archive_cmds, $1)='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ;; esac ;; sysv5* | sco3.2v5* | sco5v6*) # Note: We CANNOT use -z defs as we might desire, because we do not # link with -lc, and that would cause any symbols used from libc to # always be unresolved, which means just about no library would # ever link correctly. If we're not using GNU ld we use -z text # though, which does catch some bad symbols but isn't as heavy-handed # as -z defs. 
_LT_TAGVAR(no_undefined_flag, $1)='$wl-z,text' _LT_TAGVAR(allow_undefined_flag, $1)='$wl-z,nodefs' _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-R,$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=':' _LT_TAGVAR(link_all_deplibs, $1)=yes _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-Bexport' runpath_var='LD_RUN_PATH' case $cc_basename in CC*) _LT_TAGVAR(archive_cmds, $1)='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(old_archive_cmds, $1)='$CC -Tprelink_objects $oldobjs~ '"$_LT_TAGVAR(old_archive_cmds, $1)" _LT_TAGVAR(reload_cmds, $1)='$CC -Tprelink_objects $reload_objs~ '"$_LT_TAGVAR(reload_cmds, $1)" ;; *) _LT_TAGVAR(archive_cmds, $1)='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ;; esac ;; tandem*) case $cc_basename in NCC*) # NonStop-UX NCC 3.20 # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; *) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; esac ;; vxworks*) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; *) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; esac AC_MSG_RESULT([$_LT_TAGVAR(ld_shlibs, $1)]) test no = "$_LT_TAGVAR(ld_shlibs, $1)" && can_build_shared=no _LT_TAGVAR(GCC, $1)=$GXX _LT_TAGVAR(LD, $1)=$LD ## CAVEAT EMPTOR: ## There is no encapsulation within the following macros, do not change ## the running order or otherwise move them around unless you know exactly ## what you are doing... _LT_SYS_HIDDEN_LIBDEPS($1) _LT_COMPILER_PIC($1) _LT_COMPILER_C_O($1) _LT_COMPILER_FILE_LOCKS($1) _LT_LINKER_SHLIBS($1) _LT_SYS_DYNAMIC_LINKER($1) _LT_LINKER_HARDCODE_LIBPATH($1) _LT_CONFIG($1) fi # test -n "$compiler" CC=$lt_save_CC CFLAGS=$lt_save_CFLAGS LDCXX=$LD LD=$lt_save_LD GCC=$lt_save_GCC with_gnu_ld=$lt_save_with_gnu_ld lt_cv_path_LDCXX=$lt_cv_path_LD lt_cv_path_LD=$lt_save_path_LD lt_cv_prog_gnu_ldcxx=$lt_cv_prog_gnu_ld lt_cv_prog_gnu_ld=$lt_save_with_gnu_ld fi # test yes != "$_lt_caught_CXX_error" AC_LANG_POP ])# _LT_LANG_CXX_CONFIG # _LT_FUNC_STRIPNAME_CNF # ---------------------- # func_stripname_cnf prefix suffix name # strip PREFIX and SUFFIX off of NAME. # PREFIX and SUFFIX must not contain globbing or regex special # characters, hashes, percent signs, but SUFFIX may contain a leading # dot (in which case that matches only a dot). # # This function is identical to the (non-XSI) version of func_stripname, # except this one can be used by m4 code that may be executed by configure, # rather than the libtool script. m4_defun([_LT_FUNC_STRIPNAME_CNF],[dnl AC_REQUIRE([_LT_DECL_SED]) AC_REQUIRE([_LT_PROG_ECHO_BACKSLASH]) func_stripname_cnf () { case @S|@2 in .*) func_stripname_result=`$ECHO "@S|@3" | $SED "s%^@S|@1%%; s%\\\\@S|@2\$%%"`;; *) func_stripname_result=`$ECHO "@S|@3" | $SED "s%^@S|@1%%; s%@S|@2\$%%"`;; esac } # func_stripname_cnf ])# _LT_FUNC_STRIPNAME_CNF # _LT_SYS_HIDDEN_LIBDEPS([TAGNAME]) # --------------------------------- # Figure out "hidden" library dependencies from verbose # compiler output when linking a shared library. # Parse the compiler output and extract the necessary # objects, libraries and library flags. 
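# For illustration only (example output, not produced by this macro): with
# g++ on a typical ELF system the verbose link line may contain entries
# such as
#   .../crti.o .../crtbeginS.o conftest.o -L/usr/lib/gcc/... -lstdc++ -lm
#   -lgcc_s -lc .../crtendS.o
# Objects listed before conftest.o become predep_objects and those after it
# become postdep_objects; -L/-R paths seen before conftest.o are collected
# into compiler_lib_search_path, while -L/-R/-l flags seen after it go into
# postdeps.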
m4_defun([_LT_SYS_HIDDEN_LIBDEPS], [m4_require([_LT_FILEUTILS_DEFAULTS])dnl AC_REQUIRE([_LT_FUNC_STRIPNAME_CNF])dnl # Dependencies to place before and after the object being linked: _LT_TAGVAR(predep_objects, $1)= _LT_TAGVAR(postdep_objects, $1)= _LT_TAGVAR(predeps, $1)= _LT_TAGVAR(postdeps, $1)= _LT_TAGVAR(compiler_lib_search_path, $1)= dnl we can't use the lt_simple_compile_test_code here, dnl because it contains code intended for an executable, dnl not a library. It's possible we should let each dnl tag define a new lt_????_link_test_code variable, dnl but it's only used here... m4_if([$1], [], [cat > conftest.$ac_ext <<_LT_EOF int a; void foo (void) { a = 0; } _LT_EOF ], [$1], [CXX], [cat > conftest.$ac_ext <<_LT_EOF class Foo { public: Foo (void) { a = 0; } private: int a; }; _LT_EOF ], [$1], [F77], [cat > conftest.$ac_ext <<_LT_EOF subroutine foo implicit none integer*4 a a=0 return end _LT_EOF ], [$1], [FC], [cat > conftest.$ac_ext <<_LT_EOF subroutine foo implicit none integer a a=0 return end _LT_EOF ], [$1], [GCJ], [cat > conftest.$ac_ext <<_LT_EOF public class foo { private int a; public void bar (void) { a = 0; } }; _LT_EOF ], [$1], [GO], [cat > conftest.$ac_ext <<_LT_EOF package foo func foo() { } _LT_EOF ]) _lt_libdeps_save_CFLAGS=$CFLAGS case "$CC $CFLAGS " in #( *\ -flto*\ *) CFLAGS="$CFLAGS -fno-lto" ;; *\ -fwhopr*\ *) CFLAGS="$CFLAGS -fno-whopr" ;; *\ -fuse-linker-plugin*\ *) CFLAGS="$CFLAGS -fno-use-linker-plugin" ;; esac dnl Parse the compiler output and extract the necessary dnl objects, libraries and library flags. if AC_TRY_EVAL(ac_compile); then # Parse the compiler output and extract the necessary # objects, libraries and library flags. # Sentinel used to keep track of whether or not we are before # the conftest object file. pre_test_object_deps_done=no for p in `eval "$output_verbose_link_cmd"`; do case $prev$p in -L* | -R* | -l*) # Some compilers place space between "-{L,R}" and the path. # Remove the space. if test x-L = "$p" || test x-R = "$p"; then prev=$p continue fi # Expand the sysroot to ease extracting the directories later. if test -z "$prev"; then case $p in -L*) func_stripname_cnf '-L' '' "$p"; prev=-L; p=$func_stripname_result ;; -R*) func_stripname_cnf '-R' '' "$p"; prev=-R; p=$func_stripname_result ;; -l*) func_stripname_cnf '-l' '' "$p"; prev=-l; p=$func_stripname_result ;; esac fi case $p in =*) func_stripname_cnf '=' '' "$p"; p=$lt_sysroot$func_stripname_result ;; esac if test no = "$pre_test_object_deps_done"; then case $prev in -L | -R) # Internal compiler library paths should come after those # provided the user. The postdeps already come after the # user supplied libs so there is no need to process them. if test -z "$_LT_TAGVAR(compiler_lib_search_path, $1)"; then _LT_TAGVAR(compiler_lib_search_path, $1)=$prev$p else _LT_TAGVAR(compiler_lib_search_path, $1)="${_LT_TAGVAR(compiler_lib_search_path, $1)} $prev$p" fi ;; # The "-l" case would never come before the object being # linked, so don't bother handling this case. esac else if test -z "$_LT_TAGVAR(postdeps, $1)"; then _LT_TAGVAR(postdeps, $1)=$prev$p else _LT_TAGVAR(postdeps, $1)="${_LT_TAGVAR(postdeps, $1)} $prev$p" fi fi prev= ;; *.lto.$objext) ;; # Ignore GCC LTO objects *.$objext) # This assumes that the test object file only shows up # once in the compiler output. 
if test "$p" = "conftest.$objext"; then pre_test_object_deps_done=yes continue fi if test no = "$pre_test_object_deps_done"; then if test -z "$_LT_TAGVAR(predep_objects, $1)"; then _LT_TAGVAR(predep_objects, $1)=$p else _LT_TAGVAR(predep_objects, $1)="$_LT_TAGVAR(predep_objects, $1) $p" fi else if test -z "$_LT_TAGVAR(postdep_objects, $1)"; then _LT_TAGVAR(postdep_objects, $1)=$p else _LT_TAGVAR(postdep_objects, $1)="$_LT_TAGVAR(postdep_objects, $1) $p" fi fi ;; *) ;; # Ignore the rest. esac done # Clean up. rm -f a.out a.exe else echo "libtool.m4: error: problem compiling $1 test program" fi $RM -f confest.$objext CFLAGS=$_lt_libdeps_save_CFLAGS # PORTME: override above test on systems where it is broken m4_if([$1], [CXX], [case $host_os in interix[[3-9]]*) # Interix 3.5 installs completely hosed .la files for C++, so rather than # hack all around it, let's just trust "g++" to DTRT. _LT_TAGVAR(predep_objects,$1)= _LT_TAGVAR(postdep_objects,$1)= _LT_TAGVAR(postdeps,$1)= ;; esac ]) case " $_LT_TAGVAR(postdeps, $1) " in *" -lc "*) _LT_TAGVAR(archive_cmds_need_lc, $1)=no ;; esac _LT_TAGVAR(compiler_lib_search_dirs, $1)= if test -n "${_LT_TAGVAR(compiler_lib_search_path, $1)}"; then _LT_TAGVAR(compiler_lib_search_dirs, $1)=`echo " ${_LT_TAGVAR(compiler_lib_search_path, $1)}" | $SED -e 's! -L! !g' -e 's!^ !!'` fi _LT_TAGDECL([], [compiler_lib_search_dirs], [1], [The directories searched by this compiler when creating a shared library]) _LT_TAGDECL([], [predep_objects], [1], [Dependencies to place before and after the objects being linked to create a shared library]) _LT_TAGDECL([], [postdep_objects], [1]) _LT_TAGDECL([], [predeps], [1]) _LT_TAGDECL([], [postdeps], [1]) _LT_TAGDECL([], [compiler_lib_search_path], [1], [The library search path used internally by the compiler when linking a shared library]) ])# _LT_SYS_HIDDEN_LIBDEPS # _LT_LANG_F77_CONFIG([TAG]) # -------------------------- # Ensure that the configuration variables for a Fortran 77 compiler are # suitably defined. These variables are subsequently used by _LT_CONFIG # to write the compiler configuration to 'libtool'. m4_defun([_LT_LANG_F77_CONFIG], [AC_LANG_PUSH(Fortran 77) if test -z "$F77" || test no = "$F77"; then _lt_disable_F77=yes fi _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(allow_undefined_flag, $1)= _LT_TAGVAR(always_export_symbols, $1)=no _LT_TAGVAR(archive_expsym_cmds, $1)= _LT_TAGVAR(export_dynamic_flag_spec, $1)= _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_direct_absolute, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= _LT_TAGVAR(hardcode_libdir_separator, $1)= _LT_TAGVAR(hardcode_minus_L, $1)=no _LT_TAGVAR(hardcode_automatic, $1)=no _LT_TAGVAR(inherit_rpath, $1)=no _LT_TAGVAR(module_cmds, $1)= _LT_TAGVAR(module_expsym_cmds, $1)= _LT_TAGVAR(link_all_deplibs, $1)=unknown _LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds _LT_TAGVAR(reload_flag, $1)=$reload_flag _LT_TAGVAR(reload_cmds, $1)=$reload_cmds _LT_TAGVAR(no_undefined_flag, $1)= _LT_TAGVAR(whole_archive_flag_spec, $1)= _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no # Source file extension for f77 test sources. ac_ext=f # Object file extension for compiled f77 test sources. objext=o _LT_TAGVAR(objext, $1)=$objext # No sense in running all these tests if we already determined that # the F77 compiler isn't working. Some variables (like enable_shared) # are currently assumed to apply to all compilers on this platform, # and will be corrupted by setting them based on a non-working compiler. 
if test yes != "$_lt_disable_F77"; then # Code to be used in simple compile tests lt_simple_compile_test_code="\ subroutine t return end " # Code to be used in simple link tests lt_simple_link_test_code="\ program t end " # ltmain only uses $CC for tagged configurations so make sure $CC is set. _LT_TAG_COMPILER # save warnings/boilerplate of simple test code _LT_COMPILER_BOILERPLATE _LT_LINKER_BOILERPLATE # Allow CC to be a program name with arguments. lt_save_CC=$CC lt_save_GCC=$GCC lt_save_CFLAGS=$CFLAGS CC=${F77-"f77"} CFLAGS=$FFLAGS compiler=$CC _LT_TAGVAR(compiler, $1)=$CC _LT_CC_BASENAME([$compiler]) GCC=$G77 if test -n "$compiler"; then AC_MSG_CHECKING([if libtool supports shared libraries]) AC_MSG_RESULT([$can_build_shared]) AC_MSG_CHECKING([whether to build shared libraries]) test no = "$can_build_shared" && enable_shared=no # On AIX, shared libraries and static libraries use the same namespace, and # are all built from PIC. case $host_os in aix3*) test yes = "$enable_shared" && enable_static=no if test -n "$RANLIB"; then archive_cmds="$archive_cmds~\$RANLIB \$lib" postinstall_cmds='$RANLIB $lib' fi ;; aix[[4-9]]*) if test ia64 != "$host_cpu"; then case $enable_shared,$with_aix_soname,$aix_use_runtimelinking in yes,aix,yes) ;; # shared object as lib.so file only yes,svr4,*) ;; # shared object as lib.so archive member only yes,*) enable_static=no ;; # shared object in lib.a archive as well esac fi ;; esac AC_MSG_RESULT([$enable_shared]) AC_MSG_CHECKING([whether to build static libraries]) # Make sure either enable_shared or enable_static is yes. test yes = "$enable_shared" || enable_static=yes AC_MSG_RESULT([$enable_static]) _LT_TAGVAR(GCC, $1)=$G77 _LT_TAGVAR(LD, $1)=$LD ## CAVEAT EMPTOR: ## There is no encapsulation within the following macros, do not change ## the running order or otherwise move them around unless you know exactly ## what you are doing... _LT_COMPILER_PIC($1) _LT_COMPILER_C_O($1) _LT_COMPILER_FILE_LOCKS($1) _LT_LINKER_SHLIBS($1) _LT_SYS_DYNAMIC_LINKER($1) _LT_LINKER_HARDCODE_LIBPATH($1) _LT_CONFIG($1) fi # test -n "$compiler" GCC=$lt_save_GCC CC=$lt_save_CC CFLAGS=$lt_save_CFLAGS fi # test yes != "$_lt_disable_F77" AC_LANG_POP ])# _LT_LANG_F77_CONFIG # _LT_LANG_FC_CONFIG([TAG]) # ------------------------- # Ensure that the configuration variables for a Fortran compiler are # suitably defined. These variables are subsequently used by _LT_CONFIG # to write the compiler configuration to 'libtool'. m4_defun([_LT_LANG_FC_CONFIG], [AC_LANG_PUSH(Fortran) if test -z "$FC" || test no = "$FC"; then _lt_disable_FC=yes fi _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(allow_undefined_flag, $1)= _LT_TAGVAR(always_export_symbols, $1)=no _LT_TAGVAR(archive_expsym_cmds, $1)= _LT_TAGVAR(export_dynamic_flag_spec, $1)= _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_direct_absolute, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= _LT_TAGVAR(hardcode_libdir_separator, $1)= _LT_TAGVAR(hardcode_minus_L, $1)=no _LT_TAGVAR(hardcode_automatic, $1)=no _LT_TAGVAR(inherit_rpath, $1)=no _LT_TAGVAR(module_cmds, $1)= _LT_TAGVAR(module_expsym_cmds, $1)= _LT_TAGVAR(link_all_deplibs, $1)=unknown _LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds _LT_TAGVAR(reload_flag, $1)=$reload_flag _LT_TAGVAR(reload_cmds, $1)=$reload_cmds _LT_TAGVAR(no_undefined_flag, $1)= _LT_TAGVAR(whole_archive_flag_spec, $1)= _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no # Source file extension for fc test sources. ac_ext=${ac_fc_srcext-f} # Object file extension for compiled fc test sources. 
objext=o _LT_TAGVAR(objext, $1)=$objext # No sense in running all these tests if we already determined that # the FC compiler isn't working. Some variables (like enable_shared) # are currently assumed to apply to all compilers on this platform, # and will be corrupted by setting them based on a non-working compiler. if test yes != "$_lt_disable_FC"; then # Code to be used in simple compile tests lt_simple_compile_test_code="\ subroutine t return end " # Code to be used in simple link tests lt_simple_link_test_code="\ program t end " # ltmain only uses $CC for tagged configurations so make sure $CC is set. _LT_TAG_COMPILER # save warnings/boilerplate of simple test code _LT_COMPILER_BOILERPLATE _LT_LINKER_BOILERPLATE # Allow CC to be a program name with arguments. lt_save_CC=$CC lt_save_GCC=$GCC lt_save_CFLAGS=$CFLAGS CC=${FC-"f95"} CFLAGS=$FCFLAGS compiler=$CC GCC=$ac_cv_fc_compiler_gnu _LT_TAGVAR(compiler, $1)=$CC _LT_CC_BASENAME([$compiler]) if test -n "$compiler"; then AC_MSG_CHECKING([if libtool supports shared libraries]) AC_MSG_RESULT([$can_build_shared]) AC_MSG_CHECKING([whether to build shared libraries]) test no = "$can_build_shared" && enable_shared=no # On AIX, shared libraries and static libraries use the same namespace, and # are all built from PIC. case $host_os in aix3*) test yes = "$enable_shared" && enable_static=no if test -n "$RANLIB"; then archive_cmds="$archive_cmds~\$RANLIB \$lib" postinstall_cmds='$RANLIB $lib' fi ;; aix[[4-9]]*) if test ia64 != "$host_cpu"; then case $enable_shared,$with_aix_soname,$aix_use_runtimelinking in yes,aix,yes) ;; # shared object as lib.so file only yes,svr4,*) ;; # shared object as lib.so archive member only yes,*) enable_static=no ;; # shared object in lib.a archive as well esac fi ;; esac AC_MSG_RESULT([$enable_shared]) AC_MSG_CHECKING([whether to build static libraries]) # Make sure either enable_shared or enable_static is yes. test yes = "$enable_shared" || enable_static=yes AC_MSG_RESULT([$enable_static]) _LT_TAGVAR(GCC, $1)=$ac_cv_fc_compiler_gnu _LT_TAGVAR(LD, $1)=$LD ## CAVEAT EMPTOR: ## There is no encapsulation within the following macros, do not change ## the running order or otherwise move them around unless you know exactly ## what you are doing... _LT_SYS_HIDDEN_LIBDEPS($1) _LT_COMPILER_PIC($1) _LT_COMPILER_C_O($1) _LT_COMPILER_FILE_LOCKS($1) _LT_LINKER_SHLIBS($1) _LT_SYS_DYNAMIC_LINKER($1) _LT_LINKER_HARDCODE_LIBPATH($1) _LT_CONFIG($1) fi # test -n "$compiler" GCC=$lt_save_GCC CC=$lt_save_CC CFLAGS=$lt_save_CFLAGS fi # test yes != "$_lt_disable_FC" AC_LANG_POP ])# _LT_LANG_FC_CONFIG # _LT_LANG_GCJ_CONFIG([TAG]) # -------------------------- # Ensure that the configuration variables for the GNU Java Compiler compiler # are suitably defined. These variables are subsequently used by _LT_CONFIG # to write the compiler configuration to 'libtool'. m4_defun([_LT_LANG_GCJ_CONFIG], [AC_REQUIRE([LT_PROG_GCJ])dnl AC_LANG_SAVE # Source file extension for Java test sources. ac_ext=java # Object file extension for compiled Java test sources. objext=o _LT_TAGVAR(objext, $1)=$objext # Code to be used in simple compile tests lt_simple_compile_test_code="class foo {}" # Code to be used in simple link tests lt_simple_link_test_code='public class conftest { public static void main(String[[]] argv) {}; }' # ltmain only uses $CC for tagged configurations so make sure $CC is set. _LT_TAG_COMPILER # save warnings/boilerplate of simple test code _LT_COMPILER_BOILERPLATE _LT_LINKER_BOILERPLATE # Allow CC to be a program name with arguments. 
lt_save_CC=$CC lt_save_CFLAGS=$CFLAGS lt_save_GCC=$GCC GCC=yes CC=${GCJ-"gcj"} CFLAGS=$GCJFLAGS compiler=$CC _LT_TAGVAR(compiler, $1)=$CC _LT_TAGVAR(LD, $1)=$LD _LT_CC_BASENAME([$compiler]) # GCJ did not exist at the time GCC didn't implicitly link libc in. _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds _LT_TAGVAR(reload_flag, $1)=$reload_flag _LT_TAGVAR(reload_cmds, $1)=$reload_cmds if test -n "$compiler"; then _LT_COMPILER_NO_RTTI($1) _LT_COMPILER_PIC($1) _LT_COMPILER_C_O($1) _LT_COMPILER_FILE_LOCKS($1) _LT_LINKER_SHLIBS($1) _LT_LINKER_HARDCODE_LIBPATH($1) _LT_CONFIG($1) fi AC_LANG_RESTORE GCC=$lt_save_GCC CC=$lt_save_CC CFLAGS=$lt_save_CFLAGS ])# _LT_LANG_GCJ_CONFIG # _LT_LANG_GO_CONFIG([TAG]) # -------------------------- # Ensure that the configuration variables for the GNU Go compiler # are suitably defined. These variables are subsequently used by _LT_CONFIG # to write the compiler configuration to 'libtool'. m4_defun([_LT_LANG_GO_CONFIG], [AC_REQUIRE([LT_PROG_GO])dnl AC_LANG_SAVE # Source file extension for Go test sources. ac_ext=go # Object file extension for compiled Go test sources. objext=o _LT_TAGVAR(objext, $1)=$objext # Code to be used in simple compile tests lt_simple_compile_test_code="package main; func main() { }" # Code to be used in simple link tests lt_simple_link_test_code='package main; func main() { }' # ltmain only uses $CC for tagged configurations so make sure $CC is set. _LT_TAG_COMPILER # save warnings/boilerplate of simple test code _LT_COMPILER_BOILERPLATE _LT_LINKER_BOILERPLATE # Allow CC to be a program name with arguments. lt_save_CC=$CC lt_save_CFLAGS=$CFLAGS lt_save_GCC=$GCC GCC=yes CC=${GOC-"gccgo"} CFLAGS=$GOFLAGS compiler=$CC _LT_TAGVAR(compiler, $1)=$CC _LT_TAGVAR(LD, $1)=$LD _LT_CC_BASENAME([$compiler]) # Go did not exist at the time GCC didn't implicitly link libc in. _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds _LT_TAGVAR(reload_flag, $1)=$reload_flag _LT_TAGVAR(reload_cmds, $1)=$reload_cmds if test -n "$compiler"; then _LT_COMPILER_NO_RTTI($1) _LT_COMPILER_PIC($1) _LT_COMPILER_C_O($1) _LT_COMPILER_FILE_LOCKS($1) _LT_LINKER_SHLIBS($1) _LT_LINKER_HARDCODE_LIBPATH($1) _LT_CONFIG($1) fi AC_LANG_RESTORE GCC=$lt_save_GCC CC=$lt_save_CC CFLAGS=$lt_save_CFLAGS ])# _LT_LANG_GO_CONFIG # _LT_LANG_RC_CONFIG([TAG]) # ------------------------- # Ensure that the configuration variables for the Windows resource compiler # are suitably defined. These variables are subsequently used by _LT_CONFIG # to write the compiler configuration to 'libtool'. m4_defun([_LT_LANG_RC_CONFIG], [AC_REQUIRE([LT_PROG_RC])dnl AC_LANG_SAVE # Source file extension for RC test sources. ac_ext=rc # Object file extension for compiled RC test sources. objext=o _LT_TAGVAR(objext, $1)=$objext # Code to be used in simple compile tests lt_simple_compile_test_code='sample MENU { MENUITEM "&Soup", 100, CHECKED }' # Code to be used in simple link tests lt_simple_link_test_code=$lt_simple_compile_test_code # ltmain only uses $CC for tagged configurations so make sure $CC is set. _LT_TAG_COMPILER # save warnings/boilerplate of simple test code _LT_COMPILER_BOILERPLATE _LT_LINKER_BOILERPLATE # Allow CC to be a program name with arguments. 
lt_save_CC=$CC lt_save_CFLAGS=$CFLAGS lt_save_GCC=$GCC GCC= CC=${RC-"windres"} CFLAGS= compiler=$CC _LT_TAGVAR(compiler, $1)=$CC _LT_CC_BASENAME([$compiler]) _LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes if test -n "$compiler"; then : _LT_CONFIG($1) fi GCC=$lt_save_GCC AC_LANG_RESTORE CC=$lt_save_CC CFLAGS=$lt_save_CFLAGS ])# _LT_LANG_RC_CONFIG # LT_PROG_GCJ # ----------- AC_DEFUN([LT_PROG_GCJ], [m4_ifdef([AC_PROG_GCJ], [AC_PROG_GCJ], [m4_ifdef([A][M_PROG_GCJ], [A][M_PROG_GCJ], [AC_CHECK_TOOL(GCJ, gcj,) test set = "${GCJFLAGS+set}" || GCJFLAGS="-g -O2" AC_SUBST(GCJFLAGS)])])[]dnl ]) # Old name: AU_ALIAS([LT_AC_PROG_GCJ], [LT_PROG_GCJ]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([LT_AC_PROG_GCJ], []) # LT_PROG_GO # ---------- AC_DEFUN([LT_PROG_GO], [AC_CHECK_TOOL(GOC, gccgo,) ]) # LT_PROG_RC # ---------- AC_DEFUN([LT_PROG_RC], [AC_CHECK_TOOL(RC, windres,) ]) # Old name: AU_ALIAS([LT_AC_PROG_RC], [LT_PROG_RC]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([LT_AC_PROG_RC], []) # _LT_DECL_EGREP # -------------- # If we don't have a new enough Autoconf to choose the best grep # available, choose the one first in the user's PATH. m4_defun([_LT_DECL_EGREP], [AC_REQUIRE([AC_PROG_EGREP])dnl AC_REQUIRE([AC_PROG_FGREP])dnl test -z "$GREP" && GREP=grep _LT_DECL([], [GREP], [1], [A grep program that handles long lines]) _LT_DECL([], [EGREP], [1], [An ERE matcher]) _LT_DECL([], [FGREP], [1], [A literal string matcher]) dnl Non-bleeding-edge autoconf doesn't subst GREP, so do it here too AC_SUBST([GREP]) ]) # _LT_DECL_OBJDUMP # -------------- # If we don't have a new enough Autoconf to choose the best objdump # available, choose the one first in the user's PATH. m4_defun([_LT_DECL_OBJDUMP], [AC_CHECK_TOOL(OBJDUMP, objdump, false) test -z "$OBJDUMP" && OBJDUMP=objdump _LT_DECL([], [OBJDUMP], [1], [An object symbol dumper]) AC_SUBST([OBJDUMP]) ]) # _LT_DECL_DLLTOOL # ---------------- # Ensure DLLTOOL variable is set. m4_defun([_LT_DECL_DLLTOOL], [AC_CHECK_TOOL(DLLTOOL, dlltool, false) test -z "$DLLTOOL" && DLLTOOL=dlltool _LT_DECL([], [DLLTOOL], [1], [DLL creation program]) AC_SUBST([DLLTOOL]) ]) # _LT_DECL_SED # ------------ # Check for a fully-functional sed program, that truncates # as few characters as possible. Prefer GNU sed if found. m4_defun([_LT_DECL_SED], [AC_PROG_SED test -z "$SED" && SED=sed Xsed="$SED -e 1s/^X//" _LT_DECL([], [SED], [1], [A sed program that does not truncate output]) _LT_DECL([], [Xsed], ["\$SED -e 1s/^X//"], [Sed that helps us avoid accidentally triggering echo(1) options like -n]) ])# _LT_DECL_SED m4_ifndef([AC_PROG_SED], [ # NOTE: This macro has been submitted for inclusion into # # GNU Autoconf as AC_PROG_SED. When it is available in # # a released version of Autoconf we should remove this # # macro and use it instead. # m4_defun([AC_PROG_SED], [AC_MSG_CHECKING([for a sed that does not truncate output]) AC_CACHE_VAL(lt_cv_path_SED, [# Loop through the user's path and test for sed and gsed. # Then use that list of sed's as ones to test for truncation. as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for lt_ac_prog in sed gsed; do for ac_exec_ext in '' $ac_executable_extensions; do if $as_executable_p "$as_dir/$lt_ac_prog$ac_exec_ext"; then lt_ac_sed_list="$lt_ac_sed_list $as_dir/$lt_ac_prog$ac_exec_ext" fi done done done IFS=$as_save_IFS lt_ac_max=0 lt_ac_count=0 # Add /usr/xpg4/bin/sed as it is typically found on Solaris # along with /bin/sed that truncates output. 
for lt_ac_sed in $lt_ac_sed_list /usr/xpg4/bin/sed; do test ! -f "$lt_ac_sed" && continue cat /dev/null > conftest.in lt_ac_count=0 echo $ECHO_N "0123456789$ECHO_C" >conftest.in # Check for GNU sed and select it if it is found. if "$lt_ac_sed" --version 2>&1 < /dev/null | grep 'GNU' > /dev/null; then lt_cv_path_SED=$lt_ac_sed break fi while true; do cat conftest.in conftest.in >conftest.tmp mv conftest.tmp conftest.in cp conftest.in conftest.nl echo >>conftest.nl $lt_ac_sed -e 's/a$//' < conftest.nl >conftest.out || break cmp -s conftest.out conftest.nl || break # 10000 chars as input seems more than enough test 10 -lt "$lt_ac_count" && break lt_ac_count=`expr $lt_ac_count + 1` if test "$lt_ac_count" -gt "$lt_ac_max"; then lt_ac_max=$lt_ac_count lt_cv_path_SED=$lt_ac_sed fi done done ]) SED=$lt_cv_path_SED AC_SUBST([SED]) AC_MSG_RESULT([$SED]) ])#AC_PROG_SED ])#m4_ifndef # Old name: AU_ALIAS([LT_AC_PROG_SED], [AC_PROG_SED]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([LT_AC_PROG_SED], []) # _LT_CHECK_SHELL_FEATURES # ------------------------ # Find out whether the shell is Bourne or XSI compatible, # or has some other useful features. m4_defun([_LT_CHECK_SHELL_FEATURES], [if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then lt_unset=unset else lt_unset=false fi _LT_DECL([], [lt_unset], [0], [whether the shell understands "unset"])dnl # test EBCDIC or ASCII case `echo X|tr X '\101'` in A) # ASCII based system # \n is not interpreted correctly by Solaris 8 /usr/ucb/tr lt_SP2NL='tr \040 \012' lt_NL2SP='tr \015\012 \040\040' ;; *) # EBCDIC based system lt_SP2NL='tr \100 \n' lt_NL2SP='tr \r\n \100\100' ;; esac _LT_DECL([SP2NL], [lt_SP2NL], [1], [turn spaces into newlines])dnl _LT_DECL([NL2SP], [lt_NL2SP], [1], [turn newlines into spaces])dnl ])# _LT_CHECK_SHELL_FEATURES # _LT_PATH_CONVERSION_FUNCTIONS # ----------------------------- # Determine what file name conversion functions should be used by # func_to_host_file (and, implicitly, by func_to_host_path). These are needed # for certain cross-compile configurations and native mingw. m4_defun([_LT_PATH_CONVERSION_FUNCTIONS], [AC_REQUIRE([AC_CANONICAL_HOST])dnl AC_REQUIRE([AC_CANONICAL_BUILD])dnl AC_MSG_CHECKING([how to convert $build file names to $host format]) AC_CACHE_VAL(lt_cv_to_host_file_cmd, [case $host in *-*-mingw* ) case $build in *-*-mingw* ) # actually msys lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32 ;; *-*-cygwin* ) lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32 ;; * ) # otherwise, assume *nix lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32 ;; esac ;; *-*-cygwin* ) case $build in *-*-mingw* ) # actually msys lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin ;; *-*-cygwin* ) lt_cv_to_host_file_cmd=func_convert_file_noop ;; * ) # otherwise, assume *nix lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin ;; esac ;; * ) # unhandled hosts (and "normal" native builds) lt_cv_to_host_file_cmd=func_convert_file_noop ;; esac ]) to_host_file_cmd=$lt_cv_to_host_file_cmd AC_MSG_RESULT([$lt_cv_to_host_file_cmd]) _LT_DECL([to_host_file_cmd], [lt_cv_to_host_file_cmd], [0], [convert $build file names to $host format])dnl AC_MSG_CHECKING([how to convert $build file names to toolchain format]) AC_CACHE_VAL(lt_cv_to_tool_file_cmd, [#assume ordinary cross tools, or native build. 
lt_cv_to_tool_file_cmd=func_convert_file_noop case $host in *-*-mingw* ) case $build in *-*-mingw* ) # actually msys lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32 ;; esac ;; esac ]) to_tool_file_cmd=$lt_cv_to_tool_file_cmd AC_MSG_RESULT([$lt_cv_to_tool_file_cmd]) _LT_DECL([to_tool_file_cmd], [lt_cv_to_tool_file_cmd], [0], [convert $build files to toolchain format])dnl ])# _LT_PATH_CONVERSION_FUNCTIONS # Helper functions for option handling. -*- Autoconf -*- # # Copyright (C) 2004-2005, 2007-2009, 2011-2015 Free Software # Foundation, Inc. # Written by Gary V. Vaughan, 2004 # # This file is free software; the Free Software Foundation gives # unlimited permission to copy and/or distribute it, with or without # modifications, as long as this notice is preserved. # serial 8 ltoptions.m4 # This is to help aclocal find these macros, as it can't see m4_define. AC_DEFUN([LTOPTIONS_VERSION], [m4_if([1])]) # _LT_MANGLE_OPTION(MACRO-NAME, OPTION-NAME) # ------------------------------------------ m4_define([_LT_MANGLE_OPTION], [[_LT_OPTION_]m4_bpatsubst($1__$2, [[^a-zA-Z0-9_]], [_])]) # _LT_SET_OPTION(MACRO-NAME, OPTION-NAME) # --------------------------------------- # Set option OPTION-NAME for macro MACRO-NAME, and if there is a # matching handler defined, dispatch to it. Other OPTION-NAMEs are # saved as a flag. m4_define([_LT_SET_OPTION], [m4_define(_LT_MANGLE_OPTION([$1], [$2]))dnl m4_ifdef(_LT_MANGLE_DEFUN([$1], [$2]), _LT_MANGLE_DEFUN([$1], [$2]), [m4_warning([Unknown $1 option '$2'])])[]dnl ]) # _LT_IF_OPTION(MACRO-NAME, OPTION-NAME, IF-SET, [IF-NOT-SET]) # ------------------------------------------------------------ # Execute IF-SET if OPTION is set, IF-NOT-SET otherwise. m4_define([_LT_IF_OPTION], [m4_ifdef(_LT_MANGLE_OPTION([$1], [$2]), [$3], [$4])]) # _LT_UNLESS_OPTIONS(MACRO-NAME, OPTION-LIST, IF-NOT-SET) # ------------------------------------------------------- # Execute IF-NOT-SET unless all options in OPTION-LIST for MACRO-NAME # are set. m4_define([_LT_UNLESS_OPTIONS], [m4_foreach([_LT_Option], m4_split(m4_normalize([$2])), [m4_ifdef(_LT_MANGLE_OPTION([$1], _LT_Option), [m4_define([$0_found])])])[]dnl m4_ifdef([$0_found], [m4_undefine([$0_found])], [$3 ])[]dnl ]) # _LT_SET_OPTIONS(MACRO-NAME, OPTION-LIST) # ---------------------------------------- # OPTION-LIST is a space-separated list of Libtool options associated # with MACRO-NAME. If any OPTION has a matching handler declared with # LT_OPTION_DEFINE, dispatch to that macro; otherwise complain about # the unknown option and exit. m4_defun([_LT_SET_OPTIONS], [# Set options m4_foreach([_LT_Option], m4_split(m4_normalize([$2])), [_LT_SET_OPTION([$1], _LT_Option)]) m4_if([$1],[LT_INIT],[ dnl dnl Simply set some default values (i.e off) if boolean options were not dnl specified: _LT_UNLESS_OPTIONS([LT_INIT], [dlopen], [enable_dlopen=no ]) _LT_UNLESS_OPTIONS([LT_INIT], [win32-dll], [enable_win32_dll=no ]) dnl dnl If no reference was made to various pairs of opposing options, then dnl we run the default mode handler for the pair. 
For example, if neither dnl 'shared' nor 'disable-shared' was passed, we enable building of shared dnl archives by default: _LT_UNLESS_OPTIONS([LT_INIT], [shared disable-shared], [_LT_ENABLE_SHARED]) _LT_UNLESS_OPTIONS([LT_INIT], [static disable-static], [_LT_ENABLE_STATIC]) _LT_UNLESS_OPTIONS([LT_INIT], [pic-only no-pic], [_LT_WITH_PIC]) _LT_UNLESS_OPTIONS([LT_INIT], [fast-install disable-fast-install], [_LT_ENABLE_FAST_INSTALL]) _LT_UNLESS_OPTIONS([LT_INIT], [aix-soname=aix aix-soname=both aix-soname=svr4], [_LT_WITH_AIX_SONAME([aix])]) ]) ])# _LT_SET_OPTIONS # _LT_MANGLE_DEFUN(MACRO-NAME, OPTION-NAME) # ----------------------------------------- m4_define([_LT_MANGLE_DEFUN], [[_LT_OPTION_DEFUN_]m4_bpatsubst(m4_toupper([$1__$2]), [[^A-Z0-9_]], [_])]) # LT_OPTION_DEFINE(MACRO-NAME, OPTION-NAME, CODE) # ----------------------------------------------- m4_define([LT_OPTION_DEFINE], [m4_define(_LT_MANGLE_DEFUN([$1], [$2]), [$3])[]dnl ])# LT_OPTION_DEFINE # dlopen # ------ LT_OPTION_DEFINE([LT_INIT], [dlopen], [enable_dlopen=yes ]) AU_DEFUN([AC_LIBTOOL_DLOPEN], [_LT_SET_OPTION([LT_INIT], [dlopen]) AC_DIAGNOSE([obsolete], [$0: Remove this warning and the call to _LT_SET_OPTION when you put the 'dlopen' option into LT_INIT's first parameter.]) ]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_LIBTOOL_DLOPEN], []) # win32-dll # --------- # Declare package support for building win32 dll's. LT_OPTION_DEFINE([LT_INIT], [win32-dll], [enable_win32_dll=yes case $host in *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-cegcc*) AC_CHECK_TOOL(AS, as, false) AC_CHECK_TOOL(DLLTOOL, dlltool, false) AC_CHECK_TOOL(OBJDUMP, objdump, false) ;; esac test -z "$AS" && AS=as _LT_DECL([], [AS], [1], [Assembler program])dnl test -z "$DLLTOOL" && DLLTOOL=dlltool _LT_DECL([], [DLLTOOL], [1], [DLL creation program])dnl test -z "$OBJDUMP" && OBJDUMP=objdump _LT_DECL([], [OBJDUMP], [1], [Object dumper program])dnl ])# win32-dll AU_DEFUN([AC_LIBTOOL_WIN32_DLL], [AC_REQUIRE([AC_CANONICAL_HOST])dnl _LT_SET_OPTION([LT_INIT], [win32-dll]) AC_DIAGNOSE([obsolete], [$0: Remove this warning and the call to _LT_SET_OPTION when you put the 'win32-dll' option into LT_INIT's first parameter.]) ]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_LIBTOOL_WIN32_DLL], []) # _LT_ENABLE_SHARED([DEFAULT]) # ---------------------------- # implement the --enable-shared flag, and supports the 'shared' and # 'disable-shared' LT_INIT options. # DEFAULT is either 'yes' or 'no'. If omitted, it defaults to 'yes'. m4_define([_LT_ENABLE_SHARED], [m4_define([_LT_ENABLE_SHARED_DEFAULT], [m4_if($1, no, no, yes)])dnl AC_ARG_ENABLE([shared], [AS_HELP_STRING([--enable-shared@<:@=PKGS@:>@], [build shared libraries @<:@default=]_LT_ENABLE_SHARED_DEFAULT[@:>@])], [p=${PACKAGE-default} case $enableval in yes) enable_shared=yes ;; no) enable_shared=no ;; *) enable_shared=no # Look at the argument we got. We use all the common list separators. 
lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, for pkg in $enableval; do IFS=$lt_save_ifs if test "X$pkg" = "X$p"; then enable_shared=yes fi done IFS=$lt_save_ifs ;; esac], [enable_shared=]_LT_ENABLE_SHARED_DEFAULT) _LT_DECL([build_libtool_libs], [enable_shared], [0], [Whether or not to build shared libraries]) ])# _LT_ENABLE_SHARED LT_OPTION_DEFINE([LT_INIT], [shared], [_LT_ENABLE_SHARED([yes])]) LT_OPTION_DEFINE([LT_INIT], [disable-shared], [_LT_ENABLE_SHARED([no])]) # Old names: AC_DEFUN([AC_ENABLE_SHARED], [_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[shared]) ]) AC_DEFUN([AC_DISABLE_SHARED], [_LT_SET_OPTION([LT_INIT], [disable-shared]) ]) AU_DEFUN([AM_ENABLE_SHARED], [AC_ENABLE_SHARED($@)]) AU_DEFUN([AM_DISABLE_SHARED], [AC_DISABLE_SHARED($@)]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AM_ENABLE_SHARED], []) dnl AC_DEFUN([AM_DISABLE_SHARED], []) # _LT_ENABLE_STATIC([DEFAULT]) # ---------------------------- # implement the --enable-static flag, and support the 'static' and # 'disable-static' LT_INIT options. # DEFAULT is either 'yes' or 'no'. If omitted, it defaults to 'yes'. m4_define([_LT_ENABLE_STATIC], [m4_define([_LT_ENABLE_STATIC_DEFAULT], [m4_if($1, no, no, yes)])dnl AC_ARG_ENABLE([static], [AS_HELP_STRING([--enable-static@<:@=PKGS@:>@], [build static libraries @<:@default=]_LT_ENABLE_STATIC_DEFAULT[@:>@])], [p=${PACKAGE-default} case $enableval in yes) enable_static=yes ;; no) enable_static=no ;; *) enable_static=no # Look at the argument we got. We use all the common list separators. lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, for pkg in $enableval; do IFS=$lt_save_ifs if test "X$pkg" = "X$p"; then enable_static=yes fi done IFS=$lt_save_ifs ;; esac], [enable_static=]_LT_ENABLE_STATIC_DEFAULT) _LT_DECL([build_old_libs], [enable_static], [0], [Whether or not to build static libraries]) ])# _LT_ENABLE_STATIC LT_OPTION_DEFINE([LT_INIT], [static], [_LT_ENABLE_STATIC([yes])]) LT_OPTION_DEFINE([LT_INIT], [disable-static], [_LT_ENABLE_STATIC([no])]) # Old names: AC_DEFUN([AC_ENABLE_STATIC], [_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[static]) ]) AC_DEFUN([AC_DISABLE_STATIC], [_LT_SET_OPTION([LT_INIT], [disable-static]) ]) AU_DEFUN([AM_ENABLE_STATIC], [AC_ENABLE_STATIC($@)]) AU_DEFUN([AM_DISABLE_STATIC], [AC_DISABLE_STATIC($@)]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AM_ENABLE_STATIC], []) dnl AC_DEFUN([AM_DISABLE_STATIC], []) # _LT_ENABLE_FAST_INSTALL([DEFAULT]) # ---------------------------------- # implement the --enable-fast-install flag, and support the 'fast-install' # and 'disable-fast-install' LT_INIT options. # DEFAULT is either 'yes' or 'no'. If omitted, it defaults to 'yes'. m4_define([_LT_ENABLE_FAST_INSTALL], [m4_define([_LT_ENABLE_FAST_INSTALL_DEFAULT], [m4_if($1, no, no, yes)])dnl AC_ARG_ENABLE([fast-install], [AS_HELP_STRING([--enable-fast-install@<:@=PKGS@:>@], [optimize for fast installation @<:@default=]_LT_ENABLE_FAST_INSTALL_DEFAULT[@:>@])], [p=${PACKAGE-default} case $enableval in yes) enable_fast_install=yes ;; no) enable_fast_install=no ;; *) enable_fast_install=no # Look at the argument we got. We use all the common list separators. 
lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, for pkg in $enableval; do IFS=$lt_save_ifs if test "X$pkg" = "X$p"; then enable_fast_install=yes fi done IFS=$lt_save_ifs ;; esac], [enable_fast_install=]_LT_ENABLE_FAST_INSTALL_DEFAULT) _LT_DECL([fast_install], [enable_fast_install], [0], [Whether or not to optimize for fast installation])dnl ])# _LT_ENABLE_FAST_INSTALL LT_OPTION_DEFINE([LT_INIT], [fast-install], [_LT_ENABLE_FAST_INSTALL([yes])]) LT_OPTION_DEFINE([LT_INIT], [disable-fast-install], [_LT_ENABLE_FAST_INSTALL([no])]) # Old names: AU_DEFUN([AC_ENABLE_FAST_INSTALL], [_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[fast-install]) AC_DIAGNOSE([obsolete], [$0: Remove this warning and the call to _LT_SET_OPTION when you put the 'fast-install' option into LT_INIT's first parameter.]) ]) AU_DEFUN([AC_DISABLE_FAST_INSTALL], [_LT_SET_OPTION([LT_INIT], [disable-fast-install]) AC_DIAGNOSE([obsolete], [$0: Remove this warning and the call to _LT_SET_OPTION when you put the 'disable-fast-install' option into LT_INIT's first parameter.]) ]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_ENABLE_FAST_INSTALL], []) dnl AC_DEFUN([AM_DISABLE_FAST_INSTALL], []) # _LT_WITH_AIX_SONAME([DEFAULT]) # ---------------------------------- # implement the --with-aix-soname flag, and support the `aix-soname=aix' # and `aix-soname=both' and `aix-soname=svr4' LT_INIT options. DEFAULT # is either `aix', `both' or `svr4'. If omitted, it defaults to `aix'. m4_define([_LT_WITH_AIX_SONAME], [m4_define([_LT_WITH_AIX_SONAME_DEFAULT], [m4_if($1, svr4, svr4, m4_if($1, both, both, aix))])dnl shared_archive_member_spec= case $host,$enable_shared in power*-*-aix[[5-9]]*,yes) AC_MSG_CHECKING([which variant of shared library versioning to provide]) AC_ARG_WITH([aix-soname], [AS_HELP_STRING([--with-aix-soname=aix|svr4|both], [shared library versioning (aka "SONAME") variant to provide on AIX, @<:@default=]_LT_WITH_AIX_SONAME_DEFAULT[@:>@.])], [case $withval in aix|svr4|both) ;; *) AC_MSG_ERROR([Unknown argument to --with-aix-soname]) ;; esac lt_cv_with_aix_soname=$with_aix_soname], [AC_CACHE_VAL([lt_cv_with_aix_soname], [lt_cv_with_aix_soname=]_LT_WITH_AIX_SONAME_DEFAULT) with_aix_soname=$lt_cv_with_aix_soname]) AC_MSG_RESULT([$with_aix_soname]) if test aix != "$with_aix_soname"; then # For the AIX way of multilib, we name the shared archive member # based on the bitwidth used, traditionally 'shr.o' or 'shr_64.o', # and 'shr.imp' or 'shr_64.imp', respectively, for the Import File. # Even when GNU compilers ignore OBJECT_MODE but need '-maix64' flag, # the AIX toolchain works better with OBJECT_MODE set (default 32). if test 64 = "${OBJECT_MODE-32}"; then shared_archive_member_spec=shr_64 else shared_archive_member_spec=shr fi fi ;; *) with_aix_soname=aix ;; esac _LT_DECL([], [shared_archive_member_spec], [0], [Shared archive member basename, for filename based shared library versioning on AIX])dnl ])# _LT_WITH_AIX_SONAME LT_OPTION_DEFINE([LT_INIT], [aix-soname=aix], [_LT_WITH_AIX_SONAME([aix])]) LT_OPTION_DEFINE([LT_INIT], [aix-soname=both], [_LT_WITH_AIX_SONAME([both])]) LT_OPTION_DEFINE([LT_INIT], [aix-soname=svr4], [_LT_WITH_AIX_SONAME([svr4])]) # _LT_WITH_PIC([MODE]) # -------------------- # implement the --with-pic flag, and support the 'pic-only' and 'no-pic' # LT_INIT options. # MODE is either 'yes' or 'no'. If omitted, it defaults to 'both'. 
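# For example (illustrative use, not part of the macro below): a package can
# request PIC-only objects from configure.ac, or a user can request them at
# configure time:
#   LT_INIT([pic-only])        (in configure.ac)
#   ./configure --with-pic     (on the command line)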
m4_define([_LT_WITH_PIC], [AC_ARG_WITH([pic], [AS_HELP_STRING([--with-pic@<:@=PKGS@:>@], [try to use only PIC/non-PIC objects @<:@default=use both@:>@])], [lt_p=${PACKAGE-default} case $withval in yes|no) pic_mode=$withval ;; *) pic_mode=default # Look at the argument we got. We use all the common list separators. lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, for lt_pkg in $withval; do IFS=$lt_save_ifs if test "X$lt_pkg" = "X$lt_p"; then pic_mode=yes fi done IFS=$lt_save_ifs ;; esac], [pic_mode=m4_default([$1], [default])]) _LT_DECL([], [pic_mode], [0], [What type of objects to build])dnl ])# _LT_WITH_PIC LT_OPTION_DEFINE([LT_INIT], [pic-only], [_LT_WITH_PIC([yes])]) LT_OPTION_DEFINE([LT_INIT], [no-pic], [_LT_WITH_PIC([no])]) # Old name: AU_DEFUN([AC_LIBTOOL_PICMODE], [_LT_SET_OPTION([LT_INIT], [pic-only]) AC_DIAGNOSE([obsolete], [$0: Remove this warning and the call to _LT_SET_OPTION when you put the 'pic-only' option into LT_INIT's first parameter.]) ]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_LIBTOOL_PICMODE], []) m4_define([_LTDL_MODE], []) LT_OPTION_DEFINE([LTDL_INIT], [nonrecursive], [m4_define([_LTDL_MODE], [nonrecursive])]) LT_OPTION_DEFINE([LTDL_INIT], [recursive], [m4_define([_LTDL_MODE], [recursive])]) LT_OPTION_DEFINE([LTDL_INIT], [subproject], [m4_define([_LTDL_MODE], [subproject])]) m4_define([_LTDL_TYPE], []) LT_OPTION_DEFINE([LTDL_INIT], [installable], [m4_define([_LTDL_TYPE], [installable])]) LT_OPTION_DEFINE([LTDL_INIT], [convenience], [m4_define([_LTDL_TYPE], [convenience])]) # ltsugar.m4 -- libtool m4 base layer. -*-Autoconf-*- # # Copyright (C) 2004-2005, 2007-2008, 2011-2015 Free Software # Foundation, Inc. # Written by Gary V. Vaughan, 2004 # # This file is free software; the Free Software Foundation gives # unlimited permission to copy and/or distribute it, with or without # modifications, as long as this notice is preserved. # serial 6 ltsugar.m4 # This is to help aclocal find these macros, as it can't see m4_define. AC_DEFUN([LTSUGAR_VERSION], [m4_if([0.1])]) # lt_join(SEP, ARG1, [ARG2...]) # ----------------------------- # Produce ARG1SEPARG2...SEPARGn, omitting [] arguments and their # associated separator. # Needed until we can rely on m4_join from Autoconf 2.62, since all earlier # versions in m4sugar had bugs. m4_define([lt_join], [m4_if([$#], [1], [], [$#], [2], [[$2]], [m4_if([$2], [], [], [[$2]_])$0([$1], m4_shift(m4_shift($@)))])]) m4_define([_lt_join], [m4_if([$#$2], [2], [], [m4_if([$2], [], [], [[$1$2]])$0([$1], m4_shift(m4_shift($@)))])]) # lt_car(LIST) # lt_cdr(LIST) # ------------ # Manipulate m4 lists. # These macros are necessary as long as will still need to support # Autoconf-2.59, which quotes differently. m4_define([lt_car], [[$1]]) m4_define([lt_cdr], [m4_if([$#], 0, [m4_fatal([$0: cannot be called without arguments])], [$#], 1, [], [m4_dquote(m4_shift($@))])]) m4_define([lt_unquote], $1) # lt_append(MACRO-NAME, STRING, [SEPARATOR]) # ------------------------------------------ # Redefine MACRO-NAME to hold its former content plus 'SEPARATOR''STRING'. # Note that neither SEPARATOR nor STRING are expanded; they are appended # to MACRO-NAME as is (leaving the expansion for when MACRO-NAME is invoked). # No SEPARATOR is output if MACRO-NAME was previously undefined (different # than defined and empty). # # This macro is needed until we can rely on Autoconf 2.62, since earlier # versions of m4sugar mistakenly expanded SEPARATOR but not STRING. 
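# A small sketch of lt_append in use (the macro names here are invented for
# the example):
#   m4_define([my_list], [a])
#   lt_append([my_list], [b], [, ])   => my_list now expands to "a, b"
#   lt_append([new_list], [x], [, ])  => new_list expands to just "x"
# No separator is emitted in the second case because new_list had not been
# defined before the append.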
m4_define([lt_append], [m4_define([$1], m4_ifdef([$1], [m4_defn([$1])[$3]])[$2])]) # lt_combine(SEP, PREFIX-LIST, INFIX, SUFFIX1, [SUFFIX2...]) # ---------------------------------------------------------- # Produce a SEP delimited list of all paired combinations of elements of # PREFIX-LIST with SUFFIX1 through SUFFIXn. Each element of the list # has the form PREFIXmINFIXSUFFIXn. # Needed until we can rely on m4_combine added in Autoconf 2.62. m4_define([lt_combine], [m4_if(m4_eval([$# > 3]), [1], [m4_pushdef([_Lt_sep], [m4_define([_Lt_sep], m4_defn([lt_car]))])]]dnl [[m4_foreach([_Lt_prefix], [$2], [m4_foreach([_Lt_suffix], ]m4_dquote(m4_dquote(m4_shift(m4_shift(m4_shift($@)))))[, [_Lt_sep([$1])[]m4_defn([_Lt_prefix])[$3]m4_defn([_Lt_suffix])])])])]) # lt_if_append_uniq(MACRO-NAME, VARNAME, [SEPARATOR], [UNIQ], [NOT-UNIQ]) # ----------------------------------------------------------------------- # Iff MACRO-NAME does not yet contain VARNAME, then append it (delimited # by SEPARATOR if supplied) and expand UNIQ, else NOT-UNIQ. m4_define([lt_if_append_uniq], [m4_ifdef([$1], [m4_if(m4_index([$3]m4_defn([$1])[$3], [$3$2$3]), [-1], [lt_append([$1], [$2], [$3])$4], [$5])], [lt_append([$1], [$2], [$3])$4])]) # lt_dict_add(DICT, KEY, VALUE) # ----------------------------- m4_define([lt_dict_add], [m4_define([$1($2)], [$3])]) # lt_dict_add_subkey(DICT, KEY, SUBKEY, VALUE) # -------------------------------------------- m4_define([lt_dict_add_subkey], [m4_define([$1($2:$3)], [$4])]) # lt_dict_fetch(DICT, KEY, [SUBKEY]) # ---------------------------------- m4_define([lt_dict_fetch], [m4_ifval([$3], m4_ifdef([$1($2:$3)], [m4_defn([$1($2:$3)])]), m4_ifdef([$1($2)], [m4_defn([$1($2)])]))]) # lt_if_dict_fetch(DICT, KEY, [SUBKEY], VALUE, IF-TRUE, [IF-FALSE]) # ----------------------------------------------------------------- m4_define([lt_if_dict_fetch], [m4_if(lt_dict_fetch([$1], [$2], [$3]), [$4], [$5], [$6])]) # lt_dict_filter(DICT, [SUBKEY], VALUE, [SEPARATOR], KEY, [...]) # -------------------------------------------------------------- m4_define([lt_dict_filter], [m4_if([$5], [], [], [lt_join(m4_quote(m4_default([$4], [[, ]])), lt_unquote(m4_split(m4_normalize(m4_foreach(_Lt_key, lt_car([m4_shiftn(4, $@)]), [lt_if_dict_fetch([$1], _Lt_key, [$2], [$3], [_Lt_key ])])))))])[]dnl ]) # ltversion.m4 -- version numbers -*- Autoconf -*- # # Copyright (C) 2004, 2011-2015 Free Software Foundation, Inc. # Written by Scott James Remnant, 2004 # # This file is free software; the Free Software Foundation gives # unlimited permission to copy and/or distribute it, with or without # modifications, as long as this notice is preserved. # @configure_input@ # serial 4179 ltversion.m4 # This file is part of GNU Libtool m4_define([LT_PACKAGE_VERSION], [2.4.6]) m4_define([LT_PACKAGE_REVISION], [2.4.6]) AC_DEFUN([LTVERSION_VERSION], [macro_version='2.4.6' macro_revision='2.4.6' _LT_DECL(, macro_version, 0, [Which release of libtool.m4 was used?]) _LT_DECL(, macro_revision, 0) ]) # lt~obsolete.m4 -- aclocal satisfying obsolete definitions. -*-Autoconf-*- # # Copyright (C) 2004-2005, 2007, 2009, 2011-2015 Free Software # Foundation, Inc. # Written by Scott James Remnant, 2004. # # This file is free software; the Free Software Foundation gives # unlimited permission to copy and/or distribute it, with or without # modifications, as long as this notice is preserved. # serial 5 lt~obsolete.m4 # These exist entirely to fool aclocal when bootstrapping libtool. 
# # In the past libtool.m4 has provided macros via AC_DEFUN (or AU_DEFUN), # which have later been changed to m4_define as they aren't part of the # exported API, or moved to Autoconf or Automake where they belong. # # The trouble is, aclocal is a bit thick. It'll see the old AC_DEFUN # in /usr/share/aclocal/libtool.m4 and remember it, then when it sees us # using a macro with the same name in our local m4/libtool.m4 it'll # pull the old libtool.m4 in (it doesn't see our shiny new m4_define # and doesn't know about Autoconf macros at all.) # # So we provide this file, which has a silly filename so it's always # included after everything else. This provides aclocal with the # AC_DEFUNs it wants, but when m4 processes it, it doesn't do anything # because those macros already exist, or will be overwritten later. # We use AC_DEFUN over AU_DEFUN for compatibility with aclocal-1.6. # # Anytime we withdraw an AC_DEFUN or AU_DEFUN, remember to add it here. # Yes, that means every name once taken will need to remain here until # we give up compatibility with versions before 1.7, at which point # we need to keep only those names which we still refer to. # This is to help aclocal find these macros, as it can't see m4_define. AC_DEFUN([LTOBSOLETE_VERSION], [m4_if([1])]) m4_ifndef([AC_LIBTOOL_LINKER_OPTION], [AC_DEFUN([AC_LIBTOOL_LINKER_OPTION])]) m4_ifndef([AC_PROG_EGREP], [AC_DEFUN([AC_PROG_EGREP])]) m4_ifndef([_LT_AC_PROG_ECHO_BACKSLASH], [AC_DEFUN([_LT_AC_PROG_ECHO_BACKSLASH])]) m4_ifndef([_LT_AC_SHELL_INIT], [AC_DEFUN([_LT_AC_SHELL_INIT])]) m4_ifndef([_LT_AC_SYS_LIBPATH_AIX], [AC_DEFUN([_LT_AC_SYS_LIBPATH_AIX])]) m4_ifndef([_LT_PROG_LTMAIN], [AC_DEFUN([_LT_PROG_LTMAIN])]) m4_ifndef([_LT_AC_TAGVAR], [AC_DEFUN([_LT_AC_TAGVAR])]) m4_ifndef([AC_LTDL_ENABLE_INSTALL], [AC_DEFUN([AC_LTDL_ENABLE_INSTALL])]) m4_ifndef([AC_LTDL_PREOPEN], [AC_DEFUN([AC_LTDL_PREOPEN])]) m4_ifndef([_LT_AC_SYS_COMPILER], [AC_DEFUN([_LT_AC_SYS_COMPILER])]) m4_ifndef([_LT_AC_LOCK], [AC_DEFUN([_LT_AC_LOCK])]) m4_ifndef([AC_LIBTOOL_SYS_OLD_ARCHIVE], [AC_DEFUN([AC_LIBTOOL_SYS_OLD_ARCHIVE])]) m4_ifndef([_LT_AC_TRY_DLOPEN_SELF], [AC_DEFUN([_LT_AC_TRY_DLOPEN_SELF])]) m4_ifndef([AC_LIBTOOL_PROG_CC_C_O], [AC_DEFUN([AC_LIBTOOL_PROG_CC_C_O])]) m4_ifndef([AC_LIBTOOL_SYS_HARD_LINK_LOCKS], [AC_DEFUN([AC_LIBTOOL_SYS_HARD_LINK_LOCKS])]) m4_ifndef([AC_LIBTOOL_OBJDIR], [AC_DEFUN([AC_LIBTOOL_OBJDIR])]) m4_ifndef([AC_LTDL_OBJDIR], [AC_DEFUN([AC_LTDL_OBJDIR])]) m4_ifndef([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH], [AC_DEFUN([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH])]) m4_ifndef([AC_LIBTOOL_SYS_LIB_STRIP], [AC_DEFUN([AC_LIBTOOL_SYS_LIB_STRIP])]) m4_ifndef([AC_PATH_MAGIC], [AC_DEFUN([AC_PATH_MAGIC])]) m4_ifndef([AC_PROG_LD_GNU], [AC_DEFUN([AC_PROG_LD_GNU])]) m4_ifndef([AC_PROG_LD_RELOAD_FLAG], [AC_DEFUN([AC_PROG_LD_RELOAD_FLAG])]) m4_ifndef([AC_DEPLIBS_CHECK_METHOD], [AC_DEFUN([AC_DEPLIBS_CHECK_METHOD])]) m4_ifndef([AC_LIBTOOL_PROG_COMPILER_NO_RTTI], [AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_NO_RTTI])]) m4_ifndef([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE], [AC_DEFUN([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE])]) m4_ifndef([AC_LIBTOOL_PROG_COMPILER_PIC], [AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_PIC])]) m4_ifndef([AC_LIBTOOL_PROG_LD_SHLIBS], [AC_DEFUN([AC_LIBTOOL_PROG_LD_SHLIBS])]) m4_ifndef([AC_LIBTOOL_POSTDEP_PREDEP], [AC_DEFUN([AC_LIBTOOL_POSTDEP_PREDEP])]) m4_ifndef([LT_AC_PROG_EGREP], [AC_DEFUN([LT_AC_PROG_EGREP])]) m4_ifndef([LT_AC_PROG_SED], [AC_DEFUN([LT_AC_PROG_SED])]) m4_ifndef([_LT_CC_BASENAME], [AC_DEFUN([_LT_CC_BASENAME])]) m4_ifndef([_LT_COMPILER_BOILERPLATE], 
[AC_DEFUN([_LT_COMPILER_BOILERPLATE])]) m4_ifndef([_LT_LINKER_BOILERPLATE], [AC_DEFUN([_LT_LINKER_BOILERPLATE])]) m4_ifndef([_AC_PROG_LIBTOOL], [AC_DEFUN([_AC_PROG_LIBTOOL])]) m4_ifndef([AC_LIBTOOL_SETUP], [AC_DEFUN([AC_LIBTOOL_SETUP])]) m4_ifndef([_LT_AC_CHECK_DLFCN], [AC_DEFUN([_LT_AC_CHECK_DLFCN])]) m4_ifndef([AC_LIBTOOL_SYS_DYNAMIC_LINKER], [AC_DEFUN([AC_LIBTOOL_SYS_DYNAMIC_LINKER])]) m4_ifndef([_LT_AC_TAGCONFIG], [AC_DEFUN([_LT_AC_TAGCONFIG])]) m4_ifndef([AC_DISABLE_FAST_INSTALL], [AC_DEFUN([AC_DISABLE_FAST_INSTALL])]) m4_ifndef([_LT_AC_LANG_CXX], [AC_DEFUN([_LT_AC_LANG_CXX])]) m4_ifndef([_LT_AC_LANG_F77], [AC_DEFUN([_LT_AC_LANG_F77])]) m4_ifndef([_LT_AC_LANG_GCJ], [AC_DEFUN([_LT_AC_LANG_GCJ])]) m4_ifndef([AC_LIBTOOL_LANG_C_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_C_CONFIG])]) m4_ifndef([_LT_AC_LANG_C_CONFIG], [AC_DEFUN([_LT_AC_LANG_C_CONFIG])]) m4_ifndef([AC_LIBTOOL_LANG_CXX_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_CXX_CONFIG])]) m4_ifndef([_LT_AC_LANG_CXX_CONFIG], [AC_DEFUN([_LT_AC_LANG_CXX_CONFIG])]) m4_ifndef([AC_LIBTOOL_LANG_F77_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_F77_CONFIG])]) m4_ifndef([_LT_AC_LANG_F77_CONFIG], [AC_DEFUN([_LT_AC_LANG_F77_CONFIG])]) m4_ifndef([AC_LIBTOOL_LANG_GCJ_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_GCJ_CONFIG])]) m4_ifndef([_LT_AC_LANG_GCJ_CONFIG], [AC_DEFUN([_LT_AC_LANG_GCJ_CONFIG])]) m4_ifndef([AC_LIBTOOL_LANG_RC_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_RC_CONFIG])]) m4_ifndef([_LT_AC_LANG_RC_CONFIG], [AC_DEFUN([_LT_AC_LANG_RC_CONFIG])]) m4_ifndef([AC_LIBTOOL_CONFIG], [AC_DEFUN([AC_LIBTOOL_CONFIG])]) m4_ifndef([_LT_AC_FILE_LTDLL_C], [AC_DEFUN([_LT_AC_FILE_LTDLL_C])]) m4_ifndef([_LT_REQUIRED_DARWIN_CHECKS], [AC_DEFUN([_LT_REQUIRED_DARWIN_CHECKS])]) m4_ifndef([_LT_AC_PROG_CXXCPP], [AC_DEFUN([_LT_AC_PROG_CXXCPP])]) m4_ifndef([_LT_PREPARE_SED_QUOTE_VARS], [AC_DEFUN([_LT_PREPARE_SED_QUOTE_VARS])]) m4_ifndef([_LT_PROG_ECHO_BACKSLASH], [AC_DEFUN([_LT_PROG_ECHO_BACKSLASH])]) m4_ifndef([_LT_PROG_F77], [AC_DEFUN([_LT_PROG_F77])]) m4_ifndef([_LT_PROG_FC], [AC_DEFUN([_LT_PROG_FC])]) m4_ifndef([_LT_PROG_CXX], [AC_DEFUN([_LT_PROG_CXX])]) # Copyright (C) 2002-2014 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_AUTOMAKE_VERSION(VERSION) # ---------------------------- # Automake X.Y traces this macro to ensure aclocal.m4 has been # generated from the m4 files accompanying Automake X.Y. # (This private macro should not be called outside this file.) AC_DEFUN([AM_AUTOMAKE_VERSION], [am__api_version='1.15' dnl Some users find AM_AUTOMAKE_VERSION and mistake it for a way to dnl require some minimum version. Point them to the right macro. m4_if([$1], [1.15], [], [AC_FATAL([Do not call $0, use AM_INIT_AUTOMAKE([$1]).])])dnl ]) # _AM_AUTOCONF_VERSION(VERSION) # ----------------------------- # aclocal traces this macro to find the Autoconf version. # This is a private macro too. Using m4_define simplifies # the logic in aclocal, which can simply ignore this definition. m4_define([_AM_AUTOCONF_VERSION], []) # AM_SET_CURRENT_AUTOMAKE_VERSION # ------------------------------- # Call AM_AUTOMAKE_VERSION and AM_AUTOMAKE_VERSION so they can be traced. # This function is AC_REQUIREd by AM_INIT_AUTOMAKE. 
AC_DEFUN([AM_SET_CURRENT_AUTOMAKE_VERSION], [AM_AUTOMAKE_VERSION([1.15])dnl m4_ifndef([AC_AUTOCONF_VERSION], [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl _AM_AUTOCONF_VERSION(m4_defn([AC_AUTOCONF_VERSION]))]) # AM_AUX_DIR_EXPAND -*- Autoconf -*- # Copyright (C) 2001-2014 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # For projects using AC_CONFIG_AUX_DIR([foo]), Autoconf sets # $ac_aux_dir to '$srcdir/foo'. In other projects, it is set to # '$srcdir', '$srcdir/..', or '$srcdir/../..'. # # Of course, Automake must honor this variable whenever it calls a # tool from the auxiliary directory. The problem is that $srcdir (and # therefore $ac_aux_dir as well) can be either absolute or relative, # depending on how configure is run. This is pretty annoying, since # it makes $ac_aux_dir quite unusable in subdirectories: in the top # source directory, any form will work fine, but in subdirectories a # relative path needs to be adjusted first. # # $ac_aux_dir/missing # fails when called from a subdirectory if $ac_aux_dir is relative # $top_srcdir/$ac_aux_dir/missing # fails if $ac_aux_dir is absolute, # fails when called from a subdirectory in a VPATH build with # a relative $ac_aux_dir # # The reason for the latter failure is that $top_srcdir and $ac_aux_dir # are both prefixed by $srcdir. In an in-source build this is usually # harmless because $srcdir is '.', but things will break when you # start a VPATH build or use an absolute $srcdir. # # So we could use something similar to $top_srcdir/$ac_aux_dir/missing, # iff we strip the leading $srcdir from $ac_aux_dir. That would be: # am_aux_dir='\$(top_srcdir)/'`expr "$ac_aux_dir" : "$srcdir//*\(.*\)"` # and then we would define $MISSING as # MISSING="\${SHELL} $am_aux_dir/missing" # This will work as long as MISSING is not called from configure, because # unfortunately $(top_srcdir) has no meaning in configure. # However there are other variables, like CC, which are often used in # configure, and could therefore not use this "fixed" $ac_aux_dir. # # Another solution, used here, is to always expand $ac_aux_dir to an # absolute PATH. The drawback is that using absolute paths prevents a # configured tree from being moved without reconfiguration. AC_DEFUN([AM_AUX_DIR_EXPAND], [AC_REQUIRE([AC_CONFIG_AUX_DIR_DEFAULT])dnl # Expand $ac_aux_dir to an absolute path. am_aux_dir=`cd "$ac_aux_dir" && pwd` ]) # AM_CONDITIONAL -*- Autoconf -*- # Copyright (C) 1997-2014 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_CONDITIONAL(NAME, SHELL-CONDITION) # ------------------------------------- # Define a conditional. AC_DEFUN([AM_CONDITIONAL], [AC_PREREQ([2.52])dnl m4_if([$1], [TRUE], [AC_FATAL([$0: invalid condition: $1])], [$1], [FALSE], [AC_FATAL([$0: invalid condition: $1])])dnl AC_SUBST([$1_TRUE])dnl AC_SUBST([$1_FALSE])dnl _AM_SUBST_NOTMAKE([$1_TRUE])dnl _AM_SUBST_NOTMAKE([$1_FALSE])dnl m4_define([_AM_COND_VALUE_$1], [$2])dnl if $2; then $1_TRUE= $1_FALSE='#' else $1_TRUE='#' $1_FALSE= fi AC_CONFIG_COMMANDS_PRE( [if test -z "${$1_TRUE}" && test -z "${$1_FALSE}"; then AC_MSG_ERROR([[conditional "$1" was never defined.
Usually this means the macro was only invoked conditionally.]]) fi])]) # Copyright (C) 1999-2014 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # There are a few dirty hacks below to avoid letting 'AC_PROG_CC' be # written in clear, in which case automake, when reading aclocal.m4, # will think it sees a *use*, and therefore will trigger all it's # C support machinery. Also note that it means that autoscan, seeing # CC etc. in the Makefile, will ask for an AC_PROG_CC use... # _AM_DEPENDENCIES(NAME) # ---------------------- # See how the compiler implements dependency checking. # NAME is "CC", "CXX", "OBJC", "OBJCXX", "UPC", or "GJC". # We try a few techniques and use that to set a single cache variable. # # We don't AC_REQUIRE the corresponding AC_PROG_CC since the latter was # modified to invoke _AM_DEPENDENCIES(CC); we would have a circular # dependency, and given that the user is not expected to run this macro, # just rely on AC_PROG_CC. AC_DEFUN([_AM_DEPENDENCIES], [AC_REQUIRE([AM_SET_DEPDIR])dnl AC_REQUIRE([AM_OUTPUT_DEPENDENCY_COMMANDS])dnl AC_REQUIRE([AM_MAKE_INCLUDE])dnl AC_REQUIRE([AM_DEP_TRACK])dnl m4_if([$1], [CC], [depcc="$CC" am_compiler_list=], [$1], [CXX], [depcc="$CXX" am_compiler_list=], [$1], [OBJC], [depcc="$OBJC" am_compiler_list='gcc3 gcc'], [$1], [OBJCXX], [depcc="$OBJCXX" am_compiler_list='gcc3 gcc'], [$1], [UPC], [depcc="$UPC" am_compiler_list=], [$1], [GCJ], [depcc="$GCJ" am_compiler_list='gcc3 gcc'], [depcc="$$1" am_compiler_list=]) AC_CACHE_CHECK([dependency style of $depcc], [am_cv_$1_dependencies_compiler_type], [if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then # We make a subdir and do the tests there. Otherwise we can end up # making bogus files that we don't know about and never remove. For # instance it was reported that on HP-UX the gcc test will end up # making a dummy file named 'D' -- because '-MD' means "put the output # in D". rm -rf conftest.dir mkdir conftest.dir # Copy depcomp to subdir because otherwise we won't find it if we're # using a relative directory. cp "$am_depcomp" conftest.dir cd conftest.dir # We will build objects and dependencies in a subdirectory because # it helps to detect inapplicable dependency modes. For instance # both Tru64's cc and ICC support -MD to output dependencies as a # side effect of compilation, but ICC will put the dependencies in # the current directory while Tru64 will put them in the object # directory. mkdir sub am_cv_$1_dependencies_compiler_type=none if test "$am_compiler_list" = ""; then am_compiler_list=`sed -n ['s/^#*\([a-zA-Z0-9]*\))$/\1/p'] < ./depcomp` fi am__universal=false m4_case([$1], [CC], [case " $depcc " in #( *\ -arch\ *\ -arch\ *) am__universal=true ;; esac], [CXX], [case " $depcc " in #( *\ -arch\ *\ -arch\ *) am__universal=true ;; esac]) for depmode in $am_compiler_list; do # Setup a source with many dependencies, because some compilers # like to wrap large dependency lists on column 80 (with \), and # we should not choose a depcomp mode which is confused by this. # # We need to recreate these files for each test, as the compiler may # overwrite some of them when testing with obscure command lines. # This happens at least with the AIX C compiler. 
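# Roughly, the probe below builds a throwaway tree like this (the literal
# 'include' line assumes the GNU-make style detected by AM_MAKE_INCLUDE):
#   sub/conftest.c    six lines of  #include "conftstN.h"
#   sub/conftstN.h    /* dummy */
#   confmf            include sub/conftest.Po
# A candidate depmode is accepted only if compiling conftest.c via depcomp
# leaves a sub/conftest.Po that mentions conftst1.h, conftst6.h and the
# object file, and the confmf makefile still runs.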
: > sub/conftest.c for i in 1 2 3 4 5 6; do echo '#include "conftst'$i'.h"' >> sub/conftest.c # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with # Solaris 10 /bin/sh. echo '/* dummy */' > sub/conftst$i.h done echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf # We check with '-c' and '-o' for the sake of the "dashmstdout" # mode. It turns out that the SunPro C++ compiler does not properly # handle '-M -o', and we need to detect this. Also, some Intel # versions had trouble with output in subdirs. am__obj=sub/conftest.${OBJEXT-o} am__minus_obj="-o $am__obj" case $depmode in gcc) # This depmode causes a compiler race in universal mode. test "$am__universal" = false || continue ;; nosideeffect) # After this tag, mechanisms are not by side-effect, so they'll # only be used when explicitly requested. if test "x$enable_dependency_tracking" = xyes; then continue else break fi ;; msvc7 | msvc7msys | msvisualcpp | msvcmsys) # This compiler won't grok '-c -o', but also, the minuso test has # not run yet. These depmodes are late enough in the game, and # so weak that their functioning should not be impacted. am__obj=conftest.${OBJEXT-o} am__minus_obj= ;; none) break ;; esac if depmode=$depmode \ source=sub/conftest.c object=$am__obj \ depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ >/dev/null 2>conftest.err && grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && grep $am__obj sub/conftest.Po > /dev/null 2>&1 && ${MAKE-make} -s -f confmf > /dev/null 2>&1; then # icc doesn't choke on unknown options, it will just issue warnings # or remarks (even with -Werror). So we grep stderr for any message # that says an option was ignored or not supported. # When given -MP, icc 7.0 and 7.1 complain thusly: # icc: Command line warning: ignoring option '-M'; no argument required # The diagnosis changed in icc 8.0: # icc: Command line remark: option '-MP' not supported if (grep 'ignoring option' conftest.err || grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else am_cv_$1_dependencies_compiler_type=$depmode break fi fi done cd .. rm -rf conftest.dir else am_cv_$1_dependencies_compiler_type=none fi ]) AC_SUBST([$1DEPMODE], [depmode=$am_cv_$1_dependencies_compiler_type]) AM_CONDITIONAL([am__fastdep$1], [ test "x$enable_dependency_tracking" != xno \ && test "$am_cv_$1_dependencies_compiler_type" = gcc3]) ]) # AM_SET_DEPDIR # ------------- # Choose a directory name for dependency files. # This macro is AC_REQUIREd in _AM_DEPENDENCIES. AC_DEFUN([AM_SET_DEPDIR], [AC_REQUIRE([AM_SET_LEADING_DOT])dnl AC_SUBST([DEPDIR], ["${am__leading_dot}deps"])dnl ]) # AM_DEP_TRACK # ------------ AC_DEFUN([AM_DEP_TRACK], [AC_ARG_ENABLE([dependency-tracking], [dnl AS_HELP_STRING( [--enable-dependency-tracking], [do not reject slow dependency extractors]) AS_HELP_STRING( [--disable-dependency-tracking], [speeds up one-time build])]) if test "x$enable_dependency_tracking" != xno; then am_depcomp="$ac_aux_dir/depcomp" AMDEPBACKSLASH='\' am__nodep='_no' fi AM_CONDITIONAL([AMDEP], [test "x$enable_dependency_tracking" != xno]) AC_SUBST([AMDEPBACKSLASH])dnl _AM_SUBST_NOTMAKE([AMDEPBACKSLASH])dnl AC_SUBST([am__nodep])dnl _AM_SUBST_NOTMAKE([am__nodep])dnl ]) # Generate code to set up dependency tracking. -*- Autoconf -*- # Copyright (C) 1999-2014 Free Software Foundation, Inc. 
# # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # _AM_OUTPUT_DEPENDENCY_COMMANDS # ------------------------------ AC_DEFUN([_AM_OUTPUT_DEPENDENCY_COMMANDS], [{ # Older Autoconf quotes --file arguments for eval, but not when files # are listed without --file. Let's play safe and only enable the eval # if we detect the quoting. case $CONFIG_FILES in *\'*) eval set x "$CONFIG_FILES" ;; *) set x $CONFIG_FILES ;; esac shift for mf do # Strip MF so we end up with the name of the file. mf=`echo "$mf" | sed -e 's/:.*$//'` # Check whether this is an Automake generated Makefile or not. # We used to match only the files named 'Makefile.in', but # some people rename them; so instead we look at the file content. # Grep'ing the first line is not enough: some people post-process # each Makefile.in and add a new line on top of each file to say so. # Grep'ing the whole file is not good either: AIX grep has a line # limit of 2048, but all sed's we know have understand at least 4000. if sed -n 's,^#.*generated by automake.*,X,p' "$mf" | grep X >/dev/null 2>&1; then dirpart=`AS_DIRNAME("$mf")` else continue fi # Extract the definition of DEPDIR, am__include, and am__quote # from the Makefile without running 'make'. DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"` test -z "$DEPDIR" && continue am__include=`sed -n 's/^am__include = //p' < "$mf"` test -z "$am__include" && continue am__quote=`sed -n 's/^am__quote = //p' < "$mf"` # Find all dependency output files, they are included files with # $(DEPDIR) in their names. We invoke sed twice because it is the # simplest approach to changing $(DEPDIR) to its actual value in the # expansion. for file in `sed -n " s/^$am__include $am__quote\(.*(DEPDIR).*\)$am__quote"'$/\1/p' <"$mf" | \ sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g'`; do # Make sure the directory exists. test -f "$dirpart/$file" && continue fdir=`AS_DIRNAME(["$file"])` AS_MKDIR_P([$dirpart/$fdir]) # echo "creating $dirpart/$file" echo '# dummy' > "$dirpart/$file" done done } ])# _AM_OUTPUT_DEPENDENCY_COMMANDS # AM_OUTPUT_DEPENDENCY_COMMANDS # ----------------------------- # This macro should only be invoked once -- use via AC_REQUIRE. # # This code is only required when automatic dependency tracking # is enabled. FIXME. This creates each '.P' file that we will # need in order to bootstrap the dependency handling code. AC_DEFUN([AM_OUTPUT_DEPENDENCY_COMMANDS], [AC_CONFIG_COMMANDS([depfiles], [test x"$AMDEP_TRUE" != x"" || _AM_OUTPUT_DEPENDENCY_COMMANDS], [AMDEP_TRUE="$AMDEP_TRUE" ac_aux_dir="$ac_aux_dir"]) ]) # Do all the work for Automake. -*- Autoconf -*- # Copyright (C) 1996-2014 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This macro actually does too much. Some checks are only needed if # your package does certain things. But this isn't really a big deal. dnl Redefine AC_PROG_CC to automatically invoke _AM_PROG_CC_C_O. m4_define([AC_PROG_CC], m4_defn([AC_PROG_CC]) [_AM_PROG_CC_C_O ]) # AM_INIT_AUTOMAKE(PACKAGE, VERSION, [NO-DEFINE]) # AM_INIT_AUTOMAKE([OPTIONS]) # ----------------------------------------------- # The call with PACKAGE and VERSION arguments is the old style # call (pre autoconf-2.50), which is being phased out. 
PACKAGE # and VERSION should now be passed to AC_INIT and removed from # the call to AM_INIT_AUTOMAKE. # We support both call styles for the transition. After # the next Automake release, Autoconf can make the AC_INIT # arguments mandatory, and then we can depend on a new Autoconf # release and drop the old call support. AC_DEFUN([AM_INIT_AUTOMAKE], [AC_PREREQ([2.65])dnl dnl Autoconf wants to disallow AM_ names. We explicitly allow dnl the ones we care about. m4_pattern_allow([^AM_[A-Z]+FLAGS$])dnl AC_REQUIRE([AM_SET_CURRENT_AUTOMAKE_VERSION])dnl AC_REQUIRE([AC_PROG_INSTALL])dnl if test "`cd $srcdir && pwd`" != "`pwd`"; then # Use -I$(srcdir) only when $(srcdir) != ., so that make's output # is not polluted with repeated "-I." AC_SUBST([am__isrc], [' -I$(srcdir)'])_AM_SUBST_NOTMAKE([am__isrc])dnl # test to see if srcdir already configured if test -f $srcdir/config.status; then AC_MSG_ERROR([source directory already configured; run "make distclean" there first]) fi fi # test whether we have cygpath if test -z "$CYGPATH_W"; then if (cygpath --version) >/dev/null 2>/dev/null; then CYGPATH_W='cygpath -w' else CYGPATH_W=echo fi fi AC_SUBST([CYGPATH_W]) # Define the identity of the package. dnl Distinguish between old-style and new-style calls. m4_ifval([$2], [AC_DIAGNOSE([obsolete], [$0: two- and three-arguments forms are deprecated.]) m4_ifval([$3], [_AM_SET_OPTION([no-define])])dnl AC_SUBST([PACKAGE], [$1])dnl AC_SUBST([VERSION], [$2])], [_AM_SET_OPTIONS([$1])dnl dnl Diagnose old-style AC_INIT with new-style AM_AUTOMAKE_INIT. m4_if( m4_ifdef([AC_PACKAGE_NAME], [ok]):m4_ifdef([AC_PACKAGE_VERSION], [ok]), [ok:ok],, [m4_fatal([AC_INIT should be called with package and version arguments])])dnl AC_SUBST([PACKAGE], ['AC_PACKAGE_TARNAME'])dnl AC_SUBST([VERSION], ['AC_PACKAGE_VERSION'])])dnl _AM_IF_OPTION([no-define],, [AC_DEFINE_UNQUOTED([PACKAGE], ["$PACKAGE"], [Name of package]) AC_DEFINE_UNQUOTED([VERSION], ["$VERSION"], [Version number of package])])dnl # Some tools Automake needs. AC_REQUIRE([AM_SANITY_CHECK])dnl AC_REQUIRE([AC_ARG_PROGRAM])dnl AM_MISSING_PROG([ACLOCAL], [aclocal-${am__api_version}]) AM_MISSING_PROG([AUTOCONF], [autoconf]) AM_MISSING_PROG([AUTOMAKE], [automake-${am__api_version}]) AM_MISSING_PROG([AUTOHEADER], [autoheader]) AM_MISSING_PROG([MAKEINFO], [makeinfo]) AC_REQUIRE([AM_PROG_INSTALL_SH])dnl AC_REQUIRE([AM_PROG_INSTALL_STRIP])dnl AC_REQUIRE([AC_PROG_MKDIR_P])dnl # For better backward compatibility. To be removed once Automake 1.9.x # dies out for good. For more background, see: # # AC_SUBST([mkdir_p], ['$(MKDIR_P)']) # We need awk for the "check" target (and possibly the TAP driver). The # system "awk" is bad on some platforms. 
AC_REQUIRE([AC_PROG_AWK])dnl AC_REQUIRE([AC_PROG_MAKE_SET])dnl AC_REQUIRE([AM_SET_LEADING_DOT])dnl _AM_IF_OPTION([tar-ustar], [_AM_PROG_TAR([ustar])], [_AM_IF_OPTION([tar-pax], [_AM_PROG_TAR([pax])], [_AM_PROG_TAR([v7])])]) _AM_IF_OPTION([no-dependencies],, [AC_PROVIDE_IFELSE([AC_PROG_CC], [_AM_DEPENDENCIES([CC])], [m4_define([AC_PROG_CC], m4_defn([AC_PROG_CC])[_AM_DEPENDENCIES([CC])])])dnl AC_PROVIDE_IFELSE([AC_PROG_CXX], [_AM_DEPENDENCIES([CXX])], [m4_define([AC_PROG_CXX], m4_defn([AC_PROG_CXX])[_AM_DEPENDENCIES([CXX])])])dnl AC_PROVIDE_IFELSE([AC_PROG_OBJC], [_AM_DEPENDENCIES([OBJC])], [m4_define([AC_PROG_OBJC], m4_defn([AC_PROG_OBJC])[_AM_DEPENDENCIES([OBJC])])])dnl AC_PROVIDE_IFELSE([AC_PROG_OBJCXX], [_AM_DEPENDENCIES([OBJCXX])], [m4_define([AC_PROG_OBJCXX], m4_defn([AC_PROG_OBJCXX])[_AM_DEPENDENCIES([OBJCXX])])])dnl ]) AC_REQUIRE([AM_SILENT_RULES])dnl dnl The testsuite driver may need to know about EXEEXT, so add the dnl 'am__EXEEXT' conditional if _AM_COMPILER_EXEEXT was seen. This dnl macro is hooked onto _AC_COMPILER_EXEEXT early, see below. AC_CONFIG_COMMANDS_PRE(dnl [m4_provide_if([_AM_COMPILER_EXEEXT], [AM_CONDITIONAL([am__EXEEXT], [test -n "$EXEEXT"])])])dnl # POSIX will say in a future version that running "rm -f" with no argument # is OK; and we want to be able to make that assumption in our Makefile # recipes. So use an aggressive probe to check that the usage we want is # actually supported "in the wild" to an acceptable degree. # See automake bug#10828. # To make any issue more visible, cause the running configure to be aborted # by default if the 'rm' program in use doesn't match our expectations; the # user can still override this though. if rm -f && rm -fr && rm -rf; then : OK; else cat >&2 <<'END' Oops! Your 'rm' program seems unable to run without file operands specified on the command line, even when the '-f' option is present. This is contrary to the behaviour of most rm programs out there, and not conforming with the upcoming POSIX standard: Please tell bug-automake@gnu.org about your system, including the value of your $PATH and any error possibly output before this message. This can help us improve future automake versions. END if test x"$ACCEPT_INFERIOR_RM_PROGRAM" = x"yes"; then echo 'Configuration will proceed anyway, since you have set the' >&2 echo 'ACCEPT_INFERIOR_RM_PROGRAM variable to "yes"' >&2 echo >&2 else cat >&2 <<'END' Aborting the configuration process, to ensure you take notice of the issue. You can download and install GNU coreutils to get an 'rm' implementation that behaves properly: . If you want to complete the configuration process using your problematic 'rm' anyway, export the environment variable ACCEPT_INFERIOR_RM_PROGRAM to "yes", and re-run configure. END AC_MSG_ERROR([Your 'rm' program is bad, sorry.]) fi fi dnl The trailing newline in this macro's definition is deliberate, for dnl backward compatibility and to allow trailing 'dnl'-style comments dnl after the AM_INIT_AUTOMAKE invocation. See automake bug#16841. ]) dnl Hook into '_AC_COMPILER_EXEEXT' early to learn its expansion. Do not dnl add the conditional right here, as _AC_COMPILER_EXEEXT may be further dnl mangled by Autoconf and run in a shell conditional statement. m4_define([_AC_COMPILER_EXEEXT], m4_defn([_AC_COMPILER_EXEEXT])[m4_provide([_AM_COMPILER_EXEEXT])]) # When config.status generates a header, we must update the stamp-h file. # This file resides in the same directory as the config header # that is generated. The stamp files are numbered to have different names. 
# Autoconf calls _AC_AM_CONFIG_HEADER_HOOK (when defined) in the # loop where config.status creates the headers, so we can generate # our stamp files there. AC_DEFUN([_AC_AM_CONFIG_HEADER_HOOK], [# Compute $1's index in $config_headers. _am_arg=$1 _am_stamp_count=1 for _am_header in $config_headers :; do case $_am_header in $_am_arg | $_am_arg:* ) break ;; * ) _am_stamp_count=`expr $_am_stamp_count + 1` ;; esac done echo "timestamp for $_am_arg" >`AS_DIRNAME(["$_am_arg"])`/stamp-h[]$_am_stamp_count]) # Copyright (C) 2001-2014 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_PROG_INSTALL_SH # ------------------ # Define $install_sh. AC_DEFUN([AM_PROG_INSTALL_SH], [AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl if test x"${install_sh+set}" != xset; then case $am_aux_dir in *\ * | *\ *) install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;; *) install_sh="\${SHELL} $am_aux_dir/install-sh" esac fi AC_SUBST([install_sh])]) # Copyright (C) 2003-2014 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # Check whether the underlying file-system supports filenames # with a leading dot. For instance MS-DOS doesn't. AC_DEFUN([AM_SET_LEADING_DOT], [rm -rf .tst 2>/dev/null mkdir .tst 2>/dev/null if test -d .tst; then am__leading_dot=. else am__leading_dot=_ fi rmdir .tst 2>/dev/null AC_SUBST([am__leading_dot])]) # Add --enable-maintainer-mode option to configure. -*- Autoconf -*- # From Jim Meyering # Copyright (C) 1996-2014 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_MAINTAINER_MODE([DEFAULT-MODE]) # ---------------------------------- # Control maintainer-specific portions of Makefiles. # Default is to disable them, unless 'enable' is passed literally. # For symmetry, 'disable' may be passed as well. Anyway, the user # can override the default with the --enable/--disable switch. AC_DEFUN([AM_MAINTAINER_MODE], [m4_case(m4_default([$1], [disable]), [enable], [m4_define([am_maintainer_other], [disable])], [disable], [m4_define([am_maintainer_other], [enable])], [m4_define([am_maintainer_other], [enable]) m4_warn([syntax], [unexpected argument to AM@&t@_MAINTAINER_MODE: $1])]) AC_MSG_CHECKING([whether to enable maintainer-specific portions of Makefiles]) dnl maintainer-mode's default is 'disable' unless 'enable' is passed AC_ARG_ENABLE([maintainer-mode], [AS_HELP_STRING([--]am_maintainer_other[-maintainer-mode], am_maintainer_other[ make rules and dependencies not useful (and sometimes confusing) to the casual installer])], [USE_MAINTAINER_MODE=$enableval], [USE_MAINTAINER_MODE=]m4_if(am_maintainer_other, [enable], [no], [yes])) AC_MSG_RESULT([$USE_MAINTAINER_MODE]) AM_CONDITIONAL([MAINTAINER_MODE], [test $USE_MAINTAINER_MODE = yes]) MAINT=$MAINTAINER_MODE_TRUE AC_SUBST([MAINT])dnl ] ) # Check to see how 'make' treats includes. -*- Autoconf -*- # Copyright (C) 2001-2014 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
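# The AM_MAKE_INCLUDE check defined below typically ends up substituting one
# of these combinations (a sketch of the expected results):
#   GNU make:  am__include=include   am__quote=     _am_result=GNU
#   BSD make:  am__include=.include  am__quote='"'  _am_result=BSD
# and leaves am__include="#" (so the include lines stay commented out) when
# neither style works.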
# AM_MAKE_INCLUDE() # ----------------- # Check to see how make treats includes. AC_DEFUN([AM_MAKE_INCLUDE], [am_make=${MAKE-make} cat > confinc << 'END' am__doit: @echo this is the am__doit target .PHONY: am__doit END # If we don't find an include directive, just comment out the code. AC_MSG_CHECKING([for style of include used by $am_make]) am__include="#" am__quote= _am_result=none # First try GNU make style include. echo "include confinc" > confmf # Ignore all kinds of additional output from 'make'. case `$am_make -s -f confmf 2> /dev/null` in #( *the\ am__doit\ target*) am__include=include am__quote= _am_result=GNU ;; esac # Now try BSD make style include. if test "$am__include" = "#"; then echo '.include "confinc"' > confmf case `$am_make -s -f confmf 2> /dev/null` in #( *the\ am__doit\ target*) am__include=.include am__quote="\"" _am_result=BSD ;; esac fi AC_SUBST([am__include]) AC_SUBST([am__quote]) AC_MSG_RESULT([$_am_result]) rm -f confinc confmf ]) # Fake the existence of programs that GNU maintainers use. -*- Autoconf -*- # Copyright (C) 1997-2014 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_MISSING_PROG(NAME, PROGRAM) # ------------------------------ AC_DEFUN([AM_MISSING_PROG], [AC_REQUIRE([AM_MISSING_HAS_RUN]) $1=${$1-"${am_missing_run}$2"} AC_SUBST($1)]) # AM_MISSING_HAS_RUN # ------------------ # Define MISSING if not defined so far and test if it is modern enough. # If it is, set am_missing_run to use it, otherwise, to nothing. AC_DEFUN([AM_MISSING_HAS_RUN], [AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl AC_REQUIRE_AUX_FILE([missing])dnl if test x"${MISSING+set}" != xset; then case $am_aux_dir in *\ * | *\ *) MISSING="\${SHELL} \"$am_aux_dir/missing\"" ;; *) MISSING="\${SHELL} $am_aux_dir/missing" ;; esac fi # Use eval to expand $SHELL if eval "$MISSING --is-lightweight"; then am_missing_run="$MISSING " else am_missing_run= AC_MSG_WARN(['missing' script is too old or missing]) fi ]) # Helper functions for option handling. -*- Autoconf -*- # Copyright (C) 2001-2014 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # _AM_MANGLE_OPTION(NAME) # ----------------------- AC_DEFUN([_AM_MANGLE_OPTION], [[_AM_OPTION_]m4_bpatsubst($1, [[^a-zA-Z0-9_]], [_])]) # _AM_SET_OPTION(NAME) # -------------------- # Set option NAME. Presently that only means defining a flag for this option. AC_DEFUN([_AM_SET_OPTION], [m4_define(_AM_MANGLE_OPTION([$1]), [1])]) # _AM_SET_OPTIONS(OPTIONS) # ------------------------ # OPTIONS is a space-separated list of Automake options. AC_DEFUN([_AM_SET_OPTIONS], [m4_foreach_w([_AM_Option], [$1], [_AM_SET_OPTION(_AM_Option)])]) # _AM_IF_OPTION(OPTION, IF-SET, [IF-NOT-SET]) # ------------------------------------------- # Execute IF-SET if OPTION is set, IF-NOT-SET otherwise. AC_DEFUN([_AM_IF_OPTION], [m4_ifset(_AM_MANGLE_OPTION([$1]), [$2], [$3])]) # Copyright (C) 1999-2014 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # _AM_PROG_CC_C_O # --------------- # Like AC_PROG_CC_C_O, but changed for automake. We rewrite AC_PROG_CC # to automatically call this. 
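# As a rough stand-alone sketch of what the test below does (the compiler and
# file names are placeholders):
#   echo 'int main (void) { return 0; }' > conftest.c
#   $CC -c conftest.c -o conftest2.o && test -f conftest2.o
# The compile is tried twice because some compilers refuse to overwrite an
# existing object with -o even though they can create one; if it ever fails,
# CC is rewritten to go through the 'compile' wrapper script instead.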
AC_DEFUN([_AM_PROG_CC_C_O], [AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl AC_REQUIRE_AUX_FILE([compile])dnl AC_LANG_PUSH([C])dnl AC_CACHE_CHECK( [whether $CC understands -c and -o together], [am_cv_prog_cc_c_o], [AC_LANG_CONFTEST([AC_LANG_PROGRAM([])]) # Make sure it works both with $CC and with simple cc. # Following AC_PROG_CC_C_O, we do the test twice because some # compilers refuse to overwrite an existing .o file with -o, # though they will create one. am_cv_prog_cc_c_o=yes for am_i in 1 2; do if AM_RUN_LOG([$CC -c conftest.$ac_ext -o conftest2.$ac_objext]) \ && test -f conftest2.$ac_objext; then : OK else am_cv_prog_cc_c_o=no break fi done rm -f core conftest* unset am_i]) if test "$am_cv_prog_cc_c_o" != yes; then # Losing compiler, so override with the script. # FIXME: It is wrong to rewrite CC. # But if we don't then we get into trouble of one sort or another. # A longer-term fix would be to have automake use am__CC in this case, # and then we could set am__CC="\$(top_srcdir)/compile \$(CC)" CC="$am_aux_dir/compile $CC" fi AC_LANG_POP([C])]) # For backward compatibility. AC_DEFUN_ONCE([AM_PROG_CC_C_O], [AC_REQUIRE([AC_PROG_CC])]) # Copyright (C) 2001-2014 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_RUN_LOG(COMMAND) # ------------------- # Run COMMAND, save the exit status in ac_status, and log it. # (This has been adapted from Autoconf's _AC_RUN_LOG macro.) AC_DEFUN([AM_RUN_LOG], [{ echo "$as_me:$LINENO: $1" >&AS_MESSAGE_LOG_FD ($1) >&AS_MESSAGE_LOG_FD 2>&AS_MESSAGE_LOG_FD ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD (exit $ac_status); }]) # Check to make sure that the build environment is sane. -*- Autoconf -*- # Copyright (C) 1996-2014 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_SANITY_CHECK # --------------- AC_DEFUN([AM_SANITY_CHECK], [AC_MSG_CHECKING([whether build environment is sane]) # Reject unsafe characters in $srcdir or the absolute working directory # name. Accept space and tab only in the latter. am_lf=' ' case `pwd` in *[[\\\"\#\$\&\'\`$am_lf]]*) AC_MSG_ERROR([unsafe absolute working directory name]);; esac case $srcdir in *[[\\\"\#\$\&\'\`$am_lf\ \ ]]*) AC_MSG_ERROR([unsafe srcdir value: '$srcdir']);; esac # Do 'set' in a subshell so we don't clobber the current shell's # arguments. Must try -L first in case configure is actually a # symlink; some systems play weird games with the mod time of symlinks # (eg FreeBSD returns the mod time of the symlink's containing # directory). if ( am_has_slept=no for am_try in 1 2; do echo "timestamp, slept: $am_has_slept" > conftest.file set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null` if test "$[*]" = "X"; then # -L didn't work. set X `ls -t "$srcdir/configure" conftest.file` fi if test "$[*]" != "X $srcdir/configure conftest.file" \ && test "$[*]" != "X conftest.file $srcdir/configure"; then # If neither matched, then we have a broken ls. This can happen # if, for instance, CONFIG_SHELL is bash and it inherits a # broken ls alias from the environment. This has actually # happened. Such a system could not be considered "sane". AC_MSG_ERROR([ls -t appears to fail. 
Make sure there is not a broken alias in your environment]) fi if test "$[2]" = conftest.file || test $am_try -eq 2; then break fi # Just in case. sleep 1 am_has_slept=yes done test "$[2]" = conftest.file ) then # Ok. : else AC_MSG_ERROR([newly created file is older than distributed files! Check your system clock]) fi AC_MSG_RESULT([yes]) # If we didn't sleep, we still need to ensure time stamps of config.status and # generated files are strictly newer. am_sleep_pid= if grep 'slept: no' conftest.file >/dev/null 2>&1; then ( sleep 1 ) & am_sleep_pid=$! fi AC_CONFIG_COMMANDS_PRE( [AC_MSG_CHECKING([that generated files are newer than configure]) if test -n "$am_sleep_pid"; then # Hide warnings about reused PIDs. wait $am_sleep_pid 2>/dev/null fi AC_MSG_RESULT([done])]) rm -f conftest.file ]) # Copyright (C) 2009-2014 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_SILENT_RULES([DEFAULT]) # -------------------------- # Enable less verbose build rules; with the default set to DEFAULT # ("yes" being less verbose, "no" or empty being verbose). AC_DEFUN([AM_SILENT_RULES], [AC_ARG_ENABLE([silent-rules], [dnl AS_HELP_STRING( [--enable-silent-rules], [less verbose build output (undo: "make V=1")]) AS_HELP_STRING( [--disable-silent-rules], [verbose build output (undo: "make V=0")])dnl ]) case $enable_silent_rules in @%:@ ((( yes) AM_DEFAULT_VERBOSITY=0;; no) AM_DEFAULT_VERBOSITY=1;; *) AM_DEFAULT_VERBOSITY=m4_if([$1], [yes], [0], [1]);; esac dnl dnl A few 'make' implementations (e.g., NonStop OS and NextStep) dnl do not support nested variable expansions. dnl See automake bug#9928 and bug#10237. am_make=${MAKE-make} AC_CACHE_CHECK([whether $am_make supports nested variables], [am_cv_make_support_nested_variables], [if AS_ECHO([['TRUE=$(BAR$(V)) BAR0=false BAR1=true V=1 am__doit: @$(TRUE) .PHONY: am__doit']]) | $am_make -f - >/dev/null 2>&1; then am_cv_make_support_nested_variables=yes else am_cv_make_support_nested_variables=no fi]) if test $am_cv_make_support_nested_variables = yes; then dnl Using '$V' instead of '$(V)' breaks IRIX make. AM_V='$(V)' AM_DEFAULT_V='$(AM_DEFAULT_VERBOSITY)' else AM_V=$AM_DEFAULT_VERBOSITY AM_DEFAULT_V=$AM_DEFAULT_VERBOSITY fi AC_SUBST([AM_V])dnl AM_SUBST_NOTMAKE([AM_V])dnl AC_SUBST([AM_DEFAULT_V])dnl AM_SUBST_NOTMAKE([AM_DEFAULT_V])dnl AC_SUBST([AM_DEFAULT_VERBOSITY])dnl AM_BACKSLASH='\' AC_SUBST([AM_BACKSLASH])dnl _AM_SUBST_NOTMAKE([AM_BACKSLASH])dnl ]) # Copyright (C) 2001-2014 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_PROG_INSTALL_STRIP # --------------------- # One issue with vendor 'install' (even GNU) is that you can't # specify the program used to strip binaries. This is especially # annoying in cross-compiling environments, where the build's strip # is unlikely to handle the host's binaries. # Fortunately install-sh will honor a STRIPPROG variable, so we # always use install-sh in "make install-strip", and initialize # STRIPPROG with the value of the STRIP variable (set by the user). AC_DEFUN([AM_PROG_INSTALL_STRIP], [AC_REQUIRE([AM_PROG_INSTALL_SH])dnl # Installed binaries are usually stripped using 'strip' when the user # run "make install-strip". 
However 'strip' might not be the right # tool to use in cross-compilation environments, therefore Automake # will honor the 'STRIP' environment variable to overrule this program. dnl Don't test for $cross_compiling = yes, because it might be 'maybe'. if test "$cross_compiling" != no; then AC_CHECK_TOOL([STRIP], [strip], :) fi INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s" AC_SUBST([INSTALL_STRIP_PROGRAM])]) # Copyright (C) 2006-2014 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # _AM_SUBST_NOTMAKE(VARIABLE) # --------------------------- # Prevent Automake from outputting VARIABLE = @VARIABLE@ in Makefile.in. # This macro is traced by Automake. AC_DEFUN([_AM_SUBST_NOTMAKE]) # AM_SUBST_NOTMAKE(VARIABLE) # -------------------------- # Public sister of _AM_SUBST_NOTMAKE. AC_DEFUN([AM_SUBST_NOTMAKE], [_AM_SUBST_NOTMAKE($@)]) # Check how to create a tarball. -*- Autoconf -*- # Copyright (C) 2004-2014 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # _AM_PROG_TAR(FORMAT) # -------------------- # Check how to create a tarball in format FORMAT. # FORMAT should be one of 'v7', 'ustar', or 'pax'. # # Substitute a variable $(am__tar) that is a command # writing to stdout a FORMAT-tarball containing the directory # $tardir. # tardir=directory && $(am__tar) > result.tar # # Substitute a variable $(am__untar) that extract such # a tarball read from stdin. # $(am__untar) < result.tar # AC_DEFUN([_AM_PROG_TAR], [# Always define AMTAR for backward compatibility. Yes, it's still used # in the wild :-( We should find a proper way to deprecate it ... AC_SUBST([AMTAR], ['$${TAR-tar}']) # We'll loop over all known methods to create a tar archive until one works. _am_tools='gnutar m4_if([$1], [ustar], [plaintar]) pax cpio none' m4_if([$1], [v7], [am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -'], [m4_case([$1], [ustar], [# The POSIX 1988 'ustar' format is defined with fixed-size fields. # There is notably a 21 bits limit for the UID and the GID. In fact, # the 'pax' utility can hang on bigger UID/GID (see automake bug#8343 # and bug#13588). am_max_uid=2097151 # 2^21 - 1 am_max_gid=$am_max_uid # The $UID and $GID variables are not portable, so we need to resort # to the POSIX-mandated id(1) utility. Errors in the 'id' calls # below are definitely unexpected, so allow the users to see them # (that is, avoid stderr redirection). am_uid=`id -u || echo unknown` am_gid=`id -g || echo unknown` AC_MSG_CHECKING([whether UID '$am_uid' is supported by ustar format]) if test $am_uid -le $am_max_uid; then AC_MSG_RESULT([yes]) else AC_MSG_RESULT([no]) _am_tools=none fi AC_MSG_CHECKING([whether GID '$am_gid' is supported by ustar format]) if test $am_gid -le $am_max_gid; then AC_MSG_RESULT([yes]) else AC_MSG_RESULT([no]) _am_tools=none fi], [pax], [], [m4_fatal([Unknown tar format])]) AC_MSG_CHECKING([how to create a $1 tar archive]) # Go ahead even if we have the value already cached. We do so because we # need to set the values for the 'am__tar' and 'am__untar' variables. 
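# For instance, with FORMAT = ustar and a GNU tar found first, the loop below
# would settle on something like (a sketch of the expected result):
#   am__tar='tar --format=ustar -chf - "$$tardir"'
#   am__untar='tar -xf -'
# which a Makefile rule then uses exactly as documented above:
#   tardir=somedir && $(am__tar) > result.tar
#   $(am__untar) < result.tar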
_am_tools=${am_cv_prog_tar_$1-$_am_tools} for _am_tool in $_am_tools; do case $_am_tool in gnutar) for _am_tar in tar gnutar gtar; do AM_RUN_LOG([$_am_tar --version]) && break done am__tar="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$$tardir"' am__tar_="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$tardir"' am__untar="$_am_tar -xf -" ;; plaintar) # Must skip GNU tar: if it does not support --format= it doesn't create # ustar tarball either. (tar --version) >/dev/null 2>&1 && continue am__tar='tar chf - "$$tardir"' am__tar_='tar chf - "$tardir"' am__untar='tar xf -' ;; pax) am__tar='pax -L -x $1 -w "$$tardir"' am__tar_='pax -L -x $1 -w "$tardir"' am__untar='pax -r' ;; cpio) am__tar='find "$$tardir" -print | cpio -o -H $1 -L' am__tar_='find "$tardir" -print | cpio -o -H $1 -L' am__untar='cpio -i -H $1 -d' ;; none) am__tar=false am__tar_=false am__untar=false ;; esac # If the value was cached, stop now. We just wanted to have am__tar # and am__untar set. test -n "${am_cv_prog_tar_$1}" && break # tar/untar a dummy directory, and stop if the command works. rm -rf conftest.dir mkdir conftest.dir echo GrepMe > conftest.dir/file AM_RUN_LOG([tardir=conftest.dir && eval $am__tar_ >conftest.tar]) rm -rf conftest.dir if test -s conftest.tar; then AM_RUN_LOG([$am__untar /dev/null 2>&1 && break fi done rm -rf conftest.dir AC_CACHE_VAL([am_cv_prog_tar_$1], [am_cv_prog_tar_$1=$_am_tool]) AC_MSG_RESULT([$am_cv_prog_tar_$1])]) AC_SUBST([am__tar]) AC_SUBST([am__untar]) ]) # _AM_PROG_TAR sleuthkit-4.2.0/bindings/000755 000765 000024 00000000000 12576326343 016133 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/ChangeLog.txt000644 000765 000024 00000000601 12576320677 016727 0ustar00carrierstaff000000 000000 This program does not distribute an official ChangeLog file. You can generate one from the subversion repository though using the following command: svn log http://svn.sleuthkit.org/repos/sleuthkit/ For a specific release, try something like: svn log http://svn.sleuthkit.org/repos/sleuthkit/tags/sleuthkit-3.0.0 and replace 3.0.0 with the version you are interested in. sleuthkit-4.2.0/config/000755 000765 000024 00000000000 12576326346 015606 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/configure000755 000765 000024 00002537255 12576326301 016262 0ustar00carrierstaff000000 000000 #! /bin/sh # Guess values for system-dependent variables and create Makefiles. # Generated by GNU Autoconf 2.69 for sleuthkit 4.2.0. # # # Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc. # # # This configure script is free software; the Free Software Foundation # gives unlimited permission to copy, distribute and modify it. ## -------------------- ## ## M4sh Initialization. ## ## -------------------- ## # Be more Bourne compatible DUALCASE=1; export DUALCASE # for MKS sh if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST else case `(set -o) 2>/dev/null` in #( *posix*) : set -o posix ;; #( *) : ;; esac fi as_nl=' ' export as_nl # Printing a long string crashes Solaris 7 /usr/bin/printf. 
as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo # Prefer a ksh shell builtin over an external printf program on Solaris, # but without wasting forks for bash or zsh. if test -z "$BASH_VERSION$ZSH_VERSION" \ && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='print -r --' as_echo_n='print -rn --' elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='printf %s\n' as_echo_n='printf %s' else if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' as_echo_n='/usr/ucb/echo -n' else as_echo_body='eval expr "X$1" : "X\\(.*\\)"' as_echo_n_body='eval arg=$1; case $arg in #( *"$as_nl"*) expr "X$arg" : "X\\(.*\\)$as_nl"; arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; esac; expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" ' export as_echo_n_body as_echo_n='sh -c $as_echo_n_body as_echo' fi export as_echo_body as_echo='sh -c $as_echo_body as_echo' fi # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then PATH_SEPARATOR=: (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || PATH_SEPARATOR=';' } fi # IFS # We need space, tab and new line, in precisely that order. Quoting is # there to prevent editors from complaining about space-tab. # (If _AS_PATH_WALK were called with IFS unset, it would disable word # splitting by setting IFS to empty value.) IFS=" "" $as_nl" # Find who we are. Look in the path if we contain no directory separator. as_myself= case $0 in #(( *[\\/]* ) as_myself=$0 ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break done IFS=$as_save_IFS ;; esac # We did not find ourselves, most probably we were run as `sh COMMAND' # in which case we are not to be found in the path. if test "x$as_myself" = x; then as_myself=$0 fi if test ! -f "$as_myself"; then $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 exit 1 fi # Unset variables that we do not need and which cause bugs (e.g. in # pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" # suppresses any "Segmentation fault" message there. '((' could # trigger a bug in pdksh 5.2.14. for as_var in BASH_ENV ENV MAIL MAILPATH do eval test x\${$as_var+set} = xset \ && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : done PS1='$ ' PS2='> ' PS4='+ ' # NLS nuisances. LC_ALL=C export LC_ALL LANGUAGE=C export LANGUAGE # CDPATH. (unset CDPATH) >/dev/null 2>&1 && unset CDPATH # Use a proper internal environment variable to ensure we don't fall # into an infinite loop, continuously re-executing ourselves. if test x"${_as_can_reexec}" != xno && test "x$CONFIG_SHELL" != x; then _as_can_reexec=no; export _as_can_reexec; # We cannot yet assume a decent shell, so we have to provide a # neutralization value for shells without unset; and this also # works around shells that cannot unset nonexistent variables. # Preserve -v and -x to the replacement shell. 
BASH_ENV=/dev/null ENV=/dev/null (unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV case $- in # (((( *v*x* | *x*v* ) as_opts=-vx ;; *v* ) as_opts=-v ;; *x* ) as_opts=-x ;; * ) as_opts= ;; esac exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} # Admittedly, this is quite paranoid, since all the known shells bail # out after a failed `exec'. $as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 as_fn_exit 255 fi # We don't want this to propagate to other subprocesses. { _as_can_reexec=; unset _as_can_reexec;} if test "x$CONFIG_SHELL" = x; then as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then : emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which # is contrary to our usage. Disable this feature. alias -g '\${1+\"\$@\"}'='\"\$@\"' setopt NO_GLOB_SUBST else case \`(set -o) 2>/dev/null\` in #( *posix*) : set -o posix ;; #( *) : ;; esac fi " as_required="as_fn_return () { (exit \$1); } as_fn_success () { as_fn_return 0; } as_fn_failure () { as_fn_return 1; } as_fn_ret_success () { return 0; } as_fn_ret_failure () { return 1; } exitcode=0 as_fn_success || { exitcode=1; echo as_fn_success failed.; } as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; } as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; } as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; } if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then : else exitcode=1; echo positional parameters were not saved. fi test x\$exitcode = x0 || exit 1 test -x / || exit 1" as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" && test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1 test -n \"\${ZSH_VERSION+set}\${BASH_VERSION+set}\" || ( ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' ECHO=\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO ECHO=\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO PATH=/empty FPATH=/empty; export PATH FPATH test \"X\`printf %s \$ECHO\`\" = \"X\$ECHO\" \\ || test \"X\`print -r -- \$ECHO\`\" = \"X\$ECHO\" ) || exit 1 test \$(( 1 + 1 )) = 2 || exit 1" if (eval "$as_required") 2>/dev/null; then : as_have_required=yes else as_have_required=no fi if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then : else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR as_found=false for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. as_found=: case $as_dir in #( /*) for as_base in sh bash ksh sh5; do # Try only shells that exist, to save several forks. 
as_shell=$as_dir/$as_base if { test -f "$as_shell" || test -f "$as_shell.exe"; } && { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then : CONFIG_SHELL=$as_shell as_have_required=yes if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then : break 2 fi fi done;; esac as_found=false done $as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } && { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then : CONFIG_SHELL=$SHELL as_have_required=yes fi; } IFS=$as_save_IFS if test "x$CONFIG_SHELL" != x; then : export CONFIG_SHELL # We cannot yet assume a decent shell, so we have to provide a # neutralization value for shells without unset; and this also # works around shells that cannot unset nonexistent variables. # Preserve -v and -x to the replacement shell. BASH_ENV=/dev/null ENV=/dev/null (unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV case $- in # (((( *v*x* | *x*v* ) as_opts=-vx ;; *v* ) as_opts=-v ;; *x* ) as_opts=-x ;; * ) as_opts= ;; esac exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} # Admittedly, this is quite paranoid, since all the known shells bail # out after a failed `exec'. $as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 exit 255 fi if test x$as_have_required = xno; then : $as_echo "$0: This script requires a shell more modern than all" $as_echo "$0: the shells that I found on your system." if test x${ZSH_VERSION+set} = xset ; then $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should" $as_echo "$0: be upgraded to zsh 4.3.4 or later." else $as_echo "$0: Please tell bug-autoconf@gnu.org about your system, $0: including any error possibly output before this $0: message. Then install a modern shell, or manually run $0: the script under such a shell if you do have one." fi exit 1 fi fi fi SHELL=${CONFIG_SHELL-/bin/sh} export SHELL # Unset more variables known to interfere with behavior of common tools. CLICOLOR_FORCE= GREP_OPTIONS= unset CLICOLOR_FORCE GREP_OPTIONS ## --------------------- ## ## M4sh Shell Functions. ## ## --------------------- ## # as_fn_unset VAR # --------------- # Portably unset VAR. as_fn_unset () { { eval $1=; unset $1;} } as_unset=as_fn_unset # as_fn_set_status STATUS # ----------------------- # Set $? to STATUS, without forking. as_fn_set_status () { return $1 } # as_fn_set_status # as_fn_exit STATUS # ----------------- # Exit the shell with STATUS, even in a "trap 0" or "set -e" context. as_fn_exit () { set +e as_fn_set_status $1 exit $1 } # as_fn_exit # as_fn_mkdir_p # ------------- # Create "$as_dir" as a directory, including parents if necessary. as_fn_mkdir_p () { case $as_dir in #( -*) as_dir=./$as_dir;; esac test -d "$as_dir" || eval $as_mkdir_p || { as_dirs= while :; do case $as_dir in #( *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( *) as_qdir=$as_dir;; esac as_dirs="'$as_qdir' $as_dirs" as_dir=`$as_dirname -- "$as_dir" || $as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_dir" : 'X\(//\)[^/]' \| \ X"$as_dir" : 'X\(//\)$' \| \ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$as_dir" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` test -d "$as_dir" && break done test -z "$as_dirs" || eval "mkdir $as_dirs" } || test -d "$as_dir" || as_fn_error $? 
"cannot create directory $as_dir" } # as_fn_mkdir_p # as_fn_executable_p FILE # ----------------------- # Test if FILE is an executable regular file. as_fn_executable_p () { test -f "$1" && test -x "$1" } # as_fn_executable_p # as_fn_append VAR VALUE # ---------------------- # Append the text in VALUE to the end of the definition contained in VAR. Take # advantage of any shell optimizations that allow amortized linear growth over # repeated appends, instead of the typical quadratic growth present in naive # implementations. if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : eval 'as_fn_append () { eval $1+=\$2 }' else as_fn_append () { eval $1=\$$1\$2 } fi # as_fn_append # as_fn_arith ARG... # ------------------ # Perform arithmetic evaluation on the ARGs, and store the result in the # global $as_val. Take advantage of shells that can avoid forks. The arguments # must be portable across $(()) and expr. if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : eval 'as_fn_arith () { as_val=$(( $* )) }' else as_fn_arith () { as_val=`expr "$@" || test $? -eq 1` } fi # as_fn_arith # as_fn_error STATUS ERROR [LINENO LOG_FD] # ---------------------------------------- # Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are # provided, also output the error to LOG_FD, referencing LINENO. Then exit the # script with STATUS, using 1 if that was 0. as_fn_error () { as_status=$1; test $as_status -eq 0 && as_status=1 if test "$4"; then as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 fi $as_echo "$as_me: error: $2" >&2 as_fn_exit $as_status } # as_fn_error if expr a : '\(a\)' >/dev/null 2>&1 && test "X`expr 00001 : '.*\(...\)'`" = X001; then as_expr=expr else as_expr=false fi if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then as_basename=basename else as_basename=false fi if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then as_dirname=dirname else as_dirname=false fi as_me=`$as_basename -- "$0" || $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ X"$0" : 'X\(//\)$' \| \ X"$0" : 'X\(/\)' \| . 2>/dev/null || $as_echo X/"$0" | sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/ q } /^X\/\(\/\/\)$/{ s//\1/ q } /^X\/\(\/\).*/{ s//\1/ q } s/.*/./; q'` # Avoid depending upon Character Ranges. as_cr_letters='abcdefghijklmnopqrstuvwxyz' as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' as_cr_Letters=$as_cr_letters$as_cr_LETTERS as_cr_digits='0123456789' as_cr_alnum=$as_cr_Letters$as_cr_digits as_lineno_1=$LINENO as_lineno_1a=$LINENO as_lineno_2=$LINENO as_lineno_2a=$LINENO eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" && test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || { # Blame Lee E. McMahon (1931-1989) for sed's syntax. :-) sed -n ' p /[$]LINENO/= ' <$as_myself | sed ' s/[$]LINENO.*/&-/ t lineno b :lineno N :loop s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ t loop s/-\n.*// ' >$as_me.lineno && chmod +x "$as_me.lineno" || { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; } # If we had to re-execute with $CONFIG_SHELL, we're ensured to have # already done that, so ensure we don't try to do so again and fall # in an infinite loop. This has already happened in practice. _as_can_reexec=no; export _as_can_reexec # Don't try to exec as it changes $[0], causing all sort of problems # (the dirname of $[0] is not the place where we might find the # original and so on. 
Autoconf is especially sensitive to this). . "./$as_me.lineno" # Exit status is that of the last command. exit } ECHO_C= ECHO_N= ECHO_T= case `echo -n x` in #((((( -n*) case `echo 'xy\c'` in *c*) ECHO_T=' ';; # ECHO_T is single tab character. xy) ECHO_C='\c';; *) echo `echo ksh88 bug on AIX 6.1` > /dev/null ECHO_T=' ';; esac;; *) ECHO_N='-n';; esac rm -f conf$$ conf$$.exe conf$$.file if test -d conf$$.dir; then rm -f conf$$.dir/conf$$.file else rm -f conf$$.dir mkdir conf$$.dir 2>/dev/null fi if (echo >conf$$.file) 2>/dev/null; then if ln -s conf$$.file conf$$ 2>/dev/null; then as_ln_s='ln -s' # ... but there are two gotchas: # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. # In both cases, we have to default to `cp -pR'. ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || as_ln_s='cp -pR' elif ln conf$$.file conf$$ 2>/dev/null; then as_ln_s=ln else as_ln_s='cp -pR' fi else as_ln_s='cp -pR' fi rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file rmdir conf$$.dir 2>/dev/null if mkdir -p . 2>/dev/null; then as_mkdir_p='mkdir -p "$as_dir"' else test -d ./-p && rmdir ./-p as_mkdir_p=false fi as_test_x='test -x' as_executable_p=as_fn_executable_p # Sed expression to map a string onto a valid CPP name. as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" # Sed expression to map a string onto a valid variable name. as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" SHELL=${CONFIG_SHELL-/bin/sh} test -n "$DJDIR" || exec 7<&0 </dev/null 6>&1 # Name of the host. # hostname on some systems (SVR3.2, old GNU/Linux) returns a bogus exit status, # so uname gets run too. ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q` # # Initializations. # ac_default_prefix=/usr/local ac_clean_files= ac_config_libobj_dir=. LIBOBJS= cross_compiling=no subdirs= MFLAGS= MAKEFLAGS= # Identity of this package. PACKAGE_NAME='sleuthkit' PACKAGE_TARNAME='sleuthkit' PACKAGE_VERSION='4.2.0' PACKAGE_STRING='sleuthkit 4.2.0' PACKAGE_BUGREPORT='' PACKAGE_URL='' ac_unique_file="tsk/base/tsk_base.h" # Factoring default headers for most tests.
ac_includes_default="\ #include <stdio.h> #ifdef HAVE_SYS_TYPES_H # include <sys/types.h> #endif #ifdef HAVE_SYS_STAT_H # include <sys/stat.h> #endif #ifdef STDC_HEADERS # include <stdlib.h> # include <stddef.h> #else # ifdef HAVE_STDLIB_H # include <stdlib.h> # endif #endif #ifdef HAVE_STRING_H # if !defined STDC_HEADERS && defined HAVE_MEMORY_H # include <memory.h> # endif # include <string.h> #endif #ifdef HAVE_STRINGS_H # include <strings.h> #endif #ifdef HAVE_INTTYPES_H # include <inttypes.h> #endif #ifdef HAVE_STDINT_H # include <stdint.h> #endif #ifdef HAVE_UNISTD_H # include <unistd.h> #endif" ac_header_list= ac_subst_vars='am__EXEEXT_FALSE am__EXEEXT_TRUE LTLIBOBJS LIBTSK_LDFLAGS X_JNI_FALSE X_JNI_TRUE ANT_FOUND uudecode JAVA JNI_CPPFLAGS _ACJNI_JAVAC JAVAC Z_PATH X_ZLIB_FALSE X_ZLIB_TRUE PTHREAD_CFLAGS PTHREAD_LIBS PTHREAD_CC ax_pthread_config LIBOBJS ALLOCA PERL CXXCPP am__fastdepCXX_FALSE am__fastdepCXX_TRUE CXXDEPMODE ac_ct_CXX CXXFLAGS CXX MAINT MAINTAINER_MODE_FALSE MAINTAINER_MODE_TRUE CPP LT_SYS_LIBRARY_PATH OTOOL64 OTOOL LIPO NMEDIT DSYMUTIL MANIFEST_TOOL RANLIB ac_ct_AR AR DLLTOOL OBJDUMP LN_S NM ac_ct_DUMPBIN DUMPBIN LD FGREP EGREP GREP SED am__fastdepCC_FALSE am__fastdepCC_TRUE CCDEPMODE am__nodep AMDEPBACKSLASH AMDEP_FALSE AMDEP_TRUE am__quote am__include DEPDIR OBJEXT EXEEXT ac_ct_CC CPPFLAGS LDFLAGS CFLAGS CC host_os host_vendor host_cpu host build_os build_vendor build_cpu build LIBTOOL CPPUNIT_FALSE CPPUNIT_TRUE CPPUNIT_LIBS CPPUNIT_CFLAGS CPPUNIT_CONFIG AM_BACKSLASH AM_DEFAULT_VERBOSITY AM_DEFAULT_V AM_V am__untar am__tar AMTAR am__leading_dot SET_MAKE AWK mkdir_p MKDIR_P INSTALL_STRIP_PROGRAM STRIP install_sh MAKEINFO AUTOHEADER AUTOMAKE AUTOCONF ACLOCAL VERSION PACKAGE CYGPATH_W am__isrc INSTALL_DATA INSTALL_SCRIPT INSTALL_PROGRAM target_alias host_alias build_alias LIBS ECHO_T ECHO_N ECHO_C DEFS mandir localedir libdir psdir pdfdir dvidir htmldir infodir docdir oldincludedir includedir localstatedir sharedstatedir sysconfdir datadir datarootdir libexecdir sbindir bindir program_transform_name prefix exec_prefix PACKAGE_URL PACKAGE_BUGREPORT PACKAGE_STRING PACKAGE_VERSION PACKAGE_TARNAME PACKAGE_NAME PATH_SEPARATOR SHELL' ac_subst_files='' ac_user_opts=' enable_option_checking enable_silent_rules with_cppunit_prefix with_cppunit_exec_prefix enable_shared enable_static with_pic enable_fast_install with_aix_soname enable_dependency_tracking with_gnu_ld with_sysroot enable_libtool_lock enable_maintainer_mode enable_largefile enable_java with_afflib with_zlib with_libewf ' ac_precious_vars='build_alias host_alias target_alias CC CFLAGS LDFLAGS LIBS CPPFLAGS LT_SYS_LIBRARY_PATH CPP CXX CXXFLAGS CCC CXXCPP' # Initialize some variables set by options. ac_init_help= ac_init_version=false ac_unrecognized_opts= ac_unrecognized_sep= # The variables have the same names as the options, with # dashes changed to underlines. cache_file=/dev/null exec_prefix=NONE no_create= no_recursion= prefix=NONE program_prefix=NONE program_suffix=NONE program_transform_name=s,x,x, silent= site= srcdir= verbose= x_includes=NONE x_libraries=NONE # Installation directory options. # These are left unexpanded so users can "make install exec_prefix=/foo" # and all the variables that are supposed to be based on exec_prefix # by default will actually change. # Use braces instead of parens because sh, perl, etc. also accept them. # (The list follows the same order as the GNU Coding Standards.)
bindir='${exec_prefix}/bin' sbindir='${exec_prefix}/sbin' libexecdir='${exec_prefix}/libexec' datarootdir='${prefix}/share' datadir='${datarootdir}' sysconfdir='${prefix}/etc' sharedstatedir='${prefix}/com' localstatedir='${prefix}/var' includedir='${prefix}/include' oldincludedir='/usr/include' docdir='${datarootdir}/doc/${PACKAGE_TARNAME}' infodir='${datarootdir}/info' htmldir='${docdir}' dvidir='${docdir}' pdfdir='${docdir}' psdir='${docdir}' libdir='${exec_prefix}/lib' localedir='${datarootdir}/locale' mandir='${datarootdir}/man' ac_prev= ac_dashdash= for ac_option do # If the previous option needs an argument, assign it. if test -n "$ac_prev"; then eval $ac_prev=\$ac_option ac_prev= continue fi case $ac_option in *=?*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;; *=) ac_optarg= ;; *) ac_optarg=yes ;; esac # Accept the important Cygnus configure options, so we can diagnose typos. case $ac_dashdash$ac_option in --) ac_dashdash=yes ;; -bindir | --bindir | --bindi | --bind | --bin | --bi) ac_prev=bindir ;; -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*) bindir=$ac_optarg ;; -build | --build | --buil | --bui | --bu) ac_prev=build_alias ;; -build=* | --build=* | --buil=* | --bui=* | --bu=*) build_alias=$ac_optarg ;; -cache-file | --cache-file | --cache-fil | --cache-fi \ | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c) ac_prev=cache_file ;; -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \ | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*) cache_file=$ac_optarg ;; --config-cache | -C) cache_file=config.cache ;; -datadir | --datadir | --datadi | --datad) ac_prev=datadir ;; -datadir=* | --datadir=* | --datadi=* | --datad=*) datadir=$ac_optarg ;; -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \ | --dataroo | --dataro | --datar) ac_prev=datarootdir ;; -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \ | --dataroot=* | --dataroo=* | --dataro=* | --datar=*) datarootdir=$ac_optarg ;; -disable-* | --disable-*) ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? "invalid feature name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "enable_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval enable_$ac_useropt=no ;; -docdir | --docdir | --docdi | --doc | --do) ac_prev=docdir ;; -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*) docdir=$ac_optarg ;; -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv) ac_prev=dvidir ;; -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*) dvidir=$ac_optarg ;; -enable-* | --enable-*) ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? 
"invalid feature name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "enable_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval enable_$ac_useropt=\$ac_optarg ;; -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \ | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \ | --exec | --exe | --ex) ac_prev=exec_prefix ;; -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \ | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \ | --exec=* | --exe=* | --ex=*) exec_prefix=$ac_optarg ;; -gas | --gas | --ga | --g) # Obsolete; use --with-gas. with_gas=yes ;; -help | --help | --hel | --he | -h) ac_init_help=long ;; -help=r* | --help=r* | --hel=r* | --he=r* | -hr*) ac_init_help=recursive ;; -help=s* | --help=s* | --hel=s* | --he=s* | -hs*) ac_init_help=short ;; -host | --host | --hos | --ho) ac_prev=host_alias ;; -host=* | --host=* | --hos=* | --ho=*) host_alias=$ac_optarg ;; -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht) ac_prev=htmldir ;; -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \ | --ht=*) htmldir=$ac_optarg ;; -includedir | --includedir | --includedi | --included | --include \ | --includ | --inclu | --incl | --inc) ac_prev=includedir ;; -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \ | --includ=* | --inclu=* | --incl=* | --inc=*) includedir=$ac_optarg ;; -infodir | --infodir | --infodi | --infod | --info | --inf) ac_prev=infodir ;; -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*) infodir=$ac_optarg ;; -libdir | --libdir | --libdi | --libd) ac_prev=libdir ;; -libdir=* | --libdir=* | --libdi=* | --libd=*) libdir=$ac_optarg ;; -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \ | --libexe | --libex | --libe) ac_prev=libexecdir ;; -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \ | --libexe=* | --libex=* | --libe=*) libexecdir=$ac_optarg ;; -localedir | --localedir | --localedi | --localed | --locale) ac_prev=localedir ;; -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*) localedir=$ac_optarg ;; -localstatedir | --localstatedir | --localstatedi | --localstated \ | --localstate | --localstat | --localsta | --localst | --locals) ac_prev=localstatedir ;; -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \ | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*) localstatedir=$ac_optarg ;; -mandir | --mandir | --mandi | --mand | --man | --ma | --m) ac_prev=mandir ;; -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*) mandir=$ac_optarg ;; -nfp | --nfp | --nf) # Obsolete; use --without-fp. 
with_fp=no ;; -no-create | --no-create | --no-creat | --no-crea | --no-cre \ | --no-cr | --no-c | -n) no_create=yes ;; -no-recursion | --no-recursion | --no-recursio | --no-recursi \ | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r) no_recursion=yes ;; -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \ | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \ | --oldin | --oldi | --old | --ol | --o) ac_prev=oldincludedir ;; -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \ | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \ | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*) oldincludedir=$ac_optarg ;; -prefix | --prefix | --prefi | --pref | --pre | --pr | --p) ac_prev=prefix ;; -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*) prefix=$ac_optarg ;; -program-prefix | --program-prefix | --program-prefi | --program-pref \ | --program-pre | --program-pr | --program-p) ac_prev=program_prefix ;; -program-prefix=* | --program-prefix=* | --program-prefi=* \ | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*) program_prefix=$ac_optarg ;; -program-suffix | --program-suffix | --program-suffi | --program-suff \ | --program-suf | --program-su | --program-s) ac_prev=program_suffix ;; -program-suffix=* | --program-suffix=* | --program-suffi=* \ | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*) program_suffix=$ac_optarg ;; -program-transform-name | --program-transform-name \ | --program-transform-nam | --program-transform-na \ | --program-transform-n | --program-transform- \ | --program-transform | --program-transfor \ | --program-transfo | --program-transf \ | --program-trans | --program-tran \ | --progr-tra | --program-tr | --program-t) ac_prev=program_transform_name ;; -program-transform-name=* | --program-transform-name=* \ | --program-transform-nam=* | --program-transform-na=* \ | --program-transform-n=* | --program-transform-=* \ | --program-transform=* | --program-transfor=* \ | --program-transfo=* | --program-transf=* \ | --program-trans=* | --program-tran=* \ | --progr-tra=* | --program-tr=* | --program-t=*) program_transform_name=$ac_optarg ;; -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd) ac_prev=pdfdir ;; -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*) pdfdir=$ac_optarg ;; -psdir | --psdir | --psdi | --psd | --ps) ac_prev=psdir ;; -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*) psdir=$ac_optarg ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil) silent=yes ;; -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) ac_prev=sbindir ;; -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ | --sbi=* | --sb=*) sbindir=$ac_optarg ;; -sharedstatedir | --sharedstatedir | --sharedstatedi \ | --sharedstated | --sharedstate | --sharedstat | --sharedsta \ | --sharedst | --shareds | --shared | --share | --shar \ | --sha | --sh) ac_prev=sharedstatedir ;; -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \ | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \ | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \ | --sha=* | --sh=*) sharedstatedir=$ac_optarg ;; -site | --site | --sit) ac_prev=site ;; -site=* | --site=* | --sit=*) site=$ac_optarg ;; -srcdir | --srcdir | --srcdi | --srcd | --src | --sr) ac_prev=srcdir ;; -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*) srcdir=$ac_optarg ;; 
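# For illustration, a few invocations that exercise the option parsing in
# this case statement (the paths are hypothetical):
#
#   ./configure --prefix=/opt/sleuthkit       # sets prefix=/opt/sleuthkit
#   ./configure --srcdir=../sleuthkit-4.2.0   # sets srcdir explicitly
#   ./configure -q -C                         # silent=yes, cache_file=config.cache
#
# Abbreviated spellings such as --pref=/opt/sleuthkit are accepted as well,
# which is why each arm also lists the truncated forms of its option name.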
-sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \ | --syscon | --sysco | --sysc | --sys | --sy) ac_prev=sysconfdir ;; -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \ | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*) sysconfdir=$ac_optarg ;; -target | --target | --targe | --targ | --tar | --ta | --t) ac_prev=target_alias ;; -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*) target_alias=$ac_optarg ;; -v | -verbose | --verbose | --verbos | --verbo | --verb) verbose=yes ;; -version | --version | --versio | --versi | --vers | -V) ac_init_version=: ;; -with-* | --with-*) ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? "invalid package name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "with_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval with_$ac_useropt=\$ac_optarg ;; -without-* | --without-*) ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? "invalid package name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "with_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval with_$ac_useropt=no ;; --x) # Obsolete; use --with-x. with_x=yes ;; -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \ | --x-incl | --x-inc | --x-in | --x-i) ac_prev=x_includes ;; -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \ | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*) x_includes=$ac_optarg ;; -x-libraries | --x-libraries | --x-librarie | --x-librari \ | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l) ac_prev=x_libraries ;; -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \ | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*) x_libraries=$ac_optarg ;; -*) as_fn_error $? "unrecognized option: \`$ac_option' Try \`$0 --help' for more information" ;; *=*) ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='` # Reject names that are not valid shell variable names. case $ac_envvar in #( '' | [0-9]* | *[!_$as_cr_alnum]* ) as_fn_error $? "invalid variable name: \`$ac_envvar'" ;; esac eval $ac_envvar=\$ac_optarg export $ac_envvar ;; *) # FIXME: should be removed in autoconf 3.0. $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2 expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2 : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}" ;; esac done if test -n "$ac_prev"; then ac_option=--`echo $ac_prev | sed 's/_/-/g'` as_fn_error $? "missing argument to $ac_option" fi if test -n "$ac_unrecognized_opts"; then case $enable_option_checking in no) ;; fatal) as_fn_error $? "unrecognized options: $ac_unrecognized_opts" ;; *) $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;; esac fi # Check all directory arguments for consistency. 
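# For illustration, the --with-*/--without-*/--enable-* arms above turn
# package options into shell variables by rewriting '-', '+' and '.' to '_',
# so for packages this script knows about (see ac_user_opts):
#
#   ./configure --with-libewf=/usr/local   # -> with_libewf=/usr/local
#   ./configure --without-afflib           # -> with_afflib=no
#   ./configure --enable-java=no           # -> enable_java=no
#
# Options not listed in ac_user_opts are only collected in
# ac_unrecognized_opts and reported as a warning later, or as a hard error
# when --enable-option-checking=fatal is in effect.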
for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \ datadir sysconfdir sharedstatedir localstatedir includedir \ oldincludedir docdir infodir htmldir dvidir pdfdir psdir \ libdir localedir mandir do eval ac_val=\$$ac_var # Remove trailing slashes. case $ac_val in */ ) ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'` eval $ac_var=\$ac_val;; esac # Be sure to have absolute directory names. case $ac_val in [\\/$]* | ?:[\\/]* ) continue;; NONE | '' ) case $ac_var in *prefix ) continue;; esac;; esac as_fn_error $? "expected an absolute directory name for --$ac_var: $ac_val" done # There might be people who depend on the old broken behavior: `$host' # used to hold the argument of --host etc. # FIXME: To remove some day. build=$build_alias host=$host_alias target=$target_alias # FIXME: To remove some day. if test "x$host_alias" != x; then if test "x$build_alias" = x; then cross_compiling=maybe elif test "x$build_alias" != "x$host_alias"; then cross_compiling=yes fi fi ac_tool_prefix= test -n "$host_alias" && ac_tool_prefix=$host_alias- test "$silent" = yes && exec 6>/dev/null ac_pwd=`pwd` && test -n "$ac_pwd" && ac_ls_di=`ls -di .` && ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` || as_fn_error $? "working directory cannot be determined" test "X$ac_ls_di" = "X$ac_pwd_ls_di" || as_fn_error $? "pwd does not report name of working directory" # Find the source files, if location was not specified. if test -z "$srcdir"; then ac_srcdir_defaulted=yes # Try the directory containing this script, then the parent directory. ac_confdir=`$as_dirname -- "$as_myself" || $as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_myself" : 'X\(//\)[^/]' \| \ X"$as_myself" : 'X\(//\)$' \| \ X"$as_myself" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$as_myself" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` srcdir=$ac_confdir if test ! -r "$srcdir/$ac_unique_file"; then srcdir=.. fi else ac_srcdir_defaulted=no fi if test ! -r "$srcdir/$ac_unique_file"; then test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .." as_fn_error $? "cannot find sources ($ac_unique_file) in $srcdir" fi ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work" ac_abs_confdir=`( cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error $? "$ac_msg" pwd)` # When building in place, set srcdir=. if test "$ac_abs_confdir" = "$ac_pwd"; then srcdir=. fi # Remove unnecessary trailing slashes from srcdir. # Double slashes in file names in object file debugging info # mess up M-x gdb in Emacs. case $srcdir in */) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;; esac for ac_var in $ac_precious_vars; do eval ac_env_${ac_var}_set=\${${ac_var}+set} eval ac_env_${ac_var}_value=\$${ac_var} eval ac_cv_env_${ac_var}_set=\${${ac_var}+set} eval ac_cv_env_${ac_var}_value=\$${ac_var} done # # Report the --help message. # if test "$ac_init_help" = "long"; then # Omit some internal or obsolete options to make the list less imposing. # This message is too long to be a string in the A/UX 3.1 sh. cat <<_ACEOF \`configure' configures sleuthkit 4.2.0 to adapt to many kinds of systems. Usage: $0 [OPTION]... [VAR=VALUE]... To assign environment variables (e.g., CC, CFLAGS...), specify them as VAR=VALUE. See below for descriptions of some of the useful variables. Defaults for the options are specified in brackets. 
Configuration: -h, --help display this help and exit --help=short display options specific to this package --help=recursive display the short help of all the included packages -V, --version display version information and exit -q, --quiet, --silent do not print \`checking ...' messages --cache-file=FILE cache test results in FILE [disabled] -C, --config-cache alias for \`--cache-file=config.cache' -n, --no-create do not create output files --srcdir=DIR find the sources in DIR [configure dir or \`..'] Installation directories: --prefix=PREFIX install architecture-independent files in PREFIX [$ac_default_prefix] --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX [PREFIX] By default, \`make install' will install all the files in \`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify an installation prefix other than \`$ac_default_prefix' using \`--prefix', for instance \`--prefix=\$HOME'. For better control, use the options below. Fine tuning of the installation directories: --bindir=DIR user executables [EPREFIX/bin] --sbindir=DIR system admin executables [EPREFIX/sbin] --libexecdir=DIR program executables [EPREFIX/libexec] --sysconfdir=DIR read-only single-machine data [PREFIX/etc] --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com] --localstatedir=DIR modifiable single-machine data [PREFIX/var] --libdir=DIR object code libraries [EPREFIX/lib] --includedir=DIR C header files [PREFIX/include] --oldincludedir=DIR C header files for non-gcc [/usr/include] --datarootdir=DIR read-only arch.-independent data root [PREFIX/share] --datadir=DIR read-only architecture-independent data [DATAROOTDIR] --infodir=DIR info documentation [DATAROOTDIR/info] --localedir=DIR locale-dependent data [DATAROOTDIR/locale] --mandir=DIR man documentation [DATAROOTDIR/man] --docdir=DIR documentation root [DATAROOTDIR/doc/sleuthkit] --htmldir=DIR html documentation [DOCDIR] --dvidir=DIR dvi documentation [DOCDIR] --pdfdir=DIR pdf documentation [DOCDIR] --psdir=DIR ps documentation [DOCDIR] _ACEOF cat <<\_ACEOF Program names: --program-prefix=PREFIX prepend PREFIX to installed program names --program-suffix=SUFFIX append SUFFIX to installed program names --program-transform-name=PROGRAM run sed PROGRAM on installed program names System types: --build=BUILD configure for building on BUILD [guessed] --host=HOST cross-compile to build programs to run on HOST [BUILD] _ACEOF fi if test -n "$ac_init_help"; then case $ac_init_help in short | recursive ) echo "Configuration of sleuthkit 4.2.0:";; esac cat <<\_ACEOF Optional Features: --disable-option-checking ignore unrecognized --enable/--with options --disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no) --enable-FEATURE[=ARG] include FEATURE [ARG=yes] --enable-silent-rules less verbose build output (undo: "make V=1") --disable-silent-rules verbose build output (undo: "make V=0") --enable-shared[=PKGS] build shared libraries [default=yes] --enable-static[=PKGS] build static libraries [default=yes] --enable-fast-install[=PKGS] optimize for fast installation [default=yes] --enable-dependency-tracking do not reject slow dependency extractors --disable-dependency-tracking speeds up one-time build --disable-libtool-lock avoid locking (might break parallel builds) --enable-maintainer-mode enable make rules and dependencies not useful (and sometimes confusing) to the casual installer --disable-largefile omit support for large files --disable-java Do not build the java bindings or jar file Optional Packages: 
--with-PACKAGE[=ARG] use PACKAGE [ARG=yes] --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no) --with-cppunit-prefix=PFX Prefix where CppUnit is installed (optional) --with-cppunit-exec-prefix=PFX Exec prefix where CppUnit is installed (optional) --with-pic[=PKGS] try to use only PIC/non-PIC objects [default=use both] --with-aix-soname=aix|svr4|both shared library versioning (aka "SONAME") variant to provide on AIX, [default=aix]. --with-gnu-ld assume the C compiler uses GNU ld [default=no] --with-sysroot[=DIR] Search for dependent libraries within DIR (or the compiler's sysroot if not specified). --without-afflib Do not use AFFLIB even if it is installed --with-afflib=dir Specify that AFFLIB is installed in directory 'dir' --without-zlib Do not use ZLIB even if it is installed --with-zlib=dir Specify that ZLIB is installed in directory 'dir' --without-libewf Do not use libewf even if it is installed --with-libewf=dir Specify that libewf is installed in directory 'dir' Some influential environment variables: CC C compiler command CFLAGS C compiler flags LDFLAGS linker flags, e.g. -L<lib dir> if you have libraries in a nonstandard directory <lib dir> LIBS libraries to pass to the linker, e.g. -l<library> CPPFLAGS (Objective) C/C++ preprocessor flags, e.g. -I<include dir> if you have headers in a nonstandard directory <include dir> LT_SYS_LIBRARY_PATH User-defined run-time library search path. CPP C preprocessor CXX C++ compiler command CXXFLAGS C++ compiler flags CXXCPP C++ preprocessor Use these variables to override the choices made by `configure' or to help it to find libraries and programs with nonstandard names/locations. Report bugs to the package provider. _ACEOF ac_status=$? fi if test "$ac_init_help" = "recursive"; then # If there are subdirs, report their specific --help. for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue test -d "$ac_dir" || { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } || continue ac_builddir=. case "$ac_dir" in .) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` # A ".." for each directory in $ac_dir_suffix. ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` case $ac_top_builddir_sub in "") ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; esac ;; esac ac_abs_top_builddir=$ac_pwd ac_abs_builddir=$ac_pwd$ac_dir_suffix # for backward compatibility: ac_top_builddir=$ac_top_build_prefix case $srcdir in .) # We are building in place. ac_srcdir=. ac_top_srcdir=$ac_top_builddir_sub ac_abs_top_srcdir=$ac_pwd ;; [\\/]* | ?:[\\/]* ) # Absolute name. ac_srcdir=$srcdir$ac_dir_suffix; ac_top_srcdir=$srcdir ac_abs_top_srcdir=$srcdir ;; *) # Relative name. ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix ac_top_srcdir=$ac_top_build_prefix$srcdir ac_abs_top_srcdir=$ac_pwd/$srcdir ;; esac ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix cd "$ac_dir" || { ac_status=$?; continue; } # Check for guested configure. if test -f "$ac_srcdir/configure.gnu"; then echo && $SHELL "$ac_srcdir/configure.gnu" --help=recursive elif test -f "$ac_srcdir/configure"; then echo && $SHELL "$ac_srcdir/configure" --help=recursive else $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2 fi || ac_status=$? cd "$ac_pwd" || { ac_status=$?; break; } done fi test -n "$ac_init_help" && exit $ac_status if $ac_init_version; then cat <<\_ACEOF sleuthkit configure 4.2.0 generated by GNU Autoconf 2.69 Copyright (C) 2012 Free Software Foundation, Inc.
This configure script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it. _ACEOF exit fi ## ------------------------ ## ## Autoconf initialization. ## ## ------------------------ ## # ac_fn_c_try_compile LINENO # -------------------------- # Try to compile conftest.$ac_ext, and return whether this succeeded. ac_fn_c_try_compile () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack rm -f conftest.$ac_objext if { { ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compile") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_compile # ac_fn_c_try_link LINENO # ----------------------- # Try to link conftest.$ac_ext, and return whether this succeeded. ac_fn_c_try_link () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack rm -f conftest.$ac_objext conftest$ac_exeext if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || test -x conftest$ac_exeext }; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would # interfere with the next link command; also delete a directory that is # left behind by Apple's compiler. We do this before executing the actions. rm -rf conftest.dSYM conftest_ipa8_conftest.oo eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_link # ac_fn_c_check_header_compile LINENO HEADER VAR INCLUDES # ------------------------------------------------------- # Tests whether HEADER exists and can be compiled using the include files in # INCLUDES, setting the cache variable VAR accordingly. ac_fn_c_check_header_compile () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ $4 #include <$2> _ACEOF if ac_fn_c_try_compile "$LINENO"; then : eval "$3=yes" else eval "$3=no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_header_compile # ac_fn_c_try_cpp LINENO # ---------------------- # Try to preprocess conftest.$ac_ext, and return whether this succeeded. ac_fn_c_try_cpp () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack if { { ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } > conftest.i && { test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || test ! -s conftest.err }; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_cpp # ac_fn_c_try_run LINENO # ---------------------- # Try to link conftest.$ac_ext, and return whether this succeeded. Assumes # that executables *can* be run. ac_fn_c_try_run () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { ac_try='./conftest$ac_exeext' { { case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; }; then : ac_retval=0 else $as_echo "$as_me: program exited with status $ac_status" >&5 $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=$ac_status fi rm -rf conftest.dSYM conftest_ipa8_conftest.oo eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_run # ac_fn_c_check_func LINENO FUNC VAR # ---------------------------------- # Tests whether FUNC exists, setting the cache variable VAR accordingly ac_fn_c_check_func () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Define $2 to an innocuous variant, in case <limits.h> declares $2. For example, HP-UX 11i <limits.h> declares gettimeofday. */ #define $2 innocuous_$2 /* System header to define __stub macros and hopefully few prototypes, which can conflict with char $2 (); below. Prefer <limits.h> to <assert.h> if __STDC__ is defined, since <limits.h> exists even on freestanding compilers.
*/ #ifdef __STDC__ # include <limits.h> #else # include <assert.h> #endif #undef $2 /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char $2 (); /* The GNU C library defines this for functions which it implements to always fail with ENOSYS. Some functions are actually named something starting with __ and the normal name is an alias. */ #if defined __stub_$2 || defined __stub___$2 choke me #endif int main () { return $2 (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$3=yes" else eval "$3=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_func # ac_fn_cxx_try_compile LINENO # ---------------------------- # Try to compile conftest.$ac_ext, and return whether this succeeded. ac_fn_cxx_try_compile () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack rm -f conftest.$ac_objext if { { ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compile") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { test -z "$ac_cxx_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_cxx_try_compile # ac_fn_cxx_try_cpp LINENO # ------------------------ # Try to preprocess conftest.$ac_ext, and return whether this succeeded. ac_fn_cxx_try_cpp () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack if { { ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } > conftest.i && { test -z "$ac_cxx_preproc_warn_flag$ac_cxx_werror_flag" || test ! -s conftest.err }; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_cxx_try_cpp # ac_fn_cxx_try_link LINENO # ------------------------- # Try to link conftest.$ac_ext, and return whether this succeeded.
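# For illustration, ac_fn_c_check_func is invoked with a line number, a
# function name and a cache variable, e.g.
#
#   ac_fn_c_check_func "$LINENO" "gettimeofday" "ac_cv_func_gettimeofday"
#
# (gettimeofday is the example the template's own comment uses).  The heredoc
# above then expands into a conftest.$ac_ext along these lines:
#
#   #define gettimeofday innocuous_gettimeofday
#   #ifdef __STDC__
#   # include <limits.h>
#   #else
#   # include <assert.h>
#   #endif
#   #undef gettimeofday
#   char gettimeofday ();
#   int main () { return gettimeofday (); }
#
# The link succeeds only if the symbol can be resolved, so the cache variable
# records availability without the function ever being executed.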
ac_fn_cxx_try_link () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack rm -f conftest.$ac_objext conftest$ac_exeext if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { test -z "$ac_cxx_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || test -x conftest$ac_exeext }; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would # interfere with the next link command; also delete a directory that is # left behind by Apple's compiler. We do this before executing the actions. rm -rf conftest.dSYM conftest_ipa8_conftest.oo eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_cxx_try_link # ac_fn_c_check_header_mongrel LINENO HEADER VAR INCLUDES # ------------------------------------------------------- # Tests whether HEADER exists, giving a warning if it cannot be compiled using # the include files in INCLUDES and setting the cache variable VAR # accordingly. ac_fn_c_check_header_mongrel () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack if eval \${$3+:} false; then : { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } else # Is the header compilable? { $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 usability" >&5 $as_echo_n "checking $2 usability... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 #include <$2> _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_header_compiler=yes else ac_header_compiler=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_compiler" >&5 $as_echo "$ac_header_compiler" >&6; } # Is the header present? { $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 presence" >&5 $as_echo_n "checking $2 presence... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <$2> _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : ac_header_preproc=yes else ac_header_preproc=no fi rm -f conftest.err conftest.i conftest.$ac_ext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_preproc" >&5 $as_echo "$ac_header_preproc" >&6; } # So? What about this header? case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in #(( yes:no: ) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&5 $as_echo "$as_me: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" 
>&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} ;; no:yes:* ) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: present but cannot be compiled" >&5 $as_echo "$as_me: WARNING: $2: present but cannot be compiled" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: check for missing prerequisite headers?" >&5 $as_echo "$as_me: WARNING: $2: check for missing prerequisite headers?" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: see the Autoconf documentation" >&5 $as_echo "$as_me: WARNING: $2: see the Autoconf documentation" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&5 $as_echo "$as_me: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else eval "$3=\$ac_header_compiler" fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_header_mongrel # ac_fn_c_check_type LINENO TYPE VAR INCLUDES # ------------------------------------------- # Tests whether TYPE exists after having included INCLUDES, setting cache # variable VAR accordingly. ac_fn_c_check_type () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else eval "$3=no" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 int main () { if (sizeof ($2)) return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 int main () { if (sizeof (($2))) return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : else eval "$3=yes" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_type cat >config.log <<_ACEOF This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. It was created by sleuthkit $as_me 4.2.0, which was generated by GNU Autoconf 2.69. Invocation command line was $ $0 $@ _ACEOF exec 5>>config.log { cat <<_ASUNAME ## --------- ## ## Platform. 
## ## --------- ## hostname = `(hostname || uname -n) 2>/dev/null | sed 1q` uname -m = `(uname -m) 2>/dev/null || echo unknown` uname -r = `(uname -r) 2>/dev/null || echo unknown` uname -s = `(uname -s) 2>/dev/null || echo unknown` uname -v = `(uname -v) 2>/dev/null || echo unknown` /usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown` /bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown` /bin/arch = `(/bin/arch) 2>/dev/null || echo unknown` /usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown` /usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown` /usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown` /bin/machine = `(/bin/machine) 2>/dev/null || echo unknown` /usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown` /bin/universe = `(/bin/universe) 2>/dev/null || echo unknown` _ASUNAME as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. $as_echo "PATH: $as_dir" done IFS=$as_save_IFS } >&5 cat >&5 <<_ACEOF ## ----------- ## ## Core tests. ## ## ----------- ## _ACEOF # Keep a trace of the command line. # Strip out --no-create and --no-recursion so they do not pile up. # Strip out --silent because we don't want to record it for future runs. # Also quote any args containing shell meta-characters. # Make two passes to allow for proper duplicate-argument suppression. ac_configure_args= ac_configure_args0= ac_configure_args1= ac_must_keep_next=false for ac_pass in 1 2 do for ac_arg do case $ac_arg in -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil) continue ;; *\'*) ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; esac case $ac_pass in 1) as_fn_append ac_configure_args0 " '$ac_arg'" ;; 2) as_fn_append ac_configure_args1 " '$ac_arg'" if test $ac_must_keep_next = true; then ac_must_keep_next=false # Got value, back to normal. else case $ac_arg in *=* | --config-cache | -C | -disable-* | --disable-* \ | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \ | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \ | -with-* | --with-* | -without-* | --without-* | --x) case "$ac_configure_args0 " in "$ac_configure_args1"*" '$ac_arg' "* ) continue ;; esac ;; -* ) ac_must_keep_next=true ;; esac fi as_fn_append ac_configure_args " '$ac_arg'" ;; esac done done { ac_configure_args0=; unset ac_configure_args0;} { ac_configure_args1=; unset ac_configure_args1;} # When interrupted or exit'd, cleanup temporary files, and complete # config.log. We remove comments because anyway the quotes in there # would cause problems or look ugly. # WARNING: Use '\'' to represent an apostrophe within the trap. # WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug. trap 'exit_status=$? # Save into config.log some information that might help in debugging. { echo $as_echo "## ---------------- ## ## Cache variables. 
## ## ---------------- ##" echo # The following way of writing the cache mishandles newlines in values, ( for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do eval ac_val=\$$ac_var case $ac_val in #( *${as_nl}*) case $ac_var in #( *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 $as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; esac case $ac_var in #( _ | IFS | as_nl) ;; #( BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( *) { eval $ac_var=; unset $ac_var;} ;; esac ;; esac done (set) 2>&1 | case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #( *${as_nl}ac_space=\ *) sed -n \ "s/'\''/'\''\\\\'\'''\''/g; s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p" ;; #( *) sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" ;; esac | sort ) echo $as_echo "## ----------------- ## ## Output variables. ## ## ----------------- ##" echo for ac_var in $ac_subst_vars do eval ac_val=\$$ac_var case $ac_val in *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; esac $as_echo "$ac_var='\''$ac_val'\''" done | sort echo if test -n "$ac_subst_files"; then $as_echo "## ------------------- ## ## File substitutions. ## ## ------------------- ##" echo for ac_var in $ac_subst_files do eval ac_val=\$$ac_var case $ac_val in *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; esac $as_echo "$ac_var='\''$ac_val'\''" done | sort echo fi if test -s confdefs.h; then $as_echo "## ----------- ## ## confdefs.h. ## ## ----------- ##" echo cat confdefs.h echo fi test "$ac_signal" != 0 && $as_echo "$as_me: caught signal $ac_signal" $as_echo "$as_me: exit $exit_status" } >&5 rm -f core *.core core.conftest.* && rm -f -r conftest* confdefs* conf$$* $ac_clean_files && exit $exit_status ' 0 for ac_signal in 1 2 13 15; do trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal done ac_signal=0 # confdefs.h avoids OS command line length limits that DEFS can exceed. rm -f -r conftest* confdefs.h $as_echo "/* confdefs.h */" > confdefs.h # Predefined preprocessor variables. cat >>confdefs.h <<_ACEOF #define PACKAGE_NAME "$PACKAGE_NAME" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_TARNAME "$PACKAGE_TARNAME" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_VERSION "$PACKAGE_VERSION" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_STRING "$PACKAGE_STRING" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_URL "$PACKAGE_URL" _ACEOF # Let the site file select an alternate cache file if it wants to. # Prefer an explicitly selected file to automatically selected ones. ac_site_file1=NONE ac_site_file2=NONE if test -n "$CONFIG_SITE"; then # We do not want a PATH search for config.site. case $CONFIG_SITE in #(( -*) ac_site_file1=./$CONFIG_SITE;; */*) ac_site_file1=$CONFIG_SITE;; *) ac_site_file1=./$CONFIG_SITE;; esac elif test "x$prefix" != xNONE; then ac_site_file1=$prefix/share/config.site ac_site_file2=$prefix/etc/config.site else ac_site_file1=$ac_default_prefix/share/config.site ac_site_file2=$ac_default_prefix/etc/config.site fi for ac_site_file in "$ac_site_file1" "$ac_site_file2" do test "x$ac_site_file" = xNONE && continue if test /dev/null != "$ac_site_file" && test -r "$ac_site_file"; then { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5 $as_echo "$as_me: loading site script $ac_site_file" >&6;} sed 's/^/| /' "$ac_site_file" >&5 . 
"$ac_site_file" \ || { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "failed to load site script $ac_site_file See \`config.log' for more details" "$LINENO" 5; } fi done if test -r "$cache_file"; then # Some versions of bash will fail to source /dev/null (special files # actually), so we avoid doing that. DJGPP emulates it as a regular file. if test /dev/null != "$cache_file" && test -f "$cache_file"; then { $as_echo "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5 $as_echo "$as_me: loading cache $cache_file" >&6;} case $cache_file in [\\/]* | ?:[\\/]* ) . "$cache_file";; *) . "./$cache_file";; esac fi else { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5 $as_echo "$as_me: creating cache $cache_file" >&6;} >$cache_file fi as_fn_append ac_header_list " utime.h" # Check that the precious variables saved in the cache have kept the same # value. ac_cache_corrupted=false for ac_var in $ac_precious_vars; do eval ac_old_set=\$ac_cv_env_${ac_var}_set eval ac_new_set=\$ac_env_${ac_var}_set eval ac_old_val=\$ac_cv_env_${ac_var}_value eval ac_new_val=\$ac_env_${ac_var}_value case $ac_old_set,$ac_new_set in set,) { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 $as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} ac_cache_corrupted=: ;; ,set) { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5 $as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} ac_cache_corrupted=: ;; ,);; *) if test "x$ac_old_val" != "x$ac_new_val"; then # differences in whitespace do not lead to failure. ac_old_val_w=`echo x $ac_old_val` ac_new_val_w=`echo x $ac_new_val` if test "$ac_old_val_w" != "$ac_new_val_w"; then { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5 $as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} ac_cache_corrupted=: else { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5 $as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;} eval $ac_var=\$ac_old_val fi { $as_echo "$as_me:${as_lineno-$LINENO}: former value: \`$ac_old_val'" >&5 $as_echo "$as_me: former value: \`$ac_old_val'" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5 $as_echo "$as_me: current value: \`$ac_new_val'" >&2;} fi;; esac # Pass precious variables to config.status. if test "$ac_new_set" = set; then case $ac_new_val in *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; *) ac_arg=$ac_var=$ac_new_val ;; esac case " $ac_configure_args " in *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy. *) as_fn_append ac_configure_args " '$ac_arg'" ;; esac fi done if $ac_cache_corrupted; then { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5 $as_echo "$as_me: error: changes in the environment can compromise the build" >&2;} as_fn_error $? "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5 fi ## -------------------- ## ## Main body of script. 
## ## -------------------- ## ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu # =========================================================================== # http://www.gnu.org/software/autoconf-archive/ax_pthread.html # =========================================================================== # # SYNOPSIS # # AX_PTHREAD([ACTION-IF-FOUND[, ACTION-IF-NOT-FOUND]]) # # DESCRIPTION # # This macro figures out how to build C programs using POSIX threads. It # sets the PTHREAD_LIBS output variable to the threads library and linker # flags, and the PTHREAD_CFLAGS output variable to any special C compiler # flags that are needed. (The user can also force certain compiler # flags/libs to be tested by setting these environment variables.) # # Also sets PTHREAD_CC to any special C compiler that is needed for # multi-threaded programs (defaults to the value of CC otherwise). (This # is necessary on AIX to use the special cc_r compiler alias.) # # NOTE: You are assumed to not only compile your program with these flags, # but also link it with them as well. e.g. you should link with # $PTHREAD_CC $CFLAGS $PTHREAD_CFLAGS $LDFLAGS ... $PTHREAD_LIBS $LIBS # # If you are only building threads programs, you may wish to use these # variables in your default LIBS, CFLAGS, and CC: # # LIBS="$PTHREAD_LIBS $LIBS" # CFLAGS="$CFLAGS $PTHREAD_CFLAGS" # CC="$PTHREAD_CC" # # In addition, if the PTHREAD_CREATE_JOINABLE thread-attribute constant # has a nonstandard name, defines PTHREAD_CREATE_JOINABLE to that name # (e.g. PTHREAD_CREATE_UNDETACHED on AIX). # # ACTION-IF-FOUND is a list of shell commands to run if a threads library # is found, and ACTION-IF-NOT-FOUND is a list of commands to run if it # is not found. If ACTION-IF-FOUND is not specified, the default action # will define HAVE_PTHREAD. # # Please let the authors know if this macro fails on any platform, or if # you have any other suggestions or comments. This macro was based on work # by SGJ on autoconf scripts for FFTW (http://www.fftw.org/) (with help # from M. Frigo), as well as ac_pthread and hb_pthread macros posted by # Alejandro Forero Cuervo to the autoconf macro repository. We are also # grateful for the helpful feedback of numerous users. # # LICENSE # # Copyright (c) 2008 Steven G. Johnson # # This program is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation, either version 3 of the License, or (at your # option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program. If not, see . # # As a special exception, the respective Autoconf Macro's copyright owner # gives unlimited permission to copy, distribute and modify the configure # scripts that are the output of Autoconf when processing the Macro. You # need not follow the terms of the GNU General Public License when using # or distributing such scripts, even though portions of the text of the # Macro appear in them.
The GNU General Public License (GPL) does govern # all other use of the material that constitutes the Autoconf Macro. # # This special exception to the GPL applies to versions of the Autoconf # Macro released by the Autoconf Archive. When you make and distribute a # modified version of the Autoconf Macro, you may extend this special # exception to the GPL to apply to your modified version as well. #serial 11 # This is what autoupdate's m4 run will expand. It fires # the warning (with _au_warn_XXX), outputs it into the # updated configure.ac (with AC_DIAGNOSE), and then outputs # the replacement expansion. # This is an auxiliary macro that is also run when # autoupdate runs m4. It simply calls m4_warning, but # we need a wrapper so that each warning is emitted only # once. We break the quoting in m4_warning's argument in # order to expand this macro's arguments, not AU_DEFUN's. # Finally, this is the expansion that is picked up by # autoconf. It tells the user to run autoupdate, and # then outputs the replacement expansion. We do not care # about autoupdate's warning because that contains # information on what to do *after* running autoupdate. # =========================================================================== # http://www.gnu.org/software/autoconf-archive/ax_jni_include_dir.html # =========================================================================== # # SYNOPSIS # # AX_JNI_INCLUDE_DIR # # DESCRIPTION # # AX_JNI_INCLUDE_DIR finds include directories needed for compiling # programs using the JNI interface. # # JNI include directories are usually in the java distribution. This is # deduced from the value of JAVAC. When this macro completes, a list of # directories is left in the variable JNI_INCLUDE_DIRS. # # Example usage follows: # # AX_JNI_INCLUDE_DIR # # for JNI_INCLUDE_DIR in $JNI_INCLUDE_DIRS # do # CPPFLAGS="$CPPFLAGS -I$JNI_INCLUDE_DIR" # done # # If you want to force a specific compiler: # # - at the configure.in level, set JAVAC=yourcompiler before calling # AX_JNI_INCLUDE_DIR # # - at the configure level, setenv JAVAC # # Note: This macro can work with the autoconf M4 macros for Java programs. # This particular macro is not part of the original set of macros. # # LICENSE # # Copyright (c) 2008 Don Anderson # # Copying and distribution of this file, with or without modification, are # permitted in any medium without royalty provided the copyright notice # and this notice are preserved. This file is offered as-is, without any # warranty. # TSK: This has been modified to not error out if JNI things cannot be resolved # and to support scenarios whereby JAVAC is set to a location, but it is not # on the path. #serial 7 # This is what autoupdate's m4 run will expand. It fires # the warning (with _au_warn_XXX), outputs it into the # updated configure.ac (with AC_DIAGNOSE), and then outputs # the replacement expansion. # This is an auxiliary macro that is also run when # autoupdate runs m4. It simply calls m4_warning, but # we need a wrapper so that each warning is emitted only # once. We break the quoting in m4_warning's argument in # order to expand this macro's arguments, not AU_DEFUN's. # Finally, this is the expansion that is picked up by # autoconf. It tells the user to run autoupdate, and # then outputs the replacement expansion. We do not care # about autoupdate's warning because that contains # information on what to do *after* running autoupdate.
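# A minimal configure.ac sketch, assembled only from the usage already
# documented above for AX_PTHREAD and AX_JNI_INCLUDE_DIR; it is left as
# comments because this file is generated output and nothing here should
# execute. HAVE_PTHREAD is the macro's documented default define; the other
# names (PTHREAD_LIBS, PTHREAD_CFLAGS, PTHREAD_CC, JNI_INCLUDE_DIRS) are the
# output variables described above. Combining both macros in one project is
# an assumption made purely for illustration.
#
#   AX_PTHREAD([AC_DEFINE([HAVE_PTHREAD], [1], [Define if POSIX threads are available])])
#   LIBS="$PTHREAD_LIBS $LIBS"
#   CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
#   CC="$PTHREAD_CC"
#
#   AX_JNI_INCLUDE_DIR
#   for JNI_INCLUDE_DIR in $JNI_INCLUDE_DIRS
#   do
#     CPPFLAGS="$CPPFLAGS -I$JNI_INCLUDE_DIR"
#   done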
# _ACJNI_FOLLOW_SYMLINKS # Follows symbolic links on , # finally setting variable _ACJNI_FOLLOWED # ---------------------------------------- # _ACJNI ac_config_headers="$ac_config_headers tsk/tsk_config.h" ac_aux_dir= for ac_dir in config "$srcdir"/config; do if test -f "$ac_dir/install-sh"; then ac_aux_dir=$ac_dir ac_install_sh="$ac_aux_dir/install-sh -c" break elif test -f "$ac_dir/install.sh"; then ac_aux_dir=$ac_dir ac_install_sh="$ac_aux_dir/install.sh -c" break elif test -f "$ac_dir/shtool"; then ac_aux_dir=$ac_dir ac_install_sh="$ac_aux_dir/shtool install -c" break fi done if test -z "$ac_aux_dir"; then as_fn_error $? "cannot find install-sh, install.sh, or shtool in config \"$srcdir\"/config" "$LINENO" 5 fi # These three variables are undocumented and unsupported, # and are intended to be withdrawn in a future Autoconf release. # They can cause serious problems if a builder's source tree is in a directory # whose full name contains unusual characters. ac_config_guess="$SHELL $ac_aux_dir/config.guess" # Please don't use this var. ac_config_sub="$SHELL $ac_aux_dir/config.sub" # Please don't use this var. ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var. am__api_version='1.15' # Find a good install program. We prefer a C program (faster), # so one script is as good as another. But avoid the broken or # incompatible versions: # SysV /etc/install, /usr/sbin/install # SunOS /usr/etc/install # IRIX /sbin/install # AIX /bin/install # AmigaOS /C/install, which installs bootblocks on floppy discs # AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag # AFS /usr/afsws/bin/install, which mishandles nonexistent args # SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff" # OS/2's system install, which has a completely different semantic # ./install, which can be erroneously created by make from ./install.sh. # Reject install programs that cannot install multiple files. { $as_echo "$as_me:${as_lineno-$LINENO}: checking for a BSD-compatible install" >&5 $as_echo_n "checking for a BSD-compatible install... " >&6; } if test -z "$INSTALL"; then if ${ac_cv_path_install+:} false; then : $as_echo_n "(cached) " >&6 else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. # Account for people who put trailing slashes in PATH elements. case $as_dir/ in #(( ./ | .// | /[cC]/* | \ /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \ ?:[\\/]os2[\\/]install[\\/]* | ?:[\\/]OS2[\\/]INSTALL[\\/]* | \ /usr/ucb/* ) ;; *) # OSF1 and SCO ODT 3.0 have their own names for install. # Don't use installbsd from OSF since it installs stuff as root # by default. for ac_prog in ginstall scoinst install; do for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_prog$ac_exec_ext"; then if test $ac_prog = install && grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then # AIX install. It has an incompatible calling convention. : elif test $ac_prog = install && grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then # program-specific install script used by HP pwplus--don't use. 
: else rm -rf conftest.one conftest.two conftest.dir echo one > conftest.one echo two > conftest.two mkdir conftest.dir if "$as_dir/$ac_prog$ac_exec_ext" -c conftest.one conftest.two "`pwd`/conftest.dir" && test -s conftest.one && test -s conftest.two && test -s conftest.dir/conftest.one && test -s conftest.dir/conftest.two then ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c" break 3 fi fi fi done done ;; esac done IFS=$as_save_IFS rm -rf conftest.one conftest.two conftest.dir fi if test "${ac_cv_path_install+set}" = set; then INSTALL=$ac_cv_path_install else # As a last resort, use the slow shell script. Don't cache a # value for INSTALL within a source directory, because that will # break other packages using the cache if that directory is # removed, or if the value is a relative name. INSTALL=$ac_install_sh fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $INSTALL" >&5 $as_echo "$INSTALL" >&6; } # Use test -z because SunOS4 sh mishandles braces in ${var-val}. # It thinks the first close brace ends the variable substitution. test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}' test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}' test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644' { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether build environment is sane" >&5 $as_echo_n "checking whether build environment is sane... " >&6; } # Reject unsafe characters in $srcdir or the absolute working directory # name. Accept space and tab only in the latter. am_lf=' ' case `pwd` in *[\\\"\#\$\&\'\`$am_lf]*) as_fn_error $? "unsafe absolute working directory name" "$LINENO" 5;; esac case $srcdir in *[\\\"\#\$\&\'\`$am_lf\ \ ]*) as_fn_error $? "unsafe srcdir value: '$srcdir'" "$LINENO" 5;; esac # Do 'set' in a subshell so we don't clobber the current shell's # arguments. Must try -L first in case configure is actually a # symlink; some systems play weird games with the mod time of symlinks # (eg FreeBSD returns the mod time of the symlink's containing # directory). if ( am_has_slept=no for am_try in 1 2; do echo "timestamp, slept: $am_has_slept" > conftest.file set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null` if test "$*" = "X"; then # -L didn't work. set X `ls -t "$srcdir/configure" conftest.file` fi if test "$*" != "X $srcdir/configure conftest.file" \ && test "$*" != "X conftest.file $srcdir/configure"; then # If neither matched, then we have a broken ls. This can happen # if, for instance, CONFIG_SHELL is bash and it inherits a # broken ls alias from the environment. This has actually # happened. Such a system could not be considered "sane". as_fn_error $? "ls -t appears to fail. Make sure there is not a broken alias in your environment" "$LINENO" 5 fi if test "$2" = conftest.file || test $am_try -eq 2; then break fi # Just in case. sleep 1 am_has_slept=yes done test "$2" = conftest.file ) then # Ok. : else as_fn_error $? "newly created file is older than distributed files! Check your system clock" "$LINENO" 5 fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } # If we didn't sleep, we still need to ensure time stamps of config.status and # generated files are strictly newer. am_sleep_pid= if grep 'slept: no' conftest.file >/dev/null 2>&1; then ( sleep 1 ) & am_sleep_pid=$! fi rm -f conftest.file test "$program_prefix" != NONE && program_transform_name="s&^&$program_prefix&;$program_transform_name" # Use a double $ so make ignores it. 
test "$program_suffix" != NONE && program_transform_name="s&\$&$program_suffix&;$program_transform_name" # Double any \ or $. # By default was `s,x,x', remove it if useless. ac_script='s/[\\$]/&&/g;s/;s,x,x,$//' program_transform_name=`$as_echo "$program_transform_name" | sed "$ac_script"` # Expand $ac_aux_dir to an absolute path. am_aux_dir=`cd "$ac_aux_dir" && pwd` if test x"${MISSING+set}" != xset; then case $am_aux_dir in *\ * | *\ *) MISSING="\${SHELL} \"$am_aux_dir/missing\"" ;; *) MISSING="\${SHELL} $am_aux_dir/missing" ;; esac fi # Use eval to expand $SHELL if eval "$MISSING --is-lightweight"; then am_missing_run="$MISSING " else am_missing_run= { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: 'missing' script is too old or missing" >&5 $as_echo "$as_me: WARNING: 'missing' script is too old or missing" >&2;} fi if test x"${install_sh+set}" != xset; then case $am_aux_dir in *\ * | *\ *) install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;; *) install_sh="\${SHELL} $am_aux_dir/install-sh" esac fi # Installed binaries are usually stripped using 'strip' when the user # run "make install-strip". However 'strip' might not be the right # tool to use in cross-compilation environments, therefore Automake # will honor the 'STRIP' environment variable to overrule this program. if test "$cross_compiling" != no; then if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args. set dummy ${ac_tool_prefix}strip; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_STRIP+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$STRIP"; then ac_cv_prog_STRIP="$STRIP" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_STRIP="${ac_tool_prefix}strip" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi STRIP=$ac_cv_prog_STRIP if test -n "$STRIP"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5 $as_echo "$STRIP" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_STRIP"; then ac_ct_STRIP=$STRIP # Extract the first word of "strip", so it can be a program name with args. set dummy strip; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_STRIP+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_STRIP"; then ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_STRIP="strip" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP if test -n "$ac_ct_STRIP"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5 $as_echo "$ac_ct_STRIP" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_STRIP" = x; then STRIP=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac STRIP=$ac_ct_STRIP fi else STRIP="$ac_cv_prog_STRIP" fi fi INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for a thread-safe mkdir -p" >&5 $as_echo_n "checking for a thread-safe mkdir -p... " >&6; } if test -z "$MKDIR_P"; then if ${ac_cv_path_mkdir+:} false; then : $as_echo_n "(cached) " >&6 else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH$PATH_SEPARATOR/opt/sfw/bin do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in mkdir gmkdir; do for ac_exec_ext in '' $ac_executable_extensions; do as_fn_executable_p "$as_dir/$ac_prog$ac_exec_ext" || continue case `"$as_dir/$ac_prog$ac_exec_ext" --version 2>&1` in #( 'mkdir (GNU coreutils) '* | \ 'mkdir (coreutils) '* | \ 'mkdir (fileutils) '4.1*) ac_cv_path_mkdir=$as_dir/$ac_prog$ac_exec_ext break 3;; esac done done done IFS=$as_save_IFS fi test -d ./--version && rmdir ./--version if test "${ac_cv_path_mkdir+set}" = set; then MKDIR_P="$ac_cv_path_mkdir -p" else # As a last resort, use the slow shell script. Don't cache a # value for MKDIR_P within a source directory, because that will # break other packages using the cache if that directory is # removed, or if the value is a relative name. MKDIR_P="$ac_install_sh -d" fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MKDIR_P" >&5 $as_echo "$MKDIR_P" >&6; } for ac_prog in gawk mawk nawk awk do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_AWK+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$AWK"; then ac_cv_prog_AWK="$AWK" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_AWK="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi AWK=$ac_cv_prog_AWK if test -n "$AWK"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AWK" >&5 $as_echo "$AWK" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$AWK" && break done { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} sets \$(MAKE)" >&5 $as_echo_n "checking whether ${MAKE-make} sets \$(MAKE)... 
" >&6; } set x ${MAKE-make} ac_make=`$as_echo "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'` if eval \${ac_cv_prog_make_${ac_make}_set+:} false; then : $as_echo_n "(cached) " >&6 else cat >conftest.make <<\_ACEOF SHELL = /bin/sh all: @echo '@@@%%%=$(MAKE)=@@@%%%' _ACEOF # GNU make sometimes prints "make[1]: Entering ...", which would confuse us. case `${MAKE-make} -f conftest.make 2>/dev/null` in *@@@%%%=?*=@@@%%%*) eval ac_cv_prog_make_${ac_make}_set=yes;; *) eval ac_cv_prog_make_${ac_make}_set=no;; esac rm -f conftest.make fi if eval test \$ac_cv_prog_make_${ac_make}_set = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } SET_MAKE= else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } SET_MAKE="MAKE=${MAKE-make}" fi rm -rf .tst 2>/dev/null mkdir .tst 2>/dev/null if test -d .tst; then am__leading_dot=. else am__leading_dot=_ fi rmdir .tst 2>/dev/null # Check whether --enable-silent-rules was given. if test "${enable_silent_rules+set}" = set; then : enableval=$enable_silent_rules; fi case $enable_silent_rules in # ((( yes) AM_DEFAULT_VERBOSITY=0;; no) AM_DEFAULT_VERBOSITY=1;; *) AM_DEFAULT_VERBOSITY=1;; esac am_make=${MAKE-make} { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $am_make supports nested variables" >&5 $as_echo_n "checking whether $am_make supports nested variables... " >&6; } if ${am_cv_make_support_nested_variables+:} false; then : $as_echo_n "(cached) " >&6 else if $as_echo 'TRUE=$(BAR$(V)) BAR0=false BAR1=true V=1 am__doit: @$(TRUE) .PHONY: am__doit' | $am_make -f - >/dev/null 2>&1; then am_cv_make_support_nested_variables=yes else am_cv_make_support_nested_variables=no fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_make_support_nested_variables" >&5 $as_echo "$am_cv_make_support_nested_variables" >&6; } if test $am_cv_make_support_nested_variables = yes; then AM_V='$(V)' AM_DEFAULT_V='$(AM_DEFAULT_VERBOSITY)' else AM_V=$AM_DEFAULT_VERBOSITY AM_DEFAULT_V=$AM_DEFAULT_VERBOSITY fi AM_BACKSLASH='\' if test "`cd $srcdir && pwd`" != "`pwd`"; then # Use -I$(srcdir) only when $(srcdir) != ., so that make's output # is not polluted with repeated "-I." am__isrc=' -I$(srcdir)' # test to see if srcdir already configured if test -f $srcdir/config.status; then as_fn_error $? "source directory already configured; run \"make distclean\" there first" "$LINENO" 5 fi fi # test whether we have cygpath if test -z "$CYGPATH_W"; then if (cygpath --version) >/dev/null 2>/dev/null; then CYGPATH_W='cygpath -w' else CYGPATH_W=echo fi fi # Define the identity of the package. PACKAGE='sleuthkit' VERSION='4.2.0' cat >>confdefs.h <<_ACEOF #define PACKAGE "$PACKAGE" _ACEOF cat >>confdefs.h <<_ACEOF #define VERSION "$VERSION" _ACEOF # Some tools Automake needs. ACLOCAL=${ACLOCAL-"${am_missing_run}aclocal-${am__api_version}"} AUTOCONF=${AUTOCONF-"${am_missing_run}autoconf"} AUTOMAKE=${AUTOMAKE-"${am_missing_run}automake-${am__api_version}"} AUTOHEADER=${AUTOHEADER-"${am_missing_run}autoheader"} MAKEINFO=${MAKEINFO-"${am_missing_run}makeinfo"} # For better backward compatibility. To be removed once Automake 1.9.x # dies out for good. For more background, see: # # mkdir_p='$(MKDIR_P)' # We need awk for the "check" target (and possibly the TAP driver). The # system "awk" is bad on some platforms. # Always define AMTAR for backward compatibility. Yes, it's still used # in the wild :-( We should find a proper way to deprecate it ... 
AMTAR='$${TAR-tar}' # We'll loop over all known methods to create a tar archive until one works. _am_tools='gnutar pax cpio none' am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -' # POSIX will say in a future version that running "rm -f" with no argument # is OK; and we want to be able to make that assumption in our Makefile # recipes. So use an aggressive probe to check that the usage we want is # actually supported "in the wild" to an acceptable degree. # See automake bug#10828. # To make any issue more visible, cause the running configure to be aborted # by default if the 'rm' program in use doesn't match our expectations; the # user can still override this though. if rm -f && rm -fr && rm -rf; then : OK; else cat >&2 <<'END' Oops! Your 'rm' program seems unable to run without file operands specified on the command line, even when the '-f' option is present. This is contrary to the behaviour of most rm programs out there, and not conforming with the upcoming POSIX standard: Please tell bug-automake@gnu.org about your system, including the value of your $PATH and any error possibly output before this message. This can help us improve future automake versions. END if test x"$ACCEPT_INFERIOR_RM_PROGRAM" = x"yes"; then echo 'Configuration will proceed anyway, since you have set the' >&2 echo 'ACCEPT_INFERIOR_RM_PROGRAM variable to "yes"' >&2 echo >&2 else cat >&2 <<'END' Aborting the configuration process, to ensure you take notice of the issue. You can download and install GNU coreutils to get an 'rm' implementation that behaves properly: . If you want to complete the configuration process using your problematic 'rm' anyway, export the environment variable ACCEPT_INFERIOR_RM_PROGRAM to "yes", and re-run configure. END as_fn_error $? "Your 'rm' program is bad, sorry." "$LINENO" 5 fi fi # Check whether --with-cppunit-prefix was given. if test "${with_cppunit_prefix+set}" = set; then : withval=$with_cppunit_prefix; cppunit_config_prefix="$withval" else cppunit_config_prefix="" fi # Check whether --with-cppunit-exec-prefix was given. if test "${with_cppunit_exec_prefix+set}" = set; then : withval=$with_cppunit_exec_prefix; cppunit_config_exec_prefix="$withval" else cppunit_config_exec_prefix="" fi if test x$cppunit_config_exec_prefix != x ; then cppunit_config_args="$cppunit_config_args --exec-prefix=$cppunit_config_exec_prefix" if test x${CPPUNIT_CONFIG+set} != xset ; then CPPUNIT_CONFIG=$cppunit_config_exec_prefix/bin/cppunit-config fi fi if test x$cppunit_config_prefix != x ; then cppunit_config_args="$cppunit_config_args --prefix=$cppunit_config_prefix" if test x${CPPUNIT_CONFIG+set} != xset ; then CPPUNIT_CONFIG=$cppunit_config_prefix/bin/cppunit-config fi fi # Extract the first word of "cppunit-config", so it can be a program name with args. set dummy cppunit-config; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_CPPUNIT_CONFIG+:} false; then : $as_echo_n "(cached) " >&6 else case $CPPUNIT_CONFIG in [\\/]* | ?:[\\/]*) ac_cv_path_CPPUNIT_CONFIG="$CPPUNIT_CONFIG" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_CPPUNIT_CONFIG="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS test -z "$ac_cv_path_CPPUNIT_CONFIG" && ac_cv_path_CPPUNIT_CONFIG="no" ;; esac fi CPPUNIT_CONFIG=$ac_cv_path_CPPUNIT_CONFIG if test -n "$CPPUNIT_CONFIG"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CPPUNIT_CONFIG" >&5 $as_echo "$CPPUNIT_CONFIG" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi cppunit_version_min=1.12.1 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Cppunit - version >= $cppunit_version_min" >&5 $as_echo_n "checking for Cppunit - version >= $cppunit_version_min... " >&6; } no_cppunit="" if test "$CPPUNIT_CONFIG" = "no" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } no_cppunit=yes else CPPUNIT_CFLAGS=`$CPPUNIT_CONFIG --cflags` CPPUNIT_LIBS=`$CPPUNIT_CONFIG --libs` cppunit_version=`$CPPUNIT_CONFIG --version` cppunit_major_version=`echo $cppunit_version | \ sed 's/\([0-9]*\).\([0-9]*\).\([0-9]*\)/\1/'` cppunit_minor_version=`echo $cppunit_version | \ sed 's/\([0-9]*\).\([0-9]*\).\([0-9]*\)/\2/'` cppunit_micro_version=`echo $cppunit_version | \ sed 's/\([0-9]*\).\([0-9]*\).\([0-9]*\)/\3/'` cppunit_major_min=`echo $cppunit_version_min | \ sed 's/\([0-9]*\).\([0-9]*\).\([0-9]*\)/\1/'` if test "x${cppunit_major_min}" = "x" ; then cppunit_major_min=0 fi cppunit_minor_min=`echo $cppunit_version_min | \ sed 's/\([0-9]*\).\([0-9]*\).\([0-9]*\)/\2/'` if test "x${cppunit_minor_min}" = "x" ; then cppunit_minor_min=0 fi cppunit_micro_min=`echo $cppunit_version_min | \ sed 's/\([0-9]*\).\([0-9]*\).\([0-9]*\)/\3/'` if test "x${cppunit_micro_min}" = "x" ; then cppunit_micro_min=0 fi cppunit_version_proper=`expr \ $cppunit_major_version \> $cppunit_major_min \| \ $cppunit_major_version \= $cppunit_major_min \& \ $cppunit_minor_version \> $cppunit_minor_min \| \ $cppunit_major_version \= $cppunit_major_min \& \ $cppunit_minor_version \= $cppunit_minor_min \& \ $cppunit_micro_version \>= $cppunit_micro_min ` if test "$cppunit_version_proper" = "1" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $cppunit_major_version.$cppunit_minor_version.$cppunit_micro_version" >&5 $as_echo "$cppunit_major_version.$cppunit_minor_version.$cppunit_micro_version" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } no_cppunit=yes fi fi if test "x$no_cppunit" = x ; then : else CPPUNIT_CFLAGS="" CPPUNIT_LIBS="" : fi if test "x$no_cppunit" = x; then CPPUNIT_TRUE= CPPUNIT_FALSE='#' else CPPUNIT_TRUE='#' CPPUNIT_FALSE= fi case `pwd` in *\ * | *\ *) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&5 $as_echo "$as_me: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&2;} ;; esac macro_version='2.4.6' macro_revision='2.4.6' ltmain=$ac_aux_dir/ltmain.sh # Make sure we can run config.sub. $SHELL "$ac_aux_dir/config.sub" sun4 >/dev/null 2>&1 || as_fn_error $? "cannot run $SHELL $ac_aux_dir/config.sub" "$LINENO" 5 { $as_echo "$as_me:${as_lineno-$LINENO}: checking build system type" >&5 $as_echo_n "checking build system type... 
" >&6; } if ${ac_cv_build+:} false; then : $as_echo_n "(cached) " >&6 else ac_build_alias=$build_alias test "x$ac_build_alias" = x && ac_build_alias=`$SHELL "$ac_aux_dir/config.guess"` test "x$ac_build_alias" = x && as_fn_error $? "cannot guess build type; you must specify one" "$LINENO" 5 ac_cv_build=`$SHELL "$ac_aux_dir/config.sub" $ac_build_alias` || as_fn_error $? "$SHELL $ac_aux_dir/config.sub $ac_build_alias failed" "$LINENO" 5 fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_build" >&5 $as_echo "$ac_cv_build" >&6; } case $ac_cv_build in *-*-*) ;; *) as_fn_error $? "invalid value of canonical build" "$LINENO" 5;; esac build=$ac_cv_build ac_save_IFS=$IFS; IFS='-' set x $ac_cv_build shift build_cpu=$1 build_vendor=$2 shift; shift # Remember, the first character of IFS is used to create $*, # except with old shells: build_os=$* IFS=$ac_save_IFS case $build_os in *\ *) build_os=`echo "$build_os" | sed 's/ /-/g'`;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking host system type" >&5 $as_echo_n "checking host system type... " >&6; } if ${ac_cv_host+:} false; then : $as_echo_n "(cached) " >&6 else if test "x$host_alias" = x; then ac_cv_host=$ac_cv_build else ac_cv_host=`$SHELL "$ac_aux_dir/config.sub" $host_alias` || as_fn_error $? "$SHELL $ac_aux_dir/config.sub $host_alias failed" "$LINENO" 5 fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_host" >&5 $as_echo "$ac_cv_host" >&6; } case $ac_cv_host in *-*-*) ;; *) as_fn_error $? "invalid value of canonical host" "$LINENO" 5;; esac host=$ac_cv_host ac_save_IFS=$IFS; IFS='-' set x $ac_cv_host shift host_cpu=$1 host_vendor=$2 shift; shift # Remember, the first character of IFS is used to create $*, # except with old shells: host_os=$* IFS=$ac_save_IFS case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac # Backslashify metacharacters that are still active within # double-quoted strings. sed_quote_subst='s/\(["`$\\]\)/\\\1/g' # Same as above, but do not quote variable references. double_quote_subst='s/\(["`\\]\)/\\\1/g' # Sed substitution to delay expansion of an escaped shell variable in a # double_quote_subst'ed string. delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g' # Sed substitution to delay expansion of an escaped single quote. delay_single_quote_subst='s/'\''/'\'\\\\\\\'\''/g' # Sed substitution to avoid accidental globbing in evaled expressions no_glob_subst='s/\*/\\\*/g' ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to print strings" >&5 $as_echo_n "checking how to print strings... " >&6; } # Test print first, because it will be a builtin if present. if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \ test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then ECHO='print -r --' elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then ECHO='printf %s\n' else # Use this function as a fallback that always works. func_fallback_echo () { eval 'cat <<_LTECHO_EOF $1 _LTECHO_EOF' } ECHO='func_fallback_echo' fi # func_echo_all arg... # Invoke $ECHO with all args, space-separated. 
func_echo_all () { $ECHO "" } case $ECHO in printf*) { $as_echo "$as_me:${as_lineno-$LINENO}: result: printf" >&5 $as_echo "printf" >&6; } ;; print*) { $as_echo "$as_me:${as_lineno-$LINENO}: result: print -r" >&5 $as_echo "print -r" >&6; } ;; *) { $as_echo "$as_me:${as_lineno-$LINENO}: result: cat" >&5 $as_echo "cat" >&6; } ;; esac DEPDIR="${am__leading_dot}deps" ac_config_commands="$ac_config_commands depfiles" am_make=${MAKE-make} cat > confinc << 'END' am__doit: @echo this is the am__doit target .PHONY: am__doit END # If we don't find an include directive, just comment out the code. { $as_echo "$as_me:${as_lineno-$LINENO}: checking for style of include used by $am_make" >&5 $as_echo_n "checking for style of include used by $am_make... " >&6; } am__include="#" am__quote= _am_result=none # First try GNU make style include. echo "include confinc" > confmf # Ignore all kinds of additional output from 'make'. case `$am_make -s -f confmf 2> /dev/null` in #( *the\ am__doit\ target*) am__include=include am__quote= _am_result=GNU ;; esac # Now try BSD make style include. if test "$am__include" = "#"; then echo '.include "confinc"' > confmf case `$am_make -s -f confmf 2> /dev/null` in #( *the\ am__doit\ target*) am__include=.include am__quote="\"" _am_result=BSD ;; esac fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $_am_result" >&5 $as_echo "$_am_result" >&6; } rm -f confinc confmf # Check whether --enable-dependency-tracking was given. if test "${enable_dependency_tracking+set}" = set; then : enableval=$enable_dependency_tracking; fi if test "x$enable_dependency_tracking" != xno; then am_depcomp="$ac_aux_dir/depcomp" AMDEPBACKSLASH='\' am__nodep='_no' fi if test "x$enable_dependency_tracking" != xno; then AMDEP_TRUE= AMDEP_FALSE='#' else AMDEP_TRUE='#' AMDEP_FALSE= fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. set dummy ${ac_tool_prefix}gcc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="${ac_tool_prefix}gcc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_CC"; then ac_ct_CC=$CC # Extract the first word of "gcc", so it can be a program name with args. set dummy gcc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CC"; then ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. 
else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_CC="gcc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CC=$ac_cv_prog_ac_ct_CC if test -n "$ac_ct_CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 $as_echo "$ac_ct_CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_CC" = x; then CC="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CC=$ac_ct_CC fi else CC="$ac_cv_prog_CC" fi if test -z "$CC"; then if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. set dummy ${ac_tool_prefix}cc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="${ac_tool_prefix}cc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi fi if test -z "$CC"; then # Extract the first word of "cc", so it can be a program name with args. set dummy cc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else ac_prog_rejected=no as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then ac_prog_rejected=yes continue fi ac_cv_prog_CC="cc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS if test $ac_prog_rejected = yes; then # We found a bogon in the path, so make sure we never use it. set dummy $ac_cv_prog_CC shift if test $# != 0; then # We chose a different compiler from the bogus one. # However, it has the same basename, so the bogon will be chosen # first if we set CC to just the basename; use the full file name. 
shift ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@" fi fi fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$CC"; then if test -n "$ac_tool_prefix"; then for ac_prog in cl.exe do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. set dummy $ac_tool_prefix$ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="$ac_tool_prefix$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$CC" && break done fi if test -z "$CC"; then ac_ct_CC=$CC for ac_prog in cl.exe do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CC"; then ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_CC="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CC=$ac_cv_prog_ac_ct_CC if test -n "$ac_ct_CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 $as_echo "$ac_ct_CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$ac_ct_CC" && break done if test "x$ac_ct_CC" = x; then CC="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CC=$ac_ct_CC fi fi fi test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "no acceptable C compiler found in \$PATH See \`config.log' for more details" "$LINENO" 5; } # Provide some information about the compiler. $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 set X $ac_compile ac_compiler=$2 for ac_option in --version -v -V -qversion; do { { ac_try="$ac_compiler $ac_option >&5" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compiler $ac_option >&5") 2>conftest.err ac_status=$? 
if test -s conftest.err; then sed '10a\ ... rest of stderr output deleted ... 10q' conftest.err >conftest.er1 cat conftest.er1 >&5 fi rm -f conftest.er1 conftest.err $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } done cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF ac_clean_files_save=$ac_clean_files ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out" # Try to create an executable without -o first, disregard a.out. # It will help us diagnose broken compilers, and finding out an intuition # of exeext. { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5 $as_echo_n "checking whether the C compiler works... " >&6; } ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` # The possible output files: ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*" ac_rmfiles= for ac_file in $ac_files do case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; * ) ac_rmfiles="$ac_rmfiles $ac_file";; esac done rm -f $ac_rmfiles if { { ac_try="$ac_link_default" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link_default") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then : # Autoconf-2.13 could set the ac_cv_exeext variable to `no'. # So ignore a value of `no', otherwise this would lead to `EXEEXT = no' # in a Makefile. We should not override ac_cv_exeext if it was cached, # so that the user can short-circuit this test for compilers unknown to # Autoconf. for ac_file in $ac_files '' do test -f "$ac_file" || continue case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; [ab].out ) # We found the default executable, but exeext='' is most # certainly right. break;; *.* ) if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no; then :; else ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` fi # We set ac_cv_exeext here because the later test for it is not # safe: cross compilers may not add the suffix if given an `-o' # argument, so we may need to know it at that point already. # Even if this section looks crufty: it has the advantage of # actually working. break;; * ) break;; esac done test "$ac_cv_exeext" = no && ac_cv_exeext= else ac_file='' fi if test -z "$ac_file"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error 77 "C compiler cannot create executables See \`config.log' for more details" "$LINENO" 5; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5 $as_echo_n "checking for C compiler default output file name... 
" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5 $as_echo "$ac_file" >&6; } ac_exeext=$ac_cv_exeext rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out ac_clean_files=$ac_clean_files_save { $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5 $as_echo_n "checking for suffix of executables... " >&6; } if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then : # If both `conftest.exe' and `conftest' are `present' (well, observable) # catch `conftest.exe'. For instance with Cygwin, `ls conftest' will # work properly (i.e., refer to `conftest.exe'), while it won't with # `rm'. for ac_file in conftest.exe conftest conftest.*; do test -f "$ac_file" || continue case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` break;; * ) break;; esac done else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "cannot compute suffix of executables: cannot compile and link See \`config.log' for more details" "$LINENO" 5; } fi rm -f conftest conftest$ac_cv_exeext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5 $as_echo "$ac_cv_exeext" >&6; } rm -f conftest.$ac_ext EXEEXT=$ac_cv_exeext ac_exeext=$EXEEXT cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { FILE *f = fopen ("conftest.out", "w"); return ferror (f) || fclose (f) != 0; ; return 0; } _ACEOF ac_clean_files="$ac_clean_files conftest.out" # Check that the compiler produces executables we can run. If not, either # the compiler is broken, or we cross compile. { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5 $as_echo_n "checking whether we are cross compiling... " >&6; } if test "$cross_compiling" != yes; then { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } if { ac_try='./conftest$ac_cv_exeext' { { case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; }; then cross_compiling=no else if test "$cross_compiling" = maybe; then cross_compiling=yes else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "cannot run C compiled programs. If you meant to cross compile, use \`--host'. 
See \`config.log' for more details" "$LINENO" 5; } fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5 $as_echo "$cross_compiling" >&6; } rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out ac_clean_files=$ac_clean_files_save { $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5 $as_echo_n "checking for suffix of object files... " >&6; } if ${ac_cv_objext+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF rm -f conftest.o conftest.obj if { { ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compile") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then : for ac_file in conftest.o conftest.obj conftest.*; do test -f "$ac_file" || continue; case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;; *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'` break;; esac done else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "cannot compute suffix of object files: cannot compile See \`config.log' for more details" "$LINENO" 5; } fi rm -f conftest.$ac_cv_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5 $as_echo "$ac_cv_objext" >&6; } OBJEXT=$ac_cv_objext ac_objext=$OBJEXT { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5 $as_echo_n "checking whether we are using the GNU C compiler... " >&6; } if ${ac_cv_c_compiler_gnu+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { #ifndef __GNUC__ choke me #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_compiler_gnu=yes else ac_compiler_gnu=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_c_compiler_gnu=$ac_compiler_gnu fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 $as_echo "$ac_cv_c_compiler_gnu" >&6; } if test $ac_compiler_gnu = yes; then GCC=yes else GCC= fi ac_test_CFLAGS=${CFLAGS+set} ac_save_CFLAGS=$CFLAGS { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 $as_echo_n "checking whether $CC accepts -g... " >&6; } if ${ac_cv_prog_cc_g+:} false; then : $as_echo_n "(cached) " >&6 else ac_save_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes ac_cv_prog_cc_g=no CFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_g=yes else CFLAGS="" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : else ac_c_werror_flag=$ac_save_c_werror_flag CFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_g=yes fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_c_werror_flag=$ac_save_c_werror_flag fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5 $as_echo "$ac_cv_prog_cc_g" >&6; } if test "$ac_test_CFLAGS" = set; then CFLAGS=$ac_save_CFLAGS elif test $ac_cv_prog_cc_g = yes; then if test "$GCC" = yes; then CFLAGS="-g -O2" else CFLAGS="-g" fi else if test "$GCC" = yes; then CFLAGS="-O2" else CFLAGS= fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5 $as_echo_n "checking for $CC option to accept ISO C89... " >&6; } if ${ac_cv_prog_cc_c89+:} false; then : $as_echo_n "(cached) " >&6 else ac_cv_prog_cc_c89=no ac_save_CC=$CC cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <stdarg.h> #include <stdio.h> struct stat; /* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */ struct buf { int x; }; FILE * (*rcsopen) (struct buf *, struct stat *, int); static char *e (p, i) char **p; int i; { return p[i]; } static char *f (char * (*g) (char **, int), char **p, ...) { char *s; va_list v; va_start (v,p); s = g (p, va_arg (v,int)); va_end (v); return s; } /* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has function prototypes and stuff, but not '\xHH' hex character constants. These don't provoke an error unfortunately, instead are silently treated as 'x'. The following induces an error, until -std is added to get proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an array size at least. It's necessary to write '\x00'==0 to get something that's true only with -std. */ int osf4_cc_array ['\x00' == 0 ? 1 : -1]; /* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters inside strings and character constants. */ #define FOO(x) 'x' int xlc6_cc_array[FOO(a) == 'x' ? 
1 : -1]; int test (int i, double x); struct s1 {int (*f) (int a);}; struct s2 {int (*f) (double a);}; int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); int argc; char **argv; int main () { return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; ; return 0; } _ACEOF for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" do CC="$ac_save_CC $ac_arg" if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_c89=$ac_arg fi rm -f core conftest.err conftest.$ac_objext test "x$ac_cv_prog_cc_c89" != "xno" && break done rm -f conftest.$ac_ext CC=$ac_save_CC fi # AC_CACHE_VAL case "x$ac_cv_prog_cc_c89" in x) { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 $as_echo "none needed" >&6; } ;; xno) { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 $as_echo "unsupported" >&6; } ;; *) CC="$CC $ac_cv_prog_cc_c89" { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 $as_echo "$ac_cv_prog_cc_c89" >&6; } ;; esac if test "x$ac_cv_prog_cc_c89" != xno; then : fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC understands -c and -o together" >&5 $as_echo_n "checking whether $CC understands -c and -o together... " >&6; } if ${am_cv_prog_cc_c_o+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF # Make sure it works both with $CC and with simple cc. # Following AC_PROG_CC_C_O, we do the test twice because some # compilers refuse to overwrite an existing .o file with -o, # though they will create one. am_cv_prog_cc_c_o=yes for am_i in 1 2; do if { echo "$as_me:$LINENO: $CC -c conftest.$ac_ext -o conftest2.$ac_objext" >&5 ($CC -c conftest.$ac_ext -o conftest2.$ac_objext) >&5 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } \ && test -f conftest2.$ac_objext; then : OK else am_cv_prog_cc_c_o=no break fi done rm -f core conftest* unset am_i fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_prog_cc_c_o" >&5 $as_echo "$am_cv_prog_cc_c_o" >&6; } if test "$am_cv_prog_cc_c_o" != yes; then # Losing compiler, so override with the script. # FIXME: It is wrong to rewrite CC. # But if we don't then we get into trouble of one sort or another. # A longer-term fix would be to have automake use am__CC in this case, # and then we could set am__CC="\$(top_srcdir)/compile \$(CC)" CC="$am_aux_dir/compile $CC" fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu depcc="$CC" am_compiler_list= { $as_echo "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5 $as_echo_n "checking dependency style of $depcc... " >&6; } if ${am_cv_CC_dependencies_compiler_type+:} false; then : $as_echo_n "(cached) " >&6 else if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then # We make a subdir and do the tests there. 
Otherwise we can end up # making bogus files that we don't know about and never remove. For # instance it was reported that on HP-UX the gcc test will end up # making a dummy file named 'D' -- because '-MD' means "put the output # in D". rm -rf conftest.dir mkdir conftest.dir # Copy depcomp to subdir because otherwise we won't find it if we're # using a relative directory. cp "$am_depcomp" conftest.dir cd conftest.dir # We will build objects and dependencies in a subdirectory because # it helps to detect inapplicable dependency modes. For instance # both Tru64's cc and ICC support -MD to output dependencies as a # side effect of compilation, but ICC will put the dependencies in # the current directory while Tru64 will put them in the object # directory. mkdir sub am_cv_CC_dependencies_compiler_type=none if test "$am_compiler_list" = ""; then am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp` fi am__universal=false case " $depcc " in #( *\ -arch\ *\ -arch\ *) am__universal=true ;; esac for depmode in $am_compiler_list; do # Setup a source with many dependencies, because some compilers # like to wrap large dependency lists on column 80 (with \), and # we should not choose a depcomp mode which is confused by this. # # We need to recreate these files for each test, as the compiler may # overwrite some of them when testing with obscure command lines. # This happens at least with the AIX C compiler. : > sub/conftest.c for i in 1 2 3 4 5 6; do echo '#include "conftst'$i'.h"' >> sub/conftest.c # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with # Solaris 10 /bin/sh. echo '/* dummy */' > sub/conftst$i.h done echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf # We check with '-c' and '-o' for the sake of the "dashmstdout" # mode. It turns out that the SunPro C++ compiler does not properly # handle '-M -o', and we need to detect this. Also, some Intel # versions had trouble with output in subdirs. am__obj=sub/conftest.${OBJEXT-o} am__minus_obj="-o $am__obj" case $depmode in gcc) # This depmode causes a compiler race in universal mode. test "$am__universal" = false || continue ;; nosideeffect) # After this tag, mechanisms are not by side-effect, so they'll # only be used when explicitly requested. if test "x$enable_dependency_tracking" = xyes; then continue else break fi ;; msvc7 | msvc7msys | msvisualcpp | msvcmsys) # This compiler won't grok '-c -o', but also, the minuso test has # not run yet. These depmodes are late enough in the game, and # so weak that their functioning should not be impacted. am__obj=conftest.${OBJEXT-o} am__minus_obj= ;; none) break ;; esac if depmode=$depmode \ source=sub/conftest.c object=$am__obj \ depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ >/dev/null 2>conftest.err && grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && grep $am__obj sub/conftest.Po > /dev/null 2>&1 && ${MAKE-make} -s -f confmf > /dev/null 2>&1; then # icc doesn't choke on unknown options, it will just issue warnings # or remarks (even with -Werror). So we grep stderr for any message # that says an option was ignored or not supported. 
# When given -MP, icc 7.0 and 7.1 complain thusly: # icc: Command line warning: ignoring option '-M'; no argument required # The diagnosis changed in icc 8.0: # icc: Command line remark: option '-MP' not supported if (grep 'ignoring option' conftest.err || grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else am_cv_CC_dependencies_compiler_type=$depmode break fi fi done cd .. rm -rf conftest.dir else am_cv_CC_dependencies_compiler_type=none fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_CC_dependencies_compiler_type" >&5 $as_echo "$am_cv_CC_dependencies_compiler_type" >&6; } CCDEPMODE=depmode=$am_cv_CC_dependencies_compiler_type if test "x$enable_dependency_tracking" != xno \ && test "$am_cv_CC_dependencies_compiler_type" = gcc3; then am__fastdepCC_TRUE= am__fastdepCC_FALSE='#' else am__fastdepCC_TRUE='#' am__fastdepCC_FALSE= fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for a sed that does not truncate output" >&5 $as_echo_n "checking for a sed that does not truncate output... " >&6; } if ${ac_cv_path_SED+:} false; then : $as_echo_n "(cached) " >&6 else ac_script=s/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb/ for ac_i in 1 2 3 4 5 6 7; do ac_script="$ac_script$as_nl$ac_script" done echo "$ac_script" 2>/dev/null | sed 99q >conftest.sed { ac_script=; unset ac_script;} if test -z "$SED"; then ac_path_SED_found=false # Loop through the user's path and test for each of PROGNAME-LIST as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in sed gsed; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_SED="$as_dir/$ac_prog$ac_exec_ext" as_fn_executable_p "$ac_path_SED" || continue # Check for GNU ac_path_SED and select it if it is found. # Check for GNU $ac_path_SED case `"$ac_path_SED" --version 2>&1` in *GNU*) ac_cv_path_SED="$ac_path_SED" ac_path_SED_found=:;; *) ac_count=0 $as_echo_n 0123456789 >"conftest.in" while : do cat "conftest.in" "conftest.in" >"conftest.tmp" mv "conftest.tmp" "conftest.in" cp "conftest.in" "conftest.nl" $as_echo '' >> "conftest.nl" "$ac_path_SED" -f conftest.sed < "conftest.nl" >"conftest.out" 2>/dev/null || break diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break as_fn_arith $ac_count + 1 && ac_count=$as_val if test $ac_count -gt ${ac_path_SED_max-0}; then # Best one so far, save it but keep looking for a better one ac_cv_path_SED="$ac_path_SED" ac_path_SED_max=$ac_count fi # 10*(2^10) chars as input seems more than enough test $ac_count -gt 10 && break done rm -f conftest.in conftest.tmp conftest.nl conftest.out;; esac $ac_path_SED_found && break 3 done done done IFS=$as_save_IFS if test -z "$ac_cv_path_SED"; then as_fn_error $? "no acceptable sed could be found in \$PATH" "$LINENO" 5 fi else ac_cv_path_SED=$SED fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_SED" >&5 $as_echo "$ac_cv_path_SED" >&6; } SED="$ac_cv_path_SED" rm -f conftest.sed test -z "$SED" && SED=sed Xsed="$SED -e 1s/^X//" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5 $as_echo_n "checking for grep that handles long lines and -e... " >&6; } if ${ac_cv_path_GREP+:} false; then : $as_echo_n "(cached) " >&6 else if test -z "$GREP"; then ac_path_GREP_found=false # Loop through the user's path and test for each of PROGNAME-LIST as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_prog in grep ggrep; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext" as_fn_executable_p "$ac_path_GREP" || continue # Check for GNU ac_path_GREP and select it if it is found. # Check for GNU $ac_path_GREP case `"$ac_path_GREP" --version 2>&1` in *GNU*) ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;; *) ac_count=0 $as_echo_n 0123456789 >"conftest.in" while : do cat "conftest.in" "conftest.in" >"conftest.tmp" mv "conftest.tmp" "conftest.in" cp "conftest.in" "conftest.nl" $as_echo 'GREP' >> "conftest.nl" "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break as_fn_arith $ac_count + 1 && ac_count=$as_val if test $ac_count -gt ${ac_path_GREP_max-0}; then # Best one so far, save it but keep looking for a better one ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_max=$ac_count fi # 10*(2^10) chars as input seems more than enough test $ac_count -gt 10 && break done rm -f conftest.in conftest.tmp conftest.nl conftest.out;; esac $ac_path_GREP_found && break 3 done done done IFS=$as_save_IFS if test -z "$ac_cv_path_GREP"; then as_fn_error $? "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 fi else ac_cv_path_GREP=$GREP fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5 $as_echo "$ac_cv_path_GREP" >&6; } GREP="$ac_cv_path_GREP" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5 $as_echo_n "checking for egrep... " >&6; } if ${ac_cv_path_EGREP+:} false; then : $as_echo_n "(cached) " >&6 else if echo a | $GREP -E '(a|b)' >/dev/null 2>&1 then ac_cv_path_EGREP="$GREP -E" else if test -z "$EGREP"; then ac_path_EGREP_found=false # Loop through the user's path and test for each of PROGNAME-LIST as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in egrep; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext" as_fn_executable_p "$ac_path_EGREP" || continue # Check for GNU ac_path_EGREP and select it if it is found. # Check for GNU $ac_path_EGREP case `"$ac_path_EGREP" --version 2>&1` in *GNU*) ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;; *) ac_count=0 $as_echo_n 0123456789 >"conftest.in" while : do cat "conftest.in" "conftest.in" >"conftest.tmp" mv "conftest.tmp" "conftest.in" cp "conftest.in" "conftest.nl" $as_echo 'EGREP' >> "conftest.nl" "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break as_fn_arith $ac_count + 1 && ac_count=$as_val if test $ac_count -gt ${ac_path_EGREP_max-0}; then # Best one so far, save it but keep looking for a better one ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_max=$ac_count fi # 10*(2^10) chars as input seems more than enough test $ac_count -gt 10 && break done rm -f conftest.in conftest.tmp conftest.nl conftest.out;; esac $ac_path_EGREP_found && break 3 done done done IFS=$as_save_IFS if test -z "$ac_cv_path_EGREP"; then as_fn_error $? "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 fi else ac_cv_path_EGREP=$EGREP fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5 $as_echo "$ac_cv_path_EGREP" >&6; } EGREP="$ac_cv_path_EGREP" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for fgrep" >&5 $as_echo_n "checking for fgrep... 
" >&6; } if ${ac_cv_path_FGREP+:} false; then : $as_echo_n "(cached) " >&6 else if echo 'ab*c' | $GREP -F 'ab*c' >/dev/null 2>&1 then ac_cv_path_FGREP="$GREP -F" else if test -z "$FGREP"; then ac_path_FGREP_found=false # Loop through the user's path and test for each of PROGNAME-LIST as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in fgrep; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_FGREP="$as_dir/$ac_prog$ac_exec_ext" as_fn_executable_p "$ac_path_FGREP" || continue # Check for GNU ac_path_FGREP and select it if it is found. # Check for GNU $ac_path_FGREP case `"$ac_path_FGREP" --version 2>&1` in *GNU*) ac_cv_path_FGREP="$ac_path_FGREP" ac_path_FGREP_found=:;; *) ac_count=0 $as_echo_n 0123456789 >"conftest.in" while : do cat "conftest.in" "conftest.in" >"conftest.tmp" mv "conftest.tmp" "conftest.in" cp "conftest.in" "conftest.nl" $as_echo 'FGREP' >> "conftest.nl" "$ac_path_FGREP" FGREP < "conftest.nl" >"conftest.out" 2>/dev/null || break diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break as_fn_arith $ac_count + 1 && ac_count=$as_val if test $ac_count -gt ${ac_path_FGREP_max-0}; then # Best one so far, save it but keep looking for a better one ac_cv_path_FGREP="$ac_path_FGREP" ac_path_FGREP_max=$ac_count fi # 10*(2^10) chars as input seems more than enough test $ac_count -gt 10 && break done rm -f conftest.in conftest.tmp conftest.nl conftest.out;; esac $ac_path_FGREP_found && break 3 done done done IFS=$as_save_IFS if test -z "$ac_cv_path_FGREP"; then as_fn_error $? "no acceptable fgrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 fi else ac_cv_path_FGREP=$FGREP fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_FGREP" >&5 $as_echo "$ac_cv_path_FGREP" >&6; } FGREP="$ac_cv_path_FGREP" test -z "$GREP" && GREP=grep # Check whether --with-gnu-ld was given. if test "${with_gnu_ld+set}" = set; then : withval=$with_gnu_ld; test no = "$withval" || with_gnu_ld=yes else with_gnu_ld=no fi ac_prog=ld if test yes = "$GCC"; then # Check if gcc -print-prog-name=ld gives a path. { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ld used by $CC" >&5 $as_echo_n "checking for ld used by $CC... " >&6; } case $host in *-*-mingw*) # gcc leaves a trailing carriage return, which upsets mingw ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; *) ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; esac case $ac_prog in # Accept absolute paths. [\\/]* | ?:[\\/]*) re_direlt='/[^/][^/]*/\.\./' # Canonicalize the pathname of ld ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'` while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"` done test -z "$LD" && LD=$ac_prog ;; "") # If it fails, then pretend we aren't using GCC. ac_prog=ld ;; *) # If it is relative, then search for the first ld in PATH. with_gnu_ld=unknown ;; esac elif test yes = "$with_gnu_ld"; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5 $as_echo_n "checking for GNU ld... " >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5 $as_echo_n "checking for non-GNU ld... " >&6; } fi if ${lt_cv_path_LD+:} false; then : $as_echo_n "(cached) " >&6 else if test -z "$LD"; then lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR for ac_dir in $PATH; do IFS=$lt_save_ifs test -z "$ac_dir" && ac_dir=. 
if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then lt_cv_path_LD=$ac_dir/$ac_prog # Check to see if the program is GNU ld. I'd rather use --version, # but apparently some variants of GNU ld only accept -v. # Break only if it was the GNU/non-GNU ld that we prefer. case `"$lt_cv_path_LD" -v 2>&1 &5 $as_echo "$LD" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -z "$LD" && as_fn_error $? "no acceptable ld found in \$PATH" "$LINENO" 5 { $as_echo "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5 $as_echo_n "checking if the linker ($LD) is GNU ld... " >&6; } if ${lt_cv_prog_gnu_ld+:} false; then : $as_echo_n "(cached) " >&6 else # I'd rather use --version here, but apparently some GNU lds only accept -v. case `$LD -v 2>&1 &5 $as_echo "$lt_cv_prog_gnu_ld" >&6; } with_gnu_ld=$lt_cv_prog_gnu_ld { $as_echo "$as_me:${as_lineno-$LINENO}: checking for BSD- or MS-compatible name lister (nm)" >&5 $as_echo_n "checking for BSD- or MS-compatible name lister (nm)... " >&6; } if ${lt_cv_path_NM+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$NM"; then # Let the user override the test. lt_cv_path_NM=$NM else lt_nm_to_check=${ac_tool_prefix}nm if test -n "$ac_tool_prefix" && test "$build" = "$host"; then lt_nm_to_check="$lt_nm_to_check nm" fi for lt_tmp_nm in $lt_nm_to_check; do lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR for ac_dir in $PATH /usr/ccs/bin/elf /usr/ccs/bin /usr/ucb /bin; do IFS=$lt_save_ifs test -z "$ac_dir" && ac_dir=. tmp_nm=$ac_dir/$lt_tmp_nm if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext"; then # Check to see if the nm accepts a BSD-compat flag. # Adding the 'sed 1q' prevents false positives on HP-UX, which says: # nm: unknown option "B" ignored # Tru64's nm complains that /dev/null is an invalid object file # MSYS converts /dev/null to NUL, MinGW nm treats NUL as empty case $build_os in mingw*) lt_bad_file=conftest.nm/nofile ;; *) lt_bad_file=/dev/null ;; esac case `"$tmp_nm" -B $lt_bad_file 2>&1 | sed '1q'` in *$lt_bad_file* | *'Invalid file or object type'*) lt_cv_path_NM="$tmp_nm -B" break 2 ;; *) case `"$tmp_nm" -p /dev/null 2>&1 | sed '1q'` in */dev/null*) lt_cv_path_NM="$tmp_nm -p" break 2 ;; *) lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but continue # so that we can try to find one that supports BSD flags ;; esac ;; esac fi done IFS=$lt_save_ifs done : ${lt_cv_path_NM=no} fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_NM" >&5 $as_echo "$lt_cv_path_NM" >&6; } if test no != "$lt_cv_path_NM"; then NM=$lt_cv_path_NM else # Didn't find any BSD compatible name lister, look for dumpbin. if test -n "$DUMPBIN"; then : # Let the user override the test. else if test -n "$ac_tool_prefix"; then for ac_prog in dumpbin "link -dump" do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. set dummy $ac_tool_prefix$ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_DUMPBIN+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$DUMPBIN"; then ac_cv_prog_DUMPBIN="$DUMPBIN" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_DUMPBIN="$ac_tool_prefix$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi DUMPBIN=$ac_cv_prog_DUMPBIN if test -n "$DUMPBIN"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DUMPBIN" >&5 $as_echo "$DUMPBIN" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$DUMPBIN" && break done fi if test -z "$DUMPBIN"; then ac_ct_DUMPBIN=$DUMPBIN for ac_prog in dumpbin "link -dump" do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_DUMPBIN+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_DUMPBIN"; then ac_cv_prog_ac_ct_DUMPBIN="$ac_ct_DUMPBIN" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_DUMPBIN="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_DUMPBIN=$ac_cv_prog_ac_ct_DUMPBIN if test -n "$ac_ct_DUMPBIN"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DUMPBIN" >&5 $as_echo "$ac_ct_DUMPBIN" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$ac_ct_DUMPBIN" && break done if test "x$ac_ct_DUMPBIN" = x; then DUMPBIN=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac DUMPBIN=$ac_ct_DUMPBIN fi fi case `$DUMPBIN -symbols -headers /dev/null 2>&1 | sed '1q'` in *COFF*) DUMPBIN="$DUMPBIN -symbols -headers" ;; *) DUMPBIN=: ;; esac fi if test : != "$DUMPBIN"; then NM=$DUMPBIN fi fi test -z "$NM" && NM=nm { $as_echo "$as_me:${as_lineno-$LINENO}: checking the name lister ($NM) interface" >&5 $as_echo_n "checking the name lister ($NM) interface... " >&6; } if ${lt_cv_nm_interface+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_nm_interface="BSD nm" echo "int some_variable = 0;" > conftest.$ac_ext (eval echo "\"\$as_me:$LINENO: $ac_compile\"" >&5) (eval "$ac_compile" 2>conftest.err) cat conftest.err >&5 (eval echo "\"\$as_me:$LINENO: $NM \\\"conftest.$ac_objext\\\"\"" >&5) (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out) cat conftest.err >&5 (eval echo "\"\$as_me:$LINENO: output\"" >&5) cat conftest.out >&5 if $GREP 'External.*some_variable' conftest.out > /dev/null; then lt_cv_nm_interface="MS dumpbin" fi rm -f conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_nm_interface" >&5 $as_echo "$lt_cv_nm_interface" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ln -s works" >&5 $as_echo_n "checking whether ln -s works... 
" >&6; } LN_S=$as_ln_s if test "$LN_S" = "ln -s"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no, using $LN_S" >&5 $as_echo "no, using $LN_S" >&6; } fi # find the maximum length of command line arguments { $as_echo "$as_me:${as_lineno-$LINENO}: checking the maximum length of command line arguments" >&5 $as_echo_n "checking the maximum length of command line arguments... " >&6; } if ${lt_cv_sys_max_cmd_len+:} false; then : $as_echo_n "(cached) " >&6 else i=0 teststring=ABCD case $build_os in msdosdjgpp*) # On DJGPP, this test can blow up pretty badly due to problems in libc # (any single argument exceeding 2000 bytes causes a buffer overrun # during glob expansion). Even if it were fixed, the result of this # check would be larger than it should be. lt_cv_sys_max_cmd_len=12288; # 12K is about right ;; gnu*) # Under GNU Hurd, this test is not required because there is # no limit to the length of command line arguments. # Libtool will interpret -1 as no limit whatsoever lt_cv_sys_max_cmd_len=-1; ;; cygwin* | mingw* | cegcc*) # On Win9x/ME, this test blows up -- it succeeds, but takes # about 5 minutes as the teststring grows exponentially. # Worse, since 9x/ME are not pre-emptively multitasking, # you end up with a "frozen" computer, even though with patience # the test eventually succeeds (with a max line length of 256k). # Instead, let's just punt: use the minimum linelength reported by # all of the supported platforms: 8192 (on NT/2K/XP). lt_cv_sys_max_cmd_len=8192; ;; mint*) # On MiNT this can take a long time and run out of memory. lt_cv_sys_max_cmd_len=8192; ;; amigaos*) # On AmigaOS with pdksh, this test takes hours, literally. # So we just punt and use a minimum line length of 8192. lt_cv_sys_max_cmd_len=8192; ;; bitrig* | darwin* | dragonfly* | freebsd* | netbsd* | openbsd*) # This has been around since 386BSD, at least. Likely further. if test -x /sbin/sysctl; then lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax` elif test -x /usr/sbin/sysctl; then lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax` else lt_cv_sys_max_cmd_len=65536 # usable default for all BSDs fi # And add a safety zone lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` ;; interix*) # We know the value 262144 and hardcode it with a safety zone (like BSD) lt_cv_sys_max_cmd_len=196608 ;; os2*) # The test takes a long time on OS/2. lt_cv_sys_max_cmd_len=8192 ;; osf*) # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not # nice to cause kernel panics so lets avoid the loop below. # First set a reasonable default. lt_cv_sys_max_cmd_len=16384 # if test -x /sbin/sysconfig; then case `/sbin/sysconfig -q proc exec_disable_arg_limit` in *1*) lt_cv_sys_max_cmd_len=-1 ;; esac fi ;; sco3.2v5*) lt_cv_sys_max_cmd_len=102400 ;; sysv5* | sco5v6* | sysv4.2uw2*) kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null` if test -n "$kargmax"; then lt_cv_sys_max_cmd_len=`echo $kargmax | sed 's/.*[ ]//'` else lt_cv_sys_max_cmd_len=32768 fi ;; *) lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null` if test -n "$lt_cv_sys_max_cmd_len" && \ test undefined != "$lt_cv_sys_max_cmd_len"; then lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` else # Make teststring a little bigger before we do anything with it. 
# a 1K string should be a reasonable start. for i in 1 2 3 4 5 6 7 8; do teststring=$teststring$teststring done SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}} # If test is not a shell built-in, we'll probably end up computing a # maximum length that is only half of the actual maximum length, but # we can't tell. while { test X`env echo "$teststring$teststring" 2>/dev/null` \ = "X$teststring$teststring"; } >/dev/null 2>&1 && test 17 != "$i" # 1/2 MB should be enough do i=`expr $i + 1` teststring=$teststring$teststring done # Only check the string length outside the loop. lt_cv_sys_max_cmd_len=`expr "X$teststring" : ".*" 2>&1` teststring= # Add a significant safety factor because C++ compilers can tack on # massive amounts of additional arguments before passing them to the # linker. It appears as though 1/2 is a usable value. lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2` fi ;; esac fi if test -n "$lt_cv_sys_max_cmd_len"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sys_max_cmd_len" >&5 $as_echo "$lt_cv_sys_max_cmd_len" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: none" >&5 $as_echo "none" >&6; } fi max_cmd_len=$lt_cv_sys_max_cmd_len : ${CP="cp -f"} : ${MV="mv -f"} : ${RM="rm -f"} if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then lt_unset=unset else lt_unset=false fi # test EBCDIC or ASCII case `echo X|tr X '\101'` in A) # ASCII based system # \n is not interpreted correctly by Solaris 8 /usr/ucb/tr lt_SP2NL='tr \040 \012' lt_NL2SP='tr \015\012 \040\040' ;; *) # EBCDIC based system lt_SP2NL='tr \100 \n' lt_NL2SP='tr \r\n \100\100' ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to $host format" >&5 $as_echo_n "checking how to convert $build file names to $host format... " >&6; } if ${lt_cv_to_host_file_cmd+:} false; then : $as_echo_n "(cached) " >&6 else case $host in *-*-mingw* ) case $build in *-*-mingw* ) # actually msys lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32 ;; *-*-cygwin* ) lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32 ;; * ) # otherwise, assume *nix lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32 ;; esac ;; *-*-cygwin* ) case $build in *-*-mingw* ) # actually msys lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin ;; *-*-cygwin* ) lt_cv_to_host_file_cmd=func_convert_file_noop ;; * ) # otherwise, assume *nix lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin ;; esac ;; * ) # unhandled hosts (and "normal" native builds) lt_cv_to_host_file_cmd=func_convert_file_noop ;; esac fi to_host_file_cmd=$lt_cv_to_host_file_cmd { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_host_file_cmd" >&5 $as_echo "$lt_cv_to_host_file_cmd" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to toolchain format" >&5 $as_echo_n "checking how to convert $build file names to toolchain format... " >&6; } if ${lt_cv_to_tool_file_cmd+:} false; then : $as_echo_n "(cached) " >&6 else #assume ordinary cross tools, or native build. lt_cv_to_tool_file_cmd=func_convert_file_noop case $host in *-*-mingw* ) case $build in *-*-mingw* ) # actually msys lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32 ;; esac ;; esac fi to_tool_file_cmd=$lt_cv_to_tool_file_cmd { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_tool_file_cmd" >&5 $as_echo "$lt_cv_to_tool_file_cmd" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $LD option to reload object files" >&5 $as_echo_n "checking for $LD option to reload object files... 
" >&6; } if ${lt_cv_ld_reload_flag+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_ld_reload_flag='-r' fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_reload_flag" >&5 $as_echo "$lt_cv_ld_reload_flag" >&6; } reload_flag=$lt_cv_ld_reload_flag case $reload_flag in "" | " "*) ;; *) reload_flag=" $reload_flag" ;; esac reload_cmds='$LD$reload_flag -o $output$reload_objs' case $host_os in cygwin* | mingw* | pw32* | cegcc*) if test yes != "$GCC"; then reload_cmds=false fi ;; darwin*) if test yes = "$GCC"; then reload_cmds='$LTCC $LTCFLAGS -nostdlib $wl-r -o $output$reload_objs' else reload_cmds='$LD$reload_flag -o $output$reload_objs' fi ;; esac if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}objdump", so it can be a program name with args. set dummy ${ac_tool_prefix}objdump; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_OBJDUMP+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$OBJDUMP"; then ac_cv_prog_OBJDUMP="$OBJDUMP" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_OBJDUMP="${ac_tool_prefix}objdump" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi OBJDUMP=$ac_cv_prog_OBJDUMP if test -n "$OBJDUMP"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OBJDUMP" >&5 $as_echo "$OBJDUMP" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_OBJDUMP"; then ac_ct_OBJDUMP=$OBJDUMP # Extract the first word of "objdump", so it can be a program name with args. set dummy objdump; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_OBJDUMP+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_OBJDUMP"; then ac_cv_prog_ac_ct_OBJDUMP="$ac_ct_OBJDUMP" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_OBJDUMP="objdump" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_OBJDUMP=$ac_cv_prog_ac_ct_OBJDUMP if test -n "$ac_ct_OBJDUMP"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OBJDUMP" >&5 $as_echo "$ac_ct_OBJDUMP" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_OBJDUMP" = x; then OBJDUMP="false" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac OBJDUMP=$ac_ct_OBJDUMP fi else OBJDUMP="$ac_cv_prog_OBJDUMP" fi test -z "$OBJDUMP" && OBJDUMP=objdump { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to recognize dependent libraries" >&5 $as_echo_n "checking how to recognize dependent libraries... 
" >&6; } if ${lt_cv_deplibs_check_method+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_file_magic_cmd='$MAGIC_CMD' lt_cv_file_magic_test_file= lt_cv_deplibs_check_method='unknown' # Need to set the preceding variable on all platforms that support # interlibrary dependencies. # 'none' -- dependencies not supported. # 'unknown' -- same as none, but documents that we really don't know. # 'pass_all' -- all dependencies passed with no checks. # 'test_compile' -- check by making test program. # 'file_magic [[regex]]' -- check by looking for files in library path # that responds to the $file_magic_cmd with a given extended regex. # If you have 'file' or equivalent on your system and you're not sure # whether 'pass_all' will *always* work, you probably want this one. case $host_os in aix[4-9]*) lt_cv_deplibs_check_method=pass_all ;; beos*) lt_cv_deplibs_check_method=pass_all ;; bsdi[45]*) lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib)' lt_cv_file_magic_cmd='/usr/bin/file -L' lt_cv_file_magic_test_file=/shlib/libc.so ;; cygwin*) # func_win32_libid is a shell function defined in ltmain.sh lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' lt_cv_file_magic_cmd='func_win32_libid' ;; mingw* | pw32*) # Base MSYS/MinGW do not provide the 'file' command needed by # func_win32_libid shell function, so use a weaker test based on 'objdump', # unless we find 'file', for example because we are cross-compiling. if ( file / ) >/dev/null 2>&1; then lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' lt_cv_file_magic_cmd='func_win32_libid' else # Keep this pattern in sync with the one in func_win32_libid. lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' lt_cv_file_magic_cmd='$OBJDUMP -f' fi ;; cegcc*) # use the weaker test based on 'objdump'. See mingw*. lt_cv_deplibs_check_method='file_magic file format pe-arm-.*little(.*architecture: arm)?' lt_cv_file_magic_cmd='$OBJDUMP -f' ;; darwin* | rhapsody*) lt_cv_deplibs_check_method=pass_all ;; freebsd* | dragonfly*) if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then case $host_cpu in i*86 ) # Not sure whether the presence of OpenBSD here was a mistake. # Let's accept both of them until this is cleared up. lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[3-9]86 (compact )?demand paged shared library' lt_cv_file_magic_cmd=/usr/bin/file lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*` ;; esac else lt_cv_deplibs_check_method=pass_all fi ;; haiku*) lt_cv_deplibs_check_method=pass_all ;; hpux10.20* | hpux11*) lt_cv_file_magic_cmd=/usr/bin/file case $host_cpu in ia64*) lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF-[0-9][0-9]) shared object file - IA64' lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so ;; hppa*64*) lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF[ -][0-9][0-9])(-bit)?( [LM]SB)? 
shared object( file)?[, -]* PA-RISC [0-9]\.[0-9]' lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl ;; *) lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|PA-RISC[0-9]\.[0-9]) shared library' lt_cv_file_magic_test_file=/usr/lib/libc.sl ;; esac ;; interix[3-9]*) # PIC code is broken on Interix 3.x, that's why |\.a not |_pic\.a here lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|\.a)$' ;; irix5* | irix6* | nonstopux*) case $LD in *-32|*"-32 ") libmagic=32-bit;; *-n32|*"-n32 ") libmagic=N32;; *-64|*"-64 ") libmagic=64-bit;; *) libmagic=never-match;; esac lt_cv_deplibs_check_method=pass_all ;; # This must be glibc/ELF. linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) lt_cv_deplibs_check_method=pass_all ;; netbsd*) if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$' else lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|_pic\.a)$' fi ;; newos6*) lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (executable|dynamic lib)' lt_cv_file_magic_cmd=/usr/bin/file lt_cv_file_magic_test_file=/usr/lib/libnls.so ;; *nto* | *qnx*) lt_cv_deplibs_check_method=pass_all ;; openbsd* | bitrig*) if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|\.so|_pic\.a)$' else lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$' fi ;; osf3* | osf4* | osf5*) lt_cv_deplibs_check_method=pass_all ;; rdos*) lt_cv_deplibs_check_method=pass_all ;; solaris*) lt_cv_deplibs_check_method=pass_all ;; sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) lt_cv_deplibs_check_method=pass_all ;; sysv4 | sysv4.3*) case $host_vendor in motorola) lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib) M[0-9][0-9]* Version [0-9]' lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*` ;; ncr) lt_cv_deplibs_check_method=pass_all ;; sequent) lt_cv_file_magic_cmd='/bin/file' lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [LM]SB (shared object|dynamic lib )' ;; sni) lt_cv_file_magic_cmd='/bin/file' lt_cv_deplibs_check_method="file_magic ELF [0-9][0-9]*-bit [LM]SB dynamic lib" lt_cv_file_magic_test_file=/lib/libc.so ;; siemens) lt_cv_deplibs_check_method=pass_all ;; pc) lt_cv_deplibs_check_method=pass_all ;; esac ;; tpf*) lt_cv_deplibs_check_method=pass_all ;; os2*) lt_cv_deplibs_check_method=pass_all ;; esac fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_deplibs_check_method" >&5 $as_echo "$lt_cv_deplibs_check_method" >&6; } file_magic_glob= want_nocaseglob=no if test "$build" = "$host"; then case $host_os in mingw* | pw32*) if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then want_nocaseglob=yes else file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[\1]\/[\1]\/g;/g"` fi ;; esac fi file_magic_cmd=$lt_cv_file_magic_cmd deplibs_check_method=$lt_cv_deplibs_check_method test -z "$deplibs_check_method" && deplibs_check_method=unknown if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}dlltool", so it can be a program name with args. set dummy ${ac_tool_prefix}dlltool; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_DLLTOOL+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$DLLTOOL"; then ac_cv_prog_DLLTOOL="$DLLTOOL" # Let the user override the test. 
else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_DLLTOOL="${ac_tool_prefix}dlltool" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi DLLTOOL=$ac_cv_prog_DLLTOOL if test -n "$DLLTOOL"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DLLTOOL" >&5 $as_echo "$DLLTOOL" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_DLLTOOL"; then ac_ct_DLLTOOL=$DLLTOOL # Extract the first word of "dlltool", so it can be a program name with args. set dummy dlltool; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_DLLTOOL+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_DLLTOOL"; then ac_cv_prog_ac_ct_DLLTOOL="$ac_ct_DLLTOOL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_DLLTOOL="dlltool" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_DLLTOOL=$ac_cv_prog_ac_ct_DLLTOOL if test -n "$ac_ct_DLLTOOL"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DLLTOOL" >&5 $as_echo "$ac_ct_DLLTOOL" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_DLLTOOL" = x; then DLLTOOL="false" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac DLLTOOL=$ac_ct_DLLTOOL fi else DLLTOOL="$ac_cv_prog_DLLTOOL" fi test -z "$DLLTOOL" && DLLTOOL=dlltool { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to associate runtime and link libraries" >&5 $as_echo_n "checking how to associate runtime and link libraries... " >&6; } if ${lt_cv_sharedlib_from_linklib_cmd+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_sharedlib_from_linklib_cmd='unknown' case $host_os in cygwin* | mingw* | pw32* | cegcc*) # two different shell functions defined in ltmain.sh; # decide which one to use based on capabilities of $DLLTOOL case `$DLLTOOL --help 2>&1` in *--identify-strict*) lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib ;; *) lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback ;; esac ;; *) # fallback: assume linklib IS sharedlib lt_cv_sharedlib_from_linklib_cmd=$ECHO ;; esac fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sharedlib_from_linklib_cmd" >&5 $as_echo "$lt_cv_sharedlib_from_linklib_cmd" >&6; } sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO if test -n "$ac_tool_prefix"; then for ac_prog in ar do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. set dummy $ac_tool_prefix$ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... 
" >&6; } if ${ac_cv_prog_AR+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$AR"; then ac_cv_prog_AR="$AR" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_AR="$ac_tool_prefix$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi AR=$ac_cv_prog_AR if test -n "$AR"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AR" >&5 $as_echo "$AR" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$AR" && break done fi if test -z "$AR"; then ac_ct_AR=$AR for ac_prog in ar do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_AR+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_AR"; then ac_cv_prog_ac_ct_AR="$ac_ct_AR" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_AR="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_AR=$ac_cv_prog_ac_ct_AR if test -n "$ac_ct_AR"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_AR" >&5 $as_echo "$ac_ct_AR" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$ac_ct_AR" && break done if test "x$ac_ct_AR" = x; then AR="false" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac AR=$ac_ct_AR fi fi : ${AR=ar} : ${AR_FLAGS=cru} { $as_echo "$as_me:${as_lineno-$LINENO}: checking for archiver @FILE support" >&5 $as_echo_n "checking for archiver @FILE support... " >&6; } if ${lt_cv_ar_at_file+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_ar_at_file=no cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : echo conftest.$ac_objext > conftest.lst lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&5' { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 (eval $lt_ar_try) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } if test 0 -eq "$ac_status"; then # Ensure the archiver fails upon bogus file names. rm -f conftest.$ac_objext libconftest.a { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 (eval $lt_ar_try) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 test $ac_status = 0; } if test 0 -ne "$ac_status"; then lt_cv_ar_at_file=@ fi fi rm -f conftest.* libconftest.a fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ar_at_file" >&5 $as_echo "$lt_cv_ar_at_file" >&6; } if test no = "$lt_cv_ar_at_file"; then archiver_list_spec= else archiver_list_spec=$lt_cv_ar_at_file fi if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args. set dummy ${ac_tool_prefix}strip; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_STRIP+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$STRIP"; then ac_cv_prog_STRIP="$STRIP" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_STRIP="${ac_tool_prefix}strip" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi STRIP=$ac_cv_prog_STRIP if test -n "$STRIP"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5 $as_echo "$STRIP" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_STRIP"; then ac_ct_STRIP=$STRIP # Extract the first word of "strip", so it can be a program name with args. set dummy strip; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_STRIP+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_STRIP"; then ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_STRIP="strip" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP if test -n "$ac_ct_STRIP"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5 $as_echo "$ac_ct_STRIP" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_STRIP" = x; then STRIP=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac STRIP=$ac_ct_STRIP fi else STRIP="$ac_cv_prog_STRIP" fi test -z "$STRIP" && STRIP=: if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args. set dummy ${ac_tool_prefix}ranlib; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_RANLIB+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$RANLIB"; then ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi RANLIB=$ac_cv_prog_RANLIB if test -n "$RANLIB"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $RANLIB" >&5 $as_echo "$RANLIB" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_RANLIB"; then ac_ct_RANLIB=$RANLIB # Extract the first word of "ranlib", so it can be a program name with args. set dummy ranlib; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_RANLIB+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_RANLIB"; then ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_RANLIB="ranlib" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB if test -n "$ac_ct_RANLIB"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_RANLIB" >&5 $as_echo "$ac_ct_RANLIB" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_RANLIB" = x; then RANLIB=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac RANLIB=$ac_ct_RANLIB fi else RANLIB="$ac_cv_prog_RANLIB" fi test -z "$RANLIB" && RANLIB=: # Determine commands to create old-style static archives. old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs' old_postinstall_cmds='chmod 644 $oldlib' old_postuninstall_cmds= if test -n "$RANLIB"; then case $host_os in bitrig* | openbsd*) old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$tool_oldlib" ;; *) old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$tool_oldlib" ;; esac old_archive_cmds="$old_archive_cmds~\$RANLIB \$tool_oldlib" fi case $host_os in darwin*) lock_old_archive_extraction=yes ;; *) lock_old_archive_extraction=no ;; esac # If no C compiler was specified, use CC. LTCC=${LTCC-"$CC"} # If no C compiler flags were specified, use CFLAGS. LTCFLAGS=${LTCFLAGS-"$CFLAGS"} # Allow CC to be a program name with arguments. compiler=$CC # Check for command to grab the raw symbol name followed by C symbol from nm. { $as_echo "$as_me:${as_lineno-$LINENO}: checking command to parse $NM output from $compiler object" >&5 $as_echo_n "checking command to parse $NM output from $compiler object... " >&6; } if ${lt_cv_sys_global_symbol_pipe+:} false; then : $as_echo_n "(cached) " >&6 else # These are sane defaults that work on at least a few old systems. # [They come from Ultrix. What could be older than Ultrix?!! ;)] # Character class describing NM global symbol codes. symcode='[BCDEGRST]' # Regexp to match symbols that can be accessed directly from C. sympat='\([_A-Za-z][_A-Za-z0-9]*\)' # Define system-specific variables. 
case $host_os in aix*) symcode='[BCDT]' ;; cygwin* | mingw* | pw32* | cegcc*) symcode='[ABCDGISTW]' ;; hpux*) if test ia64 = "$host_cpu"; then symcode='[ABCDEGRST]' fi ;; irix* | nonstopux*) symcode='[BCDEGRST]' ;; osf*) symcode='[BCDEGQRST]' ;; solaris*) symcode='[BDRT]' ;; sco3.2v5*) symcode='[DT]' ;; sysv4.2uw2*) symcode='[DT]' ;; sysv5* | sco5v6* | unixware* | OpenUNIX*) symcode='[ABDT]' ;; sysv4) symcode='[DFNSTU]' ;; esac # If we're using GNU nm, then use its standard symbol codes. case `$NM -V 2>&1` in *GNU* | *'with BFD'*) symcode='[ABCDGIRSTW]' ;; esac if test "$lt_cv_nm_interface" = "MS dumpbin"; then # Gets list of data symbols to import. lt_cv_sys_global_symbol_to_import="sed -n -e 's/^I .* \(.*\)$/\1/p'" # Adjust the below global symbol transforms to fixup imported variables. lt_cdecl_hook=" -e 's/^I .* \(.*\)$/extern __declspec(dllimport) char \1;/p'" lt_c_name_hook=" -e 's/^I .* \(.*\)$/ {\"\1\", (void *) 0},/p'" lt_c_name_lib_hook="\ -e 's/^I .* \(lib.*\)$/ {\"\1\", (void *) 0},/p'\ -e 's/^I .* \(.*\)$/ {\"lib\1\", (void *) 0},/p'" else # Disable hooks by default. lt_cv_sys_global_symbol_to_import= lt_cdecl_hook= lt_c_name_hook= lt_c_name_lib_hook= fi # Transform an extracted symbol line into a proper C declaration. # Some systems (esp. on ia64) link data and code symbols differently, # so use this general approach. lt_cv_sys_global_symbol_to_cdecl="sed -n"\ $lt_cdecl_hook\ " -e 's/^T .* \(.*\)$/extern int \1();/p'"\ " -e 's/^$symcode$symcode* .* \(.*\)$/extern char \1;/p'" # Transform an extracted symbol line into symbol name and symbol address lt_cv_sys_global_symbol_to_c_name_address="sed -n"\ $lt_c_name_hook\ " -e 's/^: \(.*\) .*$/ {\"\1\", (void *) 0},/p'"\ " -e 's/^$symcode$symcode* .* \(.*\)$/ {\"\1\", (void *) \&\1},/p'" # Transform an extracted symbol line into symbol name with lib prefix and # symbol address. lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n"\ $lt_c_name_lib_hook\ " -e 's/^: \(.*\) .*$/ {\"\1\", (void *) 0},/p'"\ " -e 's/^$symcode$symcode* .* \(lib.*\)$/ {\"\1\", (void *) \&\1},/p'"\ " -e 's/^$symcode$symcode* .* \(.*\)$/ {\"lib\1\", (void *) \&\1},/p'" # Handle CRLF in mingw tool chain opt_cr= case $build_os in mingw*) opt_cr=`$ECHO 'x\{0,1\}' | tr x '\015'` # option cr in regexp ;; esac # Try without a prefix underscore, then with it. for ac_symprfx in "" "_"; do # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol. symxfrm="\\1 $ac_symprfx\\2 \\2" # Write the raw and C identifiers. if test "$lt_cv_nm_interface" = "MS dumpbin"; then # Fake it for dumpbin and say T for any non-static function, # D for any global variable and I for any imported variable. # Also find C++ and __fastcall symbols from MSVC++, # which start with @ or ?. 
lt_cv_sys_global_symbol_pipe="$AWK '"\ " {last_section=section; section=\$ 3};"\ " /^COFF SYMBOL TABLE/{for(i in hide) delete hide[i]};"\ " /Section length .*#relocs.*(pick any)/{hide[last_section]=1};"\ " /^ *Symbol name *: /{split(\$ 0,sn,\":\"); si=substr(sn[2],2)};"\ " /^ *Type *: code/{print \"T\",si,substr(si,length(prfx))};"\ " /^ *Type *: data/{print \"I\",si,substr(si,length(prfx))};"\ " \$ 0!~/External *\|/{next};"\ " / 0+ UNDEF /{next}; / UNDEF \([^|]\)*()/{next};"\ " {if(hide[section]) next};"\ " {f=\"D\"}; \$ 0~/\(\).*\|/{f=\"T\"};"\ " {split(\$ 0,a,/\||\r/); split(a[2],s)};"\ " s[1]~/^[@?]/{print f,s[1],s[1]; next};"\ " s[1]~prfx {split(s[1],t,\"@\"); print f,t[1],substr(t[1],length(prfx))}"\ " ' prfx=^$ac_symprfx" else lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[ ]\($symcode$symcode*\)[ ][ ]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" fi lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | sed '/ __gnu_lto/d'" # Check to see that the pipe works correctly. pipe_works=no rm -f conftest* cat > conftest.$ac_ext <<_LT_EOF #ifdef __cplusplus extern "C" { #endif char nm_test_var; void nm_test_func(void); void nm_test_func(void){} #ifdef __cplusplus } #endif int main(){nm_test_var='a';nm_test_func();return(0);} _LT_EOF if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then # Now try to grab the symbols. nlist=conftest.nm if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist\""; } >&5 (eval $NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && test -s "$nlist"; then # Try sorting and uniquifying the output. if sort "$nlist" | uniq > "$nlist"T; then mv -f "$nlist"T "$nlist" else rm -f "$nlist"T fi # Make sure that we snagged all the symbols we need. if $GREP ' nm_test_var$' "$nlist" >/dev/null; then if $GREP ' nm_test_func$' "$nlist" >/dev/null; then cat <<_LT_EOF > conftest.$ac_ext /* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ #if defined _WIN32 || defined __CYGWIN__ || defined _WIN32_WCE /* DATA imports from DLLs on WIN32 can't be const, because runtime relocations are performed -- see ld's documentation on pseudo-relocs. */ # define LT_DLSYM_CONST #elif defined __osf__ /* This system does not cope well with relocations in const data. */ # define LT_DLSYM_CONST #else # define LT_DLSYM_CONST const #endif #ifdef __cplusplus extern "C" { #endif _LT_EOF # Now generate the symbol file. eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | $GREP -v main >> conftest.$ac_ext' cat <<_LT_EOF >> conftest.$ac_ext /* The mapping between symbol names and symbols. */ LT_DLSYM_CONST struct { const char *name; void *address; } lt__PROGRAM__LTX_preloaded_symbols[] = { { "@PROGRAM@", (void *) 0 }, _LT_EOF $SED "s/^$symcode$symcode* .* \(.*\)$/ {\"\1\", (void *) \&\1},/" < "$nlist" | $GREP -v main >> conftest.$ac_ext cat <<\_LT_EOF >> conftest.$ac_ext {0, (void *) 0} }; /* This works around a problem in FreeBSD linker */ #ifdef FREEBSD_WORKAROUND static const void *lt_preloaded_setup() { return lt__PROGRAM__LTX_preloaded_symbols; } #endif #ifdef __cplusplus } #endif _LT_EOF # Now try linking the two files. 
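# Sketch of what the generated table is expected to contain for the conftest above when
# the symbol pipe works (names taken from the test program): the cdecl transform emits
# declarations such as
#   extern char nm_test_var;
#   extern int nm_test_func();
# and the name/address transform appends entries such as
#   {"nm_test_var", (void *) &nm_test_var},
#   {"nm_test_func", (void *) &nm_test_func},
# ahead of the terminating {0, (void *) 0} element.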
mv conftest.$ac_objext conftstm.$ac_objext lt_globsym_save_LIBS=$LIBS lt_globsym_save_CFLAGS=$CFLAGS LIBS=conftstm.$ac_objext CFLAGS="$CFLAGS$lt_prog_compiler_no_builtin_flag" if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 (eval $ac_link) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && test -s conftest$ac_exeext; then pipe_works=yes fi LIBS=$lt_globsym_save_LIBS CFLAGS=$lt_globsym_save_CFLAGS else echo "cannot find nm_test_func in $nlist" >&5 fi else echo "cannot find nm_test_var in $nlist" >&5 fi else echo "cannot run $lt_cv_sys_global_symbol_pipe" >&5 fi else echo "$progname: failed program was:" >&5 cat conftest.$ac_ext >&5 fi rm -rf conftest* conftst* # Do not use the global_symbol_pipe unless it works. if test yes = "$pipe_works"; then break else lt_cv_sys_global_symbol_pipe= fi done fi if test -z "$lt_cv_sys_global_symbol_pipe"; then lt_cv_sys_global_symbol_to_cdecl= fi if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: failed" >&5 $as_echo "failed" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: ok" >&5 $as_echo "ok" >&6; } fi # Response file support. if test "$lt_cv_nm_interface" = "MS dumpbin"; then nm_file_list_spec='@' elif $NM --help 2>/dev/null | grep '[@]FILE' >/dev/null; then nm_file_list_spec='@' fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for sysroot" >&5 $as_echo_n "checking for sysroot... " >&6; } # Check whether --with-sysroot was given. if test "${with_sysroot+set}" = set; then : withval=$with_sysroot; else with_sysroot=no fi lt_sysroot= case $with_sysroot in #( yes) if test yes = "$GCC"; then lt_sysroot=`$CC --print-sysroot 2>/dev/null` fi ;; #( /*) lt_sysroot=`echo "$with_sysroot" | sed -e "$sed_quote_subst"` ;; #( no|'') ;; #( *) { $as_echo "$as_me:${as_lineno-$LINENO}: result: $with_sysroot" >&5 $as_echo "$with_sysroot" >&6; } as_fn_error $? "The sysroot must be an absolute path." "$LINENO" 5 ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${lt_sysroot:-no}" >&5 $as_echo "${lt_sysroot:-no}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking for a working dd" >&5 $as_echo_n "checking for a working dd... " >&6; } if ${ac_cv_path_lt_DD+:} false; then : $as_echo_n "(cached) " >&6 else printf 0123456789abcdef0123456789abcdef >conftest.i cat conftest.i conftest.i >conftest2.i : ${lt_DD:=$DD} if test -z "$lt_DD"; then ac_path_lt_DD_found=false # Loop through the user's path and test for each of PROGNAME-LIST as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in dd; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_lt_DD="$as_dir/$ac_prog$ac_exec_ext" as_fn_executable_p "$ac_path_lt_DD" || continue if "$ac_path_lt_DD" bs=32 count=1 <conftest2.i >conftest.out 2>/dev/null; then cmp -s conftest.i conftest.out \ && ac_cv_path_lt_DD="$ac_path_lt_DD" ac_path_lt_DD_found=: fi $ac_path_lt_DD_found && break 3 done done done IFS=$as_save_IFS if test -z "$ac_cv_path_lt_DD"; then : fi else ac_cv_path_lt_DD=$lt_DD fi rm -f conftest.i conftest2.i conftest.out fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_lt_DD" >&5 $as_echo "$ac_cv_path_lt_DD" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to truncate binary pipes" >&5 $as_echo_n "checking how to truncate binary pipes... 
" >&6; } if ${lt_cv_truncate_bin+:} false; then : $as_echo_n "(cached) " >&6 else printf 0123456789abcdef0123456789abcdef >conftest.i cat conftest.i conftest.i >conftest2.i lt_cv_truncate_bin= if "$ac_cv_path_lt_DD" bs=32 count=1 conftest.out 2>/dev/null; then cmp -s conftest.i conftest.out \ && lt_cv_truncate_bin="$ac_cv_path_lt_DD bs=4096 count=1" fi rm -f conftest.i conftest2.i conftest.out test -z "$lt_cv_truncate_bin" && lt_cv_truncate_bin="$SED -e 4q" fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_truncate_bin" >&5 $as_echo "$lt_cv_truncate_bin" >&6; } # Calculate cc_basename. Skip known compiler wrappers and cross-prefix. func_cc_basename () { for cc_temp in $*""; do case $cc_temp in compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; \-*) ;; *) break;; esac done func_cc_basename_result=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"` } # Check whether --enable-libtool-lock was given. if test "${enable_libtool_lock+set}" = set; then : enableval=$enable_libtool_lock; fi test no = "$enable_libtool_lock" || enable_libtool_lock=yes # Some flags need to be propagated to the compiler or linker for good # libtool support. case $host in ia64-*-hpux*) # Find out what ABI is being produced by ac_compile, and set mode # options accordingly. echo 'int i;' > conftest.$ac_ext if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then case `/usr/bin/file conftest.$ac_objext` in *ELF-32*) HPUX_IA64_MODE=32 ;; *ELF-64*) HPUX_IA64_MODE=64 ;; esac fi rm -rf conftest* ;; *-*-irix6*) # Find out what ABI is being produced by ac_compile, and set linker # options accordingly. echo '#line '$LINENO' "configure"' > conftest.$ac_ext if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then if test yes = "$lt_cv_prog_gnu_ld"; then case `/usr/bin/file conftest.$ac_objext` in *32-bit*) LD="${LD-ld} -melf32bsmip" ;; *N32*) LD="${LD-ld} -melf32bmipn32" ;; *64-bit*) LD="${LD-ld} -melf64bmip" ;; esac else case `/usr/bin/file conftest.$ac_objext` in *32-bit*) LD="${LD-ld} -32" ;; *N32*) LD="${LD-ld} -n32" ;; *64-bit*) LD="${LD-ld} -64" ;; esac fi fi rm -rf conftest* ;; mips64*-*linux*) # Find out what ABI is being produced by ac_compile, and set linker # options accordingly. echo '#line '$LINENO' "configure"' > conftest.$ac_ext if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then emul=elf case `/usr/bin/file conftest.$ac_objext` in *32-bit*) emul="${emul}32" ;; *64-bit*) emul="${emul}64" ;; esac case `/usr/bin/file conftest.$ac_objext` in *MSB*) emul="${emul}btsmip" ;; *LSB*) emul="${emul}ltsmip" ;; esac case `/usr/bin/file conftest.$ac_objext` in *N32*) emul="${emul}n32" ;; esac LD="${LD-ld} -m $emul" fi rm -rf conftest* ;; x86_64-*kfreebsd*-gnu|x86_64-*linux*|powerpc*-*linux*| \ s390*-*linux*|s390*-*tpf*|sparc*-*linux*) # Find out what ABI is being produced by ac_compile, and set linker # options accordingly. 
Note that the listed cases only cover the # situations where additional linker options are needed (such as when # doing 32-bit compilation for a host where ld defaults to 64-bit, or # vice versa); the common cases where no linker options are needed do # not appear in the list. echo 'int i;' > conftest.$ac_ext if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then case `/usr/bin/file conftest.o` in *32-bit*) case $host in x86_64-*kfreebsd*-gnu) LD="${LD-ld} -m elf_i386_fbsd" ;; x86_64-*linux*) case `/usr/bin/file conftest.o` in *x86-64*) LD="${LD-ld} -m elf32_x86_64" ;; *) LD="${LD-ld} -m elf_i386" ;; esac ;; powerpc64le-*linux*) LD="${LD-ld} -m elf32lppclinux" ;; powerpc64-*linux*) LD="${LD-ld} -m elf32ppclinux" ;; s390x-*linux*) LD="${LD-ld} -m elf_s390" ;; sparc64-*linux*) LD="${LD-ld} -m elf32_sparc" ;; esac ;; *64-bit*) case $host in x86_64-*kfreebsd*-gnu) LD="${LD-ld} -m elf_x86_64_fbsd" ;; x86_64-*linux*) LD="${LD-ld} -m elf_x86_64" ;; powerpcle-*linux*) LD="${LD-ld} -m elf64lppc" ;; powerpc-*linux*) LD="${LD-ld} -m elf64ppc" ;; s390*-*linux*|s390*-*tpf*) LD="${LD-ld} -m elf64_s390" ;; sparc*-*linux*) LD="${LD-ld} -m elf64_sparc" ;; esac ;; esac fi rm -rf conftest* ;; *-*-sco3.2v5*) # On SCO OpenServer 5, we need -belf to get full-featured binaries. SAVE_CFLAGS=$CFLAGS CFLAGS="$CFLAGS -belf" { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler needs -belf" >&5 $as_echo_n "checking whether the C compiler needs -belf... " >&6; } if ${lt_cv_cc_needs_belf+:} false; then : $as_echo_n "(cached) " >&6 else ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : lt_cv_cc_needs_belf=yes else lt_cv_cc_needs_belf=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_cc_needs_belf" >&5 $as_echo "$lt_cv_cc_needs_belf" >&6; } if test yes != "$lt_cv_cc_needs_belf"; then # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf CFLAGS=$SAVE_CFLAGS fi ;; *-*solaris*) # Find out what ABI is being produced by ac_compile, and set linker # options accordingly. echo 'int i;' > conftest.$ac_ext if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then case `/usr/bin/file conftest.o` in *64-bit*) case $lt_cv_prog_gnu_ld in yes*) case $host in i?86-*-solaris*|x86_64-*-solaris*) LD="${LD-ld} -m elf_x86_64" ;; sparc*-*-solaris*) LD="${LD-ld} -m elf64_sparc" ;; esac # GNU ld 2.21 introduced _sol2 emulations. Use them if available. 
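# Sketch of the effect of the check below on 64-bit Solaris/x86 (assuming GNU ld 2.21
# or later is in use): an LD already set to
#   LD="ld -m elf_x86_64"
# becomes
#   LD="ld -m elf_x86_64_sol2"
# when 'ld -V' advertises the _sol2 emulations; with the native linker the fallback
# branch tries 'ld -64' instead.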
if ${LD-ld} -V | grep _sol2 >/dev/null 2>&1; then LD=${LD-ld}_sol2 fi ;; *) if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then LD="${LD-ld} -64" fi ;; esac ;; esac fi rm -rf conftest* ;; esac need_locks=$enable_libtool_lock if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}mt", so it can be a program name with args. set dummy ${ac_tool_prefix}mt; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_MANIFEST_TOOL+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$MANIFEST_TOOL"; then ac_cv_prog_MANIFEST_TOOL="$MANIFEST_TOOL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_MANIFEST_TOOL="${ac_tool_prefix}mt" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi MANIFEST_TOOL=$ac_cv_prog_MANIFEST_TOOL if test -n "$MANIFEST_TOOL"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MANIFEST_TOOL" >&5 $as_echo "$MANIFEST_TOOL" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_MANIFEST_TOOL"; then ac_ct_MANIFEST_TOOL=$MANIFEST_TOOL # Extract the first word of "mt", so it can be a program name with args. set dummy mt; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_MANIFEST_TOOL+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_MANIFEST_TOOL"; then ac_cv_prog_ac_ct_MANIFEST_TOOL="$ac_ct_MANIFEST_TOOL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_MANIFEST_TOOL="mt" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_MANIFEST_TOOL=$ac_cv_prog_ac_ct_MANIFEST_TOOL if test -n "$ac_ct_MANIFEST_TOOL"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_MANIFEST_TOOL" >&5 $as_echo "$ac_ct_MANIFEST_TOOL" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_MANIFEST_TOOL" = x; then MANIFEST_TOOL=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac MANIFEST_TOOL=$ac_ct_MANIFEST_TOOL fi else MANIFEST_TOOL="$ac_cv_prog_MANIFEST_TOOL" fi test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $MANIFEST_TOOL is a manifest tool" >&5 $as_echo_n "checking if $MANIFEST_TOOL is a manifest tool... " >&6; } if ${lt_cv_path_mainfest_tool+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_path_mainfest_tool=no echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&5 $MANIFEST_TOOL '-?' 
2>conftest.err > conftest.out cat conftest.err >&5 if $GREP 'Manifest Tool' conftest.out > /dev/null; then lt_cv_path_mainfest_tool=yes fi rm -f conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_mainfest_tool" >&5 $as_echo "$lt_cv_path_mainfest_tool" >&6; } if test yes != "$lt_cv_path_mainfest_tool"; then MANIFEST_TOOL=: fi case $host_os in rhapsody* | darwin*) if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}dsymutil", so it can be a program name with args. set dummy ${ac_tool_prefix}dsymutil; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_DSYMUTIL+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$DSYMUTIL"; then ac_cv_prog_DSYMUTIL="$DSYMUTIL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_DSYMUTIL="${ac_tool_prefix}dsymutil" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi DSYMUTIL=$ac_cv_prog_DSYMUTIL if test -n "$DSYMUTIL"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DSYMUTIL" >&5 $as_echo "$DSYMUTIL" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_DSYMUTIL"; then ac_ct_DSYMUTIL=$DSYMUTIL # Extract the first word of "dsymutil", so it can be a program name with args. set dummy dsymutil; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_DSYMUTIL+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_DSYMUTIL"; then ac_cv_prog_ac_ct_DSYMUTIL="$ac_ct_DSYMUTIL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_DSYMUTIL="dsymutil" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_DSYMUTIL=$ac_cv_prog_ac_ct_DSYMUTIL if test -n "$ac_ct_DSYMUTIL"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DSYMUTIL" >&5 $as_echo "$ac_ct_DSYMUTIL" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_DSYMUTIL" = x; then DSYMUTIL=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac DSYMUTIL=$ac_ct_DSYMUTIL fi else DSYMUTIL="$ac_cv_prog_DSYMUTIL" fi if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}nmedit", so it can be a program name with args. set dummy ${ac_tool_prefix}nmedit; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_NMEDIT+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$NMEDIT"; then ac_cv_prog_NMEDIT="$NMEDIT" # Let the user override the test. 
else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_NMEDIT="${ac_tool_prefix}nmedit" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi NMEDIT=$ac_cv_prog_NMEDIT if test -n "$NMEDIT"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $NMEDIT" >&5 $as_echo "$NMEDIT" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_NMEDIT"; then ac_ct_NMEDIT=$NMEDIT # Extract the first word of "nmedit", so it can be a program name with args. set dummy nmedit; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_NMEDIT+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_NMEDIT"; then ac_cv_prog_ac_ct_NMEDIT="$ac_ct_NMEDIT" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_NMEDIT="nmedit" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_NMEDIT=$ac_cv_prog_ac_ct_NMEDIT if test -n "$ac_ct_NMEDIT"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_NMEDIT" >&5 $as_echo "$ac_ct_NMEDIT" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_NMEDIT" = x; then NMEDIT=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac NMEDIT=$ac_ct_NMEDIT fi else NMEDIT="$ac_cv_prog_NMEDIT" fi if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}lipo", so it can be a program name with args. set dummy ${ac_tool_prefix}lipo; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_LIPO+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$LIPO"; then ac_cv_prog_LIPO="$LIPO" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_LIPO="${ac_tool_prefix}lipo" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi LIPO=$ac_cv_prog_LIPO if test -n "$LIPO"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LIPO" >&5 $as_echo "$LIPO" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_LIPO"; then ac_ct_LIPO=$LIPO # Extract the first word of "lipo", so it can be a program name with args. set dummy lipo; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... 
" >&6; } if ${ac_cv_prog_ac_ct_LIPO+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_LIPO"; then ac_cv_prog_ac_ct_LIPO="$ac_ct_LIPO" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_LIPO="lipo" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_LIPO=$ac_cv_prog_ac_ct_LIPO if test -n "$ac_ct_LIPO"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_LIPO" >&5 $as_echo "$ac_ct_LIPO" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_LIPO" = x; then LIPO=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac LIPO=$ac_ct_LIPO fi else LIPO="$ac_cv_prog_LIPO" fi if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}otool", so it can be a program name with args. set dummy ${ac_tool_prefix}otool; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_OTOOL+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$OTOOL"; then ac_cv_prog_OTOOL="$OTOOL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_OTOOL="${ac_tool_prefix}otool" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi OTOOL=$ac_cv_prog_OTOOL if test -n "$OTOOL"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OTOOL" >&5 $as_echo "$OTOOL" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_OTOOL"; then ac_ct_OTOOL=$OTOOL # Extract the first word of "otool", so it can be a program name with args. set dummy otool; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_OTOOL+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_OTOOL"; then ac_cv_prog_ac_ct_OTOOL="$ac_ct_OTOOL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_OTOOL="otool" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_OTOOL=$ac_cv_prog_ac_ct_OTOOL if test -n "$ac_ct_OTOOL"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OTOOL" >&5 $as_echo "$ac_ct_OTOOL" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_OTOOL" = x; then OTOOL=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac OTOOL=$ac_ct_OTOOL fi else OTOOL="$ac_cv_prog_OTOOL" fi if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}otool64", so it can be a program name with args. set dummy ${ac_tool_prefix}otool64; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_OTOOL64+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$OTOOL64"; then ac_cv_prog_OTOOL64="$OTOOL64" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_OTOOL64="${ac_tool_prefix}otool64" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi OTOOL64=$ac_cv_prog_OTOOL64 if test -n "$OTOOL64"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OTOOL64" >&5 $as_echo "$OTOOL64" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_OTOOL64"; then ac_ct_OTOOL64=$OTOOL64 # Extract the first word of "otool64", so it can be a program name with args. set dummy otool64; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_OTOOL64+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_OTOOL64"; then ac_cv_prog_ac_ct_OTOOL64="$ac_ct_OTOOL64" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_OTOOL64="otool64" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_OTOOL64=$ac_cv_prog_ac_ct_OTOOL64 if test -n "$ac_ct_OTOOL64"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OTOOL64" >&5 $as_echo "$ac_ct_OTOOL64" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_OTOOL64" = x; then OTOOL64=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac OTOOL64=$ac_ct_OTOOL64 fi else OTOOL64="$ac_cv_prog_OTOOL64" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -single_module linker flag" >&5 $as_echo_n "checking for -single_module linker flag... " >&6; } if ${lt_cv_apple_cc_single_mod+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_apple_cc_single_mod=no if test -z "$LT_MULTI_MODULE"; then # By default we will add the -single_module flag. You can override # by either setting the environment variable LT_MULTI_MODULE # non-empty at configure time, or by adding -multi_module to the # link flags. rm -rf libconftest.dylib* echo "int foo(void){return 1;}" > conftest.c echo "$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ -dynamiclib -Wl,-single_module conftest.c" >&5 $LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ -dynamiclib -Wl,-single_module conftest.c 2>conftest.err _lt_result=$? # If there is a non-empty error log, and "single_module" # appears in it, assume the flag caused a linker warning if test -s conftest.err && $GREP single_module conftest.err; then cat conftest.err >&5 # Otherwise, if the output was created with a 0 exit code from # the compiler, it worked. elif test -f libconftest.dylib && test 0 = "$_lt_result"; then lt_cv_apple_cc_single_mod=yes else cat conftest.err >&5 fi rm -rf libconftest.dylib* rm -f conftest.* fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_apple_cc_single_mod" >&5 $as_echo "$lt_cv_apple_cc_single_mod" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -exported_symbols_list linker flag" >&5 $as_echo_n "checking for -exported_symbols_list linker flag... " >&6; } if ${lt_cv_ld_exported_symbols_list+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_ld_exported_symbols_list=no save_LDFLAGS=$LDFLAGS echo "_main" > conftest.sym LDFLAGS="$LDFLAGS -Wl,-exported_symbols_list,conftest.sym" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : lt_cv_ld_exported_symbols_list=yes else lt_cv_ld_exported_symbols_list=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LDFLAGS=$save_LDFLAGS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_exported_symbols_list" >&5 $as_echo "$lt_cv_ld_exported_symbols_list" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -force_load linker flag" >&5 $as_echo_n "checking for -force_load linker flag... 
" >&6; } if ${lt_cv_ld_force_load+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_ld_force_load=no cat > conftest.c << _LT_EOF int forced_loaded() { return 2;} _LT_EOF echo "$LTCC $LTCFLAGS -c -o conftest.o conftest.c" >&5 $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&5 echo "$AR cru libconftest.a conftest.o" >&5 $AR cru libconftest.a conftest.o 2>&5 echo "$RANLIB libconftest.a" >&5 $RANLIB libconftest.a 2>&5 cat > conftest.c << _LT_EOF int main() { return 0;} _LT_EOF echo "$LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a" >&5 $LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a 2>conftest.err _lt_result=$? if test -s conftest.err && $GREP force_load conftest.err; then cat conftest.err >&5 elif test -f conftest && test 0 = "$_lt_result" && $GREP forced_load conftest >/dev/null 2>&1; then lt_cv_ld_force_load=yes else cat conftest.err >&5 fi rm -f conftest.err libconftest.a conftest conftest.c rm -rf conftest.dSYM fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_force_load" >&5 $as_echo "$lt_cv_ld_force_load" >&6; } case $host_os in rhapsody* | darwin1.[012]) _lt_dar_allow_undefined='$wl-undefined ${wl}suppress' ;; darwin1.*) _lt_dar_allow_undefined='$wl-flat_namespace $wl-undefined ${wl}suppress' ;; darwin*) # darwin 5.x on # if running on 10.5 or later, the deployment target defaults # to the OS version, if on x86, and 10.4, the deployment # target defaults to 10.4. Don't you love it? case ${MACOSX_DEPLOYMENT_TARGET-10.0},$host in 10.0,*86*-darwin8*|10.0,*-darwin[91]*) _lt_dar_allow_undefined='$wl-undefined ${wl}dynamic_lookup' ;; 10.[012][,.]*) _lt_dar_allow_undefined='$wl-flat_namespace $wl-undefined ${wl}suppress' ;; 10.*) _lt_dar_allow_undefined='$wl-undefined ${wl}dynamic_lookup' ;; esac ;; esac if test yes = "$lt_cv_apple_cc_single_mod"; then _lt_dar_single_mod='$single_module' fi if test yes = "$lt_cv_ld_exported_symbols_list"; then _lt_dar_export_syms=' $wl-exported_symbols_list,$output_objdir/$libname-symbols.expsym' else _lt_dar_export_syms='~$NMEDIT -s $output_objdir/$libname-symbols.expsym $lib' fi if test : != "$DSYMUTIL" && test no = "$lt_cv_ld_force_load"; then _lt_dsymutil='~$DSYMUTIL $lib || :' else _lt_dsymutil= fi ;; esac # func_munge_path_list VARIABLE PATH # ----------------------------------- # VARIABLE is name of variable containing _space_ separated list of # directories to be munged by the contents of PATH, which is string # having a format: # "DIR[:DIR]:" # string "DIR[ DIR]" will be prepended to VARIABLE # ":DIR[:DIR]" # string "DIR[ DIR]" will be appended to VARIABLE # "DIRP[:DIRP]::[DIRA:]DIRA" # string "DIRP[ DIRP]" will be prepended to VARIABLE and string # "DIRA[ DIRA]" will be appended to VARIABLE # "DIR[:DIR]" # VARIABLE will be replaced by "DIR[ DIR]" func_munge_path_list () { case x$2 in x) ;; *:) eval $1=\"`$ECHO $2 | $SED 's/:/ /g'` \$$1\" ;; x:*) eval $1=\"\$$1 `$ECHO $2 | $SED 's/:/ /g'`\" ;; *::*) eval $1=\"\$$1\ `$ECHO $2 | $SED -e 's/.*:://' -e 's/:/ /g'`\" eval $1=\"`$ECHO $2 | $SED -e 's/::.*//' -e 's/:/ /g'`\ \$$1\" ;; *) eval $1=\"`$ECHO $2 | $SED 's/:/ /g'`\" ;; esac } ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C preprocessor" >&5 $as_echo_n "checking how to run the C preprocessor... 
" >&6; } # On Suns, sometimes $CPP names a directory. if test -n "$CPP" && test -d "$CPP"; then CPP= fi if test -z "$CPP"; then if ${ac_cv_prog_CPP+:} false; then : $as_echo_n "(cached) " >&6 else # Double quotes because CPP needs to be expanded for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp" do ac_preproc_ok=false for ac_c_preproc_warn_flag in '' yes do # Use a header file that comes with gcc, so configuring glibc # with a fresh cross-compiler works. # Prefer to if __STDC__ is defined, since # exists even on freestanding compilers. # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifdef __STDC__ # include #else # include #endif Syntax error _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : else # Broken: fails on valid input. continue fi rm -f conftest.err conftest.i conftest.$ac_ext # OK, works on sane cases. Now check whether nonexistent headers # can be detected and how. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : # Broken: success on invalid input. continue else # Passes both tests. ac_preproc_ok=: break fi rm -f conftest.err conftest.i conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. rm -f conftest.i conftest.err conftest.$ac_ext if $ac_preproc_ok; then : break fi done ac_cv_prog_CPP=$CPP fi CPP=$ac_cv_prog_CPP else ac_cv_prog_CPP=$CPP fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CPP" >&5 $as_echo "$CPP" >&6; } ac_preproc_ok=false for ac_c_preproc_warn_flag in '' yes do # Use a header file that comes with gcc, so configuring glibc # with a fresh cross-compiler works. # Prefer to if __STDC__ is defined, since # exists even on freestanding compilers. # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifdef __STDC__ # include #else # include #endif Syntax error _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : else # Broken: fails on valid input. continue fi rm -f conftest.err conftest.i conftest.$ac_ext # OK, works on sane cases. Now check whether nonexistent headers # can be detected and how. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : # Broken: success on invalid input. continue else # Passes both tests. ac_preproc_ok=: break fi rm -f conftest.err conftest.i conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. rm -f conftest.i conftest.err conftest.$ac_ext if $ac_preproc_ok; then : else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "C preprocessor \"$CPP\" fails sanity check See \`config.log' for more details" "$LINENO" 5; } fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5 $as_echo_n "checking for ANSI C header files... " >&6; } if ${ac_cv_header_stdc+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include <stdlib.h> #include <stdarg.h> #include <string.h> #include <float.h> int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_header_stdc=yes else ac_cv_header_stdc=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test $ac_cv_header_stdc = yes; then # SunOS 4.x string.h does not declare mem*, contrary to ANSI. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <string.h> _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "memchr" >/dev/null 2>&1; then : else ac_cv_header_stdc=no fi rm -f conftest* fi if test $ac_cv_header_stdc = yes; then # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <stdlib.h> _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "free" >/dev/null 2>&1; then : else ac_cv_header_stdc=no fi rm -f conftest* fi if test $ac_cv_header_stdc = yes; then # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. if test "$cross_compiling" = yes; then : : else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <ctype.h> #include <stdlib.h> #if ((' ' & 0x0FF) == 0x020) # define ISLOWER(c) ('a' <= (c) && (c) <= 'z') # define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c)) #else # define ISLOWER(c) \ (('a' <= (c) && (c) <= 'i') \ || ('j' <= (c) && (c) <= 'r') \ || ('s' <= (c) && (c) <= 'z')) # define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c)) #endif #define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) int main () { int i; for (i = 0; i < 256; i++) if (XOR (islower (i), ISLOWER (i)) || toupper (i) != TOUPPER (i)) return 2; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : else ac_cv_header_stdc=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5 $as_echo "$ac_cv_header_stdc" >&6; } if test $ac_cv_header_stdc = yes; then $as_echo "#define STDC_HEADERS 1" >>confdefs.h fi # On IRIX 5.3, sys/types and inttypes.h are conflicting. for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \ inttypes.h stdint.h unistd.h do : as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default " if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF fi done for ac_header in dlfcn.h do : ac_fn_c_check_header_compile "$LINENO" "dlfcn.h" "ac_cv_header_dlfcn_h" "$ac_includes_default " if test "x$ac_cv_header_dlfcn_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_DLFCN_H 1 _ACEOF fi done # Set options enable_dlopen=no enable_win32_dll=no # Check whether --enable-shared was given. if test "${enable_shared+set}" = set; then : enableval=$enable_shared; p=${PACKAGE-default} case $enableval in yes) enable_shared=yes ;; no) enable_shared=no ;; *) enable_shared=no # Look at the argument we got. We use all the common list separators. lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, for pkg in $enableval; do IFS=$lt_save_ifs if test "X$pkg" = "X$p"; then enable_shared=yes fi done IFS=$lt_save_ifs ;; esac else enable_shared=yes fi # Check whether --enable-static was given. if test "${enable_static+set}" = set; then : enableval=$enable_static; p=${PACKAGE-default} case $enableval in yes) enable_static=yes ;; no) enable_static=no ;; *) enable_static=no # Look at the argument we got. We use all the common list separators. 
lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, for pkg in $enableval; do IFS=$lt_save_ifs if test "X$pkg" = "X$p"; then enable_static=yes fi done IFS=$lt_save_ifs ;; esac else enable_static=yes fi # Check whether --with-pic was given. if test "${with_pic+set}" = set; then : withval=$with_pic; lt_p=${PACKAGE-default} case $withval in yes|no) pic_mode=$withval ;; *) pic_mode=default # Look at the argument we got. We use all the common list separators. lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, for lt_pkg in $withval; do IFS=$lt_save_ifs if test "X$lt_pkg" = "X$lt_p"; then pic_mode=yes fi done IFS=$lt_save_ifs ;; esac else pic_mode=default fi # Check whether --enable-fast-install was given. if test "${enable_fast_install+set}" = set; then : enableval=$enable_fast_install; p=${PACKAGE-default} case $enableval in yes) enable_fast_install=yes ;; no) enable_fast_install=no ;; *) enable_fast_install=no # Look at the argument we got. We use all the common list separators. lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, for pkg in $enableval; do IFS=$lt_save_ifs if test "X$pkg" = "X$p"; then enable_fast_install=yes fi done IFS=$lt_save_ifs ;; esac else enable_fast_install=yes fi shared_archive_member_spec= case $host,$enable_shared in power*-*-aix[5-9]*,yes) { $as_echo "$as_me:${as_lineno-$LINENO}: checking which variant of shared library versioning to provide" >&5 $as_echo_n "checking which variant of shared library versioning to provide... " >&6; } # Check whether --with-aix-soname was given. if test "${with_aix_soname+set}" = set; then : withval=$with_aix_soname; case $withval in aix|svr4|both) ;; *) as_fn_error $? "Unknown argument to --with-aix-soname" "$LINENO" 5 ;; esac lt_cv_with_aix_soname=$with_aix_soname else if ${lt_cv_with_aix_soname+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_with_aix_soname=aix fi with_aix_soname=$lt_cv_with_aix_soname fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $with_aix_soname" >&5 $as_echo "$with_aix_soname" >&6; } if test aix != "$with_aix_soname"; then # For the AIX way of multilib, we name the shared archive member # based on the bitwidth used, traditionally 'shr.o' or 'shr_64.o', # and 'shr.imp' or 'shr_64.imp', respectively, for the Import File. # Even when GNU compilers ignore OBJECT_MODE but need '-maix64' flag, # the AIX toolchain works better with OBJECT_MODE set (default 32). if test 64 = "${OBJECT_MODE-32}"; then shared_archive_member_spec=shr_64 else shared_archive_member_spec=shr fi fi ;; *) with_aix_soname=aix ;; esac # This can be used to rebuild libtool when needed LIBTOOL_DEPS=$ltmain # Always use our own libtool. LIBTOOL='$(SHELL) $(top_builddir)/libtool' test -z "$LN_S" && LN_S="ln -s" if test -n "${ZSH_VERSION+set}"; then setopt NO_GLOB_SUBST fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for objdir" >&5 $as_echo_n "checking for objdir... " >&6; } if ${lt_cv_objdir+:} false; then : $as_echo_n "(cached) " >&6 else rm -f .libs 2>/dev/null mkdir .libs 2>/dev/null if test -d .libs; then lt_cv_objdir=.libs else # MS-DOS does not allow filenames that begin with a dot. lt_cv_objdir=_libs fi rmdir .libs 2>/dev/null fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_objdir" >&5 $as_echo "$lt_cv_objdir" >&6; } objdir=$lt_cv_objdir cat >>confdefs.h <<_ACEOF #define LT_OBJDIR "$lt_cv_objdir/" _ACEOF case $host_os in aix3*) # AIX sometimes has problems with the GCC collect2 program. For some # reason, if we set the COLLECT_NAMES environment variable, the problems # vanish in a puff of smoke. 
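# Sketch of the shell idiom used just below: "${COLLECT_NAMES+set}" expands to 'set'
# only when COLLECT_NAMES is already defined (even if empty), so the block exports an
# empty COLLECT_NAMES only when the variable was not already set and never overrides a
# value the user provided.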
if test set != "${COLLECT_NAMES+set}"; then COLLECT_NAMES= export COLLECT_NAMES fi ;; esac # Global variables: ofile=libtool can_build_shared=yes # All known linkers require a '.a' archive for static linking (except MSVC, # which needs '.lib'). libext=a with_gnu_ld=$lt_cv_prog_gnu_ld old_CC=$CC old_CFLAGS=$CFLAGS # Set sane defaults for various variables test -z "$CC" && CC=cc test -z "$LTCC" && LTCC=$CC test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS test -z "$LD" && LD=ld test -z "$ac_objext" && ac_objext=o func_cc_basename $compiler cc_basename=$func_cc_basename_result # Only perform the check for file, if the check method requires it test -z "$MAGIC_CMD" && MAGIC_CMD=file case $deplibs_check_method in file_magic*) if test "$file_magic_cmd" = '$MAGIC_CMD'; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ${ac_tool_prefix}file" >&5 $as_echo_n "checking for ${ac_tool_prefix}file... " >&6; } if ${lt_cv_path_MAGIC_CMD+:} false; then : $as_echo_n "(cached) " >&6 else case $MAGIC_CMD in [\\/*] | ?:[\\/]*) lt_cv_path_MAGIC_CMD=$MAGIC_CMD # Let the user override the test with a path. ;; *) lt_save_MAGIC_CMD=$MAGIC_CMD lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR ac_dummy="/usr/bin$PATH_SEPARATOR$PATH" for ac_dir in $ac_dummy; do IFS=$lt_save_ifs test -z "$ac_dir" && ac_dir=. if test -f "$ac_dir/${ac_tool_prefix}file"; then lt_cv_path_MAGIC_CMD=$ac_dir/"${ac_tool_prefix}file" if test -n "$file_magic_test_file"; then case $deplibs_check_method in "file_magic "*) file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` MAGIC_CMD=$lt_cv_path_MAGIC_CMD if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | $EGREP "$file_magic_regex" > /dev/null; then : else cat <<_LT_EOF 1>&2 *** Warning: the command libtool uses to detect shared libraries, *** $file_magic_cmd, produces output that libtool cannot recognize. *** The result is that libtool may fail to recognize shared libraries *** as such. This will affect the creation of libtool libraries that *** depend on shared libraries, but programs linked with such libtool *** libraries will work regardless of this problem. Nevertheless, you *** may want to report the problem to your system manager and/or to *** bug-libtool@gnu.org _LT_EOF fi ;; esac fi break fi done IFS=$lt_save_ifs MAGIC_CMD=$lt_save_MAGIC_CMD ;; esac fi MAGIC_CMD=$lt_cv_path_MAGIC_CMD if test -n "$MAGIC_CMD"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MAGIC_CMD" >&5 $as_echo "$MAGIC_CMD" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test -z "$lt_cv_path_MAGIC_CMD"; then if test -n "$ac_tool_prefix"; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for file" >&5 $as_echo_n "checking for file... " >&6; } if ${lt_cv_path_MAGIC_CMD+:} false; then : $as_echo_n "(cached) " >&6 else case $MAGIC_CMD in [\\/*] | ?:[\\/]*) lt_cv_path_MAGIC_CMD=$MAGIC_CMD # Let the user override the test with a path. ;; *) lt_save_MAGIC_CMD=$MAGIC_CMD lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR ac_dummy="/usr/bin$PATH_SEPARATOR$PATH" for ac_dir in $ac_dummy; do IFS=$lt_save_ifs test -z "$ac_dir" && ac_dir=. 
if test -f "$ac_dir/file"; then lt_cv_path_MAGIC_CMD=$ac_dir/"file" if test -n "$file_magic_test_file"; then case $deplibs_check_method in "file_magic "*) file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` MAGIC_CMD=$lt_cv_path_MAGIC_CMD if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | $EGREP "$file_magic_regex" > /dev/null; then : else cat <<_LT_EOF 1>&2 *** Warning: the command libtool uses to detect shared libraries, *** $file_magic_cmd, produces output that libtool cannot recognize. *** The result is that libtool may fail to recognize shared libraries *** as such. This will affect the creation of libtool libraries that *** depend on shared libraries, but programs linked with such libtool *** libraries will work regardless of this problem. Nevertheless, you *** may want to report the problem to your system manager and/or to *** bug-libtool@gnu.org _LT_EOF fi ;; esac fi break fi done IFS=$lt_save_ifs MAGIC_CMD=$lt_save_MAGIC_CMD ;; esac fi MAGIC_CMD=$lt_cv_path_MAGIC_CMD if test -n "$MAGIC_CMD"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MAGIC_CMD" >&5 $as_echo "$MAGIC_CMD" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi else MAGIC_CMD=: fi fi fi ;; esac # Use C for the default configuration in the libtool script lt_save_CC=$CC ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu # Source file extension for C test sources. ac_ext=c # Object file extension for compiled C test sources. objext=o objext=$objext # Code to be used in simple compile tests lt_simple_compile_test_code="int some_variable = 0;" # Code to be used in simple link tests lt_simple_link_test_code='int main(){return(0);}' # If no C compiler was specified, use CC. LTCC=${LTCC-"$CC"} # If no C compiler flags were specified, use CFLAGS. LTCFLAGS=${LTCFLAGS-"$CFLAGS"} # Allow CC to be a program name with arguments. compiler=$CC # Save the default compiler, since it gets overwritten when the other # tags are being tested, and _LT_TAGVAR(compiler, []) is a NOP. compiler_DEFAULT=$CC # save warnings/boilerplate of simple test code ac_outfile=conftest.$ac_objext echo "$lt_simple_compile_test_code" >conftest.$ac_ext eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err _lt_compiler_boilerplate=`cat conftest.err` $RM conftest* ac_outfile=conftest.$ac_objext echo "$lt_simple_link_test_code" >conftest.$ac_ext eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err _lt_linker_boilerplate=`cat conftest.err` $RM -r conftest* if test -n "$compiler"; then lt_prog_compiler_no_builtin_flag= if test yes = "$GCC"; then case $cc_basename in nvcc*) lt_prog_compiler_no_builtin_flag=' -Xcompiler -fno-builtin' ;; *) lt_prog_compiler_no_builtin_flag=' -fno-builtin' ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -fno-rtti -fno-exceptions" >&5 $as_echo_n "checking if $compiler supports -fno-rtti -fno-exceptions... 
" >&6; } if ${lt_cv_prog_compiler_rtti_exceptions+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_rtti_exceptions=no ac_outfile=conftest.$ac_objext echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="-fno-rtti -fno-exceptions" ## exclude from sc_useless_quotes_in_assignment # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. # The option is referenced via a variable to avoid confusing sed. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) (eval "$lt_compile" 2>conftest.err) ac_status=$? cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 if (exit $ac_status) && test -s "$ac_outfile"; then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings other than the usual output. $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then lt_cv_prog_compiler_rtti_exceptions=yes fi fi $RM conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_rtti_exceptions" >&5 $as_echo "$lt_cv_prog_compiler_rtti_exceptions" >&6; } if test yes = "$lt_cv_prog_compiler_rtti_exceptions"; then lt_prog_compiler_no_builtin_flag="$lt_prog_compiler_no_builtin_flag -fno-rtti -fno-exceptions" else : fi fi lt_prog_compiler_wl= lt_prog_compiler_pic= lt_prog_compiler_static= if test yes = "$GCC"; then lt_prog_compiler_wl='-Wl,' lt_prog_compiler_static='-static' case $host_os in aix*) # All AIX code is PIC. if test ia64 = "$host_cpu"; then # AIX 5 now supports IA64 processor lt_prog_compiler_static='-Bstatic' fi lt_prog_compiler_pic='-fPIC' ;; amigaos*) case $host_cpu in powerpc) # see comment about AmigaOS4 .so support lt_prog_compiler_pic='-fPIC' ;; m68k) # FIXME: we need at least 68020 code to build shared libraries, but # adding the '-m68020' flag to GCC prevents building anything better, # like '-m68040'. lt_prog_compiler_pic='-m68020 -resident32 -malways-restore-a4' ;; esac ;; beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) # PIC is the default for these OSes. ;; mingw* | cygwin* | pw32* | os2* | cegcc*) # This hack is so that the source file can tell whether it is being # built for inclusion in a dll (and should export symbols for example). # Although the cygwin gcc ignores -fPIC, still need this for old-style # (--disable-auto-import) libraries lt_prog_compiler_pic='-DDLL_EXPORT' case $host_os in os2*) lt_prog_compiler_static='$wl-static' ;; esac ;; darwin* | rhapsody*) # PIC is the default on this platform # Common symbols not allowed in MH_DYLIB files lt_prog_compiler_pic='-fno-common' ;; haiku*) # PIC is the default for Haiku. # The "-static" flag exists, but is broken. lt_prog_compiler_static= ;; hpux*) # PIC is the default for 64-bit PA HP-UX, but not for 32-bit # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag # sets the default TLS model and affects inlining. case $host_cpu in hppa*64*) # +Z the default ;; *) lt_prog_compiler_pic='-fPIC' ;; esac ;; interix[3-9]*) # Interix 3.x gcc -fpic/-fPIC options generate broken code. 
# Instead, we relocate shared libraries at runtime. ;; msdosdjgpp*) # Just because we use GCC doesn't mean we suddenly get shared libraries # on systems that don't support them. lt_prog_compiler_can_build_shared=no enable_shared=no ;; *nto* | *qnx*) # QNX uses GNU C++, but need to define -shared option too, otherwise # it will coredump. lt_prog_compiler_pic='-fPIC -shared' ;; sysv4*MP*) if test -d /usr/nec; then lt_prog_compiler_pic=-Kconform_pic fi ;; *) lt_prog_compiler_pic='-fPIC' ;; esac case $cc_basename in nvcc*) # Cuda Compiler Driver 2.2 lt_prog_compiler_wl='-Xlinker ' if test -n "$lt_prog_compiler_pic"; then lt_prog_compiler_pic="-Xcompiler $lt_prog_compiler_pic" fi ;; esac else # PORTME Check for flag to pass linker flags through the system compiler. case $host_os in aix*) lt_prog_compiler_wl='-Wl,' if test ia64 = "$host_cpu"; then # AIX 5 now supports IA64 processor lt_prog_compiler_static='-Bstatic' else lt_prog_compiler_static='-bnso -bI:/lib/syscalls.exp' fi ;; darwin* | rhapsody*) # PIC is the default on this platform # Common symbols not allowed in MH_DYLIB files lt_prog_compiler_pic='-fno-common' case $cc_basename in nagfor*) # NAG Fortran compiler lt_prog_compiler_wl='-Wl,-Wl,,' lt_prog_compiler_pic='-PIC' lt_prog_compiler_static='-Bstatic' ;; esac ;; mingw* | cygwin* | pw32* | os2* | cegcc*) # This hack is so that the source file can tell whether it is being # built for inclusion in a dll (and should export symbols for example). lt_prog_compiler_pic='-DDLL_EXPORT' case $host_os in os2*) lt_prog_compiler_static='$wl-static' ;; esac ;; hpux9* | hpux10* | hpux11*) lt_prog_compiler_wl='-Wl,' # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but # not for PA HP-UX. case $host_cpu in hppa*64*|ia64*) # +Z the default ;; *) lt_prog_compiler_pic='+Z' ;; esac # Is there a better lt_prog_compiler_static that works with the bundled CC? lt_prog_compiler_static='$wl-a ${wl}archive' ;; irix5* | irix6* | nonstopux*) lt_prog_compiler_wl='-Wl,' # PIC (with -KPIC) is the default. lt_prog_compiler_static='-non_shared' ;; linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) case $cc_basename in # old Intel for x86_64, which still supported -KPIC. ecc*) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-static' ;; # icc used to be incompatible with GCC. # ICC 10 doesn't accept -KPIC any more. icc* | ifort*) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-fPIC' lt_prog_compiler_static='-static' ;; # Lahey Fortran 8.1. lf95*) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='--shared' lt_prog_compiler_static='--static' ;; nagfor*) # NAG Fortran compiler lt_prog_compiler_wl='-Wl,-Wl,,' lt_prog_compiler_pic='-PIC' lt_prog_compiler_static='-Bstatic' ;; tcc*) # Fabrice Bellard et al's Tiny C Compiler lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-fPIC' lt_prog_compiler_static='-static' ;; pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*) # Portland Group compilers (*not* the Pentium gcc compiler, # which looks to be a dead project) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-fpic' lt_prog_compiler_static='-Bstatic' ;; ccc*) lt_prog_compiler_wl='-Wl,' # All Alpha code is PIC. 
lt_prog_compiler_static='-non_shared' ;; xl* | bgxl* | bgf* | mpixl*) # IBM XL C 8.0/Fortran 10.1, 11.1 on PPC and BlueGene lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-qpic' lt_prog_compiler_static='-qstaticlink' ;; *) case `$CC -V 2>&1 | sed 5q` in *Sun\ Ceres\ Fortran* | *Sun*Fortran*\ [1-7].* | *Sun*Fortran*\ 8.[0-3]*) # Sun Fortran 8.3 passes all unrecognized flags to the linker lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-Bstatic' lt_prog_compiler_wl='' ;; *Sun\ F* | *Sun*Fortran*) lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-Bstatic' lt_prog_compiler_wl='-Qoption ld ' ;; *Sun\ C*) # Sun C 5.9 lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-Bstatic' lt_prog_compiler_wl='-Wl,' ;; *Intel*\ [CF]*Compiler*) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-fPIC' lt_prog_compiler_static='-static' ;; *Portland\ Group*) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-fpic' lt_prog_compiler_static='-Bstatic' ;; esac ;; esac ;; newsos6) lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-Bstatic' ;; *nto* | *qnx*) # QNX uses GNU C++, but need to define -shared option too, otherwise # it will coredump. lt_prog_compiler_pic='-fPIC -shared' ;; osf3* | osf4* | osf5*) lt_prog_compiler_wl='-Wl,' # All OSF/1 code is PIC. lt_prog_compiler_static='-non_shared' ;; rdos*) lt_prog_compiler_static='-non_shared' ;; solaris*) lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-Bstatic' case $cc_basename in f77* | f90* | f95* | sunf77* | sunf90* | sunf95*) lt_prog_compiler_wl='-Qoption ld ';; *) lt_prog_compiler_wl='-Wl,';; esac ;; sunos4*) lt_prog_compiler_wl='-Qoption ld ' lt_prog_compiler_pic='-PIC' lt_prog_compiler_static='-Bstatic' ;; sysv4 | sysv4.2uw2* | sysv4.3*) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-Bstatic' ;; sysv4*MP*) if test -d /usr/nec; then lt_prog_compiler_pic='-Kconform_pic' lt_prog_compiler_static='-Bstatic' fi ;; sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-Bstatic' ;; unicos*) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_can_build_shared=no ;; uts4*) lt_prog_compiler_pic='-pic' lt_prog_compiler_static='-Bstatic' ;; *) lt_prog_compiler_can_build_shared=no ;; esac fi case $host_os in # For platforms that do not support PIC, -DPIC is meaningless: *djgpp*) lt_prog_compiler_pic= ;; *) lt_prog_compiler_pic="$lt_prog_compiler_pic -DPIC" ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 $as_echo_n "checking for $compiler option to produce PIC... " >&6; } if ${lt_cv_prog_compiler_pic+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_pic=$lt_prog_compiler_pic fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic" >&5 $as_echo "$lt_cv_prog_compiler_pic" >&6; } lt_prog_compiler_pic=$lt_cv_prog_compiler_pic # # Check to make sure the PIC flag actually works. # if test -n "$lt_prog_compiler_pic"; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler PIC flag $lt_prog_compiler_pic works" >&5 $as_echo_n "checking if $compiler PIC flag $lt_prog_compiler_pic works... 
" >&6; } if ${lt_cv_prog_compiler_pic_works+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_pic_works=no ac_outfile=conftest.$ac_objext echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="$lt_prog_compiler_pic -DPIC" ## exclude from sc_useless_quotes_in_assignment # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. # The option is referenced via a variable to avoid confusing sed. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) (eval "$lt_compile" 2>conftest.err) ac_status=$? cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 if (exit $ac_status) && test -s "$ac_outfile"; then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings other than the usual output. $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then lt_cv_prog_compiler_pic_works=yes fi fi $RM conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_works" >&5 $as_echo "$lt_cv_prog_compiler_pic_works" >&6; } if test yes = "$lt_cv_prog_compiler_pic_works"; then case $lt_prog_compiler_pic in "" | " "*) ;; *) lt_prog_compiler_pic=" $lt_prog_compiler_pic" ;; esac else lt_prog_compiler_pic= lt_prog_compiler_can_build_shared=no fi fi # # Check to make sure the static flag actually works. # wl=$lt_prog_compiler_wl eval lt_tmp_static_flag=\"$lt_prog_compiler_static\" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler static flag $lt_tmp_static_flag works" >&5 $as_echo_n "checking if $compiler static flag $lt_tmp_static_flag works... " >&6; } if ${lt_cv_prog_compiler_static_works+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_static_works=no save_LDFLAGS=$LDFLAGS LDFLAGS="$LDFLAGS $lt_tmp_static_flag" echo "$lt_simple_link_test_code" > conftest.$ac_ext if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then # The linker can only warn and ignore the option if not recognized # So say no if there are warnings if test -s conftest.err; then # Append any errors to the config.log. cat conftest.err 1>&5 $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if diff conftest.exp conftest.er2 >/dev/null; then lt_cv_prog_compiler_static_works=yes fi else lt_cv_prog_compiler_static_works=yes fi fi $RM -r conftest* LDFLAGS=$save_LDFLAGS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_static_works" >&5 $as_echo "$lt_cv_prog_compiler_static_works" >&6; } if test yes = "$lt_cv_prog_compiler_static_works"; then : else lt_prog_compiler_static= fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 $as_echo_n "checking if $compiler supports -c -o file.$ac_objext... 
" >&6; } if ${lt_cv_prog_compiler_c_o+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_c_o=no $RM -r conftest 2>/dev/null mkdir conftest cd conftest mkdir out echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="-o out/conftest2.$ac_objext" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) (eval "$lt_compile" 2>out/conftest.err) ac_status=$? cat out/conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 if (exit $ac_status) && test -s out/conftest2.$ac_objext then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then lt_cv_prog_compiler_c_o=yes fi fi chmod u+w . 2>&5 $RM conftest* # SGI C++ compiler will create directory out/ii_files/ for # template instantiation test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files $RM out/* && rmdir out cd .. $RM -r conftest $RM conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o" >&5 $as_echo "$lt_cv_prog_compiler_c_o" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 $as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; } if ${lt_cv_prog_compiler_c_o+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_c_o=no $RM -r conftest 2>/dev/null mkdir conftest cd conftest mkdir out echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="-o out/conftest2.$ac_objext" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) (eval "$lt_compile" 2>out/conftest.err) ac_status=$? cat out/conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 if (exit $ac_status) && test -s out/conftest2.$ac_objext then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then lt_cv_prog_compiler_c_o=yes fi fi chmod u+w . 2>&5 $RM conftest* # SGI C++ compiler will create directory out/ii_files/ for # template instantiation test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files $RM out/* && rmdir out cd .. 
$RM -r conftest $RM conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o" >&5 $as_echo "$lt_cv_prog_compiler_c_o" >&6; } hard_links=nottested if test no = "$lt_cv_prog_compiler_c_o" && test no != "$need_locks"; then # do not overwrite the value of need_locks provided by the user { $as_echo "$as_me:${as_lineno-$LINENO}: checking if we can lock with hard links" >&5 $as_echo_n "checking if we can lock with hard links... " >&6; } hard_links=yes $RM conftest* ln conftest.a conftest.b 2>/dev/null && hard_links=no touch conftest.a ln conftest.a conftest.b 2>&5 || hard_links=no ln conftest.a conftest.b 2>/dev/null && hard_links=no { $as_echo "$as_me:${as_lineno-$LINENO}: result: $hard_links" >&5 $as_echo "$hard_links" >&6; } if test no = "$hard_links"; then { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: '$CC' does not support '-c -o', so 'make -j' may be unsafe" >&5 $as_echo "$as_me: WARNING: '$CC' does not support '-c -o', so 'make -j' may be unsafe" >&2;} need_locks=warn fi else need_locks=no fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5 $as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; } runpath_var= allow_undefined_flag= always_export_symbols=no archive_cmds= archive_expsym_cmds= compiler_needs_object=no enable_shared_with_static_runtimes=no export_dynamic_flag_spec= export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' hardcode_automatic=no hardcode_direct=no hardcode_direct_absolute=no hardcode_libdir_flag_spec= hardcode_libdir_separator= hardcode_minus_L=no hardcode_shlibpath_var=unsupported inherit_rpath=no link_all_deplibs=unknown module_cmds= module_expsym_cmds= old_archive_from_new_cmds= old_archive_from_expsyms_cmds= thread_safe_flag_spec= whole_archive_flag_spec= # include_expsyms should be a list of space-separated symbols to be *always* # included in the symbol list include_expsyms= # exclude_expsyms can be an extended regexp of symbols to exclude # it will be wrapped by ' (' and ')$', so one must not match beginning or # end of line. Example: 'a|bc|.*d.*' will exclude the symbols 'a' and 'bc', # as well as any symbol that contains 'd'. exclude_expsyms='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*' # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out # platforms (ab)use it in PIC code, but their linkers get confused if # the symbol is explicitly referenced. Since portable code cannot # rely on this symbol name, it's probably fine to never include it in # preloaded symbol tables. # Exclude shared library initialization/finalization symbols. extract_expsyms_cmds= case $host_os in cygwin* | mingw* | pw32* | cegcc*) # FIXME: the MSVC++ port hasn't been tested in a loooong time # When not using gcc, we currently assume that we are using # Microsoft Visual C++. if test yes != "$GCC"; then with_gnu_ld=no fi ;; interix*) # we just hope/assume this is gcc and not c89 (= MSVC++) with_gnu_ld=yes ;; openbsd* | bitrig*) with_gnu_ld=no ;; esac ld_shlibs=yes # On some targets, GNU ld is compatible enough with the native linker # that we're better off using the native interface for both. lt_use_gnu_ld_interface=no if test yes = "$with_gnu_ld"; then case $host_os in aix*) # The AIX port of GNU ld has always aspired to compatibility # with the native linker. 
However, as the warning in the GNU ld # block says, versions before 2.19.5* couldn't really create working # shared libraries, regardless of the interface used. case `$LD -v 2>&1` in *\ \(GNU\ Binutils\)\ 2.19.5*) ;; *\ \(GNU\ Binutils\)\ 2.[2-9]*) ;; *\ \(GNU\ Binutils\)\ [3-9]*) ;; *) lt_use_gnu_ld_interface=yes ;; esac ;; *) lt_use_gnu_ld_interface=yes ;; esac fi if test yes = "$lt_use_gnu_ld_interface"; then # If archive_cmds runs LD, not CC, wlarc should be empty wlarc='$wl' # Set some defaults for GNU ld with shared library support. These # are reset later if shared libraries are not supported. Putting them # here allows them to be overridden if necessary. runpath_var=LD_RUN_PATH hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' export_dynamic_flag_spec='$wl--export-dynamic' # ancient GNU ld didn't support --whole-archive et. al. if $LD --help 2>&1 | $GREP 'no-whole-archive' > /dev/null; then whole_archive_flag_spec=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive' else whole_archive_flag_spec= fi supports_anon_versioning=no case `$LD -v | $SED -e 's/(^)\+)\s\+//' 2>&1` in *GNU\ gold*) supports_anon_versioning=yes ;; *\ [01].* | *\ 2.[0-9].* | *\ 2.10.*) ;; # catch versions < 2.11 *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... *\ 2.11.*) ;; # other 2.11 versions *) supports_anon_versioning=yes ;; esac # See if GNU ld supports shared libraries. case $host_os in aix[3-9]*) # On AIX/PPC, the GNU linker is very broken if test ia64 != "$host_cpu"; then ld_shlibs=no cat <<_LT_EOF 1>&2 *** Warning: the GNU linker, at least up to release 2.19, is reported *** to be unable to reliably create shared libraries on AIX. *** Therefore, libtool is disabling shared libraries support. If you *** really care for shared libraries, you may want to install binutils *** 2.20 or above, or modify your PATH so that a non-GNU linker is found. *** You will then need to restart the configuration process. _LT_EOF fi ;; amigaos*) case $host_cpu in powerpc) # see comment about AmigaOS4 .so support archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' archive_expsym_cmds='' ;; m68k) archive_cmds='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' hardcode_libdir_flag_spec='-L$libdir' hardcode_minus_L=yes ;; esac ;; beos*) if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then allow_undefined_flag=unsupported # Joseph Beckenbach says some releases of gcc # support --undefined. This deserves some investigation. FIXME archive_cmds='$CC -nostart $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' else ld_shlibs=no fi ;; cygwin* | mingw* | pw32* | cegcc*) # _LT_TAGVAR(hardcode_libdir_flag_spec, ) is actually meaningless, # as there is no search path for DLLs. 
hardcode_libdir_flag_spec='-L$libdir' export_dynamic_flag_spec='$wl--export-all-symbols' allow_undefined_flag=unsupported always_export_symbols=no enable_shared_with_static_runtimes=yes export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols' exclude_expsyms='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname' if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' # If the export-symbols file already is a .def file, use it as # is; otherwise, prepend EXPORTS... archive_expsym_cmds='if test DEF = "`$SED -n -e '\''s/^[ ]*//'\'' -e '\''/^\(;.*\)*$/d'\'' -e '\''s/^\(EXPORTS\|LIBRARY\)\([ ].*\)*$/DEF/p'\'' -e q $export_symbols`" ; then cp $export_symbols $output_objdir/$soname.def; else echo EXPORTS > $output_objdir/$soname.def; cat $export_symbols >> $output_objdir/$soname.def; fi~ $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' else ld_shlibs=no fi ;; haiku*) archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' link_all_deplibs=yes ;; os2*) hardcode_libdir_flag_spec='-L$libdir' hardcode_minus_L=yes allow_undefined_flag=unsupported shrext_cmds=.dll archive_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ $ECHO EXPORTS >> $output_objdir/$libname.def~ emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~ $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ emximp -o $lib $output_objdir/$libname.def' archive_expsym_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ $ECHO EXPORTS >> $output_objdir/$libname.def~ prefix_cmds="$SED"~ if test EXPORTS = "`$SED 1q $export_symbols`"; then prefix_cmds="$prefix_cmds -e 1d"; fi~ prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~ cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~ $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ emximp -o $lib $output_objdir/$libname.def' old_archive_From_new_cmds='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def' enable_shared_with_static_runtimes=yes ;; interix[3-9]*) hardcode_direct=no hardcode_shlibpath_var=no hardcode_libdir_flag_spec='$wl-rpath,$libdir' export_dynamic_flag_spec='$wl-E' # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. # Instead, shared libraries are loaded at an image base (0x10000000 by # default) and relocated if they conflict, which is a slow very memory # consuming and fragmenting process. To avoid this, we pick a random, # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link # time. Moving up from 0x10000000 also allows more sbrk(2) space. 
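# Illustrative only (not part of the generated libtool logic): a worked example
# of the image-base arithmetic used in the Interix commands just below,
#   expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280
#   ${RANDOM-$$} % 4096   -> a value in 0..4095
#   ... / 2               -> 0..2047 (integer division)
#   ... \* 262144         -> a 256 KiB-aligned offset, 0..0x1FFC0000
#   ... + 1342177280      -> that offset added to 0x50000000, i.e. an image
#                            base in the advertised 0x50000000..0x6FFC0000 range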
archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' archive_expsym_cmds='sed "s|^|_|" $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--retain-symbols-file,$output_objdir/$soname.expsym $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' ;; gnu* | linux* | tpf* | k*bsd*-gnu | kopensolaris*-gnu) tmp_diet=no if test linux-dietlibc = "$host_os"; then case $cc_basename in diet\ *) tmp_diet=yes;; # linux-dietlibc with static linking (!diet-dyn) esac fi if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \ && test no = "$tmp_diet" then tmp_addflag=' $pic_flag' tmp_sharedflag='-shared' case $cc_basename,$host_cpu in pgcc*) # Portland Group C compiler whole_archive_flag_spec='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' tmp_addflag=' $pic_flag' ;; pgf77* | pgf90* | pgf95* | pgfortran*) # Portland Group f77 and f90 compilers whole_archive_flag_spec='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' tmp_addflag=' $pic_flag -Mnomain' ;; ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 tmp_addflag=' -i_dynamic' ;; efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64 tmp_addflag=' -i_dynamic -nofor_main' ;; ifc* | ifort*) # Intel Fortran compiler tmp_addflag=' -nofor_main' ;; lf95*) # Lahey Fortran 8.1 whole_archive_flag_spec= tmp_sharedflag='--shared' ;; nagfor*) # NAGFOR 5.3 tmp_sharedflag='-Wl,-shared' ;; xl[cC]* | bgxl[cC]* | mpixl[cC]*) # IBM XL C 8.0 on PPC (deal with xlf below) tmp_sharedflag='-qmkshrobj' tmp_addflag= ;; nvcc*) # Cuda Compiler Driver 2.2 whole_archive_flag_spec='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' compiler_needs_object=yes ;; esac case `$CC -V 2>&1 | sed 5q` in *Sun\ C*) # Sun C 5.9 whole_archive_flag_spec='$wl--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' compiler_needs_object=yes tmp_sharedflag='-G' ;; *Sun\ F*) # Sun Fortran 8.3 tmp_sharedflag='-G' ;; esac archive_cmds='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' if test yes = "$supports_anon_versioning"; then archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ echo "local: *; };" >> $output_objdir/$libname.ver~ $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-version-script $wl$output_objdir/$libname.ver -o $lib' fi case $cc_basename in tcc*) export_dynamic_flag_spec='-rdynamic' ;; xlf* | bgf* | bgxlf* | mpixlf*) # IBM XL Fortran 10.1 on PPC cannot create shared libs itself whole_archive_flag_spec='--whole-archive$convenience --no-whole-archive' hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' archive_cmds='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib' if test yes = "$supports_anon_versioning"; then 
archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ echo "local: *; };" >> $output_objdir/$libname.ver~ $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' fi ;; esac else ld_shlibs=no fi ;; netbsd*) if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then archive_cmds='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' wlarc= else archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' fi ;; solaris*) if $LD -v 2>&1 | $GREP 'BFD 2\.8' > /dev/null; then ld_shlibs=no cat <<_LT_EOF 1>&2 *** Warning: The releases 2.8.* of the GNU linker cannot reliably *** create shared libraries on Solaris systems. Therefore, libtool *** is disabling shared libraries support. We urge you to upgrade GNU *** binutils to release 2.9.1 or newer. Another option is to modify *** your PATH or compiler configuration so that the native linker is *** used, and then restart. _LT_EOF elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' else ld_shlibs=no fi ;; sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) case `$LD -v 2>&1` in *\ [01].* | *\ 2.[0-9].* | *\ 2.1[0-5].*) ld_shlibs=no cat <<_LT_EOF 1>&2 *** Warning: Releases of the GNU linker prior to 2.16.91.0.3 cannot *** reliably create shared libraries on SCO systems. Therefore, libtool *** is disabling shared libraries support. We urge you to upgrade GNU *** binutils to release 2.16.91.0.3 or newer. Another option is to modify *** your PATH or compiler configuration so that the native linker is *** used, and then restart. _LT_EOF ;; *) # For security reasons, it is highly recommended that you always # use absolute paths for naming shared libraries, and exclude the # DT_RUNPATH tag from executables and libraries. But doing so # requires that you compile everything twice, which is a pain. 
if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' else ld_shlibs=no fi ;; esac ;; sunos4*) archive_cmds='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' wlarc= hardcode_direct=yes hardcode_shlibpath_var=no ;; *) if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' else ld_shlibs=no fi ;; esac if test no = "$ld_shlibs"; then runpath_var= hardcode_libdir_flag_spec= export_dynamic_flag_spec= whole_archive_flag_spec= fi else # PORTME fill in a description of your system's linker (not GNU ld) case $host_os in aix3*) allow_undefined_flag=unsupported always_export_symbols=yes archive_expsym_cmds='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' # Note: this linker hardcodes the directories in LIBPATH if there # are no directories specified by -L. hardcode_minus_L=yes if test yes = "$GCC" && test -z "$lt_prog_compiler_static"; then # Neither direct hardcoding nor static linking is supported with a # broken collect2. hardcode_direct=unsupported fi ;; aix[4-9]*) if test ia64 = "$host_cpu"; then # On IA64, the linker does run time linking by default, so we don't # have to do anything special. aix_use_runtimelinking=no exp_sym_flag='-Bexport' no_entry_flag= else # If we're using GNU nm, then we don't want the "-C" option. # -C means demangle to GNU nm, but means don't demangle to AIX nm. # Without the "-l" option, or with the "-B" option, AIX nm treats # weak defined symbols like other global defined symbols, whereas # GNU nm marks them as "W". # While the 'weak' keyword is ignored in the Export File, we need # it in the Import File for the 'aix-soname' feature, so we have # to replace the "-B" option with "-P" for AIX nm. if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then export_symbols_cmds='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && (substr(\$ 3,1,1) != ".")) { if (\$ 2 == "W") { print \$ 3 " weak" } else { print \$ 3 } } }'\'' | sort -u > $export_symbols' else export_symbols_cmds='`func_echo_all $NM | $SED -e '\''s/B\([^B]*\)$/P\1/'\''` -PCpgl $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) && (substr(\$ 1,1,1) != ".")) { if ((\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) { print \$ 1 " weak" } else { print \$ 1 } } }'\'' | sort -u > $export_symbols' fi aix_use_runtimelinking=no # Test if we are trying to use run time linking or normal # AIX style linking. If -brtl is somewhere in LDFLAGS, we # have runtime linking enabled, and use it for executables. 
# For shared libraries, we enable/disable runtime linking # depending on the kind of the shared library created - # when "with_aix_soname,aix_use_runtimelinking" is: # "aix,no" lib.a(lib.so.V) shared, rtl:no, for executables # "aix,yes" lib.so shared, rtl:yes, for executables # lib.a static archive # "both,no" lib.so.V(shr.o) shared, rtl:yes # lib.a(lib.so.V) shared, rtl:no, for executables # "both,yes" lib.so.V(shr.o) shared, rtl:yes, for executables # lib.a(lib.so.V) shared, rtl:no # "svr4,*" lib.so.V(shr.o) shared, rtl:yes, for executables # lib.a static archive case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*) for ld_flag in $LDFLAGS; do if (test x-brtl = "x$ld_flag" || test x-Wl,-brtl = "x$ld_flag"); then aix_use_runtimelinking=yes break fi done if test svr4,no = "$with_aix_soname,$aix_use_runtimelinking"; then # With aix-soname=svr4, we create the lib.so.V shared archives only, # so we don't have lib.a shared libs to link our executables. # We have to force runtime linking in this case. aix_use_runtimelinking=yes LDFLAGS="$LDFLAGS -Wl,-brtl" fi ;; esac exp_sym_flag='-bexport' no_entry_flag='-bnoentry' fi # When large executables or shared objects are built, AIX ld can # have problems creating the table of contents. If linking a library # or program results in "error TOC overflow" add -mminimal-toc to # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. archive_cmds='' hardcode_direct=yes hardcode_direct_absolute=yes hardcode_libdir_separator=':' link_all_deplibs=yes file_list_spec='$wl-f,' case $with_aix_soname,$aix_use_runtimelinking in aix,*) ;; # traditional, no import file svr4,* | *,yes) # use import file # The Import File defines what to hardcode. hardcode_direct=no hardcode_direct_absolute=no ;; esac if test yes = "$GCC"; then case $host_os in aix4.[012]|aix4.[012].*) # We only want to do this on AIX 4.2 and lower, the check # below for broken collect2 doesn't work under 4.3+ collect2name=`$CC -print-prog-name=collect2` if test -f "$collect2name" && strings "$collect2name" | $GREP resolve_lib_name >/dev/null then # We have reworked collect2 : else # We have old collect2 hardcode_direct=unsupported # It fails to find uninstalled libraries when the uninstalled # path is not listed in the libpath. Setting hardcode_minus_L # to unsupported forces relinking hardcode_minus_L=yes hardcode_libdir_flag_spec='-L$libdir' hardcode_libdir_separator= fi ;; esac shared_flag='-shared' if test yes = "$aix_use_runtimelinking"; then shared_flag="$shared_flag "'$wl-G' fi # Need to ensure runtime linking is disabled for the traditional # shared library, or the linker may eventually find shared libraries # /with/ Import File - we do not want to mix them. shared_flag_aix='-shared' shared_flag_svr4='-shared $wl-G' else # not using gcc if test ia64 = "$host_cpu"; then # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release # chokes on -Wl,-G. The following line is correct: shared_flag='-G' else if test yes = "$aix_use_runtimelinking"; then shared_flag='$wl-G' else shared_flag='$wl-bM:SRE' fi shared_flag_aix='$wl-bM:SRE' shared_flag_svr4='$wl-G' fi fi export_dynamic_flag_spec='$wl-bexpall' # It seems that -bexpall does not export symbols beginning with # underscore (_), so it is better to generate a list of symbols to export. 
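# Illustrative only: the export_symbols_cmds defined above turn nm output into
# the plain symbol list consumed via -bE:/-bexport.  With GNU nm, a line such as
#   0000000000001234 T tsk_img_open
# (the symbol name is just an example) becomes the single line
#   tsk_img_open
# in $export_symbols, and weak symbols get " weak" appended.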
always_export_symbols=yes if test aix,yes = "$with_aix_soname,$aix_use_runtimelinking"; then # Warning - without using the other runtime loading flags (-brtl), # -berok will link without error, but may produce a broken library. allow_undefined_flag='-berok' # Determine the default libpath from the value encoded in an # empty executable. if test set = "${lt_cv_aix_libpath+set}"; then aix_libpath=$lt_cv_aix_libpath else if ${lt_cv_aix_libpath_+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : lt_aix_libpath_sed=' /Import File Strings/,/^$/ { /^0/ { s/^0 *\([^ ]*\) *$/\1/ p } }' lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` # Check for a 64-bit object if we didn't find anything. if test -z "$lt_cv_aix_libpath_"; then lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` fi fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext if test -z "$lt_cv_aix_libpath_"; then lt_cv_aix_libpath_=/usr/lib:/lib fi fi aix_libpath=$lt_cv_aix_libpath_ fi hardcode_libdir_flag_spec='$wl-blibpath:$libdir:'"$aix_libpath" archive_expsym_cmds='$CC -o $output_objdir/$soname $libobjs $deplibs $wl'$no_entry_flag' $compiler_flags `if test -n "$allow_undefined_flag"; then func_echo_all "$wl$allow_undefined_flag"; else :; fi` $wl'$exp_sym_flag:\$export_symbols' '$shared_flag else if test ia64 = "$host_cpu"; then hardcode_libdir_flag_spec='$wl-R $libdir:/usr/lib:/lib' allow_undefined_flag="-z nodefs" archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\$wl$no_entry_flag"' $compiler_flags $wl$allow_undefined_flag '"\$wl$exp_sym_flag:\$export_symbols" else # Determine the default libpath from the value encoded in an # empty executable. if test set = "${lt_cv_aix_libpath+set}"; then aix_libpath=$lt_cv_aix_libpath else if ${lt_cv_aix_libpath_+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : lt_aix_libpath_sed=' /Import File Strings/,/^$/ { /^0/ { s/^0 *\([^ ]*\) *$/\1/ p } }' lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` # Check for a 64-bit object if we didn't find anything. if test -z "$lt_cv_aix_libpath_"; then lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` fi fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext if test -z "$lt_cv_aix_libpath_"; then lt_cv_aix_libpath_=/usr/lib:/lib fi fi aix_libpath=$lt_cv_aix_libpath_ fi hardcode_libdir_flag_spec='$wl-blibpath:$libdir:'"$aix_libpath" # Warning - without using the other run time loading flags, # -berok will link without error, but may produce a broken library. no_undefined_flag=' $wl-bernotok' allow_undefined_flag=' $wl-berok' if test yes = "$with_gnu_ld"; then # We only use this code for GNU lds that support --whole-archive. 
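# Illustrative only: with GCC as the driver, $wl expands to '-Wl,', so the spec
# set just below passes something like
#   -Wl,--whole-archive .libs/libconv.a -Wl,--no-whole-archive
# to the linker ('.libs/libconv.a' stands in for a convenience archive), which
# pulls every member of the convenience archive into the shared object.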
whole_archive_flag_spec='$wl--whole-archive$convenience $wl--no-whole-archive' else # Exported symbols can be pulled into shared objects from archives whole_archive_flag_spec='$convenience' fi archive_cmds_need_lc=yes archive_expsym_cmds='$RM -r $output_objdir/$realname.d~$MKDIR $output_objdir/$realname.d' # -brtl affects multiple linker settings, -berok does not and is overridden later compiler_flags_filtered='`func_echo_all "$compiler_flags " | $SED -e "s%-brtl\\([, ]\\)%-berok\\1%g"`' if test svr4 != "$with_aix_soname"; then # This is similar to how AIX traditionally builds its shared libraries. archive_expsym_cmds="$archive_expsym_cmds"'~$CC '$shared_flag_aix' -o $output_objdir/$realname.d/$soname $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$realname.d/$soname' fi if test aix != "$with_aix_soname"; then archive_expsym_cmds="$archive_expsym_cmds"'~$CC '$shared_flag_svr4' -o $output_objdir/$realname.d/$shared_archive_member_spec.o $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$STRIP -e $output_objdir/$realname.d/$shared_archive_member_spec.o~( func_echo_all "#! $soname($shared_archive_member_spec.o)"; if test shr_64 = "$shared_archive_member_spec"; then func_echo_all "# 64"; else func_echo_all "# 32"; fi; cat $export_symbols ) > $output_objdir/$realname.d/$shared_archive_member_spec.imp~$AR $AR_FLAGS $output_objdir/$soname $output_objdir/$realname.d/$shared_archive_member_spec.o $output_objdir/$realname.d/$shared_archive_member_spec.imp' else # used by -dlpreopen to get the symbols archive_expsym_cmds="$archive_expsym_cmds"'~$MV $output_objdir/$realname.d/$soname $output_objdir' fi archive_expsym_cmds="$archive_expsym_cmds"'~$RM -r $output_objdir/$realname.d' fi fi ;; amigaos*) case $host_cpu in powerpc) # see comment about AmigaOS4 .so support archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' archive_expsym_cmds='' ;; m68k) archive_cmds='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' hardcode_libdir_flag_spec='-L$libdir' hardcode_minus_L=yes ;; esac ;; bsdi[45]*) export_dynamic_flag_spec=-rdynamic ;; cygwin* | mingw* | pw32* | cegcc*) # When not using gcc, we currently assume that we are using # Microsoft Visual C++. # hardcode_libdir_flag_spec is actually meaningless, as there is # no search path for DLLs. case $cc_basename in cl*) # Native MSVC hardcode_libdir_flag_spec=' ' allow_undefined_flag=unsupported always_export_symbols=yes file_list_spec='@' # Tell ltmain to make .lib files, not .a files. libext=lib # Tell ltmain to make .dll files, not .so files. shrext_cmds=.dll # FIXME: Setting linknames here is a bad hack. 
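# Illustrative only: the archive_expsym_cmds below handle two shapes of
# $export_symbols.  If its first meaningful line is EXPORTS or LIBRARY it is
# already a .def file and is copied to $soname.def; otherwise each plain
# symbol name, e.g. a hypothetical
#   tsk_fs_open
# is rewritten to
#   -link -EXPORT:tsk_fs_open
# and collected in $soname.exp for the MSVC link step.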
archive_cmds='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~linknames=' archive_expsym_cmds='if test DEF = "`$SED -n -e '\''s/^[ ]*//'\'' -e '\''/^\(;.*\)*$/d'\'' -e '\''s/^\(EXPORTS\|LIBRARY\)\([ ].*\)*$/DEF/p'\'' -e q $export_symbols`" ; then cp "$export_symbols" "$output_objdir/$soname.def"; echo "$tool_output_objdir$soname.def" > "$output_objdir/$soname.exp"; else $SED -e '\''s/^/-link -EXPORT:/'\'' < $export_symbols > $output_objdir/$soname.exp; fi~ $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ linknames=' # The linker will not automatically build a static lib if we build a DLL. # _LT_TAGVAR(old_archive_from_new_cmds, )='true' enable_shared_with_static_runtimes=yes exclude_expsyms='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*' export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1,DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' # Don't use ranlib old_postinstall_cmds='chmod 644 $oldlib' postlink_cmds='lt_outputfile="@OUTPUT@"~ lt_tool_outputfile="@TOOL_OUTPUT@"~ case $lt_outputfile in *.exe|*.EXE) ;; *) lt_outputfile=$lt_outputfile.exe lt_tool_outputfile=$lt_tool_outputfile.exe ;; esac~ if test : != "$MANIFEST_TOOL" && test -f "$lt_outputfile.manifest"; then $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; $RM "$lt_outputfile.manifest"; fi' ;; *) # Assume MSVC wrapper hardcode_libdir_flag_spec=' ' allow_undefined_flag=unsupported # Tell ltmain to make .lib files, not .a files. libext=lib # Tell ltmain to make .dll files, not .so files. shrext_cmds=.dll # FIXME: Setting linknames here is a bad hack. archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' # The linker will automatically build a .lib file if we build a DLL. old_archive_from_new_cmds='true' # FIXME: Should let the user specify the lib program. 
old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs' enable_shared_with_static_runtimes=yes ;; esac ;; darwin* | rhapsody*) archive_cmds_need_lc=no hardcode_direct=no hardcode_automatic=yes hardcode_shlibpath_var=unsupported if test yes = "$lt_cv_ld_force_load"; then whole_archive_flag_spec='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience $wl-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`' else whole_archive_flag_spec='' fi link_all_deplibs=yes allow_undefined_flag=$_lt_dar_allow_undefined case $cc_basename in ifort*|nagfor*) _lt_dar_can_shared=yes ;; *) _lt_dar_can_shared=$GCC ;; esac if test yes = "$_lt_dar_can_shared"; then output_verbose_link_cmd=func_echo_all archive_cmds="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dsymutil" module_cmds="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dsymutil" archive_expsym_cmds="sed 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dar_export_syms$_lt_dsymutil" module_expsym_cmds="sed -e 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dar_export_syms$_lt_dsymutil" else ld_shlibs=no fi ;; dgux*) archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_libdir_flag_spec='-L$libdir' hardcode_shlibpath_var=no ;; # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor # support. Future versions do this automatically, but an explicit c++rt0.o # does not break anything, and helps significantly (at the cost of a little # extra space). freebsd2.2*) archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' hardcode_libdir_flag_spec='-R$libdir' hardcode_direct=yes hardcode_shlibpath_var=no ;; # Unfortunately, older versions of FreeBSD 2 do not have this feature. freebsd2.*) archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' hardcode_direct=yes hardcode_minus_L=yes hardcode_shlibpath_var=no ;; # FreeBSD 3 and greater uses gcc -shared to do shared libraries. freebsd* | dragonfly*) archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' hardcode_libdir_flag_spec='-R$libdir' hardcode_direct=yes hardcode_shlibpath_var=no ;; hpux9*) if test yes = "$GCC"; then archive_cmds='$RM $output_objdir/$soname~$CC -shared $pic_flag $wl+b $wl$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' else archive_cmds='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' fi hardcode_libdir_flag_spec='$wl+b $wl$libdir' hardcode_libdir_separator=: hardcode_direct=yes # hardcode_minus_L: Not really in the search PATH, # but as the default location of the library. 
hardcode_minus_L=yes export_dynamic_flag_spec='$wl-E' ;; hpux10*) if test yes,no = "$GCC,$with_gnu_ld"; then archive_cmds='$CC -shared $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags' else archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' fi if test no = "$with_gnu_ld"; then hardcode_libdir_flag_spec='$wl+b $wl$libdir' hardcode_libdir_separator=: hardcode_direct=yes hardcode_direct_absolute=yes export_dynamic_flag_spec='$wl-E' # hardcode_minus_L: Not really in the search PATH, # but as the default location of the library. hardcode_minus_L=yes fi ;; hpux11*) if test yes,no = "$GCC,$with_gnu_ld"; then case $host_cpu in hppa*64*) archive_cmds='$CC -shared $wl+h $wl$soname -o $lib $libobjs $deplibs $compiler_flags' ;; ia64*) archive_cmds='$CC -shared $pic_flag $wl+h $wl$soname $wl+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' ;; *) archive_cmds='$CC -shared $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags' ;; esac else case $host_cpu in hppa*64*) archive_cmds='$CC -b $wl+h $wl$soname -o $lib $libobjs $deplibs $compiler_flags' ;; ia64*) archive_cmds='$CC -b $wl+h $wl$soname $wl+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' ;; *) # Older versions of the 11.00 compiler do not understand -b yet # (HP92453-01 A.11.01.20 doesn't, HP92453-01 B.11.X.35175-35176.GP does) { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CC understands -b" >&5 $as_echo_n "checking if $CC understands -b... " >&6; } if ${lt_cv_prog_compiler__b+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler__b=no save_LDFLAGS=$LDFLAGS LDFLAGS="$LDFLAGS -b" echo "$lt_simple_link_test_code" > conftest.$ac_ext if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then # The linker can only warn and ignore the option if not recognized # So say no if there are warnings if test -s conftest.err; then # Append any errors to the config.log. cat conftest.err 1>&5 $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if diff conftest.exp conftest.er2 >/dev/null; then lt_cv_prog_compiler__b=yes fi else lt_cv_prog_compiler__b=yes fi fi $RM -r conftest* LDFLAGS=$save_LDFLAGS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler__b" >&5 $as_echo "$lt_cv_prog_compiler__b" >&6; } if test yes = "$lt_cv_prog_compiler__b"; then archive_cmds='$CC -b $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags' else archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' fi ;; esac fi if test no = "$with_gnu_ld"; then hardcode_libdir_flag_spec='$wl+b $wl$libdir' hardcode_libdir_separator=: case $host_cpu in hppa*64*|ia64*) hardcode_direct=no hardcode_shlibpath_var=no ;; *) hardcode_direct=yes hardcode_direct_absolute=yes export_dynamic_flag_spec='$wl-E' # hardcode_minus_L: Not really in the search PATH, # but as the default location of the library. hardcode_minus_L=yes ;; esac fi ;; irix5* | irix6* | nonstopux*) if test yes = "$GCC"; then archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' # Try to use the -exported_symbol ld option, if it does not # work, assume that -exports_file does not work either and # implicitly export all symbols. 
# This should be the same for all languages, so no per-tag cache variable. { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $host_os linker accepts -exported_symbol" >&5 $as_echo_n "checking whether the $host_os linker accepts -exported_symbol... " >&6; } if ${lt_cv_irix_exported_symbol+:} false; then : $as_echo_n "(cached) " >&6 else save_LDFLAGS=$LDFLAGS LDFLAGS="$LDFLAGS -shared $wl-exported_symbol ${wl}foo $wl-update_registry $wl/dev/null" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int foo (void) { return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : lt_cv_irix_exported_symbol=yes else lt_cv_irix_exported_symbol=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LDFLAGS=$save_LDFLAGS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_irix_exported_symbol" >&5 $as_echo "$lt_cv_irix_exported_symbol" >&6; } if test yes = "$lt_cv_irix_exported_symbol"; then archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations $wl-exports_file $wl$export_symbols -o $lib' fi else archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -exports_file $export_symbols -o $lib' fi archive_cmds_need_lc='no' hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' hardcode_libdir_separator=: inherit_rpath=yes link_all_deplibs=yes ;; linux*) case $cc_basename in tcc*) # Fabrice Bellard et al's Tiny C Compiler ld_shlibs=yes archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' ;; esac ;; netbsd*) if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out else archive_cmds='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF fi hardcode_libdir_flag_spec='-R$libdir' hardcode_direct=yes hardcode_shlibpath_var=no ;; newsos6) archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_direct=yes hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' hardcode_libdir_separator=: hardcode_shlibpath_var=no ;; *nto* | *qnx*) ;; openbsd* | bitrig*) if test -f /usr/libexec/ld.so; then hardcode_direct=yes hardcode_shlibpath_var=no hardcode_direct_absolute=yes if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags $wl-retain-symbols-file,$export_symbols' hardcode_libdir_flag_spec='$wl-rpath,$libdir' export_dynamic_flag_spec='$wl-E' else archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' hardcode_libdir_flag_spec='$wl-rpath,$libdir' fi else ld_shlibs=no fi ;; os2*) hardcode_libdir_flag_spec='-L$libdir' hardcode_minus_L=yes allow_undefined_flag=unsupported shrext_cmds=.dll archive_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ $ECHO EXPORTS >> 
$output_objdir/$libname.def~ emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~ $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ emximp -o $lib $output_objdir/$libname.def' archive_expsym_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ $ECHO EXPORTS >> $output_objdir/$libname.def~ prefix_cmds="$SED"~ if test EXPORTS = "`$SED 1q $export_symbols`"; then prefix_cmds="$prefix_cmds -e 1d"; fi~ prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~ cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~ $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ emximp -o $lib $output_objdir/$libname.def' old_archive_From_new_cmds='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def' enable_shared_with_static_runtimes=yes ;; osf3*) if test yes = "$GCC"; then allow_undefined_flag=' $wl-expect_unresolved $wl\*' archive_cmds='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' else allow_undefined_flag=' -expect_unresolved \*' archive_cmds='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' fi archive_cmds_need_lc='no' hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' hardcode_libdir_separator=: ;; osf4* | osf5*) # as osf3* with the addition of -msym flag if test yes = "$GCC"; then allow_undefined_flag=' $wl-expect_unresolved $wl\*' archive_cmds='$CC -shared$allow_undefined_flag $pic_flag $libobjs $deplibs $compiler_flags $wl-msym $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' else allow_undefined_flag=' -expect_unresolved \*' archive_cmds='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' archive_expsym_cmds='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; printf "%s\\n" "-hidden">> $lib.exp~ $CC -shared$allow_undefined_flag $wl-input $wl$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib~$RM $lib.exp' # Both c and cxx compiler support -rpath directly hardcode_libdir_flag_spec='-rpath $libdir' fi archive_cmds_need_lc='no' hardcode_libdir_separator=: ;; solaris*) no_undefined_flag=' -z defs' if test yes = "$GCC"; then wlarc='$wl' archive_cmds='$CC -shared $pic_flag $wl-z ${wl}text $wl-h $wl$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -shared $pic_flag $wl-z ${wl}text $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' else case `$CC -V 2>&1` in *"Compilers 5.0"*) wlarc='' archive_cmds='$LD 
-G$allow_undefined_flag -h $soname -o $lib $libobjs $deplibs $linker_flags' archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $LD -G$allow_undefined_flag -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$RM $lib.exp' ;; *) wlarc='$wl' archive_cmds='$CC -G$allow_undefined_flag -h $soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -G$allow_undefined_flag -M $lib.exp -h $soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' ;; esac fi hardcode_libdir_flag_spec='-R$libdir' hardcode_shlibpath_var=no case $host_os in solaris2.[0-5] | solaris2.[0-5].*) ;; *) # The compiler driver will combine and reorder linker options, # but understands '-z linker_flag'. GCC discards it without '$wl', # but is careful enough not to reorder. # Supported since Solaris 2.6 (maybe 2.5.1?) if test yes = "$GCC"; then whole_archive_flag_spec='$wl-z ${wl}allextract$convenience $wl-z ${wl}defaultextract' else whole_archive_flag_spec='-z allextract$convenience -z defaultextract' fi ;; esac link_all_deplibs=yes ;; sunos4*) if test sequent = "$host_vendor"; then # Use $CC to link under sequent, because it throws in some extra .o # files that make .init and .fini sections work. archive_cmds='$CC -G $wl-h $soname -o $lib $libobjs $deplibs $compiler_flags' else archive_cmds='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' fi hardcode_libdir_flag_spec='-L$libdir' hardcode_direct=yes hardcode_minus_L=yes hardcode_shlibpath_var=no ;; sysv4) case $host_vendor in sni) archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_direct=yes # is this really true??? ;; siemens) ## LD is ld it makes a PLAMLIB ## CC just makes a GrossModule. archive_cmds='$LD -G -o $lib $libobjs $deplibs $linker_flags' reload_cmds='$CC -r -o $output$reload_objs' hardcode_direct=no ;; motorola) archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_direct=no #Motorola manual says yes, but my tests say they lie ;; esac runpath_var='LD_RUN_PATH' hardcode_shlibpath_var=no ;; sysv4.3*) archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_shlibpath_var=no export_dynamic_flag_spec='-Bexport' ;; sysv4*MP*) if test -d /usr/nec; then archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_shlibpath_var=no runpath_var=LD_RUN_PATH hardcode_runpath_var=yes ld_shlibs=yes fi ;; sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*) no_undefined_flag='$wl-z,text' archive_cmds_need_lc=no hardcode_shlibpath_var=no runpath_var='LD_RUN_PATH' if test yes = "$GCC"; then archive_cmds='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' else archive_cmds='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' fi ;; sysv5* | sco3.2v5* | sco5v6*) # Note: We CANNOT use -z defs as we might desire, because we do not # link with -lc, and that would cause any symbols used from libc to # always be unresolved, which means just about no library would # ever link correctly. 
If we're not using GNU ld we use -z text # though, which does catch some bad symbols but isn't as heavy-handed # as -z defs. no_undefined_flag='$wl-z,text' allow_undefined_flag='$wl-z,nodefs' archive_cmds_need_lc=no hardcode_shlibpath_var=no hardcode_libdir_flag_spec='$wl-R,$libdir' hardcode_libdir_separator=':' link_all_deplibs=yes export_dynamic_flag_spec='$wl-Bexport' runpath_var='LD_RUN_PATH' if test yes = "$GCC"; then archive_cmds='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' else archive_cmds='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' fi ;; uts4*) archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_libdir_flag_spec='-L$libdir' hardcode_shlibpath_var=no ;; *) ld_shlibs=no ;; esac if test sni = "$host_vendor"; then case $host in sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) export_dynamic_flag_spec='$wl-Blargedynsym' ;; esac fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs" >&5 $as_echo "$ld_shlibs" >&6; } test no = "$ld_shlibs" && can_build_shared=no with_gnu_ld=$with_gnu_ld # # Do we need to explicitly link libc? # case "x$archive_cmds_need_lc" in x|xyes) # Assume -lc should be added archive_cmds_need_lc=yes if test yes,yes = "$GCC,$enable_shared"; then case $archive_cmds in *'~'*) # FIXME: we may have to deal with multi-command sequences. ;; '$CC '*) # Test whether the compiler implicitly links with -lc since on some # systems, -lgcc has to come before -lc. If gcc already passes -lc # to ld, don't add -lc before -lgcc. { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether -lc should be explicitly linked in" >&5 $as_echo_n "checking whether -lc should be explicitly linked in... " >&6; } if ${lt_cv_archive_cmds_need_lc+:} false; then : $as_echo_n "(cached) " >&6 else $RM conftest* echo "$lt_simple_compile_test_code" > conftest.$ac_ext if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } 2>conftest.err; then soname=conftest lib=conftest libobjs=conftest.$ac_objext deplibs= wl=$lt_prog_compiler_wl pic_flag=$lt_prog_compiler_pic compiler_flags=-v linker_flags=-v verstring= output_objdir=. libname=conftest lt_save_allow_undefined_flag=$allow_undefined_flag allow_undefined_flag= if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1\""; } >&5 (eval $archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } then lt_cv_archive_cmds_need_lc=no else lt_cv_archive_cmds_need_lc=yes fi allow_undefined_flag=$lt_save_allow_undefined_flag else cat conftest.err 1>&5 fi $RM conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_archive_cmds_need_lc" >&5 $as_echo "$lt_cv_archive_cmds_need_lc" >&6; } archive_cmds_need_lc=$lt_cv_archive_cmds_need_lc ;; esac fi ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking dynamic linker characteristics" >&5 $as_echo_n "checking dynamic linker characteristics... 
" >&6; } if test yes = "$GCC"; then case $host_os in darwin*) lt_awk_arg='/^libraries:/,/LR/' ;; *) lt_awk_arg='/^libraries:/' ;; esac case $host_os in mingw* | cegcc*) lt_sed_strip_eq='s|=\([A-Za-z]:\)|\1|g' ;; *) lt_sed_strip_eq='s|=/|/|g' ;; esac lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e $lt_sed_strip_eq` case $lt_search_path_spec in *\;*) # if the path contains ";" then we assume it to be the separator # otherwise default to the standard path separator (i.e. ":") - it is # assumed that no part of a normal pathname contains ";" but that should # okay in the real world where ";" in dirpaths is itself problematic. lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED 's/;/ /g'` ;; *) lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED "s/$PATH_SEPARATOR/ /g"` ;; esac # Ok, now we have the path, separated by spaces, we can step through it # and add multilib dir if necessary... lt_tmp_lt_search_path_spec= lt_multi_os_dir=/`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null` # ...but if some path component already ends with the multilib dir we assume # that all is fine and trust -print-search-dirs as is (GCC 4.2? or newer). case "$lt_multi_os_dir; $lt_search_path_spec " in "/; "* | "/.; "* | "/./; "* | *"$lt_multi_os_dir "* | *"$lt_multi_os_dir/ "*) lt_multi_os_dir= ;; esac for lt_sys_path in $lt_search_path_spec; do if test -d "$lt_sys_path$lt_multi_os_dir"; then lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path$lt_multi_os_dir" elif test -n "$lt_multi_os_dir"; then test -d "$lt_sys_path" && \ lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path" fi done lt_search_path_spec=`$ECHO "$lt_tmp_lt_search_path_spec" | awk ' BEGIN {RS = " "; FS = "/|\n";} { lt_foo = ""; lt_count = 0; for (lt_i = NF; lt_i > 0; lt_i--) { if ($lt_i != "" && $lt_i != ".") { if ($lt_i == "..") { lt_count++; } else { if (lt_count == 0) { lt_foo = "/" $lt_i lt_foo; } else { lt_count--; } } } } if (lt_foo != "") { lt_freq[lt_foo]++; } if (lt_freq[lt_foo] == 1) { print lt_foo; } }'` # AWK program above erroneously prepends '/' to C:/dos/paths # for these hosts. case $host_os in mingw* | cegcc*) lt_search_path_spec=`$ECHO "$lt_search_path_spec" |\ $SED 's|/\([A-Za-z]:\)|\1|g'` ;; esac sys_lib_search_path_spec=`$ECHO "$lt_search_path_spec" | $lt_NL2SP` else sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" fi library_names_spec= libname_spec='lib$name' soname_spec= shrext_cmds=.so postinstall_cmds= postuninstall_cmds= finish_cmds= finish_eval= shlibpath_var= shlibpath_overrides_runpath=unknown version_type=none dynamic_linker="$host_os ld.so" sys_lib_dlsearch_path_spec="/lib /usr/lib" need_lib_prefix=unknown hardcode_into_libs=no # when you set need_version to no, make sure it does not cause -set_version # flags to be left without arguments need_version=unknown case $host_os in aix3*) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='$libname$release$shared_ext$versuffix $libname.a' shlibpath_var=LIBPATH # AIX 3 has no versioning support, so we append a major version to the name. 
soname_spec='$libname$release$shared_ext$major' ;; aix[4-9]*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no hardcode_into_libs=yes if test ia64 = "$host_cpu"; then # AIX 5 supports IA64 library_names_spec='$libname$release$shared_ext$major $libname$release$shared_ext$versuffix $libname$shared_ext' shlibpath_var=LD_LIBRARY_PATH else # With GCC up to 2.95.x, collect2 would create an import file # for dependence libraries. The import file would start with # the line '#! .'. This would cause the generated library to # depend on '.', always an invalid library. This was fixed in # development snapshots of GCC prior to 3.0. case $host_os in aix4 | aix4.[01] | aix4.[01].*) if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' echo ' yes ' echo '#endif'; } | $CC -E - | $GREP yes > /dev/null; then : else can_build_shared=no fi ;; esac # Using Import Files as archive members, it is possible to support # filename-based versioning of shared library archives on AIX. While # this would work for both with and without runtime linking, it will # prevent static linking of such archives. So we do filename-based # shared library versioning with .so extension only, which is used # when both runtime linking and shared linking is enabled. # Unfortunately, runtime linking may impact performance, so we do # not want this to be the default eventually. Also, we use the # versioned .so libs for executables only if there is the -brtl # linker flag in LDFLAGS as well, or --with-aix-soname=svr4 only. # To allow for filename-based versioning support, we need to create # libNAME.so.V as an archive file, containing: # *) an Import File, referring to the versioned filename of the # archive as well as the shared archive member, telling the # bitwidth (32 or 64) of that shared object, and providing the # list of exported symbols of that shared object, eventually # decorated with the 'weak' keyword # *) the shared object with the F_LOADONLY flag set, to really avoid # it being seen by the linker. # At run time we better use the real file rather than another symlink, # but for link time we create the symlink libNAME.so -> libNAME.so.V case $with_aix_soname,$aix_use_runtimelinking in # AIX (on Power*) has no versioning support, so currently we cannot hardcode correct # soname into executable. Probably we can add versioning support to # collect2, so additional links can be useful in future. aix,yes) # traditional libtool dynamic_linker='AIX unversionable lib.so' # If using run time linking (on AIX 4.2 or later) use lib.so # instead of lib.a to let people know that these are not # typical AIX shared libraries. library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ;; aix,no) # traditional AIX only dynamic_linker='AIX lib.a(lib.so.V)' # We preserve .a as extension for shared libraries through AIX4.2 # and later when we are not doing run time linking. library_names_spec='$libname$release.a $libname.a' soname_spec='$libname$release$shared_ext$major' ;; svr4,*) # full svr4 only dynamic_linker="AIX lib.so.V($shared_archive_member_spec.o)" library_names_spec='$libname$release$shared_ext$major $libname$shared_ext' # We do not specify a path in Import Files, so LIBPATH fires. 
shlibpath_overrides_runpath=yes ;; *,yes) # both, prefer svr4 dynamic_linker="AIX lib.so.V($shared_archive_member_spec.o), lib.a(lib.so.V)" library_names_spec='$libname$release$shared_ext$major $libname$shared_ext' # unpreferred sharedlib libNAME.a needs extra handling postinstall_cmds='test -n "$linkname" || linkname="$realname"~func_stripname "" ".so" "$linkname"~$install_shared_prog "$dir/$func_stripname_result.$libext" "$destdir/$func_stripname_result.$libext"~test -z "$tstripme" || test -z "$striplib" || $striplib "$destdir/$func_stripname_result.$libext"' postuninstall_cmds='for n in $library_names $old_library; do :; done~func_stripname "" ".so" "$n"~test "$func_stripname_result" = "$n" || func_append rmfiles " $odir/$func_stripname_result.$libext"' # We do not specify a path in Import Files, so LIBPATH fires. shlibpath_overrides_runpath=yes ;; *,no) # both, prefer aix dynamic_linker="AIX lib.a(lib.so.V), lib.so.V($shared_archive_member_spec.o)" library_names_spec='$libname$release.a $libname.a' soname_spec='$libname$release$shared_ext$major' # unpreferred sharedlib libNAME.so.V and symlink libNAME.so need extra handling postinstall_cmds='test -z "$dlname" || $install_shared_prog $dir/$dlname $destdir/$dlname~test -z "$tstripme" || test -z "$striplib" || $striplib $destdir/$dlname~test -n "$linkname" || linkname=$realname~func_stripname "" ".a" "$linkname"~(cd "$destdir" && $LN_S -f $dlname $func_stripname_result.so)' postuninstall_cmds='test -z "$dlname" || func_append rmfiles " $odir/$dlname"~for n in $old_library $library_names; do :; done~func_stripname "" ".a" "$n"~func_append rmfiles " $odir/$func_stripname_result.so"' ;; esac shlibpath_var=LIBPATH fi ;; amigaos*) case $host_cpu in powerpc) # Since July 2007 AmigaOS4 officially supports .so libraries. # When compiling the executable, add -use-dynld -Lsobjs: to the compileline. library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ;; m68k) library_names_spec='$libname.ixlibrary $libname.a' # Create ${libname}_ixlibrary.a entries in /sys/libs. finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' ;; esac ;; beos*) library_names_spec='$libname$shared_ext' dynamic_linker="$host_os ld.so" shlibpath_var=LIBRARY_PATH ;; bsdi[45]*) version_type=linux # correct to gnu/linux during the next big refactor need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' shlibpath_var=LD_LIBRARY_PATH sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" # the default ld.so.conf also contains /usr/contrib/lib and # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow # libtool to hard-code these into programs ;; cygwin* | mingw* | pw32* | cegcc*) version_type=windows shrext_cmds=.dll need_version=no need_lib_prefix=no case $GCC,$cc_basename in yes,*) # gcc library_names_spec='$libname.dll.a' # DLL is installed to $(libdir)/../bin by postinstall_cmds postinstall_cmds='base_file=`basename \$file`~ dlpath=`$SHELL 2>&1 -c '\''. 
$dir/'\''\$base_file'\''i; echo \$dlname'\''`~ dldir=$destdir/`dirname \$dlpath`~ test -d \$dldir || mkdir -p \$dldir~ $install_prog $dir/$dlname \$dldir/$dlname~ chmod a+x \$dldir/$dlname~ if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; fi' postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ dlpath=$dir/\$dldll~ $RM \$dlpath' shlibpath_overrides_runpath=yes case $host_os in cygwin*) # Cygwin DLLs use 'cyg' prefix rather than 'lib' soname_spec='`echo $libname | sed -e 's/^lib/cyg/'``echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/lib/w32api" ;; mingw* | cegcc*) # MinGW DLLs use traditional 'lib' prefix soname_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' ;; pw32*) # pw32 DLLs use 'pw' prefix rather than 'lib' library_names_spec='`echo $libname | sed -e 's/^lib/pw/'``echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' ;; esac dynamic_linker='Win32 ld.exe' ;; *,cl*) # Native MSVC libname_spec='$name' soname_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' library_names_spec='$libname.dll.lib' case $build_os in mingw*) sys_lib_search_path_spec= lt_save_ifs=$IFS IFS=';' for lt_path in $LIB do IFS=$lt_save_ifs # Let DOS variable expansion print the short 8.3 style file name. lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" done IFS=$lt_save_ifs # Convert to MSYS style. sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'` ;; cygwin*) # Convert to unix form, then to dos form, then back to unix form # but this time dos style (no spaces!) so that the unix form looks # like /cygdrive/c/PROGRA~1:/cygdr... sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` ;; *) sys_lib_search_path_spec=$LIB if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then # It is most probably a Windows format PATH. sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` else sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` fi # FIXME: find the short name or the path components, as spaces are # common. (e.g. "Program Files" -> "PROGRA~1") ;; esac # DLL is installed to $(libdir)/../bin by postinstall_cmds postinstall_cmds='base_file=`basename \$file`~ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~ dldir=$destdir/`dirname \$dlpath`~ test -d \$dldir || mkdir -p \$dldir~ $install_prog $dir/$dlname \$dldir/$dlname' postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ dlpath=$dir/\$dldll~ $RM \$dlpath' shlibpath_overrides_runpath=yes dynamic_linker='Win32 link.exe' ;; *) # Assume MSVC wrapper library_names_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext $libname.lib' dynamic_linker='Win32 ld.exe' ;; esac # FIXME: first we should search . 
and the directory the executable is in shlibpath_var=PATH ;; darwin* | rhapsody*) dynamic_linker="$host_os dyld" version_type=darwin need_lib_prefix=no need_version=no library_names_spec='$libname$release$major$shared_ext $libname$shared_ext' soname_spec='$libname$release$major$shared_ext' shlibpath_overrides_runpath=yes shlibpath_var=DYLD_LIBRARY_PATH shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/local/lib" sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' ;; dgux*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LD_LIBRARY_PATH ;; freebsd* | dragonfly*) # DragonFly does not have aout. When/if they implement a new # versioning mechanism, adjust this. if test -x /usr/bin/objformat; then objformat=`/usr/bin/objformat` else case $host_os in freebsd[23].*) objformat=aout ;; *) objformat=elf ;; esac fi version_type=freebsd-$objformat case $version_type in freebsd-elf*) library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' need_version=no need_lib_prefix=no ;; freebsd-*) library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' need_version=yes ;; esac shlibpath_var=LD_LIBRARY_PATH case $host_os in freebsd2.*) shlibpath_overrides_runpath=yes ;; freebsd3.[01]* | freebsdelf3.[01]*) shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; freebsd3.[2-9]* | freebsdelf3.[2-9]* | \ freebsd4.[0-5] | freebsdelf4.[0-5] | freebsd4.1.1 | freebsdelf4.1.1) shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; *) # from 4.6 on, and DragonFly shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; esac ;; haiku*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no dynamic_linker="$host_os runtime_loader" library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LIBRARY_PATH shlibpath_overrides_runpath=no sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' hardcode_into_libs=yes ;; hpux9* | hpux10* | hpux11*) # Give a soname corresponding to the major version so that dld.sl refuses to # link against other versions. version_type=sunos need_lib_prefix=no need_version=no case $host_cpu in ia64*) shrext_cmds='.so' hardcode_into_libs=yes dynamic_linker="$host_os dld.so" shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' if test 32 = "$HPUX_IA64_MODE"; then sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" sys_lib_dlsearch_path_spec=/usr/lib/hpux32 else sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" sys_lib_dlsearch_path_spec=/usr/lib/hpux64 fi ;; hppa*64*) shrext_cmds='.sl' hardcode_into_libs=yes dynamic_linker="$host_os dld.sl" shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. 
library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ;; *) shrext_cmds='.sl' dynamic_linker="$host_os dld.sl" shlibpath_var=SHLIB_PATH shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' ;; esac # HP-UX runs *really* slowly unless shared libraries are mode 555, ... postinstall_cmds='chmod 555 $lib' # or fails outright, so override atomically: install_override_mode=555 ;; interix[3-9]*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; irix5* | irix6* | nonstopux*) case $host_os in nonstopux*) version_type=nonstopux ;; *) if test yes = "$lt_cv_prog_gnu_ld"; then version_type=linux # correct to gnu/linux during the next big refactor else version_type=irix fi ;; esac need_lib_prefix=no need_version=no soname_spec='$libname$release$shared_ext$major' library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$release$shared_ext $libname$shared_ext' case $host_os in irix5* | nonstopux*) libsuff= shlibsuff= ;; *) case $LD in # libtool.m4 will add one of these switches to LD *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") libsuff= shlibsuff= libmagic=32-bit;; *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") libsuff=32 shlibsuff=N32 libmagic=N32;; *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") libsuff=64 shlibsuff=64 libmagic=64-bit;; *) libsuff= shlibsuff= libmagic=never-match;; esac ;; esac shlibpath_var=LD_LIBRARY${shlibsuff}_PATH shlibpath_overrides_runpath=no sys_lib_search_path_spec="/usr/lib$libsuff /lib$libsuff /usr/local/lib$libsuff" sys_lib_dlsearch_path_spec="/usr/lib$libsuff /lib$libsuff" hardcode_into_libs=yes ;; # No shared lib support for Linux oldld, aout, or coff. linux*oldld* | linux*aout* | linux*coff*) dynamic_linker=no ;; linux*android*) version_type=none # Android doesn't support versioned libraries. need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext' soname_spec='$libname$release$shared_ext' finish_cmds= shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes # This implies no fast_install, which is unacceptable. # Some rework will be needed to allow for fast_install # before this can be enabled. hardcode_into_libs=yes dynamic_linker='Android linker' # Don't embed -rpath directories since the linker doesn't support them. hardcode_libdir_flag_spec='-L$libdir' ;; # This must be glibc/ELF. 
linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no # Some binutils ld are patched to set DT_RUNPATH if ${lt_cv_shlibpath_overrides_runpath+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_shlibpath_overrides_runpath=no save_LDFLAGS=$LDFLAGS save_libdir=$libdir eval "libdir=/foo; wl=\"$lt_prog_compiler_wl\"; \ LDFLAGS=\"\$LDFLAGS $hardcode_libdir_flag_spec\"" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : if ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null; then : lt_cv_shlibpath_overrides_runpath=yes fi fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LDFLAGS=$save_LDFLAGS libdir=$save_libdir fi shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath # This implies no fast_install, which is unacceptable. # Some rework will be needed to allow for fast_install # before this can be enabled. hardcode_into_libs=yes # Ideally, we could use ldconfig to report *all* directores which are # searched for libraries, however this is still not possible. Aside from not # being certain /sbin/ldconfig is available, command # 'ldconfig -N -X -v | grep ^/' on 64bit Fedora does not report /usr/lib64, # even though it is searched at run-time. Try to do the best guess by # appending ld.so.conf contents (and includes) to the search path. if test -f /etc/ld.so.conf; then lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '` sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra" fi # We used to test for /lib/ld.so.1 and disable shared libraries on # powerpc, because MkLinux only supported shared libraries with the # GNU dynamic linker. Since this was broken with cross compilers, # most powerpc-linux boxes support dynamic linking these days and # people can always --disable-shared, the test was removed, and we # assume the GNU/Linux dynamic linker is in use. 
dynamic_linker='GNU/Linux ld.so' ;; netbsd*) version_type=sunos need_lib_prefix=no need_version=no if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' dynamic_linker='NetBSD (a.out) ld.so' else library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' dynamic_linker='NetBSD ld.elf_so' fi shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; newsos6) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes ;; *nto* | *qnx*) version_type=qnx need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes dynamic_linker='ldqnx.so' ;; openbsd* | bitrig*) version_type=sunos sys_lib_dlsearch_path_spec=/usr/lib need_lib_prefix=no if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then need_version=no else need_version=yes fi library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes ;; os2*) libname_spec='$name' version_type=windows shrext_cmds=.dll need_version=no need_lib_prefix=no # OS/2 can only load a DLL with a base name of 8 characters or less. soname_spec='`test -n "$os2dllname" && libname="$os2dllname"; v=$($ECHO $release$versuffix | tr -d .-); n=$($ECHO $libname | cut -b -$((8 - ${#v})) | tr . _); $ECHO $n$v`$shared_ext' library_names_spec='${libname}_dll.$libext' dynamic_linker='OS/2 ld.exe' shlibpath_var=BEGINLIBPATH sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec postinstall_cmds='base_file=`basename \$file`~ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; $ECHO \$dlname'\''`~ dldir=$destdir/`dirname \$dlpath`~ test -d \$dldir || mkdir -p \$dldir~ $install_prog $dir/$dlname \$dldir/$dlname~ chmod a+x \$dldir/$dlname~ if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; fi' postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. 
$file; $ECHO \$dlname'\''`~ dlpath=$dir/\$dldll~ $RM \$dlpath' ;; osf3* | osf4* | osf5*) version_type=osf need_lib_prefix=no need_version=no soname_spec='$libname$release$shared_ext$major' library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' shlibpath_var=LD_LIBRARY_PATH sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ;; rdos*) dynamic_linker=no ;; solaris*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes # ldd complains unless libraries are executable postinstall_cmds='chmod +x $lib' ;; sunos4*) version_type=sunos library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes if test yes = "$with_gnu_ld"; then need_lib_prefix=no fi need_version=yes ;; sysv4 | sysv4.3*) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LD_LIBRARY_PATH case $host_vendor in sni) shlibpath_overrides_runpath=no need_lib_prefix=no runpath_var=LD_RUN_PATH ;; siemens) need_lib_prefix=no ;; motorola) need_lib_prefix=no need_version=no shlibpath_overrides_runpath=no sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' ;; esac ;; sysv4*MP*) if test -d /usr/nec; then version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='$libname$shared_ext.$versuffix $libname$shared_ext.$major $libname$shared_ext' soname_spec='$libname$shared_ext.$major' shlibpath_var=LD_LIBRARY_PATH fi ;; sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) version_type=sco need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes if test yes = "$with_gnu_ld"; then sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' else sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' case $host_os in sco3.2v5*) sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" ;; esac fi sys_lib_dlsearch_path_spec='/usr/lib' ;; tpf*) # TPF is a cross-target only. Preferred cross-host = GNU/Linux. 
version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; uts4*) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LD_LIBRARY_PATH ;; *) dynamic_linker=no ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: result: $dynamic_linker" >&5 $as_echo "$dynamic_linker" >&6; } test no = "$dynamic_linker" && can_build_shared=no variables_saved_for_relink="PATH $shlibpath_var $runpath_var" if test yes = "$GCC"; then variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" fi if test set = "${lt_cv_sys_lib_search_path_spec+set}"; then sys_lib_search_path_spec=$lt_cv_sys_lib_search_path_spec fi if test set = "${lt_cv_sys_lib_dlsearch_path_spec+set}"; then sys_lib_dlsearch_path_spec=$lt_cv_sys_lib_dlsearch_path_spec fi # remember unaugmented sys_lib_dlsearch_path content for libtool script decls... configure_time_dlsearch_path=$sys_lib_dlsearch_path_spec # ... but it needs LT_SYS_LIBRARY_PATH munging for other configure-time code func_munge_path_list sys_lib_dlsearch_path_spec "$LT_SYS_LIBRARY_PATH" # to be used as default LT_SYS_LIBRARY_PATH value in generated libtool configure_time_lt_sys_library_path=$LT_SYS_LIBRARY_PATH { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to hardcode library paths into programs" >&5 $as_echo_n "checking how to hardcode library paths into programs... " >&6; } hardcode_action= if test -n "$hardcode_libdir_flag_spec" || test -n "$runpath_var" || test yes = "$hardcode_automatic"; then # We can hardcode non-existent directories. if test no != "$hardcode_direct" && # If the only mechanism to avoid hardcoding is shlibpath_var, we # have to relink, otherwise we might link with an installed library # when we should be linking with a yet-to-be-installed one ## test no != "$_LT_TAGVAR(hardcode_shlibpath_var, )" && test no != "$hardcode_minus_L"; then # Linking always hardcodes the temporary library directory. hardcode_action=relink else # We can link without hardcoding, and we can hardcode nonexisting dirs. hardcode_action=immediate fi else # We cannot hardcode anything, or else we can only hardcode existing # directories. hardcode_action=unsupported fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $hardcode_action" >&5 $as_echo "$hardcode_action" >&6; } if test relink = "$hardcode_action" || test yes = "$inherit_rpath"; then # Fast installation is not supported enable_fast_install=no elif test yes = "$shlibpath_overrides_runpath" || test no = "$enable_shared"; then # Fast installation is not necessary enable_fast_install=needless fi if test yes != "$enable_dlopen"; then enable_dlopen=unknown enable_dlopen_self=unknown enable_dlopen_self_static=unknown else lt_cv_dlopen=no lt_cv_dlopen_libs= case $host_os in beos*) lt_cv_dlopen=load_add_on lt_cv_dlopen_libs= lt_cv_dlopen_self=yes ;; mingw* | pw32* | cegcc*) lt_cv_dlopen=LoadLibrary lt_cv_dlopen_libs= ;; cygwin*) lt_cv_dlopen=dlopen lt_cv_dlopen_libs= ;; darwin*) # if libdl is installed we need to link against it { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5 $as_echo_n "checking for dlopen in -ldl... 
" >&6; } if ${ac_cv_lib_dl_dlopen+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-ldl $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char dlopen (); int main () { return dlopen (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_dl_dlopen=yes else ac_cv_lib_dl_dlopen=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5 $as_echo "$ac_cv_lib_dl_dlopen" >&6; } if test "x$ac_cv_lib_dl_dlopen" = xyes; then : lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-ldl else lt_cv_dlopen=dyld lt_cv_dlopen_libs= lt_cv_dlopen_self=yes fi ;; tpf*) # Don't try to run any link tests for TPF. We know it's impossible # because TPF is a cross-compiler, and we know how we open DSOs. lt_cv_dlopen=dlopen lt_cv_dlopen_libs= lt_cv_dlopen_self=no ;; *) ac_fn_c_check_func "$LINENO" "shl_load" "ac_cv_func_shl_load" if test "x$ac_cv_func_shl_load" = xyes; then : lt_cv_dlopen=shl_load else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for shl_load in -ldld" >&5 $as_echo_n "checking for shl_load in -ldld... " >&6; } if ${ac_cv_lib_dld_shl_load+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-ldld $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char shl_load (); int main () { return shl_load (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_dld_shl_load=yes else ac_cv_lib_dld_shl_load=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_shl_load" >&5 $as_echo "$ac_cv_lib_dld_shl_load" >&6; } if test "x$ac_cv_lib_dld_shl_load" = xyes; then : lt_cv_dlopen=shl_load lt_cv_dlopen_libs=-ldld else ac_fn_c_check_func "$LINENO" "dlopen" "ac_cv_func_dlopen" if test "x$ac_cv_func_dlopen" = xyes; then : lt_cv_dlopen=dlopen else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5 $as_echo_n "checking for dlopen in -ldl... " >&6; } if ${ac_cv_lib_dl_dlopen+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-ldl $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char dlopen (); int main () { return dlopen (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_dl_dlopen=yes else ac_cv_lib_dl_dlopen=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5 $as_echo "$ac_cv_lib_dl_dlopen" >&6; } if test "x$ac_cv_lib_dl_dlopen" = xyes; then : lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-ldl else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -lsvld" >&5 $as_echo_n "checking for dlopen in -lsvld... " >&6; } if ${ac_cv_lib_svld_dlopen+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lsvld $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char dlopen (); int main () { return dlopen (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_svld_dlopen=yes else ac_cv_lib_svld_dlopen=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_svld_dlopen" >&5 $as_echo "$ac_cv_lib_svld_dlopen" >&6; } if test "x$ac_cv_lib_svld_dlopen" = xyes; then : lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-lsvld else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dld_link in -ldld" >&5 $as_echo_n "checking for dld_link in -ldld... " >&6; } if ${ac_cv_lib_dld_dld_link+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-ldld $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char dld_link (); int main () { return dld_link (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_dld_dld_link=yes else ac_cv_lib_dld_dld_link=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_dld_link" >&5 $as_echo "$ac_cv_lib_dld_dld_link" >&6; } if test "x$ac_cv_lib_dld_dld_link" = xyes; then : lt_cv_dlopen=dld_link lt_cv_dlopen_libs=-ldld fi fi fi fi fi fi ;; esac if test no = "$lt_cv_dlopen"; then enable_dlopen=no else enable_dlopen=yes fi case $lt_cv_dlopen in dlopen) save_CPPFLAGS=$CPPFLAGS test yes = "$ac_cv_header_dlfcn_h" && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H" save_LDFLAGS=$LDFLAGS wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\" save_LIBS=$LIBS LIBS="$lt_cv_dlopen_libs $LIBS" { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a program can dlopen itself" >&5 $as_echo_n "checking whether a program can dlopen itself... 
" >&6; } if ${lt_cv_dlopen_self+:} false; then : $as_echo_n "(cached) " >&6 else if test yes = "$cross_compiling"; then : lt_cv_dlopen_self=cross else lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 lt_status=$lt_dlunknown cat > conftest.$ac_ext <<_LT_EOF #line $LINENO "configure" #include "confdefs.h" #if HAVE_DLFCN_H #include #endif #include #ifdef RTLD_GLOBAL # define LT_DLGLOBAL RTLD_GLOBAL #else # ifdef DL_GLOBAL # define LT_DLGLOBAL DL_GLOBAL # else # define LT_DLGLOBAL 0 # endif #endif /* We may have to define LT_DLLAZY_OR_NOW in the command line if we find out it does not work in some platform. */ #ifndef LT_DLLAZY_OR_NOW # ifdef RTLD_LAZY # define LT_DLLAZY_OR_NOW RTLD_LAZY # else # ifdef DL_LAZY # define LT_DLLAZY_OR_NOW DL_LAZY # else # ifdef RTLD_NOW # define LT_DLLAZY_OR_NOW RTLD_NOW # else # ifdef DL_NOW # define LT_DLLAZY_OR_NOW DL_NOW # else # define LT_DLLAZY_OR_NOW 0 # endif # endif # endif # endif #endif /* When -fvisibility=hidden is used, assume the code has been annotated correspondingly for the symbols needed. */ #if defined __GNUC__ && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) int fnord () __attribute__((visibility("default"))); #endif int fnord () { return 42; } int main () { void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); int status = $lt_dlunknown; if (self) { if (dlsym (self,"fnord")) status = $lt_dlno_uscore; else { if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; else puts (dlerror ()); } /* dlclose (self); */ } else puts (dlerror ()); return status; } _LT_EOF if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 (eval $ac_link) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && test -s "conftest$ac_exeext" 2>/dev/null; then (./conftest; exit; ) >&5 2>/dev/null lt_status=$? case x$lt_status in x$lt_dlno_uscore) lt_cv_dlopen_self=yes ;; x$lt_dlneed_uscore) lt_cv_dlopen_self=yes ;; x$lt_dlunknown|x*) lt_cv_dlopen_self=no ;; esac else : # compilation failed lt_cv_dlopen_self=no fi fi rm -fr conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_dlopen_self" >&5 $as_echo "$lt_cv_dlopen_self" >&6; } if test yes = "$lt_cv_dlopen_self"; then wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\" { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a statically linked program can dlopen itself" >&5 $as_echo_n "checking whether a statically linked program can dlopen itself... " >&6; } if ${lt_cv_dlopen_self_static+:} false; then : $as_echo_n "(cached) " >&6 else if test yes = "$cross_compiling"; then : lt_cv_dlopen_self_static=cross else lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 lt_status=$lt_dlunknown cat > conftest.$ac_ext <<_LT_EOF #line $LINENO "configure" #include "confdefs.h" #if HAVE_DLFCN_H #include #endif #include #ifdef RTLD_GLOBAL # define LT_DLGLOBAL RTLD_GLOBAL #else # ifdef DL_GLOBAL # define LT_DLGLOBAL DL_GLOBAL # else # define LT_DLGLOBAL 0 # endif #endif /* We may have to define LT_DLLAZY_OR_NOW in the command line if we find out it does not work in some platform. 
*/ #ifndef LT_DLLAZY_OR_NOW # ifdef RTLD_LAZY # define LT_DLLAZY_OR_NOW RTLD_LAZY # else # ifdef DL_LAZY # define LT_DLLAZY_OR_NOW DL_LAZY # else # ifdef RTLD_NOW # define LT_DLLAZY_OR_NOW RTLD_NOW # else # ifdef DL_NOW # define LT_DLLAZY_OR_NOW DL_NOW # else # define LT_DLLAZY_OR_NOW 0 # endif # endif # endif # endif #endif /* When -fvisibility=hidden is used, assume the code has been annotated correspondingly for the symbols needed. */ #if defined __GNUC__ && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) int fnord () __attribute__((visibility("default"))); #endif int fnord () { return 42; } int main () { void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); int status = $lt_dlunknown; if (self) { if (dlsym (self,"fnord")) status = $lt_dlno_uscore; else { if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; else puts (dlerror ()); } /* dlclose (self); */ } else puts (dlerror ()); return status; } _LT_EOF if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 (eval $ac_link) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && test -s "conftest$ac_exeext" 2>/dev/null; then (./conftest; exit; ) >&5 2>/dev/null lt_status=$? case x$lt_status in x$lt_dlno_uscore) lt_cv_dlopen_self_static=yes ;; x$lt_dlneed_uscore) lt_cv_dlopen_self_static=yes ;; x$lt_dlunknown|x*) lt_cv_dlopen_self_static=no ;; esac else : # compilation failed lt_cv_dlopen_self_static=no fi fi rm -fr conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_dlopen_self_static" >&5 $as_echo "$lt_cv_dlopen_self_static" >&6; } fi CPPFLAGS=$save_CPPFLAGS LDFLAGS=$save_LDFLAGS LIBS=$save_LIBS ;; esac case $lt_cv_dlopen_self in yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;; *) enable_dlopen_self=unknown ;; esac case $lt_cv_dlopen_self_static in yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;; *) enable_dlopen_self_static=unknown ;; esac fi striplib= old_striplib= { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether stripping libraries is possible" >&5 $as_echo_n "checking whether stripping libraries is possible... " >&6; } if test -n "$STRIP" && $STRIP -V 2>&1 | $GREP "GNU strip" >/dev/null; then test -z "$old_striplib" && old_striplib="$STRIP --strip-debug" test -z "$striplib" && striplib="$STRIP --strip-unneeded" { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else # FIXME - insert some real tests, host_os isn't really good enough case $host_os in darwin*) if test -n "$STRIP"; then striplib="$STRIP -x" old_striplib="$STRIP -S" { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi ;; *) { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } ;; esac fi # Report what library types will actually be built { $as_echo "$as_me:${as_lineno-$LINENO}: checking if libtool supports shared libraries" >&5 $as_echo_n "checking if libtool supports shared libraries... " >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: $can_build_shared" >&5 $as_echo "$can_build_shared" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build shared libraries" >&5 $as_echo_n "checking whether to build shared libraries... " >&6; } test no = "$can_build_shared" && enable_shared=no # On AIX, shared libraries and static libraries use the same namespace, and # are all built from PIC. 
case $host_os in aix3*) test yes = "$enable_shared" && enable_static=no if test -n "$RANLIB"; then archive_cmds="$archive_cmds~\$RANLIB \$lib" postinstall_cmds='$RANLIB $lib' fi ;; aix[4-9]*) if test ia64 != "$host_cpu"; then case $enable_shared,$with_aix_soname,$aix_use_runtimelinking in yes,aix,yes) ;; # shared object as lib.so file only yes,svr4,*) ;; # shared object as lib.so archive member only yes,*) enable_static=no ;; # shared object in lib.a archive as well esac fi ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_shared" >&5 $as_echo "$enable_shared" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build static libraries" >&5 $as_echo_n "checking whether to build static libraries... " >&6; } # Make sure either enable_shared or enable_static is yes. test yes = "$enable_shared" || enable_static=yes { $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_static" >&5 $as_echo "$enable_static" >&6; } fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu CC=$lt_save_CC ac_config_commands="$ac_config_commands libtool" # Only expand once: { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to enable maintainer-specific portions of Makefiles" >&5 $as_echo_n "checking whether to enable maintainer-specific portions of Makefiles... " >&6; } # Check whether --enable-maintainer-mode was given. if test "${enable_maintainer_mode+set}" = set; then : enableval=$enable_maintainer_mode; USE_MAINTAINER_MODE=$enableval else USE_MAINTAINER_MODE=no fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $USE_MAINTAINER_MODE" >&5 $as_echo "$USE_MAINTAINER_MODE" >&6; } if test $USE_MAINTAINER_MODE = yes; then MAINTAINER_MODE_TRUE= MAINTAINER_MODE_FALSE='#' else MAINTAINER_MODE_TRUE='#' MAINTAINER_MODE_FALSE= fi MAINT=$MAINTAINER_MODE_TRUE ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu if test -z "$CXX"; then if test -n "$CCC"; then CXX=$CCC else if test -n "$ac_tool_prefix"; then for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. set dummy $ac_tool_prefix$ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CXX+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CXX"; then ac_cv_prog_CXX="$CXX" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CXX="$ac_tool_prefix$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CXX=$ac_cv_prog_CXX if test -n "$CXX"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXX" >&5 $as_echo "$CXX" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$CXX" && break done fi if test -z "$CXX"; then ac_ct_CXX=$CXX for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_CXX+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CXX"; then ac_cv_prog_ac_ct_CXX="$ac_ct_CXX" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_CXX="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CXX=$ac_cv_prog_ac_ct_CXX if test -n "$ac_ct_CXX"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CXX" >&5 $as_echo "$ac_ct_CXX" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$ac_ct_CXX" && break done if test "x$ac_ct_CXX" = x; then CXX="g++" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CXX=$ac_ct_CXX fi fi fi fi # Provide some information about the compiler. $as_echo "$as_me:${as_lineno-$LINENO}: checking for C++ compiler version" >&5 set X $ac_compile ac_compiler=$2 for ac_option in --version -v -V -qversion; do { { ac_try="$ac_compiler $ac_option >&5" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compiler $ac_option >&5") 2>conftest.err ac_status=$? if test -s conftest.err; then sed '10a\ ... rest of stderr output deleted ... 10q' conftest.err >conftest.er1 cat conftest.er1 >&5 fi rm -f conftest.er1 conftest.err $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } done { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C++ compiler" >&5 $as_echo_n "checking whether we are using the GNU C++ compiler... " >&6; } if ${ac_cv_cxx_compiler_gnu+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { #ifndef __GNUC__ choke me #endif ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : ac_compiler_gnu=yes else ac_compiler_gnu=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_cxx_compiler_gnu=$ac_compiler_gnu fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_cxx_compiler_gnu" >&5 $as_echo "$ac_cv_cxx_compiler_gnu" >&6; } if test $ac_compiler_gnu = yes; then GXX=yes else GXX= fi ac_test_CXXFLAGS=${CXXFLAGS+set} ac_save_CXXFLAGS=$CXXFLAGS { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CXX accepts -g" >&5 $as_echo_n "checking whether $CXX accepts -g... " >&6; } if ${ac_cv_prog_cxx_g+:} false; then : $as_echo_n "(cached) " >&6 else ac_save_cxx_werror_flag=$ac_cxx_werror_flag ac_cxx_werror_flag=yes ac_cv_prog_cxx_g=no CXXFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : ac_cv_prog_cxx_g=yes else CXXFLAGS="" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : else ac_cxx_werror_flag=$ac_save_cxx_werror_flag CXXFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : ac_cv_prog_cxx_g=yes fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cxx_werror_flag=$ac_save_cxx_werror_flag fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cxx_g" >&5 $as_echo "$ac_cv_prog_cxx_g" >&6; } if test "$ac_test_CXXFLAGS" = set; then CXXFLAGS=$ac_save_CXXFLAGS elif test $ac_cv_prog_cxx_g = yes; then if test "$GXX" = yes; then CXXFLAGS="-g -O2" else CXXFLAGS="-g" fi else if test "$GXX" = yes; then CXXFLAGS="-O2" else CXXFLAGS= fi fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu depcc="$CXX" am_compiler_list= { $as_echo "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5 $as_echo_n "checking dependency style of $depcc... " >&6; } if ${am_cv_CXX_dependencies_compiler_type+:} false; then : $as_echo_n "(cached) " >&6 else if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then # We make a subdir and do the tests there. Otherwise we can end up # making bogus files that we don't know about and never remove. For # instance it was reported that on HP-UX the gcc test will end up # making a dummy file named 'D' -- because '-MD' means "put the output # in D". rm -rf conftest.dir mkdir conftest.dir # Copy depcomp to subdir because otherwise we won't find it if we're # using a relative directory. cp "$am_depcomp" conftest.dir cd conftest.dir # We will build objects and dependencies in a subdirectory because # it helps to detect inapplicable dependency modes. For instance # both Tru64's cc and ICC support -MD to output dependencies as a # side effect of compilation, but ICC will put the dependencies in # the current directory while Tru64 will put them in the object # directory. 
mkdir sub am_cv_CXX_dependencies_compiler_type=none if test "$am_compiler_list" = ""; then am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp` fi am__universal=false case " $depcc " in #( *\ -arch\ *\ -arch\ *) am__universal=true ;; esac for depmode in $am_compiler_list; do # Setup a source with many dependencies, because some compilers # like to wrap large dependency lists on column 80 (with \), and # we should not choose a depcomp mode which is confused by this. # # We need to recreate these files for each test, as the compiler may # overwrite some of them when testing with obscure command lines. # This happens at least with the AIX C compiler. : > sub/conftest.c for i in 1 2 3 4 5 6; do echo '#include "conftst'$i'.h"' >> sub/conftest.c # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with # Solaris 10 /bin/sh. echo '/* dummy */' > sub/conftst$i.h done echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf # We check with '-c' and '-o' for the sake of the "dashmstdout" # mode. It turns out that the SunPro C++ compiler does not properly # handle '-M -o', and we need to detect this. Also, some Intel # versions had trouble with output in subdirs. am__obj=sub/conftest.${OBJEXT-o} am__minus_obj="-o $am__obj" case $depmode in gcc) # This depmode causes a compiler race in universal mode. test "$am__universal" = false || continue ;; nosideeffect) # After this tag, mechanisms are not by side-effect, so they'll # only be used when explicitly requested. if test "x$enable_dependency_tracking" = xyes; then continue else break fi ;; msvc7 | msvc7msys | msvisualcpp | msvcmsys) # This compiler won't grok '-c -o', but also, the minuso test has # not run yet. These depmodes are late enough in the game, and # so weak that their functioning should not be impacted. am__obj=conftest.${OBJEXT-o} am__minus_obj= ;; none) break ;; esac if depmode=$depmode \ source=sub/conftest.c object=$am__obj \ depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ >/dev/null 2>conftest.err && grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && grep $am__obj sub/conftest.Po > /dev/null 2>&1 && ${MAKE-make} -s -f confmf > /dev/null 2>&1; then # icc doesn't choke on unknown options, it will just issue warnings # or remarks (even with -Werror). So we grep stderr for any message # that says an option was ignored or not supported. # When given -MP, icc 7.0 and 7.1 complain thusly: # icc: Command line warning: ignoring option '-M'; no argument required # The diagnosis changed in icc 8.0: # icc: Command line remark: option '-MP' not supported if (grep 'ignoring option' conftest.err || grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else am_cv_CXX_dependencies_compiler_type=$depmode break fi fi done cd .. 
rm -rf conftest.dir else am_cv_CXX_dependencies_compiler_type=none fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_CXX_dependencies_compiler_type" >&5 $as_echo "$am_cv_CXX_dependencies_compiler_type" >&6; } CXXDEPMODE=depmode=$am_cv_CXX_dependencies_compiler_type if test "x$enable_dependency_tracking" != xno \ && test "$am_cv_CXX_dependencies_compiler_type" = gcc3; then am__fastdepCXX_TRUE= am__fastdepCXX_FALSE='#' else am__fastdepCXX_TRUE='#' am__fastdepCXX_FALSE= fi func_stripname_cnf () { case $2 in .*) func_stripname_result=`$ECHO "$3" | $SED "s%^$1%%; s%\\\\$2\$%%"`;; *) func_stripname_result=`$ECHO "$3" | $SED "s%^$1%%; s%$2\$%%"`;; esac } # func_stripname_cnf if test -n "$CXX" && ( test no != "$CXX" && ( (test g++ = "$CXX" && `g++ -v >/dev/null 2>&1` ) || (test g++ != "$CXX"))); then ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C++ preprocessor" >&5 $as_echo_n "checking how to run the C++ preprocessor... " >&6; } if test -z "$CXXCPP"; then if ${ac_cv_prog_CXXCPP+:} false; then : $as_echo_n "(cached) " >&6 else # Double quotes because CXXCPP needs to be expanded for CXXCPP in "$CXX -E" "/lib/cpp" do ac_preproc_ok=false for ac_cxx_preproc_warn_flag in '' yes do # Use a header file that comes with gcc, so configuring glibc # with a fresh cross-compiler works. # Prefer to if __STDC__ is defined, since # exists even on freestanding compilers. # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifdef __STDC__ # include #else # include #endif Syntax error _ACEOF if ac_fn_cxx_try_cpp "$LINENO"; then : else # Broken: fails on valid input. continue fi rm -f conftest.err conftest.i conftest.$ac_ext # OK, works on sane cases. Now check whether nonexistent headers # can be detected and how. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if ac_fn_cxx_try_cpp "$LINENO"; then : # Broken: success on invalid input. continue else # Passes both tests. ac_preproc_ok=: break fi rm -f conftest.err conftest.i conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. rm -f conftest.i conftest.err conftest.$ac_ext if $ac_preproc_ok; then : break fi done ac_cv_prog_CXXCPP=$CXXCPP fi CXXCPP=$ac_cv_prog_CXXCPP else ac_cv_prog_CXXCPP=$CXXCPP fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXXCPP" >&5 $as_echo "$CXXCPP" >&6; } ac_preproc_ok=false for ac_cxx_preproc_warn_flag in '' yes do # Use a header file that comes with gcc, so configuring glibc # with a fresh cross-compiler works. # Prefer to if __STDC__ is defined, since # exists even on freestanding compilers. # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifdef __STDC__ # include #else # include #endif Syntax error _ACEOF if ac_fn_cxx_try_cpp "$LINENO"; then : else # Broken: fails on valid input. continue fi rm -f conftest.err conftest.i conftest.$ac_ext # OK, works on sane cases. Now check whether nonexistent headers # can be detected and how. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include _ACEOF if ac_fn_cxx_try_cpp "$LINENO"; then : # Broken: success on invalid input. continue else # Passes both tests. ac_preproc_ok=: break fi rm -f conftest.err conftest.i conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. rm -f conftest.i conftest.err conftest.$ac_ext if $ac_preproc_ok; then : else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "C++ preprocessor \"$CXXCPP\" fails sanity check See \`config.log' for more details" "$LINENO" 5; } fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu else _lt_caught_CXX_error=yes fi ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu archive_cmds_need_lc_CXX=no allow_undefined_flag_CXX= always_export_symbols_CXX=no archive_expsym_cmds_CXX= compiler_needs_object_CXX=no export_dynamic_flag_spec_CXX= hardcode_direct_CXX=no hardcode_direct_absolute_CXX=no hardcode_libdir_flag_spec_CXX= hardcode_libdir_separator_CXX= hardcode_minus_L_CXX=no hardcode_shlibpath_var_CXX=unsupported hardcode_automatic_CXX=no inherit_rpath_CXX=no module_cmds_CXX= module_expsym_cmds_CXX= link_all_deplibs_CXX=unknown old_archive_cmds_CXX=$old_archive_cmds reload_flag_CXX=$reload_flag reload_cmds_CXX=$reload_cmds no_undefined_flag_CXX= whole_archive_flag_spec_CXX= enable_shared_with_static_runtimes_CXX=no # Source file extension for C++ test sources. ac_ext=cpp # Object file extension for compiled C++ test sources. objext=o objext_CXX=$objext # No sense in running all these tests if we already determined that # the CXX compiler isn't working. Some variables (like enable_shared) # are currently assumed to apply to all compilers on this platform, # and will be corrupted by setting them based on a non-working compiler. if test yes != "$_lt_caught_CXX_error"; then # Code to be used in simple compile tests lt_simple_compile_test_code="int some_variable = 0;" # Code to be used in simple link tests lt_simple_link_test_code='int main(int, char *[]) { return(0); }' # ltmain only uses $CC for tagged configurations so make sure $CC is set. # If no C compiler was specified, use CC. LTCC=${LTCC-"$CC"} # If no C compiler flags were specified, use CFLAGS. LTCFLAGS=${LTCFLAGS-"$CFLAGS"} # Allow CC to be a program name with arguments. compiler=$CC # save warnings/boilerplate of simple test code ac_outfile=conftest.$ac_objext echo "$lt_simple_compile_test_code" >conftest.$ac_ext eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err _lt_compiler_boilerplate=`cat conftest.err` $RM conftest* ac_outfile=conftest.$ac_objext echo "$lt_simple_link_test_code" >conftest.$ac_ext eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err _lt_linker_boilerplate=`cat conftest.err` $RM -r conftest* # Allow CC to be a program name with arguments. 
lt_save_CC=$CC lt_save_CFLAGS=$CFLAGS lt_save_LD=$LD lt_save_GCC=$GCC GCC=$GXX lt_save_with_gnu_ld=$with_gnu_ld lt_save_path_LD=$lt_cv_path_LD if test -n "${lt_cv_prog_gnu_ldcxx+set}"; then lt_cv_prog_gnu_ld=$lt_cv_prog_gnu_ldcxx else $as_unset lt_cv_prog_gnu_ld fi if test -n "${lt_cv_path_LDCXX+set}"; then lt_cv_path_LD=$lt_cv_path_LDCXX else $as_unset lt_cv_path_LD fi test -z "${LDCXX+set}" || LD=$LDCXX CC=${CXX-"c++"} CFLAGS=$CXXFLAGS compiler=$CC compiler_CXX=$CC func_cc_basename $compiler cc_basename=$func_cc_basename_result if test -n "$compiler"; then # We don't want -fno-exception when compiling C++ code, so set the # no_builtin_flag separately if test yes = "$GXX"; then lt_prog_compiler_no_builtin_flag_CXX=' -fno-builtin' else lt_prog_compiler_no_builtin_flag_CXX= fi if test yes = "$GXX"; then # Set up default GNU C++ configuration # Check whether --with-gnu-ld was given. if test "${with_gnu_ld+set}" = set; then : withval=$with_gnu_ld; test no = "$withval" || with_gnu_ld=yes else with_gnu_ld=no fi ac_prog=ld if test yes = "$GCC"; then # Check if gcc -print-prog-name=ld gives a path. { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ld used by $CC" >&5 $as_echo_n "checking for ld used by $CC... " >&6; } case $host in *-*-mingw*) # gcc leaves a trailing carriage return, which upsets mingw ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; *) ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; esac case $ac_prog in # Accept absolute paths. [\\/]* | ?:[\\/]*) re_direlt='/[^/][^/]*/\.\./' # Canonicalize the pathname of ld ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'` while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"` done test -z "$LD" && LD=$ac_prog ;; "") # If it fails, then pretend we aren't using GCC. ac_prog=ld ;; *) # If it is relative, then search for the first ld in PATH. with_gnu_ld=unknown ;; esac elif test yes = "$with_gnu_ld"; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5 $as_echo_n "checking for GNU ld... " >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5 $as_echo_n "checking for non-GNU ld... " >&6; } fi if ${lt_cv_path_LD+:} false; then : $as_echo_n "(cached) " >&6 else if test -z "$LD"; then lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR for ac_dir in $PATH; do IFS=$lt_save_ifs test -z "$ac_dir" && ac_dir=. if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then lt_cv_path_LD=$ac_dir/$ac_prog # Check to see if the program is GNU ld. I'd rather use --version, # but apparently some variants of GNU ld only accept -v. # Break only if it was the GNU/non-GNU ld that we prefer. case `"$lt_cv_path_LD" -v 2>&1 &5 $as_echo "$LD" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -z "$LD" && as_fn_error $? "no acceptable ld found in \$PATH" "$LINENO" 5 { $as_echo "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5 $as_echo_n "checking if the linker ($LD) is GNU ld... " >&6; } if ${lt_cv_prog_gnu_ld+:} false; then : $as_echo_n "(cached) " >&6 else # I'd rather use --version here, but apparently some GNU lds only accept -v. case `$LD -v 2>&1 &5 $as_echo "$lt_cv_prog_gnu_ld" >&6; } with_gnu_ld=$lt_cv_prog_gnu_ld # Check if GNU C++ uses GNU ld as the underlying linker, since the # archiving commands below assume that GNU ld is being used. 
if test yes = "$with_gnu_ld"; then archive_cmds_CXX='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' archive_expsym_cmds_CXX='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir' export_dynamic_flag_spec_CXX='$wl--export-dynamic' # If archive_cmds runs LD, not CC, wlarc should be empty # XXX I think wlarc can be eliminated in ltcf-cxx, but I need to # investigate it a little bit more. (MM) wlarc='$wl' # ancient GNU ld didn't support --whole-archive et. al. if eval "`$CC -print-prog-name=ld` --help 2>&1" | $GREP 'no-whole-archive' > /dev/null; then whole_archive_flag_spec_CXX=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive' else whole_archive_flag_spec_CXX= fi else with_gnu_ld=no wlarc= # A generic and very simple default shared library creation # command for GNU C++ for the case where it uses the native # linker, instead of GNU ld. If possible, this setting should # overridden to take advantage of the native linker features on # the platform it is being used on. archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' fi # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' else GXX=no with_gnu_ld=no wlarc= fi # PORTME: fill in a description of your system's C++ link characteristics { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5 $as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; } ld_shlibs_CXX=yes case $host_os in aix3*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; aix[4-9]*) if test ia64 = "$host_cpu"; then # On IA64, the linker does run time linking by default, so we don't # have to do anything special. aix_use_runtimelinking=no exp_sym_flag='-Bexport' no_entry_flag= else aix_use_runtimelinking=no # Test if we are trying to use run time linking or normal # AIX style linking. If -brtl is somewhere in LDFLAGS, we # have runtime linking enabled, and use it for executables. # For shared libraries, we enable/disable runtime linking # depending on the kind of the shared library created - # when "with_aix_soname,aix_use_runtimelinking" is: # "aix,no" lib.a(lib.so.V) shared, rtl:no, for executables # "aix,yes" lib.so shared, rtl:yes, for executables # lib.a static archive # "both,no" lib.so.V(shr.o) shared, rtl:yes # lib.a(lib.so.V) shared, rtl:no, for executables # "both,yes" lib.so.V(shr.o) shared, rtl:yes, for executables # lib.a(lib.so.V) shared, rtl:no # "svr4,*" lib.so.V(shr.o) shared, rtl:yes, for executables # lib.a static archive case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*) for ld_flag in $LDFLAGS; do case $ld_flag in *-brtl*) aix_use_runtimelinking=yes break ;; esac done if test svr4,no = "$with_aix_soname,$aix_use_runtimelinking"; then # With aix-soname=svr4, we create the lib.so.V shared archives only, # so we don't have lib.a shared libs to link our executables. # We have to force runtime linking in this case. 
aix_use_runtimelinking=yes LDFLAGS="$LDFLAGS -Wl,-brtl" fi ;; esac exp_sym_flag='-bexport' no_entry_flag='-bnoentry' fi # When large executables or shared objects are built, AIX ld can # have problems creating the table of contents. If linking a library # or program results in "error TOC overflow" add -mminimal-toc to # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. archive_cmds_CXX='' hardcode_direct_CXX=yes hardcode_direct_absolute_CXX=yes hardcode_libdir_separator_CXX=':' link_all_deplibs_CXX=yes file_list_spec_CXX='$wl-f,' case $with_aix_soname,$aix_use_runtimelinking in aix,*) ;; # no import file svr4,* | *,yes) # use import file # The Import File defines what to hardcode. hardcode_direct_CXX=no hardcode_direct_absolute_CXX=no ;; esac if test yes = "$GXX"; then case $host_os in aix4.[012]|aix4.[012].*) # We only want to do this on AIX 4.2 and lower, the check # below for broken collect2 doesn't work under 4.3+ collect2name=`$CC -print-prog-name=collect2` if test -f "$collect2name" && strings "$collect2name" | $GREP resolve_lib_name >/dev/null then # We have reworked collect2 : else # We have old collect2 hardcode_direct_CXX=unsupported # It fails to find uninstalled libraries when the uninstalled # path is not listed in the libpath. Setting hardcode_minus_L # to unsupported forces relinking hardcode_minus_L_CXX=yes hardcode_libdir_flag_spec_CXX='-L$libdir' hardcode_libdir_separator_CXX= fi esac shared_flag='-shared' if test yes = "$aix_use_runtimelinking"; then shared_flag=$shared_flag' $wl-G' fi # Need to ensure runtime linking is disabled for the traditional # shared library, or the linker may eventually find shared libraries # /with/ Import File - we do not want to mix them. shared_flag_aix='-shared' shared_flag_svr4='-shared $wl-G' else # not using gcc if test ia64 = "$host_cpu"; then # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release # chokes on -Wl,-G. The following line is correct: shared_flag='-G' else if test yes = "$aix_use_runtimelinking"; then shared_flag='$wl-G' else shared_flag='$wl-bM:SRE' fi shared_flag_aix='$wl-bM:SRE' shared_flag_svr4='$wl-G' fi fi export_dynamic_flag_spec_CXX='$wl-bexpall' # It seems that -bexpall does not export symbols beginning with # underscore (_), so it is better to generate a list of symbols to # export. always_export_symbols_CXX=yes if test aix,yes = "$with_aix_soname,$aix_use_runtimelinking"; then # Warning - without using the other runtime loading flags (-brtl), # -berok will link without error, but may produce a broken library. # The "-G" linker flag allows undefined symbols. no_undefined_flag_CXX='-bernotok' # Determine the default libpath from the value encoded in an empty # executable. if test set = "${lt_cv_aix_libpath+set}"; then aix_libpath=$lt_cv_aix_libpath else if ${lt_cv_aix_libpath__CXX+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_link "$LINENO"; then : lt_aix_libpath_sed=' /Import File Strings/,/^$/ { /^0/ { s/^0 *\([^ ]*\) *$/\1/ p } }' lt_cv_aix_libpath__CXX=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` # Check for a 64-bit object if we didn't find anything. 
if test -z "$lt_cv_aix_libpath__CXX"; then lt_cv_aix_libpath__CXX=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` fi fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext if test -z "$lt_cv_aix_libpath__CXX"; then lt_cv_aix_libpath__CXX=/usr/lib:/lib fi fi aix_libpath=$lt_cv_aix_libpath__CXX fi hardcode_libdir_flag_spec_CXX='$wl-blibpath:$libdir:'"$aix_libpath" archive_expsym_cmds_CXX='$CC -o $output_objdir/$soname $libobjs $deplibs $wl'$no_entry_flag' $compiler_flags `if test -n "$allow_undefined_flag"; then func_echo_all "$wl$allow_undefined_flag"; else :; fi` $wl'$exp_sym_flag:\$export_symbols' '$shared_flag else if test ia64 = "$host_cpu"; then hardcode_libdir_flag_spec_CXX='$wl-R $libdir:/usr/lib:/lib' allow_undefined_flag_CXX="-z nodefs" archive_expsym_cmds_CXX="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\$wl$no_entry_flag"' $compiler_flags $wl$allow_undefined_flag '"\$wl$exp_sym_flag:\$export_symbols" else # Determine the default libpath from the value encoded in an # empty executable. if test set = "${lt_cv_aix_libpath+set}"; then aix_libpath=$lt_cv_aix_libpath else if ${lt_cv_aix_libpath__CXX+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_link "$LINENO"; then : lt_aix_libpath_sed=' /Import File Strings/,/^$/ { /^0/ { s/^0 *\([^ ]*\) *$/\1/ p } }' lt_cv_aix_libpath__CXX=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` # Check for a 64-bit object if we didn't find anything. if test -z "$lt_cv_aix_libpath__CXX"; then lt_cv_aix_libpath__CXX=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` fi fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext if test -z "$lt_cv_aix_libpath__CXX"; then lt_cv_aix_libpath__CXX=/usr/lib:/lib fi fi aix_libpath=$lt_cv_aix_libpath__CXX fi hardcode_libdir_flag_spec_CXX='$wl-blibpath:$libdir:'"$aix_libpath" # Warning - without using the other run time loading flags, # -berok will link without error, but may produce a broken library. no_undefined_flag_CXX=' $wl-bernotok' allow_undefined_flag_CXX=' $wl-berok' if test yes = "$with_gnu_ld"; then # We only use this code for GNU lds that support --whole-archive. whole_archive_flag_spec_CXX='$wl--whole-archive$convenience $wl--no-whole-archive' else # Exported symbols can be pulled into shared objects from archives whole_archive_flag_spec_CXX='$convenience' fi archive_cmds_need_lc_CXX=yes archive_expsym_cmds_CXX='$RM -r $output_objdir/$realname.d~$MKDIR $output_objdir/$realname.d' # -brtl affects multiple linker settings, -berok does not and is overridden later compiler_flags_filtered='`func_echo_all "$compiler_flags " | $SED -e "s%-brtl\\([, ]\\)%-berok\\1%g"`' if test svr4 != "$with_aix_soname"; then # This is similar to how AIX traditionally builds its shared # libraries. Need -bnortl late, we may have -brtl in LDFLAGS. 
archive_expsym_cmds_CXX="$archive_expsym_cmds_CXX"'~$CC '$shared_flag_aix' -o $output_objdir/$realname.d/$soname $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$realname.d/$soname' fi if test aix != "$with_aix_soname"; then archive_expsym_cmds_CXX="$archive_expsym_cmds_CXX"'~$CC '$shared_flag_svr4' -o $output_objdir/$realname.d/$shared_archive_member_spec.o $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$STRIP -e $output_objdir/$realname.d/$shared_archive_member_spec.o~( func_echo_all "#! $soname($shared_archive_member_spec.o)"; if test shr_64 = "$shared_archive_member_spec"; then func_echo_all "# 64"; else func_echo_all "# 32"; fi; cat $export_symbols ) > $output_objdir/$realname.d/$shared_archive_member_spec.imp~$AR $AR_FLAGS $output_objdir/$soname $output_objdir/$realname.d/$shared_archive_member_spec.o $output_objdir/$realname.d/$shared_archive_member_spec.imp' else # used by -dlpreopen to get the symbols archive_expsym_cmds_CXX="$archive_expsym_cmds_CXX"'~$MV $output_objdir/$realname.d/$soname $output_objdir' fi archive_expsym_cmds_CXX="$archive_expsym_cmds_CXX"'~$RM -r $output_objdir/$realname.d' fi fi ;; beos*) if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then allow_undefined_flag_CXX=unsupported # Joseph Beckenbach says some releases of gcc # support --undefined. This deserves some investigation. FIXME archive_cmds_CXX='$CC -nostart $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' else ld_shlibs_CXX=no fi ;; chorus*) case $cc_basename in *) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; esac ;; cygwin* | mingw* | pw32* | cegcc*) case $GXX,$cc_basename in ,cl* | no,cl*) # Native MSVC # hardcode_libdir_flag_spec is actually meaningless, as there is # no search path for DLLs. hardcode_libdir_flag_spec_CXX=' ' allow_undefined_flag_CXX=unsupported always_export_symbols_CXX=yes file_list_spec_CXX='@' # Tell ltmain to make .lib files, not .a files. libext=lib # Tell ltmain to make .dll files, not .so files. shrext_cmds=.dll # FIXME: Setting linknames here is a bad hack. archive_cmds_CXX='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~linknames=' archive_expsym_cmds_CXX='if test DEF = "`$SED -n -e '\''s/^[ ]*//'\'' -e '\''/^\(;.*\)*$/d'\'' -e '\''s/^\(EXPORTS\|LIBRARY\)\([ ].*\)*$/DEF/p'\'' -e q $export_symbols`" ; then cp "$export_symbols" "$output_objdir/$soname.def"; echo "$tool_output_objdir$soname.def" > "$output_objdir/$soname.exp"; else $SED -e '\''s/^/-link -EXPORT:/'\'' < $export_symbols > $output_objdir/$soname.exp; fi~ $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ linknames=' # The linker will not automatically build a static lib if we build a DLL. 
# _LT_TAGVAR(old_archive_from_new_cmds, CXX)='true' enable_shared_with_static_runtimes_CXX=yes # Don't use ranlib old_postinstall_cmds_CXX='chmod 644 $oldlib' postlink_cmds_CXX='lt_outputfile="@OUTPUT@"~ lt_tool_outputfile="@TOOL_OUTPUT@"~ case $lt_outputfile in *.exe|*.EXE) ;; *) lt_outputfile=$lt_outputfile.exe lt_tool_outputfile=$lt_tool_outputfile.exe ;; esac~ func_to_tool_file "$lt_outputfile"~ if test : != "$MANIFEST_TOOL" && test -f "$lt_outputfile.manifest"; then $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; $RM "$lt_outputfile.manifest"; fi' ;; *) # g++ # _LT_TAGVAR(hardcode_libdir_flag_spec, CXX) is actually meaningless, # as there is no search path for DLLs. hardcode_libdir_flag_spec_CXX='-L$libdir' export_dynamic_flag_spec_CXX='$wl--export-all-symbols' allow_undefined_flag_CXX=unsupported always_export_symbols_CXX=no enable_shared_with_static_runtimes_CXX=yes if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' # If the export-symbols file already is a .def file, use it as # is; otherwise, prepend EXPORTS... archive_expsym_cmds_CXX='if test DEF = "`$SED -n -e '\''s/^[ ]*//'\'' -e '\''/^\(;.*\)*$/d'\'' -e '\''s/^\(EXPORTS\|LIBRARY\)\([ ].*\)*$/DEF/p'\'' -e q $export_symbols`" ; then cp $export_symbols $output_objdir/$soname.def; else echo EXPORTS > $output_objdir/$soname.def; cat $export_symbols >> $output_objdir/$soname.def; fi~ $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' else ld_shlibs_CXX=no fi ;; esac ;; darwin* | rhapsody*) archive_cmds_need_lc_CXX=no hardcode_direct_CXX=no hardcode_automatic_CXX=yes hardcode_shlibpath_var_CXX=unsupported if test yes = "$lt_cv_ld_force_load"; then whole_archive_flag_spec_CXX='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience $wl-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`' else whole_archive_flag_spec_CXX='' fi link_all_deplibs_CXX=yes allow_undefined_flag_CXX=$_lt_dar_allow_undefined case $cc_basename in ifort*|nagfor*) _lt_dar_can_shared=yes ;; *) _lt_dar_can_shared=$GCC ;; esac if test yes = "$_lt_dar_can_shared"; then output_verbose_link_cmd=func_echo_all archive_cmds_CXX="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dsymutil" module_cmds_CXX="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dsymutil" archive_expsym_cmds_CXX="sed 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dar_export_syms$_lt_dsymutil" module_expsym_cmds_CXX="sed -e 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dar_export_syms$_lt_dsymutil" if test yes != "$lt_cv_apple_cc_single_mod"; then archive_cmds_CXX="\$CC -r -keep_private_externs -nostdlib -o \$lib-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$lib-master.o \$deplibs \$compiler_flags 
-install_name \$rpath/\$soname \$verstring$_lt_dsymutil" archive_expsym_cmds_CXX="sed 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -r -keep_private_externs -nostdlib -o \$lib-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$lib-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring$_lt_dar_export_syms$_lt_dsymutil" fi else ld_shlibs_CXX=no fi ;; os2*) hardcode_libdir_flag_spec_CXX='-L$libdir' hardcode_minus_L_CXX=yes allow_undefined_flag_CXX=unsupported shrext_cmds=.dll archive_cmds_CXX='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ $ECHO EXPORTS >> $output_objdir/$libname.def~ emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~ $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ emximp -o $lib $output_objdir/$libname.def' archive_expsym_cmds_CXX='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ $ECHO EXPORTS >> $output_objdir/$libname.def~ prefix_cmds="$SED"~ if test EXPORTS = "`$SED 1q $export_symbols`"; then prefix_cmds="$prefix_cmds -e 1d"; fi~ prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~ cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~ $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ emximp -o $lib $output_objdir/$libname.def' old_archive_From_new_cmds_CXX='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def' enable_shared_with_static_runtimes_CXX=yes ;; dgux*) case $cc_basename in ec++*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; ghcx*) # Green Hills C++ Compiler # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; *) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; esac ;; freebsd2.*) # C++ shared libraries reported to be fairly broken before # switch to ELF ld_shlibs_CXX=no ;; freebsd-elf*) archive_cmds_need_lc_CXX=no ;; freebsd* | dragonfly*) # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF # conventions ld_shlibs_CXX=yes ;; haiku*) archive_cmds_CXX='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' link_all_deplibs_CXX=yes ;; hpux9*) hardcode_libdir_flag_spec_CXX='$wl+b $wl$libdir' hardcode_libdir_separator_CXX=: export_dynamic_flag_spec_CXX='$wl-E' hardcode_direct_CXX=yes hardcode_minus_L_CXX=yes # Not in the search PATH, # but as the default # location of the library. case $cc_basename in CC*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; aCC*) archive_cmds_CXX='$RM $output_objdir/$soname~$CC -b $wl+b $wl$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. 
output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $EGREP "\-L"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' ;; *) if test yes = "$GXX"; then archive_cmds_CXX='$RM $output_objdir/$soname~$CC -shared -nostdlib $pic_flag $wl+b $wl$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' else # FIXME: insert proper C++ library support ld_shlibs_CXX=no fi ;; esac ;; hpux10*|hpux11*) if test no = "$with_gnu_ld"; then hardcode_libdir_flag_spec_CXX='$wl+b $wl$libdir' hardcode_libdir_separator_CXX=: case $host_cpu in hppa*64*|ia64*) ;; *) export_dynamic_flag_spec_CXX='$wl-E' ;; esac fi case $host_cpu in hppa*64*|ia64*) hardcode_direct_CXX=no hardcode_shlibpath_var_CXX=no ;; *) hardcode_direct_CXX=yes hardcode_direct_absolute_CXX=yes hardcode_minus_L_CXX=yes # Not in the search PATH, # but as the default # location of the library. ;; esac case $cc_basename in CC*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; aCC*) case $host_cpu in hppa*64*) archive_cmds_CXX='$CC -b $wl+h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; ia64*) archive_cmds_CXX='$CC -b $wl+h $wl$soname $wl+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; *) archive_cmds_CXX='$CC -b $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; esac # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $GREP "\-L"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' ;; *) if test yes = "$GXX"; then if test no = "$with_gnu_ld"; then case $host_cpu in hppa*64*) archive_cmds_CXX='$CC -shared -nostdlib -fPIC $wl+h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; ia64*) archive_cmds_CXX='$CC -shared -nostdlib $pic_flag $wl+h $wl$soname $wl+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; *) archive_cmds_CXX='$CC -shared -nostdlib $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; esac fi else # FIXME: insert proper C++ library support ld_shlibs_CXX=no fi ;; esac ;; interix[3-9]*) hardcode_direct_CXX=no hardcode_shlibpath_var_CXX=no hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir' export_dynamic_flag_spec_CXX='$wl-E' # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. # Instead, shared libraries are loaded at an image base (0x10000000 by # default) and relocated if they conflict, which is a slow very memory # consuming and fragmenting process. To avoid this, we pick a random, # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link # time. Moving up from 0x10000000 also allows more sbrk(2) space. 
archive_cmds_CXX='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' archive_expsym_cmds_CXX='sed "s|^|_|" $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--retain-symbols-file,$output_objdir/$soname.expsym $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' ;; irix5* | irix6*) case $cc_basename in CC*) # SGI C++ archive_cmds_CXX='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' # Archives containing C++ object files must be created using # "CC -ar", where "CC" is the IRIX C++ compiler. This is # necessary to make sure instantiated templates are included # in the archive. old_archive_cmds_CXX='$CC -ar -WR,-u -o $oldlib $oldobjs' ;; *) if test yes = "$GXX"; then if test no = "$with_gnu_ld"; then archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' else archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` -o $lib' fi fi link_all_deplibs_CXX=yes ;; esac hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir' hardcode_libdir_separator_CXX=: inherit_rpath_CXX=yes ;; linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) case $cc_basename in KCC*) # Kuck and Associates, Inc. (KAI) C++ Compiler # KCC will only create a shared library if the output file # ends with ".so" (or ".sl" for HP-UX), so rename the library # to its proper name (with version) after linking. archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' archive_expsym_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib $wl-retain-symbols-file,$export_symbols; mv \$templib $lib' # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | $GREP "ld"`; rm -f libconftest$shared_ext; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir' export_dynamic_flag_spec_CXX='$wl--export-dynamic' # Archives containing C++ object files must be created using # "CC -Bstatic", where "CC" is the KAI C++ compiler. 
old_archive_cmds_CXX='$CC -Bstatic -o $oldlib $oldobjs' ;; icpc* | ecpc* ) # Intel C++ with_gnu_ld=yes # version 8.0 and above of icpc choke on multiply defined symbols # if we add $predep_objects and $postdep_objects, however 7.1 and # earlier do not add the objects themselves. case `$CC -V 2>&1` in *"Version 7."*) archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' archive_expsym_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' ;; *) # Version 8.0 or newer tmp_idyn= case $host_cpu in ia64*) tmp_idyn=' -i_dynamic';; esac archive_cmds_CXX='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' archive_expsym_cmds_CXX='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' ;; esac archive_cmds_need_lc_CXX=no hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir' export_dynamic_flag_spec_CXX='$wl--export-dynamic' whole_archive_flag_spec_CXX='$wl--whole-archive$convenience $wl--no-whole-archive' ;; pgCC* | pgcpp*) # Portland Group C++ compiler case `$CC -V` in *pgCC\ [1-5].* | *pgcpp\ [1-5].*) prelink_cmds_CXX='tpldir=Template.dir~ rm -rf $tpldir~ $CC --prelink_objects --instantiation_dir $tpldir $objs $libobjs $compile_deplibs~ compile_command="$compile_command `find $tpldir -name \*.o | sort | $NL2SP`"' old_archive_cmds_CXX='tpldir=Template.dir~ rm -rf $tpldir~ $CC --prelink_objects --instantiation_dir $tpldir $oldobjs$old_deplibs~ $AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | sort | $NL2SP`~ $RANLIB $oldlib' archive_cmds_CXX='tpldir=Template.dir~ rm -rf $tpldir~ $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' archive_expsym_cmds_CXX='tpldir=Template.dir~ rm -rf $tpldir~ $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' ;; *) # Version 6 and above use weak symbols archive_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' archive_expsym_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' ;; esac hardcode_libdir_flag_spec_CXX='$wl--rpath $wl$libdir' export_dynamic_flag_spec_CXX='$wl--export-dynamic' whole_archive_flag_spec_CXX='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' ;; cxx*) # Compaq C++ archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' archive_expsym_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib $wl-retain-symbols-file $wl$export_symbols' runpath_var=LD_RUN_PATH hardcode_libdir_flag_spec_CXX='-rpath 
$libdir' hardcode_libdir_separator_CXX=: # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "X$list" | $Xsed' ;; xl* | mpixl* | bgxl*) # IBM XL 8.0 on PPC, with GNU ld hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir' export_dynamic_flag_spec_CXX='$wl--export-dynamic' archive_cmds_CXX='$CC -qmkshrobj $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' if test yes = "$supports_anon_versioning"; then archive_expsym_cmds_CXX='echo "{ global:" > $output_objdir/$libname.ver~ cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ echo "local: *; };" >> $output_objdir/$libname.ver~ $CC -qmkshrobj $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-version-script $wl$output_objdir/$libname.ver -o $lib' fi ;; *) case `$CC -V 2>&1 | sed 5q` in *Sun\ C*) # Sun C++ 5.9 no_undefined_flag_CXX=' -zdefs' archive_cmds_CXX='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' archive_expsym_cmds_CXX='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-retain-symbols-file $wl$export_symbols' hardcode_libdir_flag_spec_CXX='-R$libdir' whole_archive_flag_spec_CXX='$wl--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' compiler_needs_object_CXX=yes # Not sure whether something based on # $CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 # would be better. output_verbose_link_cmd='func_echo_all' # Archives containing C++ object files must be created using # "CC -xar", where "CC" is the Sun C++ compiler. This is # necessary to make sure instantiated templates are included # in the archive. 
old_archive_cmds_CXX='$CC -xar -o $oldlib $oldobjs' ;; esac ;; esac ;; lynxos*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; m88k*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; mvs*) case $cc_basename in cxx*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; *) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; esac ;; netbsd*) if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then archive_cmds_CXX='$LD -Bshareable -o $lib $predep_objects $libobjs $deplibs $postdep_objects $linker_flags' wlarc= hardcode_libdir_flag_spec_CXX='-R$libdir' hardcode_direct_CXX=yes hardcode_shlibpath_var_CXX=no fi # Workaround some broken pre-1.5 toolchains output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP conftest.$objext | $SED -e "s:-lgcc -lc -lgcc::"' ;; *nto* | *qnx*) ld_shlibs_CXX=yes ;; openbsd* | bitrig*) if test -f /usr/libexec/ld.so; then hardcode_direct_CXX=yes hardcode_shlibpath_var_CXX=no hardcode_direct_absolute_CXX=yes archive_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir' if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`"; then archive_expsym_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-retain-symbols-file,$export_symbols -o $lib' export_dynamic_flag_spec_CXX='$wl-E' whole_archive_flag_spec_CXX=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive' fi output_verbose_link_cmd=func_echo_all else ld_shlibs_CXX=no fi ;; osf3* | osf4* | osf5*) case $cc_basename in KCC*) # Kuck and Associates, Inc. (KAI) C++ Compiler # KCC will only create a shared library if the output file # ends with ".so" (or ".sl" for HP-UX), so rename the library # to its proper name (with version) after linking. archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo "$lib" | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir' hardcode_libdir_separator_CXX=: # Archives containing C++ object files must be created using # the KAI C++ compiler. 
case $host in osf3*) old_archive_cmds_CXX='$CC -Bstatic -o $oldlib $oldobjs' ;; *) old_archive_cmds_CXX='$CC -o $oldlib $oldobjs' ;; esac ;; RCC*) # Rational C++ 2.4.1 # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; cxx*) case $host in osf3*) allow_undefined_flag_CXX=' $wl-expect_unresolved $wl\*' archive_cmds_CXX='$CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $soname `test -n "$verstring" && func_echo_all "$wl-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir' ;; *) allow_undefined_flag_CXX=' -expect_unresolved \*' archive_cmds_CXX='$CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' archive_expsym_cmds_CXX='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done~ echo "-hidden">> $lib.exp~ $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname $wl-input $wl$lib.exp `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib~ $RM $lib.exp' hardcode_libdir_flag_spec_CXX='-rpath $libdir' ;; esac hardcode_libdir_separator_CXX=: # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld" | $GREP -v "ld:"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' ;; *) if test yes,no = "$GXX,$with_gnu_ld"; then allow_undefined_flag_CXX=' $wl-expect_unresolved $wl\*' case $host in osf3*) archive_cmds_CXX='$CC -shared -nostdlib $allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' ;; *) archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-msym $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' ;; esac hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir' hardcode_libdir_separator_CXX=: # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. 
output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' else # FIXME: insert proper C++ library support ld_shlibs_CXX=no fi ;; esac ;; psos*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; sunos4*) case $cc_basename in CC*) # Sun C++ 4.x # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; lcc*) # Lucid # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; *) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; esac ;; solaris*) case $cc_basename in CC* | sunCC*) # Sun C++ 4.2, 5.x and Centerline C++ archive_cmds_need_lc_CXX=yes no_undefined_flag_CXX=' -zdefs' archive_cmds_CXX='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -G$allow_undefined_flag $wl-M $wl$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' hardcode_libdir_flag_spec_CXX='-R$libdir' hardcode_shlibpath_var_CXX=no case $host_os in solaris2.[0-5] | solaris2.[0-5].*) ;; *) # The compiler driver will combine and reorder linker options, # but understands '-z linker_flag'. # Supported since Solaris 2.6 (maybe 2.5.1?) whole_archive_flag_spec_CXX='-z allextract$convenience -z defaultextract' ;; esac link_all_deplibs_CXX=yes output_verbose_link_cmd='func_echo_all' # Archives containing C++ object files must be created using # "CC -xar", where "CC" is the Sun C++ compiler. This is # necessary to make sure instantiated templates are included # in the archive. old_archive_cmds_CXX='$CC -xar -o $oldlib $oldobjs' ;; gcx*) # Green Hills C++ Compiler archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib' # The C++ compiler must be used to create the archive. old_archive_cmds_CXX='$CC $LDFLAGS -archive -o $oldlib $oldobjs' ;; *) # GNU C++ compiler with Solaris linker if test yes,no = "$GXX,$with_gnu_ld"; then no_undefined_flag_CXX=' $wl-z ${wl}defs' if $CC --version | $GREP -v '^2\.7' > /dev/null; then archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib' archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -shared $pic_flag -nostdlib $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' else # g++ 2.7 appears to require '-G' NOT '-shared' on this # platform. 
archive_cmds_CXX='$CC -G -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib' archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -G -nostdlib $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. output_verbose_link_cmd='$CC -G $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' fi hardcode_libdir_flag_spec_CXX='$wl-R $wl$libdir' case $host_os in solaris2.[0-5] | solaris2.[0-5].*) ;; *) whole_archive_flag_spec_CXX='$wl-z ${wl}allextract$convenience $wl-z ${wl}defaultextract' ;; esac fi ;; esac ;; sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*) no_undefined_flag_CXX='$wl-z,text' archive_cmds_need_lc_CXX=no hardcode_shlibpath_var_CXX=no runpath_var='LD_RUN_PATH' case $cc_basename in CC*) archive_cmds_CXX='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds_CXX='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ;; *) archive_cmds_CXX='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds_CXX='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ;; esac ;; sysv5* | sco3.2v5* | sco5v6*) # Note: We CANNOT use -z defs as we might desire, because we do not # link with -lc, and that would cause any symbols used from libc to # always be unresolved, which means just about no library would # ever link correctly. If we're not using GNU ld we use -z text # though, which does catch some bad symbols but isn't as heavy-handed # as -z defs. no_undefined_flag_CXX='$wl-z,text' allow_undefined_flag_CXX='$wl-z,nodefs' archive_cmds_need_lc_CXX=no hardcode_shlibpath_var_CXX=no hardcode_libdir_flag_spec_CXX='$wl-R,$libdir' hardcode_libdir_separator_CXX=':' link_all_deplibs_CXX=yes export_dynamic_flag_spec_CXX='$wl-Bexport' runpath_var='LD_RUN_PATH' case $cc_basename in CC*) archive_cmds_CXX='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds_CXX='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' old_archive_cmds_CXX='$CC -Tprelink_objects $oldobjs~ '"$old_archive_cmds_CXX" reload_cmds_CXX='$CC -Tprelink_objects $reload_objs~ '"$reload_cmds_CXX" ;; *) archive_cmds_CXX='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds_CXX='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ;; esac ;; tandem*) case $cc_basename in NCC*) # NonStop-UX NCC 3.20 # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; *) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; esac ;; vxworks*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; *) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs_CXX" >&5 $as_echo "$ld_shlibs_CXX" >&6; } test no = "$ld_shlibs_CXX" && can_build_shared=no GCC_CXX=$GXX LD_CXX=$LD ## CAVEAT EMPTOR: ## There is no encapsulation within the following macros, do not change ## the running order or otherwise move them around unless you know exactly ## what you are doing... 
# Dependencies to place before and after the object being linked: predep_objects_CXX= postdep_objects_CXX= predeps_CXX= postdeps_CXX= compiler_lib_search_path_CXX= cat > conftest.$ac_ext <<_LT_EOF class Foo { public: Foo (void) { a = 0; } private: int a; }; _LT_EOF _lt_libdeps_save_CFLAGS=$CFLAGS case "$CC $CFLAGS " in #( *\ -flto*\ *) CFLAGS="$CFLAGS -fno-lto" ;; *\ -fwhopr*\ *) CFLAGS="$CFLAGS -fno-whopr" ;; *\ -fuse-linker-plugin*\ *) CFLAGS="$CFLAGS -fno-use-linker-plugin" ;; esac if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then # Parse the compiler output and extract the necessary # objects, libraries and library flags. # Sentinel used to keep track of whether or not we are before # the conftest object file. pre_test_object_deps_done=no for p in `eval "$output_verbose_link_cmd"`; do case $prev$p in -L* | -R* | -l*) # Some compilers place space between "-{L,R}" and the path. # Remove the space. if test x-L = "$p" || test x-R = "$p"; then prev=$p continue fi # Expand the sysroot to ease extracting the directories later. if test -z "$prev"; then case $p in -L*) func_stripname_cnf '-L' '' "$p"; prev=-L; p=$func_stripname_result ;; -R*) func_stripname_cnf '-R' '' "$p"; prev=-R; p=$func_stripname_result ;; -l*) func_stripname_cnf '-l' '' "$p"; prev=-l; p=$func_stripname_result ;; esac fi case $p in =*) func_stripname_cnf '=' '' "$p"; p=$lt_sysroot$func_stripname_result ;; esac if test no = "$pre_test_object_deps_done"; then case $prev in -L | -R) # Internal compiler library paths should come after those # provided the user. The postdeps already come after the # user supplied libs so there is no need to process them. if test -z "$compiler_lib_search_path_CXX"; then compiler_lib_search_path_CXX=$prev$p else compiler_lib_search_path_CXX="${compiler_lib_search_path_CXX} $prev$p" fi ;; # The "-l" case would never come before the object being # linked, so don't bother handling this case. esac else if test -z "$postdeps_CXX"; then postdeps_CXX=$prev$p else postdeps_CXX="${postdeps_CXX} $prev$p" fi fi prev= ;; *.lto.$objext) ;; # Ignore GCC LTO objects *.$objext) # This assumes that the test object file only shows up # once in the compiler output. if test "$p" = "conftest.$objext"; then pre_test_object_deps_done=yes continue fi if test no = "$pre_test_object_deps_done"; then if test -z "$predep_objects_CXX"; then predep_objects_CXX=$p else predep_objects_CXX="$predep_objects_CXX $p" fi else if test -z "$postdep_objects_CXX"; then postdep_objects_CXX=$p else postdep_objects_CXX="$postdep_objects_CXX $p" fi fi ;; *) ;; # Ignore the rest. esac done # Clean up. rm -f a.out a.exe else echo "libtool.m4: error: problem compiling CXX test program" fi $RM -f confest.$objext CFLAGS=$_lt_libdeps_save_CFLAGS # PORTME: override above test on systems where it is broken case $host_os in interix[3-9]*) # Interix 3.5 installs completely hosed .la files for C++, so rather than # hack all around it, let's just trust "g++" to DTRT. predep_objects_CXX= postdep_objects_CXX= postdeps_CXX= ;; esac case " $postdeps_CXX " in *" -lc "*) archive_cmds_need_lc_CXX=no ;; esac compiler_lib_search_dirs_CXX= if test -n "${compiler_lib_search_path_CXX}"; then compiler_lib_search_dirs_CXX=`echo " ${compiler_lib_search_path_CXX}" | $SED -e 's! -L! 
!g' -e 's!^ !!'` fi lt_prog_compiler_wl_CXX= lt_prog_compiler_pic_CXX= lt_prog_compiler_static_CXX= # C++ specific cases for pic, static, wl, etc. if test yes = "$GXX"; then lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_static_CXX='-static' case $host_os in aix*) # All AIX code is PIC. if test ia64 = "$host_cpu"; then # AIX 5 now supports IA64 processor lt_prog_compiler_static_CXX='-Bstatic' fi lt_prog_compiler_pic_CXX='-fPIC' ;; amigaos*) case $host_cpu in powerpc) # see comment about AmigaOS4 .so support lt_prog_compiler_pic_CXX='-fPIC' ;; m68k) # FIXME: we need at least 68020 code to build shared libraries, but # adding the '-m68020' flag to GCC prevents building anything better, # like '-m68040'. lt_prog_compiler_pic_CXX='-m68020 -resident32 -malways-restore-a4' ;; esac ;; beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) # PIC is the default for these OSes. ;; mingw* | cygwin* | os2* | pw32* | cegcc*) # This hack is so that the source file can tell whether it is being # built for inclusion in a dll (and should export symbols for example). # Although the cygwin gcc ignores -fPIC, still need this for old-style # (--disable-auto-import) libraries lt_prog_compiler_pic_CXX='-DDLL_EXPORT' case $host_os in os2*) lt_prog_compiler_static_CXX='$wl-static' ;; esac ;; darwin* | rhapsody*) # PIC is the default on this platform # Common symbols not allowed in MH_DYLIB files lt_prog_compiler_pic_CXX='-fno-common' ;; *djgpp*) # DJGPP does not support shared libraries at all lt_prog_compiler_pic_CXX= ;; haiku*) # PIC is the default for Haiku. # The "-static" flag exists, but is broken. lt_prog_compiler_static_CXX= ;; interix[3-9]*) # Interix 3.x gcc -fpic/-fPIC options generate broken code. # Instead, we relocate shared libraries at runtime. ;; sysv4*MP*) if test -d /usr/nec; then lt_prog_compiler_pic_CXX=-Kconform_pic fi ;; hpux*) # PIC is the default for 64-bit PA HP-UX, but not for 32-bit # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag # sets the default TLS model and affects inlining. case $host_cpu in hppa*64*) ;; *) lt_prog_compiler_pic_CXX='-fPIC' ;; esac ;; *qnx* | *nto*) # QNX uses GNU C++, but need to define -shared option too, otherwise # it will coredump. lt_prog_compiler_pic_CXX='-fPIC -shared' ;; *) lt_prog_compiler_pic_CXX='-fPIC' ;; esac else case $host_os in aix[4-9]*) # All AIX code is PIC. if test ia64 = "$host_cpu"; then # AIX 5 now supports IA64 processor lt_prog_compiler_static_CXX='-Bstatic' else lt_prog_compiler_static_CXX='-bnso -bI:/lib/syscalls.exp' fi ;; chorus*) case $cc_basename in cxch68*) # Green Hills C++ Compiler # _LT_TAGVAR(lt_prog_compiler_static, CXX)="--no_auto_instantiation -u __main -u __premain -u _abort -r $COOL_DIR/lib/libOrb.a $MVME_DIR/lib/CC/libC.a $MVME_DIR/lib/classix/libcx.s.a" ;; esac ;; mingw* | cygwin* | os2* | pw32* | cegcc*) # This hack is so that the source file can tell whether it is being # built for inclusion in a dll (and should export symbols for example). 
lt_prog_compiler_pic_CXX='-DDLL_EXPORT' ;; dgux*) case $cc_basename in ec++*) lt_prog_compiler_pic_CXX='-KPIC' ;; ghcx*) # Green Hills C++ Compiler lt_prog_compiler_pic_CXX='-pic' ;; *) ;; esac ;; freebsd* | dragonfly*) # FreeBSD uses GNU C++ ;; hpux9* | hpux10* | hpux11*) case $cc_basename in CC*) lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_static_CXX='$wl-a ${wl}archive' if test ia64 != "$host_cpu"; then lt_prog_compiler_pic_CXX='+Z' fi ;; aCC*) lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_static_CXX='$wl-a ${wl}archive' case $host_cpu in hppa*64*|ia64*) # +Z the default ;; *) lt_prog_compiler_pic_CXX='+Z' ;; esac ;; *) ;; esac ;; interix*) # This is c89, which is MS Visual C++ (no shared libs) # Anyone wants to do a port? ;; irix5* | irix6* | nonstopux*) case $cc_basename in CC*) lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_static_CXX='-non_shared' # CC pic flag -KPIC is the default. ;; *) ;; esac ;; linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) case $cc_basename in KCC*) # KAI C++ Compiler lt_prog_compiler_wl_CXX='--backend -Wl,' lt_prog_compiler_pic_CXX='-fPIC' ;; ecpc* ) # old Intel C++ for x86_64, which still supported -KPIC. lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_pic_CXX='-KPIC' lt_prog_compiler_static_CXX='-static' ;; icpc* ) # Intel C++, used to be incompatible with GCC. # ICC 10 doesn't accept -KPIC any more. lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_pic_CXX='-fPIC' lt_prog_compiler_static_CXX='-static' ;; pgCC* | pgcpp*) # Portland Group C++ compiler lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_pic_CXX='-fpic' lt_prog_compiler_static_CXX='-Bstatic' ;; cxx*) # Compaq C++ # Make sure the PIC flag is empty. It appears that all Alpha # Linux and Compaq Tru64 Unix objects are PIC. lt_prog_compiler_pic_CXX= lt_prog_compiler_static_CXX='-non_shared' ;; xlc* | xlC* | bgxl[cC]* | mpixl[cC]*) # IBM XL 8.0, 9.0 on PPC and BlueGene lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_pic_CXX='-qpic' lt_prog_compiler_static_CXX='-qstaticlink' ;; *) case `$CC -V 2>&1 | sed 5q` in *Sun\ C*) # Sun C++ 5.9 lt_prog_compiler_pic_CXX='-KPIC' lt_prog_compiler_static_CXX='-Bstatic' lt_prog_compiler_wl_CXX='-Qoption ld ' ;; esac ;; esac ;; lynxos*) ;; m88k*) ;; mvs*) case $cc_basename in cxx*) lt_prog_compiler_pic_CXX='-W c,exportall' ;; *) ;; esac ;; netbsd*) ;; *qnx* | *nto*) # QNX uses GNU C++, but need to define -shared option too, otherwise # it will coredump. lt_prog_compiler_pic_CXX='-fPIC -shared' ;; osf3* | osf4* | osf5*) case $cc_basename in KCC*) lt_prog_compiler_wl_CXX='--backend -Wl,' ;; RCC*) # Rational C++ 2.4.1 lt_prog_compiler_pic_CXX='-pic' ;; cxx*) # Digital/Compaq C++ lt_prog_compiler_wl_CXX='-Wl,' # Make sure the PIC flag is empty. It appears that all Alpha # Linux and Compaq Tru64 Unix objects are PIC. 
lt_prog_compiler_pic_CXX= lt_prog_compiler_static_CXX='-non_shared' ;; *) ;; esac ;; psos*) ;; solaris*) case $cc_basename in CC* | sunCC*) # Sun C++ 4.2, 5.x and Centerline C++ lt_prog_compiler_pic_CXX='-KPIC' lt_prog_compiler_static_CXX='-Bstatic' lt_prog_compiler_wl_CXX='-Qoption ld ' ;; gcx*) # Green Hills C++ Compiler lt_prog_compiler_pic_CXX='-PIC' ;; *) ;; esac ;; sunos4*) case $cc_basename in CC*) # Sun C++ 4.x lt_prog_compiler_pic_CXX='-pic' lt_prog_compiler_static_CXX='-Bstatic' ;; lcc*) # Lucid lt_prog_compiler_pic_CXX='-pic' ;; *) ;; esac ;; sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) case $cc_basename in CC*) lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_pic_CXX='-KPIC' lt_prog_compiler_static_CXX='-Bstatic' ;; esac ;; tandem*) case $cc_basename in NCC*) # NonStop-UX NCC 3.20 lt_prog_compiler_pic_CXX='-KPIC' ;; *) ;; esac ;; vxworks*) ;; *) lt_prog_compiler_can_build_shared_CXX=no ;; esac fi case $host_os in # For platforms that do not support PIC, -DPIC is meaningless: *djgpp*) lt_prog_compiler_pic_CXX= ;; *) lt_prog_compiler_pic_CXX="$lt_prog_compiler_pic_CXX -DPIC" ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 $as_echo_n "checking for $compiler option to produce PIC... " >&6; } if ${lt_cv_prog_compiler_pic_CXX+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_pic_CXX=$lt_prog_compiler_pic_CXX fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_CXX" >&5 $as_echo "$lt_cv_prog_compiler_pic_CXX" >&6; } lt_prog_compiler_pic_CXX=$lt_cv_prog_compiler_pic_CXX # # Check to make sure the PIC flag actually works. # if test -n "$lt_prog_compiler_pic_CXX"; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works" >&5 $as_echo_n "checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works... " >&6; } if ${lt_cv_prog_compiler_pic_works_CXX+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_pic_works_CXX=no ac_outfile=conftest.$ac_objext echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="$lt_prog_compiler_pic_CXX -DPIC" ## exclude from sc_useless_quotes_in_assignment # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. # The option is referenced via a variable to avoid confusing sed. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) (eval "$lt_compile" 2>conftest.err) ac_status=$? cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 if (exit $ac_status) && test -s "$ac_outfile"; then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings other than the usual output. $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if test ! 
-s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then lt_cv_prog_compiler_pic_works_CXX=yes fi fi $RM conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_works_CXX" >&5 $as_echo "$lt_cv_prog_compiler_pic_works_CXX" >&6; } if test yes = "$lt_cv_prog_compiler_pic_works_CXX"; then case $lt_prog_compiler_pic_CXX in "" | " "*) ;; *) lt_prog_compiler_pic_CXX=" $lt_prog_compiler_pic_CXX" ;; esac else lt_prog_compiler_pic_CXX= lt_prog_compiler_can_build_shared_CXX=no fi fi # # Check to make sure the static flag actually works. # wl=$lt_prog_compiler_wl_CXX eval lt_tmp_static_flag=\"$lt_prog_compiler_static_CXX\" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler static flag $lt_tmp_static_flag works" >&5 $as_echo_n "checking if $compiler static flag $lt_tmp_static_flag works... " >&6; } if ${lt_cv_prog_compiler_static_works_CXX+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_static_works_CXX=no save_LDFLAGS=$LDFLAGS LDFLAGS="$LDFLAGS $lt_tmp_static_flag" echo "$lt_simple_link_test_code" > conftest.$ac_ext if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then # The linker can only warn and ignore the option if not recognized # So say no if there are warnings if test -s conftest.err; then # Append any errors to the config.log. cat conftest.err 1>&5 $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if diff conftest.exp conftest.er2 >/dev/null; then lt_cv_prog_compiler_static_works_CXX=yes fi else lt_cv_prog_compiler_static_works_CXX=yes fi fi $RM -r conftest* LDFLAGS=$save_LDFLAGS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_static_works_CXX" >&5 $as_echo "$lt_cv_prog_compiler_static_works_CXX" >&6; } if test yes = "$lt_cv_prog_compiler_static_works_CXX"; then : else lt_prog_compiler_static_CXX= fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 $as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; } if ${lt_cv_prog_compiler_c_o_CXX+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_c_o_CXX=no $RM -r conftest 2>/dev/null mkdir conftest cd conftest mkdir out echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="-o out/conftest2.$ac_objext" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) (eval "$lt_compile" 2>out/conftest.err) ac_status=$? cat out/conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 if (exit $ac_status) && test -s out/conftest2.$ac_objext then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then lt_cv_prog_compiler_c_o_CXX=yes fi fi chmod u+w . 
2>&5 $RM conftest* # SGI C++ compiler will create directory out/ii_files/ for # template instantiation test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files $RM out/* && rmdir out cd .. $RM -r conftest $RM conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o_CXX" >&5 $as_echo "$lt_cv_prog_compiler_c_o_CXX" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 $as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; } if ${lt_cv_prog_compiler_c_o_CXX+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_c_o_CXX=no $RM -r conftest 2>/dev/null mkdir conftest cd conftest mkdir out echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="-o out/conftest2.$ac_objext" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) (eval "$lt_compile" 2>out/conftest.err) ac_status=$? cat out/conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 if (exit $ac_status) && test -s out/conftest2.$ac_objext then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then lt_cv_prog_compiler_c_o_CXX=yes fi fi chmod u+w . 2>&5 $RM conftest* # SGI C++ compiler will create directory out/ii_files/ for # template instantiation test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files $RM out/* && rmdir out cd .. $RM -r conftest $RM conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o_CXX" >&5 $as_echo "$lt_cv_prog_compiler_c_o_CXX" >&6; } hard_links=nottested if test no = "$lt_cv_prog_compiler_c_o_CXX" && test no != "$need_locks"; then # do not overwrite the value of need_locks provided by the user { $as_echo "$as_me:${as_lineno-$LINENO}: checking if we can lock with hard links" >&5 $as_echo_n "checking if we can lock with hard links... " >&6; } hard_links=yes $RM conftest* ln conftest.a conftest.b 2>/dev/null && hard_links=no touch conftest.a ln conftest.a conftest.b 2>&5 || hard_links=no ln conftest.a conftest.b 2>/dev/null && hard_links=no { $as_echo "$as_me:${as_lineno-$LINENO}: result: $hard_links" >&5 $as_echo "$hard_links" >&6; } if test no = "$hard_links"; then { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: '$CC' does not support '-c -o', so 'make -j' may be unsafe" >&5 $as_echo "$as_me: WARNING: '$CC' does not support '-c -o', so 'make -j' may be unsafe" >&2;} need_locks=warn fi else need_locks=no fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5 $as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... 
" >&6; } export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' exclude_expsyms_CXX='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*' case $host_os in aix[4-9]*) # If we're using GNU nm, then we don't want the "-C" option. # -C means demangle to GNU nm, but means don't demangle to AIX nm. # Without the "-l" option, or with the "-B" option, AIX nm treats # weak defined symbols like other global defined symbols, whereas # GNU nm marks them as "W". # While the 'weak' keyword is ignored in the Export File, we need # it in the Import File for the 'aix-soname' feature, so we have # to replace the "-B" option with "-P" for AIX nm. if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then export_symbols_cmds_CXX='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && (substr(\$ 3,1,1) != ".")) { if (\$ 2 == "W") { print \$ 3 " weak" } else { print \$ 3 } } }'\'' | sort -u > $export_symbols' else export_symbols_cmds_CXX='`func_echo_all $NM | $SED -e '\''s/B\([^B]*\)$/P\1/'\''` -PCpgl $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) && (substr(\$ 1,1,1) != ".")) { if ((\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) { print \$ 1 " weak" } else { print \$ 1 } } }'\'' | sort -u > $export_symbols' fi ;; pw32*) export_symbols_cmds_CXX=$ltdll_cmds ;; cygwin* | mingw* | cegcc*) case $cc_basename in cl*) exclude_expsyms_CXX='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*' ;; *) export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols' exclude_expsyms_CXX='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname' ;; esac ;; *) export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs_CXX" >&5 $as_echo "$ld_shlibs_CXX" >&6; } test no = "$ld_shlibs_CXX" && can_build_shared=no with_gnu_ld_CXX=$with_gnu_ld # # Do we need to explicitly link libc? # case "x$archive_cmds_need_lc_CXX" in x|xyes) # Assume -lc should be added archive_cmds_need_lc_CXX=yes if test yes,yes = "$GCC,$enable_shared"; then case $archive_cmds_CXX in *'~'*) # FIXME: we may have to deal with multi-command sequences. ;; '$CC '*) # Test whether the compiler implicitly links with -lc since on some # systems, -lgcc has to come before -lc. If gcc already passes -lc # to ld, don't add -lc before -lgcc. { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether -lc should be explicitly linked in" >&5 $as_echo_n "checking whether -lc should be explicitly linked in... " >&6; } if ${lt_cv_archive_cmds_need_lc_CXX+:} false; then : $as_echo_n "(cached) " >&6 else $RM conftest* echo "$lt_simple_compile_test_code" > conftest.$ac_ext if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } 2>conftest.err; then soname=conftest lib=conftest libobjs=conftest.$ac_objext deplibs= wl=$lt_prog_compiler_wl_CXX pic_flag=$lt_prog_compiler_pic_CXX compiler_flags=-v linker_flags=-v verstring= output_objdir=. 
libname=conftest lt_save_allow_undefined_flag=$allow_undefined_flag_CXX allow_undefined_flag_CXX= if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$archive_cmds_CXX 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1\""; } >&5 (eval $archive_cmds_CXX 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } then lt_cv_archive_cmds_need_lc_CXX=no else lt_cv_archive_cmds_need_lc_CXX=yes fi allow_undefined_flag_CXX=$lt_save_allow_undefined_flag else cat conftest.err 1>&5 fi $RM conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_archive_cmds_need_lc_CXX" >&5 $as_echo "$lt_cv_archive_cmds_need_lc_CXX" >&6; } archive_cmds_need_lc_CXX=$lt_cv_archive_cmds_need_lc_CXX ;; esac fi ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking dynamic linker characteristics" >&5 $as_echo_n "checking dynamic linker characteristics... " >&6; } library_names_spec= libname_spec='lib$name' soname_spec= shrext_cmds=.so postinstall_cmds= postuninstall_cmds= finish_cmds= finish_eval= shlibpath_var= shlibpath_overrides_runpath=unknown version_type=none dynamic_linker="$host_os ld.so" sys_lib_dlsearch_path_spec="/lib /usr/lib" need_lib_prefix=unknown hardcode_into_libs=no # when you set need_version to no, make sure it does not cause -set_version # flags to be left without arguments need_version=unknown case $host_os in aix3*) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='$libname$release$shared_ext$versuffix $libname.a' shlibpath_var=LIBPATH # AIX 3 has no versioning support, so we append a major version to the name. soname_spec='$libname$release$shared_ext$major' ;; aix[4-9]*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no hardcode_into_libs=yes if test ia64 = "$host_cpu"; then # AIX 5 supports IA64 library_names_spec='$libname$release$shared_ext$major $libname$release$shared_ext$versuffix $libname$shared_ext' shlibpath_var=LD_LIBRARY_PATH else # With GCC up to 2.95.x, collect2 would create an import file # for dependence libraries. The import file would start with # the line '#! .'. This would cause the generated library to # depend on '.', always an invalid library. This was fixed in # development snapshots of GCC prior to 3.0. case $host_os in aix4 | aix4.[01] | aix4.[01].*) if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' echo ' yes ' echo '#endif'; } | $CC -E - | $GREP yes > /dev/null; then : else can_build_shared=no fi ;; esac # Using Import Files as archive members, it is possible to support # filename-based versioning of shared library archives on AIX. While # this would work for both with and without runtime linking, it will # prevent static linking of such archives. So we do filename-based # shared library versioning with .so extension only, which is used # when both runtime linking and shared linking is enabled. # Unfortunately, runtime linking may impact performance, so we do # not want this to be the default eventually. Also, we use the # versioned .so libs for executables only if there is the -brtl # linker flag in LDFLAGS as well, or --with-aix-soname=svr4 only. 
# To allow for filename-based versioning support, we need to create # libNAME.so.V as an archive file, containing: # *) an Import File, referring to the versioned filename of the # archive as well as the shared archive member, telling the # bitwidth (32 or 64) of that shared object, and providing the # list of exported symbols of that shared object, eventually # decorated with the 'weak' keyword # *) the shared object with the F_LOADONLY flag set, to really avoid # it being seen by the linker. # At run time we better use the real file rather than another symlink, # but for link time we create the symlink libNAME.so -> libNAME.so.V case $with_aix_soname,$aix_use_runtimelinking in # AIX (on Power*) has no versioning support, so currently we cannot hardcode correct # soname into executable. Probably we can add versioning support to # collect2, so additional links can be useful in future. aix,yes) # traditional libtool dynamic_linker='AIX unversionable lib.so' # If using run time linking (on AIX 4.2 or later) use lib.so # instead of lib.a to let people know that these are not # typical AIX shared libraries. library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ;; aix,no) # traditional AIX only dynamic_linker='AIX lib.a(lib.so.V)' # We preserve .a as extension for shared libraries through AIX4.2 # and later when we are not doing run time linking. library_names_spec='$libname$release.a $libname.a' soname_spec='$libname$release$shared_ext$major' ;; svr4,*) # full svr4 only dynamic_linker="AIX lib.so.V($shared_archive_member_spec.o)" library_names_spec='$libname$release$shared_ext$major $libname$shared_ext' # We do not specify a path in Import Files, so LIBPATH fires. shlibpath_overrides_runpath=yes ;; *,yes) # both, prefer svr4 dynamic_linker="AIX lib.so.V($shared_archive_member_spec.o), lib.a(lib.so.V)" library_names_spec='$libname$release$shared_ext$major $libname$shared_ext' # unpreferred sharedlib libNAME.a needs extra handling postinstall_cmds='test -n "$linkname" || linkname="$realname"~func_stripname "" ".so" "$linkname"~$install_shared_prog "$dir/$func_stripname_result.$libext" "$destdir/$func_stripname_result.$libext"~test -z "$tstripme" || test -z "$striplib" || $striplib "$destdir/$func_stripname_result.$libext"' postuninstall_cmds='for n in $library_names $old_library; do :; done~func_stripname "" ".so" "$n"~test "$func_stripname_result" = "$n" || func_append rmfiles " $odir/$func_stripname_result.$libext"' # We do not specify a path in Import Files, so LIBPATH fires. shlibpath_overrides_runpath=yes ;; *,no) # both, prefer aix dynamic_linker="AIX lib.a(lib.so.V), lib.so.V($shared_archive_member_spec.o)" library_names_spec='$libname$release.a $libname.a' soname_spec='$libname$release$shared_ext$major' # unpreferred sharedlib libNAME.so.V and symlink libNAME.so need extra handling postinstall_cmds='test -z "$dlname" || $install_shared_prog $dir/$dlname $destdir/$dlname~test -z "$tstripme" || test -z "$striplib" || $striplib $destdir/$dlname~test -n "$linkname" || linkname=$realname~func_stripname "" ".a" "$linkname"~(cd "$destdir" && $LN_S -f $dlname $func_stripname_result.so)' postuninstall_cmds='test -z "$dlname" || func_append rmfiles " $odir/$dlname"~for n in $old_library $library_names; do :; done~func_stripname "" ".a" "$n"~func_append rmfiles " $odir/$func_stripname_result.so"' ;; esac shlibpath_var=LIBPATH fi ;; amigaos*) case $host_cpu in powerpc) # Since July 2007 AmigaOS4 officially supports .so libraries. 
# When compiling the executable, add -use-dynld -Lsobjs: to the compileline. library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ;; m68k) library_names_spec='$libname.ixlibrary $libname.a' # Create ${libname}_ixlibrary.a entries in /sys/libs. finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' ;; esac ;; beos*) library_names_spec='$libname$shared_ext' dynamic_linker="$host_os ld.so" shlibpath_var=LIBRARY_PATH ;; bsdi[45]*) version_type=linux # correct to gnu/linux during the next big refactor need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' shlibpath_var=LD_LIBRARY_PATH sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" # the default ld.so.conf also contains /usr/contrib/lib and # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow # libtool to hard-code these into programs ;; cygwin* | mingw* | pw32* | cegcc*) version_type=windows shrext_cmds=.dll need_version=no need_lib_prefix=no case $GCC,$cc_basename in yes,*) # gcc library_names_spec='$libname.dll.a' # DLL is installed to $(libdir)/../bin by postinstall_cmds postinstall_cmds='base_file=`basename \$file`~ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~ dldir=$destdir/`dirname \$dlpath`~ test -d \$dldir || mkdir -p \$dldir~ $install_prog $dir/$dlname \$dldir/$dlname~ chmod a+x \$dldir/$dlname~ if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; fi' postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ dlpath=$dir/\$dldll~ $RM \$dlpath' shlibpath_overrides_runpath=yes case $host_os in cygwin*) # Cygwin DLLs use 'cyg' prefix rather than 'lib' soname_spec='`echo $libname | sed -e 's/^lib/cyg/'``echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' ;; mingw* | cegcc*) # MinGW DLLs use traditional 'lib' prefix soname_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' ;; pw32*) # pw32 DLLs use 'pw' prefix rather than 'lib' library_names_spec='`echo $libname | sed -e 's/^lib/pw/'``echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' ;; esac dynamic_linker='Win32 ld.exe' ;; *,cl*) # Native MSVC libname_spec='$name' soname_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' library_names_spec='$libname.dll.lib' case $build_os in mingw*) sys_lib_search_path_spec= lt_save_ifs=$IFS IFS=';' for lt_path in $LIB do IFS=$lt_save_ifs # Let DOS variable expansion print the short 8.3 style file name. lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" done IFS=$lt_save_ifs # Convert to MSYS style. sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'` ;; cygwin*) # Convert to unix form, then to dos form, then back to unix form # but this time dos style (no spaces!) so that the unix form looks # like /cygdrive/c/PROGRA~1:/cygdr... 
sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` ;; *) sys_lib_search_path_spec=$LIB if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then # It is most probably a Windows format PATH. sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` else sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` fi # FIXME: find the short name or the path components, as spaces are # common. (e.g. "Program Files" -> "PROGRA~1") ;; esac # DLL is installed to $(libdir)/../bin by postinstall_cmds postinstall_cmds='base_file=`basename \$file`~ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~ dldir=$destdir/`dirname \$dlpath`~ test -d \$dldir || mkdir -p \$dldir~ $install_prog $dir/$dlname \$dldir/$dlname' postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ dlpath=$dir/\$dldll~ $RM \$dlpath' shlibpath_overrides_runpath=yes dynamic_linker='Win32 link.exe' ;; *) # Assume MSVC wrapper library_names_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext $libname.lib' dynamic_linker='Win32 ld.exe' ;; esac # FIXME: first we should search . and the directory the executable is in shlibpath_var=PATH ;; darwin* | rhapsody*) dynamic_linker="$host_os dyld" version_type=darwin need_lib_prefix=no need_version=no library_names_spec='$libname$release$major$shared_ext $libname$shared_ext' soname_spec='$libname$release$major$shared_ext' shlibpath_overrides_runpath=yes shlibpath_var=DYLD_LIBRARY_PATH shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' ;; dgux*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LD_LIBRARY_PATH ;; freebsd* | dragonfly*) # DragonFly does not have aout. When/if they implement a new # versioning mechanism, adjust this. 
if test -x /usr/bin/objformat; then objformat=`/usr/bin/objformat` else case $host_os in freebsd[23].*) objformat=aout ;; *) objformat=elf ;; esac fi version_type=freebsd-$objformat case $version_type in freebsd-elf*) library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' need_version=no need_lib_prefix=no ;; freebsd-*) library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' need_version=yes ;; esac shlibpath_var=LD_LIBRARY_PATH case $host_os in freebsd2.*) shlibpath_overrides_runpath=yes ;; freebsd3.[01]* | freebsdelf3.[01]*) shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; freebsd3.[2-9]* | freebsdelf3.[2-9]* | \ freebsd4.[0-5] | freebsdelf4.[0-5] | freebsd4.1.1 | freebsdelf4.1.1) shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; *) # from 4.6 on, and DragonFly shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; esac ;; haiku*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no dynamic_linker="$host_os runtime_loader" library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LIBRARY_PATH shlibpath_overrides_runpath=no sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' hardcode_into_libs=yes ;; hpux9* | hpux10* | hpux11*) # Give a soname corresponding to the major version so that dld.sl refuses to # link against other versions. version_type=sunos need_lib_prefix=no need_version=no case $host_cpu in ia64*) shrext_cmds='.so' hardcode_into_libs=yes dynamic_linker="$host_os dld.so" shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' if test 32 = "$HPUX_IA64_MODE"; then sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" sys_lib_dlsearch_path_spec=/usr/lib/hpux32 else sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" sys_lib_dlsearch_path_spec=/usr/lib/hpux64 fi ;; hppa*64*) shrext_cmds='.sl' hardcode_into_libs=yes dynamic_linker="$host_os dld.sl" shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ;; *) shrext_cmds='.sl' dynamic_linker="$host_os dld.sl" shlibpath_var=SHLIB_PATH shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' ;; esac # HP-UX runs *really* slowly unless shared libraries are mode 555, ... 
postinstall_cmds='chmod 555 $lib' # or fails outright, so override atomically: install_override_mode=555 ;; interix[3-9]*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; irix5* | irix6* | nonstopux*) case $host_os in nonstopux*) version_type=nonstopux ;; *) if test yes = "$lt_cv_prog_gnu_ld"; then version_type=linux # correct to gnu/linux during the next big refactor else version_type=irix fi ;; esac need_lib_prefix=no need_version=no soname_spec='$libname$release$shared_ext$major' library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$release$shared_ext $libname$shared_ext' case $host_os in irix5* | nonstopux*) libsuff= shlibsuff= ;; *) case $LD in # libtool.m4 will add one of these switches to LD *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") libsuff= shlibsuff= libmagic=32-bit;; *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") libsuff=32 shlibsuff=N32 libmagic=N32;; *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") libsuff=64 shlibsuff=64 libmagic=64-bit;; *) libsuff= shlibsuff= libmagic=never-match;; esac ;; esac shlibpath_var=LD_LIBRARY${shlibsuff}_PATH shlibpath_overrides_runpath=no sys_lib_search_path_spec="/usr/lib$libsuff /lib$libsuff /usr/local/lib$libsuff" sys_lib_dlsearch_path_spec="/usr/lib$libsuff /lib$libsuff" hardcode_into_libs=yes ;; # No shared lib support for Linux oldld, aout, or coff. linux*oldld* | linux*aout* | linux*coff*) dynamic_linker=no ;; linux*android*) version_type=none # Android doesn't support versioned libraries. need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext' soname_spec='$libname$release$shared_ext' finish_cmds= shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes # This implies no fast_install, which is unacceptable. # Some rework will be needed to allow for fast_install # before this can be enabled. hardcode_into_libs=yes dynamic_linker='Android linker' # Don't embed -rpath directories since the linker doesn't support them. hardcode_libdir_flag_spec_CXX='-L$libdir' ;; # This must be glibc/ELF. linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no # Some binutils ld are patched to set DT_RUNPATH if ${lt_cv_shlibpath_overrides_runpath+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_shlibpath_overrides_runpath=no save_LDFLAGS=$LDFLAGS save_libdir=$libdir eval "libdir=/foo; wl=\"$lt_prog_compiler_wl_CXX\"; \ LDFLAGS=\"\$LDFLAGS $hardcode_libdir_flag_spec_CXX\"" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_link "$LINENO"; then : if ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null; then : lt_cv_shlibpath_overrides_runpath=yes fi fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LDFLAGS=$save_LDFLAGS libdir=$save_libdir fi shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath # This implies no fast_install, which is unacceptable. # Some rework will be needed to allow for fast_install # before this can be enabled. hardcode_into_libs=yes # Ideally, we could use ldconfig to report *all* directores which are # searched for libraries, however this is still not possible. Aside from not # being certain /sbin/ldconfig is available, command # 'ldconfig -N -X -v | grep ^/' on 64bit Fedora does not report /usr/lib64, # even though it is searched at run-time. Try to do the best guess by # appending ld.so.conf contents (and includes) to the search path. if test -f /etc/ld.so.conf; then lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '` sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra" fi # We used to test for /lib/ld.so.1 and disable shared libraries on # powerpc, because MkLinux only supported shared libraries with the # GNU dynamic linker. Since this was broken with cross compilers, # most powerpc-linux boxes support dynamic linking these days and # people can always --disable-shared, the test was removed, and we # assume the GNU/Linux dynamic linker is in use. dynamic_linker='GNU/Linux ld.so' ;; netbsd*) version_type=sunos need_lib_prefix=no need_version=no if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' dynamic_linker='NetBSD (a.out) ld.so' else library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' dynamic_linker='NetBSD ld.elf_so' fi shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; newsos6) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes ;; *nto* | *qnx*) version_type=qnx need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes dynamic_linker='ldqnx.so' ;; openbsd* | bitrig*) version_type=sunos sys_lib_dlsearch_path_spec=/usr/lib need_lib_prefix=no if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then need_version=no else need_version=yes fi library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes ;; os2*) libname_spec='$name' version_type=windows shrext_cmds=.dll need_version=no need_lib_prefix=no # OS/2 can only load a DLL with a base name of 8 characters or less. 
soname_spec='`test -n "$os2dllname" && libname="$os2dllname"; v=$($ECHO $release$versuffix | tr -d .-); n=$($ECHO $libname | cut -b -$((8 - ${#v})) | tr . _); $ECHO $n$v`$shared_ext' library_names_spec='${libname}_dll.$libext' dynamic_linker='OS/2 ld.exe' shlibpath_var=BEGINLIBPATH sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec postinstall_cmds='base_file=`basename \$file`~ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; $ECHO \$dlname'\''`~ dldir=$destdir/`dirname \$dlpath`~ test -d \$dldir || mkdir -p \$dldir~ $install_prog $dir/$dlname \$dldir/$dlname~ chmod a+x \$dldir/$dlname~ if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; fi' postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; $ECHO \$dlname'\''`~ dlpath=$dir/\$dldll~ $RM \$dlpath' ;; osf3* | osf4* | osf5*) version_type=osf need_lib_prefix=no need_version=no soname_spec='$libname$release$shared_ext$major' library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' shlibpath_var=LD_LIBRARY_PATH sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ;; rdos*) dynamic_linker=no ;; solaris*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes # ldd complains unless libraries are executable postinstall_cmds='chmod +x $lib' ;; sunos4*) version_type=sunos library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes if test yes = "$with_gnu_ld"; then need_lib_prefix=no fi need_version=yes ;; sysv4 | sysv4.3*) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LD_LIBRARY_PATH case $host_vendor in sni) shlibpath_overrides_runpath=no need_lib_prefix=no runpath_var=LD_RUN_PATH ;; siemens) need_lib_prefix=no ;; motorola) need_lib_prefix=no need_version=no shlibpath_overrides_runpath=no sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' ;; esac ;; sysv4*MP*) if test -d /usr/nec; then version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='$libname$shared_ext.$versuffix $libname$shared_ext.$major $libname$shared_ext' soname_spec='$libname$shared_ext.$major' shlibpath_var=LD_LIBRARY_PATH fi ;; sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) version_type=sco need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes if test yes = "$with_gnu_ld"; then sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' else sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' case $host_os in sco3.2v5*) sys_lib_search_path_spec="$sys_lib_search_path_spec 
/lib" ;; esac fi sys_lib_dlsearch_path_spec='/usr/lib' ;; tpf*) # TPF is a cross-target only. Preferred cross-host = GNU/Linux. version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; uts4*) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LD_LIBRARY_PATH ;; *) dynamic_linker=no ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: result: $dynamic_linker" >&5 $as_echo "$dynamic_linker" >&6; } test no = "$dynamic_linker" && can_build_shared=no variables_saved_for_relink="PATH $shlibpath_var $runpath_var" if test yes = "$GCC"; then variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" fi if test set = "${lt_cv_sys_lib_search_path_spec+set}"; then sys_lib_search_path_spec=$lt_cv_sys_lib_search_path_spec fi if test set = "${lt_cv_sys_lib_dlsearch_path_spec+set}"; then sys_lib_dlsearch_path_spec=$lt_cv_sys_lib_dlsearch_path_spec fi # remember unaugmented sys_lib_dlsearch_path content for libtool script decls... configure_time_dlsearch_path=$sys_lib_dlsearch_path_spec # ... but it needs LT_SYS_LIBRARY_PATH munging for other configure-time code func_munge_path_list sys_lib_dlsearch_path_spec "$LT_SYS_LIBRARY_PATH" # to be used as default LT_SYS_LIBRARY_PATH value in generated libtool configure_time_lt_sys_library_path=$LT_SYS_LIBRARY_PATH { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to hardcode library paths into programs" >&5 $as_echo_n "checking how to hardcode library paths into programs... " >&6; } hardcode_action_CXX= if test -n "$hardcode_libdir_flag_spec_CXX" || test -n "$runpath_var_CXX" || test yes = "$hardcode_automatic_CXX"; then # We can hardcode non-existent directories. if test no != "$hardcode_direct_CXX" && # If the only mechanism to avoid hardcoding is shlibpath_var, we # have to relink, otherwise we might link with an installed library # when we should be linking with a yet-to-be-installed one ## test no != "$_LT_TAGVAR(hardcode_shlibpath_var, CXX)" && test no != "$hardcode_minus_L_CXX"; then # Linking always hardcodes the temporary library directory. hardcode_action_CXX=relink else # We can link without hardcoding, and we can hardcode nonexisting dirs. hardcode_action_CXX=immediate fi else # We cannot hardcode anything, or else we can only hardcode existing # directories. 
hardcode_action_CXX=unsupported fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $hardcode_action_CXX" >&5 $as_echo "$hardcode_action_CXX" >&6; } if test relink = "$hardcode_action_CXX" || test yes = "$inherit_rpath_CXX"; then # Fast installation is not supported enable_fast_install=no elif test yes = "$shlibpath_overrides_runpath" || test no = "$enable_shared"; then # Fast installation is not necessary enable_fast_install=needless fi fi # test -n "$compiler" CC=$lt_save_CC CFLAGS=$lt_save_CFLAGS LDCXX=$LD LD=$lt_save_LD GCC=$lt_save_GCC with_gnu_ld=$lt_save_with_gnu_ld lt_cv_path_LDCXX=$lt_cv_path_LD lt_cv_path_LD=$lt_save_path_LD lt_cv_prog_gnu_ldcxx=$lt_cv_prog_gnu_ld lt_cv_prog_gnu_ld=$lt_save_with_gnu_ld fi # test yes != "$_lt_caught_CXX_error" ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. set dummy ${ac_tool_prefix}gcc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="${ac_tool_prefix}gcc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_CC"; then ac_ct_CC=$CC # Extract the first word of "gcc", so it can be a program name with args. set dummy gcc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CC"; then ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_CC="gcc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CC=$ac_cv_prog_ac_ct_CC if test -n "$ac_ct_CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 $as_echo "$ac_ct_CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_CC" = x; then CC="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CC=$ac_ct_CC fi else CC="$ac_cv_prog_CC" fi if test -z "$CC"; then if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. set dummy ${ac_tool_prefix}cc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="${ac_tool_prefix}cc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi fi if test -z "$CC"; then # Extract the first word of "cc", so it can be a program name with args. set dummy cc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else ac_prog_rejected=no as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then ac_prog_rejected=yes continue fi ac_cv_prog_CC="cc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS if test $ac_prog_rejected = yes; then # We found a bogon in the path, so make sure we never use it. set dummy $ac_cv_prog_CC shift if test $# != 0; then # We chose a different compiler from the bogus one. # However, it has the same basename, so the bogon will be chosen # first if we set CC to just the basename; use the full file name. shift ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@" fi fi fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$CC"; then if test -n "$ac_tool_prefix"; then for ac_prog in cl.exe do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. 
set dummy $ac_tool_prefix$ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="$ac_tool_prefix$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$CC" && break done fi if test -z "$CC"; then ac_ct_CC=$CC for ac_prog in cl.exe do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CC"; then ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_CC="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CC=$ac_cv_prog_ac_ct_CC if test -n "$ac_ct_CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 $as_echo "$ac_ct_CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$ac_ct_CC" && break done if test "x$ac_ct_CC" = x; then CC="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CC=$ac_ct_CC fi fi fi test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "no acceptable C compiler found in \$PATH See \`config.log' for more details" "$LINENO" 5; } # Provide some information about the compiler. $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 set X $ac_compile ac_compiler=$2 for ac_option in --version -v -V -qversion; do { { ac_try="$ac_compiler $ac_option >&5" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compiler $ac_option >&5") 2>conftest.err ac_status=$? if test -s conftest.err; then sed '10a\ ... rest of stderr output deleted ... 10q' conftest.err >conftest.er1 cat conftest.er1 >&5 fi rm -f conftest.er1 conftest.err $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } done { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5 $as_echo_n "checking whether we are using the GNU C compiler... 
" >&6; } if ${ac_cv_c_compiler_gnu+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { #ifndef __GNUC__ choke me #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_compiler_gnu=yes else ac_compiler_gnu=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_c_compiler_gnu=$ac_compiler_gnu fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 $as_echo "$ac_cv_c_compiler_gnu" >&6; } if test $ac_compiler_gnu = yes; then GCC=yes else GCC= fi ac_test_CFLAGS=${CFLAGS+set} ac_save_CFLAGS=$CFLAGS { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 $as_echo_n "checking whether $CC accepts -g... " >&6; } if ${ac_cv_prog_cc_g+:} false; then : $as_echo_n "(cached) " >&6 else ac_save_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes ac_cv_prog_cc_g=no CFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_g=yes else CFLAGS="" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : else ac_c_werror_flag=$ac_save_c_werror_flag CFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_g=yes fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_c_werror_flag=$ac_save_c_werror_flag fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5 $as_echo "$ac_cv_prog_cc_g" >&6; } if test "$ac_test_CFLAGS" = set; then CFLAGS=$ac_save_CFLAGS elif test $ac_cv_prog_cc_g = yes; then if test "$GCC" = yes; then CFLAGS="-g -O2" else CFLAGS="-g" fi else if test "$GCC" = yes; then CFLAGS="-O2" else CFLAGS= fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5 $as_echo_n "checking for $CC option to accept ISO C89... " >&6; } if ${ac_cv_prog_cc_c89+:} false; then : $as_echo_n "(cached) " >&6 else ac_cv_prog_cc_c89=no ac_save_CC=$CC cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include struct stat; /* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */ struct buf { int x; }; FILE * (*rcsopen) (struct buf *, struct stat *, int); static char *e (p, i) char **p; int i; { return p[i]; } static char *f (char * (*g) (char **, int), char **p, ...) { char *s; va_list v; va_start (v,p); s = g (p, va_arg (v,int)); va_end (v); return s; } /* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has function prototypes and stuff, but not '\xHH' hex character constants. These don't provoke an error unfortunately, instead are silently treated as 'x'. The following induces an error, until -std is added to get proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an array size at least. It's necessary to write '\x00'==0 to get something that's true only with -std. */ int osf4_cc_array ['\x00' == 0 ? 1 : -1]; /* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters inside strings and character constants. */ #define FOO(x) 'x' int xlc6_cc_array[FOO(a) == 'x' ? 
1 : -1]; int test (int i, double x); struct s1 {int (*f) (int a);}; struct s2 {int (*f) (double a);}; int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); int argc; char **argv; int main () { return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; ; return 0; } _ACEOF for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" do CC="$ac_save_CC $ac_arg" if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_c89=$ac_arg fi rm -f core conftest.err conftest.$ac_objext test "x$ac_cv_prog_cc_c89" != "xno" && break done rm -f conftest.$ac_ext CC=$ac_save_CC fi # AC_CACHE_VAL case "x$ac_cv_prog_cc_c89" in x) { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 $as_echo "none needed" >&6; } ;; xno) { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 $as_echo "unsupported" >&6; } ;; *) CC="$CC $ac_cv_prog_cc_c89" { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 $as_echo "$ac_cv_prog_cc_c89" >&6; } ;; esac if test "x$ac_cv_prog_cc_c89" != xno; then : fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC understands -c and -o together" >&5 $as_echo_n "checking whether $CC understands -c and -o together... " >&6; } if ${am_cv_prog_cc_c_o+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF # Make sure it works both with $CC and with simple cc. # Following AC_PROG_CC_C_O, we do the test twice because some # compilers refuse to overwrite an existing .o file with -o, # though they will create one. am_cv_prog_cc_c_o=yes for am_i in 1 2; do if { echo "$as_me:$LINENO: $CC -c conftest.$ac_ext -o conftest2.$ac_objext" >&5 ($CC -c conftest.$ac_ext -o conftest2.$ac_objext) >&5 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); } \ && test -f conftest2.$ac_objext; then : OK else am_cv_prog_cc_c_o=no break fi done rm -f core conftest* unset am_i fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_prog_cc_c_o" >&5 $as_echo "$am_cv_prog_cc_c_o" >&6; } if test "$am_cv_prog_cc_c_o" != yes; then # Losing compiler, so override with the script. # FIXME: It is wrong to rewrite CC. # But if we don't then we get into trouble of one sort or another. # A longer-term fix would be to have automake use am__CC in this case, # and then we could set am__CC="\$(top_srcdir)/compile \$(CC)" CC="$am_aux_dir/compile $CC" fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu depcc="$CC" am_compiler_list= { $as_echo "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5 $as_echo_n "checking dependency style of $depcc... " >&6; } if ${am_cv_CC_dependencies_compiler_type+:} false; then : $as_echo_n "(cached) " >&6 else if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then # We make a subdir and do the tests there. 
Otherwise we can end up # making bogus files that we don't know about and never remove. For # instance it was reported that on HP-UX the gcc test will end up # making a dummy file named 'D' -- because '-MD' means "put the output # in D". rm -rf conftest.dir mkdir conftest.dir # Copy depcomp to subdir because otherwise we won't find it if we're # using a relative directory. cp "$am_depcomp" conftest.dir cd conftest.dir # We will build objects and dependencies in a subdirectory because # it helps to detect inapplicable dependency modes. For instance # both Tru64's cc and ICC support -MD to output dependencies as a # side effect of compilation, but ICC will put the dependencies in # the current directory while Tru64 will put them in the object # directory. mkdir sub am_cv_CC_dependencies_compiler_type=none if test "$am_compiler_list" = ""; then am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp` fi am__universal=false case " $depcc " in #( *\ -arch\ *\ -arch\ *) am__universal=true ;; esac for depmode in $am_compiler_list; do # Setup a source with many dependencies, because some compilers # like to wrap large dependency lists on column 80 (with \), and # we should not choose a depcomp mode which is confused by this. # # We need to recreate these files for each test, as the compiler may # overwrite some of them when testing with obscure command lines. # This happens at least with the AIX C compiler. : > sub/conftest.c for i in 1 2 3 4 5 6; do echo '#include "conftst'$i'.h"' >> sub/conftest.c # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with # Solaris 10 /bin/sh. echo '/* dummy */' > sub/conftst$i.h done echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf # We check with '-c' and '-o' for the sake of the "dashmstdout" # mode. It turns out that the SunPro C++ compiler does not properly # handle '-M -o', and we need to detect this. Also, some Intel # versions had trouble with output in subdirs. am__obj=sub/conftest.${OBJEXT-o} am__minus_obj="-o $am__obj" case $depmode in gcc) # This depmode causes a compiler race in universal mode. test "$am__universal" = false || continue ;; nosideeffect) # After this tag, mechanisms are not by side-effect, so they'll # only be used when explicitly requested. if test "x$enable_dependency_tracking" = xyes; then continue else break fi ;; msvc7 | msvc7msys | msvisualcpp | msvcmsys) # This compiler won't grok '-c -o', but also, the minuso test has # not run yet. These depmodes are late enough in the game, and # so weak that their functioning should not be impacted. am__obj=conftest.${OBJEXT-o} am__minus_obj= ;; none) break ;; esac if depmode=$depmode \ source=sub/conftest.c object=$am__obj \ depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ >/dev/null 2>conftest.err && grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && grep $am__obj sub/conftest.Po > /dev/null 2>&1 && ${MAKE-make} -s -f confmf > /dev/null 2>&1; then # icc doesn't choke on unknown options, it will just issue warnings # or remarks (even with -Werror). So we grep stderr for any message # that says an option was ignored or not supported. 
# When given -MP, icc 7.0 and 7.1 complain thusly: # icc: Command line warning: ignoring option '-M'; no argument required # The diagnosis changed in icc 8.0: # icc: Command line remark: option '-MP' not supported if (grep 'ignoring option' conftest.err || grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else am_cv_CC_dependencies_compiler_type=$depmode break fi fi done cd .. rm -rf conftest.dir else am_cv_CC_dependencies_compiler_type=none fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_CC_dependencies_compiler_type" >&5 $as_echo "$am_cv_CC_dependencies_compiler_type" >&6; } CCDEPMODE=depmode=$am_cv_CC_dependencies_compiler_type if test "x$enable_dependency_tracking" != xno \ && test "$am_cv_CC_dependencies_compiler_type" = gcc3; then am__fastdepCC_TRUE= am__fastdepCC_FALSE='#' else am__fastdepCC_TRUE='#' am__fastdepCC_FALSE= fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C preprocessor" >&5 $as_echo_n "checking how to run the C preprocessor... " >&6; } # On Suns, sometimes $CPP names a directory. if test -n "$CPP" && test -d "$CPP"; then CPP= fi if test -z "$CPP"; then if ${ac_cv_prog_CPP+:} false; then : $as_echo_n "(cached) " >&6 else # Double quotes because CPP needs to be expanded for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp" do ac_preproc_ok=false for ac_c_preproc_warn_flag in '' yes do # Use a header file that comes with gcc, so configuring glibc # with a fresh cross-compiler works. # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since # <limits.h> exists even on freestanding compilers. # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifdef __STDC__ # include <limits.h> #else # include <assert.h> #endif Syntax error _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : else # Broken: fails on valid input. continue fi rm -f conftest.err conftest.i conftest.$ac_ext # OK, works on sane cases. Now check whether nonexistent headers # can be detected and how. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <ac_nonexistent.h> _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : # Broken: success on invalid input. continue else # Passes both tests. ac_preproc_ok=: break fi rm -f conftest.err conftest.i conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. rm -f conftest.i conftest.err conftest.$ac_ext if $ac_preproc_ok; then : break fi done ac_cv_prog_CPP=$CPP fi CPP=$ac_cv_prog_CPP else ac_cv_prog_CPP=$CPP fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CPP" >&5 $as_echo "$CPP" >&6; } ac_preproc_ok=false for ac_c_preproc_warn_flag in '' yes do # Use a header file that comes with gcc, so configuring glibc # with a fresh cross-compiler works. # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since # <limits.h> exists even on freestanding compilers. # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifdef __STDC__ # include <limits.h> #else # include <assert.h> #endif Syntax error _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : else # Broken: fails on valid input. continue fi rm -f conftest.err conftest.i conftest.$ac_ext # OK, works on sane cases. 
Now check whether nonexistent headers # can be detected and how. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : # Broken: success on invalid input. continue else # Passes both tests. ac_preproc_ok=: break fi rm -f conftest.err conftest.i conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. rm -f conftest.i conftest.err conftest.$ac_ext if $ac_preproc_ok; then : else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "C preprocessor \"$CPP\" fails sanity check See \`config.log' for more details" "$LINENO" 5; } fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ln -s works" >&5 $as_echo_n "checking whether ln -s works... " >&6; } LN_S=$as_ln_s if test "$LN_S" = "ln -s"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no, using $LN_S" >&5 $as_echo "no, using $LN_S" >&6; } fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} sets \$(MAKE)" >&5 $as_echo_n "checking whether ${MAKE-make} sets \$(MAKE)... " >&6; } set x ${MAKE-make} ac_make=`$as_echo "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'` if eval \${ac_cv_prog_make_${ac_make}_set+:} false; then : $as_echo_n "(cached) " >&6 else cat >conftest.make <<\_ACEOF SHELL = /bin/sh all: @echo '@@@%%%=$(MAKE)=@@@%%%' _ACEOF # GNU make sometimes prints "make[1]: Entering ...", which would confuse us. case `${MAKE-make} -f conftest.make 2>/dev/null` in *@@@%%%=?*=@@@%%%*) eval ac_cv_prog_make_${ac_make}_set=yes;; *) eval ac_cv_prog_make_${ac_make}_set=no;; esac rm -f conftest.make fi if eval test \$ac_cv_prog_make_${ac_make}_set = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } SET_MAKE= else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } SET_MAKE="MAKE=${MAKE-make}" fi # Extract the first word of "perl", so it can be a program name with args. set dummy perl; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_PERL+:} false; then : $as_echo_n "(cached) " >&6 else case $PERL in [\\/]* | ?:[\\/]*) ac_cv_path_PERL="$PERL" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_PERL="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi PERL=$ac_cv_path_PERL if test -n "$PERL"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $PERL" >&5 $as_echo "$PERL" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5 $as_echo_n "checking for ANSI C header files... " >&6; } if ${ac_cv_header_stdc+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include <stdlib.h> #include <stdarg.h> #include <string.h> #include <float.h> int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_header_stdc=yes else ac_cv_header_stdc=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test $ac_cv_header_stdc = yes; then # SunOS 4.x string.h does not declare mem*, contrary to ANSI. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <string.h> _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "memchr" >/dev/null 2>&1; then : else ac_cv_header_stdc=no fi rm -f conftest* fi if test $ac_cv_header_stdc = yes; then # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <stdlib.h> _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "free" >/dev/null 2>&1; then : else ac_cv_header_stdc=no fi rm -f conftest* fi if test $ac_cv_header_stdc = yes; then # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. if test "$cross_compiling" = yes; then : : else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <ctype.h> #include <stdlib.h> #if ((' ' & 0x0FF) == 0x020) # define ISLOWER(c) ('a' <= (c) && (c) <= 'z') # define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c)) #else # define ISLOWER(c) \ (('a' <= (c) && (c) <= 'i') \ || ('j' <= (c) && (c) <= 'r') \ || ('s' <= (c) && (c) <= 'z')) # define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c)) #endif #define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) int main () { int i; for (i = 0; i < 256; i++) if (XOR (islower (i), ISLOWER (i)) || toupper (i) != TOUPPER (i)) return 2; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : else ac_cv_header_stdc=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5 $as_echo "$ac_cv_header_stdc" >&6; } if test $ac_cv_header_stdc = yes; then $as_echo "#define STDC_HEADERS 1" >>confdefs.h fi for ac_header in err.h inttypes.h unistd.h stdint.h sys/param.h sys/resource.h do : as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default" if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF fi done { $as_echo "$as_me:${as_lineno-$LINENO}: checking for stdbool.h that conforms to C99" >&5 $as_echo_n "checking for stdbool.h that conforms to C99... " >&6; } if ${ac_cv_header_stdbool_h+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <stdbool.h> #ifndef bool "error: bool is not defined" #endif #ifndef false "error: false is not defined" #endif #if false "error: false is not 0" #endif #ifndef true "error: true is not defined" #endif #if true != 1 "error: true is not 1" #endif #ifndef __bool_true_false_are_defined "error: __bool_true_false_are_defined is not defined" #endif struct s { _Bool s: 1; _Bool t; } s; char a[true == 1 ? 1 : -1]; char b[false == 0 ? 1 : -1]; char c[__bool_true_false_are_defined == 1 ? 1 : -1]; char d[(bool) 0.5 == true ? 1 : -1]; /* See body of main program for 'e'. */ char f[(_Bool) 0.0 == false ? 1 : -1]; char g[true]; char h[sizeof (_Bool)]; char i[sizeof s.t]; enum { j = false, k = true, l = false * true, m = true * 256 }; /* The following fails for HP aC++/ANSI C B3910B A.05.55 [Dec 04 2003]. */ _Bool n[m]; char o[sizeof n == m * sizeof n[0] ? 
1 : -1]; char p[-1 - (_Bool) 0 < 0 && -1 - (bool) 0 < 0 ? 1 : -1]; /* Catch a bug in an HP-UX C compiler. See http://gcc.gnu.org/ml/gcc-patches/2003-12/msg02303.html http://lists.gnu.org/archive/html/bug-coreutils/2005-11/msg00161.html */ _Bool q = true; _Bool *pq = &q; int main () { bool e = &s; *pq |= q; *pq |= ! q; /* Refer to every declared value, to avoid compiler optimizations. */ return (!a + !b + !c + !d + !e + !f + !g + !h + !i + !!j + !k + !!l + !m + !n + !o + !p + !q + !pq); ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_header_stdbool_h=yes else ac_cv_header_stdbool_h=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdbool_h" >&5 $as_echo "$ac_cv_header_stdbool_h" >&6; } ac_fn_c_check_type "$LINENO" "_Bool" "ac_cv_type__Bool" "$ac_includes_default" if test "x$ac_cv_type__Bool" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE__BOOL 1 _ACEOF fi if test $ac_cv_header_stdbool_h = yes; then $as_echo "#define HAVE_STDBOOL_H 1" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for an ANSI C-conforming const" >&5 $as_echo_n "checking for an ANSI C-conforming const... " >&6; } if ${ac_cv_c_const+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { #ifndef __cplusplus /* Ultrix mips cc rejects this sort of thing. */ typedef int charset[2]; const charset cs = { 0, 0 }; /* SunOS 4.1.1 cc rejects this. */ char const *const *pcpcc; char **ppc; /* NEC SVR4.0.2 mips cc rejects this. */ struct point {int x, y;}; static struct point const zero = {0,0}; /* AIX XL C 1.02.0.0 rejects this. It does not let you subtract one const X* pointer from another in an arm of an if-expression whose if-part is not a constant expression */ const char *g = "string"; pcpcc = &g + (g ? g-g : 0); /* HPUX 7.0 cc rejects these. */ ++pcpcc; ppc = (char**) pcpcc; pcpcc = (char const *const *) ppc; { /* SCO 3.2v4 cc rejects this sort of thing. */ char tx; char *t = &tx; char const *s = 0 ? (char *) 0 : (char const *) 0; *t++ = 0; if (s) return 0; } { /* Someone thinks the Sun supposedly-ANSI compiler will reject this. */ int x[] = {25, 17}; const int *foo = &x[0]; ++foo; } { /* Sun SC1.0 ANSI compiler rejects this -- but not the above. */ typedef const int *iptr; iptr p = 0; ++p; } { /* AIX XL C 1.02.0.0 rejects this sort of thing, saying "k.c", line 2.27: 1506-025 (S) Operand must be a modifiable lvalue. */ struct s { int j; const int *ap[3]; } bx; struct s *b = &bx; b->j = 5; } { /* ULTRIX-32 V3.1 (Rev 9) vcc rejects this */ const int foo = 10; if (!foo) return 0; } return !cs[0] && !zero.x; #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_c_const=yes else ac_cv_c_const=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_const" >&5 $as_echo "$ac_cv_c_const" >&6; } if test $ac_cv_c_const = no; then $as_echo "#define const /**/" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for uid_t in sys/types.h" >&5 $as_echo_n "checking for uid_t in sys/types.h... " >&6; } if ${ac_cv_type_uid_t+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "uid_t" >/dev/null 2>&1; then : ac_cv_type_uid_t=yes else ac_cv_type_uid_t=no fi rm -f conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_type_uid_t" >&5 $as_echo "$ac_cv_type_uid_t" >&6; } if test $ac_cv_type_uid_t = no; then $as_echo "#define uid_t int" >>confdefs.h $as_echo "#define gid_t int" >>confdefs.h fi ac_fn_c_check_type "$LINENO" "mode_t" "ac_cv_type_mode_t" "$ac_includes_default" if test "x$ac_cv_type_mode_t" = xyes; then : else cat >>confdefs.h <<_ACEOF #define mode_t int _ACEOF fi ac_fn_c_check_type "$LINENO" "off_t" "ac_cv_type_off_t" "$ac_includes_default" if test "x$ac_cv_type_off_t" = xyes; then : else cat >>confdefs.h <<_ACEOF #define off_t long int _ACEOF fi ac_fn_c_check_type "$LINENO" "size_t" "ac_cv_type_size_t" "$ac_includes_default" if test "x$ac_cv_type_size_t" = xyes; then : else cat >>confdefs.h <<_ACEOF #define size_t unsigned int _ACEOF fi # Check whether --enable-largefile was given. if test "${enable_largefile+set}" = set; then : enableval=$enable_largefile; fi if test "$enable_largefile" != no; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for special C compiler options needed for large files" >&5 $as_echo_n "checking for special C compiler options needed for large files... " >&6; } if ${ac_cv_sys_largefile_CC+:} false; then : $as_echo_n "(cached) " >&6 else ac_cv_sys_largefile_CC=no if test "$GCC" != yes; then ac_save_CC=$CC while :; do # IRIX 6.2 and later do not support large files by default, # so use the C compiler's -n32 option if that helps. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include /* Check that off_t can represent 2**63 - 1 correctly. We can't simply define LARGE_OFF_T to be 9223372036854775807, since some C++ compilers masquerading as C compilers incorrectly reject 9223372036854775807. */ #define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 && LARGE_OFF_T % 2147483647 == 1) ? 1 : -1]; int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : break fi rm -f core conftest.err conftest.$ac_objext CC="$CC -n32" if ac_fn_c_try_compile "$LINENO"; then : ac_cv_sys_largefile_CC=' -n32'; break fi rm -f core conftest.err conftest.$ac_objext break done CC=$ac_save_CC rm -f conftest.$ac_ext fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_largefile_CC" >&5 $as_echo "$ac_cv_sys_largefile_CC" >&6; } if test "$ac_cv_sys_largefile_CC" != no; then CC=$CC$ac_cv_sys_largefile_CC fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for _FILE_OFFSET_BITS value needed for large files" >&5 $as_echo_n "checking for _FILE_OFFSET_BITS value needed for large files... " >&6; } if ${ac_cv_sys_file_offset_bits+:} false; then : $as_echo_n "(cached) " >&6 else while :; do cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include /* Check that off_t can represent 2**63 - 1 correctly. We can't simply define LARGE_OFF_T to be 9223372036854775807, since some C++ compilers masquerading as C compilers incorrectly reject 9223372036854775807. */ #define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 && LARGE_OFF_T % 2147483647 == 1) ? 1 : -1]; int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_sys_file_offset_bits=no; break fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #define _FILE_OFFSET_BITS 64 #include /* Check that off_t can represent 2**63 - 1 correctly. We can't simply define LARGE_OFF_T to be 9223372036854775807, since some C++ compilers masquerading as C compilers incorrectly reject 9223372036854775807. */ #define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 && LARGE_OFF_T % 2147483647 == 1) ? 1 : -1]; int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_sys_file_offset_bits=64; break fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_sys_file_offset_bits=unknown break done fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_file_offset_bits" >&5 $as_echo "$ac_cv_sys_file_offset_bits" >&6; } case $ac_cv_sys_file_offset_bits in #( no | unknown) ;; *) cat >>confdefs.h <<_ACEOF #define _FILE_OFFSET_BITS $ac_cv_sys_file_offset_bits _ACEOF ;; esac rm -rf conftest* if test $ac_cv_sys_file_offset_bits = unknown; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for _LARGE_FILES value needed for large files" >&5 $as_echo_n "checking for _LARGE_FILES value needed for large files... " >&6; } if ${ac_cv_sys_large_files+:} false; then : $as_echo_n "(cached) " >&6 else while :; do cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include /* Check that off_t can represent 2**63 - 1 correctly. We can't simply define LARGE_OFF_T to be 9223372036854775807, since some C++ compilers masquerading as C compilers incorrectly reject 9223372036854775807. */ #define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 && LARGE_OFF_T % 2147483647 == 1) ? 1 : -1]; int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_sys_large_files=no; break fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #define _LARGE_FILES 1 #include /* Check that off_t can represent 2**63 - 1 correctly. We can't simply define LARGE_OFF_T to be 9223372036854775807, since some C++ compilers masquerading as C compilers incorrectly reject 9223372036854775807. */ #define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 && LARGE_OFF_T % 2147483647 == 1) ? 1 : -1]; int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_sys_large_files=1; break fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_sys_large_files=unknown break done fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_large_files" >&5 $as_echo "$ac_cv_sys_large_files" >&6; } case $ac_cv_sys_large_files in #( no | unknown) ;; *) cat >>confdefs.h <<_ACEOF #define _LARGE_FILES $ac_cv_sys_large_files _ACEOF ;; esac rm -rf conftest* fi fi # The Ultrix 4.2 mips builtin alloca declared by alloca.h only works # for constant arguments. Useless! { $as_echo "$as_me:${as_lineno-$LINENO}: checking for working alloca.h" >&5 $as_echo_n "checking for working alloca.h... " >&6; } if ${ac_cv_working_alloca_h+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include <alloca.h> int main () { char *p = (char *) alloca (2 * sizeof (int)); if (p) return 0; ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_working_alloca_h=yes else ac_cv_working_alloca_h=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_working_alloca_h" >&5 $as_echo "$ac_cv_working_alloca_h" >&6; } if test $ac_cv_working_alloca_h = yes; then $as_echo "#define HAVE_ALLOCA_H 1" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for alloca" >&5 $as_echo_n "checking for alloca... " >&6; } if ${ac_cv_func_alloca_works+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifdef __GNUC__ # define alloca __builtin_alloca #else # ifdef _MSC_VER # include <malloc.h> # define alloca _alloca # else # ifdef HAVE_ALLOCA_H # include <alloca.h> # else # ifdef _AIX #pragma alloca # else # ifndef alloca /* predefined by HP cc +Olibcalls */ void *alloca (size_t); # endif # endif # endif # endif #endif int main () { char *p = (char *) alloca (1); if (p) return 0; ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_func_alloca_works=yes else ac_cv_func_alloca_works=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_alloca_works" >&5 $as_echo "$ac_cv_func_alloca_works" >&6; } if test $ac_cv_func_alloca_works = yes; then $as_echo "#define HAVE_ALLOCA 1" >>confdefs.h else # The SVR3 libPW and SVR4 libucb both contain incompatible functions # that cause trouble. Some versions do not even contain alloca or # contain a buggy version. If you still want to use their alloca, # use ar to extract alloca.o from them instead of compiling alloca.c. ALLOCA=\${LIBOBJDIR}alloca.$ac_objext $as_echo "#define C_ALLOCA 1" >>confdefs.h { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether \`alloca.c' needs Cray hooks" >&5 $as_echo_n "checking whether \`alloca.c' needs Cray hooks... " >&6; } if ${ac_cv_os_cray+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #if defined CRAY && ! defined CRAY2 webecray #else wenotbecray #endif _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "webecray" >/dev/null 2>&1; then : ac_cv_os_cray=yes else ac_cv_os_cray=no fi rm -f conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_os_cray" >&5 $as_echo "$ac_cv_os_cray" >&6; } if test $ac_cv_os_cray = yes; then for ac_func in _getb67 GETB67 getb67; do as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" if eval test \"x\$"$as_ac_var"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define CRAY_STACKSEG_END $ac_func _ACEOF break fi done fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking stack direction for C alloca" >&5 $as_echo_n "checking stack direction for C alloca... " >&6; } if ${ac_cv_c_stack_direction+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : ac_cv_c_stack_direction=0 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $ac_includes_default int find_stack_direction (int *addr, int depth) { int dir, dummy = 0; if (! addr) addr = &dummy; *addr = addr < &dummy ? 1 : addr == &dummy ? 0 : -1; dir = depth ? 
find_stack_direction (addr, depth - 1) : 0; return dir + dummy; } int main (int argc, char **argv) { return find_stack_direction (0, argc + !argv + 20) < 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : ac_cv_c_stack_direction=1 else ac_cv_c_stack_direction=-1 fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_stack_direction" >&5 $as_echo "$ac_cv_c_stack_direction" >&6; } cat >>confdefs.h <<_ACEOF #define STACK_DIRECTION $ac_cv_c_stack_direction _ACEOF fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for error_at_line" >&5 $as_echo_n "checking for error_at_line... " >&6; } if ${ac_cv_lib_error_at_line+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <error.h> int main () { error_at_line (0, 0, "", 0, "an error occurred"); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_error_at_line=yes else ac_cv_lib_error_at_line=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_error_at_line" >&5 $as_echo "$ac_cv_lib_error_at_line" >&6; } if test $ac_cv_lib_error_at_line = no; then case " $LIBOBJS " in *" error.$ac_objext "* ) ;; *) LIBOBJS="$LIBOBJS error.$ac_objext" ;; esac fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for _LARGEFILE_SOURCE value needed for large files" >&5 $as_echo_n "checking for _LARGEFILE_SOURCE value needed for large files... " >&6; } if ${ac_cv_sys_largefile_source+:} false; then : $as_echo_n "(cached) " >&6 else while :; do cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <sys/types.h> /* for off_t */ #include <stdio.h> int main () { int (*fp) (FILE *, off_t, int) = fseeko; return fseeko (stdin, 0, 0) && fp (stdin, 0, 0); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_sys_largefile_source=no; break fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #define _LARGEFILE_SOURCE 1 #include <sys/types.h> /* for off_t */ #include <stdio.h> int main () { int (*fp) (FILE *, off_t, int) = fseeko; return fseeko (stdin, 0, 0) && fp (stdin, 0, 0); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_sys_largefile_source=1; break fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext ac_cv_sys_largefile_source=unknown break done fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_largefile_source" >&5 $as_echo "$ac_cv_sys_largefile_source" >&6; } case $ac_cv_sys_largefile_source in #( no | unknown) ;; *) cat >>confdefs.h <<_ACEOF #define _LARGEFILE_SOURCE $ac_cv_sys_largefile_source _ACEOF ;; esac rm -rf conftest* # We used to try defining _XOPEN_SOURCE=500 too, to work around a bug # in glibc 2.1.3, but that breaks too many other things. # If you want fseeko and ftello with glibc, upgrade to a fixed glibc. if test $ac_cv_sys_largefile_source != unknown; then $as_echo "#define HAVE_FSEEKO 1" >>confdefs.h fi if test $ac_cv_c_compiler_gnu = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC needs -traditional" >&5 $as_echo_n "checking whether $CC needs -traditional... " >&6; } if ${ac_cv_prog_gcc_traditional+:} false; then : $as_echo_n "(cached) " >&6 else ac_pattern="Autoconf.*'x'" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include <sgtty.h> Autoconf TIOCGETP _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "$ac_pattern" >/dev/null 2>&1; then : ac_cv_prog_gcc_traditional=yes else ac_cv_prog_gcc_traditional=no fi rm -f conftest* if test $ac_cv_prog_gcc_traditional = no; then cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <termio.h> Autoconf TCGETA _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "$ac_pattern" >/dev/null 2>&1; then : ac_cv_prog_gcc_traditional=yes fi rm -f conftest* fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_gcc_traditional" >&5 $as_echo "$ac_cv_prog_gcc_traditional" >&6; } if test $ac_cv_prog_gcc_traditional = yes; then CC="$CC -traditional" fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether lstat correctly handles trailing slash" >&5 $as_echo_n "checking whether lstat correctly handles trailing slash... " >&6; } if ${ac_cv_func_lstat_dereferences_slashed_symlink+:} false; then : $as_echo_n "(cached) " >&6 else rm -f conftest.sym conftest.file echo >conftest.file if test "$as_ln_s" = "ln -s" && ln -s conftest.file conftest.sym; then if test "$cross_compiling" = yes; then : ac_cv_func_lstat_dereferences_slashed_symlink=no else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $ac_includes_default int main () { struct stat sbuf; /* Linux will dereference the symlink and fail, as required by POSIX. That is better in the sense that it means we will not have to compile and use the lstat wrapper. */ return lstat ("conftest.sym/", &sbuf) == 0; ; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : ac_cv_func_lstat_dereferences_slashed_symlink=yes else ac_cv_func_lstat_dereferences_slashed_symlink=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi else # If the `ln -s' command failed, then we probably don't even # have an lstat function. ac_cv_func_lstat_dereferences_slashed_symlink=no fi rm -f conftest.sym conftest.file fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_lstat_dereferences_slashed_symlink" >&5 $as_echo "$ac_cv_func_lstat_dereferences_slashed_symlink" >&6; } test $ac_cv_func_lstat_dereferences_slashed_symlink = yes && cat >>confdefs.h <<_ACEOF #define LSTAT_FOLLOWS_SLASHED_SYMLINK 1 _ACEOF if test "x$ac_cv_func_lstat_dereferences_slashed_symlink" = xno; then case " $LIBOBJS " in *" lstat.$ac_objext "* ) ;; *) LIBOBJS="$LIBOBJS lstat.$ac_objext" ;; esac fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether lstat accepts an empty string" >&5 $as_echo_n "checking whether lstat accepts an empty string... " >&6; } if ${ac_cv_func_lstat_empty_string_bug+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : ac_cv_func_lstat_empty_string_bug=yes else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ $ac_includes_default int main () { struct stat sbuf; return lstat ("", &sbuf) == 0; ; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : ac_cv_func_lstat_empty_string_bug=no else ac_cv_func_lstat_empty_string_bug=yes fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_lstat_empty_string_bug" >&5 $as_echo "$ac_cv_func_lstat_empty_string_bug" >&6; } if test $ac_cv_func_lstat_empty_string_bug = yes; then case " $LIBOBJS " in *" lstat.$ac_objext "* ) ;; *) LIBOBJS="$LIBOBJS lstat.$ac_objext" ;; esac cat >>confdefs.h <<_ACEOF #define HAVE_LSTAT_EMPTY_STRING_BUG 1 _ACEOF fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether lstat correctly handles trailing slash" >&5 $as_echo_n "checking whether lstat correctly handles trailing slash... " >&6; } if ${ac_cv_func_lstat_dereferences_slashed_symlink+:} false; then : $as_echo_n "(cached) " >&6 else rm -f conftest.sym conftest.file echo >conftest.file if test "$as_ln_s" = "ln -s" && ln -s conftest.file conftest.sym; then if test "$cross_compiling" = yes; then : ac_cv_func_lstat_dereferences_slashed_symlink=no else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $ac_includes_default int main () { struct stat sbuf; /* Linux will dereference the symlink and fail, as required by POSIX. That is better in the sense that it means we will not have to compile and use the lstat wrapper. */ return lstat ("conftest.sym/", &sbuf) == 0; ; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : ac_cv_func_lstat_dereferences_slashed_symlink=yes else ac_cv_func_lstat_dereferences_slashed_symlink=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi else # If the `ln -s' command failed, then we probably don't even # have an lstat function. ac_cv_func_lstat_dereferences_slashed_symlink=no fi rm -f conftest.sym conftest.file fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_lstat_dereferences_slashed_symlink" >&5 $as_echo "$ac_cv_func_lstat_dereferences_slashed_symlink" >&6; } test $ac_cv_func_lstat_dereferences_slashed_symlink = yes && cat >>confdefs.h <<_ACEOF #define LSTAT_FOLLOWS_SLASHED_SYMLINK 1 _ACEOF if test "x$ac_cv_func_lstat_dereferences_slashed_symlink" = xno; then case " $LIBOBJS " in *" lstat.$ac_objext "* ) ;; *) LIBOBJS="$LIBOBJS lstat.$ac_objext" ;; esac fi for ac_header in sys/select.h sys/socket.h do : as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default" if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF fi done { $as_echo "$as_me:${as_lineno-$LINENO}: checking types of arguments for select" >&5 $as_echo_n "checking types of arguments for select... " >&6; } if ${ac_cv_func_select_args+:} false; then : $as_echo_n "(cached) " >&6 else for ac_arg234 in 'fd_set *' 'int *' 'void *'; do for ac_arg1 in 'int' 'size_t' 'unsigned long int' 'unsigned int'; do for ac_arg5 in 'struct timeval *' 'const struct timeval *'; do cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ $ac_includes_default #ifdef HAVE_SYS_SELECT_H # include #endif #ifdef HAVE_SYS_SOCKET_H # include #endif int main () { extern int select ($ac_arg1, $ac_arg234, $ac_arg234, $ac_arg234, $ac_arg5); ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_func_select_args="$ac_arg1,$ac_arg234,$ac_arg5"; break 3 fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext done done done # Provide a safe default value. : "${ac_cv_func_select_args=int,int *,struct timeval *}" fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_select_args" >&5 $as_echo "$ac_cv_func_select_args" >&6; } ac_save_IFS=$IFS; IFS=',' set dummy `echo "$ac_cv_func_select_args" | sed 's/\*/\*/g'` IFS=$ac_save_IFS shift cat >>confdefs.h <<_ACEOF #define SELECT_TYPE_ARG1 $1 _ACEOF cat >>confdefs.h <<_ACEOF #define SELECT_TYPE_ARG234 ($2) _ACEOF cat >>confdefs.h <<_ACEOF #define SELECT_TYPE_ARG5 ($3) _ACEOF rm -f conftest* for ac_header in $ac_header_list do : as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default " if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF fi done { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether utime accepts a null argument" >&5 $as_echo_n "checking whether utime accepts a null argument... " >&6; } if ${ac_cv_func_utime_null+:} false; then : $as_echo_n "(cached) " >&6 else rm -f conftest.data; >conftest.data # Sequent interprets utime(file, 0) to mean use start of epoch. Wrong. if test "$cross_compiling" = yes; then : ac_cv_func_utime_null='guessing yes' else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $ac_includes_default #ifdef HAVE_UTIME_H # include #endif int main () { struct stat s, t; return ! 
(stat ("conftest.data", &s) == 0 && utime ("conftest.data", 0) == 0 && stat ("conftest.data", &t) == 0 && t.st_mtime >= s.st_mtime && t.st_mtime - s.st_mtime < 120); ; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : ac_cv_func_utime_null=yes else ac_cv_func_utime_null=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_utime_null" >&5 $as_echo "$ac_cv_func_utime_null" >&6; } if test "x$ac_cv_func_utime_null" != xno; then ac_cv_func_utime_null=yes $as_echo "#define HAVE_UTIME_NULL 1" >>confdefs.h fi rm -f conftest.data for ac_func in vprintf do : ac_fn_c_check_func "$LINENO" "vprintf" "ac_cv_func_vprintf" if test "x$ac_cv_func_vprintf" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_VPRINTF 1 _ACEOF ac_fn_c_check_func "$LINENO" "_doprnt" "ac_cv_func__doprnt" if test "x$ac_cv_func__doprnt" = xyes; then : $as_echo "#define HAVE_DOPRNT 1" >>confdefs.h fi fi done for ac_func in ishexnumber err errx warn warnx vasprintf getrusage do : as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" if eval test \"x\$"$as_ac_var"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF fi done for ac_func in strlcpy strlcat do : as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" if eval test \"x\$"$as_ac_var"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF fi done ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu ax_pthread_ok=no # We used to check for pthread.h first, but this fails if pthread.h # requires special compiler flags (e.g. on True64 or Sequent). # It gets checked for in the link test anyway. # First of all, check if the user has set any of the PTHREAD_LIBS, # etcetera environment variables, and if threads linking works using # them: if test x"$PTHREAD_LIBS$PTHREAD_CFLAGS" != x; then save_CFLAGS="$CFLAGS" CFLAGS="$CFLAGS $PTHREAD_CFLAGS" save_LIBS="$LIBS" LIBS="$PTHREAD_LIBS $LIBS" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for pthread_join in LIBS=$PTHREAD_LIBS with CFLAGS=$PTHREAD_CFLAGS" >&5 $as_echo_n "checking for pthread_join in LIBS=$PTHREAD_LIBS with CFLAGS=$PTHREAD_CFLAGS... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char pthread_join (); int main () { return pthread_join (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ax_pthread_ok=yes fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_pthread_ok" >&5 $as_echo "$ax_pthread_ok" >&6; } if test x"$ax_pthread_ok" = xno; then PTHREAD_LIBS="" PTHREAD_CFLAGS="" fi LIBS="$save_LIBS" CFLAGS="$save_CFLAGS" fi # We must check for the threads library under a number of different # names; the ordering is very important because some systems # (e.g. DEC) have both -lpthread and -lpthreads, where one of the # libraries is broken (non-POSIX). 
# Create a list of thread flags to try. Items starting with a "-" are # C compiler flags, and other items are library names, except for "none" # which indicates that we try without any flags at all, and "pthread-config" # which is a program returning the flags for the Pth emulation library. ax_pthread_flags="pthreads none -Kthread -kthread lthread -pthread lpthread -pthreads -mthreads pthread --thread-safe -mt pthread-config" # The ordering *is* (sometimes) important. Some notes on the # individual items follow: # pthreads: AIX (must check this before -lpthread) # none: in case threads are in libc; should be tried before -Kthread and # other compiler flags to prevent continual compiler warnings # -Kthread: Sequent (threads in libc, but -Kthread needed for pthread.h) # -kthread: FreeBSD kernel threads (preferred to -pthread since SMP-able) # lthread: LinuxThreads port on FreeBSD (also preferred to -pthread) # -pthread: Linux/gcc (kernel threads), BSD/gcc (userland threads) # -pthreads: Solaris/gcc # -mthreads: Mingw32/gcc, Lynx/gcc # -mt: Sun Workshop C (may only link SunOS threads [-lthread], but it # doesn't hurt to check since this sometimes defines pthreads too; # also defines -D_REENTRANT) # ... -mt is also the pthreads flag for HP/aCC # pthread: Linux, etcetera # --thread-safe: KAI C++ # pthread-config: use pthread-config program (for GNU Pth library) case "${host_cpu}-${host_os}" in *solaris*) # On Solaris (at least, for some versions), libc contains stubbed # (non-functional) versions of the pthreads routines, so link-based # tests will erroneously succeed. (We need to link with -pthreads/-mt/ # -lpthread.) (The stubs are missing pthread_cleanup_push, or rather # a function called by this macro, so we could check for that, but # who knows whether they'll stub that too in a future libc.) So, # we'll just look for -pthreads and -lpthread first: ax_pthread_flags="-pthreads pthread -mt -pthread $ax_pthread_flags" ;; *-darwin*) ax_pthread_flags="-pthread $ax_pthread_flags" ;; esac if test x"$ax_pthread_ok" = xno; then for flag in $ax_pthread_flags; do case $flag in none) { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pthreads work without any flags" >&5 $as_echo_n "checking whether pthreads work without any flags... " >&6; } ;; -*) { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pthreads work with $flag" >&5 $as_echo_n "checking whether pthreads work with $flag... " >&6; } PTHREAD_CFLAGS="$flag" ;; pthread-config) # Extract the first word of "pthread-config", so it can be a program name with args. set dummy pthread-config; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ax_pthread_config+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ax_pthread_config"; then ac_cv_prog_ax_pthread_config="$ax_pthread_config" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ax_pthread_config="yes" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS test -z "$ac_cv_prog_ax_pthread_config" && ac_cv_prog_ax_pthread_config="no" fi fi ax_pthread_config=$ac_cv_prog_ax_pthread_config if test -n "$ax_pthread_config"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_pthread_config" >&5 $as_echo "$ax_pthread_config" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test x"$ax_pthread_config" = xno; then continue; fi PTHREAD_CFLAGS="`pthread-config --cflags`" PTHREAD_LIBS="`pthread-config --ldflags` `pthread-config --libs`" ;; *) { $as_echo "$as_me:${as_lineno-$LINENO}: checking for the pthreads library -l$flag" >&5 $as_echo_n "checking for the pthreads library -l$flag... " >&6; } PTHREAD_LIBS="-l$flag" ;; esac save_LIBS="$LIBS" save_CFLAGS="$CFLAGS" LIBS="$PTHREAD_LIBS $LIBS" CFLAGS="$CFLAGS $PTHREAD_CFLAGS" # Check for various functions. We must include pthread.h, # since some functions may be macros. (On the Sequent, we # need a special flag -Kthread to make this header compile.) # We check for pthread_join because it is in -lpthread on IRIX # while pthread_create is in libc. We check for pthread_attr_init # due to DEC craziness with -lpthreads. We check for # pthread_cleanup_push because it is one of the few pthread # functions on Solaris that doesn't have a non-functional libc stub. # We try pthread_create on general principles. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <pthread.h> static void routine(void* a) {a=0;} static void* start_routine(void* a) {return a;} int main () { pthread_t th; pthread_attr_t attr; pthread_create(&th,0,start_routine,0); pthread_join(th, 0); pthread_attr_init(&attr); pthread_cleanup_push(routine, 0); pthread_cleanup_pop(0); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ax_pthread_ok=yes fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS="$save_LIBS" CFLAGS="$save_CFLAGS" { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_pthread_ok" >&5 $as_echo "$ax_pthread_ok" >&6; } if test "x$ax_pthread_ok" = xyes; then break; fi PTHREAD_LIBS="" PTHREAD_CFLAGS="" done fi # Various other checks: if test "x$ax_pthread_ok" = xyes; then save_LIBS="$LIBS" LIBS="$PTHREAD_LIBS $LIBS" save_CFLAGS="$CFLAGS" CFLAGS="$CFLAGS $PTHREAD_CFLAGS" # Detect AIX lossage: JOINABLE attribute is called UNDETACHED. { $as_echo "$as_me:${as_lineno-$LINENO}: checking for joinable pthread attribute" >&5 $as_echo_n "checking for joinable pthread attribute... " >&6; } attr_name=unknown for attr in PTHREAD_CREATE_JOINABLE PTHREAD_CREATE_UNDETACHED; do cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <pthread.h> int main () { int attr=$attr; return attr; ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : attr_name=$attr; break fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext done { $as_echo "$as_me:${as_lineno-$LINENO}: result: $attr_name" >&5 $as_echo "$attr_name" >&6; } if test "$attr_name" != PTHREAD_CREATE_JOINABLE; then cat >>confdefs.h <<_ACEOF #define PTHREAD_CREATE_JOINABLE $attr_name _ACEOF fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking if more special flags are required for pthreads" >&5 $as_echo_n "checking if more special flags are required for pthreads... 
" >&6; } flag=no case "${host_cpu}-${host_os}" in *-aix* | *-freebsd* | *-darwin*) flag="-D_THREAD_SAFE";; *solaris* | *-osf* | *-hpux*) flag="-D_REENTRANT";; esac { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${flag}" >&5 $as_echo "${flag}" >&6; } if test "x$flag" != xno; then PTHREAD_CFLAGS="$flag $PTHREAD_CFLAGS" fi LIBS="$save_LIBS" CFLAGS="$save_CFLAGS" # More AIX lossage: must compile with xlc_r or cc_r if test x"$GCC" != xyes; then for ac_prog in xlc_r cc_r do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_PTHREAD_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$PTHREAD_CC"; then ac_cv_prog_PTHREAD_CC="$PTHREAD_CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_PTHREAD_CC="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi PTHREAD_CC=$ac_cv_prog_PTHREAD_CC if test -n "$PTHREAD_CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $PTHREAD_CC" >&5 $as_echo "$PTHREAD_CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$PTHREAD_CC" && break done test -n "$PTHREAD_CC" || PTHREAD_CC="${CC}" else PTHREAD_CC=$CC fi else PTHREAD_CC="$CC" fi # Finally, execute ACTION-IF-FOUND/ACTION-IF-NOT-FOUND: if test x"$ax_pthread_ok" = xyes; then $as_echo "#define HAVE_PTHREAD 1" >>confdefs.h CLIBS="$PTHREAD_LIBS $LIBS" CPPFLAGS="$CPPFLAGS $PTHREAD_CFLAGS" LDFLAGS="$LDFLAGS $PTHREAD_CFLAGS" CC="$PTHREAD_CC" : else ax_pthread_ok=no fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu case "$host" in *-*-mingw*) ;; *) if test -d /usr/local/include; then CPPFLAGS="$CPPFLAGS -I/usr/local/include" LDFLAGS="$LDFLAGS -L/usr/local/lib" fi ;; esac # Check whether --enable-java was given. if test "${enable_java+set}" = set; then : enableval=$enable_java; fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for main in -lstdc++" >&5 $as_echo_n "checking for main in -lstdc++... " >&6; } if ${ac_cv_lib_stdcpp_main+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lstdc++ $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { return main (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_stdcpp_main=yes else ac_cv_lib_stdcpp_main=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_stdcpp_main" >&5 $as_echo "$ac_cv_lib_stdcpp_main" >&6; } if test "x$ac_cv_lib_stdcpp_main" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_LIBSTDC__ 1 _ACEOF LIBS="-lstdc++ $LIBS" else as_fn_error $? "missing libstdc++" "$LINENO" 5 fi for ac_header in list do : ac_fn_c_check_header_compile "$LINENO" "list" "ac_cv_header_list" "as_fn_error $? 
\"missing STL list class header\" \"$LINENO\" 5 " if test "x$ac_cv_header_list" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_LIST 1 _ACEOF fi done for ac_header in map do : ac_fn_c_check_header_compile "$LINENO" "map" "ac_cv_header_map" "as_fn_error $? \"missing STL map class header\" \"$LINENO\" 5 " if test "x$ac_cv_header_map" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_MAP 1 _ACEOF fi done for ac_header in queue do : ac_fn_c_check_header_compile "$LINENO" "queue" "ac_cv_header_queue" "as_fn_error $? \"missing STL queue class header\" \"$LINENO\" 5 " if test "x$ac_cv_header_queue" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_QUEUE 1 _ACEOF fi done for ac_header in set do : ac_fn_c_check_header_compile "$LINENO" "set" "ac_cv_header_set" "as_fn_error $? \"missing STL set class header\" \"$LINENO\" 5 " if test "x$ac_cv_header_set" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_SET 1 _ACEOF fi done for ac_header in stack do : ac_fn_c_check_header_compile "$LINENO" "stack" "ac_cv_header_stack" "as_fn_error $? \"missing STL stack class header\" \"$LINENO\" 5 " if test "x$ac_cv_header_stack" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_STACK 1 _ACEOF fi done for ac_header in streambuf do : ac_fn_c_check_header_compile "$LINENO" "streambuf" "ac_cv_header_streambuf" "as_fn_error $? \"missing STL streambuf class header\" \"$LINENO\" 5 " if test "x$ac_cv_header_streambuf" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_STREAMBUF 1 _ACEOF fi done for ac_header in string do : ac_fn_c_check_header_compile "$LINENO" "string" "ac_cv_header_string" "as_fn_error $? \"missing STL string class header\" \"$LINENO\" 5 " if test "x$ac_cv_header_string" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_STRING 1 _ACEOF fi done for ac_header in vector do : ac_fn_c_check_header_compile "$LINENO" "vector" "ac_cv_header_vector" "as_fn_error $? \"missing STL vector class header\" \"$LINENO\" 5 " if test "x$ac_cv_header_vector" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_VECTOR 1 _ACEOF fi done { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5 $as_echo_n "checking for dlopen in -ldl... " >&6; } if ${ac_cv_lib_dl_dlopen+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-ldl $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char dlopen (); int main () { return dlopen (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_dl_dlopen=yes else ac_cv_lib_dl_dlopen=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5 $as_echo "$ac_cv_lib_dl_dlopen" >&6; } if test "x$ac_cv_lib_dl_dlopen" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_LIBDL 1 _ACEOF LIBS="-ldl $LIBS" fi # Check if we should link afflib. # Check whether --with-afflib was given. 
if test "${with_afflib+set}" = set; then : withval=$with_afflib; else with_afflib=yes fi if test "x$with_afflib" != "xno"; then : if test "x$with_afflib" != "xyes"; then : if test -d ${with_afflib}/include; then : CPPFLAGS="$CPPFLAGS -I${with_afflib}/include" LDFLAGS="$LDFLAGS -L${with_afflib}/lib" else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "AFFLIB directory not found at ${with_afflib} See \`config.log' for more details" "$LINENO" 5; } fi fi for ac_header in afflib/afflib.h do : ac_fn_c_check_header_mongrel "$LINENO" "afflib/afflib.h" "ac_cv_header_afflib_afflib_h" "$ac_includes_default" if test "x$ac_cv_header_afflib_afflib_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_AFFLIB_AFFLIB_H 1 _ACEOF { $as_echo "$as_me:${as_lineno-$LINENO}: checking for af_open in -lafflib" >&5 $as_echo_n "checking for af_open in -lafflib... " >&6; } if ${ac_cv_lib_afflib_af_open+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lafflib $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char af_open (); int main () { return af_open (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_afflib_af_open=yes else ac_cv_lib_afflib_af_open=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_afflib_af_open" >&5 $as_echo "$ac_cv_lib_afflib_af_open" >&6; } if test "x$ac_cv_lib_afflib_af_open" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_LIBAFFLIB 1 _ACEOF LIBS="-lafflib $LIBS" fi fi done fi if test "x$ac_cv_lib_afflib_af_open" = "xyes"; then : ax_afflib=yes else ax_afflib=no fi # Check whether --with-zlib was given. if test "${with_zlib+set}" = set; then : withval=$with_zlib; else with_zlib=yes fi if test "x$with_zlib" != "xno"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: checking for zlib" >&5 $as_echo "$as_me: checking for zlib" >&6;} if test "x$with_zlib" != "xyes"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: LOOKING for zlib in ${with_zlib}" >&5 $as_echo "$as_me: LOOKING for zlib in ${with_zlib}" >&6;} if test -d ${with_zlib}; then : CPPFLAGS="$CPPFLAGS -I${with_zlib}/include" LDFLAGS="$LDFLAGS -L${with_zlib}/lib" else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "ZLIB directory not found at ${with_zlib} See \`config.log' for more details" "$LINENO" 5; } fi fi for ac_header in zlib.h do : ac_fn_c_check_header_mongrel "$LINENO" "zlib.h" "ac_cv_header_zlib_h" "$ac_includes_default" if test "x$ac_cv_header_zlib_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_ZLIB_H 1 _ACEOF { $as_echo "$as_me:${as_lineno-$LINENO}: checking for inflate in -lz" >&5 $as_echo_n "checking for inflate in -lz... " >&6; } if ${ac_cv_lib_z_inflate+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lz $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char inflate (); int main () { return inflate (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_z_inflate=yes else ac_cv_lib_z_inflate=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_z_inflate" >&5 $as_echo "$ac_cv_lib_z_inflate" >&6; } if test "x$ac_cv_lib_z_inflate" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_LIBZ 1 _ACEOF LIBS="-lz $LIBS" else { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Found zlib headers, but could not link to zlib library. Will build without zlib." >&5 $as_echo "$as_me: WARNING: Found zlib headers, but could not link to zlib library. Will build without zlib." >&2;} with_zlib=no fi else { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Could not find usable zlib headers. Will build without zlib." >&5 $as_echo "$as_me: WARNING: Could not find usable zlib headers. Will build without zlib." >&2;} with_zlib=no fi done else { $as_echo "$as_me:${as_lineno-$LINENO}: NOT checking for zlib because with_zlib is no" >&5 $as_echo "$as_me: NOT checking for zlib because with_zlib is no" >&6;} fi if test "x$ac_cv_lib_z_inflate" = "xyes"; then : ax_zlib=yes else ax_zlib=no fi if test "x$with_zlib" != "xno" && test "x$with_zlib" != "xyes"; then X_ZLIB_TRUE= X_ZLIB_FALSE='#' else X_ZLIB_TRUE='#' X_ZLIB_FALSE= fi if test "x$with_zlib" != "xno"; then : Z_PATH="${with_zlib}/lib" else { $as_echo "$as_me:${as_lineno-$LINENO}: failed to make Z_PATH" >&5 $as_echo "$as_me: failed to make Z_PATH" >&6;} fi Z_PATH=$Z_PATH { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5 $as_echo_n "checking for dlopen in -ldl... " >&6; } if ${ac_cv_lib_dl_dlopen+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-ldl $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char dlopen (); int main () { return dlopen (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_dl_dlopen=yes else ac_cv_lib_dl_dlopen=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5 $as_echo "$ac_cv_lib_dl_dlopen" >&6; } if test "x$ac_cv_lib_dl_dlopen" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_LIBDL 1 _ACEOF LIBS="-ldl $LIBS" fi # Check whether --with-libewf was given. if test "${with_libewf+set}" = set; then : withval=$with_libewf; else with_libewf=yes fi if test "x$with_libewf" != "xno"; then : if test "x$with_libewf" != "xyes"; then : if test -d ${with_libewf}/include; then : CPPFLAGS="$CPPFLAGS -I${with_libewf}/include" LDFLAGS="$LDFLAGS -L${with_libewf}/lib" else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? 
"libewf directory not found at ${with_libewf} See \`config.log' for more details" "$LINENO" 5; } fi fi for ac_header in libewf.h do : ac_fn_c_check_header_mongrel "$LINENO" "libewf.h" "ac_cv_header_libewf_h" "$ac_includes_default" if test "x$ac_cv_header_libewf_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_LIBEWF_H 1 _ACEOF { $as_echo "$as_me:${as_lineno-$LINENO}: checking for libewf_get_version in -lewf" >&5 $as_echo_n "checking for libewf_get_version in -lewf... " >&6; } if ${ac_cv_lib_ewf_libewf_get_version+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lewf $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char libewf_get_version (); int main () { return libewf_get_version (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_ewf_libewf_get_version=yes else ac_cv_lib_ewf_libewf_get_version=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_ewf_libewf_get_version" >&5 $as_echo "$ac_cv_lib_ewf_libewf_get_version" >&6; } if test "x$ac_cv_lib_ewf_libewf_get_version" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_LIBEWF 1 _ACEOF LIBS="-lewf $LIBS" else NO_LIBEWF=true fi fi done fi if test "x$ac_cv_lib_ewf_libewf_get_version" = "xyes"; then : ax_libewf=yes else ax_libewf=no fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing dlopen" >&5 $as_echo_n "checking for library containing dlopen... " >&6; } if ${ac_cv_search_dlopen+:} false; then : $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char dlopen (); int main () { return dlopen (); ; return 0; } _ACEOF for ac_lib in '' dl; do if test -z "$ac_lib"; then ac_res="none required" else ac_res=-l$ac_lib LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi if ac_fn_c_try_link "$LINENO"; then : ac_cv_search_dlopen=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext if ${ac_cv_search_dlopen+:} false; then : break fi done if ${ac_cv_search_dlopen+:} false; then : else ac_cv_search_dlopen=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_dlopen" >&5 $as_echo "$ac_cv_search_dlopen" >&6; } ac_res=$ac_cv_search_dlopen if test "$ac_res" != no; then : test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" fi if test "x$enable_java" != "xno"; then : if test "x$JAVAPREFIX" = x; then test "x$JAVAC" = x && for ac_prog in "gcj$EXEEXT -C" guavac$EXEEXT jikes$EXEEXT javac$EXEEXT do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_JAVAC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$JAVAC"; then ac_cv_prog_JAVAC="$JAVAC" # Let the user override the test. 
else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_JAVAC="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi JAVAC=$ac_cv_prog_JAVAC if test -n "$JAVAC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $JAVAC" >&5 $as_echo "$JAVAC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$JAVAC" && break done else test "x$JAVAC" = x && for ac_prog in "gcj$EXEEXT -C" guavac$EXEEXT jikes$EXEEXT javac$EXEEXT do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_JAVAC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$JAVAC"; then ac_cv_prog_JAVAC="$JAVAC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_JAVAC="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi JAVAC=$ac_cv_prog_JAVAC if test -n "$JAVAC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $JAVAC" >&5 $as_echo "$JAVAC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$JAVAC" && break done test -n "$JAVAC" || JAVAC="$JAVAPREFIX" fi test "x$JAVAC" = x && { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: no acceptable Java compiler found in \$PATH" >&5 $as_echo "$as_me: WARNING: no acceptable Java compiler found in \$PATH" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $JAVAC works" >&5 $as_echo_n "checking if $JAVAC works... " >&6; } if ${ac_cv_prog_javac_works+:} false; then : $as_echo_n "(cached) " >&6 else JAVA_TEST=Test.java CLASS_TEST=Test.class cat << \EOF > $JAVA_TEST /* #line 19514 "configure" */ public class Test { } EOF if { ac_try='$JAVAC $JAVACFLAGS $JAVA_TEST' { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_try\""; } >&5 (eval $ac_try) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; } >/dev/null 2>&1; then ac_cv_prog_javac_works=yes else { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: The Java compiler $JAVAC failed (see config.log, check the CLASSPATH?)" >&5 $as_echo "$as_me: WARNING: The Java compiler $JAVAC failed (see config.log, check the CLASSPATH?)" >&2;} echo "configure: failed program was:" >&5 cat $JAVA_TEST >&5 fi rm -f $JAVA_TEST $CLASS_TEST fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_javac_works" >&5 $as_echo "$ac_cv_prog_javac_works" >&6; } if test "x$JAVAC" != x; then JNI_INCLUDE_DIRS="" test "x$JAVAC" = x && as_fn_error $? "'\$JAVAC' undefined" "$LINENO" 5 # Extract the first word of "$JAVAC", so it can be a program name with args. set dummy $JAVAC; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... 
" >&6; } if ${ac_cv_path__ACJNI_JAVAC+:} false; then : $as_echo_n "(cached) " >&6 else case $_ACJNI_JAVAC in [\\/]* | ?:[\\/]*) ac_cv_path__ACJNI_JAVAC="$_ACJNI_JAVAC" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path__ACJNI_JAVAC="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS test -z "$ac_cv_path__ACJNI_JAVAC" && ac_cv_path__ACJNI_JAVAC="no" ;; esac fi _ACJNI_JAVAC=$ac_cv_path__ACJNI_JAVAC if test -n "$_ACJNI_JAVAC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $_ACJNI_JAVAC" >&5 $as_echo "$_ACJNI_JAVAC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$_ACJNI_JAVAC" = xno; then { $as_echo "$as_me:${as_lineno-$LINENO}: \"$JAVAC could not be found in path -- cannot resolve to JNI headers\"" >&5 $as_echo "$as_me: \"$JAVAC could not be found in path -- cannot resolve to JNI headers\"" >&6;} else # find the include directory relative to the javac executable _cur=""$_ACJNI_JAVAC"" while ls -ld "$_cur" 2>/dev/null | grep " -> " >/dev/null; do { $as_echo "$as_me:${as_lineno-$LINENO}: checking symlink for $_cur" >&5 $as_echo_n "checking symlink for $_cur... " >&6; } _slink=`ls -ld "$_cur" | sed 's/.* -> //'` case "$_slink" in /*) _cur="$_slink";; # 'X' avoids triggering unwanted echo options. *) _cur=`echo "X$_cur" | sed -e 's/^X//' -e 's:[^/]*$::'`"$_slink";; esac { $as_echo "$as_me:${as_lineno-$LINENO}: result: $_cur" >&5 $as_echo "$_cur" >&6; } done _ACJNI_FOLLOWED="$_cur" _JTOPDIR=`echo "$_ACJNI_FOLLOWED" | sed -e 's://*:/:g' -e 's:/[^/]*$::'` case "$host_os" in darwin*) _JTOPDIR=`echo "$_JTOPDIR" | sed -e 's:/[^/]*$::'` if test -d "$_JTOPDIR/Headers" then _JINC="$_JTOPDIR/Headers" elif test -d "$_JTOPDIR/include" then _JINC="$_JTOPDIR/include" fi;; *) _JINC="$_JTOPDIR/include";; esac $as_echo "$as_me:${as_lineno-$LINENO}: _JTOPDIR=$_JTOPDIR" >&5 $as_echo "$as_me:${as_lineno-$LINENO}: _JINC=$_JINC" >&5 # On Mac OS X 10.6.4, jni.h is a symlink: # /System/Library/Frameworks/JavaVM.framework/Versions/Current/Headers/jni.h # -> ../../CurrentJDK/Headers/jni.h. 
if test -f "$_JINC/jni.h" || test -L "$_JINC/jni.h"; then JNI_INCLUDE_DIRS="$JNI_INCLUDE_DIRS $_JINC" else _JTOPDIR=`echo "$_JTOPDIR" | sed -e 's:/[^/]*$::'` if test -f "$_JTOPDIR/include/jni.h"; then JNI_INCLUDE_DIRS="$JNI_INCLUDE_DIRS $_JTOPDIR/include" else { $as_echo "$as_me:${as_lineno-$LINENO}: \"cannot find java include files\"" >&5 $as_echo "$as_me: \"cannot find java include files\"" >&6;} fi fi if test "x$JNI_INCLUDE_DIRS" != x; then # get the likely subdirectories for system specific java includes case "$host_os" in bsdi*) _JNI_INC_SUBDIRS="bsdos";; linux*) _JNI_INC_SUBDIRS="linux genunix";; darwin*) _JNI_INC_SUBDIRS="darwin";; osf*) _JNI_INC_SUBDIRS="alpha";; solaris*) _JNI_INC_SUBDIRS="solaris";; mingw*) _JNI_INC_SUBDIRS="win32";; cygwin*) _JNI_INC_SUBDIRS="win32";; *) _JNI_INC_SUBDIRS="genunix";; esac # add any subdirectories that are present for JINCSUBDIR in $_JNI_INC_SUBDIRS do if test -d "$_JTOPDIR/include/$JINCSUBDIR"; then JNI_INCLUDE_DIRS="$JNI_INCLUDE_DIRS $_JTOPDIR/include/$JINCSUBDIR" fi done fi fi for JNI_INCLUDE_DIR in $JNI_INCLUDE_DIRS do JNI_CPPFLAGS="$JNI_CPPFLAGS -I$JNI_INCLUDE_DIR" done JNI_CPPFLAGS=$JNI_CPPFLAGS fi if test x$JAVAPREFIX = x; then test x$JAVA = x && for ac_prog in kaffe$EXEEXT java$EXEEXT do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_JAVA+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$JAVA"; then ac_cv_prog_JAVA="$JAVA" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_JAVA="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi JAVA=$ac_cv_prog_JAVA if test -n "$JAVA"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $JAVA" >&5 $as_echo "$JAVA" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$JAVA" && break done else test x$JAVA = x && for ac_prog in kaffe$EXEEXT java$EXEEXT do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_JAVA+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$JAVA"; then ac_cv_prog_JAVA="$JAVA" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_JAVA="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi JAVA=$ac_cv_prog_JAVA if test -n "$JAVA"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $JAVA" >&5 $as_echo "$JAVA" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$JAVA" && break done test -n "$JAVA" || JAVA="$JAVAPREFIX" fi test x$JAVA = x && { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: no acceptable Java virtual machine found in \$PATH" >&5 $as_echo "$as_me: WARNING: no acceptable Java virtual machine found in \$PATH" >&2;} # Extract the first word of "uudecode$EXEEXT", so it can be a program name with args. set dummy uudecode$EXEEXT; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_uudecode+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$uudecode"; then ac_cv_prog_uudecode="$uudecode" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_uudecode="yes" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi uudecode=$ac_cv_prog_uudecode if test -n "$uudecode"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $uudecode" >&5 $as_echo "$uudecode" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test x$JAVA != x; then if test x$uudecode = xyes; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking if uudecode can decode base 64 file" >&5 $as_echo_n "checking if uudecode can decode base 64 file... " >&6; } if ${ac_cv_prog_uudecode_base64+:} false; then : $as_echo_n "(cached) " >&6 else cat << \EOF > Test.uue begin-base64 644 Test.class yv66vgADAC0AFQcAAgEABFRlc3QHAAQBABBqYXZhL2xhbmcvT2JqZWN0AQAE bWFpbgEAFihbTGphdmEvbGFuZy9TdHJpbmc7KVYBAARDb2RlAQAPTGluZU51 bWJlclRhYmxlDAAKAAsBAARleGl0AQAEKEkpVgoADQAJBwAOAQAQamF2YS9s YW5nL1N5c3RlbQEABjxpbml0PgEAAygpVgwADwAQCgADABEBAApTb3VyY2VG aWxlAQAJVGVzdC5qYXZhACEAAQADAAAAAAACAAkABQAGAAEABwAAACEAAQAB AAAABQO4AAyxAAAAAQAIAAAACgACAAAACgAEAAsAAQAPABAAAQAHAAAAIQAB AAEAAAAFKrcAErEAAAABAAgAAAAKAAIAAAAEAAQABAABABMAAAACABQ= ==== EOF if uudecode$EXEEXT Test.uue; then ac_cv_prog_uudecode_base64=yes else echo "configure: 19819: uudecode had trouble decoding base 64 file 'Test.uue'" >&5 echo "configure: failed file was:" >&5 cat Test.uue >&5 ac_cv_prog_uudecode_base64=no fi rm -f Test.uue fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_uudecode_base64" >&5 $as_echo "$ac_cv_prog_uudecode_base64" >&6; } fi if test x$ac_cv_prog_uudecode_base64 != xyes; then rm -f Test.class { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: I have to compile Test.class from scratch" >&5 $as_echo "$as_me: WARNING: I have to compile Test.class from scratch" >&2;} if test x$ac_cv_prog_javac_works = xno; then { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Cannot compile java source. $JAVAC does not work properly" >&5 $as_echo "$as_me: WARNING: Cannot compile java source. 
$JAVAC does not work properly" >&2;} fi if test x$ac_cv_prog_javac_works = x; then if test "x$JAVAPREFIX" = x; then test "x$JAVAC" = x && for ac_prog in "gcj$EXEEXT -C" guavac$EXEEXT jikes$EXEEXT javac$EXEEXT do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_JAVAC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$JAVAC"; then ac_cv_prog_JAVAC="$JAVAC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_JAVAC="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi JAVAC=$ac_cv_prog_JAVAC if test -n "$JAVAC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $JAVAC" >&5 $as_echo "$JAVAC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$JAVAC" && break done else test "x$JAVAC" = x && for ac_prog in "gcj$EXEEXT -C" guavac$EXEEXT jikes$EXEEXT javac$EXEEXT do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_JAVAC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$JAVAC"; then ac_cv_prog_JAVAC="$JAVAC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_JAVAC="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi JAVAC=$ac_cv_prog_JAVAC if test -n "$JAVAC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $JAVAC" >&5 $as_echo "$JAVAC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$JAVAC" && break done test -n "$JAVAC" || JAVAC="$JAVAPREFIX" fi test "x$JAVAC" = x && { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: no acceptable Java compiler found in \$PATH" >&5 $as_echo "$as_me: WARNING: no acceptable Java compiler found in \$PATH" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $JAVAC works" >&5 $as_echo_n "checking if $JAVAC works... " >&6; } if ${ac_cv_prog_javac_works+:} false; then : $as_echo_n "(cached) " >&6 else JAVA_TEST=Test.java CLASS_TEST=Test.class cat << \EOF > $JAVA_TEST /* #line 19939 "configure" */ public class Test { } EOF if { ac_try='$JAVAC $JAVACFLAGS $JAVA_TEST' { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_try\""; } >&5 (eval $ac_try) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 test $ac_status = 0; }; } >/dev/null 2>&1; then ac_cv_prog_javac_works=yes else { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: The Java compiler $JAVAC failed (see config.log, check the CLASSPATH?)" >&5 $as_echo "$as_me: WARNING: The Java compiler $JAVAC failed (see config.log, check the CLASSPATH?)" >&2;} echo "configure: failed program was:" >&5 cat $JAVA_TEST >&5 fi rm -f $JAVA_TEST $CLASS_TEST fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_javac_works" >&5 $as_echo "$ac_cv_prog_javac_works" >&6; } fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $JAVA works" >&5 $as_echo_n "checking if $JAVA works... " >&6; } if ${ac_cv_prog_java_works+:} false; then : $as_echo_n "(cached) " >&6 else JAVA_TEST=Test.java CLASS_TEST=Test.class TEST=Test cat << \EOF > $JAVA_TEST /* [#]line 19975 "configure" */ public class Test { public static void main (String args[]) { System.exit(0); } } EOF if test x$ac_cv_prog_uudecode_base64 != xyes; then if { ac_try='$JAVAC $JAVACFLAGS $JAVA_TEST' { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_try\""; } >&5 (eval $ac_try) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; } && test -s $CLASS_TEST; then : else echo "configure: failed program was:" >&5 cat $JAVA_TEST >&5 { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: The Java compiler $JAVAC failed (see config.log, check the CLASSPATH?)" >&5 $as_echo "$as_me: WARNING: The Java compiler $JAVAC failed (see config.log, check the CLASSPATH?)" >&2;} fi fi if { ac_try='$JAVA $JAVAFLAGS $TEST' { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_try\""; } >&5 (eval $ac_try) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; } >/dev/null 2>&1; then ac_cv_prog_java_works=yes else echo "configure: failed program was:" >&5 cat $JAVA_TEST >&5 { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: The Java VM $JAVA failed (see config.log, check the CLASSPATH?)" >&5 $as_echo "$as_me: WARNING: The Java VM $JAVA failed (see config.log, check the CLASSPATH?)" >&2;} fi rm -fr $JAVA_TEST $CLASS_TEST Test.uue fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_java_works" >&5 $as_echo "$ac_cv_prog_java_works" >&6; } fi # Extract the first word of "ant", so it can be a program name with args. set dummy ant; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_ANT_FOUND+:} false; then : $as_echo_n "(cached) " >&6 else case $ANT_FOUND in [\\/]* | ?:[\\/]*) ac_cv_path_ANT_FOUND="$ANT_FOUND" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_ANT_FOUND="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi ANT_FOUND=$ac_cv_path_ANT_FOUND if test -n "$ANT_FOUND"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ANT_FOUND" >&5 $as_echo "$ANT_FOUND" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test "x$JNI_CPPFLAGS" != x && test "x$ANT_FOUND" != x && test "x$JAVA" != x; then : ax_java_support=yes else ax_java_support=no fi if test "x$ax_java_support" == "xyes"; then X_JNI_TRUE= X_JNI_FALSE='#' else X_JNI_TRUE='#' X_JNI_FALSE= fi ac_config_commands="$ac_config_commands tsk/tsk_incs.h" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if libtool needs -no-undefined flag to build shared libraries" >&5 $as_echo_n "checking if libtool needs -no-undefined flag to build shared libraries... " >&6; } case "$host" in *-*-mingw*) { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } LIBTSK_LDFLAGS="-no-undefined" ;; *) { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } ;; esac for ac_func in getline do : ac_fn_c_check_func "$LINENO" "getline" "ac_cv_func_getline" if test "x$ac_cv_func_getline" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_GETLINE 1 _ACEOF fi done { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing regexec" >&5 $as_echo_n "checking for library containing regexec... " >&6; } if ${ac_cv_search_regexec+:} false; then : $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char regexec (); int main () { return regexec (); ; return 0; } _ACEOF for ac_lib in '' regex; do if test -z "$ac_lib"; then ac_res="none required" else ac_res=-l$ac_lib LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi if ac_fn_c_try_link "$LINENO"; then : ac_cv_search_regexec=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext if ${ac_cv_search_regexec+:} false; then : break fi done if ${ac_cv_search_regexec+:} false; then : else ac_cv_search_regexec=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_regexec" >&5 $as_echo "$ac_cv_search_regexec" >&6; } ac_res=$ac_cv_search_regexec if test "$ac_res" != no; then : test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" else as_fn_error $? 
"missing regex" "$LINENO" 5 fi ac_config_files="$ac_config_files Makefile tsk/Makefile tsk/base/Makefile tsk/img/Makefile tsk/vs/Makefile tsk/fs/Makefile tsk/hashdb/Makefile tsk/auto/Makefile tools/Makefile tools/imgtools/Makefile tools/vstools/Makefile tools/fstools/Makefile tools/hashtools/Makefile tools/srchtools/Makefile tools/autotools/Makefile tools/sorter/Makefile tools/timeline/Makefile tools/fiwalk/Makefile tools/fiwalk/src/Makefile tools/fiwalk/plugins/Makefile tests/Makefile samples/Makefile man/Makefile bindings/java/Makefile bindings/java/jni/Makefile unit_tests/Makefile unit_tests/base/Makefile" cat >confcache <<\_ACEOF # This file is a shell script that caches the results of configure # tests run on this system so they can be shared between configure # scripts and configure runs, see configure's option --config-cache. # It is not useful on other systems. If it contains results you don't # want to keep, you may remove or edit it. # # config.status only pays attention to the cache file if you give it # the --recheck option to rerun configure. # # `ac_cv_env_foo' variables (set or unset) will be overridden when # loading this file, other *unset* `ac_cv_foo' will be assigned the # following values. _ACEOF # The following way of writing the cache mishandles newlines in values, # but we know of no workaround that is simple, portable, and efficient. # So, we kill variables containing newlines. # Ultrix sh set writes to stderr and can't be redirected directly, # and sets the high bit in the cache file unless we assign to the vars. ( for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do eval ac_val=\$$ac_var case $ac_val in #( *${as_nl}*) case $ac_var in #( *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 $as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; esac case $ac_var in #( _ | IFS | as_nl) ;; #( BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( *) { eval $ac_var=; unset $ac_var;} ;; esac ;; esac done (set) 2>&1 | case $as_nl`(ac_space=' '; set) 2>&1` in #( *${as_nl}ac_space=\ *) # `set' does not quote correctly, so add quotes: double-quote # substitution turns \\\\ into \\, and sed turns \\ into \. sed -n \ "s/'/'\\\\''/g; s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" ;; #( *) # `set' quotes correctly as required by POSIX, so do not add quotes. sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" ;; esac | sort ) | sed ' /^ac_cv_env_/b end t clear :clear s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ t end s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ :end' >>confcache if diff "$cache_file" confcache >/dev/null 2>&1; then :; else if test -w "$cache_file"; then if test "x$cache_file" != "x/dev/null"; then { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5 $as_echo "$as_me: updating cache $cache_file" >&6;} if test ! -f "$cache_file" || test -h "$cache_file"; then cat confcache >"$cache_file" else case $cache_file in #( */* | ?:*) mv -f confcache "$cache_file"$$ && mv -f "$cache_file"$$ "$cache_file" ;; #( *) mv -f confcache "$cache_file" ;; esac fi fi else { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5 $as_echo "$as_me: not updating unwritable cache $cache_file" >&6;} fi fi rm -f confcache test "x$prefix" = xNONE && prefix=$ac_default_prefix # Let make expand exec_prefix. 
test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' DEFS=-DHAVE_CONFIG_H ac_libobjs= ac_ltlibobjs= U= for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue # 1. Remove the extension, and $U if already installed. ac_script='s/\$U\././;s/\.o$//;s/\.obj$//' ac_i=`$as_echo "$ac_i" | sed "$ac_script"` # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR # will be set to the directory where LIBOBJS objects are built. as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext" as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo' done LIBOBJS=$ac_libobjs LTLIBOBJS=$ac_ltlibobjs { $as_echo "$as_me:${as_lineno-$LINENO}: checking that generated files are newer than configure" >&5 $as_echo_n "checking that generated files are newer than configure... " >&6; } if test -n "$am_sleep_pid"; then # Hide warnings about reused PIDs. wait $am_sleep_pid 2>/dev/null fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: done" >&5 $as_echo "done" >&6; } if test -n "$EXEEXT"; then am__EXEEXT_TRUE= am__EXEEXT_FALSE='#' else am__EXEEXT_TRUE='#' am__EXEEXT_FALSE= fi if test -z "${CPPUNIT_TRUE}" && test -z "${CPPUNIT_FALSE}"; then as_fn_error $? "conditional \"CPPUNIT\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${AMDEP_TRUE}" && test -z "${AMDEP_FALSE}"; then as_fn_error $? "conditional \"AMDEP\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${am__fastdepCC_TRUE}" && test -z "${am__fastdepCC_FALSE}"; then as_fn_error $? "conditional \"am__fastdepCC\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${MAINTAINER_MODE_TRUE}" && test -z "${MAINTAINER_MODE_FALSE}"; then as_fn_error $? "conditional \"MAINTAINER_MODE\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${am__fastdepCXX_TRUE}" && test -z "${am__fastdepCXX_FALSE}"; then as_fn_error $? "conditional \"am__fastdepCXX\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${am__fastdepCC_TRUE}" && test -z "${am__fastdepCC_FALSE}"; then as_fn_error $? "conditional \"am__fastdepCC\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${X_ZLIB_TRUE}" && test -z "${X_ZLIB_FALSE}"; then as_fn_error $? "conditional \"X_ZLIB\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${X_JNI_TRUE}" && test -z "${X_JNI_FALSE}"; then as_fn_error $? "conditional \"X_JNI\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi : "${CONFIG_STATUS=./config.status}" ac_write_fail=0 ac_clean_files_save=$ac_clean_files ac_clean_files="$ac_clean_files $CONFIG_STATUS" { $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5 $as_echo "$as_me: creating $CONFIG_STATUS" >&6;} as_write_fail=0 cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1 #! $SHELL # Generated by $as_me. # Run this file to recreate the current configuration. # Compiler output produced by configure, useful for debugging # configure, is in config.log if it exists. debug=false ac_cs_recheck=false ac_cs_silent=false SHELL=\${CONFIG_SHELL-$SHELL} export SHELL _ASEOF cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1 ## -------------------- ## ## M4sh Initialization. 
## ## -------------------- ## # Be more Bourne compatible DUALCASE=1; export DUALCASE # for MKS sh if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST else case `(set -o) 2>/dev/null` in #( *posix*) : set -o posix ;; #( *) : ;; esac fi as_nl=' ' export as_nl # Printing a long string crashes Solaris 7 /usr/bin/printf. as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo # Prefer a ksh shell builtin over an external printf program on Solaris, # but without wasting forks for bash or zsh. if test -z "$BASH_VERSION$ZSH_VERSION" \ && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='print -r --' as_echo_n='print -rn --' elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='printf %s\n' as_echo_n='printf %s' else if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' as_echo_n='/usr/ucb/echo -n' else as_echo_body='eval expr "X$1" : "X\\(.*\\)"' as_echo_n_body='eval arg=$1; case $arg in #( *"$as_nl"*) expr "X$arg" : "X\\(.*\\)$as_nl"; arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; esac; expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" ' export as_echo_n_body as_echo_n='sh -c $as_echo_n_body as_echo' fi export as_echo_body as_echo='sh -c $as_echo_body as_echo' fi # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then PATH_SEPARATOR=: (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || PATH_SEPARATOR=';' } fi # IFS # We need space, tab and new line, in precisely that order. Quoting is # there to prevent editors from complaining about space-tab. # (If _AS_PATH_WALK were called with IFS unset, it would disable word # splitting by setting IFS to empty value.) IFS=" "" $as_nl" # Find who we are. Look in the path if we contain no directory separator. as_myself= case $0 in #(( *[\\/]* ) as_myself=$0 ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break done IFS=$as_save_IFS ;; esac # We did not find ourselves, most probably we were run as `sh COMMAND' # in which case we are not to be found in the path. if test "x$as_myself" = x; then as_myself=$0 fi if test ! -f "$as_myself"; then $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 exit 1 fi # Unset variables that we do not need and which cause bugs (e.g. in # pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" # suppresses any "Segmentation fault" message there. '((' could # trigger a bug in pdksh 5.2.14. for as_var in BASH_ENV ENV MAIL MAILPATH do eval test x\${$as_var+set} = xset \ && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : done PS1='$ ' PS2='> ' PS4='+ ' # NLS nuisances. LC_ALL=C export LC_ALL LANGUAGE=C export LANGUAGE # CDPATH. (unset CDPATH) >/dev/null 2>&1 && unset CDPATH # as_fn_error STATUS ERROR [LINENO LOG_FD] # ---------------------------------------- # Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are # provided, also output the error to LOG_FD, referencing LINENO. 
Then exit the # script with STATUS, using 1 if that was 0. as_fn_error () { as_status=$1; test $as_status -eq 0 && as_status=1 if test "$4"; then as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 fi $as_echo "$as_me: error: $2" >&2 as_fn_exit $as_status } # as_fn_error # as_fn_set_status STATUS # ----------------------- # Set $? to STATUS, without forking. as_fn_set_status () { return $1 } # as_fn_set_status # as_fn_exit STATUS # ----------------- # Exit the shell with STATUS, even in a "trap 0" or "set -e" context. as_fn_exit () { set +e as_fn_set_status $1 exit $1 } # as_fn_exit # as_fn_unset VAR # --------------- # Portably unset VAR. as_fn_unset () { { eval $1=; unset $1;} } as_unset=as_fn_unset # as_fn_append VAR VALUE # ---------------------- # Append the text in VALUE to the end of the definition contained in VAR. Take # advantage of any shell optimizations that allow amortized linear growth over # repeated appends, instead of the typical quadratic growth present in naive # implementations. if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : eval 'as_fn_append () { eval $1+=\$2 }' else as_fn_append () { eval $1=\$$1\$2 } fi # as_fn_append # as_fn_arith ARG... # ------------------ # Perform arithmetic evaluation on the ARGs, and store the result in the # global $as_val. Take advantage of shells that can avoid forks. The arguments # must be portable across $(()) and expr. if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : eval 'as_fn_arith () { as_val=$(( $* )) }' else as_fn_arith () { as_val=`expr "$@" || test $? -eq 1` } fi # as_fn_arith if expr a : '\(a\)' >/dev/null 2>&1 && test "X`expr 00001 : '.*\(...\)'`" = X001; then as_expr=expr else as_expr=false fi if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then as_basename=basename else as_basename=false fi if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then as_dirname=dirname else as_dirname=false fi as_me=`$as_basename -- "$0" || $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ X"$0" : 'X\(//\)$' \| \ X"$0" : 'X\(/\)' \| . 2>/dev/null || $as_echo X/"$0" | sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/ q } /^X\/\(\/\/\)$/{ s//\1/ q } /^X\/\(\/\).*/{ s//\1/ q } s/.*/./; q'` # Avoid depending upon Character Ranges. as_cr_letters='abcdefghijklmnopqrstuvwxyz' as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' as_cr_Letters=$as_cr_letters$as_cr_LETTERS as_cr_digits='0123456789' as_cr_alnum=$as_cr_Letters$as_cr_digits ECHO_C= ECHO_N= ECHO_T= case `echo -n x` in #((((( -n*) case `echo 'xy\c'` in *c*) ECHO_T=' ';; # ECHO_T is single tab character. xy) ECHO_C='\c';; *) echo `echo ksh88 bug on AIX 6.1` > /dev/null ECHO_T=' ';; esac;; *) ECHO_N='-n';; esac rm -f conf$$ conf$$.exe conf$$.file if test -d conf$$.dir; then rm -f conf$$.dir/conf$$.file else rm -f conf$$.dir mkdir conf$$.dir 2>/dev/null fi if (echo >conf$$.file) 2>/dev/null; then if ln -s conf$$.file conf$$ 2>/dev/null; then as_ln_s='ln -s' # ... but there are two gotchas: # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. # In both cases, we have to default to `cp -pR'. ln -s conf$$.file conf$$.dir 2>/dev/null && test ! 
-f conf$$.exe || as_ln_s='cp -pR' elif ln conf$$.file conf$$ 2>/dev/null; then as_ln_s=ln else as_ln_s='cp -pR' fi else as_ln_s='cp -pR' fi rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file rmdir conf$$.dir 2>/dev/null # as_fn_mkdir_p # ------------- # Create "$as_dir" as a directory, including parents if necessary. as_fn_mkdir_p () { case $as_dir in #( -*) as_dir=./$as_dir;; esac test -d "$as_dir" || eval $as_mkdir_p || { as_dirs= while :; do case $as_dir in #( *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( *) as_qdir=$as_dir;; esac as_dirs="'$as_qdir' $as_dirs" as_dir=`$as_dirname -- "$as_dir" || $as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_dir" : 'X\(//\)[^/]' \| \ X"$as_dir" : 'X\(//\)$' \| \ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$as_dir" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` test -d "$as_dir" && break done test -z "$as_dirs" || eval "mkdir $as_dirs" } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" } # as_fn_mkdir_p if mkdir -p . 2>/dev/null; then as_mkdir_p='mkdir -p "$as_dir"' else test -d ./-p && rmdir ./-p as_mkdir_p=false fi # as_fn_executable_p FILE # ----------------------- # Test if FILE is an executable regular file. as_fn_executable_p () { test -f "$1" && test -x "$1" } # as_fn_executable_p as_test_x='test -x' as_executable_p=as_fn_executable_p # Sed expression to map a string onto a valid CPP name. as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" # Sed expression to map a string onto a valid variable name. as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" exec 6>&1 ## ----------------------------------- ## ## Main body of $CONFIG_STATUS script. ## ## ----------------------------------- ## _ASEOF test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1 cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # Save the log message, to keep $0 and so on meaningful, and to # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. ac_log=" This file was extended by sleuthkit $as_me 4.2.0, which was generated by GNU Autoconf 2.69. Invocation command line was CONFIG_FILES = $CONFIG_FILES CONFIG_HEADERS = $CONFIG_HEADERS CONFIG_LINKS = $CONFIG_LINKS CONFIG_COMMANDS = $CONFIG_COMMANDS $ $0 $@ on `(hostname || uname -n) 2>/dev/null | sed 1q` " _ACEOF case $ac_config_files in *" "*) set x $ac_config_files; shift; ac_config_files=$*;; esac case $ac_config_headers in *" "*) set x $ac_config_headers; shift; ac_config_headers=$*;; esac cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 # Files that config.status was made for. config_files="$ac_config_files" config_headers="$ac_config_headers" config_commands="$ac_config_commands" _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 ac_cs_usage="\ \`$as_me' instantiates files and other configuration actions from templates according to the current configuration. Unless the files and actions are specified as TAGs, all are instantiated by default. Usage: $0 [OPTION]... [TAG]... 
-h, --help print this help, then exit -V, --version print version number and configuration settings, then exit --config print configuration, then exit -q, --quiet, --silent do not print progress messages -d, --debug don't remove temporary files --recheck update $as_me by reconfiguring in the same conditions --file=FILE[:TEMPLATE] instantiate the configuration file FILE --header=FILE[:TEMPLATE] instantiate the configuration header FILE Configuration files: $config_files Configuration headers: $config_headers Configuration commands: $config_commands Report bugs to the package provider." _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" ac_cs_version="\\ sleuthkit config.status 4.2.0 configured by $0, generated by GNU Autoconf 2.69, with options \\"\$ac_cs_config\\" Copyright (C) 2012 Free Software Foundation, Inc. This config.status script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it." ac_pwd='$ac_pwd' srcdir='$srcdir' INSTALL='$INSTALL' MKDIR_P='$MKDIR_P' AWK='$AWK' test -n "\$AWK" || AWK=awk _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # The default lists apply if the user does not specify any file. ac_need_defaults=: while test $# != 0 do case $1 in --*=?*) ac_option=`expr "X$1" : 'X\([^=]*\)='` ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` ac_shift=: ;; --*=) ac_option=`expr "X$1" : 'X\([^=]*\)='` ac_optarg= ac_shift=: ;; *) ac_option=$1 ac_optarg=$2 ac_shift=shift ;; esac case $ac_option in # Handling of the options. -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) ac_cs_recheck=: ;; --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) $as_echo "$ac_cs_version"; exit ;; --config | --confi | --conf | --con | --co | --c ) $as_echo "$ac_cs_config"; exit ;; --debug | --debu | --deb | --de | --d | -d ) debug=: ;; --file | --fil | --fi | --f ) $ac_shift case $ac_optarg in *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; '') as_fn_error $? "missing file argument" ;; esac as_fn_append CONFIG_FILES " '$ac_optarg'" ac_need_defaults=false;; --header | --heade | --head | --hea ) $ac_shift case $ac_optarg in *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; esac as_fn_append CONFIG_HEADERS " '$ac_optarg'" ac_need_defaults=false;; --he | --h) # Conflict between --help and --header as_fn_error $? "ambiguous option: \`$1' Try \`$0 --help' for more information.";; --help | --hel | -h ) $as_echo "$ac_cs_usage"; exit ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil | --si | --s) ac_cs_silent=: ;; # This is an error. -*) as_fn_error $? "unrecognized option: \`$1' Try \`$0 --help' for more information." ;; *) as_fn_append ac_config_targets " $1" ac_need_defaults=false ;; esac shift done ac_configure_extra_args= if $ac_cs_silent; then exec 6>/dev/null ac_configure_extra_args="$ac_configure_extra_args --silent" fi _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 if \$ac_cs_recheck; then set X $SHELL '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion shift \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6 CONFIG_SHELL='$SHELL' export CONFIG_SHELL exec "\$@" fi _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 exec 5>>config.log { echo sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX ## Running $as_me. 
## _ASBOX $as_echo "$ac_log" } >&5 _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 # # INIT-COMMANDS # AMDEP_TRUE="$AMDEP_TRUE" ac_aux_dir="$ac_aux_dir" # The HP-UX ksh and POSIX shell print the target directory to stdout # if CDPATH is set. (unset CDPATH) >/dev/null 2>&1 && unset CDPATH sed_quote_subst='$sed_quote_subst' double_quote_subst='$double_quote_subst' delay_variable_subst='$delay_variable_subst' macro_version='`$ECHO "$macro_version" | $SED "$delay_single_quote_subst"`' macro_revision='`$ECHO "$macro_revision" | $SED "$delay_single_quote_subst"`' enable_shared='`$ECHO "$enable_shared" | $SED "$delay_single_quote_subst"`' enable_static='`$ECHO "$enable_static" | $SED "$delay_single_quote_subst"`' pic_mode='`$ECHO "$pic_mode" | $SED "$delay_single_quote_subst"`' enable_fast_install='`$ECHO "$enable_fast_install" | $SED "$delay_single_quote_subst"`' shared_archive_member_spec='`$ECHO "$shared_archive_member_spec" | $SED "$delay_single_quote_subst"`' SHELL='`$ECHO "$SHELL" | $SED "$delay_single_quote_subst"`' ECHO='`$ECHO "$ECHO" | $SED "$delay_single_quote_subst"`' PATH_SEPARATOR='`$ECHO "$PATH_SEPARATOR" | $SED "$delay_single_quote_subst"`' host_alias='`$ECHO "$host_alias" | $SED "$delay_single_quote_subst"`' host='`$ECHO "$host" | $SED "$delay_single_quote_subst"`' host_os='`$ECHO "$host_os" | $SED "$delay_single_quote_subst"`' build_alias='`$ECHO "$build_alias" | $SED "$delay_single_quote_subst"`' build='`$ECHO "$build" | $SED "$delay_single_quote_subst"`' build_os='`$ECHO "$build_os" | $SED "$delay_single_quote_subst"`' SED='`$ECHO "$SED" | $SED "$delay_single_quote_subst"`' Xsed='`$ECHO "$Xsed" | $SED "$delay_single_quote_subst"`' GREP='`$ECHO "$GREP" | $SED "$delay_single_quote_subst"`' EGREP='`$ECHO "$EGREP" | $SED "$delay_single_quote_subst"`' FGREP='`$ECHO "$FGREP" | $SED "$delay_single_quote_subst"`' LD='`$ECHO "$LD" | $SED "$delay_single_quote_subst"`' NM='`$ECHO "$NM" | $SED "$delay_single_quote_subst"`' LN_S='`$ECHO "$LN_S" | $SED "$delay_single_quote_subst"`' max_cmd_len='`$ECHO "$max_cmd_len" | $SED "$delay_single_quote_subst"`' ac_objext='`$ECHO "$ac_objext" | $SED "$delay_single_quote_subst"`' exeext='`$ECHO "$exeext" | $SED "$delay_single_quote_subst"`' lt_unset='`$ECHO "$lt_unset" | $SED "$delay_single_quote_subst"`' lt_SP2NL='`$ECHO "$lt_SP2NL" | $SED "$delay_single_quote_subst"`' lt_NL2SP='`$ECHO "$lt_NL2SP" | $SED "$delay_single_quote_subst"`' lt_cv_to_host_file_cmd='`$ECHO "$lt_cv_to_host_file_cmd" | $SED "$delay_single_quote_subst"`' lt_cv_to_tool_file_cmd='`$ECHO "$lt_cv_to_tool_file_cmd" | $SED "$delay_single_quote_subst"`' reload_flag='`$ECHO "$reload_flag" | $SED "$delay_single_quote_subst"`' reload_cmds='`$ECHO "$reload_cmds" | $SED "$delay_single_quote_subst"`' OBJDUMP='`$ECHO "$OBJDUMP" | $SED "$delay_single_quote_subst"`' deplibs_check_method='`$ECHO "$deplibs_check_method" | $SED "$delay_single_quote_subst"`' file_magic_cmd='`$ECHO "$file_magic_cmd" | $SED "$delay_single_quote_subst"`' file_magic_glob='`$ECHO "$file_magic_glob" | $SED "$delay_single_quote_subst"`' want_nocaseglob='`$ECHO "$want_nocaseglob" | $SED "$delay_single_quote_subst"`' DLLTOOL='`$ECHO "$DLLTOOL" | $SED "$delay_single_quote_subst"`' sharedlib_from_linklib_cmd='`$ECHO "$sharedlib_from_linklib_cmd" | $SED "$delay_single_quote_subst"`' AR='`$ECHO "$AR" | $SED "$delay_single_quote_subst"`' AR_FLAGS='`$ECHO "$AR_FLAGS" | $SED "$delay_single_quote_subst"`' archiver_list_spec='`$ECHO "$archiver_list_spec" | $SED "$delay_single_quote_subst"`' STRIP='`$ECHO "$STRIP" | $SED 
"$delay_single_quote_subst"`' RANLIB='`$ECHO "$RANLIB" | $SED "$delay_single_quote_subst"`' old_postinstall_cmds='`$ECHO "$old_postinstall_cmds" | $SED "$delay_single_quote_subst"`' old_postuninstall_cmds='`$ECHO "$old_postuninstall_cmds" | $SED "$delay_single_quote_subst"`' old_archive_cmds='`$ECHO "$old_archive_cmds" | $SED "$delay_single_quote_subst"`' lock_old_archive_extraction='`$ECHO "$lock_old_archive_extraction" | $SED "$delay_single_quote_subst"`' CC='`$ECHO "$CC" | $SED "$delay_single_quote_subst"`' CFLAGS='`$ECHO "$CFLAGS" | $SED "$delay_single_quote_subst"`' compiler='`$ECHO "$compiler" | $SED "$delay_single_quote_subst"`' GCC='`$ECHO "$GCC" | $SED "$delay_single_quote_subst"`' lt_cv_sys_global_symbol_pipe='`$ECHO "$lt_cv_sys_global_symbol_pipe" | $SED "$delay_single_quote_subst"`' lt_cv_sys_global_symbol_to_cdecl='`$ECHO "$lt_cv_sys_global_symbol_to_cdecl" | $SED "$delay_single_quote_subst"`' lt_cv_sys_global_symbol_to_import='`$ECHO "$lt_cv_sys_global_symbol_to_import" | $SED "$delay_single_quote_subst"`' lt_cv_sys_global_symbol_to_c_name_address='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address" | $SED "$delay_single_quote_subst"`' lt_cv_sys_global_symbol_to_c_name_address_lib_prefix='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address_lib_prefix" | $SED "$delay_single_quote_subst"`' lt_cv_nm_interface='`$ECHO "$lt_cv_nm_interface" | $SED "$delay_single_quote_subst"`' nm_file_list_spec='`$ECHO "$nm_file_list_spec" | $SED "$delay_single_quote_subst"`' lt_sysroot='`$ECHO "$lt_sysroot" | $SED "$delay_single_quote_subst"`' lt_cv_truncate_bin='`$ECHO "$lt_cv_truncate_bin" | $SED "$delay_single_quote_subst"`' objdir='`$ECHO "$objdir" | $SED "$delay_single_quote_subst"`' MAGIC_CMD='`$ECHO "$MAGIC_CMD" | $SED "$delay_single_quote_subst"`' lt_prog_compiler_no_builtin_flag='`$ECHO "$lt_prog_compiler_no_builtin_flag" | $SED "$delay_single_quote_subst"`' lt_prog_compiler_pic='`$ECHO "$lt_prog_compiler_pic" | $SED "$delay_single_quote_subst"`' lt_prog_compiler_wl='`$ECHO "$lt_prog_compiler_wl" | $SED "$delay_single_quote_subst"`' lt_prog_compiler_static='`$ECHO "$lt_prog_compiler_static" | $SED "$delay_single_quote_subst"`' lt_cv_prog_compiler_c_o='`$ECHO "$lt_cv_prog_compiler_c_o" | $SED "$delay_single_quote_subst"`' need_locks='`$ECHO "$need_locks" | $SED "$delay_single_quote_subst"`' MANIFEST_TOOL='`$ECHO "$MANIFEST_TOOL" | $SED "$delay_single_quote_subst"`' DSYMUTIL='`$ECHO "$DSYMUTIL" | $SED "$delay_single_quote_subst"`' NMEDIT='`$ECHO "$NMEDIT" | $SED "$delay_single_quote_subst"`' LIPO='`$ECHO "$LIPO" | $SED "$delay_single_quote_subst"`' OTOOL='`$ECHO "$OTOOL" | $SED "$delay_single_quote_subst"`' OTOOL64='`$ECHO "$OTOOL64" | $SED "$delay_single_quote_subst"`' libext='`$ECHO "$libext" | $SED "$delay_single_quote_subst"`' shrext_cmds='`$ECHO "$shrext_cmds" | $SED "$delay_single_quote_subst"`' extract_expsyms_cmds='`$ECHO "$extract_expsyms_cmds" | $SED "$delay_single_quote_subst"`' archive_cmds_need_lc='`$ECHO "$archive_cmds_need_lc" | $SED "$delay_single_quote_subst"`' enable_shared_with_static_runtimes='`$ECHO "$enable_shared_with_static_runtimes" | $SED "$delay_single_quote_subst"`' export_dynamic_flag_spec='`$ECHO "$export_dynamic_flag_spec" | $SED "$delay_single_quote_subst"`' whole_archive_flag_spec='`$ECHO "$whole_archive_flag_spec" | $SED "$delay_single_quote_subst"`' compiler_needs_object='`$ECHO "$compiler_needs_object" | $SED "$delay_single_quote_subst"`' old_archive_from_new_cmds='`$ECHO "$old_archive_from_new_cmds" | $SED "$delay_single_quote_subst"`' 
old_archive_from_expsyms_cmds='`$ECHO "$old_archive_from_expsyms_cmds" | $SED "$delay_single_quote_subst"`' archive_cmds='`$ECHO "$archive_cmds" | $SED "$delay_single_quote_subst"`' archive_expsym_cmds='`$ECHO "$archive_expsym_cmds" | $SED "$delay_single_quote_subst"`' module_cmds='`$ECHO "$module_cmds" | $SED "$delay_single_quote_subst"`' module_expsym_cmds='`$ECHO "$module_expsym_cmds" | $SED "$delay_single_quote_subst"`' with_gnu_ld='`$ECHO "$with_gnu_ld" | $SED "$delay_single_quote_subst"`' allow_undefined_flag='`$ECHO "$allow_undefined_flag" | $SED "$delay_single_quote_subst"`' no_undefined_flag='`$ECHO "$no_undefined_flag" | $SED "$delay_single_quote_subst"`' hardcode_libdir_flag_spec='`$ECHO "$hardcode_libdir_flag_spec" | $SED "$delay_single_quote_subst"`' hardcode_libdir_separator='`$ECHO "$hardcode_libdir_separator" | $SED "$delay_single_quote_subst"`' hardcode_direct='`$ECHO "$hardcode_direct" | $SED "$delay_single_quote_subst"`' hardcode_direct_absolute='`$ECHO "$hardcode_direct_absolute" | $SED "$delay_single_quote_subst"`' hardcode_minus_L='`$ECHO "$hardcode_minus_L" | $SED "$delay_single_quote_subst"`' hardcode_shlibpath_var='`$ECHO "$hardcode_shlibpath_var" | $SED "$delay_single_quote_subst"`' hardcode_automatic='`$ECHO "$hardcode_automatic" | $SED "$delay_single_quote_subst"`' inherit_rpath='`$ECHO "$inherit_rpath" | $SED "$delay_single_quote_subst"`' link_all_deplibs='`$ECHO "$link_all_deplibs" | $SED "$delay_single_quote_subst"`' always_export_symbols='`$ECHO "$always_export_symbols" | $SED "$delay_single_quote_subst"`' export_symbols_cmds='`$ECHO "$export_symbols_cmds" | $SED "$delay_single_quote_subst"`' exclude_expsyms='`$ECHO "$exclude_expsyms" | $SED "$delay_single_quote_subst"`' include_expsyms='`$ECHO "$include_expsyms" | $SED "$delay_single_quote_subst"`' prelink_cmds='`$ECHO "$prelink_cmds" | $SED "$delay_single_quote_subst"`' postlink_cmds='`$ECHO "$postlink_cmds" | $SED "$delay_single_quote_subst"`' file_list_spec='`$ECHO "$file_list_spec" | $SED "$delay_single_quote_subst"`' variables_saved_for_relink='`$ECHO "$variables_saved_for_relink" | $SED "$delay_single_quote_subst"`' need_lib_prefix='`$ECHO "$need_lib_prefix" | $SED "$delay_single_quote_subst"`' need_version='`$ECHO "$need_version" | $SED "$delay_single_quote_subst"`' version_type='`$ECHO "$version_type" | $SED "$delay_single_quote_subst"`' runpath_var='`$ECHO "$runpath_var" | $SED "$delay_single_quote_subst"`' shlibpath_var='`$ECHO "$shlibpath_var" | $SED "$delay_single_quote_subst"`' shlibpath_overrides_runpath='`$ECHO "$shlibpath_overrides_runpath" | $SED "$delay_single_quote_subst"`' libname_spec='`$ECHO "$libname_spec" | $SED "$delay_single_quote_subst"`' library_names_spec='`$ECHO "$library_names_spec" | $SED "$delay_single_quote_subst"`' soname_spec='`$ECHO "$soname_spec" | $SED "$delay_single_quote_subst"`' install_override_mode='`$ECHO "$install_override_mode" | $SED "$delay_single_quote_subst"`' postinstall_cmds='`$ECHO "$postinstall_cmds" | $SED "$delay_single_quote_subst"`' postuninstall_cmds='`$ECHO "$postuninstall_cmds" | $SED "$delay_single_quote_subst"`' finish_cmds='`$ECHO "$finish_cmds" | $SED "$delay_single_quote_subst"`' finish_eval='`$ECHO "$finish_eval" | $SED "$delay_single_quote_subst"`' hardcode_into_libs='`$ECHO "$hardcode_into_libs" | $SED "$delay_single_quote_subst"`' sys_lib_search_path_spec='`$ECHO "$sys_lib_search_path_spec" | $SED "$delay_single_quote_subst"`' configure_time_dlsearch_path='`$ECHO "$configure_time_dlsearch_path" | $SED "$delay_single_quote_subst"`' 
configure_time_lt_sys_library_path='`$ECHO "$configure_time_lt_sys_library_path" | $SED "$delay_single_quote_subst"`' hardcode_action='`$ECHO "$hardcode_action" | $SED "$delay_single_quote_subst"`' enable_dlopen='`$ECHO "$enable_dlopen" | $SED "$delay_single_quote_subst"`' enable_dlopen_self='`$ECHO "$enable_dlopen_self" | $SED "$delay_single_quote_subst"`' enable_dlopen_self_static='`$ECHO "$enable_dlopen_self_static" | $SED "$delay_single_quote_subst"`' old_striplib='`$ECHO "$old_striplib" | $SED "$delay_single_quote_subst"`' striplib='`$ECHO "$striplib" | $SED "$delay_single_quote_subst"`' compiler_lib_search_dirs='`$ECHO "$compiler_lib_search_dirs" | $SED "$delay_single_quote_subst"`' predep_objects='`$ECHO "$predep_objects" | $SED "$delay_single_quote_subst"`' postdep_objects='`$ECHO "$postdep_objects" | $SED "$delay_single_quote_subst"`' predeps='`$ECHO "$predeps" | $SED "$delay_single_quote_subst"`' postdeps='`$ECHO "$postdeps" | $SED "$delay_single_quote_subst"`' compiler_lib_search_path='`$ECHO "$compiler_lib_search_path" | $SED "$delay_single_quote_subst"`' LD_CXX='`$ECHO "$LD_CXX" | $SED "$delay_single_quote_subst"`' reload_flag_CXX='`$ECHO "$reload_flag_CXX" | $SED "$delay_single_quote_subst"`' reload_cmds_CXX='`$ECHO "$reload_cmds_CXX" | $SED "$delay_single_quote_subst"`' old_archive_cmds_CXX='`$ECHO "$old_archive_cmds_CXX" | $SED "$delay_single_quote_subst"`' compiler_CXX='`$ECHO "$compiler_CXX" | $SED "$delay_single_quote_subst"`' GCC_CXX='`$ECHO "$GCC_CXX" | $SED "$delay_single_quote_subst"`' lt_prog_compiler_no_builtin_flag_CXX='`$ECHO "$lt_prog_compiler_no_builtin_flag_CXX" | $SED "$delay_single_quote_subst"`' lt_prog_compiler_pic_CXX='`$ECHO "$lt_prog_compiler_pic_CXX" | $SED "$delay_single_quote_subst"`' lt_prog_compiler_wl_CXX='`$ECHO "$lt_prog_compiler_wl_CXX" | $SED "$delay_single_quote_subst"`' lt_prog_compiler_static_CXX='`$ECHO "$lt_prog_compiler_static_CXX" | $SED "$delay_single_quote_subst"`' lt_cv_prog_compiler_c_o_CXX='`$ECHO "$lt_cv_prog_compiler_c_o_CXX" | $SED "$delay_single_quote_subst"`' archive_cmds_need_lc_CXX='`$ECHO "$archive_cmds_need_lc_CXX" | $SED "$delay_single_quote_subst"`' enable_shared_with_static_runtimes_CXX='`$ECHO "$enable_shared_with_static_runtimes_CXX" | $SED "$delay_single_quote_subst"`' export_dynamic_flag_spec_CXX='`$ECHO "$export_dynamic_flag_spec_CXX" | $SED "$delay_single_quote_subst"`' whole_archive_flag_spec_CXX='`$ECHO "$whole_archive_flag_spec_CXX" | $SED "$delay_single_quote_subst"`' compiler_needs_object_CXX='`$ECHO "$compiler_needs_object_CXX" | $SED "$delay_single_quote_subst"`' old_archive_from_new_cmds_CXX='`$ECHO "$old_archive_from_new_cmds_CXX" | $SED "$delay_single_quote_subst"`' old_archive_from_expsyms_cmds_CXX='`$ECHO "$old_archive_from_expsyms_cmds_CXX" | $SED "$delay_single_quote_subst"`' archive_cmds_CXX='`$ECHO "$archive_cmds_CXX" | $SED "$delay_single_quote_subst"`' archive_expsym_cmds_CXX='`$ECHO "$archive_expsym_cmds_CXX" | $SED "$delay_single_quote_subst"`' module_cmds_CXX='`$ECHO "$module_cmds_CXX" | $SED "$delay_single_quote_subst"`' module_expsym_cmds_CXX='`$ECHO "$module_expsym_cmds_CXX" | $SED "$delay_single_quote_subst"`' with_gnu_ld_CXX='`$ECHO "$with_gnu_ld_CXX" | $SED "$delay_single_quote_subst"`' allow_undefined_flag_CXX='`$ECHO "$allow_undefined_flag_CXX" | $SED "$delay_single_quote_subst"`' no_undefined_flag_CXX='`$ECHO "$no_undefined_flag_CXX" | $SED "$delay_single_quote_subst"`' hardcode_libdir_flag_spec_CXX='`$ECHO "$hardcode_libdir_flag_spec_CXX" | $SED "$delay_single_quote_subst"`' 
hardcode_libdir_separator_CXX='`$ECHO "$hardcode_libdir_separator_CXX" | $SED "$delay_single_quote_subst"`' hardcode_direct_CXX='`$ECHO "$hardcode_direct_CXX" | $SED "$delay_single_quote_subst"`' hardcode_direct_absolute_CXX='`$ECHO "$hardcode_direct_absolute_CXX" | $SED "$delay_single_quote_subst"`' hardcode_minus_L_CXX='`$ECHO "$hardcode_minus_L_CXX" | $SED "$delay_single_quote_subst"`' hardcode_shlibpath_var_CXX='`$ECHO "$hardcode_shlibpath_var_CXX" | $SED "$delay_single_quote_subst"`' hardcode_automatic_CXX='`$ECHO "$hardcode_automatic_CXX" | $SED "$delay_single_quote_subst"`' inherit_rpath_CXX='`$ECHO "$inherit_rpath_CXX" | $SED "$delay_single_quote_subst"`' link_all_deplibs_CXX='`$ECHO "$link_all_deplibs_CXX" | $SED "$delay_single_quote_subst"`' always_export_symbols_CXX='`$ECHO "$always_export_symbols_CXX" | $SED "$delay_single_quote_subst"`' export_symbols_cmds_CXX='`$ECHO "$export_symbols_cmds_CXX" | $SED "$delay_single_quote_subst"`' exclude_expsyms_CXX='`$ECHO "$exclude_expsyms_CXX" | $SED "$delay_single_quote_subst"`' include_expsyms_CXX='`$ECHO "$include_expsyms_CXX" | $SED "$delay_single_quote_subst"`' prelink_cmds_CXX='`$ECHO "$prelink_cmds_CXX" | $SED "$delay_single_quote_subst"`' postlink_cmds_CXX='`$ECHO "$postlink_cmds_CXX" | $SED "$delay_single_quote_subst"`' file_list_spec_CXX='`$ECHO "$file_list_spec_CXX" | $SED "$delay_single_quote_subst"`' hardcode_action_CXX='`$ECHO "$hardcode_action_CXX" | $SED "$delay_single_quote_subst"`' compiler_lib_search_dirs_CXX='`$ECHO "$compiler_lib_search_dirs_CXX" | $SED "$delay_single_quote_subst"`' predep_objects_CXX='`$ECHO "$predep_objects_CXX" | $SED "$delay_single_quote_subst"`' postdep_objects_CXX='`$ECHO "$postdep_objects_CXX" | $SED "$delay_single_quote_subst"`' predeps_CXX='`$ECHO "$predeps_CXX" | $SED "$delay_single_quote_subst"`' postdeps_CXX='`$ECHO "$postdeps_CXX" | $SED "$delay_single_quote_subst"`' compiler_lib_search_path_CXX='`$ECHO "$compiler_lib_search_path_CXX" | $SED "$delay_single_quote_subst"`' LTCC='$LTCC' LTCFLAGS='$LTCFLAGS' compiler='$compiler_DEFAULT' # A function that is used when there is no print builtin or printf. func_fallback_echo () { eval 'cat <<_LTECHO_EOF \$1 _LTECHO_EOF' } # Quote evaled strings. 
for var in SHELL \ ECHO \ PATH_SEPARATOR \ SED \ GREP \ EGREP \ FGREP \ LD \ NM \ LN_S \ lt_SP2NL \ lt_NL2SP \ reload_flag \ OBJDUMP \ deplibs_check_method \ file_magic_cmd \ file_magic_glob \ want_nocaseglob \ DLLTOOL \ sharedlib_from_linklib_cmd \ AR \ AR_FLAGS \ archiver_list_spec \ STRIP \ RANLIB \ CC \ CFLAGS \ compiler \ lt_cv_sys_global_symbol_pipe \ lt_cv_sys_global_symbol_to_cdecl \ lt_cv_sys_global_symbol_to_import \ lt_cv_sys_global_symbol_to_c_name_address \ lt_cv_sys_global_symbol_to_c_name_address_lib_prefix \ lt_cv_nm_interface \ nm_file_list_spec \ lt_cv_truncate_bin \ lt_prog_compiler_no_builtin_flag \ lt_prog_compiler_pic \ lt_prog_compiler_wl \ lt_prog_compiler_static \ lt_cv_prog_compiler_c_o \ need_locks \ MANIFEST_TOOL \ DSYMUTIL \ NMEDIT \ LIPO \ OTOOL \ OTOOL64 \ shrext_cmds \ export_dynamic_flag_spec \ whole_archive_flag_spec \ compiler_needs_object \ with_gnu_ld \ allow_undefined_flag \ no_undefined_flag \ hardcode_libdir_flag_spec \ hardcode_libdir_separator \ exclude_expsyms \ include_expsyms \ file_list_spec \ variables_saved_for_relink \ libname_spec \ library_names_spec \ soname_spec \ install_override_mode \ finish_eval \ old_striplib \ striplib \ compiler_lib_search_dirs \ predep_objects \ postdep_objects \ predeps \ postdeps \ compiler_lib_search_path \ LD_CXX \ reload_flag_CXX \ compiler_CXX \ lt_prog_compiler_no_builtin_flag_CXX \ lt_prog_compiler_pic_CXX \ lt_prog_compiler_wl_CXX \ lt_prog_compiler_static_CXX \ lt_cv_prog_compiler_c_o_CXX \ export_dynamic_flag_spec_CXX \ whole_archive_flag_spec_CXX \ compiler_needs_object_CXX \ with_gnu_ld_CXX \ allow_undefined_flag_CXX \ no_undefined_flag_CXX \ hardcode_libdir_flag_spec_CXX \ hardcode_libdir_separator_CXX \ exclude_expsyms_CXX \ include_expsyms_CXX \ file_list_spec_CXX \ compiler_lib_search_dirs_CXX \ predep_objects_CXX \ postdep_objects_CXX \ predeps_CXX \ postdeps_CXX \ compiler_lib_search_path_CXX; do case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in *[\\\\\\\`\\"\\\$]*) eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED \\"\\\$sed_quote_subst\\"\\\`\\\\\\"" ## exclude from sc_prohibit_nested_quotes ;; *) eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" ;; esac done # Double-quote double-evaled strings. for var in reload_cmds \ old_postinstall_cmds \ old_postuninstall_cmds \ old_archive_cmds \ extract_expsyms_cmds \ old_archive_from_new_cmds \ old_archive_from_expsyms_cmds \ archive_cmds \ archive_expsym_cmds \ module_cmds \ module_expsym_cmds \ export_symbols_cmds \ prelink_cmds \ postlink_cmds \ postinstall_cmds \ postuninstall_cmds \ finish_cmds \ sys_lib_search_path_spec \ configure_time_dlsearch_path \ configure_time_lt_sys_library_path \ reload_cmds_CXX \ old_archive_cmds_CXX \ old_archive_from_new_cmds_CXX \ old_archive_from_expsyms_cmds_CXX \ archive_cmds_CXX \ archive_expsym_cmds_CXX \ module_cmds_CXX \ module_expsym_cmds_CXX \ export_symbols_cmds_CXX \ prelink_cmds_CXX \ postlink_cmds_CXX; do case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in *[\\\\\\\`\\"\\\$]*) eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\"" ## exclude from sc_prohibit_nested_quotes ;; *) eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" ;; esac done ac_aux_dir='$ac_aux_dir' # See if we are running on zsh, and set the options that allow our # commands through without removal of \ escapes INIT. 
if test -n "\${ZSH_VERSION+set}"; then setopt NO_GLOB_SUBST fi PACKAGE='$PACKAGE' VERSION='$VERSION' RM='$RM' ofile='$ofile' ac_cv_header_unistd_h=$ac_cv_header_unistd_h ac_cv_header_inttypes_h=$ac_cv_header_inttypes_h ac_cv_header_sys_param_h=$ac_cv_header_sys_param_h ax_pthread_ok=$ax_pthread_ok _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # Handling of arguments. for ac_config_target in $ac_config_targets do case $ac_config_target in "tsk/tsk_config.h") CONFIG_HEADERS="$CONFIG_HEADERS tsk/tsk_config.h" ;; "depfiles") CONFIG_COMMANDS="$CONFIG_COMMANDS depfiles" ;; "libtool") CONFIG_COMMANDS="$CONFIG_COMMANDS libtool" ;; "tsk/tsk_incs.h") CONFIG_COMMANDS="$CONFIG_COMMANDS tsk/tsk_incs.h" ;; "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;; "tsk/Makefile") CONFIG_FILES="$CONFIG_FILES tsk/Makefile" ;; "tsk/base/Makefile") CONFIG_FILES="$CONFIG_FILES tsk/base/Makefile" ;; "tsk/img/Makefile") CONFIG_FILES="$CONFIG_FILES tsk/img/Makefile" ;; "tsk/vs/Makefile") CONFIG_FILES="$CONFIG_FILES tsk/vs/Makefile" ;; "tsk/fs/Makefile") CONFIG_FILES="$CONFIG_FILES tsk/fs/Makefile" ;; "tsk/hashdb/Makefile") CONFIG_FILES="$CONFIG_FILES tsk/hashdb/Makefile" ;; "tsk/auto/Makefile") CONFIG_FILES="$CONFIG_FILES tsk/auto/Makefile" ;; "tools/Makefile") CONFIG_FILES="$CONFIG_FILES tools/Makefile" ;; "tools/imgtools/Makefile") CONFIG_FILES="$CONFIG_FILES tools/imgtools/Makefile" ;; "tools/vstools/Makefile") CONFIG_FILES="$CONFIG_FILES tools/vstools/Makefile" ;; "tools/fstools/Makefile") CONFIG_FILES="$CONFIG_FILES tools/fstools/Makefile" ;; "tools/hashtools/Makefile") CONFIG_FILES="$CONFIG_FILES tools/hashtools/Makefile" ;; "tools/srchtools/Makefile") CONFIG_FILES="$CONFIG_FILES tools/srchtools/Makefile" ;; "tools/autotools/Makefile") CONFIG_FILES="$CONFIG_FILES tools/autotools/Makefile" ;; "tools/sorter/Makefile") CONFIG_FILES="$CONFIG_FILES tools/sorter/Makefile" ;; "tools/timeline/Makefile") CONFIG_FILES="$CONFIG_FILES tools/timeline/Makefile" ;; "tools/fiwalk/Makefile") CONFIG_FILES="$CONFIG_FILES tools/fiwalk/Makefile" ;; "tools/fiwalk/src/Makefile") CONFIG_FILES="$CONFIG_FILES tools/fiwalk/src/Makefile" ;; "tools/fiwalk/plugins/Makefile") CONFIG_FILES="$CONFIG_FILES tools/fiwalk/plugins/Makefile" ;; "tests/Makefile") CONFIG_FILES="$CONFIG_FILES tests/Makefile" ;; "samples/Makefile") CONFIG_FILES="$CONFIG_FILES samples/Makefile" ;; "man/Makefile") CONFIG_FILES="$CONFIG_FILES man/Makefile" ;; "bindings/java/Makefile") CONFIG_FILES="$CONFIG_FILES bindings/java/Makefile" ;; "bindings/java/jni/Makefile") CONFIG_FILES="$CONFIG_FILES bindings/java/jni/Makefile" ;; "unit_tests/Makefile") CONFIG_FILES="$CONFIG_FILES unit_tests/Makefile" ;; "unit_tests/base/Makefile") CONFIG_FILES="$CONFIG_FILES unit_tests/base/Makefile" ;; *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;; esac done # If the user did not use the arguments to specify the items to instantiate, # then the envvar interface is used. Set only those that are not. # We use the long form for the default assignment because of an extremely # bizarre bug on SunOS 4.1.3. if $ac_need_defaults; then test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers test "${CONFIG_COMMANDS+set}" = set || CONFIG_COMMANDS=$config_commands fi # Have a temporary directory for convenience. Make it in the build tree # simply because there is no reason against having it here, and in addition, # creating and moving files from /tmp can sometimes cause problems. 
# Hook for its removal unless debugging. # Note that there is a small window in which the directory will not be cleaned: # after its creation but before its name has been assigned to `$tmp'. $debug || { tmp= ac_tmp= trap 'exit_status=$? : "${ac_tmp:=$tmp}" { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status ' 0 trap 'as_fn_exit 1' 1 2 13 15 } # Create a (secure) tmp directory for tmp files. { tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && test -d "$tmp" } || { tmp=./conf$$-$RANDOM (umask 077 && mkdir "$tmp") } || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5 ac_tmp=$tmp # Set up the scripts for CONFIG_FILES section. # No need to generate them if there are no CONFIG_FILES. # This happens for instance with `./config.status config.h'. if test -n "$CONFIG_FILES"; then ac_cr=`echo X | tr X '\015'` # On cygwin, bash can eat \r inside `` if the user requested igncr. # But we know of no other shell where ac_cr would be empty at this # point, so we can use a bashism as a fallback. if test "x$ac_cr" = x; then eval ac_cr=\$\'\\r\' fi ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' /dev/null` if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then ac_cs_awk_cr='\\r' else ac_cs_awk_cr=$ac_cr fi echo 'BEGIN {' >"$ac_tmp/subs1.awk" && _ACEOF { echo "cat >conf$$subs.awk <<_ACEOF" && echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' && echo "_ACEOF" } >conf$$subs.sh || as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 ac_delim_num=`echo "$ac_subst_vars" | grep -c '^'` ac_delim='%!_!# ' for ac_last_try in false false false false false :; do . ./conf$$subs.sh || as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X` if test $ac_delim_n = $ac_delim_num; then break elif $ac_last_try; then as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 else ac_delim="$ac_delim!$ac_delim _$ac_delim!! " fi done rm -f conf$$subs.sh cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 cat >>"\$ac_tmp/subs1.awk" <<\\_ACAWK && _ACEOF sed -n ' h s/^/S["/; s/!.*/"]=/ p g s/^[^!]*!// :repl t repl s/'"$ac_delim"'$// t delim :nl h s/\(.\{148\}\)..*/\1/ t more1 s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/ p n b repl :more1 s/["\\]/\\&/g; s/^/"/; s/$/"\\/ p g s/.\{148\}// t nl :delim h s/\(.\{148\}\)..*/\1/ t more2 s/["\\]/\\&/g; s/^/"/; s/$/"/ p b :more2 s/["\\]/\\&/g; s/^/"/; s/$/"\\/ p g s/.\{148\}// t delim ' >$CONFIG_STATUS || ac_write_fail=1 rm -f conf$$subs.awk cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 _ACAWK cat >>"\$ac_tmp/subs1.awk" <<_ACAWK && for (key in S) S_is_set[key] = 1 FS = "" } { line = $ 0 nfields = split(line, field, "@") substed = 0 len = length(field[1]) for (i = 2; i < nfields; i++) { key = field[i] keylen = length(key) if (S_is_set[key]) { value = S[key] line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3) len += length(value) + length(field[++i]) substed = 1 } else len += 1 + keylen } print line } _ACAWK _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g" else cat fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \ || as_fn_error $? "could not setup config files machinery" "$LINENO" 5 _ACEOF # VPATH may cause trouble with some makes, so we remove sole $(srcdir), # ${srcdir} and @srcdir@ entries from VPATH if srcdir is ".", strip leading and # trailing colons and then remove the whole line if VPATH becomes empty # (actually we leave an empty line to preserve line numbers). 
if test "x$srcdir" = x.; then ac_vpsub='/^[ ]*VPATH[ ]*=[ ]*/{ h s/// s/^/:/ s/[ ]*$/:/ s/:\$(srcdir):/:/g s/:\${srcdir}:/:/g s/:@srcdir@:/:/g s/^:*// s/:*$// x s/\(=[ ]*\).*/\1/ G s/\n// s/^[^=]*=[ ]*$// }' fi cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 fi # test -n "$CONFIG_FILES" # Set up the scripts for CONFIG_HEADERS section. # No need to generate them if there are no CONFIG_HEADERS. # This happens for instance with `./config.status Makefile'. if test -n "$CONFIG_HEADERS"; then cat >"$ac_tmp/defines.awk" <<\_ACAWK || BEGIN { _ACEOF # Transform confdefs.h into an awk script `defines.awk', embedded as # here-document in config.status, that substitutes the proper values into # config.h.in to produce config.h. # Create a delimiter string that does not exist in confdefs.h, to ease # handling of long lines. ac_delim='%!_!# ' for ac_last_try in false false :; do ac_tt=`sed -n "/$ac_delim/p" confdefs.h` if test -z "$ac_tt"; then break elif $ac_last_try; then as_fn_error $? "could not make $CONFIG_HEADERS" "$LINENO" 5 else ac_delim="$ac_delim!$ac_delim _$ac_delim!! " fi done # For the awk script, D is an array of macro values keyed by name, # likewise P contains macro parameters if any. Preserve backslash # newline sequences. ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]* sed -n ' s/.\{148\}/&'"$ac_delim"'/g t rset :rset s/^[ ]*#[ ]*define[ ][ ]*/ / t def d :def s/\\$// t bsnl s/["\\]/\\&/g s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ D["\1"]=" \3"/p s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2"/p d :bsnl s/["\\]/\\&/g s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ D["\1"]=" \3\\\\\\n"\\/p t cont s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p t cont d :cont n s/.\{148\}/&'"$ac_delim"'/g t clear :clear s/\\$// t bsnlc s/["\\]/\\&/g; s/^/"/; s/$/"/p d :bsnlc s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p b cont ' >$CONFIG_STATUS || ac_write_fail=1 cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 for (key in D) D_is_set[key] = 1 FS = "" } /^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ { line = \$ 0 split(line, arg, " ") if (arg[1] == "#") { defundef = arg[2] mac1 = arg[3] } else { defundef = substr(arg[1], 2) mac1 = arg[2] } split(mac1, mac2, "(") #) macro = mac2[1] prefix = substr(line, 1, index(line, defundef) - 1) if (D_is_set[macro]) { # Preserve the white space surrounding the "#". print prefix "define", macro P[macro] D[macro] next } else { # Replace #undef with comments. This is necessary, for example, # in the case of _POSIX_SOURCE, which is predefined and required # on some systems where configure will not decide to define it. if (defundef == "undef") { print "/*", prefix defundef, macro, "*/" next } } } { print } _ACAWK _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 as_fn_error $? "could not setup config headers machinery" "$LINENO" 5 fi # test -n "$CONFIG_HEADERS" eval set X " :F $CONFIG_FILES :H $CONFIG_HEADERS :C $CONFIG_COMMANDS" shift for ac_tag do case $ac_tag in :[FHLC]) ac_mode=$ac_tag; continue;; esac case $ac_mode$ac_tag in :[FHL]*:*);; :L* | :C*:*) as_fn_error $? "invalid tag \`$ac_tag'" "$LINENO" 5;; :[FH]-) ac_tag=-:-;; :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; esac ac_save_IFS=$IFS IFS=: set x $ac_tag IFS=$ac_save_IFS shift ac_file=$1 shift case $ac_mode in :L) ac_source=$1;; :[FH]) ac_file_inputs= for ac_f do case $ac_f in -) ac_f="$ac_tmp/stdin";; *) # Look for the file first in the build tree, then in the source tree # (if the path is not absolute). 
The absolute path cannot be DOS-style, # because $ac_f cannot contain `:'. test -f "$ac_f" || case $ac_f in [\\/$]*) false;; *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; esac || as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;; esac case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac as_fn_append ac_file_inputs " '$ac_f'" done # Let's still pretend it is `configure' which instantiates (i.e., don't # use $as_me), people would be surprised to read: # /* config.h. Generated by config.status. */ configure_input='Generated from '` $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' `' by configure.' if test x"$ac_file" != x-; then configure_input="$ac_file. $configure_input" { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5 $as_echo "$as_me: creating $ac_file" >&6;} fi # Neutralize special characters interpreted by sed in replacement strings. case $configure_input in #( *\&* | *\|* | *\\* ) ac_sed_conf_input=`$as_echo "$configure_input" | sed 's/[\\\\&|]/\\\\&/g'`;; #( *) ac_sed_conf_input=$configure_input;; esac case $ac_tag in *:-:* | *:-) cat >"$ac_tmp/stdin" \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; esac ;; esac ac_dir=`$as_dirname -- "$ac_file" || $as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$ac_file" : 'X\(//\)[^/]' \| \ X"$ac_file" : 'X\(//\)$' \| \ X"$ac_file" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$ac_file" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` as_dir="$ac_dir"; as_fn_mkdir_p ac_builddir=. case "$ac_dir" in .) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` # A ".." for each directory in $ac_dir_suffix. ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` case $ac_top_builddir_sub in "") ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; esac ;; esac ac_abs_top_builddir=$ac_pwd ac_abs_builddir=$ac_pwd$ac_dir_suffix # for backward compatibility: ac_top_builddir=$ac_top_build_prefix case $srcdir in .) # We are building in place. ac_srcdir=. ac_top_srcdir=$ac_top_builddir_sub ac_abs_top_srcdir=$ac_pwd ;; [\\/]* | ?:[\\/]* ) # Absolute name. ac_srcdir=$srcdir$ac_dir_suffix; ac_top_srcdir=$srcdir ac_abs_top_srcdir=$srcdir ;; *) # Relative name. ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix ac_top_srcdir=$ac_top_build_prefix$srcdir ac_abs_top_srcdir=$ac_pwd/$srcdir ;; esac ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix case $ac_mode in :F) # # CONFIG_FILE # case $INSTALL in [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;; *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;; esac ac_MKDIR_P=$MKDIR_P case $MKDIR_P in [\\/$]* | ?:[\\/]* ) ;; */*) ac_MKDIR_P=$ac_top_build_prefix$MKDIR_P ;; esac _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # If the template does not know about datarootdir, expand it. # FIXME: This hack should be removed a few years after 2.60. 
ac_datarootdir_hack=; ac_datarootdir_seen= ac_sed_dataroot=' /datarootdir/ { p q } /@datadir@/p /@docdir@/p /@infodir@/p /@localedir@/p /@mandir@/p' case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in *datarootdir*) ac_datarootdir_seen=yes;; *@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 $as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_datarootdir_hack=' s&@datadir@&$datadir&g s&@docdir@&$docdir&g s&@infodir@&$infodir&g s&@localedir@&$localedir&g s&@mandir@&$mandir&g s&\\\${datarootdir}&$datarootdir&g' ;; esac _ACEOF # Neutralize VPATH when `$srcdir' = `.'. # Shell code in configure.ac might set extrasub. # FIXME: do we really want to maintain this feature? cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_sed_extra="$ac_vpsub $extrasub _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 :t /@[a-zA-Z_][a-zA-Z_0-9]*@/!b s|@configure_input@|$ac_sed_conf_input|;t t s&@top_builddir@&$ac_top_builddir_sub&;t t s&@top_build_prefix@&$ac_top_build_prefix&;t t s&@srcdir@&$ac_srcdir&;t t s&@abs_srcdir@&$ac_abs_srcdir&;t t s&@top_srcdir@&$ac_top_srcdir&;t t s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t s&@builddir@&$ac_builddir&;t t s&@abs_builddir@&$ac_abs_builddir&;t t s&@abs_top_builddir@&$ac_abs_top_builddir&;t t s&@INSTALL@&$ac_INSTALL&;t t s&@MKDIR_P@&$ac_MKDIR_P&;t t $ac_datarootdir_hack " eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \ >$ac_tmp/out || as_fn_error $? "could not create $ac_file" "$LINENO" 5 test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } && { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' \ "$ac_tmp/out"`; test -z "$ac_out"; } && { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir' which seems to be undefined. Please make sure it is defined" >&5 $as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' which seems to be undefined. Please make sure it is defined" >&2;} rm -f "$ac_tmp/stdin" case $ac_file in -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";; *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";; esac \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; :H) # # CONFIG_HEADER # if test x"$ac_file" != x-; then { $as_echo "/* $configure_input */" \ && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" } >"$ac_tmp/config.h" \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 if diff "$ac_file" "$ac_tmp/config.h" >/dev/null 2>&1; then { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5 $as_echo "$as_me: $ac_file is unchanged" >&6;} else rm -f "$ac_file" mv "$ac_tmp/config.h" "$ac_file" \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 fi else $as_echo "/* $configure_input */" \ && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" \ || as_fn_error $? "could not create -" "$LINENO" 5 fi # Compute "$ac_file"'s index in $config_headers. 
_am_arg="$ac_file" _am_stamp_count=1 for _am_header in $config_headers :; do case $_am_header in $_am_arg | $_am_arg:* ) break ;; * ) _am_stamp_count=`expr $_am_stamp_count + 1` ;; esac done echo "timestamp for $_am_arg" >`$as_dirname -- "$_am_arg" || $as_expr X"$_am_arg" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$_am_arg" : 'X\(//\)[^/]' \| \ X"$_am_arg" : 'X\(//\)$' \| \ X"$_am_arg" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$_am_arg" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'`/stamp-h$_am_stamp_count ;; :C) { $as_echo "$as_me:${as_lineno-$LINENO}: executing $ac_file commands" >&5 $as_echo "$as_me: executing $ac_file commands" >&6;} ;; esac case $ac_file$ac_mode in "depfiles":C) test x"$AMDEP_TRUE" != x"" || { # Older Autoconf quotes --file arguments for eval, but not when files # are listed without --file. Let's play safe and only enable the eval # if we detect the quoting. case $CONFIG_FILES in *\'*) eval set x "$CONFIG_FILES" ;; *) set x $CONFIG_FILES ;; esac shift for mf do # Strip MF so we end up with the name of the file. mf=`echo "$mf" | sed -e 's/:.*$//'` # Check whether this is an Automake generated Makefile or not. # We used to match only the files named 'Makefile.in', but # some people rename them; so instead we look at the file content. # Grep'ing the first line is not enough: some people post-process # each Makefile.in and add a new line on top of each file to say so. # Grep'ing the whole file is not good either: AIX grep has a line # limit of 2048, but all sed's we know have understand at least 4000. if sed -n 's,^#.*generated by automake.*,X,p' "$mf" | grep X >/dev/null 2>&1; then dirpart=`$as_dirname -- "$mf" || $as_expr X"$mf" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$mf" : 'X\(//\)[^/]' \| \ X"$mf" : 'X\(//\)$' \| \ X"$mf" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$mf" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` else continue fi # Extract the definition of DEPDIR, am__include, and am__quote # from the Makefile without running 'make'. DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"` test -z "$DEPDIR" && continue am__include=`sed -n 's/^am__include = //p' < "$mf"` test -z "$am__include" && continue am__quote=`sed -n 's/^am__quote = //p' < "$mf"` # Find all dependency output files, they are included files with # $(DEPDIR) in their names. We invoke sed twice because it is the # simplest approach to changing $(DEPDIR) to its actual value in the # expansion. for file in `sed -n " s/^$am__include $am__quote\(.*(DEPDIR).*\)$am__quote"'$/\1/p' <"$mf" | \ sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g'`; do # Make sure the directory exists. test -f "$dirpart/$file" && continue fdir=`$as_dirname -- "$file" || $as_expr X"$file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$file" : 'X\(//\)[^/]' \| \ X"$file" : 'X\(//\)$' \| \ X"$file" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$file" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` as_dir=$dirpart/$fdir; as_fn_mkdir_p # echo "creating $dirpart/$file" echo '# dummy' > "$dirpart/$file" done done } ;; "libtool":C) # See if we are running on zsh, and set the options that allow our # commands through without removal of \ escapes. 
if test -n "${ZSH_VERSION+set}"; then setopt NO_GLOB_SUBST fi cfgfile=${ofile}T trap "$RM \"$cfgfile\"; exit 1" 1 2 15 $RM "$cfgfile" cat <<_LT_EOF >> "$cfgfile" #! $SHELL # Generated automatically by $as_me ($PACKAGE) $VERSION # Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: # NOTE: Changes made to this file will be lost: look at ltmain.sh. # Provide generalized library-building support services. # Written by Gordon Matzigkeit, 1996 # Copyright (C) 2014 Free Software Foundation, Inc. # This is free software; see the source for copying conditions. There is NO # warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # GNU Libtool is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of of the License, or # (at your option) any later version. # # As a special exception to the GNU General Public License, if you # distribute this file as part of a program or library that is built # using GNU Libtool, you may include this file under the same # distribution terms that you use for the rest of that program. # # GNU Libtool is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . # The names of the tagged configurations supported by this script. available_tags='CXX ' # Configured defaults for sys_lib_dlsearch_path munging. : \${LT_SYS_LIBRARY_PATH="$configure_time_lt_sys_library_path"} # ### BEGIN LIBTOOL CONFIG # Which release of libtool.m4 was used? macro_version=$macro_version macro_revision=$macro_revision # Whether or not to build shared libraries. build_libtool_libs=$enable_shared # Whether or not to build static libraries. build_old_libs=$enable_static # What type of objects to build. pic_mode=$pic_mode # Whether or not to optimize for fast installation. fast_install=$enable_fast_install # Shared archive member basename,for filename based shared library versioning on AIX. shared_archive_member_spec=$shared_archive_member_spec # Shell to use when invoking shell scripts. SHELL=$lt_SHELL # An echo program that protects backslashes. ECHO=$lt_ECHO # The PATH separator for the build system. PATH_SEPARATOR=$lt_PATH_SEPARATOR # The host system. host_alias=$host_alias host=$host host_os=$host_os # The build system. build_alias=$build_alias build=$build build_os=$build_os # A sed program that does not truncate output. SED=$lt_SED # Sed that helps us avoid accidentally triggering echo(1) options like -n. Xsed="\$SED -e 1s/^X//" # A grep program that handles long lines. GREP=$lt_GREP # An ERE matcher. EGREP=$lt_EGREP # A literal string matcher. FGREP=$lt_FGREP # A BSD- or MS-compatible name lister. NM=$lt_NM # Whether we need soft or hard links. LN_S=$lt_LN_S # What is the maximum length of a command? max_cmd_len=$max_cmd_len # Object file suffix (normally "o"). objext=$ac_objext # Executable file suffix (normally ""). exeext=$exeext # whether the shell understands "unset". lt_unset=$lt_unset # turn spaces into newlines. SP2NL=$lt_lt_SP2NL # turn newlines into spaces. NL2SP=$lt_lt_NL2SP # convert \$build file names to \$host format. to_host_file_cmd=$lt_cv_to_host_file_cmd # convert \$build files to toolchain format. 
to_tool_file_cmd=$lt_cv_to_tool_file_cmd # An object symbol dumper. OBJDUMP=$lt_OBJDUMP # Method to check whether dependent libraries are shared objects. deplibs_check_method=$lt_deplibs_check_method # Command to use when deplibs_check_method = "file_magic". file_magic_cmd=$lt_file_magic_cmd # How to find potential files when deplibs_check_method = "file_magic". file_magic_glob=$lt_file_magic_glob # Find potential files using nocaseglob when deplibs_check_method = "file_magic". want_nocaseglob=$lt_want_nocaseglob # DLL creation program. DLLTOOL=$lt_DLLTOOL # Command to associate shared and link libraries. sharedlib_from_linklib_cmd=$lt_sharedlib_from_linklib_cmd # The archiver. AR=$lt_AR # Flags to create an archive. AR_FLAGS=$lt_AR_FLAGS # How to feed a file listing to the archiver. archiver_list_spec=$lt_archiver_list_spec # A symbol stripping program. STRIP=$lt_STRIP # Commands used to install an old-style archive. RANLIB=$lt_RANLIB old_postinstall_cmds=$lt_old_postinstall_cmds old_postuninstall_cmds=$lt_old_postuninstall_cmds # Whether to use a lock for old archive extraction. lock_old_archive_extraction=$lock_old_archive_extraction # A C compiler. LTCC=$lt_CC # LTCC compiler flags. LTCFLAGS=$lt_CFLAGS # Take the output of nm and produce a listing of raw symbols and C names. global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe # Transform the output of nm in a proper C declaration. global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl # Transform the output of nm into a list of symbols to manually relocate. global_symbol_to_import=$lt_lt_cv_sys_global_symbol_to_import # Transform the output of nm in a C name address pair. global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address # Transform the output of nm in a C name address pair when lib prefix is needed. global_symbol_to_c_name_address_lib_prefix=$lt_lt_cv_sys_global_symbol_to_c_name_address_lib_prefix # The name lister interface. nm_interface=$lt_lt_cv_nm_interface # Specify filename containing input files for \$NM. nm_file_list_spec=$lt_nm_file_list_spec # The root where to search for dependent libraries,and where our libraries should be installed. lt_sysroot=$lt_sysroot # Command to truncate a binary pipe. lt_truncate_bin=$lt_lt_cv_truncate_bin # The name of the directory that contains temporary libtool files. objdir=$objdir # Used to examine libraries when file_magic_cmd begins with "file". MAGIC_CMD=$MAGIC_CMD # Must we lock files when doing compilation? need_locks=$lt_need_locks # Manifest tool. MANIFEST_TOOL=$lt_MANIFEST_TOOL # Tool to manipulate archived DWARF debug symbol files on Mac OS X. DSYMUTIL=$lt_DSYMUTIL # Tool to change global to local symbols on Mac OS X. NMEDIT=$lt_NMEDIT # Tool to manipulate fat objects and archives on Mac OS X. LIPO=$lt_LIPO # ldd/readelf like tool for Mach-O binaries on Mac OS X. OTOOL=$lt_OTOOL # ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4. OTOOL64=$lt_OTOOL64 # Old archive suffix (normally "a"). libext=$libext # Shared library suffix (normally ".so"). shrext_cmds=$lt_shrext_cmds # The commands to extract the exported symbol list from a shared archive. extract_expsyms_cmds=$lt_extract_expsyms_cmds # Variables whose values should be saved in libtool wrapper scripts and # restored at link time. variables_saved_for_relink=$lt_variables_saved_for_relink # Do we need the "lib" prefix for modules? need_lib_prefix=$need_lib_prefix # Do we need a version for libraries? need_version=$need_version # Library versioning type. 
version_type=$version_type # Shared library runtime path variable. runpath_var=$runpath_var # Shared library path variable. shlibpath_var=$shlibpath_var # Is shlibpath searched before the hard-coded library search path? shlibpath_overrides_runpath=$shlibpath_overrides_runpath # Format of library name prefix. libname_spec=$lt_libname_spec # List of archive names. First name is the real one, the rest are links. # The last name is the one that the linker finds with -lNAME library_names_spec=$lt_library_names_spec # The coded name of the library, if different from the real name. soname_spec=$lt_soname_spec # Permission mode override for installation of shared libraries. install_override_mode=$lt_install_override_mode # Command to use after installation of a shared archive. postinstall_cmds=$lt_postinstall_cmds # Command to use after uninstallation of a shared archive. postuninstall_cmds=$lt_postuninstall_cmds # Commands used to finish a libtool library installation in a directory. finish_cmds=$lt_finish_cmds # As "finish_cmds", except a single script fragment to be evaled but # not shown. finish_eval=$lt_finish_eval # Whether we should hardcode library paths into libraries. hardcode_into_libs=$hardcode_into_libs # Compile-time system search path for libraries. sys_lib_search_path_spec=$lt_sys_lib_search_path_spec # Detected run-time system search path for libraries. sys_lib_dlsearch_path_spec=$lt_configure_time_dlsearch_path # Explicit LT_SYS_LIBRARY_PATH set during ./configure time. configure_time_lt_sys_library_path=$lt_configure_time_lt_sys_library_path # Whether dlopen is supported. dlopen_support=$enable_dlopen # Whether dlopen of programs is supported. dlopen_self=$enable_dlopen_self # Whether dlopen of statically linked programs is supported. dlopen_self_static=$enable_dlopen_self_static # Commands to strip libraries. old_striplib=$lt_old_striplib striplib=$lt_striplib # The linker used to build libraries. LD=$lt_LD # How to create reloadable object files. reload_flag=$lt_reload_flag reload_cmds=$lt_reload_cmds # Commands used to build an old-style archive. old_archive_cmds=$lt_old_archive_cmds # A language specific compiler. CC=$lt_compiler # Is the compiler the GNU compiler? with_gcc=$GCC # Compiler flag to turn off builtin functions. no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag # Additional compiler flags for building library objects. pic_flag=$lt_lt_prog_compiler_pic # How to pass a linker flag through the compiler. wl=$lt_lt_prog_compiler_wl # Compiler flag to prevent dynamic linking. link_static_flag=$lt_lt_prog_compiler_static # Does compiler simultaneously support -c and -o options? compiler_c_o=$lt_lt_cv_prog_compiler_c_o # Whether or not to add -lc for building shared libraries. build_libtool_need_lc=$archive_cmds_need_lc # Whether or not to disallow shared libs when runtime libs are static. allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes # Compiler flag to allow reflexive dlopens. export_dynamic_flag_spec=$lt_export_dynamic_flag_spec # Compiler flag to generate shared objects directly from archives. whole_archive_flag_spec=$lt_whole_archive_flag_spec # Whether the compiler copes with passing no objects directly. compiler_needs_object=$lt_compiler_needs_object # Create an old-style archive from a shared archive. old_archive_from_new_cmds=$lt_old_archive_from_new_cmds # Create a temporary old-style archive to link instead of a shared archive. 
old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds # Commands used to build a shared archive. archive_cmds=$lt_archive_cmds archive_expsym_cmds=$lt_archive_expsym_cmds # Commands used to build a loadable module if different from building # a shared archive. module_cmds=$lt_module_cmds module_expsym_cmds=$lt_module_expsym_cmds # Whether we are building with GNU ld or not. with_gnu_ld=$lt_with_gnu_ld # Flag that allows shared libraries with undefined symbols to be built. allow_undefined_flag=$lt_allow_undefined_flag # Flag that enforces no undefined symbols. no_undefined_flag=$lt_no_undefined_flag # Flag to hardcode \$libdir into a binary during linking. # This must work even if \$libdir does not exist hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec # Whether we need a single "-rpath" flag with a separated argument. hardcode_libdir_separator=$lt_hardcode_libdir_separator # Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes # DIR into the resulting binary. hardcode_direct=$hardcode_direct # Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes # DIR into the resulting binary and the resulting library dependency is # "absolute",i.e impossible to change by setting \$shlibpath_var if the # library is relocated. hardcode_direct_absolute=$hardcode_direct_absolute # Set to "yes" if using the -LDIR flag during linking hardcodes DIR # into the resulting binary. hardcode_minus_L=$hardcode_minus_L # Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR # into the resulting binary. hardcode_shlibpath_var=$hardcode_shlibpath_var # Set to "yes" if building a shared library automatically hardcodes DIR # into the library and all subsequent libraries and executables linked # against it. hardcode_automatic=$hardcode_automatic # Set to yes if linker adds runtime paths of dependent libraries # to runtime path list. inherit_rpath=$inherit_rpath # Whether libtool must link a program against all its dependency libraries. link_all_deplibs=$link_all_deplibs # Set to "yes" if exported symbols are required. always_export_symbols=$always_export_symbols # The commands to list exported symbols. export_symbols_cmds=$lt_export_symbols_cmds # Symbols that should not be listed in the preloaded symbols. exclude_expsyms=$lt_exclude_expsyms # Symbols that must always be exported. include_expsyms=$lt_include_expsyms # Commands necessary for linking programs (against libraries) with templates. prelink_cmds=$lt_prelink_cmds # Commands necessary for finishing linking programs. postlink_cmds=$lt_postlink_cmds # Specify filename containing input files. file_list_spec=$lt_file_list_spec # How to hardcode a shared library path into an executable. hardcode_action=$hardcode_action # The directories searched by this compiler when creating a shared library. compiler_lib_search_dirs=$lt_compiler_lib_search_dirs # Dependencies to place before and after the objects being linked to # create a shared library. predep_objects=$lt_predep_objects postdep_objects=$lt_postdep_objects predeps=$lt_predeps postdeps=$lt_postdeps # The library search path used internally by the compiler when linking # a shared library. 
compiler_lib_search_path=$lt_compiler_lib_search_path # ### END LIBTOOL CONFIG _LT_EOF cat <<'_LT_EOF' >> "$cfgfile" # ### BEGIN FUNCTIONS SHARED WITH CONFIGURE # func_munge_path_list VARIABLE PATH # ----------------------------------- # VARIABLE is name of variable containing _space_ separated list of # directories to be munged by the contents of PATH, which is string # having a format: # "DIR[:DIR]:" # string "DIR[ DIR]" will be prepended to VARIABLE # ":DIR[:DIR]" # string "DIR[ DIR]" will be appended to VARIABLE # "DIRP[:DIRP]::[DIRA:]DIRA" # string "DIRP[ DIRP]" will be prepended to VARIABLE and string # "DIRA[ DIRA]" will be appended to VARIABLE # "DIR[:DIR]" # VARIABLE will be replaced by "DIR[ DIR]" func_munge_path_list () { case x$2 in x) ;; *:) eval $1=\"`$ECHO $2 | $SED 's/:/ /g'` \$$1\" ;; x:*) eval $1=\"\$$1 `$ECHO $2 | $SED 's/:/ /g'`\" ;; *::*) eval $1=\"\$$1\ `$ECHO $2 | $SED -e 's/.*:://' -e 's/:/ /g'`\" eval $1=\"`$ECHO $2 | $SED -e 's/::.*//' -e 's/:/ /g'`\ \$$1\" ;; *) eval $1=\"`$ECHO $2 | $SED 's/:/ /g'`\" ;; esac } # Calculate cc_basename. Skip known compiler wrappers and cross-prefix. func_cc_basename () { for cc_temp in $*""; do case $cc_temp in compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; \-*) ;; *) break;; esac done func_cc_basename_result=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"` } # ### END FUNCTIONS SHARED WITH CONFIGURE _LT_EOF case $host_os in aix3*) cat <<\_LT_EOF >> "$cfgfile" # AIX sometimes has problems with the GCC collect2 program. For some # reason, if we set the COLLECT_NAMES environment variable, the problems # vanish in a puff of smoke. if test set != "${COLLECT_NAMES+set}"; then COLLECT_NAMES= export COLLECT_NAMES fi _LT_EOF ;; esac ltmain=$ac_aux_dir/ltmain.sh # We use sed instead of cat because bash on DJGPP gets confused if # if finds mixed CR/LF and LF-only lines. Since sed operates in # text mode, it properly converts lines to CR/LF. This bash problem # is reportedly fixed, but why not run on old versions too? sed '$q' "$ltmain" >> "$cfgfile" \ || (rm -f "$cfgfile"; exit 1) mv -f "$cfgfile" "$ofile" || (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") chmod +x "$ofile" cat <<_LT_EOF >> "$ofile" # ### BEGIN LIBTOOL TAG CONFIG: CXX # The linker used to build libraries. LD=$lt_LD_CXX # How to create reloadable object files. reload_flag=$lt_reload_flag_CXX reload_cmds=$lt_reload_cmds_CXX # Commands used to build an old-style archive. old_archive_cmds=$lt_old_archive_cmds_CXX # A language specific compiler. CC=$lt_compiler_CXX # Is the compiler the GNU compiler? with_gcc=$GCC_CXX # Compiler flag to turn off builtin functions. no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag_CXX # Additional compiler flags for building library objects. pic_flag=$lt_lt_prog_compiler_pic_CXX # How to pass a linker flag through the compiler. wl=$lt_lt_prog_compiler_wl_CXX # Compiler flag to prevent dynamic linking. link_static_flag=$lt_lt_prog_compiler_static_CXX # Does compiler simultaneously support -c and -o options? compiler_c_o=$lt_lt_cv_prog_compiler_c_o_CXX # Whether or not to add -lc for building shared libraries. build_libtool_need_lc=$archive_cmds_need_lc_CXX # Whether or not to disallow shared libs when runtime libs are static. allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes_CXX # Compiler flag to allow reflexive dlopens. 
export_dynamic_flag_spec=$lt_export_dynamic_flag_spec_CXX # Compiler flag to generate shared objects directly from archives. whole_archive_flag_spec=$lt_whole_archive_flag_spec_CXX # Whether the compiler copes with passing no objects directly. compiler_needs_object=$lt_compiler_needs_object_CXX # Create an old-style archive from a shared archive. old_archive_from_new_cmds=$lt_old_archive_from_new_cmds_CXX # Create a temporary old-style archive to link instead of a shared archive. old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds_CXX # Commands used to build a shared archive. archive_cmds=$lt_archive_cmds_CXX archive_expsym_cmds=$lt_archive_expsym_cmds_CXX # Commands used to build a loadable module if different from building # a shared archive. module_cmds=$lt_module_cmds_CXX module_expsym_cmds=$lt_module_expsym_cmds_CXX # Whether we are building with GNU ld or not. with_gnu_ld=$lt_with_gnu_ld_CXX # Flag that allows shared libraries with undefined symbols to be built. allow_undefined_flag=$lt_allow_undefined_flag_CXX # Flag that enforces no undefined symbols. no_undefined_flag=$lt_no_undefined_flag_CXX # Flag to hardcode \$libdir into a binary during linking. # This must work even if \$libdir does not exist hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec_CXX # Whether we need a single "-rpath" flag with a separated argument. hardcode_libdir_separator=$lt_hardcode_libdir_separator_CXX # Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes # DIR into the resulting binary. hardcode_direct=$hardcode_direct_CXX # Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes # DIR into the resulting binary and the resulting library dependency is # "absolute",i.e impossible to change by setting \$shlibpath_var if the # library is relocated. hardcode_direct_absolute=$hardcode_direct_absolute_CXX # Set to "yes" if using the -LDIR flag during linking hardcodes DIR # into the resulting binary. hardcode_minus_L=$hardcode_minus_L_CXX # Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR # into the resulting binary. hardcode_shlibpath_var=$hardcode_shlibpath_var_CXX # Set to "yes" if building a shared library automatically hardcodes DIR # into the library and all subsequent libraries and executables linked # against it. hardcode_automatic=$hardcode_automatic_CXX # Set to yes if linker adds runtime paths of dependent libraries # to runtime path list. inherit_rpath=$inherit_rpath_CXX # Whether libtool must link a program against all its dependency libraries. link_all_deplibs=$link_all_deplibs_CXX # Set to "yes" if exported symbols are required. always_export_symbols=$always_export_symbols_CXX # The commands to list exported symbols. export_symbols_cmds=$lt_export_symbols_cmds_CXX # Symbols that should not be listed in the preloaded symbols. exclude_expsyms=$lt_exclude_expsyms_CXX # Symbols that must always be exported. include_expsyms=$lt_include_expsyms_CXX # Commands necessary for linking programs (against libraries) with templates. prelink_cmds=$lt_prelink_cmds_CXX # Commands necessary for finishing linking programs. postlink_cmds=$lt_postlink_cmds_CXX # Specify filename containing input files. file_list_spec=$lt_file_list_spec_CXX # How to hardcode a shared library path into an executable. hardcode_action=$hardcode_action_CXX # The directories searched by this compiler when creating a shared library. 
compiler_lib_search_dirs=$lt_compiler_lib_search_dirs_CXX # Dependencies to place before and after the objects being linked to # create a shared library. predep_objects=$lt_predep_objects_CXX postdep_objects=$lt_postdep_objects_CXX predeps=$lt_predeps_CXX postdeps=$lt_postdeps_CXX # The library search path used internally by the compiler when linking # a shared library. compiler_lib_search_path=$lt_compiler_lib_search_path_CXX # ### END LIBTOOL TAG CONFIG: CXX _LT_EOF ;; "tsk/tsk_incs.h":C) echo "#ifndef _TSK_INCS_H" > tsk/tsk_incs.h echo "#define _TSK_INCS_H" >> tsk/tsk_incs.h echo "// automatically by ./configure" >> tsk/tsk_incs.h echo "// Contains the config.h data needed by programs that use libtsk" >> tsk/tsk_incs.h echo "" >> tsk/tsk_incs.h if test x$ac_cv_header_unistd_h = xyes; then echo "#include " >> tsk/tsk_incs.h fi if test x$ac_cv_header_inttypes_h = xyes; then echo "#ifndef __STDC_FORMAT_MACROS" >> tsk/tsk_incs.h echo "#define __STDC_FORMAT_MACROS" >> tsk/tsk_incs.h echo "#endif" >> tsk/tsk_incs.h echo "#include " >> tsk/tsk_incs.h fi if test x$ac_cv_header_sys_param_h = xyes; then echo "#include " >> tsk/tsk_incs.h fi if test x$ax_pthread_ok = xyes; then echo "#define TSK_MULTITHREAD_LIB // set because we have pthreads" >> tsk/tsk_incs.h fi echo "" >> tsk/tsk_incs.h echo "#endif" >> tsk/tsk_incs.h ;; esac done # for ac_tag as_fn_exit 0 _ACEOF ac_clean_files=$ac_clean_files_save test $ac_write_fail = 0 || as_fn_error $? "write failure creating $CONFIG_STATUS" "$LINENO" 5 # configure is writing to config.log, and then calls config.status. # config.status does its own redirection, appending to config.log. # Unfortunately, on DOS this fails, as config.log is still kept open # by configure, so config.status won't be able to write to it; its # output is simply discarded. So we exec the FD to /dev/null, # effectively closing config.log, so it can be properly (re)opened and # appended to by config.status. When coming back to configure, we # need to make the FD available again. if test "$no_create" != yes; then ac_cs_success=: ac_config_status_args= test "$silent" = yes && ac_config_status_args="$ac_config_status_args --quiet" exec 5>/dev/null $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false exec 5>>config.log # Use ||, not &&, to avoid exiting from the if with $? = 1, which # would make configure fail if this is the last instruction. $ac_cs_success || as_fn_exit 1 fi if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5 $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;} fi { $as_echo "$as_me:${as_lineno-$LINENO}: Building: afflib support: $ax_afflib libewf support: $ax_libewf zlib support: $ax_zlib Features: Java/JNI support: $ax_java_support " >&5 $as_echo "$as_me: Building: afflib support: $ax_afflib libewf support: $ax_libewf zlib support: $ax_zlib Features: Java/JNI support: $ax_java_support " >&6;}; sleuthkit-4.2.0/configure.ac000644 000765 000024 00000026706 12576320700 016626 0ustar00carrierstaff000000 000000 dnl -*- Autoconf -*- dnl Process this file with autoconf to produce a configure script. AC_PREREQ(2.59) AC_INIT(sleuthkit, 4.2.0) m4_include([m4/ax_pthread.m4]) dnl include the version from 1.12.1. 
This will work for m4_include([m4/cppunit.m4]) m4_include([m4/ax_jni_include_dir.m4]) m4_include([m4/ac_prog_javac_works.m4]) m4_include([m4/ac_prog_javac.m4]) m4_include([m4/ac_prog_java_works.m4]) m4_include([m4/ac_prog_java.m4]) AC_CONFIG_SRCDIR([tsk/base/tsk_base.h]) AC_CONFIG_HEADERS([tsk/tsk_config.h]) AC_CONFIG_AUX_DIR(config) AM_INIT_AUTOMAKE([foreign]) AM_PATH_CPPUNIT(1.12.1) AM_CONDITIONAL([CPPUNIT],[test "x$no_cppunit" = x]) AM_PROG_LIBTOOL AM_MAINTAINER_MODE AC_CONFIG_MACRO_DIR([m4]) dnl Checks for programs. AC_PROG_CXX AC_PROG_CC AC_PROG_CPP AC_PROG_INSTALL AC_PROG_LN_S AC_PROG_MAKE_SET AC_PATH_PROG(PERL, perl) dnl Checks for header files. AC_HEADER_STDC dnl AC_HEADER_MAJOR dnl AC_HEADER_SYS_WAIT dnl AC_CHECK_HEADERS([fcntl.h inttypes.h limits.h locale.h memory.h netinet/in.h stdint.h stdlib.h string.h sys/ioctl.h sys/param.h sys/time.h unistd.h utime.h wchar.h wctype.h]) AC_CHECK_HEADERS([err.h inttypes.h unistd.h stdint.h sys/param.h sys/resource.h]) dnl Checks for typedefs, structures, and compiler characteristics. AC_HEADER_STDBOOL AC_C_CONST AC_TYPE_UID_T AC_TYPE_MODE_T AC_TYPE_OFF_T AC_TYPE_SIZE_T dnl AC_CHECK_MEMBERS([struct stat.st_rdev]) dnl AC_HEADER_TIME dnl AC_STRUCT_TM dnl check for large file support AC_SYS_LARGEFILE dnl Checks for library functions. AC_FUNC_ALLOCA AC_FUNC_ERROR_AT_LINE dnl AC_FUNC_FORK AC_FUNC_FSEEKO AC_PROG_GCC_TRADITIONAL AC_FUNC_LSTAT AC_FUNC_LSTAT_FOLLOWS_SLASHED_SYMLINK dnl AC_FUNC_MALLOC dnl AC_FUNC_MBRTOWC dnl AC_FUNC_MEMCMP dnl AC_FUNC_MKTIME dnl AC_FUNC_MMAP dnl AC_FUNC_REALLOC AC_FUNC_SELECT_ARGTYPES dnl AC_FUNC_STAT AC_FUNC_UTIME_NULL AC_FUNC_VPRINTF dnl AC_CHECK_FUNCS([dup2 gethostname isascii iswprint memset munmap regcomp select setlocale strcasecmp strchr strdup strerror strndup strrchr strtol strtoul strtoull utime wcwidth]) AC_CHECK_FUNCS([ishexnumber err errx warn warnx vasprintf getrusage]) AC_CHECK_FUNCS([strlcpy strlcat]) AX_PTHREAD([ AC_DEFINE(HAVE_PTHREAD,1,[Define if you have POSIX threads libraries and header files.]) CLIBS="$PTHREAD_LIBS $LIBS" CPPFLAGS="$CPPFLAGS $PTHREAD_CFLAGS" LDFLAGS="$LDFLAGS $PTHREAD_CFLAGS" CC="$PTHREAD_CC"],[]) case "$host" in *-*-mingw*) dnl Adding the native /usr/local is wrong for cross-compiling ;; *) dnl Not all compilers include /usr/local in the include and link path if test -d /usr/local/include; then CPPFLAGS="$CPPFLAGS -I/usr/local/include" LDFLAGS="$LDFLAGS -L/usr/local/lib" fi ;; esac dnl Add enable/disable option AC_ARG_ENABLE([java], [AS_HELP_STRING([--disable-java], [Do not build the java bindings or jar file])]) dnl Checks for libraries. dnl Some platforms will complain about missing included functions if libstdc++ is not included. 
AC_CHECK_LIB(stdc++, main, , AC_MSG_ERROR([missing libstdc++])) AC_CHECK_HEADERS(list, , , AC_MSG_ERROR([missing STL list class header])) AC_CHECK_HEADERS(map, , , AC_MSG_ERROR([missing STL map class header])) AC_CHECK_HEADERS(queue, , , AC_MSG_ERROR([missing STL queue class header])) AC_CHECK_HEADERS(set, , , AC_MSG_ERROR([missing STL set class header])) AC_CHECK_HEADERS(stack, , , AC_MSG_ERROR([missing STL stack class header])) AC_CHECK_HEADERS(streambuf, , , AC_MSG_ERROR([missing STL streambuf class header])) AC_CHECK_HEADERS(string, , , AC_MSG_ERROR([missing STL string class header])) AC_CHECK_HEADERS(vector, , , AC_MSG_ERROR([missing STL vector class header])) dnl needed for sqllite AC_CHECK_LIB(dl, dlopen) dnl sqlite requires pthread libraries - this was copied from its configure.ac dnl AC_SEARCH_LIBS(pthread_create, pthread) dnl AC_SEARCH_LIBS(dlopen, dl) # Check if we should link afflib. AC_ARG_WITH([afflib], [AS_HELP_STRING([--without-afflib],[Do not use AFFLIB even if it is installed])] [AS_HELP_STRING([--with-afflib=dir],[Specify that AFFLIB is installed in directory 'dir'])], dnl If --with-afflib or --without-afflib is given [], dnl If --with-afflib or --without-afflib is given [with_afflib=yes]) dnl check for the lib if they did not specify no AS_IF([test "x$with_afflib" != "xno"], dnl Test the dir if they specified something beyond yes/no [AS_IF([test "x$with_afflib" != "xyes"], [AS_IF([test -d ${with_afflib}/include], [CPPFLAGS="$CPPFLAGS -I${with_afflib}/include" LDFLAGS="$LDFLAGS -L${with_afflib}/lib"], dnl Dir given was not correct [AC_MSG_FAILURE([AFFLIB directory not found at ${with_afflib}])]) ] )] dnl Check for the header file first to make sure they have the dev install [AC_CHECK_HEADERS([afflib/afflib.h], [AC_CHECK_LIB([afflib], [af_open])] )] ) AS_IF([test "x$ac_cv_lib_afflib_af_open" = "xyes"], [ax_afflib=yes], [ax_afflib=no]) dnl Check if we should link zlib AC_ARG_WITH([zlib], [AS_HELP_STRING([--without-zlib],[Do not use ZLIB even if it is installed])] [AS_HELP_STRING([--with-zlib=dir],[Specify that ZLIB is installed in directory 'dir'])], dnl If --with-zlib or --without-zlib is given [], dnl if nothing was specified, default to a test [with_zlib=yes]) dnl check for the lib if they did not specify no AS_IF( [test "x$with_zlib" != "xno"], [AC_MSG_NOTICE([checking for zlib])] dnl Test the dir if they specified something beyond yes/no [AS_IF([test "x$with_zlib" != "xyes"], [AC_MSG_NOTICE([LOOKING for zlib in ${with_zlib}])] [AS_IF([test -d ${with_zlib}], [CPPFLAGS="$CPPFLAGS -I${with_zlib}/include" LDFLAGS="$LDFLAGS -L${with_zlib}/lib"], dnl Dir given was not correct [AC_MSG_FAILURE([ZLIB directory not found at ${with_zlib}])] )] )] dnl Check for the header file first to make sure they have the dev install [AC_CHECK_HEADERS([zlib.h], [AC_CHECK_LIB([z], [inflate], [], [AC_MSG_WARN([Found zlib headers, but could not link to zlib library. Will build without zlib.])] [with_zlib=no] )], [AC_MSG_WARN([Could not find usable zlib headers. Will build without zlib.])] [with_zlib=no] )], [AC_MSG_NOTICE([NOT checking for zlib because with_zlib is no])] ) AS_IF([test "x$ac_cv_lib_z_inflate" = "xyes"], [ax_zlib=yes], [ax_zlib=no]) AM_CONDITIONAL([X_ZLIB],[test "x$with_zlib" != "xno" && test "x$with_zlib" != "xyes"]) AS_IF([test "x$with_zlib" != "xno"], [Z_PATH="${with_zlib}/lib"], [AC_MSG_NOTICE([failed to make Z_PATH])] ) AC_SUBST(Z_PATH, $Z_PATH) dnl needed for sqllite AC_CHECK_LIB(dl, dlopen) dnl Check if we should link libewf. 
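dnl (Note: the libewf test below follows the same pattern as the afflib and
dnl zlib tests above: AC_ARG_WITH registers --with-X/--without-X, an optional
dnl directory argument is turned into extra -I/-L flags, and AC_CHECK_HEADERS
dnl plus AC_CHECK_LIB confirm that a usable development install is present.
dnl For example, a build against a privately installed libewf could be
dnl configured with something like the following; the path is illustrative only:
dnl   ./configure --with-libewf=/opt/libewf --without-afflib
dnl )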
AC_ARG_WITH([libewf], [AS_HELP_STRING([--without-libewf],[Do not use libewf even if it is installed])] [AS_HELP_STRING([--with-libewf=dir],[Specify that libewf is installed in directory 'dir'])], dnl If --with-libewf or --without-libewf is given [], dnl if nothing was specified, default to a test [with_libewf=yes]) dnl check for the lib if they did not specify no AS_IF([test "x$with_libewf" != "xno"], dnl Test the dir if they specified something beyond yes/no [AS_IF([test "x$with_libewf" != "xyes"], [AS_IF([test -d ${with_libewf}/include], [CPPFLAGS="$CPPFLAGS -I${with_libewf}/include" LDFLAGS="$LDFLAGS -L${with_libewf}/lib"], dnl Dir given was not correct [AC_MSG_FAILURE([libewf directory not found at ${with_libewf}])]) ] )] dnl Check for the header file first to make sure they have the dev install [AC_CHECK_HEADERS([libewf.h], [AC_CHECK_LIB([ewf], [libewf_get_version], [], [NO_LIBEWF=true])] )] ) AS_IF([test "x$ac_cv_lib_ewf_libewf_get_version" = "xyes"], [ax_libewf=yes], [ax_libewf=no]) dnl sqlite requires pthread libraries - this was copied from its configure.ac dnl AC_SEARCH_LIBS(pthread_create, pthread) AC_SEARCH_LIBS(dlopen, dl) dnl Test for the various java things that we need for bindings AS_IF([test "x$enable_java" != "xno"], [ dnl javac is needed to compile the JAR file AC_PROG_JAVAC if test "x$JAVAC" != x; then AX_JNI_INCLUDE_DIR for JNI_INCLUDE_DIR in $JNI_INCLUDE_DIRS do JNI_CPPFLAGS="$JNI_CPPFLAGS -I$JNI_INCLUDE_DIR" done dnl Export the paths so that the makefile gets them AC_SUBST(JNI_CPPFLAGS, $JNI_CPPFLAGS) fi dnl java is needed by ant dnl we had one report of a system with javac and not java AC_PROG_JAVA dnl Test is ant is available AC_PATH_PROG([ANT_FOUND], [ant], []) ]) dnl test enable_java dnl if we found everything we need, set ax_java_support for the dnl status message and set X_JNI for use in Makefile AS_IF([test "x$JNI_CPPFLAGS" != x && test "x$ANT_FOUND" != x && test "x$JAVA" != x], [ax_java_support=yes], [ax_java_support=no]) AM_CONDITIONAL([X_JNI],[test "x$ax_java_support" == "xyes"]) AC_CONFIG_COMMANDS([tsk/tsk_incs.h], [echo "#ifndef _TSK_INCS_H" > tsk/tsk_incs.h echo "#define _TSK_INCS_H" >> tsk/tsk_incs.h echo "// automatically by ./configure" >> tsk/tsk_incs.h echo "// Contains the config.h data needed by programs that use libtsk" >> tsk/tsk_incs.h echo "" >> tsk/tsk_incs.h if test x$ac_cv_header_unistd_h = xyes; then echo "#include " >> tsk/tsk_incs.h fi if test x$ac_cv_header_inttypes_h = xyes; then echo "#ifndef __STDC_FORMAT_MACROS" >> tsk/tsk_incs.h echo "#define __STDC_FORMAT_MACROS" >> tsk/tsk_incs.h echo "#endif" >> tsk/tsk_incs.h echo "#include " >> tsk/tsk_incs.h fi if test x$ac_cv_header_sys_param_h = xyes; then echo "#include " >> tsk/tsk_incs.h fi if test x$ax_pthread_ok = xyes; then echo "#define TSK_MULTITHREAD_LIB // set because we have pthreads" >> tsk/tsk_incs.h fi echo "" >> tsk/tsk_incs.h echo "#endif" >> tsk/tsk_incs.h], [ac_cv_header_unistd_h=$ac_cv_header_unistd_h ac_cv_header_inttypes_h=$ac_cv_header_inttypes_h ac_cv_header_sys_param_h=$ac_cv_header_sys_param_h ax_pthread_ok=$ax_pthread_ok]) AC_MSG_CHECKING([if libtool needs -no-undefined flag to build shared libraries]) case "$host" in *-*-mingw*) dnl Add -no-undefined flag to LDFLAGS to let libtool build DLLs. AC_MSG_RESULT([yes]) LIBTSK_LDFLAGS="-no-undefined" AC_SUBST([LIBTSK_LDFLAGS]) ;; *) dnl No additional flags needed. 
AC_MSG_RESULT([no]) ;; esac dnl Dependencies for fiwalk AC_CHECK_FUNCS([getline]) AC_SEARCH_LIBS(regexec, [regex], , AC_MSG_ERROR([missing regex])) AC_CONFIG_FILES([ Makefile tsk/Makefile tsk/base/Makefile tsk/img/Makefile tsk/vs/Makefile tsk/fs/Makefile tsk/hashdb/Makefile tsk/auto/Makefile tools/Makefile tools/imgtools/Makefile tools/vstools/Makefile tools/fstools/Makefile tools/hashtools/Makefile tools/srchtools/Makefile tools/autotools/Makefile tools/sorter/Makefile tools/timeline/Makefile tools/fiwalk/Makefile tools/fiwalk/src/Makefile tools/fiwalk/plugins/Makefile tests/Makefile samples/Makefile man/Makefile bindings/java/Makefile bindings/java/jni/Makefile unit_tests/Makefile unit_tests/base/Makefile]) AC_OUTPUT dnl Print a summary AC_MSG_NOTICE([ Building: afflib support: $ax_afflib libewf support: $ax_libewf zlib support: $ax_zlib Features: Java/JNI support: $ax_java_support ]); sleuthkit-4.2.0/docs/000755 000765 000024 00000000000 12576326344 015267 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/framework/000755 000765 000024 00000000000 12576326345 016335 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/INSTALL.txt000644 000765 000024 00000007634 12576320677 016223 0ustar00carrierstaff000000 000000 The Sleuth Kit http://www.sleuthkit.org/sleuthkit Installation Instructions Last Modified: Oct 2012 REQUIREMENTS ============================================================================= Tested Platform: - FreeBSD 2-6.* - Linux 2.* - OpenBSD 2-3.* - Mac OS X - SunOS 4-5.* - Windows Build System (to compile from a source distribution): - C/C++ compiler - GNU Make - Java compiler / JDK (if you want the java bindings) Development System (to extend TSK or compile from the repository): - GNU autoconf, automake, and libtool - Plus the build system requirements Optional Programs: - Autopsy: Provides a graphical HTML-based interface to The Sleuth Kit (which makes it much easier to use). Install this AFTER installing The Sleuth Kit. Available at: http://www.sleuthkit.org/autopsy Optional Libraries: There are optional features that TSK can use if you have installed them before you build and install TSK. - AFFLIB: Allows you to process disk images that are stored in the AFF format. Version 3.3.6 has been tested to compile and work with this release. Available at: http://www.afflib.org - LibEWF: Allows you to process disk images that are stored in the Expert Witness format (EnCase Format). Version 20130128 has been tested to compile and work with this release. Available at: http://sourceforge.net/projects/libewf/ INSTALLATION ============================================================================= Refer to the README_win32.txt file for details on Windows. The Sleuth Kit uses the GNU autotools for building and installation. There are a few steps to this process. First, run the 'configure' script in the root TSK directory. See the CONFIGURE OPTIONS section for useful arguments that can be given to 'configure. $ ./configure If there were no errors, then run 'make'. If you do not have a 'configure' script, then it is probably because you cloned the source code repository. If so, you will need to have automake, autoconf, and libtool installed and you can create the configure script using the 'bootstrap' script in the root directory. $ make The 'make' process will take a while and will build the TSK tools. When this process is complete, the libraries and executables will be located in the TSK sub-directories. To install them, type 'make install'. 
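If you want the files installed somewhere other than the default /usr/local/
tree, the standard autotools --prefix option can be passed to 'configure'
before building (the path below is only an example):

$ ./configure --prefix=/opt/sleuthkit

Otherwise, simply run the install target with the default prefix.
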
$ make install By default, this will copy everything in to the /usr/local/ structure. So, the executables will be in '/usr/local/bin'. This directory will need to be in your PATH if you want to run the TSK commands without specifying '/usr/local/bin' everytime. If you get an error like: libtool: Version mismatch error. This is libtool 2.2.10, but the libtool: definition of this LT_INIT comes from libtool 2.2.4. libtool: You should recreate aclocal.m4 with macros from libtool 2.2.10 libtool: and run autoconf again. Run: ./bootstrap and then go back to running configure and make. To run 'bootstrap', you'll need to have the autotools installed (see the list at the top of this page). CONFIGURE OPTIONS ----------------------------------------------------------------------------- There are some arguments to 'configure' that you can supply to customize the setup. Currently, they focus on the optional disk image format libraries. --without-afflib: Supply this if you want TSK to ignore AFFLIB even if it is installed. --with-afflib=dir: Supply this if you want TSK to look in 'dir' for the AFFLIB installation (the directory should have 'lib' and 'include' directories in it). --without-ewf: Supply this if you want TSK to ignore libewf even if it is installed. --with-libewf=dir: Supply this if you want TSK to look in 'dir' for the libewf installation (the directory should have 'lib' and 'include' directories in it). ----------------------------------------------------------------------------- Brian Carrier carrier sleuthkit org sleuthkit-4.2.0/licenses/000755 000765 000024 00000000000 12576326344 016144 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/m4/000755 000765 000024 00000000000 12576326344 014657 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/Makefile.am000644 000765 000024 00000006251 12576320677 016402 0ustar00carrierstaff000000 000000 # File that we want to include in the dist EXTRA_DIST = README_win32.txt README.md INSTALL.txt ChangeLog.txt NEWS.txt \ licenses/GNU-COPYING licenses/IBM-LICENSE licenses/cpl1.0.txt \ m4/*.m4 \ docs/README.txt \ packages/sleuthkit.spec \ win32/BUILDING.txt \ win32/NOTES.txt \ win32/*/*.vcxproj \ win32/tsk-win.sln \ win32/docs/* \ bindings/java/README.txt \ bindings/java/*.xml \ bindings/java/doxygen/Doxyfile \ bindings/java/doxygen/*.dox \ bindings/java/doxygen/*.html \ bindings/java/nbproject/project.xml \ bindings/java/src/org/sleuthkit/datamodel/*.java \ bindings/java/src/org/sleuthkit/datamodel/*.html \ bindings/java/src/*.html \ framework/*.txt \ framework/Makefile.am \ framework/bootstrap \ framework/config/* \ framework/config.h.in \ framework/configure.ac \ framework/m4/* \ framework/tsk/framework/*.h \ framework/tsk/framework/Makefile.am \ framework/tsk/framework/extraction/Makefile.am \ framework/tsk/framework/extraction/*.cpp \ framework/tsk/framework/extraction/*.h \ framework/tsk/framework/file/Makefile.am \ framework/tsk/framework/file/*.cpp \ framework/tsk/framework/file/*.h \ framework/tsk/framework/pipeline/Makefile.am \ framework/tsk/framework/pipeline/*.cpp \ framework/tsk/framework/pipeline/*.h \ framework/tsk/framework/services/Makefile.am \ framework/tsk/framework/services/*.cpp \ framework/tsk/framework/services/*.h \ framework/tsk/framework/utilities/Makefile.am \ framework/tsk/framework/utilities/*.cpp \ framework/tsk/framework/utilities/*.h \ framework/modules/* \ framework/SampleConfig/*.xml \ framework/docs/* \ framework/man/* \ framework/tools/tsk_analyzeimg/Makefile.am \ framework/tools/tsk_analyzeimg/*.cpp \ 
framework/tools/tsk_validatepipeline/Makefile.am \ framework/tools/tsk_validatepipeline/*.cpp \ framework/tools/Makefile.am \ framework/msvcpp/*/*.vcxproj \ framework/msvcpp/*/*.sln \ framework/msvcpp/Makefile ACLOCAL_AMFLAGS = -I m4 # directories to compile if CPPUNIT UNIT_TESTS=unit_tests endif # Compile java bindings if all of the dependencies existed if X_JNI JAVA_BINDINGS=bindings/java else JAVA_BINDINGS= endif SUBDIRS = tsk tools tests samples man $(UNIT_TESTS) $(JAVA_BINDINGS) nobase_include_HEADERS = tsk/libtsk.h tsk/tsk_incs.h \ tsk/base/tsk_base.h tsk/base/tsk_os.h \ tsk/img/tsk_img.h tsk/vs/tsk_vs.h \ tsk/vs/tsk_bsd.h tsk/vs/tsk_dos.h tsk/vs/tsk_gpt.h \ tsk/vs/tsk_mac.h tsk/vs/tsk_sun.h \ tsk/fs/tsk_fs.h tsk/fs/tsk_ffs.h tsk/fs/tsk_ext2fs.h tsk/fs/tsk_fatfs.h \ tsk/fs/tsk_ntfs.h tsk/fs/tsk_iso9660.h tsk/fs/tsk_hfs.h tsk/fs/tsk_yaffs.h \ tsk/fs/tsk_exfatfs.h tsk/fs/tsk_fatxxfs.h \ tsk/hashdb/tsk_hashdb.h tsk/auto/tsk_auto.h tsk/auto/sqlite3.h nobase_dist_data_DATA = tsk/sorter/default.sort tsk/sorter/freebsd.sort \ tsk/sorter/images.sort tsk/sorter/linux.sort tsk/sorter/openbsd.sort \ tsk/sorter/solaris.sort tsk/sorter/windows.sort api-docs: doxygen tsk/docs/Doxyfile doxygen framework/docs/Doxyfile doxygen bindings/java/doxygen/Doxyfile man-html: cd man;build-html sleuthkit-4.2.0/Makefile.in000644 000765 000024 00000100164 12576326276 016412 0ustar00carrierstaff000000 000000 # Makefile.in generated by automake 1.15 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2014 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) 
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) pkgdatadir = $(datadir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ pkglibexecdir = $(libexecdir)/@PACKAGE@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = . ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ax_pthread.m4 \ $(top_srcdir)/m4/cppunit.m4 \ $(top_srcdir)/m4/ax_jni_include_dir.m4 \ $(top_srcdir)/m4/ac_prog_javac_works.m4 \ $(top_srcdir)/m4/ac_prog_javac.m4 \ $(top_srcdir)/m4/ac_prog_java_works.m4 \ $(top_srcdir)/m4/ac_prog_java.m4 $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(top_srcdir)/configure \ $(am__configure_deps) $(nobase_dist_data_DATA) \ $(nobase_include_HEADERS) $(am__DIST_COMMON) am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \ configure.lineno config.status.lineno mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/tsk/tsk_config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ ctags-recursive dvi-recursive html-recursive info-recursive \ install-data-recursive install-dvi-recursive \ install-exec-recursive install-html-recursive \ install-info-recursive install-pdf-recursive \ install-ps-recursive install-recursive installcheck-recursive \ installdirs-recursive pdf-recursive ps-recursive \ tags-recursive uninstall-recursive am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed 
"s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } am__installdirs = "$(DESTDIR)$(datadir)" "$(DESTDIR)$(includedir)" DATA = $(nobase_dist_data_DATA) HEADERS = $(nobase_include_HEADERS) RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive am__recursive_targets = \ $(RECURSIVE_TARGETS) \ $(RECURSIVE_CLEAN_TARGETS) \ $(am__extra_recursive_targets) AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ cscope distdir dist dist-all distcheck am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags CSCOPE = cscope DIST_SUBDIRS = tsk tools tests samples man unit_tests bindings/java am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/config/compile \ $(top_srcdir)/config/config.guess \ $(top_srcdir)/config/config.sub \ $(top_srcdir)/config/install-sh $(top_srcdir)/config/ltmain.sh \ $(top_srcdir)/config/missing config/compile \ config/config.guess config/config.sub config/install-sh \ config/ltmain.sh config/missing DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) distdir = $(PACKAGE)-$(VERSION) top_distdir = $(distdir) am__remove_distdir = \ if test -d "$(distdir)"; then \ find "$(distdir)" -type d ! 
-perm -200 -exec chmod u+w {} ';' \ && rm -rf "$(distdir)" \ || { sleep 5 && rm -rf "$(distdir)"; }; \ else :; fi am__post_remove_distdir = $(am__remove_distdir) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" DIST_ARCHIVES = $(distdir).tar.gz GZIP_ENV = --best DIST_TARGETS = dist-gzip distuninstallcheck_listfiles = find . -type f -print am__distuninstallcheck_listfiles = $(distuninstallcheck_listfiles) \ | sed 's|^\./|$(prefix)/|' | grep -v '$(infodir)/dir$$' distcleancheck_listfiles = find . -type f -print ACLOCAL = @ACLOCAL@ ALLOCA = @ALLOCA@ AMTAR = @AMTAR@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ ANT_FOUND = @ANT_FOUND@ AR = @AR@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ JAVA = @JAVA@ JAVAC = @JAVAC@ JNI_CPPFLAGS = @JNI_CPPFLAGS@ LD = @LD@ LDFLAGS = @LDFLAGS@ LIBOBJS = @LIBOBJS@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBTSK_LDFLAGS = @LIBTSK_LDFLAGS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAINT = @MAINT@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ NM = @NM@ NMEDIT = @NMEDIT@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PERL = @PERL@ PTHREAD_CC = @PTHREAD_CC@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ PTHREAD_LIBS = @PTHREAD_LIBS@ RANLIB = @RANLIB@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ STRIP = @STRIP@ VERSION = @VERSION@ Z_PATH = @Z_PATH@ _ACJNI_JAVAC = @_ACJNI_JAVAC@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ ax_pthread_config = @ax_pthread_config@ bindir = @bindir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = 
@build_vendor@ builddir = @builddir@ datadir = @datadir@ datarootdir = @datarootdir@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ prefix = @prefix@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ uudecode = @uudecode@ # File that we want to include in the dist EXTRA_DIST = README_win32.txt README.md INSTALL.txt ChangeLog.txt NEWS.txt \ licenses/GNU-COPYING licenses/IBM-LICENSE licenses/cpl1.0.txt \ m4/*.m4 \ docs/README.txt \ packages/sleuthkit.spec \ win32/BUILDING.txt \ win32/NOTES.txt \ win32/*/*.vcxproj \ win32/tsk-win.sln \ win32/docs/* \ bindings/java/README.txt \ bindings/java/*.xml \ bindings/java/doxygen/Doxyfile \ bindings/java/doxygen/*.dox \ bindings/java/doxygen/*.html \ bindings/java/nbproject/project.xml \ bindings/java/src/org/sleuthkit/datamodel/*.java \ bindings/java/src/org/sleuthkit/datamodel/*.html \ bindings/java/src/*.html \ framework/*.txt \ framework/Makefile.am \ framework/bootstrap \ framework/config/* \ framework/config.h.in \ framework/configure.ac \ framework/m4/* \ framework/tsk/framework/*.h \ framework/tsk/framework/Makefile.am \ framework/tsk/framework/extraction/Makefile.am \ framework/tsk/framework/extraction/*.cpp \ framework/tsk/framework/extraction/*.h \ framework/tsk/framework/file/Makefile.am \ framework/tsk/framework/file/*.cpp \ framework/tsk/framework/file/*.h \ framework/tsk/framework/pipeline/Makefile.am \ framework/tsk/framework/pipeline/*.cpp \ framework/tsk/framework/pipeline/*.h \ framework/tsk/framework/services/Makefile.am \ framework/tsk/framework/services/*.cpp \ framework/tsk/framework/services/*.h \ framework/tsk/framework/utilities/Makefile.am \ framework/tsk/framework/utilities/*.cpp \ framework/tsk/framework/utilities/*.h \ framework/modules/* \ framework/SampleConfig/*.xml \ framework/docs/* \ framework/man/* \ framework/tools/tsk_analyzeimg/Makefile.am \ framework/tools/tsk_analyzeimg/*.cpp \ framework/tools/tsk_validatepipeline/Makefile.am \ framework/tools/tsk_validatepipeline/*.cpp \ framework/tools/Makefile.am \ framework/msvcpp/*/*.vcxproj \ framework/msvcpp/*/*.sln \ framework/msvcpp/Makefile ACLOCAL_AMFLAGS = -I m4 # directories to compile @CPPUNIT_TRUE@UNIT_TESTS = unit_tests @X_JNI_FALSE@JAVA_BINDINGS = # Compile java bindings if all of the dependencies existed @X_JNI_TRUE@JAVA_BINDINGS = bindings/java SUBDIRS = tsk tools tests samples man $(UNIT_TESTS) $(JAVA_BINDINGS) nobase_include_HEADERS = tsk/libtsk.h tsk/tsk_incs.h \ tsk/base/tsk_base.h tsk/base/tsk_os.h \ tsk/img/tsk_img.h tsk/vs/tsk_vs.h \ tsk/vs/tsk_bsd.h tsk/vs/tsk_dos.h tsk/vs/tsk_gpt.h \ tsk/vs/tsk_mac.h tsk/vs/tsk_sun.h \ tsk/fs/tsk_fs.h tsk/fs/tsk_ffs.h tsk/fs/tsk_ext2fs.h tsk/fs/tsk_fatfs.h \ tsk/fs/tsk_ntfs.h tsk/fs/tsk_iso9660.h tsk/fs/tsk_hfs.h tsk/fs/tsk_yaffs.h \ tsk/fs/tsk_exfatfs.h tsk/fs/tsk_fatxxfs.h \ tsk/hashdb/tsk_hashdb.h tsk/auto/tsk_auto.h tsk/auto/sqlite3.h nobase_dist_data_DATA = 
tsk/sorter/default.sort tsk/sorter/freebsd.sort \ tsk/sorter/images.sort tsk/sorter/linux.sort tsk/sorter/openbsd.sort \ tsk/sorter/solaris.sort tsk/sorter/windows.sort all: all-recursive .SUFFIXES: am--refresh: Makefile @: $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ echo ' cd $(srcdir) && $(AUTOMAKE) --foreign'; \ $(am__cd) $(srcdir) && $(AUTOMAKE) --foreign \ && exit 0; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ echo ' $(SHELL) ./config.status'; \ $(SHELL) ./config.status;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) $(SHELL) ./config.status --recheck $(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) $(am__cd) $(srcdir) && $(AUTOCONF) $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) $(am__cd) $(srcdir) && $(ACLOCAL) $(ACLOCAL_AMFLAGS) $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs distclean-libtool: -rm -f libtool config.lt install-nobase_dist_dataDATA: $(nobase_dist_data_DATA) @$(NORMAL_INSTALL) @list='$(nobase_dist_data_DATA)'; test -n "$(datadir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(datadir)'"; \ $(MKDIR_P) "$(DESTDIR)$(datadir)" || exit 1; \ fi; \ $(am__nobase_list) | while read dir files; do \ xfiles=; for file in $$files; do \ if test -f "$$file"; then xfiles="$$xfiles $$file"; \ else xfiles="$$xfiles $(srcdir)/$$file"; fi; done; \ test -z "$$xfiles" || { \ test "x$$dir" = x. || { \ echo " $(MKDIR_P) '$(DESTDIR)$(datadir)/$$dir'"; \ $(MKDIR_P) "$(DESTDIR)$(datadir)/$$dir"; }; \ echo " $(INSTALL_DATA) $$xfiles '$(DESTDIR)$(datadir)/$$dir'"; \ $(INSTALL_DATA) $$xfiles "$(DESTDIR)$(datadir)/$$dir" || exit $$?; }; \ done uninstall-nobase_dist_dataDATA: @$(NORMAL_UNINSTALL) @list='$(nobase_dist_data_DATA)'; test -n "$(datadir)" || list=; \ $(am__nobase_strip_setup); files=`$(am__nobase_strip)`; \ dir='$(DESTDIR)$(datadir)'; $(am__uninstall_files_from_dir) install-nobase_includeHEADERS: $(nobase_include_HEADERS) @$(NORMAL_INSTALL) @list='$(nobase_include_HEADERS)'; test -n "$(includedir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(includedir)'"; \ $(MKDIR_P) "$(DESTDIR)$(includedir)" || exit 1; \ fi; \ $(am__nobase_list) | while read dir files; do \ xfiles=; for file in $$files; do \ if test -f "$$file"; then xfiles="$$xfiles $$file"; \ else xfiles="$$xfiles $(srcdir)/$$file"; fi; done; \ test -z "$$xfiles" || { \ test "x$$dir" = x. 
|| { \ echo " $(MKDIR_P) '$(DESTDIR)$(includedir)/$$dir'"; \ $(MKDIR_P) "$(DESTDIR)$(includedir)/$$dir"; }; \ echo " $(INSTALL_HEADER) $$xfiles '$(DESTDIR)$(includedir)/$$dir'"; \ $(INSTALL_HEADER) $$xfiles "$(DESTDIR)$(includedir)/$$dir" || exit $$?; }; \ done uninstall-nobase_includeHEADERS: @$(NORMAL_UNINSTALL) @list='$(nobase_include_HEADERS)'; test -n "$(includedir)" || list=; \ $(am__nobase_strip_setup); files=`$(am__nobase_strip)`; \ dir='$(DESTDIR)$(includedir)'; $(am__uninstall_files_from_dir) # This directory's subdirectories are mostly independent; you can cd # into them and run 'make' without going through this Makefile. # To change the values of 'make' variables: instead of editing Makefiles, # (1) if the variable is set in 'config.status', edit 'config.status' # (which will cause the Makefiles to be regenerated when you run 'make'); # (2) otherwise, pass the desired values on the 'make' command line. $(am__recursive_targets): @fail=; \ if $(am__make_keepgoing); then \ failcom='fail=yes'; \ else \ failcom='exit 1'; \ fi; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-recursive TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! -f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-recursive CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscope: cscope.files test ! 
-s cscope.files \ || $(CSCOPE) -b -q $(AM_CSCOPEFLAGS) $(CSCOPEFLAGS) -i cscope.files $(CSCOPE_ARGS) clean-cscope: -rm -f cscope.files cscope.files: clean-cscope cscopelist cscopelist: cscopelist-recursive cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags -rm -f cscope.out cscope.in.out cscope.po.out cscope.files distdir: $(DISTFILES) $(am__remove_distdir) test -d "$(distdir)" || mkdir "$(distdir)" @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ $(am__make_dryrun) \ || test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done -test -n "$(am__skip_mode_fix)" \ || find "$(distdir)" -type d ! -perm -755 \ -exec chmod u+rwx,go+rx {} \; -o \ ! -type d ! -perm -444 -links 1 -exec chmod a+r {} \; -o \ ! -type d ! -perm -400 -exec chmod a+r {} \; -o \ ! -type d ! -perm -444 -exec $(install_sh) -c -m a+r {} {} \; \ || chmod -R a+r "$(distdir)" dist-gzip: distdir tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz $(am__post_remove_distdir) dist-bzip2: distdir tardir=$(distdir) && $(am__tar) | BZIP2=$${BZIP2--9} bzip2 -c >$(distdir).tar.bz2 $(am__post_remove_distdir) dist-lzip: distdir tardir=$(distdir) && $(am__tar) | lzip -c $${LZIP_OPT--9} >$(distdir).tar.lz $(am__post_remove_distdir) dist-xz: distdir tardir=$(distdir) && $(am__tar) | XZ_OPT=$${XZ_OPT--e} xz -c >$(distdir).tar.xz $(am__post_remove_distdir) dist-tarZ: distdir @echo WARNING: "Support for distribution archives compressed with" \ "legacy program 'compress' is deprecated." 
>&2 @echo WARNING: "It will be removed altogether in Automake 2.0" >&2 tardir=$(distdir) && $(am__tar) | compress -c >$(distdir).tar.Z $(am__post_remove_distdir) dist-shar: distdir @echo WARNING: "Support for shar distribution archives is" \ "deprecated." >&2 @echo WARNING: "It will be removed altogether in Automake 2.0" >&2 shar $(distdir) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).shar.gz $(am__post_remove_distdir) dist-zip: distdir -rm -f $(distdir).zip zip -rq $(distdir).zip $(distdir) $(am__post_remove_distdir) dist dist-all: $(MAKE) $(AM_MAKEFLAGS) $(DIST_TARGETS) am__post_remove_distdir='@:' $(am__post_remove_distdir) # This target untars the dist file and tries a VPATH configuration. Then # it guarantees that the distribution is self-contained by making another # tarfile. distcheck: dist case '$(DIST_ARCHIVES)' in \ *.tar.gz*) \ GZIP=$(GZIP_ENV) gzip -dc $(distdir).tar.gz | $(am__untar) ;;\ *.tar.bz2*) \ bzip2 -dc $(distdir).tar.bz2 | $(am__untar) ;;\ *.tar.lz*) \ lzip -dc $(distdir).tar.lz | $(am__untar) ;;\ *.tar.xz*) \ xz -dc $(distdir).tar.xz | $(am__untar) ;;\ *.tar.Z*) \ uncompress -c $(distdir).tar.Z | $(am__untar) ;;\ *.shar.gz*) \ GZIP=$(GZIP_ENV) gzip -dc $(distdir).shar.gz | unshar ;;\ *.zip*) \ unzip $(distdir).zip ;;\ esac chmod -R a-w $(distdir) chmod u+w $(distdir) mkdir $(distdir)/_build $(distdir)/_build/sub $(distdir)/_inst chmod a-w $(distdir) test -d $(distdir)/_build || exit 0; \ dc_install_base=`$(am__cd) $(distdir)/_inst && pwd | sed -e 's,^[^:\\/]:[\\/],/,'` \ && dc_destdir="$${TMPDIR-/tmp}/am-dc-$$$$/" \ && am__cwd=`pwd` \ && $(am__cd) $(distdir)/_build/sub \ && ../../configure \ $(AM_DISTCHECK_CONFIGURE_FLAGS) \ $(DISTCHECK_CONFIGURE_FLAGS) \ --srcdir=../.. --prefix="$$dc_install_base" \ && $(MAKE) $(AM_MAKEFLAGS) \ && $(MAKE) $(AM_MAKEFLAGS) dvi \ && $(MAKE) $(AM_MAKEFLAGS) check \ && $(MAKE) $(AM_MAKEFLAGS) install \ && $(MAKE) $(AM_MAKEFLAGS) installcheck \ && $(MAKE) $(AM_MAKEFLAGS) uninstall \ && $(MAKE) $(AM_MAKEFLAGS) distuninstallcheck_dir="$$dc_install_base" \ distuninstallcheck \ && chmod -R a-w "$$dc_install_base" \ && ({ \ (cd ../.. && umask 077 && mkdir "$$dc_destdir") \ && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" install \ && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" uninstall \ && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" \ distuninstallcheck_dir="$$dc_destdir" distuninstallcheck; \ } || { rm -rf "$$dc_destdir"; exit 1; }) \ && rm -rf "$$dc_destdir" \ && $(MAKE) $(AM_MAKEFLAGS) dist \ && rm -rf $(DIST_ARCHIVES) \ && $(MAKE) $(AM_MAKEFLAGS) distcleancheck \ && cd "$$am__cwd" \ || exit 1 $(am__post_remove_distdir) @(echo "$(distdir) archives ready for distribution: "; \ list='$(DIST_ARCHIVES)'; for i in $$list; do echo $$i; done) | \ sed -e 1h -e 1s/./=/g -e 1p -e 1x -e '$$p' -e '$$x' distuninstallcheck: @test -n '$(distuninstallcheck_dir)' || { \ echo 'ERROR: trying to run $@ with an empty' \ '$$(distuninstallcheck_dir)' >&2; \ exit 1; \ }; \ $(am__cd) '$(distuninstallcheck_dir)' || { \ echo 'ERROR: cannot chdir into $(distuninstallcheck_dir)' >&2; \ exit 1; \ }; \ test `$(am__distuninstallcheck_listfiles) | wc -l` -eq 0 \ || { echo "ERROR: files left after uninstall:" ; \ if test -n "$(DESTDIR)"; then \ echo " (check DESTDIR support)"; \ fi ; \ $(distuninstallcheck_listfiles) ; \ exit 1; } >&2 distcleancheck: distclean @if test '$(srcdir)' = . 
; then \ echo "ERROR: distcleancheck can only run from a VPATH build" ; \ exit 1 ; \ fi @test `$(distcleancheck_listfiles) | wc -l` -eq 0 \ || { echo "ERROR: files left in build directory after distclean:" ; \ $(distcleancheck_listfiles) ; \ exit 1; } >&2 check-am: all-am check: check-recursive all-am: Makefile $(DATA) $(HEADERS) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(datadir)" "$(DESTDIR)$(includedir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f $(am__CONFIG_DISTCLEAN_FILES) -rm -f Makefile distclean-am: clean-am distclean-generic distclean-libtool \ distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-nobase_dist_dataDATA \ install-nobase_includeHEADERS install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f $(am__CONFIG_DISTCLEAN_FILES) -rm -rf $(top_srcdir)/autom4te.cache -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-nobase_dist_dataDATA \ uninstall-nobase_includeHEADERS .MAKE: $(am__recursive_targets) install-am install-strip .PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am \ am--refresh check check-am clean clean-cscope clean-generic \ clean-libtool cscope cscopelist-am ctags ctags-am dist \ dist-all dist-bzip2 dist-gzip dist-lzip dist-shar dist-tarZ \ dist-xz dist-zip distcheck distclean distclean-generic \ distclean-libtool distclean-tags distcleancheck distdir \ distuninstallcheck dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-nobase_dist_dataDATA install-nobase_includeHEADERS \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ installdirs-am maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-generic 
mostlyclean-libtool pdf pdf-am \ ps ps-am tags tags-am uninstall uninstall-am \ uninstall-nobase_dist_dataDATA uninstall-nobase_includeHEADERS .PRECIOUS: Makefile api-docs: doxygen tsk/docs/Doxyfile doxygen framework/docs/Doxyfile doxygen bindings/java/doxygen/Doxyfile man-html: cd man;build-html # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: sleuthkit-4.2.0/man/000755 000765 000024 00000000000 12576326352 015111 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/NEWS.txt000644 000765 000024 00000232730 12576320677 015666 0ustar00carrierstaff000000 000000 Numbers refer to SourceForge.net tracker IDs: http://sourceforge.net/tracker/?group_id=55685 ---------------- VERSION 4.2.0 -------------- - ExFAT support added - New database schema - New Sqlite hash database - Various bug fixes - NTFS pays more attention to sequence and loads metadata only if it matches. - Added secondary hash database index ---------------- VERSION 4.1.3 -------------- - fixed bug that could crash UFS/ExtX in inode_lookup. - More bounds checking in ISO9660 code - Image layer bounds checking - Update version of SQLITE-JDBC - changed how java loads navite libraries - Config file for YAFFS2 spare area - New method in image layer to return names - Yaffs2 cleanup. - Escape all strings in SQLite database - SQlite code uses NTTFS sequence number to match parent IDs ---------------- VERSION 4.1.2 -------------- Core: - Fixed more visual studio projects to work on 64-bit - TskAutoDB considers not finding a VS/FS a critical error. Java: - added method to Image to perform sanity check on image sizes. fiwalk: - Fixed compile error on Linux etc. ---------------- VERSION 4.1.1 -------------- Core: - Added FILE_SHARE_WRITE to all windows open calls. - removed unused methods in CRC code that caused compile errors. - Added NTFS FNAME times to time2 struct in TSK_FS_META to make them easier to access -- should have done this a long time ago! - fls -m and tsk_gettimes output NTFS FNAME times to output for timelines. - hfind with EnCase hashsets works when DB is specified (and not only index) - TskAuto now goes into UNALLOC partitions by default too. - Added support to automatically find all Cellebrite raw dump files given the name of the first image. - Added 64-bit windows targets to VisualStudio files. - Added NTFS sequence to parent address in directory and directory itself. - Updated SQLite code to use sequence when finding parent object ID. Java: - Java bindings JAR files now have native libraries in them. - Logical files are added with a transaction ---------------- VERSION 4.1.0 -------------- Core: - Added YAFFS2 support (patch from viaForensics). - Added Ext4 support (patch from kfairbanks) - changed all include paths to be 'tsk' instead of 'tsk3' -- IMPORTANT FOR ALL DEVELOPERS! Framework: - Added Linux and MAC support. - Added L01 support. - Added APIs to find files by name, path and extension. - Removed deprecated TskFile::getAttributes methods. - moved code around for AutoBuild tool support. 
Java Bindings: - added DerivedFile datamodel support - added a public method to Content to add ability to close() its tsk handle before the object is gc'd - added faster skip() and random seek support to ReadContentInputStream - refactored datamodel by pushing common methods up to AbstractFile - fixed minor memory leaks - improved regression testing framework for java bindings datamodel ---------------- VERSION 4.0.2 -------------- Core: New Features: - Added fiwalk tool from Simson. Not supported in Visual Studio yet. Bug Fixes: - Fixed fcat to work on NTFS files (still doesn't support ADS though). - Fixed HFS+ support in tsk_loaddb / SQLite -- root directory was not added. - NTFS code now looks at all MFT entries when listing directory contents. It used to only look at unallocated entries for orphan files. This fixes an image that had allocated files missing from the directory b-tree. - NTFS code uses sequence number when searching MFT entries for all files. - Libewf detection code change to support v2 API more reliably (ID: 3596212). - NTFS $SII code could crash in rare cases if $SDS was multiple of block size. Framework: - Added new API to TskImgDB that returns the base name of an image. - Numerous performance improvements to framework. - Removed requirement in framework to specify module extension in pipeline configuration file. - Added blackboard artifacts to represent both operating system and network service user accounts. Java Bindings: - added more APIs to find files by name, path and where clause - added API to get currently processed dir when image is being added, - added API to return specific types of children of image, volume system, volume, file system. - moved more common methods up to Content interface - deprecated context of blackboard attributes, - deprecated SleuthkitCase.runQuery() and SleuthkitCase.closeRunQuery() - fixed ReadContentInputStream bugs (ignoring offset into a buffer, implementing available() ) - methods that are lazy loading are now thread safe - Hash class is now thread-safe - use more PreparedStatements to improve performance - changed source level from java 1.6 to 1.7 - Throw exceptions from C++ side better ---------------- VERSION 4.0.1 -------------- New Features: - Can open raw Windows devices with write mode sharing. - More DOS partition types are displayed. - Added fcat tool that takes in file name and exports content (equivalent to using ifind and icat together). - Added new API to TskImgDB that returns hash value associated with carved files. - performance improvements with FAT code (maps and dir_add) - performance improvements with NTFS code (maps) - added AONLY flag to block_walk - Updated blkls and blkcalc to use AONLY flag -- MUCH faster. Bug Fixes: - Fixed mactime issue where it could choose the wrong timezone that did not follow daylight savings times. - Fixed file size of alternate data streams in framework. - Incorporated memory leak fixes and raw device fixes from ADF Solutions. ---------------- VERSION 4.0.0 -------------- New Features: - Added multithreaded support - Added C++ wrapper classes - Added JNI bindings / Java data model classes - 3314047: Added utf8-specific versions of 'toid' methods for img,vs,fs types - 3184429: More consistent printing of unset times (all zerso instead of 1970) - New database design that allows for multiple images in the same database - GPT volume system tries other sector sizes if first attempt fails. - Added hash calculation and lookup to AutoDB and JNI. - Upgraded SQLite to 3.7.9. 
- Added Framework (Windows-only) - EnCase hash support - Libewf v2 support (it is now non-beta) - First file in a raw split or E01 can be specified and the rest of the files are found. - mactime displays times as 0 if the time is not set (instead of 1970) - Changed behavior of 'mactime -y' to use ISO8601 format. - Updated HFS+ code from ATC-NY. - FAT orphan file improvements to reduce false positives. - TskAuto better reports errors. - Upgrade build projects from Visual Studio 2008 to 2010. Bug Fixes: - Relaxed checking when conflict exists between DOS and GPT partitions. Had a Mac image that was failing to resolve which partition table to use. ---------------- VERSION 3.2.3 -------------- New Features: - new TskAuto method (handleNotification()) that gets verbose messages that allow for debugging when the class makes decisions. - DOS partitions are loaded even if an extended partition fails to load - new TskAuto::findFilesInFs(TSK_FS_INFO *) method - Need to only specify first E01 file and the rest are found - Changed docs license to non-commercial - Unicode conversion routines fix invalid UTF-16 text during conversion - Added '-d' to tsk_recover to specify directory to recover Bug Fixes: - Added check to fatfs_open to compare first sectors of FAT if we used backup boot sector and verify it is FAT32. - More checks to make sure that FAT short names are valid ASCII - 3406523: Mactime size sanity check - 3393960: hfind reading of Windows input file - 3316603: Error reading last blocks of RAW CD images - Fixed bugs in how directories and files were detected in TskAuto ---------------- VERSION 3.2.2 -------------- Bug Fixes - 3213886: ISO9660 directory hole not advancing - 3173095 contd: Updated checks so that tougher FAT checks are applied to deleted directories. - 3303678: Image type in Sqlite DB is now not always 0 - 3303679: Deleted FAT files have more name cleanup in short names New Features: - 3213888: RAW CD format - Auto class accepts TSK_IMG_INFO as argument - Copies of split image file names are stored in TSK so that the caller can free them before TSK_IMG_INFO is freed. ---------------- VERSION 3.2.1 -------------- Bug Fixes - 3108272: fls arguments for -d and -u - 3105539: compile error issues because of SQLite and pthreads - 3173095: missing FAT files because of invalid dates. - 3184419: mingw compile errors. - 3191391: surround file name in quotes in mactime -d csv output New Features: - A single dummy entry is added to the SQLite DB if no volume exists so that all programs can assume that there will be at least one volume in the table. - 3184455: allow srcdir != builddir ---------------- VERSION 3.2.0 -------------- Bug Fixes - 3043092: Minor logic errors with ifind code. - FAT performance fix when looking for parent directories in $OrphanFiles. - 3052302: Crash on NTFS/UFS detection test because of corrupt data -- tsk_malloc error. - 3088447: Error adding attribute because of run collision. Solved by assigning unique IDs. New Features: - 3012324: Name mangling moved out of library into outer tools so that they can see control characters if they want to. Patch by Anthony Lawrence. - 2993806: ENUM values have a specified NONE value if you don't want to specify any special flags. Patch by Anthony Lawrence. - 3026989: Add -e and -s flags to img_cat. Patch by Simson Garfinkel. - 2941805: Add case sensitive flag to fsstat in HFS. Patch by Rob Joyce. - 3017764: Changed how default NTFS $DATA attribute was named. Now it has no name, while it previously had a fake name of "$Data".
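Several of the 3.2.x entries above revolve around the TskAuto class, which walks every volume and file system in an image and hands each file to a virtual callback. The sketch below shows the general shape of a subclass; it assumes the C++ API declared under tsk/auto (openImage(), findFilesInImg(), processFile()) and a non-Windows build where TSK_TCHAR is char, and is only an illustration, not code from the release.

    #include <tsk/libtsk.h>
    #include <cstdio>

    // Minimal TskAuto subclass: print the path of every file found in every
    // file system of the image (a sketch; real callers add error handling).
    class FileLister : public TskAuto {
      public:
        virtual TSK_RETVAL_ENUM processFile(TSK_FS_FILE *fs_file, const char *path) {
            if (fs_file->name != NULL)
                printf("%s%s\n", path, fs_file->name->name);
            return TSK_OK;  // keep walking
        }
    };

    int main(int argc, char **argv)
    {
        if (argc != 2)
            return 1;
        FileLister lister;
        const TSK_TCHAR *images[] = { argv[1] };   // first (or only) image segment
        if (lister.openImage(1, images, TSK_IMG_TYPE_DETECT, 0)) {
            tsk_error_print(stderr);
            return 1;
        }
        lister.findFilesInImg();  // walks volume systems and file systems
        lister.closeImage();
        return 0;
    }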
- New TskAuto class. - New tsk_loaddb, tsk_recover, tsk_comparedir, and tsk_gettimes tools. ---------------- VERSION 3.1.3 -------------- Bug Fixes - 3006733: FAT directory listings were slow because the inner code was not stopping when it found the parent directory. - Adjusted sanity / testing code on FAT directory entries to allow non-ascii in extensions and reject entries with lots of 0s. - 3023606: Ext2 / ffs corrupted file names. - Applied NTFS SID fixes from Mandiant. - ntfs_load_secure() memory leak patch from Michael Cohen ---------------- VERSION 3.1.2 -------------- Bug Fixes - 2982426: FAT directory listings were slow because the entire image was being scanned for parent directory information. - 2982965: fs_attr length bug fix. - 2988619: mmls -B display error. - 2988330: ntfs SII cluster size increment bug - 2991487: Zeroed content in NTFS files that were not fully initialized. - 2993767: Slow FAT listings of OrphanFiles because hunt for parent directory resulted in many searches for OrphanFiles. Added cache of OrphanFiles. - 2999567: ifind was not stopping after first hit. - 2993804: read past end of file did not always return -1. ---------------- VERSION 3.1.1 -------------- Bug Fixes - 2954703: ISO9660 missing files because duplicate files had same starting block. - 2954707: ISO9660 missing some files with zero length and duplicate starting block. Also changed behavior of how multiple volume descriptors are processed. - 2955898: Orphan files not found if no deleted file names exist. - 2955899: NTFS internal setting of USED flag. - 2972721: Sorter fails with hash lookup if '-l' is given. - 2941813: Reverse HFS case sensitive flags (internal fix only) - 2954448: Debian package typo fixes, etc. - 2975245: sorter ignores realloc entries to reduce misleading mismatch entries and duplicate entries. ---------------- VERSION 3.1.0 -------------- New Features and Changes - 2206285: HFS+ can now be read. Lots of tracker items about this. Thanks to Rob Joyce and ATC-NY for many of the patches and reports. - 2677069: DOS Safety Partitions in GPT Volume Systems are better detected instead of reporting multiple VSs. - Windows executables can be built in Visual Studio w/out needing other image format libraries. - 2367426: Uninitialized file space is shown if slack space is requested. - 2677107: All image formats supported by AFFLIB can be accessed by specifying the "afflib" type. - 2206265: sigfind can now process non-raw files. - 2206331: Indirect block addresses are now available in the library and command line tools. They are stored in a different attribute. - Removed 'docs' files and moved them to the wiki. - Removed disk_stat and disk_sreset because they were out of date and hdparm now has the same functionality. - 2874854: Image layer tools now support non-512 byte device sector sizes. Users can specify sector size using the -b argument to the command line tools. This has several consequences: -- 'mmls -b' is now 'mmls -B'. Similarly with istat -b. -- Changed command line format for '-o' so that sector size is specified only via -b and not using '-o 62@4096'. - 2874852: Sanity checking on partition table entries is relaxed and only first couple of partitions are checked to make sure that they can fit into the image. - 2895607: NTFS SID data is available in the library and 'istat'. - 2206341: AFF encrypted images now give a more helpful error message if a password is not given. - 2351426: mactime is now distributed with Windows execs.
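The 3.1.0 sector-size change above is exposed in the library through the sector-size argument of the image-open call. Below is a small mmls-style sketch, assuming the 4.x C API (tsk_img_open_sing, tsk_vs_open, tsk_vs_part_get); the 4096 a caller might pass is only an example of a non-512-byte device sector size, and 0 means "use the default".

    #include <tsk/libtsk.h>
    #include <cstdio>

    // List partitions roughly the way mmls does, with an explicit device
    // sector size. Illustrative sketch only, not the mmls source.
    int list_parts(const TSK_TCHAR *image_path, unsigned int sector_size)
    {
        TSK_IMG_INFO *img = tsk_img_open_sing(image_path, TSK_IMG_TYPE_DETECT, sector_size);
        if (img == NULL) {
            tsk_error_print(stderr);
            return 1;
        }
        TSK_VS_INFO *vs = tsk_vs_open(img, 0, TSK_VS_TYPE_DETECT);
        if (vs == NULL) {
            tsk_error_print(stderr);   // e.g. no partition table was found
            tsk_img_close(img);
            return 1;
        }
        for (TSK_PNUM_T i = 0; i < vs->part_count; i++) {
            const TSK_VS_PART_INFO *part = tsk_vs_part_get(vs, i);
            printf("%" PRIuDADDR "\t%" PRIuDADDR "\t%s\n",
                part->start, part->len, part->desc);
        }
        tsk_vs_close(vs);
        tsk_img_close(img);
        return 0;
    }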
Developer-level Changes - Abstracted name comparison to file system-specific function. - Added support in mactime to read body files with comment lines. - 2596153: Changed img_open arguments, similar to getopt(). - 2797169: tsk_fs_make_ls is now supported as an external library function. Now named tsk_fs_meta_make_ls. - 2908510: Nanosecond resolution of timestamps is now available. - 2914255: Version info is now available in .h files in both string and integer form. Bug Fixes: - 2568528: incorrect adjustment of attribute FILLER offset. - 2596397: Incorrect date sorting in mactime. - 2708195: Errors when doing long reads in fragmented attributes. - Fixed typo bugs in sorter (reported via e-mail by Drew Hunt). - 2734458: added orphan cache map to prevent slow NTFS listing times. - 2655831: Sorter now knows about the ext2 and ext3 types. - 2725799: ifind not converting UTF16 names properly on Windows because it was using endian ordering of file system and not local system. - 2662168: warning messages on macs when reading the raw character device. - 2778170: incorrect read size on resident attributes. - 2777633: missing second resolution on FAT creation times. - Added the READ_SHARE option to the CreateFile command for split image files. Patch by Christopher Siwy. - 2786963: NTFS compression infinite loop fix. - 2645156: FAT / blkls error getting slack because allocsize was being set too small (and other values were not being reset). - 2367426: Zeros are set for VDL slack on NTFS files. - 2796945: Infinite loop in fs_attr. - 2821031: Missing fls -m fields. - 2840345: Extended DOS partitions in extended partitions are now marked as Meta. - 2848162: Reading attributes at offsets that are on boundary of run fragment. - 2824457: Fixed issue reading last block of file system with blkcat. - 2891285: Fixed issue that prevented reads from the last block of a file system when using the POSIX-style API. - 2825690: Fixed issue that prevented blkls -A from working. - 2901365: Allow FAT files to have a 0 wdate. - 2900761: Added FAT directory sanity checks to prevent infinite loops. - 2895607: Fixed various memory leaks. - 2907248: Fixed image layer cache crash. - 2905750: all file system read() functions now return -1 when offset given is past end of file. ---------------- VERSION 3.0.1 -------------- 11/11/08: Bug Fix: Fixed crashing bug in ifind on FAT file system. Bug: 2265927 11/11/08: Bug Fix: Fixed crashing bug in istat on ExtX $OrphanFiles dir. Bug: 2266104 11/26/08: Update: Updated fls man page. 11/30/08: Update: Removed TODO file and using tracker for bugs and feature requests. 12/29/08: Bug Fix: Fixed incorrectly setting block status in file_walk for compressed files (Bug: 2475246) 12/29/08: Bug Fix: removed fs_info field from FS_META because it was not being set and should have been removed in 3.0. Reported by Rob Joyce and Judson Powers. 12/29/08: Bug Fix: orphan files and NTFS files found via parent directory have an unknown file name type (instead of being equal to meta type). (Bug: 2389901). Reported by Barry Grundy. 1/12/09: Bug Fix: Fixed ISO9660 bug where large directory contents were not displayed. (Bug: 2503552). Reported by Tom Black. 1/24/09: Bug Fix: Fixed bug 2534449 where extra NTFS files were shown if the MFT address was changed to 0 because fs_dir_add was checking the address and name. Reported by Andy Bontoft. 1/29/09: Update: Revised the fix for bug 2534449. The fix is now in ifind instead of fs_dir_add(). 2/2/09: Update: Added RPM spec file from Morgan Weetmam.
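One of the 3.1.0 developer-level changes above put the library version into the installed headers in both string and integer form. A tiny compile-time/runtime check of the kind that change enables is below; the TSK_VERSION_NUM and TSK_VERSION_STR macro names from tsk/base are an assumption here, so treat them as illustrative for other releases.

    #include <tsk/libtsk.h>
    #include <cstdio>

    // Refuse to build against anything older than 4.0.0 and report the
    // version string at runtime (sketch; macro names assumed from tsk/base).
    #if defined(TSK_VERSION_NUM) && (TSK_VERSION_NUM < 0x040000)
    #error "This code expects The Sleuth Kit 4.0.0 or newer"
    #endif

    int main()
    {
        printf("built against TSK %s\n", TSK_VERSION_STR);
        return 0;
    }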
---------------- VERSION 3.0.0 -------------- 0/00/00: Update: Many, many, many API changes. 2/14/08: Update: Added mmcat tool. 2/26/08: Update: Added flags to mmls to specify partition types. 3/1/08: Update: Major update of man pages. 4/14/08: Bug Fix: Fixed the calculation of "actual" last block. Off by 1 error. Reported by steve. 5/23/08: Bug Fix: Incorrect malloc return check in srch_strings. reported by Petri Latvala. 5/29/08: Bug Fix: Fixed endian ordering bug in ISO9660 code. Reported by Eduardo Aguiar de Oliveira. 6/17/08: Update: 'sorter' now uses the ifind method for finding deleted NTFS files (like Autopsy) does instead of relying on fls. Reported by John Lehr. 6/17/08: Update: 'ifind -p' reports data on ADS. 7/10/08: Update: FAT looks for a backup boot sector in FAT32 if magic is 0 7/21/08: Bug Fix: Changed define of strcasecmp to _stricmp instead of _strnicmp in Windows. (reported by Darren Bilby). 7/21/08: Bug Fix: Fall back to open "\\.\" image files on Windows with SHARE_WRITE access so that drive devices can be opened. (reported by Darren Bilby). 8/20/08: Bug Fix: Look for Windows objects when opening files in Cygwin, not just Win32. Reported by Par Osterberg Medina. 8/21/08: Update: Renamed library and install header files to have a '3' in them to allow parallel installations of v2 and v3. Suggested by Simson Garfinkel. 8/22/08: Update: Added -b option to sorter to specify minimum file size to process. Suggested by Jeff Kell. 8/22/08: Update: Added libewf as a requirement to build win32 so that E01 files are supported. 8/29/08: Update: Added initial mingw patches for cross compiling and Windows. Patches by Michael Cohen. 9/X/08: Update: Added ability to access attibutes 9/6/08: Update: Added image layer cache. 9/12/08: Bug Fix: Fixed crash from incorrectly cleared value in FS_DIR structure. Reported and patched by Jason Miller. 9/13/08: Update: Changed d* tool names to blk*. 9/17/08: Update: Finished mingw support so that both tools and library work with Unicode file name support. 9/22/08: Update: Added new HFS+ code from Judson Powers and Rob Joyce (ATC-NY) 9/24/08: Bug Fix: Fixed some cygwin compile errors about types on Cygwin. Reported by Phil Peacock. 9/25/08: Bug Fix: Added O_BINARY to open() in raw and split because Cygwin was having problems. Reported by Mark Stam. 10/1/08: Update: Added ifndef to TSK_USE_HFS define to allow people to define it on the command line. Patch by RB. ---------------- VERSION 2.52 -------------- 2/12/08: Bug Fix: Fixed warning messages in mactime about non-Numeric data. Reported by Pope. 2/19/08: Bug Fix: Added #define to tsk_base_i.h to define LARGEFILE64_SOURCE based on LARGEFILE_SOURCE for older Linux systems. 2/20/08: Bug Fix: Updated afflib references and code. 3/13/08: Update: Added more fixes to auto* so that AFF will compile on more systems. I have confirmed that AFFLIB 3.1.3 will run with OS X 10.4.11. 3/14/08: Bug Fix: Added checks to FAT code that calcs size of directories. If starting cluster of deleted dir points into a cluster chain, then problems can occur. Reported by John Ward. 3/19/08: Update: I have verified that this compiles with libewf-20070512. 3/21/08: Bug Fix: Deleted Ext/FFS directories were not being recursed into. This case was rare (because typically the metadata are wiped), but possible. Reported by JWalker. 3/24/08: Update: I have verified that this compiles with libewf-20080322. Updates from Joachim Metz. 
3/26/08: Update: Changed some of the header file design for the tools so that the define settings in tsk_config.h can be used (for large files). 3/28/08: Update: Added config.h reference to srch_strings to get the LARGEFILE support. 4/5/08: Update: Improved inode argument number parsing function. ---------------- VERSION 2.51 -------------- 1/30/08: Bug Fix: Fixed potential infinite loop in fls_lib.c. Patch by Nathaniel Pierce. 2/7/08: Bug Fix: Defined some of the new constants that are used in disktools because older Linux distros did not define them. Reported by Russell Reynolds. 2/7/08: Bug Fix: Modified autoconf to check for large file build requirements and look for new 48-bit structures needed by disktools. Both of these were causing problems on older Linux distros. 2/7/08: Update: hfind will normalize hash values in database so that they are case insensitive. ---------------- VERSION 2.50 -------------- 12/19/07: Update: Finished upgrade to autotools building design. No longer include file, afflib, libewf. Resulted in many source code layout changes and sorter now searches for md5, sha1, etc. ---------------- VERSION 2.10 -------------- 7/12/07: Update: 0s are returned for AFF pages that were not imaged. 7/31/07: Bug Fix: ifind -p could crash if a deleted file name was found that did not point to a valid meta data structure. (Reported by Andy Bontoft) 8/5/07: Update: Added NSRL support back into sorter. 8/15/07: Update: Errors are given if supplied sector offset is larger than disk image. Reported by Simson Garfinkel. 8/16/07: Update: Renamed MD5 and SHA1 functions to TSK_MD5_.. and TSK_SHA_.... 8/16/07: Update: tsk_error_get() does not reset the error messages. 9/26/07: Bug Fix: Changed FATFS check for valid dentries to consider second values of 30. Reported by Alessandro Camillo. 10/18/07: Update: inode_walk for NTFS and FAT will not abort if data corruption is found in one entry -- instead they will just skip it. 10/18/07: Update: tsk_os.h uses standard gcc system names instead of TSK specific ones. 10/18/07: Update: Updated raw.c to use ioctl commands on OS X to get size of raw device because it does not work with SEEK_END. Patch by Rob Joyce. 10/31/07: Update: Finished upgrade to fatfs_file_walk_off so that walking can start at a specific offset. Also finished upgrade that caches FAT run list to make the fatfs_file_walk_off more efficient. 11/14/07: Update: Fixed a few places where off_t was being used instead of OFF_T. Reported by GiHan Kim. 11/14/07: Update: Fixed a memory leak in aff.c to free AFF_INFO. Reported by GiHan Kim. 11/24/07: Update: Finished review and update of ISO9660 code. 11/26/07: Bug Fix: Fixed 64-bit calculation in HFS+ code. Submitted by Rob Joyce. 11/29/07: Update: removed linking of srch_strings.c and libtsk. Reported by kwizart. 11/30/07: Update: Made a #define TSK_USE_HFS compile flag for incorporating the HFS support (flag is in src/fstools/fs_tools_i.h) 11/30/07: Update: restricted the FAT dentry sanity checks to verify space padding in the name and latin-only extensions. 12/5/07: Bug Fix: fs_read_file_int had a bug that ignored the type passed for NTFS files. Reported by Dave Collett. 12/12/07: Update: Changed the FAT dentry sanity checks to allow spaces in volume labels and do more checking on the attribute flag. ---------------- VERSION 2.09 -------------- 4/6/07: Bug Fix: Infinite loop in ext2 and ffs istat code because of using unsigned size_t variable. Reported by Makoto Shiotsuki. 4/16/07: Bug Fix: Changed use of fseek() to fseeko() in hashtools.
Patch by Andy Bontoft. 4/16/07: Bug Fix: Changed Win32 SetFilePointer to use LARGE_INTEGER. Reported by Kim GiHan. 4/19/07: Bug Fix: Not all FAT orphan files were being found because of an offset error. 4/26/07: Bug Fix: ils -O was not working (link value not being checked). Reported by Christian Perst. 4/27/07: Bug Fix: ils -r was showing UNUSED inodes. Reported by Christian Perst. 5/10/07: Update: Redefined the USED and UNUSED flags for NTFS so that UNUSED is set when no attributes exist. 5/16/07: Bug Fix: Fixed several bounds checking bugs that may cause a crash if the disk image is corrupt. Reported by Tim Newsham (iSec Partners) 5/17/07: Update: Updated AFFLIB to 2.2.11 5/17/07: Update: Updated libewf to libewf-20070512 5/17/07: Update: Updated file to 4.20 5/29/07: Update: Removed NTFS SID/SDS contributed code because it causes crashes on some systems and its output is not entirely clear. (most recent bug reported by Andy Scott) 6/11/07: Update: Updated AFFLIB to 2.2.12. 6/12/07: Bug Fix: ifind -p was not reporting back info on the allocated name when one existed (because strtok was overwriting the name when the search continued). Reported by Andy Bontoft. 6/13/07: Update: Updated file to 4.21 ---------------- VERSION 2.08 -------------- 12/19/06: Bug Fix: ifind_path was not setting *result when root inode was searched for. Patch by David Collett. 12/29/06: Update: Removed 'strncpy' in ntfs.c in favor of manual assignment of text for '$Data' and 'N/A' for performance reasons. 1/11/07: Update: Added duname to FS_INFO that contains a string of name for a file system's data unit -- Cluster for example. 1/19/07: Bug Fix: ifind_path was returning an error even after some files were found. Errors are now ignored if a file was found. Reported by Michael Cohen. 1/26/07: Bug Fix: Fixed calculation of inode numbers in fatfs.c (reported by Simson Garfinkel). 2/1/07: Update: Changed aff-install to support symlinked directory. 2/1/07: Update: img_open modified so that it does not report errors for s3:// and http:// files that do not exist. 2/5/07: Update: updated *_read() return values to look for "<0" instead of simply "== -1". (suggested by Simson Garfinkel). 2/8/07: Update: removed typedef for uintptr in WIN32 code. 2/13/07: Update: Applied patch from Kim Kulak to update HFS+ code to internal design changes. 2/16/07: Update: Renamed many of the external data structures and flags so that they start with TSK_ or tsk_ to prevent name collisions. 2/16/07: Update: Moved MD5 and SHA1 routines and binaries to auxtools instead of hashtools so that they are easier to access. 2/16/07: Update: started redesign and port of hashtools. 2/21/07: Update: Changed inode_walk callback API to remove the flags variable -- this was redundant since flags are also in TSK_FS_INODE. Same for TSK_FS_DENT. 3/7/07: Bug Fix: fs_read_file failed for NTFS resident files. Reported by Michael Cohen. 3/8/07: Bug Fix: FATFS assumed a 512-byte sector in a couple of locations. 3/13/07: Update: Finished hashtools update. 3/13/07: Update: dcat reads block by block instead of all at once. 3/23/07: Update: Change ntfs_load_secure to allocate all of its needed memory at once instead of doing reallocs. 3/23/07: Update: Updated AFFLIB to 2.2.0 3/24/07: Bug Fix: Fixed many locations where return value from strtoull was not being properly checked and therefore invalid numbers were not being detected. 3/24/07: Bug Fix: A couple of error messages in ntfs_file_walk should have been converted to _RECOVER when the _RECOVERY flag was given.
3/24/07: Update: Changed behavior of ntfs_file_walk. If no type is given, then a default type is chosen for files and dirs. Now, no error is generated if that type does not exist -- similar to how no error is generated if a FAT file has 0 file size. 3/26/07: Update: cleaned up and documented fs_data code more. 3/29/07: Update: Updated AFF to 2.2.2. 3/29/07: Update: Updated install scripts for afflib, libewf, and file to touch files so that the auto* files are in the correct time stamp order. 4/5/07: Bug Fix: Added sanity checks to offsets and addresses in ExtX and UFS group descriptors. Reported by Simson Garfinkel. ---------------- VERSION 2.07 -------------- 9/6/06: Update: Changed TCHAR and _T to TSK_TCHAR and _TSK_T to avoid conflicts with other libraries. 9/18/06: Update: Added tsk_list_* functions and structures. 9/18/06: Update: Added checks for recursive FAT directories. 9/20/06: Update: Changed FS_META_* flags for LINK and UNLINK and moved them to ILS_? flags. 9/20/06: Update: added flags to ils to find only orphan inodes. 9/20/06: Update: Added Orphan support for FAT, NTFS, UFS, Ext2, ISO. 9/20/06: Update: File walk actions now have a flag to identify if a block is SPARSE or not (used to identify if the address being passed is valid or made up). 9/21/06: Update: Added file size sanity check to fatfs_is_dentry and fixed assignment of fatfs->clustcnt. 9/21/06: Update: block_, inode, and dent_walk functions now do more flag checking and make sure that some things are set instead of making the calling code do it. 9/21/06: Update: Added checks for recursive (infinite loop) NTFS, UFS, ExtX, and ISO9660 directories. 9/21/06: Update: Added checks to make sure that walking the FAT for files and directories would not result in an infinite loop (if the FAT is corrupt). 9/21/06: Update: Added -a and -A to dls to specify allocated and unallocated blocks to display. 9/21/06: Update: Updated AFFLIB to 1.6.31. 9/22/06: Update: added a fs_read_file() function that allows you to read random parts of a file. 10/10/06: Update: Improved performance of fs_read_file() and added new FS_FLAG_META_COMP and FS_FLAG_DATA_COMP flags to show if a file and data are using file system-level compression (NTFS only). 10/18/06: Bug fix: in fs_data_put_run, added a check to see if the head was null before looking up. An extra error message was being created for nothing. 10/18/06: Bug Fix: Added a check to the compression buffer to see if it is null in _done(). 10/25/06: Bug Fix: Added some more bounds checks to NTFS uncompression code. 11/3/06: Bug Fix: added check to dcat_lib in case the number of blocks requested is too large. 11/07/06: Update: Added fs_read_file_noid wrapper around fs_read_file interface. 11/09/06: Update: Updated AFF to 1.7.1 11/17/06: Update: Updated libewf to 20061008-1 11/17/06: Bug Fix: Fixed attribute lookup bug in fs_data_lookup. Patch by David Collett. 11/21/06: Bug Fix: Fixed fs_data loops that were stopping when they hit an unused attribute. Patch by David Collett. 11/21/06: Bug Fix: sorter no longer clears the path when it starts. This was causing errors on Cygwin because OpenSSL libraries could not be found. 11/22/06: Update: Added a tskGetVersion() function to return the string of the current version. 11/29/06: Update: Added more tsk_error_resets to more places to prevent extra error messages from being displayed. 11/30/06: Update: Added Caching to the getFAT function and to fs_read. 12/1/06: Update: Changed TSK_LIST to a reverse sorted list of buckets.
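The 2.07 entry above about fs_read_file() introduced random-access reads of file content into the library. In the current API that role is played by tsk_fs_file_read(); the sketch below assumes that 4.x naming (fs_read_file itself no longer exists) and a file system handle that is already open, and is meant only as an illustration of the idea.

    #include <tsk/libtsk.h>
    #include <cstdio>

    // Read up to `len` bytes starting at byte offset `off` of the file at
    // inode `inum` -- the same kind of random-access read fs_read_file()
    // offered. Sketch only; callers would loop on short reads and check sizes.
    ssize_t read_file_chunk(TSK_FS_INFO *fs, TSK_INUM_T inum,
        TSK_OFF_T off, char *buf, size_t len)
    {
        TSK_FS_FILE *file = tsk_fs_file_open_meta(fs, NULL, inum);
        if (file == NULL) {
            tsk_error_print(stderr);
            return -1;
        }
        ssize_t cnt = tsk_fs_file_read(file, off, buf, len,
            TSK_FS_FILE_READ_FLAG_NONE);
        if (cnt < 0)
            tsk_error_print(stderr);
        tsk_fs_file_close(file);
        return cnt;
    }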
12/5/06: Bug Fix: Fixed FS_DATA_INUSE infinite loop bug. 12/5/06: Bug Fix: Fixed infinite loop bug with NTFS decompression code. 12/5/06: Update: Added NULL check to fs_inode_free (from Michael Cohen). 12/5/06: Update: Updated ifind_path so that an allocated name will be shown if one exists -- do not exit if we find simply an unallocated entry with an address of 0. Suggested by David Collett. 12/6/06: Update: Updated file to version 4.18. 12/6/06: Update: Updated libaff to 2.0a10 and changed build process accordingly. 12/7/06: Update: Added a tsk_error_get() function that returns a string with the error messages -- can be used instead of tsk_error_print. 12/7/06: Update: fixed some memory leaks in FAT and NTFS code. 12/11/06: Bug Fix: fatfs_open error message code referenced a value that was in freed memory -- reordered statements. 12/15/06: Update: Include VCProj files in build. ---------------- VERSION 2.06 -------------- 8/11/06: Bug Fix: Added back in ASCII/UTF-8 checks to remove control characters in file names. 8/11/06: Bug Fix: Added support for fast sym links in UFS1 8/11/06: Update: Redesigned the endian support so that getuX takes only the endian flag so that the Unicode design could be changed as well. 8/11/06: Update: Redesigned the Unicode support so that there is a tsk_UTF... routine instead of fs_UTF... 8/11/06: Update: Updated GPT to fully convert UTF16 to UTF8. 8/11/06: Update: There is now only one aux_tools header file to include instead of libauxtools and/or aux_lib, which were nearly identical. 8/16/06: Bug Fix: ntfs_dent_walk could segfault if two consecutive unallocated entries were found that had an MFT entry address of 0. Reported by Robert-Jan Mora. 8/16/06: Update: Changed a lot of the header files and reduced them so that it is easier to use the library and only one header file needs to be included. 8/21/06: Update: mmtools had char * instead of void * for walk callback 8/22/06: Update: Added fs_load_file function that returns a buffer full with the contents of a file. 8/23/06: Update: Upgraded AFFLIB to 1.6.31 and libewf to 20060820-1. 8/25/06: Update: Created printf wrappers so that output is UTF-16 on Windows and UTF-8 on Unix. 8/25/06: Update: Continued port to Windows by starting to use more TCHARS and defining needed macros for the Unix side. 8/25/06: Bug Fix: Fixed crash that could occur because of SDS code in NTFS. (reported by Simson Garfinkel) (BUG: 1546925). 8/25/06: Bug Fix: Fixed crash that could occur because path stack became corrupt with deep directories or corrupt images. (reported by Simson Garfinkel) (BUG: 1546926). 8/25/06: Bug Fix: Fixed infinite loop that could occur when trying to determine size of FAT directory when the FAT has a loop in it. (BUG: 1546929) 8/25/06: Update: Improved FAT checking code to look for '.' and '..' entries when inode value is replaced during dent_walk. 8/29/06: Update: Finished Win32 port and changes to handle UTF-16 vs UTF-8 inputs. 8/29/06: Update: Created a parse_inum function to handle parsing inode addresses from command line. 8/30/06: Update: Made progname a local variable instead of global. 8/31/06: Bug Fix: Fixed a sizeof() error with the memset in fatfs_inode_walk for the sect_alloc buffer. 8/31/06: Update: if mktime in dos2unixtime returns any negative value, then the return value is set to 0. Windows and glibc seem to have different return values. ---------------- VERSION 2.05 -------------- 5/15/06: Bug Fix: Fixed a bug in img_cat that could cause it to go into an infinite loop. 
(BUG: 1489284) 5/16/06: Update: Fixed printf statements in tsk_error.c that caused warning messages for some compilers. Reported by Jason DePriest. 5/17/06: Update: created a union of file system-specific file times in FS_INFO (Patch by Wyatt Banks) 5/22/06: Bug Fix: Updated libewf to 20060520 to fix bug with reported image size. (BUG: 1489287) 5/22/06: Bug Fix: Updated AFFLIB to 1.6.24 so that TSK could compile in CYGWIN. (BUG: 1493013) 5/22/06: Update: Fixed some more printf statements that were causing compile warnings. 5/23/06: Update: Added a file existence check to img_open to make error message more accurate. 5/23/06: Update: Usage messages had extra "Supported image types message". 5/25/06: Update: Added block / page range to fsstat for raw and swapfs. 6/5/06: Update: fixed some typos in the output messages of sigfind (reported by Jelle Smet) 6/9/06: Update: Added HFS+ template to sigfind (Patch by Wyatt Banks) 6/9/06: Update: Added ntfs and HFS template to sigfind. 6/19/06: Update: Begin Windows Visual Studio port 6/22/06: Update: Updated a myflags check in ntfs.c (reported by Wyatt Banks) 6/28/06: Update: Incorporated NTFS compression patch from I.D.E.A.L. 6/28/06: Update: Incorporated NTFS SID patch from I.D.E.A.L. 6/28/06: Bug Fix: A segfault could occur with NTFS if no inode was loaded in the dent_walk code. (Reported by Pope). 7/5/06: Update: Added tsk_error_reset function and updated code to use it. 7/5/06: Update: Added more sanity checks to the DOS partitions code. 7/10/06: Update: Upgraded libewf to version 20060708. 7/10/06: Update: Upgraded AFFLIB to version 1.6.28 7/10/06: Update: added 'list' option to usage message so that file system, image, volume system types are listed only if '-x list' is given. Suggested by kenshin. 7/10/06: Update: Compressed NTFS files use the compression unit size specified in the header. 7/10/06: Update: Added -R flag to icat to suppress recovery warnings and use this flag in sorter to prevent FAT recovery messages from filling up screen. 7/10/06: Update: file_walk functions now return FS_ERR_RECOVERY error codes for most cases if the RECOVERY flag is set -- this allows the errors to be more easily suppressed. 7/12/06: Update: Removed individual libraries and now make a single static libtsk.a library. 7/12/06: Update: Cleaned up top-level Makefile. Use '-C' flag (suggested by kenshin). 7/14/06: Update: Fixed and redesigned some of the new NTFS compression code. Changed variable names. 7/20/06: Update: Fixed an NTFS compression bug if a sub-block was not compressed. 7/21/06: Update: Made NTFS compression code thread friendly. ---------------- VERSION 2.04 -------------- 12/1/05: Bug Fix: Fixed a bug in the verbose output of img_open that would crash if no type or offset was given. Reported and patched by Wyatt Banks. 12/20/05: Bug Fix: An NTFS directory index sanity check used 356 instead of 365 when calculating an upper bound on the times. Reported by Wyatt Banks. 12/23/05: Bug Fix: Two printf statements in istat for NTFS printed to stdout instead of a specific file handle. Reported by Wyatt Banks. 1/22/06: Bug Fix: fsstat, imgstat and dcalc were using a char instead of int for the return value of getopt, which caused some systems to not execute the programs. (internal fix and later reported by Bernhard Reiter) 2/23/06: Update: added support for FreeBSD 6. 2/27/06: Bug Fix: Indirect blocks would nto be found by ifind with UFS and Ext2. Reported by Nelson G. Mejias-Diaz. (BUG: 1440075) 3/9/06: Update: Added AFF image file support. 
3/14/06: Bug Fix: If the first directory entry of a UFS or ExtX block was unallocated, then later entries may not be shown. Reported by John Langezaal. (BUG: 1449655) 4/3/06: Update: Finished the improved error handling. Many internal changes, not many external changes. error() function no longer used and instead tsk_err variables and function are used. This makes the library more powerful. 4/5/06: Update: The byte offset for a volume is now passed to the mm_ and fs_ functions instead of img_open. This allows img_info to be used for multiple volumes at the same time. This required some mm_ changes. 4/5/06: Update: All TSK libraries are written to the lib directory. 4/6/06: Update: Added FS_FLAG_DATA_RES flag to identify data that are resident in ntfs_data_walk (suggested by Michael Cohen). 4/6/06: Update: The partition code (media Management) now checks that a partition starts before the end of the image file. There are currently no checks about the end of the partition though. 4/6/06: Update: The media management code now shows unpartitioned space as such from the end of the last partition to the end of the image file (using the image file size). (Suggested by Wyatt Banks). 4/7/06: Update: New version of ISO9660 code from Wyatt Banks and Crucial Security added and other code updated to allow CDs to be analyzed. 4/7/06: There was a conflict with guessuXX with mmtools and fstools. Renamed to mm_guessXX and fs_guessXX. 4/10/06: Upgraded AFFLIB to 1.5.6 4/12/06: Added version of libewf and support for it in imgtools 4/13/06: Added new img_cat tool to extract raw data from an image format. 4/24/06: Upgraded AFFLIB to 1.5.12 4/24/06: split and raw check if the image is a directory 4/24/06: Updated libewf to 20060423-1 4/26/06: Updated makedefs to work with SunOS 5.10 5/3/06: Added iso9660 patch from Wyatt Banks so that version number is not printed with file name. 5/4/06: Updated error checking in icat, istat, fatfs_dent, and ntfs_dent 5/8/06: Updated libewf to 20060505-1 to fix some gcc 2 compile errors. 5/9/06: Updated AFFLIB to 1.6.18 5/11/06: Cleaned up error handling (removed %m and unused legacy code) 5/11/06: Updated AFFLIB to 1.6.23 ---------------- VERSION 2.03 -------------- 7/26/05: Update: Removed incorrect print_version() statement from fs_tools.h (reported by Jaime Chang) 7/26/05: Update: Renamed libraries to start with "lib" 7/26/05: Update: Removed the logfp variable for verbose statements and instead use only stderr. 8/12/05: Update: If time is 0, then it is put as 00:00:00 instead of the default 1970 or 1980 time. 8/13/05: Update: Added Unicode support for FAT and NTFS (Supported by I.D.E.A.L. Technology Corp). 9/2/05: Update: Added Unicode support for UFS and ExtX. Non-printable ASCII characters are no longer replaced with '^.'. 9/2/05: Update: Improved the directory entry sanity checks for UFS and ExtX. 9/2/05: Update: Upgraded file to version 4.15. 9/2/05: Update: The dent_walk code of all file systems does not abort if a sub-directory is encountered with an error. If it is the top directory explicitly called, then it still gives an error. 9/2/05: Bug Fix: MD5 and SHA-1 values were incorrect under AMD64 systems because the incorrect variable sizes were being used. (reported by: Regis Friend Cassidy. BUG: 1280966) 9/2/05: Update: Changed all licenses in TSK to Common Public License (except those that were already IBM Public License). 9/15/05: Bug Fix: The Unicode names would not be displayed if the FAT short name entry was using code pages. 
The ASCII name check was removed, which may lead to more false positives during inode_walk. 10/05/05: Update: improved the sector size check when the FAT boot sector is read (check for specific values besides just mod 512). 10/12/05: Update: The ASCII name check was added back into FAT, but the check no longer looks for values over 0x80. 10/12/05: Update: The inode_walk function in FAT skips clusters that are allocated to files. This makes it much faster, but it will now not find unallocated directory entries in the slack space of allocated files. 10/13/05: Update: sorter updated to handle unicode in HTML output. ---------------- VERSION 2.02 -------------- 4/27/05: Bug Fix: the sizes of 'id' were not consistent in the front-end and library functions for icat and ffind. Reported by John Ward. 5/16/05: Bug Fix: fls could segfault in FAT if short name did not exist. There was also a bug where the long file name variable (fatfs->lfn_len) was not reset after processing a directory and the next entry could incorrectly get the long name. Reported by Jaime Chang. BUG: 1203673. 5/18/05: Update: Updated makedefs to support Darwin 8 (OS X Tiger) 5/23/05: Bug Fix: ntfs_dent_walk would not always stop when WALK_STOP was returned. This caused some issues with previous versions of ifind. This was fixed. 5/24/05: Bug Fix: Would not compile under Suse because it had header file conflicts for the size of int64_t. Reported by: Andrea Ghirardini. BUG: 1203676 5/25/05: Update: Fixed some memory leaks in fstools (reported by Jaime Chang). 6/13/05: Update: Compiled with g++ to get better warning messages. Fixed many signed versus unsigned comparisons, -1 assignments to unsigned vars, and some other minor internal issues. 6/13/05: Bug Fix: if UFS or FFS found a valid dentry in unallocated space, it could have a documented length that is larger than the remaining unallocated space. This would cause an allocated name to be skipped. BUG: 1210204 Reported by Christopher Betz. 6/13/05: Update: Improved design of all dent code so that there are no more global variables. 6/13/05: Update: Improved design of FAT dent code so that FATFS_INFO does not keep track of long file name information. 6/13/05: Bug Fix: If a cluster in a directory started with a strange dentry, then FAT inode_walk would skip it. The fixis to make sure that all directory sectors are processed. (BUG: 1203669). Reported by Jaime Chang. 6/14/05: Update: Changed design of FS_INODE so that it contains the inode address and the inode_walk action was changed to remove inum as an argument. 6/15/05: Update: Added 'ils -o' back in as 'ils -O' to list open and deleted files. 6/15/05: Update: Added '-m' flag to mactime so that it prints the month as a number instead of its name. 7/2/05: Bug Fix: If an NTFS file did not have a $DATA or $IDX_* attribute, then fls would not print it. The file had no content, but the name should be shown. (BUG: 1231515) (Reported by Fuerst) ---------------- VERSION 2.01 -------------- 3/24/05: Bug Fix: ffind would fail if the directory had two non-printable chars. The handling of non-printable chars was changed to replace with '^.'. (BUG: 1170310) (reported by Brian Baskin) 3/24/05: Bug Fix: icat would not print the output to stdout when split images were used. There was a bug in the image closing process of icat. (BUG: 1170309) (reported by Brian Baskin) 3/24/05: Update: Changed the header files in fstools to make fs_lib.h more self contained. 4/1/05: Bug Fix: Imgtools byte offset with many leading 0s could cause issues. 
(BUG: 1174977) 4/1/05: Update: Removed test check in mmtools/dos.c for valid cluster size because too many partition tables have that as a valid field. Now it checks only OEM name. 4/8/05: Update: Updated usage of 'strtoul' to 'strtoull' for blocks and inodes. ---------------- VERSION 2.00 -------------- 1/6/05: Update: Added '-b' flag to 'mmls' so that sizes can be printed in bytes. Suggested and a patch proposed by Matt Kucenski 1/6/05: Update: Define DADDR_T, INUM_T, OFF_T, PNUM_T as a static size and use those to store values in data structures. Updated print statements as well. 1/6/05: Update: FAT now supports larger images because the inode address space is 64-bits. 1/6/05: Moved guess and get functions to misc from mmtools and fstools. 1/7/05: Update: Added imgtools with support for "raw" and "split" layers. All fstools have been updated. 1/7/05: Update: removed dtime from ils output 1/9/05: Update: FAT code reads in clusters instead of sectors to be faster (suggested by David Collett) 1/9/05: Update: mmtools uses imgtools for split images etc. 1/10/05: Update: Removed usage of global variables when using file_walk internally. 1/10/05: Update: mmls BSD will use the next sector automatically if the wrong one is given instead of giving an error. 1/10/05: Update: Updated file to version 4.12 1/11/05: Update: Added autodetect to file system tools. 1/11/05: Update: Changed names to specify file system type (not OS-based) 1/11/05: Update: Added '-t' option to fsstat to give just the type. 1/11/05: Update: Added autodetect to mmls 1/17/05: Update: Added the 'mmstat' tool that gives the type of volume system. 1/17/05: Update: Now using CVS for local version control - added date stamps to all files. 2/20/05: Bug Fix: ils / istat would go into an infinite loop if the attribute list had an entry with a length of 0. Reported by Angus Marshall (BUG: 1144846) 3/2/05: Update: non-printable letters in ExtX/UFS file names are now replaced by a '.' 3/2/05: Update: Made file system tools more library friendly by making stubs for each application. 3/4/05: Update: Redesigned the diskstat tool and created the disksreset tool to remove the HPA temporarily. 3/4/05: Update: Added imgstat tool that displays image format details 3/7/05: Bug Fix: In fsstat on ExtX, the final group would have an incorrect _percentage_ of free blocks value (although the actual number was correct). Reported by Knut Eckstein. (BUG: 1158620) 3/11/05: Update: Renamed diskstat, disksreset, sstrings, and imgstat to disk_stat, disk_sreset, srch_strings, and img_stat to make the names more clear. 3/13/05: Bug Fix: The verbose output for fatfs_file_walk had an incorrect sector address. Reported by Rudolph Pereira. 3/13/05: Bug Fix: The beta version had compiling problems on FreeBSD because of a naming clash with the new 'fls' functions. (reported by secman) ---------------- VERSION 1.74 -------------- 11/18/04: Bug Fix: FreeBSD 5 would produce incorrect 'icat' output for Ext2/3 & UFS1 images because it used a 64-bit on-disk address. Reported by neutrino neutrino. (BUG: 1068771) 11/30/04: Bug Fix: The makefile in disktools would generate an error on some systems (Cygwin) because of an extra entry. Reported by Vajira Ganepola (BUG: 1076029) ---------------- VERSION 1.73 -------------- 09/09/04: Update: Added journal support for EXT3FS and added jls and jcat tools. 09/13/04: Update: Added the major and minor device numbers to EXTxFS istat.
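The fls and dent_walk entries in the 2.0x sections above all sit on top of the library's directory-walking interface. In the 4.x API that interface is tsk_fs_dir_walk(); the pre-3.0 *_dent_walk names are gone. The sketch below is an fls-style listing against that assumed 4.x interface, not code from this release.

    #include <tsk/libtsk.h>
    #include <cstdio>

    // Print one line per directory entry, fls-style. With the RECURSE flag
    // the library descends into subdirectories itself.
    static TSK_WALK_RET_ENUM
    print_name(TSK_FS_FILE *fs_file, const char *path, void *ptr)
    {
        const char *name = (fs_file->name != NULL) ? fs_file->name->name : "";
        printf("%s%s\n", path, name);
        return TSK_WALK_CONT;
    }

    int list_names(TSK_FS_INFO *fs)
    {
        TSK_FS_DIR_WALK_FLAG_ENUM flags = (TSK_FS_DIR_WALK_FLAG_ENUM)
            (TSK_FS_DIR_WALK_FLAG_ALLOC | TSK_FS_DIR_WALK_FLAG_UNALLOC |
             TSK_FS_DIR_WALK_FLAG_RECURSE);
        // Start at the file system's root directory and walk everything.
        if (tsk_fs_dir_walk(fs, fs->root_inum, flags, print_name, NULL)) {
            tsk_error_print(stderr);
            return 1;
        }
        return 0;
    }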
09/13/04: Update: Added EXTxFS orphan code to 'fsstat' 09/24/04: Update: Fixed incorrect usage of 'ptr' and "" in action of ntfs_dent.c. Did not affect any code, but could have in the future. Reported by Pete Winkler. 09/25/04: Update: Added UFS flags to fsstat 09/26/04: Update: All fragments are printed for indirect block pointer addresses in UFS istat. 09/29/04: Update: Print extended UFS2 attributes in 'istat' 10/07/04: Bug Fix: Changed usage of (int) to (uintptr_t) for pointer arithmetic. Caused issues with Debian Sarge. (BUG: 1049352) - turned out to be from changes made to package version so that it would compile in 64-bit system (BUG: 928278). 10/11/04: Update: Added diskstat to check for HPA on linux systems. 10/13/04: Update: Added root directory location to FAT32 fsstat output 10/17/04: Bug Fix: EXTxFS superblock location would not be printed for images in fsstat that did not have sparse superblok (which is rare) (BUG: 1049355) 10/17/04: Update: Added sigfind tool to find binary signatures. 10/27/04: Bug Fix: NTFS is_clust_alloc returned an error when loading $MFT that had attribute list entry. Now I assume that clusters referred to by the $MFT are allocated until the $MFT is loaded. (BUG: 1055862). 10/28/04: Bug Fix: Check to see if an attribute with the same name exists instead of relying on id only. (ntfs_proc_attrseq) Affects the processing of attribute lists. Reported by Szakacsits Szabolcs, Matt Kucenski, & Gene Meltser (BUG: 1055862) 10/28/04: Update: Removed usage of mylseek in fstools for all systems (Bug: 928278) ---------------- VERSION 1.72 -------------- 07/31/04: Update: Added flag to mft_lookup so that ifind can run in noabort mode and it will not stop when it finds an invalid magic value. 08/01/04: Update: Removed previous change and removed MAGIC check entirely. XP doesn't even care if the Magic is corrupt, so neither does TSK. The update sequence check should find an invalid MFT entry. 08/01/04: Update: Added error message to 'ifind' if none of the search options are given. 08/05/04: Bug Fix: Fixed g_curdirptr recursive error by clearing the value when dent_walk had to abort because a deleted directory could not be recovered. (BUG: 1004329) Reported by epsilon@yahoo.com 08/16/04: Update: Added a sanity check to fatfs.c fat2unixtime to check if the year is > 137 (which is the overflow date for the 32-bit UNIX time). 08/16/04: Update: Added first version of sstrings from binutils-2.15 08/20/04: Bug Fix: Fixed a bug where the group number for block 0 of an EXT2FS file system would report -1. 'dstat' no longer displays value when it is not part of a block group. (BUG: 1013227) 8/24/04: Update: If an attribute list entry is found with an invalid MFT entry address, then it is ignored instead of an error being generated and exiting. 8/26/04: Update: Changed internal design of NTFS to make is_clust_alloc 8/26/04: Update: If an attribute list entry is found with an invalid MFT entry address AND the entry is unallocated, then no error message is printed, it is just ignored or logged in verbose mode. 8/29/04: Update: Added support for 32-bit GID and UID in EXTxFS 8/30/04: Bug Fix: ntfs_dent_walk was adding 24 extra bytes to the size of the index record for the final record processing (calc of list_len) (BUG: 1019321) (reported and debugging help from Matt Kucenski). 8/30/04: Bug Fix: fs_data_lookup was using an id of 0 as a wild card, but 0 is a legit id value and this could cause confusion. 
To solve this, a new FS_FLAG_FILE_NOID flag was added and a new fs_data_lookup_noid function that will not use the id to lookup values. (BUG: 1019690) (reported and debugging help from Matt Kucenski) 8/30/04: Update: modified fs_data_lookup_noid to return unnamed data attribute if that type is requested (instead of just relying on id value in attributes) 8/31/04: Update: Updated file to v4.10, which seems to fix the CYGWIN compile problem. 9/1/04: Update: Added more DOS partition types to mmls (submitted by Matt Kucenski) 9/2/04: Update: Added EXT3FS extended attributes and Posix ACL to istat output. 9/2/04: Update: Added free inode and block counts per group to fsstat for EXT2FS. 9/7/04: Bug Fix: FreeBSD compile error for PRIx printf stuff in mmtools/gpt.c ---------------- VERSION 1.71 -------------- 06/05/04: Update: Added sanity checks in fat to unix time conversion so that invalid times are set to 0. 06/08/04: Bug Fix: Added a type cast when size is assigned in FAT and removed the assignment to a 32-bit signed variable (which was no longer needed). (Bug: 966839) 06/09/04: Bug Fix: Added a type cast to the 'getuX' macros because some compilers were assuming it was signed (Bug: 966839). 06/11/04: Update: Changed NTFS magic check to use the aa55 at the end and fixed the name of the original "magic" value to oemname. The oemname is now printed in fsstat. 06/12/04: Bug Fix: The NTFS serial number was being printed with bytes in the wrong order in the fsstat output. (BUG: 972207) 06/12/04: Update: The begin offset value in index header for NTFS was 16-bits instead of 32-bits. 06/22/04: Update: Created a library for the MD5 and SHA1 functions so that it can be incorporated into other tools. Also renamed some of the indexing tools that hfind uses. 06/23/04: Update: Changed output of 'istat' for NTFS images. Added more data from $STANDARD_INFORMATION. 07/13/04: Update: Changed output of 'istat' for NTFS images again. Moved more data to the $FILE_NAME section and added new data. 07/13/04: Update: Changed code for processing NTFS runs and no longer check for the offset to be 0 in ntfs_make_data_run(). This could have prevented some sparse files from being processed. 07/13/04: Update: Added flags for compressed and encrypted NTFS files. They are not decrypted or uncompressed yet, just identified. They cannot be displayed from 'icat', but the known layout is given in 'istat'. 07/18/04: Bug Fix: Sometimes, 'icat' would report an error about an existing FILLER entry in an NTFS attribute. This was traced to instances when it was run on a non-base file record. There is now a check for that to not show the error. (BUG: 993459) 07/19/04: Bug Fix: A run of -1 may exist for sparse files in non-NT versions of NTFS. Changed check for this. Reported by Matthew Kucenski. (BUG: 994024). 07/24/04: Bug Fix: NTFS attribute names were missing (rarely) on some files because the code assumed they would always be at offset 64 for non-res attributes (Bug: 996981). 07/24/04: Update: Made listing of unallocated NTFS file names less strict. There was a check for file name length versus stream length. 07/24/04: Update: Added $OBJECT_ID output to 'istat' 07/25/04: Update: Added attribute list contents to NTFS 'istat' output 07/25/04: Bug Fix: Not all slack space was being shown with 'dls -s'. It was documented that this occurs, but it is not what would be expected. (BUG: 997800).
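Many of the 1.7x NTFS entries above deal with multiple attributes (and therefore alternate data streams) attached to a single MFT entry. In the current API those show up as TSK_FS_ATTR records on an open TSK_FS_FILE; the sketch below assumes the 4.x calls tsk_fs_file_attr_getsize() and tsk_fs_file_attr_get_idx() and the listed TSK_FS_ATTR fields, and is illustrative only.

    #include <tsk/libtsk.h>
    #include <cstdio>

    // Print the type, id, name, and size of every attribute of one file,
    // similar in spirit to part of istat's output (sketch, not istat code).
    void print_attrs(TSK_FS_FILE *file)
    {
        int cnt = tsk_fs_file_attr_getsize(file);
        for (int i = 0; i < cnt; i++) {
            const TSK_FS_ATTR *attr = tsk_fs_file_attr_get_idx(file, i);
            if (attr == NULL)
                continue;
            printf("type %d, id %d, name %s, size %lld\n",
                attr->type, attr->id,
                (attr->name != NULL) ? attr->name : "N/A",
                (long long) attr->size);
        }
    }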
07/25/04: Update: Changed output format of 'dls -s' so that it sends zeros where the file content was. Therefore the output is now a multiple of the data unit size. Also removed limitation to FAT & NTFS. 07/25/04: Update: 'dcalc' now has the '-s' option to calculate the original location of data from a slack space image (dls -s). (from Chris Betz). 07/26/04: Update: Created the fs_os.h file and adjusted some of the header files for the PRI macros (C99). Created defines for OSes that do not have the macros already defined. 07/26/04: Non-release bug fix: Fixed file record size bug introduced with recent changes. 07/27/04: Update: Added GPT support to mmls. 07/29/04: Update: Added '-p' flag to 'ifind' to find deleted NTFS files that point to the given parent directory. Added '-l and -z' as well. ---------------- VERSION 1.70 -------------- 04/21/04: Update: Changed attribute and mode for FAT 'istat' so that actual FAT attributes are used instead of UNIX translation. 04/21/04: Update: The FAT 'istat' output better handles Long File Name entry 04/21/04: Update: The FAT 'istat' output better handles Volume Label entry 04/21/04: Update: Allowed the FAT volume label entry to be displayed with 'ils' 04/21/04: Update: Allowed the FAT volume label entry to be displayed with 'fls' 04/24/04: Update: 'dstat' on a FAT cluster now shows the cluster address in addition to the sector address. 04/24/04: Update: Added the cluster range to the FAT 'fsstat' output 05/01/04: Update: Improved the FAT version autodetect code. 05/02/04: Update: Removed 'H' flag from 'icat'. 05/02/04: Update: Changed all of the FS_FLAG_XXX variables in the file system tools to constants that are specific to the usage (NAME, DATA, META, FILE). 05/03/04: Update: fatfs_inode_walk now goes by sectors instead of clusters to get more dentries from slack space. 05/03/04: Bug Fix: The allocation status of FAT dentries was set only by the flag and not the allocation status of the cluster it is located in. (BUG: 947112) 05/03/04: Update: Improved comments and variable names in FAT code 05/03/04: Update: Added '-r' flag to 'icat' for deleted file recovery 05/03/04: Update: Added RECOVERY flag to file_walk for deleted file recovery 05/03/04: Update: Added FAT file recovery. 05/03/04: Update: Removed '-H' flag from 'icat'. Default is to display holes. 05/03/04: Update: 'fls -r' will recurse down deleted directories in FAT 05/03/04: Update: 'fsstat' reports FAT clusters that are marked as BAD 05/03/04: Update: 'istat' for FAT now shows recovery clusters for deleted files. 05/04/04: Update: Added output to 'fsstat' for FAT file systems by adding a list of BAD sectors and improving the amount of layout information. I also changed some of the internal variables. 05/08/04: Update: Removed addr_bsize from FS_INFO, moved block_frags to FFS_INFO, modified dcat to output only the data unit size. 05/20/04: Update: Added RECOVERY flag to 'ifind' so that it can find the data units that are allocated to deleted files 05/20/04: Update: Added icat recovery options to 'sorter'. 05/20/04: Update: Improved the naming convention in sorter for the 'ils' dead files. 05/21/04: Update: Added outlook to sorter rules (from David Berger) 05/27/04: Bug Fix: Added to mylseek.c so that it compiles with Fedora Core 2 (Patch by Angus Marshall) (BUG: 961908). 05/27/04: Update: Changed the letter with 'fls -l' for FIFO to 'p' instead of 'f' (reported by Dave Henkewick).
05/28/04: Update: Added '-u' flag to 'dcat' so that the data unit size can be specified for raw, swap, and dls image types. 05/28/04: Update: Changed the size argument of 'dcat' to be number of data units instead of size in bytes (suggestion by Harald Katzer). ---------------- VERSION 1.69 -------------- 03/06/04: Update: Fixed some memory leaks in ext2fs_close. reported by Paul Bakker. 03/10/04: Bug Fix: If the '-s' flag was used with 'icat' on a EXT2FS or FFS file system, then a large amount of extra data came out. Reported by epsion. (BUG: 913874) 03/10/04: Bug Fix: One of the verbose outputs in ext2fs.c was being sent to STDOUT instead of logfp. (BUG: 913875) 04/14/04: Update: Added more data to fsstat output of FAT file system. 04/15/04: Bug Fix: The last sector of a FAT file system may not be analyzed. (BUG: 935976) 04/16/04: Update: Added full support for swap and raw by making the standard files and functions for them instead of the hack in dcat. Suggested by (and initial patch by) Paul Baker. 04/18/04: Update: Changed error messages in EXT2/3FS code to be extXfs. 04/18/04: Update: Updaged to version 4.09 of 'file'. This will help fix some of the problems people have had compiling it under OS X 10.3. 04/18/04: Update: Added compiling support for SFU 3.5 (Microsoft). Patches from an anonymous person. ---------------- VERSION 1.68 -------------- 01/20/04: Bug Fix: FAT times were an hour too fast during daylight savings. Now use mktime() instead of manual calculation. Reported by Randall Shane. (BUG: 880606) 02/01/04: Update: 'hfind -i' now reports the header entry as an invalid entry. The first header row was ignored. 02/20/04: Bug Fix: indirect block pointer blocks would not be identified by the ifind tool. Reported by Knut Eckstein (BUG: 902709) 03/01/04: Update: Added fs->seek_pos check to fs_read_random. ---------------- VERSION 1.67 -------------- 11/15/03: Bug Fix: Added support for OS X 10.3 to src/makedefs. (BUG: 843029) 11/16/03: Bug Fix: Mac partition tables could generate an error if there were VOID-type partitions. (BUG: 843366) 11/21/03: Update: Changed NOABORT messages to verbose messages, so invalid data is not printed during 'ifind' searches. 11/30/03: Bug Fix: icat would not hide the 'holes' if '-h' was given because the _UNALLOC flag was always being passed to file_walk. (reported by Knut Eckstein). (BUG: 851873) 11/30/03: Bug Fix: NTFS data_walk was not using _ALLOC and _UNALLOC flags and other code that called it was not either. (BUG: 851895) 11/30/03: Bug Fix: Not all needed commands were using _UNALLOC when they called file_walk (although for most cases it did not matter because sparse files would not be found in a directory for example). (Bug: 851897) 12/09/03: Bug Fix: FFS and EXT2FS code was using OFF_T type instead of size_t for the size of the file. This could result in a file > 2GB as being a negative size on some systems (BUG: 856957). 12/26/03: Bug Fix: ffind would crash for root directory of FAT image. Added NULL check and added a NULL name to fake root directory entry. (BUG: 871219) 01/05/04: Bug Fix: The clustcnt value for FAT was incorrectly calculated and was too large for FAT12 and FAT16 by 32 sectors. This could produce extra entries in the 'fsstat' output when the FAT is dumped. (BUG: 871220) 01/05/04: Bug Fix: ils, fls, and istat were not printing the full size of files that are > 2GB. 
(reported by Knut Eckstein) (BUG: 871457) 01/05/04: Bug Fix: The EXT2FS and EXT3FS code was not using the i_dir_acl value as the upper 32-bits of regular files that are > 2GB (BUG: 871458) 01/06/04: Mitigation: An error was reported where sorter would error that icat was being passed a '-1' argument. I can't find how that would happen, so I added quotes to all arguments so that the next time it occurs, the error is more useful (BUG: 845840). 01/06/04: Update: Incorporated patch from Charles Seeger so that 'cc' can be used and compile time warnings are fixed with Sun 'cc'. 01/06/04: Update: Upgraded file from v3.41 to v4.07 ---------------- VERSION 1.66 -------------- 09/02/03: Bug Fix: Would not compile under OpenBSD 3 because fs_tools.h & mm_tools was missing a defined statement (reported by Randy - m0th_man) NOTE: Bugs now will have an entry in the Source Forge bug tracking system. 10/13/03: Bug Fix: buffer was not being cleared between uses and length incorrectly set in NTFS resulted in false deleted file names being shown when the '-r' flag was given. The extra entries were from the previous directory. (BUG: 823057) 10/13/03: Bug Fix: The results of 'sorter' varied depending on the version of Perl and the system. If the file output matched more than one, sorter could not guarantee which would match. Therefore, results were different for some files and some machines. 'sorter' now enforces the ordering based on the order they are in the configuration file. The entries at the end of the file have priority over the first entries (generic rules to specific rules). (BUG: 823057) 10/14/03: Update: 'mmls' prints 'MS LVM' with partition type 0x42 now. 10/25/03: Bug Fix: NTFS could have a null pointer crash if the image was very corrupt and $Data was not found for the MFT. 11/10/03: Bug Fix: NTFS 'ffind' would only report the file name and not the attribute name because the type and id were ignored. ffind and ntfs_dent were updated - found during NTFS keyword search test. (Bug: 831579) 11/12/03: Update: added support for Solaris x86 partition tables to 'mmls' 11/12/03: Update: Modified the sparc data structure to add the correct location of the 'sanity' magic value. 11/15/03: Update: Added '-s' flag to 'icat' so that slack space is also displayed. ---------------- VERSION 1.65 -------------- 08/03/03: Bug Fix: 'sorter' now checks for inode values that are too small to avoid 'icat' errors about invalid inode values. 08/19/03: Update: 'raw' is now a valid type for 'dcat'. 08/21/03: Update: mactime and sorter look for perl5.6.0 first. 08/21/03: Update: Removed NSRL support from 'sorter' until a better way to identify the known good and known bad files is found 08/21/03: Bug Fix: The file path replaces < and > with HTML encoding for HTML output (ils names were not being shown) 08/25/03: Update: Added 'nsrl.txt' describing why the NSRL functionality was removed. 08/27/03: Update: Improved code in 'mactime' to reduce warnings when '-w' is used with Perl ('exists' checks on arrays). 08/27/03: Update: Improved code in 'sorter' to reduce warnings when '-w' is used with Perl (inode_int for NTFS). ---------------- VERSION 1.64 -------------- 08/01/03: Docs Fix: The Sun VTOC was documented as Virtual TOC and it should be Volume TOC (Jake @ UMASS). 08/02/03: Bug Fix: Some compilers complained about verbose logging assignment in 'mmls' (Ralf Spenneberg). ---------------- VERSION 1.63 -------------- 06/13/03: Update: Added 'mmtools' directory with 'dos' partitions and 'mmls'.
06/18/03: Update: Updated the documents in the 'doc' directory
06/19/03: Update: Updated error message for EXT3FS magic check
06/27/03: Update: Added slot & table number to mmls
07/08/03: Update: Added mac support to mmtools
07/11/03: Bug Fix: 'sorter' was not processing all unallocated meta data structures because of a regexp error. (reported by Jeff Reava)
07/16/03: Update: Added support for FreeBSD5
07/16/03: Update: Added BSD disk labels to mmtools
07/28/03: Update: Relaxed requirements for DOS directory entries; the wtime can be zero (reported by Adam Uccello).
07/30/03: Update: Added SUN VTOC to mmtools
07/31/03: Update: Added NetBSD support (adam@monkeybyte.org)
08/01/03: Update: Added more sanity checks to FAT so that it would not try to process NTFS images that have the same MAGIC value
---------------- VERSION 1.62 --------------
04/11/03: Bug Fix: 'fsstat' for an FFS file system could report data fragments in the last group that were larger than the maximum fragment
04/11/03: Bug Fix: 'ffs' allows the image to not be a multiple of the block size. A read error occurred when it tried to read the last fragments since a whole block could not be read.
04/15/03: Update: Added debug statements to FAT code.
04/26/03: Update: Added verbose statements to FAT code
04/26/03: Update: Added NOABORT flag to dls -s
04/26/03: Update: Added stderr messages for errors that are not aborted because of NOABORT
05/27/03: Update: Added 'mask' field to FATFS_INFO structure and changed code in fatfs.c to use it.
05/27/03: Update: isdentry now checks the starting cluster to see if it is a valid size.
05/27/03: Bug Fix: Added a sanitizer to 'sorter' to remove invalid chars from the 'file' output and reduce the warnings from Perl.
05/28/03: Bug Fix: Improved sanitize expression in 'sorter'
05/28/03: Update: Added '-d' option to 'mactime' to allow output to be given in comma-delimited format for importing into a spreadsheet or other graphing tool
06/09/03: Update: Added hourly summary / indexing to mactime
06/09/03: Bug Fix: sorter would not allow linux-ext3 fstype
---------------- VERSION 1.61 --------------
02/05/03: Update: Started addition of image thumbnails to sorter
03/05/03: Update: Updated 'file' to version 3.41
03/16/03: Update: Added comments and NULL check to 'ifind'
03/16/03: Bug Fix: Added a valid magic of 0 for MFT entries. This was found in an XP image.
03/26/03: Bug Fix: fls would crash if an inode of 0 and a clock skew were given. Fixed the bug in fls.c (debug help from Josep Homs)
03/26/03: Update: Added more verbose comments to ntfs_dent.c.
03/26/03: Bug Fix: 'ifind' for a path could return a result that was shorter than the requested name (strncmp was used)
03/26/03: Update: Short FAT names can be used in 'ifind -n' and error messages were improved
03/26/03: Bug Fix: A final NTFS Index Buffer was not always processed in ntfs_dent.c, which resulted in files not being shown. This was fixed with debugging help from Matthew Shannon.
03/27/03: Update: Added an 'index.html' for image thumbnails in sorter and added a 'details' link from the thumbnail to the images.html file
03/27/03: Update: 'sorter' can now take a directory inode to start processing
03/27/03: Update: Added '-z' flag when running 'file' in 'sorter' so that compressed file contents are reported
03/27/03: Update: Added '-i' flag to 'mactime' that creates a daily summary of events
03/27/03: Update: Added support for Version 2 of the NSRL in 'hfind'
04/01/03: Update: Added support for Hash Keeper to 'hfind'
04/01/03: Update: Added '-e' flag to 'hfind' for extended info (currently hashkeeper only)
---------------- VERSION 1.60 --------------
10/31/02: Bug Fix: The unmounting status of EXT2FS in the 'fsstat' command was not correct (reported by Stephane Denis).
11/24/02: Bug Fix: The -v argument was not allowed on istat or fls (Michael Stone)
11/24/02: Bug Fix: When doing an 'ifind' on a UNIX fs, it could abort if it looked at an unallocated inode with invalid indirect block pointers. This was fixed by adding a "NOABORT" flag to the walk code and adding error checks in the file system code instead of relying on the fs_io code. (suggested by Michael Stone)
11/26/02: Update: ifind has a '-n' argument that allows one to specify a file name, and it searches to find the meta data structure for it (suggested by William Salusky).
11/26/02: Update: Now that there is a '-n' flag with 'ifind', the '-d' flag was added to specify the data unit address. The old syntax of giving the data_unit at the end is no longer supported.
11/27/02: Update: Added sanity checks on meta data and data unit addresses earlier in the code.
12/12/02: Update: Added additional debug statements to NTFS code
12/19/02: Update: Moved 'hash' directory to 'hashtools'
12/19/02: Update: Started development of 'hfind'
12/31/02: Update: Improved verbose debug statements to show full 64-bit offsets
01/02/03: Update: Finished development of 'hfind' with ability to update for next version of NSRL (which may have a different format)
01/05/03: Bug Fix: FFS and EXT2FS symbolic link destinations were not properly NULL terminated and some extra chars were appended in 'fls' (later reported by Thorsten Zachmann)
01/06/03: Bug Fix: getu64() was not properly masking byte sizes and some data was being lost. This caused incorrect times to be displayed in some NTFS files.
01/06/03: Bug Fix: ifind reported incorrect ownership for some UNIX file systems if the end fragments were allocated to a different file than the first ones were.
01/07/03: Update: Renamed the src/mactime directory to src/timeline.
01/07/03: Update: Updated README and man pages for hfind and sorter
01/12/03: Bug Fix: ntfs_mft_lookup was casting a 64-bit value to a 32-bit variable. This caused MFT Magic errors. Reported and debugged by Keven Murphy
01/12/03: Update: Added verbose argument to 'fls'
01/12/03: Bug Fix: '-V' argument to 'istat' was doing verbose instead of version
01/13/03: Update: Changed static sizes of OFF_T and DADDR_T in Linux version to the actual 'off_t' and 'daddr_t' types
01/23/03: Update: Changed use of strtok_r to strtok in ifind.c so that Mac 10.1 could compile (Dave Goldsmith).
01/28/03: Update: Improved code in 'hfind' and 'sorter' to handle files with spaces in the path (Dave Goldsmith).
---------------- VERSION 1.52 --------------
09/24/02: Bug Fix: Memory leak in ntfs_dent_idxentry(), ntfs_find_file(), and ntfs_dent_walk()
09/24/02: Update: Removal of index sequences for index buffers is now done using upd_off, which will allow for NTFS to move the structure in the future.
09/26/02: Update: Added create time for NTFS / STANDARD_INFO to istat output.
09/26/02: Update: Changed the method by which the NTFS time is converted to UNIX time. Should be more efficient.
10/09/02: Update: dcat error changed.
10/02/02: Update: Includes a Beta version of 'sorter'
---------------- VERSION 1.51 --------------
09/10/02: Bug Fix: Fixed a design bug that would not allow attribute lists in $MFT. This bug would generate an error that complained about an invalid MFT entry in attribute list.
09/10/02: Update: The size of files and directories is now calculated after each time proc_attrseq() is called so that it is more up to date when dealing with attribute lists. The size includes the sizes of all $Data, $IDX_ROOT, and $IDX_ALLOC streams.
09/10/02: Update: The maximum number of MFT entries is now calculated each time an MFT entry is processed while loading the MFT. This allows us to reflect what the maximum possible MFT entry is at that given point based on how many attribute lists have been processed.
09/10/02: Update: Added file version 3.39 to distro (bigger magic files) (Salusky)
09/10/02: Bug Fix: fs_data was wasting memory when it was allocated
09/10/02: Update: Added a fs_data_alloc() function
09/12/02: Bug Fix: Do not give an error if an attribute list of an unallocated file points to an MFT that no longer claims it is a member of the list.
09/12/02: Update: No longer need version to remove update sequence values from on-disk buffers
09/19/02: Bug Fix: Fixed memory leak in ntfs_load_ver()
09/19/02: Bug Fix: Update sequence errors were displayed because of a bug that occurred when an MFT entry crossed a run in $MFT. Only occurred with 512-byte clusters and an odd number of clusters in a run.
09/19/02: Update: New argument to ils, istat, and fls that allows the user to specify a time skew in seconds of the compromised system. Originated from discussion at DFRWS II.
09/19/02: Update: Added '-h' argument to mactime to display header info
---------------- VERSION 1.50 --------------
04/21/02: icat now displays idxroot attribute for NTFS directories
04/21/02: fs_dent_print functions now are passed the FS_DATA structure instead of the extra inode and name strings. (NTFS)
04/21/02: fs_dent_print functions display alternate data stream size instead of the default data size (NTFS)
04/24/02: Fixed bug in istat that displayed too many fragments with ffs images
04/24/02: Fixed bug in istat that did not display sparse files correctly
04/24/02: fsstat of FFS images now identifies the fragments at the beginning of cyl groups as data fragments.
04/26/02: Fixed bug in ext2fs_dent_parse_block that did not advance the directory entry pointer far enough each time
04/26/02: Fixed bug in ext2fs_dent_parse_block that gave an error if a file name was exactly 255 chars
04/29/02: Removed the getX functions from get.c as they are now macros
05/11/02: Added support for lowercase flag in FAT
05/11/02: Added support for sequence values (NTFS)
05/13/02: Added FS_FLAG_META for FAT
05/13/02: Changed ifind so that it looks the block up to identify if it is a meta data block when an inode cannot be found
05/13/02: Added a conditional to ifind so that it handles sparse files better
05/19/02: Changed icat so that the default attribute type is set in the file_walk function
05/20/02: ils and dls now use boundary inode & block values if too large or small are given
05/21/02: istat now displays all NTFS times
05/21/02: Created functions to just display date and time
05/24/02: Moved istat functionality to the specific file system file
05/25/02: Added linux-ext3 flag, but no new features
05/25/02: Added sha1 (so Autopsy can use the NIST SW Database)
05/26/02: Fixed bug with FAT that did not return all slack space on file_walk
05/26/02: Added '-s' flag to dls to extract slack space of FAT and NTFS
06/07/02: Fixed _timezone variable so correct times are shown in CYGWIN
06/11/02: *_copy_inode now sets the flags for the inode
06/11/02: Fixed bug in mactimes that displayed a duplicate entry with time because of header entries in body file
06/12/02: Added ntfs.README doc
06/16/02: Added a comment to file Makefile to make it easier to compile for an IR CD.
06/18/02: Fixed NTFS bug that showed ADS when only deleted files were supposed to be shown (when ADS in directory)
06/19/02: Added the day of the week to the mactime output (Tan)
07/09/02: Fixed bug that added extra chars to end of symlink destination
07/17/02: 1.50 Released
---------------- VERSION 1.00 --------------
- Integrated TCT-1.09 and TCTUTILs-1.01
- Fixed bug in bcat if size is not given with type of swap.
- Added platform independence by including the structures of each file system type
- Added flags for large file support under linux
- blockcalc was off by 1 if calculated using the raw block number and not the one that lazarus spits out (which starts at 1)
- Changed the inode_walk and block_walk functions slightly to return a value so that a walk can be ended in the middle of it.
- FAT support added
- Improved ifind to better handle fragments
- '-z' flag to fls and istat now uses the time zone string instead of integer value.
- no longer prepend / in _dent
- verify that '-m' directory in fls ends with a '/'
- identify the destination of sym links
- fsstat tool added
- fixed caching bug with FAT12 when the value overlapped cache entries
- added mactime
- removed the value in fls when printing mac format (inode is now printed in mactime)
- renamed src/misc directory to src/hash (it only has md5 and will have sha)
- renamed aux directory to misc (Windows doesn't allow aux as a name ??)
- Added support for Cygwin
- Use the flags in super block of EXT2FS to identify v1 or v2
- removed file system types of linux1 and linux2 and linux
- added file system type of linux-ext2 (as ext3 is becoming more popular)
- bug in file command that reported seek error for object files and STDIN
sleuthkit-4.2.0/packages/000755 000765 000024 00000000000 12576326344 016115 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/README.md000644 000765 000024 00000017562 12576320677 015624 0ustar00carrierstaff000000 000000
# [The Sleuth Kit](http://www.sleuthkit.org/sleuthkit) README File

## INTRODUCTION

The Sleuth Kit is an open source forensic toolkit for analyzing Microsoft and UNIX file systems and disks. The Sleuth Kit enables investigators to identify and recover evidence from images acquired during incident response or from live systems. The Sleuth Kit is open source, which allows investigators to verify the actions of the tool or customize it to specific needs.

The Sleuth Kit uses code from the file system analysis tools of The Coroner's Toolkit (TCT) by Wietse Venema and Dan Farmer. The TCT code was modified for platform independence. In addition, support was added for the NTFS (see docs/ntfs.README) and FAT (see docs/fat.README) file systems. Previously, The Sleuth Kit was called The @stake Sleuth Kit (TASK). The Sleuth Kit is now independent of any commercial or academic organizations.

It is recommended that these command line tools be used with the Autopsy Forensic Browser. Autopsy (http://www.sleuthkit.org/autopsy) is a graphical interface to the tools of The Sleuth Kit and automates many of the procedures and provides features such as image searching and MD5 image integrity checks.

As with any investigation tool, any results found with The Sleuth Kit should be recreated with a second tool to verify the data.

## OVERVIEW

The Sleuth Kit allows one to analyze a disk or file system image created by 'dd', or a similar application that creates a raw image. These tools are low-level and each performs a single task. When used together, they can perform a full analysis. For a more detailed description of these tools, refer to docs/filesystem.README. The tools are briefly described in a file system layered approach. Each tool name begins with a letter that is assigned to the layer.

### File System Layer:

A disk contains one or more partitions (or slices). Each of these partitions contains a file system. Examples of file systems include the Berkeley Fast File System (FFS), Extended 2 File System (EXT2FS), File Allocation Table (FAT), and New Technologies File System (NTFS).

The fsstat tool displays file system details in an ASCII format. Examples of data in this display include volume name, last mounting time, and the details about each "group" in UNIX file systems.

### Content Layer (block):

The content layer of a file system contains the actual file content, or data. Data is stored in large chunks, with names such as blocks, fragments, and clusters. All tools in this layer begin with the letters 'blk'.

The blkcat tool can be used to display the contents of a specific unit of the file system (similar to what 'dd' can do with a few arguments). The unit size is file system dependent. The 'blkls' tool displays the contents of all unallocated units of a file system, resulting in a stream of bytes of deleted content. The output can be searched for deleted file content. The 'blkcalc' program allows one to identify the unit location in the original image of a unit in the 'blkls' generated image.
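
For illustration, a minimal sketch of how these content layer tools can be combined on a hypothetical raw image named 'image.dd' (the unit addresses here are made up):

```
# Display the contents of data unit 500 of the file system in image.dd
blkcat image.dd 500

# Extract all unallocated units into a separate file for later searching
blkls image.dd > image.blkls

# Map unit 120 of the blkls output back to its address in the original image
blkcalc -u 120 image.dd
```
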
A new feature of The Sleuth Kit from TCT is the '-l' argument to 'blkls' (or 'unrm' in TCT). This argument lists the details for data units, similar to the 'ils' command. The 'blkstat' tool displays the statistics of a specific data unit (including allocation status and group number).

### Metadata Layer (inode):

The metadata layer describes a file or directory. This layer contains descriptive data such as dates and size as well as the addresses of the data units. This layer describes the file in terms that the computer can process efficiently. The structures that the data is stored in have names such as inode and directory entry. All tools in this layer begin with an 'i'.

The 'ils' program lists some values of the metadata structures. By default, it will only list the unallocated ones. The 'istat' tool displays metadata information in an ASCII format about a specific structure. New to The Sleuth Kit is that 'istat' will display the destination of symbolic links. The 'icat' function displays the contents of the data units allocated to the metadata structure (similar to the UNIX cat(1) command). The 'ifind' tool will identify which metadata structure has allocated a given content unit or file name.

Refer to the ntfs.README doc for information on addressing metadata attributes in NTFS.

### Human Interface Layer (file):

The human interface layer allows one to interact with files in a manner that is more convenient than directly with the metadata layer. In some operating systems there are separate structures for the metadata and human interface layers while others combine them. All tools in this layer begin with the letter 'f'.

The 'fls' program lists file and directory names. This tool will display the names of deleted files as well. The 'ffind' program will identify the name of the file that has allocated a given metadata structure. With some file systems, deleted files will be identified.

#### Time Line Generation

Time lines are useful to quickly get a picture of file activity. Using The Sleuth Kit, a time line of file MAC times can be easily made. The mactime (TCT) program takes as input the 'body' file that was generated by fls and ils. To get data on allocated and unallocated file names, use 'fls -rm dir', and for unallocated inodes use 'ils -m'. Note that the behavior of these tools is different than in TCT. For more information, refer to docs/mac.README.

#### Hash Databases

Hash databases are used to quickly identify if a file is known. The MD5 or SHA-1 hash of a file is taken and a database is used to identify if it has been seen before. This allows identification to occur even if a file has been renamed.

The Sleuth Kit includes the 'md5' and 'sha1' tools to generate hashes of files and other data. Also included is the 'hfind' tool. The 'hfind' tool allows one to create an index of a hash database and perform quick lookups using a binary search algorithm. The 'hfind' tool can perform lookups on the NIST National Software Reference Library (NSRL) (www.nsrl.nist.gov) and files created from the 'md5' or 'md5sum' command. Refer to the docs/hfind.README file for more details.

#### File Type Categories

Different types of files typically have different internal structure. The 'file' command comes with most versions of UNIX and a copy is also distributed with The Sleuth Kit. This is used to identify the type of file or other data regardless of its name and extension. It can even be used on a given data unit to help identify what file used that unit for storage.
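
As a sketch of that idea (the image name and data unit address are hypothetical; 'file -' reads from standard input):

```
# Pipe the contents of data unit 2048 into 'file' to guess what kind of data it holds
blkcat image.dd 2048 | file -
```
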
Note that the 'file' command typically uses data in the first bytes of a file, so it may not be able to identify a file type based on the middle blocks or clusters.

The 'sorter' program in The Sleuth Kit will use other Sleuth Kit tools to sort the files in a file system image into categories. The categories are based on rule sets in configuration files. The 'sorter' tool will also use hash databases to flag known bad files and ignore known good files. Refer to the 'docs/sorter.README' file for more details.

## LICENSE

The file system tools (in the src/fstools directory) are released under the IBM open source license and Common Public License; both are located in the license directory. The modifications to 'mactime' from the original 'mactime' in TCT and 'mac-daddy' are released under the Common Public License. Other tools in the src directory are either Common Public License or the GNU Public License.

## INSTALL

For installation instructions, refer to the INSTALL document.

## OTHER DOCS

The 'docs' directory contains documents that describe the provided tools in more detail. The Sleuth Kit Informer is a newsletter that contains new documentation and articles.

> www.sleuthkit.org/informer/

## MAILING LIST

Mailing lists exist on SourceForge: one for users and a low-volume announcements list.

> http://sourceforge.net/mail/?group_id=55685

Brian Carrier
carrier sleuthkit org
sleuthkit-4.2.0/README_win32.txt000644 000765 000024 00000003351 12576320677 017064 0ustar00carrierstaff000000 000000
The Sleuth Kit Win32 README File
http://www.sleuthkit.org/sleuthkit
Last Modified: Jan 2014
====================================================================

The Sleuth Kit (TSK) runs on Windows. If you simply want the executables, you can download them from the www.sleuthkit.org website.

If you want to build your own executables, you have two options.

1) Microsoft Visual Studio. The VS solution file is in the win32 directory. Refer to the win32\BUILDING.txt file for details on building the 32-bit and 64-bit versions.

2) mingw32. See below for more details.

---------------------------------------------------------------
MINGW32

If you're using mingw32 on Linux, simply give the "--host=i586-mingw32msvc" argument when running the './configure' script and use 'make' to compile. If you're using mingw32 on Windows, './configure' and 'make' will work directly.

Note that to compile the Java bindings you will need to have a JDK installed, and by default the Oracle JDK on Windows is installed in a path such as C:\Program Files\Java\jdk1.6.0_16\. GNU autotools (which are used if you do a mingw32 compile, but not a Visual Studio compile) do not handle paths containing spaces, so you will need to copy the JDK to a directory without spaces in the name, such as C:\jdk1.6.0_16\, then add C:\jdk1.6.0_16\bin to $PATH before running './configure'

Note also that libtool may fail on mingw32 on Windows if C:\Windows\system32 is on $PATH before /usr/bin. The fix is to have the C:\Windows directories at the _end_ of your mingw $PATH.
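
For reference, the Linux cross-compile described above amounts to roughly the following (a sketch; the host triplet must match the mingw32 toolchain that is actually installed):

    ./configure --host=i586-mingw32msvc
    make
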
------------------------------------------------------------------- carrier sleuthkit org Brian Carrier sleuthkit-4.2.0/samples/000755 000765 000024 00000000000 12576326351 016001 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/tests/000755 000765 000024 00000000000 12576326351 015477 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/tools/000755 000765 000024 00000000000 12576326351 015475 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/tsk/000755 000765 000024 00000000000 12576326346 015142 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/unit_tests/000755 000765 000024 00000000000 12576326352 016537 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/win32/000755 000765 000024 00000000000 12576326344 015301 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/win32/blkcalc/000755 000765 000024 00000000000 12576326344 016674 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/win32/blkcat/000755 000765 000024 00000000000 12576326344 016541 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/win32/blkls/000755 000765 000024 00000000000 12576326344 016410 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/win32/blkstat/000755 000765 000024 00000000000 12576326344 016745 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/win32/BUILDING.txt000755 000765 000024 00000005755 12576320700 017244 0ustar00carrierstaff000000 000000 This file describes how to build TSK using Visual Studio (see README_win32.txt for instructions on building the win32 libraries and executables from Linux). If you do not have a copy of Visual Studio, you can use the free Express Edition: http://www.microsoft.com/express/vc/ Visual Studio 2010 Express cannot make 64-bit versions of the executables. To do that, you must download and install an additional SDK: http://msdn.microsoft.com/en-us/windowsserver/bb980924.aspx The SDK installation may fail, particularly if Visual Studio 2010 Service Pack 1 has been installed. In this case, re-run the SDK installer but un-check the compilers box. After the SDK is installed, run the following fix to get the 64 bit compiler: https://www.microsoft.com/en-us/download/details.aspx?id=4422 The Visual Studio Solution file has three build targets: Debug, Debug_NoLibs, and Release. Debug and Release require that libewf exists (to provide support for E01 image files) and that zlib exists (to provide support for HFS+ compressed data). Debug_NoLibs does not require libewf or zlib and you should be able to compile Debug_NoLibs without any additional setup. The steps below outline the process required to compile the Debug and Release targets. 1) Download libewf-20130128 (or later). The official releases are from: http://sourceforge.net/projects/libewf/ If you want to build 64-bit libraries though, download a version that we've upgraded: https://github.com/sleuthkit/libewf_64bit 2) Open archive file and follow the README instructions in libewf to build libewf_dll (at the time of this writing, that includes downloading the zlib dll). Note that TSK will use only the Release version of libewf_dll. Later steps also depend on the zlib dll being built inside of libewf. Note that libewf will need to be converted to Visual Studio 2010 and be upgraded to support a 64-bit build. 3) Set the LIBEWF_HOME environment variable to point to the libewf folder that you created and built in step 2. 4) If you want to build libtsk_jni for the Java JNI bindings, then set the JDK_HOME environment variable to point to the top directory of your Java SDK. 
5) Open the TSK Visual Studio Solution file, tsk-win.sln, in the win32 directory. 6) Compile a Debug, Debug_NoLibs, or Release version of the libraries and executables. The resulting libraries and executables on a 32-bit build will be put in win32/Debug, win32/Debug_NoLibs, or win32/Release as appropriate. A 64-bit build will put them into the win32/x64 folders. You can change the type of build using the pulldown in Visual Studio and switching between Win32 and x64. 7) Note that the libraries and executables will depend on the libewf and zlib dll files (which are copied to the TSK build directories). Refer to the API docs at http://sleuthkit.org/sleuthkit/docs/api-docs/ for details on how to use the library in an application. ------------------------------------------------------------------- carrier sleuthkit org Brian Carrier sleuthkit-4.2.0/win32/callback-cpp-sample/000755 000765 000024 00000000000 12576326344 021074 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/win32/callback-sample/000755 000765 000024 00000000000 12576326344 020314 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/win32/docs/000755 000765 000024 00000000000 12576326344 016231 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/win32/fcat/000755 000765 000024 00000000000 12576326344 016216 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/win32/ffind/000755 000765 000024 00000000000 12576326344 016367 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/win32/fls/000755 000765 000024 00000000000 12576326344 016065 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/win32/fsstat/000755 000765 000024 00000000000 12576326344 016605 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/win32/hfind/000755 000765 000024 00000000000 12576326344 016371 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/win32/icat/000755 000765 000024 00000000000 12576326344 016221 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/win32/ifind/000755 000765 000024 00000000000 12576326344 016372 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/win32/ils/000755 000765 000024 00000000000 12576326344 016070 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/win32/img_cat/000755 000765 000024 00000000000 12576326344 016704 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/win32/img_stat/000755 000765 000024 00000000000 12576326344 017110 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/win32/istat/000755 000765 000024 00000000000 12576326344 016425 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/win32/jcat/000755 000765 000024 00000000000 12576326344 016222 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/win32/jls/000755 000765 000024 00000000000 12576326344 016071 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/win32/libtsk/000755 000765 000024 00000000000 12576326344 016571 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/win32/mmcat/000755 000765 000024 00000000000 12576326344 016402 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/win32/mmls/000755 000765 000024 00000000000 12576326344 016251 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/win32/mmstat/000755 000765 000024 00000000000 12576326344 016606 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/win32/NOTES.txt000644 000765 000024 00000000766 12576320700 016731 0ustar00carrierstaff000000 000000 This document contains some notes about how the projects are configured, since they are getting more complex with the post-build events. ZLIB: - ZLIB is needed by libewf and the hfs code. - The ZLIB that is used by libewf is also used by the hfs code. - The current version of libewf (201203...) 
compiles zlib and the lib and dll are in the libewf release/debug folder - libtsk copies the zlib and libewf dlls to the TSK folders. - The individual tools use the libewf folder to find the .lib file. sleuthkit-4.2.0/win32/posix-cpp-sample/000755 000765 000024 00000000000 12576326344 020502 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/win32/posix-sample/000755 000765 000024 00000000000 12576326344 017722 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/win32/tsk-win.sln000644 000765 000024 00000130435 12576320700 017407 0ustar00carrierstaff000000 000000  Microsoft Visual Studio Solution File, Format Version 11.00 # Visual C++ Express 2010 Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "img_stat", "img_stat\img_stat.vcxproj", "{48F52EA8-A5D1-4BF4-B774-6ECFCB0CE3C9}" EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mmls", "mmls\mmls.vcxproj", "{712DD83B-786E-485E-83C7-7197DD851B78}" EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "img_cat", "img_cat\img_cat.vcxproj", "{671D843F-4DFA-4CB8-8BC9-D44E7F4ECF1E}" EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mmstat", "mmstat\mmstat.vcxproj", "{5D75FBFB-539A-4014-ACEB-520BB16F5BFC}" EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "fls", "fls\fls.vcxproj", "{58DA1042-AC19-4779-AC1A-AA8EEB3A4524}" EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "fsstat", "fsstat\fsstat.vcxproj", "{D1E6567A-4F65-4832-8018-D33B3CB4692B}" EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "blkcat", "blkcat\blkcat.vcxproj", "{A2BEA467-A4CC-4FA6-9C74-587498E35467}" EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "blkstat", "blkstat\blkstat.vcxproj", "{FBB66156-9A54-4713-A801-C507BE7A3AE3}" EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "blkcalc", "blkcalc\blkcalc.vcxproj", "{46B82840-9832-466F-8568-132407CA3853}" EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "blkls", "blkls\blkls.vcxproj", "{48D98A0A-BF9C-4D7E-9AF8-E4CAE8437997}" EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "ffind", "ffind\ffind.vcxproj", "{7C132953-1700-42FF-9F61-A814C9F2C758}" EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "icat", "icat\icat.vcxproj", "{38D89022-2C83-4436-A333-375A2E3E7BB0}" EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "ifind", "ifind\ifind.vcxproj", "{52251CB2-65A3-421B-9CB4-7DAC13BB3758}" EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "ils", "ils\ils.vcxproj", "{62C97F5E-64DD-4623-9563-747C4C173348}" EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "istat", "istat\istat.vcxproj", "{D7643AD7-8518-4B3E-8F3F-F11258D9540E}" EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "jcat", "jcat\jcat.vcxproj", "{44A003BE-400D-4434-AFED-64D8E3B448D9}" EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "jls", "jls\jls.vcxproj", "{C52F935E-1FD2-443C-A181-27908DAB3BC8}" EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "hfind", "hfind\hfind.vcxproj", "{0B127AE3-0C18-4EEF-AB20-A0693E6AA822}" EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mmcat", "mmcat\mmcat.vcxproj", "{A15F1E4F-951A-403E-B746-2A6D63D9C416}" EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "callback-sample", "callback-sample\callback-sample.vcxproj", "{6CE3D593-E90D-4CC1-A66B-694AC909F6B8}" EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "posix-sample", "posix-sample\posix-sample.vcxproj", 
"{1BA0B9E8-F135-494F-9CF5-86427C1F6E41}" EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "tsk_recover", "tsk_recover\tsk_recover.vcxproj", "{06D707E5-68FF-4FC4-AFD0-C84584E32F47}" EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "tsk_loaddb", "tsk_loaddb\tsk_loaddb.vcxproj", "{96AFC6D4-A3DC-44D4-8F55-F74E1D21798C}" EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "tsk_comparedir", "tsk_comparedir\tsk_compare.vcxproj", "{8EE881F4-78DC-49C7-8845-E842358AC0FA}" EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "callback-cpp-sample", "callback-cpp-sample\callback-cpp-sample.vcxproj", "{3B32F1BE-9686-4DC9-8197-F734D146E9F8}" EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "posix-cpp-sample", "posix-cpp-sample\posix-cpp-sample.vcxproj", "{5594DC0E-191C-4F2A-83FE-97F53A9C1222}" EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "tsk_gettimes", "tsk_gettimes\tsk_gettimes.vcxproj", "{11A8927C-F971-4104-A286-5DC11C25E2EC}" EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libtsk_jni", "tsk_jni\tsk_jni.vcxproj", "{62D88133-09F6-4E13-B39F-36FCEFBE4FAF}" EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libtsk", "libtsk\libtsk.vcxproj", "{76EFC06C-1F64-4478-ABE8-79832716B393}" EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "fcat", "fcat\fcat.vcxproj", "{E4A40368-152D-4D54-9E2E-4B140212F98F}" EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug_NoLibs|Win32 = Debug_NoLibs|Win32 Debug_NoLibs|x64 = Debug_NoLibs|x64 Debug|Win32 = Debug|Win32 Debug|x64 = Debug|x64 Release_NoLibs|Win32 = Release_NoLibs|Win32 Release_NoLibs|x64 = Release_NoLibs|x64 Release|Win32 = Release|Win32 Release|x64 = Release|x64 EndGlobalSection GlobalSection(ProjectConfigurationPlatforms) = postSolution {48F52EA8-A5D1-4BF4-B774-6ECFCB0CE3C9}.Debug_NoLibs|Win32.ActiveCfg = Debug_NoLibs|Win32 {48F52EA8-A5D1-4BF4-B774-6ECFCB0CE3C9}.Debug_NoLibs|Win32.Build.0 = Debug_NoLibs|Win32 {48F52EA8-A5D1-4BF4-B774-6ECFCB0CE3C9}.Debug_NoLibs|x64.ActiveCfg = Debug_NoLibs|x64 {48F52EA8-A5D1-4BF4-B774-6ECFCB0CE3C9}.Debug_NoLibs|x64.Build.0 = Debug_NoLibs|x64 {48F52EA8-A5D1-4BF4-B774-6ECFCB0CE3C9}.Debug|Win32.ActiveCfg = Debug|Win32 {48F52EA8-A5D1-4BF4-B774-6ECFCB0CE3C9}.Debug|Win32.Build.0 = Debug|Win32 {48F52EA8-A5D1-4BF4-B774-6ECFCB0CE3C9}.Debug|x64.ActiveCfg = Debug|x64 {48F52EA8-A5D1-4BF4-B774-6ECFCB0CE3C9}.Debug|x64.Build.0 = Debug|x64 {48F52EA8-A5D1-4BF4-B774-6ECFCB0CE3C9}.Release_NoLibs|Win32.ActiveCfg = Release_NoLibs|Win32 {48F52EA8-A5D1-4BF4-B774-6ECFCB0CE3C9}.Release_NoLibs|Win32.Build.0 = Release_NoLibs|Win32 {48F52EA8-A5D1-4BF4-B774-6ECFCB0CE3C9}.Release_NoLibs|x64.ActiveCfg = Release_NoLibs|x64 {48F52EA8-A5D1-4BF4-B774-6ECFCB0CE3C9}.Release_NoLibs|x64.Build.0 = Release_NoLibs|x64 {48F52EA8-A5D1-4BF4-B774-6ECFCB0CE3C9}.Release|Win32.ActiveCfg = Release|Win32 {48F52EA8-A5D1-4BF4-B774-6ECFCB0CE3C9}.Release|Win32.Build.0 = Release|Win32 {48F52EA8-A5D1-4BF4-B774-6ECFCB0CE3C9}.Release|x64.ActiveCfg = Release|x64 {48F52EA8-A5D1-4BF4-B774-6ECFCB0CE3C9}.Release|x64.Build.0 = Release|x64 {712DD83B-786E-485E-83C7-7197DD851B78}.Debug_NoLibs|Win32.ActiveCfg = Debug_NoLibs|Win32 {712DD83B-786E-485E-83C7-7197DD851B78}.Debug_NoLibs|Win32.Build.0 = Debug_NoLibs|Win32 {712DD83B-786E-485E-83C7-7197DD851B78}.Debug_NoLibs|x64.ActiveCfg = Debug_NoLibs|x64 {712DD83B-786E-485E-83C7-7197DD851B78}.Debug_NoLibs|x64.Build.0 = Debug_NoLibs|x64 {712DD83B-786E-485E-83C7-7197DD851B78}.Debug|Win32.ActiveCfg = 
Debug|Win32 {712DD83B-786E-485E-83C7-7197DD851B78}.Debug|Win32.Build.0 = Debug|Win32 {712DD83B-786E-485E-83C7-7197DD851B78}.Debug|x64.ActiveCfg = Debug|x64 {712DD83B-786E-485E-83C7-7197DD851B78}.Debug|x64.Build.0 = Debug|x64 {712DD83B-786E-485E-83C7-7197DD851B78}.Release_NoLibs|Win32.ActiveCfg = Release_NoLibs|Win32 {712DD83B-786E-485E-83C7-7197DD851B78}.Release_NoLibs|Win32.Build.0 = Release_NoLibs|Win32 {712DD83B-786E-485E-83C7-7197DD851B78}.Release_NoLibs|x64.ActiveCfg = Release_NoLibs|x64 {712DD83B-786E-485E-83C7-7197DD851B78}.Release_NoLibs|x64.Build.0 = Release_NoLibs|x64 {712DD83B-786E-485E-83C7-7197DD851B78}.Release|Win32.ActiveCfg = Release|Win32 {712DD83B-786E-485E-83C7-7197DD851B78}.Release|Win32.Build.0 = Release|Win32 {712DD83B-786E-485E-83C7-7197DD851B78}.Release|x64.ActiveCfg = Release|x64 {712DD83B-786E-485E-83C7-7197DD851B78}.Release|x64.Build.0 = Release|x64 {671D843F-4DFA-4CB8-8BC9-D44E7F4ECF1E}.Debug_NoLibs|Win32.ActiveCfg = Debug_NoLibs|Win32 {671D843F-4DFA-4CB8-8BC9-D44E7F4ECF1E}.Debug_NoLibs|Win32.Build.0 = Debug_NoLibs|Win32 {671D843F-4DFA-4CB8-8BC9-D44E7F4ECF1E}.Debug_NoLibs|x64.ActiveCfg = Debug_NoLibs|x64 {671D843F-4DFA-4CB8-8BC9-D44E7F4ECF1E}.Debug_NoLibs|x64.Build.0 = Debug_NoLibs|x64 {671D843F-4DFA-4CB8-8BC9-D44E7F4ECF1E}.Debug|Win32.ActiveCfg = Debug|Win32 {671D843F-4DFA-4CB8-8BC9-D44E7F4ECF1E}.Debug|Win32.Build.0 = Debug|Win32 {671D843F-4DFA-4CB8-8BC9-D44E7F4ECF1E}.Debug|x64.ActiveCfg = Debug|x64 {671D843F-4DFA-4CB8-8BC9-D44E7F4ECF1E}.Debug|x64.Build.0 = Debug|x64 {671D843F-4DFA-4CB8-8BC9-D44E7F4ECF1E}.Release_NoLibs|Win32.ActiveCfg = Release_NoLibs|Win32 {671D843F-4DFA-4CB8-8BC9-D44E7F4ECF1E}.Release_NoLibs|Win32.Build.0 = Release_NoLibs|Win32 {671D843F-4DFA-4CB8-8BC9-D44E7F4ECF1E}.Release_NoLibs|x64.ActiveCfg = Release_NoLibs|x64 {671D843F-4DFA-4CB8-8BC9-D44E7F4ECF1E}.Release_NoLibs|x64.Build.0 = Release_NoLibs|x64 {671D843F-4DFA-4CB8-8BC9-D44E7F4ECF1E}.Release|Win32.ActiveCfg = Release|Win32 {671D843F-4DFA-4CB8-8BC9-D44E7F4ECF1E}.Release|Win32.Build.0 = Release|Win32 {671D843F-4DFA-4CB8-8BC9-D44E7F4ECF1E}.Release|x64.ActiveCfg = Release|x64 {671D843F-4DFA-4CB8-8BC9-D44E7F4ECF1E}.Release|x64.Build.0 = Release|x64 {5D75FBFB-539A-4014-ACEB-520BB16F5BFC}.Debug_NoLibs|Win32.ActiveCfg = Debug_NoLibs|Win32 {5D75FBFB-539A-4014-ACEB-520BB16F5BFC}.Debug_NoLibs|Win32.Build.0 = Debug_NoLibs|Win32 {5D75FBFB-539A-4014-ACEB-520BB16F5BFC}.Debug_NoLibs|x64.ActiveCfg = Debug_NoLibs|x64 {5D75FBFB-539A-4014-ACEB-520BB16F5BFC}.Debug_NoLibs|x64.Build.0 = Debug_NoLibs|x64 {5D75FBFB-539A-4014-ACEB-520BB16F5BFC}.Debug|Win32.ActiveCfg = Debug|Win32 {5D75FBFB-539A-4014-ACEB-520BB16F5BFC}.Debug|Win32.Build.0 = Debug|Win32 {5D75FBFB-539A-4014-ACEB-520BB16F5BFC}.Debug|x64.ActiveCfg = Debug|x64 {5D75FBFB-539A-4014-ACEB-520BB16F5BFC}.Debug|x64.Build.0 = Debug|x64 {5D75FBFB-539A-4014-ACEB-520BB16F5BFC}.Release_NoLibs|Win32.ActiveCfg = Release_NoLibs|Win32 {5D75FBFB-539A-4014-ACEB-520BB16F5BFC}.Release_NoLibs|Win32.Build.0 = Release_NoLibs|Win32 {5D75FBFB-539A-4014-ACEB-520BB16F5BFC}.Release_NoLibs|x64.ActiveCfg = Release_NoLibs|x64 {5D75FBFB-539A-4014-ACEB-520BB16F5BFC}.Release_NoLibs|x64.Build.0 = Release_NoLibs|x64 {5D75FBFB-539A-4014-ACEB-520BB16F5BFC}.Release|Win32.ActiveCfg = Release|Win32 {5D75FBFB-539A-4014-ACEB-520BB16F5BFC}.Release|Win32.Build.0 = Release|Win32 {5D75FBFB-539A-4014-ACEB-520BB16F5BFC}.Release|x64.ActiveCfg = Release|x64 {5D75FBFB-539A-4014-ACEB-520BB16F5BFC}.Release|x64.Build.0 = Release|x64 {58DA1042-AC19-4779-AC1A-AA8EEB3A4524}.Debug_NoLibs|Win32.ActiveCfg = 
Debug_NoLibs|Win32 {58DA1042-AC19-4779-AC1A-AA8EEB3A4524}.Debug_NoLibs|Win32.Build.0 = Debug_NoLibs|Win32 {58DA1042-AC19-4779-AC1A-AA8EEB3A4524}.Debug_NoLibs|x64.ActiveCfg = Debug_NoLibs|x64 {58DA1042-AC19-4779-AC1A-AA8EEB3A4524}.Debug_NoLibs|x64.Build.0 = Debug_NoLibs|x64 {58DA1042-AC19-4779-AC1A-AA8EEB3A4524}.Debug|Win32.ActiveCfg = Debug|Win32 {58DA1042-AC19-4779-AC1A-AA8EEB3A4524}.Debug|Win32.Build.0 = Debug|Win32 {58DA1042-AC19-4779-AC1A-AA8EEB3A4524}.Debug|x64.ActiveCfg = Debug|x64 {58DA1042-AC19-4779-AC1A-AA8EEB3A4524}.Debug|x64.Build.0 = Debug|x64 {58DA1042-AC19-4779-AC1A-AA8EEB3A4524}.Release_NoLibs|Win32.ActiveCfg = Release_NoLibs|Win32 {58DA1042-AC19-4779-AC1A-AA8EEB3A4524}.Release_NoLibs|Win32.Build.0 = Release_NoLibs|Win32 {58DA1042-AC19-4779-AC1A-AA8EEB3A4524}.Release_NoLibs|x64.ActiveCfg = Release_NoLibs|x64 {58DA1042-AC19-4779-AC1A-AA8EEB3A4524}.Release_NoLibs|x64.Build.0 = Release_NoLibs|x64 {58DA1042-AC19-4779-AC1A-AA8EEB3A4524}.Release|Win32.ActiveCfg = Release|Win32 {58DA1042-AC19-4779-AC1A-AA8EEB3A4524}.Release|Win32.Build.0 = Release|Win32 {58DA1042-AC19-4779-AC1A-AA8EEB3A4524}.Release|x64.ActiveCfg = Release|x64 {58DA1042-AC19-4779-AC1A-AA8EEB3A4524}.Release|x64.Build.0 = Release|x64 {D1E6567A-4F65-4832-8018-D33B3CB4692B}.Debug_NoLibs|Win32.ActiveCfg = Debug_NoLibs|Win32 {D1E6567A-4F65-4832-8018-D33B3CB4692B}.Debug_NoLibs|Win32.Build.0 = Debug_NoLibs|Win32 {D1E6567A-4F65-4832-8018-D33B3CB4692B}.Debug_NoLibs|x64.ActiveCfg = Debug_NoLibs|x64 {D1E6567A-4F65-4832-8018-D33B3CB4692B}.Debug_NoLibs|x64.Build.0 = Debug_NoLibs|x64 {D1E6567A-4F65-4832-8018-D33B3CB4692B}.Debug|Win32.ActiveCfg = Debug|Win32 {D1E6567A-4F65-4832-8018-D33B3CB4692B}.Debug|Win32.Build.0 = Debug|Win32 {D1E6567A-4F65-4832-8018-D33B3CB4692B}.Debug|x64.ActiveCfg = Debug|x64 {D1E6567A-4F65-4832-8018-D33B3CB4692B}.Debug|x64.Build.0 = Debug|x64 {D1E6567A-4F65-4832-8018-D33B3CB4692B}.Release_NoLibs|Win32.ActiveCfg = Release_NoLibs|Win32 {D1E6567A-4F65-4832-8018-D33B3CB4692B}.Release_NoLibs|Win32.Build.0 = Release_NoLibs|Win32 {D1E6567A-4F65-4832-8018-D33B3CB4692B}.Release_NoLibs|x64.ActiveCfg = Release_NoLibs|x64 {D1E6567A-4F65-4832-8018-D33B3CB4692B}.Release_NoLibs|x64.Build.0 = Release_NoLibs|x64 {D1E6567A-4F65-4832-8018-D33B3CB4692B}.Release|Win32.ActiveCfg = Release|Win32 {D1E6567A-4F65-4832-8018-D33B3CB4692B}.Release|Win32.Build.0 = Release|Win32 {D1E6567A-4F65-4832-8018-D33B3CB4692B}.Release|x64.ActiveCfg = Release|x64 {D1E6567A-4F65-4832-8018-D33B3CB4692B}.Release|x64.Build.0 = Release|x64 {A2BEA467-A4CC-4FA6-9C74-587498E35467}.Debug_NoLibs|Win32.ActiveCfg = Debug_NoLibs|Win32 {A2BEA467-A4CC-4FA6-9C74-587498E35467}.Debug_NoLibs|Win32.Build.0 = Debug_NoLibs|Win32 {A2BEA467-A4CC-4FA6-9C74-587498E35467}.Debug_NoLibs|x64.ActiveCfg = Debug_NoLibs|x64 {A2BEA467-A4CC-4FA6-9C74-587498E35467}.Debug_NoLibs|x64.Build.0 = Debug_NoLibs|x64 {A2BEA467-A4CC-4FA6-9C74-587498E35467}.Debug|Win32.ActiveCfg = Debug|Win32 {A2BEA467-A4CC-4FA6-9C74-587498E35467}.Debug|Win32.Build.0 = Debug|Win32 {A2BEA467-A4CC-4FA6-9C74-587498E35467}.Debug|x64.ActiveCfg = Debug|x64 {A2BEA467-A4CC-4FA6-9C74-587498E35467}.Debug|x64.Build.0 = Debug|x64 {A2BEA467-A4CC-4FA6-9C74-587498E35467}.Release_NoLibs|Win32.ActiveCfg = Release_NoLibs|Win32 {A2BEA467-A4CC-4FA6-9C74-587498E35467}.Release_NoLibs|Win32.Build.0 = Release_NoLibs|Win32 {A2BEA467-A4CC-4FA6-9C74-587498E35467}.Release_NoLibs|x64.ActiveCfg = Release_NoLibs|x64 {A2BEA467-A4CC-4FA6-9C74-587498E35467}.Release_NoLibs|x64.Build.0 = Release_NoLibs|x64 
{A2BEA467-A4CC-4FA6-9C74-587498E35467}.Release|Win32.ActiveCfg = Release|Win32 {A2BEA467-A4CC-4FA6-9C74-587498E35467}.Release|Win32.Build.0 = Release|Win32 {A2BEA467-A4CC-4FA6-9C74-587498E35467}.Release|x64.ActiveCfg = Release|x64 {A2BEA467-A4CC-4FA6-9C74-587498E35467}.Release|x64.Build.0 = Release|x64 {FBB66156-9A54-4713-A801-C507BE7A3AE3}.Debug_NoLibs|Win32.ActiveCfg = Debug_NoLibs|Win32 {FBB66156-9A54-4713-A801-C507BE7A3AE3}.Debug_NoLibs|Win32.Build.0 = Debug_NoLibs|Win32 {FBB66156-9A54-4713-A801-C507BE7A3AE3}.Debug_NoLibs|x64.ActiveCfg = Debug_NoLibs|x64 {FBB66156-9A54-4713-A801-C507BE7A3AE3}.Debug_NoLibs|x64.Build.0 = Debug_NoLibs|x64 {FBB66156-9A54-4713-A801-C507BE7A3AE3}.Debug|Win32.ActiveCfg = Debug|Win32 {FBB66156-9A54-4713-A801-C507BE7A3AE3}.Debug|Win32.Build.0 = Debug|Win32 {FBB66156-9A54-4713-A801-C507BE7A3AE3}.Debug|x64.ActiveCfg = Debug|x64 {FBB66156-9A54-4713-A801-C507BE7A3AE3}.Debug|x64.Build.0 = Debug|x64 {FBB66156-9A54-4713-A801-C507BE7A3AE3}.Release_NoLibs|Win32.ActiveCfg = Release_NoLibs|Win32 {FBB66156-9A54-4713-A801-C507BE7A3AE3}.Release_NoLibs|Win32.Build.0 = Release_NoLibs|Win32 {FBB66156-9A54-4713-A801-C507BE7A3AE3}.Release_NoLibs|x64.ActiveCfg = Release_NoLibs|x64 {FBB66156-9A54-4713-A801-C507BE7A3AE3}.Release_NoLibs|x64.Build.0 = Release_NoLibs|x64 {FBB66156-9A54-4713-A801-C507BE7A3AE3}.Release|Win32.ActiveCfg = Release|Win32 {FBB66156-9A54-4713-A801-C507BE7A3AE3}.Release|Win32.Build.0 = Release|Win32 {FBB66156-9A54-4713-A801-C507BE7A3AE3}.Release|x64.ActiveCfg = Release|x64 {FBB66156-9A54-4713-A801-C507BE7A3AE3}.Release|x64.Build.0 = Release|x64 {46B82840-9832-466F-8568-132407CA3853}.Debug_NoLibs|Win32.ActiveCfg = Debug_NoLibs|Win32 {46B82840-9832-466F-8568-132407CA3853}.Debug_NoLibs|Win32.Build.0 = Debug_NoLibs|Win32 {46B82840-9832-466F-8568-132407CA3853}.Debug_NoLibs|x64.ActiveCfg = Debug_NoLibs|x64 {46B82840-9832-466F-8568-132407CA3853}.Debug_NoLibs|x64.Build.0 = Debug_NoLibs|x64 {46B82840-9832-466F-8568-132407CA3853}.Debug|Win32.ActiveCfg = Debug|Win32 {46B82840-9832-466F-8568-132407CA3853}.Debug|Win32.Build.0 = Debug|Win32 {46B82840-9832-466F-8568-132407CA3853}.Debug|x64.ActiveCfg = Debug|x64 {46B82840-9832-466F-8568-132407CA3853}.Debug|x64.Build.0 = Debug|x64 {46B82840-9832-466F-8568-132407CA3853}.Release_NoLibs|Win32.ActiveCfg = Release_NoLibs|Win32 {46B82840-9832-466F-8568-132407CA3853}.Release_NoLibs|Win32.Build.0 = Release_NoLibs|Win32 {46B82840-9832-466F-8568-132407CA3853}.Release_NoLibs|x64.ActiveCfg = Release_NoLibs|x64 {46B82840-9832-466F-8568-132407CA3853}.Release_NoLibs|x64.Build.0 = Release_NoLibs|x64 {46B82840-9832-466F-8568-132407CA3853}.Release|Win32.ActiveCfg = Release|Win32 {46B82840-9832-466F-8568-132407CA3853}.Release|Win32.Build.0 = Release|Win32 {46B82840-9832-466F-8568-132407CA3853}.Release|x64.ActiveCfg = Release|x64 {46B82840-9832-466F-8568-132407CA3853}.Release|x64.Build.0 = Release|x64 {48D98A0A-BF9C-4D7E-9AF8-E4CAE8437997}.Debug_NoLibs|Win32.ActiveCfg = Debug_NoLibs|Win32 {48D98A0A-BF9C-4D7E-9AF8-E4CAE8437997}.Debug_NoLibs|Win32.Build.0 = Debug_NoLibs|Win32 {48D98A0A-BF9C-4D7E-9AF8-E4CAE8437997}.Debug_NoLibs|x64.ActiveCfg = Debug_NoLibs|x64 {48D98A0A-BF9C-4D7E-9AF8-E4CAE8437997}.Debug_NoLibs|x64.Build.0 = Debug_NoLibs|x64 {48D98A0A-BF9C-4D7E-9AF8-E4CAE8437997}.Debug|Win32.ActiveCfg = Debug|Win32 {48D98A0A-BF9C-4D7E-9AF8-E4CAE8437997}.Debug|Win32.Build.0 = Debug|Win32 {48D98A0A-BF9C-4D7E-9AF8-E4CAE8437997}.Debug|x64.ActiveCfg = Debug|x64 {48D98A0A-BF9C-4D7E-9AF8-E4CAE8437997}.Debug|x64.Build.0 = Debug|x64 
{48D98A0A-BF9C-4D7E-9AF8-E4CAE8437997}.Release_NoLibs|Win32.ActiveCfg = Release_NoLibs|Win32 {48D98A0A-BF9C-4D7E-9AF8-E4CAE8437997}.Release_NoLibs|Win32.Build.0 = Release_NoLibs|Win32 {48D98A0A-BF9C-4D7E-9AF8-E4CAE8437997}.Release_NoLibs|x64.ActiveCfg = Release_NoLibs|x64 {48D98A0A-BF9C-4D7E-9AF8-E4CAE8437997}.Release_NoLibs|x64.Build.0 = Release_NoLibs|x64 {48D98A0A-BF9C-4D7E-9AF8-E4CAE8437997}.Release|Win32.ActiveCfg = Release|Win32 {48D98A0A-BF9C-4D7E-9AF8-E4CAE8437997}.Release|Win32.Build.0 = Release|Win32 {48D98A0A-BF9C-4D7E-9AF8-E4CAE8437997}.Release|x64.ActiveCfg = Release|x64 {48D98A0A-BF9C-4D7E-9AF8-E4CAE8437997}.Release|x64.Build.0 = Release|x64 {7C132953-1700-42FF-9F61-A814C9F2C758}.Debug_NoLibs|Win32.ActiveCfg = Debug_NoLibs|Win32 {7C132953-1700-42FF-9F61-A814C9F2C758}.Debug_NoLibs|Win32.Build.0 = Debug_NoLibs|Win32 {7C132953-1700-42FF-9F61-A814C9F2C758}.Debug_NoLibs|x64.ActiveCfg = Debug_NoLibs|x64 {7C132953-1700-42FF-9F61-A814C9F2C758}.Debug_NoLibs|x64.Build.0 = Debug_NoLibs|x64 {7C132953-1700-42FF-9F61-A814C9F2C758}.Debug|Win32.ActiveCfg = Debug|Win32 {7C132953-1700-42FF-9F61-A814C9F2C758}.Debug|Win32.Build.0 = Debug|Win32 {7C132953-1700-42FF-9F61-A814C9F2C758}.Debug|x64.ActiveCfg = Debug|x64 {7C132953-1700-42FF-9F61-A814C9F2C758}.Debug|x64.Build.0 = Debug|x64 {7C132953-1700-42FF-9F61-A814C9F2C758}.Release_NoLibs|Win32.ActiveCfg = Release_NoLibs|Win32 {7C132953-1700-42FF-9F61-A814C9F2C758}.Release_NoLibs|Win32.Build.0 = Release_NoLibs|Win32 {7C132953-1700-42FF-9F61-A814C9F2C758}.Release_NoLibs|x64.ActiveCfg = Release_NoLibs|x64 {7C132953-1700-42FF-9F61-A814C9F2C758}.Release_NoLibs|x64.Build.0 = Release_NoLibs|x64 {7C132953-1700-42FF-9F61-A814C9F2C758}.Release|Win32.ActiveCfg = Release|Win32 {7C132953-1700-42FF-9F61-A814C9F2C758}.Release|Win32.Build.0 = Release|Win32 {7C132953-1700-42FF-9F61-A814C9F2C758}.Release|x64.ActiveCfg = Release|x64 {7C132953-1700-42FF-9F61-A814C9F2C758}.Release|x64.Build.0 = Release|x64 {38D89022-2C83-4436-A333-375A2E3E7BB0}.Debug_NoLibs|Win32.ActiveCfg = Debug_NoLibs|Win32 {38D89022-2C83-4436-A333-375A2E3E7BB0}.Debug_NoLibs|Win32.Build.0 = Debug_NoLibs|Win32 {38D89022-2C83-4436-A333-375A2E3E7BB0}.Debug_NoLibs|x64.ActiveCfg = Debug_NoLibs|x64 {38D89022-2C83-4436-A333-375A2E3E7BB0}.Debug_NoLibs|x64.Build.0 = Debug_NoLibs|x64 {38D89022-2C83-4436-A333-375A2E3E7BB0}.Debug|Win32.ActiveCfg = Debug|Win32 {38D89022-2C83-4436-A333-375A2E3E7BB0}.Debug|Win32.Build.0 = Debug|Win32 {38D89022-2C83-4436-A333-375A2E3E7BB0}.Debug|x64.ActiveCfg = Debug|x64 {38D89022-2C83-4436-A333-375A2E3E7BB0}.Debug|x64.Build.0 = Debug|x64 {38D89022-2C83-4436-A333-375A2E3E7BB0}.Release_NoLibs|Win32.ActiveCfg = Release_NoLibs|Win32 {38D89022-2C83-4436-A333-375A2E3E7BB0}.Release_NoLibs|Win32.Build.0 = Release_NoLibs|Win32 {38D89022-2C83-4436-A333-375A2E3E7BB0}.Release_NoLibs|x64.ActiveCfg = Release_NoLibs|x64 {38D89022-2C83-4436-A333-375A2E3E7BB0}.Release_NoLibs|x64.Build.0 = Release_NoLibs|x64 {38D89022-2C83-4436-A333-375A2E3E7BB0}.Release|Win32.ActiveCfg = Release|Win32 {38D89022-2C83-4436-A333-375A2E3E7BB0}.Release|Win32.Build.0 = Release|Win32 {38D89022-2C83-4436-A333-375A2E3E7BB0}.Release|x64.ActiveCfg = Release|x64 {38D89022-2C83-4436-A333-375A2E3E7BB0}.Release|x64.Build.0 = Release|x64 {52251CB2-65A3-421B-9CB4-7DAC13BB3758}.Debug_NoLibs|Win32.ActiveCfg = Debug_NoLibs|Win32 {52251CB2-65A3-421B-9CB4-7DAC13BB3758}.Debug_NoLibs|Win32.Build.0 = Debug_NoLibs|Win32 {52251CB2-65A3-421B-9CB4-7DAC13BB3758}.Debug_NoLibs|x64.ActiveCfg = Debug_NoLibs|x64 
{52251CB2-65A3-421B-9CB4-7DAC13BB3758}.Debug_NoLibs|x64.Build.0 = Debug_NoLibs|x64 {52251CB2-65A3-421B-9CB4-7DAC13BB3758}.Debug|Win32.ActiveCfg = Debug|Win32 {52251CB2-65A3-421B-9CB4-7DAC13BB3758}.Debug|Win32.Build.0 = Debug|Win32 {52251CB2-65A3-421B-9CB4-7DAC13BB3758}.Debug|x64.ActiveCfg = Debug|x64 {52251CB2-65A3-421B-9CB4-7DAC13BB3758}.Debug|x64.Build.0 = Debug|x64 {52251CB2-65A3-421B-9CB4-7DAC13BB3758}.Release_NoLibs|Win32.ActiveCfg = Release_NoLibs|Win32 {52251CB2-65A3-421B-9CB4-7DAC13BB3758}.Release_NoLibs|Win32.Build.0 = Release_NoLibs|Win32 {52251CB2-65A3-421B-9CB4-7DAC13BB3758}.Release_NoLibs|x64.ActiveCfg = Release_NoLibs|x64 {52251CB2-65A3-421B-9CB4-7DAC13BB3758}.Release_NoLibs|x64.Build.0 = Release_NoLibs|x64 {52251CB2-65A3-421B-9CB4-7DAC13BB3758}.Release|Win32.ActiveCfg = Release|Win32 {52251CB2-65A3-421B-9CB4-7DAC13BB3758}.Release|Win32.Build.0 = Release|Win32 {52251CB2-65A3-421B-9CB4-7DAC13BB3758}.Release|x64.ActiveCfg = Release|x64 {52251CB2-65A3-421B-9CB4-7DAC13BB3758}.Release|x64.Build.0 = Release|x64 {62C97F5E-64DD-4623-9563-747C4C173348}.Debug_NoLibs|Win32.ActiveCfg = Debug_NoLibs|Win32 {62C97F5E-64DD-4623-9563-747C4C173348}.Debug_NoLibs|Win32.Build.0 = Debug_NoLibs|Win32 {62C97F5E-64DD-4623-9563-747C4C173348}.Debug_NoLibs|x64.ActiveCfg = Debug_NoLibs|x64 {62C97F5E-64DD-4623-9563-747C4C173348}.Debug_NoLibs|x64.Build.0 = Debug_NoLibs|x64 {62C97F5E-64DD-4623-9563-747C4C173348}.Debug|Win32.ActiveCfg = Debug|Win32 {62C97F5E-64DD-4623-9563-747C4C173348}.Debug|Win32.Build.0 = Debug|Win32 {62C97F5E-64DD-4623-9563-747C4C173348}.Debug|x64.ActiveCfg = Debug|x64 {62C97F5E-64DD-4623-9563-747C4C173348}.Debug|x64.Build.0 = Debug|x64 {62C97F5E-64DD-4623-9563-747C4C173348}.Release_NoLibs|Win32.ActiveCfg = Release_NoLibs|Win32 {62C97F5E-64DD-4623-9563-747C4C173348}.Release_NoLibs|Win32.Build.0 = Release_NoLibs|Win32 {62C97F5E-64DD-4623-9563-747C4C173348}.Release_NoLibs|x64.ActiveCfg = Release_NoLibs|x64 {62C97F5E-64DD-4623-9563-747C4C173348}.Release_NoLibs|x64.Build.0 = Release_NoLibs|x64 {62C97F5E-64DD-4623-9563-747C4C173348}.Release|Win32.ActiveCfg = Release|Win32 {62C97F5E-64DD-4623-9563-747C4C173348}.Release|Win32.Build.0 = Release|Win32 {62C97F5E-64DD-4623-9563-747C4C173348}.Release|x64.ActiveCfg = Release|x64 {62C97F5E-64DD-4623-9563-747C4C173348}.Release|x64.Build.0 = Release|x64 {D7643AD7-8518-4B3E-8F3F-F11258D9540E}.Debug_NoLibs|Win32.ActiveCfg = Debug_NoLibs|Win32 {D7643AD7-8518-4B3E-8F3F-F11258D9540E}.Debug_NoLibs|Win32.Build.0 = Debug_NoLibs|Win32 {D7643AD7-8518-4B3E-8F3F-F11258D9540E}.Debug_NoLibs|x64.ActiveCfg = Debug_NoLibs|x64 {D7643AD7-8518-4B3E-8F3F-F11258D9540E}.Debug_NoLibs|x64.Build.0 = Debug_NoLibs|x64 {D7643AD7-8518-4B3E-8F3F-F11258D9540E}.Debug|Win32.ActiveCfg = Debug|Win32 {D7643AD7-8518-4B3E-8F3F-F11258D9540E}.Debug|Win32.Build.0 = Debug|Win32 {D7643AD7-8518-4B3E-8F3F-F11258D9540E}.Debug|x64.ActiveCfg = Debug|x64 {D7643AD7-8518-4B3E-8F3F-F11258D9540E}.Debug|x64.Build.0 = Debug|x64 {D7643AD7-8518-4B3E-8F3F-F11258D9540E}.Release_NoLibs|Win32.ActiveCfg = Release_NoLibs|Win32 {D7643AD7-8518-4B3E-8F3F-F11258D9540E}.Release_NoLibs|Win32.Build.0 = Release_NoLibs|Win32 {D7643AD7-8518-4B3E-8F3F-F11258D9540E}.Release_NoLibs|x64.ActiveCfg = Release_NoLibs|x64 {D7643AD7-8518-4B3E-8F3F-F11258D9540E}.Release_NoLibs|x64.Build.0 = Release_NoLibs|x64 {D7643AD7-8518-4B3E-8F3F-F11258D9540E}.Release|Win32.ActiveCfg = Release|Win32 {D7643AD7-8518-4B3E-8F3F-F11258D9540E}.Release|Win32.Build.0 = Release|Win32 {D7643AD7-8518-4B3E-8F3F-F11258D9540E}.Release|x64.ActiveCfg = Release|x64 
{D7643AD7-8518-4B3E-8F3F-F11258D9540E}.Release|x64.Build.0 = Release|x64 {44A003BE-400D-4434-AFED-64D8E3B448D9}.Debug_NoLibs|Win32.ActiveCfg = Debug_NoLibs|Win32 {44A003BE-400D-4434-AFED-64D8E3B448D9}.Debug_NoLibs|Win32.Build.0 = Debug_NoLibs|Win32 {44A003BE-400D-4434-AFED-64D8E3B448D9}.Debug_NoLibs|x64.ActiveCfg = Debug_NoLibs|x64 {44A003BE-400D-4434-AFED-64D8E3B448D9}.Debug_NoLibs|x64.Build.0 = Debug_NoLibs|x64 {44A003BE-400D-4434-AFED-64D8E3B448D9}.Debug|Win32.ActiveCfg = Debug|Win32 {44A003BE-400D-4434-AFED-64D8E3B448D9}.Debug|Win32.Build.0 = Debug|Win32 {44A003BE-400D-4434-AFED-64D8E3B448D9}.Debug|x64.ActiveCfg = Debug|x64 {44A003BE-400D-4434-AFED-64D8E3B448D9}.Debug|x64.Build.0 = Debug|x64 {44A003BE-400D-4434-AFED-64D8E3B448D9}.Release_NoLibs|Win32.ActiveCfg = Release_NoLibs|Win32 {44A003BE-400D-4434-AFED-64D8E3B448D9}.Release_NoLibs|Win32.Build.0 = Release_NoLibs|Win32 {44A003BE-400D-4434-AFED-64D8E3B448D9}.Release_NoLibs|x64.ActiveCfg = Release_NoLibs|x64 {44A003BE-400D-4434-AFED-64D8E3B448D9}.Release_NoLibs|x64.Build.0 = Release_NoLibs|x64 {44A003BE-400D-4434-AFED-64D8E3B448D9}.Release|Win32.ActiveCfg = Release|Win32 {44A003BE-400D-4434-AFED-64D8E3B448D9}.Release|Win32.Build.0 = Release|Win32 {44A003BE-400D-4434-AFED-64D8E3B448D9}.Release|x64.ActiveCfg = Release|x64 {44A003BE-400D-4434-AFED-64D8E3B448D9}.Release|x64.Build.0 = Release|x64 {C52F935E-1FD2-443C-A181-27908DAB3BC8}.Debug_NoLibs|Win32.ActiveCfg = Debug_NoLibs|Win32 {C52F935E-1FD2-443C-A181-27908DAB3BC8}.Debug_NoLibs|Win32.Build.0 = Debug_NoLibs|Win32 {C52F935E-1FD2-443C-A181-27908DAB3BC8}.Debug_NoLibs|x64.ActiveCfg = Debug_NoLibs|x64 {C52F935E-1FD2-443C-A181-27908DAB3BC8}.Debug_NoLibs|x64.Build.0 = Debug_NoLibs|x64 {C52F935E-1FD2-443C-A181-27908DAB3BC8}.Debug|Win32.ActiveCfg = Debug|Win32 {C52F935E-1FD2-443C-A181-27908DAB3BC8}.Debug|Win32.Build.0 = Debug|Win32 {C52F935E-1FD2-443C-A181-27908DAB3BC8}.Debug|x64.ActiveCfg = Debug|x64 {C52F935E-1FD2-443C-A181-27908DAB3BC8}.Debug|x64.Build.0 = Debug|x64 {C52F935E-1FD2-443C-A181-27908DAB3BC8}.Release_NoLibs|Win32.ActiveCfg = Release_NoLibs|Win32 {C52F935E-1FD2-443C-A181-27908DAB3BC8}.Release_NoLibs|Win32.Build.0 = Release_NoLibs|Win32 {C52F935E-1FD2-443C-A181-27908DAB3BC8}.Release_NoLibs|x64.ActiveCfg = Release_NoLibs|x64 {C52F935E-1FD2-443C-A181-27908DAB3BC8}.Release_NoLibs|x64.Build.0 = Release_NoLibs|x64 {C52F935E-1FD2-443C-A181-27908DAB3BC8}.Release|Win32.ActiveCfg = Release|Win32 {C52F935E-1FD2-443C-A181-27908DAB3BC8}.Release|Win32.Build.0 = Release|Win32 {C52F935E-1FD2-443C-A181-27908DAB3BC8}.Release|x64.ActiveCfg = Release|x64 {C52F935E-1FD2-443C-A181-27908DAB3BC8}.Release|x64.Build.0 = Release|x64 {0B127AE3-0C18-4EEF-AB20-A0693E6AA822}.Debug_NoLibs|Win32.ActiveCfg = Debug_NoLibs|Win32 {0B127AE3-0C18-4EEF-AB20-A0693E6AA822}.Debug_NoLibs|Win32.Build.0 = Debug_NoLibs|Win32 {0B127AE3-0C18-4EEF-AB20-A0693E6AA822}.Debug_NoLibs|x64.ActiveCfg = Debug_NoLibs|x64 {0B127AE3-0C18-4EEF-AB20-A0693E6AA822}.Debug_NoLibs|x64.Build.0 = Debug_NoLibs|x64 {0B127AE3-0C18-4EEF-AB20-A0693E6AA822}.Debug|Win32.ActiveCfg = Debug|Win32 {0B127AE3-0C18-4EEF-AB20-A0693E6AA822}.Debug|Win32.Build.0 = Debug|Win32 {0B127AE3-0C18-4EEF-AB20-A0693E6AA822}.Debug|x64.ActiveCfg = Debug|x64 {0B127AE3-0C18-4EEF-AB20-A0693E6AA822}.Debug|x64.Build.0 = Debug|x64 {0B127AE3-0C18-4EEF-AB20-A0693E6AA822}.Release_NoLibs|Win32.ActiveCfg = Release_NoLibs|Win32 {0B127AE3-0C18-4EEF-AB20-A0693E6AA822}.Release_NoLibs|Win32.Build.0 = Release_NoLibs|Win32 {0B127AE3-0C18-4EEF-AB20-A0693E6AA822}.Release_NoLibs|x64.ActiveCfg = 
Release_NoLibs|x64 {0B127AE3-0C18-4EEF-AB20-A0693E6AA822}.Release_NoLibs|x64.Build.0 = Release_NoLibs|x64 {0B127AE3-0C18-4EEF-AB20-A0693E6AA822}.Release|Win32.ActiveCfg = Release|Win32 {0B127AE3-0C18-4EEF-AB20-A0693E6AA822}.Release|Win32.Build.0 = Release|Win32 {0B127AE3-0C18-4EEF-AB20-A0693E6AA822}.Release|x64.ActiveCfg = Release|x64 {0B127AE3-0C18-4EEF-AB20-A0693E6AA822}.Release|x64.Build.0 = Release|x64 {A15F1E4F-951A-403E-B746-2A6D63D9C416}.Debug_NoLibs|Win32.ActiveCfg = Debug_NoLibs|Win32 {A15F1E4F-951A-403E-B746-2A6D63D9C416}.Debug_NoLibs|Win32.Build.0 = Debug_NoLibs|Win32 {A15F1E4F-951A-403E-B746-2A6D63D9C416}.Debug_NoLibs|x64.ActiveCfg = Debug_NoLibs|x64 {A15F1E4F-951A-403E-B746-2A6D63D9C416}.Debug_NoLibs|x64.Build.0 = Debug_NoLibs|x64 {A15F1E4F-951A-403E-B746-2A6D63D9C416}.Debug|Win32.ActiveCfg = Debug|Win32 {A15F1E4F-951A-403E-B746-2A6D63D9C416}.Debug|Win32.Build.0 = Debug|Win32 {A15F1E4F-951A-403E-B746-2A6D63D9C416}.Debug|x64.ActiveCfg = Debug|x64 {A15F1E4F-951A-403E-B746-2A6D63D9C416}.Debug|x64.Build.0 = Debug|x64 {A15F1E4F-951A-403E-B746-2A6D63D9C416}.Release_NoLibs|Win32.ActiveCfg = Release_NoLibs|Win32 {A15F1E4F-951A-403E-B746-2A6D63D9C416}.Release_NoLibs|Win32.Build.0 = Release_NoLibs|Win32 {A15F1E4F-951A-403E-B746-2A6D63D9C416}.Release_NoLibs|x64.ActiveCfg = Release_NoLibs|x64 {A15F1E4F-951A-403E-B746-2A6D63D9C416}.Release_NoLibs|x64.Build.0 = Release_NoLibs|x64 {A15F1E4F-951A-403E-B746-2A6D63D9C416}.Release|Win32.ActiveCfg = Release|Win32 {A15F1E4F-951A-403E-B746-2A6D63D9C416}.Release|Win32.Build.0 = Release|Win32 {A15F1E4F-951A-403E-B746-2A6D63D9C416}.Release|x64.ActiveCfg = Release|x64 {A15F1E4F-951A-403E-B746-2A6D63D9C416}.Release|x64.Build.0 = Release|x64 {6CE3D593-E90D-4CC1-A66B-694AC909F6B8}.Debug_NoLibs|Win32.ActiveCfg = Debug_NoLibs|Win32 {6CE3D593-E90D-4CC1-A66B-694AC909F6B8}.Debug_NoLibs|Win32.Build.0 = Debug_NoLibs|Win32 {6CE3D593-E90D-4CC1-A66B-694AC909F6B8}.Debug_NoLibs|x64.ActiveCfg = Debug_NoLibs|x64 {6CE3D593-E90D-4CC1-A66B-694AC909F6B8}.Debug_NoLibs|x64.Build.0 = Debug_NoLibs|x64 {6CE3D593-E90D-4CC1-A66B-694AC909F6B8}.Debug|Win32.ActiveCfg = Debug|Win32 {6CE3D593-E90D-4CC1-A66B-694AC909F6B8}.Debug|Win32.Build.0 = Debug|Win32 {6CE3D593-E90D-4CC1-A66B-694AC909F6B8}.Debug|x64.ActiveCfg = Debug|x64 {6CE3D593-E90D-4CC1-A66B-694AC909F6B8}.Debug|x64.Build.0 = Debug|x64 {6CE3D593-E90D-4CC1-A66B-694AC909F6B8}.Release_NoLibs|Win32.ActiveCfg = Release_NoLibs|Win32 {6CE3D593-E90D-4CC1-A66B-694AC909F6B8}.Release_NoLibs|Win32.Build.0 = Release_NoLibs|Win32 {6CE3D593-E90D-4CC1-A66B-694AC909F6B8}.Release_NoLibs|x64.ActiveCfg = Release_NoLibs|x64 {6CE3D593-E90D-4CC1-A66B-694AC909F6B8}.Release_NoLibs|x64.Build.0 = Release_NoLibs|x64 {6CE3D593-E90D-4CC1-A66B-694AC909F6B8}.Release|Win32.ActiveCfg = Release|Win32 {6CE3D593-E90D-4CC1-A66B-694AC909F6B8}.Release|Win32.Build.0 = Release|Win32 {6CE3D593-E90D-4CC1-A66B-694AC909F6B8}.Release|x64.ActiveCfg = Release|x64 {6CE3D593-E90D-4CC1-A66B-694AC909F6B8}.Release|x64.Build.0 = Release|x64 {1BA0B9E8-F135-494F-9CF5-86427C1F6E41}.Debug_NoLibs|Win32.ActiveCfg = Debug_NoLibs|Win32 {1BA0B9E8-F135-494F-9CF5-86427C1F6E41}.Debug_NoLibs|Win32.Build.0 = Debug_NoLibs|Win32 {1BA0B9E8-F135-494F-9CF5-86427C1F6E41}.Debug_NoLibs|x64.ActiveCfg = Debug_NoLibs|x64 {1BA0B9E8-F135-494F-9CF5-86427C1F6E41}.Debug_NoLibs|x64.Build.0 = Debug_NoLibs|x64 {1BA0B9E8-F135-494F-9CF5-86427C1F6E41}.Debug|Win32.ActiveCfg = Debug|Win32 {1BA0B9E8-F135-494F-9CF5-86427C1F6E41}.Debug|Win32.Build.0 = Debug|Win32 {1BA0B9E8-F135-494F-9CF5-86427C1F6E41}.Debug|x64.ActiveCfg = 
Debug|x64 {1BA0B9E8-F135-494F-9CF5-86427C1F6E41}.Debug|x64.Build.0 = Debug|x64 {1BA0B9E8-F135-494F-9CF5-86427C1F6E41}.Release_NoLibs|Win32.ActiveCfg = Release_NoLibs|Win32 {1BA0B9E8-F135-494F-9CF5-86427C1F6E41}.Release_NoLibs|Win32.Build.0 = Release_NoLibs|Win32 {1BA0B9E8-F135-494F-9CF5-86427C1F6E41}.Release_NoLibs|x64.ActiveCfg = Release_NoLibs|x64 {1BA0B9E8-F135-494F-9CF5-86427C1F6E41}.Release_NoLibs|x64.Build.0 = Release_NoLibs|x64 {1BA0B9E8-F135-494F-9CF5-86427C1F6E41}.Release|Win32.ActiveCfg = Release|Win32 {1BA0B9E8-F135-494F-9CF5-86427C1F6E41}.Release|Win32.Build.0 = Release|Win32 {1BA0B9E8-F135-494F-9CF5-86427C1F6E41}.Release|x64.ActiveCfg = Release|x64 {1BA0B9E8-F135-494F-9CF5-86427C1F6E41}.Release|x64.Build.0 = Release|x64 {06D707E5-68FF-4FC4-AFD0-C84584E32F47}.Debug_NoLibs|Win32.ActiveCfg = Debug_NoLibs|Win32 {06D707E5-68FF-4FC4-AFD0-C84584E32F47}.Debug_NoLibs|Win32.Build.0 = Debug_NoLibs|Win32 {06D707E5-68FF-4FC4-AFD0-C84584E32F47}.Debug_NoLibs|x64.ActiveCfg = Debug_NoLibs|x64 {06D707E5-68FF-4FC4-AFD0-C84584E32F47}.Debug_NoLibs|x64.Build.0 = Debug_NoLibs|x64 {06D707E5-68FF-4FC4-AFD0-C84584E32F47}.Debug|Win32.ActiveCfg = Debug|Win32 {06D707E5-68FF-4FC4-AFD0-C84584E32F47}.Debug|Win32.Build.0 = Debug|Win32 {06D707E5-68FF-4FC4-AFD0-C84584E32F47}.Debug|x64.ActiveCfg = Debug|x64 {06D707E5-68FF-4FC4-AFD0-C84584E32F47}.Debug|x64.Build.0 = Debug|x64 {06D707E5-68FF-4FC4-AFD0-C84584E32F47}.Release_NoLibs|Win32.ActiveCfg = Release_NoLibs|Win32 {06D707E5-68FF-4FC4-AFD0-C84584E32F47}.Release_NoLibs|Win32.Build.0 = Release_NoLibs|Win32 {06D707E5-68FF-4FC4-AFD0-C84584E32F47}.Release_NoLibs|x64.ActiveCfg = Release_NoLibs|x64 {06D707E5-68FF-4FC4-AFD0-C84584E32F47}.Release_NoLibs|x64.Build.0 = Release_NoLibs|x64 {06D707E5-68FF-4FC4-AFD0-C84584E32F47}.Release|Win32.ActiveCfg = Release|Win32 {06D707E5-68FF-4FC4-AFD0-C84584E32F47}.Release|Win32.Build.0 = Release|Win32 {06D707E5-68FF-4FC4-AFD0-C84584E32F47}.Release|x64.ActiveCfg = Release|x64 {06D707E5-68FF-4FC4-AFD0-C84584E32F47}.Release|x64.Build.0 = Release|x64 {96AFC6D4-A3DC-44D4-8F55-F74E1D21798C}.Debug_NoLibs|Win32.ActiveCfg = Debug_NoLibs|Win32 {96AFC6D4-A3DC-44D4-8F55-F74E1D21798C}.Debug_NoLibs|Win32.Build.0 = Debug_NoLibs|Win32 {96AFC6D4-A3DC-44D4-8F55-F74E1D21798C}.Debug_NoLibs|x64.ActiveCfg = Debug_NoLibs|x64 {96AFC6D4-A3DC-44D4-8F55-F74E1D21798C}.Debug_NoLibs|x64.Build.0 = Debug_NoLibs|x64 {96AFC6D4-A3DC-44D4-8F55-F74E1D21798C}.Debug|Win32.ActiveCfg = Debug|Win32 {96AFC6D4-A3DC-44D4-8F55-F74E1D21798C}.Debug|Win32.Build.0 = Debug|Win32 {96AFC6D4-A3DC-44D4-8F55-F74E1D21798C}.Debug|x64.ActiveCfg = Debug|x64 {96AFC6D4-A3DC-44D4-8F55-F74E1D21798C}.Debug|x64.Build.0 = Debug|x64 {96AFC6D4-A3DC-44D4-8F55-F74E1D21798C}.Release_NoLibs|Win32.ActiveCfg = Release_NoLibs|Win32 {96AFC6D4-A3DC-44D4-8F55-F74E1D21798C}.Release_NoLibs|Win32.Build.0 = Release_NoLibs|Win32 {96AFC6D4-A3DC-44D4-8F55-F74E1D21798C}.Release_NoLibs|x64.ActiveCfg = Release_NoLibs|x64 {96AFC6D4-A3DC-44D4-8F55-F74E1D21798C}.Release_NoLibs|x64.Build.0 = Release_NoLibs|x64 {96AFC6D4-A3DC-44D4-8F55-F74E1D21798C}.Release|Win32.ActiveCfg = Release|Win32 {96AFC6D4-A3DC-44D4-8F55-F74E1D21798C}.Release|Win32.Build.0 = Release|Win32 {96AFC6D4-A3DC-44D4-8F55-F74E1D21798C}.Release|x64.ActiveCfg = Release|x64 {96AFC6D4-A3DC-44D4-8F55-F74E1D21798C}.Release|x64.Build.0 = Release|x64 {8EE881F4-78DC-49C7-8845-E842358AC0FA}.Debug_NoLibs|Win32.ActiveCfg = Debug_NoLibs|Win32 {8EE881F4-78DC-49C7-8845-E842358AC0FA}.Debug_NoLibs|Win32.Build.0 = Debug_NoLibs|Win32 
{8EE881F4-78DC-49C7-8845-E842358AC0FA}.Debug_NoLibs|x64.ActiveCfg = Debug_NoLibs|x64 {8EE881F4-78DC-49C7-8845-E842358AC0FA}.Debug_NoLibs|x64.Build.0 = Debug_NoLibs|x64 {8EE881F4-78DC-49C7-8845-E842358AC0FA}.Debug|Win32.ActiveCfg = Debug|Win32 {8EE881F4-78DC-49C7-8845-E842358AC0FA}.Debug|Win32.Build.0 = Debug|Win32 {8EE881F4-78DC-49C7-8845-E842358AC0FA}.Debug|x64.ActiveCfg = Debug|x64 {8EE881F4-78DC-49C7-8845-E842358AC0FA}.Debug|x64.Build.0 = Debug|x64 {8EE881F4-78DC-49C7-8845-E842358AC0FA}.Release_NoLibs|Win32.ActiveCfg = Release_NoLibs|Win32 {8EE881F4-78DC-49C7-8845-E842358AC0FA}.Release_NoLibs|Win32.Build.0 = Release_NoLibs|Win32 {8EE881F4-78DC-49C7-8845-E842358AC0FA}.Release_NoLibs|x64.ActiveCfg = Release_NoLibs|x64 {8EE881F4-78DC-49C7-8845-E842358AC0FA}.Release_NoLibs|x64.Build.0 = Release_NoLibs|x64 {8EE881F4-78DC-49C7-8845-E842358AC0FA}.Release|Win32.ActiveCfg = Release|Win32 {8EE881F4-78DC-49C7-8845-E842358AC0FA}.Release|Win32.Build.0 = Release|Win32 {8EE881F4-78DC-49C7-8845-E842358AC0FA}.Release|x64.ActiveCfg = Release|x64 {8EE881F4-78DC-49C7-8845-E842358AC0FA}.Release|x64.Build.0 = Release|x64 {3B32F1BE-9686-4DC9-8197-F734D146E9F8}.Debug_NoLibs|Win32.ActiveCfg = Debug_NoLibs|Win32 {3B32F1BE-9686-4DC9-8197-F734D146E9F8}.Debug_NoLibs|Win32.Build.0 = Debug_NoLibs|Win32 {3B32F1BE-9686-4DC9-8197-F734D146E9F8}.Debug_NoLibs|x64.ActiveCfg = Debug_NoLibs|x64 {3B32F1BE-9686-4DC9-8197-F734D146E9F8}.Debug_NoLibs|x64.Build.0 = Debug_NoLibs|x64 {3B32F1BE-9686-4DC9-8197-F734D146E9F8}.Debug|Win32.ActiveCfg = Debug|Win32 {3B32F1BE-9686-4DC9-8197-F734D146E9F8}.Debug|Win32.Build.0 = Debug|Win32 {3B32F1BE-9686-4DC9-8197-F734D146E9F8}.Debug|x64.ActiveCfg = Debug|x64 {3B32F1BE-9686-4DC9-8197-F734D146E9F8}.Debug|x64.Build.0 = Debug|x64 {3B32F1BE-9686-4DC9-8197-F734D146E9F8}.Release_NoLibs|Win32.ActiveCfg = Release_NoLibs|Win32 {3B32F1BE-9686-4DC9-8197-F734D146E9F8}.Release_NoLibs|Win32.Build.0 = Release_NoLibs|Win32 {3B32F1BE-9686-4DC9-8197-F734D146E9F8}.Release_NoLibs|x64.ActiveCfg = Release_NoLibs|x64 {3B32F1BE-9686-4DC9-8197-F734D146E9F8}.Release_NoLibs|x64.Build.0 = Release_NoLibs|x64 {3B32F1BE-9686-4DC9-8197-F734D146E9F8}.Release|Win32.ActiveCfg = Release|Win32 {3B32F1BE-9686-4DC9-8197-F734D146E9F8}.Release|Win32.Build.0 = Release|Win32 {3B32F1BE-9686-4DC9-8197-F734D146E9F8}.Release|x64.ActiveCfg = Release|x64 {3B32F1BE-9686-4DC9-8197-F734D146E9F8}.Release|x64.Build.0 = Release|x64 {5594DC0E-191C-4F2A-83FE-97F53A9C1222}.Debug_NoLibs|Win32.ActiveCfg = Debug_NoLibs|Win32 {5594DC0E-191C-4F2A-83FE-97F53A9C1222}.Debug_NoLibs|Win32.Build.0 = Debug_NoLibs|Win32 {5594DC0E-191C-4F2A-83FE-97F53A9C1222}.Debug_NoLibs|x64.ActiveCfg = Debug_NoLibs|x64 {5594DC0E-191C-4F2A-83FE-97F53A9C1222}.Debug_NoLibs|x64.Build.0 = Debug_NoLibs|x64 {5594DC0E-191C-4F2A-83FE-97F53A9C1222}.Debug|Win32.ActiveCfg = Debug|Win32 {5594DC0E-191C-4F2A-83FE-97F53A9C1222}.Debug|Win32.Build.0 = Debug|Win32 {5594DC0E-191C-4F2A-83FE-97F53A9C1222}.Debug|x64.ActiveCfg = Debug|x64 {5594DC0E-191C-4F2A-83FE-97F53A9C1222}.Debug|x64.Build.0 = Debug|x64 {5594DC0E-191C-4F2A-83FE-97F53A9C1222}.Release_NoLibs|Win32.ActiveCfg = Release_NoLibs|Win32 {5594DC0E-191C-4F2A-83FE-97F53A9C1222}.Release_NoLibs|Win32.Build.0 = Release_NoLibs|Win32 {5594DC0E-191C-4F2A-83FE-97F53A9C1222}.Release_NoLibs|x64.ActiveCfg = Release_NoLibs|x64 {5594DC0E-191C-4F2A-83FE-97F53A9C1222}.Release_NoLibs|x64.Build.0 = Release_NoLibs|x64 {5594DC0E-191C-4F2A-83FE-97F53A9C1222}.Release|Win32.ActiveCfg = Release|Win32 {5594DC0E-191C-4F2A-83FE-97F53A9C1222}.Release|Win32.Build.0 = 
Release|Win32 {5594DC0E-191C-4F2A-83FE-97F53A9C1222}.Release|x64.ActiveCfg = Release|x64 {5594DC0E-191C-4F2A-83FE-97F53A9C1222}.Release|x64.Build.0 = Release|x64 {11A8927C-F971-4104-A286-5DC11C25E2EC}.Debug_NoLibs|Win32.ActiveCfg = Debug_NoLibs|Win32 {11A8927C-F971-4104-A286-5DC11C25E2EC}.Debug_NoLibs|Win32.Build.0 = Debug_NoLibs|Win32 {11A8927C-F971-4104-A286-5DC11C25E2EC}.Debug_NoLibs|x64.ActiveCfg = Debug_NoLibs|x64 {11A8927C-F971-4104-A286-5DC11C25E2EC}.Debug_NoLibs|x64.Build.0 = Debug_NoLibs|x64 {11A8927C-F971-4104-A286-5DC11C25E2EC}.Debug|Win32.ActiveCfg = Debug|Win32 {11A8927C-F971-4104-A286-5DC11C25E2EC}.Debug|Win32.Build.0 = Debug|Win32 {11A8927C-F971-4104-A286-5DC11C25E2EC}.Debug|x64.ActiveCfg = Debug|x64 {11A8927C-F971-4104-A286-5DC11C25E2EC}.Debug|x64.Build.0 = Debug|x64 {11A8927C-F971-4104-A286-5DC11C25E2EC}.Release_NoLibs|Win32.ActiveCfg = Release_NoLibs|Win32 {11A8927C-F971-4104-A286-5DC11C25E2EC}.Release_NoLibs|Win32.Build.0 = Release_NoLibs|Win32 {11A8927C-F971-4104-A286-5DC11C25E2EC}.Release_NoLibs|x64.ActiveCfg = Release_NoLibs|x64 {11A8927C-F971-4104-A286-5DC11C25E2EC}.Release_NoLibs|x64.Build.0 = Release_NoLibs|x64 {11A8927C-F971-4104-A286-5DC11C25E2EC}.Release|Win32.ActiveCfg = Release|Win32 {11A8927C-F971-4104-A286-5DC11C25E2EC}.Release|Win32.Build.0 = Release|Win32 {11A8927C-F971-4104-A286-5DC11C25E2EC}.Release|x64.ActiveCfg = Release|x64 {11A8927C-F971-4104-A286-5DC11C25E2EC}.Release|x64.Build.0 = Release|x64 {62D88133-09F6-4E13-B39F-36FCEFBE4FAF}.Debug_NoLibs|Win32.ActiveCfg = Debug_NoLibs|Win32 {62D88133-09F6-4E13-B39F-36FCEFBE4FAF}.Debug_NoLibs|Win32.Build.0 = Debug_NoLibs|Win32 {62D88133-09F6-4E13-B39F-36FCEFBE4FAF}.Debug_NoLibs|x64.ActiveCfg = Debug_NoLibs|x64 {62D88133-09F6-4E13-B39F-36FCEFBE4FAF}.Debug_NoLibs|x64.Build.0 = Debug_NoLibs|x64 {62D88133-09F6-4E13-B39F-36FCEFBE4FAF}.Debug|Win32.ActiveCfg = Debug|Win32 {62D88133-09F6-4E13-B39F-36FCEFBE4FAF}.Debug|Win32.Build.0 = Debug|Win32 {62D88133-09F6-4E13-B39F-36FCEFBE4FAF}.Debug|x64.ActiveCfg = Debug|x64 {62D88133-09F6-4E13-B39F-36FCEFBE4FAF}.Debug|x64.Build.0 = Debug|x64 {62D88133-09F6-4E13-B39F-36FCEFBE4FAF}.Release_NoLibs|Win32.ActiveCfg = Release_NoLibs|Win32 {62D88133-09F6-4E13-B39F-36FCEFBE4FAF}.Release_NoLibs|Win32.Build.0 = Release_NoLibs|Win32 {62D88133-09F6-4E13-B39F-36FCEFBE4FAF}.Release_NoLibs|x64.ActiveCfg = Release_NoLibs|x64 {62D88133-09F6-4E13-B39F-36FCEFBE4FAF}.Release_NoLibs|x64.Build.0 = Release_NoLibs|x64 {62D88133-09F6-4E13-B39F-36FCEFBE4FAF}.Release|Win32.ActiveCfg = Release|Win32 {62D88133-09F6-4E13-B39F-36FCEFBE4FAF}.Release|Win32.Build.0 = Release|Win32 {62D88133-09F6-4E13-B39F-36FCEFBE4FAF}.Release|x64.ActiveCfg = Release|x64 {62D88133-09F6-4E13-B39F-36FCEFBE4FAF}.Release|x64.Build.0 = Release|x64 {76EFC06C-1F64-4478-ABE8-79832716B393}.Debug_NoLibs|Win32.ActiveCfg = Debug_NoLibs|Win32 {76EFC06C-1F64-4478-ABE8-79832716B393}.Debug_NoLibs|Win32.Build.0 = Debug_NoLibs|Win32 {76EFC06C-1F64-4478-ABE8-79832716B393}.Debug_NoLibs|x64.ActiveCfg = Debug_NoLibs|x64 {76EFC06C-1F64-4478-ABE8-79832716B393}.Debug_NoLibs|x64.Build.0 = Debug_NoLibs|x64 {76EFC06C-1F64-4478-ABE8-79832716B393}.Debug|Win32.ActiveCfg = Debug|Win32 {76EFC06C-1F64-4478-ABE8-79832716B393}.Debug|Win32.Build.0 = Debug|Win32 {76EFC06C-1F64-4478-ABE8-79832716B393}.Debug|x64.ActiveCfg = Debug|x64 {76EFC06C-1F64-4478-ABE8-79832716B393}.Debug|x64.Build.0 = Debug|x64 {76EFC06C-1F64-4478-ABE8-79832716B393}.Release_NoLibs|Win32.ActiveCfg = Release_NoLibs|Win32 {76EFC06C-1F64-4478-ABE8-79832716B393}.Release_NoLibs|Win32.Build.0 = 
Release_NoLibs|Win32 {76EFC06C-1F64-4478-ABE8-79832716B393}.Release_NoLibs|x64.ActiveCfg = Release_NoLibs|x64 {76EFC06C-1F64-4478-ABE8-79832716B393}.Release_NoLibs|x64.Build.0 = Release_NoLibs|x64 {76EFC06C-1F64-4478-ABE8-79832716B393}.Release|Win32.ActiveCfg = Release|Win32 {76EFC06C-1F64-4478-ABE8-79832716B393}.Release|Win32.Build.0 = Release|Win32 {76EFC06C-1F64-4478-ABE8-79832716B393}.Release|x64.ActiveCfg = Release|x64 {76EFC06C-1F64-4478-ABE8-79832716B393}.Release|x64.Build.0 = Release|x64 {E4A40368-152D-4D54-9E2E-4B140212F98F}.Debug_NoLibs|Win32.ActiveCfg = Debug_NoLibs|Win32 {E4A40368-152D-4D54-9E2E-4B140212F98F}.Debug_NoLibs|Win32.Build.0 = Debug_NoLibs|Win32 {E4A40368-152D-4D54-9E2E-4B140212F98F}.Debug_NoLibs|x64.ActiveCfg = Debug_NoLibs|x64 {E4A40368-152D-4D54-9E2E-4B140212F98F}.Debug_NoLibs|x64.Build.0 = Debug_NoLibs|x64 {E4A40368-152D-4D54-9E2E-4B140212F98F}.Debug|Win32.ActiveCfg = Debug|Win32 {E4A40368-152D-4D54-9E2E-4B140212F98F}.Debug|Win32.Build.0 = Debug|Win32 {E4A40368-152D-4D54-9E2E-4B140212F98F}.Debug|x64.ActiveCfg = Debug|x64 {E4A40368-152D-4D54-9E2E-4B140212F98F}.Debug|x64.Build.0 = Debug|x64 {E4A40368-152D-4D54-9E2E-4B140212F98F}.Release_NoLibs|Win32.ActiveCfg = Release_NoLibs|Win32 {E4A40368-152D-4D54-9E2E-4B140212F98F}.Release_NoLibs|Win32.Build.0 = Release_NoLibs|Win32 {E4A40368-152D-4D54-9E2E-4B140212F98F}.Release_NoLibs|x64.ActiveCfg = Release_NoLibs|x64 {E4A40368-152D-4D54-9E2E-4B140212F98F}.Release_NoLibs|x64.Build.0 = Release_NoLibs|x64 {E4A40368-152D-4D54-9E2E-4B140212F98F}.Release|Win32.ActiveCfg = Release|Win32 {E4A40368-152D-4D54-9E2E-4B140212F98F}.Release|Win32.Build.0 = Release|Win32 {E4A40368-152D-4D54-9E2E-4B140212F98F}.Release|x64.ActiveCfg = Release|x64 {E4A40368-152D-4D54-9E2E-4B140212F98F}.Release|x64.Build.0 = Release|x64 EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE EndGlobalSection EndGlobal sleuthkit-4.2.0/win32/tsk_comparedir/000755 000765 000024 00000000000 12576326344 020307 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/win32/tsk_gettimes/000755 000765 000024 00000000000 12576326344 020003 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/win32/tsk_jni/000755 000765 000024 00000000000 12576326344 016742 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/win32/tsk_loaddb/000755 000765 000024 00000000000 12576326344 017407 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/win32/tsk_recover/000755 000765 000024 00000000000 12576326344 017627 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/win32/tsk_recover/tsk_recover.vcxproj000755 000765 000024 00000044214 12576320700 023570 0ustar00carrierstaff000000 000000  Debug_NoLibs Win32 Debug_NoLibs x64 Debug Win32 Debug x64 Release_NoLibs Win32 Release_NoLibs x64 Release Win32 Release x64 {06D707E5-68FF-4FC4-AFD0-C84584E32F47} tsk_recover Win32Proj Application false Application false Unicode Windows7.1SDK Application Unicode true v100 Application Unicode true v100 Application Unicode true Windows7.1SDK Application Unicode true Windows7.1SDK Application Unicode Application Unicode Windows7.1SDK <_ProjectFileVersion>10.0.30319.1 $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) true true $(SolutionDir)$(Configuration)\ $(SolutionDir)$(Configuration)\ $(OutDir) $(OutDir) $(IntDir) $(IntDir) $(IntDir) $(IntDir) false false false false $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) true 
EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console MachineX86 Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console MaxSpeed true $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) MultiThreadedDLL true Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console true true MachineX86 MaxSpeed true $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) MultiThreaded true Level3 ProgramDatabase %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console true true MachineX86 MaxSpeed true $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) MultiThreadedDLL true Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console true true MaxSpeed true $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) MultiThreaded true Level3 ProgramDatabase %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console true true $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL EditAndContinue Disabled %(AdditionalLibraryDirectories) $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL ProgramDatabase Disabled %(AdditionalLibraryDirectories) {76efc06c-1f64-4478-abe8-79832716b393} false sleuthkit-4.2.0/win32/tsk_loaddb/tsk_loaddb.vcxproj000755 000765 000024 00000041241 12576320700 023125 0ustar00carrierstaff000000 000000  Debug_NoLibs Win32 Debug_NoLibs x64 Debug Win32 Debug x64 Release_NoLibs Win32 Release_NoLibs x64 Release Win32 Release x64 {96AFC6D4-A3DC-44D4-8F55-F74E1D21798C} tsk_loaddb Application NotSet Application Unicode Windows7.1SDK Application MultiByte true v100 Application MultiByte true v100 Application Unicode true Windows7.1SDK Application Unicode true Windows7.1SDK Application NotSet Application Unicode Windows7.1SDK <_ProjectFileVersion>10.0.30319.1 $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) $(SolutionDir)$(Configuration)\ $(SolutionDir)$(Configuration)\ $(OutDir) $(OutDir) $(IntDir) $(IntDir) $(IntDir) $(IntDir) $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) true EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue _CRT_SECURE_NO_DEPRECATE; libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true MachineX86 Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase _CRT_SECURE_NO_DEPRECATE; 
libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true MaxSpeed true $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) MultiThreadedDLL true Level3 ProgramDatabase _CRT_SECURE_NO_DEPRECATE;_MBCS;%(PreprocessorDefinitions) libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true true true MachineX86 MaxSpeed true $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) MultiThreaded true Level3 ProgramDatabase _CRT_SECURE_NO_DEPRECATE;_MBCS;%(PreprocessorDefinitions) %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true true true MachineX86 MaxSpeed true $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) MultiThreadedDLL true Level3 ProgramDatabase _CRT_SECURE_NO_DEPRECATE;_MBCS;%(PreprocessorDefinitions) libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true true true MaxSpeed true $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) MultiThreaded true Level3 ProgramDatabase _CRT_SECURE_NO_DEPRECATE;_MBCS;%(PreprocessorDefinitions) %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true true true Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) true EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue %(AdditionalLibraryDirectories) true MachineX86 Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase %(AdditionalLibraryDirectories) true {76efc06c-1f64-4478-abe8-79832716b393} false sleuthkit-4.2.0/win32/tsk_jni/tsk_jni.vcxproj000755 000765 000024 00000053235 12576320700 022021 0ustar00carrierstaff000000 000000  Debug_NoLibs Win32 Debug_NoLibs x64 Debug Win32 Debug x64 Release_NoLibs Win32 Release_NoLibs x64 Release Win32 Release x64 libtsk_jni {62D88133-09F6-4E13-B39F-36FCEFBE4FAF} tsk_jni Win32Proj DynamicLibrary Unicode DynamicLibrary Unicode Windows7.1SDK DynamicLibrary false false Unicode true v100 DynamicLibrary false false Unicode true v100 DynamicLibrary false false Unicode true Windows7.1SDK DynamicLibrary false false Unicode true Windows7.1SDK DynamicLibrary Unicode DynamicLibrary Unicode Windows7.1SDK <_ProjectFileVersion>10.0.30319.1 $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) true true $(SolutionDir)$(Configuration)\ $(SolutionDir)$(Configuration)\ $(OutDir) $(OutDir) $(IntDir) $(IntDir) $(IntDir) $(IntDir) false false false false $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) true true Disabled $(JDK_HOME)\include;$(JDK_HOME)\include\win32;$(ProjectDir)\..\..;$(ProjectDir)\..\..\tsk\hashdb;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_WINDOWS;_USRDLL;TSK_JNI_EXPORTS;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue false libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Windows MachineX86 true $(TargetDir)$(TargetName).map Disabled $(JDK_HOME)\include;$(JDK_HOME)\include\win32;$(ProjectDir)\..\..;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_WINDOWS;_USRDLL;TSK_JNI_EXPORTS;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase false libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Windows true $(TargetDir)$(TargetName).map MaxSpeed true 
$(JDK_HOME)\include;$(JDK_HOME)\include\win32;$(ProjectDir)\..\..;$(ProjectDir)\..\..\tsk\hashdb;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_WINDOWS;_USRDLL;TSK_JNI_EXPORTS;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) MultiThreadedDLL true Level3 libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) false Windows true true MachineX86 true $(TargetDir)$(TargetName).map MaxSpeed true $(JDK_HOME)\include;$(JDK_HOME)\include\win32;$(ProjectDir)\..\..;$(ProjectDir)\..\..\tsk\hashdb;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_WINDOWS;_USRDLL;TSK_JNI_EXPORTS;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) MultiThreaded true Level3 %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) false Windows true true MachineX86 true $(TargetDir)$(TargetName).map MaxSpeed true $(JDK_HOME)\include;$(JDK_HOME)\include\win32;$(ProjectDir)\..\..;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_WINDOWS;_USRDLL;TSK_JNI_EXPORTS;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) MultiThreadedDLL true Level3 libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) false Windows true true true $(TargetDir)$(TargetName).map MaxSpeed true $(JDK_HOME)\include;$(JDK_HOME)\include\win32;$(ProjectDir)\..\..;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_WINDOWS;_USRDLL;TSK_JNI_EXPORTS;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) MultiThreaded true Level3 %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) false Windows true true true $(TargetDir)$(TargetName).map Disabled $(JDK_HOME)\include;$(JDK_HOME)\include\win32;$(ProjectDir)\..\..;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_WINDOWS;_USRDLL;TSK_JNI_EXPORTS;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue false %(AdditionalLibraryDirectories) true Windows MachineX86 true $(TargetDir)$(TargetName).map Disabled $(JDK_HOME)\include;$(JDK_HOME)\include\win32;$(ProjectDir)\..\..;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_WINDOWS;_USRDLL;TSK_JNI_EXPORTS;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase false %(AdditionalLibraryDirectories) true Windows true $(TargetDir)$(TargetName).map {76efc06c-1f64-4478-abe8-79832716b393} false sleuthkit-4.2.0/win32/tsk_gettimes/tsk_gettimes.vcxproj000755 000765 000024 00000044642 12576320700 024125 0ustar00carrierstaff000000 000000  Debug_NoLibs Win32 Debug_NoLibs x64 Debug Win32 Debug x64 Release_NoLibs Win32 Release_NoLibs x64 Release Win32 Release x64 {11A8927C-F971-4104-A286-5DC11C25E2EC} tsk_gettimes ManagedCProj Application Unicode true Application Unicode false Windows7.1SDK Application Unicode true true v100 Application Unicode false true v100 Application Unicode false true Windows7.1SDK Application Unicode false true Windows7.1SDK Application Unicode true Application Unicode false Windows7.1SDK <_ProjectFileVersion>10.0.30319.1 $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) true true $(SolutionDir)$(Configuration)\ $(SolutionDir)$(Configuration)\ $(OutDir) $(OutDir) $(IntDir) $(IntDir) $(IntDir) $(IntDir) false false false false $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) true true false false Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) MultiThreadedDebugDLL Level3 ProgramDatabase 
libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true true MachineX86 Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) MultiThreadedDebugDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true true $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) MultiThreadedDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true MachineX86 $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) MultiThreaded Level3 ProgramDatabase %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true MachineX86 $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) MultiThreadedDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true true true $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) MultiThreaded Level3 ProgramDatabase %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true true true Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) MultiThreadedDebugDLL Level3 ProgramDatabase %(AdditionalLibraryDirectories) true true MachineX86 Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) MultiThreadedDebugDLL Level3 ProgramDatabase %(AdditionalLibraryDirectories) true true true true true true true true {76efc06c-1f64-4478-abe8-79832716b393} false sleuthkit-4.2.0/win32/tsk_comparedir/tsk_compare.vcxproj000755 000765 000024 00000041576 12576320700 024241 0ustar00carrierstaff000000 000000  Debug_NoLibs Win32 Debug_NoLibs x64 Debug Win32 Debug x64 Release_NoLibs Win32 Release_NoLibs x64 Release Win32 Release x64 tsk_comparedir {8EE881F4-78DC-49C7-8845-E842358AC0FA} tsk_compare Application Application Unicode Windows7.1SDK Application MultiByte true v100 Application MultiByte true v100 Application Unicode true Windows7.1SDK Application Unicode true Windows7.1SDK Application MultiByte Application Unicode Windows7.1SDK <_ProjectFileVersion>10.0.30319.1 $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) $(SolutionDir)$(Configuration)\ $(SolutionDir)$(Configuration)\ $(OutDir) $(OutDir) $(IntDir) $(IntDir) $(IntDir) $(IntDir) $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) true true Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true MachineX86 Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true MaxSpeed true 
$(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) MultiThreadedDLL true Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true true true MachineX86 MaxSpeed true $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) MultiThreaded true Level3 ProgramDatabase %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true true true MachineX86 MaxSpeed true $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) MultiThreadedDLL true Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true true true MaxSpeed true $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) MultiThreaded true Level3 ProgramDatabase %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true true true $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL Level1 EditAndContinue Disabled %(AdditionalLibraryDirectories) true true NotSet $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level1 ProgramDatabase Disabled %(AdditionalLibraryDirectories) true true {76efc06c-1f64-4478-abe8-79832716b393} false sleuthkit-4.2.0/win32/posix-sample/posix-sample.vcxproj000755 000765 000024 00000045775 12576320700 023773 0ustar00carrierstaff000000 000000  Debug_NoLibs Win32 Debug_NoLibs x64 Debug Win32 Debug x64 Release_NoLibs Win32 Release_NoLibs x64 Release Win32 Release x64 {1BA0B9E8-F135-494F-9CF5-86427C1F6E41} posixsample Win32Proj Application Unicode Application Unicode Windows7.1SDK Application Unicode true v100 Application Unicode true v100 Application Unicode true Windows7.1SDK Application Unicode true Windows7.1SDK Application Unicode Application Unicode Windows7.1SDK <_ProjectFileVersion>10.0.30319.1 $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) true true $(SolutionDir)$(Configuration)\ $(SolutionDir)$(Configuration)\ $(OutDir) $(OutDir) $(IntDir) $(IntDir) $(IntDir) $(IntDir) false false false false $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) true true Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console false MachineX86 Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console false $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreadedDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console true true false MachineX86 $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreaded Level3 ProgramDatabase %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true 
Console true true false MachineX86 $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreadedDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console true true false $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreaded Level3 ProgramDatabase %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console true true false Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue %(AdditionalLibraryDirectories) true Console false MachineX86 Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase %(AdditionalLibraryDirectories) true Console false {76efc06c-1f64-4478-abe8-79832716b393} false sleuthkit-4.2.0/win32/posix-cpp-sample/posix-cpp-sample.vcxproj000755 000765 000024 00000045170 12576320700 025320 0ustar00carrierstaff000000 000000  Debug_NoLibs Win32 Debug_NoLibs x64 Debug Win32 Debug x64 Release_NoLibs Win32 Release_NoLibs x64 Release Win32 Release x64 {5594DC0E-191C-4F2A-83FE-97F53A9C1222} posixcppsample Win32Proj Application Unicode Application Unicode Windows7.1SDK Application Unicode true v100 Application Unicode true v100 Application Unicode true Windows7.1SDK Application Unicode true Windows7.1SDK Application Unicode Application Unicode Windows7.1SDK <_ProjectFileVersion>10.0.30319.1 $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) true true $(SolutionDir)$(Configuration)\ $(SolutionDir)$(Configuration)\ $(OutDir) $(OutDir) $(IntDir) $(IntDir) $(IntDir) $(IntDir) false false false false $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) true true Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console MachineX86 Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console MaxSpeed true $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreadedDLL true Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console true true MachineX86 MaxSpeed true $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreaded true Level3 ProgramDatabase %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console true true MachineX86 MaxSpeed true $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreadedDLL true Level3 ProgramDatabase 
libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console true true MaxSpeed true $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreaded true Level3 ProgramDatabase %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console true true Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue %(AdditionalLibraryDirectories) true Console MachineX86 Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase %(AdditionalLibraryDirectories) true Console {76efc06c-1f64-4478-abe8-79832716b393} false sleuthkit-4.2.0/win32/mmstat/mmstat.vcxproj000755 000765 000024 00000045543 12576320700 021534 0ustar00carrierstaff000000 000000  Debug_NoLibs Win32 Debug_NoLibs x64 Debug Win32 Debug x64 Release_NoLibs Win32 Release_NoLibs x64 Release Win32 Release x64 {5D75FBFB-539A-4014-ACEB-520BB16F5BFC} mmstat Win32Proj Application Unicode Application Unicode Windows7.1SDK Application Unicode true v100 Application Unicode true v100 Application Unicode true Windows7.1SDK Application Unicode true Windows7.1SDK Application Unicode Application Unicode Windows7.1SDK <_ProjectFileVersion>10.0.30319.1 $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) true true $(SolutionDir)$(Configuration)\ $(SolutionDir)$(Configuration)\ $(OutDir) $(OutDir) $(IntDir) $(IntDir) $(IntDir) $(IntDir) false false false false $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) true true Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console false MachineX86 Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console false $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreadedDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console true true false MachineX86 $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreaded Level3 ProgramDatabase %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console true true false MachineX86 $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreadedDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console true true false $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreaded Level3 ProgramDatabase %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console true true false Disabled 
$(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue %(AdditionalLibraryDirectories) true Console false MachineX86 Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase %(AdditionalLibraryDirectories) true Console false {76efc06c-1f64-4478-abe8-79832716b393} false sleuthkit-4.2.0/win32/mmls/mmls.vcxproj000755 000765 000024 00000045537 12576320700 020645 0ustar00carrierstaff000000 000000  Debug_NoLibs Win32 Debug_NoLibs x64 Debug Win32 Debug x64 Release_NoLibs Win32 Release_NoLibs x64 Release Win32 Release x64 {712DD83B-786E-485E-83C7-7197DD851B78} mmls Win32Proj Application Unicode Application Unicode Windows7.1SDK Application Unicode true v100 Application Unicode true v100 Application Unicode true Windows7.1SDK Application Unicode true Windows7.1SDK Application Unicode Application Unicode Windows7.1SDK <_ProjectFileVersion>10.0.30319.1 $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) true true $(SolutionDir)$(Configuration)\ $(SolutionDir)$(Configuration)\ $(OutDir) $(OutDir) $(IntDir) $(IntDir) $(IntDir) $(IntDir) false false false false $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) true true Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console false MachineX86 Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console false $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreadedDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console true true false MachineX86 $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreaded Level3 ProgramDatabase %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console true true false MachineX86 $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreadedDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console true true false $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreaded Level3 ProgramDatabase %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console true true false Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue %(AdditionalLibraryDirectories) true Console false MachineX86 Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase %(AdditionalLibraryDirectories) true 
Console false {76efc06c-1f64-4478-abe8-79832716b393} false sleuthkit-4.2.0/win32/mmcat/mmcat.vcxproj000755 000765 000024 00000045767 12576320700 021134 0ustar00carrierstaff000000 000000  Debug_NoLibs Win32 Debug_NoLibs x64 Debug Win32 Debug x64 Release_NoLibs Win32 Release_NoLibs x64 Release Win32 Release x64 {A15F1E4F-951A-403E-B746-2A6D63D9C416} mmcat Win32Proj Application Unicode Application Unicode Windows7.1SDK Application Unicode true v100 Application Unicode true v100 Application Unicode true Windows7.1SDK Application Unicode true Windows7.1SDK Application Unicode Application Unicode Windows7.1SDK <_ProjectFileVersion>10.0.30319.1 $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) true true $(SolutionDir)$(Configuration)\ $(SolutionDir)$(Configuration)\ $(OutDir) $(OutDir) $(IntDir) $(IntDir) $(IntDir) $(IntDir) false false false false $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) true true Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console false MachineX86 Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console false $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreadedDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console true true false MachineX86 $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreaded Level3 ProgramDatabase %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console true true false MachineX86 $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreadedDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console true true false $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreaded Level3 ProgramDatabase %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console true true false Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue %(AdditionalLibraryDirectories) true Console false MachineX86 Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase %(AdditionalLibraryDirectories) true Console false {76efc06c-1f64-4478-abe8-79832716b393} false sleuthkit-4.2.0/win32/libtsk/libtsk.vcxproj000755 000765 000024 00000055125 12576320700 021477 0ustar00carrierstaff000000 000000  Debug_NoLibs Win32 Debug_NoLibs x64 Debug Win32 Debug x64 Release_NoLibs Win32 Release_NoLibs x64 
Release Win32 Release x64 {76EFC06C-1F64-4478-ABE8-79832716B393} libtsk Win32Proj StaticLibrary Unicode StaticLibrary Unicode Windows7.1SDK StaticLibrary Unicode true v100 StaticLibrary Unicode true v100 StaticLibrary Unicode true Windows7.1SDK StaticLibrary Unicode true Windows7.1SDK StaticLibrary Unicode StaticLibrary Unicode Windows7.1SDK <_ProjectFileVersion>10.0.30319.1 $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) $(SolutionDir)$(Configuration)\ $(SolutionDir)$(Configuration)\ $(OutDir) $(OutDir) $(IntDir) $(IntDir) $(IntDir) $(IntDir) $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) Disabled $(ProjectDir)\..\..\;$(LIBEWF_HOME)\common;$(LIBEWF_HOME)\include;$(LIBEWF_HOME)\..\zlib;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;HAVE_LIBEWF;HAVE_LIBZ;WIN32;_DEBUG;_LIB;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase Default copy "$(LIBEWF_HOME)\msvscpp\release\libewf.dll" "$(OutDir)" copy "$(LIBEWF_HOME)\msvscpp\release\zlib.dll" "$(OutDir)" Disabled $(ProjectDir)\..\..\;$(LIBEWF_HOME)\common;$(LIBEWF_HOME)\include;$(LIBEWF_HOME)\..\zlib;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;HAVE_LIBEWF;HAVE_LIBZ;WIN32;_DEBUG;_LIB;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase copy "$(LIBEWF_HOME)\msvscpp\x64\release\libewf.dll" "$(OutDir)" copy "$(LIBEWF_HOME)\msvscpp\x64\release\zlib.dll" "$(OutDir)" MaxSpeed true $(ProjectDir)\..\..\;$(LIBEWF_HOME)\common;$(LIBEWF_HOME)\include;$(LIBEWF_HOME)\..\zlib;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;HAVE_LIBEWF;HAVE_LIBZ;WIN32;NDEBUG;_LIB;%(PreprocessorDefinitions) MultiThreadedDLL true Level3 ProgramDatabase copy "$(LIBEWF_HOME)\msvscpp\release\libewf.dll" "$(OutDir)" copy "$(LIBEWF_HOME)\msvscpp\release\zlib.dll" "$(OutDir)" MaxSpeed true $(ProjectDir)\..\..\;$(LIBEWF_HOME)\common;$(LIBEWF_HOME)\include;$(LIBEWF_HOME)\..\zlib;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;WIN32;NDEBUG;_LIB;%(PreprocessorDefinitions) MultiThreaded true Level3 ProgramDatabase MaxSpeed true $(ProjectDir)\..\..\;$(LIBEWF_HOME)\common;$(LIBEWF_HOME)\include;$(LIBEWF_HOME)\..\zlib;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;HAVE_LIBEWF;HAVE_LIBZ;WIN32;NDEBUG;_LIB;%(PreprocessorDefinitions) MultiThreadedDLL true Level3 ProgramDatabase copy "$(LIBEWF_HOME)\msvscpp\x64\release\libewf.dll" "$(OutDir)" copy "$(LIBEWF_HOME)\msvscpp\x64\release\zlib.dll" "$(OutDir)" MaxSpeed true $(ProjectDir)\..\..\;$(LIBEWF_HOME)\common;$(LIBEWF_HOME)\include;$(LIBEWF_HOME)\..\zlib;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;WIN32;NDEBUG;_LIB;%(PreprocessorDefinitions) MultiThreaded true Level3 ProgramDatabase Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;WIN32;_DEBUG;_LIB;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;WIN32;_DEBUG;_LIB;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase sleuthkit-4.2.0/win32/jls/jls.vcxproj000755 000765 000024 00000045535 12576320700 020303 0ustar00carrierstaff000000 000000  Debug_NoLibs Win32 Debug_NoLibs x64 Debug Win32 Debug x64 Release_NoLibs Win32 Release_NoLibs x64 Release Win32 Release x64 {C52F935E-1FD2-443C-A181-27908DAB3BC8} jls Win32Proj Application Unicode Application Unicode Windows7.1SDK Application Unicode true v100 Application Unicode true v100 
Application Unicode true Windows7.1SDK Application Unicode true Windows7.1SDK Application Unicode Application Unicode Windows7.1SDK <_ProjectFileVersion>10.0.30319.1 $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) true true $(SolutionDir)$(Configuration)\ $(SolutionDir)$(Configuration)\ $(OutDir) $(OutDir) $(IntDir) $(IntDir) $(IntDir) $(IntDir) false false false false $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) true true Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console false MachineX86 Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console false $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreadedDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console true true false MachineX86 $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreaded Level3 ProgramDatabase %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console true true false MachineX86 $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreadedDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console true true false $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreaded Level3 ProgramDatabase %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console true true false Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue %(AdditionalLibraryDirectories) true Console false MachineX86 Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase %(AdditionalLibraryDirectories) true Console false {76efc06c-1f64-4478-abe8-79832716b393} false sleuthkit-4.2.0/win32/jcat/jcat.vcxproj000755 000765 000024 00000045567 12576320700 020572 0ustar00carrierstaff000000 000000  Debug_NoLibs Win32 Debug_NoLibs x64 Debug Win32 Debug x64 Release_NoLibs Win32 Release_NoLibs x64 Release Win32 Release x64 {44A003BE-400D-4434-AFED-64D8E3B448D9} jcat Win32Proj Application Unicode Application Unicode Windows7.1SDK Application Unicode true v100 Application Unicode true v100 Application Unicode true Windows7.1SDK Application Unicode true Windows7.1SDK Application Unicode Application Unicode Windows7.1SDK <_ProjectFileVersion>10.0.30319.1 $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) true true $(SolutionDir)$(Configuration)\ $(SolutionDir)$(Configuration)\ $(OutDir) $(OutDir) $(IntDir) $(IntDir) $(IntDir) $(IntDir) false false false false $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) 
$(IntDir) true true Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console false MachineX86 Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console false $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) MultiThreadedDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console true true false MachineX86 $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) MultiThreaded Level3 ProgramDatabase %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console true true false MachineX86 $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) MultiThreadedDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console true true false $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) MultiThreaded Level3 ProgramDatabase %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console true true false Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue %(AdditionalLibraryDirectories) true Console false MachineX86 Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase %(AdditionalLibraryDirectories) true Console false {76efc06c-1f64-4478-abe8-79832716b393} false sleuthkit-4.2.0/win32/istat/istat.vcxproj000755 000765 000024 00000045571 12576320700 021173 0ustar00carrierstaff000000 000000  Debug_NoLibs Win32 Debug_NoLibs x64 Debug Win32 Debug x64 Release_NoLibs Win32 Release_NoLibs x64 Release Win32 Release x64 {D7643AD7-8518-4B3E-8F3F-F11258D9540E} istat Win32Proj Application Unicode Application Unicode Windows7.1SDK Application Unicode true v100 Application Unicode true v100 Application Unicode true Windows7.1SDK Application Unicode true Windows7.1SDK Application Unicode Application Unicode Windows7.1SDK <_ProjectFileVersion>10.0.30319.1 $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) true true $(SolutionDir)$(Configuration)\ $(SolutionDir)$(Configuration)\ $(OutDir) $(OutDir) $(IntDir) $(IntDir) $(IntDir) $(IntDir) false false false false $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) true true Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console false MachineX86 Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) 
_CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console false $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) MultiThreadedDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console true true false MachineX86 $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) MultiThreaded Level3 ProgramDatabase %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console true true false MachineX86 $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) MultiThreadedDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console true true false $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) MultiThreaded Level3 ProgramDatabase %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console true true false Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue %(AdditionalLibraryDirectories) true Console false MachineX86 Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase %(AdditionalLibraryDirectories) true Console false {76efc06c-1f64-4478-abe8-79832716b393} false sleuthkit-4.2.0/win32/img_stat/img_stat.vcxproj000755 000765 000024 00000045550 12576320700 022336 0ustar00carrierstaff000000 000000  Debug_NoLibs Win32 Debug_NoLibs x64 Debug Win32 Debug x64 Release_NoLibs Win32 Release_NoLibs x64 Release Win32 Release x64 {48F52EA8-A5D1-4BF4-B774-6ECFCB0CE3C9} img_stat Win32Proj Application Unicode Application Unicode Windows7.1SDK Application Unicode true v100 Application Unicode true v100 Application Unicode true Windows7.1SDK Application Unicode true Windows7.1SDK Application Unicode Application Unicode Windows7.1SDK <_ProjectFileVersion>10.0.30319.1 $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) true true $(SolutionDir)$(Configuration)\ $(SolutionDir)$(Configuration)\ $(OutDir) $(OutDir) $(IntDir) $(IntDir) $(IntDir) $(IntDir) false false false false $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) true true Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console false MachineX86 Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console false $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreadedDLL Level3 ProgramDatabase 
libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console true true false MachineX86 $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreaded Level3 ProgramDatabase %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console true true false MachineX86 $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreadedDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console true true false $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreaded Level3 ProgramDatabase %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console true true false Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue %(AdditionalLibraryDirectories) true Console false MachineX86 Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase %(AdditionalLibraryDirectories) true Console false {76efc06c-1f64-4478-abe8-79832716b393} false sleuthkit-4.2.0/win32/img_cat/img_cat.vcxproj000755 000765 000024 00000045576 12576320700 021736 0ustar00carrierstaff000000 000000  Debug_NoLibs Win32 Debug_NoLibs x64 Debug Win32 Debug x64 Release_NoLibs Win32 Release_NoLibs x64 Release Win32 Release x64 {671D843F-4DFA-4CB8-8BC9-D44E7F4ECF1E} img_cat Win32Proj Application Unicode Application Unicode Windows7.1SDK Application Unicode true v100 Application Unicode true v100 Application Unicode true Windows7.1SDK Application Unicode true Windows7.1SDK Application Unicode Application Unicode Windows7.1SDK <_ProjectFileVersion>10.0.30319.1 $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) true true $(SolutionDir)$(Configuration)\ $(SolutionDir)$(Configuration)\ $(OutDir) $(OutDir) $(IntDir) $(IntDir) $(IntDir) $(IntDir) false false false false $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) true true Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console false MachineX86 Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console false $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) MultiThreadedDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console true true false MachineX86 $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) MultiThreaded Level3 ProgramDatabase %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console true true false MachineX86 
$(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) MultiThreadedDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console true true false $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) MultiThreaded Level3 ProgramDatabase %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console true true false Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue %(AdditionalLibraryDirectories) true Console false MachineX86 Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase %(AdditionalLibraryDirectories) true Console false {76efc06c-1f64-4478-abe8-79832716b393} false sleuthkit-4.2.0/win32/ils/ils.vcxproj000755 000765 000024 00000045535 12576320700 020301 0ustar00carrierstaff000000 000000  Debug_NoLibs Win32 Debug_NoLibs x64 Debug Win32 Debug x64 Release_NoLibs Win32 Release_NoLibs x64 Release Win32 Release x64 {62C97F5E-64DD-4623-9563-747C4C173348} ils Win32Proj Application Unicode Application Unicode Windows7.1SDK Application Unicode true v100 Application Unicode true v100 Application Unicode true Windows7.1SDK Application Unicode true Windows7.1SDK Application Unicode Application Unicode Windows7.1SDK <_ProjectFileVersion>10.0.30319.1 $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) true true $(SolutionDir)$(Configuration)\ $(SolutionDir)$(Configuration)\ $(OutDir) $(OutDir) $(IntDir) $(IntDir) $(IntDir) $(IntDir) false false false false $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) true true Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console false MachineX86 Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console false $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreadedDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console true true false MachineX86 $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreaded Level3 ProgramDatabase %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console true true false MachineX86 $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreadedDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console true true false $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreaded Level3 ProgramDatabase 
%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console true true false Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue %(AdditionalLibraryDirectories) true Console false MachineX86 Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase %(AdditionalLibraryDirectories) true Console false {76efc06c-1f64-4478-abe8-79832716b393} false sleuthkit-4.2.0/win32/ifind/ifind.vcxproj000755 000765 000024 00000045571 12576320700 021105 0ustar00carrierstaff000000 000000  Debug_NoLibs Win32 Debug_NoLibs x64 Debug Win32 Debug x64 Release_NoLibs Win32 Release_NoLibs x64 Release Win32 Release x64 {52251CB2-65A3-421B-9CB4-7DAC13BB3758} ifind Win32Proj Application Unicode Application Unicode Windows7.1SDK Application Unicode true v100 Application Unicode true v100 Application Unicode true Windows7.1SDK Application Unicode true Windows7.1SDK Application Unicode Application Unicode Windows7.1SDK <_ProjectFileVersion>10.0.30319.1 $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) true true $(SolutionDir)$(Configuration)\ $(SolutionDir)$(Configuration)\ $(OutDir) $(OutDir) $(IntDir) $(IntDir) $(IntDir) $(IntDir) false false false false $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) true true Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console false MachineX86 Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console false $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) MultiThreadedDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console true true false MachineX86 $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) MultiThreaded Level3 ProgramDatabase %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console true true false MachineX86 $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) MultiThreadedDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console true true false $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) MultiThreaded Level3 ProgramDatabase %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console true true false Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue %(AdditionalLibraryDirectories) true Console false MachineX86 Disabled 
$(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase %(AdditionalLibraryDirectories) true Console false {76efc06c-1f64-4478-abe8-79832716b393} false sleuthkit-4.2.0/win32/icat/icat.vcxproj000755 000765 000024 00000045537 12576320700 020565 0ustar00carrierstaff000000 000000  Debug_NoLibs Win32 Debug_NoLibs x64 Debug Win32 Debug x64 Release_NoLibs Win32 Release_NoLibs x64 Release Win32 Release x64 {38D89022-2C83-4436-A333-375A2E3E7BB0} icat Win32Proj Application Unicode Application Unicode Windows7.1SDK Application Unicode true v100 Application Unicode true v100 Application Unicode true Windows7.1SDK Application Unicode true Windows7.1SDK Application Unicode Application Unicode Windows7.1SDK <_ProjectFileVersion>10.0.30319.1 $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) true true $(SolutionDir)$(Configuration)\ $(SolutionDir)$(Configuration)\ $(OutDir) $(OutDir) $(IntDir) $(IntDir) $(IntDir) $(IntDir) false false false false $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) true true Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console false MachineX86 Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console false $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreadedDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console true true false MachineX86 $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreaded Level3 ProgramDatabase %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console true true false MachineX86 $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreadedDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console true true false $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreaded Level3 ProgramDatabase %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console true true false Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue %(AdditionalLibraryDirectories) true Console false MachineX86 Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase %(AdditionalLibraryDirectories) true Console false {76efc06c-1f64-4478-abe8-79832716b393} false sleuthkit-4.2.0/win32/hfind/hfind.vcxproj000755 000765 000024 00000045543 12576320700 021102 0ustar00carrierstaff000000 000000  Debug_NoLibs Win32 Debug_NoLibs x64 Debug Win32 
Debug x64 Release_NoLibs Win32 Release_NoLibs x64 Release Win32 Release x64 {0B127AE3-0C18-4EEF-AB20-A0693E6AA822} hfind Win32Proj Application Unicode Application Unicode Windows7.1SDK Application Unicode true v100 Application Unicode true v100 Application Unicode true Windows7.1SDK Application Unicode true Windows7.1SDK Application Unicode Application Unicode Windows7.1SDK <_ProjectFileVersion>10.0.30319.1 $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) true true $(SolutionDir)$(Configuration)\ $(SolutionDir)$(Configuration)\ $(OutDir) $(OutDir) $(IntDir) $(IntDir) $(IntDir) $(IntDir) false false false false $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) true true Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console false MachineX86 Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console false $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreadedDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console true true false MachineX86 $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreaded Level3 ProgramDatabase %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console true true false MachineX86 $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreadedDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console true true false $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreaded Level3 ProgramDatabase %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console true true false Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue %(AdditionalLibraryDirectories) true Console false MachineX86 Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase %(AdditionalLibraryDirectories) true Console false {76efc06c-1f64-4478-abe8-79832716b393} false sleuthkit-4.2.0/win32/fsstat/fsstat.vcxproj000755 000765 000024 00000045543 12576320700 021532 0ustar00carrierstaff000000 000000  Debug_NoLibs Win32 Debug_NoLibs x64 Debug Win32 Debug x64 Release_NoLibs Win32 Release_NoLibs x64 Release Win32 Release x64 {D1E6567A-4F65-4832-8018-D33B3CB4692B} fsstat Win32Proj Application Unicode Application Unicode Windows7.1SDK Application Unicode true v100 Application Unicode true v100 Application Unicode true Windows7.1SDK Application Unicode true Windows7.1SDK Application Unicode Application Unicode Windows7.1SDK <_ProjectFileVersion>10.0.30319.1 
$(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) true true $(SolutionDir)$(Configuration)\ $(SolutionDir)$(Configuration)\ $(OutDir) $(OutDir) $(IntDir) $(IntDir) $(IntDir) $(IntDir) false false false false $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) true true Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console false MachineX86 Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console false $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreadedDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console true true false MachineX86 $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreaded Level3 ProgramDatabase %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console true true false MachineX86 $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreadedDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console true true false $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreaded Level3 ProgramDatabase %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console true true false Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue %(AdditionalLibraryDirectories) true Console false MachineX86 Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase %(AdditionalLibraryDirectories) true Console false {76efc06c-1f64-4478-abe8-79832716b393} false sleuthkit-4.2.0/win32/fls/fls.vcxproj000755 000765 000024 00000045565 12576320700 020276 0ustar00carrierstaff000000 000000  Debug_NoLibs Win32 Debug_NoLibs x64 Debug Win32 Debug x64 Release_NoLibs Win32 Release_NoLibs x64 Release Win32 Release x64 {58DA1042-AC19-4779-AC1A-AA8EEB3A4524} fls Win32Proj Application Unicode Application Unicode Windows7.1SDK Application Unicode true v100 Application Unicode true v100 Application Unicode true Windows7.1SDK Application Unicode true Windows7.1SDK Application Unicode Application Unicode Windows7.1SDK <_ProjectFileVersion>10.0.30319.1 $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) true true $(SolutionDir)$(Configuration)\ $(SolutionDir)$(Configuration)\ $(OutDir) $(OutDir) $(IntDir) $(IntDir) $(IntDir) $(IntDir) false false false false $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) true true Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) true EnableFastChecks 
MultiThreadedDebugDLL Level3 EditAndContinue libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console false MachineX86 Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console false $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) MultiThreadedDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console true true false MachineX86 $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) MultiThreaded Level3 ProgramDatabase %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console true true false MachineX86 $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) MultiThreadedDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console true true false $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) MultiThreaded Level3 ProgramDatabase %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console true true false Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue %(AdditionalLibraryDirectories) true Console false MachineX86 Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) _CRT_SECURE_NO_DEPRECATE;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase %(AdditionalLibraryDirectories) true Console false {76efc06c-1f64-4478-abe8-79832716b393} false sleuthkit-4.2.0/win32/ffind/ffind.vcxproj000755 000765 000024 00000045541 12576320700 021074 0ustar00carrierstaff000000 000000  Debug_NoLibs Win32 Debug_NoLibs x64 Debug Win32 Debug x64 Release_NoLibs Win32 Release_NoLibs x64 Release Win32 Release x64 {7C132953-1700-42FF-9F61-A814C9F2C758} ffind Win32Proj Application Unicode Application Unicode Windows7.1SDK Application Unicode true v100 Application Unicode true v100 Application Unicode true Windows7.1SDK Application Unicode true Windows7.1SDK Application Unicode Application Unicode Windows7.1SDK <_ProjectFileVersion>10.0.30319.1 $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) true true $(SolutionDir)$(Configuration)\ $(SolutionDir)$(Configuration)\ $(OutDir) $(OutDir) $(IntDir) $(IntDir) $(IntDir) $(IntDir) false false false false $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) true true Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console false MachineX86 Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) 
$(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console false $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreadedDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console true true false MachineX86 $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreaded Level3 ProgramDatabase %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console true true false MachineX86 $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreadedDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console true true false $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreaded Level3 ProgramDatabase %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console true true false Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue %(AdditionalLibraryDirectories) true Console false MachineX86 Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase %(AdditionalLibraryDirectories) true Console false {76efc06c-1f64-4478-abe8-79832716b393} false sleuthkit-4.2.0/win32/fcat/fcat.vcxproj000755 000765 000024 00000045537 12576320700 020557 0ustar00carrierstaff000000 000000  Debug_NoLibs Win32 Debug_NoLibs x64 Debug Win32 Debug x64 Release_NoLibs Win32 Release_NoLibs x64 Release Win32 Release x64 {E4A40368-152D-4D54-9E2E-4B140212F98F} fcat Win32Proj Application Unicode Application Unicode Windows7.1SDK Application Unicode true v100 Application Unicode true v100 Application Unicode true Windows7.1SDK Application Unicode true Windows7.1SDK Application Unicode Application Unicode Windows7.1SDK <_ProjectFileVersion>10.0.30319.1 $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) true true $(SolutionDir)$(Configuration)\ $(SolutionDir)$(Configuration)\ $(OutDir) $(OutDir) $(IntDir) $(IntDir) $(IntDir) $(IntDir) false false false false $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) true true Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console false MachineX86 Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console false $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreadedDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console true true false MachineX86 $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) 
WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreaded Level3 ProgramDatabase %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console true true false MachineX86 $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreadedDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console true true false $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreaded Level3 ProgramDatabase %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console true true false Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue %(AdditionalLibraryDirectories) true Console false MachineX86 Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase %(AdditionalLibraryDirectories) true Console false {76efc06c-1f64-4478-abe8-79832716b393} false sleuthkit-4.2.0/win32/docs/README-win32.txt000755 000765 000024 00000004424 12576320700 020664 0ustar00carrierstaff000000 000000 The Sleuth Kit Windows Executables http://www.sleuthkit.org/sleuthkit Brian Carrier [carrier@sleuthkit.org] Last Updated: July 2012 ====================================================================== This zip file contains the Microsoft Windows executables for The Sleuth Kit. The full source code (including Visual Studio Solution files) and documentation can be downloaded from: http://www.sleuthkit.org These are distributed under the IBM Public License and the Common Public License, which can be found in the licenses folder. NOTES The dll files in the zip file are required to run the executables. They must be either in the same directory as the executables or in the path. There have been reports of the exe files not running on some systems and they give the error "The system cannot execute the specified program". This occurs because the system can't find the needed dll files. Installing the "Microsoft Visual C++ 2008 SP1 Redistributable Package (x86)" seems to fix the problem. It can be downloaded from Microsoft: http://www.microsoft.com/downloads/en/confirmation.aspx?FamilyID=A5C84275-3B97-4AB7-A40D-3802B2AF5FC2&displaylang=en mactime.pl requires a Windows port of Perl to be installed. If you have the ".pl" extension associated with Perl, you should be able to run "mactime.pl" from the command line. Otherwise, you may need to run it as "perl mactime.pl". Examples of Windows ports of Perl include: - ActivePerl (http://www.activestate.com/activeperl/) - Strawberry Perl (http://strawberryperl.com/) CURRENT LIMITATIONS The tools do not currently support globbing, which means that you cannot use 'fls img.*' on a split image. Windows does not automatically expand the '*' to all file names. However, most split images can now be used in The Sleuth Kit by simply specifying the first segment's path. These programs can be run on a live system, if you use the \\.\PhysicalDrive0 syntax. Note though, that you may get errors or the file system type may not be detected because the data being read is out of sync with cached versions of the data. Unicode characters are not always properly displayed in the command shell. 
The AFF image formats are not supported. sleuthkit-4.2.0/win32/callback-sample/callback-sample.vcxproj000755 000765 000024 00000045555 12576320700 024753 0ustar00carrierstaff000000 000000  Debug_NoLibs Win32 Debug_NoLibs x64 Debug Win32 Debug x64 Release_NoLibs Win32 Release_NoLibs x64 Release Win32 Release x64 {6CE3D593-E90D-4CC1-A66B-694AC909F6B8} callbacksample Win32Proj Application Unicode Application Unicode Windows7.1SDK Application Unicode true v100 Application Unicode true v100 Application Unicode true Windows7.1SDK Application Unicode true Windows7.1SDK Application Unicode Application Unicode Windows7.1SDK <_ProjectFileVersion>10.0.30319.1 $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) true true $(SolutionDir)$(Configuration)\ $(SolutionDir)$(Configuration)\ $(OutDir) $(OutDir) $(IntDir) $(IntDir) $(IntDir) $(IntDir) false false false false $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) true true Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console false MachineX86 Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console false $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreadedDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console true true false MachineX86 $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreaded Level3 ProgramDatabase %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console true true false MachineX86 $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreadedDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console true true false $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreaded Level3 ProgramDatabase %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console true true false Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue %(AdditionalLibraryDirectories) true Console false MachineX86 Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase %(AdditionalLibraryDirectories) true Console false {76efc06c-1f64-4478-abe8-79832716b393} false sleuthkit-4.2.0/win32/callback-cpp-sample/callback-cpp-sample.vcxproj000755 000765 000024 00000045346 12576320700 026311 0ustar00carrierstaff000000 000000  Debug_NoLibs Win32 Debug_NoLibs x64 Debug Win32 Debug x64 Release_NoLibs Win32 Release_NoLibs x64 Release Win32 Release x64 {3B32F1BE-9686-4DC9-8197-F734D146E9F8} callbackcppsample Win32Proj Application Unicode 
Application Unicode Windows7.1SDK Application Unicode true v100 Application Unicode true v100 Application Unicode true Windows7.1SDK Application Unicode true Windows7.1SDK Application Unicode Application Unicode Windows7.1SDK <_ProjectFileVersion>10.0.30319.1 $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) true true $(SolutionDir)$(Configuration)\ $(SolutionDir)$(Configuration)\ $(OutDir) $(OutDir) $(IntDir) $(IntDir) $(IntDir) $(IntDir) false false false false $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) true true Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_WINDOWS;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console MachineX86 Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_WINDOWS;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console MaxSpeed true $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_WINDOWS;%(PreprocessorDefinitions) MultiThreadedDLL true Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console true true MachineX86 MaxSpeed true $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_WINDOWS;%(PreprocessorDefinitions) MultiThreaded true Level3 ProgramDatabase %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console true true MachineX86 MaxSpeed true $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_WINDOWS;%(PreprocessorDefinitions) MultiThreadedDLL true Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console true true MaxSpeed true $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_WINDOWS;%(PreprocessorDefinitions) MultiThreaded true Level3 ProgramDatabase %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console true true Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_WINDOWS;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue %(AdditionalLibraryDirectories) true Console false MachineX86 Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_WINDOWS;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase %(AdditionalLibraryDirectories) true Console false {76efc06c-1f64-4478-abe8-79832716b393} false sleuthkit-4.2.0/win32/blkstat/blkstat.vcxproj000755 000765 000024 00000045545 12576320700 022034 0ustar00carrierstaff000000 000000  Debug_NoLibs Win32 Debug_NoLibs x64 Debug Win32 Debug x64 Release_NoLibs Win32 Release_NoLibs x64 Release Win32 Release x64 {FBB66156-9A54-4713-A801-C507BE7A3AE3} blkstat Win32Proj Application Unicode Application Unicode Windows7.1SDK Application Unicode true v100 Application Unicode true v100 Application Unicode true Windows7.1SDK Application Unicode true Windows7.1SDK Application Unicode Application Unicode Windows7.1SDK <_ProjectFileVersion>10.0.30319.1 $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) true true $(SolutionDir)$(Configuration)\ 
$(SolutionDir)$(Configuration)\ $(OutDir) $(OutDir) $(IntDir) $(IntDir) $(IntDir) $(IntDir) false false false false $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) true true Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console false MachineX86 Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console false $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreadedDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console true true false MachineX86 $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreaded Level3 ProgramDatabase %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console true true false MachineX86 $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreadedDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console true true false $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreaded Level3 ProgramDatabase %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console true true false Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue %(AdditionalLibraryDirectories) true Console false MachineX86 Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase %(AdditionalLibraryDirectories) true Console false {76efc06c-1f64-4478-abe8-79832716b393} false sleuthkit-4.2.0/win32/blkls/blkls.vcxproj000755 000765 000024 00000045541 12576320700 021136 0ustar00carrierstaff000000 000000  Debug_NoLibs Win32 Debug_NoLibs x64 Debug Win32 Debug x64 Release_NoLibs Win32 Release_NoLibs x64 Release Win32 Release x64 {48D98A0A-BF9C-4D7E-9AF8-E4CAE8437997} blkls Win32Proj Application Unicode Application Unicode Windows7.1SDK Application Unicode true v100 Application Unicode true v100 Application Unicode true Windows7.1SDK Application Unicode true Windows7.1SDK Application Unicode Application Unicode Windows7.1SDK <_ProjectFileVersion>10.0.30319.1 $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) true true $(SolutionDir)$(Configuration)\ $(SolutionDir)$(Configuration)\ $(OutDir) $(OutDir) $(IntDir) $(IntDir) $(IntDir) $(IntDir) false false false false $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) true true Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue libewf.lib;zlib.lib;%(AdditionalDependencies) 
$(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console false MachineX86 Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console false $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreadedDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console true true false MachineX86 $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreaded Level3 ProgramDatabase %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console true true false MachineX86 $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreadedDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console true true false $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreaded Level3 ProgramDatabase %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console true true false Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue %(AdditionalLibraryDirectories) true Console false MachineX86 Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase %(AdditionalLibraryDirectories) true Console false {76efc06c-1f64-4478-abe8-79832716b393} false sleuthkit-4.2.0/win32/blkcat/blkcat.vcxproj000755 000765 000024 00000045543 12576320700 021422 0ustar00carrierstaff000000 000000  Debug_NoLibs Win32 Debug_NoLibs x64 Debug Win32 Debug x64 Release_NoLibs Win32 Release_NoLibs x64 Release Win32 Release x64 {A2BEA467-A4CC-4FA6-9C74-587498E35467} blkcat Win32Proj Application Unicode Application Unicode Windows7.1SDK Application Unicode true v100 Application Unicode true v100 Application Unicode true Windows7.1SDK Application Unicode true Windows7.1SDK Application Unicode Application Unicode Windows7.1SDK <_ProjectFileVersion>10.0.30319.1 $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) true true $(SolutionDir)$(Configuration)\ $(SolutionDir)$(Configuration)\ $(OutDir) $(OutDir) $(IntDir) $(IntDir) $(IntDir) $(IntDir) false false false false $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) true true Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console false MachineX86 Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console false 
$(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreadedDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console true true false MachineX86 $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreaded Level3 ProgramDatabase %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console true true false MachineX86 $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreadedDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console true true false $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreaded Level3 ProgramDatabase %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console true true false Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue %(AdditionalLibraryDirectories) true Console false MachineX86 Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase %(AdditionalLibraryDirectories) true Console false {76efc06c-1f64-4478-abe8-79832716b393} false sleuthkit-4.2.0/win32/blkcalc/blkcalc.vcxproj000755 000765 000024 00000045545 12576320700 021712 0ustar00carrierstaff000000 000000  Debug_NoLibs Win32 Debug_NoLibs x64 Debug Win32 Debug x64 Release_NoLibs Win32 Release_NoLibs x64 Release Win32 Release x64 {46B82840-9832-466F-8568-132407CA3853} blkcalc Win32Proj Application Unicode Application Unicode Windows7.1SDK Application Unicode true v100 Application Unicode true v100 Application Unicode true Windows7.1SDK Application Unicode true Windows7.1SDK Application Unicode Application Unicode Windows7.1SDK <_ProjectFileVersion>10.0.30319.1 $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) true true $(SolutionDir)$(Configuration)\ $(SolutionDir)$(Configuration)\ $(OutDir) $(OutDir) $(IntDir) $(IntDir) $(IntDir) $(IntDir) false false false false $(SolutionDir)$(Configuration)\ $(OutDir) $(IntDir) $(IntDir) true true Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console false MachineX86 Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console false $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreadedDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console true true false MachineX86 $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreaded Level3 ProgramDatabase 
%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\release;%(AdditionalLibraryDirectories) true Console true true false MachineX86 $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreadedDLL Level3 ProgramDatabase libewf.lib;zlib.lib;%(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console true true false $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) MultiThreaded Level3 ProgramDatabase %(AdditionalDependencies) $(LIBEWF_HOME)\msvscpp\x64\release;%(AdditionalLibraryDirectories) true Console true true false Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL Level3 EditAndContinue %(AdditionalLibraryDirectories) true Console false MachineX86 Disabled $(ProjectDir)\..\..\;%(AdditionalIncludeDirectories) WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) EnableFastChecks MultiThreadedDebugDLL Level3 ProgramDatabase %(AdditionalLibraryDirectories) true Console false {76efc06c-1f64-4478-abe8-79832716b393} false sleuthkit-4.2.0/unit_tests/base/000755 000765 000024 00000000000 12576326352 017451 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/unit_tests/Makefile.am000644 000765 000024 00000000016 12576320700 020557 0ustar00carrierstaff000000 000000 SUBDIRS= base sleuthkit-4.2.0/unit_tests/Makefile.in000644 000765 000024 00000045144 12576326300 020605 0ustar00carrierstaff000000 000000 # Makefile.in generated by automake 1.15 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2014 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) 
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) pkgdatadir = $(datadir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ pkglibexecdir = $(libexecdir)/@PACKAGE@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = unit_tests ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ax_pthread.m4 \ $(top_srcdir)/m4/cppunit.m4 \ $(top_srcdir)/m4/ax_jni_include_dir.m4 \ $(top_srcdir)/m4/ac_prog_javac_works.m4 \ $(top_srcdir)/m4/ac_prog_javac.m4 \ $(top_srcdir)/m4/ac_prog_java_works.m4 \ $(top_srcdir)/m4/ac_prog_java.m4 $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/tsk/tsk_config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ ctags-recursive dvi-recursive html-recursive info-recursive \ install-data-recursive install-dvi-recursive \ install-exec-recursive install-html-recursive \ install-info-recursive install-pdf-recursive \ install-ps-recursive install-recursive installcheck-recursive \ installdirs-recursive pdf-recursive ps-recursive \ tags-recursive uninstall-recursive am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive am__recursive_targets = \ $(RECURSIVE_TARGETS) \ $(RECURSIVE_CLEAN_TARGETS) \ $(am__extra_recursive_targets) AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ distdir am__tagged_files = $(HEADERS) 
$(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags DIST_SUBDIRS = $(SUBDIRS) am__DIST_COMMON = $(srcdir)/Makefile.in DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" ACLOCAL = @ACLOCAL@ ALLOCA = @ALLOCA@ AMTAR = @AMTAR@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ ANT_FOUND = @ANT_FOUND@ AR = @AR@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ JAVA = @JAVA@ JAVAC = @JAVAC@ JNI_CPPFLAGS = @JNI_CPPFLAGS@ LD = @LD@ LDFLAGS = @LDFLAGS@ LIBOBJS = @LIBOBJS@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBTSK_LDFLAGS = @LIBTSK_LDFLAGS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAINT = @MAINT@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ NM = @NM@ NMEDIT = @NMEDIT@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PERL = @PERL@ PTHREAD_CC = @PTHREAD_CC@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ PTHREAD_LIBS = @PTHREAD_LIBS@ RANLIB = @RANLIB@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ STRIP = @STRIP@ VERSION = @VERSION@ Z_PATH = @Z_PATH@ _ACJNI_JAVAC = @_ACJNI_JAVAC@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR 
= @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ ax_pthread_config = @ax_pthread_config@ bindir = @bindir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ datadir = @datadir@ datarootdir = @datarootdir@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ prefix = @prefix@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ uudecode = @uudecode@ SUBDIRS = base all: all-recursive .SUFFIXES: $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign unit_tests/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign unit_tests/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run 'make' without going through this Makefile. # To change the values of 'make' variables: instead of editing Makefiles, # (1) if the variable is set in 'config.status', edit 'config.status' # (which will cause the Makefiles to be regenerated when you run 'make'); # (2) otherwise, pass the desired values on the 'make' command line. 
$(am__recursive_targets): @fail=; \ if $(am__make_keepgoing); then \ failcom='fail=yes'; \ else \ failcom='exit 1'; \ fi; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-recursive TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! -f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-recursive CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-recursive cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ $(am__make_dryrun) \ || test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile installdirs: installdirs-recursive installdirs-am: install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f Makefile distclean-am: clean-am distclean-generic distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: .MAKE: $(am__recursive_targets) install-am install-strip .PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \ check-am clean clean-generic clean-libtool cscopelist-am ctags \ ctags-am distclean distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ installdirs-am maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-generic mostlyclean-libtool pdf pdf-am \ ps ps-am tags tags-am uninstall uninstall-am .PRECIOUS: Makefile # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: sleuthkit-4.2.0/unit_tests/base/errors_test.cpp000644 000765 000024 00000011757 12576320700 022532 0ustar00carrierstaff000000 000000 /* * errors_test.cpp * * Created on: Oct 22, 2010 * Author: benson */ #include #include #include #ifdef HAVE_PTHREAD #ifdef __APPLE__ #include #include extern "C" mach_port_t mach_task_self(void); #define SEM_TYPE semaphore_t #else #include #include #define SEM_TYPE sem_t #endif #endif #include "errors_test.h" // Registers the fixture into the 'registry' CPPUNIT_TEST_SUITE_REGISTRATION( ErrorsTest ); void ErrorsTest::setUp() {} void ErrorsTest::tearDown() {} void ErrorsTest::testInitialState() { TSK_ERROR_INFO *ei; ei = tsk_error_get_info(); CPPUNIT_ASSERT(0 == ei->t_errno); CPPUNIT_ASSERT(0 == ei->errstr[0]); CPPUNIT_ASSERT(0 == ei->errstr2[0]); } void ErrorsTest::testLengthChecks() { TSK_ERROR_INFO *ei; ei = tsk_error_get_info(); std::string s; for (unsigned x = 0; x < 4096; x ++) { s = s + std::string("x"); } tsk_error_set_errstr("%s", s.c_str()); std::string es(tsk_error_get_errstr()); CPPUNIT_ASSERT(es.size() < 1025); } #ifdef HAVE_PTHREAD struct xErrorsTestShared { SEM_TYPE sync_barrier; bool errno_check_failed; bool errstr_check_failed; bool errstr2_check_failed; bool failure; xErrorsTestShared() { failure = false; errno_check_failed = false; errstr_check_failed = false; errstr2_check_failed = false; } }; /* * This thread sets error variables, updates the semaphore, * waits on the semaphore, and reads them back. */ void * thread_1(void *arg) { xErrorsTestShared * shared = (xErrorsTestShared*) arg; // wait to be told to start. 
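    // The handshake with the main thread runs in three steps: (1) wait here
    // until the main thread posts sync_barrier, (2) set the TSK error state
    // (errno, errstr, errstr2) and post back so the main thread can verify
    // that its own error state is still clear, (3) wait again, then re-read
    // the values set in step 2 and record any mismatch in the shared flags.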
#ifdef __APPLE__ kern_return_t se = semaphore_wait(shared->sync_barrier); if (se != 0) { fprintf(stderr, "sem_wait failed: %d\n", se); shared->failure = true; } #else if (sem_wait(&shared->sync_barrier) != 0) { fprintf(stderr, "sem_wait failed: %s\n", strerror(errno)); shared->failure = true; } #endif tsk_error_set_errno(42); tsk_error_set_errstr("I just set errno to %d.", 42); tsk_error_set_errstr2("Indeed, I just set errno to %d.", 42); #ifdef __APPLE__ se = semaphore_signal(shared->sync_barrier); if (se != 0) { fprintf(stderr, "sem_signal failed: %d\n", se); shared->failure = true; } se = semaphore_wait(shared->sync_barrier); if (se != 0) { fprintf(stderr, "sem_wait failed: %d\n", se); shared->failure = true; } #else sem_post(&shared->sync_barrier); sem_wait(&shared->sync_barrier); #endif shared->errno_check_failed = 42 != tsk_error_get_errno(); char const * s = tsk_error_get_errstr(); shared->errstr_check_failed = 0 != strcmp("I just set errno to 42.", s); s = tsk_error_get_errstr2(); shared->errstr2_check_failed = 0 != strcmp("Indeed, I just set errno to 42.", s); return 0; } void ErrorsTest::testMultithreaded() { xErrorsTestShared shared; tsk_error_reset(); // start semaphore unlocked. Thread will lock. #ifdef __APPLE__ kern_return_t se; se = semaphore_create(mach_task_self(), &shared.sync_barrier, SYNC_POLICY_FIFO, 0); if (se != 0) { fprintf(stderr, "sem_init failed: %d\n", se); CPPUNIT_FAIL("Could not initialize semaphore"); } #else if (sem_init(&shared.sync_barrier, 0, 0)) { fprintf(stderr, "sem_init failed: %s\n", strerror(errno)); CPPUNIT_FAIL("Could not initialize semaphore"); } #endif pthread_t thread1; int pte = pthread_create(&thread1, 0, thread_1, &shared); if (pte != 0) { fprintf(stderr, "pthread_create failed: %d\n", pte); CPPUNIT_FAIL("pthread_create failed."); } #ifdef __APPLE__ se = semaphore_signal(shared.sync_barrier); if (se != 0) { fprintf(stderr, "semaphore_signal failed: %d\n", se); CPPUNIT_FAIL("Could not post to semaphore"); } se = semaphore_wait(shared.sync_barrier); if (se != 0) { fprintf(stderr, "semaphore_wait failed: %d\n", se); CPPUNIT_FAIL("Could not post to semaphore"); } #else // give thread permission to proceed if (sem_post(&shared.sync_barrier) != 0) { fprintf(stderr, "sem_post failed: %s\n", strerror(errno)); CPPUNIT_FAIL("Could not post to semaphore"); } // wait for thread to set some things. 
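    // Once the worker signals that it has set its error values, the
    // assertions after this wait check that the main thread's error state is
    // still empty (errno 0, empty errstr/errstr2) even though the worker just
    // set errno to 42: error info set in one thread must not leak into another.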
if (sem_wait(&shared.sync_barrier) != 0) { fprintf(stderr, "sem_wait failed: %s\n", strerror(errno)); CPPUNIT_FAIL("Could not wait on semaphore"); } #endif CPPUNIT_ASSERT(0 == tsk_error_get_errno()); CPPUNIT_ASSERT(0 == tsk_error_get_errstr()[0]); CPPUNIT_ASSERT(0 == tsk_error_get_errstr2()[0]); // give thread permission to proceed #ifdef __APPLE__ se = semaphore_signal(shared.sync_barrier); if (se != 0) { fprintf(stderr, "semaphore_signal failed: %d\n", se); CPPUNIT_FAIL("Could not post to semaphore"); } #else if (sem_post(&shared.sync_barrier) != 0) { fprintf(stderr, "sem_post failed: %s\n", strerror(errno)); CPPUNIT_FAIL("Could not post to semaphore"); } #endif void *exitval = 0; pte = pthread_join(thread1, &exitval); if (pte != 0) { fprintf(stderr, "pthread_join failed: %d\n", pte); CPPUNIT_FAIL("pthread_join failed."); } CPPUNIT_ASSERT(!shared.errno_check_failed); CPPUNIT_ASSERT(!shared.errstr_check_failed); CPPUNIT_ASSERT(!shared.errstr2_check_failed); } #endif sleuthkit-4.2.0/unit_tests/base/errors_test.h000644 000765 000024 00000001165 12576320700 022167 0ustar00carrierstaff000000 000000 /* * errors_test.h * * Created on: Oct 22, 2010 * Author: benson */ #ifndef ERRORS_TEST_H_ #define ERRORS_TEST_H_ #include class ErrorsTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE( ErrorsTest ); CPPUNIT_TEST(testInitialState); CPPUNIT_TEST(testLengthChecks); #ifdef HAVE_PTHREAD CPPUNIT_TEST(testMultithreaded); #endif CPPUNIT_TEST_SUITE_END(); public: void setUp(); void tearDown(); void testConstructor(); void testInitialState(); void testLengthChecks(); #ifdef HAVE_PTHREAD void testMultithreaded(); #endif }; #endif /* ERRORS_TEST_H_ */ sleuthkit-4.2.0/unit_tests/base/Makefile.am000644 000765 000024 00000000434 12576320700 021475 0ustar00carrierstaff000000 000000 AM_CPPFLAGS = -I../.. -Wall $(CPPUNIT_CFLAGS) LDADD = ../../tsk/libtsk.la $(CPPUNIT_LIBS) LDFLAGS = -static noinst_PROGRAMS = test_base test_base_SOURCES= test_base.cpp errors_test.cpp errors_test.h indent: indent *.cpp *.h clean-local: -rm -f *.cpp~ *.h~ check: ./test_base sleuthkit-4.2.0/unit_tests/base/Makefile.in000644 000765 000024 00000045643 12576326300 021523 0ustar00carrierstaff000000 000000 # Makefile.in generated by automake 1.15 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2014 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) 
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) pkgdatadir = $(datadir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ pkglibexecdir = $(libexecdir)/@PACKAGE@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ noinst_PROGRAMS = test_base$(EXEEXT) subdir = unit_tests/base ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ax_pthread.m4 \ $(top_srcdir)/m4/cppunit.m4 \ $(top_srcdir)/m4/ax_jni_include_dir.m4 \ $(top_srcdir)/m4/ac_prog_javac_works.m4 \ $(top_srcdir)/m4/ac_prog_javac.m4 \ $(top_srcdir)/m4/ac_prog_java_works.m4 \ $(top_srcdir)/m4/ac_prog_java.m4 $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/tsk/tsk_config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = PROGRAMS = $(noinst_PROGRAMS) am_test_base_OBJECTS = test_base.$(OBJEXT) errors_test.$(OBJEXT) test_base_OBJECTS = $(am_test_base_OBJECTS) test_base_LDADD = $(LDADD) am__DEPENDENCIES_1 = test_base_DEPENDENCIES = ../../tsk/libtsk.la $(am__DEPENDENCIES_1) AM_V_lt = $(am__v_lt_@AM_V@) am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) am__v_lt_0 = --silent am__v_lt_1 = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir)/tsk depcomp = $(SHELL) $(top_srcdir)/config/depcomp am__depfiles_maybe = depfiles am__mv = mv -f CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CXXFLAGS) $(CXXFLAGS) AM_V_CXX = $(am__v_CXX_@AM_V@) am__v_CXX_ = 
$(am__v_CXX_@AM_DEFAULT_V@) am__v_CXX_0 = @echo " CXX " $@; am__v_CXX_1 = CXXLD = $(CXX) CXXLINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CXXLD = $(am__v_CXXLD_@AM_V@) am__v_CXXLD_ = $(am__v_CXXLD_@AM_DEFAULT_V@) am__v_CXXLD_0 = @echo " CXXLD " $@; am__v_CXXLD_1 = COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CFLAGS) $(CFLAGS) AM_V_CC = $(am__v_CC_@AM_V@) am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@) am__v_CC_0 = @echo " CC " $@; am__v_CC_1 = CCLD = $(CC) LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CCLD = $(am__v_CCLD_@AM_V@) am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@) am__v_CCLD_0 = @echo " CCLD " $@; am__v_CCLD_1 = SOURCES = $(test_base_SOURCES) DIST_SOURCES = $(test_base_SOURCES) am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. 
am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/config/depcomp DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) ACLOCAL = @ACLOCAL@ ALLOCA = @ALLOCA@ AMTAR = @AMTAR@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ ANT_FOUND = @ANT_FOUND@ AR = @AR@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ JAVA = @JAVA@ JAVAC = @JAVAC@ JNI_CPPFLAGS = @JNI_CPPFLAGS@ LD = @LD@ LDFLAGS = -static LIBOBJS = @LIBOBJS@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBTSK_LDFLAGS = @LIBTSK_LDFLAGS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAINT = @MAINT@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ NM = @NM@ NMEDIT = @NMEDIT@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PERL = @PERL@ PTHREAD_CC = @PTHREAD_CC@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ PTHREAD_LIBS = @PTHREAD_LIBS@ RANLIB = @RANLIB@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ STRIP = @STRIP@ VERSION = @VERSION@ Z_PATH = @Z_PATH@ _ACJNI_JAVAC = @_ACJNI_JAVAC@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ ax_pthread_config = @ax_pthread_config@ bindir = @bindir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ datadir = @datadir@ datarootdir = @datarootdir@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ prefix = @prefix@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ 
top_srcdir = @top_srcdir@ uudecode = @uudecode@ AM_CPPFLAGS = -I../.. -Wall $(CPPUNIT_CFLAGS) LDADD = ../../tsk/libtsk.la $(CPPUNIT_LIBS) test_base_SOURCES = test_base.cpp errors_test.cpp errors_test.h all: all-am .SUFFIXES: .SUFFIXES: .cpp .lo .o .obj $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign unit_tests/base/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign unit_tests/base/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstPROGRAMS: @list='$(noinst_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list test_base$(EXEEXT): $(test_base_OBJECTS) $(test_base_DEPENDENCIES) $(EXTRA_test_base_DEPENDENCIES) @rm -f test_base$(EXEEXT) $(AM_V_CXXLD)$(CXXLINK) $(test_base_OBJECTS) $(test_base_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/errors_test.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test_base.Po@am__quote@ .cpp.o: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LTCXXCOMPILE) -c -o $@ $< mostlyclean-libtool: -rm 
-f *.lo clean-libtool: -rm -rf .libs _libs ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-am TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-am CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-am cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(PROGRAMS) installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool clean-local clean-noinstPROGRAMS \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: CTAGS GTAGS TAGS all all-am check check-am clean clean-generic \ clean-libtool clean-local clean-noinstPROGRAMS cscopelist-am \ ctags ctags-am distclean distclean-compile distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags tags-am uninstall uninstall-am .PRECIOUS: Makefile indent: indent *.cpp *.h clean-local: -rm -f *.cpp~ *.h~ check: ./test_base # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: sleuthkit-4.2.0/unit_tests/base/test_base.cpp000644 000765 000024 00000002002 12576320700 022107 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * * * Copyright (c) 2010 Basis Technology Corp. All Rights reserved * * This software is distributed under the Common Public License 1.0 */ #include #include #include #include #include #include #include int main(int argc, char **argv) { // Get the top level suite from the registry CppUnit::Test *suite = CppUnit::TestFactoryRegistry::getRegistry().makeTest(); // Adds the test to the list of test to run CppUnit::TextUi::TestRunner runner; runner.addTest( suite ); // Change the default outputter to a compiler error format outputter runner.setOutputter( new CppUnit::CompilerOutputter( &runner.result(), std::cerr ) ); // Run the tests. bool wasSucessful = runner.run(); // Return error code 1 if the one of test failed. return wasSucessful ? 
0 : 1; } sleuthkit-4.2.0/tsk/auto/000755 000765 000024 00000000000 12576326347 016113 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/tsk/base/000755 000765 000024 00000000000 12576326346 016054 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/tsk/docs/000755 000765 000024 00000000000 12576326346 016072 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/tsk/fs/000755 000765 000024 00000000000 12576326347 015553 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/tsk/hashdb/000755 000765 000024 00000000000 12576326347 016374 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/tsk/img/000755 000765 000024 00000000000 12576326346 015716 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/tsk/libtsk.h000644 000765 000024 00000000351 12576320700 016566 0ustar00carrierstaff000000 000000 #ifndef _TSK_LIBTSK_H #define _TSK_LIBTSK_H #include "tsk/base/tsk_base.h" #include "tsk/img/tsk_img.h" #include "tsk/vs/tsk_vs.h" #include "tsk/fs/tsk_fs.h" #include "tsk/hashdb/tsk_hashdb.h" #include "tsk/auto/tsk_auto.h" #endif sleuthkit-4.2.0/tsk/Makefile.am000644 000765 000024 00000000661 12576326264 017200 0ustar00carrierstaff000000 000000 # Compile the sub directories SUBDIRS = base img vs fs hashdb auto # Merge the libraries into one lib_LTLIBRARIES = libtsk.la libtsk_la_SOURCES = libtsk_la_LIBADD = base/libtskbase.la img/libtskimg.la \ vs/libtskvs.la fs/libtskfs.la hashdb/libtskhashdb.la \ auto/libtskauto.la # current:revision:age libtsk_la_LDFLAGS = -version-info 13:0:0 $(LIBTSK_LDFLAGS) EXTRA_DIST = tsk_tools_i.h docs/Doxyfile docs/*.dox docs/*.html sleuthkit-4.2.0/tsk/Makefile.in000644 000765 000024 00000057615 12576326277 017230 0ustar00carrierstaff000000 000000 # Makefile.in generated by automake 1.15 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2014 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) 
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) pkgdatadir = $(datadir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ pkglibexecdir = $(libexecdir)/@PACKAGE@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = tsk ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ax_pthread.m4 \ $(top_srcdir)/m4/cppunit.m4 \ $(top_srcdir)/m4/ax_jni_include_dir.m4 \ $(top_srcdir)/m4/ac_prog_javac_works.m4 \ $(top_srcdir)/m4/ac_prog_javac.m4 \ $(top_srcdir)/m4/ac_prog_java_works.m4 \ $(top_srcdir)/m4/ac_prog_java.m4 $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = tsk_config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! 
-r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } am__installdirs = "$(DESTDIR)$(libdir)" LTLIBRARIES = $(lib_LTLIBRARIES) libtsk_la_DEPENDENCIES = base/libtskbase.la img/libtskimg.la \ vs/libtskvs.la fs/libtskfs.la hashdb/libtskhashdb.la \ auto/libtskauto.la am_libtsk_la_OBJECTS = libtsk_la_OBJECTS = $(am_libtsk_la_OBJECTS) AM_V_lt = $(am__v_lt_@AM_V@) am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) am__v_lt_0 = --silent am__v_lt_1 = libtsk_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(libtsk_la_LDFLAGS) $(LDFLAGS) -o $@ AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = DEFAULT_INCLUDES = -I.@am__isrc@ COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CFLAGS) $(CFLAGS) AM_V_CC = $(am__v_CC_@AM_V@) am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@) am__v_CC_0 = @echo " CC " $@; am__v_CC_1 = CCLD = $(CC) LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CCLD = $(am__v_CCLD_@AM_V@) am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@) am__v_CCLD_0 = @echo " CCLD " $@; am__v_CCLD_1 = SOURCES = $(libtsk_la_SOURCES) DIST_SOURCES = $(libtsk_la_SOURCES) RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ ctags-recursive dvi-recursive html-recursive info-recursive \ install-data-recursive install-dvi-recursive \ install-exec-recursive install-html-recursive \ install-info-recursive install-pdf-recursive \ install-ps-recursive install-recursive installcheck-recursive \ installdirs-recursive pdf-recursive ps-recursive \ tags-recursive uninstall-recursive am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive am__recursive_targets = \ $(RECURSIVE_TARGETS) \ $(RECURSIVE_CLEAN_TARGETS) \ $(am__extra_recursive_targets) AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ distdir am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) \ $(LISP)tsk_config.h.in # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. 
am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags DIST_SUBDIRS = $(SUBDIRS) am__DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/tsk_config.h.in DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" ACLOCAL = @ACLOCAL@ ALLOCA = @ALLOCA@ AMTAR = @AMTAR@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ ANT_FOUND = @ANT_FOUND@ AR = @AR@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ JAVA = @JAVA@ JAVAC = @JAVAC@ JNI_CPPFLAGS = @JNI_CPPFLAGS@ LD = @LD@ LDFLAGS = @LDFLAGS@ LIBOBJS = @LIBOBJS@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBTSK_LDFLAGS = @LIBTSK_LDFLAGS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAINT = @MAINT@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ NM = @NM@ NMEDIT = @NMEDIT@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PERL = @PERL@ PTHREAD_CC = @PTHREAD_CC@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ PTHREAD_LIBS = @PTHREAD_LIBS@ RANLIB = @RANLIB@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ STRIP = @STRIP@ VERSION = @VERSION@ Z_PATH = @Z_PATH@ _ACJNI_JAVAC = @_ACJNI_JAVAC@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ ax_pthread_config = @ax_pthread_config@ bindir = @bindir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ datadir = @datadir@ datarootdir = @datarootdir@ docdir = 
@docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ prefix = @prefix@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ uudecode = @uudecode@ # Compile the sub directories SUBDIRS = base img vs fs hashdb auto # Merge the libraries into one lib_LTLIBRARIES = libtsk.la libtsk_la_SOURCES = libtsk_la_LIBADD = base/libtskbase.la img/libtskimg.la \ vs/libtskvs.la fs/libtskfs.la hashdb/libtskhashdb.la \ auto/libtskauto.la # current:revision:age libtsk_la_LDFLAGS = -version-info 13:0:0 $(LIBTSK_LDFLAGS) EXTRA_DIST = tsk_tools_i.h docs/Doxyfile docs/*.dox docs/*.html all: tsk_config.h $(MAKE) $(AM_MAKEFLAGS) all-recursive .SUFFIXES: $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign tsk/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign tsk/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): tsk_config.h: stamp-h1 @test -f $@ || rm -f stamp-h1 @test -f $@ || $(MAKE) $(AM_MAKEFLAGS) stamp-h1 stamp-h1: $(srcdir)/tsk_config.h.in $(top_builddir)/config.status @rm -f stamp-h1 cd $(top_builddir) && $(SHELL) ./config.status tsk/tsk_config.h $(srcdir)/tsk_config.h.in: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) ($(am__cd) $(top_srcdir) && $(AUTOHEADER)) rm -f stamp-h1 touch $@ distclean-hdr: -rm -f tsk_config.h stamp-h1 install-libLTLIBRARIES: $(lib_LTLIBRARIES) @$(NORMAL_INSTALL) @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(MKDIR_P) '$(DESTDIR)$(libdir)'"; \ $(MKDIR_P) "$(DESTDIR)$(libdir)" || exit 1; \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(libdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(libdir)"; \ } uninstall-libLTLIBRARIES: @$(NORMAL_UNINSTALL) 
@list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(libdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(libdir)/$$f"; \ done clean-libLTLIBRARIES: -test -z "$(lib_LTLIBRARIES)" || rm -f $(lib_LTLIBRARIES) @list='$(lib_LTLIBRARIES)'; \ locs=`for p in $$list; do echo $$p; done | \ sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \ sort -u`; \ test -z "$$locs" || { \ echo rm -f $${locs}; \ rm -f $${locs}; \ } libtsk.la: $(libtsk_la_OBJECTS) $(libtsk_la_DEPENDENCIES) $(EXTRA_libtsk_la_DEPENDENCIES) $(AM_V_CCLD)$(libtsk_la_LINK) -rpath $(libdir) $(libtsk_la_OBJECTS) $(libtsk_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run 'make' without going through this Makefile. # To change the values of 'make' variables: instead of editing Makefiles, # (1) if the variable is set in 'config.status', edit 'config.status' # (which will cause the Makefiles to be regenerated when you run 'make'); # (2) otherwise, pass the desired values on the 'make' command line. $(am__recursive_targets): @fail=; \ if $(am__make_keepgoing); then \ failcom='fail=yes'; \ else \ failcom='exit 1'; \ fi; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-recursive TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-recursive CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-recursive cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ $(am__make_dryrun) \ || test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(LTLIBRARIES) tsk_config.h installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(libdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-recursive clean-am: clean-generic clean-libLTLIBRARIES clean-libtool \ mostlyclean-am distclean: distclean-recursive -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-hdr distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-libLTLIBRARIES install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-libLTLIBRARIES .MAKE: $(am__recursive_targets) all install-am install-strip .PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \ check-am clean clean-generic clean-libLTLIBRARIES \ clean-libtool cscopelist-am ctags ctags-am distclean \ distclean-compile distclean-generic distclean-hdr \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-libLTLIBRARIES install-man install-pdf \ install-pdf-am install-ps install-ps-am install-strip \ installcheck installcheck-am installdirs installdirs-am \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ pdf pdf-am ps ps-am tags tags-am uninstall uninstall-am \ uninstall-libLTLIBRARIES .PRECIOUS: Makefile # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: sleuthkit-4.2.0/tsk/sorter/000755 000765 000024 00000000000 12576326344 016456 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/tsk/tsk_config.h.in000644 000765 000024 00000015341 12576320700 020036 0ustar00carrierstaff000000 000000 /* tsk/tsk_config.h.in. Generated from configure.ac by autoheader. */ /* Define to one of `_getb67', `GETB67', `getb67' for Cray-2 and Cray-YMP systems. This function is required for `alloca.c' support on those systems. */ #undef CRAY_STACKSEG_END /* Define to 1 if using `alloca.c'. */ #undef C_ALLOCA /* Define to 1 if you have the header file. */ #undef HAVE_AFFLIB_AFFLIB_H /* Define to 1 if you have `alloca', as a function or macro. */ #undef HAVE_ALLOCA /* Define to 1 if you have and it should be used (not on Ultrix). */ #undef HAVE_ALLOCA_H /* Define to 1 if you have the header file. */ #undef HAVE_DLFCN_H /* Define to 1 if you don't have `vprintf' but do have `_doprnt.' */ #undef HAVE_DOPRNT /* Define to 1 if you have the `err' function. */ #undef HAVE_ERR /* Define to 1 if you have the `errx' function. */ #undef HAVE_ERRX /* Define to 1 if you have the header file. */ #undef HAVE_ERR_H /* Define to 1 if fseeko (and presumably ftello) exists and is declared. */ #undef HAVE_FSEEKO /* Define to 1 if you have the `getrusage' function. */ #undef HAVE_GETRUSAGE /* Define to 1 if you have the header file. */ #undef HAVE_INTTYPES_H /* Define to 1 if you have the `ishexnumber' function. 
*/ #undef HAVE_ISHEXNUMBER /* Define to 1 if you have the `afflib' library (-lafflib). */ #undef HAVE_LIBAFFLIB /* Define to 1 if you have the `dl' library (-ldl). */ #undef HAVE_LIBDL /* Define to 1 if you have the `ewf' library (-lewf). */ #undef HAVE_LIBEWF /* Define to 1 if you have the header file. */ #undef HAVE_LIBEWF_H /* Define to 1 if you have the `stdc++' library (-lstdc++). */ #undef HAVE_LIBSTDC__ /* Define to 1 if you have the `z' library (-lz). */ #undef HAVE_LIBZ /* Define to 1 if you have the header file. */ #undef HAVE_LIST /* Define to 1 if `lstat' has the bug that it succeeds when given the zero-length file name argument. */ #undef HAVE_LSTAT_EMPTY_STRING_BUG /* Define to 1 if you have the header file. */ #undef HAVE_MAP /* Define to 1 if you have the header file. */ #undef HAVE_MEMORY_H /* Define if you have POSIX threads libraries and header files. */ #undef HAVE_PTHREAD /* Define to 1 if you have the header file. */ #undef HAVE_QUEUE /* Define to 1 if you have the header file. */ #undef HAVE_SET /* Define to 1 if you have the header file. */ #undef HAVE_STACK /* Define to 1 if stdbool.h conforms to C99. */ #undef HAVE_STDBOOL_H /* Define to 1 if you have the header file. */ #undef HAVE_STDINT_H /* Define to 1 if you have the header file. */ #undef HAVE_STDLIB_H /* Define to 1 if you have the header file. */ #undef HAVE_STREAMBUF /* Define to 1 if you have the header file. */ #undef HAVE_STRING /* Define to 1 if you have the header file. */ #undef HAVE_STRINGS_H /* Define to 1 if you have the header file. */ #undef HAVE_STRING_H /* Define to 1 if you have the `strlcat' function. */ #undef HAVE_STRLCAT /* Define to 1 if you have the `strlcpy' function. */ #undef HAVE_STRLCPY /* Define to 1 if you have the header file. */ #undef HAVE_SYS_PARAM_H /* Define to 1 if you have the header file. */ #undef HAVE_SYS_RESOURCE_H /* Define to 1 if you have the header file. */ #undef HAVE_SYS_SELECT_H /* Define to 1 if you have the header file. */ #undef HAVE_SYS_SOCKET_H /* Define to 1 if you have the header file. */ #undef HAVE_SYS_STAT_H /* Define to 1 if you have the header file. */ #undef HAVE_SYS_TYPES_H /* Define to 1 if you have the header file. */ #undef HAVE_UNISTD_H /* Define to 1 if you have the header file. */ #undef HAVE_UTIME_H /* Define to 1 if `utime(file, NULL)' sets file's timestamp to the present. */ #undef HAVE_UTIME_NULL /* Define to 1 if you have the `vasprintf' function. */ #undef HAVE_VASPRINTF /* Define to 1 if you have the header file. */ #undef HAVE_VECTOR /* Define to 1 if you have the `vprintf' function. */ #undef HAVE_VPRINTF /* Define to 1 if you have the `warn' function. */ #undef HAVE_WARN /* Define to 1 if you have the `warnx' function. */ #undef HAVE_WARNX /* Define to 1 if you have the header file. */ #undef HAVE_ZLIB_H /* Define to 1 if the system has the type `_Bool'. */ #undef HAVE__BOOL /* Define to 1 if `lstat' dereferences a symlink specified with a trailing slash. */ #undef LSTAT_FOLLOWS_SLASHED_SYMLINK /* Define to the sub-directory in which libtool stores uninstalled libraries. */ #undef LT_OBJDIR /* Name of package */ #undef PACKAGE /* Define to the address where bug reports for this package should be sent. */ #undef PACKAGE_BUGREPORT /* Define to the full name of this package. */ #undef PACKAGE_NAME /* Define to the full name and version of this package. */ #undef PACKAGE_STRING /* Define to the one symbol short name of this package. */ #undef PACKAGE_TARNAME /* Define to the home page for this package. 
*/ #undef PACKAGE_URL /* Define to the version of this package. */ #undef PACKAGE_VERSION /* Define to necessary symbol if this constant uses a non-standard name on your system. */ #undef PTHREAD_CREATE_JOINABLE /* Define to the type of arg 1 for `select'. */ #undef SELECT_TYPE_ARG1 /* Define to the type of args 2, 3 and 4 for `select'. */ #undef SELECT_TYPE_ARG234 /* Define to the type of arg 5 for `select'. */ #undef SELECT_TYPE_ARG5 /* If using the C implementation of alloca, define if you know the direction of stack growth for your system; otherwise it will be automatically deduced at runtime. STACK_DIRECTION > 0 => grows toward higher addresses STACK_DIRECTION < 0 => grows toward lower addresses STACK_DIRECTION = 0 => direction of growth unknown */ #undef STACK_DIRECTION /* Define to 1 if you have the ANSI C header files. */ #undef STDC_HEADERS /* Version number of package */ #undef VERSION /* Enable large inode numbers on Mac OS X 10.5. */ #ifndef _DARWIN_USE_64_BIT_INODE # define _DARWIN_USE_64_BIT_INODE 1 #endif /* Number of bits in a file offset, on hosts where this is settable. */ #undef _FILE_OFFSET_BITS /* Define to 1 to make fseeko visible on some hosts (e.g. glibc 2.2). */ #undef _LARGEFILE_SOURCE /* Define for large files, on AIX-style hosts. */ #undef _LARGE_FILES /* Define to empty if `const' does not conform to ANSI C. */ #undef const /* Define to `int' if doesn't define. */ #undef gid_t /* Define to `int' if does not define. */ #undef mode_t /* Define to `long int' if does not define. */ #undef off_t /* Define to `unsigned int' if does not define. */ #undef size_t /* Define to `int' if doesn't define. */ #undef uid_t sleuthkit-4.2.0/tsk/tsk_incs.h000644 000765 000024 00000000521 12576326324 017122 0ustar00carrierstaff000000 000000 #ifndef _TSK_INCS_H #define _TSK_INCS_H // automatically by ./configure // Contains the config.h data needed by programs that use libtsk #include #ifndef __STDC_FORMAT_MACROS #define __STDC_FORMAT_MACROS #endif #include #include #define TSK_MULTITHREAD_LIB // set because we have pthreads #endif sleuthkit-4.2.0/tsk/tsk_tools_i.h000644 000765 000024 00000000660 12576320700 017632 0ustar00carrierstaff000000 000000 #ifndef _TSK_TOOLS_I_H #define _TSK_TOOLS_I_H /* same as tsklib.h except that it includes the base_i.h file * instead of base.h so that we can get the _config defines. * This is to be used by the tools included with TSK (such as fls). */ #include "tsk/base/tsk_base_i.h" #include "tsk/img/tsk_img.h" #include "tsk/vs/tsk_vs.h" #include "tsk/fs/tsk_fs.h" #include "tsk/hashdb/tsk_hashdb.h" #include "tsk/auto/tsk_auto.h" #endif sleuthkit-4.2.0/tsk/vs/000755 000765 000024 00000000000 12576326346 015572 5ustar00carrierstaff000000 000000 sleuthkit-4.2.0/tsk/vs/.indent.pro000644 000765 000024 00000000036 12576320700 017636 0ustar00carrierstaff000000 000000 -kr -psl -nce -ip2 -nlp -nut sleuthkit-4.2.0/tsk/vs/bsd.c000644 000765 000024 00000015110 12576320700 016470 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2006-2011 Brian Carrier, Basis Technology. All rights reserved * Copyright (c) 2003-2005 Brian Carrier. All rights reserved * * This software is distributed under the Common Public License 1.0 */ /** \file bsd.c * Contains the internal functions required to process BSD disk labels. */ #include "tsk_vs_i.h" #include "tsk_bsd.h" /* * Return a buffer with a description of the partition type. The buffer * must be freed by the caller. 
*/ static char * bsd_get_desc(uint8_t fstype) { char *str = tsk_malloc(64); if (str == NULL) return ""; switch (fstype) { case 0: strncpy(str, "Unused (0x00)", 64); break; case 1: strncpy(str, "Swap (0x01)", 64); break; case 2: strncpy(str, "Version 6 (0x02)", 64); break; case 3: strncpy(str, "Version 7 (0x03)", 64); break; case 4: strncpy(str, "System V (0x04)", 64); break; case 5: strncpy(str, "4.1BSD (0x05)", 64); break; case 6: strncpy(str, "Eighth Edition (0x06)", 64); break; case 7: strncpy(str, "4.2BSD (0x07)", 64); break; case 8: strncpy(str, "MSDOS (0x08)", 64); break; case 9: strncpy(str, "4.4LFS (0x09)", 64); break; case 10: strncpy(str, "Unknown (0x0A)", 64); break; case 11: strncpy(str, "HPFS (0x0B)", 64); break; case 12: strncpy(str, "ISO9660 (0x0C)", 64); break; case 13: strncpy(str, "Boot (0x0D)", 64); break; case 14: strncpy(str, "Vinum (0x0E)", 64); break; default: snprintf(str, 64, "Unknown Type (0x%.2x)", fstype); break; } return str; } /* * Process the partition table at the sector address * * Return 1 on error and 0 if no error */ static uint8_t bsd_load_table(TSK_VS_INFO * a_vs) { char *sect_buf; bsd_disklabel *dlabel; uint32_t idx = 0; ssize_t cnt; char *table_str; TSK_DADDR_T laddr = a_vs->offset / a_vs->block_size + BSD_PART_SOFFSET; // used for printing only TSK_DADDR_T max_addr = (a_vs->img_info->size - a_vs->offset) / a_vs->block_size; // max sector if (tsk_verbose) tsk_fprintf(stderr, "bsd_load_table: Table Sector: %" PRIuDADDR "\n", laddr); if ((sect_buf = tsk_malloc(a_vs->block_size)) == NULL) return 1; dlabel = (bsd_disklabel *) sect_buf; /* read the block */ cnt = tsk_vs_read_block (a_vs, BSD_PART_SOFFSET, sect_buf, a_vs->block_size); if (cnt != a_vs->block_size) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_VS_READ); } tsk_error_set_errstr2("BSD Disk Label in Sector: %" PRIuDADDR, laddr); free(sect_buf); return 1; } /* Check the magic */ if (tsk_vs_guessu32(a_vs, dlabel->magic, BSD_MAGIC)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_VS_MAGIC); tsk_error_set_errstr("BSD partition table (magic #1) (Sector: %" PRIuDADDR ") %" PRIx32, laddr, tsk_getu32(a_vs->endian, dlabel->magic)); free(sect_buf); return 1; } /* Check the second magic value */ if (tsk_getu32(a_vs->endian, dlabel->magic2) != BSD_MAGIC) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_VS_MAGIC); tsk_error_set_errstr("BSD disk label (magic #2) (Sector: %" PRIuDADDR ") %" PRIx32, laddr, tsk_getu32(a_vs->endian, dlabel->magic2)); free(sect_buf); return 1; } /* Add an entry of 1 length for the table to the internal structure */ if ((table_str = tsk_malloc(32)) == NULL) { free(sect_buf); return 1; } snprintf(table_str, 32, "Partition Table"); if (NULL == tsk_vs_part_add(a_vs, BSD_PART_SOFFSET, (TSK_DADDR_T) 1, TSK_VS_PART_FLAG_META, table_str, -1, -1)) { free(sect_buf); return 1; } /* Cycle through the partitions, there are either 8 or 16 */ for (idx = 0; idx < tsk_getu16(a_vs->endian, dlabel->num_parts); idx++) { uint32_t part_start; uint32_t part_size; part_start = tsk_getu32(a_vs->endian, dlabel->part[idx].start_sec); part_size = tsk_getu32(a_vs->endian, dlabel->part[idx].size_sec); if (tsk_verbose) tsk_fprintf(stderr, "load_table: %" PRIu32 " Starting Sector: %" PRIu32 " Size: %" PRIu32 " Type: %d\n", idx, part_start, part_size, dlabel->part[idx].fstype); if (part_size == 0) continue; // make sure the first couple are in the image bounds if ((idx < 2) && (part_start > max_addr)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_VS_BLK_NUM); tsk_error_set_errstr 
("bsd_load_table: Starting sector too large for image"); free(sect_buf); return 1; } /* Add the partition to the internal sorted list */ if (NULL == tsk_vs_part_add(a_vs, (TSK_DADDR_T) part_start, (TSK_DADDR_T) part_size, TSK_VS_PART_FLAG_ALLOC, bsd_get_desc(dlabel->part[idx].fstype), -1, idx)) { free(sect_buf); return 1; } } free(sect_buf); return 0; } static void bsd_close(TSK_VS_INFO * a_vs) { a_vs->tag = 0; tsk_vs_part_free(a_vs); free(a_vs); } /* * analyze the image in img_info and process it as BSD * Initialize the TSK_VS_INFO structure * * Return TSK_VS_INFO or NULL if not BSD or an error */ TSK_VS_INFO * tsk_vs_bsd_open(TSK_IMG_INFO * img_info, TSK_DADDR_T offset) { TSK_VS_INFO *vs; // clean up any errors that are lying around tsk_error_reset(); vs = (TSK_VS_INFO *) tsk_malloc(sizeof(*vs)); if (vs == NULL) return NULL; vs->img_info = img_info; vs->vstype = TSK_VS_TYPE_BSD; vs->tag = TSK_VS_INFO_TAG; /* use the offset provided */ vs->offset = offset; /* inititialize settings */ vs->part_list = NULL; vs->part_count = 0; vs->endian = 0; vs->block_size = img_info->sector_size; /* Assign functions */ vs->close = bsd_close; /* Load the partitions into the sorted list */ if (bsd_load_table(vs)) { bsd_close(vs); return NULL; } /* fill in the sorted list with the 'unknown' values */ if (tsk_vs_part_unused(vs)) { bsd_close(vs); return NULL; } return vs; } sleuthkit-4.2.0/tsk/vs/dos.c000644 000765 000024 00000077053 12576320700 016523 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2006-2011 Brian Carrier, Basis Technology. All rights reserved * Copyright (c) 2003-2005 Brian Carrier. All rights reserved * * This software is distributed under the Common Public License 1.0 */ /** \file dos.c * Contains the internal functions to process DOS Partition tables */ #include "tsk_vs_i.h" #include "tsk_dos.h" /* Check the extended partition flags */ #define dos_is_ext(x) \ ((((x) == 0x05) || ((x) == 0x0F) || ((x) == 0x85)) ? 
1 : 0) /* * dos_get_desc * * Return a buffer with a string description of the partition type * * From: http://www.win.tue.nl/~aeb/partitions/partition_types-1.html */ static char * dos_get_desc(uint8_t ptype) { #define DESC_LEN 64 char *str = tsk_malloc(DESC_LEN); if (str == NULL) return ""; switch (ptype) { case 0x00: snprintf(str, DESC_LEN, "Empty (0x00)"); break; case 0x01: snprintf(str, DESC_LEN, "DOS FAT12 (0x01)"); break; case 0x02: snprintf(str, DESC_LEN, "XENIX root (0x02)"); break; case 0x03: snprintf(str, DESC_LEN, "XENIX /usr (0x03)"); break; case 0x04: case 0x06: snprintf(str, DESC_LEN, "DOS FAT16 (0x%.2x)", ptype); break; case 0x05: snprintf(str, DESC_LEN, "DOS Extended (0x05)"); break; case 0x07: snprintf(str, DESC_LEN, "NTFS / exFAT (0x07)"); break; case 0x08: snprintf(str, DESC_LEN, "AIX Boot (0x08)"); break; case 0x09: snprintf(str, DESC_LEN, "AIX Data (0x09)"); break; case 0x0a: snprintf(str, DESC_LEN, "OS/2 Boot Manager (0x0a)"); break; /* case 0x0a: snprintf(str, DESC_LEN, "Coherent swap (0x0a)"); break; case 0x0a: snprintf(str, DESC_LEN, "OPUS (0x0a)"); break; */ case 0x0b: case 0x0c: snprintf(str, DESC_LEN, "Win95 FAT32 (0x%.2x)", ptype); break; case 0x0e: snprintf(str, DESC_LEN, "Win95 FAT16 (0x0e)"); break; case 0x0f: snprintf(str, DESC_LEN, "Win95 Extended (0x0f)"); break; case 0x10: snprintf(str, DESC_LEN, "OPUS (0x10)"); break; case 0x11: snprintf(str, DESC_LEN, "DOS FAT12 Hidden (0x11)"); break; case 0x12: snprintf(str, DESC_LEN, "Hibernation (0x12)"); break; case 0x14: case 0x16: snprintf(str, DESC_LEN, "DOS FAT16 Hidden (0x%.2x)", ptype); break; case 0x17: snprintf(str, DESC_LEN, "Hidden IFS/HPFS (0x17)"); break; case 0x18: snprintf(str, DESC_LEN, "AST SmartSleep (0x18)"); break; case 0x19: case 0x1b: case 0x1c: snprintf(str, DESC_LEN, "Win95 FAT32 Hidden (0x%.2x)", ptype); break; case 0x1e: snprintf(str, DESC_LEN, "Win95 FAT16 Hidden (0x1e)"); break; case 0x20: case 0x22: case 0x7e: case 0x7f: case 0xed: case 0xf7: snprintf(str, DESC_LEN, "Unused (0x%.2x)", ptype); break; case 0x21: case 0x23: case 0x26: case 0x31: case 0x33: case 0x34: case 0x36: case 0x71: case 0x73: case 0x76: case 0xf3: snprintf(str, DESC_LEN, "Reserved (0x%.2x)", ptype); break; case 0x24: snprintf(str, DESC_LEN, "NEC DOS 3.x (0x24)"); break; case 0x32: snprintf(str, DESC_LEN, "NOS (0x32)"); break; case 0x35: snprintf(str, DESC_LEN, "JFS on OS/2 or eCS (0x35)"); break; case 0x38: snprintf(str, DESC_LEN, "THEOS v3.2 2gb (0x38)"); break; case 0x39: snprintf(str, DESC_LEN, "THEOS v4 Spanned (0x39)"); break; /* case 0x39: snprintf(str, DESC_LEN, "Plan 9 (0x39)"); break; */ case 0x3a: snprintf(str, DESC_LEN, "THEOS v4 4gb (0x3a)"); break; case 0x3b: snprintf(str, DESC_LEN, "THEOS v4 Extended (0x3b)"); break; case 0x3c: snprintf(str, DESC_LEN, "PartitionMagic Recovery (0x3c)"); break; case 0x3d: snprintf(str, DESC_LEN, "Hidden NetWare (0x3d)"); break; case 0x40: snprintf(str, DESC_LEN, "Venix 80286 (0x40)"); break; case 0x41: snprintf(str, DESC_LEN, "Linux/MINIX (Sharing Disk with DR-DOS) (0x41)"); break; /* case 0x41: snprintf(str, DESC_LEN, "Personal RISC Boot (0x41)"); break; case 0x41: snprintf(str, DESC_LEN, "PPC PReP Boot (0x41)"); break; */ case 0x42: snprintf(str, DESC_LEN, "Win LVM / Secure FS (0x42)"); break; case 0x43: snprintf(str, DESC_LEN, "Linux Native (Sharing Disk with DR-DOS) (0x43)"); break; case 0x44: snprintf(str, DESC_LEN, "GoBack (0x44)"); break; case 0x45: snprintf(str, DESC_LEN, "Boot-US Boot Manager (0x45)"); break; /* case 0x45: snprintf(str, DESC_LEN, "Priam 
(0x45)"); break; case 0x45: snprintf(str, DESC_LEN, "EUMEL/Elan (0x45)"); break; */ case 0x46: snprintf(str, DESC_LEN, "EUMEL/Elan (0x46)"); break; case 0x47: snprintf(str, DESC_LEN, "EUMEL/Elan (0x47)"); break; case 0x48: snprintf(str, DESC_LEN, "EUMEL/Elan (0x48)"); break; case 0x4a: snprintf(str, DESC_LEN, "Mark Aitchison's ALFS/THIN Lightweight Filesystem (0x4a)"); break; /*case 0x4a: snprintf(str, DESC_LEN, "AdaOS Aquila (0x4a)"); break; */ case 0x4c: snprintf(str, DESC_LEN, "Oberon (0x4c)"); break; case 0x4d: case 0x4e: case 0x4f: snprintf(str, DESC_LEN, "QNX 4.x (0x%.2x)", ptype); break; /*case 0x4f: snprintf(str, DESC_LEN, "Oberon (0x4f)"); break; */ /*case 0x52: snprintf(str, DESC_LEN, "CP/M (0x52)"); break; */ case 0x50: case 0x51: case 0x53: case 0x54: snprintf(str, DESC_LEN, "OnTrack Disk Manager (0x%.2x)", ptype); break; case 0x52: snprintf(str, DESC_LEN, "Microport SysV/AT (0x52)"); break; case 0x55: snprintf(str, DESC_LEN, "EZ-Drive (0x55)"); break; case 0x56: snprintf(str, DESC_LEN, "AT&T MS-DOS 3.x Logically Sectored FAT (0x56)"); break; /*case 0x56: snprintf(str, DESC_LEN, "Golden Bow VFeature Partitioned Volume (0x56)"); break; */ /*case 0x56: snprintf(str, DESC_LEN, "DM Converted to EZ-BIOS (0x56)"); break; */ case 0x57: snprintf(str, DESC_LEN, "DrivePro (0x57)"); break; case 0x5c: snprintf(str, DESC_LEN, "Priam EDisk (0x5c)"); break; case 0x61: snprintf(str, DESC_LEN, "SpeedStor (0x61)"); break; case 0x63: snprintf(str, DESC_LEN, "UNIX System V (0x63)"); break; case 0x64: case 0x65: case 0x66: case 0x67: case 0x68: case 0x69: snprintf(str, DESC_LEN, "Novell Netware (0x%.2x)", ptype); break; case 0x70: snprintf(str, DESC_LEN, "DiskSecure Multi-Boot (0x70)"); break; case 0x74: snprintf(str, DESC_LEN, "Scramdisk (0x74)"); break; case 0x75: snprintf(str, DESC_LEN, "IBM PC/IX (0x75)"); break; case 0x77: snprintf(str, DESC_LEN, "VNDI (0x77)"); break; /*case 0x77: snprintf(str, DESC_LEN, "M2FS/M2CS (0x77)"); break; */ case 0x78: snprintf(str, DESC_LEN, "XOSL FS (0x78)"); break; case 0x80: snprintf(str, DESC_LEN, "MINIX <=v1.4a (0x80)"); break; case 0x81: snprintf(str, DESC_LEN, "MINIX >=v1.4b, Early Linux (0x81)"); break; /*case 0x81: snprintf(str, DESC_LEN, "Mitac Disk Manager (0x81)"); break; */ case 0x82: snprintf(str, DESC_LEN, "Linux Swap / Solaris x86 (0x82)"); break; case 0x83: snprintf(str, DESC_LEN, "Linux (0x83)"); break; case 0x84: snprintf(str, DESC_LEN, "Hibernation (0x84)"); break; case 0x85: snprintf(str, DESC_LEN, "Linux Extended (0x85)"); break; case 0x86: snprintf(str, DESC_LEN, "NTFS Volume Set (0x86)"); break; case 0x87: snprintf(str, DESC_LEN, "NTFS Volume Set (0x87)"); break; case 0x8a: snprintf(str, DESC_LEN, "Linux Kernel (0x8a)"); break; case 0x8b: snprintf(str, DESC_LEN, "Legacy Fault Tolerant FAT32 (0x8b)"); break; case 0x8c: snprintf(str, DESC_LEN, "Legacy Fault Tolerant FAT32 using BIOS extd INT 13h (0x8c)"); break; case 0x8d: snprintf(str, DESC_LEN, "Free FDISK Hidden Primary DOS FAT12 (0x8d)"); break; case 0x8e: snprintf(str, DESC_LEN, "Linux Logical Volume Manager (0x8e)"); break; case 0x90: snprintf(str, DESC_LEN, "Free FDISK Hidden Primary DOS FAT16 (0x90)"); break; case 0x91: snprintf(str, DESC_LEN, "Free FDISK Hidden DOS Extended (0x91)"); break; case 0x92: snprintf(str, DESC_LEN, "Free FDISK Hidden Primary DOS Large FAT16 (0x92)"); break; case 0x93: snprintf(str, DESC_LEN, "Linux Hidden (0x93)"); break; case 0x94: snprintf(str, DESC_LEN, "Amoeba Bad Block Table (0x94)"); break; case 0x95: snprintf(str, DESC_LEN, "MIT EXOPC (0x95)"); break; 
case 0x97: snprintf(str, DESC_LEN, "Free FDISK Hidden Primary DOS FAT32 (0x97)"); break; case 0x98: snprintf(str, DESC_LEN, "Free FDISK Hidden Primary DOS FAT32 LBA (0x98)"); break; /*case 0x98: snprintf(str, DESC_LEN, "Datalight ROM-DOS Super-Boot (0x98)"); break; */ case 0x99: snprintf(str, DESC_LEN, "DCE376 Logical Drive (0x99)"); break; case 0x9a: snprintf(str, DESC_LEN, "Free FDISK Hidden Primary DOS FAT16 LBA (0x9a)"); break; case 0x9b: snprintf(str, DESC_LEN, "Free FDISK Hidden DOS Extended LBA (0x9b)"); break; case 0x9f: snprintf(str, DESC_LEN, "BSD/OS (0x9f)"); break; case 0xa0: case 0xa1: snprintf(str, DESC_LEN, "Hibernation (0x%.2x)", ptype); break; case 0xa3: snprintf(str, DESC_LEN, "HP Volume Expansion (SpeedStor Variant) (0xa3)"); break; case 0xa4: snprintf(str, DESC_LEN, "HP Volume Expansion (SpeedStor Variant) (0xa4)"); break; case 0xa5: snprintf(str, DESC_LEN, "BSD/386, 386BSD, NetBSD, FreeBSD (0xa5)"); break; case 0xa6: snprintf(str, DESC_LEN, "OpenBSD (0xa6)"); break; case 0xa7: snprintf(str, DESC_LEN, "NeXTSTEP (0xa7)"); break; case 0xa8: snprintf(str, DESC_LEN, "Mac OS X (0xa8)"); break; case 0xa9: snprintf(str, DESC_LEN, "NetBSD (0xa9)"); break; case 0xaa: snprintf(str, DESC_LEN, "Olivetti Fat 12 1.44MB Service (0xaa)"); break; case 0xab: snprintf(str, DESC_LEN, "Mac OS X Boot Partition (0xab)"); break; case 0xae: snprintf(str, DESC_LEN, "ShagOS Filesystem (0xae)"); break; case 0xaf: snprintf(str, DESC_LEN, "Mac OS X HFS (0xaf)"); break; case 0xb0: snprintf(str, DESC_LEN, "BootStar Dummy (0xb0)"); break; case 0xb1: snprintf(str, DESC_LEN, "HP Volume Expansion (SpeedStor Variant) (0xb1)"); break; case 0xb3: snprintf(str, DESC_LEN, "HP Volume Expansion (SpeedStor Variant) (0xb3)"); break; case 0xb4: snprintf(str, DESC_LEN, "HP Volume Expansion (SpeedStor Variant) (0xb4)"); break; case 0xb6: snprintf(str, DESC_LEN, "Corrupted Windows NT Mirror Set Master FAT16 (0xb6)"); break; /*case 0xb6: snprintf(str, DESC_LEN, "HP Volume Expansion (SpeedStor Variant) (0xb6)"); break; */ case 0xb7: snprintf(str, DESC_LEN, "BSDI (0xb7)"); break; case 0xb8: snprintf(str, DESC_LEN, "BSDI Swap (0xb8)"); break; case 0xbb: snprintf(str, DESC_LEN, "Boot Wizard Hidden (0xbb)"); break; case 0xbe: snprintf(str, DESC_LEN, "Solaris 8 Boot (0xbe)"); break; case 0xc0: snprintf(str, DESC_LEN, "DR-DOS Secured (0xc0)"); break; /*case 0xc0: snprintf(str, DESC_LEN, "CTOS (0xc0)"); break; */ /*case 0xc0: snprintf(str, DESC_LEN, "REAL/32 Secure Small (0xc0)"); break; */ /*case 0xc0: snprintf(str, DESC_LEN, "NTFT (0xc0)"); break; */ case 0xc1: snprintf(str, DESC_LEN, "DR-DOS Secured FAT12 (0xc1)"); break; case 0xc2: snprintf(str, DESC_LEN, "Hidden Linux (0xc2)"); break; case 0xc3: snprintf(str, DESC_LEN, "Hidden Linux Swap (0xc3)"); break; case 0xc4: snprintf(str, DESC_LEN, "DR-DOS Secured FAT16 <32M (0xc4)"); break; case 0xc5: snprintf(str, DESC_LEN, "DR-DOS Secured Extended (0xc5)"); break; case 0xc6: case 0xc7: snprintf(str, DESC_LEN, "Corrupted Windows NT Volume / Stripe Set (0x%.2x)", ptype); break; case 0xc8: snprintf(str, DESC_LEN, "Reserved for DR-DOS 8.0+ (0xc8)"); break; case 0xc9: snprintf(str, DESC_LEN, "Reserved for DR-DOS 8.0+ (0xc9)"); break; case 0xca: snprintf(str, DESC_LEN, "Reserved for DR-DOS 8.0+ (0xca)"); break; case 0xcb: snprintf(str, DESC_LEN, "DR-DOS 7.04+ Secured FAT32 CHS (0xcb)"); break; case 0xcc: snprintf(str, DESC_LEN, "DR-DOS 7.04+ Secured FAT32 LBA (0xcc)"); break; case 0xcd: snprintf(str, DESC_LEN, "CTOS Memdump? 
(0xcd)"); break; case 0xce: snprintf(str, DESC_LEN, "DR-DOS 7.04+ FAT16X LBA (0xce)"); break; case 0xcf: snprintf(str, DESC_LEN, "DR-DOS 7.04+ Secured EXT DOS LBA (0xcf)"); break; case 0xd0: snprintf(str, DESC_LEN, "Multiuser DOS Secured (0xd0)"); break; /*case 0xd0: snprintf(str, DESC_LEN, "REAL/32 Secure Big (0xd0)"); break; */ case 0xd1: snprintf(str, DESC_LEN, "Old Multiuser DOS Secured FAT12 (0xd1)"); break; case 0xd4: snprintf(str, DESC_LEN, "Old Multiuser DOS Secured FAT16 <32M (0xd4)"); break; case 0xd5: snprintf(str, DESC_LEN, "Old Multiuser DOS Secured extended (0xd5)"); break; case 0xd6: snprintf(str, DESC_LEN, "Old Multiuser DOS Secured FAT16 >=32M (0xd6)"); break; case 0xd8: snprintf(str, DESC_LEN, "CP/M-86 (0xd8)"); break; case 0xda: snprintf(str, DESC_LEN, "Non-FS Data (0xda)"); break; case 0xdb: snprintf(str, DESC_LEN, "Digital Research CP/M, Concurrent CP/M, Concurrent DOS (0xdb)"); break; /*case 0xdb: snprintf(str, DESC_LEN, "Unisys CTOS (0xdb)"); break; */ /*case 0xdb: snprintf(str, DESC_LEN, "KDG Telemetry SCPU boot (0xdb)"); break; */ case 0xdd: snprintf(str, DESC_LEN, "Hidden CTOS Memdump? (0xdd)"); break; case 0xde: snprintf(str, DESC_LEN, "Dell Utilities FAT (0xde)"); break; /*case 0xdf: snprintf(str, DESC_LEN, "DG/UX Virtual Disk Manager (0xdf)"); break; */ /*case 0xdf: snprintf(str, DESC_LEN, "BootIt EMBRM (0xdf)"); break; */ case 0xe0: snprintf(str, DESC_LEN, "Reserved by STMicroelectronics for ST AVFS. (0xe0)"); break; case 0xe1: snprintf(str, DESC_LEN, "DOS Access or SpeedStor 12-bit FAT Extended (0xe1)"); break; case 0xe3: snprintf(str, DESC_LEN, "DOS R/O or SpeedStor (0xe3)"); break; case 0xe4: snprintf(str, DESC_LEN, "SpeedStor 16-bit FAT Extended <1024 cyl. (0xe4)"); break; case 0xe5: snprintf(str, DESC_LEN, "Tandy MS-DOS with Logically Sectored FAT (0xe5)"); break; case 0xe6: snprintf(str, DESC_LEN, "Storage Dimensions SpeedStor (0xe6)"); break; case 0xeb: snprintf(str, DESC_LEN, "BeOS BFS (0xeb)"); break; case 0xee: snprintf(str, DESC_LEN, "GPT Safety Partition (0xee)"); break; case 0xef: snprintf(str, DESC_LEN, "EFI File System (0xef)"); break; case 0xf0: snprintf(str, DESC_LEN, "Linux/PA-RISC Boot Loader (0xf0)"); break; case 0xf1: snprintf(str, DESC_LEN, "Storage Dimensions SpeedStor (0xf1)"); break; case 0xf2: snprintf(str, DESC_LEN, "DOS 3.3+ Secondary (0xf2)"); break; case 0xf4: snprintf(str, DESC_LEN, "SpeedStor Large (0xf4)"); break; /*case 0xf4: snprintf(str, DESC_LEN, "Prologue Single-Volume (0xf4)"); break; */ case 0xf5: snprintf(str, DESC_LEN, "Prologue Multi-Volume (0xf5)"); break; case 0xf6: snprintf(str, DESC_LEN, "Storage Dimensions SpeedStor (0xf6)"); break; case 0xf9: snprintf(str, DESC_LEN, "pCache (0xf9)"); break; case 0xfa: snprintf(str, DESC_LEN, "Bochs (0xfa)"); break; case 0xfb: snprintf(str, DESC_LEN, "VMWare File System (0xfb)"); break; case 0xfc: snprintf(str, DESC_LEN, "VMWare Swap (0xfc)"); break; case 0xfd: snprintf(str, DESC_LEN, "Linux RAID (0xfd)"); break; case 0xfe: snprintf(str, DESC_LEN, "Windows NT Disk Administrator Hidden (0xfe)"); break; /*case 0xfe: snprintf(str, DESC_LEN, "SpeedStor >1024 cyl. 
(0xfe)"); break; */ /*case 0xfe: snprintf(str, DESC_LEN, "LANstep (0xfe)"); break; */ /*case 0xfe: snprintf(str, DESC_LEN, "IBM PS/2 IML (0xfe)"); break; */ /*case 0xfe: snprintf(str, DESC_LEN, "Old Linux Logical Volume Manager (0xfe)"); break; */ case 0xff: snprintf(str, DESC_LEN, "Xenix Bad Block Table (0xff)"); break; default: snprintf(str, DESC_LEN, "Unknown Type (0x%.2x)", ptype); break; } return str; } /* * Load an extended partition table into the structure in TSK_VS_INFO. * * sect_cur: The sector where the extended table is located * sect_ext_base: The sector of the primary extended table (this does * not change for recursive calls) * table: a counter that identifies the table depth * (increases by 1 for each recursive call) * * For the primary extended table, sect_cur == sect_ext_base * * Return 1 on error and 0 on success * */ static uint8_t dos_load_ext_table(TSK_VS_INFO * vs, TSK_DADDR_T sect_cur, TSK_DADDR_T sect_ext_base, int table) { dos_sect *sect; char *sect_buf; int i; char *table_str; ssize_t cnt; TSK_DADDR_T max_addr = (vs->img_info->size - vs->offset) / vs->block_size; // max sector if (tsk_verbose) tsk_fprintf(stderr, "dos_load_ext: Table Sector: %" PRIuDADDR ", Primary Base Sector: %" PRIuDADDR "\n", sect_cur, sect_ext_base); if ((sect_buf = tsk_malloc(vs->block_size)) == NULL) return 1; sect = (dos_sect *) sect_buf; /* Read the partition table sector */ cnt = tsk_vs_read_block(vs, sect_cur, sect_buf, vs->block_size); if (cnt != vs->block_size) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_VS_READ); } tsk_error_set_errstr2("Extended DOS table sector %" PRIuDADDR, sect_cur); free(sect_buf); return 1; } /* Sanity Check */ if (tsk_getu16(vs->endian, sect->magic) != DOS_MAGIC) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_VS_MAGIC); tsk_error_set_errstr("Extended DOS partition table in sector %" PRIuDADDR, sect_cur); free(sect_buf); return 1; } /* Add an entry of 1 length for the table to the internal structure */ if ((table_str = tsk_malloc(32)) == NULL) { free(sect_buf); return 1; } snprintf(table_str, 32, "Extended Table (#%d)", table); if (NULL == tsk_vs_part_add(vs, (TSK_DADDR_T) sect_cur, (TSK_DADDR_T) 1, TSK_VS_PART_FLAG_META, table_str, table, -1)) { free(sect_buf); return 1; } /* Cycle through the four partitions in the table * * When another extended partition is found, it is processed * inside of the loop */ for (i = 0; i < 4; i++) { dos_part *part = §->ptable[i]; /* Get the starting sector and size, we currently * ignore CHS */ uint32_t part_start = tsk_getu32(vs->endian, part->start_sec); uint32_t part_size = tsk_getu32(vs->endian, part->size_sec); if (tsk_verbose) tsk_fprintf(stderr, "load_ext: %d:%d Start: %" PRIu32 " Size: %" PRIu32 " Type: %d\n", table, i, part_start, part_size, part->ptype); if (part_size == 0) continue; /* partitions are addressed differently * in extended partitions */ if (dos_is_ext(part->ptype)) { /* part start is added to the start of the * first extended partition (the primary * extended partition) */ if (NULL == tsk_vs_part_add(vs, (TSK_DADDR_T) (sect_ext_base + part_start), (TSK_DADDR_T) part_size, TSK_VS_PART_FLAG_META, dos_get_desc(part->ptype), table, i)) { free(sect_buf); return 1; } if (sect_ext_base + part_start > max_addr) { if (tsk_verbose) tsk_fprintf(stderr, "Starting sector %" PRIuDADDR " of extended partition too large for image\n", sect_ext_base + part_start); } /* Process the extended partition */ else if (dos_load_ext_table(vs, sect_ext_base + part_start, sect_ext_base, table + 1)) { 
free(sect_buf); return 1; } } else { /* part_start is added to the start of the * current partition for the actual * starting location */ // we ignore the max_addr checks on extended partitions... if (NULL == tsk_vs_part_add(vs, (TSK_DADDR_T) (sect_cur + part_start), (TSK_DADDR_T) part_size, TSK_VS_PART_FLAG_ALLOC, dos_get_desc(part->ptype), table, i)) { free(sect_buf); return 1; } } } free(sect_buf); return 0; } /* * Load the primary partition table (MBR) into the internal * data structures in TSK_VS_INFO * * This will automatically call load_ext_table for extended * partitions * * sect_cur is the addres of the table to load * * 0 is returned if the load is successful and 1 if error */ static uint8_t dos_load_prim_table(TSK_VS_INFO * vs, uint8_t test) { dos_sect *sect; char *sect_buf; int i, added = 0; char *table_str; ssize_t cnt; TSK_DADDR_T taddr = vs->offset / vs->block_size + DOS_PART_SOFFSET; TSK_DADDR_T max_addr = (vs->img_info->size - vs->offset) / vs->block_size; // max sector if (tsk_verbose) tsk_fprintf(stderr, "dos_load_prim: Table Sector: %" PRIuDADDR "\n", taddr); if ((sect_buf = tsk_malloc(vs->block_size)) == NULL) return 1; sect = (dos_sect *) sect_buf; /* Read the table */ cnt = tsk_vs_read_block (vs, DOS_PART_SOFFSET, sect_buf, vs->block_size); if (cnt != vs->block_size) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_VS_READ); } tsk_error_set_errstr2("Primary DOS table sector %" PRIuDADDR, taddr); free(sect_buf); return 1; } /* Sanity Check */ if (tsk_vs_guessu16(vs, sect->magic, DOS_MAGIC)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_VS_MAGIC); tsk_error_set_errstr ("File is not a DOS partition (invalid primary magic) (Sector: %" PRIuDADDR ")", taddr); if (tsk_verbose) fprintf(stderr, "File is not a DOS partition (invalid primary magic) (Sector: %" PRIuDADDR ")", taddr); free(sect_buf); return 1; } /* Because FAT and NTFS use the same magic - check for a * standard MS OEM name and sizes. Not a great check, but we can't * really test the table entries. 
*/ if (test) { if (tsk_verbose) tsk_fprintf(stderr, "dos_load_prim_table: Testing FAT/NTFS conditions\n"); if (strncmp("MSDOS", sect->oemname, 5) == 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_VS_MAGIC); tsk_error_set_errstr ("dos_load_prim_table: MSDOS OEM name exists"); if (tsk_verbose) tsk_fprintf(stderr, "dos_load_prim_table: MSDOS OEM name exists\n"); free(sect_buf); return 1; } else if (strncmp("MSWIN", sect->oemname, 5) == 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_VS_MAGIC); tsk_error_set_errstr ("dos_load_prim_table: MSWIN OEM name exists"); if (tsk_verbose) tsk_fprintf(stderr, "dos_load_prim_table: MSWIN OEM name exists\n"); free(sect_buf); return 1; } else if (strncmp("NTFS", sect->oemname, 4) == 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_VS_MAGIC); tsk_error_set_errstr ("dos_load_prim_table: NTFS OEM name exists"); if (tsk_verbose) tsk_fprintf(stderr, "dos_load_prim_table: NTFS OEM name exists\n"); free(sect_buf); return 1; } else if (strncmp("FAT", sect->oemname, 4) == 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_VS_MAGIC); tsk_error_set_errstr ("dos_load_prim_table: FAT OEM name exists"); if (tsk_verbose) tsk_fprintf(stderr, "dos_load_prim_table: FAT OEM name exists\n"); free(sect_buf); return 1; } } /* Add an entry of 1 sector for the table to the internal structure */ if ((table_str = tsk_malloc(32)) == NULL) { free(sect_buf); return 1; } snprintf(table_str, 32, "Primary Table (#0)"); if (NULL == tsk_vs_part_add(vs, DOS_PART_SOFFSET, (TSK_DADDR_T) 1, TSK_VS_PART_FLAG_META, table_str, -1, -1)) { free(sect_buf); return 1; } /* Cycle through the partition table */ for (i = 0; i < 4; i++) { dos_part *part = §->ptable[i]; /* We currently ignore CHS */ uint32_t part_start = tsk_getu32(vs->endian, part->start_sec); uint32_t part_size = tsk_getu32(vs->endian, part->size_sec); if (tsk_verbose) tsk_fprintf(stderr, "load_pri:0:%d Start: %" PRIu32 " Size: %" PRIu32 " Type: %d\n", i, part_start, part_size, part->ptype); if (part_size == 0) continue; // make sure the first couple are in the image bounds if ((i < 2) && (part_start > max_addr)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_VS_BLK_NUM); tsk_error_set_errstr ("dos_load_prim_table: Starting sector too large for image"); if (tsk_verbose) tsk_fprintf(stderr, "Starting sector %" PRIu32 " too large for image\n", part_start); free(sect_buf); return 1; } #if 0 // I'm not sure if this is too strict ... 
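        /* Disabled sanity check: would reject a primary partition whose end
         * sector falls beyond the end of the image. */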
else if ((part_start + part_size) > max_addr) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_VS_BLK_NUM); tsk_error_set_errstr ("dos_load_prim_table: Partition ends after image"); return 1; } #endif added = 1; /* Add the partition to the internal structure * If it is an extended partition, process it now */ if (dos_is_ext(part->ptype)) { if (NULL == tsk_vs_part_add(vs, (TSK_DADDR_T) part_start, (TSK_DADDR_T) part_size, TSK_VS_PART_FLAG_META, dos_get_desc(part->ptype), 0, i)) { free(sect_buf); return 1; } if (dos_load_ext_table(vs, part_start, part_start, 1)) { if (tsk_verbose) { fprintf(stderr, "Error loading extended table, moving on"); tsk_error_print(stderr); } tsk_error_reset(); } } else { if (NULL == tsk_vs_part_add(vs, (TSK_DADDR_T) part_start, (TSK_DADDR_T) part_size, TSK_VS_PART_FLAG_ALLOC, dos_get_desc(part->ptype), 0, i)) { free(sect_buf); return 1; } } } free(sect_buf); if (added == 0) { if (tsk_verbose) tsk_fprintf(stderr, "dos_load_prim: No valid entries\n"); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_VS_MAGIC); tsk_error_set_errstr ("dos_load_prim_table: No valid entries in primary table"); return 1; } return 0; } static void dos_close(TSK_VS_INFO * vs) { vs->tag = 0; tsk_vs_part_free(vs); free(vs); } /* * Given the path to the file, open it and load the internal * partition table structure * * offset is the byte offset to the start of the volume system * * If test is 1 then additional tests are performed to make sure * it isn't a FAT or NTFS file system. This is used when autodetection * is being used to detect the volume system type. */ TSK_VS_INFO * tsk_vs_dos_open(TSK_IMG_INFO * img_info, TSK_DADDR_T offset, uint8_t test) { TSK_VS_INFO *vs; // clean up any errors that are lying around tsk_error_reset(); vs = (TSK_VS_INFO *) tsk_malloc(sizeof(*vs)); if (vs == NULL) return NULL; vs->vstype = TSK_VS_TYPE_DOS; vs->tag = TSK_VS_INFO_TAG; vs->img_info = img_info; vs->offset = offset; /* inititialize settings */ vs->part_list = NULL; vs->part_count = 0; vs->endian = 0; vs->block_size = img_info->sector_size; /* Assign functions */ vs->close = dos_close; /* Load the partitions into the sorted list */ if (dos_load_prim_table(vs, test)) { dos_close(vs); return NULL; } /* fill in the sorted list with the 'unknown' values */ if (tsk_vs_part_unused(vs)) { dos_close(vs); return NULL; } return vs; } sleuthkit-4.2.0/tsk/vs/gpt.c000644 000765 000024 00000023345 12576320700 016523 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2006-2011 Brian Carrier, Basis Technology. All rights reserved * Copyright (c) 2004-2005 Brian Carrier. All rights reserved * * This software is distributed under the Common Public License 1.0 */ /** \file gpt.c * The internal functions required to process the GPT GUID Partiition Table. 
*/ #include "tsk_vs_i.h" #include "tsk_gpt.h" #include "tsk_dos.h" /* * Process the partition table at the sector address * * It is loaded into the internal sorted list */ static uint8_t gpt_load_table(TSK_VS_INFO * vs) { gpt_head *head; gpt_entry *ent; dos_sect *dos_part; unsigned int i, a; uint32_t ent_size; char *safe_str, *head_str, *tab_str, *ent_buf; ssize_t cnt; char *sect_buf; TSK_DADDR_T taddr = vs->offset / vs->block_size + GPT_PART_SOFFSET; TSK_DADDR_T max_addr = (vs->img_info->size - vs->offset) / vs->block_size; // max sector if (tsk_verbose) tsk_fprintf(stderr, "gpt_load_table: Sector: %" PRIuDADDR "\n", taddr); if ((sect_buf = tsk_malloc(vs->block_size)) == NULL) return 1; dos_part = (dos_sect *) sect_buf; cnt = tsk_vs_read_block (vs, GPT_PART_SOFFSET, sect_buf, vs->block_size); /* if -1, then tsk_errno is already set */ if (cnt != vs->block_size) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_VS_READ); } tsk_error_set_errstr2 ("Error reading DOS safety partition table in Sector: %" PRIuDADDR, taddr); free(sect_buf); return 1; } /* Sanity Check */ if (tsk_vs_guessu16(vs, dos_part->magic, DOS_MAGIC)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_VS_MAGIC); tsk_error_set_errstr ("Missing DOS safety partition (invalid magic) (Sector: %" PRIuDADDR ")", taddr); free(sect_buf); return 1; } if (dos_part->ptable[0].ptype != GPT_DOS_TYPE) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_VS_MAGIC); tsk_error_set_errstr ("Missing DOS safety partition (invalid type in table: %d)", dos_part->ptable[0].ptype); free(sect_buf); return 1; } /* Read the GPT header */ head = (gpt_head *) sect_buf; cnt = tsk_vs_read_block (vs, GPT_PART_SOFFSET + 1, sect_buf, vs->block_size); if (cnt != vs->block_size) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_VS_READ); } tsk_error_set_errstr2("GPT Header structure in Sector: %" PRIuDADDR, taddr + 1); free(sect_buf); return 1; } if (tsk_getu64(vs->endian, &head->signature) != GPT_HEAD_SIG) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_VS_MAGIC); tsk_error_set_errstr("GPT Header: %" PRIx64, tsk_getu64(vs->endian, &head->signature)); free(sect_buf); return 1; } // now that we checked the sig, lets make the meta entries if ((safe_str = tsk_malloc(16)) == NULL) { free(sect_buf); return 1; } snprintf(safe_str, 16, "Safety Table"); if (NULL == tsk_vs_part_add(vs, (TSK_DADDR_T) 0, (TSK_DADDR_T) 1, TSK_VS_PART_FLAG_META, safe_str, -1, -1)) { free(sect_buf); return 1; } if ((head_str = tsk_malloc(16)) == NULL) { free(sect_buf); return 1; } snprintf(head_str, 16, "GPT Header"); if (NULL == tsk_vs_part_add(vs, (TSK_DADDR_T) 1, (TSK_DADDR_T) ((tsk_getu32(vs->endian, &head->head_size_b) + (vs->block_size - 1)) / vs->block_size), TSK_VS_PART_FLAG_META, head_str, -1, -1)) { free(sect_buf); return 1; } /* Allocate a buffer for each table entry */ ent_size = tsk_getu32(vs->endian, &head->tab_size_b); if (ent_size < sizeof(gpt_entry)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_VS_MAGIC); tsk_error_set_errstr("Header reports partition entry size of %" PRIu32 " and not %" PRIuSIZE "", ent_size, sizeof(gpt_entry)); free(sect_buf); return 1; } if ((tab_str = tsk_malloc(20)) == NULL) { free(sect_buf); return 1; } snprintf(tab_str, 20, "Partition Table"); if (NULL == tsk_vs_part_add(vs, (TSK_DADDR_T) tsk_getu64(vs->endian, &head->tab_start_lba), (TSK_DADDR_T) ((ent_size * tsk_getu32(vs->endian, &head->tab_num_ent) + (vs->block_size - 1)) / vs->block_size), TSK_VS_PART_FLAG_META, tab_str, -1, -1)) { free(sect_buf); return 1; } /* 
Process the partition table */ if ((ent_buf = tsk_malloc(vs->block_size)) == NULL) { free(sect_buf); return 1; } i = 0; for (a = 0; i < tsk_getu32(vs->endian, &head->tab_num_ent); a++) { char *name; /* Read a sector */ cnt = tsk_vs_read_block(vs, tsk_getu64(vs->endian, &head->tab_start_lba) + a, ent_buf, vs->block_size); if (cnt != vs->block_size) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_VS_READ); } tsk_error_set_errstr2 ("Error reading GPT partition table sector : %" PRIuDADDR, tsk_getu64(vs->endian, &head->tab_start_lba) + a); free(ent_buf); free(sect_buf); return 1; } /* Process the sector */ ent = (gpt_entry *) ent_buf; for (; (uintptr_t) ent < (uintptr_t) ent_buf + vs->block_size && i < tsk_getu32(vs->endian, &head->tab_num_ent); i++) { UTF16 *name16; UTF8 *name8; int retVal; if (tsk_verbose) tsk_fprintf(stderr, "gpt_load: %d Starting Sector: %" PRIu64 " End: %" PRIu64 " Flag: %" PRIx64 "\n", i, tsk_getu64(vs->endian, ent->start_lba), tsk_getu64(vs->endian, ent->end_lba), tsk_getu64(vs->endian, ent->flags)); if (tsk_getu64(vs->endian, ent->start_lba) == 0) { ent++; continue; } // make sure the first couple are in the image bounds if ((i < 2) && (tsk_getu64(vs->endian, ent->start_lba) > max_addr)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_VS_BLK_NUM); tsk_error_set_errstr ("gpt_load_table: Starting sector too large for image"); free(sect_buf); free(ent_buf); return 1; } if ((name = tsk_malloc(256)) == NULL) { free(sect_buf); free(ent_buf); return 1; } name16 = (UTF16 *) ((uintptr_t) ent->name); name8 = (UTF8 *) name; retVal = tsk_UTF16toUTF8(vs->endian, (const UTF16 **) &name16, (UTF16 *) ((uintptr_t) name16 + sizeof(ent->name)), &name8, (UTF8 *) ((uintptr_t) name8 + 256), TSKlenientConversion); if (retVal != TSKconversionOK) { if (tsk_verbose) tsk_fprintf(stderr, "gpt_load_table: Error converting name to UTF8: %d\n", retVal); *name = '\0'; } if (NULL == tsk_vs_part_add(vs, (TSK_DADDR_T) tsk_getu64(vs->endian, ent->start_lba), (TSK_DADDR_T) (tsk_getu64(vs->endian, ent->end_lba) - tsk_getu64(vs->endian, ent->start_lba) + 1), TSK_VS_PART_FLAG_ALLOC, name, -1, i)) { free(sect_buf); free(ent_buf); return 1; } ent++; } } free(sect_buf); free(ent_buf); return 0; } static void gpt_close(TSK_VS_INFO * vs) { vs->tag = 0; tsk_vs_part_free(vs); free(vs); } TSK_VS_INFO * tsk_vs_gpt_open(TSK_IMG_INFO * img_info, TSK_DADDR_T offset) { TSK_VS_INFO *vs; // clean up any errors that are lying around tsk_error_reset(); vs = (TSK_VS_INFO *) tsk_malloc(sizeof(*vs)); if (vs == NULL) return NULL; vs->img_info = img_info; vs->vstype = TSK_VS_TYPE_GPT; vs->tag = TSK_VS_INFO_TAG; /* If an offset was given, then use that too */ vs->offset = offset; /* inititialize settings */ vs->part_list = NULL; vs->part_count = 0; vs->endian = 0; vs->block_size = img_info->sector_size; /* Assign functions */ vs->close = gpt_close; /* Load the partitions into the sorted list */ if (gpt_load_table(vs)) { int found = 0; if (tsk_verbose) tsk_fprintf(stderr, "gpt_open: Trying other sector sizes\n"); /* Before we give up, lets try some other sector sizes */ vs->block_size = 512; while (vs->block_size <= 8192) { if (tsk_verbose) tsk_fprintf(stderr, "gpt_open: Trying sector size: %d\n", vs->block_size); if (gpt_load_table(vs)) { vs->block_size *= 2; continue; } found = 1; break; } if (found == 0) { gpt_close(vs); return NULL; } } /* fill in the sorted list with the 'unknown' values */ if (tsk_vs_part_unused(vs)) { gpt_close(vs); return NULL; } return vs; } sleuthkit-4.2.0/tsk/vs/mac.c000644 000765 
000024 00000016075 12576320700 016473 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2006-2011 Brian Carrier, Basis Technology. All rights reserved * Copyright (c) 2003-2005 Brian Carrier. All rights reserved * * This software is distributed under the Common Public License 1.0 */ /** \file mac.c * Contains the internal functions to process and load a Mac partition table. */ #include "tsk_vs_i.h" #include "tsk_mac.h" /* * Process the partition table at the sector address * * It is loaded into the internal sorted list * * Return 1 on error and 0 on success */ static uint8_t mac_load_table(TSK_VS_INFO * vs) { char *part_buf; mac_part *part; char *table_str; uint32_t idx, max_part; TSK_DADDR_T taddr = vs->offset / vs->block_size + MAC_PART_SOFFSET; TSK_DADDR_T max_addr = (vs->img_info->size - vs->offset) / vs->block_size; // max sector if (tsk_verbose) tsk_fprintf(stderr, "mac_load_table: Sector: %" PRIuDADDR "\n", taddr); /* The table can be variable length, so we loop on it * The idx variable shows which round it is * Each structure is a block size */ if ((part_buf = tsk_malloc(vs->block_size)) == NULL) return 1; part = (mac_part *) part_buf; max_part = 1; /* set it to 1 and it will be set in the first loop */ for (idx = 0; idx < max_part; idx++) { uint32_t part_start; uint32_t part_size; uint32_t part_status; char *str; ssize_t cnt; int flag = 0; /* Read the entry */ cnt = tsk_vs_read_block (vs, MAC_PART_SOFFSET + idx, part_buf, vs->block_size); /* If -1, then tsk_errno is already set */ if (cnt != vs->block_size) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_VS_READ); } tsk_error_set_errstr2("MAC Partition entry %" PRIuDADDR, taddr + idx); free(part_buf); return 1; } /* Sanity Check */ if (idx == 0) { /* Set the endian ordering the first time around */ if (tsk_vs_guessu16(vs, part->magic, MAC_MAGIC)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_VS_MAGIC); tsk_error_set_errstr("Mac partition table entry (Sector: %" PRIuDADDR ") %" PRIx16, (taddr + idx), tsk_getu16(vs->endian, part->magic)); if (tsk_verbose) tsk_fprintf(stderr, "mac_load: Missing initial magic value\n"); free(part_buf); return 1; } /* Get the number of partitions */ max_part = tsk_getu32(vs->endian, part->pmap_size); } else if (tsk_getu16(vs->endian, part->magic) != MAC_MAGIC) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_VS_MAGIC); tsk_error_set_errstr("Mac partition table entry (Sector: %" PRIuDADDR ") %" PRIx16, (taddr + idx), tsk_getu16(vs->endian, part->magic)); if (tsk_verbose) tsk_fprintf(stderr, "mac_load: Missing magic value in entry %" PRIu32 "\n", idx); free(part_buf); return 1; } part_start = tsk_getu32(vs->endian, part->start_sec); part_size = tsk_getu32(vs->endian, part->size_sec); part_status = tsk_getu32(vs->endian, part->status); if (tsk_verbose) tsk_fprintf(stderr, "mac_load: %" PRIu32 " Starting Sector: %" PRIu32 " Size: %" PRIu32 " Type: %s Status: %"PRIu32"\n", idx, part_start, part_size, part->type, part_status); if (part_size == 0) continue; if (part_status == 0) flag = TSK_VS_PART_FLAG_UNALLOC; else flag = TSK_VS_PART_FLAG_ALLOC; // make sure the first couple are within the bounds of the image. 
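        /* As in the BSD and DOS loaders, only the first two entries are
         * required to start within the image bounds. */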
if ((idx < 2) && (part_start > max_addr)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_VS_BLK_NUM); tsk_error_set_errstr ("mac_load_table: Starting sector too large for image"); if (tsk_verbose) tsk_fprintf(stderr, "mac_load: Starting sector too large for image (%" PRIu32 " vs %" PRIu32 ")\n", part_start, max_addr); free(part_buf); return 1; } if ((str = tsk_malloc(sizeof(part->name))) == NULL) { free(part_buf); return 1; } strncpy(str, (char *) part->type, sizeof(part->name)); if (NULL == tsk_vs_part_add(vs, (TSK_DADDR_T) part_start, (TSK_DADDR_T) part_size, (TSK_VS_PART_FLAG_ENUM)flag, str, -1, idx)) return 1; } free(part_buf); part_buf = NULL; /* Add an entry for the table length */ if ((table_str = tsk_malloc(16)) == NULL) { return 1; } snprintf(table_str, 16, "Table"); if (NULL == tsk_vs_part_add(vs, taddr, max_part, TSK_VS_PART_FLAG_META, table_str, -1, -1)) { return 1; } return 0; } static void mac_close(TSK_VS_INFO * vs) { vs->tag = 0; tsk_vs_part_free(vs); free(vs); } /* * Process img_info as a Mac disk. Initialize TSK_VS_INFO or return * NULL on error * */ TSK_VS_INFO * tsk_vs_mac_open(TSK_IMG_INFO * img_info, TSK_DADDR_T offset) { TSK_VS_INFO *vs; // clean up any errors that are lying around tsk_error_reset(); vs = (TSK_VS_INFO *) tsk_malloc(sizeof(*vs)); if (vs == NULL) return NULL; vs->img_info = img_info; vs->vstype = TSK_VS_TYPE_MAC; vs->tag = TSK_VS_INFO_TAG; /* If an offset was given, then use that too */ vs->offset = offset; //vs->sect_offset = offset + MAC_PART_OFFSET; /* inititialize settings */ vs->part_list = NULL; vs->part_count = 0; vs->endian = 0; vs->block_size = img_info->sector_size; /* Assign functions */ vs->close = mac_close; /* Load the partitions into the sorted list */ if (mac_load_table(vs)) { // try some other sector sizes uint8_t returnNull = 1; if (vs->block_size == 512) { if (tsk_verbose) tsk_fprintf(stderr, "mac_open: Trying 4096-byte sector size instead of 512-byte\n"); vs->block_size = 4096; returnNull = mac_load_table(vs); } else if (vs->block_size == 4096) { if (tsk_verbose) tsk_fprintf(stderr, "mac_open: Trying 512-byte sector size instead of 4096-byte\n"); vs->block_size = 512; returnNull = mac_load_table(vs); } if (returnNull) { mac_close(vs); return NULL; } } /* fill in the sorted list with the 'unknown' values */ if (tsk_vs_part_unused(vs)) { mac_close(vs); return NULL; } return vs; } sleuthkit-4.2.0/tsk/vs/Makefile.am000644 000765 000024 00000000506 12576320700 017613 0ustar00carrierstaff000000 000000 AM_CPPFLAGS = -I../.. -I$(srcdir)/../.. -Wall EXTRA_DIST = .indent.pro noinst_LTLIBRARIES = libtskvs.la # Note that the .h files are in the top-level Makefile libtskvs_la_SOURCES = mm_open.c mm_part.c mm_types.c mm_io.c \ bsd.c dos.c gpt.c mac.c sun.c tsk_vs_i.h indent: indent *.c *.h clean-local: -rm -f *.c~ *.h~ sleuthkit-4.2.0/tsk/vs/Makefile.in000644 000765 000024 00000045150 12576326300 017632 0ustar00carrierstaff000000 000000 # Makefile.in generated by automake 1.15 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2014 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) ;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) pkgdatadir = $(datadir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ pkglibexecdir = $(libexecdir)/@PACKAGE@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = tsk/vs ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ax_pthread.m4 \ $(top_srcdir)/m4/cppunit.m4 \ $(top_srcdir)/m4/ax_jni_include_dir.m4 \ $(top_srcdir)/m4/ac_prog_javac_works.m4 \ $(top_srcdir)/m4/ac_prog_javac.m4 \ $(top_srcdir)/m4/ac_prog_java_works.m4 \ $(top_srcdir)/m4/ac_prog_java.m4 $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/tsk/tsk_config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) libtskvs_la_LIBADD = am_libtskvs_la_OBJECTS = mm_open.lo mm_part.lo mm_types.lo mm_io.lo \ bsd.lo dos.lo gpt.lo mac.lo sun.lo libtskvs_la_OBJECTS = $(am_libtskvs_la_OBJECTS) AM_V_lt = $(am__v_lt_@AM_V@) am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) am__v_lt_0 = --silent am__v_lt_1 = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir)/tsk depcomp = $(SHELL) $(top_srcdir)/config/depcomp am__depfiles_maybe = depfiles am__mv = mv -f COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ 
$(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CFLAGS) $(CFLAGS) AM_V_CC = $(am__v_CC_@AM_V@) am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@) am__v_CC_0 = @echo " CC " $@; am__v_CC_1 = CCLD = $(CC) LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CCLD = $(am__v_CCLD_@AM_V@) am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@) am__v_CCLD_0 = @echo " CCLD " $@; am__v_CCLD_1 = SOURCES = $(libtskvs_la_SOURCES) DIST_SOURCES = $(libtskvs_la_SOURCES) am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/config/depcomp DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) ACLOCAL = @ACLOCAL@ ALLOCA = @ALLOCA@ AMTAR = @AMTAR@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ ANT_FOUND = @ANT_FOUND@ AR = @AR@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ JAVA = @JAVA@ JAVAC = @JAVAC@ JNI_CPPFLAGS = @JNI_CPPFLAGS@ LD = @LD@ LDFLAGS = @LDFLAGS@ LIBOBJS = @LIBOBJS@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBTSK_LDFLAGS = @LIBTSK_LDFLAGS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAINT = @MAINT@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ NM = @NM@ NMEDIT = @NMEDIT@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PERL = @PERL@ PTHREAD_CC = @PTHREAD_CC@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ PTHREAD_LIBS = @PTHREAD_LIBS@ RANLIB = @RANLIB@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ STRIP = @STRIP@ VERSION = @VERSION@ Z_PATH = @Z_PATH@ _ACJNI_JAVAC = @_ACJNI_JAVAC@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ 
abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ ax_pthread_config = @ax_pthread_config@ bindir = @bindir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ datadir = @datadir@ datarootdir = @datarootdir@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ prefix = @prefix@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ uudecode = @uudecode@ AM_CPPFLAGS = -I../.. -I$(srcdir)/../.. -Wall EXTRA_DIST = .indent.pro noinst_LTLIBRARIES = libtskvs.la # Note that the .h files are in the top-level Makefile libtskvs_la_SOURCES = mm_open.c mm_part.c mm_types.c mm_io.c \ bsd.c dos.c gpt.c mac.c sun.c tsk_vs_i.h all: all-am .SUFFIXES: .SUFFIXES: .c .lo .o .obj $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign tsk/vs/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign tsk/vs/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; \ locs=`for p in $$list; do echo $$p; done | \ sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \ sort -u`; \ test -z "$$locs" || { \ echo rm -f $${locs}; \ rm -f $${locs}; \ } libtskvs.la: $(libtskvs_la_OBJECTS) $(libtskvs_la_DEPENDENCIES) $(EXTRA_libtskvs_la_DEPENDENCIES) $(AM_V_CCLD)$(LINK) $(libtskvs_la_OBJECTS) $(libtskvs_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/bsd.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/dos.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/gpt.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mac.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mm_io.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mm_open.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mm_part.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mm_types.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sun.Plo@am__quote@ .c.o: @am__fastdepCC_TRUE@ $(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ $< .c.obj: @am__fastdepCC_TRUE@ $(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .c.lo: @am__fastdepCC_TRUE@ $(AM_V_CC)$(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LTCOMPILE) -c -o $@ $< mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-am TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || 
unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-am CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-am cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool clean-local \ clean-noinstLTLIBRARIES mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: CTAGS GTAGS TAGS all all-am check check-am clean clean-generic \ clean-libtool clean-local clean-noinstLTLIBRARIES \ cscopelist-am ctags ctags-am distclean distclean-compile \ distclean-generic distclean-libtool distclean-tags distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-pdf \ install-pdf-am install-ps install-ps-am install-strip \ installcheck installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags tags-am uninstall uninstall-am .PRECIOUS: Makefile indent: indent *.c *.h clean-local: -rm -f *.c~ *.h~ # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: sleuthkit-4.2.0/tsk/vs/mm_io.c000644 000765 000024 00000006226 12576320700 017030 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2006-2011 Brian Carrier, Basis Technology. All rights reserved * Copyright (c) 2003-2005 Brian Carrier. All rights reserved * * This software is distributed under the Common Public License 1.0 */ /** \file mm_io.c * Contains the wrapper code that allows one to read sectors from * a TSK_VS_INFO or TSK_VS_PART_INFO structure. These functions * call the underlying TSK_IMG_INFO functions. */ #include #include "tsk_vs_i.h" /** * \ingroup vslib * Reads one or more blocks of data with an address relative to the start of the volume system. * * @param a_vs Pointer to open volume system * @param a_addr Sector address to read from, relative to start of VOLUME SYSTEM. * @param a_buf Buffer to store data in * @param a_len Amount of data to read (in bytes - must be a multiple of block_size) * @returns Number of bytes read or -1 on error */ ssize_t tsk_vs_read_block(TSK_VS_INFO * a_vs, TSK_DADDR_T a_addr, char *a_buf, size_t a_len) { if (a_len % a_vs->block_size) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_VS_READ); tsk_error_set_errstr("tsk_vs_read_block: length %" PRIuSIZE "" " not a multiple of %d", a_len, a_vs->block_size); return -1; } return tsk_img_read(a_vs->img_info, a_vs->offset + (TSK_OFF_T) (a_addr * a_vs->block_size), a_buf, a_len); } /** * \ingroup vslib * Reads data starting at a byte address relative to the start of a VOLUME in a volume system. 
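 *
 * A minimal usage sketch (illustrative only; assumes an open volume handle
 * named a_part and that a 512-byte read is appropriate for the caller):
 * \code
 *   char buf[512];
 *   ssize_t cnt = tsk_vs_part_read(a_part, 0, buf, sizeof(buf));
 *   if (cnt == -1) {
 *       // error details were set via the tsk_error_*() routines
 *   }
 * \endcode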
* * @param a_vs_part info Pointer to open volume in a volume system * @param a_off Byte offset to read from, relative to start of VOLUME in volume system. * @param a_buf Buffer to store data in * @param a_len Amount of data to read (in bytes) * @returns Number of bytes read or -1 on error */ ssize_t tsk_vs_part_read(const TSK_VS_PART_INFO * a_vs_part, TSK_OFF_T a_off, char *a_buf, size_t a_len) { TSK_VS_INFO *vs = a_vs_part->vs; return tsk_img_read(vs->img_info, vs->offset + (TSK_OFF_T) a_vs_part->start * vs->block_size + a_off, a_buf, a_len); } /** * \ingroup vslib * Reads one or more blocks of data with an address relative to the start of a VOLUME in a volume system. * * @param a_vs_part info Pointer to open volume in a volume system * @param a_addr Block address to start reading from, relative to start of VOLUME in volume system. * @param a_buf Buffer to store data in * @param a_len Amount of data to read (in bytes - must be a multiple of block_size) * @returns Number of bytes read or -1 on error */ ssize_t tsk_vs_part_read_block(const TSK_VS_PART_INFO * a_vs_part, TSK_DADDR_T a_addr, char *a_buf, size_t a_len) { TSK_VS_INFO *vs = a_vs_part->vs; if (a_len % vs->block_size) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_VS_READ); tsk_error_set_errstr("tsk_vs_part_read_block: length %" PRIuSIZE "" " not a multiple of %d", a_len, vs->block_size); return -1; } return tsk_img_read(vs->img_info, vs->offset + (TSK_OFF_T) (a_vs_part->start + a_addr) * vs->block_size, a_buf, a_len); } sleuthkit-4.2.0/tsk/vs/mm_open.c000644 000765 000024 00000013751 12576320700 017363 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2003-2011 Brian Carrier. All rights reserved * * tsk_vs_open - wrapper function for specific partition type * * This software is distributed under the Common Public License 1.0 */ /** * \file mm_open.c * Contains general code to open volume systems. */ #include "tsk_vs_i.h" /** * \ingroup vslib * * Open a disk image and process the media management system * data. This calls VS specific code to determine the type and * collect data. * * @param img_info The opened disk image. * @param offset Byte offset in the disk image to start analyzing from. * @param type Type of volume system (including auto detect) * * @return NULL on error. */ TSK_VS_INFO * tsk_vs_open(TSK_IMG_INFO * img_info, TSK_DADDR_T offset, TSK_VS_TYPE_ENUM type) { if (img_info == NULL) { /* Opening the image file(s) failed, if attempted. */ tsk_error_reset(); tsk_error_set_errno(TSK_ERR_IMG_NOFILE); tsk_error_set_errstr("mm_open"); return NULL; } /* Autodetect mode * We need to try all of them in case there are multiple * installations * * NOte that errors that are encountered during the testing process * will not be reported */ if (type == TSK_VS_TYPE_DETECT) { TSK_VS_INFO *vs, *vs_set = NULL; char *set = NULL; if ((vs = tsk_vs_dos_open(img_info, offset, 1)) != NULL) { set = "DOS"; vs_set = vs; } else { tsk_error_reset(); } if ((vs = tsk_vs_bsd_open(img_info, offset)) != NULL) { // if (set == NULL) { // In this case, BSD takes priority because BSD partitions start off with // the DOS magic value in the first sector with the boot code. 
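            // With the conflict check above commented out, a BSD result simply
            // replaces any DOS result found earlier in the detection sequence.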
set = "BSD"; vs_set = vs; /* } else { vs_set->close(vs_set); vs->close(vs); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_VS_UNKTYPE); tsk_error_set_errstr( "BSD or %s at %" PRIuDADDR, set, offset); tsk_errstr2[0] = '\0'; return NULL; } */ } else { tsk_error_reset(); } if ((vs = tsk_vs_gpt_open(img_info, offset)) != NULL) { if (set != NULL) { /* GPT drives have a DOS Safety partition table. * Test to see if we can ignore one */ if (strcmp(set, "DOS") == 0) { TSK_VS_PART_INFO *tmp_set; for (tmp_set = vs_set->part_list; tmp_set; tmp_set = tmp_set->next) { if ((tmp_set->desc) && (strncmp(tmp_set->desc, "GPT Safety", 10) == 0) && (tmp_set->start <= 63)) { if (tsk_verbose) tsk_fprintf(stderr, "mm_open: Ignoring DOS Safety GPT Partition\n"); set = NULL; vs_set = NULL; break; } } } if (set != NULL) { vs_set->close(vs_set); vs->close(vs); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_VS_UNKTYPE); tsk_error_set_errstr("GPT or %s at %" PRIuDADDR, set, offset); return NULL; } } set = "GPT"; vs_set = vs; } else { tsk_error_reset(); } if ((vs = tsk_vs_sun_open(img_info, offset)) != NULL) { if (set == NULL) { set = "Sun"; vs_set = vs; } else { vs_set->close(vs_set); vs->close(vs); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_VS_UNKTYPE); tsk_error_set_errstr("Sun or %s at %" PRIuDADDR, set, offset); return NULL; } } else { tsk_error_reset(); } if ((vs = tsk_vs_mac_open(img_info, offset)) != NULL) { if (set == NULL) { set = "Mac"; vs_set = vs; } else { vs_set->close(vs_set); vs->close(vs); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_VS_UNKTYPE); tsk_error_set_errstr("Mac or %s at %" PRIuDADDR, set, offset); return NULL; } } else { tsk_error_reset(); } if (vs_set == NULL) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_VS_UNKTYPE); return NULL; } return vs_set; } else { switch (type) { case TSK_VS_TYPE_DOS: return tsk_vs_dos_open(img_info, offset, 0); case TSK_VS_TYPE_MAC: return tsk_vs_mac_open(img_info, offset); case TSK_VS_TYPE_BSD: return tsk_vs_bsd_open(img_info, offset); case TSK_VS_TYPE_SUN: return tsk_vs_sun_open(img_info, offset); case TSK_VS_TYPE_GPT: return tsk_vs_gpt_open(img_info, offset); case TSK_VS_TYPE_UNSUPP: default: tsk_error_reset(); tsk_error_set_errno(TSK_ERR_VS_UNSUPTYPE); tsk_error_set_errstr("%d", type); return NULL; } } } /** * \ingroup vslib * Closes an open volume system * @param a_vs Pointer to the open volume system structure. */ void tsk_vs_close(TSK_VS_INFO * a_vs) { if (a_vs == NULL) { return; } a_vs->close((TSK_VS_INFO *) a_vs); } sleuthkit-4.2.0/tsk/vs/mm_part.c000644 000765 000024 00000020632 12576320700 017364 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2003-2011 Brian Carrier. All rights reserved * * This software is distributed under the Common Public License 1.0 */ /** \file mm_part.c * Contains the functions need to create, maintain, and access the linked list of * partitions in a volume. */ #include "tsk_vs_i.h" /** * Add a partition to a sorted list * @param a_vs Volume system that partition belongs to * @param a_start Starting sector address of volume (relative to start of volume) * @param len Length of volume in sectors * @param type Type of volume * @param desc Text description of partition. Note that this is not copied * and must not be freed until the volume system has been closed. * @param table The table ID that the volume was located in or -1 for volumes not in a partition table. 
* @param slot The slot number in the partition table that the volume was located in or -1 for volumes not in a partition table. * @returns Pointer to structure that was created for the partition or NULL on error. */ TSK_VS_PART_INFO * tsk_vs_part_add(TSK_VS_INFO * a_vs, TSK_DADDR_T a_start, TSK_DADDR_T len, TSK_VS_PART_FLAG_ENUM type, char *desc, int8_t table, int8_t slot) { TSK_VS_PART_INFO *part; TSK_VS_PART_INFO *cur_part; if ((part = (TSK_VS_PART_INFO *) tsk_malloc(sizeof(TSK_VS_PART_INFO))) == NULL) { return NULL; } /* set the values */ part->next = NULL; part->prev = NULL; part->start = a_start; part->len = len; part->desc = desc; part->table_num = table; part->slot_num = slot; part->flags = type; part->vs = a_vs; part->addr = 0; part->tag = TSK_VS_PART_INFO_TAG; /* is this the first entry in the list */ if (a_vs->part_list == NULL) { a_vs->part_list = part; a_vs->part_count = 1; return part; } /* Cycle through to find the correct place to put it into */ for (cur_part = a_vs->part_list; cur_part != NULL; cur_part = cur_part->next) { /* The one to add starts before this partition */ if (cur_part->start > part->start) { part->next = cur_part; part->prev = cur_part->prev; if (part->prev) part->prev->next = part; cur_part->prev = part; /* If we are now the head update a_vs */ if (part->prev == NULL) a_vs->part_list = part; /* update the count and address numbers */ a_vs->part_count++; part->addr = cur_part->addr; for (; cur_part != NULL; cur_part = cur_part->next) cur_part->addr++; return part; } /* the one to add is bigger then current and the list is done */ else if (cur_part->next == NULL) { cur_part->next = part; part->prev = cur_part; /* Update partition counts and addresses */ a_vs->part_count++; part->addr = cur_part->addr + 1; return part; } /* The one to add fits in between this and the next */ else if (cur_part->next->start > part->start) { part->prev = cur_part; part->next = cur_part->next; cur_part->next->prev = part; cur_part->next = part; /* Update partition counts and addresses */ a_vs->part_count++; part->addr = cur_part->addr + 1; for (cur_part = part->next; cur_part != NULL; cur_part = cur_part->next) cur_part->addr++; return part; } } return NULL; } /** * Identify regions in the partition list where there are unused sectors * and create new entries for them. * * @param a_vs Pointer to open volume system * @returns 1 on error and 0 on success */ uint8_t tsk_vs_part_unused(TSK_VS_INFO * a_vs) { TSK_VS_PART_INFO *part = a_vs->part_list; TSK_DADDR_T prev_end = 0; /* prev_ent is set to where the previous entry stopped plus 1 */ for (part = a_vs->part_list; part != NULL; part = part->next) { // ignore the META volume if (part->flags & TSK_VS_PART_FLAG_META) continue; // there is space before current and previous volume if (part->start > prev_end) { char *str; if ((str = tsk_malloc(12)) == NULL) return 1; snprintf(str, 12, "Unallocated"); if (NULL == tsk_vs_part_add(a_vs, prev_end, part->start - prev_end, TSK_VS_PART_FLAG_UNALLOC, str, -1, -1)) { free(str); return 1; } } prev_end = part->start + part->len; } /* Is there unallocated space at the end? 
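     * That is, are there sectors between the end of the last volume (prev_end)
     * and the end of the image (img_info->size / block_size)?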
*/ if (prev_end < (TSK_DADDR_T) (a_vs->img_info->size / a_vs->block_size)) { char *str; if ((str = tsk_malloc(12)) == NULL) return 1; snprintf(str, 12, "Unallocated"); if (NULL == tsk_vs_part_add(a_vs, prev_end, a_vs->img_info->size / a_vs->block_size - prev_end, TSK_VS_PART_FLAG_UNALLOC, str, -1, -1)) { free(str); return 1; } } return 0; } /* * free the buffer with the description */ void tsk_vs_part_free(TSK_VS_INFO * a_vs) { TSK_VS_PART_INFO *part = a_vs->part_list; TSK_VS_PART_INFO *part2; while (part) { if (part->desc) free(part->desc); part->tag = 0; part2 = part->next; free(part); part = part2; } a_vs->part_list = NULL; return; } /** * \ingroup vslib * Return handle to a volume in the volume system. * * @param a_vs Open volume system * @param a_idx Index for volume to return (0-based) * @returns Handle to volume or NULL on error */ const TSK_VS_PART_INFO * tsk_vs_part_get(const TSK_VS_INFO * a_vs, TSK_PNUM_T a_idx) { TSK_VS_PART_INFO *part; if ((a_vs == NULL) || (a_vs->tag != TSK_VS_INFO_TAG)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_VS_ARG); tsk_error_set_errstr ("tsk_vs_part_get: pointer is NULL or has unallocated structures"); return NULL; } if (a_idx >= a_vs->part_count) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_VS_ARG); tsk_error_set_errstr("tsk_vs_part_get: Volume address is too big"); return NULL; } for (part = a_vs->part_list; part != NULL; part = part->next) { if (part->addr == a_idx) return part; } return NULL; } /** * \ingroup vslib * Walk a range of partitions and pass the data to a callback function. * * @param a_vs Pointer to open volume system * @param a_start Address of first partition to walk from. * @param a_last Address of last partition to walk to. * @param a_flags Flags that are used to identify which of the partitions in the range should be returned (if 0, all partitions will be returned). * @param a_action Callback action to call for each partition. * @param a_ptr Pointer to data that will be passed to callback. * @returns 1 on error and 0 on success */ uint8_t tsk_vs_part_walk(TSK_VS_INFO * a_vs, TSK_PNUM_T a_start, TSK_PNUM_T a_last, TSK_VS_PART_FLAG_ENUM a_flags, TSK_VS_PART_WALK_CB a_action, void *a_ptr) { TSK_VS_PART_INFO *part; if (a_start >= a_vs->part_count) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_VS_WALK_RNG); tsk_error_set_errstr ("tsk_vs_part_walk: Start partition too large: %" PRIuPNUM "", a_start); return 1; } if (a_last >= a_vs->part_count) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_VS_WALK_RNG); tsk_error_set_errstr("tsk_vs_part_walk: End partition too large: %" PRIuPNUM "", a_last); return 1; } if (a_flags == 0) { a_flags |= (TSK_VS_PART_FLAG_ALLOC | TSK_VS_PART_FLAG_UNALLOC | TSK_VS_PART_FLAG_META); } for (part = a_vs->part_list; part != NULL; part = part->next) { if ((part->addr >= a_start) && ((part->flags & a_flags) != 0)) { int retval; retval = a_action(a_vs, part, a_ptr); if (retval == TSK_WALK_STOP) { return 0; } else if (retval == TSK_WALK_ERROR) { return 1; } } if (part->addr >= a_last) break; } return 0; } sleuthkit-4.2.0/tsk/vs/mm_types.c000644 000765 000024 00000006433 12576320700 017565 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2003-2011 Brian Carrier. All rights reserved * * This software is distributed under the Common Public License 1.0 */ /** \file mm_types.c * Contains the code to parse and print the strings for the supported volume system types. 
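 *
 * For example, tsk_vs_type_toid_utf8("gpt") maps to TSK_VS_TYPE_GPT and
 * tsk_vs_type_toname(TSK_VS_TYPE_GPT) maps back to "gpt", as defined by the
 * vs_open_table array below.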
*/ #include "tsk_vs_i.h" typedef struct { char *name; TSK_VS_TYPE_ENUM code; char *comment; } VS_TYPES; VS_TYPES vs_open_table[] = { {"dos", TSK_VS_TYPE_DOS, "DOS Partition Table"}, {"mac", TSK_VS_TYPE_MAC, "MAC Partition Map"}, {"bsd", TSK_VS_TYPE_BSD, "BSD Disk Label"}, {"sun", TSK_VS_TYPE_SUN, "Sun Volume Table of Contents (Solaris)"}, {"gpt", TSK_VS_TYPE_GPT, "GUID Partition Table (EFI)"}, {0}, }; /** * \ingroup vslib * Parse a string with the volume system type and return its internal ID. * * @param str String to parse. * @returns ID of string (or unsupported if the name is unknown) */ TSK_VS_TYPE_ENUM tsk_vs_type_toid(const TSK_TCHAR * str) { char tmp[16]; int i; // convert to char for (i = 0; i < 15 && str[i] != '\0'; i++) { tmp[i] = (char) str[i]; } tmp[i] = '\0'; return tsk_vs_type_toid_utf8(tmp); } /** * \ingroup vslib * Parse a string with the volume system type and return its internal ID. * * @param str String to parse (always in UTF-8). * @returns ID of string (or unsupported if the name is unknown) */ TSK_VS_TYPE_ENUM tsk_vs_type_toid_utf8(const char *str) { VS_TYPES *types; for (types = vs_open_table; types->name; types++) { if (strcmp(str, types->name) == 0) { return types->code; } } return TSK_VS_TYPE_UNSUPP; } /** * \ingroup vslib * Print the supported volume system type names to an open handle. * @param hFile Handle to print to. */ void tsk_vs_type_print(FILE * hFile) { VS_TYPES *types; tsk_fprintf(hFile, "Supported partition types:\n"); for (types = vs_open_table; types->name; types++) tsk_fprintf(hFile, "\t%s (%s)\n", types->name, types->comment); } /** * \ingroup vslib * Return the supported volume system types. * @returns The bit in the return value is 1 if the type is supported. */ TSK_VS_TYPE_ENUM tsk_vs_type_supported() { TSK_VS_TYPE_ENUM sup_types = 0; VS_TYPES *types; for (types = vs_open_table; types->name; types++) { sup_types |= types->code; } return sup_types; } /** * \ingroup vslib * Return the string name of a partition type ID. * * @param type Volume system type * @returns name of type or NULL on error */ const char * tsk_vs_type_toname(TSK_VS_TYPE_ENUM type) { VS_TYPES *types; for (types = vs_open_table; types->name; types++) { if (types->code == type) { return types->name; } } if (type == TSK_VS_TYPE_DBFILLER) { return "DB Filler"; } return NULL; } /** * \ingroup vslib * Return the string description of a partition type ID. * * @param type Volume system type * @returns description of type or NULL on error */ const char * tsk_vs_type_todesc(TSK_VS_TYPE_ENUM type) { VS_TYPES *types; for (types = vs_open_table; types->name; types++) { if (types->code == type) { return types->comment; } } return NULL; } sleuthkit-4.2.0/tsk/vs/sun.c000644 000765 000024 00000026037 12576320700 016537 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2006-2011 Brian Carrier, Basis Technology. All rights reserved * Copyright (c) 2003-2005 Brian Carrier. All rights reserved * * SUN - Sun VTOC code * * This software is distributed under the Common Public License 1.0 */ /** \file sun.c * Contains the internal SUN VTOC volume system processing code. 
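 *
 * The label can be stored in either the SPARC layout (sector 0,
 * SUN_SPARC_PART_SOFFSET) or the i386 layout (sector 1, SUN_I386_PART_SOFFSET);
 * sun_load_table() detects which one is present and calls the matching loader.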
*/ #include "tsk_vs_i.h" #include "tsk_sun.h" /* * Return a buffer with a description of the partition type */ static char * sun_get_desc(uint16_t fstype) { char *str = tsk_malloc(64); if (str == NULL) return ""; switch (fstype) { case 0: strncpy(str, "Unassigned (0x00)", 64); break; case 1: strncpy(str, "boot (0x01)", 64); break; case 2: strncpy(str, "/ (0x02)", 64); break; case 3: strncpy(str, "swap (0x03)", 64); break; case 4: strncpy(str, "/usr/ (0x04)", 64); break; case 5: strncpy(str, "backup (0x05)", 64); break; case 6: strncpy(str, "stand (0x06)", 64); break; case 7: strncpy(str, "/var/ (0x07)", 64); break; case 8: strncpy(str, "/home/ (0x08)", 64); break; case 9: strncpy(str, "alt sector (0x09)", 64); break; case 10: strncpy(str, "cachefs (0x0A)", 64); break; default: snprintf(str, 64, "Unknown Type (0x%.4x)", fstype); break; } return str; } /* * Load an Intel disk label, this is called by sun_load_table */ static uint8_t sun_load_table_i386(TSK_VS_INFO * vs, sun_dlabel_i386 * dlabel_x86) { uint32_t idx = 0; TSK_DADDR_T max_addr = (vs->img_info->size - vs->offset) / vs->block_size; // max sector if (tsk_verbose) tsk_fprintf(stderr, "load_table_i386: Number of partitions: %d\n", tsk_getu16(vs->endian, dlabel_x86->num_parts)); /* Cycle through the partitions, there are either 8 or 16 */ for (idx = 0; idx < tsk_getu16(vs->endian, dlabel_x86->num_parts); idx++) { TSK_VS_PART_FLAG_ENUM ptype = TSK_VS_PART_FLAG_ALLOC; if (tsk_verbose) tsk_fprintf(stderr, "load_table_i386: %" PRIu32 " Starting Sector: %" PRIu32 " Size: %" PRIu32 " Type: %" PRIu16 "\n", idx, tsk_getu32(vs->endian, dlabel_x86->part[idx].start_sec), tsk_getu32(vs->endian, dlabel_x86->part[idx].size_sec), tsk_getu16(vs->endian, dlabel_x86->part[idx].type)); if (tsk_getu32(vs->endian, dlabel_x86->part[idx].size_sec) == 0) continue; // make sure the first couple are in the image bounds if ((idx < 2) && (tsk_getu32(vs->endian, dlabel_x86->part[idx].start_sec) > max_addr)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_VS_BLK_NUM); tsk_error_set_errstr ("sun_load_i386: Starting sector too large for image"); return 1; } // set the entry that covers the entire disk image as DESC if ((tsk_getu16(vs->endian, dlabel_x86->part[idx].type) == 5) && (tsk_getu32(vs->endian, dlabel_x86->part[idx].start_sec) == 0)) ptype = TSK_VS_PART_FLAG_META; /* Add the partition to the internal sorted list */ if (NULL == tsk_vs_part_add(vs, (TSK_DADDR_T) tsk_getu32(vs->endian, dlabel_x86->part[idx].start_sec), (TSK_DADDR_T) tsk_getu32(vs->endian, dlabel_x86->part[idx].size_sec), ptype, sun_get_desc(tsk_getu16(vs->endian, dlabel_x86->part[idx].type)), -1, idx)) { return 1; } } return 0; } /* load a sparc disk label, this is called from the general * sun_load_table */ static uint8_t sun_load_table_sparc(TSK_VS_INFO * vs, sun_dlabel_sparc * dlabel_sp) { uint32_t idx = 0; uint32_t cyl_conv; TSK_DADDR_T max_addr = (vs->img_info->size - vs->offset) / vs->block_size; // max sector /* The value to convert cylinders to sectors */ cyl_conv = tsk_getu16(vs->endian, dlabel_sp->sec_per_tr) * tsk_getu16(vs->endian, dlabel_sp->num_head); if (tsk_verbose) tsk_fprintf(stderr, "load_table_sparc: Number of partitions: %d\n", tsk_getu16(vs->endian, dlabel_sp->num_parts)); /* Cycle through the partitions, there are either 8 or 16 */ for (idx = 0; idx < tsk_getu16(vs->endian, dlabel_sp->num_parts); idx++) { TSK_VS_PART_FLAG_ENUM ptype = TSK_VS_PART_FLAG_ALLOC; uint32_t part_start = cyl_conv * tsk_getu32(vs->endian, dlabel_sp->part_layout[idx].start_cyl); uint32_t part_size 
= tsk_getu32(vs->endian, dlabel_sp->part_layout[idx].size_blk); if (tsk_verbose) tsk_fprintf(stderr, "load_table_sparc: %" PRIu32 " Starting Sector: %" PRIu32 " Size: %" PRIu32 " Type: %" PRIu16 "\n", idx, part_start, part_size, tsk_getu16(vs->endian, dlabel_sp->part_meta[idx].type)); if (part_size == 0) continue; // make sure the first couple are in the image bounds if ((idx < 2) && (part_start > max_addr)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_VS_BLK_NUM); tsk_error_set_errstr ("sun_load_sparc: Starting sector too large for image"); return 1; } // set the entry that covers the entire disk image as DESC if ((tsk_getu16(vs->endian, dlabel_sp->part_meta[idx].type) == 5) && (part_start == 0)) ptype = TSK_VS_PART_FLAG_META; /* Add the partition to the internal sorted list */ if (NULL == tsk_vs_part_add(vs, (TSK_DADDR_T) part_start, (TSK_DADDR_T) part_size, ptype, sun_get_desc(tsk_getu16(vs->endian, dlabel_sp->part_meta[idx].type)), -1, idx)) return 1; } return 0; } /* * Process the partition table at the sector address * * This method just finds out if it is sparc or Intel and then * calls the appropriate method * * Return 0 on success and 1 on error */ static uint8_t sun_load_table(TSK_VS_INFO * vs) { sun_dlabel_sparc *dlabel_sp; sun_dlabel_i386 *dlabel_x86; char *buf; ssize_t cnt; TSK_DADDR_T taddr = vs->offset / vs->block_size + SUN_SPARC_PART_SOFFSET; int result = 0; /* Sanity check in case label sizes change */ if ((sizeof(*dlabel_sp) > vs->block_size) || (sizeof(*dlabel_x86) > vs->block_size)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_VS_BUF); tsk_error_set_errstr ("sun_load_table: disk labels bigger than block size"); return 1; } if (tsk_verbose) tsk_fprintf(stderr, "sun_load_table: Trying sector: %" PRIuDADDR "\n", taddr); if ((buf = tsk_malloc(vs->block_size)) == NULL) { goto on_error; } /* Try the given offset */ cnt = tsk_vs_read_block (vs, SUN_SPARC_PART_SOFFSET, buf, vs->block_size); /* If -1 is returned, tsk_errno is already set */ if (cnt != vs->block_size) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_VS_READ); } tsk_error_set_errstr2("SUN Disk Label in Sector: %" PRIuDADDR, taddr); goto on_error; } /* Check the magic value * Both intel and sparc have the magic value in the same location * * We try both in case someone specifies the exact location of the * intel disk label. * */ dlabel_sp = (sun_dlabel_sparc *) buf; dlabel_x86 = (sun_dlabel_i386 *) buf; if (tsk_vs_guessu16(vs, dlabel_sp->magic, SUN_MAGIC) == 0) { if (tsk_getu32(vs->endian, dlabel_sp->sanity) == SUN_SANITY) { result = sun_load_table_sparc(vs, dlabel_sp); // TODO: I assume based on the existing free that the previous function // does not take ownership of buf. free(buf); return result; } else if (tsk_getu32(vs->endian, dlabel_x86->sanity) == SUN_SANITY) { result = sun_load_table_i386(vs, dlabel_x86); // TODO: I assume based on the existing free that the previous function // does not take ownership of buf. 
free(buf); return result; } } /* Now try the next sector, which is where the intel * could be stored */ taddr = vs->offset / vs->block_size / SUN_I386_PART_SOFFSET; if (tsk_verbose) tsk_fprintf(stderr, "sun_load_table: Trying sector: %" PRIuDADDR "\n", taddr + 1); cnt = tsk_vs_read_block (vs, SUN_I386_PART_SOFFSET, buf, vs->block_size); if (cnt != vs->block_size) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_VS_READ); } tsk_error_set_errstr2("SUN (Intel) Disk Label in Sector: %" PRIuDADDR, taddr); goto on_error; } dlabel_x86 = (sun_dlabel_i386 *) buf; if (tsk_vs_guessu16(vs, dlabel_x86->magic, SUN_MAGIC)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_VS_MAGIC); tsk_error_set_errstr("SUN (intel) partition table (Sector: %" PRIuDADDR ") %x", taddr, tsk_getu16(vs->endian, dlabel_sp->magic)); goto on_error; } if (tsk_getu32(vs->endian, dlabel_x86->sanity) != SUN_SANITY) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_VS_MAGIC); tsk_error_set_errstr("SUN (intel) sanity value (Sector: %" PRIuDADDR ") %x", taddr, tsk_getu16(vs->endian, dlabel_sp->magic)); goto on_error; } result = sun_load_table_i386(vs, dlabel_x86); // TODO: I assume based on the existing free that the previous function // does not take ownership of buf. free(buf); return result; on_error: if( buf != NULL ) { free( buf ); } return 1; } static void sun_close(TSK_VS_INFO * vs) { vs->tag = 0; tsk_vs_part_free(vs); free(vs); } TSK_VS_INFO * tsk_vs_sun_open(TSK_IMG_INFO * img_info, TSK_DADDR_T offset) { TSK_VS_INFO *vs; // clean up any errors that are lying around tsk_error_reset(); vs = (TSK_VS_INFO *) tsk_malloc(sizeof(*vs)); if (vs == NULL) return NULL; vs->img_info = img_info; vs->vstype = TSK_VS_TYPE_SUN; vs->tag = TSK_VS_INFO_TAG; vs->offset = offset; /* inititialize settings */ vs->part_list = NULL; vs->part_count = 0; vs->endian = 0; vs->block_size = img_info->sector_size; /* Assign functions */ vs->close = sun_close; /* Load the partitions into the sorted list */ if (sun_load_table(vs)) { sun_close(vs); return NULL; } /* fill in the sorted list with the 'unknown' values */ if (tsk_vs_part_unused(vs)) { sun_close(vs); return NULL; } return vs; } sleuthkit-4.2.0/tsk/vs/tsk_bsd.h000644 000765 000024 00000003212 12576320700 017356 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2003-2011 Brian Carrier. All rights reserved * * ** This software is distributed under the Common Public License 1.0 */ /* * C header file with BSD and internal data structures. 
*/ #ifndef _TSK_BSD_H #define _TSK_BSD_H #ifdef __cplusplus extern "C" { #endif typedef struct { uint8_t magic[4]; uint8_t type[2]; uint8_t sub_type[2]; uint8_t type_name[16]; uint8_t packname[16]; uint8_t sec_size[4]; uint8_t sec_per_tr[4]; uint8_t tr_per_cyl[4]; uint8_t cyl_per_unit[4]; uint8_t sec_per_cyl[4]; uint8_t sec_per_unit[4]; uint8_t spare_per_tr[2]; uint8_t spare_per_cyl[2]; uint8_t alt_per_unit[4]; uint8_t rpm[2]; uint8_t interleave[2]; uint8_t trackskew[2]; uint8_t cylskew[2]; uint8_t headswitch[4]; uint8_t track_seek[4]; uint8_t flags[4]; uint8_t drivedata[20]; uint8_t reserved1[20]; uint8_t magic2[4]; uint8_t checksum[2]; uint8_t num_parts[2]; uint8_t bootarea_size[4]; uint8_t sb_size[4]; struct { uint8_t size_sec[4]; uint8_t start_sec[4]; uint8_t frag_size[4]; uint8_t fstype; uint8_t frag_per_block; uint8_t cyl_per_grp[2]; } part[16]; /* padding to make it a full 512 bytes */ uint8_t reserved2[108]; } bsd_disklabel; #define BSD_MAGIC 0x82564557 #define BSD_PART_SOFFSET 1 #ifdef __cplusplus } #endif #endif sleuthkit-4.2.0/tsk/vs/tsk_dos.h000644 000765 000024 00000002115 12576320700 017374 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2003-2011 Brian Carrier. All rights reserved * * This software is distributed under the Common Public License 1.0 */ /* * C header file with DOS and internal data structures. */ #ifndef _TSK_DOS_H #define _TSK_DOS_H #ifdef __cplusplus extern "C" { #endif typedef struct { uint8_t boot; uint8_t start_chs[3]; uint8_t ptype; uint8_t end_chs[3]; uint8_t start_sec[4]; uint8_t size_sec[4]; } dos_part; /* Boot Sector w/partition table */ typedef struct { uint8_t f1[3]; /* the next three are actually part of NTFS and FAT, but * we use them for sanity checks in the detect code */ char oemname[8]; uint8_t ssize[2]; /* sector size in bytes */ uint8_t csize; /* cluster size in sectors */ uint8_t filler[432]; dos_part ptable[4]; uint8_t magic[2]; } dos_sect; #define DOS_MAGIC 0xaa55 #define DOS_PART_SOFFSET 0 #ifdef __cplusplus } #endif #endif sleuthkit-4.2.0/tsk/vs/tsk_gpt.h000644 000765 000024 00000003534 12576320700 017407 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2004-2011 Brian Carrier. All rights reserved * * This software is distributed under the Common Public License 1.0 */ /* * C header file with GPT and internal data structures. 
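 *
 * The GPT header (gpt_head) is stored in sector 1 of the disk (GPT_HEAD_OFFSET)
 * and is identified by GPT_HEAD_SIG; sector 0 holds a "safety" DOS partition
 * table whose protective entry uses type GPT_DOS_TYPE (0xEE).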
*/ #ifndef _TSK_GPT_H #define _TSK_GPT_H #ifdef __cplusplus extern "C" { #endif /* Partition type in the safety DOS partition table */ #define GPT_PART_SOFFSET 0 #define GPT_DOS_TYPE 0xEE /* This is located in sector 1 of the disk */ #define GPT_HEAD_OFFSET 1 #define GPT_HEAD_SIG 0x5452415020494645ULL typedef struct { uint8_t signature[8]; /* EFI PART */ uint8_t version[4]; uint8_t head_size_b[4]; /* size of partition header */ uint8_t head_crc[4]; /* crc of header */ uint8_t f1[4]; uint8_t head_lba[8]; /* lba of this header */ uint8_t head2_lba[8]; /* lba of second header */ uint8_t partarea_start[8]; /* lba of partition area start */ uint8_t partarea_end[8]; /* lba of partition area end */ uint8_t guid[16]; /* disk GUID */ uint8_t tab_start_lba[8]; /* lba of table start */ uint8_t tab_num_ent[4]; /* num of table entries */ uint8_t tab_size_b[4]; /* size of each table entry */ uint8_t tab_crc[4]; /* crc of table */ uint8_t f2[420]; } gpt_head; /* The location of this is specified in the header - tab_start */ typedef struct { uint8_t type_guid[16]; /* partition type guid */ uint8_t id_guid[16]; /* unique partition GUID */ uint8_t start_lba[8]; /* Starting lba of part */ uint8_t end_lba[8]; /* end lba of part */ uint8_t flags[8]; /* flags */ uint8_t name[72]; /* name in unicode */ } gpt_entry; #ifdef __cplusplus } #endif #endif sleuthkit-4.2.0/tsk/vs/tsk_mac.h000644 000765 000024 00000002303 12576320700 017346 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2003-2011 Brian Carrier. All rights reserved * * This software is distributed under the Common Public License 1.0 */ /* * C header file with Mac and internal data structures. */ #ifndef _TSK_MAC_H #define _TSK_MAC_H #ifdef __cplusplus extern "C" { #endif typedef struct { uint8_t magic[2]; uint8_t reserved[2]; uint8_t pmap_size[4]; uint8_t start_sec[4]; uint8_t size_sec[4]; uint8_t name[32]; uint8_t type[32]; uint8_t data_start_sec[4]; uint8_t data_size_sec[4]; uint8_t status[4]; uint8_t boot_start_sec[4]; uint8_t boot_size_sec[4]; uint8_t boot_load_addr[4]; uint8_t reserved2[4]; uint8_t boot_entry[4]; uint8_t reserved3[4]; uint8_t boot_checksum[4]; uint8_t proc_type[16]; uint8_t reserved4[376]; } mac_part; #define MAC_MAGIC 0x504d #define MAC_PART_SOFFSET 1 #define MAC_STAT_VALID 0x00 #define MAC_STAT_ALLOC 0x01 #define MAC_STAT_INUSE 0x02 #define MAC_STAT_BOOT 0x04 #define MAC_STAT_READ 0x08 #ifdef __cplusplus } #endif #endif sleuthkit-4.2.0/tsk/vs/tsk_sun.h000644 000765 000024 00000005104 12576320700 017415 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2003-2011 Brian Carrier. All rights reserved * * This software is distributed under the Common Public License 1.0 * */ /* * C header file with Sun and internal data structures. 
*/ #ifndef _TSK_SUN_H #define _TSK_SUN_H #ifdef __cplusplus extern "C" { #endif typedef struct { uint8_t asciilabel[128]; /* VTOC */ uint8_t version[4]; uint8_t vol_name[8]; uint8_t num_parts[2]; struct { uint8_t type[2]; uint8_t flag[2]; } part_meta[8]; uint8_t bootinfo[4][3]; uint8_t reserved0[2]; uint8_t sanity[4]; uint8_t reserved1[38]; uint8_t timestamp[8][4]; /* End VTOC */ uint8_t write_reinstruct[2]; uint8_t read_reinstruct[2]; uint8_t reserved2[154]; uint8_t rpm[2]; uint8_t num_ph_cyl[2]; uint8_t alt_per_cyl[2]; uint8_t reserved3[4]; uint8_t interleave[2]; uint8_t num_cyl[2]; uint8_t num_alt_cyl[2]; uint8_t num_head[2]; uint8_t sec_per_tr[2]; uint8_t reserved5[4]; struct { uint8_t start_cyl[4]; uint8_t size_blk[4]; } part_layout[8]; uint8_t magic[2]; uint8_t checksum[2]; } sun_dlabel_sparc; typedef struct { /* VTOC */ uint8_t bootinfo[3][4]; uint8_t sanity[4]; uint8_t version[4]; uint8_t vol_name[8]; uint8_t sec_size[2]; uint8_t num_parts[2]; uint8_t reserved0[40]; struct { uint8_t type[2]; uint8_t flag[2]; uint8_t start_sec[4]; uint8_t size_sec[4]; } part[16]; uint8_t timestamp[16][4]; uint8_t asciilabel[128]; /* END of VTOC */ uint8_t num_ph_cyl[4]; uint8_t num_cyl[4]; uint8_t num_alt_cyl[2]; uint8_t cyl_offset[2]; uint8_t num_head[4]; uint8_t sec_per_tr[4]; uint8_t interleave[2]; uint8_t skew[2]; uint8_t alt_per_cyl[2]; uint8_t rpm[2]; uint8_t write_reinstruct[2]; uint8_t read_reinstruct[2]; uint8_t reserved1[8]; uint8_t reserved2[12]; uint8_t magic[2]; uint8_t checksum[2]; } sun_dlabel_i386; #define SUN_MAGIC 0xDABE #define SUN_SANITY 0x600DDEEE #define SUN_FLAG_UNMNT 0x01 #define SUN_FLAG_RO 0x10 #define SUN_SPARC_PART_SOFFSET 0 #define SUN_I386_PART_SOFFSET 1 #ifdef __cplusplus } #endif #endif sleuthkit-4.2.0/tsk/vs/tsk_vs.h000644 000765 000024 00000041433 12576320700 017245 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2003-2011 Brian Carrier. All rights reserved * * This software is distributed under the Common Public License 1.0 * */ /** * \file tsk_vs.h * External header file for media management (volume system) support. * Note that this file is not meant to be directly included. * It is included by both libtsk.h and tsk_vs_i.h. */ /** * \defgroup vslib C Volume System Functions * \defgroup vslib_cpp C++ Volume System Classes */ #ifndef _TSK_VS_H #define _TSK_VS_H #ifdef __cplusplus extern "C" { #endif /* Structures */ typedef struct TSK_VS_INFO TSK_VS_INFO; typedef struct TSK_VS_PART_INFO TSK_VS_PART_INFO; /** * Definition for callback function that vs_part_walk() calls for * each partition that it walks. * * @param a_vs Pointer to volume system being analyzed * @param a_vs_part Pointer to current partition in the walk * @param a_ptr Pointer that was passed to vs_part_walk by caller * @return Status on whether the vs_part_walk() function should * continue, stop, or error. */ typedef TSK_WALK_RET_ENUM(*TSK_VS_PART_WALK_CB) (TSK_VS_INFO * a_vs, const TSK_VS_PART_INFO * a_vs_part, void *a_ptr); /** * Flags for the partition type. 
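 * The individual volume system types are assigned distinct bits so that
 * tsk_vs_type_supported() can OR the supported types together into one value.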
*/ typedef enum { TSK_VS_TYPE_DETECT = 0x0000, ///< Use autodetection methods TSK_VS_TYPE_DOS = 0x0001, ///< DOS Partition table TSK_VS_TYPE_BSD = 0x0002, ///< BSD Partition table TSK_VS_TYPE_SUN = 0x0004, ///< Sun VTOC TSK_VS_TYPE_MAC = 0x0008, ///< Mac partition table TSK_VS_TYPE_GPT = 0x0010, ///< GPT partition table TSK_VS_TYPE_DBFILLER = 0x00F0, ///< fake partition table type for loaddb (for images that do not have a volume system) TSK_VS_TYPE_UNSUPP = 0xffff, ///< Unsupported } TSK_VS_TYPE_ENUM; /** * Data structure used to store state and basic information * for open volume systems. */ struct TSK_VS_INFO { int tag; ///< \internal Will be set to TSK_VS_INFO_TAG if structure is still allocated, 0 if not TSK_IMG_INFO *img_info; ///< Pointer to disk image that VS is in TSK_VS_TYPE_ENUM vstype; ///< Type of volume system / media management TSK_DADDR_T offset; ///< Byte offset where VS starts in disk image unsigned int block_size; ///< Size of blocks in bytes TSK_ENDIAN_ENUM endian; ///< Endian ordering of data TSK_VS_PART_INFO *part_list; ///< Linked list of partitions TSK_PNUM_T part_count; ///< number of partitions void (*close) (TSK_VS_INFO *); ///< \internal Progs should call tsk_vs_close(). }; #define TSK_VS_INFO_TAG 0x52301642 /*************************************************************** * Generic structures for partitions / slices */ /** * Flag values that describe the partitions in the VS. Refer * to \ref vs_open2 for more details. */ typedef enum { TSK_VS_PART_FLAG_ALLOC = 0x01, ///< Sectors are allocated to a volume in the volume system TSK_VS_PART_FLAG_UNALLOC = 0x02, ///< Sectors are not allocated to a volume TSK_VS_PART_FLAG_META = 0x04, ///< Sectors contain volume system metadata and could also be ALLOC or UNALLOC TSK_VS_PART_FLAG_ALL = 0x07, ///< Show all sectors in the walk. } TSK_VS_PART_FLAG_ENUM; /** * Linked list entry that describes a volume in a generic way. 
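 * Entries are kept sorted by starting sector (see tsk_vs_part_add() in
 * mm_part.c) and the addr field is the 0-based index that tsk_vs_part_get()
 * and tsk_vs_part_walk() use.
 *
 * A minimal, illustrative sketch of enumerating the volumes through the
 * public API (assumes an open TSK_VS_INFO named vs):
 * \code
 *   TSK_PNUM_T i;
 *   for (i = 0; i < vs->part_count; i++) {
 *       const TSK_VS_PART_INFO *p = tsk_vs_part_get(vs, i);
 *       if (p != NULL)
 *           printf("%" PRIuDADDR " %" PRIuDADDR " %s\n", p->start, p->len, p->desc);
 *   }
 * \endcode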
*/ struct TSK_VS_PART_INFO { int tag; TSK_VS_PART_INFO *prev; ///< Pointer to previous partition (or NULL) TSK_VS_PART_INFO *next; ///< Pointer to next partition (or NULL) TSK_VS_INFO *vs; ///< Pointer to parent volume system handle TSK_DADDR_T start; ///< Sector offset of start of partition TSK_DADDR_T len; ///< Number of sectors in partition char *desc; ///< UTF-8 description of partition (volume system type-specific) int8_t table_num; ///< Table address that describes this partition int8_t slot_num; ///< Entry in the table that describes this partition TSK_PNUM_T addr; ///< Address of this partition TSK_VS_PART_FLAG_ENUM flags; ///< Flags for partition }; #define TSK_VS_PART_INFO_TAG 0x40121253 // to and from type ids and names extern TSK_VS_TYPE_ENUM tsk_vs_type_toid(const TSK_TCHAR *); extern TSK_VS_TYPE_ENUM tsk_vs_type_toid_utf8(const char *); extern const char *tsk_vs_type_toname(TSK_VS_TYPE_ENUM); extern const char *tsk_vs_type_todesc(TSK_VS_TYPE_ENUM); extern TSK_VS_TYPE_ENUM tsk_vs_type_supported(); extern void tsk_vs_type_print(FILE *); // open a volume system extern TSK_VS_INFO *tsk_vs_open(TSK_IMG_INFO *, TSK_DADDR_T, TSK_VS_TYPE_ENUM); extern void tsk_vs_close(TSK_VS_INFO *); // read data in the volume system extern ssize_t tsk_vs_read_block(TSK_VS_INFO * a_vs, TSK_DADDR_T a_addr, char *buf, size_t len); // open a partition extern const TSK_VS_PART_INFO *tsk_vs_part_get(const TSK_VS_INFO *, TSK_PNUM_T idx); extern uint8_t tsk_vs_part_walk(TSK_VS_INFO * vs, TSK_PNUM_T start, TSK_PNUM_T last, TSK_VS_PART_FLAG_ENUM flags, TSK_VS_PART_WALK_CB action, void *ptr); // read data in partitions extern ssize_t tsk_vs_part_read(const TSK_VS_PART_INFO * a_vs_part, TSK_OFF_T a_off, char *buf, size_t len); extern ssize_t tsk_vs_part_read_block(const TSK_VS_PART_INFO * a_vs_part, TSK_DADDR_T a_addr, char *buf, size_t len); #ifdef __cplusplus } #endif #ifdef __cplusplus class TskVsInfo; class TskVsPartInfo; /** * Definition for callback function that vs_part_walk() calls for * each partition that it walks. * * @param a_vs Pointer to volume system being analyzed * @param a_vs_part Pointer to current partition in the walk * @param a_ptr Pointer that was passed to vs_part_walk by caller * @return Status on whether the vs_part_walk() function should * continue, stop, or error. */ typedef TSK_WALK_RET_ENUM(*TSK_VS_PART_WALK_CPP_CB) (TskVsInfo * a_vs, const TskVsPartInfo * a_vs_part, void *a_ptr); /** \internal * Internal structure to pass C++ volume system part walk data into C block walk call back. */ typedef struct { TSK_VS_PART_WALK_CPP_CB cppAction; // pointer C++ callback void *cPtr; // pointer to data that was passed into C++ walk method } TSK_VS_PART_WALK_CPP_DATA; /** \internal * Internal function used to call C++ Block Walk callback from C callback. */ extern TSK_WALK_RET_ENUM tsk_vs_part_walk_cpp_c_cb(TSK_VS_INFO * a_vs, const TSK_VS_PART_INFO * a_vs_part, void *a_ptr); /** * \ingroup vslib_cpp * Stores information about a volume / partition inside of an open volume * system. */ class TskVsPartInfo { friend class TskFsInfo; private: TSK_VS_PART_INFO * m_vsPartInfo; TskVsPartInfo(const TskVsPartInfo & rhs); TskVsPartInfo & operator=(const TskVsPartInfo & rhs); public: /** * Create an object from its C struct. * @param a_vsPartInfo Pointer to C struct for partition. If NULL, the * remaining getX() methods will be undefined. 
*/ TskVsPartInfo(TSK_VS_PART_INFO * a_vsPartInfo) { m_vsPartInfo = a_vsPartInfo; }; /** * Reads data starting at a byte address relative to the start of a VOLUME in a volume system. * See tsk_vs_part_read() for details. * @param a_off Byte offset to read from, relative to start of VOLUME in volume system. * @param a_buf Buffer to store data in * @param a_len Amount of data to read (in bytes) * @return Number of bytes read or -1 on error */ ssize_t read(TSK_OFF_T a_off, char *a_buf, size_t a_len) { if (m_vsPartInfo != NULL) return tsk_vs_part_read(m_vsPartInfo, a_off, a_buf, a_len); else return 0; }; /** * Reads one or more blocks of data with an address relative to the start of a VOLUME in a volume system. * See tsk_vs_part_read_block() for details. * @param a_addr Block address to start reading from, relative to start of VOLUME in volume system. * @param a_buf Buffer to store data in * @param a_len Amount of data to read (in bytes - must be a multiple of block_size) * @return Number of bytes read or -1 on error */ ssize_t readBlock(TSK_DADDR_T a_addr, char *a_buf, size_t a_len) { if (m_vsPartInfo != NULL) return tsk_vs_part_read_block(m_vsPartInfo, a_addr, a_buf, a_len); else return 0; }; /** * Return sector offset of start of partition * @return sector offset of start of partition */ TSK_DADDR_T getStart() const { if (m_vsPartInfo != NULL) return m_vsPartInfo->start; else return 0; }; /** * Return number of sectors in partition * @return number of sectors in partition */ TSK_DADDR_T getLen() const { if (m_vsPartInfo != NULL) return m_vsPartInfo->len; else return 0; }; /** * Return UTF-8 description of partition (volume system type-specific) * @return description of partition */ const char *getDesc() const { if (m_vsPartInfo != NULL) return m_vsPartInfo->desc; else return NULL; }; /** * Return table address that describes this partition * @return table address that describes this partition */ int8_t getTableNum() const { if (m_vsPartInfo != NULL) return m_vsPartInfo->table_num; else return 0; }; /** * Return entry in the table that describes this partition * @return entry in the table that describes this partition */ int8_t getSlotNum() const { if (m_vsPartInfo != NULL) return m_vsPartInfo->slot_num; else return 0; }; /** * Return address of this partition * @return address of this partition */ TSK_PNUM_T getAddr() const { if (m_vsPartInfo != NULL) return m_vsPartInfo->addr; else return 0; }; /** * Return flags for partition * @return flags for partition */ TSK_VS_PART_FLAG_ENUM getFlags() const { if (m_vsPartInfo != NULL) return m_vsPartInfo->flags; else return (TSK_VS_PART_FLAG_ENUM) 0; }; }; /** * \ingroup vslib_cpp * Stores information about an open volume system. * To use this object, open() should be called first. */ class TskVsInfo { private: TSK_VS_INFO * m_vsInfo; bool m_opened; // true if open() was called and we need to free it TskVsInfo(const TskVsInfo & rhs); TskVsInfo & operator=(const TskVsInfo & rhs); public: TskVsInfo(TSK_VS_INFO * a_vsInfo) { m_vsInfo = a_vsInfo; m_opened = false; }; TskVsInfo() { m_vsInfo = NULL; m_opened = false; }; ~TskVsInfo() { close(); }; /** * Walk a range of partitions and pass the data to a callback function. * See tsk_vs_part_walk() for details. * @param a_start Address of first partition to walk from. * @param a_last Address of last partition to walk to. * @param a_flags Flags that are used to identify which of the partitions in the range should be returned (if 0, all partitions will be returned). 
* @param a_action Callback action to call for each partition. * @param a_ptr Pointer to data that will be passed to callback. * @return 1 on error and 0 on success */ uint8_t vsPartWalk(TSK_PNUM_T a_start, TSK_PNUM_T a_last, TSK_VS_PART_FLAG_ENUM a_flags, TSK_VS_PART_WALK_CPP_CB a_action, void *a_ptr) { TSK_VS_PART_WALK_CPP_DATA vsPartData; vsPartData.cppAction = a_action; vsPartData.cPtr = a_ptr; return tsk_vs_part_walk(m_vsInfo, a_start, a_last, a_flags, tsk_vs_part_walk_cpp_c_cb, &vsPartData); }; /** * Open a disk image and process the media management system * data. See tsk_vs_open() for details. * * @param a_imgInfo The opened disk image. * @param a_offset Byte offset in the disk image to start analyzing from. * @param a_type Type of volume system (including auto detect) * * @return 1 on error and 0 on success. */ uint8_t open(TskImgInfo * a_imgInfo, TSK_DADDR_T a_offset, TSK_VS_TYPE_ENUM a_type) { if ((m_vsInfo = tsk_vs_open(a_imgInfo->m_imgInfo, a_offset, a_type)) != NULL) { m_opened = true; return 0; } else { return 1; } }; /** * Reads one or more blocks of data with an address relative to the start of the volume system. * See tsk_vs_read_block() for details. * @param a_addr Sector address to read from, relative to start of VOLUME SYSTEM. * @param a_buf Buffer to store data in * @param a_len Amount of data to read (in bytes - must be a multiple of block_size) * @return Number of bytes read or -1 on error */ ssize_t readBlock(TSK_DADDR_T a_addr, char *a_buf, size_t a_len) { if (m_vsInfo != NULL) return tsk_vs_read_block(m_vsInfo, a_addr, a_buf, a_len); else return 0; }; /** * Closes an open volume system. See for tsk_vs_close() details. */ void close() { if ((m_vsInfo) && (m_opened)) tsk_vs_close(m_vsInfo); m_vsInfo = NULL; }; /** * Return the byte offset where volume system starts in disk image * @return byte offset */ TSK_DADDR_T getOffset() const { if (m_vsInfo != NULL) return m_vsInfo->offset; else return 0; }; /** * Return size of volume system blocks in bytes * @return size of a block in bytes */ unsigned int getBlockSize() const { if (m_vsInfo != NULL) return m_vsInfo->block_size; else return 0; }; /** * Return number of partitions * @return number of partitions */ TSK_PNUM_T getPartCount() const { if (m_vsInfo != NULL) return m_vsInfo->part_count; else return 0; }; /** * Get reference to a volume in the volume system. * See tsk_vs_part_get() for details. * @param a_idx Index for volume to return (0-based) * @return Pointer to partition or NULL on error. Caller is responsible for freeing object. */ const TskVsPartInfo *getPart(TSK_PNUM_T a_idx) const { // @@@ Error handling. return new TskVsPartInfo(const_cast < TSK_VS_PART_INFO * >(tsk_vs_part_get(m_vsInfo, a_idx))); }; /** * Get a reference to the parent image object. * @return Pointer to object or NULL on error. Caller is responsible for freeing object. */ const TskImgInfo *getImgInfo() const { if (m_vsInfo == NULL) return 0; return new TskImgInfo(m_vsInfo->img_info); }; /** * Return type of volume system / media management * @return type of volume system / media management */ TSK_VS_TYPE_ENUM getVsType() const { if (m_vsInfo != NULL) return m_vsInfo->vstype; else return (TSK_VS_TYPE_ENUM) 0; }; /** * Parse a string with the volume system type and return its internal ID. * See tsk_vs_type_toid() for details. * @param a_str String to parse. 
* @return ID of string (or unsupported if the name is unknown) */ static TSK_VS_TYPE_ENUM typeToId(const TSK_TCHAR * a_str) { return tsk_vs_type_toid(a_str); }; /** * Print the supported volume system type names to an open handle. * See tsk_vs_type_print() for details. * @param a_hFile Handle to print to. */ static void typePrint(FILE * a_hFile) { tsk_vs_type_print(a_hFile); }; /** * Return the supported volume system types. * See tsk_vs_type_supported() for details. * @return The bit in the return value is 1 if the type is supported. */ static TSK_VS_TYPE_ENUM typeSupported() { return tsk_vs_type_supported(); }; /** * Return the string name of a partition type ID. * See tsk_vs_type_toname() for details. * @param a_type Volume system type * @return name of type or NULL on error */ static const char *typeToName(TSK_VS_TYPE_ENUM a_type) { return tsk_vs_type_toname(a_type); }; /** * Return the string description of a partition type ID. * See tsk_vs_type_todesc() for details. * @param a_type Volume system type * @return description of type or NULL on error */ static const char *typeToDesc(TSK_VS_TYPE_ENUM a_type) { return tsk_vs_type_todesc(a_type); }; }; #endif #endif sleuthkit-4.2.0/tsk/vs/tsk_vs_i.h000644 000765 000024 00000002625 12576320700 017555 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2003-2011 Brian Carrier. All rights reserved * * This software is distributed under the Common Public License 1.0 */ /** * \file tsk_vs_i.h * Contains the internal library definitions for the volume system functions. This should * be included by the code in the volume system library. */ #ifndef _TSK_VS_I_H #define _TSK_VS_I_H // Include the other internal TSK header files #include "tsk/base/tsk_base_i.h" #include "tsk/img/tsk_img_i.h" // include the external vs header file #include "tsk_vs.h" #include extern TSK_VS_INFO *tsk_vs_dos_open(TSK_IMG_INFO *, TSK_DADDR_T, uint8_t); extern TSK_VS_INFO *tsk_vs_mac_open(TSK_IMG_INFO *, TSK_DADDR_T); extern TSK_VS_INFO *tsk_vs_bsd_open(TSK_IMG_INFO *, TSK_DADDR_T); extern TSK_VS_INFO *tsk_vs_sun_open(TSK_IMG_INFO *, TSK_DADDR_T); extern TSK_VS_INFO *tsk_vs_gpt_open(TSK_IMG_INFO *, TSK_DADDR_T); extern uint8_t tsk_vs_part_unused(TSK_VS_INFO *); extern TSK_VS_PART_INFO *tsk_vs_part_add(TSK_VS_INFO *, TSK_DADDR_T, TSK_DADDR_T, TSK_VS_PART_FLAG_ENUM, char *, int8_t, int8_t); extern void tsk_vs_part_free(TSK_VS_INFO *); // Endian macros - actual functions in misc/ #define tsk_vs_guessu16(vs, x, mag) \ tsk_guess_end_u16(&(vs->endian), (x), (mag)) #define tsk_vs_guessu32(vs, x, mag) \ tsk_guess_end_u32(&(vs->endian), (x), (mag)) #endif sleuthkit-4.2.0/tsk/sorter/default.sort000644 000765 000024 00000007657 12576320700 021020 0ustar00carrierstaff000000 000000 # # default.sort # default config file for Sleuth Kit sorter # # These settings have the lowest priority of all config files # # It is used for ALL platform types though # # # Category # If the keyword is found in the 'file' output, then the data is saved # to either the summary file or even copied if the appropriate flags are # given # # category cat_name keywords # # # Extension # If the keywords are found in the 'file' output, and the file extension # is different than then the one on the file, an alert is generated # 'somewhere' # ext ext1,ext2,ext3 keywords ########################################################################## # Multimedia ########################################################################## # Audio category 
audio audio category audio MIDI ext mid,rmi MIDI category audio MP3 ext mp3 MP3 # Images category images image data ext jpg,jpeg,jpe JPEG image data ext gif GIF image data ext tif TIFF image data ext png PNG image data category images bitmap data ext bmp PC bitmap data category images font ext ttf true type font # Video category video RealMedia ext rm RealMedia ########################################################################## # archive & compression ########################################################################## # archive category archive archive ext zip,jar Zip archive data ext tar tar archive category archive DB ext db Berkeley DB # compression category compress compress ext gz,tgz gzip compressed data ext Z compress'd data ########################################################################## # Executables ########################################################################## # Execs category exec executable category exec \sscript # the above can cause errors with postscript and transcript category exec batch file # NOTE: Some windows binaries have the term "executable not relocatable" # which will trigger on this when it should trigger on executable category exec relocatable # Java category exec class data ext class Java class data category exec object ext o object category exec python compiled ########################################################################## # Documents, ########################################################################## category documents document # Microsoft ext doc,dot,ppt,pot,xls,xlt,msc,pcb Microsoft Office Document category documents Rich Text Format ext rtf Rich Text Format # Corel & Word Perfect category documents Corel\/WP ext wpg,wpd,shw Corel\/WP # Lotus category documents Lotus 1\-2\-3 ext wb2 Lotus 1\-2\-3 # Adobe ext pdf PDF document ext ps,eps PostScript document ########################################################################## # Text ########################################################################## category text ASCII(.*?)text ext txt,log ASCII(.*?)text ext c,cpp,h,js ASCII(.*?)text ext sh,csh ASCII(.*?)text ext conf ASCII(.*?)text category text character data ext txt character data category text ISO\-8859(.*?)text ext txt ISO\-8859(.*?)text category text HTML document text ext htm,html,hta HTML document text category text program text ext c,cpp,h,js program text category text \ssource ########################################################################## # Other ########################################################################## # Disk category disk boot sector category disk filesystem data # Crypto category crypto PGP ext asc PGP armored # Postscript Printer Description category system PPD file ext ppd PPD file # 'file' reports 'data' for all unknown binary files # do not bother with extensions with this category data ^data$ sleuthkit-4.2.0/tsk/sorter/freebsd.sort000644 000765 000024 00000002575 12576320700 021000 0ustar00carrierstaff000000 000000 # # config file for Sleuth Kit sorter # # FreeBSD Platform # # To make custom modifications, you can also use a file named # freebsd.lcl.sort ########################################################################## # Multimedia ########################################################################## # Images # Audio # video ########################################################################## # archive & compression ########################################################################## 
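# The generic zip/tar/gzip rules in default.sort already cover this section,
# so no FreeBSD-specific archive rules are defined here.  As an illustrative
# example only (the keyword must match whatever the local file(1) command
# reports), a site could add a rule for xz data to freebsd.lcl.sort:
#
#category      compress        XZ compressed data
#ext           xz              XZ compressed data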
########################################################################## # Executables & Source Code ########################################################################## # object & relocatable are already a category in default ext so,0,1,2,3,4,5,6,7,8,9 shared object #ext o relocatable #category exec library #ext so library #category exec ar archive #ext a ar archive ########################################################################## # Documents, text, and internet (web) ########################################################################## # Text #ext afm ASCII font metrics ext conf ASCII(.*?)text ########################################################################## # Other ########################################################################## category system pixmap image text ext xpm pixmap image text sleuthkit-4.2.0/tsk/sorter/images.sort000644 000765 000024 00000001255 12576320700 020625 0ustar00carrierstaff000000 000000 # # images.sort # Save Images only # config file for Sleuth Kit sorter # # Category # If the keyword is found in the 'file' output, then the data is saved # to either the summary file or even copied if the appropriate flags are # given # # category cat_name keywords # # # Extension # If the keywords are found in the 'file' output, and the file extension # is different than then the one on the file, an alert is generated # 'somewhere' # ext ext1,ext2,ext3 keywords # Images category images image data ext jpg,jpeg,jpe JPEG image data ext gif GIF image data ext tif TIFF image data ext png PNG image data category images bitmap data ext bmp PC bitmap data sleuthkit-4.2.0/tsk/sorter/linux.sort000644 000765 000024 00000002512 12576320700 020514 0ustar00carrierstaff000000 000000 # # config file for Sleuth Kit sorter # # Linux Platform # # To make custom modifications, you can also use a file named # linux.lcl.sort ########################################################################## # Multimedia ########################################################################## # Images # Audio # video ########################################################################## # archive & compression ########################################################################## ########################################################################## # Executables & Source Code ########################################################################## # object and relocatable are already a category in default ext so,0,1,2,3,4,5,6,7,8,9 shared object ext o relocatable #category exec library #ext so library #category exec ar archive #ext a ar archive ########################################################################## # Documents, text, and internet (web) ########################################################################## # Text #ext afm ASCII font metrics category text input text category text TeX DVI ########################################################################## # Other ########################################################################## sleuthkit-4.2.0/tsk/sorter/openbsd.sort000644 000765 000024 00000002575 12576320700 021020 0ustar00carrierstaff000000 000000 # # config file for Sleuth Kit sorter # # OpenBSD Platform # # To make custom modifications, you can also use a file named # openbsd.lcl.sort ########################################################################## # Multimedia ########################################################################## # Images # Audio # video 
########################################################################## # archive & compression ########################################################################## ########################################################################## # Executables & Source Code ########################################################################## # object & relocatable are already a category in default ext so,0,1,2,3,4,5,6,7,8,9 shared object #ext o relocatable #category exec library #ext so library #category exec ar archive #ext a ar archive ########################################################################## # Documents, text, and internet (web) ########################################################################## # Text #ext afm ASCII font metrics ext conf ASCII(.*?)text ########################################################################## # Other ########################################################################## category system pixmap image text ext xpm pixmap image text sleuthkit-4.2.0/tsk/sorter/solaris.sort000644 000765 000024 00000002425 12576320700 021034 0ustar00carrierstaff000000 000000 # # config file for Sleuth Kit sorter # # Solaris Platform # # To make custom modifications, you can also use a file named # solaris.lcl.sort ########################################################################## # Multimedia ########################################################################## # Images # Audio # video ########################################################################## # archive & compression ########################################################################## ########################################################################## # Executables & Source Code ########################################################################## # object & relocatable are already a category in default ext so,0,1,2,3,4,5,6,7,8,9 shared object ext o relocatable category exec library ext so library category exec ar archive ext a ar archive ########################################################################## # Documents, text, and internet (web) ########################################################################## # Text ext afm ASCII font metrics ########################################################################## # Other ########################################################################## sleuthkit-4.2.0/tsk/sorter/windows.sort000644 000765 000024 00000005146 12576320700 021055 0ustar00carrierstaff000000 000000 # # config file for Sleuth Kit sorter # # Windows Platform # # To make custom modifications, you can also use a file named # windows.lcl.sort ########################################################################## # Multimedia ########################################################################## # Images category images icon resource ext ico ms\-windows icon resource category images animated cursor ext ani animated cursor # It seems that a lot of ttf files come up as raw G3 data ... 
# category ignore raw G3 data, byte\-padded # Audio ext wav WAVE audio category audio Winamp ext avs Winamp plug in category audio AVI ext avi AVI # Video ########################################################################## # archive & compression ########################################################################## # archive category archive cabinet file data ext cab cabinet file data ext wmz Zip archive data # compression ########################################################################## # Executables ########################################################################## # execs ext exe,dll,com,ocx,sys,tlb,drv,cpl,scr,ax MS\-DOS executable ext 386,acm,flt,fon,lrc,vxd,x32 MS\-DOS executable category exec Windows PE ext exe,dll,com,ocx,sys,wpc,acm,cpl Windows PE ext dll relocatable category exec batch file ext bat batch file # source code category exec MSVC program database ext pdb MSVC program database ########################################################################## # Documents ########################################################################## category documents Outlook binary email folder ext pst Outlook binary email folder ########################################################################## # Text ########################################################################## ext ini,inf,srg,dep ASCII(.*?)text ext ini,inf ISO\-8859(.*?)text ########################################################################## # Other ########################################################################## # System category system Help Data ext hlp Windows Help Data category system Registry file ext dat,log,sav Registry file category system ms\-Windows shortcut ext lnk ms\-Windows shortcut category system Internet shortcut ext url Internet shortcut category system hyperterm ext ht hyperterm # Image Color Matching Profile category system Color Management System ext icm Color Management System sleuthkit-4.2.0/tsk/img/.indent.pro000644 000765 000024 00000000037 12576320700 017763 0ustar00carrierstaff000000 000000 -kr -psl -nce -ip2 -nlp -nut sleuthkit-4.2.0/tsk/img/aff.c000644 000765 000024 00000024203 12576320700 016603 0ustar00carrierstaff000000 000000 /* * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2006-2011 Brian Carrier, Basis Technology. 
All rights reserved * * This software is distributed under the Common Public License 1.0 */ /** * \file aff.c * Internal code to interface with afflib to read and open AFF image files */ #include "tsk_img_i.h" #if HAVE_LIBAFFLIB typedef int bool; #include "aff.h" /* Note: The routine -assumes- we are under a lock on &(img_info->cache_lock)) */ static ssize_t aff_read(TSK_IMG_INFO * img_info, TSK_OFF_T offset, char *buf, size_t len) { ssize_t cnt; IMG_AFF_INFO *aff_info = (IMG_AFF_INFO *) img_info; if (tsk_verbose) tsk_fprintf(stderr, "aff_read: byte offset: %" PRIuOFF " len: %" PRIuOFF "\n", offset, len); if (offset > img_info->size) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_IMG_READ_OFF); tsk_error_set_errstr("aff_read - %" PRIuOFF, offset); return -1; } if (aff_info->seek_pos != offset) { if (af_seek(aff_info->af_file, offset, SEEK_SET) != offset) { tsk_error_reset(); // @@@ ADD more specific error messages tsk_error_set_errno(TSK_ERR_IMG_SEEK); tsk_error_set_errstr("aff_read - %" PRIuOFF " - %s", offset, strerror(errno)); return -1; } aff_info->seek_pos = offset; } cnt = af_read(aff_info->af_file, (unsigned char *) buf, len); if (cnt < 0) { // @@@ Add more specific error message tsk_error_reset(); tsk_error_set_errno(TSK_ERR_IMG_READ); tsk_error_set_errstr("aff_read - offset: %" PRIuOFF " - len: %" PRIuSIZE " - %s", offset, len, strerror(errno)); return -1; } /* AFF will return 0 if the page does not exist -- fill the * buffer with zeros in this case */ if (cnt == 0) { // @@@ We could improve this if there is an AFF call // to see if the data exists or not if ((af_eof(aff_info->af_file) == 0) && (offset + len < img_info->size)) { memset(buf, 0, len); cnt = len; } } aff_info->seek_pos += cnt; return cnt; } static void aff_imgstat(TSK_IMG_INFO * img_info, FILE * hFile) { IMG_AFF_INFO *aff_info = (IMG_AFF_INFO *) img_info; unsigned char buf[512]; size_t buf_len = 512; tsk_fprintf(hFile, "IMAGE FILE INFORMATION\n"); tsk_fprintf(hFile, "--------------------------------------------\n"); tsk_fprintf(hFile, "Image Type: "); switch (aff_info->type) { case AF_IDENTIFY_AFF: tsk_fprintf(hFile, "AFF\n"); break; case AF_IDENTIFY_AFD: tsk_fprintf(hFile, "AFD\n"); break; case AF_IDENTIFY_AFM: tsk_fprintf(hFile, "AFM\n"); break; default: tsk_fprintf(hFile, "AFFLIB (%d)\n", aff_info->type); break; } tsk_fprintf(hFile, "\nSize in bytes: %" PRIuOFF "\n", img_info->size); // we won't have the rest of the info for the non-AFF formats. 
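    // (aff_open() sets itype to TSK_IMG_TYPE_AFF_ANY when af_identify_file_type()
    // reports something other than AFF, AFD, or AFM, so the AF_* metadata
    // segments queried below may not be present.)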
if (img_info->itype == TSK_IMG_TYPE_AFF_ANY) return; tsk_fprintf(hFile, "\nMD5: "); if (af_get_seg(aff_info->af_file, AF_MD5, NULL, buf, &buf_len) == 0) { int i; for (i = 0; i < 16; i++) { tsk_fprintf(hFile, "%x", buf[i]); } tsk_fprintf(hFile, "\n"); } else { tsk_fprintf(hFile, "Segment not found\n"); } buf_len = 512; tsk_fprintf(hFile, "SHA1: "); if (af_get_seg(aff_info->af_file, AF_SHA1, NULL, buf, &buf_len) == 0) { int i; for (i = 0; i < 20; i++) { tsk_fprintf(hFile, "%x", buf[i]); } tsk_fprintf(hFile, "\n"); } else { tsk_fprintf(hFile, "Segment not found\n"); } /* Creator segment */ buf_len = 512; if (af_get_seg(aff_info->af_file, AF_CREATOR, NULL, buf, &buf_len) == 0) { buf[buf_len] = '\0'; tsk_fprintf(hFile, "Creator: %s\n", buf); } buf_len = 512; if (af_get_seg(aff_info->af_file, AF_CASE_NUM, NULL, buf, &buf_len) == 0) { buf[buf_len] = '\0'; tsk_fprintf(hFile, "Case Number: %s\n", buf); } buf_len = 512; if (af_get_seg(aff_info->af_file, AF_IMAGE_GID, NULL, buf, &buf_len) == 0) { unsigned int i; tsk_fprintf(hFile, "Image GID: "); for (i = 0; i < buf_len; i++) { tsk_fprintf(hFile, "%X", buf[i]); } tsk_fprintf(hFile, "\n"); } buf_len = 512; if (af_get_seg(aff_info->af_file, AF_ACQUISITION_DATE, NULL, buf, &buf_len) == 0) { buf[buf_len] = '\0'; tsk_fprintf(hFile, "Acquisition Date: %s\n", buf); } buf_len = 512; if (af_get_seg(aff_info->af_file, AF_ACQUISITION_NOTES, NULL, buf, &buf_len) == 0) { buf[buf_len] = '\0'; tsk_fprintf(hFile, "Acquisition Notes: %s\n", buf); } buf_len = 512; if (af_get_seg(aff_info->af_file, AF_ACQUISITION_DEVICE, NULL, buf, &buf_len) == 0) { buf[buf_len] = '\0'; tsk_fprintf(hFile, "Acquisition Device: %s\n", buf); } buf_len = 512; if (af_get_seg(aff_info->af_file, AF_AFFLIB_VERSION, NULL, buf, &buf_len) == 0) { buf[buf_len] = '\0'; tsk_fprintf(hFile, "AFFLib Version: %s\n", buf); } buf_len = 512; if (af_get_seg(aff_info->af_file, AF_DEVICE_MANUFACTURER, NULL, buf, &buf_len) == 0) { buf[buf_len] = '\0'; tsk_fprintf(hFile, "Device Manufacturer: %s\n", buf); } buf_len = 512; if (af_get_seg(aff_info->af_file, AF_DEVICE_MODEL, NULL, buf, &buf_len) == 0) { buf[buf_len] = '\0'; tsk_fprintf(hFile, "Device Model: %s\n", buf); } buf_len = 512; if (af_get_seg(aff_info->af_file, AF_DEVICE_SN, NULL, buf, &buf_len) == 0) { buf[buf_len] = '\0'; tsk_fprintf(hFile, "Device SN: %s\n", buf); } return; } static void aff_close(TSK_IMG_INFO * img_info) { IMG_AFF_INFO *aff_info = (IMG_AFF_INFO *) img_info; af_close(aff_info->af_file); tsk_img_free(aff_info); } TSK_IMG_INFO * aff_open(const TSK_TCHAR * const images[], unsigned int a_ssize) { IMG_AFF_INFO *aff_info; TSK_IMG_INFO *img_info; int type; char *image; #ifdef TSK_WIN32 // convert wchar_t* image path to char* to conform to // the AFFLIB API UTF16 *utf16 = (UTF16 *) images[0]; size_t ilen = wcslen(utf16); size_t olen = ilen * 4 + 1; UTF8 *utf8 = (UTF8 *) tsk_malloc(olen); image = (char *) utf8; if (image == NULL) return NULL; TSKConversionResult retval = tsk_UTF16toUTF8_lclorder((const UTF16 **) &utf16, &utf16[ilen], &utf8, &utf8[olen], TSKlenientConversion); *utf8 = '\0'; if (retval != TSKconversionOK) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNICODE); tsk_error_set_errstr("aff_open file: %" PRIttocTSK ": Error converting path to UTF-8 %d\n", images[0], retval); free(image); return NULL; } utf8 = (UTF8 *) image; while (*utf8) { if (*utf8 > 127) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNICODE); tsk_error_set_errstr("aff_open file: %" PRIttocTSK ": Non-Latin paths are not supported for AFF images\n", 
images[0]); free(image); return NULL; } utf8++; } #else image = (char *) tsk_malloc(strlen(images[0]) + 1); if (image == NULL) return NULL; strncpy(image, images[0], strlen(images[0]) + 1); #endif if ((aff_info = (IMG_AFF_INFO *) tsk_img_malloc(sizeof(IMG_AFF_INFO))) == NULL) { free(image); return NULL; } img_info = (TSK_IMG_INFO *) aff_info; img_info->read = aff_read; img_info->close = aff_close; img_info->imgstat = aff_imgstat; img_info->sector_size = 512; if (a_ssize) img_info->sector_size = a_ssize; type = af_identify_file_type(image, 1); if ((type == AF_IDENTIFY_ERR) || (type == AF_IDENTIFY_NOEXIST)) { if (tsk_verbose) { tsk_fprintf(stderr, "aff_open: Error determining type of file: %" PRIttocTSK "\n", images[0]); perror("aff_open"); } tsk_error_reset(); tsk_error_set_errno(TSK_ERR_IMG_OPEN); tsk_error_set_errstr("aff_open file: %" PRIttocTSK ": Error checking type", images[0]); tsk_img_free(aff_info); free(image); return NULL; } else if (type == AF_IDENTIFY_AFF) { img_info->itype = TSK_IMG_TYPE_AFF_AFF; } else if (type == AF_IDENTIFY_AFD) { img_info->itype = TSK_IMG_TYPE_AFF_AFD; } else if (type == AF_IDENTIFY_AFM) { img_info->itype = TSK_IMG_TYPE_AFF_AFM; } else { img_info->itype = TSK_IMG_TYPE_AFF_ANY; } aff_info->af_file = af_open(image, O_RDONLY | O_BINARY, 0); if (!aff_info->af_file) { // @@@ Need to check here if the open failed because of an incorrect password. tsk_error_reset(); tsk_error_set_errno(TSK_ERR_IMG_OPEN); tsk_error_set_errstr("aff_open file: %" PRIttocTSK ": Error opening - %s", images[0], strerror(errno)); tsk_img_free(aff_info); if (tsk_verbose) { tsk_fprintf(stderr, "Error opening AFF/AFD/AFM file\n"); perror("aff_open"); } free(image); return NULL; } // verify that a password was given and we can read encrypted data. if (af_cannot_decrypt(aff_info->af_file)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_IMG_PASSWD); tsk_error_set_errstr("aff_open file: %" PRIttocTSK, images[0]); tsk_img_free(aff_info); if (tsk_verbose) { tsk_fprintf(stderr, "Error opening AFF/AFD/AFM file (incorrect password)\n"); } free(image); return NULL; } aff_info->type = type; img_info->size = af_imagesize(aff_info->af_file); af_seek(aff_info->af_file, 0, SEEK_SET); aff_info->seek_pos = 0; free(image); return img_info; } #endif sleuthkit-4.2.0/tsk/img/aff.h000644 000765 000024 00000001674 12576320700 016617 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2005-2011 Brian Carrier. All rights reserved * * This software is distributed under the Common Public License 1.0 */ /* * Header files for AFF-specific data structures and functions. 
*/ #ifndef _AFF_H #define _AFF_H #if HAVE_LIBAFFLIB #include // mingw's pthread.h will try to read a config.h if HAVE_CONFIG_H #if HAVE_CONFIG_H #undef HAVE_CONFIG_H #include #define HAVE_CONFIG_H 1 #else #include #endif extern TSK_IMG_INFO *aff_open(const TSK_TCHAR * const images[], unsigned int a_ssize); /** \internal * Stores AFF-specific data */ typedef struct { TSK_IMG_INFO img_info; AFFILE *af_file; TSK_OFF_T seek_pos; // shared and protected by cache_lock in IMG_INFO uint16_t type; /* TYPE - uses AF_IDENTIFY_x values */ } IMG_AFF_INFO; #endif #endif sleuthkit-4.2.0/tsk/img/ewf.c000644 000765 000024 00000035613 12576320700 016637 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit - Add on for Expert Witness Compression Format (EWF) image support * * Copyright (c) 2006, 2011 Joachim Metz * * This software is distributed under the Common Public License 1.0 * * Based on raw image support of the Sleuth Kit from * Brian Carrier. */ /** \file ewf.c * Internal code for TSK to interface with libewf. */ #include "tsk_img_i.h" #if HAVE_LIBEWF #include "ewf.h" #define TSK_EWF_ERROR_STRING_SIZE 512 #if defined( HAVE_LIBEWF_V2_API ) /** * Get error string from libewf and make buffer emtpy if that didn't work. * @returns 1 if error message was not set */ static uint8_t getError(libewf_error_t * ewf_error, char error_string[TSK_EWF_ERROR_STRING_SIZE]) { int retval; error_string[0] = '\0'; retval = libewf_error_backtrace_sprint(ewf_error, error_string, TSK_EWF_ERROR_STRING_SIZE); if (retval) return 1; return 0; } #endif static ssize_t ewf_image_read(TSK_IMG_INFO * img_info, TSK_OFF_T offset, char *buf, size_t len) { #if defined( HAVE_LIBEWF_V2_API ) char error_string[TSK_EWF_ERROR_STRING_SIZE]; libewf_error_t *ewf_error = NULL; #endif ssize_t cnt; IMG_EWF_INFO *ewf_info = (IMG_EWF_INFO *) img_info; if (tsk_verbose) tsk_fprintf(stderr, "ewf_image_read: byte offset: %" PRIuOFF " len: %" PRIuSIZE "\n", offset, len); if (offset > img_info->size) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_IMG_READ_OFF); tsk_error_set_errstr("ewf_image_read - %" PRIuOFF, offset); return -1; } tsk_take_lock(&(ewf_info->read_lock)); #if defined( HAVE_LIBEWF_V2_API ) cnt = libewf_handle_read_random(ewf_info->handle, buf, len, offset, &ewf_error); if (cnt < 0) { char *errmsg = NULL; tsk_error_reset(); tsk_error_set_errno(TSK_ERR_IMG_READ); if (getError(ewf_error, error_string)) errmsg = strerror(errno); else errmsg = error_string; tsk_error_set_errstr("ewf_image_read - offset: %" PRIuOFF " - len: %" PRIuSIZE " - %s", offset, len, errmsg); tsk_release_lock(&(ewf_info->read_lock)); return -1; } #else cnt = libewf_read_random(ewf_info->handle, buf, len, offset); if (cnt < 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_IMG_READ); tsk_error_set_errstr("ewf_image_read - offset: %" PRIuOFF " - len: %" PRIuSIZE " - %s", offset, len, strerror(errno)); tsk_release_lock(&(ewf_info->read_lock)); return -1; } #endif tsk_release_lock(&(ewf_info->read_lock)); return cnt; } static void ewf_image_imgstat(TSK_IMG_INFO * img_info, FILE * hFile) { IMG_EWF_INFO *ewf_info = (IMG_EWF_INFO *) img_info; tsk_fprintf(hFile, "IMAGE FILE INFORMATION\n"); tsk_fprintf(hFile, "--------------------------------------------\n"); tsk_fprintf(hFile, "Image Type:\t\tewf\n"); tsk_fprintf(hFile, "\nSize of data in bytes:\t%" PRIuOFF "\n", img_info->size); if (ewf_info->md5hash_isset == 1) { tsk_fprintf(hFile, "MD5 hash of data:\t%s\n", ewf_info->md5hash); } return; } static void ewf_image_close(TSK_IMG_INFO * img_info) { int i; IMG_EWF_INFO *ewf_info 
= (IMG_EWF_INFO *) img_info; #if defined ( HAVE_LIBEWF_V2_API) libewf_handle_close(ewf_info->handle, NULL); libewf_handle_free(&(ewf_info->handle), NULL); #else libewf_close(ewf_info->handle); #endif // this stuff crashes if we used glob. v2 of the API has a free method. // not clear from the docs what we should do in v1... // @@@ Probably a memory leak in v1 unless libewf_close deals with it if (ewf_info->used_ewf_glob == 0) { for (i = 0; i < ewf_info->num_imgs; i++) { free(ewf_info->images[i]); } free(ewf_info->images); } else { libewf_error_t *error; libewf_glob_free( ewf_info->images, ewf_info->num_imgs, &error); } tsk_deinit_lock(&(ewf_info->read_lock)); tsk_img_free(img_info); } /* Tests if the image file header against the * header (magic) signature specified. * Returns a 0 on no match and a 1 on a match, and -1 on error. */ #if 0 static int img_file_header_signature_ncmp(const char *filename, const char *file_header_signature, int size_of_signature) { int match; ssize_t read_count = 0; char header[512]; int fd; if ((filename == NULL) || (file_header_signature == NULL)) { return (0); } if (size_of_signature <= 0) { return (0); } if ((fd = open(filename, O_RDONLY | O_BINARY)) < 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_IMG_OPEN); tsk_error_set_errstr("ewf magic testing: %s", filename); return -1; } read_count = read(fd, header, 512); if (read_count != 512) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_IMG_READ); tsk_error_set_errstr("ewf magic testing: %s", filename); return -1; } close(fd); match = strncmp(file_header_signature, header, size_of_signature) == 0; return (match); } #endif TSK_IMG_INFO * ewf_open(int a_num_img, const TSK_TCHAR * const a_images[], unsigned int a_ssize) { #if defined( HAVE_LIBEWF_V2_API ) char error_string[TSK_EWF_ERROR_STRING_SIZE]; libewf_error_t *ewf_error = NULL; int result = 0; #elif !defined( LIBEWF_STRING_DIGEST_HASH_LENGTH_MD5 ) uint8_t md5_hash[16]; #endif IMG_EWF_INFO *ewf_info = NULL; TSK_IMG_INFO *img_info = NULL; #if !defined( HAVE_LIBEWF_V2_API) if (tsk_verbose) libewf_set_notify_values(stderr, 1); #endif if ((ewf_info = (IMG_EWF_INFO *) tsk_img_malloc(sizeof(IMG_EWF_INFO))) == NULL) { return NULL; } img_info = (TSK_IMG_INFO *) ewf_info; // See if they specified only the first of the set... 
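    // (A single path may name only the first segment of an E01 set; in that
    // case libewf_glob() / libewf_glob_wide() is used below to locate the
    // remaining segment files, and used_ewf_glob records that the name list
    // came from libewf so ewf_image_close() releases it accordingly.)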
ewf_info->used_ewf_glob = 0; if (a_num_img == 1) { #if defined( HAVE_LIBEWF_V2_API) #ifdef TSK_WIN32 if (libewf_glob_wide(a_images[0], TSTRLEN(a_images[0]), LIBEWF_FORMAT_UNKNOWN, &ewf_info->images, &ewf_info->num_imgs, &ewf_error) == -1) { #else if (libewf_glob(a_images[0], TSTRLEN(a_images[0]), LIBEWF_FORMAT_UNKNOWN, &ewf_info->images, &ewf_info->num_imgs, &ewf_error) == -1) { #endif tsk_error_reset(); tsk_error_set_errno(TSK_ERR_IMG_MAGIC); getError(ewf_error, error_string); tsk_error_set_errstr("ewf_open: Not an E01 glob name (%s)", error_string); libewf_error_free(&ewf_error); tsk_img_free(ewf_info); return NULL; } #else //use v1 #ifdef TSK_WIN32 ewf_info->num_imgs = libewf_glob_wide(a_images[0], TSTRLEN(a_images[0]), LIBEWF_FORMAT_UNKNOWN, &ewf_info->images); #else ewf_info->num_imgs = libewf_glob(a_images[0], TSTRLEN(a_images[0]), LIBEWF_FORMAT_UNKNOWN, &ewf_info->images); #endif if (ewf_info->num_imgs <= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_IMG_MAGIC); tsk_error_set_errstr("ewf_open: Not an E01 glob name"); tsk_img_free(ewf_info); return NULL; } #endif // end v1 ewf_info->used_ewf_glob = 1; if (tsk_verbose) tsk_fprintf(stderr, "ewf_open: found %d segment files via libewf_glob\n", ewf_info->num_imgs); } else { int i; ewf_info->num_imgs = a_num_img; if ((ewf_info->images = (TSK_TCHAR **) tsk_malloc(a_num_img * sizeof(TSK_TCHAR *))) == NULL) { tsk_img_free(ewf_info); return NULL; } for (i = 0; i < a_num_img; i++) { if ((ewf_info->images[i] = (TSK_TCHAR *) tsk_malloc((TSTRLEN(a_images[i]) + 1) * sizeof(TSK_TCHAR))) == NULL) { tsk_img_free(ewf_info); return NULL; } TSTRNCPY(ewf_info->images[i], a_images[i], TSTRLEN(a_images[i]) + 1); } } #if defined( HAVE_LIBEWF_V2_API ) // Check the file signature before we call the library open #if defined( TSK_WIN32 ) if (libewf_check_file_signature_wide(a_images[0], &ewf_error) != 1) #else if (libewf_check_file_signature(a_images[0], &ewf_error) != 1) #endif { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_IMG_MAGIC); getError(ewf_error, error_string); tsk_error_set_errstr("ewf_open: Not an EWF file (%s)", error_string); libewf_error_free(&ewf_error); tsk_img_free(ewf_info); if (tsk_verbose != 0) { tsk_fprintf(stderr, "Not an EWF file\n"); } return (NULL); } if (libewf_handle_initialize(&(ewf_info->handle), &ewf_error) != 1) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_IMG_OPEN); getError(ewf_error, error_string); tsk_error_set_errstr("ewf_open file: %" PRIttocTSK ": Error initializing handle (%s)", a_images[0], error_string); libewf_error_free(&ewf_error); tsk_img_free(ewf_info); if (tsk_verbose != 0) { tsk_fprintf(stderr, "Unable to create EWF handle\n"); } return (NULL); } #if defined( TSK_WIN32 ) if (libewf_handle_open_wide(ewf_info->handle, (wchar_t * const *) ewf_info->images, ewf_info->num_imgs, LIBEWF_OPEN_READ, &ewf_error) != 1) #else if (libewf_handle_open(ewf_info->handle, (char *const *) ewf_info->images, ewf_info->num_imgs, LIBEWF_OPEN_READ, &ewf_error) != 1) #endif { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_IMG_OPEN); getError(ewf_error, error_string); tsk_error_set_errstr("ewf_open file: %" PRIttocTSK ": Error opening (%s)", a_images[0], error_string); libewf_error_free(&ewf_error); tsk_img_free(ewf_info); if (tsk_verbose != 0) { tsk_fprintf(stderr, "Error opening EWF file\n"); } return (NULL); } if (libewf_handle_get_media_size(ewf_info->handle, (size64_t *) & (img_info->size), &ewf_error) != 1) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_IMG_OPEN); getError(ewf_error, error_string); 
tsk_error_set_errstr("ewf_open file: %" PRIttocTSK ": Error getting size of image (%s)", a_images[0], error_string); libewf_error_free(&ewf_error); tsk_img_free(ewf_info); if (tsk_verbose != 0) { tsk_fprintf(stderr, "Error getting size of EWF file\n"); } return (NULL); } result = libewf_handle_get_utf8_hash_value_md5(ewf_info->handle, (uint8_t *) ewf_info->md5hash, 33, &ewf_error); if (result == -1) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_IMG_OPEN); getError(ewf_error, error_string); tsk_error_set_errstr("ewf_open file: %" PRIttocTSK ": Error getting MD5 of image (%s)", a_images[0], error_string); libewf_error_free(&ewf_error); tsk_img_free(ewf_info); if (tsk_verbose != 0) { tsk_fprintf(stderr, "Error getting size of EWF file\n"); } return (NULL); } ewf_info->md5hash_isset = result; #else // V1 API // Check the file signature before we call the library open #if defined( TSK_WIN32 ) if (libewf_check_file_signature_wide(a_images[0]) != 1) #else if (libewf_check_file_signature(a_images[0]) != 1) #endif { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_IMG_MAGIC); tsk_error_set_errstr("ewf_open: Not an EWF file"); tsk_img_free(ewf_info); if (tsk_verbose) tsk_fprintf(stderr, "Not an EWF file\n"); return NULL; } #if defined( TSK_WIN32 ) ewf_info->handle = libewf_open_wide( (wchar_t * const *) ewf_info->images, ewf_info->num_imgs, LIBEWF_OPEN_READ); #else ewf_info->handle = libewf_open( (char *const *) ewf_info->images, ewf_info->num_imgs, LIBEWF_OPEN_READ); #endif if (ewf_info->handle == NULL) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_IMG_OPEN); tsk_error_set_errstr("ewf_open file: %" PRIttocTSK ": Error opening", ewf_info->images[0]); tsk_img_free(ewf_info); if (tsk_verbose != 0) { tsk_fprintf(stderr, "Error opening EWF file\n"); } return (NULL); } #if defined( LIBEWF_STRING_DIGEST_HASH_LENGTH_MD5 ) // 2007 version img_info->size = libewf_get_media_size(ewf_info->handle); ewf_info->md5hash_isset = libewf_get_stored_md5_hash(ewf_info->handle, ewf_info->md5hash, LIBEWF_STRING_DIGEST_HASH_LENGTH_MD5); #else // libewf-20080322 version if (libewf_get_media_size(ewf_info->handle, (size64_t *) & (img_info->size)) != 1) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_IMG_OPEN); tsk_error_set_errstr("ewf_open file: %" PRIttocTSK ": Error getting size of image", ewf_info->images[0]); tsk_img_free(ewf_info); if (tsk_verbose) { tsk_fprintf(stderr, "Error getting size of EWF file\n"); } return (NULL); } if (libewf_get_md5_hash(ewf_info->handle, md5_hash, 16) == 1) { int md5_string_iterator = 0; int md5_hash_iterator = 0; for (md5_hash_iterator = 0; md5_hash_iterator < 16; md5_hash_iterator++) { int digit = md5_hash[md5_hash_iterator] / 16; if (digit <= 9) { ewf_info->md5hash[md5_string_iterator++] = '0' + (char) digit; } else { ewf_info->md5hash[md5_string_iterator++] = 'a' + (char) (digit - 10); } digit = md5_hash[md5_hash_iterator] % 16; if (digit <= 9) { ewf_info->md5hash[md5_string_iterator++] = '0' + (char) digit; } else { ewf_info->md5hash[md5_string_iterator++] = 'a' + (char) (digit - 10); } } ewf_info->md5hash_isset = 1; } #endif /* defined( LIBEWF_STRING_DIGEST_HASH_LENGTH_MD5 ) */ #endif /* defined( HAVE_LIBEWF_V2_API ) */ if (a_ssize != 0) { img_info->sector_size = a_ssize; } else { img_info->sector_size = 512; } img_info->itype = TSK_IMG_TYPE_EWF_EWF; img_info->read = &ewf_image_read; img_info->close = &ewf_image_close; img_info->imgstat = &ewf_image_imgstat; // initialize the read lock tsk_init_lock(&(ewf_info->read_lock)); return (img_info); } #endif /* HAVE_LIBEWF */ 
sleuthkit-4.2.0/tsk/img/ewf.h000644 000765 000024 00000002517 12576320700 016641 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit - Add on for Expert Witness Compression Format (EWF) image support * * Copyright (c) 2006, 2011 Joachim Metz * * This software is distributed under the Common Public License 1.0 * * Based on raw image support of the Sleuth Kit from * Brian Carrier. */ /* * Header files for EWF-specific data structures and functions. */ #ifndef _TSK_IMG_EWF_H #define _TSK_IMG_EWF_H #if HAVE_LIBEWF // we used to check only for TSK_WIN32, but that fails on mingw #if defined(_MSC_VER) #include #endif #include // libewf version 2 no longer defines LIBEWF_HANDLE #undef HAVE_LIBEWF_V2_API #if !defined( LIBEWF_HANDLE ) #define HAVE_LIBEWF_V2_API #endif #ifdef __cplusplus extern "C" { #endif extern TSK_IMG_INFO *ewf_open(int, const TSK_TCHAR * const images[], unsigned int a_ssize); typedef struct { TSK_IMG_INFO img_info; libewf_handle_t *handle; char md5hash[33]; int md5hash_isset; TSK_TCHAR **images; int num_imgs; uint8_t used_ewf_glob; // 1 if libewf_glob was used during open tsk_lock_t read_lock; ///< Lock for reads since libewf is not thread safe -- only works if you have a single instance of EWF_INFO for all threads. } IMG_EWF_INFO; #ifdef __cplusplus } #endif #endif #endif sleuthkit-4.2.0/tsk/img/img_io.c000644 000765 000024 00000020304 12576320700 017310 0ustar00carrierstaff000000 000000 /* * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2011 Brian Carrier. All Rights reserved * * This software is distributed under the Common Public License 1.0 */ /** * \file img_io.c * Contains the basic img reading API redirection functions. */ #include "tsk_img_i.h" /** * \ingroup imglib * Reads data from an open disk image * @param a_img_info Disk image to read from * @param a_off Byte offset to start reading from * @param a_buf Buffer to read into * @param a_len Number of bytes to read into buffer * @returns -1 on error or number of bytes read */ ssize_t tsk_img_read(TSK_IMG_INFO * a_img_info, TSK_OFF_T a_off, char *a_buf, size_t a_len) { #define CACHE_AGE 1000 ssize_t read_count = 0; int cache_index = 0; int cache_next = 0; // index to lowest age cache (to use next) size_t len2 = 0; if (a_img_info == NULL) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_IMG_ARG); tsk_error_set_errstr("tsk_img_read: a_img_info: NULL"); return -1; } // Do not allow a_buf to be NULL. if (a_buf == NULL) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_IMG_ARG); tsk_error_set_errstr("tsk_img_read: a_buf: NULL"); return -1; } // The function cannot handle negative offsets. if (a_off < 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_IMG_ARG); tsk_error_set_errstr("tsk_img_read: a_off: %" PRIuOFF, a_off); return -1; } // Protect a_off against overflowing when a_len is added since TSK_OFF_T // maps to an int64 we prefer it over size_t although likely checking // for ( a_len > SSIZE_MAX ) is better but the code does not seem to // use that approach. if ((TSK_OFF_T) a_len < 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_IMG_ARG); tsk_error_set_errstr("tsk_img_read: a_len: %zd", a_len); return -1; } /* cache_lock is used for both the cache in IMG_INFO and * the shared variables in the img type specific INFO structs. * grab it now so that it is held before any reads. 
*/ tsk_take_lock(&(a_img_info->cache_lock)); // if they ask for more than the cache length, skip the cache if ((a_len + (a_off % 512)) > TSK_IMG_INFO_CACHE_LEN) { ssize_t nbytes; /* Some of the lower-level methods like block-sized reads. * So if the len is not that multiple, then make it. */ if (a_len % a_img_info->sector_size) { char *buf2 = a_buf; size_t len_tmp; len_tmp = roundup(a_len, a_img_info->sector_size); if ((buf2 = (char *) tsk_malloc(len_tmp)) == NULL) { tsk_release_lock(&(a_img_info->cache_lock)); return -1; } nbytes = a_img_info->read(a_img_info, a_off, buf2, len_tmp); if ((nbytes > 0) && (nbytes < (ssize_t) a_len)) { memcpy(a_buf, buf2, nbytes); } else { memcpy(a_buf, buf2, a_len); nbytes = (ssize_t)a_len; } free(buf2); } else { nbytes = a_img_info->read(a_img_info, a_off, a_buf, a_len); } tsk_release_lock(&(a_img_info->cache_lock)); return nbytes; } // TODO: why not just return 0 here (and be POSIX compliant)? // and why not check earlier for this condition? if (a_off >= a_img_info->size) { tsk_release_lock(&(a_img_info->cache_lock)); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_IMG_READ_OFF); tsk_error_set_errstr("tsk_img_read - %" PRIuOFF, a_off); return -1; } /* See if the requested length is going to be too long. * we'll use this length when checking the cache. */ len2 = a_len; // Protect against INT64_MAX + INT64_MAX > value if (((TSK_OFF_T) len2 > a_img_info->size) || (a_off >= (a_img_info->size - (TSK_OFF_T)len2))) { len2 = (size_t) (a_img_info->size - a_off); } // check if it is in the cache for (cache_index = 0; cache_index < TSK_IMG_INFO_CACHE_NUM; cache_index++) { // Look into the in-use cache entries if (a_img_info->cache_len[cache_index] > 0) { // the read_count check makes sure we don't go back in after data was read if ((read_count == 0) && (a_img_info->cache_off[cache_index] <= a_off) && (a_img_info->cache_off[cache_index] + a_img_info->cache_len[cache_index] >= a_off + len2)) { /* if (tsk_verbose) fprintf(stderr, "tsk_img_read: Read found in cache %d\n", cache_index ); */ // We found it... memcpy(a_buf, &a_img_info->cache[cache_index][a_off - a_img_info->cache_off[cache_index]], len2); read_count = (ssize_t) len2; // reset its "age" since it was useful a_img_info->cache_age[cache_index] = CACHE_AGE; // we don't break out of the loop so that we update all ages } else { /* decrease its "age" since it was not useful. * We don't let used ones go below 1 so that they are not * confused with entries that have never been used. */ a_img_info->cache_age[cache_index]--; // see if this is the most eligible replacement if ((a_img_info->cache_len[cache_next] > 0) && (a_img_info->cache_age[cache_index] < a_img_info->cache_age[cache_next])) cache_next = cache_index; } } else { cache_next = cache_index; } } // if we didn't find it, then load it into the cache_next entry if (read_count == 0) { size_t read_size = 0; // round the offset down to a sector boundary a_img_info->cache_off[cache_next] = (a_off / 512) * 512; /* if (tsk_verbose) fprintf(stderr, "tsk_img_read: Loading data into cache %d (%" PRIuOFF ")\n", cache_next, a_img_info->cache_off[cache_next]); */ // Read a full cache block or the remaining data. 
read_size = TSK_IMG_INFO_CACHE_LEN; if ((a_img_info->cache_off[cache_next] + (TSK_OFF_T)read_size) > a_img_info->size) { read_size = (size_t) (a_img_info->size - a_img_info->cache_off[cache_next]); } read_count = a_img_info->read(a_img_info, a_img_info->cache_off[cache_next], a_img_info->cache[cache_next], read_size); // if no error, then set the variables and copy the data // Although a read_count of -1 indicates an error, // since read_count is used in the calculation it may not be negative. // Also it does not make sense to copy data when the read_count is 0. if (read_count > 0) { TSK_OFF_T rel_off = 0; a_img_info->cache_age[cache_next] = CACHE_AGE; a_img_info->cache_len[cache_next] = read_count; // Determine the offset relative to the start of the cached data. rel_off = a_off - a_img_info->cache_off[cache_next]; // Make sure we were able to read sufficient data into the cache. if (rel_off > (TSK_OFF_T) read_count) { len2 = 0; } // Make sure not to copy more than is available in the cache. else if ((rel_off + (TSK_OFF_T) len2) > (TSK_OFF_T) read_count) { len2 = (size_t) (read_count - rel_off); } // Only copy data when we have something to copy. if (len2 > 0) { memcpy(a_buf, &(a_img_info->cache[cache_next][rel_off]), len2); } read_count = (ssize_t) len2; } else { a_img_info->cache_len[cache_next] = 0; a_img_info->cache_age[cache_next] = 0; a_img_info->cache_off[cache_next] = 0; } } tsk_release_lock(&(a_img_info->cache_lock)); return read_count; } sleuthkit-4.2.0/tsk/img/img_open.c000644 000765 000024 00000033022 12576320700 017643 0ustar00carrierstaff000000 000000 /* * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2006-2011 Brian Carrier, Basis Technology. All Rights reserved * Copyright (c) 2005 Brian Carrier. All rights reserved * * tsk_img_open * * This software is distributed under the Common Public License 1.0 */ /** * \file img_open.c * Contains the basic img_open function call, that interfaces with * the format specific _open calls */ #include "tsk_img_i.h" #include "raw.h" #if HAVE_LIBAFFLIB typedef int bool; #include "aff.h" #endif #if HAVE_LIBEWF #include "ewf.h" #endif /** * \ingroup imglib * Opens a single (non-split) disk image file so that it can be read. This is a * wrapper around tsk_img_open(). See it for more details on detection etc. See * tsk_img_open_sing_utf8() for a version of this function that always takes * UTF-8 as input. * * @param a_image The path to the image file * @param type The disk image type (can be autodetection) * @param a_ssize Size of device sector in bytes (or 0 for default) * * @return Pointer to TSK_IMG_INFO or NULL on error */ TSK_IMG_INFO * tsk_img_open_sing(const TSK_TCHAR * a_image, TSK_IMG_TYPE_ENUM type, unsigned int a_ssize) { const TSK_TCHAR *const a = a_image; return tsk_img_open(1, &a, type, a_ssize); } /** * \ingroup imglib * Opens one or more disk image files so that they can be read. If a file format * type is specified, this function will call the specific routine to open the file. * Otherwise, it will detect the type (it will default to raw if no specific type can * be detected). This function must be called before a disk image can be read from. * Note that the data type used to store the image paths is a TSK_TCHAR, which changes * depending on a Unix or Windows build. If you will always have UTF8, then consider * using tsk_img_open_utf8(). * * @param num_img The number of images to open (will be > 1 for split images). 
* @param images The path to the image files (the number of files must * be equal to num_img and they must be in a sorted order) * @param type The disk image type (can be autodetection) * @param a_ssize Size of device sector in bytes (or 0 for default) * * @return Pointer to TSK_IMG_INFO or NULL on error */ TSK_IMG_INFO * tsk_img_open(int num_img, const TSK_TCHAR * const images[], TSK_IMG_TYPE_ENUM type, unsigned int a_ssize) { TSK_IMG_INFO *img_info = NULL; // Get rid of any old error messages laying around tsk_error_reset(); if ((num_img == 0) || (images[0] == NULL)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_IMG_NOFILE); tsk_error_set_errstr("tsk_img_open"); return NULL; } if ((a_ssize > 0) && (a_ssize < 512)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_IMG_ARG); tsk_error_set_errstr("sector size is less than 512 bytes (%d)", a_ssize); return NULL; } if ((a_ssize % 512) != 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_IMG_ARG); tsk_error_set_errstr("sector size is not a multiple of 512 (%d)", a_ssize); return NULL; } if (tsk_verbose) TFPRINTF(stderr, _TSK_T("tsk_img_open: Type: %d NumImg: %d Img1: %s\n"), type, num_img, images[0]); /* If no type is given, then we use the autodetection methods * In case the image file matches the signatures of multiple formats, * we try all of the embedded formats */ if (type == TSK_IMG_TYPE_DETECT) { TSK_IMG_INFO *img_set = NULL; #if HAVE_LIBAFFLIB || HAVE_LIBEWF char *set = NULL; #endif // we rely on tsk_errno, so make sure it is 0 tsk_error_reset(); /* Try the non-raw formats first */ #if HAVE_LIBAFFLIB if ((img_info = aff_open(images, a_ssize)) != NULL) { /* we don't allow the "ANY" when autodetect is used because * we only want to detect the tested formats. */ if (img_info->itype == TSK_IMG_TYPE_AFF_ANY) { img_info->close(img_info); } else { set = "AFF"; img_set = img_info; } } else { // If AFF is otherwise happy except for a password, stop trying to guess if (tsk_error_get_errno() == TSK_ERR_IMG_PASSWD) { return NULL; } tsk_error_reset(); } #endif #if HAVE_LIBEWF if ((img_info = ewf_open(num_img, images, a_ssize)) != NULL) { if (set == NULL) { set = "EWF"; img_set = img_info; } else { img_set->close(img_set); img_info->close(img_info); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_IMG_UNKTYPE); tsk_error_set_errstr("EWF or %s", set); return NULL; } } else { tsk_error_reset(); } #endif // if any of the non-raw formats were detected, then use it. if (img_set != NULL) return img_set; // otherwise, try raw if ((img_info = raw_open(num_img, images, a_ssize)) != NULL) { return img_info; } else if (tsk_error_get_errno() != 0) { return NULL; } tsk_error_reset(); tsk_error_set_errno(TSK_ERR_IMG_UNKTYPE); return NULL; } /* * Type values */ switch (type) { case TSK_IMG_TYPE_RAW: img_info = raw_open(num_img, images, a_ssize); break; #if HAVE_LIBAFFLIB case TSK_IMG_TYPE_AFF_AFF: case TSK_IMG_TYPE_AFF_AFD: case TSK_IMG_TYPE_AFF_AFM: case TSK_IMG_TYPE_AFF_ANY: img_info = aff_open(images, a_ssize); break; #endif #if HAVE_LIBEWF case TSK_IMG_TYPE_EWF_EWF: img_info = ewf_open(num_img, images, a_ssize); break; #endif default: tsk_error_reset(); tsk_error_set_errno(TSK_ERR_IMG_UNSUPTYPE); tsk_error_set_errstr("%d", type); return NULL; } return img_info; } /** * \ingroup imglib * Opens a single (non-split) disk image file so that it can be read. This version * always takes a UTF-8 encoding of the disk image. See tsk_img_open_sing() for a * version that takes a wchar_t or char depending on the platform. * This is a wrapper around tsk_img_open(). 
See it for more details on detection etc. * * @param a_image The UTF-8 path to the image file * @param type The disk image type (can be autodetection) * @param a_ssize Size of device sector in bytes (or 0 for default) * * @return Pointer to TSK_IMG_INFO or NULL on error */ TSK_IMG_INFO * tsk_img_open_utf8_sing(const char *a_image, TSK_IMG_TYPE_ENUM type, unsigned int a_ssize) { const char *const a = a_image; return tsk_img_open_utf8(1, &a, type, a_ssize); } /** * \ingroup imglib * Opens one or more disk image files so that they can be read. This is a wrapper * around tsk_img_open() and this version always takes a UTF-8 encoding of the * image files. See its description for more details. * * @param num_img The number of images to open (will be > 1 for split images). * @param images The path to the UTF-8 encoded image files (the number of files must * be equal to num_img and they must be in a sorted order) * @param type The disk image type (can be autodetection) * @param a_ssize Size of device sector in bytes (or 0 for default) * * @return Pointer to TSK_IMG_INFO or NULL on error */ TSK_IMG_INFO * tsk_img_open_utf8(int num_img, const char *const images[], TSK_IMG_TYPE_ENUM type, unsigned int a_ssize) { #ifdef TSK_WIN32 { /* Note that there is an assumption in this code that wchar_t is 2-bytes. * this is a correct assumption for Windows, but not for all systems... */ TSK_IMG_INFO *retval = NULL; wchar_t **images16; int i; // allocate a buffer to store the UTF-16 version of the images. if ((images16 = (wchar_t **) tsk_malloc(sizeof(wchar_t *) * num_img)) == NULL) { return NULL; } for (i = 0; i < num_img; i++) { size_t ilen; UTF16 *utf16; UTF8 *utf8; TSKConversionResult retval2; // we allocate the buffer with the same number of chars as the UTF-8 length ilen = strlen(images[i]); if ((images16[i] = (wchar_t *) tsk_malloc((ilen + 1) * sizeof(wchar_t))) == NULL) { goto tsk_utf8_cleanup; } utf8 = (UTF8 *) images[i]; utf16 = (UTF16 *) images16[i]; retval2 = tsk_UTF8toUTF16((const UTF8 **) &utf8, &utf8[ilen], &utf16, &utf16[ilen], TSKlenientConversion); if (retval2 != TSKconversionOK) { tsk_error_set_errno(TSK_ERR_IMG_CONVERT); tsk_error_set_errstr ("tsk_img_open_utf8: Error converting image %s %d", images[i], retval2); goto tsk_utf8_cleanup; } *utf16 = '\0'; } retval = tsk_img_open(num_img, images16, type, a_ssize); // free up the memory tsk_utf8_cleanup: for (i = 0; i < num_img; i++) { if (images16[i]) free(images16[i]); } free(images16); return retval; } #else return tsk_img_open(num_img, images, type, a_ssize); #endif } #if 0 /* This interface needs some more thought because the size of wchar is not standard. * If the goal i to provide a constant wchar interface, then we need to incorporate * UTF-32 to UTF-8 support as well. If the goal is to provide a standard UTF-16 * interface, we should use another type besiddes wchar_t. */ TSK_IMG_INFO * tsk_img_open_utf16(int num_img, wchar_t * const images[], TSK_IMG_TYPE_ENUM type) { #if TSK_WIN32 return tsk_img_open(num_img, images, type); #else { TSK_IMG_INFO *retval; int i; char **images8; TSK_ENDIAN_ENUM endian; uint16_t tmp1; /* The unicode conversio routines are primarily to convert Unicode * in file and volume system images, which means they could be in * an endian ordering different from the local one. 
We need to figure * out our local ordering so we can give it the right flag */ tmp1 = 1; if (tsk_guess_end_u16(&endian, (uint8_t *) & tmp1, 1)) { // @@@@ return NULL; } // convert UTF16 to UTF8 if ((images8 = (char **) tsk_malloc(sizeof(char *) * num_img)) == NULL) { return NULL; } for (i = 0; i < num_img; i++) { size_t ilen; UTF16 *utf16; UTF8 *utf8; TSKConversionResult retval2; // we allocate the buffer to be four times the utf-16 length. ilen = wcslen(images[i]); ilen <<= 2; if ((images8[i] = (char *) tsk_malloc(ilen)) == NULL) { return NULL; } utf16 = (UTF16 *) images[i]; utf8 = (UTF8 *) images8[i]; retval2 = tsk_UTF16toUTF8_lclorder((const UTF16 **) &utf16, &utf16[wcslen(images[i]) + 1], &utf8, &utf8[ilen + 1], TSKlenientConversion); if (retval2 != TSKconversionOK) { tsk_error_set_errno(TSK_ERR_IMG_CONVERT); tsk_error_set_errstr ("tsk_img_open_utf16: Error converting image %d %d", i, retval2); return NULL; } *utf8 = '\0'; } retval = tsk_img_open(num_img, (const TSK_TCHAR **) images8, type); for (i = 0; i < num_img; i++) { free(images8[i]); } free(images8); return retval; } #endif } #endif /** * \ingroup imglib * Closes an open disk image. * @param a_img_info Pointer to the open disk image structure. */ void tsk_img_close(TSK_IMG_INFO * a_img_info) { if (a_img_info == NULL) { return; } a_img_info->close(a_img_info); } /** * \internal * Return the list of names for this open images. * This is sort of a hack implementation and is internal only at this * point. Returns pointers into the IMG_INFO structs and should not be * modified or freed. * @param a_img_info Image to pull names from * @param a_num_imgs Will contain number of elements in the return array. * @returns List of names. */ const TSK_TCHAR ** tsk_img_get_names(TSK_IMG_INFO *a_img_info, int *a_num_imgs) { if (a_img_info == NULL) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_IMG_ARG); tsk_error_set_errstr("tsk_img_get_names: IMG_INFO is NULL"); return NULL; } if (a_num_imgs == NULL) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_IMG_ARG); tsk_error_set_errstr("tsk_img_get_names: a_num_imgs is NULL"); return NULL; } *a_num_imgs = 0; switch (a_img_info->itype) { case TSK_IMG_TYPE_RAW: { IMG_RAW_INFO *raw_info = (IMG_RAW_INFO *)a_img_info; *a_num_imgs = raw_info->num_img; return raw_info->images; } #if HAVE_LIBEWF case TSK_IMG_TYPE_EWF_EWF: { IMG_EWF_INFO *ewf_info = (IMG_EWF_INFO *)a_img_info; *a_num_imgs = ewf_info->num_imgs; return ewf_info->images; } break; #endif default: return NULL; } } sleuthkit-4.2.0/tsk/img/img_types.c000644 000765 000024 00000007443 12576320700 020056 0ustar00carrierstaff000000 000000 /* ** img_types ** The Sleuth Kit ** ** Identify the type of image file being used ** ** Brian Carrier [carrier sleuthkit [dot] org] ** Copyright (c) 2006-2011 Brian Carrier. All rights reserved ** ** This software is distributed under the Common Public License 1.0 */ /** \file img_types.c * Contains basic functions to parse and print the names of the supported disk image types. 
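 *
 * As an illustrative sketch, a command-line type string can be mapped to a
 * type ID and back with the functions below:
 *
 *     TSK_IMG_TYPE_ENUM t = tsk_img_type_toid_utf8("raw");   /* TSK_IMG_TYPE_RAW */
 *     const char *name = tsk_img_type_toname(t);             /* "raw" */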
*/ #include "tsk_img_i.h" /** \internal * used to parse and print supported types */ typedef struct { char *name; uint8_t code; char *comment; } IMG_TYPES; /** \internal * The table used to parse input strings * - in order of expected usage */ static IMG_TYPES img_open_table[] = { {"raw", TSK_IMG_TYPE_RAW, "Single or split raw file (dd)"}, #if HAVE_LIBAFFLIB {"aff", TSK_IMG_TYPE_AFF_AFF, "Advanced Forensic Format"}, {"afd", TSK_IMG_TYPE_AFF_AFD, "AFF Multiple File"}, {"afm", TSK_IMG_TYPE_AFF_AFM, "AFF with external metadata"}, {"afflib", TSK_IMG_TYPE_AFF_ANY, "All AFFLIB image formats (including beta ones)"}, #endif #if HAVE_LIBEWF {"ewf", TSK_IMG_TYPE_EWF_EWF, "Expert Witness format (encase)"}, #endif {0}, }; /** * \ingroup imglib * Parses a string that specifies an image format to determine the * associated type ID. This is used by the TSK command line tools to * parse the type given on the command line. * * @param str String of image format type, always UTF-8 * @return ID of image type */ TSK_IMG_TYPE_ENUM tsk_img_type_toid_utf8(const char *str) { IMG_TYPES *sp; for (sp = img_open_table; sp->name; sp++) { if (strcmp(str, sp->name) == 0) { return sp->code; } } return TSK_IMG_TYPE_UNSUPP; } /** * \ingroup imglib * Parses a string that specifies an image format to determine the * associated type ID. This is used by the TSK command line tools to * parse the type given on the command line. * * @param str String of image format type * @return ID of image type */ TSK_IMG_TYPE_ENUM tsk_img_type_toid(const TSK_TCHAR * str) { char tmp[16]; int i; // convert to char for (i = 0; i < 15 && str[i] != '\0'; i++) { tmp[i] = (char) str[i]; } tmp[i] = '\0'; return tsk_img_type_toid_utf8(tmp); } /** * \ingroup imglib * Prints the name and description of the supported image types to a handle. * This is used by the TSK command line tools to print the supported types * to the console. * @param hFile Handle to print names and descriptions to. */ void tsk_img_type_print(FILE * hFile) { IMG_TYPES *sp; tsk_fprintf(hFile, "Supported image format types:\n"); for (sp = img_open_table; sp->name; sp++) tsk_fprintf(hFile, "\t%s (%s)\n", sp->name, sp->comment); } /** * \ingroup imglib * Returns the name of an image format type, given its type ID. * @param type ID of image type * @returns Pointer to string of the name. */ const char * tsk_img_type_toname(TSK_IMG_TYPE_ENUM type) { IMG_TYPES *sp; for (sp = img_open_table; sp->name; sp++) if (sp->code == type) return sp->name; return NULL; } /** * \ingroup imglib * Returns the description of an image format type, given its type ID. * @param type ID of image type * @returns Pointer to string of the description */ const char * tsk_img_type_todesc(TSK_IMG_TYPE_ENUM type) { IMG_TYPES *sp; for (sp = img_open_table; sp->name; sp++) if (sp->code == type) return sp->comment; return NULL; } /** * \ingroup imglib * Returns the supported file format types. * @returns A bit in the return value is set to 1 if the type is supported. */ TSK_IMG_TYPE_ENUM tsk_img_type_supported() { TSK_IMG_TYPE_ENUM sup_types = 0; IMG_TYPES *sp; for (sp = img_open_table; sp->name; sp++) { sup_types |= sp->code; } return sup_types; } sleuthkit-4.2.0/tsk/img/Makefile.am000644 000765 000024 00000000436 12576320700 017741 0ustar00carrierstaff000000 000000 AM_CPPFLAGS = -I../.. -I$(srcdir)/../.. 
-Wall EXTRA_DIST = .indent.pro noinst_LTLIBRARIES = libtskimg.la libtskimg_la_SOURCES = img_open.c img_types.c raw.c raw.h \ aff.c aff.h ewf.c ewf.h tsk_img_i.h img_io.c mult_files.c indent: indent *.c *.h clean-local: -rm -f *.c~ *.h~ sleuthkit-4.2.0/tsk/img/Makefile.in000644 000765 000024 00000044705 12576326300 017763 0ustar00carrierstaff000000 000000 # Makefile.in generated by automake 1.15 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2014 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) ;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) pkgdatadir = $(datadir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ pkglibexecdir = $(libexecdir)/@PACKAGE@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = tsk/img ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ax_pthread.m4 \ $(top_srcdir)/m4/cppunit.m4 \ $(top_srcdir)/m4/ax_jni_include_dir.m4 \ $(top_srcdir)/m4/ac_prog_javac_works.m4 \ $(top_srcdir)/m4/ac_prog_javac.m4 \ $(top_srcdir)/m4/ac_prog_java_works.m4 \ $(top_srcdir)/m4/ac_prog_java.m4 $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/tsk/tsk_config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = 
$(noinst_LTLIBRARIES) libtskimg_la_LIBADD = am_libtskimg_la_OBJECTS = img_open.lo img_types.lo raw.lo aff.lo \ ewf.lo img_io.lo mult_files.lo libtskimg_la_OBJECTS = $(am_libtskimg_la_OBJECTS) AM_V_lt = $(am__v_lt_@AM_V@) am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) am__v_lt_0 = --silent am__v_lt_1 = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir)/tsk depcomp = $(SHELL) $(top_srcdir)/config/depcomp am__depfiles_maybe = depfiles am__mv = mv -f COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CFLAGS) $(CFLAGS) AM_V_CC = $(am__v_CC_@AM_V@) am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@) am__v_CC_0 = @echo " CC " $@; am__v_CC_1 = CCLD = $(CC) LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CCLD = $(am__v_CCLD_@AM_V@) am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@) am__v_CCLD_0 = @echo " CCLD " $@; am__v_CCLD_1 = SOURCES = $(libtskimg_la_SOURCES) DIST_SOURCES = $(libtskimg_la_SOURCES) am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. 
am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/config/depcomp DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) ACLOCAL = @ACLOCAL@ ALLOCA = @ALLOCA@ AMTAR = @AMTAR@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ ANT_FOUND = @ANT_FOUND@ AR = @AR@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ JAVA = @JAVA@ JAVAC = @JAVAC@ JNI_CPPFLAGS = @JNI_CPPFLAGS@ LD = @LD@ LDFLAGS = @LDFLAGS@ LIBOBJS = @LIBOBJS@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBTSK_LDFLAGS = @LIBTSK_LDFLAGS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAINT = @MAINT@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ NM = @NM@ NMEDIT = @NMEDIT@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PERL = @PERL@ PTHREAD_CC = @PTHREAD_CC@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ PTHREAD_LIBS = @PTHREAD_LIBS@ RANLIB = @RANLIB@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ STRIP = @STRIP@ VERSION = @VERSION@ Z_PATH = @Z_PATH@ _ACJNI_JAVAC = @_ACJNI_JAVAC@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ ax_pthread_config = @ax_pthread_config@ bindir = @bindir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ datadir = @datadir@ datarootdir = @datarootdir@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ prefix = @prefix@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = 
@top_builddir@ top_srcdir = @top_srcdir@ uudecode = @uudecode@ AM_CPPFLAGS = -I../.. -I$(srcdir)/../.. -Wall EXTRA_DIST = .indent.pro noinst_LTLIBRARIES = libtskimg.la libtskimg_la_SOURCES = img_open.c img_types.c raw.c raw.h \ aff.c aff.h ewf.c ewf.h tsk_img_i.h img_io.c mult_files.c all: all-am .SUFFIXES: .SUFFIXES: .c .lo .o .obj $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign tsk/img/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign tsk/img/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; \ locs=`for p in $$list; do echo $$p; done | \ sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \ sort -u`; \ test -z "$$locs" || { \ echo rm -f $${locs}; \ rm -f $${locs}; \ } libtskimg.la: $(libtskimg_la_OBJECTS) $(libtskimg_la_DEPENDENCIES) $(EXTRA_libtskimg_la_DEPENDENCIES) $(AM_V_CCLD)$(LINK) $(libtskimg_la_OBJECTS) $(libtskimg_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/aff.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ewf.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/img_io.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/img_open.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/img_types.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mult_files.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/raw.Plo@am__quote@ .c.o: @am__fastdepCC_TRUE@ $(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ $< .c.obj: @am__fastdepCC_TRUE@ $(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .c.lo: @am__fastdepCC_TRUE@ $(AM_V_CC)$(LTCOMPILE) -MT $@ -MD -MP -MF 
$(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LTCOMPILE) -c -o $@ $< mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-am TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-am CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-am cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool clean-local \ clean-noinstLTLIBRARIES mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: CTAGS GTAGS TAGS all all-am check check-am clean clean-generic \ clean-libtool clean-local clean-noinstLTLIBRARIES \ cscopelist-am ctags ctags-am distclean distclean-compile \ distclean-generic distclean-libtool distclean-tags distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-pdf \ install-pdf-am install-ps install-ps-am install-strip \ installcheck installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags tags-am uninstall uninstall-am .PRECIOUS: Makefile indent: indent *.c *.h clean-local: -rm -f *.c~ *.h~ # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: sleuthkit-4.2.0/tsk/img/mult_files.c000644 000765 000024 00000014226 12576320700 020216 0ustar00carrierstaff000000 000000 /* * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2011 Brian Carrier, Basis Technology. 
All rights reserved * * mult_files * * This software is distributed under the Common Public License 1.0 * */ /** * \file mult_files.c * Internal code to find remainder of files in a split raw set */ #include "tsk_img_i.h" // return non-zero if str ends with suffix, ignoring case static int endsWith(const TSK_TCHAR * str, const TSK_TCHAR * suffix) { if (TSTRLEN(str) >= TSTRLEN(suffix)) { return (TSTRICMP(&str[TSTRLEN(str) - TSTRLEN(suffix)], suffix) == 0); } return 0; } /** Generate the name of the Nth segment of an image, given the starting name. * If the name scheme isn't recognized, just returns the starting name for * segment 1 and NULL for subsequent segments. * * @param a_startingName First name in the list (must be full name) * @param a_segmentNumber The file number to generate a name for (starting at 1) * @returns newly-allocated file name for this segment number or NULL on error */ static TSK_TCHAR * getSegmentName(const TSK_TCHAR * a_startingName, int a_segmentNumber) { size_t nameLen = TSTRLEN(a_startingName); TSK_TCHAR *newName = (TSK_TCHAR *) tsk_malloc((nameLen + 32) * sizeof(TSK_TCHAR)); // (extra space to allow for .NNN.dmgpart for .dmg file names and for // large segment numbers, which could be 10 digits for 32-bit int) if (newName == NULL) return NULL; // segment 1 uses the original file name always TSTRNCPY(newName, a_startingName, nameLen + 1); if (a_segmentNumber == 1) { return newName; } // .dmg case: second part is .002.dmgpart (etc.) if (endsWith(a_startingName, _TSK_T(".dmg"))) { TSNPRINTF(newName + nameLen - 3, 35, _TSK_T("%03d.dmgpart"), a_segmentNumber); return newName; } // numeric counter case, 3 digit if (endsWith(a_startingName, _TSK_T(".001")) || endsWith(a_startingName, _TSK_T("_001"))) { // don't limit to 3 digits (FTK produces files named // foo.1000 for 1000-part DD images) TSNPRINTF(newName + nameLen - 3, 35, _TSK_T("%03d"), a_segmentNumber); return newName; } // 0-based numeric counter case, 3 digit if (endsWith(a_startingName, _TSK_T(".000")) || endsWith(a_startingName, _TSK_T("_000"))) { TSNPRINTF(newName + nameLen - 3, 35, _TSK_T("%03d"), a_segmentNumber - 1); return newName; } // numeric counter case, 2 digit if (endsWith(a_startingName, _TSK_T(".01")) || endsWith(a_startingName, _TSK_T("_01"))) { TSNPRINTF(newName + nameLen - 2, 34, _TSK_T("%02d"), a_segmentNumber); return newName; } // 0-based numeric counter case, 2 digit if (endsWith(a_startingName, _TSK_T(".00")) || endsWith(a_startingName, _TSK_T("_00"))) { TSNPRINTF(newName + nameLen - 2, 34, _TSK_T("%02d"), a_segmentNumber - 1); return newName; } // alphabetic counter, 3 character if (endsWith(a_startingName, _TSK_T(".aaa")) || endsWith(a_startingName, _TSK_T("xaaa")) || endsWith(a_startingName, _TSK_T("_aaa"))) { // preserve case for the alpha characters a_segmentNumber--; newName[nameLen - 1] += (a_segmentNumber % 26); a_segmentNumber /= 26; newName[nameLen - 2] += (a_segmentNumber % 26); a_segmentNumber /= 26; newName[nameLen - 3] += (a_segmentNumber % 26); a_segmentNumber /= 26; if (a_segmentNumber > 0) { // too many segments for format free(newName); return NULL; } return newName; } // alphabetic counter, 2 character if (endsWith(a_startingName, _TSK_T(".aa")) || endsWith(a_startingName, _TSK_T("xaa")) || endsWith(a_startingName, _TSK_T("_aa"))) { // preserve case for the alpha characters a_segmentNumber--; newName[nameLen - 1] += (a_segmentNumber % 26); a_segmentNumber /= 26; newName[nameLen - 2] += (a_segmentNumber % 26); a_segmentNumber /= 26; if (a_segmentNumber > 0) { // too many 
segments for format free(newName); return NULL; } return newName; } // numeric counter, variable width if (endsWith(a_startingName, _TSK_T(".bin"))) { TSNPRINTF(newName + nameLen - 4, 36, _TSK_T("(%d).bin"), a_segmentNumber); return newName; } // unknown name format free(newName); return NULL; } /** * @param a_startingName First name in the list (must be full name) * @param [out] a_numFound Number of images that are in returned list * @returns array of names that caller must free (NULL on error or if supplied file does not exist) */ TSK_TCHAR ** tsk_img_findFiles(const TSK_TCHAR * a_startingName, int *a_numFound) { TSK_TCHAR **retNames = NULL; TSK_TCHAR *nextName; TSK_TCHAR **tmpNames; int fileCount = 0; struct STAT_STR stat_buf; *a_numFound = 0; // iterate through potential segment names while ((nextName = getSegmentName(a_startingName, fileCount + 1)) != NULL) { // does the file exist? if (TSTAT(nextName, &stat_buf) < 0) { free(nextName); break; } if (tsk_verbose) tsk_fprintf(stderr, "tsk_img_findFiles: %" PRIttocTSK " found\n", nextName); // add to list fileCount++; if (fileCount == 1) tmpNames = (TSK_TCHAR **) tsk_malloc(sizeof(TSK_TCHAR *)); else tmpNames = (TSK_TCHAR **) tsk_realloc(retNames, fileCount * sizeof(TSK_TCHAR *)); if (tmpNames == NULL) { if (retNames != NULL) free(retNames); return NULL; } retNames = tmpNames; retNames[fileCount - 1] = nextName; } if (fileCount <= 0) return NULL; if (tsk_verbose) tsk_fprintf(stderr, "tsk_img_findFiles: %d total segments found\n", fileCount); *a_numFound = fileCount; return retNames; } sleuthkit-4.2.0/tsk/img/raw.c000644 000765 000024 00000056177 12576320700 016657 0ustar00carrierstaff000000 000000 /* * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2006-2011 Brian Carrier, Basis Technology. All rights reserved * Copyright (c) 2005 Brian Carrier. All rights reserved * * raw * * This software is distributed under the Common Public License 1.0 * */ /** * \file raw.c * Internal code to open and read single or split raw disk images */ #include "tsk_img_i.h" #include "raw.h" #ifdef __APPLE__ #include #endif #ifdef TSK_WIN32 #include #endif /** * \internal * Read from one of the multiple files in a split set of disk images. * * @param split_info Disk image info to read from * @param idx Index of the disk image in the set to read from * @param buf [out] Buffer to write data to * @param len Number of bytes to read * @param rel_offset Byte offset in the disk image to read from (not the offset in the full disk image set) * * @return -1 on error or number of bytes read */ static ssize_t raw_read_segment(IMG_RAW_INFO * raw_info, int idx, char *buf, size_t len, TSK_OFF_T rel_offset) { IMG_SPLIT_CACHE *cimg; ssize_t cnt; /* Is the image already open? 
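 * raw_info->cptr[idx] is the index of this segment's slot in the small
 * SPLIT_CACHE array of open file handles, or -1 if the segment is not
 * currently open (its slot may also have been reused for another segment).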
*/ if (raw_info->cptr[idx] == -1) { if (tsk_verbose) { tsk_fprintf(stderr, "raw_read_segment: opening file into slot %d: %" PRIttocTSK "\n", raw_info->next_slot, raw_info->images[idx]); } /* Grab the next cache slot */ cimg = &raw_info->cache[raw_info->next_slot]; /* Free it if being used */ if (cimg->fd != 0) { if (tsk_verbose) { tsk_fprintf(stderr, "raw_read_segment: closing file %" PRIttocTSK "\n", raw_info->images[cimg->image]); } #ifdef TSK_WIN32 CloseHandle(cimg->fd); #else close(cimg->fd); #endif raw_info->cptr[cimg->image] = -1; } #ifdef TSK_WIN32 cimg->fd = CreateFile(raw_info->images[idx], FILE_READ_DATA, FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_EXISTING, 0, NULL); if ( cimg->fd == INVALID_HANDLE_VALUE ) { int lastError = (int)GetLastError(); cimg->fd = 0; /* so we don't close it next time */ tsk_error_reset(); tsk_error_set_errno(TSK_ERR_IMG_OPEN); tsk_error_set_errstr("raw_read: file \"%" PRIttocTSK "\" - %d", raw_info->images[idx], lastError); return -1; } #else if ((cimg->fd = open(raw_info->images[idx], O_RDONLY | O_BINARY)) < 0) { cimg->fd = 0; /* so we don't close it next time */ tsk_error_reset(); tsk_error_set_errno(TSK_ERR_IMG_OPEN); tsk_error_set_errstr("raw_read: file \"%" PRIttocTSK "\" - %s", raw_info->images[idx], strerror(errno)); return -1; } #endif cimg->image = idx; cimg->seek_pos = 0; raw_info->cptr[idx] = raw_info->next_slot; if (++raw_info->next_slot == SPLIT_CACHE) { raw_info->next_slot = 0; } } else { /* image already open */ cimg = &raw_info->cache[raw_info->cptr[idx]]; } #ifdef TSK_WIN32 { DWORD nread; if (cimg->seek_pos != rel_offset) { LARGE_INTEGER li; li.QuadPart = rel_offset; li.LowPart = SetFilePointer(cimg->fd, li.LowPart, &li.HighPart, FILE_BEGIN); if ((li.LowPart == INVALID_SET_FILE_POINTER) && (GetLastError() != NO_ERROR)) { int lastError = (int)GetLastError(); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_IMG_SEEK); tsk_error_set_errstr("raw_read: file \"%" PRIttocTSK "\" offset %" PRIuOFF " seek - %d", raw_info->images[idx], rel_offset, lastError); return -1; } cimg->seek_pos = rel_offset; } //For physical drive when the buffer is larger than remaining data, // WinAPI ReadFile call returns -1 //in this case buffer of exact length must be passed to ReadFile if ((raw_info->is_winobj) && (rel_offset + len > raw_info->img_info.size )) len = (size_t)(raw_info->img_info.size - rel_offset); if (FALSE == ReadFile(cimg->fd, buf, (DWORD) len, &nread, NULL)) { int lastError = GetLastError(); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_IMG_READ); tsk_error_set_errstr("raw_read: file \"%" PRIttocTSK "\" offset: %" PRIuOFF " read len: %" PRIuSIZE " - %d", raw_info->images[idx], rel_offset, len, lastError); return -1; } cnt = (ssize_t) nread; } #else if (cimg->seek_pos != rel_offset) { if (lseek(cimg->fd, rel_offset, SEEK_SET) != rel_offset) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_IMG_SEEK); tsk_error_set_errstr("raw_read: file \"%" PRIttocTSK "\" offset %" PRIuOFF " seek - %s", raw_info->images[idx], rel_offset, strerror(errno)); return -1; } cimg->seek_pos = rel_offset; } cnt = read(cimg->fd, buf, len); if (cnt < 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_IMG_READ); tsk_error_set_errstr("raw_read: file \"%" PRIttocTSK "\" offset: %" PRIuOFF " read len: %" PRIuSIZE " - %s", raw_info->images[idx], rel_offset, len, strerror(errno)); return -1; } #endif cimg->seek_pos += cnt; return cnt; } /** * \internal * Read data from a (potentially split) raw disk image. 
The offset to * start reading from is equal to the volume offset plus the read offset. * * Note: The routine -assumes- we are under a lock on &(img_info->cache_lock)) * * @param img_info Disk image to read from * @param offset Byte offset in image to start reading from * @param buf [out] Buffer to write data to * @param len Number of bytes to read * * @return number of bytes read or -1 on error */ static ssize_t raw_read(TSK_IMG_INFO * img_info, TSK_OFF_T offset, char *buf, size_t len) { IMG_RAW_INFO *raw_info = (IMG_RAW_INFO *) img_info; int i; if (tsk_verbose) { tsk_fprintf(stderr, "raw_read: byte offset: %" PRIuOFF " len: %" PRIuSIZE "\n", offset, len); } if (offset > img_info->size) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_IMG_READ_OFF); tsk_error_set_errstr("raw_read: offset %" PRIuOFF " too large", offset); return -1; } // Find the location of the offset for (i = 0; i < raw_info->num_img; i++) { /* Does the data start in this image? */ if (offset < raw_info->max_off[i]) { TSK_OFF_T rel_offset; size_t read_len; ssize_t cnt; /* Get the offset relative to this image segment */ if (i > 0) { rel_offset = offset - raw_info->max_off[i - 1]; } else { rel_offset = offset; } /* Get the length to read */ if ((raw_info->max_off[i] - offset) >= len) read_len = len; else read_len = (size_t) (raw_info->max_off[i] - offset); if (tsk_verbose) { tsk_fprintf(stderr, "raw_read: found in image %d relative offset: %" PRIuOFF " len: %" PRIuOFF "\n", i, rel_offset, (TSK_OFF_T) read_len); } cnt = raw_read_segment(raw_info, i, buf, read_len, rel_offset); if (cnt < 0) { return -1; } if ((TSK_OFF_T) cnt != read_len) { return cnt; } /* read from the next image segment(s) if needed */ if (((TSK_OFF_T) cnt == read_len) && (read_len != len)) { ssize_t cnt2; len -= read_len; while (len > 0) { /* go to the next image segment */ i++; if (raw_info->max_off[i] - raw_info->max_off[i - 1] >= len) read_len = len; else read_len = (size_t) (raw_info->max_off[i] - raw_info->max_off[i - 1]); if (tsk_verbose) { tsk_fprintf(stderr, "raw_read: additional image reads: image %d len: %" PRIuOFF "\n", i, read_len); } cnt2 = raw_read_segment(raw_info, i, &buf[cnt], read_len, 0); if (cnt2 < 0) { return -1; } cnt += cnt2; if ((TSK_OFF_T) cnt2 != read_len) { return cnt; } len -= cnt2; } } return cnt; } } tsk_error_reset(); tsk_error_set_errno(TSK_ERR_IMG_READ_OFF); tsk_error_set_errstr("raw_read: offset %" PRIuOFF " not found in any segments", offset); return -1; } /** * \internal * Display information about the disk image set. * * @param img_info Disk image to analyze * @param hFile Handle to print information to */ static void raw_imgstat(TSK_IMG_INFO * img_info, FILE * hFile) { IMG_RAW_INFO *raw_info = (IMG_RAW_INFO *) img_info; int i; tsk_fprintf(hFile, "IMAGE FILE INFORMATION\n"); tsk_fprintf(hFile, "--------------------------------------------\n"); tsk_fprintf(hFile, "Image Type: raw\n"); tsk_fprintf(hFile, "\nSize in bytes: %" PRIuOFF "\n", img_info->size); if (raw_info->num_img > 1) { tsk_fprintf(hFile, "\n--------------------------------------------\n"); tsk_fprintf(hFile, "Split Information:\n"); for (i = 0; i < raw_info->num_img; i++) { tsk_fprintf(hFile, "%" PRIttocTSK " (%" PRIuOFF " to %" PRIuOFF ")\n", raw_info->images[i], (TSK_OFF_T) (i == 0) ? 
0 : raw_info->max_off[i - 1], (TSK_OFF_T) (raw_info->max_off[i] - 1)); } } return; } /** * \internal * Free the memory and close the file handles for the disk image * * @param img_info Disk image to close */ static void raw_close(TSK_IMG_INFO * img_info) { IMG_RAW_INFO *raw_info = (IMG_RAW_INFO *) img_info; int i; for (i = 0; i < SPLIT_CACHE; i++) { if (raw_info->cache[i].fd != 0) #ifdef TSK_WIN32 CloseHandle(raw_info->cache[i].fd); #else close(raw_info->cache[i].fd); #endif } for (i = 0; i < raw_info->num_img; i++) { if (raw_info->images[i]) free(raw_info->images[i]); } if (raw_info->max_off) free(raw_info->max_off); if (raw_info->images) free(raw_info->images); if (raw_info->cptr) free(raw_info->cptr); tsk_img_free(raw_info); } /** * Get the size in bytes of the given file. * * @param a_file The file to test * @param is_winobj 1 if the file is a windows object and not a real file * * @return the size in bytes, or -1 on error/unknown, * -2 if unreadable, -3 if it's a directory. */ static TSK_OFF_T get_size(const TSK_TCHAR * a_file, uint8_t a_is_winobj) { TSK_OFF_T size = -1; struct STAT_STR sb; if (TSTAT(a_file, &sb) < 0) { if (a_is_winobj) { /* stat can fail for Windows objects; ignore that */ if (tsk_verbose) { tsk_fprintf(stderr, "raw_open: ignoring stat result on Windows device %" PRIttocTSK "\n", a_file); } } else { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_IMG_STAT); tsk_error_set_errstr("raw_open: image \"%" PRIttocTSK "\" - %s", a_file, strerror(errno)); return -2; } } else if ((sb.st_mode & S_IFMT) == S_IFDIR) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_IMG_MAGIC); tsk_error_set_errstr("raw_open: image \"%" PRIttocTSK "\" - is a directory", a_file); return -3; } #ifdef TSK_WIN32 { HANDLE fd; DWORD dwHi, dwLo; if ((fd = CreateFile(a_file, FILE_READ_DATA, FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_EXISTING, 0, NULL)) == INVALID_HANDLE_VALUE) { int lastError = (int)GetLastError(); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_IMG_OPEN); // print string of commonly found errors if (lastError == ERROR_ACCESS_DENIED) { tsk_error_set_errstr("raw_open: file \"%" PRIttocTSK "\" - access denied", a_file); } else if (lastError == ERROR_SHARING_VIOLATION) { tsk_error_set_errstr("raw_open: file \"%" PRIttocTSK "\" - sharing violation", a_file); } else if (lastError == ERROR_FILE_NOT_FOUND) { tsk_error_set_errstr("raw_open: file \"%" PRIttocTSK "\" - file not found", a_file); } else { tsk_error_set_errstr("raw_open: file \"%" PRIttocTSK "\" - (error %d)", a_file, lastError); } return -2; } /* We need different techniques to determine the size of Windows physical * devices versus normal files */ if (a_is_winobj == 0) { dwLo = GetFileSize(fd, &dwHi); if (dwLo == 0xffffffff) { int lastError = (int)GetLastError(); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_IMG_OPEN); tsk_error_set_errstr("raw_open: file \"%" PRIttocTSK "\" - GetFileSize: %d", a_file, lastError); size = -1; } else { size = dwLo | ((TSK_OFF_T) dwHi << 32); } } else { //use GET_PARTITION_INFO_EX prior to IOCTL_DISK_GET_DRIVE_GEOMETRY // to determine the physical disk size because //calculating it with the help of GET_DRIVE_GEOMETRY gives only // approximate number DWORD junk; PARTITION_INFORMATION_EX partition; if (FALSE == DeviceIoControl(fd, IOCTL_DISK_GET_PARTITION_INFO_EX, NULL, 0, &partition, sizeof(partition), &junk, (LPOVERLAPPED)NULL) ) { DISK_GEOMETRY pdg; if (FALSE == DeviceIoControl(fd, IOCTL_DISK_GET_DRIVE_GEOMETRY, NULL, 0, &pdg, sizeof(pdg), &junk, (LPOVERLAPPED) NULL)) { int lastError = 
(int)GetLastError(); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_IMG_OPEN); tsk_error_set_errstr("raw_open: file \"%" PRIttocTSK "\" - DeviceIoControl: %d", a_file, lastError); size = -1; } else { size = pdg.Cylinders.QuadPart * (TSK_OFF_T) pdg.TracksPerCylinder * (TSK_OFF_T) pdg.SectorsPerTrack * (TSK_OFF_T) pdg.BytesPerSector; } } else { size = partition.PartitionLength.QuadPart; } } CloseHandle(fd); } #else int fd; if ((fd = open(a_file, O_RDONLY | O_BINARY)) < 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_IMG_OPEN); tsk_error_set_errstr("raw_open: file \"%" PRIttocTSK "\" - %s", a_file, strerror(errno)); return -2; } #ifdef __APPLE__ /* OS X doesn't support SEEK_END on char devices */ if ((sb.st_mode & S_IFMT) != S_IFCHR) { size = lseek(fd, 0, SEEK_END); } if (size <= 0) { int blkSize; long long blkCnt; if (ioctl(fd, DKIOCGETBLOCKSIZE, &blkSize) >= 0) { if (ioctl(fd, DKIOCGETBLOCKCOUNT, &blkCnt) >= 0) { size = blkCnt * (long long) blkSize; } } } #else /* We don't use the stat output because it doesn't work on raw * devices and such */ size = lseek(fd, 0, SEEK_END); #endif close(fd); #endif return size; } /** * \internal * Open the set of disk images as a set of split raw images * * @param a_num_img Number of images in set * @param a_images List of disk image paths (in sorted order) * @param a_ssize Size of device sector in bytes (or 0 for default) * * @return NULL on error */ TSK_IMG_INFO * raw_open(int a_num_img, const TSK_TCHAR * const a_images[], unsigned int a_ssize) { IMG_RAW_INFO *raw_info; TSK_IMG_INFO *img_info; int i; TSK_OFF_T first_seg_size; if ((raw_info = (IMG_RAW_INFO *) tsk_img_malloc(sizeof(IMG_RAW_INFO))) == NULL) return NULL; img_info = (TSK_IMG_INFO *) raw_info; img_info->itype = TSK_IMG_TYPE_RAW; img_info->read = raw_read; img_info->close = raw_close; img_info->imgstat = raw_imgstat; img_info->sector_size = 512; if (a_ssize) img_info->sector_size = a_ssize; raw_info->is_winobj = 0; #if defined(TSK_WIN32) || defined(__CYGWIN__) /* determine if this is the path to a Windows device object */ if ((a_images[0][0] == _TSK_T('\\')) && (a_images[0][1] == _TSK_T('\\')) && ((a_images[0][2] == _TSK_T('.')) || (a_images[0][2] == _TSK_T('?'))) && (a_images[0][3] == _TSK_T('\\'))) { raw_info->is_winobj = 1; } #endif /* Check that the first image file exists and is not a directory */ first_seg_size = get_size(a_images[0], raw_info->is_winobj); if (first_seg_size < -1) { tsk_img_free(raw_info); return NULL; } /* see if there are more of them... 
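 * (tsk_img_findFiles() probes successive segment names derived from the first
 * one, such as ".002", "_002", or ".aab", and stops at the first name that
 * does not exist on disk)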
*/ if ((a_num_img == 1) && (raw_info->is_winobj == 0)) { if ((raw_info->images = tsk_img_findFiles(a_images[0], &raw_info->num_img)) == NULL) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_IMG_STAT); tsk_error_set_errstr ("raw_open: could not find segment files starting at \"%" PRIttocTSK "\"", a_images[0]); tsk_img_free(raw_info); return NULL; } } else { raw_info->num_img = a_num_img; raw_info->images = (TSK_TCHAR **) tsk_malloc(sizeof(TSK_TCHAR *) * a_num_img); if (raw_info->images == NULL) { tsk_img_free(raw_info); return NULL; } for (i = 0; i < raw_info->num_img; i++) { size_t len = TSTRLEN(a_images[i]); raw_info->images[i] = (TSK_TCHAR *) tsk_malloc(sizeof(TSK_TCHAR) * (len + 1)); if (raw_info->images[i] == NULL) { int j; for (j = 0; j < i; j++) { free(raw_info->images[j]); } free(raw_info->images); tsk_img_free(raw_info); return NULL; } TSTRNCPY(raw_info->images[i], a_images[i], len + 1); } } /* sanity check: when we have multiple segments, the size of * each must be known */ if ((raw_info->num_img > 1) && (first_seg_size < 0)) { if (tsk_verbose) { tsk_fprintf(stderr, "raw_open: file size is unknown in a segmented raw image\n"); } for (i = 0; i < raw_info->num_img; i++) { free(raw_info->images[i]); } free(raw_info->images); tsk_img_free(raw_info); return NULL; } /* initialize the split cache */ raw_info->cptr = (int *) tsk_malloc(raw_info->num_img * sizeof(int)); if (raw_info->cptr == NULL) { for (i = 0; i < raw_info->num_img; i++) { free(raw_info->images[i]); } free(raw_info->images); tsk_img_free(raw_info); return NULL; } memset((void *) &raw_info->cache, 0, SPLIT_CACHE * sizeof(IMG_SPLIT_CACHE)); raw_info->next_slot = 0; /* initialize the offset table and re-use the first segment * size gathered above */ raw_info->max_off = (TSK_OFF_T *) tsk_malloc(raw_info->num_img * sizeof(TSK_OFF_T)); if (raw_info->max_off == NULL) { free(raw_info->cptr); for (i = 0; i < raw_info->num_img; i++) { free(raw_info->images[i]); } free(raw_info->images); tsk_img_free(raw_info); return NULL; } img_info->size = first_seg_size; raw_info->max_off[0] = img_info->size; raw_info->cptr[0] = -1; if (tsk_verbose) { tsk_fprintf(stderr, "raw_open: segment: 0 size: %" PRIuOFF " max offset: %" PRIuOFF " path: %" PRIttocTSK "\n", first_seg_size, raw_info->max_off[0], raw_info->images[0]); } /* get size info for each file - we do not open each one because that * could cause us to run out of file decsriptors when we only need a few. 
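 * Each segment's size is added to img_info->size, and the running total is
 * recorded in max_off[i]; raw_read() later scans max_off to find the segment
 * that contains a requested byte offset.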
* The descriptors are opened as needed */ for (i = 1; i < raw_info->num_img; i++) { TSK_OFF_T size; raw_info->cptr[i] = -1; size = get_size(raw_info->images[i], raw_info->is_winobj); if (size < 0) { if (size == -1) { if (tsk_verbose) { tsk_fprintf(stderr, "raw_open: file size is unknown in a segmented raw image\n"); } } free(raw_info->cptr); for (i = 0; i < raw_info->num_img; i++) { free(raw_info->images[i]); } free(raw_info->images); tsk_img_free(raw_info); return NULL; } /* add the size of this image to the total and save the current max */ img_info->size += size; raw_info->max_off[i] = img_info->size; if (tsk_verbose) { tsk_fprintf(stderr, "raw_open: segment: %d size: %" PRIuOFF " max offset: %" PRIuOFF " path: %" PRIttocTSK "\n", i, size, raw_info->max_off[i], raw_info->images[i]); } } return img_info; } /* tsk_img_malloc - init lock after tsk_malloc * This is for img module and all its inheritances */ void * tsk_img_malloc(size_t a_len) { TSK_IMG_INFO *imgInfo; if ((imgInfo = (TSK_IMG_INFO *) tsk_malloc(a_len)) == NULL) return NULL; //init lock tsk_init_lock(&(imgInfo->cache_lock)); imgInfo->tag = TSK_IMG_INFO_TAG; return (void *) imgInfo; } /* tsk_img_free - deinit lock before free memory * This is for img module and all its inheritances */ void tsk_img_free(void *a_ptr) { TSK_IMG_INFO *imgInfo = (TSK_IMG_INFO *) a_ptr; //deinit lock tsk_deinit_lock(&(imgInfo->cache_lock)); imgInfo->tag = 0; free(imgInfo); } sleuthkit-4.2.0/tsk/img/raw.h000644 000765 000024 00000002170 12576320700 016644 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2005-2011 Brian Carrier. All rights reserved * * This software is distributed under the Common Public License 1.0 */ /* * Contains the single raw data file-specific functions and structures. */ #ifndef _RAW_H #define _RAW_H #ifdef __cplusplus extern "C" { #endif extern TSK_IMG_INFO *raw_open(int a_num_img, const TSK_TCHAR * const a_images[], unsigned int a_ssize); #define SPLIT_CACHE 15 typedef struct { #ifdef TSK_WIN32 HANDLE fd; #else int fd; #endif int image; TSK_OFF_T seek_pos; } IMG_SPLIT_CACHE; typedef struct { TSK_IMG_INFO img_info; int num_img; uint8_t is_winobj; // the following are protected by cache_lock in IMG_INFO TSK_TCHAR **images; TSK_OFF_T *max_off; int *cptr; /* exists for each image - points to entry in cache */ IMG_SPLIT_CACHE cache[SPLIT_CACHE]; /* small number of fds for open images */ int next_slot; } IMG_RAW_INFO; #ifdef __cplusplus } #endif #endif sleuthkit-4.2.0/tsk/img/tsk_img.h000644 000765 000024 00000030223 12576320700 017510 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2005-2011 Brian Carrier. All rights reserved * * This software is distributed under the Common Public License 1.0 */ #ifndef _TSK_IMG_H #define _TSK_IMG_H /** * \file tsk_img.h * Contains the external library definitions for the disk image functions. * Note that this file is not meant to be directly included. * It is included by both libtsk.h and tsk_img_i.h. */ /** * \defgroup imglib C Disk Image Functions * \defgroup imglib_cpp C++ Disk Image Classes */ #ifdef __cplusplus extern "C" { #endif /** * \ingroup imglib * Macro that takes a image type and returns 1 if the type * is for a raw file format. */ #define TSK_IMG_TYPE_ISRAW(t) \ ((((t) & TSK_IMG_TYPE_RAW))?1:0) /** * \ingroup imglib * Macro that takes a image type and returns 1 if the type * is for an AFF file format. 
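 * For example, TSK_IMG_TYPE_ISAFF(TSK_IMG_TYPE_AFF_AFD) evaluates to 1, while
 * TSK_IMG_TYPE_ISAFF(TSK_IMG_TYPE_RAW) evaluates to 0.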
*/ #define TSK_IMG_TYPE_ISAFF(t) \ ((((t) & TSK_IMG_TYPE_AFF_AFF) || ((t) & TSK_IMG_TYPE_AFF_AFD) || ((t) & TSK_IMG_TYPE_AFF_AFM) || \ ((t) & TSK_IMG_TYPE_AFF_ANY))?1:0) /** * \ingroup imglib * Macro that takes a image type and returns 1 if the type * is for an EWF file format. */ #define TSK_IMG_TYPE_ISEWF(t) \ ((((t) & TSK_IMG_TYPE_EWF_EWF))?1:0) /** * Flag values for the disk image format type. Each type has a * bit associated with it. There are TSK_IMG_TYPE_ISXXX macros * to determine the broad group of the type (raw vs aff etc.) */ typedef enum { TSK_IMG_TYPE_DETECT = 0x0000, ///< Use autodetection methods TSK_IMG_TYPE_RAW = 0x0001, ///< Raw disk image (single or split) TSK_IMG_TYPE_RAW_SING = TSK_IMG_TYPE_RAW, ///< Raw single (backward compatibility) depreciated TSK_IMG_TYPE_RAW_SPLIT = TSK_IMG_TYPE_RAW, ///< Raw single (backward compatibility) depreciated TSK_IMG_TYPE_AFF_AFF = 0x0004, ///< AFF AFF Format TSK_IMG_TYPE_AFF_AFD = 0x0008, ///< AFD AFF Format TSK_IMG_TYPE_AFF_AFM = 0x0010, ///< AFM AFF Format TSK_IMG_TYPE_AFF_ANY = 0x0020, ///< Any format supported by AFFLIB (including beta ones) TSK_IMG_TYPE_EWF_EWF = 0x0040, ///< EWF version TSK_IMG_TYPE_EXTERNAL = 0x1000, ///< external defined format which at least implements TSK_IMG_INFO, used by pytsk TSK_IMG_TYPE_UNSUPP = 0xffff, ///< Unsupported disk image type } TSK_IMG_TYPE_ENUM; #define TSK_IMG_INFO_CACHE_NUM 4 #define TSK_IMG_INFO_CACHE_LEN 65536 typedef struct TSK_IMG_INFO TSK_IMG_INFO; #define TSK_IMG_INFO_TAG 0x39204231 /** * Created when a disk image has been opened and stores general information and handles. */ struct TSK_IMG_INFO { uint32_t tag; ///< Set to TSK_IMG_INFO_TAG when struct is alloc TSK_IMG_TYPE_ENUM itype; ///< Type of disk image format TSK_OFF_T size; ///< Total size of image in bytes unsigned int sector_size; ///< sector size of device in bytes (typically 512) unsigned int page_size; ///< page size of NAND page in bytes (defaults to 2048) unsigned int spare_size; ///< spare or OOB size of NAND in bytes (defaults to 64) tsk_lock_t cache_lock; ///< Lock for cache and associated values char cache[TSK_IMG_INFO_CACHE_NUM][TSK_IMG_INFO_CACHE_LEN]; ///< read cache (r/w shared - lock) TSK_OFF_T cache_off[TSK_IMG_INFO_CACHE_NUM]; ///< starting byte offset of corresponding cache entry (r/w shared - lock) int cache_age[TSK_IMG_INFO_CACHE_NUM]; ///< "Age" of corresponding cache entry, higher means more recently used (r/w shared - lock) size_t cache_len[TSK_IMG_INFO_CACHE_NUM]; ///< Length of cache entry used (0 if never used) (r/w shared - lock) ssize_t(*read) (TSK_IMG_INFO * img, TSK_OFF_T off, char *buf, size_t len); ///< \internal External progs should call tsk_img_read() void (*close) (TSK_IMG_INFO *); ///< \internal Progs should call tsk_img_close() void (*imgstat) (TSK_IMG_INFO *, FILE *); ///< Pointer to file type specific function }; // open and close functions extern TSK_IMG_INFO *tsk_img_open_sing(const TSK_TCHAR * a_image, TSK_IMG_TYPE_ENUM type, unsigned int a_ssize); extern TSK_IMG_INFO *tsk_img_open(int, const TSK_TCHAR * const images[], TSK_IMG_TYPE_ENUM, unsigned int a_ssize); extern TSK_IMG_INFO *tsk_img_open_utf8_sing(const char *a_image, TSK_IMG_TYPE_ENUM type, unsigned int a_ssize); extern TSK_IMG_INFO *tsk_img_open_utf8(int num_img, const char *const images[], TSK_IMG_TYPE_ENUM type, unsigned int a_ssize); extern void tsk_img_close(TSK_IMG_INFO *); // read functions extern ssize_t tsk_img_read(TSK_IMG_INFO * img, TSK_OFF_T off, char *buf, size_t len); // type conversion functions extern 
TSK_IMG_TYPE_ENUM tsk_img_type_toid_utf8(const char *); extern TSK_IMG_TYPE_ENUM tsk_img_type_toid(const TSK_TCHAR *); extern const char *tsk_img_type_toname(TSK_IMG_TYPE_ENUM); extern const char *tsk_img_type_todesc(TSK_IMG_TYPE_ENUM); extern TSK_IMG_TYPE_ENUM tsk_img_type_supported(); extern void tsk_img_type_print(FILE *); #ifdef __cplusplus } #endif #ifdef __cplusplus /** \ingroup imglib_cpp * Stores information about an image that is open and being analyzed. * To use this object, open() should be called first. Otherwise, the get() * methods will return undefined values. */ class TskImgInfo { friend class TskFsInfo; friend class TskVsInfo; private: TSK_IMG_INFO * m_imgInfo; bool m_opened; // true if open() was called and we need to free it TskImgInfo(const TskImgInfo & rhs); TskImgInfo & operator=(const TskImgInfo & rhs); public: TskImgInfo() { m_imgInfo = NULL; m_opened = false; }; ~TskImgInfo() { if (m_imgInfo == NULL) { return; } m_imgInfo->close(m_imgInfo); }; TskImgInfo(TSK_IMG_INFO * a_imgInfo) { m_imgInfo = a_imgInfo; m_opened = false; }; /** * Opens a single (non-split) disk image file so that it can be read. * See tsk_img_open_sing() for more details. * * @param a_image The path to the image file * @param a_type The disk image type (can be autodetection) * @param a_ssize Size of device sector in bytes (or 0 for default) * * @return 1 on error and 0 on success */ uint8_t open(const TSK_TCHAR * a_image, TSK_IMG_TYPE_ENUM a_type, unsigned int a_ssize) { if ((m_imgInfo = tsk_img_open_sing(a_image, a_type, a_ssize)) != NULL) { m_opened = true; return 0; } else { return 1; } }; /** * Opens one or more disk image files so that they can be read. e UTF8, then consider * See tsk_img_open() for more details. * * @param a_num_img The number of images to open (will be > 1 for split images). * @param a_images The path to the image files (the number of files must * be equal to num_img and they must be in a sorted order) * @param a_type The disk image type (can be autodetection) * @param a_ssize Size of device sector in bytes (or 0 for default) * * @return 1 on error and 0 on success */ uint8_t open(int a_num_img, const TSK_TCHAR * const a_images[], TSK_IMG_TYPE_ENUM a_type, unsigned int a_ssize) { if ((m_imgInfo = tsk_img_open(a_num_img, a_images, a_type, a_ssize)) != NULL) { m_opened = true; return 0; } else { return 1; } }; #ifdef TSK_WIN32 /** * Opens a single (non-split) disk image file so that it can be read. This version * always takes a UTF-8 encoding of the disk image. See tsk_img_open_utf8_sing() for details. * * @param a_image The UTF-8 path to the image file * @param a_type The disk image type (can be autodetection) * @param a_ssize Size of device sector in bytes (or 0 for default) * * @return 1 on error and 0 on success */ uint8_t open(const char *a_image, TSK_IMG_TYPE_ENUM a_type, unsigned int a_ssize) { if ((m_imgInfo = tsk_img_open_utf8_sing(a_image, a_type, a_ssize)) != NULL) { m_opened = true; return 0; } else { return 1; } }; /** * Opens one or more disk image files so that they can be read. This * version always takes a UTF-8 encoding of the image files. See tsk_img_open_utf8() * for more details. * * @param a_num_img The number of images to open (will be > 1 for split images). 
* @param a_images The path to the UTF-8 encoded image files (the number of files must * be equal to a_num_img and they must be in a sorted order) * @param a_type The disk image type (can be autodetection) * @param a_ssize Size of device sector in bytes (or 0 for default) * * @return 1 on error and 0 on success */ uint8_t open(int a_num_img, const char *const a_images[], TSK_IMG_TYPE_ENUM a_type, unsigned int a_ssize) { if ((m_imgInfo = tsk_img_open_utf8(a_num_img, a_images, a_type, a_ssize)) != NULL) { m_opened = true; return 0; } else { return 1; } }; #endif /** * Reads data from an open disk image * * @param a_off Byte offset to start reading from * @param a_buf Buffer to read into * @param a_len Number of bytes to read into buffer * @returns number of bytes read or -1 on error */ ssize_t read(TSK_OFF_T a_off, char *a_buf, size_t a_len) { return tsk_img_read(m_imgInfo, a_off, a_buf, a_len); }; /** * returns the image format type. * @returns image format type */ TSK_IMG_TYPE_ENUM getType() const { if (m_imgInfo != NULL) return m_imgInfo->itype; else return (TSK_IMG_TYPE_ENUM) 0; }; /** * Returns the size of the image. * @returns total size of image in bytes */ TSK_OFF_T getSize() const { if (m_imgInfo != NULL) return m_imgInfo->size; else return 0; }; /** * Returns the sector size of the disk * @returns sector size of original device in bytes */ unsigned int getSectorSize() const { if (m_imgInfo != NULL) return m_imgInfo->sector_size; else return 0; }; /** * Parses a string that specifies an image format to determine the * associated type ID. This is used by the TSK command line tools to * parse the type given on the command line. * * @param a_str String of image format type * @return ID of image type */ static TSK_IMG_TYPE_ENUM typeToId(const TSK_TCHAR * a_str) { return tsk_img_type_toid(a_str); }; /** * Returns the name of an image format type, given its type ID. * @param a_type ID of image type * @returns Pointer to string of the name. */ static const char *typeToName(TSK_IMG_TYPE_ENUM a_type) { return tsk_img_type_toname(a_type); }; /** * Returns the description of an image format type, given its type ID. * @param a_type ID of image type * @returns Pointer to string of the description */ static const char *typeToDesc(TSK_IMG_TYPE_ENUM a_type) { return tsk_img_type_todesc(a_type); }; /** * Returns the supported file format types. * @returns A bit in the return value is set to 1 if the type is supported. */ static TSK_IMG_TYPE_ENUM typeSupported() { return tsk_img_type_supported(); }; /** * Prints the name and description of the supported image types to a handle. * This is used by the TSK command line tools to print the supported types * to the console. * @param a_file Handle to print names and descriptions to. */ static void typePrint(FILE * a_file) { tsk_img_type_print(a_file); }; }; #endif //__cplusplus #endif //_TSK_IMG_H sleuthkit-4.2.0/tsk/img/tsk_img_i.h000644 000765 000024 00000002121 12576320700 020014 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2005-2011 Brian Carrier. All rights reserved * * This software is distributed under the Common Public License 1.0 */ #ifndef _TSK_IMG_I_H #define _TSK_IMG_I_H /* * Contains the internal library definitions for the disk image functions. This should * be included by the code in the img library. 
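 *
 * For orientation only -- a minimal usage sketch of the public API declared in
 * tsk_img.h (this example is not part of the original header; the image path is
 * hypothetical and error handling is abbreviated):
 *
 *     TSK_IMG_INFO *img = tsk_img_open_utf8_sing("image.dd", TSK_IMG_TYPE_DETECT, 0);
 *     if (img != NULL) {
 *         char buf[512];
 *         ssize_t cnt = tsk_img_read(img, 0, buf, sizeof(buf));  // read the first 512 bytes
 *         if (cnt > 0) {
 *             // ... examine buf ...
 *         }
 *         tsk_img_close(img);
 *     }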
*/ // include the base internal header file #include "tsk/base/tsk_base_i.h" // include the external disk image header file #include "tsk_img.h" // other standard includes #include #include #include #include #include #ifdef __cplusplus extern "C" { #endif // Cygwin needs this, but not everyone defines it #ifndef O_BINARY #define O_BINARY 0 #endif extern void *tsk_img_malloc(size_t); extern void tsk_img_free(void *); extern TSK_TCHAR **tsk_img_findFiles(const TSK_TCHAR * a_startingName, int *a_numFound); extern const TSK_TCHAR ** tsk_img_get_names(TSK_IMG_INFO *a_img_info, int *a_num_imgs); #ifdef __cplusplus } #endif #endif sleuthkit-4.2.0/tsk/hashdb/.indent.pro000644 000765 000024 00000000031 12576320700 020432 0ustar00carrierstaff000000 000000 -kr -psl -nce -ip2 -nut sleuthkit-4.2.0/tsk/hashdb/binsrch_index.cpp000755 000765 000024 00000155407 12576320700 021721 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2014 Brian Carrier. All rights reserved * * * This software is distributed under the Common Public License 1.0 */ #include "tsk_hashdb_i.h" #include "tsk_hash_info.h" /** * \file binsrch_index.cpp * Functions common to all text hash databases (i.e. NSRL, HashKeeper, EnCase, etc.). * Examples include index management and index-based lookup. */ // A mapping of initial hash digits to offsets in the index file is used to // set the initial bounds of the binary search of the index file that is done // for lookups. The mapping is from the first three digits (three nibbles) of // the hash, so there are 2 ^ 12 or 4096 possible entries. static const size_t IDX_IDX_ENTRY_COUNT = 4096; static const size_t IDX_IDX_SIZE = IDX_IDX_ENTRY_COUNT * sizeof(uint64_t); static const uint64_t IDX_IDX_ENTRY_NOT_SET = 0xFFFFFFFFFFFFFFFF; /** * Called by the various text-based databases to setup the TSK_HDB_BINSRCH_INFO struct. * This will setup the basic function pointers, that will be overwritten by the more * specific methods. */ TSK_HDB_BINSRCH_INFO *hdb_binsrch_open(FILE *hDb, const TSK_TCHAR *db_path) { TSK_HDB_BINSRCH_INFO *hdb_binsrch_info = NULL; if ((hdb_binsrch_info = (TSK_HDB_BINSRCH_INFO*)tsk_malloc(sizeof(TSK_HDB_BINSRCH_INFO))) == NULL) { return NULL; } // Do basic initialization of hdb_binsrch_info if (hdb_info_base_open((TSK_HDB_INFO*)hdb_binsrch_info, db_path)) { return NULL; } // override basic settings with basic text settings hdb_binsrch_info->hDb = hDb; hdb_binsrch_info->base.uses_external_indexes = hdb_binsrch_uses_external_indexes; hdb_binsrch_info->base.get_index_path = hdb_binsrch_get_index_path; hdb_binsrch_info->base.has_index = hdb_binsrch_has_index; hdb_binsrch_info->base.open_index = hdb_binsrch_open_idx; hdb_binsrch_info->base.lookup_str = hdb_binsrch_lookup_str; hdb_binsrch_info->base.lookup_raw = hdb_binsrch_lookup_bin; hdb_binsrch_info->base.lookup_verbose_str = hdb_binsrch_lookup_verbose_str; hdb_binsrch_info->base.accepts_updates = hdb_binsrch_accepts_updates; hdb_binsrch_info->base.close_db = hdb_binsrch_close; // The database type and function pointers will need to be set // by the "derived class" caller these things vary by database type. hdb_binsrch_info->base.db_type = TSK_HDB_DBTYPE_INVALID_ID; hdb_binsrch_info->base.make_index = NULL; hdb_binsrch_info->get_entry = NULL; // Some text hash database types support indexes for more than one hash // type, so setting the hash type and length are deferred until the desired // index is created/opened. 
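    // (For concrete examples of such "derived class" setup, see encase_open() in
    // encase.c and hk_open() in hashkeeper.c later in this archive -- each calls
    // this function and then overrides db_type, make_index, and get_entry. Until an
    // index is actually opened, the hash type below stays invalid.)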
hdb_binsrch_info->hash_type = TSK_HDB_HTYPE_INVALID_ID; hdb_binsrch_info->hash_len = 0; return hdb_binsrch_info; } /** \internal * Setup the hash-type specific information (such as length, index entry * sizes, index name etc.) in the HDB_INFO structure. * * @param hdb_info Structure to fill in. * @param htype Hash type being used * @return 1 on error and 0 on success */ static uint8_t hdb_binsrch_idx_init_hash_type_info(TSK_HDB_BINSRCH_INFO *hdb_binsrch_info, TSK_HDB_HTYPE_ENUM htype) { if (hdb_binsrch_info->hash_type != TSK_HDB_HTYPE_INVALID_ID) { return 0; } /* Make the name for the index file */ size_t flen = TSTRLEN(hdb_binsrch_info->base.db_fname) + 32; hdb_binsrch_info->idx_fname = (TSK_TCHAR *) tsk_malloc(flen * sizeof(TSK_TCHAR)); if (hdb_binsrch_info->idx_fname == NULL) { return 1; } /* Make the name for the index of the index file */ hdb_binsrch_info->idx_idx_fname = (TSK_TCHAR *) tsk_malloc(flen * sizeof(TSK_TCHAR)); if (hdb_binsrch_info->idx_idx_fname == NULL) { return 1; } /* Set hash type specific information */ switch (htype) { case TSK_HDB_HTYPE_MD5_ID: hdb_binsrch_info->hash_type = htype; hdb_binsrch_info->hash_len = TSK_HDB_HTYPE_MD5_LEN; TSNPRINTF(hdb_binsrch_info->idx_fname, flen, _TSK_T("%s-%") PRIcTSK _TSK_T(".idx"), hdb_binsrch_info->base.db_fname, TSK_HDB_HTYPE_MD5_STR); TSNPRINTF(hdb_binsrch_info->idx_idx_fname, flen, _TSK_T("%s-%") PRIcTSK _TSK_T(".idx2"), hdb_binsrch_info->base.db_fname, TSK_HDB_HTYPE_MD5_STR); return 0; case TSK_HDB_HTYPE_SHA1_ID: hdb_binsrch_info->hash_type = htype; hdb_binsrch_info->hash_len = TSK_HDB_HTYPE_SHA1_LEN; TSNPRINTF(hdb_binsrch_info->idx_fname, flen, _TSK_T("%s-%") PRIcTSK _TSK_T(".idx"), hdb_binsrch_info->base.db_fname, TSK_HDB_HTYPE_SHA1_STR); TSNPRINTF(hdb_binsrch_info->idx_idx_fname, flen, _TSK_T("%s-%") PRIcTSK _TSK_T(".idx2"), hdb_binsrch_info->base.db_fname, TSK_HDB_HTYPE_SHA1_STR); return 0; // listed to prevent compiler warnings case TSK_HDB_HTYPE_INVALID_ID: case TSK_HDB_HTYPE_SHA2_256_ID: default: break; } tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr( "hdb_binsrch_idx_init_hash_type_info: Invalid hash type as argument: %d", htype); return 1; } uint8_t hdb_binsrch_uses_external_indexes() { return 1; } const TSK_TCHAR* hdb_binsrch_get_index_path(TSK_HDB_INFO *hdb_info, TSK_HDB_HTYPE_ENUM htype) { if (hdb_binsrch_open_idx(hdb_info, htype)) { return NULL; } else { TSK_HDB_BINSRCH_INFO *hdb_binsrch_info = (TSK_HDB_BINSRCH_INFO*)hdb_info; return hdb_binsrch_info->idx_fname; } } uint8_t hdb_binsrch_has_index(TSK_HDB_INFO *hdb_info, TSK_HDB_HTYPE_ENUM htype) { if (hdb_binsrch_open_idx(hdb_info, htype)) { return 0; } else { return 1; } } static uint8_t hdb_binsrch_load_index_offsets(TSK_HDB_BINSRCH_INFO *hdb_binsrch_info) { const char *func_name = "hdb_binsrch_load_index_offsets"; if (!hdb_binsrch_info) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr("%s: TSK_HDB_BINSRCH_INFO* is NULL", func_name); return 1; } if (!hdb_binsrch_info->idx_idx_fname) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr("%s: hdb_binsrch_info->idx_idx_fname is NULL", func_name); return 1; } // Attempt to open the file that contains the index of the index. // For older text-format hash databases, this additional index may // not exist, and that's o.k., lookups will just be slower. FILE *idx_idx_file = NULL; TSK_OFF_T idx_idx_size = 0; #ifdef TSK_WIN32 { HANDLE hWin; if (-1 == GetFileAttributes(hdb_binsrch_info->idx_idx_fname)) { // The file does not exist. 
Not a problem. return 0; } if ((hWin = CreateFile(hdb_binsrch_info->idx_idx_fname, GENERIC_READ, FILE_SHARE_READ, 0, OPEN_EXISTING, 0, 0)) == INVALID_HANDLE_VALUE) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_OPEN); tsk_error_set_errstr( "%s: error opening index of index: %"PRIttocTSK" - %d", func_name, hdb_binsrch_info->idx_idx_fname, (int)GetLastError()); return 1; } idx_idx_file =_fdopen(_open_osfhandle((intptr_t) hWin, _O_RDONLY), "rb"); if (!idx_idx_file) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_OPEN); tsk_error_set_errstr( "%s: error converting file handle from Windows to C for: %"PRIttocTSK, func_name, hdb_binsrch_info->idx_idx_fname); return 1; } DWORD szHi = 0; DWORD szLow = GetFileSize(hWin, &szHi); if (szLow == 0xffffffff) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_OPEN); tsk_error_set_errstr( "%s: error getting size of index of index file: %"PRIttocTSK" - %d", func_name, hdb_binsrch_info->idx_idx_fname, (int)GetLastError()); return 1; } idx_idx_size = szLow | ((uint64_t) szHi << 32); } #else { struct stat sb; if (stat(hdb_binsrch_info->idx_idx_fname, &sb) < 0) { // The file does not exist. Not a problem. return 0; } idx_idx_size = sb.st_size; if (NULL == (idx_idx_file = fopen(hdb_binsrch_info->idx_idx_fname, "rb"))) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_OPEN); tsk_error_set_errstr( "%s: error opening index of index: %"PRIttocTSK, func_name, hdb_binsrch_info->idx_idx_fname); return 1; } } #endif // Read the stored mapping of initial hash digits to offsets in the index file // into memory. The mapping is for the first three digits of the hashes // (three nibbles), so there are 2 ^ 12 or 4096 possible entries. if (IDX_IDX_SIZE != idx_idx_size) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_OPEN); tsk_error_set_errstr("%s: index of index is wrong size", func_name); return 1; } hdb_binsrch_info->idx_offsets = (uint64_t*)tsk_malloc(IDX_IDX_SIZE); if (NULL == hdb_binsrch_info->idx_offsets) { return 1; } if (1 != fread((void*)hdb_binsrch_info->idx_offsets, IDX_IDX_SIZE, 1, idx_idx_file)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_OPEN); tsk_error_set_errstr("%s: error reading index of index", func_name); return 1; } fclose(idx_idx_file); return 0; } /** \internal * Setup the internal variables to read an index. This * opens the index and sets the needed size information. * * @param hdb_info Hash database to analyze * @param hash The hash type that was used to make the index. 
* * @return 1 on error and 0 on success */ static uint8_t hdb_binsrch_open_idx_file(TSK_HDB_INFO *hdb_info_base, TSK_HDB_HTYPE_ENUM htype) { TSK_HDB_BINSRCH_INFO *hdb_binsrch_info = (TSK_HDB_BINSRCH_INFO*)hdb_info_base; char head[TSK_HDB_MAXLEN]; char head2[TSK_HDB_MAXLEN]; char *ptr; if ((htype != TSK_HDB_HTYPE_MD5_ID) && (htype != TSK_HDB_HTYPE_SHA1_ID)) { tsk_release_lock(&hdb_binsrch_info->base.lock); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr( "hdb_binsrch_open_idx_file: Invalid hash type : %d", htype); return 1; } if (hdb_binsrch_idx_init_hash_type_info(hdb_binsrch_info, htype)) { tsk_release_lock(&hdb_binsrch_info->base.lock); return 1; } /* Verify the index exists, get its size, and open it */ #ifdef TSK_WIN32 { HANDLE hWin; DWORD szLow, szHi; if (-1 == GetFileAttributes(hdb_binsrch_info->idx_fname)) { tsk_release_lock(&hdb_binsrch_info->base.lock); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_MISSING); tsk_error_set_errstr( "hdb_binsrch_open_idx_file: Error finding index file: %"PRIttocTSK, hdb_binsrch_info->idx_fname); return 1; } if ((hWin = CreateFile(hdb_binsrch_info->idx_fname, GENERIC_READ, FILE_SHARE_READ, 0, OPEN_EXISTING, 0, 0)) == INVALID_HANDLE_VALUE) { tsk_release_lock(&hdb_binsrch_info->base.lock); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_OPEN); tsk_error_set_errstr( "hdb_binsrch_open_idx: Error opening index file: %"PRIttocTSK, hdb_binsrch_info->idx_fname); return 1; } hdb_binsrch_info->hIdx = _fdopen(_open_osfhandle((intptr_t) hWin, _O_RDONLY), "r"); if (hdb_binsrch_info->hIdx == NULL) { tsk_release_lock(&hdb_binsrch_info->base.lock); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_OPEN); tsk_error_set_errstr( "hdb_binsrch_open_idx_file: Error converting Windows handle to C handle"); return 1; } szLow = GetFileSize(hWin, &szHi); if (szLow == 0xffffffff) { tsk_release_lock(&hdb_binsrch_info->base.lock); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_OPEN); tsk_error_set_errstr( "hdb_binsrch_open_idx_file: Error getting size of index file: %"PRIttocTSK" - %d", hdb_binsrch_info->idx_fname, (int)GetLastError()); return 1; } hdb_binsrch_info->idx_size = szLow | ((uint64_t) szHi << 32); } #else { struct stat sb; if (stat(hdb_binsrch_info->idx_fname, &sb) < 0) { tsk_release_lock(&hdb_binsrch_info->base.lock); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_MISSING); tsk_error_set_errstr( "hdb_binsrch_open_idx_file: Error finding index file: %s", hdb_binsrch_info->idx_fname); return 1; } hdb_binsrch_info->idx_size = sb.st_size; if (NULL == (hdb_binsrch_info->hIdx = fopen(hdb_binsrch_info->idx_fname, "r"))) { tsk_release_lock(&hdb_binsrch_info->base.lock); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_OPEN); tsk_error_set_errstr( "hdb_binsrch_open_idx_file: Error opening index file: %s", hdb_binsrch_info->idx_fname); return 1; } } #endif /* Do some testing on the first line */ if (NULL == fgets(head, TSK_HDB_MAXLEN, hdb_binsrch_info->hIdx)) { tsk_release_lock(&hdb_binsrch_info->base.lock); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_READIDX); tsk_error_set_errstr( "hdb_binsrch_open_idx_file: Header line of index file"); return 1; } if (strncmp(head, TSK_HDB_IDX_HEAD_TYPE_STR, strlen(TSK_HDB_IDX_HEAD_TYPE_STR)) != 0) { tsk_release_lock(&hdb_binsrch_info->base.lock); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_UNKTYPE); tsk_error_set_errstr( "hdb_binsrch_open_idx_file: Invalid index file: Missing header line"); return 1; } /* Do some testing on the second line */ if (NULL == fgets(head2, 
TSK_HDB_MAXLEN, hdb_binsrch_info->hIdx)) { tsk_release_lock(&hdb_binsrch_info->base.lock); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_READIDX); tsk_error_set_errstr( "hdb_binsrch_open_idx_file: Error reading line 2 of index file"); return 1; } /* Set the offset to the start of the index entries */ if (strncmp(head2, TSK_HDB_IDX_HEAD_NAME_STR, strlen(TSK_HDB_IDX_HEAD_NAME_STR)) != 0) { hdb_binsrch_info->idx_off = (uint16_t) (strlen(head)); } else { hdb_binsrch_info->idx_off = (uint16_t) (strlen(head) + strlen(head2)); } /* Skip the pipe symbol */ ptr = &head[strlen(TSK_HDB_IDX_HEAD_TYPE_STR) + 1]; /* Set the line length consistent with the hash type and the newline representation in the file.*/ hdb_binsrch_info->idx_llen = TSK_HDB_IDX_LEN(htype); ptr[strlen(ptr) - 1] = '\0'; if ((ptr[strlen(ptr) - 1] == 10) || (ptr[strlen(ptr) - 1] == 13)) { ptr[strlen(ptr) - 1] = '\0'; hdb_binsrch_info->idx_llen++; } /* Verify the header value in the index */ if (strcmp(ptr, TSK_HDB_DBTYPE_NSRL_STR) == 0) { if ((hdb_binsrch_info->base.db_type != TSK_HDB_DBTYPE_NSRL_ID) && (hdb_binsrch_info->base.db_type != TSK_HDB_DBTYPE_IDXONLY_ID)) { tsk_release_lock(&hdb_binsrch_info->base.lock); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_UNKTYPE); tsk_error_set_errstr( "hdb_binsrch_open_idx_file: DB detected as %s, index type has NSRL", ptr); return 1; } } else if (strcmp(ptr, TSK_HDB_DBTYPE_MD5SUM_STR) == 0) { if ((hdb_binsrch_info->base.db_type != TSK_HDB_DBTYPE_MD5SUM_ID) && (hdb_binsrch_info->base.db_type != TSK_HDB_DBTYPE_IDXONLY_ID)) { tsk_release_lock(&hdb_binsrch_info->base.lock); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_UNKTYPE); tsk_error_set_errstr( "hdb_binsrch_open_idx_file: DB detected as %s, index type has MD5SUM", ptr); return 1; } } else if (strcmp(ptr, TSK_HDB_DBTYPE_HK_STR) == 0) { if ((hdb_binsrch_info->base.db_type != TSK_HDB_DBTYPE_HK_ID) && (hdb_binsrch_info->base.db_type != TSK_HDB_DBTYPE_IDXONLY_ID)) { tsk_release_lock(&hdb_binsrch_info->base.lock); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_UNKTYPE); tsk_error_set_errstr( "hdb_binsrch_open_idx_file: DB detected as %s, index type has hashkeeper", ptr); return 1; } } else if (strcmp(ptr, TSK_HDB_DBTYPE_ENCASE_STR) == 0) { if ((hdb_binsrch_info->base.db_type != TSK_HDB_DBTYPE_ENCASE_ID) && (hdb_binsrch_info->base.db_type != TSK_HDB_DBTYPE_IDXONLY_ID)) { tsk_release_lock(&hdb_binsrch_info->base.lock); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_UNKTYPE); tsk_error_set_errstr( "hdb_binsrch_open_idx_file: DB detected as %s, index type has EnCase", ptr); return 1; } } else if (hdb_binsrch_info->base.db_type != TSK_HDB_DBTYPE_IDXONLY_ID) { tsk_release_lock(&hdb_binsrch_info->base.lock); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_UNKTYPE); tsk_error_set_errstr( "hdb_binsrch_open_idx_file: Unknown Database Type in index header: %s", ptr); return 1; } /* Do some sanity checking */ if (((hdb_binsrch_info->idx_size - hdb_binsrch_info->idx_off) % hdb_binsrch_info->idx_llen) != 0) { tsk_release_lock(&hdb_binsrch_info->base.lock); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_CORRUPT); tsk_error_set_errstr( "hdb_binsrch_open_idx_file: Error, size of index file is not a multiple of row size"); return 1; } /* allocate a buffer for a row */ if ((hdb_binsrch_info->idx_lbuf = (char*)tsk_malloc(hdb_binsrch_info->idx_llen + 1)) == NULL) { tsk_release_lock(&hdb_binsrch_info->base.lock); return 1; } return 0; } /** \internal * Opens and index file and loads the index of the index file into memory. 
* * @param hdb_info Hash database to analyze * @param hash The hash type that was used to make the index. * * @return 1 on error and 0 on success */ uint8_t hdb_binsrch_open_idx(TSK_HDB_INFO *hdb_info_base, TSK_HDB_HTYPE_ENUM htype) { TSK_HDB_BINSRCH_INFO *hdb_binsrch_info = (TSK_HDB_BINSRCH_INFO*)hdb_info_base; // Lock for lazy load of hIdx and lazy alloc of idx_lbuf. tsk_take_lock(&hdb_binsrch_info->base.lock); // if it is already open, bail out if (hdb_binsrch_info->hIdx != NULL) { tsk_release_lock(&hdb_binsrch_info->base.lock); return 0; } if (hdb_binsrch_open_idx_file(hdb_info_base, htype)) { tsk_release_lock(&hdb_binsrch_info->base.lock); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr("hdb_binsrch_open_idx: unable to open index file"); return 1; } /* To speed up lookups, a mapping of the first three bytes of a hash to * an offset in the index file will be loaded into memory, if available. */ if (hdb_binsrch_load_index_offsets(hdb_binsrch_info)) { tsk_release_lock(&hdb_binsrch_info->base.lock); return 1; } tsk_release_lock(&hdb_binsrch_info->base.lock); return 0; } /** Initialize the TSK hash DB index file. This creates the intermediate file, * which will have entries added to it. This file must be sorted before the * process is finished. * * @param hdb_binsrch_info Hash database state structure * @param htype String of index type to create * * @return 1 on error and 0 on success * */ uint8_t hdb_binsrch_idx_initialize(TSK_HDB_BINSRCH_INFO *hdb_binsrch_info, TSK_TCHAR *htype) { const char *func_name = "hdb_binsrch_idx_init"; TSK_HDB_HTYPE_ENUM hash_type = TSK_HDB_HTYPE_INVALID_ID; size_t flen = 0; char dbtmp[32]; int i = 0; /* Use the string of the index/hash type to figure out some * settings */ // convert to char -- cheating way to deal with WCHARs.. 
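    // (The narrowing below simply casts each TSK_TCHAR to char, so it assumes the
    // index type name is plain ASCII; names longer than 31 characters are truncated
    // to fit dbtmp.)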
for (i = 0; i < 31 && htype[i] != '\0'; i++) { dbtmp[i] = (char) htype[i]; } dbtmp[i] = '\0'; if (strcmp(dbtmp, TSK_HDB_DBTYPE_NSRL_MD5_STR) == 0) { if (hdb_binsrch_info->base.db_type != TSK_HDB_DBTYPE_NSRL_ID) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr( "%s: database detected as: %d index creation as: %d", func_name, hdb_binsrch_info->base.db_type, TSK_HDB_DBTYPE_NSRL_ID); return 1; } hash_type = TSK_HDB_HTYPE_MD5_ID; } else if (strcmp(dbtmp, TSK_HDB_DBTYPE_NSRL_SHA1_STR) == 0) { if (hdb_binsrch_info->base.db_type != TSK_HDB_DBTYPE_NSRL_ID) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr( "%s: database detected as: %d index creation as: %d", func_name, hdb_binsrch_info->base.db_type, TSK_HDB_DBTYPE_NSRL_ID); return 1; } hash_type = TSK_HDB_HTYPE_SHA1_ID; } else if (strcmp(dbtmp, TSK_HDB_DBTYPE_MD5SUM_STR) == 0) { if (hdb_binsrch_info->base.db_type != TSK_HDB_DBTYPE_MD5SUM_ID) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr( "%s: database detected as: %d index creation as: %d", func_name, hdb_binsrch_info->base.db_type, TSK_HDB_DBTYPE_MD5SUM_ID); return 1; } hash_type = TSK_HDB_HTYPE_MD5_ID; } else if (strcmp(dbtmp, TSK_HDB_DBTYPE_HK_STR) == 0) { if (hdb_binsrch_info->base.db_type != TSK_HDB_DBTYPE_HK_ID) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr( "%s: database detected as: %d index creation as: %d", func_name, hdb_binsrch_info->base.db_type, TSK_HDB_DBTYPE_HK_ID); return 1; } hash_type = TSK_HDB_HTYPE_MD5_ID; } else if (strcmp(dbtmp, TSK_HDB_DBTYPE_ENCASE_STR) == 0) { if (hdb_binsrch_info->base.db_type != TSK_HDB_DBTYPE_ENCASE_ID) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr( "%s: database detected as: %d index creation as: %d", func_name, hdb_binsrch_info->base.db_type, TSK_HDB_DBTYPE_ENCASE_ID); return 1; } hash_type = TSK_HDB_HTYPE_MD5_ID; } else { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr( "%s: Unknown database/hash type request: %s", func_name, dbtmp); return 1; } /* Setup the internal hash information */ if (hdb_binsrch_idx_init_hash_type_info(hdb_binsrch_info, hash_type)) { return 1; } /* Make the name for the unsorted intermediate index file */ flen = TSTRLEN(hdb_binsrch_info->base.db_fname) + 32; hdb_binsrch_info->uns_fname = (TSK_TCHAR *) tsk_malloc(flen * sizeof(TSK_TCHAR)); if (hdb_binsrch_info->uns_fname == NULL) { return 1; } TSNPRINTF(hdb_binsrch_info->uns_fname, flen, _TSK_T("%s-%") PRIcTSK _TSK_T("-ns.idx"), hdb_binsrch_info->base.db_fname, TSK_HDB_HTYPE_STR(hdb_binsrch_info->hash_type)); /* Create temp unsorted file of offsets */ #ifdef TSK_WIN32 { HANDLE hWin; if ((hWin = CreateFile(hdb_binsrch_info->uns_fname, GENERIC_WRITE, 0, 0, CREATE_ALWAYS, 0, 0)) == INVALID_HANDLE_VALUE) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_CREATE); tsk_error_set_errstr( "%s: %"PRIttocTSK" GetFileSize: %d", func_name, hdb_binsrch_info->uns_fname, (int)GetLastError()); return 1; } hdb_binsrch_info->hIdxTmp = _fdopen(_open_osfhandle((intptr_t) hWin, _O_WRONLY), "wb"); if (hdb_binsrch_info->hIdxTmp == NULL) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_OPEN); tsk_error_set_errstr( "%s: Error converting Windows handle to C handle", func_name); return 1; } } #else if (NULL == (hdb_binsrch_info->hIdxTmp = fopen(hdb_binsrch_info->uns_fname, "w"))) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_CREATE); tsk_error_set_errstr( "%s: Error creating temp index file: %s", 
func_name, hdb_binsrch_info->uns_fname); return 1; } #endif /* Print the header */ fprintf(hdb_binsrch_info->hIdxTmp, "%s|%s\n", TSK_HDB_IDX_HEAD_NAME_STR, hdb_binsrch_info->base.db_name); switch (hdb_binsrch_info->base.db_type) { case TSK_HDB_DBTYPE_NSRL_ID: fprintf(hdb_binsrch_info->hIdxTmp, "%s|%s\n", TSK_HDB_IDX_HEAD_TYPE_STR, TSK_HDB_DBTYPE_NSRL_STR); break; case TSK_HDB_DBTYPE_MD5SUM_ID: fprintf(hdb_binsrch_info->hIdxTmp, "%s|%s\n", TSK_HDB_IDX_HEAD_TYPE_STR, TSK_HDB_DBTYPE_MD5SUM_STR); break; case TSK_HDB_DBTYPE_HK_ID: fprintf(hdb_binsrch_info->hIdxTmp, "%s|%s\n", TSK_HDB_IDX_HEAD_TYPE_STR, TSK_HDB_DBTYPE_HK_STR); break; case TSK_HDB_DBTYPE_ENCASE_ID: fprintf(hdb_binsrch_info->hIdxTmp, "%s|%s\n", TSK_HDB_IDX_HEAD_TYPE_STR, TSK_HDB_DBTYPE_ENCASE_STR); break; /* Used to stop warning messages about missing enum value */ case TSK_HDB_DBTYPE_IDXONLY_ID: default: tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_CREATE); tsk_error_set_errstr("%s: Invalid db type", func_name); return 1; } return 0; } /** * Add a string entry to the intermediate index file. * * @param hdb_binsrch_info Hash database state info * @param hvalue String of hash value to add * @param offset Byte offset of hash entry in original database. * @return 1 on error and 0 on success */ uint8_t hdb_binsrch_idx_add_entry_str(TSK_HDB_BINSRCH_INFO *hdb_binsrch_info, char *hvalue, TSK_OFF_T offset) { int i; // make the hashes all upper case for (i = 0; hvalue[i] != '\0'; i++) { if (islower((int) hvalue[i])) fprintf(hdb_binsrch_info->hIdxTmp, "%c", toupper((int) hvalue[i])); else fprintf(hdb_binsrch_info->hIdxTmp, "%c", hvalue[i]); } /* Print the entry to the unsorted index file */ fprintf(hdb_binsrch_info->hIdxTmp, "|%.16llu\n", (unsigned long long) offset); return 0; } /** * Add a binary entry to the intermediate index file. * * @param hdb_binsrch_info Hash database state info * @param hvalue Array of integers of hash value to add * @param hlen Number of bytes in hvalue * @param offset Byte offset of hash entry in original database. * @return 1 on error and 0 on success */ uint8_t hdb_binsrch_idx_add_entry_bin(TSK_HDB_BINSRCH_INFO *hdb_binsrch_info, unsigned char *hvalue, int hlen, TSK_OFF_T offset) { int i; for (i = 0; i < hlen; i++) { fprintf(hdb_binsrch_info->hIdxTmp, "%02X", hvalue[i]); } /* Print the entry to the unsorted index file */ fprintf(hdb_binsrch_info->hIdxTmp, "|%.16llu\n", (unsigned long long) offset); return 0; } static uint8_t hdb_binsrch_make_idx_idx(TSK_HDB_BINSRCH_INFO *hdb_binsrch_info) { const char *func_name = "hdb_binsrch_make_idx_idx"; if (!hdb_binsrch_info) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr("%s: TSK_HDB_BINSRCH_INFO* is NULL", func_name); return 1; } if (!hdb_binsrch_info->idx_idx_fname) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr("%s: hdb_binsrch_info->idx_idx_fname is NULL", func_name); return 1; } // Open the index file. This will read past the header, so the file // pointer will be positioned at the offset of the first hash line // in the index file. if (hdb_binsrch_open_idx_file(&(hdb_binsrch_info->base), hdb_binsrch_info->hash_type)) { // error message was already set. return 1; } // Create the file for the index of the index file. 
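    // (The "index of the index" is simply a flat array of IDX_IDX_ENTRY_COUNT (4096)
    // uint64_t file offsets, one per possible 3-nibble hash prefix; entries left at
    // IDX_IDX_ENTRY_NOT_SET mark prefixes that never occur in the sorted index.)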
FILE *idx_idx_file = NULL; #ifdef TSK_WIN32 { HANDLE hWin; if ((hWin = CreateFile(hdb_binsrch_info->idx_idx_fname, GENERIC_WRITE, 0, 0, CREATE_ALWAYS, 0, 0)) == INVALID_HANDLE_VALUE) { int winErrNo = (int)GetLastError(); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_CREATE); tsk_error_set_errstr( "%s: error creating index of index file %"PRIttocTSK" - %d)", func_name, hdb_binsrch_info->idx_idx_fname, winErrNo); return 1; } idx_idx_file = _fdopen(_open_osfhandle((intptr_t) hWin, _O_WRONLY), "wb"); if (idx_idx_file == NULL) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_OPEN); tsk_error_set_errstr( "%s: error converting file handle from Windows to C for: %"PRIttocTSK, func_name, hdb_binsrch_info->idx_idx_fname); return 1; } } #else if (NULL == (idx_idx_file = fopen(hdb_binsrch_info->idx_idx_fname, "wb"))) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_CREATE); tsk_error_set_errstr( "%s: error creating index of index file %"PRIttocTSK, func_name, hdb_binsrch_info->idx_idx_fname); return 1; } #endif // Allocate an array to hold the starting offsets in the index file for each // set of hashes with identical intitial (3) nibbles. hdb_binsrch_info->idx_offsets = (uint64_t*)tsk_malloc(IDX_IDX_SIZE); if (NULL == hdb_binsrch_info->idx_offsets) { return 1; } memset(hdb_binsrch_info->idx_offsets, 0xFF, IDX_IDX_SIZE); // Populate the array. Note that the index is sorted, so the first // occurence of any nibble indicates the starting offset for the // corresponding set. TSK_OFF_T idx_off = hdb_binsrch_info->idx_off; char digits[4]; long int offsets_idx = 0; while (fgets(hdb_binsrch_info->idx_lbuf, (int)hdb_binsrch_info->idx_llen + 1, hdb_binsrch_info->hIdx)) { strncpy(digits, hdb_binsrch_info->idx_lbuf, 3); offsets_idx = strtol(digits, NULL, 16); if (hdb_binsrch_info->idx_offsets[offsets_idx] == IDX_IDX_ENTRY_NOT_SET) { hdb_binsrch_info->idx_offsets[offsets_idx] = idx_off; } idx_off += hdb_binsrch_info->idx_llen; } // Write the array to the index of the index file so that it // can be reloaded into memory the next time the index is opened. uint8_t ret_val = (1 == fwrite((const void*)hdb_binsrch_info->idx_offsets, IDX_IDX_SIZE, 1, idx_idx_file)) ? 0 : 1; fclose(idx_idx_file); return ret_val; } /** * Finalize index creation process by sorting the index and removing the * intermediate temp file. * * @param hdb_binsrch_info Hash database state info structure. * @return 1 on error and 0 on success */ uint8_t hdb_binsrch_idx_finalize(TSK_HDB_BINSRCH_INFO *hdb_binsrch_info) { /* Close the unsorted file */ fclose(hdb_binsrch_info->hIdxTmp); hdb_binsrch_info->hIdxTmp = NULL; /* Close the existing index if it is open, and unset the old index file data. 
*/ if (hdb_binsrch_info->hIdx) { fclose(hdb_binsrch_info->hIdx); hdb_binsrch_info->hIdx = NULL; } hdb_binsrch_info->idx_size = 0; hdb_binsrch_info->idx_off = 0; hdb_binsrch_info->idx_llen = 0; if (hdb_binsrch_info->idx_lbuf != NULL) { free(hdb_binsrch_info->idx_lbuf); hdb_binsrch_info->idx_lbuf = NULL; } if (tsk_verbose) tsk_fprintf(stderr, "hdb_idxfinalize: Sorting index\n"); #ifdef TSK_WIN32 wchar_t buf[TSK_HDB_MAXLEN]; /// @@ Expand this to be SYSTEM_ROOT -- GetWindowsDirectory() wchar_t *sys32 = _TSK_T("C:\\WINDOWS\\System32\\sort.exe"); DWORD stat; STARTUPINFO myStartInfo; PROCESS_INFORMATION pinfo; stat = GetFileAttributes(sys32); if ((stat != -1) && ((stat & FILE_ATTRIBUTE_DIRECTORY) == 0)) { TSNPRINTF(buf, TSK_HDB_MAXLEN, _TSK_T("%s /o \"%s\" \"%s\""), sys32, hdb_binsrch_info->idx_fname, hdb_binsrch_info->uns_fname); } else { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_MISSING); tsk_error_set_errstr("Cannot find sort executable"); return 1; } GetStartupInfo(&myStartInfo); if (FALSE == CreateProcess(NULL, buf, NULL, NULL, FALSE, 0, NULL, NULL, &myStartInfo, &pinfo)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_PROC); tsk_error_set_errstr( "Error starting sorting index file using %S", buf); return 1; } if (WAIT_FAILED == WaitForSingleObject(pinfo.hProcess, INFINITE)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_PROC); tsk_error_set_errstr( "Error (waiting) sorting index file using %S", buf); return 1; } if (FALSE == DeleteFile(hdb_binsrch_info->uns_fname)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_DELETE); tsk_error_set_errstr( "Error deleting temp file: %d", (int)GetLastError()); return 1; } // verify it was created stat = GetFileAttributes(hdb_binsrch_info->idx_fname); if (stat == -1) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_PROC); tsk_error_set_errstr("hdb_binsrch_finalize: sorted file not created"); return 1; } #else char buf[TSK_HDB_MAXLEN]; const char *root = "/bin/sort"; const char *usr = "/usr/bin/sort"; const char *local = "/usr/local/bin/sort"; struct stat stats; if (0 == stat(local, &stats)) { snprintf(buf, TSK_HDB_MAXLEN, "%s -o %s %s", local, hdb_binsrch_info->idx_fname, hdb_binsrch_info->uns_fname); } else if (0 == stat(usr, &stats)) { snprintf(buf, TSK_HDB_MAXLEN, "%s -o \"%s\" \"%s\"", usr, hdb_binsrch_info->idx_fname, hdb_binsrch_info->uns_fname); } else if (0 == stat(root, &stats)) { snprintf(buf, TSK_HDB_MAXLEN, "%s -o \"%s\" \"%s\"", root, hdb_binsrch_info->idx_fname, hdb_binsrch_info->uns_fname); } else { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_MISSING); tsk_error_set_errstr("Cannot find sort executable"); return 1; } if (0 != system(buf)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_PROC); tsk_error_set_errstr( "Error sorting index file using %s", buf); return 1; } unlink(hdb_binsrch_info->uns_fname); if (stat(hdb_binsrch_info->idx_fname, &stats)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_PROC); tsk_error_set_errstr("hdb_binsrch_finalize: sorted file not created"); return 1; } #endif // To speed up lookups, create a mapping of the first three bytes of a hash // to an offset in the index file. 
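    /* Illustrative sketch (not part of the original source): strictly speaking the
     * mapping keys off the first three hex digits (nibbles) of the hash string, the
     * same computation used by hdb_binsrch_make_idx_idx() and by the lookup code:
     *
     *     char digits[4];
     *     strncpy(digits, "AB1D3C...", 3);         // hypothetical upper-case hash
     *     digits[3] = '\0';
     *     long bucket = strtol(digits, NULL, 16);  // 0x000 .. 0xFFF, 4096 buckets
     *     uint64_t start = idx_offsets[bucket];    // byte offset into the sorted .idx
     */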
if (hdb_binsrch_make_idx_idx(hdb_binsrch_info)) { tsk_error_set_errstr2( "hdb_binsrch_idx_finalize: error creating index of index file"); return 1; } return 0; } /** * \ingroup hashdblib * Search the index for a text/ASCII hash value * * @param hdb_info_base Open hash database (with index) * @param hash Hash value to search for (NULL terminated string) * @param flags Flags to use in lookup * @param action Callback function to call for each hash db entry * (not called if QUICK flag is given) * @param ptr Pointer to data to pass to each callback * * @return -1 on error, 0 if hash value not found, and 1 if value was found. */ int8_t hdb_binsrch_lookup_str(TSK_HDB_INFO * hdb_info_base, const char *hash, TSK_HDB_FLAG_ENUM flags, TSK_HDB_LOOKUP_FN action, void *ptr) { const char *func_name = "hdb_binsrch_lookup_str"; TSK_HDB_BINSRCH_INFO *hdb_binsrch_info = (TSK_HDB_BINSRCH_INFO*)hdb_info_base; TSK_OFF_T poffset; TSK_OFF_T up; // Offset of the first byte past the upper limit that we are looking in TSK_OFF_T low; // offset of the first byte of the lower limit that we are looking in int cmp; uint8_t wasFound = 0; size_t i; TSK_HDB_HTYPE_ENUM htype; char ucHash[TSK_HDB_HTYPE_SHA1_LEN + 1]; // Set to the longest hash length + 1 /* Sanity checks on the hash input */ if (strlen(hash) == TSK_HDB_HTYPE_MD5_LEN) { htype = TSK_HDB_HTYPE_MD5_ID; } else if (strlen(hash) == TSK_HDB_HTYPE_SHA1_LEN) { htype = TSK_HDB_HTYPE_SHA1_ID; } else { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr( "%s: Invalid hash length: %s", func_name, hash); return -1; } for (i = 0; i < strlen(hash); i++) { if (isxdigit((int) hash[i]) == 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr( "%s: Invalid hash value (hex only): %s", func_name, hash); return -1; } } // verify the index is open if (hdb_binsrch_open_idx(hdb_info_base, htype)) return -1; /* Sanity check */ if (hdb_binsrch_info->hash_len != strlen(hash)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr( "%s: Hash passed is different size than expected (%d vs %Zd)", func_name, hdb_binsrch_info->hash_len, strlen(hash)); return -1; } // Convert hash to uppercase for(i = 0;i < strlen(hash);i++){ if(islower(hash[i])){ ucHash[i] = toupper(hash[i]); } else{ ucHash[i] = hash[i]; } } ucHash[strlen(hash)] = '\0'; // Do a lookup in the index of the index file. The index of the index file is // a mapping of the first three digits of a hash to the offset in the index // file of the first index entry of the possibly empty set of index entries // for hashes with those initial digits. if (hdb_binsrch_info->idx_offsets) { // Convert the initial hash digits into an index into the index offsets. // This will give the offset into the index file for the set of hashes // that contains the sought hash. char digits[4]; strncpy(digits, ucHash, 3); digits[3] = '\0'; long int idx_idx_off = strtol(digits, NULL, 16); if ((idx_idx_off < 0) || (idx_idx_off > (long int)IDX_IDX_ENTRY_COUNT)) { tsk_release_lock(&hdb_binsrch_info->base.lock); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr( "%s: error finding index in secondary index for %s", func_name, ucHash); return -1; } // Determine the bounds for the binary search of the sorted index file. // The lower bound is the start of the set of entries that may contain // the sought hash. The upper bound is the offset one past the end // of that entry set, or EOF. 
low = hdb_binsrch_info->idx_offsets[idx_idx_off]; if (IDX_IDX_ENTRY_NOT_SET != (uint64_t)low) { do { ++idx_idx_off; if (idx_idx_off == (long int)IDX_IDX_ENTRY_COUNT) { // The set of hashes to search is the last set. Use the end of the index // file as the upper bound for the binary search. up = hdb_binsrch_info->idx_size; break; } else { up = hdb_binsrch_info->idx_offsets[idx_idx_off]; } } while (IDX_IDX_ENTRY_NOT_SET == (uint64_t)up); } else { // Quick out - the hash does not map to an index offset. // It is not in the hash database. return 0; } } else { // There is no index for the index file. Search the entire file. low = hdb_binsrch_info->idx_off; up = hdb_binsrch_info->idx_size; } poffset = 0; // We have to lock access to idx_lbuf, but since we're in a loop, // I'm assuming one lock up front is better than many inside. tsk_take_lock(&hdb_binsrch_info->base.lock); while (1) { TSK_OFF_T offset; /* If top and bottom are the same, it's not there */ if (up == low) { tsk_release_lock(&hdb_binsrch_info->base.lock); return 0; } /* Get the middle of the windows that we are looking at */ offset = rounddown(((up - low) / 2), hdb_binsrch_info->idx_llen); /* Sanity Check */ if ((offset % hdb_binsrch_info->idx_llen) != 0) { tsk_release_lock(&hdb_binsrch_info->base.lock); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_CORRUPT); tsk_error_set_errstr( "hdb_lookup: Error, new offset is not a multiple of the line length"); return -1; } /* The middle offset is relative to the low offset, so add them */ offset += low; /* If we didn't move, then it's not there */ if (poffset == offset) { tsk_release_lock(&hdb_binsrch_info->base.lock); return 0; } /* Seek to the offset and read it */ if (0 != fseeko(hdb_binsrch_info->hIdx, offset, SEEK_SET)) { tsk_release_lock(&hdb_binsrch_info->base.lock); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_READIDX); tsk_error_set_errstr( "hdb_lookup: Error seeking in search: %" PRIuOFF, offset); return -1; } if (NULL == fgets(hdb_binsrch_info->idx_lbuf, (int) hdb_binsrch_info->idx_llen + 1, hdb_binsrch_info->hIdx)) { if (feof(hdb_binsrch_info->hIdx)) { tsk_release_lock(&hdb_binsrch_info->base.lock); return 0; } tsk_release_lock(&hdb_binsrch_info->base.lock); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_READIDX); tsk_error_set_errstr( "Error reading index file: %lu", (unsigned long) offset); return -1; } /* Sanity Check */ if ((strlen(hdb_binsrch_info->idx_lbuf) < hdb_binsrch_info->idx_llen) || (hdb_binsrch_info->idx_lbuf[hdb_binsrch_info->hash_len] != '|')) { tsk_release_lock(&hdb_binsrch_info->base.lock); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_CORRUPT); tsk_error_set_errstr( "Invalid line in index file: %lu (%s)", (unsigned long) (offset / hdb_binsrch_info->idx_llen), hdb_binsrch_info->idx_lbuf); return -1; } /* Set the delimter to NULL so we can treat the hash as a string */ hdb_binsrch_info->idx_lbuf[hdb_binsrch_info->hash_len] = '\0'; cmp = strcasecmp(hdb_binsrch_info->idx_lbuf, ucHash); /* The one we just read is too small, so set the new lower bound * at the start of the next row */ if (cmp < 0) { low = offset + hdb_binsrch_info->idx_llen; } /* The one we just read is too big, so set the upper bound at this * entry */ else if (cmp > 0) { up = offset; } /* We found it */ else { wasFound = 1; if (flags & TSK_HDB_FLAG_QUICK) { tsk_release_lock(&hdb_binsrch_info->base.lock); return 1; } else { TSK_OFF_T tmpoff, db_off; #ifdef TSK_WIN32 db_off = _atoi64(&hdb_binsrch_info->idx_lbuf[hdb_binsrch_info->hash_len + 1]); #else db_off = 
strtoull(&hdb_binsrch_info->idx_lbuf[hdb_binsrch_info->hash_len + 1], NULL, 10); #endif /* Print the one that we found first */ if (hdb_binsrch_info-> get_entry(hdb_info_base, ucHash, db_off, flags, action, ptr)) { tsk_release_lock(&hdb_binsrch_info->base.lock); tsk_error_set_errstr2( "hdb_lookup"); return -1; } /* there could be additional entries both before and after * this entry - but we can restrict ourselves to the up * and low bounds from our previous hunting */ tmpoff = offset - hdb_binsrch_info->idx_llen; while (tmpoff >= low) { /* Break if we are at the header */ if (tmpoff <= 0) break; if (0 != fseeko(hdb_binsrch_info->hIdx, tmpoff, SEEK_SET)) { tsk_release_lock(&hdb_binsrch_info->base.lock); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_READIDX); tsk_error_set_errstr( "hdb_lookup: Error seeking for prev entries: %" PRIuOFF, tmpoff); return -1; } if (NULL == fgets(hdb_binsrch_info->idx_lbuf, (int) hdb_binsrch_info->idx_llen + 1, hdb_binsrch_info->hIdx)) { tsk_release_lock(&hdb_binsrch_info->base.lock); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_READIDX); tsk_error_set_errstr( "Error reading index file (prev): %lu", (unsigned long) tmpoff); return -1; } else if (strlen(hdb_binsrch_info->idx_lbuf) < hdb_binsrch_info->idx_llen) { tsk_release_lock(&hdb_binsrch_info->base.lock); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_CORRUPT); tsk_error_set_errstr( "Invalid index file line (prev): %lu", (unsigned long) tmpoff); return -1; } hdb_binsrch_info->idx_lbuf[hdb_binsrch_info->hash_len] = '\0'; if (strcasecmp(hdb_binsrch_info->idx_lbuf, ucHash) != 0) { break; } #ifdef TSK_WIN32 db_off = _atoi64(&hdb_binsrch_info-> idx_lbuf[hdb_binsrch_info->hash_len + 1]); #else db_off = strtoull(&hdb_binsrch_info-> idx_lbuf[hdb_binsrch_info->hash_len + 1], NULL, 10); #endif if (hdb_binsrch_info-> get_entry(hdb_info_base, ucHash, db_off, flags, action, ptr)) { tsk_release_lock(&hdb_binsrch_info->base.lock); return -1; } tmpoff -= hdb_binsrch_info->idx_llen; } /* next entries */ tmpoff = offset + hdb_binsrch_info->idx_llen; while (tmpoff < up) { if (0 != fseeko(hdb_binsrch_info->hIdx, tmpoff, SEEK_SET)) { tsk_release_lock(&hdb_binsrch_info->base.lock); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_READIDX); tsk_error_set_errstr( "hdb_lookup: Error seeking for next entries: %" PRIuOFF, tmpoff); return -1; } if (NULL == fgets(hdb_binsrch_info->idx_lbuf, (int) hdb_binsrch_info->idx_llen + 1, hdb_binsrch_info->hIdx)) { if (feof(hdb_binsrch_info->hIdx)) break; tsk_release_lock(&hdb_binsrch_info->base.lock); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_READIDX); tsk_error_set_errstr( "Error reading index file (next): %lu", (unsigned long) tmpoff); return -1; } else if (strlen(hdb_binsrch_info->idx_lbuf) < hdb_binsrch_info->idx_llen) { tsk_release_lock(&hdb_binsrch_info->base.lock); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_CORRUPT); tsk_error_set_errstr( "Invalid index file line (next): %lu", (unsigned long) tmpoff); return -1; } hdb_binsrch_info->idx_lbuf[hdb_binsrch_info->hash_len] = '\0'; if (strcasecmp(hdb_binsrch_info->idx_lbuf, ucHash) != 0) { break; } #ifdef TSK_WIN32 db_off = _atoi64(&hdb_binsrch_info-> idx_lbuf[hdb_binsrch_info->hash_len + 1]); #else db_off = strtoull(&hdb_binsrch_info-> idx_lbuf[hdb_binsrch_info->hash_len + 1], NULL, 10); #endif if (hdb_binsrch_info-> get_entry(hdb_info_base, ucHash, db_off, flags, action, ptr)) { tsk_release_lock(&hdb_binsrch_info->base.lock); return -1; } tmpoff += hdb_binsrch_info->idx_llen; } } break; } poffset = offset; } 
tsk_release_lock(&hdb_binsrch_info->base.lock); return wasFound; } /** * \ingroup hashdblib * Search the index for the given hash value given (in binary form). * * @param hdb_info Open hash database (with index) * @param hash Array with binary hash value to search for * @param len Number of bytes in binary hash value * @param flags Flags to use in lookup * @param action Callback function to call for each hash db entry * (not called if QUICK flag is given) * @param ptr Pointer to data to pass to each callback * * @return -1 on error, 0 if hash value not found, and 1 if value was found. */ int8_t hdb_binsrch_lookup_bin(TSK_HDB_INFO * hdb_info, uint8_t * hash, uint8_t len, TSK_HDB_FLAG_ENUM flags, TSK_HDB_LOOKUP_FN action, void *ptr) { char hashbuf[TSK_HDB_HTYPE_SHA1_LEN + 1]; int i; static const char hex[] = "0123456789abcdef"; if (2 * len > TSK_HDB_HTYPE_SHA1_LEN) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr( "tsk_hdb_lookup_raw: hash value too long\n"); return -1; } for (i = 0; i < len; i++) { hashbuf[2 * i] = hex[(hash[i] >> 4) & 0xf]; hashbuf[2 * i + 1] = hex[hash[i] & 0xf]; } hashbuf[2 * len] = '\0'; return tsk_hdb_lookup_str(hdb_info, hashbuf, flags, action, ptr); } /** * \ingroup hashdblib * \internal * Looks up a hash and any additional data associated with the hash in a * hash database. * @param hdb_info_base A struct representing an open hash database. * @param hash A hash value in string form. * @param result A TskHashInfo struct to populate on success. * @return -1 on error, 0 if hash value was not found, 1 if hash value * was found. */ int8_t hdb_binsrch_lookup_verbose_str(TSK_HDB_INFO *hdb_info_base, const char *hash, void *lookup_result) { // Verify the length of the hash value argument. TSK_HDB_HTYPE_ENUM hash_type = TSK_HDB_HTYPE_INVALID_ID; size_t hash_len = strlen(hash); if (TSK_HDB_HTYPE_MD5_LEN == hash_len) { hash_type = TSK_HDB_HTYPE_MD5_ID; } else if (TSK_HDB_HTYPE_SHA1_LEN == hash_len) { hash_type = TSK_HDB_HTYPE_SHA1_ID; } else { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr("hdb_binsrch_lookup_verbose_str: invalid hash, length incorrect: %s", hash); return -1; } // Due to a bug in the extended lookup code for text-format hash databases, // do a simple yes/no look up until the bug is fixed. 
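    // (Consequence: on a hit, only the hash field of the TskHashInfo result is
    // populated below; file names and other metadata stored in the database are not
    // retrieved by this code path.)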
int8_t ret_val = hdb_binsrch_lookup_str(hdb_info_base, hash, TSK_HDB_FLAG_QUICK, NULL, NULL); if (1 == ret_val) { TskHashInfo *result = static_cast(lookup_result); if (TSK_HDB_HTYPE_MD5_ID == hash_type) { result->hashMd5 = hash; } else { result->hashSha1 = hash; } } return ret_val; } uint8_t hdb_binsrch_accepts_updates() { return 0; } void hdb_binsrch_close(TSK_HDB_INFO *hdb_info_base) { TSK_HDB_BINSRCH_INFO *hdb_info = (TSK_HDB_BINSRCH_INFO*)hdb_info_base; if (hdb_info->hDb) { fclose(hdb_info->hDb); hdb_info->hDb = NULL; } if (hdb_info->idx_fname) { free(hdb_info->idx_fname); hdb_info->idx_fname = NULL; } if (hdb_info->hIdx) { fclose(hdb_info->hIdx); hdb_info->hIdx = NULL; } if (hdb_info->hIdxTmp) { fclose(hdb_info->hIdxTmp); hdb_info->hIdxTmp = NULL; } if (hdb_info->uns_fname) { free(hdb_info->uns_fname); hdb_info->uns_fname = NULL; } if (hdb_info->idx_lbuf) { free(hdb_info->idx_lbuf); hdb_info->idx_lbuf = NULL; } if (hdb_info->idx_offsets) { free(hdb_info->idx_offsets); hdb_info->idx_offsets = NULL; } hdb_info_base_close(hdb_info_base); free(hdb_info); } sleuthkit-4.2.0/tsk/hashdb/encase.c000644 000765 000024 00000017060 12576320700 017765 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2012-2014 Brian Carrier. All rights reserved * * * This software is distributed under the Common Public License 1.0 */ /** * \file encase.c * Contains the Encase hash database specific extraction and printing routines. */ #include "tsk_hashdb_i.h" /** * Test the file to see if it is an Encase database * * @param hFile File handle to hash database * * @return 1 if encase and 0 if not */ uint8_t encase_test(FILE * hFile) { char buf[8]; fseeko(hFile, 0, SEEK_SET); if (8 != fread(buf, sizeof(char), 8, hFile)) return 0; if (memcmp(buf, "HASH\x0d\x0a\xff\x00", 8)) return 0; return 1; } /** * Set db_name using information from this database type * * @param hdb_info the hash database object */ static void encase_name(TSK_HDB_BINSRCH_INFO * hdb_info) { FILE * hFile = hdb_info->hDb; wchar_t buf[40]; UTF16 *utf16; UTF8 *utf8; size_t ilen; memset(hdb_info->base.db_name, '\0', TSK_HDB_NAME_MAXLEN); if(!hFile) { if (tsk_verbose) fprintf(stderr, "Error getting name from Encase hash db; using file name instead"); hdb_base_db_name_from_path(&hdb_info->base); return; } memset(buf, '\0', 40); fseeko(hFile, 1032, SEEK_SET); if (39 != fread(buf, sizeof(wchar_t), 39, hFile)) { if (tsk_verbose) fprintf(stderr, "Error getting name from Encase hash db; using file name instead"); hdb_base_db_name_from_path(&hdb_info->base); return; } // do some arithmetic on type sizes to be platform independent ilen = wcslen(buf) * (sizeof(wchar_t) / sizeof(UTF16)); utf8 = (UTF8 *) hdb_info->base.db_name; utf16 = (UTF16 *) buf; tsk_UTF16toUTF8(TSK_LIT_ENDIAN, (const UTF16 **) &utf16, &utf16[ilen], &utf8, &utf8[78], TSKlenientConversion); } TSK_HDB_INFO *encase_open(FILE *hDb, const TSK_TCHAR *db_path) { TSK_HDB_BINSRCH_INFO *hdb_binsrch_info = NULL; // get the basic binary-search info struct hdb_binsrch_info = hdb_binsrch_open(hDb, db_path); if (NULL == hdb_binsrch_info) { return NULL; } // overwrite the database-specific ones hdb_binsrch_info->base.db_type = TSK_HDB_DBTYPE_ENCASE_ID; encase_name(hdb_binsrch_info); hdb_binsrch_info->base.make_index = encase_make_index; hdb_binsrch_info->get_entry = encase_get_entry; return (TSK_HDB_INFO*)hdb_binsrch_info; } /** * Process the database to create a sorted index of it. 
Consecutive * entries with the same hash value are not added to the index, but * will be found during lookup. * * @param hdb_info_base Hash database to make index of. * @param dbtype Type of hash database (should always be TSK_HDB_DBTYPE_ENCASE_STR) * * @return 1 on error and 0 on success. */ uint8_t encase_make_index(TSK_HDB_INFO * hdb_info_base, TSK_TCHAR * dbtype) { TSK_HDB_BINSRCH_INFO *hdb_binsrch_info = (TSK_HDB_BINSRCH_INFO*)hdb_info_base; unsigned char buf[19]; char phash[19]; TSK_OFF_T offset = 0; int db_cnt = 0, idx_cnt = 0; /* Initialize the TSK index file */ if (hdb_binsrch_idx_initialize(hdb_binsrch_info, dbtype)) { tsk_error_set_errstr2( "encase_makeindex"); return 1; } /* Status */ if (tsk_verbose) TFPRINTF(stderr, _TSK_T("Extracting Data from Database (%s)\n"), hdb_binsrch_info->base.db_fname); memset(phash, '0', sizeof(phash)); memset(buf, '0', sizeof(buf)); /* read the file and add to the index */ fseek(hdb_binsrch_info->hDb, 1152, SEEK_SET); while (18 == fread(buf,sizeof(char),18,hdb_binsrch_info->hDb)) { db_cnt++; /* We only want to add one of each hash to the index */ if (memcmp(buf, phash, 18) == 0) { continue; } /* Add the entry to the index */ if (hdb_binsrch_idx_add_entry_bin(hdb_binsrch_info, buf, 16, offset)) { tsk_error_set_errstr2( "encase_make_index"); return 1; } idx_cnt++; /* Set the previous has value */ memcpy(phash, buf, 18); offset += 18; } if (idx_cnt > 0) { if (tsk_verbose) { fprintf(stderr, " Valid Database Entries: %d\n", db_cnt); fprintf(stderr, " Index File Entries %s: %d\n", (idx_cnt == db_cnt) ? "" : "(optimized)", idx_cnt); } /* Close and sort the index */ if (hdb_binsrch_idx_finalize(hdb_binsrch_info)) { tsk_error_set_errstr2( "encase_makeindex"); return 1; } } else { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_CORRUPT); tsk_error_set_errstr( "encase_makeindex: No valid entries found in database"); return 1; } return 0; } /** * Find the entry at a * given offset. The offset was likely determined from the index. * The callback is called for each entry. EnCase does not store names, * so the callback is called with just the hash value. 
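 * Each record in the EnCase database body is 18 bytes long, with the 16-byte MD5
 * value at the start of the record (the remaining 2 bytes are not interpreted
 * here), which is why the read loop below advances the offset in steps of 18.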
* * @param hdb_info Hash database to get data from * @param hash MD5 hash value that was searched for * @param offset Byte offset where hash value should be located in db_file * @param flags (not used) * @param action Callback used for each entry found in lookup * @param cb_ptr Pointer to data passed to callback * * @return 1 on error and 0 on succuss */ uint8_t encase_get_entry(TSK_HDB_INFO * hdb_info, const char *hash, TSK_OFF_T offset, TSK_HDB_FLAG_ENUM flags, TSK_HDB_LOOKUP_FN action, void *cb_ptr) { TSK_HDB_BINSRCH_INFO *hdb_binsrch_info = (TSK_HDB_BINSRCH_INFO*)hdb_info; int found = 0; char buf[19]; if (tsk_verbose) fprintf(stderr, "encase_getentry: Lookup up hash %s at offset %" PRIuOFF "\n", hash, offset); if (strlen(hash) != TSK_HDB_HTYPE_MD5_LEN) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr( "encase_getentry: Invalid hash value: %s", hash); return 1; } memset(buf, 0, sizeof(buf)); /* Loop so that we can find multiple occurances of the same hash */ fseeko(hdb_binsrch_info->hDb, offset, SEEK_SET); while (1) { int retval; char hash_str[TSK_HDB_HTYPE_MD5_LEN+1]; if (18 != fread(buf,sizeof(char),18,hdb_binsrch_info->hDb)) { if (feof(hdb_binsrch_info->hDb)) { break; } tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_READDB); tsk_error_set_errstr( "encase_getentry: Error reading database"); return 1; } snprintf(hash_str, TSK_HDB_HTYPE_MD5_LEN+1, "%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X", buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7], buf[8], buf[9], buf[10], buf[11], buf[12], buf[13], buf[14], buf[15]); /* Is this the one that we want? */ if (0 != strcasecmp(hash_str, hash)) { break; } retval = action(hdb_info, hash, "", cb_ptr); if (retval == TSK_WALK_ERROR) { return 1; } else if (retval == TSK_WALK_STOP) { return 0; } found = 1; /* Advance to the next row */ offset += 18; } if (found == 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr( "encase_getentry: Hash not found in file at offset: %lu", (unsigned long) offset); return 1; } return 0; } sleuthkit-4.2.0/tsk/hashdb/hashkeeper.c000644 000765 000024 00000032360 12576320700 020646 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2003-2014 Brian Carrier. 
All rights reserved * * * This software is distributed under the Common Public License 1.0 * */ #include "tsk_hashdb_i.h" /** * \file hashkeeper.c * Contains functions to read and process hash keeper database files */ /** * Test the file to see if it is a hashkeeper database * * @param hFile File handle to hash database * * @return 1 if hk and 0 if not */ uint8_t hk_test(FILE * hFile) { char buf[TSK_HDB_MAXLEN]; int cnt = 0; char *ptr; fseek(hFile, 0, SEEK_SET); // read in header line if (NULL == fgets(buf, TSK_HDB_MAXLEN, hFile)) return 0; if (strlen(buf) < 150) return 0; ptr = buf; // "file_id","hashset_id","file_name","directory","hash","file_size","date_modified","time_modified","time_zone","comments","date_accessed","time_accessed" if (strncmp(ptr, "\"file_id\"", strlen("\"file_id\"")) != 0) return 0; /* Cycle through the line looking at the fields in between the commas */ while (NULL != (ptr = strchr(ptr, ','))) { cnt++; if (cnt == 1) { if (strncmp(ptr, ",\"hashset_id\"", strlen(",\"hashset_id\"")) != 0) return 0; } else if (cnt == 2) { if (strncmp(ptr, ",\"file_name\"", strlen(",\"file_name\"")) != 0) return 0; } else if (cnt == 3) { if (strncmp(ptr, ",\"directory\"", strlen(",\"directory\"")) != 0) return 0; } else if (cnt == 4) { if (strncmp(ptr, ",\"hash\"", strlen(",\"hash\"")) != 0) return 0; } else { break; } ptr++; } return 1; } TSK_HDB_INFO *hk_open(FILE *hDb, const TSK_TCHAR *db_path) { TSK_HDB_BINSRCH_INFO *hdb_binsrch_info = NULL; // get the basic binary-search info struct hdb_binsrch_info = hdb_binsrch_open(hDb, db_path); if (NULL == hdb_binsrch_info) { return NULL; } // overwrite the database-specific ones hdb_binsrch_info->base.db_type = TSK_HDB_DBTYPE_HK_ID; hdb_binsrch_info->base.make_index = hk_makeindex; hdb_binsrch_info->get_entry = hk_getentry; return (TSK_HDB_INFO*)hdb_binsrch_info; } /** * Give a line from a hash keeper database, parse out the * MD5 (and other) text. NOTE that this will add NULL values * to the input text. * * @param str [in] String to parse * @param md5 [out] Pointer to a pointer, which will be assigned to the MD5 text in original string * @param name [in] Poiner to buffer where name can be copied into (can be NULL) * @param n_len [in] Length of name buffer * @param other [in] Pointer to buffer where extended data should be copied to (can be NULL) * @param o_len [in] Length of other buffer */ static int hk_parse_md5(char *str, char **md5, char *name, int n_len, char *other, int o_len) { char *ptr = str; char *file = NULL, *dir = NULL, *file_id = NULL, *hash_id = NULL; int cnt = 0; if ((str == NULL) || (strlen(str) < TSK_HDB_HTYPE_MD5_LEN)) return 1; if ((md5 == NULL) && (name == NULL) && (other == NULL)) return 0; /* * 0 file_id * 1 hashset_id * 2 file_name * 3 directory * 4 hash * 5 file_size * 6 date_modified * 7 time modified * 8 time_zone * 9 comments * 10 date_accessed * 11 time_accessed */ /* Assign the file_id if we are looking for it */ if (other != NULL) { file_id = ptr; } while (NULL != (ptr = strchr(ptr, ','))) { cnt++; /* End of file_id, begin hash_id */ if ((cnt == 1) && (other != NULL)) { *ptr = '\0'; ptr++; hash_id = ptr; } /* End of hash_id, begin name */ else if (cnt == 2) { /* Finish the 'other' stuff */ if (other != NULL) { *ptr = '\0'; snprintf(other, o_len, "Hash ID: %s File ID: %s", hash_id, file_id); } /* Are we done? 
*/ if ((name == NULL) && (md5 == NULL)) return 0; /* get the pointer to the name */ if (name) { if (ptr[1] != '"') return 1; file = &ptr[2]; /* We utilize the other loop code to find the end of * the name */ } } /* end of the name, begin directory - which may not exist */ else if ((cnt == 3) && (name != NULL)) { /* finish up the name */ if (ptr[-1] != '"') return 1; ptr[-1] = '\0'; /* get the directory start, if it exists */ if (ptr[1] == '"') { dir = &ptr[2]; } else { dir = NULL; } } /* end of directory, begin MD5 value */ else if (cnt == 4) { /* Copy the name into the buffer */ if (name != NULL) { name[0] = '\0'; if (dir) { /* finish up the dir */ if (ptr[-1] != '"') return 1; ptr[-1] = '\0'; strncpy(name, dir, n_len); strncat(name, "\\", n_len); } if (file) { strncat(name, file, n_len); } else { return 1; } } if (md5 == NULL) return 0; /* Do a length check and more sanity checks */ if ((strlen(ptr) < 2 + TSK_HDB_HTYPE_MD5_LEN) || (ptr[1] != '"') || (ptr[2 + TSK_HDB_HTYPE_MD5_LEN] != '"')) { return 1; } ptr = &ptr[2]; ptr[TSK_HDB_HTYPE_MD5_LEN] = '\0'; *md5 = ptr; /* Final sanity check */ if (NULL != strchr(ptr, ',')) { return 1; } return 0; } /* If the next field is in quotes then we need to skip to the * next quote and ignore any ',' in there */ if (ptr[1] == '"') { if (NULL == (ptr = strchr(&ptr[2], '"'))) { return 1; } } else { ptr++; } } return 1; } /** * Process the database to create a sorted index of it. Consecutive * entries with the same hash value are not added to the index, but * will be found during lookup. * * @param hdb_info_base Hash database to make index of * @param dbtype Text of database type (should always be TSK_HDB_DBTYPE_HK_STR) * * @return 1 on error and 0 on success. */ uint8_t hk_makeindex(TSK_HDB_INFO * hdb_info_base, TSK_TCHAR * dbtype) { TSK_HDB_BINSRCH_INFO *hdb_binsrch_info = (TSK_HDB_BINSRCH_INFO*)hdb_info_base; int i; size_t len = 0; char buf[TSK_HDB_MAXLEN]; char *hash = NULL, phash[TSK_HDB_HTYPE_MD5_LEN + 1]; TSK_OFF_T offset = 0; int db_cnt = 0, idx_cnt = 0, ig_cnt = 0; if (hdb_binsrch_idx_initialize(hdb_binsrch_info, dbtype)) { tsk_error_set_errstr2( "hk_makeindex"); return 1; } /* Status */ if (tsk_verbose) TFPRINTF(stderr, _TSK_T("Extracting Data from Database (%s)\n"), hdb_binsrch_info->base.db_fname); /* Allocate a buffer to hold the previous hash values */ memset(phash, '0', TSK_HDB_HTYPE_MD5_LEN + 1); /* read each line of the file */ fseek(hdb_binsrch_info->hDb, 0, SEEK_SET); for (i = 0; NULL != fgets(buf, TSK_HDB_MAXLEN, hdb_binsrch_info->hDb); offset += (TSK_OFF_T) len, i++) { // skip the header line if (i == 0) { ig_cnt++; continue; } len = strlen(buf); /* Parse each line to get the MD5 value */ if (hk_parse_md5(buf, &hash, NULL, 0, NULL, 0)) { ig_cnt++; continue; } db_cnt++; /* If this entry is for the same hash value as the last entry, * the skip it -- we'll look for it during lookup */ if (memcmp(hash, phash, TSK_HDB_HTYPE_MD5_LEN) == 0) { continue; } /* Add the entry to the index */ if (hdb_binsrch_idx_add_entry_str(hdb_binsrch_info, hash, offset)) { tsk_error_set_errstr2( "hk_makeindex"); return 1; } idx_cnt++; /* Set the previous hash value */ strncpy(phash, hash, TSK_HDB_HTYPE_MD5_LEN + 1); } if (idx_cnt > 0) { if (tsk_verbose) { fprintf(stderr, " Valid Database Entries: %d\n", db_cnt); fprintf(stderr, " Invalid Database Entries (headers or errors): %d\n", ig_cnt); fprintf(stderr, " Index File Entries %s: %d\n", (idx_cnt == db_cnt) ? 
"" : "(optimized)", idx_cnt); } /* Finish the index making process */ if (hdb_binsrch_idx_finalize(hdb_binsrch_info)) { tsk_error_set_errstr2( "hk_makeindex"); return 1; } } else { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_CORRUPT); tsk_error_set_errstr( "hk_makeindex: No valid entries found in database"); return 1; } return 0; } /** * Find the corresponding name at the * given offset. The offset was likely determined from the index. * The entries in the DB following the one specified are also processed * if they have the same hash value and their name is different. * The callback is called for each entry. * * Note: This routine assumes that &hdb_info->lock is locked by the caller. * * @param hdb_info Data base to get data from. * @param hash MD5 hash value that was searched for * @param offset Byte offset where hash value should be located in db_file * @param flags * @param action Callback used for each entry found in lookup * @param cb_ptr Pointer to data passed to callback * * @return 1 on error and 0 on success */ uint8_t hk_getentry(TSK_HDB_INFO * hdb_info, const char *hash, TSK_OFF_T offset, TSK_HDB_FLAG_ENUM flags, TSK_HDB_LOOKUP_FN action, void *cb_ptr) { TSK_HDB_BINSRCH_INFO *hdb_binsrch_info = (TSK_HDB_BINSRCH_INFO*)hdb_info; char buf[TSK_HDB_MAXLEN], name[TSK_HDB_MAXLEN], *ptr = NULL, pname[TSK_HDB_MAXLEN], other[TSK_HDB_MAXLEN]; int found = 0; if (tsk_verbose) fprintf(stderr, "hk_getentry: Lookup up hash %s at offset %" PRIuOFF "\n", hash, offset); if (strlen(hash) != TSK_HDB_HTYPE_MD5_LEN) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr( "hk_getentry: Invalid hash value: %s", hash); return 1; } memset(pname, '0', TSK_HDB_MAXLEN); /* Loop so that we can find multiple occurances of the same hash */ while (1) { size_t len; if (0 != fseeko(hdb_binsrch_info->hDb, offset, SEEK_SET)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_READDB); tsk_error_set_errstr( "hk_getentry: Error seeking to get file name: %lu", (unsigned long) offset); return 1; } if (NULL == fgets(buf, TSK_HDB_MAXLEN, hdb_binsrch_info->hDb)) { if (feof(hdb_binsrch_info->hDb)) { break; } tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_READDB); tsk_error_set_errstr( "hk_getentry: Error reading database"); return 1; } len = strlen(buf); if (len < TSK_HDB_HTYPE_MD5_LEN) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_CORRUPT); tsk_error_set_errstr( "hk_getentry: Invalid entry in database (too short): %s", buf); return 1; } if (hk_parse_md5(buf, &ptr, name, TSK_HDB_MAXLEN, ((flags & TSK_HDB_FLAG_EXT) ? other : NULL), ((flags & TSK_HDB_FLAG_EXT) ? TSK_HDB_MAXLEN : 0))) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_CORRUPT); tsk_error_set_errstr( "hk_getentry: Invalid entry in database: %s", buf); return 1; } /* Is this the one that we want? 
*/ if (0 != strcasecmp(ptr, hash)) { break; } if (strcmp(name, pname) != 0) { int retval; retval = action(hdb_info, hash, name, cb_ptr); if (retval == TSK_WALK_ERROR) { return 1; } else if (retval == TSK_WALK_STOP) { return 0; } found = 1; strncpy(pname, name, TSK_HDB_MAXLEN); } /* Advance to the next row */ offset += len; } if (found == 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr( "hk_getentry: Hash not found in file at offset: %lu", (unsigned long) offset); return 1; } return 0; } sleuthkit-4.2.0/tsk/hashdb/hdb_base.c000755 000765 000024 00000022657 12576320700 020271 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2014 Brian Carrier. All rights reserved * * * This software is distributed under the Common Public License 1.0 */ #include "tsk_hashdb_i.h" /** * \file hdb_base.c * "Base" functions for hash databases. Many are no-ops / stubs */ /** * \ingroup hashdblib * Sets hash database name in hdb_info based on database file path. * @param hdb_info Struct representation of an open hash database. */ void hdb_base_db_name_from_path(TSK_HDB_INFO *hdb_info) { #ifdef TSK_WIN32 const char PATH_CHAR = '\\'; #else const char PATH_CHAR = '/'; #endif TSK_TCHAR * begin; TSK_TCHAR * end; int i; hdb_info->db_name[0] = '\0'; begin = TSTRRCHR(hdb_info->db_fname, PATH_CHAR); #ifdef TSK_WIN32 // cygwin can have forward slashes, so try that too on Windows if (!begin) { begin = TSTRRCHR(hdb_info->db_fname, '/'); } #endif if (!begin) { begin = hdb_info->db_fname; } else { // unlikely since this means that the dbname is "/" if (TSTRLEN(begin) == 1) return; else begin++; } // end points to the byte after the last one we want to use if ((TSTRLEN(hdb_info->db_fname) > 4) && (TSTRICMP(&hdb_info->db_fname[TSTRLEN(hdb_info->db_fname)-4], _TSK_T(".idx")) == 0)) end = &hdb_info->db_fname[TSTRLEN(hdb_info->db_fname)-4]; else end = begin + TSTRLEN(begin); // @@@ This only works for file names with Latin characters. It may need // to be fixed some day. Leave it be for now. for(i = 0; i < (end-begin); i++) { hdb_info->db_name[i] = (char) begin[i]; } hdb_info->db_name[i] = '\0'; } /** * \ingroup hashdblib * \internal * Initializes TSK_HDB_INFO struct with "base class" method pointers and basic * setup of values. * @param hdb_info Allocated struct to initialize. * @param db_path * @return 0 on sucess, 1 on failure. 
*/ uint8_t hdb_info_base_open(TSK_HDB_INFO *hdb_info, const TSK_TCHAR *db_path) { // copy the database path into the struct size_t path_len = TSTRLEN(db_path); hdb_info->db_fname = (TSK_TCHAR*)tsk_malloc((path_len + 1) * sizeof(TSK_TCHAR)); if (!hdb_info->db_fname) { return 1; } TSTRNCPY(hdb_info->db_fname, db_path, path_len); // set the name based on path hdb_base_db_name_from_path(hdb_info); hdb_info->db_type = TSK_HDB_DBTYPE_INVALID_ID; tsk_init_lock(&hdb_info->lock); hdb_info->transaction_in_progress = 0; hdb_info->get_db_path = hdb_base_get_db_path; hdb_info->get_display_name = hdb_base_get_display_name; hdb_info->uses_external_indexes = hdb_base_uses_external_indexes; hdb_info->get_index_path = hdb_base_get_index_path; hdb_info->has_index = hdb_base_has_index; hdb_info->make_index = hdb_base_make_index; hdb_info->open_index = hdb_base_open_index; hdb_info->lookup_str = hdb_base_lookup_str; hdb_info->lookup_raw = hdb_base_lookup_bin; hdb_info->lookup_verbose_str = hdb_base_lookup_verbose_str; hdb_info->accepts_updates = hdb_base_accepts_updates; hdb_info->add_entry = hdb_base_add_entry; hdb_info->begin_transaction = hdb_base_begin_transaction; hdb_info->commit_transaction = hdb_base_commit_transaction; hdb_info->rollback_transaction = hdb_base_rollback_transaction; hdb_info->close_db = hdb_info_base_close; return 0; } const TSK_TCHAR * hdb_base_get_db_path(TSK_HDB_INFO *hdb_info) { // The "base class" assumption is that the hash database is implemented // as a user-accessible file (e.g., it is a SQLite database or a text- // format database). In the future, it may become necessary to accomodate // connection strings. return hdb_info->db_fname; } const char * hdb_base_get_display_name(TSK_HDB_INFO *hdb_info) { return hdb_info->db_name; } uint8_t hdb_base_uses_external_indexes() { // The "base class" assumption is that the hash database does not use // user-accessible external index files (e.g., it is a relational // database). return 0; } const TSK_TCHAR* hdb_base_get_index_path(TSK_HDB_INFO *hdb_info, TSK_HDB_HTYPE_ENUM htype) { // The "base class" assumption is that the hash database does not have // user-accessible external index files (e.g., it is a relational // database). It follows that the hash database path and index path are // the same, assuming that the hash database is implemented // as a user-accessible file (e.g., it is a SQLite database). return hdb_info->db_fname; } uint8_t hdb_base_has_index(TSK_HDB_INFO *hdb_info, TSK_HDB_HTYPE_ENUM htype) { // The "base class" assumption is that the hash database does not have // user-accessible external index files (e.g., it is a relational database). // It follows that the hash database has an index as soon as it is created. // This implementation of this function also says that look ups for all hash // algorithm types are supported. return 1; } uint8_t hdb_base_make_index(TSK_HDB_INFO *hdb_info, TSK_TCHAR *htype) { // The "base class" assumption is that the hash database does not have // user-accessible external index files (e.g., it is a relational // database). It follows that the hash database has an index upon creation. // Make this a no-op by simply returning the success code. return 0; } uint8_t hdb_base_open_index(TSK_HDB_INFO *hdb_info, TSK_HDB_HTYPE_ENUM htype) { // The "base class" assumption is that the hash database does not use // user-accessible external index files (e.g., it is a relational // database). It follows that the hash database has an index when it is // created and it is already "open". 
Make this a no-op by simply returning // the success code. return 0; } int8_t hdb_base_lookup_str(TSK_HDB_INFO *hdb_info, const char *hash, TSK_HDB_FLAG_ENUM flag, TSK_HDB_LOOKUP_FN callback, void *data) { // This function always needs an "override" by "derived classes." tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_UNSUPFUNC); tsk_error_set_errstr("hdb_base_lookup_str: operation not supported for hdb_info->db_type=%u", hdb_info->db_type); return -1; } int8_t hdb_base_lookup_bin(TSK_HDB_INFO *hdb_info, uint8_t *hash, uint8_t hash_len, TSK_HDB_FLAG_ENUM flag, TSK_HDB_LOOKUP_FN callback, void *data) { // This function always needs an "override" by "derived classes." tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_UNSUPFUNC); tsk_error_set_errstr("hdb_base_lookup_bin: operation not supported for hdb_info->db_type=%u", hdb_info->db_type); return -1; } int8_t hdb_base_lookup_verbose_str(TSK_HDB_INFO *hdb_info, const char *hash, void *result) { // This function always needs an "override" by "derived classes." tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_UNSUPFUNC); tsk_error_set_errstr("hdb_base_lookup_verbose_str: operation not supported for hdb_info->db_type=%u", hdb_info->db_type); return -1; } uint8_t hdb_base_accepts_updates() { // The "base class" assumption is that the database accepts updates (e.g., // it is a relational database and there is a "derived class override" of // add_entry() function that does INSERTs). return 1; } uint8_t hdb_base_add_entry(TSK_HDB_INFO *hdb_info, const char *file_name, const char *md5, const char *sha1, const char *sha2_256, const char *comment) { // This function needs an "override" by "derived classes" unless there is an // "override" of the accepts_updates function that returns 0 (false). tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_UNSUPFUNC); tsk_error_set_errstr("hdb_base_add_entry: operation not supported for hdb_info->db_type=%u", hdb_info->db_type); return 1; } uint8_t hdb_base_begin_transaction(TSK_HDB_INFO *hdb_info) { // This function needs an "override" by "derived classes" unless there is an // "override" of the accepts_updates function that returns 0 (false). tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_UNSUPFUNC); tsk_error_set_errstr("hdb_base_begin_transaction: operation not supported for hdb_info->db_type=%u", hdb_info->db_type); return 1; } uint8_t hdb_base_commit_transaction(TSK_HDB_INFO *hdb_info) { // This function needs an "override" by "derived classes" unless there is an // "override" of the accepts_updates function that returns 0 (false). tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_UNSUPFUNC); tsk_error_set_errstr("hdb_base_commit_transaction: operation not supported for hdb_info->db_type=%u", hdb_info->db_type); return 1; } uint8_t hdb_base_rollback_transaction(TSK_HDB_INFO *hdb_info) { // This function needs an "override" by "derived classes" unless there is an // "override" of the accepts_updates function that returns 0 (false). tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_UNSUPFUNC); tsk_error_set_errstr("hdb_base_rollback_transaction: operation not supported for hdb_info->db_type=%u", hdb_info->db_type); return 1; } /** * \ingroup hashdblib * De-initializes struct representation of a hash database. * @param hdb_info Struct representation of a hash database. * @return 0 on sucess, 1 on failure. 
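 *
 * Callers normally reach this through the close_db method pointer installed
 * by hdb_info_base_open(), e.g. (sketch): hdb_info->close_db(hdb_info);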
*/ void hdb_info_base_close(TSK_HDB_INFO *hdb_info) { if (NULL == hdb_info) { return; } if (hdb_info->db_fname) { free(hdb_info->db_fname); hdb_info->db_fname = NULL; } tsk_deinit_lock(&hdb_info->lock); } sleuthkit-4.2.0/tsk/hashdb/idxonly.c000644 000765 000024 00000005743 12576320700 020222 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2007-2014 Brian Carrier. All rights reserved * * This software is distributed under the Common Public License 1.0 */ /** * \file idxonly.c * Contains the dummy functions that are used when only an index is used for lookups and the * original database is gone. */ #include "tsk_hashdb_i.h" /** * Set db_name using information from this database type * * @param hdb_info the hash database object */ static void idxonly_name(TSK_HDB_BINSRCH_INFO *hdb_binsrch_info) { FILE * hFile; char buf[TSK_HDB_NAME_MAXLEN]; char *bufptr = buf; size_t i = 0; memset(hdb_binsrch_info->base.db_name, '\0', TSK_HDB_NAME_MAXLEN); // Currently only supporting md5 and sha1 index files. Try to get the // database name from the index file. if(hdb_binsrch_open_idx((TSK_HDB_INFO*)hdb_binsrch_info, TSK_HDB_HTYPE_MD5_ID)) { if(hdb_binsrch_open_idx((TSK_HDB_INFO*)hdb_binsrch_info, TSK_HDB_HTYPE_SHA1_ID)) { if (tsk_verbose) fprintf(stderr, "Failed to get name from index (index does not exist); using file name instead"); hdb_base_db_name_from_path((TSK_HDB_INFO*)hdb_binsrch_info); return; } } hFile = hdb_binsrch_info->hIdx; fseeko(hFile, 0, 0); if(NULL == fgets(buf, TSK_HDB_NAME_MAXLEN, hFile) || NULL == fgets(buf, TSK_HDB_NAME_MAXLEN, hFile) || strncmp(buf, TSK_HDB_IDX_HEAD_NAME_STR, strlen(TSK_HDB_IDX_HEAD_NAME_STR)) != 0) { if (tsk_verbose) fprintf(stderr, "Failed to read name from index; using file name instead"); hdb_base_db_name_from_path((TSK_HDB_INFO*)hdb_binsrch_info); return; } bufptr = strchr(buf, '|'); bufptr++; while(bufptr[i] != '\r' && bufptr[i] != '\n' && i < strlen(bufptr)) { hdb_binsrch_info->base.db_name[i] = bufptr[i]; i++; } } TSK_HDB_INFO *idxonly_open(const TSK_TCHAR *db_path) { TSK_HDB_BINSRCH_INFO *hdb_binsrch_info = NULL; hdb_binsrch_info = hdb_binsrch_open(NULL, db_path); if (NULL == hdb_binsrch_info) { return NULL; } hdb_binsrch_info->base.db_type = TSK_HDB_DBTYPE_IDXONLY_ID; idxonly_name(hdb_binsrch_info); hdb_binsrch_info->base.get_db_path = idxonly_get_db_path; hdb_binsrch_info->get_entry = idxonly_getentry; return (TSK_HDB_INFO*)hdb_binsrch_info; } const TSK_TCHAR * idxonly_get_db_path(TSK_HDB_INFO *hdb_info) { // The database path member of the TSK_HDB_INFO is filled in, but that is // just for the sake of the common index file name construction algorithm. return NULL; } uint8_t idxonly_getentry(TSK_HDB_INFO * hdb_info, const char *hash, TSK_OFF_T offset, TSK_HDB_FLAG_ENUM flags, TSK_HDB_LOOKUP_FN action, void *cb_ptr) { if (!(flags & TSK_HDB_FLAG_QUICK) && (NULL != action)) { action(hdb_info, hash, NULL, cb_ptr); } return 0; } sleuthkit-4.2.0/tsk/hashdb/Makefile.am000644 000765 000024 00000000557 12576320700 020422 0ustar00carrierstaff000000 000000 AM_CPPFLAGS = -I../.. -I$(srcdir)/../.. 
-Wall EXTRA_DIST = .indent.pro noinst_LTLIBRARIES = libtskhashdb.la libtskhashdb_la_SOURCES = \ encase.c hashkeeper.c idxonly.c md5sum.c nsrl.c \ sqlite_hdb.cpp binsrch_index.cpp tsk_hashdb.c hdb_base.c \ tsk_hash_info.h tsk_hashdb.h tsk_hashdb_i.h indent: indent *.cpp *.c *.h clean-local: -rm -f *.c~ *.h~ sleuthkit-4.2.0/tsk/hashdb/Makefile.in000644 000765 000024 00000051552 12576326300 020436 0ustar00carrierstaff000000 000000 # Makefile.in generated by automake 1.15 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2014 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) ;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) pkgdatadir = $(datadir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ pkglibexecdir = $(libexecdir)/@PACKAGE@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = tsk/hashdb ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ax_pthread.m4 \ $(top_srcdir)/m4/cppunit.m4 \ $(top_srcdir)/m4/ax_jni_include_dir.m4 \ $(top_srcdir)/m4/ac_prog_javac_works.m4 \ $(top_srcdir)/m4/ac_prog_javac.m4 \ $(top_srcdir)/m4/ac_prog_java_works.m4 \ $(top_srcdir)/m4/ac_prog_java.m4 $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/tsk/tsk_config.h 
CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) libtskhashdb_la_LIBADD = am_libtskhashdb_la_OBJECTS = encase.lo hashkeeper.lo idxonly.lo \ md5sum.lo nsrl.lo sqlite_hdb.lo binsrch_index.lo tsk_hashdb.lo \ hdb_base.lo libtskhashdb_la_OBJECTS = $(am_libtskhashdb_la_OBJECTS) AM_V_lt = $(am__v_lt_@AM_V@) am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) am__v_lt_0 = --silent am__v_lt_1 = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir)/tsk depcomp = $(SHELL) $(top_srcdir)/config/depcomp am__depfiles_maybe = depfiles am__mv = mv -f COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CFLAGS) $(CFLAGS) AM_V_CC = $(am__v_CC_@AM_V@) am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@) am__v_CC_0 = @echo " CC " $@; am__v_CC_1 = CCLD = $(CC) LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CCLD = $(am__v_CCLD_@AM_V@) am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@) am__v_CCLD_0 = @echo " CCLD " $@; am__v_CCLD_1 = CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CXXFLAGS) $(CXXFLAGS) AM_V_CXX = $(am__v_CXX_@AM_V@) am__v_CXX_ = $(am__v_CXX_@AM_DEFAULT_V@) am__v_CXX_0 = @echo " CXX " $@; am__v_CXX_1 = CXXLD = $(CXX) CXXLINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CXXLD = $(am__v_CXXLD_@AM_V@) am__v_CXXLD_ = $(am__v_CXXLD_@AM_DEFAULT_V@) am__v_CXXLD_0 = @echo " CXXLD " $@; am__v_CXXLD_1 = SOURCES = $(libtskhashdb_la_SOURCES) DIST_SOURCES = $(libtskhashdb_la_SOURCES) am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. 
am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/config/depcomp DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) ACLOCAL = @ACLOCAL@ ALLOCA = @ALLOCA@ AMTAR = @AMTAR@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ ANT_FOUND = @ANT_FOUND@ AR = @AR@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ JAVA = @JAVA@ JAVAC = @JAVAC@ JNI_CPPFLAGS = @JNI_CPPFLAGS@ LD = @LD@ LDFLAGS = @LDFLAGS@ LIBOBJS = @LIBOBJS@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBTSK_LDFLAGS = @LIBTSK_LDFLAGS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAINT = @MAINT@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ NM = @NM@ NMEDIT = @NMEDIT@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PERL = @PERL@ PTHREAD_CC = @PTHREAD_CC@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ PTHREAD_LIBS = @PTHREAD_LIBS@ RANLIB = @RANLIB@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ STRIP = @STRIP@ VERSION = @VERSION@ Z_PATH = @Z_PATH@ _ACJNI_JAVAC = @_ACJNI_JAVAC@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ ax_pthread_config = @ax_pthread_config@ bindir = @bindir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ datadir = @datadir@ datarootdir = @datarootdir@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ prefix = @prefix@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = 
@top_builddir@ top_srcdir = @top_srcdir@ uudecode = @uudecode@ AM_CPPFLAGS = -I../.. -I$(srcdir)/../.. -Wall EXTRA_DIST = .indent.pro noinst_LTLIBRARIES = libtskhashdb.la libtskhashdb_la_SOURCES = \ encase.c hashkeeper.c idxonly.c md5sum.c nsrl.c \ sqlite_hdb.cpp binsrch_index.cpp tsk_hashdb.c hdb_base.c \ tsk_hash_info.h tsk_hashdb.h tsk_hashdb_i.h all: all-am .SUFFIXES: .SUFFIXES: .c .cpp .lo .o .obj $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign tsk/hashdb/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign tsk/hashdb/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; \ locs=`for p in $$list; do echo $$p; done | \ sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \ sort -u`; \ test -z "$$locs" || { \ echo rm -f $${locs}; \ rm -f $${locs}; \ } libtskhashdb.la: $(libtskhashdb_la_OBJECTS) $(libtskhashdb_la_DEPENDENCIES) $(EXTRA_libtskhashdb_la_DEPENDENCIES) $(AM_V_CXXLD)$(CXXLINK) $(libtskhashdb_la_OBJECTS) $(libtskhashdb_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/binsrch_index.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/encase.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/hashkeeper.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/hdb_base.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/idxonly.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/md5sum.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/nsrl.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sqlite_hdb.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsk_hashdb.Plo@am__quote@ .c.o: @am__fastdepCC_TRUE@ $(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ $< .c.obj: @am__fastdepCC_TRUE@ $(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no 
@AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .c.lo: @am__fastdepCC_TRUE@ $(AM_V_CC)$(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LTCOMPILE) -c -o $@ $< .cpp.o: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LTCXXCOMPILE) -c -o $@ $< mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-am TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-am CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-am cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" 
\ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool clean-local \ clean-noinstLTLIBRARIES mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: CTAGS GTAGS TAGS all all-am check check-am clean clean-generic \ clean-libtool clean-local clean-noinstLTLIBRARIES \ cscopelist-am ctags ctags-am distclean distclean-compile \ distclean-generic distclean-libtool distclean-tags distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-pdf \ install-pdf-am install-ps install-ps-am install-strip \ installcheck installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags tags-am uninstall uninstall-am .PRECIOUS: Makefile indent: indent *.cpp *.c *.h clean-local: -rm -f *.c~ *.h~ # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: sleuthkit-4.2.0/tsk/hashdb/md5sum.c000644 000765 000024 00000026631 12576320700 017745 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2003-2014 Brian Carrier. All rights reserved * * * This software is distributed under the Common Public License 1.0 */ /** * \file md5sum.c * Contains the MD5sum hash database specific extraction and printing routines. 
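 *
 * Two input line formats are recognized (the hash shown is the MD5 of an
 * empty file and the file name is only illustrative):
 *
 *   d41d8cd98f00b204e9800998ecf8427e *some/file.bin
 *   MD5 (some/file.bin) = d41d8cd98f00b204e9800998ecf8427e
 *
 * A line holding only the bare hash value is also accepted.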
*/ #include "tsk_hashdb_i.h" #define STR_EMPTY "" /** * Test the file to see if it is a md5sum database * * @param hFile File handle to hash database * * @return 1 if md5sum and 0 if not */ uint8_t md5sum_test(FILE * hFile) { char buf[TSK_HDB_MAXLEN]; fseeko(hFile, 0, SEEK_SET); if (NULL == fgets(buf, TSK_HDB_MAXLEN, hFile)) return 0; if (strlen(buf) < TSK_HDB_HTYPE_MD5_LEN) return 0; if ((buf[0] == 'M') && (buf[1] == 'D') && (buf[2] == '5') && (buf[3] == ' ') && (buf[4] == '(')) { return 1; } if ((isxdigit((int) buf[0])) && (isxdigit((int) buf[TSK_HDB_HTYPE_MD5_LEN - 1])) && (isspace((int) buf[TSK_HDB_HTYPE_MD5_LEN]))) { return 1; } return 0; } TSK_HDB_INFO *md5sum_open(FILE *hDb, const TSK_TCHAR *db_path) { TSK_HDB_BINSRCH_INFO *hdb_binsrch_info = NULL; // get the basic binary-search info struct hdb_binsrch_info = hdb_binsrch_open(hDb, db_path); if (NULL == hdb_binsrch_info) { return NULL; } // overwrite with more specific methods hdb_binsrch_info->base.db_type = TSK_HDB_DBTYPE_MD5SUM_ID; hdb_binsrch_info->get_entry = md5sum_getentry; hdb_binsrch_info->base.make_index = md5sum_makeindex; return (TSK_HDB_INFO*)hdb_binsrch_info; } /** * Given a line of text from an MD5sum database, return pointers * to the start start of the name and MD5 hash values (original * string will have NULL values in it). * * @param [in]Input string from database -- THIS WILL BE MODIFIED * @param [out] Will contain a pointer to MD5 value in input string * @param [out] Will contain a pointer to name value in input string (input could be NULL) * * @return 1 on error and 0 on success */ static uint8_t md5sum_parse_md5(char *str, char **md5, char **name) { char *ptr; if (strlen(str) < TSK_HDB_HTYPE_MD5_LEN + 1) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_CORRUPT); tsk_error_set_errstr( "md5sum_parse_md5: String is too short: %s", str); return 1; } /* Format of: MD5 NAME or even just the MD5 value */ if ((isxdigit((int) str[0])) && (isxdigit((int) str[TSK_HDB_HTYPE_MD5_LEN - 1])) && (isspace((int) str[TSK_HDB_HTYPE_MD5_LEN]))) { unsigned int i; size_t len = strlen(str); if (md5 != NULL) { *md5 = &str[0]; } i = TSK_HDB_HTYPE_MD5_LEN; str[i++] = '\0'; /* Just the MD5 values */ if (i >= len) { if (name != NULL) { *name = STR_EMPTY; } return 0; } while ((i < len) && ((str[i] == ' ') || (str[i] == '\t'))) { i++; } if ((len == i) || (str[i] == '\n')) { return 0; } if (str[i] == '*') { i++; } if (name != NULL) { *name = &str[i]; } ptr = &str[i]; if (ptr[strlen(ptr) - 1] == '\n') ptr[strlen(ptr) - 1] = '\0'; } /* Format of: MD5 (NAME) = MD5 */ else if ((str[0] == 'M') && (str[1] == 'D') && (str[2] == '5') && (str[3] == ' ') && (str[4] == '(')) { ptr = &str[5]; if (name != NULL) { *name = ptr; } if (NULL == (ptr = strchr(ptr, ')'))) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_CORRUPT); tsk_error_set_errstr( "md5sum_parse_md5: Missing ) in name: %s", str); return 1; } *ptr = '\0'; ptr++; if (4 + TSK_HDB_HTYPE_MD5_LEN > strlen(ptr)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_CORRUPT); tsk_error_set_errstr( "md5sum_parse_md5: Invalid MD5 value: %s", ptr); return 1; } if ((*(ptr) != ' ') || (*(++ptr) != '=') || (*(++ptr) != ' ') || (!isxdigit((int) *(++ptr))) || (ptr[TSK_HDB_HTYPE_MD5_LEN] != '\n')) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_CORRUPT); tsk_error_set_errstr( "md5sum_parse_md5: Invalid hash value %s", ptr); return 1; } *md5 = ptr; ptr[TSK_HDB_HTYPE_MD5_LEN] = '\0'; } else { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_CORRUPT); tsk_error_set_errstr( "md5sum_parse_md5: 
Invalid md5sum format in file: %s\n", str); return 1; } return 0; } /** * Process the database to create a sorted index of it. Consecutive * entries with the same hash value are not added to the index, but * will be found during lookup. * * @param hdb_info_base Hash database to make index of. * @param dbtype Type of hash database (should always be TSK_HDB_DBTYPE_MD5SUM_STR) * * @return 1 on error and 0 on success. */ uint8_t md5sum_makeindex(TSK_HDB_INFO *hdb_info_base, TSK_TCHAR * dbtype) { TSK_HDB_BINSRCH_INFO *hdb_info = (TSK_HDB_BINSRCH_INFO*)hdb_info_base; int i; char buf[TSK_HDB_MAXLEN]; char *hash = NULL, phash[TSK_HDB_HTYPE_MD5_LEN + 1]; TSK_OFF_T offset = 0; int db_cnt = 0, idx_cnt = 0, ig_cnt = 0; size_t len; /* Initialize the TSK index file */ if (hdb_binsrch_idx_initialize(hdb_info, dbtype)) { tsk_error_set_errstr2( "md5sum_makeindex"); return 1; } /* Status */ if (tsk_verbose) TFPRINTF(stderr, _TSK_T("Extracting Data from Database (%s)\n"), hdb_info->base.db_fname); /* Allocate a buffer for the previous hash value */ memset(phash, '0', TSK_HDB_HTYPE_MD5_LEN + 1); /* read the file and add to the index */ fseek(hdb_info->hDb, 0, SEEK_SET); for (i = 0; NULL != fgets(buf, TSK_HDB_MAXLEN, hdb_info->hDb); offset += (TSK_OFF_T) len, i++) { len = strlen(buf); /* Parse each line */ if (md5sum_parse_md5(buf, &hash, NULL)) { ig_cnt++; continue; } db_cnt++; /* We only want to add one of each hash to the index */ if (memcmp(hash, phash, TSK_HDB_HTYPE_MD5_LEN) == 0) { continue; } /* Add the entry to the index */ if (hdb_binsrch_idx_add_entry_str(hdb_info, hash, offset)) { tsk_error_set_errstr2( "md5sum_makeindex"); return 1; } idx_cnt++; /* Set the previous has value */ strncpy(phash, hash, TSK_HDB_HTYPE_MD5_LEN + 1); } if (idx_cnt > 0) { if (tsk_verbose) { fprintf(stderr, " Valid Database Entries: %d\n", db_cnt); fprintf(stderr, " Invalid Database Entries (headers or errors): %d\n", ig_cnt); fprintf(stderr, " Index File Entries %s: %d\n", (idx_cnt == db_cnt) ? "" : "(optimized)", idx_cnt); } /* Close and sort the index */ if (hdb_binsrch_idx_finalize(hdb_info)) { tsk_error_set_errstr2( "md5sum_makeindex"); return 1; } } else { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_CORRUPT); tsk_error_set_errstr( "md5sum_makeindex: No valid entries found in database"); return 1; } return 0; } /** * Find the corresponding name at a * given offset. The offset was likely determined from the index. * The entries in the DB following the one specified are also processed * if they have the same hash value and their name is different. * The callback is called for each entry. 
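 *
 * A minimal sketch of a TSK_HDB_LOOKUP_FN callback; print_hit is a
 * hypothetical name and the return values mirror the TSK_WALK_ERROR and
 * TSK_WALK_STOP checks made by md5sum_getentry() below:
 * @code
 *   static TSK_WALK_RET_ENUM
 *   print_hit(TSK_HDB_INFO *hdb_info, const char *hash, const char *name,
 *       void *ptr)
 *   {
 *       fprintf(stdout, "%s  %s\n", hash, name);
 *       return TSK_WALK_CONT;
 *   }
 * @endcode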
* * @param hdb_info Hash database to get data from * @param hash MD5 hash value that was searched for * @param offset Byte offset where hash value should be located in db_file * @param flags (not used) * @param action Callback used for each entry found in lookup * @param cb_ptr Pointer to data passed to callback * * @return 1 on error and 0 on succuss */ uint8_t md5sum_getentry(TSK_HDB_INFO * hdb_info, const char *hash, TSK_OFF_T offset, TSK_HDB_FLAG_ENUM flags, TSK_HDB_LOOKUP_FN action, void *cb_ptr) { TSK_HDB_BINSRCH_INFO *hdb_binsrch_info = (TSK_HDB_BINSRCH_INFO*)hdb_info; char buf[TSK_HDB_MAXLEN], *name, *ptr = NULL, pname[TSK_HDB_MAXLEN]; int found = 0; if (tsk_verbose) fprintf(stderr, "md5sum_getentry: Lookup up hash %s at offset %" PRIuOFF "\n", hash, offset); if (strlen(hash) != TSK_HDB_HTYPE_MD5_LEN) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr( "md5sum_getentry: Invalid hash value: %s", hash); return 1; } memset(pname, '0', TSK_HDB_MAXLEN); /* Loop so that we can find multiple occurances of the same hash */ while (1) { size_t len; if (0 != fseeko(hdb_binsrch_info->hDb, offset, SEEK_SET)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_READDB); tsk_error_set_errstr( "md5sum_getentry: Error seeking to get file name: %lu", (unsigned long) offset); return 1; } if (NULL == fgets(buf, TSK_HDB_MAXLEN, hdb_binsrch_info->hDb)) { if (feof(hdb_binsrch_info->hDb)) { break; } tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_READDB); tsk_error_set_errstr( "md5sum_getentry: Error reading database"); return 1; } len = strlen(buf); if (len < TSK_HDB_HTYPE_MD5_LEN) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_CORRUPT); tsk_error_set_errstr( "md5sum_getentry: Invalid entry in database (too short): %s", buf); return 1; } if (md5sum_parse_md5(buf, &ptr, &name)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_CORRUPT); tsk_error_set_errstr( "md5sum_getentry: Invalid entry in database: %s", buf); return 1; } /* Is this the one that we want? */ if (0 != strcasecmp(ptr, hash)) { break; } if (strcmp(name, pname) != 0) { int retval; retval = action(hdb_info, hash, name, cb_ptr); if (retval == TSK_WALK_ERROR) { return 1; } else if (retval == TSK_WALK_STOP) { return 0; } found = 1; strncpy(pname, name, TSK_HDB_MAXLEN); } /* Advance to the next row */ offset += len; } if (found == 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr( "md5sum_getentry: Hash not found in file at offset: %lu", (unsigned long) offset); return 1; } return 0; } sleuthkit-4.2.0/tsk/hashdb/nsrl.c000644 000765 000024 00000047016 12576320700 017511 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2003-2014 Brian Carrier. All rights reserved * * * This software is distributed under the Common Public License 1.0 */ #include "tsk_hashdb_i.h" /** * \file nsrl.c * NSRL specific functions to read the database. 
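 *
 * Two NSRL file layouts are supported (TSK_HDB_NSRL_FORM1 and
 * TSK_HDB_NSRL_FORM2 below); they are distinguished by the header line of
 * the CSV file:
 *
 *   "SHA-1","FileName","FileSize","ProductCode","OpSystemCode","MD4","MD5","CRC32","SpecialCode"
 *   "SHA-1","MD5","CRC32","FileName","FileSize","ProductCode","OpSystemCode","SpecialCode"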
*/ /** * Version of NSRL Database */ enum TSK_HDB_NSRL_FORM_ENUM { TSK_HDB_NSRL_FORM1 = (1 << 0), ///< Version 1 TSK_HDB_NSRL_FORM2 = (1 << 1) ///< Version 2 }; typedef enum TSK_HDB_NSRL_FORM_ENUM TSK_HDB_NSRL_FORM_ENUM; /** * Analyze the header line of the database to determine the version of NSRL * * @param str line from the database file * * @return version or -1 on error */ static int get_format_ver(char *str) { /* "SHA-1","FileName","FileSize","ProductCode","OpSystemCode","MD4","MD5","CRC32","SpecialCode" */ if ((str[9] == 'F') && (str[20] == 'F') && (str[24] == 'S') && (str[31] == 'P') && (str[45] == 'O')) return TSK_HDB_NSRL_FORM1; /* "SHA-1","MD5","CRC32","FileName","FileSize","ProductCode","OpSystemCode","Specia lCode" */ else if ((str[9] == 'M') && (str[15] == 'C') && (str[23] == 'F') && (str[34] == 'F') && (str[45] == 'P')) return TSK_HDB_NSRL_FORM2; tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_CORRUPT); tsk_error_set_errstr( "nsrl: Unknown header format: %s\n", str); return -1; } /** * Test the file to see if it is an NSRL database * * @param hFile File handle to hash database * * @return 1 if NSRL and 0 if not */ uint8_t nsrl_test(FILE * hFile) { char buf[TSK_HDB_MAXLEN]; fseeko(hFile, 0, SEEK_SET); if (NULL == fgets(buf, TSK_HDB_MAXLEN, hFile)) return 0; if (strlen(buf) < 45) return 0; // Basic checks in first field if ((buf[0] != '"') || (buf[1] != 'S') || (buf[2] != 'H') || (buf[3] != 'A') || (buf[4] != '-') || (buf[5] != '1') || (buf[6] != '"')) return 0; if (-1 == get_format_ver(buf)) return 0; return 1; } TSK_HDB_INFO *nsrl_open(FILE *hDb, const TSK_TCHAR *db_path) { TSK_HDB_BINSRCH_INFO *hdb_binsrch_info = NULL; // get the basic binary-search info struct hdb_binsrch_info = hdb_binsrch_open(hDb, db_path); if (NULL == hdb_binsrch_info) { return NULL; } // overwrite the database-specific methods hdb_binsrch_info->base.db_type = TSK_HDB_DBTYPE_NSRL_ID; hdb_binsrch_info->base.make_index = nsrl_makeindex; hdb_binsrch_info->get_entry = nsrl_getentry; return (TSK_HDB_INFO*)hdb_binsrch_info; } /** * Perform a basic check on a string to see if it starts with quotes * and contains a possible SHA-1 value * * @param x string to test * @return 1 if NSRL and 0 if not */ #define is_valid_nsrl(x) \ ( (strlen((x)) > TSK_HDB_HTYPE_SHA1_LEN + 4) && \ ((x)[0] == '"') && ((x)[TSK_HDB_HTYPE_SHA1_LEN + 1] == '"') && \ ((x)[TSK_HDB_HTYPE_SHA1_LEN + 2] == ',') && ((x)[TSK_HDB_HTYPE_SHA1_LEN + 3] == '"') ) /** * Parse a line from the NSRL database and set pointers to the SHA1 and Name. This will modify * the input text by adding NULL values! * * @param str String to parse * @param sha1 Pointer to a pointer that will contain location of SHA1 in original text * @param name Pointer to a pointer that will contain location of the name in original text * @param ver Version of NSRL we are parsing * * @return 1 on error and 0 on success */ static uint8_t nsrl_parse_sha1(char *str, char **sha1, char **name, int ver) { char *ptr = NULL; /* Sanity check */ if (is_valid_nsrl(str) == 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_CORRUPT); tsk_error_set_errstr( "nsrl_parse_sha1: Invalid string to parse: %s", str); return 1; } /* Do they want the hash? 
*/ if (sha1 != NULL) { /* set the hash pointer to just the SHA value (past the ") */ ptr = &str[1]; ptr[TSK_HDB_HTYPE_SHA1_LEN] = '\0'; /* Final sanity check to make sure there are no ',' in hash */ if (NULL != strchr(ptr, ',')) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_CORRUPT); tsk_error_set_errstr( "nsrl_parse_sha1: Invalid string to parse (commas after SHA1): %s", ptr); return 1; } /* Assign the argument if it is not NULL */ *sha1 = ptr; } /* Do they want the name? */ if (name != NULL) { if (ver == TSK_HDB_NSRL_FORM1) { /* Extract out the name - the field after SHA1 */ ptr = &str[TSK_HDB_HTYPE_SHA1_LEN + 4]; // 4 = 3 " and 1 , *name = ptr; if (NULL == (ptr = strchr(ptr, ','))) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_CORRUPT); tsk_error_set_errstr( "nsrl_parse_sha1: Invalid string to parse (commas after name): %s", ptr); return 1; } /* Seek back to cover the final " */ ptr[-1] = '\0'; } else if (ver == TSK_HDB_NSRL_FORM2) { /* Extract out the name - the field after SHA1, MD5, and CRC */ ptr = &str[1 + TSK_HDB_HTYPE_SHA1_LEN + 3 + TSK_HDB_HTYPE_MD5_LEN + 3 + TSK_HDB_HTYPE_CRC32_LEN + 3]; *name = ptr; if (NULL == (ptr = strchr(ptr, ','))) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_CORRUPT); tsk_error_set_errstr( "nsrl_parse_sha1: Invalid string to parse (commas after name): %s", ptr); return 1; } /* Seek back to cover the final " */ ptr[-1] = '\0'; } } return 0; } /** * Parse a line from the NSRL database and set pointers to the MD5 and Name. This will modify * the input text by adding NULL values! * * @param str String to parse * @param md5 Pointer to a pointer that will contain location of MD5 in original text * @param name Pointer to a pointer that will contain location of the name in original text * @param ver Version of NSRL we are parsing * * @return 1 on error and 0 on success */ static uint8_t nsrl_parse_md5(char *str, char **md5, char **name, int ver) { char *ptr = NULL; int cnt = 0; /* Sanity check */ if (is_valid_nsrl(str) == 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_CORRUPT); tsk_error_set_errstr( "nsrl_parse_md5: Invalid string to parse: %s", str); return 1; } if ((md5 == NULL) && (name == NULL)) return 0; if (ver == TSK_HDB_NSRL_FORM1) { ptr = str; /* Cycle through the fields to extract name and MD5 * * 1. before name * 2. before size * 3. before prod code * 4. before OS * 5. before MD4 * 6. 
before MD5 */ cnt = 0; while (NULL != (ptr = strchr(ptr, ','))) { cnt++; /* Begining of the name */ if ((cnt == 1) && (name != NULL)) { *name = &ptr[2]; /* We utilize the other loop code to find the end of * the name */ } /* end of the name */ else if ((cnt == 2) && (name != NULL)) { if (ptr[-1] != '"') { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_CORRUPT); tsk_error_set_errstr( "nsrl_parse_md5: Missing Quote after name: %s", (char *) name); return 1; } ptr[-1] = '\0'; if (md5 == NULL) return 0; } /* MD5 value */ else if ((cnt == 6) && (md5 != NULL)) { /* Do a length check and more sanity checks */ if ((strlen(ptr) < 2 + TSK_HDB_HTYPE_MD5_LEN) || (ptr[1] != '"') || (ptr[2 + TSK_HDB_HTYPE_MD5_LEN] != '"')) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_CORRUPT); tsk_error_set_errstr( "nsrl_parse_md5: Invalid MD5 value: %s", ptr); return 1; } ptr = &ptr[2]; ptr[TSK_HDB_HTYPE_MD5_LEN] = '\0'; *md5 = ptr; /* Final sanity check */ if (NULL != strchr(ptr, ',')) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_CORRUPT); tsk_error_set_errstr( "nsrl_parse_md5: Missing comma after MD5: %s", (char *) md5); return 1; } return 0; } /* If the next field is in quotes then we need to skip to the * next quote and ignore any ',' in there */ if (ptr[1] == '"') { if (NULL == (ptr = strchr(&ptr[2], '"'))) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_CORRUPT); tsk_error_set_errstr( "nsrl_parse_md5: Error advancing past quote"); return 1; } } else { ptr++; } } } else if (ver == TSK_HDB_NSRL_FORM2) { /* Do they want the hash? */ if (md5 != NULL) { /* set the hash pointer to just the MD5 value (past the SHA1") */ ptr = &str[1 + TSK_HDB_HTYPE_SHA1_LEN + 3]; ptr[TSK_HDB_HTYPE_MD5_LEN] = '\0'; /* Final sanity check to make sure there are no ',' in hash */ if (NULL != strchr(ptr, ',')) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_CORRUPT); tsk_error_set_errstr( "nsrl_parse_md5: Comma in MD5 value: %s", ptr); return 1; } *md5 = ptr; } /* do they want the name */ if (name != NULL) { /* Extract out the name - the field after SHA1, MD5, and CRC */ ptr = &str[1 + TSK_HDB_HTYPE_SHA1_LEN + 3 + TSK_HDB_HTYPE_MD5_LEN + 3 + TSK_HDB_HTYPE_CRC32_LEN + 3]; *name = ptr; if (NULL == (ptr = strchr(ptr, ','))) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_CORRUPT); tsk_error_set_errstr( "nsrl_parse_md5: Missing comma after name: %s", (char *) name); return 1; } /* Seek back to cover the final " */ ptr -= 1; *ptr = '\0'; } return 0; } tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr( "nsrl_parse_md5: Invalid version: %d\n", ver); return 1; } /** * Process the database to create a sorted index of it. Consecutive * entries with the same hash value are not added to the index, but * will be found during lookup. * * @param hdb_info_base Hash database to make index of. * @param dbtype Type of database * * @return 1 on error and 0 on success. 
*/ uint8_t nsrl_makeindex(TSK_HDB_INFO * hdb_info_base, TSK_TCHAR * dbtype) { TSK_HDB_BINSRCH_INFO *hdb_binsrch_info = (TSK_HDB_BINSRCH_INFO*)hdb_info_base; size_t i, len; char buf[TSK_HDB_MAXLEN]; char *hash = NULL, phash[TSK_HDB_HTYPE_SHA1_LEN + 1]; TSK_OFF_T offset = 0; int ver = 0; int db_cnt = 0, idx_cnt = 0, ig_cnt = 0; if (hdb_binsrch_idx_initialize(hdb_binsrch_info, dbtype)) { tsk_error_set_errstr2( "nsrl_makeindex"); return 1; } /* Status */ if (tsk_verbose) TFPRINTF(stderr, _TSK_T("Extracting Data from Database (%s)\n"), hdb_info_base->db_fname); /* Allocate a buffer for the previous hash value */ memset(phash, '0', TSK_HDB_HTYPE_SHA1_LEN + 1); /* read the file */ fseek(hdb_binsrch_info->hDb, 0, SEEK_SET); for (i = 0; NULL != fgets(buf, TSK_HDB_MAXLEN, hdb_binsrch_info->hDb); offset += len, i++) { len = strlen(buf); /* Get the version of the database on the first time around */ if (i == 0) { if ((ver = get_format_ver(buf)) == -1) { return 1; } ig_cnt++; continue; } /* Parse the line */ if (hdb_binsrch_info->hash_type & TSK_HDB_HTYPE_SHA1_ID) { if (nsrl_parse_sha1(buf, &hash, NULL, ver)) { ig_cnt++; continue; } } else if (hdb_binsrch_info->hash_type & TSK_HDB_HTYPE_MD5_ID) { if (nsrl_parse_md5(buf, &hash, NULL, ver)) { ig_cnt++; continue; } } db_cnt++; /* We only want to add one of each hash to the index */ if (memcmp(hash, phash, hdb_binsrch_info->hash_len) == 0) { continue; } /* Add the entry to the index */ if (hdb_binsrch_idx_add_entry_str(hdb_binsrch_info, hash, offset)) { tsk_error_set_errstr2( "nsrl_makeindex"); return 1; } idx_cnt++; /* Set the previous has value */ strncpy(phash, hash, hdb_binsrch_info->hash_len + 1); } if (idx_cnt > 0) { if (tsk_verbose) { fprintf(stderr, " Valid Database Entries: %d\n", db_cnt); fprintf(stderr, " Invalid Database Entries (headers or errors): %d\n", ig_cnt); fprintf(stderr, " Index File Entries %s: %d\n", (idx_cnt == db_cnt) ? "" : "(optimized)", idx_cnt); } /* Close and sort the index */ if (hdb_binsrch_idx_finalize(hdb_binsrch_info)) { tsk_error_set_errstr2( "nsrl_makeindex"); return 1; } } else { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_CORRUPT); tsk_error_set_errstr( "nsrl_makeindex: No valid entries found in database"); return 1; } return 0; } /** * Find the corresponding name at a * given offset. The offset was likely determined from the index. * The entries in the DB following the one specified are also processed * if they have the same hash value and their name is different. * The callback is called for each entry. * * @param hdb_info_base Database to get data from. 
* @param hash MD5/SHA-1 hash value that was searched for * @param offset Byte offset where hash value should be located in db_file * @param flags (not used) * @param action Callback used for each entry found in lookup * @param cb_ptr Pointer to data passed to callback * * @return 1 on error and 0 on success */ uint8_t nsrl_getentry(TSK_HDB_INFO * hdb_info_base, const char *hash, TSK_OFF_T offset, TSK_HDB_FLAG_ENUM flags, TSK_HDB_LOOKUP_FN action, void *cb_ptr) { TSK_HDB_BINSRCH_INFO *hdb_binsrch_info = (TSK_HDB_BINSRCH_INFO*)hdb_info_base; char buf[TSK_HDB_MAXLEN], *name, *cur_hash, pname[TSK_HDB_MAXLEN]; int found = 0; int ver; if (tsk_verbose) fprintf(stderr, "nsrl_getentry: Lookup up hash %s at offset %" PRIuOFF "\n", hash, offset); if ((hdb_binsrch_info->hash_type == TSK_HDB_HTYPE_MD5_ID) && (strlen(hash) != TSK_HDB_HTYPE_MD5_LEN)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr( "nsrl_getentry: Invalid hash value (expected to be MD5): %s\n", hash); return 1; } else if ((hdb_binsrch_info->hash_type == TSK_HDB_HTYPE_SHA1_ID) && (strlen(hash) != TSK_HDB_HTYPE_SHA1_LEN)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr( "nsrl_getentry: Invalid hash value (expected to be SHA1): %s\n", hash); return 1; } /* read the header line ... -- this should be done only once... */ fseeko(hdb_binsrch_info->hDb, 0, SEEK_SET); if (NULL == fgets(buf, TSK_HDB_MAXLEN, hdb_binsrch_info->hDb)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_READDB); tsk_error_set_errstr( "nsrl_getentry: Error reading NSRLFile.txt header\n"); return 1; } if ((ver = get_format_ver(buf)) == -1) { tsk_error_set_errstr2( "nsrl_getentry"); return 1; } memset(pname, '0', TSK_HDB_MAXLEN); /* Loop so that we can find consecutive occurances of the same hash */ while (1) { size_t len; if (0 != fseeko(hdb_binsrch_info->hDb, offset, SEEK_SET)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_READDB); tsk_error_set_errstr( "nsrl_getentry: Error seeking to get file name: %lu", (unsigned long) offset); return 1; } if (NULL == fgets(buf, TSK_HDB_MAXLEN, hdb_binsrch_info->hDb)) { if (feof(hdb_binsrch_info->hDb)) break; tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_READDB); tsk_error_set_errstr( "nsrl_getentry: Error reading database"); return 1; } len = strlen(buf); if (len < TSK_HDB_HTYPE_SHA1_LEN + 5) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_CORRUPT); tsk_error_set_errstr( "nsrl_getentry: Invalid entry in database (too short): %s", buf); return 1; } /* Which field are we looking for */ if (hdb_binsrch_info->hash_type == TSK_HDB_HTYPE_SHA1_ID) { if (nsrl_parse_sha1(buf, &cur_hash, &name, ver)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_CORRUPT); tsk_error_set_errstr( "nsrl_getentry: Invalid entry in database: %s", buf); return 1; } } else if (hdb_binsrch_info->hash_type == TSK_HDB_HTYPE_MD5_ID) { if (nsrl_parse_md5(buf, &cur_hash, &name, ver)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_CORRUPT); tsk_error_set_errstr( "nsrl_getentry: Invalid entry in database: %s", buf); return 1; } } /* Verify that this is the hash we are looking for */ if (0 != strcasecmp(cur_hash, hash)) { break; } /* Check if this is the same name as the previous entry */ if (strcmp(name, pname) != 0) { int retval; retval = action(hdb_info_base, hash, name, cb_ptr); if (retval == TSK_WALK_STOP) return 0; else if (retval == TSK_WALK_ERROR) return 1; found = 1; strncpy(pname, name, TSK_HDB_MAXLEN); } /* Advance to the next row */ offset += len; } if (found == 0) { 
tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr( "nsrl_getentry: Hash not found in file at offset: %lu", (unsigned long) offset); return 1; } return 0; } sleuthkit-4.2.0/tsk/hashdb/sqlite_hdb.cpp000644 000765 000024 00000060627 12576320700 021214 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2003-2014 Brian Carrier. All rights reserved * * * This software is distributed under the Common Public License 1.0 * */ #include "tsk_hashdb_i.h" #include "tsk_hash_info.h" /** * \file sqlite_hdb.cpp * Contains hash database functions for SQLite hash databases. */ static const char *SCHEMA_VERSION_PROP = "Schema Version"; static const char *SCHEMA_VERSION_NO = "1"; static const char *SQLITE_FILE_HEADER = "SQLite format 3"; static const size_t MD5_BLOB_LEN = ((TSK_HDB_HTYPE_MD5_LEN) / 2); static const char hex_digits[] = "0123456789abcdef"; static uint8_t sqlite_hdb_attempt(int resultCode, int expectedResultCode, const char *errfmt, sqlite3 *sqlite) { if (resultCode != expectedResultCode) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_AUTO_DB); tsk_error_set_errstr(errfmt, sqlite3_errmsg(sqlite), resultCode); return 1; } return 0; } static uint8_t sqlite_hdb_attempt_exec(const char *sql, const char *errfmt, sqlite3 *sqlite) { char *errmsg = NULL; if(sqlite3_exec(sqlite, sql, NULL, NULL, &errmsg) != SQLITE_OK) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_AUTO_DB); tsk_error_set_errstr(errfmt, errmsg); sqlite3_free(errmsg); return 1; } return 0; } static uint8_t sqlite_hdb_create_tables(sqlite3 *db) { if (sqlite_hdb_attempt_exec("CREATE TABLE db_properties (name TEXT NOT NULL, value TEXT);", "sqlite_hdb_create_tables: error creating db_properties table: %s\n", db)) { return 1; } char sql_stmt[1024]; snprintf(sql_stmt, 1024, "INSERT INTO db_properties (name, value) VALUES ('%s', '%s');", SCHEMA_VERSION_PROP, SCHEMA_VERSION_NO); if (sqlite_hdb_attempt_exec(sql_stmt, "sqlite_hdb_create_tables: error adding schema info to db_properties: %s\n", db)) { return 1; } if (sqlite_hdb_attempt_exec ("CREATE TABLE hashes (id INTEGER PRIMARY KEY AUTOINCREMENT, md5 BINARY(16) UNIQUE, sha1 BINARY(20), sha2_256 BINARY(32));", "sqlite_hdb_create_tables: error creating hashes table: %s\n", db)) { return 1; } if (sqlite_hdb_attempt_exec("CREATE TABLE file_names (name TEXT NOT NULL, hash_id INTEGER NOT NULL, PRIMARY KEY(name, hash_id));", "sqlite_hdb_create_tables: error creating file_names table: %s\n", db)) { return 1; } if (sqlite_hdb_attempt_exec("CREATE TABLE comments (comment TEXT NOT NULL, hash_id INTEGER NOT NULL, PRIMARY KEY(comment, hash_id));", "sqlite_hdb_create_tables: error creating comments table: %s\n", db)) { return 1; } if (sqlite_hdb_attempt_exec("CREATE INDEX md5_index ON hashes(md5);", "sqlite_hdb_create_tables: error creating md5_index on md5: %s\n", db)) { return 1; } return 0; } static uint8_t sqlite_hdb_prepare_stmt(const char *sql, sqlite3_stmt **stmt, sqlite3 *db) { if (sqlite3_prepare_v2(db, sql, -1, stmt, NULL) != SQLITE_OK) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_AUTO_DB); tsk_error_set_errstr("sqlite_hdb_prepare_stmt: error preparing SQL statement: %s: %s\n", sql, sqlite3_errmsg(db)); return 1; } return 0; } static uint8_t prepare_statements(TSK_SQLITE_HDB_INFO *hdb_info) { if (sqlite_hdb_prepare_stmt("INSERT INTO hashes (md5) VALUES (?)", &(hdb_info->insert_md5_into_hashes), hdb_info->db)) { return 1; } if (sqlite_hdb_prepare_stmt("INSERT INTO file_names (name, hash_id) VALUES 
(?, ?)", &(hdb_info->insert_into_file_names), hdb_info->db)) { return 1; } if (sqlite_hdb_prepare_stmt("INSERT INTO comments (comment, hash_id) VALUES (?, ?)", &(hdb_info->insert_into_comments), hdb_info->db)) { return 1; } if (sqlite_hdb_prepare_stmt("SELECT id, md5 from hashes where md5 = ? limit 1", &(hdb_info->select_from_hashes_by_md5), hdb_info->db)) { return 1; } if (sqlite_hdb_prepare_stmt("SELECT name from file_names where hash_id = ?", &(hdb_info->select_from_file_names), hdb_info->db)) { return 1; } if (sqlite_hdb_prepare_stmt("SELECT comment from comments where hash_id = ?", &(hdb_info->select_from_comments), hdb_info->db)) { return 1; } return 0; } static uint8_t sqlite_hdb_finalize_stmt(sqlite3_stmt **stmt, sqlite3 *db) { if ((NULL != *stmt) && (sqlite3_finalize(*stmt) != SQLITE_OK)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_AUTO_DB); tsk_error_set_errstr("sqlite_hdb_finalize_stmt: error finalizing SQL statement: %s\n", sqlite3_errmsg(db)); *stmt = NULL; return 1; } *stmt = NULL; return 0; } static void finalize_statements(TSK_SQLITE_HDB_INFO *hdb_info) { sqlite_hdb_finalize_stmt(&(hdb_info->insert_md5_into_hashes), hdb_info->db); sqlite_hdb_finalize_stmt(&(hdb_info->insert_into_file_names), hdb_info->db); sqlite_hdb_finalize_stmt(&(hdb_info->insert_into_comments), hdb_info->db); sqlite_hdb_finalize_stmt(&(hdb_info->select_from_hashes_by_md5), hdb_info->db); sqlite_hdb_finalize_stmt(&(hdb_info->select_from_file_names), hdb_info->db); sqlite_hdb_finalize_stmt(&(hdb_info->select_from_comments), hdb_info->db); } static sqlite3 *sqlite_hdb_open_db(TSK_TCHAR *db_file_path, bool create_tables) { sqlite3 *db = NULL; #ifdef TSK_WIN32 if (sqlite_hdb_attempt(sqlite3_open16(db_file_path, &db), SQLITE_OK, "Can't open hash database: %s (result code %d)\n", db)) { sqlite3_close(db); return NULL; } #else if (sqlite_hdb_attempt(sqlite3_open(db_file_path, &db), SQLITE_OK, "Can't open hash database: %s (result code %d)\n", db)) { sqlite3_close(db); return NULL; } #endif sqlite3_extended_result_codes(db, 1); if (sqlite_hdb_attempt_exec("PRAGMA synchronous = OFF;", "Error setting PRAGMA synchronous: %s\n", db)) { sqlite3_close(db); return NULL; } if (sqlite_hdb_attempt_exec("PRAGMA encoding = \"UTF-8\";", "Error setting PRAGMA encoding UTF-8: %s\n", db)) { sqlite3_close(db); return NULL; } if (sqlite_hdb_attempt_exec("PRAGMA read_uncommitted = True;", "Error setting PRAGMA read_uncommitted: %s\n", db)) { sqlite3_close(db); return NULL; } if (sqlite_hdb_attempt_exec("PRAGMA page_size = 4096;", "Error setting PRAGMA page_size: %s\n", db)) { sqlite3_close(db); return NULL; } // Configure the database to increase its size incrementally. int chunkSize = 1024 * 1024; if (sqlite3_file_control(db, NULL, SQLITE_FCNTL_CHUNK_SIZE, &chunkSize) != SQLITE_OK) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_AUTO_DB); tsk_error_set_errstr("sqlite_v1_initialize: error setting chunk size %s", sqlite3_errmsg(db)); sqlite3_close(db); return NULL; } if (create_tables && sqlite_hdb_create_tables(db)) { sqlite3_close(db); return NULL; } return db; } /** * \ingroup hashdblib * \internal * Creates a new SQLite hash database. * @param db_file_path A path for the new hash database. * @return 0 on success, 1 on failure. */ uint8_t sqlite_hdb_create_db(TSK_TCHAR *db_file_path) { sqlite3 *db = sqlite_hdb_open_db(db_file_path, true); if (NULL == db) { return 1; } sqlite3_close(db); return 0; } /** * \ingroup hashdblib * \internal * Determines whether a file is a SQLite file. 
* @param hFile_path A handle to the file to inspect. * @return 1 if the file is a SQLite file, 0 if it is not. */ uint8_t sqlite_hdb_is_sqlite_file(FILE *hFile) { const int header_size = 16; char header[header_size]; if (1 != fread(header, header_size, 1, hFile)) { return 0; } else { return (strncmp(header, SQLITE_FILE_HEADER, strlen(SQLITE_FILE_HEADER)) == 0); } } /** * \ingroup hashdblib * \internal * Opens an existing SQLite hash database. * @param db_file_path A path for the new hash database. * @return 0 on success, 1 on failure. */ TSK_HDB_INFO *sqlite_hdb_open(TSK_TCHAR *db_path) { sqlite3 *db = sqlite_hdb_open_db(db_path, false); if (!db) { return NULL; } TSK_SQLITE_HDB_INFO *hdb_info = (TSK_SQLITE_HDB_INFO*)tsk_malloc(sizeof(TSK_SQLITE_HDB_INFO)); if (!hdb_info) { sqlite3_close(db); return NULL; } if (hdb_info_base_open((TSK_HDB_INFO*)hdb_info, db_path)) { sqlite3_close(db); free(hdb_info); return NULL; } hdb_info->db = db; if (prepare_statements(hdb_info)) { finalize_statements(hdb_info); sqlite3_close(db); return NULL; } hdb_info->base.db_type = TSK_HDB_DBTYPE_SQLITE_ID; hdb_info->base.lookup_str = sqlite_hdb_lookup_str; hdb_info->base.lookup_raw = sqlite_hdb_lookup_bin; hdb_info->base.lookup_verbose_str = sqlite_hdb_lookup_verbose_str; hdb_info->base.add_entry = sqlite_hdb_add_entry; hdb_info->base.begin_transaction = sqlite_hdb_begin_transaction; hdb_info->base.commit_transaction = sqlite_hdb_commit_transaction; hdb_info->base.rollback_transaction = sqlite_hdb_rollback_transaction; hdb_info->base.close_db = sqlite_hdb_close; return (TSK_HDB_INFO*)hdb_info; } static uint8_t* sqlite_hdb_str_to_blob(const char *str) { const size_t len = strlen(str)/2; uint8_t *blob = (uint8_t *)tsk_malloc(len+1); if (NULL == blob) { return NULL; } const char *pos = str; for (size_t count = 0; count < len; ++count) { sscanf(pos, "%2hx", (short unsigned int *) &(blob[count])); pos += 2 * sizeof(char); } return blob; } static std::string sqlite_hdb_blob_to_string(std::string binblob) { size_t blobsize = binblob.size(); if (blobsize <= TSK_HDB_MAX_BINHASH_LEN) { char hashbuf[TSK_HDB_HTYPE_SHA2_256_LEN + 1]; for (size_t i = 0; i < blobsize; ++i) { hashbuf[2 * i] = hex_digits[(binblob[i] >> 4) & 0xf]; hashbuf[2 * i + 1] = hex_digits[binblob[i] & 0xf]; } hashbuf[2 * blobsize] = '\0'; return std::string(&hashbuf[0]); } else { return ""; } } static int8_t sqlite_hdb_hash_lookup_by_md5(uint8_t *md5Blob, size_t len, TSK_SQLITE_HDB_INFO *hdb_info, TskHashInfo &result) { int8_t ret_val = -1; if (sqlite_hdb_attempt(sqlite3_bind_blob(hdb_info->select_from_hashes_by_md5, 1, md5Blob, (int)len, SQLITE_TRANSIENT), SQLITE_OK, "sqlite_hdb_hash_lookup_by_md5: error binding md5 hash blob: %s (result code %d)\n", hdb_info->db) == 0) { int result_code = sqlite3_step(hdb_info->select_from_hashes_by_md5); if (SQLITE_ROW == result_code) { // Found it. result.id = sqlite3_column_int64(hdb_info->select_from_hashes_by_md5, 0); result.hashMd5 = sqlite_hdb_blob_to_string((const char*)sqlite3_column_text(hdb_info->select_from_hashes_by_md5, 1)); ret_val = 1; } else if (SQLITE_DONE == result_code) { // Didn't find it, but no error. 
ret_val = 0; } else { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_AUTO_DB); tsk_error_set_errstr("sqlite_hdb_hash_lookup_by_md5: error executing SELECT: %s\n", sqlite3_errmsg(hdb_info->db)); } } sqlite3_clear_bindings(hdb_info->select_from_hashes_by_md5); sqlite3_reset(hdb_info->select_from_hashes_by_md5); return ret_val; } static int64_t sqlite_hdb_insert_md5_hash(uint8_t *md5Blob, size_t len, TSK_SQLITE_HDB_INFO *hdb_info) { int64_t row_id = 0; if (sqlite_hdb_attempt(sqlite3_bind_blob(hdb_info->insert_md5_into_hashes, 1, md5Blob, (int)len, SQLITE_TRANSIENT), SQLITE_OK, "sqlite_hdb_insert_md5_hash: error binding md5 hash blob: %s (result code %d)\n", hdb_info->db) == 0) { int result = sqlite3_step(hdb_info->insert_md5_into_hashes); if (result == SQLITE_DONE) { row_id = sqlite3_last_insert_rowid(hdb_info->db); } else { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_AUTO_DB); tsk_error_set_errstr("sqlite_hdb_insert_md5_hash: error executing INSERT: %s\n", sqlite3_errmsg(hdb_info->db)); } } sqlite3_clear_bindings(hdb_info->insert_md5_into_hashes); sqlite3_reset(hdb_info->insert_md5_into_hashes); return row_id; } static uint8_t sqlite_hdb_insert_value_and_id(sqlite3_stmt *stmt, const char *value, int64_t id, sqlite3 *db) { uint8_t ret_val = 1; if ((sqlite_hdb_attempt(sqlite3_bind_text(stmt, 1, value, (int)strlen(value), SQLITE_TRANSIENT), SQLITE_OK, "sqlite_hdb_insert_value_and_id: error binding value: %s (result code %d)\n", db) == 0) && (sqlite_hdb_attempt(sqlite3_bind_int64(stmt, 2, id), SQLITE_OK, "sqlite_hdb_insert_value_and_id: error binding id: %s (result code %d)\n", db) == 0)) { int result = sqlite3_step(stmt); if ((result != SQLITE_DONE) && (result != SQLITE_CONSTRAINT)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_AUTO_DB); tsk_error_set_errstr("sqlite_hdb_insert_value_and_id: error executing INSERT: %s\n", sqlite3_errmsg(db)); } else { // Either the INSERT succeeded or the value was a duplicate, which is o.k. ret_val = 0; } } sqlite3_clear_bindings(stmt); sqlite3_reset(stmt); return ret_val; } /** * \ingroup hashdblib * \internal * Adds an entry to a SQLite hash database. * @param hdb_info_base The struct that represents the database. * @param filename A file name to associate with the hashes, may be NULL. * @param md5 An md5 hash. * @param sha1 A SHA-1 hash, may be NULL. * @param sha256 A SHA-256 hash, may be NULL. * @param comment A comment to associate with the hashes, may be NULL. * @return 1 on error and 0 on success */ uint8_t sqlite_hdb_add_entry(TSK_HDB_INFO *hdb_info_base, const char *filename, const char *md5, const char *sha1, const char *sha256, const char *comment) { // Currently only supporting md5. const size_t md5_str_len = strlen(md5); if (TSK_HDB_HTYPE_MD5_LEN != md5_str_len) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr("sqlite_hdb_add_entry: md5 length incorrect (=%"PRIuSIZE")", md5_str_len); return 1; } // Convert the md5 string to a binary blob, since that's how md5 hashes // are stored in the database. uint8_t *hashBlob = sqlite_hdb_str_to_blob(md5); if (NULL == hashBlob) { return 1; } // Is this hash already in the database? tsk_take_lock(&hdb_info_base->lock); TSK_SQLITE_HDB_INFO *hdb_info = (TSK_SQLITE_HDB_INFO*)hdb_info_base; TskHashInfo lookup_result; int64_t row_id = -1; const size_t len = strlen(md5)/2; int64_t result_code = sqlite_hdb_hash_lookup_by_md5(hashBlob, len, hdb_info, lookup_result); if (1 == result_code) { // Found it. row_id = lookup_result.id; } else if (0 == result_code) { //If not, insert it. 
row_id = sqlite_hdb_insert_md5_hash(hashBlob, len, hdb_info); if (row_id < 1) { // Did not get a valid row_id from the INSERT. free(hashBlob); tsk_release_lock(&hdb_info_base->lock); return 1; } } else { // Error querying database. free(hashBlob); tsk_release_lock(&hdb_info_base->lock); return 1; } free(hashBlob); // Insert the file name, if any. if (NULL != filename && sqlite_hdb_insert_value_and_id(hdb_info->insert_into_file_names, filename, row_id, hdb_info->db) == 1) { tsk_release_lock(&hdb_info_base->lock); return 1; } // Insert the comment, if any. if (NULL != comment && sqlite_hdb_insert_value_and_id(hdb_info->insert_into_comments, comment, row_id, hdb_info->db) == 1) { tsk_release_lock(&hdb_info_base->lock); return 1; } tsk_release_lock(&hdb_info_base->lock); return 0; } /** * \ingroup hashdblib * \internal * Looks up a hash in a SQLite hash database. * @param hdb_info_base The struct that represents the database. * @param hash Hash value to search for (NULL terminated string). * @param flags Flags to use in lookup. * @param action Callback function (not called if QUICK flag is given) * @param ptr Pointer to data to pass to callback * @return -1 on error, 0 if hash value not found, 1 if hash value found. */ int8_t sqlite_hdb_lookup_str(TSK_HDB_INFO * hdb_info_base, const char* hash, TSK_HDB_FLAG_ENUM flags, TSK_HDB_LOOKUP_FN action, void *ptr) { // Currently only supporting lookups of md5 hashes. const size_t len = strlen(hash); if (TSK_HDB_HTYPE_MD5_LEN != len) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr("sqlite_hdb_lookup_str: hash length incorrect (=%"PRIuSIZE"), expecting %d", len, TSK_HDB_HTYPE_MD5_LEN); return -1; } uint8_t *hashBlob = sqlite_hdb_str_to_blob(hash); if (!hashBlob) { return -1; } int8_t ret_val = sqlite_hdb_lookup_bin(hdb_info_base, hashBlob, MD5_BLOB_LEN, flags, action, ptr); free(hashBlob); return ret_val; } /** * \ingroup hashdblib * \internal * Looks up a hash in a SQLite hash database. * @param hdb_info_base The struct that represents the database. * @param hash Hash value to search for (binary form, array of bytes). * @param len Number of bytes in binary hash value * @param flags Flags to use in lookup. * @param action Callback function (not called if QUICK flag is given) * @param ptr Pointer to data to pass to callback * @return -1 on error, 0 if hash value not found, 1 if hash value found. */ int8_t sqlite_hdb_lookup_bin(TSK_HDB_INFO *hdb_info_base, uint8_t *hash, uint8_t len, TSK_HDB_FLAG_ENUM flags, TSK_HDB_LOOKUP_FN action, void *ptr) { // Currently only supporting lookups of md5 hashes. if (MD5_BLOB_LEN != len) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr("sqlite_hdb_lookup_bin: len=%"PRIu8", expected %"PRIuSIZE, len, MD5_BLOB_LEN); return -1; } // Do the look up. TskHashInfo result; int8_t ret_val = sqlite_hdb_lookup_verbose_bin(hdb_info_base, hash, len, &result); // Do the callback, if warranted.
if ((1 == ret_val) && !(flags & TSK_HDB_FLAG_QUICK) && (NULL != action)) { if (result.fileNames.size() > 0) { for (std::vector<std::string>::iterator it = result.fileNames.begin(); it != result.fileNames.end(); ++it) { action(hdb_info_base, result.hashMd5.c_str(), (*it).c_str(), ptr); } } else { action(hdb_info_base, result.hashMd5.c_str(), NULL, ptr); } } return ret_val; } static uint8_t sqlite_hdb_get_assoc_strings(sqlite3 *db, sqlite3_stmt *stmt, int64_t hash_id, std::vector<std::string> &out) { uint8_t ret_val = 1; if (sqlite_hdb_attempt(sqlite3_bind_int64(stmt, 1, hash_id), SQLITE_OK, "sqlite_hdb_get_assoc_strings: error binding hash_id: %s (result code %d)\n", db) == 0) { while(1) { int result_code = sqlite3_step(stmt); if (SQLITE_ROW == result_code) { out.push_back((const char*)sqlite3_column_text(stmt, 0)); ret_val = 0; } else if (SQLITE_DONE == result_code) { ret_val = 0; break; } else { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_AUTO_DB); tsk_error_set_errstr("sqlite_hdb_get_assoc_strings: error executing SELECT: %s\n", sqlite3_errmsg(db)); ret_val = 1; break; } }; } sqlite3_clear_bindings(stmt); sqlite3_reset(stmt); return ret_val; } /** * \ingroup hashdblib * \internal * Looks up a hash and any additional data associated with the hash in a * hash database. * @param hdb_info_base A struct representing an open hash database. * @param hash A hash value in string form. * @param result A TskHashInfo struct to populate on success. * @return -1 on error, 0 if hash value was not found, 1 if hash value * was found. */ int8_t sqlite_hdb_lookup_verbose_str(TSK_HDB_INFO *hdb_info_base, const char *hash, void *result) { // Currently only supporting lookups of md5 hashes. const size_t len = strlen(hash); if (TSK_HDB_HTYPE_MD5_LEN != len) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr("sqlite_hdb_lookup_verbose_str: hash length incorrect (=%"PRIuSIZE"), expecting %d", len, TSK_HDB_HTYPE_MD5_LEN); return -1; } uint8_t *hashBlob = sqlite_hdb_str_to_blob(hash); if (!hashBlob) { return -1; } int8_t ret_val = sqlite_hdb_lookup_verbose_bin(hdb_info_base, hashBlob, MD5_BLOB_LEN, result); free(hashBlob); return ret_val; } /** * \ingroup hashdblib * \internal * Looks up a hash and any additional data associated with the hash in a * hash database. * @param hdb_info_base A struct representing an open hash database. * @param hash A hash value in binary form. * @param hash_len The length of the hash value in bytes. * @param lookup_result A TskHashInfo struct to populate on success. * @return -1 on error, 0 if hash value was not found, 1 if hash value * was found. */ int8_t sqlite_hdb_lookup_verbose_bin(TSK_HDB_INFO *hdb_info_base, uint8_t *hash, uint8_t hash_len, void *lookup_result) { // Currently only supporting lookups of md5 hashes. if (MD5_BLOB_LEN != hash_len) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr("sqlite_hdb_lookup_verbose_bin: hash_len=%d, expected %d", hash_len, TSK_HDB_HTYPE_MD5_LEN / 2); return -1; } // Do the lookup. tsk_take_lock(&hdb_info_base->lock); TSK_SQLITE_HDB_INFO *hdb_info = (TSK_SQLITE_HDB_INFO*)hdb_info_base; TskHashInfo *result = static_cast<TskHashInfo *>(lookup_result); int8_t ret_val = sqlite_hdb_hash_lookup_by_md5(hash, hash_len, hdb_info, *result); if (ret_val < 1) { tsk_release_lock(&hdb_info_base->lock); return ret_val; } // Get any file names associated with the hash.
if (sqlite_hdb_get_assoc_strings(hdb_info->db, hdb_info->select_from_file_names, result->id, result->fileNames)) { tsk_release_lock(&hdb_info_base->lock); return -1; } // Get any comments associated with the hash. if (sqlite_hdb_get_assoc_strings(hdb_info->db, hdb_info->select_from_comments, result->id, result->comments)) { tsk_release_lock(&hdb_info_base->lock); return -1; } tsk_release_lock(&hdb_info_base->lock); return 1; } /** * \ingroup hashdblib * \internal * Begins a transaction on a hash database. * @param hdb_info A hash database info object * @return 1 on error, 0 on success */ uint8_t sqlite_hdb_begin_transaction(TSK_HDB_INFO *hdb_info_base) { TSK_SQLITE_HDB_INFO *hdb_info = reinterpret_cast<TSK_SQLITE_HDB_INFO *>(hdb_info_base); if (sqlite_hdb_attempt_exec("BEGIN", "sqlite_hdb_base_begin_transaction: %s\n", hdb_info->db)) { return 1; } else { return 0; } } /** * \ingroup hashdblib * \internal * Commits a transaction on a hash database. * @param hdb_info A hash database info object * @return 1 on error, 0 on success */ uint8_t sqlite_hdb_commit_transaction(TSK_HDB_INFO *hdb_info_base) { TSK_SQLITE_HDB_INFO *hdb_info = reinterpret_cast<TSK_SQLITE_HDB_INFO *>(hdb_info_base); if (sqlite_hdb_attempt_exec("COMMIT", "sqlite_hdb_commit_transaction: %s\n", hdb_info->db)) { return 1; } else { return 0; } } /** * \ingroup hashdblib * \internal * Rolls back a transaction on a hash database. * @param hdb_info A hash database info object * @return 1 on error, 0 on success */ uint8_t sqlite_hdb_rollback_transaction(TSK_HDB_INFO *hdb_info_base) { TSK_SQLITE_HDB_INFO *hdb_info = reinterpret_cast<TSK_SQLITE_HDB_INFO *>(hdb_info_base); if (sqlite_hdb_attempt_exec("ROLLBACK", "sqlite_hdb_rollback_transaction: %s\n", hdb_info->db)) { return 1; } else { return 0; } } /* * Closes an SQLite hash database. * @param idx_info the index to close */ void sqlite_hdb_close(TSK_HDB_INFO *hdb_info_base) { TSK_SQLITE_HDB_INFO *hdb_info = (TSK_SQLITE_HDB_INFO*)hdb_info_base; if (hdb_info->db) { finalize_statements(hdb_info); sqlite3_close(hdb_info->db); } hdb_info->db = NULL; hdb_info_base_close(hdb_info_base); free(hdb_info); } sleuthkit-4.2.0/tsk/hashdb/tsk_hash_info.h000644 000765 000024 00000001107 12576320700 021356 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2003-2013 Brian Carrier. All rights reserved */ /** * \file tsk_hash_info.h */ /** * \defgroup hashdblib C Hash Database Functions * \defgroup hashdblib_cpp C++ Hash Database Classes */ #ifndef _TSK_SQLITE_INDEX_H #define _TSK_SQLITE_INDEX_H #include <string> #include <vector> struct TskHashInfo { int64_t id; std::string hashMd5; std::string hashSha1; std::string hashSha2_256; std::vector<std::string> fileNames; std::vector<std::string> comments; }; #endif sleuthkit-4.2.0/tsk/hashdb/tsk_hashdb.c000644 000765 000024 00000047011 12576320700 020640 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2003-2014 Brian Carrier. All rights reserved * * * This software is distributed under the Common Public License 1.0 */ #include "tsk_hashdb_i.h" #ifdef TSK_WIN32 #include #endif /** * \file tsk_hashdb.c * Contains the code to open and close all supported hash database types.
*/ static FILE * hdb_open_file(TSK_TCHAR *file_path) { FILE *file = NULL; #ifdef TSK_WIN32 int fd = 0; if (_wsopen_s(&fd, file_path, _O_RDONLY | _O_BINARY, _SH_DENYNO, 0) == 0) { file = _wfdopen(fd, L"rb"); } #else file = fopen(file_path, "rb"); #endif return file; } static TSK_HDB_DBTYPE_ENUM hdb_determine_db_type(FILE *hDb, const TSK_TCHAR *db_path) { TSK_HDB_DBTYPE_ENUM db_type = TSK_HDB_DBTYPE_INVALID_ID; if (sqlite_hdb_is_sqlite_file(hDb)) { fseeko(hDb, 0, SEEK_SET); return TSK_HDB_DBTYPE_SQLITE_ID; } // Try each supported text-format database type to ensure a confident // identification. Only one of the tests should succeed. fseeko(hDb, 0, SEEK_SET); if (nsrl_test(hDb)) { db_type = TSK_HDB_DBTYPE_NSRL_ID; } fseeko(hDb, 0, SEEK_SET); if (md5sum_test(hDb)) { if (db_type != TSK_HDB_DBTYPE_INVALID_ID) { fseeko(hDb, 0, SEEK_SET); return TSK_HDB_DBTYPE_INVALID_ID; } db_type = TSK_HDB_DBTYPE_MD5SUM_ID; } fseeko(hDb, 0, SEEK_SET); if (encase_test(hDb)) { if (db_type != TSK_HDB_DBTYPE_INVALID_ID) { fseeko(hDb, 0, SEEK_SET); return TSK_HDB_DBTYPE_INVALID_ID; } db_type = TSK_HDB_DBTYPE_ENCASE_ID; } fseeko(hDb, 0, SEEK_SET); if (hk_test(hDb)) { if (db_type != TSK_HDB_DBTYPE_INVALID_ID) { fseeko(hDb, 0, SEEK_SET); return TSK_HDB_DBTYPE_INVALID_ID; } db_type = TSK_HDB_DBTYPE_HK_ID; } fseeko(hDb, 0, SEEK_SET); return db_type; } /** * \ingroup hashdblib * Creates a new hash database. * @param file_path Path for database to create. * @return 0 on success, 1 otherwise */ uint8_t tsk_hdb_create(TSK_TCHAR *file_path) { TSK_TCHAR *ext = NULL; if (NULL == file_path) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr("tsk_hdb_create: NULL file path"); return 1; } ext = TSTRRCHR(file_path, _TSK_T('.')); if ((NULL != ext) && (TSTRLEN(ext) >= 4) && (TSTRCMP(ext, _TSK_T(".kdb")) == 0)) { return sqlite_hdb_create_db(file_path); } else { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr("tsk_hdb_create: path must end in .kdb extension"); return 1; } } /** * \ingroup hashdblib * * Opens an existing hash database. * @param file_path Path to database or database index file. * @param flags Flags for opening the database. * @return Pointer to a struct representing the hash database or NULL on error. */ TSK_HDB_INFO * tsk_hdb_open(TSK_TCHAR *file_path, TSK_HDB_OPEN_ENUM flags) { const char *func_name = "tsk_hdb_open"; uint8_t file_path_is_idx_path = 0; TSK_TCHAR *db_path = NULL; TSK_TCHAR *ext = NULL; FILE *hDb = NULL; FILE *hIdx = NULL; TSK_HDB_DBTYPE_ENUM db_type = TSK_HDB_DBTYPE_INVALID_ID; TSK_HDB_INFO *hdb_info = NULL; if (NULL == file_path) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr("%s: NULL file path", func_name); return NULL; } // Determine the hash database path using the given file path. Note that // direct use of an external index file for a text-format hash database for // simple yes/no lookups is both explicitly and implicitly supported. For // such "index only" databases, the path to where the hash database is // normally required to be is still needed because of the way the code for // text-format hash databases has been written. 
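    // For example (hypothetical paths, sketching the behavior of the code below):
    // given "/cases/NSRLFile.txt-md5.idx" or "/cases/NSRLFile.txt-sha1.idx", db_path
    // is set to "/cases/NSRLFile.txt" (the external index suffix is stripped), while
    // any other argument, such as "/cases/NSRLFile.txt" itself or "/cases/hashes.kdb",
    // is copied into db_path unchanged.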
db_path = (TSK_TCHAR*)tsk_malloc((TSTRLEN(file_path) + 1) * sizeof(TSK_TCHAR)); if (NULL == db_path) { return NULL; } ext = TSTRRCHR(file_path, _TSK_T('-')); if ((NULL != ext) && (TSTRLEN(ext) == 8 || TSTRLEN(ext) == 9) && ((TSTRCMP(ext, _TSK_T("-md5.idx")) == 0) || TSTRCMP(ext, _TSK_T("-sha1.idx")) == 0)) { // The file path extension suggests the path is for an external index // file generated by TSK for a text-format hash database. In this case, // the database path should be the given file path sans the extension // because the hash database, if it is available for lookups, is // required to be be in the same directory as the external index file. file_path_is_idx_path = 1; TSTRNCPY(db_path, file_path, (ext - file_path)); } else { TSTRNCPY(db_path, file_path, TSTRLEN(file_path)); } // Determine the database type. if ((flags & TSK_HDB_OPEN_IDXONLY) == 0) { hDb = hdb_open_file(db_path); if (NULL != hDb) { db_type = hdb_determine_db_type(hDb, db_path); if (TSK_HDB_DBTYPE_INVALID_ID == db_type) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_UNKTYPE); tsk_error_set_errstr("%s: error determining hash database type of %"PRIttocTSK, func_name, db_path); free(db_path); return NULL; } } else { if (file_path_is_idx_path) { db_type = TSK_HDB_DBTYPE_IDXONLY_ID; } else { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_OPEN); tsk_error_set_errstr("%s: failed to open %"PRIttocTSK, func_name, db_path); free(db_path); return NULL; } } } else { db_type = TSK_HDB_DBTYPE_IDXONLY_ID; } switch (db_type) { case TSK_HDB_DBTYPE_NSRL_ID: hdb_info = nsrl_open(hDb, db_path); break; case TSK_HDB_DBTYPE_MD5SUM_ID: hdb_info = md5sum_open(hDb, db_path); break; case TSK_HDB_DBTYPE_ENCASE_ID: hdb_info = encase_open(hDb, db_path); break; case TSK_HDB_DBTYPE_HK_ID: hdb_info = hk_open(hDb, db_path); break; case TSK_HDB_DBTYPE_IDXONLY_ID: hIdx = hdb_open_file(file_path); if (NULL == hIdx) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_OPEN); tsk_error_set_errstr("%s: database is index only, failed to open index %"PRIttocTSK, func_name, db_path); free(db_path); return NULL; } else { fclose(hIdx); } hdb_info = idxonly_open(db_path); break; case TSK_HDB_DBTYPE_SQLITE_ID: if (NULL != hDb) { fclose(hDb); } hdb_info = sqlite_hdb_open(db_path); break; // included to prevent compiler warnings that it is not used case TSK_HDB_DBTYPE_INVALID_ID: break; } if (NULL != db_path) { free(db_path); } return hdb_info; } const TSK_TCHAR * tsk_hdb_get_db_path(TSK_HDB_INFO * hdb_info) { if (!hdb_info) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr("tsk_hdb_get_db_path: NULL hdb_info"); return 0; } return hdb_info->get_db_path(hdb_info); } const char * tsk_hdb_get_display_name(TSK_HDB_INFO * hdb_info) { if (!hdb_info) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr("tsk_hdb_get_display_name: NULL hdb_info"); return 0; } return hdb_info->get_display_name(hdb_info); } uint8_t tsk_hdb_uses_external_indexes(TSK_HDB_INFO *hdb_info) { if (!hdb_info) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr("tsk_hdb_get_display_name: NULL hdb_info"); return 0; } return hdb_info->uses_external_indexes(); } const TSK_TCHAR * tsk_hdb_get_idx_path(TSK_HDB_INFO * hdb_info, TSK_HDB_HTYPE_ENUM htype) { if (!hdb_info) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr("tsk_hdb_get_idx_path: NULL hdb_info"); return 0; } return hdb_info->get_index_path(hdb_info, htype); } uint8_t tsk_hdb_open_idx(TSK_HDB_INFO *hdb_info, 
TSK_HDB_HTYPE_ENUM htype) { if (!hdb_info) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr("tsk_hdb_open_idx: NULL hdb_info"); return 0; } return hdb_info->open_index(hdb_info, htype); } /** * \ingroup hashdblib * Determine if the open hash database has an index. * * @param hdb_info Hash database to consider * @param htype Hash type that index should be of * * @return 1 if index exists and 0 if not */ uint8_t tsk_hdb_has_idx(TSK_HDB_INFO * hdb_info, TSK_HDB_HTYPE_ENUM htype) { if (!hdb_info) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr("tsk_hdb_has_idx: NULL hdb_info"); return 0; } return (hdb_info->open_index(hdb_info, htype) == 0) ? 1 : 0; } /** * \ingroup hashdblib * Test for index only (legacy) * Assumes that the db was opened using the TSK_HDB_OPEN_TRY option. * * @param hdb_info Hash database to consider * * @return 1 if there is only a legacy index AND no db, 0 otherwise */ uint8_t tsk_hdb_is_idx_only(TSK_HDB_INFO *hdb_info) { if (!hdb_info) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr("tsk_hdb_is_idx_only: NULL hdb_info"); return 0; } return (hdb_info->db_type == TSK_HDB_DBTYPE_IDXONLY_ID); } /** * \ingroup hashdblib * Create an index for an open hash database. * @param hdb_info Open hash database to index * @param type Text of hash database type * @returns 1 on error */ uint8_t tsk_hdb_make_index(TSK_HDB_INFO *hdb_info, TSK_TCHAR *type) { if (!hdb_info) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr("tsk_hdb_make_index: NULL hdb_info"); return 1; } return hdb_info->make_index(hdb_info, type); } /** * \ingroup hashdblib * Searches a hash database for a text/ASCII hash value. * @param hdb_info Struct representing an open hash database. * @param hash Hash value to search for (NULL terminated string). * @param flags Flags to control behavior of the lookup. * @param action Callback function to call for each entry in the hash * database that matches the hash value argument (not called if QUICK * flag is given). * @param ptr Pointer to data to pass to each invocation of the callback. * @return -1 on error, 0 if hash value not found, and 1 if value was found. */ int8_t tsk_hdb_lookup_str(TSK_HDB_INFO *hdb_info, const char *hash, TSK_HDB_FLAG_ENUM flags, TSK_HDB_LOOKUP_FN action, void *ptr) { if (!hdb_info) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr("tsk_hdb_lookup_str: NULL hdb_info"); return -1; } return hdb_info->lookup_str(hdb_info, hash, flags, action, ptr); } /** * \ingroup hashdblib * Search the index for the given hash value given (in binary form). * * @param hdb_info Open hash database (with index) * @param hash Array with binary hash value to search for * @param len Number of bytes in binary hash value * @param flags Flags to use in lookup * @param action Callback function to call for each hash db entry * (not called if QUICK flag is given) * @param ptr Pointer to data to pass to each callback * * @return -1 on error, 0 if hash value not found, and 1 if value was found. 
*/ int8_t tsk_hdb_lookup_raw(TSK_HDB_INFO * hdb_info, uint8_t * hash, uint8_t len, TSK_HDB_FLAG_ENUM flags, TSK_HDB_LOOKUP_FN action, void *ptr) { if (!hdb_info) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr("tsk_hdb_lookup_raw: NULL hdb_info"); return -1; } return hdb_info->lookup_raw(hdb_info, hash, len, flags, action, ptr); } int8_t tsk_hdb_lookup_verbose_str(TSK_HDB_INFO *hdb_info, const char *hash, void *result) { if (!hdb_info) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr("tsk_hdb_lookup_verbose_str: NULL hdb_info"); return -1; } if (!hash) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr("tsk_hdb_lookup_verbose_str: NULL hash"); return -1; } if (!result) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr("tsk_hdb_lookup_verbose_str: NULL result"); return -1; } return hdb_info->lookup_verbose_str(hdb_info, hash, result); } /** * \ingroup hashdblib * Indicates whether a hash database accepts updates. * @param hdb_info The hash database object * @return 1 if hash database accepts updates, 0 if it does not */ uint8_t tsk_hdb_accepts_updates(TSK_HDB_INFO *hdb_info) { const char *func_name = "tsk_hdb_accepts_updates"; if (!hdb_info) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr("%s: NULL hdb_info", func_name); return 0; } if (!hdb_info->accepts_updates) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr("%s: NULL add_entry function ptr", func_name); return 0; } return hdb_info->accepts_updates(); } /** * \ingroup hashdblib * Adds a new entry to a hash database. * @param hdb_info The hash database object * @param filename Name of the file that was hashed (can be NULL) * @param md5 Text representation of MD5 hash (can be NULL) * @param sha1 Text representation of SHA1 hash (can be NULL) * @param sha256 Text representation of SHA256 hash (can be NULL) * @param comment A comment to asociate with the hash (can be NULL) * @return 1 on error, 0 on success */ uint8_t tsk_hdb_add_entry(TSK_HDB_INFO *hdb_info, const char *filename, const char *md5, const char *sha1, const char *sha256, const char *comment) { const char *func_name = "tsk_hdb_add_entry"; if (!hdb_info) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr("%s: NULL hdb_info", func_name); return 1; } if (!hdb_info->add_entry) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr("%s: NULL add_entry function ptr", func_name); return 1; } if (hdb_info->accepts_updates()) { return hdb_info->add_entry(hdb_info, filename, md5, sha1, sha256, comment); } else { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_PROC); tsk_error_set_errstr("%s: operation not supported for this database type (=%u)", func_name, hdb_info->db_type); return 1; } } /** * \ingroup hashdblib * Begins a transaction on a hash database. 
* @param hdb_info A hash database info object * @return 1 on error, 0 on success */ uint8_t tsk_hdb_begin_transaction(TSK_HDB_INFO *hdb_info) { const char *func_name = "tsk_hdb_begin_transaction"; if (!hdb_info) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr("%s: NULL hdb_info", func_name); return 1; } if (!hdb_info->begin_transaction) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr("%s: NULL begin_transaction function ptr", func_name); return 1; } if (hdb_info->accepts_updates()) { if (!hdb_info->transaction_in_progress) { if (hdb_info->begin_transaction(hdb_info)) { return 1; } else { hdb_info->transaction_in_progress = 1; return 0; } } else { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_PROC); tsk_error_set_errstr("%s: transaction already begun", func_name); return 1; } } else { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_PROC); tsk_error_set_errstr("%s: operation not supported for this database type (=%u)", func_name, hdb_info->db_type); return 1; } } /** * \ingroup hashdblib * Commits a transaction on a hash database. * @param hdb_info A hash database info object * @return 1 on error, 0 on success */ uint8_t tsk_hdb_commit_transaction(TSK_HDB_INFO *hdb_info) { const char *func_name = "tsk_hdb_commit_transaction"; if (!hdb_info) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr("%s: NULL hdb_info", func_name); return 1; } if (!hdb_info->commit_transaction) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr("%s: NULL commit_transaction function ptr", func_name); return 1; } if (hdb_info->accepts_updates()) { if (hdb_info->transaction_in_progress) { if (hdb_info->commit_transaction(hdb_info)) { return 1; } else { hdb_info->transaction_in_progress = 0; return 0; } } else { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_PROC); tsk_error_set_errstr("%s: transaction not begun", func_name); return 1; } } else { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_PROC); tsk_error_set_errstr("%s: operation not supported for this database type (=%u)", func_name, hdb_info->db_type); return 1; } } /** * \ingroup hashdblib * Rolls back a transaction on a hash database. * @param hdb_info A hash database info object * @return 1 on error, 0 on success */ uint8_t tsk_hdb_rollback_transaction(TSK_HDB_INFO *hdb_info) { const char *func_name = "tsk_hdb_rollback_transaction"; if (!hdb_info) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr("%s: NULL hdb_info", func_name); return 1; } if (!hdb_info->rollback_transaction) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr("%s: NULL rollback_transaction function ptr", func_name); return 1; } if (hdb_info->accepts_updates()) { if (hdb_info->transaction_in_progress) { if (hdb_info->rollback_transaction(hdb_info)) { return 1; } else { hdb_info->transaction_in_progress = 0; return 0; } } else { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_PROC); tsk_error_set_errstr("%s: transaction not begun", func_name); return 1; } } else { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_PROC); tsk_error_set_errstr("%s: operation not supported for this database type (=%u)", func_name, hdb_info->db_type); return 1; } } /** * \ingroup hashdblib * Closes an open hash database.
* @param hdb_info The hash database object */ void tsk_hdb_close(TSK_HDB_INFO *hdb_info) { if (!hdb_info) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_HDB_ARG); tsk_error_set_errstr("tsk_hdb_close: NULL hdb_info"); return; } hdb_info->close_db(hdb_info); } sleuthkit-4.2.0/tsk/hashdb/tsk_hashdb.h000644 000765 000024 00000032065 12576320700 020650 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2003-2014 Brian Carrier. All rights reserved */ /** * \file tsk_hashdb.h * External header file for hash database support. * Note that this file is not meant to be directly included. * It is included by both libtsk.h and tsk_hashdb_i.h. */ /** * \defgroup hashdblib C Hash Database Functions * \defgroup hashdblib_cpp C++ Hash Database Classes */ #include "tsk/auto/sqlite3.h" #ifndef _TSK_HDB_H #define _TSK_HDB_H #ifdef __cplusplus extern "C" { #endif /** * Flags used for lookups */ enum TSK_HDB_FLAG_ENUM { TSK_HDB_FLAG_QUICK = 0x01, ///< Quickly return if hash is found (do not return file name etc.) TSK_HDB_FLAG_EXT = 0x02 ///< Return other details besides only file name (not used }; typedef enum TSK_HDB_FLAG_ENUM TSK_HDB_FLAG_ENUM; /** * Hash algorithm types */ enum TSK_HDB_HTYPE_ENUM { TSK_HDB_HTYPE_INVALID_ID = 0, ///< Invalid algorithm signals error. TSK_HDB_HTYPE_MD5_ID = 1, ///< MD5 Algorithm TSK_HDB_HTYPE_SHA1_ID = 2, ///< SHA1 Algorithm TSK_HDB_HTYPE_SHA2_256_ID = 4, ///< SHA2-256 (aka SHA-256) Algorithm }; typedef enum TSK_HDB_HTYPE_ENUM TSK_HDB_HTYPE_ENUM; #define TSK_HDB_HTYPE_MD5_STR "md5" ///< String name for MD5 algorithm #define TSK_HDB_HTYPE_SHA1_STR "sha1" ///< String name for SHA1 algorithm #define TSK_HDB_HTYPE_SHA2_256_STR "sha2_256" ///< String name for SHA256 algorithm #define TSK_HDB_HTYPE_SHA2_256_LEN 64 ///< Length of SHA256 hash #define TSK_HDB_HTYPE_SHA1_LEN 40 ///< Length of SHA1 hash #define TSK_HDB_HTYPE_MD5_LEN 32 ///< Length of MD5 hash #define TSK_HDB_HTYPE_CRC32_LEN 8 ///< Length of CRC hash #define TSK_HDB_MAX_BINHASH_LEN 32 ///< Half the length of biggest hash /** * Return the name of the hash algorithm, given its ID */ #define TSK_HDB_HTYPE_STR(x) \ ( ((x) & TSK_HDB_HTYPE_MD5_ID) ? (TSK_HDB_HTYPE_MD5_STR) : ( \ ( ((x) & TSK_HDB_HTYPE_SHA1_ID) ? (TSK_HDB_HTYPE_SHA1_STR) : ( \ ( ((x) & TSK_HDB_HTYPE_SHA2_256_ID) ? TSK_HDB_HTYPE_SHA2_256_STR : "") ) ) ) ) /** * Return the length of a hash, given its ID */ #define TSK_HDB_HTYPE_LEN(x) \ ( ((x) & TSK_HDB_HTYPE_MD5_ID) ? (TSK_HDB_HTYPE_MD5_LEN) : ( \ ( ((x) & TSK_HDB_HTYPE_SHA1_ID) ? (TSK_HDB_HTYPE_SHA1_LEN) : ( \ ( ((x) & TSK_HDB_HTYPE_SHA2_256_ID) ? TSK_HDB_HTYPE_SHA2_256_LEN : 0) ) ) ) ) /** * Hash Database types */ enum TSK_HDB_DBTYPE_ENUM { TSK_HDB_DBTYPE_INVALID_ID = 0, ///< Invalid type signals error. 
TSK_HDB_DBTYPE_NSRL_ID = 1, ///< NIST NSRL format TSK_HDB_DBTYPE_MD5SUM_ID = 2, ///< md5sum format TSK_HDB_DBTYPE_HK_ID = 3, ///< hashkeeper format TSK_HDB_DBTYPE_IDXONLY_ID = 4, ///< Only the database index was opened -- original dbtype is unknown TSK_HDB_DBTYPE_ENCASE_ID = 5, ///< EnCase format TSK_HDB_DBTYPE_SQLITE_ID = 6 ///< SQLite format }; typedef enum TSK_HDB_DBTYPE_ENUM TSK_HDB_DBTYPE_ENUM; /** * String versions of DB types */ #define TSK_HDB_DBTYPE_NSRL_STR "nsrl" ///< NSRL database #define TSK_HDB_DBTYPE_NSRL_MD5_STR "nsrl-md5" ///< NSRL database with MD5 index #define TSK_HDB_DBTYPE_NSRL_SHA1_STR "nsrl-sha1" ///< NSRL database with SHA1 index #define TSK_HDB_DBTYPE_MD5SUM_STR "md5sum" ///< md5sum #define TSK_HDB_DBTYPE_HK_STR "hk" ///< Hash Keeper #define TSK_HDB_DBTYPE_ENCASE_STR "encase" ///< EnCase /// List of supported hash database types with external indexes; essentially index types. #define TSK_HDB_DBTYPE_SUPPORT_STR "nsrl-md5, nsrl-sha1, md5sum, encase, hk" #define TSK_HDB_NAME_MAXLEN 512 //< Max length for database name typedef struct TSK_HDB_INFO TSK_HDB_INFO; typedef TSK_WALK_RET_ENUM(*TSK_HDB_LOOKUP_FN) (TSK_HDB_INFO *, const char *hash, const char *name, void *); /** * Represents an open hash database. Instances are created using the * tsk_hdb_open() API and are passed to hash database API functions. */ struct TSK_HDB_INFO { TSK_TCHAR *db_fname; ///< Database file path, may be NULL for an index only database char db_name[TSK_HDB_NAME_MAXLEN]; ///< Name of the database, for callbacks TSK_HDB_DBTYPE_ENUM db_type; ///< Type of database tsk_lock_t lock; ///< Lock for lazy loading and idx_lbuf uint8_t transaction_in_progress; ///< Flag set and unset when transaction are begun and ended const TSK_TCHAR*(*get_db_path)(TSK_HDB_INFO*); const char*(*get_display_name)(TSK_HDB_INFO*); uint8_t(*uses_external_indexes)(); const TSK_TCHAR*(*get_index_path)(TSK_HDB_INFO*, TSK_HDB_HTYPE_ENUM); uint8_t(*has_index)(TSK_HDB_INFO*, TSK_HDB_HTYPE_ENUM); uint8_t(*make_index)(TSK_HDB_INFO*, TSK_TCHAR*); uint8_t(*open_index)(TSK_HDB_INFO*, TSK_HDB_HTYPE_ENUM); int8_t(*lookup_str)(TSK_HDB_INFO*, const char*, TSK_HDB_FLAG_ENUM, TSK_HDB_LOOKUP_FN, void*); int8_t(*lookup_raw)(TSK_HDB_INFO*, uint8_t *, uint8_t, TSK_HDB_FLAG_ENUM, TSK_HDB_LOOKUP_FN, void*); int8_t(*lookup_verbose_str)(TSK_HDB_INFO *, const char *, void *); uint8_t(*accepts_updates)(); uint8_t(*add_entry)(TSK_HDB_INFO*, const char*, const char*, const char*, const char*, const char *); uint8_t(*begin_transaction)(TSK_HDB_INFO *); uint8_t(*commit_transaction)(TSK_HDB_INFO *); uint8_t(*rollback_transaction)(TSK_HDB_INFO *); void(*close_db)(TSK_HDB_INFO *); }; /** * Represents a text-format hash database (NSRL, EnCase, etc.) with the TSK binary search index. 
*/ typedef struct TSK_HDB_BINSRCH_INFO { TSK_HDB_INFO base; FILE *hDb; ///< File handle to database (always open) uint8_t(*get_entry) (TSK_HDB_INFO *, const char *, TSK_OFF_T, TSK_HDB_FLAG_ENUM, TSK_HDB_LOOKUP_FN, void *); ///< \internal Database-specific function to find entry at a given offset TSK_HDB_HTYPE_ENUM hash_type; ///< Type of hash used in currently open index uint16_t hash_len; ///< Length of hash used in currently open index TSK_TCHAR *idx_fname; ///< Name of index file, may be NULL for database without external index FILE *hIdx; ///< File handle to index (only open during lookups) FILE *hIdxTmp; ///< File handle to temp (unsorted) index file (only open during index creation) TSK_TCHAR *uns_fname; ///< Name of unsorted index file TSK_OFF_T idx_size; ///< Size of index file uint16_t idx_off; ///< Offset in index file to first index entry size_t idx_llen; ///< Length of each line in index char *idx_lbuf; ///< Buffer to hold a line from the index (r/w shared - lock) TSK_TCHAR *idx_idx_fname; ///< Name of index of index file, may be NULL uint64_t *idx_offsets; ///< Maps the first three bytes of a hash value to an offset in the index file } TSK_HDB_BINSRCH_INFO; /** * Represents a TSK SQLite hash database (it doesn't need an external index). */ typedef struct TSK_SQLITE_HDB_INFO { TSK_HDB_INFO base; sqlite3 *db; sqlite3_stmt *insert_md5_into_hashes; ///< Once initialized, prepared statements are tied to a specific database sqlite3_stmt *insert_into_file_names; sqlite3_stmt *insert_into_comments; sqlite3_stmt *select_from_hashes_by_md5; sqlite3_stmt *select_from_file_names; sqlite3_stmt *select_from_comments; } TSK_SQLITE_HDB_INFO; /** * Options for opening a hash database */ enum TSK_HDB_OPEN_ENUM { TSK_HDB_OPEN_NONE = 0, ///< No special flags TSK_HDB_OPEN_IDXONLY = (0x1 << 0) ///< Open only the index -- do not look for the original DB }; typedef enum TSK_HDB_OPEN_ENUM TSK_HDB_OPEN_ENUM; /* Hash database API functions */ extern uint8_t tsk_hdb_create(TSK_TCHAR *); extern TSK_HDB_INFO *tsk_hdb_open(TSK_TCHAR *, TSK_HDB_OPEN_ENUM); extern const TSK_TCHAR *tsk_hdb_get_db_path(TSK_HDB_INFO * hdb_info); extern const char *tsk_hdb_get_display_name(TSK_HDB_INFO * hdb_info); extern uint8_t tsk_hdb_is_idx_only(TSK_HDB_INFO *); extern uint8_t tsk_hdb_uses_external_indexes(TSK_HDB_INFO *); extern uint8_t tsk_hdb_has_idx(TSK_HDB_INFO * hdb_info, TSK_HDB_HTYPE_ENUM); extern uint8_t tsk_hdb_make_index(TSK_HDB_INFO *, TSK_TCHAR *); extern const TSK_TCHAR *tsk_hdb_get_idx_path(TSK_HDB_INFO *, TSK_HDB_HTYPE_ENUM); extern uint8_t tsk_hdb_open_idx(TSK_HDB_INFO *, TSK_HDB_HTYPE_ENUM); extern int8_t tsk_hdb_lookup_str(TSK_HDB_INFO *, const char *, TSK_HDB_FLAG_ENUM, TSK_HDB_LOOKUP_FN, void *); extern int8_t tsk_hdb_lookup_raw(TSK_HDB_INFO *, uint8_t *, uint8_t, TSK_HDB_FLAG_ENUM, TSK_HDB_LOOKUP_FN, void *); extern int8_t tsk_hdb_lookup_verbose_str(TSK_HDB_INFO *, const char *, void *); extern uint8_t tsk_hdb_accepts_updates(TSK_HDB_INFO *); extern uint8_t tsk_hdb_add_entry(TSK_HDB_INFO *, const char*, const char*, const char*, const char*, const char*); extern uint8_t tsk_hdb_begin_transaction(TSK_HDB_INFO *); extern uint8_t tsk_hdb_commit_transaction(TSK_HDB_INFO *); extern uint8_t tsk_hdb_rollback_transaction(TSK_HDB_INFO *); extern void tsk_hdb_close(TSK_HDB_INFO *); #ifdef __cplusplus } #endif #ifdef __cplusplus /** * \ingroup hashdblib_cpp * Stores information about an open hash database. * To use this object, open() should be called first. 
Otherwise, the other * functions will have undefined return values. */ class TskHdbInfo{ private: TSK_HDB_INFO * m_hdbInfo; TskHdbInfo(const TskHdbInfo& rhs); TskHdbInfo& operator=(const TskHdbInfo& rhs); public: /** * Close an open hash database. */ ~TskHdbInfo() { tsk_hdb_close(m_hdbInfo); }; /** * Open a hash database. See tsk_hdb_open() for details. * * @param a_dbFile Path to database. * @param a_flags Flags for opening the database. * * @return 1 on error and 0 on success */ uint8_t open(TSK_TCHAR * a_dbFile, TSK_HDB_OPEN_ENUM a_flags) { if ((m_hdbInfo = tsk_hdb_open(a_dbFile, a_flags)) != NULL) return 0; else return 1; }; /** * Search the index for a text/ASCII hash value * See tsk_hdb_lookup_str() for details. * @param a_hash Hash value to search for (NULL terminated string) * @param a_flags Flags to use in lookup * @param a_action Callback function to call for each hash db entry * (not called if QUICK flag is given) * @param a_ptr Pointer to data to pass to each callback * * @return -1 on error, 0 if hash value not found, and 1 if value was found. */ int8_t lookupStr(const char *a_hash, TSK_HDB_FLAG_ENUM a_flags, TSK_HDB_LOOKUP_FN a_action, void *a_ptr) { if (m_hdbInfo != NULL) return tsk_hdb_lookup_str(m_hdbInfo, a_hash, a_flags, a_action, a_ptr); else return 0; }; /** * Search the index for the given hash value given (in binary form). * See tsk_hdb_lookup_raw() for details. * @param a_hash Array with binary hash value to search for * @param a_len Number of bytes in binary hash value * @param a_flags Flags to use in lookup * @param a_action Callback function to call for each hash db entry * (not called if QUICK flag is given) * @param a_ptr Pointer to data to pass to each callback * * @return -1 on error, 0 if hash value not found, and 1 if value was found. */ int8_t lookupRaw(uint8_t * a_hash, uint8_t a_len, TSK_HDB_FLAG_ENUM a_flags, TSK_HDB_LOOKUP_FN a_action, void *a_ptr) { if (m_hdbInfo != NULL) return tsk_hdb_lookup_raw(m_hdbInfo, a_hash, a_len, a_flags, a_action, a_ptr); else return 0; }; /** * Create an index for an open hash database. * See tsk_hdb_makeindex() for details. * @param a_type Text of hash database type * @return 1 on error */ uint8_t createIndex(TSK_TCHAR * a_type) { if (m_hdbInfo != NULL) return tsk_hdb_make_index(m_hdbInfo, a_type); else return 0; }; /** * Determine if the open hash database has an index. * See tsk_hdb_hasindex for details. * @param a_htype Hash type that index should be of * * @return 1 if index exists and 0 if not */ uint8_t hasIndex(uint8_t a_htype) { if (m_hdbInfo != NULL) return tsk_hdb_has_idx(m_hdbInfo, (TSK_HDB_HTYPE_ENUM)a_htype); else return 0; }; /** * get type of database * @return type of database, or TSK_HDB_DBTYPE_INVALID_ID on error. */ TSK_HDB_DBTYPE_ENUM getDbType() const { if (m_hdbInfo != NULL) return m_hdbInfo->db_type; return TSK_HDB_DBTYPE_INVALID_ID; }; }; #endif #endif sleuthkit-4.2.0/tsk/hashdb/tsk_hashdb_i.h000644 000765 000024 00000015771 12576320700 021165 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2003-2014 Brian Carrier. All rights reserved */ /** * \file tsk_hashdb_i.h * Contains the internal library definitions for the hash database functions. This should * be included by the code in the hash database library. 
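 *
 * The public tsk_hdb_*() functions declared in tsk_hashdb.h are thin wrappers
 * around the function pointers that each back end installs in TSK_HDB_INFO
 * when the database is opened. A minimal sketch of the delegation pattern
 * (illustrative only, not the exact implementation):
 *
 *   int8_t
 *   tsk_hdb_lookup_str(TSK_HDB_INFO *hdb, const char *hash,
 *       TSK_HDB_FLAG_ENUM flags, TSK_HDB_LOOKUP_FN action, void *ptr)
 *   {
 *       return hdb->lookup_str(hdb, hash, flags, action, ptr);
 *   }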
*/ #ifndef _TSK_HASHDB_I_H #define _TSK_HASHDB_I_H // Include the other internal TSK header files #include "tsk/base/tsk_base_i.h" // include the external header file #include "tsk_hashdb.h" #include #include #include #include #include #include #ifdef TSK_WIN32 #include #include #endif #ifdef __cplusplus extern "C" { #endif #define TSK_HDB_MAXLEN 512 ///< Default buffer size used in many places #define TSK_HDB_OFF_LEN 16 ///< Number of digits used in offset field in index /** * Get the length of an index file line - 2 for comma and newline */ #define TSK_HDB_IDX_LEN(x) \ ( TSK_HDB_HTYPE_LEN(x) + TSK_HDB_OFF_LEN + 2) /** * Strings used in index header. It is one longer than a * sha-1 hash - so that it always sorts to the top */ #define TSK_HDB_IDX_HEAD_TYPE_STR "00000000000000000000000000000000000000000" #define TSK_HDB_IDX_HEAD_NAME_STR "00000000000000000000000000000000000000001" // "Base" hash database functions. extern void hdb_base_db_name_from_path(TSK_HDB_INFO *); extern uint8_t hdb_info_base_open(TSK_HDB_INFO *, const TSK_TCHAR *); extern const TSK_TCHAR *hdb_base_get_db_path(TSK_HDB_INFO *); extern const char *hdb_base_get_display_name(TSK_HDB_INFO *); extern uint8_t hdb_base_uses_external_indexes(); extern const TSK_TCHAR *hdb_base_get_index_path(TSK_HDB_INFO *, TSK_HDB_HTYPE_ENUM); extern uint8_t hdb_base_has_index(TSK_HDB_INFO *, TSK_HDB_HTYPE_ENUM); extern uint8_t hdb_base_make_index(TSK_HDB_INFO *, TSK_TCHAR *); extern uint8_t hdb_base_open_index(TSK_HDB_INFO *, TSK_HDB_HTYPE_ENUM); extern int8_t hdb_base_lookup_str(TSK_HDB_INFO *, const char *, TSK_HDB_FLAG_ENUM, TSK_HDB_LOOKUP_FN, void *); extern int8_t hdb_base_lookup_bin(TSK_HDB_INFO *, uint8_t *, uint8_t, TSK_HDB_FLAG_ENUM, TSK_HDB_LOOKUP_FN, void *); extern int8_t hdb_base_lookup_verbose_str(TSK_HDB_INFO *, const char *, void *); extern uint8_t hdb_base_accepts_updates(); extern uint8_t hdb_base_add_entry(TSK_HDB_INFO *, const char *, const char *, const char *, const char *, const char *); extern uint8_t hdb_base_begin_transaction(TSK_HDB_INFO *); extern uint8_t hdb_base_commit_transaction(TSK_HDB_INFO *); extern uint8_t hdb_base_rollback_transaction(TSK_HDB_INFO *); extern void hdb_info_base_close(TSK_HDB_INFO *); // Hash database functions common to all text format hash databases // (NSRL, md5sum, EnCase, HashKeeper, index only). These databases have // external indexes. extern TSK_HDB_BINSRCH_INFO *hdb_binsrch_open(FILE *, const TSK_TCHAR *); extern uint8_t hdb_binsrch_uses_external_indexes(); extern const TSK_TCHAR *hdb_binsrch_get_index_path(TSK_HDB_INFO *, TSK_HDB_HTYPE_ENUM); extern uint8_t hdb_binsrch_has_index(TSK_HDB_INFO*, TSK_HDB_HTYPE_ENUM); extern uint8_t hdb_binsrch_open_idx(TSK_HDB_INFO *, TSK_HDB_HTYPE_ENUM); extern uint8_t hdb_binsrch_idx_initialize(TSK_HDB_BINSRCH_INFO *, TSK_TCHAR *); extern uint8_t hdb_binsrch_idx_add_entry_str(TSK_HDB_BINSRCH_INFO *, char *, TSK_OFF_T); extern uint8_t hdb_binsrch_idx_add_entry_bin(TSK_HDB_BINSRCH_INFO *, unsigned char *, int, TSK_OFF_T); extern uint8_t hdb_binsrch_idx_finalize(TSK_HDB_BINSRCH_INFO *); extern int8_t hdb_binsrch_lookup_str(TSK_HDB_INFO *, const char *, TSK_HDB_FLAG_ENUM, TSK_HDB_LOOKUP_FN, void *); extern int8_t hdb_binsrch_lookup_bin(TSK_HDB_INFO *, uint8_t *, uint8_t, TSK_HDB_FLAG_ENUM, TSK_HDB_LOOKUP_FN, void *); extern int8_t hdb_binsrch_lookup_verbose_str(TSK_HDB_INFO *, const char *, void *); extern uint8_t hdb_binsrch_accepts_updates(); extern void hdb_binsrch_close(TSK_HDB_INFO *) ; // Hash database functions for NSRL hash databases. 
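/* Each text-format back end declared below provides a *_test() sniffer that
 * examines the beginning of a candidate file and a matching *_open()
 * constructor. A hedged sketch of the kind of type detection tsk_hdb_open()
 * performs with them (illustrative only; the real logic also handles SQLite
 * databases and index-only openings):
 *
 *   if (nsrl_test(hDb))         hdb_info = nsrl_open(hDb, db_path);
 *   else if (md5sum_test(hDb))  hdb_info = md5sum_open(hDb, db_path);
 *   else if (encase_test(hDb))  hdb_info = encase_open(hDb, db_path);
 *   else if (hk_test(hDb))      hdb_info = hk_open(hDb, db_path);
 */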
extern uint8_t nsrl_test(FILE *); extern TSK_HDB_INFO *nsrl_open(FILE *, const TSK_TCHAR *); extern uint8_t nsrl_makeindex(TSK_HDB_INFO *, TSK_TCHAR * htype); extern uint8_t nsrl_getentry(TSK_HDB_INFO *, const char *, TSK_OFF_T, TSK_HDB_FLAG_ENUM, TSK_HDB_LOOKUP_FN, void *); // Hash database functions for hash databases generated using md5Sum. extern uint8_t md5sum_test(FILE *); extern TSK_HDB_INFO *md5sum_open(FILE *, const TSK_TCHAR *); extern uint8_t md5sum_makeindex(TSK_HDB_INFO *, TSK_TCHAR * htype); extern uint8_t md5sum_getentry(TSK_HDB_INFO *, const char *, TSK_OFF_T, TSK_HDB_FLAG_ENUM, TSK_HDB_LOOKUP_FN, void *); // Hash database functions for hash databases generated using EnCase. extern uint8_t encase_test(FILE *); extern TSK_HDB_INFO *encase_open(FILE *, const TSK_TCHAR *); extern uint8_t encase_make_index(TSK_HDB_INFO *, TSK_TCHAR * htype); extern uint8_t encase_get_entry(TSK_HDB_INFO *, const char *, TSK_OFF_T, TSK_HDB_FLAG_ENUM, TSK_HDB_LOOKUP_FN, void *); // Hash database functions for hash databases generated using HashKeeper. extern uint8_t hk_test(FILE *); extern TSK_HDB_INFO *hk_open(FILE *, const TSK_TCHAR *); extern uint8_t hk_makeindex(TSK_HDB_INFO *, TSK_TCHAR * htype); extern uint8_t hk_getentry(TSK_HDB_INFO *, const char *, TSK_OFF_T, TSK_HDB_FLAG_ENUM, TSK_HDB_LOOKUP_FN, void *); // Hash database functions for external index files standing in for the // original hash databases. extern TSK_HDB_INFO *idxonly_open(const TSK_TCHAR *); extern const TSK_TCHAR *idxonly_get_db_path(TSK_HDB_INFO *); extern uint8_t idxonly_makeindex(TSK_HDB_INFO *, TSK_TCHAR *); extern uint8_t idxonly_getentry(TSK_HDB_INFO *, const char *, TSK_OFF_T, TSK_HDB_FLAG_ENUM, TSK_HDB_LOOKUP_FN, void *); // Hash database functions for SQLite hash databases. extern uint8_t sqlite_hdb_is_sqlite_file(FILE *); extern uint8_t sqlite_hdb_create_db(TSK_TCHAR*); extern TSK_HDB_INFO *sqlite_hdb_open(TSK_TCHAR *); extern int8_t sqlite_hdb_lookup_str(TSK_HDB_INFO *, const char *, TSK_HDB_FLAG_ENUM, TSK_HDB_LOOKUP_FN, void *); extern int8_t sqlite_hdb_lookup_bin(TSK_HDB_INFO *, uint8_t *, uint8_t, TSK_HDB_FLAG_ENUM, TSK_HDB_LOOKUP_FN, void *); extern int8_t sqlite_hdb_lookup_verbose_str(TSK_HDB_INFO *, const char *, void *); extern int8_t sqlite_hdb_lookup_verbose_bin(TSK_HDB_INFO *, uint8_t *, uint8_t, void *); extern uint8_t sqlite_hdb_add_entry(TSK_HDB_INFO *, const char *, const char *, const char *, const char *, const char *); extern uint8_t sqlite_hdb_begin_transaction(TSK_HDB_INFO *); extern uint8_t sqlite_hdb_commit_transaction(TSK_HDB_INFO *); extern uint8_t sqlite_hdb_rollback_transaction(TSK_HDB_INFO *); extern void sqlite_hdb_close(TSK_HDB_INFO *); #ifdef __cplusplus } #endif #endif sleuthkit-4.2.0/tsk/fs/.indent.pro000644 000765 000024 00000000036 12576320700 017616 0ustar00carrierstaff000000 000000 -kr -psl -nce -ip2 -nlp -nut sleuthkit-4.2.0/tsk/fs/dcalc_lib.c000644 000765 000024 00000014567 12576320700 017613 0ustar00carrierstaff000000 000000 /* ** blkcalc ** The Sleuth Kit ** ** Calculates the corresponding block number between 'blkls' and 'dd' images ** when given an 'blkls' block number, it determines the block number it ** had in a 'dd' image. When given a 'dd' image, it determines the ** value it would have in a 'blkls' image (if the block is unallocated) ** ** Brian Carrier [carrier sleuthkit [dot] org] ** Copyright (c) 2006-2011 Brian Carrier, Basis Technology. All Rights reserved ** Copyright (c) 2003-2005 Brian Carrier. 
All Rights reserved ** ** TASK ** Copyright (c) 2002 Brian Carrier, @stake Inc. All Rights reserved ** ** TCTUTILs ** Copyright (c) 2001 Brian Carrier. All rights reserved ** ** ** This software is distributed under the Common Public License 1.0 ** */ /** * \file dcalc_lib.c * Contains the library API functions used by the TSK blkcalc command * line tool. */ #include "tsk_fs_i.h" /** \internal * Structure to store data for callbacks. */ typedef struct { TSK_DADDR_T count; TSK_DADDR_T uncnt; uint8_t found; TSK_OFF_T flen; } BLKCALC_DATA; /* function used when -d is given ** ** keeps a count of unallocated blocks seen thus far ** ** If the specified block is allocated, an error is given, else the ** count of unalloc blocks is given ** ** This is called for all blocks (alloc and unalloc) */ static TSK_WALK_RET_ENUM count_dd_act(const TSK_FS_BLOCK * fs_block, void *ptr) { BLKCALC_DATA *data = (BLKCALC_DATA *) ptr; if (fs_block->flags & TSK_FS_BLOCK_FLAG_UNALLOC) data->uncnt++; if (data->count-- == 0) { if (fs_block->flags & TSK_FS_BLOCK_FLAG_UNALLOC) tsk_printf("%" PRIuDADDR "\n", data->uncnt); else printf ("ERROR: unit is allocated, it will not be in an blkls image\n"); data->found = 1; return TSK_WALK_STOP; } return TSK_WALK_CONT; } /* ** count how many unalloc blocks there are. ** ** This is called for unalloc blocks only */ static TSK_WALK_RET_ENUM count_blkls_act(const TSK_FS_BLOCK * fs_block, void *ptr) { BLKCALC_DATA *data = (BLKCALC_DATA *) ptr; if (data->count-- == 0) { tsk_printf("%" PRIuDADDR "\n", fs_block->addr); data->found = 1; return TSK_WALK_STOP; } return TSK_WALK_CONT; } /* SLACK SPACE call backs */ static TSK_WALK_RET_ENUM count_slack_file_act(TSK_FS_FILE * fs_file, TSK_OFF_T a_off, TSK_DADDR_T addr, char *buf, size_t size, TSK_FS_BLOCK_FLAG_ENUM flags, void *ptr) { BLKCALC_DATA *data = (BLKCALC_DATA *) ptr; if (tsk_verbose) tsk_fprintf(stderr, "count_slack_file_act: Remaining File: %" PRIuOFF " Buffer: %" PRIuSIZE "\n", data->flen, size); /* This is not the last data unit */ if (data->flen >= size) { data->flen -= size; } /* We have passed the end of the allocated space */ else if (data->flen == 0) { if (data->count-- == 0) { tsk_printf("%" PRIuDADDR "\n", addr); data->found = 1; return TSK_WALK_STOP; } } /* This is the last data unit and there is unused space */ else if (data->flen < size) { if (data->count-- == 0) { tsk_printf("%" PRIuDADDR "\n", addr); data->found = 1; return TSK_WALK_STOP; } data->flen = 0; } return TSK_WALK_CONT; } static TSK_WALK_RET_ENUM count_slack_inode_act(TSK_FS_FILE * fs_file, void *ptr) { BLKCALC_DATA *data = (BLKCALC_DATA *) ptr; if (tsk_verbose) tsk_fprintf(stderr, "count_slack_inode_act: Processing meta data: %" PRIuINUM "\n", fs_file->meta->addr); /* We will now do a file walk on the content */ if (TSK_FS_TYPE_ISNTFS(fs_file->fs_info->ftype) == 0) { data->flen = fs_file->meta->size; if (tsk_fs_file_walk(fs_file, TSK_FS_FILE_WALK_FLAG_SLACK, count_slack_file_act, ptr)) { /* ignore any errors */ if (tsk_verbose) tsk_fprintf(stderr, "Error walking file %" PRIuINUM, fs_file->meta->addr); tsk_error_reset(); } } /* For NTFS we go through each non-resident attribute */ else { int i, cnt; cnt = tsk_fs_file_attr_getsize(fs_file); for (i = 0; i < cnt; i++) { const TSK_FS_ATTR *fs_attr = tsk_fs_file_attr_get_idx(fs_file, i); if (!fs_attr) continue; if (fs_attr->flags & TSK_FS_ATTR_NONRES) { data->flen = fs_attr->size; if (tsk_fs_file_walk_type(fs_file, fs_attr->type, fs_attr->id, TSK_FS_FILE_WALK_FLAG_SLACK, count_slack_file_act, ptr)) { /* ignore 
any errors */ if (tsk_verbose) tsk_fprintf(stderr, "Error walking file %" PRIuINUM, fs_file->meta->addr); tsk_error_reset(); } } } } return TSK_WALK_CONT; } /* Return 1 if block is not found, 0 if it was found, and -1 on error */ int8_t tsk_fs_blkcalc(TSK_FS_INFO * fs, TSK_FS_BLKCALC_FLAG_ENUM a_lclflags, TSK_DADDR_T a_cnt) { BLKCALC_DATA data; data.count = a_cnt; data.found = 0; if (a_lclflags == TSK_FS_BLKCALC_BLKLS) { if (tsk_fs_block_walk(fs, fs->first_block, fs->last_block, (TSK_FS_BLOCK_WALK_FLAG_UNALLOC | TSK_FS_BLOCK_WALK_FLAG_META | TSK_FS_BLOCK_WALK_FLAG_CONT | TSK_FS_BLOCK_WALK_FLAG_AONLY), count_blkls_act, &data)) return -1; } else if (a_lclflags == TSK_FS_BLKCALC_DD) { if (tsk_fs_block_walk(fs, fs->first_block, fs->last_block, (TSK_FS_BLOCK_WALK_FLAG_ALLOC | TSK_FS_BLOCK_WALK_FLAG_UNALLOC | TSK_FS_BLOCK_WALK_FLAG_META | TSK_FS_BLOCK_WALK_FLAG_CONT | TSK_FS_BLOCK_WALK_FLAG_AONLY), count_dd_act, &data)) return -1; } else if (a_lclflags == TSK_FS_BLKCALC_SLACK) { if (fs->inode_walk(fs, fs->first_inum, fs->last_inum, TSK_FS_META_FLAG_ALLOC, count_slack_inode_act, &data)) return -1; } if (data.found == 0) { tsk_printf("Block too large\n"); return 1; } else { return 0; } } sleuthkit-4.2.0/tsk/fs/dcat_lib.c000644 000765 000024 00000014760 12576320700 017453 0ustar00carrierstaff000000 000000 /* ** blkcat ** The Sleuth Kit ** ** Given an image , block number, and size, display the contents ** of the block to stdout. ** ** Brian Carrier [carrier sleuthkit [dot] org] ** Copyright (c) 2006-2011 Brian Carrier, Basis Technology. All Rights reserved ** Copyright (c) 2003-2005 Brian Carrier. All rights reserved ** ** TASK ** Copyright (c) 2002 Brian Carrier, @stake Inc. All rights reserved ** ** TCTUTILs ** Copyright (c) 2001 Brian Carrier. All rights reserved ** ** ** This software is distributed under the Common Public License 1.0 ** */ #include "tsk_fs_i.h" #include /** * \file dcat_lib.c * Contains the library API functions used by the TSK blkcat command * line tool. 
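 *
 * Illustrative use, given a TSK_FS_INFO pointer obtained elsewhere with
 * tsk_fs_open_img() (a sketch, not code taken from the blkcat tool itself):
 *
 *   if (tsk_fs_blkcat(fs, TSK_FS_BLKCAT_HEX, 100, 2))
 *       tsk_error_print(stderr);
 *
 * which hex-dumps two blocks starting at block 100.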
*/ /** * \internal * Print block statistics to stdout * * @param fs File system to analyze */ static void stats(TSK_FS_INFO * fs) { tsk_printf("%d: Size of Addressable Unit\n", fs->block_size); } /** * Read a specific number of blocks and print the contents to STDOUT * * @param fs File system to analyze * @param lclflags flags * @param addr Starting block address to read from * @param read_num_units Number of blocks to read * * @return 1 on error and 0 on success */ uint8_t tsk_fs_blkcat(TSK_FS_INFO * fs, TSK_FS_BLKCAT_FLAG_ENUM lclflags, TSK_DADDR_T addr, TSK_DADDR_T read_num_units) { char *buf; ssize_t cnt; int i; if (lclflags & TSK_FS_BLKCAT_STAT) { stats(fs); return 0; } if (addr + read_num_units - 1 > fs->last_block) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("tsk_fs_blkcat: requested size is larger than last block in image (%" PRIuDADDR ")", fs->last_block); return 1; } #ifdef TSK_WIN32 if (-1 == _setmode(_fileno(stdout), _O_BINARY)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_WRITE); tsk_error_set_errstr ("blkcat_lib: error setting stdout to binary: %s", strerror(errno)); return 1; } #endif if (lclflags & TSK_FS_BLKCAT_HTML) { tsk_printf("\n"); tsk_printf("\n"); tsk_printf("Unit: %" PRIuDADDR " Size: %" PRIuOFF " bytes\n", addr, read_num_units * fs->block_size); tsk_printf("\n"); tsk_printf("\n"); } if ((lclflags & TSK_FS_BLKCAT_HEX) && (lclflags & TSK_FS_BLKCAT_HTML)) tsk_printf("\n"); if ((buf = tsk_malloc(fs->block_size)) == NULL) return 1; for (i = 0; i < read_num_units; i++) { /* Read the block */ cnt = tsk_fs_read_block(fs, addr + i, buf, fs->block_size); if (cnt != fs->block_size) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr("blkcat: Error reading block at %" PRIuDADDR, addr); return 1; } /* do a hexdump like printout */ if (lclflags & TSK_FS_BLKCAT_HEX) { TSK_OFF_T idx1, idx2; for (idx1 = 0; idx1 < fs->block_size; idx1 += 16) { /* Print the offset */ if (lclflags & TSK_FS_BLKCAT_HTML) tsk_printf("", i * fs->block_size + idx1); else tsk_printf("%" PRIuOFF "\t", i * fs->block_size + idx1); /* Print the hex */ for (idx2 = 0; idx2 < 16; idx2++) { if ((lclflags & TSK_FS_BLKCAT_HTML) && (0 == (idx2 % 4))) tsk_printf(""); else tsk_printf(" "); } } /* Print the ASCII */ tsk_printf("\t"); for (idx2 = 0; idx2 < 16; idx2++) { if ((lclflags & TSK_FS_BLKCAT_HTML) && (0 == (idx2 % 4))) tsk_printf(""); else tsk_printf(" "); } } if (lclflags & TSK_FS_BLKCAT_HTML) tsk_printf(""); tsk_printf("\n"); } } /* print in all ASCII */ else if (lclflags & TSK_FS_BLKCAT_ASCII) { TSK_OFF_T idx; for (idx = 0; idx < fs->block_size; idx++) { if ((isprint((int) buf[idx])) || (buf[idx] == '\t')) { tsk_printf("%c", buf[idx]); } else if ((buf[idx] == '\n') || (buf[idx] == '\r')) { if (lclflags & TSK_FS_BLKCAT_HTML) tsk_printf("
"); tsk_printf("%c", buf[idx]); } else tsk_printf("."); } } /* print raw */ else { if (fwrite(buf, fs->block_size, 1, stdout) != 1) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_WRITE); tsk_error_set_errstr ("blkcat_lib: error writing to stdout: %s", strerror(errno)); free(buf); return 1; } } } free(buf); if (lclflags & TSK_FS_BLKCAT_HEX) { if (lclflags & TSK_FS_BLKCAT_HTML) tsk_printf("
%" PRIuOFF ""); tsk_printf("%.2x", buf[idx2 + idx1] & 0xff); if (3 == (idx2 % 4)) { if (lclflags & TSK_FS_BLKCAT_HTML) tsk_printf(""); if ((isascii((int) buf[idx2 + idx1])) && (!iscntrl((int) buf[idx2 + idx1]))) tsk_printf("%c", buf[idx2 + idx1]); else tsk_printf("."); if (3 == (idx2 % 4)) { if (lclflags & TSK_FS_BLKCAT_HTML) tsk_printf("
\n"); else tsk_printf("\n"); } else if (lclflags & TSK_FS_BLKCAT_ASCII) { if (lclflags & TSK_FS_BLKCAT_HTML) tsk_printf("
"); tsk_printf("\n"); } else { if (lclflags & TSK_FS_BLKCAT_HTML) tsk_printf("
"); } if (lclflags & TSK_FS_BLKCAT_HTML) tsk_printf("\n\n"); return 0; } sleuthkit-4.2.0/tsk/fs/dls_lib.c000644 000765 000024 00000015542 12576320700 017321 0ustar00carrierstaff000000 000000 /* ** The Sleuth Kit ** ** Brian Carrier [carrier sleuthkit [dot] org] ** Copyright (c) 2006-2011 Brian Carrier, Basis Technology. All Rights reserved ** Copyright (c) 2003-2005 Brian Carrier. All rights reserved ** ** TASK ** Copyright (c) 2002 Brian Carrier, @stake Inc. All rights reserved ** ** Copyright (c) 1997,1998,1999, International Business Machines ** Corporation and others. All Rights Reserved. ** */ /** * \file dls_lib.c * Contains the library API functions used by the TSK blkls command * line tool. */ /* TCT: * * LICENSE * This software is distributed under the IBM Public License. * AUTHOR(S) * Wietse Venema * IBM T.J. Watson Research * P.O. Box 704 * Yorktown Heights, NY 10598, USA */ #include "tsk_fs_i.h" #ifdef TSK_WIN32 #include #endif /* call backs for listing details * * return 1 on error * */ static uint8_t print_list_head(TSK_FS_INFO * fs) { char hostnamebuf[BUFSIZ]; #ifndef TSK_WIN32 if (gethostname(hostnamebuf, sizeof(hostnamebuf) - 1) < 0) { if (tsk_verbose) tsk_fprintf(stderr, "blkls_lib: error getting hostname: %s\n", strerror(errno)); strcpy(hostnamebuf, "unknown"); } hostnamebuf[sizeof(hostnamebuf) - 1] = 0; #else strcpy(hostnamebuf, "unknown"); #endif /* * Identify table type and table origin. */ tsk_printf("class|host|image|first_time|unit\n"); tsk_printf("blkls|%s||%" PRIu64 "|%s\n", hostnamebuf, (uint64_t) time(NULL), fs->duname); tsk_printf("addr|alloc\n"); return 0; } static TSK_WALK_RET_ENUM print_list(const TSK_FS_BLOCK * fs_block, void *ptr) { tsk_printf("%" PRIuDADDR "|%s\n", fs_block->addr, (fs_block->flags & TSK_FS_BLOCK_FLAG_ALLOC) ? "a" : "f"); return TSK_WALK_CONT; } /* print_block - write data block to stdout */ static TSK_WALK_RET_ENUM print_block(const TSK_FS_BLOCK * fs_block, void *ptr) { if (tsk_verbose) tsk_fprintf(stderr, "write block %" PRIuDADDR "\n", fs_block->addr); if (fwrite(fs_block->buf, fs_block->fs_info->block_size, 1, stdout) != 1) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_WRITE); tsk_error_set_errstr("blkls_lib: error writing to stdout: %s", strerror(errno)); return TSK_WALK_ERROR; } return TSK_WALK_CONT; } /** \internal * Structure to store data for callbacks. 
*/ typedef struct { TSK_OFF_T flen; } BLKLS_DATA; /* SLACK SPACE call backs */ static TSK_WALK_RET_ENUM slack_file_act(TSK_FS_FILE * fs_file, TSK_OFF_T a_off, TSK_DADDR_T addr, char *buf, size_t size, TSK_FS_BLOCK_FLAG_ENUM flags, void *ptr) { BLKLS_DATA *data = (BLKLS_DATA *) ptr; if (tsk_verbose) tsk_fprintf(stderr, "slack_file_act: File: %" PRIuINUM " Remaining File: %" PRIuOFF " Buffer: %u\n", fs_file->meta->addr, data->flen, size); /* This is not the last data unit */ if (data->flen >= size) { data->flen -= size; } /* We have passed the end of the allocated space */ else if (data->flen == 0) { if (fwrite(buf, size, 1, stdout) != 1) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_WRITE); tsk_error_set_errstr("blkls_lib: error writing to stdout: %s", strerror(errno)); return TSK_WALK_ERROR; } } /* This is the last data unit and there is unused space */ else if (data->flen < size) { /* Clear the used space and print it */ memset(buf, 0, (size_t) data->flen); if (fwrite(buf, size, 1, stdout) != 1) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_WRITE); tsk_error_set_errstr("blkls_lib: error writing to stdout: %s", strerror(errno)); return TSK_WALK_ERROR; } data->flen = 0; } return TSK_WALK_CONT; } /* Call back for inode_walk */ static TSK_WALK_RET_ENUM slack_inode_act(TSK_FS_FILE * fs_file, void *ptr) { BLKLS_DATA *data = (BLKLS_DATA *) ptr; if (tsk_verbose) tsk_fprintf(stderr, "slack_inode_act: Processing meta data: %" PRIuINUM "\n", fs_file->meta->addr); /* We will now do a file walk on the content and print the * data after the specified size of the file */ if (TSK_FS_TYPE_ISNTFS(fs_file->fs_info->ftype) == 0) { data->flen = fs_file->meta->size; if (tsk_fs_file_walk(fs_file, TSK_FS_FILE_WALK_FLAG_SLACK, slack_file_act, ptr)) { if (tsk_verbose) tsk_fprintf(stderr, "slack_inode_act: error walking file: %" PRIuINUM, fs_file->meta->addr); tsk_error_reset(); } } /* For NTFS we go through each non-resident attribute */ else { int i, cnt; cnt = tsk_fs_file_attr_getsize(fs_file); for (i = 0; i < cnt; i++) { const TSK_FS_ATTR *fs_attr = tsk_fs_file_attr_get_idx(fs_file, i); if (!fs_attr) continue; if (fs_attr->flags & TSK_FS_ATTR_NONRES) { data->flen = fs_attr->size; if (tsk_fs_file_walk_type(fs_file, fs_attr->type, fs_attr->id, TSK_FS_FILE_WALK_FLAG_SLACK, slack_file_act, ptr)) { if (tsk_verbose) tsk_fprintf(stderr, "slack_inode_act: error walking file: %" PRIuINUM, fs_file->meta->addr); tsk_error_reset(); } } } } return TSK_WALK_CONT; } /* Return 1 on error and 0 on success */ uint8_t tsk_fs_blkls(TSK_FS_INFO * fs, TSK_FS_BLKLS_FLAG_ENUM a_blklsflags, TSK_DADDR_T bstart, TSK_DADDR_T blast, TSK_FS_BLOCK_WALK_FLAG_ENUM a_block_flags) { BLKLS_DATA data; if (a_blklsflags & TSK_FS_BLKLS_SLACK) { /* get the info on each allocated inode */ if (fs->inode_walk(fs, fs->first_inum, fs->last_inum, TSK_FS_META_FLAG_ALLOC, slack_inode_act, &data)) return 1; } else if (a_blklsflags & TSK_FS_BLKLS_LIST) { if (print_list_head(fs)) return 1; a_block_flags |= TSK_FS_BLOCK_WALK_FLAG_AONLY; if (tsk_fs_block_walk(fs, bstart, blast, a_block_flags, print_list, &data)) return 1; } else { #ifdef TSK_WIN32 if (-1 == _setmode(_fileno(stdout), _O_BINARY)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_WRITE); tsk_error_set_errstr ("blkls_lib: error setting stdout to binary: %s", strerror(errno)); return 1; } #endif if (tsk_fs_block_walk(fs, bstart, blast, a_block_flags, print_block, &data)) return 1; } return 0; } sleuthkit-4.2.0/tsk/fs/dstat_lib.c000644 000765 000024 00000004205 12576320700 017650 
0ustar00carrierstaff000000 000000 /* ** blkstat ** The Sleuth Kit ** ** Get the details about a data unit ** ** Brian Carrier [carrier sleuthkit [dot] org] ** Copyright (c) 2006-2011 Brian Carrier, Basis Technology. All Rights reserved ** Copyright (c) 2003-2005 Brian Carrier. All rights reserved ** ** TASK ** Copyright (c) 2002 Brian Carrier, @stake Inc. All rights reserved ** ** ** This software is distributed under the Common Public License 1.0 ** */ /** * \file dstat_lib.c * Contains the library API functions used by the TSK blkstat command * line tool. */ #include "tsk_fs_i.h" #include "tsk_ffs.h" #include "tsk_ext2fs.h" #include "tsk_fatfs.h" static TSK_WALK_RET_ENUM blkstat_act(const TSK_FS_BLOCK * fs_block, void *ptr) { tsk_printf("%s: %" PRIuDADDR "\n", fs_block->fs_info->duname, fs_block->addr); tsk_printf("%sAllocated%s\n", (fs_block->flags & TSK_FS_BLOCK_FLAG_ALLOC) ? "" : "Not ", (fs_block->flags & TSK_FS_BLOCK_FLAG_META) ? " (Meta)" : ""); if (TSK_FS_TYPE_ISFFS(fs_block->fs_info->ftype)) { FFS_INFO *ffs = (FFS_INFO *) fs_block->fs_info; tsk_printf("Group: %" PRI_FFSGRP "\n", ffs->grp_num); } else if (TSK_FS_TYPE_ISEXT(fs_block->fs_info->ftype)) { EXT2FS_INFO *ext2fs = (EXT2FS_INFO *) fs_block->fs_info; if (fs_block->addr >= ext2fs->first_data_block) tsk_printf("Group: %" PRI_EXT2GRP "\n", ext2fs->grp_num); } else if (TSK_FS_TYPE_ISFAT(fs_block->fs_info->ftype)) { FATFS_INFO *fatfs = (FATFS_INFO *) fs_block->fs_info; /* Does this have a cluster address? */ if (fs_block->addr >= fatfs->firstclustsect) { tsk_printf("Cluster: %" PRIuDADDR "\n", (2 + (fs_block->addr - fatfs->firstclustsect) / fatfs->csize)); } } return TSK_WALK_STOP; } uint8_t tsk_fs_blkstat(TSK_FS_INFO * fs, TSK_DADDR_T addr) { int flags = (TSK_FS_BLOCK_WALK_FLAG_UNALLOC | TSK_FS_BLOCK_WALK_FLAG_ALLOC | TSK_FS_BLOCK_WALK_FLAG_META | TSK_FS_BLOCK_WALK_FLAG_CONT | TSK_FS_BLOCK_WALK_FLAG_AONLY); return tsk_fs_block_walk(fs, addr, addr, flags, blkstat_act, NULL); } sleuthkit-4.2.0/tsk/fs/exfatfs.c000755 000765 000024 00000107346 12576320700 017360 0ustar00carrierstaff000000 000000 /* ** The Sleuth Kit ** ** Copyright (c) 2013 Basis Technology Corp. All rights reserved ** Contact: Brian Carrier [carrier sleuthkit [dot] org] ** ** This software is distributed under the Common Public License 1.0 ** */ /* * This code makes use of research presented in the following paper: * "Reverse Engineering the exFAT File System" by Robert Shullich * Retrieved May 2013 from: * http://www.sans.org/reading_room/whitepapers/forensics/reverse-engineering-microsoft-exfat-file-system_33274 * * Some additional details concerning TexFAT were obtained in May 2013 * from: * http://msdn.microsoft.com/en-us/library/ee490643(v=winembedded.60).aspx */ /** * \file exfatfs.c * Contains the internal TSK exFAT file system code to "open" an exFAT file * system found in a device image and do the equivalent of a UNIX "stat" call * on the file system. */ #include "tsk_exfatfs.h" #include "tsk_fs_i.h" #include "tsk_fatfs.h" #include /** * \internal * Parses the MBR of an exFAT file system to obtain file system size * information - bytes per sector, sectors per cluster, and sectors per FAT - * to add to a FATFS_INFO object. * * @param [in, out] a_fatfs Generic FAT file system info structure. * @return 0 on success, 1 otherwise, per TSK convention. 
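 *
 * Worked example of the stored base-2 logarithms (illustrative values): a
 * bytes_per_sector field of 9 and a sectors_per_cluster field of 3 describe
 * 512 byte sectors (1 << 9) and 8 sector clusters (1 << 3), i.e. 4 KiB
 * clusters. Requiring the two logarithms to sum to at most 25 enforces the
 * 32 MiB exFAT maximum cluster size, since 1 << 25 bytes is 32 MiB.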
*/ static uint8_t exfatfs_get_fs_size_params(FATFS_INFO *a_fatfs) { const char *func_name = "exfatfs_get_fs_size_params"; TSK_FS_INFO *fs = &(a_fatfs->fs_info); EXFATFS_MASTER_BOOT_REC *exfatbs = NULL; assert(a_fatfs != NULL); /* Get bytes per sector. * Bytes per sector is a base 2 logarithm, defining a range of sizes with * a min of 512 bytes and a max of 4096 bytes. */ exfatbs = (EXFATFS_MASTER_BOOT_REC*)(&a_fatfs->boot_sector_buffer); a_fatfs->ssize_sh = (uint16_t)exfatbs->bytes_per_sector; if ((a_fatfs->ssize_sh < 9) || (a_fatfs->ssize_sh > 12)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_MAGIC); tsk_error_set_errstr("Not an FATFS file system (invalid sector size)"); if (tsk_verbose) { fprintf(stderr, "%s: Invalid sector size base 2 logarithm (%d), not in range (9 - 12)\n", func_name, a_fatfs->ssize); } return FATFS_FAIL; } a_fatfs->ssize = (1 << a_fatfs->ssize_sh); /* Get sectors per cluster. * Sectors per cluster is a base 2 logarithm. The max cluster size is * 32 MiB, so the sum of the bytes per sector and sectors per cluster * logs cannot exceed 25. */ if ((a_fatfs->ssize_sh + exfatbs->sectors_per_cluster) > 25) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_MAGIC); tsk_error_set_errstr("Not an exFAT file system (invalid cluster size)"); if (tsk_verbose) { fprintf(stderr, "%s: Invalid cluster size (%d)\n", func_name, a_fatfs->csize); } return FATFS_FAIL; } a_fatfs->csize = (1 << exfatbs->sectors_per_cluster); /* Get sectors per FAT. * It will at least be non-zero. */ a_fatfs->sectperfat = tsk_getu32(fs->endian, exfatbs->fat_len_in_sectors); if (a_fatfs->sectperfat == 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_MAGIC); tsk_error_set_errstr("Not an exFAT file system (invalid sectors per FAT)"); if (tsk_verbose) { fprintf(stderr, "%s: Invalid number of sectors per FAT (%d)\n", func_name, a_fatfs->sectperfat); } return FATFS_FAIL; } return FATFS_OK; } /** * \internal * Parses the MBR of an exFAT file system to obtain file system layout * information to add to a FATFS_INFO object. * * @param [in, out] a_fatfs Generic FAT file system info structure. * @return 0 on success, 1 otherwise, per TSK convention. */ static uint8_t exfatfs_get_fs_layout(FATFS_INFO *a_fatfs) { const char *func_name = "exfatfs_get_fs_layout"; TSK_FS_INFO *fs = &(a_fatfs->fs_info); EXFATFS_MASTER_BOOT_REC *exfatbs = NULL; uint64_t vol_len_in_sectors = 0; uint64_t last_sector_of_cluster_heap = 0; assert(a_fatfs != NULL); /* Get the size of the volume. It should be non-zero. */ exfatbs = (EXFATFS_MASTER_BOOT_REC*)(&a_fatfs->boot_sector_buffer); vol_len_in_sectors = tsk_getu64(fs->endian, exfatbs->vol_len_in_sectors); if (vol_len_in_sectors == 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_MAGIC); tsk_error_set_errstr("Not an exFAT file system (invalid volume length)"); if (tsk_verbose) { fprintf(stderr, "%s: Invalid volume length in sectors (%"PRIu64")\n", func_name, vol_len_in_sectors); } return FATFS_FAIL; } /* Get the number of FATs. There will be one FAT for regular exFAT and two * FATs for TexFAT (transactional exFAT). */ a_fatfs->numfat = exfatbs->num_fats; if ((a_fatfs->numfat != 1) && (a_fatfs->numfat != 2)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_MAGIC); tsk_error_set_errstr("Not an exFAT file system (number of FATs)"); if (tsk_verbose) { fprintf(stderr, "%s: Invalid number of FATs (%d)\n", func_name, a_fatfs->numfat); } return FATFS_FAIL; } /* Get the sector address of the first FAT (FAT0). * It should be non-zero and within the boundaries of the volume. 
* Note that if the file system is TexFAT, FAT1 will be the working copy * of the FAT and FAT0 will be the stable copy of the last known good FAT. * Therefore, the Sleuthkit should use FAT0. */ a_fatfs->firstfatsect = tsk_getu32(fs->endian, exfatbs->fat_offset); if ((a_fatfs->firstfatsect == 0) || ((uint64_t)a_fatfs->firstfatsect >= vol_len_in_sectors)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); tsk_error_set_errstr("Not an exFAT file system (invalid first FAT sector)"); if (tsk_verbose) { fprintf(stderr, "%s: Invalid first FAT sector (%" PRIuDADDR ")\n", func_name, a_fatfs->firstfatsect); } return FATFS_FAIL; } /* Get the sector address of the cluster heap (data area). It should be * after the FATs and within the boundaries of the volume. */ a_fatfs->firstdatasect = tsk_getu32(fs->endian, exfatbs->cluster_heap_offset); if ((a_fatfs->firstdatasect <= (a_fatfs->firstfatsect + (a_fatfs->sectperfat * a_fatfs->numfat) - 1)) || ((uint64_t)a_fatfs->firstdatasect >= vol_len_in_sectors)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); tsk_error_set_errstr("Not an exFAT file system (invalid first data sector"); if (tsk_verbose) { fprintf(stderr, "%s: Invalid first data sector (%" PRIuDADDR ")\n", func_name, a_fatfs->firstdatasect); } return FATFS_FAIL; } /* Unlike FAT12 and FAT16, but like FAT32, the sector address of the first * cluster (cluster #2, there is no cluster #0 or cluster #1) is the same * as the sector address of the cluster heap (data area). */ a_fatfs->firstclustsect = a_fatfs->firstdatasect; /* Get the total number of clusters. It should be non-zero, and should * define a cluster heap (data area) that is within the boundaries of the * volume. */ a_fatfs->clustcnt = tsk_getu32(fs->endian, exfatbs->cluster_cnt); last_sector_of_cluster_heap = a_fatfs->firstdatasect + (a_fatfs->clustcnt * a_fatfs->csize) - 1; if ((a_fatfs->clustcnt == 0) || (last_sector_of_cluster_heap >= vol_len_in_sectors)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); tsk_error_set_errstr("Not an exFAT file system (invalid cluster count)"); if (tsk_verbose) { fprintf(stderr, "%s: Invalid cluster count (%" PRIuDADDR ")\n", func_name, a_fatfs->clustcnt); } return FATFS_FAIL; } /* The first cluster is #2, so the final cluster is: */ a_fatfs->lastclust = 1 + a_fatfs->clustcnt; /* This bit mask is required to make the FATFS_CLUST_2_SECT macro work * for exFAT. It is the same as the FAT32 mask. */ a_fatfs->mask = EXFATFS_MASK; /* Get the sector address of the root directory. It should be within the * cluster heap (data area). */ a_fatfs->rootsect = FATFS_CLUST_2_SECT(a_fatfs, tsk_getu32(fs->endian, exfatbs->root_dir_cluster)); if ((a_fatfs->rootsect < a_fatfs->firstdatasect) || ((uint64_t)a_fatfs->rootsect > last_sector_of_cluster_heap)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); tsk_error_set_errstr("Not an exFAT file system (invalid root directory sector address)"); if (tsk_verbose) { fprintf(stderr, "%s: Invalid root directory sector address (%"PRIuDADDR")\n", func_name, a_fatfs->rootsect); } return FATFS_FAIL; } /* The number of directory entries in the root directory is not specified * in the exFAT boot sector. */ a_fatfs->numroot = 0; return FATFS_OK; } /** * \internal * Searches the root directory of an exFAT file system for an allocation bitmap * directory entry. If the entry is found, data from the entry is saved to a * FATFS_INFO object. * * @param [in, out] a_fatfs Generic FAT file system info structure. 
* @return 0 on success, 1 otherwise, per TSK convention. */ static uint8_t exfatfs_get_alloc_bitmap(FATFS_INFO *a_fatfs) { const char *func_name = "exfatfs_get_alloc_bitmap"; TSK_FS_INFO *fs = &(a_fatfs->fs_info); TSK_DADDR_T current_sector = 0; TSK_DADDR_T last_sector_of_data_area = 0; char *sector_buf = NULL; ssize_t bytes_read = 0; EXFATFS_ALLOC_BITMAP_DIR_ENTRY *dentry = NULL; uint64_t i = 0; uint64_t first_sector_of_alloc_bitmap = 0; uint64_t alloc_bitmap_length_in_bytes = 0; uint64_t last_sector_of_alloc_bitmap = 0; assert(a_fatfs != NULL); if ((sector_buf = (char*)tsk_malloc(a_fatfs->ssize)) == NULL) { return FATFS_FAIL; } current_sector = a_fatfs->rootsect; last_sector_of_data_area = a_fatfs->firstdatasect + (a_fatfs->clustcnt * a_fatfs->csize) - 1; while (current_sector < last_sector_of_data_area) { /* Read in a sector from the root directory. The allocation bitmap * directory entries will probably be near the beginning of the * directory, probably in the first sector. */ bytes_read = tsk_fs_read_block(fs, current_sector, sector_buf, a_fatfs->ssize); if (bytes_read != a_fatfs->ssize) { if (bytes_read >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2("%s: sector: %" PRIuDADDR, func_name, current_sector); free(sector_buf); return FATFS_FAIL; } /* Read the directory entries in the sector, looking for allocation * bitmap entries. There will be one entry unless the file system is * TexFAT (transactional exFAT), in which case there will be two. */ for (i = 0; i < a_fatfs->ssize; i += sizeof(FATFS_DENTRY)) { dentry = (EXFATFS_ALLOC_BITMAP_DIR_ENTRY*)&(sector_buf[i]); /* The type of the directory entry is encoded in the first byte * of the entry. See EXFATFS_DIR_ENTRY_TYPE_ENUM. */ if (exfatfs_get_enum_from_type(dentry->entry_type) == EXFATFS_DIR_ENTRY_TYPE_ALLOC_BITMAP) { /* Do an in-depth test. */ if (!exfatfs_is_alloc_bitmap_dentry((FATFS_DENTRY*)dentry, FATFS_DATA_UNIT_ALLOC_STATUS_UNKNOWN, a_fatfs)) { continue; } /* The first bit of the flags byte is 0 for the first * allocation bitmap directory entry and 1 for the second * bitmap directory entry. If TexFAT is in use and there are * two allocation bitmaps, the first bitmap should be the * stable copy of the last known good allocation bitmap. * Therefore, the SleuthKit will use the first bitmap to * determine which clusters are allocated. */ if (~(dentry->flags & 0x01)) { first_sector_of_alloc_bitmap = FATFS_CLUST_2_SECT(a_fatfs, tsk_getu32(fs->endian, dentry->first_cluster_of_bitmap)); alloc_bitmap_length_in_bytes = tsk_getu64(fs->endian, dentry->length_of_alloc_bitmap_in_bytes); last_sector_of_alloc_bitmap = first_sector_of_alloc_bitmap + (roundup(alloc_bitmap_length_in_bytes, a_fatfs->ssize) / a_fatfs->ssize) - 1; /* The allocation bitmap must lie within the boundaries of the data area. * It also must be big enough for the number of clusters reported in the VBR. */ if ((first_sector_of_alloc_bitmap >= a_fatfs->firstdatasect) && (last_sector_of_alloc_bitmap <= last_sector_of_data_area) && (alloc_bitmap_length_in_bytes >= (a_fatfs->clustcnt + 7) / 8)) { a_fatfs->EXFATFS_INFO.first_sector_of_alloc_bitmap = first_sector_of_alloc_bitmap; a_fatfs->EXFATFS_INFO.length_of_alloc_bitmap_in_bytes = alloc_bitmap_length_in_bytes; free(sector_buf); return FATFS_OK; } } } } } free(sector_buf); return FATFS_FAIL; } /** * \internal * Parses the MBR of an exFAT file system to obtain a volume serial number to * add to a FATFS_INFO object. * * @param [in, out] a_fatfs Generic FAT file system info structure. 
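 *
 * The serial number is stored as four bytes that fsstat later prints in
 * reverse order, so hypothetical on-disk bytes 78 56 34 12 would be reported
 * as 1234-5678.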
*/ static void exfatfs_get_volume_id(FATFS_INFO *a_fatfs) { TSK_FS_INFO *fs = &(a_fatfs->fs_info); EXFATFS_MASTER_BOOT_REC *exfatbs = NULL; assert(a_fatfs != NULL); exfatbs = (EXFATFS_MASTER_BOOT_REC*)(&a_fatfs->boot_sector_buffer); for (fs->fs_id_used = 0; fs->fs_id_used < 4; fs->fs_id_used++) { fs->fs_id[fs->fs_id_used] = exfatbs->vol_serial_no[fs->fs_id_used]; } } /** * \internal * Sets the file system layout members of a FATFS_INFO object for an exFAT file * system. Note that there are no "block" or "inode" concepts in exFAT. So, to * conform to the SleuthKit generic file system model, sectors are treated as * blocks, directory entries are treated as inodes, and inode addresses (inode * numbers) are assigned to every directory-entry-sized chunk of the file * system. This is the same mapping previously established for TSK treatment of * the other FAT file systems. As with those sister file systems, any given * inode address may or may not point to a conceptual exFAT inode. * * @param [in, out] a_fatfs Generic FAT file system info structure. */ static void exfatfs_setup_fs_layout_model(FATFS_INFO *a_fatfs) { TSK_FS_INFO *fs = &(a_fatfs->fs_info); EXFATFS_MASTER_BOOT_REC *exfatbs = NULL; assert(a_fatfs != NULL); fs->duname = "Sector"; fs->block_size = a_fatfs->ssize; exfatbs = (EXFATFS_MASTER_BOOT_REC*)(&a_fatfs->boot_sector_buffer); fs->block_count = tsk_getu64(fs->endian, exfatbs->vol_len_in_sectors); fs->first_block = 0; fs->last_block = fs->last_block_act = fs->block_count - 1; /* Determine the last block actually included in the image, since the * end of the file system could be "cut off." */ if ((TSK_DADDR_T) ((fs->img_info->size - fs->offset) / fs->block_size) < fs->block_count) { fs->last_block_act = (fs->img_info->size - fs->offset) / fs->block_size - 1; } /* Calculate the maximum number of directory entries that will fit in a * sector and a cluster. */ a_fatfs->dentry_cnt_se = a_fatfs->ssize / sizeof(FATFS_DENTRY); a_fatfs->dentry_cnt_cl = a_fatfs->dentry_cnt_se * a_fatfs->csize; /* The first entry in an exFAT FAT is a media type indicator. * The second entry is simply a meaningless 0xFFFFFFFF. * The first inode address is therefore 2. */ fs->first_inum = FATFS_FIRSTINO; fs->root_inum = FATFS_ROOTINO; /* Calculate inode addresses for the virtual files (MBR, one or two FATS) * and the virtual orphan files directory. */ fs->last_inum = (FATFS_SECT_2_INODE(a_fatfs, fs->last_block_act + 1) - 1) + FATFS_NUM_VIRT_FILES(a_fatfs); a_fatfs->mbr_virt_inum = fs->last_inum - FATFS_NUM_VIRT_FILES(a_fatfs) + 1; a_fatfs->fat1_virt_inum = a_fatfs->mbr_virt_inum + 1; if (a_fatfs->numfat == 2) { a_fatfs->fat2_virt_inum = a_fatfs->fat1_virt_inum + 1; } else { a_fatfs->fat2_virt_inum = a_fatfs->fat1_virt_inum; } /* Calculate the total number of inodes. */ fs->inum_count = fs->last_inum - fs->first_inum + 1; } /** * \internal * Initializes the data structures used to cache the cluster addresses that * make up FAT chains in an exFAT file system, and the lock used to make the * data structures thread-safe. * * @param [in, out] a_fatfs Generic FAT file system info structure. 
*/ static void exfatfs_init_fat_cache(FATFS_INFO *a_fatfs) { uint32_t i = 0; assert(a_fatfs != NULL); for (i = 0; i < FATFS_FAT_CACHE_N; i++) { a_fatfs->fatc_addr[i] = 0; a_fatfs->fatc_ttl[i] = 0; } tsk_init_lock(&a_fatfs->cache_lock); tsk_init_lock(&a_fatfs->dir_lock); a_fatfs->inum2par = NULL; } /** * \internal * Initializes the data structure used to map inode addresses to parent inode * addresses in an exFAT file system, and the lock used to make the data * structure thread-safe. * * @param [in, out] a_fatfs Generic FAT file system info structure. */ static void exfatfs_init_inums_map(FATFS_INFO *a_fatfs) { assert(a_fatfs != NULL); tsk_init_lock(&a_fatfs->dir_lock); a_fatfs->inum2par = NULL; } /** * \internal * * Sets the function pointers in a FATFS_INFO object for an exFAT file system * to point to either generic FAT file system functions or to exFAT file system * functions. * * @param [in, out] a_fatfs Generic FAT file system info structure. */ static void exfatfs_set_func_ptrs(FATFS_INFO *a_fatfs) { TSK_FS_INFO *fs = &(a_fatfs->fs_info); assert(a_fatfs != NULL); fs->close = fatfs_close; /* File system category functions. */ fs->fsstat = exfatfs_fsstat; fs->fscheck = fatfs_fscheck; /* Content category functions. */ fs->block_walk = fatfs_block_walk; fs->block_getflags = fatfs_block_getflags; /* Meta data category functions. */ fs->inode_walk = fatfs_inode_walk; fs->istat = fatfs_istat; fs->file_add_meta = fatfs_inode_lookup; fs->get_default_attr_type = fatfs_get_default_attr_type; fs->load_attrs = fatfs_make_data_runs; /* Name category functions. */ fs->dir_open_meta = fatfs_dir_open_meta; fs->name_cmp = fatfs_name_cmp; /* NOP journal functions - exFAT has no file system journal. */ fs->jblk_walk = fatfs_jblk_walk; fs->jentry_walk = fatfs_jentry_walk; fs->jopen = fatfs_jopen; /* Specialization for exFAT functions. */ a_fatfs->is_cluster_alloc = exfatfs_is_cluster_alloc; a_fatfs->is_dentry = exfatfs_is_dentry; a_fatfs->dinode_copy = exfatfs_dinode_copy; a_fatfs->inode_lookup = exfatfs_inode_lookup; a_fatfs->inode_walk_should_skip_dentry = exfatfs_inode_walk_should_skip_dentry; a_fatfs->istat_attr_flags = exfatfs_istat_attr_flags; a_fatfs->dent_parse_buf = exfatfs_dent_parse_buf; } /** * Open an exFAT file system in an image file. * * @param [in, out] a_fatfs Generic FAT file system info structure. * @return 0 on success, 1 otherwise, per TSK convention. */ uint8_t exfatfs_open(FATFS_INFO *a_fatfs) { const char *func_name = "exfatfs_open"; TSK_FS_INFO *fs = &(a_fatfs->fs_info); assert(a_fatfs != NULL); tsk_error_reset(); if (fatfs_ptr_arg_is_null(a_fatfs, "a_fatfs", func_name)) { return FATFS_FAIL; } if (exfatfs_get_fs_size_params(a_fatfs) == FATFS_FAIL || exfatfs_get_fs_layout(a_fatfs) == FATFS_FAIL) { return FATFS_FAIL; } if (exfatfs_get_fs_layout(a_fatfs) == FATFS_OK) { exfatfs_setup_fs_layout_model(a_fatfs); } else { return FATFS_FAIL; } if (exfatfs_get_alloc_bitmap(a_fatfs) == FATFS_FAIL) { return FATFS_FAIL; } exfatfs_get_volume_id(a_fatfs); exfatfs_init_inums_map(a_fatfs); exfatfs_init_fat_cache(a_fatfs); exfatfs_set_func_ptrs(a_fatfs); fs->ftype = TSK_FS_TYPE_EXFAT; return FATFS_OK; } /** * \internal * Searches an exFAT file system for its volume label directory entry, which * should be in the root directory of the file system. If the entry is found, * its metadata is copied into the TSK_FS_META object of a TSK_FS_FILE object. * * @param [in] a_fatfs Generic FAT file system info structure. * @param [out] a_fatfs Generic file system file structure. 
* @return 0 on success, 1 otherwise, per TSK convention. */ static uint8_t exfatfs_find_volume_label_dentry(FATFS_INFO *a_fatfs, TSK_FS_FILE *a_fs_file) { const char *func_name = "exfatfs_find_volume_label_dentry"; TSK_FS_INFO *fs = (TSK_FS_INFO *)a_fatfs; TSK_DADDR_T current_sector = 0; TSK_DADDR_T last_sector_of_data_area = 0; int8_t sector_is_alloc = 0; char *sector_buf = NULL; ssize_t bytes_read = 0; TSK_INUM_T current_inum = 0; FATFS_DENTRY *dentry = NULL; uint64_t i = 0; assert(a_fatfs != NULL); assert(a_fs_file != NULL); tsk_error_reset(); if (fatfs_ptr_arg_is_null(a_fatfs, "a_fatfs", func_name) || fatfs_ptr_arg_is_null(a_fs_file, "a_fs_file", func_name)) { return FATFS_FAIL; } /* Allocate or reset the TSK_FS_META object. */ if (a_fs_file->meta == NULL) { if ((a_fs_file->meta = tsk_fs_meta_alloc(FATFS_FILE_CONTENT_LEN)) == NULL) { return FATFS_FAIL; } } else { tsk_fs_meta_reset(a_fs_file->meta); } /* Allocate a buffer for reading in sector-size chunks of the image. */ if ((sector_buf = (char*)tsk_malloc(a_fatfs->ssize)) == NULL) { return FATFS_FAIL; } current_sector = a_fatfs->rootsect; last_sector_of_data_area = a_fatfs->firstdatasect + (a_fatfs->clustcnt * a_fatfs->csize) - 1; while (current_sector < last_sector_of_data_area) { /* Read in a sector from the root directory. The volume label * directory entry will probably be near the beginning of the * directory, probably in the first sector. */ bytes_read = tsk_fs_read_block(fs, current_sector, sector_buf, a_fatfs->ssize); if (bytes_read != a_fatfs->ssize) { if (bytes_read >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2("%s: error reading sector: %" PRIuDADDR, func_name, current_sector); free(sector_buf); return FATFS_FAIL; } /* Get the allocation status of the sector (yes, it should be allocated). */ sector_is_alloc = fatfs_is_sectalloc(a_fatfs, current_sector); if (sector_is_alloc == -1) { return FATFS_FAIL; } /* Get the inode address of the first directory entry of the sector. */ current_inum = FATFS_SECT_2_INODE(a_fatfs, current_sector); /* Loop through the putative directory entries in the sector, * until the volume label entry is found. */ for (i = 0; i < a_fatfs->ssize; i += sizeof(FATFS_DENTRY)) { dentry = (FATFS_DENTRY*)&(sector_buf[i]); /* The type of the directory entry is encoded in the first byte * of the entry. See EXFATFS_DIR_ENTRY_TYPE_ENUM. */ if (exfatfs_get_enum_from_type(dentry->data[0]) == EXFATFS_DIR_ENTRY_TYPE_VOLUME_LABEL) { if (!exfatfs_is_vol_label_dentry(dentry, FATFS_DATA_UNIT_ALLOC_STATUS_UNKNOWN)) { continue; } /* Found it, save it to the TSK_FS_META object of the * TSK_FS_FILE object and exit. */ if (exfatfs_dinode_copy(a_fatfs, current_inum, dentry, sector_is_alloc, a_fs_file) == TSK_OK) { return FATFS_OK; } else { return FATFS_FAIL; } } } } free(sector_buf); return FATFS_OK; } /** * \internal * Prints file system category data for an exFAT file system to a file * handle. * * @param [in] a_fs Generic file system info structure for the file system. * @param [in] a_hFile The file handle. * @return 0 on success, 1 otherwise, per TSK convention. 
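 *
 * Illustrative output (all values hypothetical):
 *
 *   FILE SYSTEM INFORMATION
 *   --------------------------------------------
 *   File System Type: exFAT
 *
 *   Volume Serial Number: 1234-5678
 *   Volume Label (from root directory): MYDISK
 *   File System Name (from MBR): EXFAT
 *   File System Revision: 1.0
 *   Partition Offset: 2048
 *   Number of FATs: 1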
*/ static uint8_t exfatfs_fsstat_fs_info(TSK_FS_INFO *a_fs, FILE *a_hFile) { FATFS_INFO *fatfs = NULL; EXFATFS_MASTER_BOOT_REC *exfatbs = NULL; TSK_FS_FILE *fs_file = NULL; assert(a_fs != NULL); assert(a_hFile != NULL); fatfs = (FATFS_INFO*)a_fs; exfatbs = (EXFATFS_MASTER_BOOT_REC*)&(fatfs->boot_sector_buffer); if ((fs_file = tsk_fs_file_alloc(a_fs)) == NULL) { return FATFS_FAIL; } if ((fs_file->meta = tsk_fs_meta_alloc(FATFS_FILE_CONTENT_LEN)) == NULL) { return FATFS_FAIL; } tsk_fprintf(a_hFile, "FILE SYSTEM INFORMATION\n"); tsk_fprintf(a_hFile, "--------------------------------------------\n"); tsk_fprintf(a_hFile, "File System Type: exFAT\n"); tsk_fprintf(a_hFile, "\nVolume Serial Number: %x%x-%x%x\n", exfatbs->vol_serial_no[3], exfatbs->vol_serial_no[2], exfatbs->vol_serial_no[1], exfatbs->vol_serial_no[0]); if (exfatfs_find_volume_label_dentry(fatfs, fs_file) == 0) { tsk_fprintf(a_hFile, "Volume Label (from root directory): %s\n", fs_file->meta->name2->name); } else { tsk_fprintf(a_hFile, "Volume Label:\n"); } tsk_fprintf(a_hFile, "File System Name (from MBR): %s\n", exfatbs->fs_name); tsk_fprintf(a_hFile, "File System Revision: %x.%x\n", exfatbs->fs_revision[1], exfatbs->fs_revision[0]); tsk_fprintf(a_hFile, "Partition Offset: %" PRIuDADDR "\n", tsk_getu64(a_fs->endian, exfatbs->partition_offset)); tsk_fprintf(a_hFile, "Number of FATs: %d\n", fatfs->numfat); tsk_fs_file_close(fs_file); return FATFS_OK; } /** * \internal * Prints file system layout data for an exFAT file system to a file * handle. * * @param [in] a_fs Generic file system info structure for the file system. * @param [in] a_hFile The file handle. * @return 0 on success, 1 otherwise, per TSK convention. */ static uint8_t exfatfs_fsstat_fs_layout_info(TSK_FS_INFO *a_fs, FILE *a_hFile) { const char *func_name = "exfatfs_fsstat_fs_layout_info"; FATFS_INFO *fatfs = NULL; uint64_t i = 0; TSK_DADDR_T fat_base_sect = 0; TSK_DADDR_T clust_heap_len = 0; TSK_LIST *root_dir_clusters_seen = NULL; TSK_DADDR_T current_cluster; TSK_DADDR_T next_cluster = 0; assert(a_fs != NULL); assert(a_hFile != NULL); fatfs = (FATFS_INFO*)a_fs; tsk_fprintf(a_hFile, "\nFile System Layout (in sectors):\n"); tsk_fprintf(a_hFile, "Range: %" PRIuDADDR " - %" PRIuDADDR "\n", a_fs->first_block, a_fs->last_block); if (a_fs->last_block != a_fs->last_block_act) tsk_fprintf(a_hFile, "Range in Image: %" PRIuDADDR " - %" PRIuDADDR "\n", a_fs->first_block, a_fs->last_block_act); tsk_fprintf(a_hFile, "* Reserved: 0 - %" PRIuDADDR "\n", fatfs->firstfatsect - 1); tsk_fprintf(a_hFile, "** Volume Boot Record (VBR): 0 - 11\n"); tsk_fprintf(a_hFile, "*** Boot Sector (MBR): 0\n"); tsk_fprintf(a_hFile, "** Backup Volume Boot Record (VBR): 12 - 23\n"); tsk_fprintf(a_hFile, "*** Backup Boot Sector (MBR): 12\n"); tsk_fprintf(a_hFile, "** FAT alignment space: 24 - %" PRIuDADDR "\n", fatfs->firstfatsect - 1); for (i = 0; i < fatfs->numfat; i++) { fat_base_sect = fatfs->firstfatsect + i * (fatfs->sectperfat); tsk_fprintf(a_hFile, "* FAT %" PRIuDADDR ": %" PRIuDADDR " - %" PRIuDADDR "\n", i + 1, fat_base_sect, (fat_base_sect + fatfs->sectperfat - 1)); } if (fat_base_sect + fatfs->sectperfat < fatfs->firstdatasect) { tsk_fprintf(a_hFile, "* Data Area alignment space: %" PRIuDADDR " - %" PRIuDADDR "\n", fat_base_sect + fatfs->sectperfat, fatfs->firstdatasect - 1); } tsk_fprintf(a_hFile, "* Data Area: %" PRIuDADDR " - %" PRIuDADDR "\n", fatfs->firstdatasect, a_fs->last_block); clust_heap_len = fatfs->csize * (fatfs->lastclust - 1); tsk_fprintf(a_hFile, "** Cluster Heap: %" PRIuDADDR 
" - %" PRIuDADDR "\n", fatfs->firstclustsect, (fatfs->firstclustsect + clust_heap_len - 1)); /* Walk the FAT chain for the root directory. */ current_cluster = fatfs->rootsect; next_cluster = FATFS_SECT_2_CLUST(fatfs, fatfs->rootsect); while ((next_cluster) && (0 == FATFS_ISEOF(next_cluster, FATFS_32_MASK))) { TSK_DADDR_T nxt; current_cluster = next_cluster; /* Make sure we do not get into an infinite loop */ if (tsk_list_find(root_dir_clusters_seen, next_cluster)) { if (tsk_verbose) { tsk_fprintf(stderr, "%s : Loop found while determining root directory size\n", func_name); } break; } if (tsk_list_add(&root_dir_clusters_seen, next_cluster)) { tsk_list_free(root_dir_clusters_seen); root_dir_clusters_seen = NULL; return FATFS_FAIL; } if (fatfs_getFAT(fatfs, next_cluster, &nxt)) { break; } next_cluster = nxt; } tsk_list_free(root_dir_clusters_seen); root_dir_clusters_seen = NULL; tsk_fprintf(a_hFile, "*** Root Directory: %" PRIuDADDR " - %" PRIuDADDR "\n", fatfs->rootsect, (FATFS_CLUST_2_SECT(fatfs, current_cluster + 1) - 1)); if ((fatfs->firstclustsect + clust_heap_len - 1) != a_fs->last_block) { tsk_fprintf(a_hFile, "** Non-clustered: %" PRIuDADDR " - %" PRIuDADDR "\n", (fatfs->firstclustsect + clust_heap_len), a_fs->last_block); } return FATFS_OK; } /** * \internal * Prints metadata category data for an exFAT file system to a file * handle. * * @param [in] a_fs Generic file system info structure for the file system. * @param [in] a_hFile The file handle. */ static void exfatfs_fsstat_fs_metadata_info(TSK_FS_INFO *a_fs, FILE *a_hFile) { assert(a_fs != NULL); assert(a_hFile != NULL); tsk_fprintf(a_hFile, "\nMETADATA INFORMATION\n"); tsk_fprintf(a_hFile, "--------------------------------------------\n"); tsk_fprintf(a_hFile, "Metadata Layout (in virtual inodes):\n"); tsk_fprintf(a_hFile, "Range: %" PRIuINUM " - %" PRIuINUM "\n", a_fs->first_inum, a_fs->last_inum); tsk_fprintf(a_hFile, "* Root Directory: %" PRIuINUM "\n", a_fs->root_inum); } /** * \internal * Prints metadata category data for an exFAT file system to a file * handle. * * @param [in] a_fs Generic file system info structure for the file system. * @param [in] a_hFile The file handle. */ static void exfatfs_fsstat_fs_content_info(TSK_FS_INFO *a_fs, FILE *a_hFile) { FATFS_INFO *fatfs = NULL; uint64_t i = 0; ssize_t bad_sector_cnt = 0; assert(a_fs != NULL); assert(a_hFile != NULL); fatfs = (FATFS_INFO*)a_fs; tsk_fprintf(a_hFile, "\nCONTENT INFORMATION\n"); tsk_fprintf(a_hFile, "--------------------------------------------\n"); tsk_fprintf(a_hFile, "Sector Size: %" PRIu16 "\n", fatfs->ssize); tsk_fprintf(a_hFile, "Cluster Size: %" PRIu32 "\n", (uint32_t) fatfs->csize << fatfs->ssize_sh); tsk_fprintf(a_hFile, "Cluster Range: 2 - %" PRIuDADDR "\n", fatfs->lastclust); /* Check each cluster of the data area to see if it is marked as bad in the * FAT. If the cluster is bad, list the bad sectors. 
*/ bad_sector_cnt = 0; for (i = 2; i <= fatfs->lastclust; ++i) { TSK_DADDR_T entry; TSK_DADDR_T sect; /* Get the FAT table entry */ if (fatfs_getFAT(fatfs, i, &entry)) { break; } if (FATFS_ISBAD(entry, fatfs->mask) == 0) { continue; } if (bad_sector_cnt == 0) { tsk_fprintf(a_hFile, "Bad Sectors: "); } sect = FATFS_CLUST_2_SECT(fatfs, i); for (i = 0; i < fatfs->csize; ++i) { tsk_fprintf(a_hFile, "%" PRIuDADDR " ", sect + i); if ((++bad_sector_cnt % 8) == 0) { tsk_fprintf(a_hFile, "\n"); } } } if ((bad_sector_cnt > 0) && ((bad_sector_cnt % 8) != 0)) { tsk_fprintf(a_hFile, "\n"); } } /** * \internal * Prints FAT chains data for an exFAT file system to a file * handle. * * @param [in] a_fs Generic file system info structure for the file system. * @param [in] a_hFile The file handle. */ static void exfatfs_fsstat_fs_fat_chains_info(TSK_FS_INFO *a_fs, FILE *a_hFile) { FATFS_INFO *fatfs = NULL; uint64_t i = 0; TSK_DADDR_T sect_run_start = 0; TSK_DADDR_T sect_run_end = 0; TSK_DADDR_T next_cluster = 0; TSK_DADDR_T next_sector = 0; assert(a_fs != NULL); assert(a_hFile != NULL); fatfs = (FATFS_INFO*)a_fs; tsk_fprintf(a_hFile, "\nFAT CHAINS (in sectors)\n"); tsk_fprintf(a_hFile, "--------------------------------------------\n"); /* Check each cluster of the data area to see if it has a FAT chain. * If so, print out the sectors tha make up the chain. Note that exFAT file * systems only use FAT chains for the root directory, the allocation * bitmap, the upcase table, and fragmented files. */ sect_run_start = fatfs->firstclustsect; for (i = 2; i <= fatfs->lastclust; i++) { sect_run_end = FATFS_CLUST_2_SECT(fatfs, i + 1) - 1; if (fatfs_getFAT(fatfs, i, &next_cluster)) { break; } next_sector = FATFS_CLUST_2_SECT(fatfs, next_cluster); if ((next_cluster & fatfs->mask) == (i + 1)) { continue; } else if ((next_cluster & fatfs->mask)) { if (FATFS_ISEOF(next_cluster, fatfs->mask)) { tsk_fprintf(a_hFile, "%" PRIuDADDR "-%" PRIuDADDR " (%" PRIuDADDR ") -> EOF\n", sect_run_start, sect_run_end, sect_run_end - sect_run_start + 1); } else if (FATFS_ISBAD(next_cluster, fatfs->mask)) { tsk_fprintf(a_hFile, "%" PRIuDADDR "-%" PRIuDADDR " (%" PRIuDADDR ") -> BAD\n", sect_run_start, sect_run_end, sect_run_end - sect_run_start + 1); } else { tsk_fprintf(a_hFile, "%" PRIuDADDR "-%" PRIuDADDR " (%" PRIuDADDR ") -> %" PRIuDADDR "\n", sect_run_start, sect_run_end, sect_run_end - sect_run_start + 1, next_sector); } } sect_run_start = sect_run_end + 1; } } /** * \internal * Print details about an exFAT file system to a file handle. * * @param [in] a_fs Generic file system info structure for the file system. * @param [in] a_hFile The file handle. * @return 0 on success, 1 otherwise, per TSK convention. */ uint8_t exfatfs_fsstat(TSK_FS_INFO *a_fs, FILE *a_hFile) { const char *func_name = "exfatfs_fsstat"; assert(a_fs != NULL); assert(a_hFile != NULL); tsk_error_reset(); if (fatfs_ptr_arg_is_null(a_fs, "a_fs", func_name) || fatfs_ptr_arg_is_null(a_hFile, "a_hFile", func_name)) { return FATFS_FAIL; } if (exfatfs_fsstat_fs_info(a_fs, a_hFile)) { return FATFS_FAIL; } if (exfatfs_fsstat_fs_layout_info(a_fs, a_hFile)) { return FATFS_FAIL; } exfatfs_fsstat_fs_metadata_info(a_fs, a_hFile); exfatfs_fsstat_fs_content_info(a_fs, a_hFile); /* Since exFAT does not store all file data in FAT chains (only fragmented files), * printing the chains could give the mistaken impression that those are the only * sectors containing file data. 
* * exfatfs_fsstat_fs_fat_chains_info(a_fs, a_hFile); */ return FATFS_OK; } sleuthkit-4.2.0/tsk/fs/exfatfs_dent.c000755 000765 000024 00000061144 12576320700 020365 0ustar00carrierstaff000000 000000 /* ** The Sleuth Kit ** ** Copyright (c) 2013 Basis Technology Corp. All rights reserved ** Contact: Brian Carrier [carrier sleuthkit [dot] org] ** ** This software is distributed under the Common Public License 1.0 ** */ /* * This code makes use of research presented in the following paper: * "Reverse Engineering the exFAT File System" by Robert Shullich * Retrieved May 2013 from: * http://www.sans.org/reading_room/whitepapers/forensics/reverse-engineering-microsoft-exfat-file-system_33274 * * Some additional details concerning TexFAT were obtained in May 2013 * from: * http://msdn.microsoft.com/en-us/library/ee490643(v=winembedded.60).aspx */ /** * \file exfatfs_dent.c * Contains the internal TSK exFAT file system code to handle name category * processing. */ #include "tsk_exfatfs.h" /* Include first to make sure it stands alone. */ #include "tsk_fs_i.h" #include "tsk_fatfs.h" #include /** * \internal * \struct * Bundles a TSK_FS_NAME object and a TSK_FS_DIR object with additional data * required when assembling a name from file directory entry set. If the * TSK_FS_NAME is successfully populated, it is added to the TSK_FS_DIR. */ typedef struct { FATFS_INFO *fatfs; int8_t sector_is_allocated; EXFATFS_DIR_ENTRY_TYPE last_dentry_type; uint8_t expected_secondary_entry_count; uint8_t actual_secondary_entry_count; uint16_t expected_check_sum; uint8_t expected_name_length; uint8_t actual_name_length; TSK_FS_NAME *fs_name; TSK_FS_DIR *fs_dir; } EXFATFS_FS_NAME_INFO; /** * \internal * Reset the fields of a EXFATFS_FS_NAME_INFO to their initialized state. This * allows for reuse of the object. * * @param a_name_info The name info object. */ static void exfatfs_reset_name_info(EXFATFS_FS_NAME_INFO *a_name_info) { assert(a_name_info != NULL); assert(a_name_info->fs_name != NULL); assert(a_name_info->fs_name->name != NULL); assert(a_name_info->fs_name->name_size == FATFS_MAXNAMLEN_UTF8); a_name_info->last_dentry_type = EXFATFS_DIR_ENTRY_TYPE_NONE; a_name_info->expected_secondary_entry_count = 0; a_name_info->actual_secondary_entry_count = 0; a_name_info->expected_check_sum = 0; a_name_info->expected_name_length = 0; a_name_info->actual_name_length = 0; a_name_info->fs_name->name[0] = '\0'; a_name_info->fs_name->meta_addr = 0; a_name_info->fs_name->type = TSK_FS_NAME_TYPE_UNDEF; a_name_info->fs_name->flags = TSK_FS_NAME_FLAG_ALLOC; } /** * \internal * Add the TSK_FS_NAME object of an EXFATFS_FS_NAME_INFO object to its * TSK_FS_DIR object and reset the fields of a EXFATFS_FS_NAME_INFO to their * initialized state. This allows for reuse of the object. * * @param a_name_info The name info object. */ static void exfatfs_add_name_to_dir_and_reset_info(EXFATFS_FS_NAME_INFO *a_name_info) { assert(a_name_info != NULL); assert(a_name_info->fs_name != NULL); assert(a_name_info->fs_name->name != NULL); assert(a_name_info->fs_name->name_size == FATFS_MAXNAMLEN_UTF8); assert(a_name_info->fs_dir != NULL); /* If the parsing of the directory entry or directory entry set produced * a name, add the TSK_FS_NAME object to the TSK_FS_DIR object. */ if (strlen(a_name_info->fs_name->name) > 0) { tsk_fs_dir_add(a_name_info->fs_dir, a_name_info->fs_name); } exfatfs_reset_name_info(a_name_info); } /** * \internal * Populates an EXFATFS_FS_NAME_INFO object with data parsed from a file * directory entry. 
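* A file directory entry is the primary entry of a directory entry set; it is followed by a file stream directory entry and one or more file name directory entries.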
Since this is the beginning of a new name, the name * previously stored on the EXFATFS_FS_NAME_INFO, if any, is saved. * * @param a_name_info The name info object. * @param a_dentry A buffer containing a file directory entry. * @param a_inum The inode address associated with the directory entry. */ static void exfats_parse_file_dentry(EXFATFS_FS_NAME_INFO *a_name_info, FATFS_DENTRY *a_dentry, TSK_INUM_T a_inum) { EXFATFS_FILE_DIR_ENTRY *dentry = (EXFATFS_FILE_DIR_ENTRY*)a_dentry; assert(a_name_info != NULL); assert(a_name_info->fatfs != NULL); assert(a_name_info->fs_name != NULL); assert(a_name_info->fs_name->name != NULL); assert(a_name_info->fs_name->name_size == FATFS_MAXNAMLEN_UTF8); assert(a_name_info->fs_dir != NULL); assert(dentry != NULL); assert(exfatfs_get_enum_from_type(dentry->entry_type) == EXFATFS_DIR_ENTRY_TYPE_FILE); assert(fatfs_inum_is_in_range(a_name_info->fatfs, a_inum)); /* Starting parse of a new name, so save the current name, if any. */ exfatfs_add_name_to_dir_and_reset_info(a_name_info); /* Set the current entry type. This is used to check the sequence and * in-use state of the entries in the set. */ a_name_info->last_dentry_type = (EXFATFS_DIR_ENTRY_TYPE)dentry->entry_type; /* The number of secondary entries and the check sum for the entry set are * stored in the file entry. */ a_name_info->expected_secondary_entry_count = dentry->secondary_entries_count; a_name_info->expected_check_sum = tsk_getu16(a_name_info->fatfs->fs_info.endian, dentry->check_sum); /* The file type (regular file, directory) is stored in the file entry. */ if (dentry->attrs[0] & FATFS_ATTR_DIRECTORY) { a_name_info->fs_name->type = TSK_FS_NAME_TYPE_DIR; } else { a_name_info->fs_name->type = TSK_FS_NAME_TYPE_REG; } /* If the in-use bit of the type byte is not set, the entry set is for a * deleted or renamed file. However, trust and verify - to be marked as * allocated, the inode must also be in an allocated sector. */ if (a_name_info->sector_is_allocated && exfatfs_get_alloc_status_from_type(dentry->entry_type)) { a_name_info->fs_name->flags = TSK_FS_NAME_FLAG_ALLOC; } else { a_name_info->fs_name->flags = TSK_FS_NAME_FLAG_UNALLOC; } /* Make the inum of the file entry the inode address for the entry set. */ a_name_info->fs_name->meta_addr = a_inum; } /** * \internal * Populates an EXFATFS_FS_NAME_INFO object with data parsed from a file * stream directory entry. * * @param a_name_info The name info object. * @param a_dentry A buffer containing a file stream directory entry. * @param a_inum The inode address associated with the directory entry. */ static void exfats_parse_file_stream_dentry(EXFATFS_FS_NAME_INFO *a_name_info, FATFS_DENTRY *a_dentry, TSK_INUM_T a_inum) { EXFATFS_FILE_STREAM_DIR_ENTRY *dentry = (EXFATFS_FILE_STREAM_DIR_ENTRY*)a_dentry; assert(a_name_info != NULL); assert(a_name_info->fatfs != NULL); assert(a_name_info->fs_name != NULL); assert(a_name_info->fs_name->name != NULL); assert(a_name_info->fs_name->name_size == FATFS_MAXNAMLEN_UTF8); assert(a_name_info->fs_dir != NULL); assert(dentry != NULL); assert(exfatfs_get_enum_from_type(dentry->entry_type) == EXFATFS_DIR_ENTRY_TYPE_FILE_STREAM); assert(fatfs_inum_is_in_range(a_name_info->fatfs, a_inum)); if(exfatfs_get_enum_from_type(a_name_info->last_dentry_type) != EXFATFS_DIR_ENTRY_TYPE_FILE){ /* A file stream entry must follow a file entry, so this entry is a * false positive or there is corruption. Save the current name, * if any, and ignore this buffer. 
*/ exfatfs_add_name_to_dir_and_reset_info(a_name_info); return; } if(exfatfs_get_alloc_status_from_type(a_name_info->last_dentry_type) != exfatfs_get_alloc_status_from_type(dentry->entry_type)){ /* The in-use bits of all of the entries in an entry set should be * same, so this entry is a false positive or there is corruption. * Save the current name, if any, and ignore this buffer. */ exfatfs_add_name_to_dir_and_reset_info(a_name_info); return; } /* Set the current entry type. This is used to check the sequence and * in-use state of the entries in the set. */ a_name_info->last_dentry_type = (EXFATFS_DIR_ENTRY_TYPE)dentry->entry_type; /* The file stream entry contains the length of the file name. */ a_name_info->expected_name_length = dentry->file_name_length; /* If all of the secondary entries for the set are present, save the name, * if any. Note that if this condition is satisfied here, the directory is * corrupted or this is a degenerate case - there should be at least one * file name entry in a directory entry set. */ ++a_name_info->actual_secondary_entry_count; if (a_name_info->actual_secondary_entry_count == a_name_info->expected_secondary_entry_count) { exfatfs_add_name_to_dir_and_reset_info(a_name_info); } } /** * \internal * Populates an EXFATFS_FS_NAME_INFO object with data parsed from a file * name directory entry. * * @param a_name_info The name info object. * @param a_dentry A buffer containing a file name directory entry. * @param a_inum The inode address associated with the directory entry. */ static void exfats_parse_file_name_dentry(EXFATFS_FS_NAME_INFO *a_name_info, FATFS_DENTRY *a_dentry, TSK_INUM_T a_inum) { EXFATFS_FILE_NAME_DIR_ENTRY *dentry = (EXFATFS_FILE_NAME_DIR_ENTRY*)a_dentry; size_t num_chars_to_copy = 0; assert(a_name_info != NULL); assert(a_name_info->fatfs != NULL); assert(a_name_info->fs_name != NULL); assert(a_name_info->fs_name->name != NULL); assert(a_name_info->fs_name->name_size == FATFS_MAXNAMLEN_UTF8); assert(a_name_info->fs_dir != NULL); assert(dentry != NULL); assert(exfatfs_get_enum_from_type(dentry->entry_type) == EXFATFS_DIR_ENTRY_TYPE_FILE_NAME); assert(fatfs_inum_is_in_range(a_name_info->fatfs, a_inum)); if(exfatfs_get_enum_from_type(a_name_info->last_dentry_type) != EXFATFS_DIR_ENTRY_TYPE_FILE_STREAM && exfatfs_get_enum_from_type(a_name_info->last_dentry_type) != EXFATFS_DIR_ENTRY_TYPE_FILE_NAME){ /* A file name entry must follow a stream or name entry, so this entry is * is a false positive or there is corruption. Save the current name, * if any, and ignore this buffer. */ exfatfs_add_name_to_dir_and_reset_info(a_name_info); return; } if(exfatfs_get_alloc_status_from_type(a_name_info->last_dentry_type) != exfatfs_get_alloc_status_from_type(dentry->entry_type)){ /* The in-use bits of all of the entries in an entry set should be * same, so this entry is a false positive or there is corruption. * Save the current name, if any, and ignore this buffer. */ exfatfs_add_name_to_dir_and_reset_info(a_name_info); return; } /* Set the current entry type. This is used to check the sequence and * in-use state of the entries in the set. */ a_name_info->last_dentry_type = (EXFATFS_DIR_ENTRY_TYPE)dentry->entry_type; /* Determine how many name chars remain according to the name length from * the file stream entry and how many chars can be obtained from this * name entry. 
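* Each file name entry holds at most EXFATFS_MAX_FILE_NAME_SEGMENT_LENGTH UTF-16 characters (15 in the exFAT format), so, for example, a 20-character name spans two name entries: 15 characters from the first and the remaining 5 from the second.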
*/ num_chars_to_copy = a_name_info->expected_name_length - a_name_info->actual_name_length; if (num_chars_to_copy > EXFATFS_MAX_FILE_NAME_SEGMENT_LENGTH) { num_chars_to_copy = EXFATFS_MAX_FILE_NAME_SEGMENT_LENGTH; } /* If there is enough space remaining in the name object, convert the * name chars to UTF-8 and save them. */ if ((size_t)(a_name_info->actual_name_length + num_chars_to_copy) < a_name_info->fs_name->name_size - 1) { if (fatfs_utf16_inode_str_2_utf8(a_name_info->fatfs, (UTF16*)dentry->utf16_name_chars, num_chars_to_copy, (UTF8*)&(a_name_info->fs_name->name[a_name_info->actual_name_length]), a_name_info->fs_name->name_size, a_inum, "file name segment") != TSKconversionOK) { /* Discard whatever was written by the failed conversion and save * whatever has been found to this point, if anything. */ a_name_info->fs_name->name[a_name_info->actual_name_length] = '\0'; exfatfs_add_name_to_dir_and_reset_info(a_name_info); return; } /* Update the actual name length and null-terminate the name so far. */ a_name_info->actual_name_length += num_chars_to_copy; a_name_info->fs_name->name[a_name_info->actual_name_length] = '\0'; } /* If all of the secondary entries for the set are present, save the name, * if any. */ ++a_name_info->actual_secondary_entry_count; if (a_name_info->actual_secondary_entry_count == a_name_info->expected_secondary_entry_count) { exfatfs_add_name_to_dir_and_reset_info(a_name_info); } } /** * \internal * Populates an EXFATFS_FS_NAME_INFO object with data parsed from a volume * label directory entry. * * @param a_name_info The name info object. * @param a_dentry A buffer containing a volume label directory entry. * @param a_inum The inode address associated with the directory entry. */ static void exfats_parse_vol_label_dentry(EXFATFS_FS_NAME_INFO *a_name_info, FATFS_DENTRY *a_dentry, TSK_INUM_T a_inum) { EXFATFS_VOL_LABEL_DIR_ENTRY *dentry = (EXFATFS_VOL_LABEL_DIR_ENTRY*)a_dentry; const char *tag = " (Volume Label Entry)"; size_t tag_length = 0; assert(a_name_info != NULL); assert(a_name_info->fatfs != NULL); assert(a_name_info->fs_name != NULL); assert(a_name_info->fs_name->name != NULL); assert(a_name_info->fs_name->name_size == FATFS_MAXNAMLEN_UTF8); assert(a_name_info->fs_dir != NULL); assert(dentry != NULL); assert(exfatfs_get_enum_from_type(dentry->entry_type) == EXFATFS_DIR_ENTRY_TYPE_VOLUME_LABEL); assert(fatfs_inum_is_in_range(a_name_info->fatfs, a_inum)); /* Starting parse of a new name, save the previous name, if any. */ exfatfs_add_name_to_dir_and_reset_info(a_name_info); /* Set the current entry type. This is used to check the sequence and * in-use state of the entries in the set. */ a_name_info->last_dentry_type = (EXFATFS_DIR_ENTRY_TYPE)dentry->entry_type; if(exfatfs_get_alloc_status_from_type(dentry->entry_type) == 1){ if (fatfs_utf16_inode_str_2_utf8(a_name_info->fatfs, (UTF16*)dentry->volume_label, (size_t)dentry->utf16_char_count + 1, (UTF8*)a_name_info->fs_name->name, a_name_info->fs_name->name_size, a_inum, "volume label") != TSKconversionOK) { /* Discard whatever was written by the failed conversion. 
*/ exfatfs_reset_name_info(a_name_info); return; } } else { strcpy(a_name_info->fs_name->name, EXFATFS_EMPTY_VOLUME_LABEL_DENTRY_NAME); } a_name_info->actual_name_length += dentry->utf16_char_count; a_name_info->fs_name->name[a_name_info->actual_name_length] = '\0'; tag_length = strlen(tag); if ((size_t)a_name_info->actual_name_length + tag_length < FATFS_MAXNAMLEN_UTF8) { strcat(a_name_info->fs_name->name, tag); } /* Record the inum associated with this name. */ a_name_info->fs_name->meta_addr = a_inum; /* Not a directory. */ a_name_info->fs_name->type = TSK_FS_NAME_TYPE_REG; if (a_name_info->sector_is_allocated) { a_name_info->fs_name->flags = TSK_FS_NAME_FLAG_ALLOC; } /* Save the volume label. */ exfatfs_add_name_to_dir_and_reset_info(a_name_info); } /** * \internal * Populates an EXFATFS_FS_NAME_INFO object with data parsed from a * special file directory entry. * * @param a_name_info The name info object. * @param a_dentry A buffer containing a special file directory entry. * @param a_inum The inode address associated with the directory entry. */ static void exfats_parse_special_file_dentry(EXFATFS_FS_NAME_INFO *a_name_info, FATFS_DENTRY *a_dentry, TSK_INUM_T a_inum) { assert(a_name_info != NULL); assert(a_name_info->fatfs != NULL); assert(a_name_info->fs_name != NULL); assert(a_name_info->fs_name->name != NULL); assert(a_name_info->fs_name->name_size == FATFS_MAXNAMLEN_UTF8); assert(a_name_info->fs_dir != NULL); assert(a_dentry != NULL); assert(exfatfs_get_enum_from_type(a_dentry->data[0]) == EXFATFS_DIR_ENTRY_TYPE_VOLUME_GUID || exfatfs_get_enum_from_type(a_dentry->data[0]) == EXFATFS_DIR_ENTRY_TYPE_ALLOC_BITMAP || exfatfs_get_enum_from_type(a_dentry->data[0]) == EXFATFS_DIR_ENTRY_TYPE_UPCASE_TABLE || exfatfs_get_enum_from_type(a_dentry->data[0]) == EXFATFS_DIR_ENTRY_TYPE_ACT); assert(fatfs_inum_is_in_range(a_name_info->fatfs, a_inum)); /* Starting parse of a new name, save the previous name, if any. */ exfatfs_add_name_to_dir_and_reset_info(a_name_info); /* Record the inum associated with this name. */ a_name_info->fs_name->meta_addr = a_inum; /* Set the current entry type. This is used to check the sequence and * in-use state of the entries in the set. */ a_name_info->last_dentry_type = (EXFATFS_DIR_ENTRY_TYPE)a_dentry->data[0]; switch (exfatfs_get_enum_from_type(a_dentry->data[0])) { case EXFATFS_DIR_ENTRY_TYPE_VOLUME_GUID: strcpy(a_name_info->fs_name->name, EXFATFS_VOLUME_GUID_DENTRY_NAME); break; case EXFATFS_DIR_ENTRY_TYPE_ALLOC_BITMAP: strcpy(a_name_info->fs_name->name, EXFATFS_ALLOC_BITMAP_DENTRY_NAME); break; case EXFATFS_DIR_ENTRY_TYPE_UPCASE_TABLE: strcpy(a_name_info->fs_name->name, EXFATFS_UPCASE_TABLE_DENTRY_NAME); break; case EXFATFS_DIR_ENTRY_TYPE_TEXFAT: strcpy(a_name_info->fs_name->name, EXFATFS_TEX_FAT_DENTRY_NAME); break; case EXFATFS_DIR_ENTRY_TYPE_ACT: strcpy(a_name_info->fs_name->name, EXFATFS_ACT_DENTRY_NAME); break; // listed so that we don't get compile warnings case EXFATFS_DIR_ENTRY_TYPE_NONE: case EXFATFS_DIR_ENTRY_TYPE_VOLUME_LABEL: case EXFATFS_DIR_ENTRY_TYPE_FILE: case EXFATFS_DIR_ENTRY_TYPE_FILE_STREAM: case EXFATFS_DIR_ENTRY_TYPE_FILE_NAME: break; } /* Not a directory. */ a_name_info->fs_name->type = TSK_FS_NAME_TYPE_REG; if (a_name_info->sector_is_allocated) { a_name_info->fs_name->flags = TSK_FS_NAME_FLAG_ALLOC; } /* Save the virtual file name. 
*/ exfatfs_add_name_to_dir_and_reset_info(a_name_info); } /** * \internal * Parse a buffer containing the contents of a directory and add TSK_FS_NAME * objects for each named file found to the TSK_FS_DIR representation of the * directory. * * @param a_fatfs File system information structure for file system that * contains the directory. * @param a_fs_dir Directory structure into to which parsed file metadata will * be added. * @param a_buf Buffer that contains the directory contents. * @param a_buf_len Length of buffer in bytes (must be a multiple of sector * size). * @param a_sector_addrs Array where each element is the original address of * the corresponding sector in a_buf (size of array is number of sectors in * the directory). * @return TSK_RETVAL_ENUM */ TSK_RETVAL_ENUM exfatfs_dent_parse_buf(FATFS_INFO *a_fatfs, TSK_FS_DIR *a_fs_dir, char *a_buf, TSK_OFF_T a_buf_len, TSK_DADDR_T *a_sector_addrs) { const char *func_name = "exfatfs_parse_directory_buf"; TSK_FS_INFO *fs = NULL; TSK_OFF_T num_sectors = 0; TSK_OFF_T sector_index = 0; TSK_INUM_T base_inum_of_sector = 0; EXFATFS_FS_NAME_INFO name_info; TSK_OFF_T dentry_index = 0; FATFS_DENTRY *dentry = NULL; int entries_count = 0; int invalid_entries_count = 0; uint8_t is_corrupt_dir = 0; tsk_error_reset(); if (fatfs_ptr_arg_is_null(a_fatfs, "a_fatfs", func_name) || fatfs_ptr_arg_is_null(a_fs_dir, "a_fs_dir", func_name) || fatfs_ptr_arg_is_null(a_buf, "a_buf", func_name) || fatfs_ptr_arg_is_null(a_sector_addrs, "a_sector_addrs", func_name)) { return TSK_ERR; } assert(a_buf_len > 0); if (a_buf_len < 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("%s: invalid buffer length", func_name); return TSK_ERR; } fs = (TSK_FS_INFO*)a_fatfs; memset((void*)&name_info, 0, sizeof(EXFATFS_FS_NAME_INFO)); name_info.fatfs = a_fatfs; if ((name_info.fs_name = tsk_fs_name_alloc(FATFS_MAXNAMLEN_UTF8, 0)) == NULL) { return TSK_ERR; } name_info.fs_name->name[0] = '\0'; name_info.fs_dir = a_fs_dir; /* Loop through the sectors in the buffer. */ dentry = (FATFS_DENTRY*)a_buf; num_sectors = a_buf_len / a_fatfs->ssize; for (sector_index = 0; sector_index < num_sectors; ++sector_index) { /* Convert the address of the current sector into an inode address. */ base_inum_of_sector = FATFS_SECT_2_INODE(a_fatfs, a_sector_addrs[sector_index]); if (base_inum_of_sector > fs->last_inum) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("%s: inode address for sector address %" PRIuDADDR " at addresses array index %" PRIuDADDR " is too large", func_name, base_inum_of_sector, sector_index); tsk_fs_name_free(name_info.fs_name); return TSK_COR; } if (tsk_verbose) { tsk_fprintf(stderr,"%s: Parsing sector %" PRIuDADDR " for dir %" PRIuINUM "\n", func_name, a_sector_addrs[sector_index], a_fs_dir->addr); } /* Get the allocation status of the current sector. */ if ((name_info.sector_is_allocated = fatfs_is_sectalloc(a_fatfs, a_sector_addrs[sector_index])) == -1) { if (tsk_verbose) { tsk_fprintf(stderr, "%s: Error looking up allocation status of sector : %" PRIuDADDR "\n", func_name, a_sector_addrs[sector_index]); tsk_error_print(stderr); } tsk_error_reset(); continue; } /* Loop through the putative directory entries in the current sector. 
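* Each exFAT directory entry is 32 bytes; dentry_cnt_se gives the number of entries examined per sector.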
*/ for (dentry_index = 0; dentry_index < a_fatfs->dentry_cnt_se; ++dentry_index, ++dentry) { FATFS_DENTRY *current_dentry = dentry; TSK_INUM_T current_inum = base_inum_of_sector + dentry_index; EXFATFS_DIR_ENTRY_TYPE dentry_type = EXFATFS_DIR_ENTRY_TYPE_NONE; ++entries_count; if (!fatfs_inum_is_in_range(a_fatfs, current_inum)) { tsk_fs_name_free(name_info.fs_name); return TSK_ERR; } if (exfatfs_is_dentry(a_fatfs, current_dentry, (FATFS_DATA_UNIT_ALLOC_STATUS_ENUM)name_info.sector_is_allocated, (uint8_t)(!is_corrupt_dir && name_info.sector_is_allocated))) { dentry_type = (EXFATFS_DIR_ENTRY_TYPE)current_dentry->data[0]; } else { dentry_type = EXFATFS_DIR_ENTRY_TYPE_NONE; } switch(exfatfs_get_enum_from_type(dentry_type)) { case EXFATFS_DIR_ENTRY_TYPE_FILE: exfats_parse_file_dentry(&name_info, current_dentry, current_inum); break; case EXFATFS_DIR_ENTRY_TYPE_FILE_STREAM: exfats_parse_file_stream_dentry(&name_info, current_dentry, current_inum); break; case EXFATFS_DIR_ENTRY_TYPE_FILE_NAME: exfats_parse_file_name_dentry(&name_info, current_dentry, current_inum); break; case EXFATFS_DIR_ENTRY_TYPE_VOLUME_LABEL: exfats_parse_vol_label_dentry(&name_info, current_dentry, current_inum); break; case EXFATFS_DIR_ENTRY_TYPE_VOLUME_GUID: case EXFATFS_DIR_ENTRY_TYPE_ALLOC_BITMAP: case EXFATFS_DIR_ENTRY_TYPE_UPCASE_TABLE: case EXFATFS_DIR_ENTRY_TYPE_TEXFAT: case EXFATFS_DIR_ENTRY_TYPE_ACT: exfats_parse_special_file_dentry(&name_info, current_dentry, current_inum); break; case EXFATFS_DIR_ENTRY_TYPE_NONE: default: ++invalid_entries_count; if (entries_count == 4 && invalid_entries_count == 4) { /* If the first four putative entries in the buffer are not * entries, set the corrupt directory flag to make entry tests * more in-depth, even for allocated sectors. */ is_corrupt_dir = 1; } /* Starting parse of a new name, save the previous name, * if any. */ exfatfs_add_name_to_dir_and_reset_info(&name_info); break; } } } /* Save the last parsed name, if any. */ exfatfs_add_name_to_dir_and_reset_info(&name_info); tsk_fs_name_free(name_info.fs_name); return TSK_OK; } sleuthkit-4.2.0/tsk/fs/exfatfs_meta.c000755 000765 000024 00000217161 12576320700 020363 0ustar00carrierstaff000000 000000 /* ** The Sleuth Kit ** ** Copyright (c) 2013 Basis Technology Corp. All rights reserved ** Contact: Brian Carrier [carrier sleuthkit [dot] org] ** ** This software is distributed under the Common Public License 1.0 ** */ /* * This code makes use of research presented in the following paper: * "Reverse Engineering the exFAT File System" by Robert Shullich * Retrieved May 2013 from: * http://www.sans.org/reading_room/whitepapers/forensics/reverse-engineering-microsoft-exfat-file-system_33274 * * Some additional details concerning TexFAT were obtained in May 2013 * from: * http://msdn.microsoft.com/en-us/library/ee490643(v=winembedded.60).aspx */ /** * \file exfatfs_meta.c * Contains the internal TSK exFAT file system code to access the data in the * metadata data category as defined in the book "File System Forensic * Analysis" by Brian Carrier (pp. 174-175). */ #include "tsk_exfatfs.h" /* Included first to make sure it stands alone. */ #include "tsk_fs_i.h" #include "tsk_fatfs.h" #include /** * \internal * Checks whether a specified cluster is allocated according to the allocation * bitmap of an exFAT file system. * * @param [in] a_fatfs A FATFS_INFO struct representing an exFAT file system. * @param [in] a_cluster_addr The cluster address of the cluster to check. 
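* For example, cluster 42 maps to bit (42 - 2) % 8 = 0 of bitmap byte (42 - 2) / 8 = 5, since the bitmap's first bit corresponds to cluster 2 (FATFS_FIRST_CLUSTER_ADDR).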
* @return 1 if the cluster is allocated, 0 if the cluster is not allocated, * or -1 if an error occurs. */ int8_t exfatfs_is_cluster_alloc(FATFS_INFO *a_fatfs, TSK_DADDR_T a_cluster_addr) { const char *func_name = "exfatfs_is_clust_alloc"; TSK_FS_INFO *fs = &(a_fatfs->fs_info); TSK_DADDR_T bitmap_byte_offset = 0; uint8_t bitmap_byte; ssize_t bytes_read = 0; assert(a_fatfs != NULL); if (fatfs_ptr_arg_is_null(a_fatfs, "a_fatfs", func_name)) { return -1; } assert((a_cluster_addr >= FATFS_FIRST_CLUSTER_ADDR) && (a_cluster_addr <= a_fatfs->lastclust)); if ((a_cluster_addr < FATFS_FIRST_CLUSTER_ADDR) || (a_cluster_addr > a_fatfs->lastclust)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("%s: cluster address %" PRIuINUM " out of range", func_name, a_cluster_addr); return -1; } /* Normalize the cluster address. */ a_cluster_addr = a_cluster_addr - FATFS_FIRST_CLUSTER_ADDR; /* Determine the offset of the byte in the allocation bitmap that contains * the bit for the specified cluster. */ bitmap_byte_offset = (a_fatfs->EXFATFS_INFO.first_sector_of_alloc_bitmap * a_fatfs->ssize) + (a_cluster_addr / 8); /* Read the byte. */ bytes_read = tsk_fs_read(fs, bitmap_byte_offset, (char*)&bitmap_byte, 1); if (bytes_read != 1) { if (bytes_read >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2("%s: failed to read bitmap byte at offset %" PRIuINUM "", func_name, bitmap_byte_offset); return -1; } /* Check the bit that corresponds to the specified cluster. Note that this * computation does not yield 0 or 1. */ if (bitmap_byte & (1 << (a_cluster_addr % 8))) { return 1; } else { return 0; } } /** * \internal * Determine whether the contents of a buffer may be an exFAT volume label * directory entry. * * @param [in] a_dentry A directory entry buffer. * @param [in] a_alloc_status The allocation status, possibly unknown, of the * cluster from which the buffer was filled. * @returns 1 if the directory entry buffer likely contains a volume label * directory entry, 0 otherwise. */ uint8_t exfatfs_is_vol_label_dentry(FATFS_DENTRY *a_dentry, FATFS_DATA_UNIT_ALLOC_STATUS_ENUM a_cluster_is_alloc) { const char *func_name = "exfatfs_is_vol_label_dentry"; EXFATFS_VOL_LABEL_DIR_ENTRY *dentry = (EXFATFS_VOL_LABEL_DIR_ENTRY*)a_dentry; uint8_t i = 0; assert(a_dentry != NULL); if (fatfs_ptr_arg_is_null(a_dentry, "a_dentry", func_name)) { return 0; } /* Check the entry type byte. */ if (exfatfs_get_enum_from_type(dentry->entry_type) != EXFATFS_DIR_ENTRY_TYPE_VOLUME_LABEL) { return 0; } /* There should be a single volume label directory entry at the * beginning of the root directory, so check the allocation status, if * known, of the cluster from which the buffer was filled. */ if (a_cluster_is_alloc == FATFS_DATA_UNIT_ALLOC_STATUS_UNALLOC) { return 0; } if(exfatfs_get_alloc_status_from_type(dentry->entry_type) == 1){ /* There is supposed to be a label, check its length. */ if ((dentry->utf16_char_count < 1) || (dentry->utf16_char_count > EXFATFS_MAX_VOLUME_LABEL_LEN)) { if (tsk_verbose) { fprintf(stderr, "%s: incorrect volume label length\n", func_name); } return 0; } } else { /* There is supposed to be no label, check for a zero in the length * field. */ if (dentry->utf16_char_count != 0x00) { if (tsk_verbose) { fprintf(stderr, "%s: volume label length non-zero for no label entry\n", func_name); } return 0; } /* Every byte of the UTF-16 volume label string should be 0. 
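* The label field holds EXFATFS_MAX_VOLUME_LABEL_LEN UTF-16 characters, i.e. twice that many bytes, hence the loop bound of EXFATFS_MAX_VOLUME_LABEL_LEN * 2 below.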
*/ for (i = 0; i < EXFATFS_MAX_VOLUME_LABEL_LEN * 2; ++i) { if (dentry->volume_label[i] != 0x00) { if (tsk_verbose) { fprintf(stderr, "%s: non-zero byte in label for no label entry\n", func_name); } return 0; } } } return 1; } /** * \internal * Determine whether the contents of a buffer may be an exFAT volume GUID * directory entry. * * @param [in] a_dentry A directory entry buffer. * @param [in] a_alloc_status The allocation status, possibly unknown, of the * cluster from which the buffer was filled. * @returns 1 if the directory entry buffer likely contains a volume GUID * directory entry, 0 otherwise. */ uint8_t exfatfs_is_vol_guid_dentry(FATFS_DENTRY *a_dentry, FATFS_DATA_UNIT_ALLOC_STATUS_ENUM a_alloc_status) { const char *func_name = "exfatfs_is_vol_guid_dentry"; EXFATFS_VOL_GUID_DIR_ENTRY *dentry = (EXFATFS_VOL_GUID_DIR_ENTRY*)a_dentry; assert(a_dentry != NULL); if (fatfs_ptr_arg_is_null(a_dentry, "a_dentry", func_name)) { return 0; } /* Check the entry type byte. */ if (exfatfs_get_enum_from_type(dentry->entry_type) != EXFATFS_DIR_ENTRY_TYPE_VOLUME_GUID) { return 0; } /* There is not enough data in a volume GUID directory entry to test * anything but the entry type byte. However, a volume GUID directory * entry should be in allocated space, so check the allocation status, if * known, of the cluster from which the buffer was filled to reduce false * positives. */ return ((a_alloc_status == FATFS_DATA_UNIT_ALLOC_STATUS_ALLOC) || (a_alloc_status == FATFS_DATA_UNIT_ALLOC_STATUS_UNKNOWN)); } /** * \internal * Determine whether the contents of a buffer may be an exFAT allocation bitmap * directory entry. The test will be more reliable if an optional FATFS_INFO * struct representing the file system is provided. * * @param [in] a_dentry A directory entry buffer. * @param [in] a_alloc_status The allocation status, possibly unknown, of the * cluster from which the buffer was filled. * @param [in] a_fatfs A FATFS_INFO struct representing an exFAT file system, * may be NULL. * @returns 1 if the directory entry buffer likely contains an allocation * bitmap directory entry, 0 otherwise. */ uint8_t exfatfs_is_alloc_bitmap_dentry(FATFS_DENTRY *a_dentry, FATFS_DATA_UNIT_ALLOC_STATUS_ENUM a_alloc_status, FATFS_INFO *a_fatfs) { const char *func_name = "exfatfs_is_alloc_bitmap_dentry"; EXFATFS_ALLOC_BITMAP_DIR_ENTRY *dentry = (EXFATFS_ALLOC_BITMAP_DIR_ENTRY*)a_dentry; uint32_t first_cluster_of_bitmap = 0; uint64_t length_of_alloc_bitmap_in_bytes = 0; assert(a_dentry != NULL); if (fatfs_ptr_arg_is_null(a_dentry, "a_dentry", func_name)) { return 0; } /* Check the entry type byte. */ if (exfatfs_get_enum_from_type(dentry->entry_type) != EXFATFS_DIR_ENTRY_TYPE_ALLOC_BITMAP) { return 0; } /* There should be a single allocation bitmap directory entry near the the * beginning of the root directory, so check the allocation status, if * known, of the cluster from which the buffer was filled. */ if (a_alloc_status == FATFS_DATA_UNIT_ALLOC_STATUS_UNALLOC) { return 0; } if (a_fatfs != NULL) { /* The length of the allocation bitmap should be consistent with the * number of clusters in the data area as specified in the volume boot * record. */ length_of_alloc_bitmap_in_bytes = tsk_getu64(a_fatfs->fs_info.endian, dentry->length_of_alloc_bitmap_in_bytes); if (length_of_alloc_bitmap_in_bytes != (a_fatfs->clustcnt + 7) / 8) { if (tsk_verbose) { fprintf(stderr, "%s: bitmap length incorrect\n", func_name); } return 0; } /* The first cluster of the bit map should be within the data area. 
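* (The cluster heap begins at cluster 2, so EXFATFS_FIRST_CLUSTER is the lowest valid first-cluster value in the range check below.)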
* It is usually in the first cluster. */ first_cluster_of_bitmap = tsk_getu32(a_fatfs->fs_info.endian, dentry->first_cluster_of_bitmap); if ((first_cluster_of_bitmap < EXFATFS_FIRST_CLUSTER) || (first_cluster_of_bitmap > a_fatfs->lastclust)) { if (tsk_verbose) { fprintf(stderr, "%s: first cluster not in cluster heap\n", func_name); } return 0; } /* The first cluster of the allocation bitmap should be allocated (the * other conditions allow this function to be safely used to look for * the allocation bitmap during FATFS_INFO initialization, before a * cluster allocation is possible). */ if ((a_fatfs->EXFATFS_INFO.first_sector_of_alloc_bitmap > 0) && (a_fatfs->EXFATFS_INFO.length_of_alloc_bitmap_in_bytes > 0) && (exfatfs_is_cluster_alloc(a_fatfs, (TSK_DADDR_T)first_cluster_of_bitmap) != 1)) { if (tsk_verbose) { fprintf(stderr, "%s: first cluster of allocation bitmap not allocated\n", func_name); } return 0; } } return 1; } /** * \internal * Determine whether the contents of a buffer may be an exFAT upcase table * directory entry. The test will be more reliable if an optional FATFS_INFO * struct representing the file system is provided. * * @param [in] a_dentry A directory entry buffer. * @param [in] a_alloc_status The allocation status, possibly unknown, of the * cluster from which the buffer was filled. * @param [in] a_fatfs A FATFS_INFO struct representing an exFAT file system, * may be NULL. * @returns 1 if the directory entry buffer likely contains an upcase table * directory entry, 0 otherwise. */ uint8_t exfatfs_is_upcase_table_dentry(FATFS_DENTRY *a_dentry, FATFS_DATA_UNIT_ALLOC_STATUS_ENUM a_alloc_status, FATFS_INFO *a_fatfs) { const char *func_name = "exfatfs_is_upcase_table_dentry"; EXFATFS_UPCASE_TABLE_DIR_ENTRY *dentry = (EXFATFS_UPCASE_TABLE_DIR_ENTRY*)a_dentry; uint64_t table_size = 0; uint32_t first_cluster_of_table = 0; assert(a_dentry != NULL); if (fatfs_ptr_arg_is_null(a_dentry, "a_dentry", func_name)) { return 0; } /* Check the entry type byte. */ if (exfatfs_get_enum_from_type(dentry->entry_type) != EXFATFS_DIR_ENTRY_TYPE_UPCASE_TABLE) { return 0; } /* There should be a single upcase table directory entry near the the * beginning of the root directory, so check the allocation status, if * known, of the cluster from which the buffer was filled. */ if (a_alloc_status == FATFS_DATA_UNIT_ALLOC_STATUS_UNALLOC) { return 0; } if (a_fatfs != NULL) { /* Check the size of the table. */ table_size = tsk_getu64(a_fatfs->fs_info.endian, dentry->table_length_in_bytes); if (table_size == 0) { if (tsk_verbose) { fprintf(stderr, "%s: table size is zero\n", func_name); } return 0; } /* Is the table size less than the size of the cluster heap * (data area)? The cluster heap size is computed by multiplying the * cluster size by the number of sectors in a cluster and then * multiplying by the number of bytes in a sector (the last operation * is optimized as a left shift by the base 2 log of sector size). */ if (table_size > (a_fatfs->clustcnt * a_fatfs->csize) << a_fatfs->ssize_sh) { if (tsk_verbose) { fprintf(stderr, "%s: table size too big\n", func_name); } return 0; } /* Is the address of the first cluster in range? */ first_cluster_of_table = tsk_getu32(a_fatfs->fs_info.endian, dentry->first_cluster_of_table); if ((first_cluster_of_table < EXFATFS_FIRST_CLUSTER) || (first_cluster_of_table > a_fatfs->lastclust)) { if (tsk_verbose) { fprintf(stderr, "%s: first cluster not in cluster heap\n", func_name); } return 0; } /* The first cluster of the table should be allocated. 
*/ if (exfatfs_is_cluster_alloc(a_fatfs, (TSK_DADDR_T)first_cluster_of_table) != 1) { if (tsk_verbose) { fprintf(stderr, "%s: first cluster of table not allocated\n", func_name); } return 0; } } return 1; } /** * \internal * Determine whether the contents of a buffer may be an exFAT TexFAT directory * entry. * * @param [in] a_dentry A directory entry buffer. * @param [in] a_alloc_status The allocation status, possibly unknown, of the * cluster from which the buffer was filled. * @returns 1 if the directory entry buffer likely contains a TexFAT directory * entry, 0 otherwise. */ uint8_t exfatfs_is_texfat_dentry(FATFS_DENTRY *a_dentry, FATFS_DATA_UNIT_ALLOC_STATUS_ENUM a_alloc_status) { const char *func_name = "exfatfs_is_texfat_dentry"; EXFATFS_TEXFAT_DIR_ENTRY *dentry = (EXFATFS_TEXFAT_DIR_ENTRY*)a_dentry; assert(a_dentry != NULL); if (fatfs_ptr_arg_is_null(a_dentry, "a_dentry", func_name)) { return 0; } /* Check the entry type byte. */ if (exfatfs_get_enum_from_type(dentry->entry_type) != EXFATFS_DIR_ENTRY_TYPE_TEXFAT) { return 0; } /* There is not enough data in a TexFAT directory entry to test anything * but the entry type byte. However, a TexFAT directory entry should be in * allocated space, so check the allocation status, if known, of the * cluster from which the buffer was filled to reduce false positives. */ return ((a_alloc_status == FATFS_DATA_UNIT_ALLOC_STATUS_ALLOC) || (a_alloc_status == FATFS_DATA_UNIT_ALLOC_STATUS_UNKNOWN)); } /** * \internal * Determine whether the contents of a buffer may be an exFAT access control * table directory entry. * * @param [in] a_dentry A directory entry buffer. * @param [in] a_alloc_status The allocation status, possibly unknown, of the * cluster from which the buffer was filled. * @returns 1 if the directory entry buffer likely contains an access control * table entry, 0 otherwise. */ uint8_t exfatfs_is_access_ctrl_table_dentry(FATFS_DENTRY *a_dentry, FATFS_DATA_UNIT_ALLOC_STATUS_ENUM a_alloc_status) { const char *func_name = "exfatfs_is_texfat_dentry"; EXFATFS_TEXFAT_DIR_ENTRY *dentry = (EXFATFS_TEXFAT_DIR_ENTRY*)a_dentry; assert(a_dentry != NULL); if (fatfs_ptr_arg_is_null(a_dentry, "a_dentry", func_name)) { return 0; } /* Check the entry type byte. */ if (exfatfs_get_enum_from_type(dentry->entry_type) != EXFATFS_DIR_ENTRY_TYPE_TEXFAT) { return 0; } /* There is not enough data in an access control table directory entry to * test anything but the entry type byte. However, an access control table * directory entry should be in allocated space, so check the allocation * status, if known, of the cluster from which the buffer was filled to * reduce false positives. */ return ((a_alloc_status == FATFS_DATA_UNIT_ALLOC_STATUS_ALLOC) || (a_alloc_status == FATFS_DATA_UNIT_ALLOC_STATUS_UNKNOWN)); } /** * \internal * Determine whether the contents of a buffer may be an exFAT file directory * entry. The test will be more reliable if an optional TSK_ENDIAN_ENUM value * is known. This function was split into two parts so that the main * test can be run without a FATFS_INFO object. * * @param [in] a_dentry A directory entry buffer. * @param [in] a_fatfs A FATFS_INFO struct representing an exFAT file system, * may be NULL. * @returns 1 if the directory entry buffer likely contains a file directory * entry, 0 otherwise. 
*/ uint8_t exfatfs_is_file_dentry(FATFS_DENTRY *a_dentry, FATFS_INFO *a_fatfs) { if (a_fatfs != NULL) { TSK_FS_INFO *fs = &(a_fatfs->fs_info); return exfatfs_is_file_dentry_standalone(a_dentry, fs->endian); } else { return exfatfs_is_file_dentry_standalone(a_dentry, TSK_UNKNOWN_ENDIAN); } } /** * \internal * Determine whether the contents of a buffer may be an exFAT file directory * entry. The test will be more reliable if an optional TSK_ENDIAN_ENUM value * is known. This version of the function can be called without a TSK_FS_INFO * object. * * @param [in] a_dentry A directory entry buffer. * @param [in] a_endian Endianness of the file system * @returns 1 if the directory entry buffer likely contains a file directory * entry, 0 otherwise. */ uint8_t exfatfs_is_file_dentry_standalone(FATFS_DENTRY *a_dentry, TSK_ENDIAN_ENUM a_endian) { const char *func_name = "exfatfs_is_file_dentry"; EXFATFS_FILE_DIR_ENTRY *dentry = (EXFATFS_FILE_DIR_ENTRY*)a_dentry; assert(a_dentry != NULL); if (fatfs_ptr_arg_is_null(a_dentry, "a_dentry", func_name)) { return 0; } /* Check the entry type byte. */ if (exfatfs_get_enum_from_type(dentry->entry_type) != EXFATFS_DIR_ENTRY_TYPE_FILE){ return 0; } /* A file directory entry is the first entry of a file directory entry set * consisting of a file directory entry followed by a file stream directory * entry and from 1 to 17 file name directory entries. The file stream and * file name entries are called secondary entries. */ if (dentry->secondary_entries_count < EXFATFS_MIN_FILE_SECONDARY_DENTRIES_COUNT || dentry->secondary_entries_count > EXFATFS_MAX_FILE_SECONDARY_DENTRIES_COUNT) { if (tsk_verbose) { fprintf(stderr, "%s: secondary entries count out of range\n", func_name); } return 0; } if (a_endian != TSK_UNKNOWN_ENDIAN) { /* Make sure the time stamps aren't all zeros. */ if ((tsk_getu16(a_endian, dentry->modified_date) == 0) && (tsk_getu16(a_endian, dentry->modified_time) == 0) && (dentry->modified_time_tenths_of_sec == 0) && (tsk_getu16(a_endian, dentry->created_date) == 0) && (tsk_getu16(a_endian, dentry->created_time) == 0) && (dentry->created_time_tenths_of_sec == 0) && (tsk_getu16(a_endian, dentry->accessed_date) == 0) && (tsk_getu16(a_endian, dentry->accessed_time) == 0)) { if (tsk_verbose) { fprintf(stderr, "%s: time stamps all zero\n", func_name); } return 0; } } return 1; } /** * \internal * Determine whether the contents of a buffer may be an exFAT file stream * directory entry. The test will be more reliable if an optional FATFS_INFO * struct representing the file system is provided. This function was * split into two parts so that the main test can be run * without a FATFS_INFO object. * * @param [in] a_dentry A directory entry buffer. * @param [in] a_fatfs A FATFS_INFO struct representing an exFAT file system, * may be NULL. * @returns 1 if the directory entry buffer likely contains a file stream * directory entry, 0 otherwise. */ uint8_t exfatfs_is_file_stream_dentry(FATFS_DENTRY *a_dentry, FATFS_INFO *a_fatfs) { TSK_FS_INFO *fs = NULL; uint64_t cluster_heap_size = 0; if (a_fatfs != NULL) { fs = &(a_fatfs->fs_info); /* Calculate the size of the cluster heap. The cluster heap size * is computed by multiplying the cluster size * by the number of sectors in a cluster and then * multiplying by the number of bytes in a sector (the last operation * is optimized as a left shift by the base 2 log of sector size). 
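* For example, with 512-byte sectors (ssize_sh = 9), 8 sectors per cluster, and 100,000 clusters, the heap size is 100,000 * 8 * 512 = 409,600,000 bytes.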
*/ cluster_heap_size = (a_fatfs->clustcnt * a_fatfs->csize) << a_fatfs->ssize_sh; return exfatfs_is_file_stream_dentry_standalone(a_dentry, fs->endian, cluster_heap_size, a_fatfs->lastclust); } else{ return exfatfs_is_file_stream_dentry_standalone(a_dentry, TSK_UNKNOWN_ENDIAN, 0, 0); } } /** * \internal * Determine whether the contents of a buffer may be an exFAT file stream * directory entry. The test will be more reliable if the optional endianness * and cluster information are used. This version of the function can be * called without a TSK_FS_INFO object. * * The endianness must be known to run all of the extended tests. The other * parameters can be set to zero if unknown and the function will run whichever * tests are possible with the given information. * * @param [in] a_dentry A directory entry buffer. * @param [in] a_endian Endianness of the file system * @param [in] a_cluster_heap_size Size of the cluster heap (in bytes) * @param [in] a_last_cluster Last cluster in the file system * @returns 1 if the directory entry buffer likely contains a file stream * directory entry, 0 otherwise. */ uint8_t exfatfs_is_file_stream_dentry_standalone(FATFS_DENTRY *a_dentry, TSK_ENDIAN_ENUM a_endian, uint64_t a_cluster_heap_size, TSK_DADDR_T a_last_cluster) { const char *func_name = "exfatfs_is_file_stream_dentry"; EXFATFS_FILE_STREAM_DIR_ENTRY *dentry = (EXFATFS_FILE_STREAM_DIR_ENTRY*)a_dentry; uint64_t file_size = 0; uint32_t first_cluster = 0; assert(a_dentry != NULL); if (fatfs_ptr_arg_is_null(a_dentry, "a_dentry", func_name)) { return 0; } /* Check the entry type byte. */ if (exfatfs_get_enum_from_type(dentry->entry_type) != EXFATFS_DIR_ENTRY_TYPE_FILE_STREAM) { return 0; } if (a_endian != TSK_UNKNOWN_ENDIAN) { /* Check the size. */ file_size = tsk_getu64(a_endian, dentry->data_length); if (file_size > 0) { /* Is the file size less than the size of the cluster heap * (data area)? The cluster heap size is computed by multiplying the * cluster size by the number of sectors in a cluster and then * multiplying by the number of bytes in a sector (the last operation * is optimized as a left shift by the base 2 log of sector size). */ if(a_cluster_heap_size > 0){ if (file_size > a_cluster_heap_size) { if (tsk_verbose) { fprintf(stderr, "%s: file size too big\n", func_name); } return 0; } } /* Is the address of the first cluster in range? */ first_cluster = tsk_getu32(a_endian, dentry->first_cluster_addr); if ((first_cluster < EXFATFS_FIRST_CLUSTER) || ((a_last_cluster > 0) && (first_cluster > a_last_cluster))) { if (tsk_verbose) { fprintf(stderr, "%s: first cluster not in cluster heap\n", func_name); } return 0; } } } return 1; } /** * \internal * Determine whether the contents of a buffer may be an exFAT file name * directory entry. * * @param [in] a_dentry A directory entry buffer. * @returns 1 if the directory entry buffer likely contains an file name * directory entry, 0 otherwise. */ uint8_t exfatfs_is_file_name_dentry(FATFS_DENTRY *a_dentry) { const char *func_name = "exfatfs_is_file_name_dentry"; EXFATFS_FILE_NAME_DIR_ENTRY *dentry = (EXFATFS_FILE_NAME_DIR_ENTRY*)a_dentry; assert(a_dentry != NULL); if (fatfs_ptr_arg_is_null(a_dentry, "a_dentry", func_name)) { return 0; } /* There is not enough data in a file name directory entry * to test anything but the entry type byte. */ return (exfatfs_get_enum_from_type(dentry->entry_type) == EXFATFS_DIR_ENTRY_TYPE_FILE_NAME); } /** * \internal * Determine whether a buffer likely contains a directory entry. 
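* The first byte of the 32-byte entry is its type byte; it selects which type-specific test is applied to the rest of the buffer.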
* For the most reliable results, request the in-depth test. * * @param [in] a_fatfs Source file system for the directory entry. * @param [in] a_dentry Buffer that may contain a directory entry. * @param [in] a_cluster_is_alloc The allocation status, possibly unknown, of the * cluster from which the buffer was filled. * @param [in] a_do_basic_tests_only Whether to do basic or in-depth testing. * @return 1 if the buffer likely contains a direcotry entry, 0 otherwise */ uint8_t exfatfs_is_dentry(FATFS_INFO *a_fatfs, FATFS_DENTRY *a_dentry, FATFS_DATA_UNIT_ALLOC_STATUS_ENUM a_cluster_is_alloc, uint8_t a_do_basic_tests_only) { const char *func_name = "exfatfs_is_dentry"; assert(a_dentry != NULL); if (fatfs_ptr_arg_is_null(a_dentry, "a_dentry", func_name)) { return 0; } switch (exfatfs_get_enum_from_type(a_dentry->data[0])) { case EXFATFS_DIR_ENTRY_TYPE_VOLUME_LABEL: return exfatfs_is_vol_label_dentry(a_dentry, a_cluster_is_alloc); case EXFATFS_DIR_ENTRY_TYPE_VOLUME_GUID: return exfatfs_is_vol_guid_dentry(a_dentry, a_cluster_is_alloc); case EXFATFS_DIR_ENTRY_TYPE_ALLOC_BITMAP: return exfatfs_is_alloc_bitmap_dentry(a_dentry, a_cluster_is_alloc, a_fatfs); case EXFATFS_DIR_ENTRY_TYPE_UPCASE_TABLE: return exfatfs_is_upcase_table_dentry(a_dentry, a_cluster_is_alloc, a_fatfs); case EXFATFS_DIR_ENTRY_TYPE_TEXFAT: return exfatfs_is_texfat_dentry(a_dentry, a_cluster_is_alloc); case EXFATFS_DIR_ENTRY_TYPE_ACT: return exfatfs_is_access_ctrl_table_dentry(a_dentry, a_cluster_is_alloc); case EXFATFS_DIR_ENTRY_TYPE_FILE: return exfatfs_is_file_dentry(a_dentry, a_fatfs); case EXFATFS_DIR_ENTRY_TYPE_FILE_STREAM: return exfatfs_is_file_stream_dentry(a_dentry, a_fatfs); case EXFATFS_DIR_ENTRY_TYPE_FILE_NAME: return exfatfs_is_file_name_dentry(a_dentry); default: return 0; } } /** * \internal * Construct a single, non-resident data run for the TSK_FS_META object of a * TSK_FS_FILE object. * * @param [in, out] a_fs_file Generic file with generic inode structure (TSK_FS_META). * @return 0 on success, 1 on failure, per TSK convention */ static uint8_t exfatfs_make_contiguous_data_run(TSK_FS_FILE *a_fs_file) { const char *func_name = "exfatfs_make_contiguous_data_run"; TSK_FS_META *fs_meta = NULL; TSK_FS_INFO *fs = NULL; FATFS_INFO *fatfs = NULL; TSK_DADDR_T first_cluster = 0; TSK_FS_ATTR_RUN *data_run; TSK_FS_ATTR *fs_attr = NULL; TSK_OFF_T alloc_size = 0; assert(a_fs_file != NULL); assert(a_fs_file->meta != NULL); assert(a_fs_file->fs_info != NULL); fs_meta = a_fs_file->meta; fs = (TSK_FS_INFO*)a_fs_file->fs_info; fatfs = (FATFS_INFO*)fs; if (tsk_verbose) { tsk_fprintf(stderr, "%s: Loading attrs for inode: %" PRIuINUM "\n", func_name, a_fs_file->meta->addr); } /* Get the stashed first cluster address of the file. If the address does * not make sense, set the attribute state to TSK_FS_META_ATTR_ERROR so * that there is no subsequent attempt to load a data run for this * file object. */ first_cluster = ((TSK_DADDR_T*)fs_meta->content_ptr)[0]; if ((first_cluster > (fatfs->lastclust)) && (FATFS_ISEOF(first_cluster, fatfs->mask) == 0)) { fs_meta->attr_state = TSK_FS_META_ATTR_ERROR; tsk_error_reset(); if (fs_meta->flags & TSK_FS_META_FLAG_UNALLOC) { tsk_error_set_errno(TSK_ERR_FS_RECOVER); } else { tsk_error_set_errno(TSK_ERR_FS_INODE_COR); } tsk_error_set_errstr ("%s: Starting cluster address too large: %" PRIuDADDR, func_name, first_cluster); return 1; } /* Figure out the allocated size of the file. 
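* For example, with 32 KiB clusters, a 10,000-byte file occupies one cluster, so its allocated size is 32,768 bytes.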
The minimum allocation unit * for exFAT is a cluster, so the the roundup() function is used to round * up the file size in bytes to a multiple of cluser size in bytes. */ alloc_size = roundup(fs_meta->size, (fatfs->csize * fs->block_size)); /* Allocate an attribute list for the file. */ fs_meta->attr = tsk_fs_attrlist_alloc(); /* Allocate a non-resident attribute for the file and add it to the * attribute list. */ if ((fs_attr = tsk_fs_attrlist_getnew(fs_meta->attr, TSK_FS_ATTR_NONRES)) == NULL) { return 1; } /* Allocate a single data run for the attribute. For exFAT, a data run is * a contiguous run of sectors. */ data_run = tsk_fs_attr_run_alloc(); if (data_run == NULL) { return 1; } /* Set the starting sector address of the run and the length of the run * in sectors. */ data_run->addr = FATFS_CLUST_2_SECT(fatfs, first_cluster); data_run->len = roundup(fs_meta->size, (fatfs->csize * fs->block_size)) / fs->block_size; /* Add the data run to the attribute and add the attribute to the * attribute list. Note that the initial size and the allocation * size are the same for exFAT. */ if (tsk_fs_attr_set_run(a_fs_file, fs_attr, data_run, NULL, TSK_FS_ATTR_TYPE_DEFAULT, TSK_FS_ATTR_ID_DEFAULT, fs_meta->size, fs_meta->size, data_run->len * fs->block_size, TSK_FS_ATTR_FLAG_NONE, 0)) { return 1; } /* Mark the attribute list as loaded. */ fs_meta->attr_state = TSK_FS_META_ATTR_STUDIED; return 0; } /** * \internal * Use a volume label directory entry corresponding to the exFAT * equivalent of an inode to populate the TSK_FS_META object of a * TSK_FS_FILE object. * * @param [in] a_fatfs Source file system for the directory entry. * @param [in] a_inum Address of the inode. * @param [in] a_dentry A volume label directory entry. * @param a_fs_file Generic file with generic inode structure (TSK_FS_META). * @return TSK_RETVAL_ENUM. */ static TSK_RETVAL_ENUM exfatfs_copy_vol_label_inode(FATFS_INFO *a_fatfs, TSK_INUM_T a_inum, FATFS_DENTRY *a_dentry, TSK_FS_FILE *a_fs_file) { EXFATFS_VOL_LABEL_DIR_ENTRY *dentry = NULL; assert(a_fatfs != NULL); assert(fatfs_inum_is_in_range(a_fatfs, a_inum)); assert(a_dentry != NULL); assert(a_fs_file != NULL); assert(a_fs_file->meta != NULL); dentry = (EXFATFS_VOL_LABEL_DIR_ENTRY*)a_dentry; assert(exfatfs_get_enum_from_type(dentry->entry_type) == EXFATFS_DIR_ENTRY_TYPE_VOLUME_LABEL); /* If there is a volume label, copy it to the name field of the * TSK_FS_META structure. */ if (exfatfs_get_alloc_status_from_type(dentry->entry_type) == 1) { if (fatfs_utf16_inode_str_2_utf8(a_fatfs, (UTF16*)dentry->volume_label, (size_t)dentry->utf16_char_count + 1, (UTF8*)a_fs_file->meta->name2->name, sizeof(a_fs_file->meta->name2->name), a_inum, "volume label") != TSKconversionOK) { return TSK_COR; } } else { strcpy(a_fs_file->meta->name2->name, EXFATFS_EMPTY_VOLUME_LABEL_DENTRY_NAME); } return TSK_OK; } /** * \internal * Use an allocation bitmap directory entry corresponding to the exFAT * equivalent of an inode to populate the TSK_FS_META object of a * TSK_FS_FILE object. * * @param a_fatfs [in] Source file system for the directory entries. * @param [in] a_dentry An allocation bitmap directory entry. * @param a_fs_file [in, out] Generic file with generic inode structure (TSK_FS_META). * @return TSK_RETVAL_ENUM. 
*/ static TSK_RETVAL_ENUM exfatfs_copy_alloc_bitmap_inode(FATFS_INFO *a_fatfs, FATFS_DENTRY *a_dentry, TSK_FS_FILE *a_fs_file) { EXFATFS_ALLOC_BITMAP_DIR_ENTRY *dentry = NULL; assert(a_fatfs != NULL); assert(a_dentry != NULL); assert(a_fs_file != NULL); assert(a_fs_file->meta != NULL); dentry = (EXFATFS_ALLOC_BITMAP_DIR_ENTRY*)a_dentry; assert(exfatfs_get_enum_from_type(dentry->entry_type) == EXFATFS_DIR_ENTRY_TYPE_ALLOC_BITMAP); /* Set the file name to a descriptive pseudo file name. */ strcpy(a_fs_file->meta->name2->name, EXFATFS_ALLOC_BITMAP_DENTRY_NAME); /* Set the size of the allocation bitmap and the address of its * first cluster. */ ((TSK_DADDR_T*)a_fs_file->meta->content_ptr)[0] = FATFS_SECT_2_CLUST(a_fatfs, a_fatfs->EXFATFS_INFO.first_sector_of_alloc_bitmap); a_fs_file->meta->size = a_fatfs->EXFATFS_INFO.length_of_alloc_bitmap_in_bytes; /* There is no FAT chain walk for the allocation bitmap. Do an eager * load instead of a lazy load of its data run. */ if (exfatfs_make_contiguous_data_run(a_fs_file)) { return TSK_ERR; } return TSK_OK; } /** * \internal * Use an UP-Case table directory entry corresponding to the exFAT equivalent * of an inode to populate the TSK_FS_META object of a TSK_FS_FILE object. * * @param a_fatfs [in] Source file system for the directory entries. * @param [in] a_dentry An upcase table directory entry. * @param a_fs_file [in, out] Generic file with generic inode structure (TSK_FS_META). * @return TSK_RETVAL_ENUM. */ static TSK_RETVAL_ENUM exfatfs_copy_upcase_table_inode(FATFS_INFO *a_fatfs, FATFS_DENTRY *a_dentry, TSK_FS_FILE *a_fs_file) { EXFATFS_UPCASE_TABLE_DIR_ENTRY *dentry = NULL; assert(a_fatfs != NULL); assert(a_dentry != NULL); assert(a_fs_file != NULL); assert(a_fs_file->meta != NULL); dentry = (EXFATFS_UPCASE_TABLE_DIR_ENTRY*)a_dentry; assert(exfatfs_get_enum_from_type(dentry->entry_type) == EXFATFS_DIR_ENTRY_TYPE_UPCASE_TABLE); strcpy(a_fs_file->meta->name2->name, EXFATFS_UPCASE_TABLE_DENTRY_NAME); /* Set the size of the Up-Case table and the address of its * first cluster. */((TSK_DADDR_T*)a_fs_file->meta->content_ptr)[0] = tsk_getu32(a_fatfs->fs_info.endian, dentry->first_cluster_of_table); a_fs_file->meta->size = tsk_getu64(a_fatfs->fs_info.endian, dentry->table_length_in_bytes); /* There is no FAT chain walk for the upcase table. Do an eager * load instead of a lazy load of its data run. */ if (exfatfs_make_contiguous_data_run(a_fs_file)) { return TSK_ERR; } return TSK_OK; } /** * \internal * Given an inode address, load the corresponding directory entry and test * to see if it's an exFAT file stream directory entry. * * @param a_fatfs [in] Source file system for the directory entries. * @param a_stream_entry_inum [in] The inode address associated with the * supposed file stream entry. * @param a_sector_is_alloc [in] The allocation status of the sector that * contains the supposed file stream entry. * @param a_file_dentry_type [in] The companion file entry type, * i.e., deleted or not. * @param a_dentry [in, out] A directory entry structure. The stream * entry, if found, will be loaded into it. 
* @return 0 on success, 1 on failure, per TSK convention */ static uint8_t exfatfs_load_file_stream_dentry(FATFS_INFO *a_fatfs, TSK_INUM_T a_stream_entry_inum, uint8_t a_sector_is_alloc, EXFATFS_DIR_ENTRY_TYPE a_file_dentry_type, FATFS_DENTRY *a_dentry) { assert(a_fatfs != NULL); assert(fatfs_inum_is_in_range(a_fatfs, a_stream_entry_inum)); assert(a_dentry != NULL); if (fatfs_dentry_load(a_fatfs, a_dentry, a_stream_entry_inum) == 0 && exfatfs_is_dentry(a_fatfs, a_dentry, (FATFS_DATA_UNIT_ALLOC_STATUS_ENUM)a_sector_is_alloc, a_sector_is_alloc)) { /* If the bytes at the specified inode address are a file stream entry * with the same allocation status as the file entry, report success. */ if((exfatfs_get_alloc_status_from_type(a_file_dentry_type) == exfatfs_get_alloc_status_from_type(a_dentry->data[0])) && (exfatfs_get_enum_from_type(a_file_dentry_type) == EXFATFS_DIR_ENTRY_TYPE_FILE) && (exfatfs_get_enum_from_type(a_dentry->data[0]) == EXFATFS_DIR_ENTRY_TYPE_FILE_STREAM)) { return 0; } } memset((void*)a_dentry, 0, sizeof(FATFS_DENTRY)); return 1; } /** * \internal * Given an exFAT directory entry, try to find the corresponding file * stream directory or file name directory entry that follows it and * return the inum. * * @param [in] a_fatfs Source file system for the directory entries. * @param [in] a_current_entry_inum The inode address associated with the current * entry. * @param [in] a_file_dentry The file entry type (only use deleted or not) * @param [in] a_next_dentry_type The type of the dentry we're searching for * @param [out] a_next_inum The inode of the next stream/file name directory entry will be stored here (if found) * @return 0 on success, 1 on failure, per TSK convention */ static uint8_t exfatfs_next_dentry_inum(FATFS_INFO *a_fatfs, TSK_INUM_T a_current_entry_inum, EXFATFS_FILE_DIR_ENTRY *a_file_dentry, EXFATFS_DIR_ENTRY_TYPE_ENUM a_next_dentry_type, TSK_INUM_T * a_next_inum) { int8_t alloc_check_ret_val = 0; uint8_t cluster_is_alloc = 0; TSK_DADDR_T sector = 0; TSK_DADDR_T cluster = 0; TSK_DADDR_T cluster_base_sector = 0; TSK_DADDR_T last_entry_offset = 0; TSK_DADDR_T file_entry_offset = 0; TSK_DADDR_T next_cluster = 0; FATFS_DENTRY temp_dentry; assert(a_fatfs != NULL); assert(fatfs_inum_is_in_range(a_fatfs, a_current_entry_inum)); assert(a_file_dentry != NULL); /* Only look for file stream and file name directory entries */ if((a_next_dentry_type != EXFATFS_DIR_ENTRY_TYPE_FILE_STREAM) && (a_next_dentry_type != EXFATFS_DIR_ENTRY_TYPE_FILE_NAME)){ return FATFS_FAIL; } sector = FATFS_INODE_2_SECT(a_fatfs, a_current_entry_inum); cluster = FATFS_SECT_2_CLUST(a_fatfs, sector); alloc_check_ret_val = exfatfs_is_cluster_alloc(a_fatfs, cluster); if (alloc_check_ret_val != -1) { cluster_is_alloc = (uint8_t)alloc_check_ret_val; } else { return FATFS_FAIL; } /* Check for the most common case first - the file stream/name entry is located * immediately after the specified one. This should always be true for any * in-use file entry in an allocated cluster that is not the last entry in * the cluster. It will also be true if the previous entry is the last entry in * the cluster and the directory that contains the file is not fragmented - * the stream/name entry will simply be the first entry of the next cluster. * Finally, if the previous entry is not in-use and was found in an unallocated * sector, the only viable place to look for the next entry is in the * bytes following the file entry, since there is no FAT chain to * consult. 
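* Directory entries map one-to-one to inode addresses, so the entry immediately following the current one is at the current inode address plus one.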
*/ *a_next_inum = a_current_entry_inum + 1; if (fatfs_inum_is_in_range(a_fatfs, *a_next_inum)) { if(fatfs_dentry_load(a_fatfs, &temp_dentry, *a_next_inum) == 0){ if(a_next_dentry_type == EXFATFS_DIR_ENTRY_TYPE_FILE_STREAM){ if(exfatfs_is_file_stream_dentry(&temp_dentry, a_fatfs)){ return FATFS_OK; } } else if(a_next_dentry_type == EXFATFS_DIR_ENTRY_TYPE_FILE_NAME){ if (exfatfs_is_file_name_dentry(&temp_dentry)){ return FATFS_OK; } } } } /* If the stream/name entry was not found immediately following the file entry * and the cluster is allocated, it is possible that the previous entry was the * last entry of a cluster in a fragmented directory. In this * case, the FAT can be consulted to see if there is a next cluster. If * so, the stream/name entry may be the first entry of that cluster. */ if (cluster_is_alloc) { /* Calculate the byte offset of the last possible directory entry in * the current cluster. */ cluster_base_sector = FATFS_CLUST_2_SECT(a_fatfs, cluster); last_entry_offset = (cluster_base_sector * a_fatfs->ssize) + (a_fatfs->csize * a_fatfs->ssize) - sizeof(FATFS_DENTRY); /* Get the byte offset of the file entry. Note that FATFS_INODE_2_OFF * gives the offset relative to start of a sector. */ file_entry_offset = (sector * a_fatfs->ssize) + FATFS_INODE_2_OFF(a_fatfs, a_current_entry_inum); if (file_entry_offset == last_entry_offset) { /* The file entry is the last in its cluster. Look up the next * cluster. */ if ((fatfs_getFAT(a_fatfs, cluster, &next_cluster) == 0) && (next_cluster != 0)) { /* Found the next cluster in the FAT, so get its first sector * and the inode address of the first entry of the sector. */ cluster_base_sector = FATFS_CLUST_2_SECT(a_fatfs, next_cluster); *a_next_inum = FATFS_SECT_2_INODE(a_fatfs, cluster_base_sector); if (fatfs_inum_is_in_range(a_fatfs, *a_next_inum)) { if(fatfs_dentry_load(a_fatfs, &temp_dentry, *a_next_inum) == 0){ if(a_next_dentry_type == EXFATFS_DIR_ENTRY_TYPE_FILE_STREAM){ if(exfatfs_is_file_stream_dentry(&temp_dentry, a_fatfs)){ return FATFS_OK; } } else if(a_next_dentry_type == EXFATFS_DIR_ENTRY_TYPE_FILE_NAME){ if (exfatfs_is_file_name_dentry(&temp_dentry)){ return FATFS_OK; } } } } } } } /* Did not find the file stream/name entry. */ return FATFS_FAIL; } /** * \internal * Use a a file and a file stream directory entry corresponding to the exFAT * equivalent of an inode to populate the TSK_FS_META object of a TSK_FS_FILE * object. * * @param a_fatfs [in] Source file system for the directory entries. * @param [in] a_inum Address of the inode. * @param [in] a_dentry A file directory entry. * @param [in] a_is_alloc Allocation status of the sector that contains the * file directory entry. * @param a_fs_file [in, out] Generic file with generic inode structure (TSK_FS_META). * @return TSK_RETVAL_ENUM. 
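 *
 * For orientation: an exFAT file is described on disk by a short run of
 * consecutive 32-byte directory entries, which this function walks in order
 * (a sketch of the layout, not an exhaustive statement of the format):
 *
 *     [ file entry ][ stream extension entry ][ name entry ] ... [ name entry ]
 *       attributes,   data length, first         up to 15 UTF-16 characters
 *       timestamps    cluster, name length       of the file name apiece
 *
 * The file entry's secondary_entries_count field gives the number of entries
 * that follow it, which is why the name-entry loop below runs up to that count.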
*/ static TSK_RETVAL_ENUM exfatfs_copy_file_inode(FATFS_INFO *a_fatfs, TSK_INUM_T a_inum, FATFS_DENTRY *a_file_dentry, uint8_t a_is_alloc, TSK_FS_FILE *a_fs_file) { TSK_FS_INFO *fs = NULL; TSK_FS_META *fs_meta = NULL; EXFATFS_FILE_DIR_ENTRY *file_dentry = (EXFATFS_FILE_DIR_ENTRY*)a_file_dentry; EXFATFS_FILE_STREAM_DIR_ENTRY stream_dentry; uint32_t mode = 0; TSK_INUM_T stream_inum; TSK_INUM_T name_inum; TSK_INUM_T prev_inum; uint8_t name_chars_written; int name_index; uint8_t chars_to_copy; FATFS_DENTRY temp_dentry; char utf16_name[512]; assert(a_fatfs != NULL); assert(a_file_dentry != NULL); assert(a_fs_file != NULL); assert(a_fs_file->meta != NULL); assert(exfatfs_get_enum_from_type(file_dentry->entry_type) == EXFATFS_DIR_ENTRY_TYPE_FILE); fs = &(a_fatfs->fs_info); fs_meta = a_fs_file->meta; /* Determine whether the file is a regular file or directory. */ if (file_dentry->attrs[0] & FATFS_ATTR_DIRECTORY) { fs_meta->type = TSK_FS_META_TYPE_DIR; } else { fs_meta->type = TSK_FS_META_TYPE_REG; } /* Add mode flags corresponding to file attribute flags. */ mode = fs_meta->mode; if ((file_dentry->attrs[0] & FATFS_ATTR_READONLY) == 0) { mode |= (TSK_FS_META_MODE_IRUSR | TSK_FS_META_MODE_IRGRP | TSK_FS_META_MODE_IROTH); } if ((file_dentry->attrs[0] & FATFS_ATTR_HIDDEN) == 0) { mode |= (TSK_FS_META_MODE_IWUSR | TSK_FS_META_MODE_IWGRP | TSK_FS_META_MODE_IWOTH); } fs_meta->mode = (TSK_FS_META_MODE_ENUM)mode; /* There is no notion of links in exFAT, just deleted or not deleted. * If the file is not deleted, treat this as having one link. */ fs_meta->nlink = (exfatfs_get_alloc_status_from_type(file_dentry->entry_type) == 0) ? 0 : 1; /* Copy the last modified time, converted to UNIX date format. */ if (FATFS_ISDATE(tsk_getu16(fs->endian, file_dentry->modified_date))) { fs_meta->mtime = fatfs_dos_2_unix_time(tsk_getu16(fs->endian, file_dentry->modified_date), tsk_getu16(fs->endian, file_dentry->modified_time), file_dentry->modified_time_tenths_of_sec); fs_meta->mtime_nano = fatfs_dos_2_nanosec(file_dentry->modified_time_tenths_of_sec); } else { fs_meta->mtime = 0; fs_meta->mtime_nano = 0; } /* Copy the last accessed time, converted to UNIX date format. */ if (FATFS_ISDATE(tsk_getu16(fs->endian, file_dentry->accessed_date))) { fs_meta->atime = fatfs_dos_2_unix_time(tsk_getu16(fs->endian, file_dentry->accessed_date), tsk_getu16(fs->endian, file_dentry->accessed_time), 0); } else { fs_meta->atime = 0; } fs_meta->atime_nano = 0; /* exFAT does not have a last changed time. */ fs_meta->ctime = 0; fs_meta->ctime_nano = 0; /* Copy the created time, converted to UNIX date format. */ if (FATFS_ISDATE(tsk_getu16(fs->endian, file_dentry->created_date))) { fs_meta->crtime = fatfs_dos_2_unix_time(tsk_getu16(fs->endian, file_dentry->created_date), tsk_getu16(fs->endian, file_dentry->created_time), file_dentry->created_time_tenths_of_sec); fs_meta->crtime_nano = fatfs_dos_2_nanosec(file_dentry->created_time_tenths_of_sec); } else { fs_meta->crtime = 0; fs_meta->crtime_nano = 0; } /* Attempt to load the file stream entry that goes with this file entry. * If not successful, at least the file entry meta data will be returned. */ if(exfatfs_next_dentry_inum(a_fatfs, a_inum, file_dentry, EXFATFS_DIR_ENTRY_TYPE_FILE_STREAM, &stream_inum)){ return TSK_OK; } if (exfatfs_load_file_stream_dentry(a_fatfs, stream_inum, a_is_alloc, file_dentry->entry_type, (FATFS_DENTRY *)(&stream_dentry)) ) { return TSK_OK; } /* Set the size of the file and the address of its first cluster. 
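 *
 * A note on the convention relied on here (sketched for illustration): the
 * generic metadata object's content buffer -- FATFS_FILE_CONTENT_LEN bytes,
 * allocated in exfatfs_inode_copy_init() -- holds only the starting cluster,
 *
 *     ((TSK_DADDR_T*)fs_meta->content_ptr)[0]   <- first cluster of the file
 *     fs_meta->size                             <- byte length from the stream entry
 *
 * and the actual data runs are built later, either on demand when the file
 * content is read or eagerly via exfatfs_make_contiguous_data_run() below.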
*/ ((TSK_DADDR_T*)a_fs_file->meta->content_ptr)[0] = tsk_getu32(a_fatfs->fs_info.endian, stream_dentry.first_cluster_addr); fs_meta->size = tsk_getu64(fs->endian, stream_dentry.data_length); /* Set the allocation status using both the allocation status of the * sector that contains the directory entries and the entry type * settings - essentially a "belt and suspenders" check. */ if ((a_is_alloc) && (exfatfs_get_alloc_status_from_type(file_dentry->entry_type) == 1) && (exfatfs_get_alloc_status_from_type(stream_dentry.entry_type) == 1)) { a_fs_file->meta->flags = TSK_FS_META_FLAG_ALLOC; /* If the FAT chain bit of the secondary flags of the stream entry is set, * the file is not fragmented and there is no FAT chain to walk. If the * file is not deleted, do an eager load instead of a lazy load of its * data run. */ if ((stream_dentry.flags & EXFATFS_INVALID_FAT_CHAIN_MASK) && (exfatfs_make_contiguous_data_run(a_fs_file))) { return TSK_ERR; } } else { a_fs_file->meta->flags = TSK_FS_META_FLAG_UNALLOC; } /* Attempt to load the file name entry(entries) that go with this file entry * First copy all UTF16 data into a single buffer * If not successful, return what we have to this point with no error */ memset(utf16_name, 0, sizeof(utf16_name)); name_chars_written = 0; prev_inum = stream_inum; for(name_index = 1; name_index < file_dentry->secondary_entries_count;name_index++){ if(exfatfs_next_dentry_inum(a_fatfs, prev_inum, file_dentry, EXFATFS_DIR_ENTRY_TYPE_FILE_NAME, &name_inum)){ /* Try to save what we've got (if we found at least one file name dentry) */ if(name_index > 1){ if(fatfs_utf16_inode_str_2_utf8(a_fatfs, (UTF16 *)utf16_name, sizeof(utf16_name), (UTF8*)a_fs_file->meta->name2->name, sizeof(a_fs_file->meta->name2->name), a_inum, "file name (partial)") != TSKconversionOK){ return TSK_OK; /* Don't want to disregard valid data read earlier */ } } return TSK_OK; } fatfs_dentry_load(a_fatfs, &temp_dentry, name_inum); if(stream_dentry.file_name_length * 2 - name_chars_written > 30){ chars_to_copy = 30; } else{ chars_to_copy = stream_dentry.file_name_length * 2 - name_chars_written; } memcpy(utf16_name + name_chars_written, &(temp_dentry.data[2]), chars_to_copy); prev_inum = name_inum; name_chars_written += chars_to_copy; } /* Copy the file name segment. */ if(fatfs_utf16_inode_str_2_utf8(a_fatfs, (UTF16 *)utf16_name, sizeof(utf16_name), (UTF8*)a_fs_file->meta->name2->name, sizeof(a_fs_file->meta->name2->name), a_inum, "file name") != TSKconversionOK){ return TSK_OK; /* Don't want to disregard valid data read earlier */ } return TSK_OK; } /** * \internal * Use a file name directory entry corresponding to the exFAT equivalent of * an inode to populate the TSK_FS_META object of a TSK_FS_FILE object. * * @param [in] a_fatfs Source file system for the directory entry. * @param [in] a_inum Address of the inode. * @param [in] a_is_alloc Allocation status of the sector that contains the * inode. * @param [in] a_dentry A file name directory entry. * @param a_fs_file Generic file with generic inode structure (TSK_FS_META). * @return TSK_RETVAL_ENUM. 
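 *
 * A file name entry is 32 bytes: a type byte, a flags byte, and then up to
 * 15 UTF-16 code units (30 bytes) of the name, so long names span several
 * consecutive name entries.  A rough sketch of the copy performed both here
 * and in the name-assembly loop above (the variable names are illustrative):
 *
 *     remaining = stream->file_name_length * 2 - bytes_copied;
 *     chunk     = (remaining > 30) ? 30 : remaining;
 *     memcpy(utf16_name + bytes_copied, &name_dentry.data[2], chunk);
 *     bytes_copied += chunk;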
*/ static TSK_RETVAL_ENUM exfatfs_copy_file_name_inode(FATFS_INFO *a_fatfs, TSK_INUM_T a_inum, FATFS_DENTRY *a_dentry, uint8_t a_is_alloc, TSK_FS_FILE *a_fs_file) { EXFATFS_FILE_NAME_DIR_ENTRY *dentry = NULL; assert(a_fatfs != NULL); assert(fatfs_inum_is_in_range(a_fatfs, a_inum)); assert(a_dentry != NULL); assert(a_fs_file != NULL); assert(a_fs_file->meta != NULL); dentry = (EXFATFS_FILE_NAME_DIR_ENTRY*)a_dentry; assert(exfatfs_get_enum_from_type(dentry->entry_type) == EXFATFS_DIR_ENTRY_TYPE_FILE_NAME); /* Set the allocation status using both the allocation status of the * sector that contains the directory entries and the entry type * settings - essentially a "belt and suspenders" check. */ if ((a_is_alloc) && (exfatfs_get_alloc_status_from_type(dentry->entry_type) == 1)) { a_fs_file->meta->flags = TSK_FS_META_FLAG_ALLOC; } else { a_fs_file->meta->flags = TSK_FS_META_FLAG_UNALLOC; } /* Copy the file name segment. */ if (fatfs_utf16_inode_str_2_utf8(a_fatfs, (UTF16*)dentry->utf16_name_chars, EXFATFS_MAX_FILE_NAME_SEGMENT_LENGTH, (UTF8*)a_fs_file->meta->name2->name, sizeof(a_fs_file->meta->name2->name), a_inum, "file name segment") != TSKconversionOK) { return TSK_COR; } return TSK_OK; } /** * \internal * Initialize the members of a TSK_FS_META object before copying the contents * of an an inode consisting of one or more raw exFAT directry entries into it. * * @param [in] a_fatfs Source file system for the directory entries. * @param [in] a_inum Address of the inode. * @param [in] a_is_alloc Allocation status of the sector that contains the * inode. * @param [in, out] a_fs_file Generic file with generic inode structure to * initialize. * @return 0 on success, 1 on failure, per TSK convention */ static uint8_t exfatfs_inode_copy_init(FATFS_INFO *a_fatfs, TSK_INUM_T a_inum, uint8_t a_is_alloc, TSK_FS_FILE *a_fs_file) { TSK_FS_META *fs_meta = NULL; assert(a_fatfs != NULL); assert(fatfs_inum_is_in_range(a_fatfs, a_inum)); assert(a_fs_file != NULL); assert(a_fs_file->meta != NULL); fs_meta = a_fs_file->meta; fs_meta->addr = a_inum; /* Set the allocation status based on the cluster allocation status. File * entry set entries may change this. */ a_fs_file->meta->flags = a_is_alloc ? TSK_FS_META_FLAG_ALLOC : TSK_FS_META_FLAG_UNALLOC; /* As for FATXX, make regular file the default type. */ fs_meta->type = TSK_FS_META_TYPE_REG; /* As for FATXX, mark everything as executable. */ fs_meta->mode = (TSK_FS_META_MODE_ENUM)(TSK_FS_META_MODE_IXUSR | TSK_FS_META_MODE_IXGRP | TSK_FS_META_MODE_IXOTH); /* There is no notion of links in exFAT, just deleted or not deleted. * With not deleted being equivalent to having one link, set nlink to 1 * here so that it will be set for static things like the allocation * bitmap. The code for file inodes can reset or unset it appropriately. */ fs_meta->nlink = 1; /* Initialize size to zero. The code for particular inode types will * fill in another value, if appropriate. */ fs_meta->size = 0; /* Default values for time stamp metadata. The code for file inodes will * fill in actual time stamp data. */ fs_meta->mtime = 0; fs_meta->mtime_nano = 0; fs_meta->atime = 0; fs_meta->atime_nano = 0; fs_meta->ctime = 0; fs_meta->ctime_nano = 0; fs_meta->crtime = 0; fs_meta->crtime_nano = 0; /* Metadata that does not exist in exFAT. */ fs_meta->uid = 0; fs_meta->gid = 0; fs_meta->seq = 0; /* Allocate space for a name. 
*/ if (fs_meta->name2 == NULL) { if ((fs_meta->name2 = (TSK_FS_META_NAME_LIST*)tsk_malloc(sizeof(TSK_FS_META_NAME_LIST))) == NULL) { return 1; } fs_meta->name2->next = NULL; } fs_meta->name2->name[0] = '\0'; /* Allocate space for saving the cluster address of the first cluster * of file inodes, including allocation bitmaps and upcase tables. */ if (fs_meta->content_len < FATFS_FILE_CONTENT_LEN) { if ((fs_meta = tsk_fs_meta_realloc(fs_meta, FATFS_FILE_CONTENT_LEN)) == NULL) { return 1; } } /* Mark the generic attribute list as not in use (in the generic file model * attributes are containers for data or metadata). Population of this * stuff is done on demand (lazy look up). */ fs_meta->attr_state = TSK_FS_META_ATTR_EMPTY; if (fs_meta->attr) { tsk_fs_attrlist_markunused(fs_meta->attr); } return 0; } /** * \internal * Use one or more directory entries corresponding to the exFAT equivalent of * an inode to populate the TSK_FS_META object of a TSK_FS_FILE object. * * @param [in] a_fatfs Source file system for the directory entries. * @param [in] a_inum Address of the inode. * @param [in] a_dentry A directory entry. * @param [in] a_is_alloc Allocation status of the inode. * @param [in, out] a_fs_file Generic file object with a generic inode * metadata structure. * @return TSK_RETVAL_ENUM. */ TSK_RETVAL_ENUM exfatfs_dinode_copy(FATFS_INFO *a_fatfs, TSK_INUM_T a_inum, FATFS_DENTRY *a_dentry, uint8_t a_is_alloc, TSK_FS_FILE *a_fs_file) { const char *func_name = "exfatfs_dinode_copy"; assert(a_fatfs != NULL); assert(fatfs_inum_is_in_range(a_fatfs, a_inum)); assert(a_dentry != NULL); assert(a_fs_file != NULL); assert(a_fs_file->meta != NULL); assert(a_fs_file->fs_info != NULL); tsk_error_reset(); if (fatfs_ptr_arg_is_null(a_fatfs, "a_fatfs", func_name) || fatfs_ptr_arg_is_null(a_dentry, "a_dentry", func_name) || fatfs_ptr_arg_is_null(a_fs_file, "a_fs_file", func_name) || fatfs_ptr_arg_is_null(a_fs_file->meta, "a_fs_file->meta", func_name) || fatfs_ptr_arg_is_null(a_fs_file->fs_info, "a_fs_file->fs_info", func_name) || !fatfs_inum_arg_is_in_range(a_fatfs, a_inum, func_name)) { return TSK_ERR; } if (exfatfs_inode_copy_init(a_fatfs, a_inum, a_is_alloc, a_fs_file)) { return TSK_ERR; } switch (exfatfs_get_enum_from_type(a_dentry->data[0])) { case EXFATFS_DIR_ENTRY_TYPE_VOLUME_LABEL: return exfatfs_copy_vol_label_inode(a_fatfs, a_inum, a_dentry, a_fs_file); case EXFATFS_DIR_ENTRY_TYPE_VOLUME_GUID: strcpy(a_fs_file->meta->name2->name, EXFATFS_VOLUME_GUID_DENTRY_NAME); return TSK_OK; case EXFATFS_DIR_ENTRY_TYPE_ALLOC_BITMAP: return exfatfs_copy_alloc_bitmap_inode(a_fatfs, a_dentry, a_fs_file); case EXFATFS_DIR_ENTRY_TYPE_UPCASE_TABLE: return exfatfs_copy_upcase_table_inode(a_fatfs, a_dentry, a_fs_file); case EXFATFS_DIR_ENTRY_TYPE_TEXFAT: strcpy(a_fs_file->meta->name2->name, EXFATFS_TEX_FAT_DENTRY_NAME); return TSK_OK; case EXFATFS_DIR_ENTRY_TYPE_ACT: strcpy(a_fs_file->meta->name2->name, EXFATFS_ACT_DENTRY_NAME); return TSK_OK; case EXFATFS_DIR_ENTRY_TYPE_FILE: return exfatfs_copy_file_inode(a_fatfs, a_inum, a_dentry, a_is_alloc, a_fs_file); case EXFATFS_DIR_ENTRY_TYPE_FILE_NAME: return exfatfs_copy_file_name_inode(a_fatfs, a_inum, a_dentry, a_is_alloc, a_fs_file); case EXFATFS_DIR_ENTRY_TYPE_FILE_STREAM: default: /* Stream entries are copied in tandem with the corresponding file entry. */ return TSK_ERR; } return TSK_OK; } /** * \internal * Given an exFAT file directory entry, try to find the corresponding file * stream directory entry. * * @param [in] a_fatfs Source file system for the directory entries. 
* @param [in] a_file_entry_inum The inode address associated with the file * entry. * @param [in] a_sector The address of the sector where the file entry was * found. * @param [in] a_sector_is_alloc The allocation status of the sector. * @param [in] a_file_dentry_type The file entry type, deleted or not. * @param [in, out] The stream entry, if found, will be loaded into the * this generic directory entry structure. * @return 0 on success, 1 on failure, per TSK convention */ uint8_t exfatfs_find_file_stream_dentry(FATFS_INFO *a_fatfs, TSK_INUM_T a_file_entry_inum, TSK_DADDR_T a_sector, uint8_t a_sector_is_alloc, EXFATFS_DIR_ENTRY_TYPE a_file_dentry_type, FATFS_DENTRY *a_stream_dentry) { const char *func_name = "exfatfs_find_file_stream_dentry"; TSK_INUM_T stream_entry_inum = 0; TSK_DADDR_T cluster = 0; TSK_DADDR_T cluster_base_sector = 0; TSK_DADDR_T last_entry_offset = 0; TSK_DADDR_T file_entry_offset = 0; TSK_DADDR_T next_cluster = 0; assert(a_fatfs != NULL); assert(fatfs_inum_is_in_range(a_fatfs, a_file_entry_inum)); assert(a_stream_dentry != NULL); tsk_error_reset(); if (fatfs_ptr_arg_is_null(a_fatfs, "a_fatfs", func_name) || fatfs_ptr_arg_is_null(a_stream_dentry, "a_stream_dentry", func_name) || !fatfs_inum_arg_is_in_range(a_fatfs, a_file_entry_inum, func_name)) { return FATFS_FAIL; } /* Check for the most common case first - the file stream entry is located * immediately after the file entry. This should always be true for any * in-use file entry in an allocated cluster that is not the last entry in * the cluster. It will also be true if the file entry is the last entry in * the cluster and the directory that contains the file is not fragmented - * the stream entry will simply be the first entry of the next cluster. * Finally, if the file entry is not in-use and was found in an unallocated * sector, the only viable place to look for the stream entry is in the * bytes following the file entry, since there is no FAT chain to * consult. */ stream_entry_inum = a_file_entry_inum + 1; if (fatfs_inum_is_in_range(a_fatfs, stream_entry_inum)) { if (exfatfs_load_file_stream_dentry(a_fatfs, stream_entry_inum, a_sector_is_alloc, a_file_dentry_type, a_stream_dentry) == 0) { /* Found it. */ return FATFS_OK; } } /* If the stream entry was not found immediately following the file entry * and the cluster is allocated, it is possible that the file entry was the * last entry of a cluster in a fragmented directory. In this * case, the FAT can be consulted to see if there is a next cluster. If * so, the stream entry may be the first entry of that cluster. */ if (a_sector_is_alloc) { /* Calculate the byte offset of the last possible directory entry in * the current cluster. */ cluster = FATFS_SECT_2_CLUST(a_fatfs, a_sector); cluster_base_sector = FATFS_CLUST_2_SECT(a_fatfs, cluster); last_entry_offset = (cluster_base_sector * a_fatfs->ssize) + (a_fatfs->csize * a_fatfs->ssize) - sizeof(FATFS_DENTRY); /* Get the byte offset of the file entry. Note that FATFS_INODE_2_OFF * gices the offset relative to start of a sector. */ file_entry_offset = (a_sector * a_fatfs->ssize) + FATFS_INODE_2_OFF(a_fatfs, a_file_entry_inum); if (file_entry_offset == last_entry_offset) { /* The file entry is the last in its cluster. Look up the next * cluster. */ if ((fatfs_getFAT(a_fatfs, cluster, &next_cluster) == 0) && (next_cluster != 0)) { /* Found the next cluster in the FAT, so get its first sector * and the inode address of the first entry of the sector. 
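 *
 * A worked example of the "last slot" test above, with illustrative geometry
 * (512-byte sectors, 8 sectors per cluster, sizeof(FATFS_DENTRY) taken as 32):
 *
 *     last_entry_offset = cluster_base_sector * 512 + (8 * 512) - 32;
 *     file_entry_offset = a_sector * 512
 *                           + FATFS_INODE_2_OFF(a_fatfs, a_file_entry_inum);
 *
 * Equal offsets mean the file entry occupies the final 32-byte slot of its
 * cluster, so the stream entry can only live in the next cluster of the
 * directory -- which is why the FAT was just consulted.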
*/ cluster_base_sector = FATFS_CLUST_2_SECT(a_fatfs, next_cluster); stream_entry_inum = FATFS_SECT_2_INODE(a_fatfs, cluster_base_sector); if (fatfs_inum_is_in_range(a_fatfs, stream_entry_inum)) { if (exfatfs_load_file_stream_dentry(a_fatfs, stream_entry_inum, a_sector_is_alloc, a_file_dentry_type, a_stream_dentry) == 0) { /* Found it. */ return FATFS_OK; } } } } } /* Did not find the file stream entry. */ return FATFS_FAIL; } /** * \internal * Read in the bytes from an exFAT file system that correspond to the exFAT * equivalent of an inode and use them to populate the TSK_FS_META object of * a TSK_FS_FILE object. * * @param [in] a_fatfs Source file system for the directory entries. * @param [in, out] a_fs_file The TSK_FS_FILE object. * @param [in] a_inum Inode address. * @return 0 on success, 1 on failure, per TSK convention */ uint8_t exfatfs_inode_lookup(FATFS_INFO *a_fatfs, TSK_FS_FILE *a_fs_file, TSK_INUM_T a_inum) { const char *func_name = "exfatfs_inode_lookup"; TSK_DADDR_T sector = 0; int8_t sect_is_alloc = 0; FATFS_DENTRY dentry; //FATFS_DENTRY stream_dentry; //FATFS_DENTRY *secondary_dentry = NULL; EXFATFS_DIR_ENTRY_TYPE dentry_type = EXFATFS_DIR_ENTRY_TYPE_NONE; TSK_RETVAL_ENUM copy_result = TSK_OK; tsk_error_reset(); if (fatfs_ptr_arg_is_null(a_fatfs, "a_fatfs", func_name) || fatfs_ptr_arg_is_null(a_fs_file, "a_fs_file", func_name) || fatfs_ptr_arg_is_null(a_fs_file->meta, "a_fs_file->meta", func_name) || fatfs_ptr_arg_is_null(a_fs_file->fs_info, "a_fs_file->fs_info", func_name) || !fatfs_inum_arg_is_in_range(a_fatfs, a_inum, func_name)) { return 1; } /* Map the inode address to a sector. */ sector = FATFS_INODE_2_SECT(a_fatfs, a_inum); if (sector > a_fatfs->fs_info.last_block) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_INODE_NUM); tsk_error_set_errstr("%s: Inode %" PRIuINUM " in sector too big for image: %" PRIuDADDR, func_name, a_inum, sector); return 1; } /* Check the allocation status of the sector. This status will be used * not only as meta data to be reported, but also as a way to choose * between the basic or in-depth version of the tests (below) that * determine whether or not the bytes corrresponding to the inode are * likely to be a directory entry. Note that in other places in the code * information about whether or not the sector that contains the inode is * part of a folder is used to select the test. Here, that information is * not available, so the test here is less reliable and may result in some * false positives. */ sect_is_alloc = fatfs_is_sectalloc(a_fatfs, sector); if (sect_is_alloc == -1) { return 1; } /* Load the bytes at the specified inode address. */ memset((void*)&dentry, 0, sizeof(FATFS_DENTRY)); if (fatfs_dentry_load(a_fatfs, &dentry, a_inum)) { return 1; } /* Try typing the bytes as a directory entry.*/ if (exfatfs_is_dentry(a_fatfs, &dentry, (FATFS_DATA_UNIT_ALLOC_STATUS_ENUM)sect_is_alloc, sect_is_alloc)) { dentry_type = (EXFATFS_DIR_ENTRY_TYPE)dentry.data[0]; } else { return 1; } /* For the purposes of inode lookup, the file and file stream entries * that begin a file entry set are mapped to a single inode. Thus, * file stream entries are not treated as independent inodes. */ if (exfatfs_get_enum_from_type(dentry_type) == EXFATFS_DIR_ENTRY_TYPE_FILE_STREAM) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_INODE_NUM); tsk_error_set_errstr("%s: %" PRIuINUM " is not an inode", func_name, a_inum); return 1; } /* Populate the TSK_FS_META object of the TSK_FS_FILE object. 
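 *
 * For reference while reading the dispatch in exfatfs_dinode_copy(), called
 * just below: the entry type byte read above packs two values, which the
 * helpers at the end of this file pull apart --
 *
 *     in_use = dentry.data[0] >> 7;     high bit: 1 = in use, 0 = deleted
 *     code   = dentry.data[0] & 0x7f;   low 7 bits: EXFATFS_DIR_ENTRY_TYPE_* code
 *
 * so a deleted entry keeps its type code and can still be classified.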
*/ copy_result = exfatfs_dinode_copy(a_fatfs, a_inum, &dentry, sect_is_alloc, a_fs_file); if (copy_result == TSK_OK) { return 0; } else if (copy_result == TSK_COR) { /* There was a Unicode conversion error on a string, but the rest * of the inode meta data is probably o.k., so report the error (if in * verbose mode), but also report a successful look up.*/ if (tsk_verbose) { tsk_error_print(stderr); } tsk_error_reset(); return 0; } else { return 1; } } /** * \internal * Outputs file attributes for an exFAT directory entry/inode in * human-readable form. * * @param [in] a_fatfs Source file system for the directory entry. * @param [in] a_inum Inode address associated with the directory entry. * @param [in] a_hFile Handle of a file to which to write. * @return 0 on success, 1 on failure, per TSK convention */ uint8_t exfatfs_istat_attr_flags(FATFS_INFO *a_fatfs, TSK_INUM_T a_inum, FILE *a_hFile) { const char *func_name = "exfatfs_istat_attr_flags"; FATFS_DENTRY dentry; EXFATFS_FILE_DIR_ENTRY *file_dentry = NULL; uint16_t attr_flags = 0; assert(a_fatfs != NULL); assert(fatfs_inum_is_in_range(a_fatfs, a_inum)); assert(a_hFile != NULL); tsk_error_reset(); if (fatfs_ptr_arg_is_null(a_fatfs, "a_fatfs", func_name) || fatfs_ptr_arg_is_null(a_hFile, "a_hFile", func_name) || !fatfs_inum_arg_is_in_range(a_fatfs, a_inum, func_name)) { return FATFS_FAIL; } /* Load the bytes at the given inode address. */ if (fatfs_dentry_load(a_fatfs, (FATFS_DENTRY*)(&dentry), a_inum)) { return FATFS_FAIL; } /* Print the attributes. */ switch (exfatfs_get_enum_from_type(dentry.data[0])) { case EXFATFS_DIR_ENTRY_TYPE_VOLUME_LABEL: tsk_fprintf(a_hFile, "Volume Label\n"); break; case EXFATFS_DIR_ENTRY_TYPE_VOLUME_GUID: tsk_fprintf(a_hFile, "Volume GUID\n"); break; case EXFATFS_DIR_ENTRY_TYPE_ALLOC_BITMAP: tsk_fprintf(a_hFile, "Allocation Bitmap\n"); break; case EXFATFS_DIR_ENTRY_TYPE_UPCASE_TABLE: tsk_fprintf(a_hFile, "Up-Case Table\n"); break; case EXFATFS_DIR_ENTRY_TYPE_TEXFAT: tsk_fprintf(a_hFile, "TexFAT\n"); break; case EXFATFS_DIR_ENTRY_TYPE_ACT: tsk_fprintf(a_hFile, "Access Control Table\n"); break; case EXFATFS_DIR_ENTRY_TYPE_FILE: file_dentry = (EXFATFS_FILE_DIR_ENTRY*)&dentry; attr_flags = tsk_getu16(a_fatfs->fs_info.endian, file_dentry->attrs); if (attr_flags & FATFS_ATTR_DIRECTORY) { tsk_fprintf(a_hFile, "Directory"); } else { tsk_fprintf(a_hFile, "File"); } if (attr_flags & FATFS_ATTR_READONLY) { tsk_fprintf(a_hFile, ", Read Only"); } if (attr_flags & FATFS_ATTR_HIDDEN) { tsk_fprintf(a_hFile, ", Hidden"); } if (attr_flags & FATFS_ATTR_SYSTEM) { tsk_fprintf(a_hFile, ", System"); } if (attr_flags & FATFS_ATTR_ARCHIVE) { tsk_fprintf(a_hFile, ", Archive"); } tsk_fprintf(a_hFile, "\n"); break; case EXFATFS_DIR_ENTRY_TYPE_FILE_STREAM: tsk_fprintf(a_hFile, "File Stream\n"); break; case EXFATFS_DIR_ENTRY_TYPE_FILE_NAME: tsk_fprintf(a_hFile, "File Name\n"); break; default: tsk_error_set_errno(TSK_ERR_FS_INODE_NUM); tsk_error_set_errstr("%s: Inode %" PRIuINUM " is not an exFAT directory entry", func_name, a_inum); return FATFS_FAIL; } return FATFS_OK; } /** * \internal * Determine whether an exFAT directory entry should be included in an inode * walk. * * @param [in] a_fatfs Source file system for the directory entry. * @param [in] a_inum Inode address associated with the directory entry. * @param [in] a_dentry A directory entry buffer. * @param [in] a_selection_flags The inode selection falgs for the inode walk. * @param [in] a_cluster_is_alloc The allocation status of the cluster that * contains the directory entry. 
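 *
 * A condensed sketch of the decision made in the body (no additional checks
 * are implied by this summary):
 *
 *     skip if the type code is FILE_STREAM or FILE_NAME   -- folded into the
 *                                                            owning FILE entry
 *     flag = (a_cluster_is_alloc && (type >> 7)) ? ALLOC : UNALLOC;
 *     skip if (a_selection_flags & flag) == 0;
 *     skip if the entry is unallocated, only orphans were requested, and the
 *             inode was already reached by name during the directory walk;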
* @return 1 if the entry should be skipped, 0 otherwise */ uint8_t exfatfs_inode_walk_should_skip_dentry(FATFS_INFO *a_fatfs, TSK_INUM_T a_inum, FATFS_DENTRY *a_dentry, unsigned int a_selection_flags, int a_cluster_is_alloc) { const char *func_name = "exfatfs_inode_walk_should_skip_dentry"; unsigned int dentry_flags = 0; assert(a_fatfs != NULL); assert(fatfs_inum_is_in_range(a_fatfs, a_inum)); assert(a_dentry != NULL); tsk_error_reset(); if (fatfs_ptr_arg_is_null(a_fatfs, "a_fatfs", func_name) || !fatfs_inum_arg_is_in_range(a_fatfs, a_inum, func_name) || fatfs_ptr_arg_is_null(a_dentry, "a_dentry", func_name)) { return 1; } /* Skip file stream and file name entries. For inode walks, these entries * are handled with the file entry with which they are associated in a file * entry set. */ if (exfatfs_get_enum_from_type(a_dentry->data[0]) == EXFATFS_DIR_ENTRY_TYPE_FILE_STREAM || exfatfs_get_enum_from_type(a_dentry->data[0]) == EXFATFS_DIR_ENTRY_TYPE_FILE_NAME) { return 1; } /* Assign an allocation status to the entry. Allocation status is * determined first by the allocation status of the cluster that contains * the entry, then by the allocated status of the entry. */ if ((a_cluster_is_alloc) && (exfatfs_get_alloc_status_from_type(a_dentry->data[0]) == 1)) { dentry_flags = TSK_FS_META_FLAG_ALLOC; } else { dentry_flags = TSK_FS_META_FLAG_UNALLOC; } /* Does the allocation status of the entry match that of the inode * selection flags? */ if ((a_selection_flags & dentry_flags) != dentry_flags) { return 1; } /* If the inode selection flags call for only processing orphan files, * check whether or not this inode is in list of non-orphan files found via * name walk. */ if ((dentry_flags & TSK_FS_META_FLAG_UNALLOC) && (a_selection_flags & TSK_FS_META_FLAG_ORPHAN) && (tsk_fs_dir_find_inum_named(&(a_fatfs->fs_info), a_inum))) { return 1; } return 0; } /** * \internal * Returns the allocation status of a dir entry given its * dir entry type byte (stored in the high bit) * * @param [in] a_dir_entry_type Entry type byte * @return 0 if unused, 1 if used */ uint8_t exfatfs_get_alloc_status_from_type(EXFATFS_DIR_ENTRY_TYPE a_dir_entry_type) { return (a_dir_entry_type >> 7); } /** * \internal * Returns the directory type enum from the given entry type byte * (Comes from the low 7 bits) * * @param [in] a_dir_entry_type * @return Enum for this type */ EXFATFS_DIR_ENTRY_TYPE_ENUM exfatfs_get_enum_from_type(EXFATFS_DIR_ENTRY_TYPE a_dir_entry_type){ return ((EXFATFS_DIR_ENTRY_TYPE_ENUM)(a_dir_entry_type & 0x7f)); } sleuthkit-4.2.0/tsk/fs/ext2fs.c000644 000765 000024 00000345144 12576320700 017130 0ustar00carrierstaff000000 000000 /* ** The Sleuth Kit ** ** Brian Carrier [carrier sleuthkit [dot] org] ** Copyright (c) 2006-2011 Brian Carrier, Basis Technology. All Rights reserved ** Copyright (c) 2003-2005 Brian Carrier. All rights reserved ** ** TASK ** Copyright (c) 2002-2003 Brian Carrier, @stake Inc. All rights reserved ** ** Copyright (c) 1997,1998,1999, International Business Machines ** Corporation and others. All Rights Reserved. */ /** *\file ext2fs.c * Contains the internal TSK ext2/ext3/ext4 file system functions. */ /* TCT * LICENSE * This software is distributed under the IBM Public License. * AUTHOR(S) * Wietse Venema * IBM T.J. Watson Research * P.O. 
Box 704 * Yorktown Heights, NY 10598, USA --*/ #include "tsk_fs_i.h" #include "tsk_ext2fs.h" #include "tsk/base/crc.h" #include //#define Ext4_DBG 1 //#define EXT4_CHECKSUMS 1 #ifdef Ext4_DBG static uint8_t debug_print_buf(unsigned char *buf, int len) { int i = 0; for (i = 0; i < len; i++) { if (i % 8 == 0) printf("%08X:\t", i); printf("0x%02X ", buf[i]); if ((i + 1) % 8 == 0) printf("\n"); } printf("\n"); return 0; } #endif /** \internal test_root - tests to see if a is power of b Adapted from E2fsprogs sparse.c Super blocks are only in block groups that are powers of 3,5, and 7 @param a the number being investigated @param b the root @return 1 if a is a power of b, otherwise 0 */ static uint8_t test_root(uint32_t a, uint32_t b) { if (a == 0) return 1; while (1) { if (a == 1) return 1; if (a % b) return 0; a = a / b; } } /** \internal ext2fs_bg_has_super - wrapper around test_root Adapted from E2fsprogs sparse.c @return 1 if block group has superblock, otherwise 0 */ static uint32_t ext2fs_bg_has_super(uint32_t feature_ro_compat, uint32_t group_block) { if (!(feature_ro_compat & EXT2FS_FEATURE_RO_COMPAT_SPARSE_SUPER)) return 1; if (test_root(group_block, 3) || (test_root(group_block, 5)) || test_root(group_block, 7)) return 1; return 0; } /* ext2fs_group_load - load block group descriptor into cache * * Note: This routine assumes &ext2fs->lock is locked by the caller. * * return 1 on error and 0 on success * * */ static uint8_t ext2fs_group_load(EXT2FS_INFO * ext2fs, EXT2_GRPNUM_T grp_num) { void *gd; TSK_OFF_T offs; ssize_t cnt; TSK_FS_INFO *fs = (TSK_FS_INFO *) ext2fs; int gd_size = tsk_getu16(fs->endian, ext2fs->fs->s_desc_size); ext4fs_gd *ext4_gd = NULL; if (!gd_size) { if (fs->ftype == TSK_FS_TYPE_EXT4 && EXT2FS_HAS_INCOMPAT_FEATURE(fs, ext2fs->fs, EXT2FS_FEATURE_INCOMPAT_64BIT)) { gd_size = sizeof(ext4fs_gd); } else { gd_size = sizeof(ext2fs_gd); } } /* * Sanity check */ if (grp_num < 0 || grp_num >= ext2fs->groups_count) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("ext2fs_group_load: invalid cylinder group number: %" PRI_EXT2GRP "", grp_num); return 1; } if (ext2fs->grp_buf == NULL) { if (fs->ftype == TSK_FS_TYPE_EXT4) { ext2fs->ext4_grp_buf = (ext4fs_gd *) tsk_malloc(gd_size); } else { ext2fs->grp_buf = (ext2fs_gd *) tsk_malloc(gd_size); } if (ext2fs->grp_buf == NULL && ext2fs->ext4_grp_buf == NULL) { return 1; } } else if (ext2fs->grp_num == grp_num) { return 0; } gd = ext2fs->grp_buf; /* * We're not reading group descriptors often, so it is OK to do small * reads instead of cacheing group descriptors in a large buffer. 
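 *
 * The read is tiny by design.  For illustration, with the descriptor size
 * worked out above (s_desc_size when the superblock provides it, otherwise
 * the compiled-in struct size -- conventionally 32 bytes for ext2fs_gd and
 * 64 bytes for ext4fs_gd with the 64BIT feature), the descriptor of group N
 * sits at a fixed offset in the group descriptor table:
 *
 *     offs = groups_offset + N * gd_size;
 *     tsk_fs_read(&ext2fs->fs_info, offs, (char *) gd, gd_size);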
*/ offs = ext2fs->groups_offset + grp_num * gd_size; if (fs->ftype == TSK_FS_TYPE_EXT4) gd = ext2fs->ext4_grp_buf; cnt = tsk_fs_read(&ext2fs->fs_info, offs, (char *) gd, gd_size); /*DEBUG*/ #ifdef Ext4_DBG debug_print_buf((char *) ext2fs->ext4_grp_buf, gd_size); #endif if (cnt != gd_size) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2("ext2fs_group_load: Group descriptor %" PRI_EXT2GRP " at %" PRIuOFF, grp_num, offs); return 1; } /* Perform a sanity check on the data to make sure offsets are in range */ if (fs->ftype == TSK_FS_TYPE_EXT4) { ext2fs->grp_buf = (ext2fs_gd *) ext2fs->ext4_grp_buf; gd = ext2fs->ext4_grp_buf; ext4_gd = ext2fs->ext4_grp_buf; if (EXT2FS_HAS_INCOMPAT_FEATURE(fs, ext2fs->fs, EXT2FS_FEATURE_INCOMPAT_64BIT)) { #ifdef Ext4_DBG printf("DEBUG hi: %04X\n", *ext4_gd->bg_block_bitmap_hi); printf("DEBUG lo: %08X\n", *ext4_gd->bg_block_bitmap_lo); printf("block_bitmap :%012lX\n", ext4_getu48(fs->endian, ext4_gd->bg_block_bitmap_hi, ext4_gd->bg_block_bitmap_lo)); #endif } else { #ifdef Ext4_DBG printf("block_bitmap:%08lX\n", tsk_getu32(fs->endian, ext4_gd->bg_block_bitmap_lo)); printf("stored checksum: %X\n", tsk_getu16(fs->endian, ext4_gd->bg_checksum)); #endif } } else { ext2fs_gd *ext2_gd = (ext2fs_gd *) gd; if ((tsk_getu32(fs->endian, ext2_gd->bg_block_bitmap) > fs->last_block) || (tsk_getu32(fs->endian, ext2_gd->bg_inode_bitmap) > fs->last_block) || (tsk_getu32(fs->endian, ext2_gd->bg_inode_table) > fs->last_block)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_CORRUPT); tsk_error_set_errstr("extXfs_group_load: Group %" PRI_EXT2GRP " descriptor block locations too large at byte offset %" PRIuDADDR, grp_num, offs); return 1; } } ext2fs->grp_num = grp_num; if (fs->ftype == TSK_FS_TYPE_EXT4) { } else { ext2fs_gd *ext2_gd = (ext2fs_gd *) gd; if (tsk_verbose) { TSK_FS_INFO *fs = (TSK_FS_INFO *) & ext2fs->fs_info; tsk_fprintf(stderr, "\tgroup %" PRI_EXT2GRP ": %" PRIu16 "/%" PRIu16 " free blocks/inodes\n", grp_num, tsk_getu16(fs->endian, ext2_gd->bg_free_blocks_count), tsk_getu16(fs->endian, ext2_gd->bg_free_inodes_count)); } } return 0; } #ifdef EXT4_CHECKSUMS /** * ext4_group_desc_csum - Calculates the checksum of a group descriptor * Ported from linux/fs/ext4/super.c * @ext4_sb: pointer to ext2 super block structure * @block_group: group descriptor number * @gdp: pointer to group descriptor to calculate checksum for * returns the checksum value */ //Temporarily removing CRC16 code until non-GPL version implemented //static uint16_t //ext4_group_desc_csum(ext2fs_sb *ext4_sb, uint32_t block_group, // struct ext4fs_gd *gdp) //{ // uint16_t crc = 0; // if (*ext4_sb->s_feature_ro_compat & EXT2FS_FEATURE_RO_COMPAT_GDT_CSUM) // { // int offset = offsetof(struct ext4fs_gd, bg_checksum); // uint32_t le_group = tsk_getu32(TSK_LIT_ENDIAN, &block_group); // crc = crc16(~0, ext4_sb->s_uuid, sizeof(ext4_sb->s_uuid)); // crc = crc16(crc, (uint8_t *)&le_group, sizeof(le_group)); // crc = crc16(crc, (uint8_t *)gdp, offset); // offset += sizeof(gdp->bg_checksum); /* skip checksum */ // /* for checksum of struct ext4_group_desc do the rest...*/ // if ((*ext4_sb->s_feature_incompat & // EXT2FS_FEATURE_INCOMPAT_64BIT) && // offset < *ext4_sb->s_desc_size) // { // crc = crc16(crc, (uint8_t *)gdp + offset, *ext4_sb->s_desc_size - offset); // } // } // // return crc; //} static cm_t CRC16_CTX; static uint16_t ext4_group_desc_csum(ext2fs_sb * ext4_sb, uint32_t block_group, struct ext4fs_gd *gdp) { CRC16_CTX.cm_width = 16; CRC16_CTX.cm_poly = 
0x8005L; CRC16_CTX.cm_init = 0xFFFFL; CRC16_CTX.cm_refin = TRUE; CRC16_CTX.cm_refot = TRUE; CRC16_CTX.cm_xorot = 0x0000L; cm_ini(&CRC16_CTX); if (*ext4_sb->s_feature_ro_compat & EXT2FS_FEATURE_RO_COMPAT_GDT_CSUM) { int offset = offsetof(struct ext4fs_gd, bg_checksum); uint32_t le_group = tsk_getu32(TSK_LIT_ENDIAN, &block_group); crc16(&CRC16_CTX, ext4_sb->s_uuid, sizeof(ext4_sb->s_uuid)); crc16(&CRC16_CTX, (uint8_t *) & le_group, sizeof(le_group)); crc16(&CRC16_CTX, (uint8_t *) gdp, offset); offset += sizeof(gdp->bg_checksum); /* skip checksum */ /* for checksum of struct ext4_group_desc do the rest... */ if ((*ext4_sb->s_feature_incompat & EXT2FS_FEATURE_INCOMPAT_64BIT) && offset < *ext4_sb->s_desc_size) { crc16(&CRC16_CTX, (uint8_t *) gdp + offset, *ext4_sb->s_desc_size - offset); } } return cm_crc(&CRC16_CTX); } #endif /* ext2fs_print_map - print a bitmap */ static void ext2fs_print_map(uint8_t * map, int len) { int i; for (i = 0; i < len; i++) { if (i > 0 && i % 10 == 0) putc('|', stderr); putc(isset(map, i) ? '1' : '.', stderr); } putc('\n', stderr); } #define INODE_TABLE_SIZE(ext2fs) \ ((tsk_getu32(ext2fs->fs_info.endian, ext2fs->fs->s_inodes_per_group) * ext2fs->inode_size - 1) \ / ext2fs->fs_info.block_size + 1) /* ext2fs_bmap_load - look up block bitmap & load into cache * * Note: This routine assumes &ext2fs->lock is locked by the caller. * * return 1 on error and 0 on success * */ static uint8_t ext2fs_bmap_load(EXT2FS_INFO * ext2fs, EXT2_GRPNUM_T grp_num) { TSK_FS_INFO *fs = (TSK_FS_INFO *) & ext2fs->fs_info; ssize_t cnt; /* * Look up the group descriptor info. The load will do the sanity check. */ if (ext2fs_group_load(ext2fs, grp_num)) { return 1; } if (ext2fs->bmap_buf == NULL) { if ((ext2fs->bmap_buf = (uint8_t *) tsk_malloc(fs->block_size)) == NULL) { return 1; } } else if (ext2fs->bmap_grp_num == grp_num) { return 0; } if (tsk_verbose) { TSK_DADDR_T dbase = 0; /* first block number in group */ TSK_DADDR_T dmin = 0; /* first block after inodes */ dbase = ext2_cgbase_lcl(fs, ext2fs->fs, grp_num); dmin = tsk_getu32(fs->endian, ext2fs->grp_buf->bg_inode_table) + INODE_TABLE_SIZE(ext2fs); tsk_fprintf(stderr, "ext2_bmap_load: loading group %" PRI_EXT2GRP " dbase %" PRIuDADDR " bmap +%" PRIuDADDR " imap +%" PRIuDADDR " inos +%" PRIuDADDR "..%" PRIuDADDR "\n", grp_num, dbase, (TSK_DADDR_T) tsk_getu32(fs->endian, ext2fs->grp_buf->bg_block_bitmap) - dbase, (TSK_DADDR_T) tsk_getu32(fs->endian, ext2fs->grp_buf->bg_inode_bitmap) - dbase, (TSK_DADDR_T) tsk_getu32(fs->endian, ext2fs->grp_buf->bg_inode_table) - dbase, dmin - 1 - dbase); } /* * Look up the block allocation bitmap. 
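 *
 * One file system block of bitmap data covers the whole group (one bit per
 * block, s_blocks_per_group bits in all), so the single block read below is
 * enough.  Once cached, a lookup reduces to the form used later in
 * ext2fs_block_getflags():
 *
 *     dbase     = ext2_cgbase_lcl(fs, ext2fs->fs, grp_num);   -- first block of group
 *     allocated = isset(ext2fs->bmap_buf, a_addr - dbase);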
*/ if (tsk_getu32(fs->endian, ext2fs->grp_buf->bg_block_bitmap) > fs->last_block) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_BLK_NUM); tsk_error_set_errstr ("ext2fs_bmap_load: Block too large for image: %" PRIu32 "", tsk_getu32(fs->endian, ext2fs->grp_buf->bg_block_bitmap)); return 1; } cnt = tsk_fs_read(fs, (TSK_DADDR_T) tsk_getu32(fs->endian, ext2fs->grp_buf->bg_block_bitmap) * fs->block_size, (char *) ext2fs->bmap_buf, ext2fs->fs_info.block_size); if (cnt != ext2fs->fs_info.block_size) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2("ext2fs_bmap_load: Bitmap group %" PRI_EXT2GRP " at %" PRIu32, grp_num, tsk_getu32(fs->endian, ext2fs->grp_buf->bg_block_bitmap)); } ext2fs->bmap_grp_num = grp_num; if (tsk_verbose > 1) ext2fs_print_map(ext2fs->bmap_buf, tsk_getu32(fs->endian, ext2fs->fs->s_blocks_per_group)); return 0; } /* ext2fs_imap_load - look up inode bitmap & load into cache * * Note: This routine assumes &ext2fs->lock is locked by the caller. * * return 0 on success and 1 on error * */ static uint8_t ext2fs_imap_load(EXT2FS_INFO * ext2fs, EXT2_GRPNUM_T grp_num) { TSK_FS_INFO *fs = (TSK_FS_INFO *) & ext2fs->fs_info; ssize_t cnt; /* * Look up the group descriptor info. */ if (ext2fs_group_load(ext2fs, grp_num)) { return 1; } /* Allocate the cache buffer and exit if map is already loaded */ if (ext2fs->imap_buf == NULL) { if ((ext2fs->imap_buf = (uint8_t *) tsk_malloc(fs->block_size)) == NULL) { return 1; } } else if (ext2fs->imap_grp_num == grp_num) { return 0; } /* * Look up the inode allocation bitmap. */ if (EXT2FS_HAS_INCOMPAT_FEATURE(fs, ext2fs->fs, EXT2FS_FEATURE_INCOMPAT_64BIT)) { if (ext4_getu64(fs->endian, ext2fs->ext4_grp_buf->bg_block_bitmap_hi, ext2fs->ext4_grp_buf->bg_block_bitmap_lo) > fs->last_block) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_BLK_NUM); tsk_error_set_errstr ("ext2fs_imap_load: Block too large for image: %" PRIu64 "", ext4_getu64(fs->endian, ext2fs->ext4_grp_buf->bg_block_bitmap_hi, ext2fs->ext4_grp_buf->bg_block_bitmap_lo)); } cnt = tsk_fs_read(fs, (TSK_DADDR_T) ext4_getu64(fs->endian, ext2fs->ext4_grp_buf->bg_inode_bitmap_hi, ext2fs->ext4_grp_buf->bg_block_bitmap_lo) * fs->block_size, (char *) ext2fs->imap_buf, ext2fs->fs_info.block_size); if (cnt != ext2fs->fs_info.block_size) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2("ext2fs_imap_load: Inode bitmap %" PRI_EXT2GRP " at %" PRIu32, grp_num, tsk_getu32(fs->endian, ext2fs->grp_buf->bg_inode_bitmap)); } } else { if (tsk_getu32(fs->endian, ext2fs->grp_buf->bg_inode_bitmap) > fs->last_block) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_BLK_NUM); tsk_error_set_errstr ("ext2fs_imap_load: Block too large for image: %" PRIu32 "", tsk_getu32(fs->endian, ext2fs->grp_buf->bg_inode_bitmap)); } cnt = tsk_fs_read(fs, (TSK_DADDR_T) tsk_getu32(fs->endian, ext2fs->grp_buf->bg_inode_bitmap) * fs->block_size, (char *) ext2fs->imap_buf, ext2fs->fs_info.block_size); if (cnt != ext2fs->fs_info.block_size) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2("ext2fs_imap_load: Inode bitmap %" PRI_EXT2GRP " at %" PRIu32, grp_num, tsk_getu32(fs->endian, ext2fs->grp_buf->bg_inode_bitmap)); } } ext2fs->imap_grp_num = grp_num; if (tsk_verbose > 1) ext2fs_print_map(ext2fs->imap_buf, tsk_getu32(fs->endian, ext2fs->fs->s_inodes_per_group)); return 0; } /* ext2fs_dinode_load - look up disk inode & load into ext2fs_inode structure * @param ext2fs A ext2fs file 
system information structure * @param dino_inum Metadata address * @param dino_buf The buffer to store the block in (must be size of ext2fs->inode_size or larger) * * return 1 on error and 0 on success * */ static uint8_t ext2fs_dinode_load(EXT2FS_INFO * ext2fs, TSK_INUM_T dino_inum, ext2fs_inode * dino_buf) { EXT2_GRPNUM_T grp_num; TSK_OFF_T addr; ssize_t cnt; TSK_INUM_T rel_inum; TSK_FS_INFO *fs = (TSK_FS_INFO *) & ext2fs->fs_info; /* * Sanity check. * Use last_num-1 to account for virtual Orphan directory in last_inum. */ if ((dino_inum < fs->first_inum) || (dino_inum > fs->last_inum - 1)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_INODE_NUM); tsk_error_set_errstr("ext2fs_dinode_load: address: %" PRIuINUM, dino_inum); return 1; } if (dino_buf == NULL) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("ext2fs_dinode_load: dino_buf is NULL"); return 1; } /* * Look up the group descriptor for this inode. */ grp_num = (EXT2_GRPNUM_T) ((dino_inum - fs->first_inum) / tsk_getu32(fs->endian, ext2fs->fs->s_inodes_per_group)); /* lock access to grp_buf */ tsk_take_lock(&ext2fs->lock); if (ext2fs_group_load(ext2fs, grp_num)) { tsk_release_lock(&ext2fs->lock); return 1; } /* * Look up the inode table block for this inode. */ rel_inum = (dino_inum - 1) - tsk_getu32(fs->endian, ext2fs->fs->s_inodes_per_group) * grp_num; if (EXT2FS_HAS_INCOMPAT_FEATURE(fs, ext2fs->fs, EXT2FS_FEATURE_INCOMPAT_64BIT) && (tsk_getu16(fs->endian, ext2fs->fs->s_desc_size) >= 64)) { #ifdef Ext4_DBG printf("DEBUG: d_inode_load 64bit gd_size=%d\n", tsk_getu16(fs->endian, ext2fs->fs->s_desc_size)); #endif addr = (TSK_OFF_T) ext4_getu64(fs->endian, ext2fs->ext4_grp_buf->bg_inode_table_hi, ext2fs->ext4_grp_buf->bg_inode_table_lo) * (TSK_OFF_T) fs->block_size + rel_inum * (TSK_OFF_T) ext2fs->inode_size; } else { addr = (TSK_OFF_T) tsk_getu32(fs->endian, ext2fs->grp_buf->bg_inode_table) * (TSK_OFF_T) fs->block_size + rel_inum * (TSK_OFF_T) ext2fs->inode_size; } tsk_release_lock(&ext2fs->lock); cnt = tsk_fs_read(fs, addr, (char *) dino_buf, ext2fs->inode_size); if (cnt != ext2fs->inode_size) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2("ext2fs_dinode_load: Inode %" PRIuINUM " from %" PRIuOFF, dino_inum, addr); return 1; } //DEBUG printf("Inode Size: %d, %d, %d, %d\n", sizeof(ext2fs_inode), *ext2fs->fs->s_inode_size, ext2fs->inode_size, *ext2fs->fs->s_want_extra_isize); //DEBUG debug_print_buf((char *)dino_buf, ext2fs->inode_size); if (tsk_verbose) { tsk_fprintf(stderr, "%" PRIuINUM " m/l/s=%o/%d/%" PRIuOFF " u/g=%d/%d macd=%" PRIu32 "/%" PRIu32 "/%" PRIu32 "/%" PRIu32 "\n", dino_inum, tsk_getu16(fs->endian, dino_buf->i_mode), tsk_getu16(fs->endian, dino_buf->i_nlink), (tsk_getu32(fs->endian, dino_buf->i_size) + (tsk_getu16(fs->endian, dino_buf->i_mode) & EXT2_IN_REG) ? 
(uint64_t) tsk_getu32(fs->endian, dino_buf->i_size_high) << 32 : 0), tsk_getu16(fs->endian, dino_buf->i_uid) + (tsk_getu16(fs->endian, dino_buf->i_uid_high) << 16), tsk_getu16(fs->endian, dino_buf->i_gid) + (tsk_getu16(fs->endian, dino_buf->i_gid_high) << 16), tsk_getu32(fs->endian, dino_buf->i_mtime), tsk_getu32(fs->endian, dino_buf->i_atime), tsk_getu32(fs->endian, dino_buf->i_ctime), tsk_getu32(fs->endian, dino_buf->i_dtime)); } return 0; } /* ext2fs_dinode_copy - copy cached disk inode into generic inode * * returns 1 on error and 0 on success * */ static uint8_t ext2fs_dinode_copy(EXT2FS_INFO * ext2fs, TSK_FS_META * fs_meta, TSK_INUM_T inum, const ext2fs_inode * dino_buf) { int i; TSK_FS_INFO *fs = (TSK_FS_INFO *) & ext2fs->fs_info; ext2fs_sb *sb = ext2fs->fs; EXT2_GRPNUM_T grp_num; TSK_INUM_T ibase = 0; if (dino_buf == NULL) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("ext2fs_dinode_copy: dino_buf is NULL"); return 1; } fs_meta->attr_state = TSK_FS_META_ATTR_EMPTY; if (fs_meta->attr) { tsk_fs_attrlist_markunused(fs_meta->attr); } // set the type switch (tsk_getu16(fs->endian, dino_buf->i_mode) & EXT2_IN_FMT) { case EXT2_IN_REG: fs_meta->type = TSK_FS_META_TYPE_REG; break; case EXT2_IN_DIR: fs_meta->type = TSK_FS_META_TYPE_DIR; break; case EXT2_IN_SOCK: fs_meta->type = TSK_FS_META_TYPE_SOCK; break; case EXT2_IN_LNK: fs_meta->type = TSK_FS_META_TYPE_LNK; break; case EXT2_IN_BLK: fs_meta->type = TSK_FS_META_TYPE_BLK; break; case EXT2_IN_CHR: fs_meta->type = TSK_FS_META_TYPE_CHR; break; case EXT2_IN_FIFO: fs_meta->type = TSK_FS_META_TYPE_FIFO; break; default: fs_meta->type = TSK_FS_META_TYPE_UNDEF; break; } // set the mode fs_meta->mode = 0; if (tsk_getu16(fs->endian, dino_buf->i_mode) & EXT2_IN_ISUID) fs_meta->mode |= TSK_FS_META_MODE_ISUID; if (tsk_getu16(fs->endian, dino_buf->i_mode) & EXT2_IN_ISGID) fs_meta->mode |= TSK_FS_META_MODE_ISGID; if (tsk_getu16(fs->endian, dino_buf->i_mode) & EXT2_IN_ISVTX) fs_meta->mode |= TSK_FS_META_MODE_ISVTX; if (tsk_getu16(fs->endian, dino_buf->i_mode) & EXT2_IN_IRUSR) fs_meta->mode |= TSK_FS_META_MODE_IRUSR; if (tsk_getu16(fs->endian, dino_buf->i_mode) & EXT2_IN_IWUSR) fs_meta->mode |= TSK_FS_META_MODE_IWUSR; if (tsk_getu16(fs->endian, dino_buf->i_mode) & EXT2_IN_IXUSR) fs_meta->mode |= TSK_FS_META_MODE_IXUSR; if (tsk_getu16(fs->endian, dino_buf->i_mode) & EXT2_IN_IRGRP) fs_meta->mode |= TSK_FS_META_MODE_IRGRP; if (tsk_getu16(fs->endian, dino_buf->i_mode) & EXT2_IN_IWGRP) fs_meta->mode |= TSK_FS_META_MODE_IWGRP; if (tsk_getu16(fs->endian, dino_buf->i_mode) & EXT2_IN_IXGRP) fs_meta->mode |= TSK_FS_META_MODE_IXGRP; if (tsk_getu16(fs->endian, dino_buf->i_mode) & EXT2_IN_IROTH) fs_meta->mode |= TSK_FS_META_MODE_IROTH; if (tsk_getu16(fs->endian, dino_buf->i_mode) & EXT2_IN_IWOTH) fs_meta->mode |= TSK_FS_META_MODE_IWOTH; if (tsk_getu16(fs->endian, dino_buf->i_mode) & EXT2_IN_IXOTH) fs_meta->mode |= TSK_FS_META_MODE_IXOTH; fs_meta->nlink = tsk_getu16(fs->endian, dino_buf->i_nlink); fs_meta->size = tsk_getu32(fs->endian, dino_buf->i_size); fs_meta->addr = inum; /* the general size value in the inode is only 32-bits, * but the i_dir_acl value is used for regular files to * hold the upper 32-bits * * The RO_COMPAT_LARGE_FILE flag in the super block will identify * if there are any large files in the file system */ if ((fs_meta->type == TSK_FS_META_TYPE_REG) && (tsk_getu32(fs->endian, sb->s_feature_ro_compat) & EXT2FS_FEATURE_RO_COMPAT_LARGE_FILE)) { fs_meta->size += ((uint64_t) tsk_getu32(fs->endian, dino_buf->i_size_high) << 
32); } fs_meta->uid = tsk_getu16(fs->endian, dino_buf->i_uid) + (tsk_getu16(fs->endian, dino_buf->i_uid_high) << 16); fs_meta->gid = tsk_getu16(fs->endian, dino_buf->i_gid) + (tsk_getu16(fs->endian, dino_buf->i_gid_high) << 16); fs_meta->mtime = tsk_getu32(fs->endian, dino_buf->i_mtime); fs_meta->atime = tsk_getu32(fs->endian, dino_buf->i_atime); fs_meta->ctime = tsk_getu32(fs->endian, dino_buf->i_ctime); fs_meta->time2.ext2.dtime = tsk_getu32(fs->endian, dino_buf->i_dtime); if (fs->ftype == TSK_FS_TYPE_EXT4) { fs_meta->mtime_nano = tsk_getu32(fs->endian, dino_buf->i_mtime_extra) >> 2; fs_meta->atime_nano = tsk_getu32(fs->endian, dino_buf->i_atime_extra) >> 2; fs_meta->ctime_nano = tsk_getu32(fs->endian, dino_buf->i_ctime_extra) >> 2; fs_meta->crtime = tsk_getu32(fs->endian, dino_buf->i_crtime); fs_meta->crtime_nano = tsk_getu32(fs->endian, dino_buf->i_crtime_extra) >> 2; } else { fs_meta->mtime_nano = fs_meta->atime_nano = fs_meta->ctime_nano = fs_meta->crtime = 0; } fs_meta->time2.ext2.dtime_nano = 0; fs_meta->seq = 0; if (fs_meta->link) { free(fs_meta->link); fs_meta->link = NULL; } if (fs_meta->content_len != EXT2FS_FILE_CONTENT_LEN) { if ((fs_meta = tsk_fs_meta_realloc(fs_meta, EXT2FS_FILE_CONTENT_LEN)) == NULL) { return 1; } } if (tsk_getu32(fs->endian, dino_buf->i_flags) & EXT2_IN_EXTENTS) { uint32_t *addr_ptr; fs_meta->content_type = TSK_FS_META_CONTENT_TYPE_EXT4_EXTENTS; /* NOTE TSK_DADDR_T != uint32_t, so lets make sure we use uint32_t */ addr_ptr = (uint32_t *) fs_meta->content_ptr; for (i = 0; i < EXT2FS_NDADDR + EXT2FS_NIADDR; i++) { addr_ptr[i] = tsk_gets32(fs->endian, dino_buf->i_block[i]);; } } else { TSK_DADDR_T *addr_ptr; addr_ptr = (TSK_DADDR_T *) fs_meta->content_ptr; for (i = 0; i < EXT2FS_NDADDR + EXT2FS_NIADDR; i++) addr_ptr[i] = tsk_gets32(fs->endian, dino_buf->i_block[i]); /* set the link string * the size check prevents us from trying to allocate a huge amount of * memory for a bad inode value */ if ((fs_meta->type == TSK_FS_META_TYPE_LNK) && (fs_meta->size < EXT2FS_MAXPATHLEN) && (fs_meta->size >= 0)) { unsigned int count = 0; int i; if ((fs_meta->link = tsk_malloc((size_t) (fs_meta->size + 1))) == NULL) return 1; /* it is located directly in the pointers */ if (fs_meta->size < 4 * (EXT2FS_NDADDR + EXT2FS_NIADDR)) { unsigned int j; for (i = 0; i < (EXT2FS_NDADDR + EXT2FS_NIADDR) && count < fs_meta->size; i++) { char *a_ptr = (char *) &dino_buf->i_block[i]; for (j = 0; j < 4 && count < fs_meta->size; j++) { fs_meta->link[count++] = a_ptr[j]; } } fs_meta->link[count] = '\0'; /* clear the content pointer data to avoid the prog from reading them */ memset(fs_meta->content_ptr, 0, fs_meta->content_len); } /* it is in blocks */ else { TSK_FS_INFO *fs = (TSK_FS_INFO *) & ext2fs->fs_info; char *data_buf = NULL; char *a_ptr = fs_meta->link; TSK_DADDR_T *addr_ptr = fs_meta->content_ptr;; if ((data_buf = tsk_malloc(fs->block_size)) == NULL) { return 1; } /* we only need to do the direct blocks due to the limit * on path length */ for (i = 0; i < EXT2FS_NDADDR && count < fs_meta->size; i++) { ssize_t cnt; int read_count = (fs_meta->size - count < fs->block_size) ? 
(int) (fs_meta->size - count) : (int) (fs->block_size); cnt = tsk_fs_read_block(fs, addr_ptr[i], data_buf, fs->block_size); if (cnt != fs->block_size) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2 ("ext2fs_dinode_copy: symlink destination from %" PRIuDADDR, addr_ptr[i]); free(data_buf); return 1; } memcpy(a_ptr, data_buf, read_count); count += read_count; a_ptr = (char *) ((uintptr_t) a_ptr + count); } /* terminate the string */ *a_ptr = '\0'; free(data_buf); } /* Clean up name */ i = 0; while (fs_meta->link[i] != '\0') { if (TSK_IS_CNTRL(fs_meta->link[i])) fs_meta->link[i] = '^'; i++; } } } /* Fill in the flags value */ grp_num = (EXT2_GRPNUM_T) ((inum - fs->first_inum) / tsk_getu32(fs->endian, ext2fs->fs->s_inodes_per_group)); tsk_take_lock(&ext2fs->lock); if (ext2fs_imap_load(ext2fs, grp_num)) { tsk_release_lock(&ext2fs->lock); return 1; } ibase = grp_num * tsk_getu32(fs->endian, ext2fs->fs->s_inodes_per_group) + fs->first_inum; /* * Apply the allocated/unallocated restriction. */ fs_meta->flags = (isset(ext2fs->imap_buf, inum - ibase) ? TSK_FS_META_FLAG_ALLOC : TSK_FS_META_FLAG_UNALLOC); tsk_release_lock(&ext2fs->lock); /* * Apply the used/unused restriction. */ fs_meta->flags |= (fs_meta->ctime ? TSK_FS_META_FLAG_USED : TSK_FS_META_FLAG_UNUSED); return 0; } /* ext2fs_inode_lookup - lookup inode, external interface * * Returns 1 on error and 0 on success * */ static uint8_t ext2fs_inode_lookup(TSK_FS_INFO * fs, TSK_FS_FILE * a_fs_file, TSK_INUM_T inum) { EXT2FS_INFO *ext2fs = (EXT2FS_INFO *) fs; ext2fs_inode *dino_buf = NULL; unsigned int size = 0; if (a_fs_file == NULL) { tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("ext2fs_inode_lookup: fs_file is NULL"); return 1; } if (a_fs_file->meta == NULL) { if ((a_fs_file->meta = tsk_fs_meta_alloc(EXT2FS_FILE_CONTENT_LEN)) == NULL) return 1; } else { tsk_fs_meta_reset(a_fs_file->meta); } // see if they are looking for the special "orphans" directory if (inum == TSK_FS_ORPHANDIR_INUM(fs)) { if (tsk_fs_dir_make_orphan_dir_meta(fs, a_fs_file->meta)) return 1; else return 0; } size = ext2fs->inode_size > sizeof(ext2fs_inode) ? ext2fs->inode_size : sizeof(ext2fs_inode); if ((dino_buf = (ext2fs_inode *) tsk_malloc(size)) == NULL) { return 1; } if (ext2fs_dinode_load(ext2fs, inum, dino_buf)) { free(dino_buf); return 1; } if (ext2fs_dinode_copy(ext2fs, a_fs_file->meta, inum, dino_buf)) { free(dino_buf); return 1; } if (dino_buf != NULL) free((char *) dino_buf); return 0; } /* ext2fs_inode_walk - inode iterator * * flags used: TSK_FS_META_FLAG_USED, TSK_FS_META_FLAG_UNUSED, * TSK_FS_META_FLAG_ALLOC, TSK_FS_META_FLAG_UNALLOC, TSK_FS_META_FLAG_ORPHAN * * Return 1 on error and 0 on success */ uint8_t ext2fs_inode_walk(TSK_FS_INFO * fs, TSK_INUM_T start_inum, TSK_INUM_T end_inum, TSK_FS_META_FLAG_ENUM flags, TSK_FS_META_WALK_CB a_action, void *a_ptr) { char *myname = "extXfs_inode_walk"; EXT2FS_INFO *ext2fs = (EXT2FS_INFO *) fs; EXT2_GRPNUM_T grp_num; TSK_INUM_T inum; TSK_INUM_T end_inum_tmp; TSK_INUM_T ibase = 0; TSK_FS_FILE *fs_file; int myflags; ext2fs_inode *dino_buf = NULL; unsigned int size = 0; // clean up any error messages that are lying around tsk_error_reset(); /* * Sanity checks. 
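 *
 * One detail worth keeping in mind while reading the checks and loop bounds
 * below: fs->last_inum is the virtual "orphan files" directory that TSK
 * synthesizes, not an on-disk inode, so the walk stops one inode short of it,
 *
 *     end_inum_tmp = (end_inum == TSK_FS_ORPHANDIR_INUM(fs))
 *                        ? end_inum - 1 : end_inum;
 *
 * and the orphan directory is emitted separately after the main loop.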
*/ if (start_inum < fs->first_inum || start_inum > fs->last_inum) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); tsk_error_set_errstr("%s: start inode: %" PRIuINUM "", myname, start_inum); return 1; } if (end_inum < fs->first_inum || end_inum > fs->last_inum || end_inum < start_inum) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); tsk_error_set_errstr("%s: end inode: %" PRIuINUM "", myname, end_inum); return 1; } /* If ORPHAN is wanted, then make sure that the flags are correct */ if (flags & TSK_FS_META_FLAG_ORPHAN) { flags |= TSK_FS_META_FLAG_UNALLOC; flags &= ~TSK_FS_META_FLAG_ALLOC; flags |= TSK_FS_META_FLAG_USED; flags &= ~TSK_FS_META_FLAG_UNUSED; } else { if (((flags & TSK_FS_META_FLAG_ALLOC) == 0) && ((flags & TSK_FS_META_FLAG_UNALLOC) == 0)) { flags |= (TSK_FS_META_FLAG_ALLOC | TSK_FS_META_FLAG_UNALLOC); } /* If neither of the USED or UNUSED flags are set, then set them * both */ if (((flags & TSK_FS_META_FLAG_USED) == 0) && ((flags & TSK_FS_META_FLAG_UNUSED) == 0)) { flags |= (TSK_FS_META_FLAG_USED | TSK_FS_META_FLAG_UNUSED); } } /* If we are looking for orphan files and have not yet filled * in the list of unalloc inodes that are pointed to, then fill * in the list */ if ((flags & TSK_FS_META_FLAG_ORPHAN)) { if (tsk_fs_dir_load_inum_named(fs) != TSK_OK) { tsk_error_errstr2_concat ("- ext2fs_inode_walk: identifying inodes allocated by file names"); return 1; } } if ((fs_file = tsk_fs_file_alloc(fs)) == NULL) return 1; if ((fs_file->meta = tsk_fs_meta_alloc(EXT2FS_FILE_CONTENT_LEN)) == NULL) return 1; // we need to handle fs->last_inum specially because it is for the // virtual ORPHANS directory. Handle it outside of the loop. if (end_inum == TSK_FS_ORPHANDIR_INUM(fs)) end_inum_tmp = end_inum - 1; else end_inum_tmp = end_inum; /* * Iterate. */ size = ext2fs->inode_size > sizeof(ext2fs_inode) ? ext2fs->inode_size : sizeof(ext2fs_inode); if ((dino_buf = (ext2fs_inode *) tsk_malloc(size)) == NULL) { return 1; } for (inum = start_inum; inum <= end_inum_tmp; inum++) { int retval; /* * Be sure to use the proper group descriptor data. XXX Linux inodes * start at 1, as in Fortran. */ grp_num = (EXT2_GRPNUM_T) ((inum - 1) / tsk_getu32(fs->endian, ext2fs->fs->s_inodes_per_group)); /* lock access to imap_buf */ tsk_take_lock(&ext2fs->lock); if (ext2fs_imap_load(ext2fs, grp_num)) { tsk_release_lock(&ext2fs->lock); free(dino_buf); return 1; } ibase = grp_num * tsk_getu32(fs->endian, ext2fs->fs->s_inodes_per_group) + 1; /* * Apply the allocated/unallocated restriction. */ myflags = (isset(ext2fs->imap_buf, inum - ibase) ? TSK_FS_META_FLAG_ALLOC : TSK_FS_META_FLAG_UNALLOC); tsk_release_lock(&ext2fs->lock); if ((flags & myflags) != myflags) continue; if (ext2fs_dinode_load(ext2fs, inum, dino_buf)) { tsk_fs_file_close(fs_file); free(dino_buf); return 1; } /* * Apply the used/unused restriction. */ myflags |= (tsk_getu32(fs->endian, dino_buf->i_ctime) ? TSK_FS_META_FLAG_USED : TSK_FS_META_FLAG_UNUSED); if ((flags & myflags) != myflags) continue; /* If we want only orphans, then check if this * inode is in the seen list */ if ((myflags & TSK_FS_META_FLAG_UNALLOC) && (flags & TSK_FS_META_FLAG_ORPHAN) && (tsk_fs_dir_find_inum_named(fs, inum))) { continue; } /* * Fill in a file system-independent inode structure and pass control * to the application. 
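 *
 * A worked example of the group arithmetic used just above, assuming an
 * illustrative 8192 inodes per group and inode 12289:
 *
 *     grp_num = (12289 - 1) / 8192 = 1;
 *     ibase   = 1 * 8192 + 1      = 8193;
 *     bit     = 12289 - 8193      = 4096;    -- tested with isset(imap_buf, bit)
 *
 * so the inode's allocation state is bit 4096 of group 1's inode bitmap.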
*/ if (ext2fs_dinode_copy(ext2fs, fs_file->meta, inum, dino_buf)) { tsk_fs_meta_close(fs_file->meta); free(dino_buf); return 1; } retval = a_action(fs_file, a_ptr); if (retval == TSK_WALK_STOP) { tsk_fs_file_close(fs_file); free(dino_buf); return 0; } else if (retval == TSK_WALK_ERROR) { tsk_fs_file_close(fs_file); free(dino_buf); return 1; } } // handle the virtual orphans folder if they asked for it if ((end_inum == TSK_FS_ORPHANDIR_INUM(fs)) && (flags & TSK_FS_META_FLAG_ALLOC) && (flags & TSK_FS_META_FLAG_USED)) { int retval; if (tsk_fs_dir_make_orphan_dir_meta(fs, fs_file->meta)) { tsk_fs_file_close(fs_file); free(dino_buf); return 1; } /* call action */ retval = a_action(fs_file, a_ptr); if (retval == TSK_WALK_STOP) { tsk_fs_file_close(fs_file); free(dino_buf); return 0; } else if (retval == TSK_WALK_ERROR) { tsk_fs_file_close(fs_file); free(dino_buf); return 1; } } /* * Cleanup. */ tsk_fs_file_close(fs_file); if (dino_buf != NULL) free((char *) dino_buf); return 0; } TSK_FS_BLOCK_FLAG_ENUM ext2fs_block_getflags(TSK_FS_INFO * a_fs, TSK_DADDR_T a_addr) { EXT2FS_INFO *ext2fs = (EXT2FS_INFO *) a_fs; int flags; EXT2_GRPNUM_T grp_num; TSK_DADDR_T dbase = 0; /* first block number in group */ TSK_DADDR_T dmin = 0; /* first block after inodes */ // these blocks are not described in the group descriptors // sparse if (a_addr == 0) return TSK_FS_BLOCK_FLAG_CONT | TSK_FS_BLOCK_FLAG_ALLOC; if (a_addr < ext2fs->first_data_block) return TSK_FS_BLOCK_FLAG_META | TSK_FS_BLOCK_FLAG_ALLOC; grp_num = ext2_dtog_lcl(a_fs, ext2fs->fs, a_addr); /* lock access to bmap_buf */ tsk_take_lock(&ext2fs->lock); /* Lookup bitmap if not loaded */ if (ext2fs_bmap_load(ext2fs, grp_num)) { tsk_release_lock(&ext2fs->lock); return 0; } /* * Be sure to use the right group descriptor information. XXX There * appears to be an off-by-one discrepancy between bitmap offsets and * disk block numbers. * * Addendum: this offset is controlled by the super block's * s_first_data_block field. */ dbase = ext2_cgbase_lcl(a_fs, ext2fs->fs, grp_num); dmin = tsk_getu32(a_fs->endian, ext2fs->grp_buf->bg_inode_table) + INODE_TABLE_SIZE(ext2fs); /* * Identify meta blocks * (any blocks that can't be allocated for file/directory data). * * XXX With sparse superblock placement, most block groups have the * block and inode bitmaps where one would otherwise find the backup * superblock and the backup group descriptor blocks. The inode * blocks are in the normal place, though. This leaves little gaps * between the bitmaps and the inode table - and ext2fs will use * those blocks for file/directory data blocks. So we must properly * account for those gaps between meta blocks. * * Thus, superblocks and group descriptor blocks are sometimes overlaid * by bitmap blocks. This means that one can still assume that the * locations of superblocks and group descriptor blocks are reserved. * They just happen to be reserved for something else :-) */ flags = (isset(ext2fs->bmap_buf, a_addr - dbase) ? 
TSK_FS_BLOCK_FLAG_ALLOC : TSK_FS_BLOCK_FLAG_UNALLOC); if ((a_addr >= dbase && a_addr < tsk_getu32(a_fs->endian, ext2fs->grp_buf->bg_block_bitmap)) || (a_addr == tsk_getu32(a_fs->endian, ext2fs->grp_buf->bg_block_bitmap)) || (a_addr == tsk_getu32(a_fs->endian, ext2fs->grp_buf->bg_inode_bitmap)) || (a_addr >= tsk_getu32(a_fs->endian, ext2fs->grp_buf->bg_inode_table) && a_addr < dmin)) flags |= TSK_FS_BLOCK_FLAG_META; else flags |= TSK_FS_BLOCK_FLAG_CONT; tsk_release_lock(&ext2fs->lock); return flags; } /* ext2fs_block_walk - block iterator * * flags: TSK_FS_BLOCK_FLAG_ALLOC, TSK_FS_BLOCK_FLAG_UNALLOC, TSK_FS_BLOCK_FLAG_CONT, * TSK_FS_BLOCK_FLAG_META * * Return 1 on error and 0 on success */ uint8_t ext2fs_block_walk(TSK_FS_INFO * a_fs, TSK_DADDR_T a_start_blk, TSK_DADDR_T a_end_blk, TSK_FS_BLOCK_WALK_FLAG_ENUM a_flags, TSK_FS_BLOCK_WALK_CB a_action, void *a_ptr) { char *myname = "extXfs_block_walk"; TSK_FS_BLOCK *fs_block; TSK_DADDR_T addr; // clean up any error messages that are lying around tsk_error_reset(); /* * Sanity checks. */ if (a_start_blk < a_fs->first_block || a_start_blk > a_fs->last_block) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); tsk_error_set_errstr("%s: start block: %" PRIuDADDR, myname, a_start_blk); return 1; } if (a_end_blk < a_fs->first_block || a_end_blk > a_fs->last_block || a_end_blk < a_start_blk) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); tsk_error_set_errstr("%s: end block: %" PRIuDADDR, myname, a_end_blk); return 1; } /* Sanity check on a_flags -- make sure at least one ALLOC is set */ if (((a_flags & TSK_FS_BLOCK_WALK_FLAG_ALLOC) == 0) && ((a_flags & TSK_FS_BLOCK_WALK_FLAG_UNALLOC) == 0)) { a_flags |= (TSK_FS_BLOCK_WALK_FLAG_ALLOC | TSK_FS_BLOCK_WALK_FLAG_UNALLOC); } if (((a_flags & TSK_FS_BLOCK_WALK_FLAG_META) == 0) && ((a_flags & TSK_FS_BLOCK_WALK_FLAG_CONT) == 0)) { a_flags |= (TSK_FS_BLOCK_WALK_FLAG_CONT | TSK_FS_BLOCK_WALK_FLAG_META); } if ((fs_block = tsk_fs_block_alloc(a_fs)) == NULL) { return 1; } /* * Iterate. This is not as tricky as it could be, because the free list * map covers the entire disk partition, including blocks occupied by * group descriptor blocks, bit maps, and other non-data blocks. */ for (addr = a_start_blk; addr <= a_end_blk; addr++) { int retval; int myflags; myflags = ext2fs_block_getflags(a_fs, addr); // test if we should call the callback with this one if ((myflags & TSK_FS_BLOCK_FLAG_META) && (!(a_flags & TSK_FS_BLOCK_WALK_FLAG_META))) continue; else if ((myflags & TSK_FS_BLOCK_FLAG_CONT) && (!(a_flags & TSK_FS_BLOCK_WALK_FLAG_CONT))) continue; else if ((myflags & TSK_FS_BLOCK_FLAG_ALLOC) && (!(a_flags & TSK_FS_BLOCK_WALK_FLAG_ALLOC))) continue; else if ((myflags & TSK_FS_BLOCK_FLAG_UNALLOC) && (!(a_flags & TSK_FS_BLOCK_WALK_FLAG_UNALLOC))) continue; if (a_flags & TSK_FS_BLOCK_WALK_FLAG_AONLY) myflags |= TSK_FS_BLOCK_FLAG_AONLY; if (tsk_fs_block_get_flag(a_fs, fs_block, addr, myflags) == NULL) { tsk_error_set_errstr2("ext2fs_block_walk: block %" PRIuDADDR, addr); tsk_fs_block_free(fs_block); return 1; } retval = a_action(fs_block, a_ptr); if (retval == TSK_WALK_STOP) { break; } else if (retval == TSK_WALK_ERROR) { tsk_fs_block_free(fs_block); return 1; } } /* * Cleanup. 
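 * Release the reusable TSK_FS_BLOCK that was allocated for this walk.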
*/ tsk_fs_block_free(fs_block); return 0; } static uint8_t ext2fs_fscheck(TSK_FS_INFO * fs, FILE * hFile) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNSUPFUNC); tsk_error_set_errstr("fscheck not implemented yet for Ext3"); return 1; } /** \internal * Add a single extent -- that is, a single data ran -- to the file data attribute. * @return 0 on success, 1 on error. */ static TSK_OFF_T ext2fs_make_data_run_extent(TSK_FS_INFO * fs_info, TSK_FS_ATTR * fs_attr, ext2fs_extent * extent) { TSK_FS_ATTR_RUN *data_run; data_run = tsk_fs_attr_run_alloc(); if (data_run == NULL) { return 1; } data_run->offset = tsk_getu32(fs_info->endian, extent->ee_block); data_run->addr = (((uint32_t) tsk_getu16(fs_info->endian, extent->ee_start_hi)) << 16) | tsk_getu32(fs_info->endian, extent->ee_start_lo); data_run->len = tsk_getu16(fs_info->endian, extent->ee_len); // save the run if (tsk_fs_attr_add_run(fs_info, fs_attr, data_run)) { return 1; } return 0; } /** \internal * Given a block that contains an extent node (which starts with extent_header), * walk it, and add everything encountered to the appropriate attributes. * @return 0 on success, 1 on error. */ static TSK_OFF_T ext2fs_make_data_run_extent_index(TSK_FS_INFO * fs_info, TSK_FS_ATTR * fs_attr, TSK_FS_ATTR * fs_attr_extent, TSK_DADDR_T idx_block) { ext2fs_extent_header *header; TSK_FS_ATTR_RUN *data_run; uint8_t *buf; ssize_t cnt; unsigned int i; /* first, read the block specified by the parameter */ int fs_blocksize = fs_info->block_size; if ((buf = (uint8_t *) tsk_malloc(fs_blocksize)) == NULL) { return 1; } cnt = tsk_fs_read_block(fs_info, idx_block, (char *) buf, fs_blocksize); if (cnt != fs_blocksize) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr("ext2fs_make_data_run_extent_index: Block %" PRIuDADDR, idx_block); return 1; } header = (ext2fs_extent_header *) buf; /* add it to the extent attribute */ if (tsk_getu16(fs_info->endian, header->eh_magic) != 0xF30A) { tsk_error_set_errno(TSK_ERR_FS_INODE_COR); tsk_error_set_errstr ("ext2fs_make_data_run_extent_index: extent header magic valid incorrect!"); return 1; } data_run = tsk_fs_attr_run_alloc(); if (data_run == NULL) { return 1; } data_run->addr = idx_block; data_run->len = fs_blocksize; if (tsk_fs_attr_add_run(fs_info, fs_attr_extent, data_run)) { return 1; } /* process leaf nodes */ if (tsk_getu16(fs_info->endian, header->eh_depth) == 0) { ext2fs_extent *extents = (ext2fs_extent *) (header + 1); for (i = 0; i < tsk_getu16(fs_info->endian, header->eh_entries); i++) { ext2fs_extent extent = extents[i]; if (ext2fs_make_data_run_extent(fs_info, fs_attr, &extent)) { return 1; } } } /* recurse on interior nodes */ else { ext2fs_extent_idx *indices = (ext2fs_extent_idx *) (header + 1); for (i = 0; i < tsk_getu16(fs_info->endian, header->eh_entries); i++) { ext2fs_extent_idx *index = &indices[i]; TSK_DADDR_T child_block = (((uint32_t) tsk_getu16(fs_info->endian, index->ei_leaf_hi)) << 16) | tsk_getu32(fs_info-> endian, index->ei_leaf_lo); if (ext2fs_make_data_run_extent_index(fs_info, fs_attr, fs_attr_extent, child_block)) { return 1; } } } free(buf); return 0; } /** \internal * Get the number of extent blocks rooted at the given extent_header. * The count does not include the extent header passed as a parameter. * * @return the number of extent blocks, or -1 on error. 
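 * (Every child block read while descending the tree adds one to the count.)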
*/ static int32_t ext2fs_extent_tree_index_count(TSK_FS_INFO * fs_info, TSK_FS_META * fs_meta, ext2fs_extent_header * header) { int fs_blocksize = fs_info->block_size; ext2fs_extent_idx *indices; int count = 0; uint8_t *buf; int i; if (tsk_getu16(fs_info->endian, header->eh_magic) != 0xF30A) { tsk_error_set_errno(TSK_ERR_FS_INODE_COR); tsk_error_set_errstr ("ext2fs_load_attrs: extent header magic valid incorrect!"); return -1; } if (tsk_getu16(fs_info->endian, header->eh_depth) == 0) { return 0; } if ((buf = (uint8_t *) tsk_malloc(fs_blocksize)) == NULL) { return -1; } indices = (ext2fs_extent_idx *) (header + 1); for (i = 0; i < tsk_getu16(fs_info->endian, header->eh_entries); i++) { ext2fs_extent_idx *index = &indices[i]; TSK_DADDR_T block = (((uint32_t) tsk_getu16(fs_info->endian, index->ei_leaf_hi)) << 16) | tsk_getu32(fs_info-> endian, index->ei_leaf_lo); ssize_t cnt = tsk_fs_read_block(fs_info, block, (char *) buf, fs_blocksize); int ret; if (cnt != fs_blocksize) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2("ext2fs_extent_tree_index_count: Block %" PRIuDADDR, block); return -1; } if ((ret = ext2fs_extent_tree_index_count(fs_info, fs_meta, (ext2fs_extent_header *) buf)) < 0) { return -1; } count += ret; count++; } free(buf); return count; } /** * \internal * Loads attribute for Ext4 Extents-based storage method. * @param fs_file File system to analyze * @returns 0 on success, 1 otherwise */ static uint8_t ext4_load_attrs_extents(TSK_FS_FILE *fs_file) { TSK_FS_META *fs_meta = fs_file->meta; TSK_FS_INFO *fs_info = fs_file->fs_info; TSK_OFF_T length = 0; TSK_FS_ATTR *fs_attr; int i; ext2fs_extent *extents = NULL; ext2fs_extent_idx *indices = NULL; ext2fs_extent_header *header = (ext2fs_extent_header *) fs_meta->content_ptr; uint16_t num_entries = tsk_getu16(fs_info->endian, header->eh_entries); uint16_t depth = tsk_getu16(fs_info->endian, header->eh_depth); if (tsk_getu16(fs_info->endian, header->eh_magic) != 0xF30A) { tsk_error_set_errno(TSK_ERR_FS_INODE_COR); tsk_error_set_errstr ("ext2fs_load_attrs: extent header magic valid incorrect!"); return 1; } if ((fs_meta->attr != NULL) && (fs_meta->attr_state == TSK_FS_META_ATTR_STUDIED)) { return 0; } else if (fs_meta->attr_state == TSK_FS_META_ATTR_ERROR) { return 1; } else if (fs_meta->attr != NULL) { tsk_fs_attrlist_markunused(fs_meta->attr); } else if (fs_meta->attr == NULL) { fs_meta->attr = tsk_fs_attrlist_alloc(); } if (TSK_FS_TYPE_ISEXT(fs_info->ftype) == 0) { tsk_error_set_errno(TSK_ERR_FS_INODE_COR); tsk_error_set_errstr ("ext2fs_load_attr: Called with non-ExtX file system: %x", fs_info->ftype); return 1; } length = roundup(fs_meta->size, fs_info->block_size); if ((fs_attr = tsk_fs_attrlist_getnew(fs_meta->attr, TSK_FS_ATTR_NONRES)) == NULL) { return 1; } if (tsk_fs_attr_set_run(fs_file, fs_attr, NULL, NULL, TSK_FS_ATTR_TYPE_DEFAULT, TSK_FS_ATTR_ID_DEFAULT, fs_meta->size, fs_meta->size, length, 0, 0)) { return 1; } if (num_entries == 0) { fs_meta->attr_state = TSK_FS_META_ATTR_STUDIED; return 0; } if (depth == 0) { /* leaf node */ if (num_entries > (fs_info->block_size - sizeof(ext2fs_extent_header)) / sizeof(ext2fs_extent)) { tsk_error_set_errno(TSK_ERR_FS_INODE_COR); tsk_error_set_errstr ("ext2fs_load_attr: Inode reports too many extents"); return 1; } extents = (ext2fs_extent *) (header + 1); for (i = 0; i < num_entries; i++) { ext2fs_extent extent = extents[i]; if (ext2fs_make_data_run_extent(fs_info, fs_attr, &extent)) { return 1; } } } else { /* interior node */ 
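        /* Interior node: the inode holds extent index entries rather than
         * extents.  Each index points to a child block; those blocks are
         * recorded in a separate TSK_FS_ATTR_TYPE_UNIX_EXTENT attribute and
         * then walked recursively to collect the file's data runs. */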
TSK_FS_ATTR *fs_attr_extent; int32_t extent_index_size; if (num_entries > (fs_info->block_size - sizeof(ext2fs_extent_header)) / sizeof(ext2fs_extent_idx)) { tsk_error_set_errno(TSK_ERR_FS_INODE_COR); tsk_error_set_errstr ("ext2fs_load_attr: Inode reports too many extent indices"); return 1; } if ((fs_attr_extent = tsk_fs_attrlist_getnew(fs_meta->attr, TSK_FS_ATTR_NONRES)) == NULL) { return 1; } extent_index_size = ext2fs_extent_tree_index_count(fs_info, fs_meta, header); if (extent_index_size < 0) { return 1; } if (tsk_fs_attr_set_run(fs_file, fs_attr_extent, NULL, NULL, TSK_FS_ATTR_TYPE_UNIX_EXTENT, TSK_FS_ATTR_ID_DEFAULT, fs_info->block_size * extent_index_size, fs_info->block_size * extent_index_size, fs_info->block_size * extent_index_size, 0, 0)) { return 1; } indices = (ext2fs_extent_idx *) (header + 1); for (i = 0; i < num_entries; i++) { ext2fs_extent_idx *index = &indices[i]; TSK_DADDR_T child_block = (((uint32_t) tsk_getu16(fs_info->endian, index-> ei_leaf_hi)) << 16) | tsk_getu32(fs_info-> endian, index->ei_leaf_lo); if (ext2fs_make_data_run_extent_index(fs_info, fs_attr, fs_attr_extent, child_block)) { return 1; } } } fs_meta->attr_state = TSK_FS_META_ATTR_STUDIED; return 0; } /** \internal * Add the data runs and extents to the file attributes. * * @param fs_file File system to analyze * @returns 0 on success, 1 otherwise */ static uint8_t ext2fs_load_attrs(TSK_FS_FILE * fs_file) { /* EXT4 extents-based storage is dealt with differently than * the traditional pointer lists. */ if (fs_file->meta->content_type == TSK_FS_META_CONTENT_TYPE_EXT4_EXTENTS) { return ext4_load_attrs_extents(fs_file); } else { return tsk_fs_unix_make_data_run(fs_file); } } static void ext4_fsstat_datablock_helper(TSK_FS_INFO * fs, FILE * hFile, unsigned int i, TSK_DADDR_T cg_base, int gd_size) { EXT2FS_INFO *ext2fs = (EXT2FS_INFO *) fs; ext2fs_sb *sb = ext2fs->fs; unsigned int gpfbg = (1 << sb->s_log_groups_per_flex); unsigned int ibpg, gd_blocks; unsigned int num_flex_bg, curr_flex_bg; uint64_t last_block; ext4fs_gd *ext4_gd = ext2fs->ext4_grp_buf; uint64_t db_offset = 0; unsigned int num_groups = 0, left_over = 0; #ifdef Ext4_DBG printf("\nDEBUG 64bit:%d, gd_size %d, combined %d\n", EXT2FS_HAS_INCOMPAT_FEATURE(fs, sb, EXT2FS_FEATURE_INCOMPAT_64BIT), gd_size >= 64, EXT2FS_HAS_INCOMPAT_FEATURE(fs, sb, EXT2FS_FEATURE_INCOMPAT_64BIT) && gd_size >= 64); #endif /* number of blocks the inodes consume */ ibpg = (tsk_getu32(fs->endian, sb->s_inodes_per_group) * ext2fs->inode_size + fs->block_size - 1) / fs->block_size; /* number of blocks group descriptors consume */ gd_blocks = (gd_size * ext2fs->groups_count + fs->block_size - 1) / fs->block_size; num_flex_bg = (ext2fs->groups_count / gpfbg); if (ext2fs->groups_count % gpfbg) num_flex_bg++; curr_flex_bg = i / gpfbg; last_block = cg_base + tsk_getu32(fs->endian, sb->s_blocks_per_group) - 1; if (last_block > fs->last_block) { last_block = fs->last_block; } //DEBUG printf("ibpg %d cur_flex: %d, flex_bgs: %d : %d, %d",ibpg, i/gpfbg, num_flex_bg, ext2fs->groups_count/gpfbg, ext2fs->groups_count%gpfbg); #ifdef Ext4_DBG printf("\nDEBUG: Flex BG PROCESSING cg_base: %" PRIuDADDR ", gpfbg: %d, ibpg: %d \n", cg_base, gpfbg, ibpg); #endif /*If this is the 1st bg in a flex bg then it contains the bitmaps and inode tables */ //if(ext2fs_bg_has_super(tsk_getu32(fs->endian,sb->s_feature_ro_compat),i)) //{ if (i % gpfbg == 0) { if (curr_flex_bg == (num_flex_bg - 1)) { num_groups = fs->last_block / tsk_getu32(fs->endian, sb->s_blocks_per_group); if (num_groups % 
tsk_getu32(fs->endian, sb->s_blocks_per_group)) num_groups++; left_over = (num_groups % gpfbg); if (EXT2FS_HAS_INCOMPAT_FEATURE(fs, sb, EXT2FS_FEATURE_INCOMPAT_64BIT) && gd_size >= 64) { //DEBUG printf("DEBUG processing 64bit file system with 64 bit group descriptors"); tsk_fprintf(hFile, " Uninit Data Bitmaps: "); tsk_fprintf(hFile, "%" PRIu64 " - %" PRIu64 "\n", ext4_getu64(fs->endian, ext4_gd->bg_block_bitmap_hi, ext2fs->ext4_grp_buf->bg_block_bitmap_lo) + (left_over), ext4_getu64(fs->endian, ext4_gd->bg_block_bitmap_hi, ext2fs->ext4_grp_buf->bg_block_bitmap_lo) + gpfbg - 1); tsk_fprintf(hFile, " Uninit Inode Bitmaps: "); tsk_fprintf(hFile, "%" PRIu64 " - %" PRIu64 "\n", ext4_getu64(fs->endian, ext4_gd->bg_inode_bitmap_hi, ext2fs->ext4_grp_buf->bg_inode_bitmap_lo) + (left_over), ext4_getu64(fs->endian, ext4_gd->bg_inode_bitmap_hi, ext2fs->ext4_grp_buf->bg_inode_bitmap_lo) + gpfbg - 1); tsk_fprintf(hFile, " Uninit Inode Table: "); tsk_fprintf(hFile, "%" PRIu64 " - %" PRIu64 "\n", ext4_getu64(fs->endian, ext4_gd->bg_inode_table_hi, ext2fs->ext4_grp_buf->bg_inode_table_lo) + ((left_over) * ibpg), ext4_getu64(fs->endian, ext4_gd->bg_inode_table_hi, ext2fs->ext4_grp_buf->bg_inode_table_lo) + (gpfbg * ibpg) - 1); } else { tsk_fprintf(hFile, " Uninit Data Bitmaps: "); tsk_fprintf(hFile, "%" PRIu32 " - %" PRIu32 "\n", tsk_getu32(fs->endian, ext2fs->ext4_grp_buf->bg_block_bitmap_lo) + (left_over), tsk_getu32(fs->endian, ext2fs->ext4_grp_buf->bg_block_bitmap_lo) + gpfbg - 1); tsk_fprintf(hFile, " Uninit Inode Bitmaps: "); tsk_fprintf(hFile, "%" PRIu32 " - %" PRIu32 "\n", tsk_getu32(fs->endian, ext2fs->ext4_grp_buf->bg_inode_bitmap_lo) + (left_over), tsk_getu32(fs->endian, ext2fs->ext4_grp_buf->bg_inode_bitmap_lo) + gpfbg - 1); tsk_fprintf(hFile, " Uninit Inode Table: "); tsk_fprintf(hFile, "%" PRIu32 " - %" PRIu32 "\n", tsk_getu32(fs->endian, ext2fs->ext4_grp_buf->bg_inode_table_lo) + ((left_over) * ibpg), tsk_getu32(fs->endian, ext2fs->ext4_grp_buf->bg_inode_table_lo) + (gpfbg * ibpg) - 1); } } tsk_fprintf(hFile, " Data Blocks: "); db_offset = 0; if (ext2fs_bg_has_super(tsk_getu32(fs->endian, sb->s_feature_ro_compat), i)) { db_offset = cg_base + (gpfbg * 2) //To account for the bitmaps + (ibpg * gpfbg) //Combined inode tables + tsk_getu16(fs->endian, ext2fs->fs->pad_or_gdt.s_reserved_gdt_blocks) + gd_blocks //group descriptors + 1; //superblock } else { db_offset = cg_base + (gpfbg * 2) //To account for the bitmaps + (ibpg * gpfbg); //Combined inode tables } tsk_fprintf(hFile, "%" PRIuDADDR " - %" PRIuDADDR "\n", db_offset, last_block); } else { tsk_fprintf(hFile, " Data Blocks: "); db_offset = 0; if (ext2fs_bg_has_super(tsk_getu32(fs->endian, sb->s_feature_ro_compat), i)) { db_offset = cg_base + tsk_getu16(fs->endian, ext2fs->fs->pad_or_gdt.s_reserved_gdt_blocks) + gd_blocks //group descriptors + 1; //superblock } else { db_offset = cg_base; } tsk_fprintf(hFile, "%" PRIuDADDR " - %" PRIuDADDR "\n", db_offset, last_block); } } /** * Print details about the file system to a file handle. 
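 * The output covers superblock fields, feature flags, journal information,
 * and the layout of each block group.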
* * @param fs File system to print details on * @param hFile File handle to print text to * * @returns 1 on error and 0 on success */ static uint8_t ext2fs_fsstat(TSK_FS_INFO * fs, FILE * hFile) { unsigned int i; unsigned int gpfbg; unsigned int gd_blocks; EXT2FS_INFO *ext2fs = (EXT2FS_INFO *) fs; ext2fs_sb *sb = ext2fs->fs; int ibpg; int gd_size; time_t tmptime; char timeBuf[128]; const char *tmptypename; // clean up any error messages that are lying around tsk_error_reset(); tsk_fprintf(hFile, "FILE SYSTEM INFORMATION\n"); tsk_fprintf(hFile, "--------------------------------------------\n"); switch (fs->ftype) { case TSK_FS_TYPE_EXT3: tmptypename = "Ext3"; gd_size = sizeof(ext2fs_gd); break; case TSK_FS_TYPE_EXT4: tmptypename = "Ext4"; if (EXT2FS_HAS_INCOMPAT_FEATURE(fs, sb, EXT2FS_FEATURE_INCOMPAT_64BIT)) gd_size = sizeof(ext4fs_gd); else gd_size = sizeof(ext2fs_gd); break; default: tmptypename = "Ext2"; gd_size = sizeof(ext2fs_gd); } tsk_fprintf(hFile, "File System Type: %s\n", tmptypename); tsk_fprintf(hFile, "Volume Name: %s\n", sb->s_volume_name); tsk_fprintf(hFile, "Volume ID: %" PRIx64 "%" PRIx64 "\n", tsk_getu64(fs->endian, &sb->s_uuid[8]), tsk_getu64(fs->endian, &sb->s_uuid[0])); tmptime = tsk_getu32(fs->endian, sb->s_wtime); tsk_fprintf(hFile, "\nLast Written at: %s\n", (tmptime > 0) ? tsk_fs_time_to_str(tmptime, timeBuf) : "empty"); tmptime = tsk_getu32(fs->endian, sb->s_lastcheck); tsk_fprintf(hFile, "Last Checked at: %s\n", (tmptime > 0) ? tsk_fs_time_to_str(tmptime, timeBuf) : "empty"); tmptime = tsk_getu32(fs->endian, sb->s_mtime); tsk_fprintf(hFile, "\nLast Mounted at: %s\n", (tmptime > 0) ? tsk_fs_time_to_str(tmptime, timeBuf) : "empty"); /* State of the file system */ if (tsk_getu16(fs->endian, sb->s_state) & EXT2FS_STATE_VALID) tsk_fprintf(hFile, "Unmounted properly\n"); else tsk_fprintf(hFile, "Unmounted Improperly\n"); if (sb->s_last_mounted != '\0') tsk_fprintf(hFile, "Last mounted on: %s\n", sb->s_last_mounted); tsk_fprintf(hFile, "\nSource OS: "); switch (tsk_getu32(fs->endian, sb->s_creator_os)) { case EXT2FS_OS_LINUX: tsk_fprintf(hFile, "Linux\n"); break; case EXT2FS_OS_HURD: tsk_fprintf(hFile, "HURD\n"); break; case EXT2FS_OS_MASIX: tsk_fprintf(hFile, "MASIX\n"); break; case EXT2FS_OS_FREEBSD: tsk_fprintf(hFile, "FreeBSD\n"); break; case EXT2FS_OS_LITES: tsk_fprintf(hFile, "LITES\n"); break; default: tsk_fprintf(hFile, "%" PRIx32 "\n", tsk_getu32(fs->endian, sb->s_creator_os)); break; } if (tsk_getu32(fs->endian, sb->s_rev_level) == EXT2FS_REV_ORIG) tsk_fprintf(hFile, "Static Structure\n"); else tsk_fprintf(hFile, "Dynamic Structure\n"); /* add features */ if (tsk_getu32(fs->endian, sb->s_feature_compat)) { tsk_fprintf(hFile, "Compat Features: "); if (tsk_getu32(fs->endian, sb->s_feature_compat) & EXT2FS_FEATURE_COMPAT_DIR_PREALLOC) tsk_fprintf(hFile, "Dir Prealloc, "); if (tsk_getu32(fs->endian, sb->s_feature_compat) & EXT2FS_FEATURE_COMPAT_IMAGIC_INODES) tsk_fprintf(hFile, "iMagic inodes, "); if (tsk_getu32(fs->endian, sb->s_feature_compat) & EXT2FS_FEATURE_COMPAT_HAS_JOURNAL) tsk_fprintf(hFile, "Journal, "); if (tsk_getu32(fs->endian, sb->s_feature_compat) & EXT2FS_FEATURE_COMPAT_EXT_ATTR) tsk_fprintf(hFile, "Ext Attributes, "); if (tsk_getu32(fs->endian, sb->s_feature_compat) & EXT2FS_FEATURE_COMPAT_RESIZE_INO) tsk_fprintf(hFile, "Resize Inode, "); if (tsk_getu32(fs->endian, sb->s_feature_compat) & EXT2FS_FEATURE_COMPAT_DIR_INDEX) tsk_fprintf(hFile, "Dir Index"); tsk_fprintf(hFile, "\n"); } if (tsk_getu32(fs->endian, sb->s_feature_incompat)) { 
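        /* Decode the superblock's incompatible-feature bits into readable
         * names (filetype, extents, 64bit, flex_bg, and so on). */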
tsk_fprintf(hFile, "InCompat Features: "); if (tsk_getu32(fs->endian, sb->s_feature_incompat) & EXT2FS_FEATURE_INCOMPAT_COMPRESSION) tsk_fprintf(hFile, "Compression, "); if (tsk_getu32(fs->endian, sb->s_feature_incompat) & EXT2FS_FEATURE_INCOMPAT_FILETYPE) tsk_fprintf(hFile, "Filetype, "); if (tsk_getu32(fs->endian, sb->s_feature_incompat) & EXT2FS_FEATURE_INCOMPAT_RECOVER) tsk_fprintf(hFile, "Needs Recovery, "); if (tsk_getu32(fs->endian, sb->s_feature_incompat) & EXT2FS_FEATURE_INCOMPAT_JOURNAL_DEV) tsk_fprintf(hFile, "Journal Dev"); if (tsk_getu32(fs->endian, sb->s_feature_incompat) & EXT2FS_FEATURE_INCOMPAT_META_BG) tsk_fprintf(hFile, "Meta Block Groups, "); if (tsk_getu32(fs->endian, sb->s_feature_incompat) & EXT2FS_FEATURE_INCOMPAT_EXTENTS) tsk_fprintf(hFile, "Extents, "); if (tsk_getu32(fs->endian, sb->s_feature_incompat) & EXT2FS_FEATURE_INCOMPAT_64BIT) tsk_fprintf(hFile, "64bit, "); if (tsk_getu32(fs->endian, sb->s_feature_incompat) & EXT2FS_FEATURE_INCOMPAT_MMP) tsk_fprintf(hFile, "Multiple Mount Protection, "); if (tsk_getu32(fs->endian, sb->s_feature_incompat) & EXT2FS_FEATURE_INCOMPAT_FLEX_BG) tsk_fprintf(hFile, "Flexible Block Groups, "); if (tsk_getu32(fs->endian, sb->s_feature_incompat) & EXT2FS_FEATURE_INCOMPAT_EA_INODE) tsk_fprintf(hFile, "Extended Attributes, "); if (tsk_getu32(fs->endian, sb->s_feature_incompat) & EXT2FS_FEATURE_INCOMPAT_DIRDATA) tsk_fprintf(hFile, "Directory Entry Data"); tsk_fprintf(hFile, "\n"); } if (tsk_getu32(fs->endian, sb->s_feature_ro_compat)) { tsk_fprintf(hFile, "Read Only Compat Features: "); if (tsk_getu32(fs->endian, sb->s_feature_ro_compat) & EXT2FS_FEATURE_RO_COMPAT_SPARSE_SUPER) tsk_fprintf(hFile, "Sparse Super, "); if (tsk_getu32(fs->endian, sb->s_feature_ro_compat) & EXT2FS_FEATURE_RO_COMPAT_LARGE_FILE) tsk_fprintf(hFile, "Large File, "); if (EXT2FS_HAS_RO_COMPAT_FEATURE(fs, sb, EXT2FS_FEATURE_RO_COMPAT_HUGE_FILE)) tsk_fprintf(hFile, "Huge File, "); if (tsk_getu32(fs->endian, sb->s_feature_ro_compat) & EXT2FS_FEATURE_RO_COMPAT_BTREE_DIR) tsk_fprintf(hFile, "Btree Dir, "); if (tsk_getu32(fs->endian, sb->s_feature_ro_compat) & EXT2FS_FEATURE_RO_COMPAT_EXTRA_ISIZE) tsk_fprintf(hFile, "Extra Inode Size"); tsk_fprintf(hFile, "\n"); } /* Print journal information */ if (tsk_getu32(fs->endian, sb->s_feature_compat) & EXT2FS_FEATURE_COMPAT_HAS_JOURNAL) { tsk_fprintf(hFile, "\nJournal ID: %" PRIx64 "%" PRIx64 "\n", tsk_getu64(fs->endian, &sb->s_journal_uuid[8]), tsk_getu64(fs->endian, &sb->s_journal_uuid[0])); if (tsk_getu32(fs->endian, sb->s_journal_inum) != 0) tsk_fprintf(hFile, "Journal Inode: %" PRIu32 "\n", tsk_getu32(fs->endian, sb->s_journal_inum)); if (tsk_getu32(fs->endian, sb->s_journal_dev) != 0) tsk_fprintf(hFile, "Journal Device: %" PRIu32 "\n", tsk_getu32(fs->endian, sb->s_journal_dev)); } tsk_fprintf(hFile, "\nMETADATA INFORMATION\n"); tsk_fprintf(hFile, "--------------------------------------------\n"); tsk_fprintf(hFile, "Inode Range: %" PRIuINUM " - %" PRIuINUM "\n", fs->first_inum, fs->last_inum); tsk_fprintf(hFile, "Root Directory: %" PRIuINUM "\n", fs->root_inum); tsk_fprintf(hFile, "Free Inodes: %" PRIu32 "\n", tsk_getu32(fs->endian, sb->s_free_inode_count)); /* Only print size of inode for Ext4 This determines if you will get nanosecs and crtime */ if (!strcmp(tmptypename, "Ext4")) { tsk_fprintf(hFile, "Inode Size: %" PRIu16 "\n", tsk_getu16(fs->endian, sb->s_inode_size)); } if (tsk_getu32(fs->endian, sb->s_last_orphan)) { uint32_t or_in; tsk_fprintf(hFile, "Orphan Inodes: "); or_in = tsk_getu32(fs->endian, 
sb->s_last_orphan); while (or_in) { TSK_FS_FILE *fs_file; if ((or_in > fs->last_inum) || (or_in < fs->first_inum)) break; tsk_fprintf(hFile, "%" PRIu32 ", ", or_in); if ((fs_file = tsk_fs_file_alloc(fs)) == NULL) { /* Ignore this error */ tsk_error_reset(); break; } /* Get the next one */ if (ext2fs_inode_lookup(fs, fs_file, or_in)) { /* Ignore this error */ tsk_error_reset(); break; } or_in = (uint32_t) fs_file->meta->time2.ext2.dtime; tsk_fs_file_close(fs_file); } tsk_fprintf(hFile, "\n"); } tsk_fprintf(hFile, "\nCONTENT INFORMATION\n"); tsk_fprintf(hFile, "--------------------------------------------\n"); if (fs->ftype == TSK_FS_TYPE_EXT4) { tsk_fprintf(hFile, "Block Groups Per Flex Group: %" PRIu32 "\n", (1 << sb->s_log_groups_per_flex)); gpfbg = (1 << sb->s_log_groups_per_flex); } tsk_fprintf(hFile, "Block Range: %" PRIuDADDR " - %" PRIuDADDR "\n", fs->first_block, fs->last_block); if (fs->last_block != fs->last_block_act) tsk_fprintf(hFile, "Total Range in Image: %" PRIuDADDR " - %" PRIuDADDR "\n", fs->first_block, fs->last_block_act); tsk_fprintf(hFile, "Block Size: %u\n", fs->block_size); if (tsk_getu32(fs->endian, sb->s_first_data_block)) tsk_fprintf(hFile, "Reserved Blocks Before Block Groups: %" PRIu32 "\n", tsk_getu32(fs->endian, sb->s_first_data_block)); tsk_fprintf(hFile, "Free Blocks: %" PRIu32 "\n", tsk_getu32(fs->endian, sb->s_free_blocks_count)); tsk_fprintf(hFile, "\nBLOCK GROUP INFORMATION\n"); tsk_fprintf(hFile, "--------------------------------------------\n"); tsk_fprintf(hFile, "Number of Block Groups: %" PRI_EXT2GRP "\n", ext2fs->groups_count); tsk_fprintf(hFile, "Inodes per group: %" PRIu32 "\n", tsk_getu32(fs->endian, sb->s_inodes_per_group)); tsk_fprintf(hFile, "Blocks per group: %" PRIu32 "\n", tsk_getu32(fs->endian, sb->s_blocks_per_group)); /* number of blocks the inodes consume */ ibpg = (tsk_getu32(fs->endian, sb->s_inodes_per_group) * ext2fs->inode_size + fs->block_size - 1) / fs->block_size; /* number of blocks group descriptors consume */ gd_blocks = (gd_size * ext2fs->groups_count + fs->block_size - 1) / fs->block_size; #ifdef Ext4_DBG tsk_fprintf(hFile, "\n\tDEBUG: Group Descriptor Size: %d\n", gd_size); //DEBUG tsk_fprintf(hFile, "\n\tDEBUG: Group Descriptor Size: %d\n", *sb->s_desc_size); //DEBUG debug_print_buf((unsigned char *) &sb->pad_or_gdt, 16); printf("\n\tDEBUG: gdt_growth: %d\n", tsk_getu16(fs->endian, sb->pad_or_gdt.s_reserved_gdt_blocks)); #endif for (i = 0; i < ext2fs->groups_count; i++) { TSK_DADDR_T cg_base; TSK_INUM_T inum; /* lock access to grp_buf */ tsk_take_lock(&ext2fs->lock); if (ext2fs_group_load(ext2fs, i)) { tsk_release_lock(&ext2fs->lock); return 1; } tsk_fprintf(hFile, "\nGroup: %d:\n", i); if (fs->ftype == TSK_FS_TYPE_EXT4) { tsk_fprintf(hFile, " Block Group Flags: ["); if (EXT4BG_HAS_FLAG(fs, ext2fs->ext4_grp_buf, EXT4_BG_INODE_UNINIT)) tsk_fprintf(hFile, "INODE_UNINIT, "); if (EXT4BG_HAS_FLAG(fs, ext2fs->ext4_grp_buf, EXT4_BG_BLOCK_UNINIT)) tsk_fprintf(hFile, "BLOCK_UNINIT, "); if (EXT4BG_HAS_FLAG(fs, ext2fs->ext4_grp_buf, EXT4_BG_INODE_ZEROED)) tsk_fprintf(hFile, "INODE_ZEROED, "); tsk_fprintf(hFile, "\b\b]\n"); } inum = fs->first_inum + tsk_gets32(fs->endian, sb->s_inodes_per_group) * i; tsk_fprintf(hFile, " Inode Range: %" PRIuINUM " - ", inum); if ((inum + tsk_gets32(fs->endian, sb->s_inodes_per_group) - 1) < fs->last_inum) tsk_fprintf(hFile, "%" PRIuINUM "\n", inum + tsk_gets32(fs->endian, sb->s_inodes_per_group) - 1); else tsk_fprintf(hFile, "%" PRIuINUM "\n", fs->last_inum); if (tsk_getu32(fs->endian, ext2fs->fs-> 
s_feature_incompat) & EXT2FS_FEATURE_INCOMPAT_64BIT) { cg_base = ext4_cgbase_lcl(fs, sb, i); #ifdef Ext4_DBG printf("DEBUG64: ext2_cgbase_lcl %" PRIuDADDR "\n", cg_base); printf("DEBUG64: fs->s_first_data_block %" PRIuDADDR "\n", tsk_getu32(fs->endian, sb->s_first_data_block)); printf("DEBUG64: blocks_per_group %" PRIuDADDR "\n", tsk_getu32(fs->endian, sb->s_blocks_per_group)); printf("DEBUG64: i %" PRIuDADDR " %" PRIuDADDR " %" PRIuDADDR "\n", i, tsk_getu32(fs->endian, sb->s_blocks_per_group), (uint64_t) i * (uint64_t) tsk_getu32(fs->endian, sb->s_blocks_per_group)); //printf("DEBUG: calculated %"PRIuDADDR"\n", ) #endif tsk_fprintf(hFile, " Block Range: %" PRIuDADDR " - %" PRIuDADDR "\n", cg_base, ((ext4_cgbase_lcl(fs, sb, i + 1) - 1) < fs->last_block) ? (ext4_cgbase_lcl(fs, sb, i + 1) - 1) : fs->last_block); } else { cg_base = ext2_cgbase_lcl(fs, sb, i); #ifdef Ext4_DBG debug_print_buf(sb, 100); printf("DEBUG32: ext2_cgbase_lcl %" PRIuDADDR "\n", cg_base); printf("DEBUG32: fs->s_first_data_block %" PRIu32 "\n", tsk_getu32(fs->endian, sb->s_first_data_block)); printf("DEBUG32: blocks_per_group %" PRIu32 "\n", tsk_getu32(fs->endian, sb->s_blocks_per_group)); printf("DEBUG32: i: %" PRIu32 " blocks per group: %" PRIu32 " i*blocks_per_group: %" PRIu32 "\n", i, tsk_getu32(fs->endian, sb->s_blocks_per_group), (uint64_t) i * (uint64_t) tsk_getu32(fs->endian, sb->s_blocks_per_group)); //printf("DEBUG: calculated %"PRIuDADDR"\n", ) #endif tsk_fprintf(hFile, " Block Range: %" PRIuDADDR " - %" PRIuDADDR "\n", cg_base, ((ext2_cgbase_lcl(fs, sb, i + 1) - 1) < fs->last_block) ? (ext2_cgbase_lcl(fs, sb, i + 1) - 1) : fs->last_block); } tsk_fprintf(hFile, " Layout:\n"); /* only print the super block data if we are not in a sparse * group */ #ifdef Ext4_DBG printf("DEBUG: ext2fs_super: %d\n", ext2fs_bg_has_super(tsk_getu32(fs->endian, sb->s_feature_ro_compat), i)); #endif /* if (((tsk_getu32(fs->endian, ext2fs->fs->s_feature_ro_compat) & EXT2FS_FEATURE_RO_COMPAT_SPARSE_SUPER) && (cg_base != tsk_getu32(fs->endian, ext2fs->grp_buf->bg_block_bitmap))) || ((tsk_getu32(fs->endian, ext2fs->fs->s_feature_ro_compat) & EXT2FS_FEATURE_RO_COMPAT_SPARSE_SUPER) == 0)) { */ if (ext2fs_bg_has_super(tsk_getu32(fs->endian, sb->s_feature_ro_compat), i)) { TSK_OFF_T boff; /* the super block is the first 1024 bytes */ tsk_fprintf(hFile, " Super Block: %" PRIuDADDR " - %" PRIuDADDR "\n", cg_base, cg_base + ((sizeof(ext2fs_sb) + fs->block_size - 1) / fs->block_size) - 1); boff = roundup(sizeof(ext2fs_sb), fs->block_size); /* Group Descriptors */ tsk_fprintf(hFile, " Group Descriptor Table: %" PRIuDADDR " - ", (cg_base + (boff + fs->block_size - 1) / fs->block_size)); // printf("DEBUG: Groups Count: %u * gd_size: %u = %u\n", ext2fs->groups_count, gd_size, ext2fs->groups_count * gd_size); boff += (ext2fs->groups_count * gd_size); tsk_fprintf(hFile, "%" PRIuDADDR "\n", ((cg_base + (boff + fs->block_size - 1) / fs->block_size) - 1)); if (fs->ftype == TSK_FS_TYPE_EXT4) { tsk_fprintf(hFile, " Group Descriptor Growth Blocks: %" PRIuDADDR " - ", cg_base + (boff + fs->block_size - 1) / fs->block_size); boff += tsk_getu16(fs->endian, ext2fs->fs->pad_or_gdt.s_reserved_gdt_blocks) * fs->block_size; tsk_fprintf(hFile, "%" PRIuDADDR "\n", ((cg_base + (boff + fs->block_size - 1) / fs->block_size) - 1)); } } if (tsk_getu32(fs->endian, ext2fs->fs-> s_feature_incompat) & EXT2FS_FEATURE_INCOMPAT_64BIT) { /* The block bitmap is a full block */ tsk_fprintf(hFile, " Data bitmap: %" PRIu64 " - %" PRIu64 "\n", ext4_getu64(fs->endian, 
ext2fs->ext4_grp_buf->bg_block_bitmap_hi, ext2fs->ext4_grp_buf->bg_block_bitmap_lo), ext4_getu64(fs->endian, ext2fs->ext4_grp_buf->bg_block_bitmap_hi, ext2fs->ext4_grp_buf->bg_block_bitmap_lo)); /* The inode bitmap is a full block */ tsk_fprintf(hFile, " Inode bitmap: %" PRIu64 " - %" PRIu64 "\n", ext4_getu64(fs->endian, ext2fs->ext4_grp_buf->bg_inode_bitmap_hi, ext2fs->ext4_grp_buf->bg_inode_bitmap_lo), ext4_getu64(fs->endian, ext2fs->ext4_grp_buf->bg_inode_bitmap_hi, ext2fs->ext4_grp_buf->bg_inode_bitmap_lo)); tsk_fprintf(hFile, " Inode Table: %" PRIu64 " - %" PRIu64 "\n", ext4_getu64(fs->endian, ext2fs->ext4_grp_buf->bg_inode_table_hi, ext2fs->ext4_grp_buf->bg_inode_table_lo), ext4_getu64(fs->endian, ext2fs->ext4_grp_buf->bg_inode_table_hi, ext2fs->ext4_grp_buf->bg_inode_table_lo) + ibpg - 1); } else { /* The block bitmap is a full block */ tsk_fprintf(hFile, " Data bitmap: %" PRIu32 " - %" PRIu32 "\n", tsk_getu32(fs->endian, ext2fs->grp_buf->bg_block_bitmap), tsk_getu32(fs->endian, ext2fs->grp_buf->bg_block_bitmap)); /* The inode bitmap is a full block */ tsk_fprintf(hFile, " Inode bitmap: %" PRIu32 " - %" PRIu32 "\n", tsk_getu32(fs->endian, ext2fs->grp_buf->bg_inode_bitmap), tsk_getu32(fs->endian, ext2fs->grp_buf->bg_inode_bitmap)); tsk_fprintf(hFile, " Inode Table: %" PRIu32 " - %" PRIu32 "\n", tsk_getu32(fs->endian, ext2fs->grp_buf->bg_inode_table), tsk_getu32(fs->endian, ext2fs->grp_buf->bg_inode_table) + ibpg - 1); } /* If we are in a sparse group, display the other addresses */ if (fs->ftype == TSK_FS_TYPE_EXT4) { ext4_fsstat_datablock_helper(fs, hFile, i, cg_base, gd_size); } else { tsk_fprintf(hFile, " Data Blocks: "); // BC: Commented out from Ext4 commit because it produced // bad data on Ext2 test image. //if (ext2fs_bg_has_super(tsk_getu32(fs->endian, // sb->s_feature_ro_compat), i)) { if ((tsk_getu32(fs->endian, ext2fs->fs->s_feature_ro_compat) & EXT2FS_FEATURE_RO_COMPAT_SPARSE_SUPER) && (cg_base == tsk_getu32(fs->endian, ext2fs->grp_buf->bg_block_bitmap))) { /* it goes from the end of the inode bitmap to before the * table * * This hard coded aspect does not scale ... */ tsk_fprintf(hFile, "%" PRIu32 " - %" PRIu32 ", ", tsk_getu32(fs->endian, ext2fs->grp_buf->bg_inode_bitmap) + 1, tsk_getu32(fs->endian, ext2fs->grp_buf->bg_inode_table) - 1); } tsk_fprintf(hFile, "%" PRIu32 " - %" PRIu32 "\n", (uint64_t) tsk_getu32(fs->endian, ext2fs->grp_buf->bg_inode_table) + ibpg, ((ext2_cgbase_lcl(fs, sb, i + 1) - 1) < fs->last_block) ? 
(ext2_cgbase_lcl(fs, sb, i + 1) - 1) : fs->last_block); } /* Print the free info */ /* The last group may not have a full number of blocks */ if (i != (ext2fs->groups_count - 1)) { tsk_fprintf(hFile, " Free Inodes: %" PRIu16 " (%" PRIu32 "%%)\n", tsk_getu16(fs->endian, ext2fs->grp_buf->bg_free_inodes_count), (100 * tsk_getu16(fs->endian, ext2fs->grp_buf->bg_free_inodes_count)) / tsk_getu32(fs->endian, sb->s_inodes_per_group)); tsk_fprintf(hFile, " Free Blocks: %" PRIu16 " (%" PRIu32 "%%)\n", tsk_getu16(fs->endian, ext2fs->grp_buf->bg_free_blocks_count), (100 * tsk_getu16(fs->endian, ext2fs->grp_buf->bg_free_blocks_count)) / tsk_getu32(fs->endian, sb->s_blocks_per_group)); } else { TSK_INUM_T inum_left; TSK_DADDR_T blk_left; inum_left = (fs->last_inum % tsk_gets32(fs->endian, sb->s_inodes_per_group)) - 1; if (inum_left == 0) inum_left = tsk_getu32(fs->endian, sb->s_inodes_per_group); tsk_fprintf(hFile, " Free Inodes: %" PRIu16 " (%d%%)\n", tsk_getu16(fs->endian, ext2fs->grp_buf->bg_free_inodes_count), 100 * tsk_getu16(fs->endian, ext2fs->grp_buf->bg_free_inodes_count) / inum_left); /* Now blocks */ blk_left = fs->block_count % tsk_getu32(fs->endian, sb->s_blocks_per_group); if (blk_left == 0) blk_left = tsk_getu32(fs->endian, sb->s_blocks_per_group); tsk_fprintf(hFile, " Free Blocks: %" PRIu16 " (%d%%)\n", tsk_getu16(fs->endian, ext2fs->grp_buf->bg_free_blocks_count), 100 * tsk_getu16(fs->endian, ext2fs->grp_buf->bg_free_blocks_count) / blk_left); } tsk_fprintf(hFile, " Total Directories: %" PRIu16 "\n", tsk_getu16(fs->endian, ext2fs->grp_buf->bg_used_dirs_count)); if (fs->ftype == TSK_FS_TYPE_EXT4) { tsk_fprintf(hFile, " Stored Checksum: 0x%04" PRIX16 "\n", tsk_getu16(fs->endian, ext2fs->ext4_grp_buf->bg_checksum)); #ifdef EXT4_CHECKSUMS //Need Non-GPL CRC16 tsk_fprintf(hFile, " Calculated Checksum: 0x%04" PRIX16 "\n", ext4_group_desc_csum(ext2fs->fs, i, ext2fs->ext4_grp_buf)); #endif } tsk_release_lock(&ext2fs->lock); } return 0; } /************************* istat *******************************/ static void ext2fs_make_acl_str(char *str, int len, uint16_t perm) { int i = 0; if (perm & EXT2_PACL_PERM_READ) { snprintf(&str[i], len - 1, "Read"); i += 4; } if (perm & EXT2_PACL_PERM_WRITE) { if (i) { snprintf(&str[i], len - i - 1, ", "); i += 2; } snprintf(&str[i], len - i - 1, "Write"); i += 5; } if (perm & EXT2_PACL_PERM_EXEC) { if (i) { snprintf(&str[i], len - i - 1, ", "); i += 2; } snprintf(&str[i], len - i - 1, "Execute"); i += 7; } } typedef struct { FILE *hFile; int idx; } EXT2FS_PRINT_ADDR; /* Callback for istat to print the block addresses */ static TSK_WALK_RET_ENUM print_addr_act(TSK_FS_FILE * fs_file, TSK_OFF_T a_off, TSK_DADDR_T addr, char *buf, size_t size, TSK_FS_BLOCK_FLAG_ENUM flags, void *a_ptr) { TSK_FS_INFO *fs = fs_file->fs_info; EXT2FS_PRINT_ADDR *print = (EXT2FS_PRINT_ADDR *) a_ptr; if (flags & TSK_FS_BLOCK_FLAG_CONT) { int i, s; /* cycle through the blocks if they exist */ for (i = 0, s = (int) size; s > 0; s -= fs->block_size, i++) { /* sparse file */ if (addr) tsk_fprintf(print->hFile, "%" PRIuDADDR " ", addr + i); else tsk_fprintf(print->hFile, "0 "); if (++(print->idx) == 8) { tsk_fprintf(print->hFile, "\n"); print->idx = 0; } } } return TSK_WALK_CONT; } /** * Print details on a specific file to a file handle. 
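 * The report covers the inode's mode, ownership, flags, times, extended
 * attributes, and the block addresses that hold its content.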
* * @param fs File system file is located in * @param hFile File handle to print text to * @param inum Address of file in file system * @param numblock The number of blocks in file to force print (can go beyond file size) * @param sec_skew Clock skew in seconds to also print times in * * @returns 1 on error and 0 on success */ static uint8_t ext2fs_istat(TSK_FS_INFO * fs, FILE * hFile, TSK_INUM_T inum, TSK_DADDR_T numblock, int32_t sec_skew) { EXT2FS_INFO *ext2fs = (EXT2FS_INFO *) fs; TSK_FS_META *fs_meta; TSK_FS_FILE *fs_file; char ls[12]; EXT2FS_PRINT_ADDR print; const TSK_FS_ATTR *fs_attr_indir; ext2fs_inode *dino_buf = NULL; char timeBuf[128]; unsigned int size; unsigned int large_inodes; // clean up any error messages that are lying around tsk_error_reset(); if (ext2fs->inode_size > 128) { large_inodes = 1; } else { large_inodes = 0; } size = ext2fs->inode_size > sizeof(ext2fs_inode) ? ext2fs->inode_size : sizeof(ext2fs_inode); if ((dino_buf = (ext2fs_inode *) tsk_malloc(size)) == NULL) { return 1; } if (ext2fs_dinode_load(ext2fs, inum, dino_buf)) { free(dino_buf); return 1; } if ((fs_file = tsk_fs_file_open_meta(fs, NULL, inum)) == NULL) { free(dino_buf); return 1; } fs_meta = fs_file->meta; tsk_fprintf(hFile, "inode: %" PRIuINUM "\n", inum); tsk_fprintf(hFile, "%sAllocated\n", (fs_meta->flags & TSK_FS_META_FLAG_ALLOC) ? "" : "Not "); tsk_fprintf(hFile, "Group: %" PRIuGID "\n", ext2fs->grp_num); // Note that if this is a "virtual file", then ext2fs->dino_buf may not be set. tsk_fprintf(hFile, "Generation Id: %" PRIu32 "\n", tsk_getu32(fs->endian, dino_buf->i_generation)); if (fs_meta->link) tsk_fprintf(hFile, "symbolic link to: %s\n", fs_meta->link); tsk_fprintf(hFile, "uid / gid: %" PRIuUID " / %" PRIuGID "\n", fs_meta->uid, fs_meta->gid); tsk_fs_meta_make_ls(fs_meta, ls, sizeof(ls)); tsk_fprintf(hFile, "mode: %s\n", ls); /* Print the device ids */ if ((fs_meta->type == TSK_FS_META_TYPE_BLK) || (fs_meta->type == TSK_FS_META_TYPE_CHR)) { tsk_fprintf(hFile, "Device Major: %" PRIu8 " Minor: %" PRIu8 "\n", dino_buf->i_block[0][1], dino_buf->i_block[0][0]); } if (tsk_getu32(fs->endian, dino_buf->i_flags)) { tsk_fprintf(hFile, "Flags: "); if (tsk_getu32(fs->endian, dino_buf->i_flags) & EXT2_IN_SECDEL) tsk_fprintf(hFile, "Secure Delete, "); if (tsk_getu32(fs->endian, dino_buf->i_flags) & EXT2_IN_UNRM) tsk_fprintf(hFile, "Undelete, "); if (tsk_getu32(fs->endian, dino_buf->i_flags) & EXT2_IN_COMP) tsk_fprintf(hFile, "Compressed, "); if (tsk_getu32(fs->endian, dino_buf->i_flags) & EXT2_IN_SYNC) tsk_fprintf(hFile, "Sync Updates, "); if (tsk_getu32(fs->endian, dino_buf->i_flags) & EXT2_IN_IMM) tsk_fprintf(hFile, "Immutable, "); if (tsk_getu32(fs->endian, dino_buf->i_flags) & EXT2_IN_APPEND) tsk_fprintf(hFile, "Append Only, "); if (tsk_getu32(fs->endian, dino_buf->i_flags) & EXT2_IN_NODUMP) tsk_fprintf(hFile, "Do Not Dump, "); if (tsk_getu32(fs->endian, dino_buf->i_flags) & EXT2_IN_NOA) tsk_fprintf(hFile, "No A-Time, "); if (tsk_getu32(fs->endian, dino_buf->i_flags) & EXT2_IN_DIRTY) tsk_fprintf(hFile, "Dirty Compressed File, "); if (tsk_getu32(fs->endian, dino_buf->i_flags) & EXT2_IN_COMPRBLK) tsk_fprintf(hFile, "Compressed Clusters, "); if (tsk_getu32(fs->endian, dino_buf->i_flags) & EXT2_IN_NOCOMPR) tsk_fprintf(hFile, "Do Not Compress, "); if (tsk_getu32(fs->endian, dino_buf->i_flags) & EXT2_IN_ECOMPR) tsk_fprintf(hFile, "Compression Error, "); if (tsk_getu32(fs->endian, dino_buf->i_flags) & EXT2_IN_INDEX) tsk_fprintf(hFile, "Hash Indexed Directory, "); if (tsk_getu32(fs->endian, 
dino_buf->i_flags) & EXT2_IN_IMAGIC) tsk_fprintf(hFile, "AFS Magic Directory, "); if (tsk_getu32(fs->endian, dino_buf->i_flags) & EXT2_IN_JOURNAL_DATA) tsk_fprintf(hFile, "Journal Data, "); if (tsk_getu32(fs->endian, dino_buf->i_flags) & EXT2_IN_NOTAIL) tsk_fprintf(hFile, "Do Not Merge Tail, "); if (tsk_getu32(fs->endian, dino_buf->i_flags) & EXT2_IN_DIRSYNC) tsk_fprintf(hFile, "Directory Sync, "); if (tsk_getu32(fs->endian, dino_buf->i_flags) & EXT2_IN_TOPDIR) tsk_fprintf(hFile, "Top Directory, "); if (tsk_getu32(fs->endian, dino_buf->i_flags) & EXT2_IN_HUGE_FILE) tsk_fprintf(hFile, "Huge File, "); if (tsk_getu32(fs->endian, dino_buf->i_flags) & EXT2_IN_EXTENTS) tsk_fprintf(hFile, "Extents, "); if (tsk_getu32(fs->endian, dino_buf->i_flags) & EXT2_IN_EA_INODE) tsk_fprintf(hFile, "Large Extended Attribute, "); if (tsk_getu32(fs->endian, dino_buf->i_flags) & EXT2_IN_EOFBLOCKS) tsk_fprintf(hFile, "Blocks Allocated Beyond EOF, "); tsk_fprintf(hFile, "\n"); } tsk_fprintf(hFile, "size: %" PRIuOFF "\n", fs_meta->size); tsk_fprintf(hFile, "num of links: %d\n", fs_meta->nlink); /* Ext attribute are stored in a block with a header and a list * of entries that are aligned to 4-byte boundaries. The attr * value is stored at the end of the block. There are 4 null bytes * in between the headers and values */ if (tsk_getu32(fs->endian, dino_buf->i_file_acl) != 0) { char *buf; ext2fs_ea_header *ea_head; ext2fs_ea_entry *ea_entry; ssize_t cnt; if ((buf = tsk_malloc(fs->block_size)) == NULL) { tsk_fs_file_close(fs_file); free(dino_buf); return 1; } tsk_fprintf(hFile, "\nExtended Attributes (Block: %" PRIu32 ")\n", tsk_getu32(fs->endian, dino_buf->i_file_acl)); /* Is the value too big? */ if (tsk_getu32(fs->endian, dino_buf->i_file_acl) > fs->last_block) { tsk_fprintf(hFile, "Extended Attributes block is larger than file system\n"); goto egress_ea; } cnt = tsk_fs_read(fs, (TSK_DADDR_T) tsk_getu32(fs->endian, dino_buf->i_file_acl) * fs->block_size, buf, fs->block_size); if (cnt != fs->block_size) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2("ext2fs_istat: ACL block %" PRIu32, tsk_getu32(fs->endian, dino_buf->i_file_acl)); tsk_fs_file_close(fs_file); free(buf); free(dino_buf); return 1; } /* Check the header */ ea_head = (ext2fs_ea_header *) buf; if (tsk_getu32(fs->endian, ea_head->magic) != EXT2_EA_MAGIC) { tsk_fprintf(hFile, "Incorrect extended attribute header: %" PRIx32 "\n", tsk_getu32(fs->endian, ea_head->magic)); } /* Cycle through each entry - at the top of the block */ for (ea_entry = (ext2fs_ea_entry *) & ea_head->entry; ((uintptr_t) ea_entry < ((uintptr_t) buf + fs->block_size - sizeof(ext2fs_ea_entry))); ea_entry = (ext2fs_ea_entry *) ((uintptr_t) ea_entry + EXT2_EA_LEN(ea_entry->nlen))) { char name[256]; /* Stop if the first four bytes are NULL */ if ((ea_entry->nlen == 0) && (ea_entry->nidx == 0) && (tsk_getu16(fs->endian, ea_entry->val_off) == 0)) break; /* The Linux src does not allow this */ if (tsk_getu32(fs->endian, ea_entry->val_blk) != 0) { tsk_fprintf(hFile, "Attribute has non-zero value block - skipping\n"); continue; } /* Is the value location and size valid? 
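 * Entries whose value offset or length would extend past the end of the
 * EA block are skipped.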
*/ //if ((tsk_getu32(fs->endian, if ((tsk_getu16(fs->endian, ea_entry->val_off) > fs->block_size) || ((tsk_getu16(fs->endian, ea_entry->val_off) + tsk_getu32(fs->endian, ea_entry->val_size)) > fs->block_size)) { continue; } /* Copy the name into a buffer - not NULL term */ strncpy(name, (char *) &ea_entry->name, ea_entry->nlen); name[ea_entry->nlen] = '\0'; /* User assigned attributes - setfattr / getfattr */ if ((ea_entry->nidx == EXT2_EA_IDX_USER) || (ea_entry->nidx == EXT2_EA_IDX_TRUSTED) || (ea_entry->nidx == EXT2_EA_IDX_SECURITY)) { char val[256]; strncpy(val, &buf[tsk_getu16(fs->endian, ea_entry->val_off)], tsk_getu32(fs->endian, ea_entry->val_size) > 256 ? 256 : tsk_getu32(fs->endian, ea_entry->val_size)); val[tsk_getu32(fs->endian, ea_entry->val_size) > 256 ? 256 : tsk_getu32(fs->endian, ea_entry->val_size)] = '\0'; if (ea_entry->nidx == EXT2_EA_IDX_USER) tsk_fprintf(hFile, "user.%s=%s\n", name, val); else if (ea_entry->nidx == EXT2_EA_IDX_TRUSTED) tsk_fprintf(hFile, "trust.%s=%s\n", name, val); else if (ea_entry->nidx == EXT2_EA_IDX_SECURITY) tsk_fprintf(hFile, "security.%s=%s\n", name, val); } /* POSIX ACL - setfacl / getfacl stuff */ else if ((ea_entry->nidx == EXT2_EA_IDX_POSIX_ACL_ACCESS) || (ea_entry->nidx == EXT2_EA_IDX_POSIX_ACL_DEFAULT)) { ext2fs_pos_acl_entry_lo *acl_lo; ext2fs_pos_acl_head *acl_head; if (ea_entry->nidx == EXT2_EA_IDX_POSIX_ACL_ACCESS) tsk_fprintf(hFile, "POSIX Access Control List Entries:\n"); else if (ea_entry->nidx == EXT2_EA_IDX_POSIX_ACL_DEFAULT) tsk_fprintf(hFile, "POSIX Default Access Control List Entries:\n"); /* examine the header */ acl_head = (ext2fs_pos_acl_head *) & buf[tsk_getu16(fs->endian, ea_entry->val_off)]; if (tsk_getu32(fs->endian, acl_head->ver) != 1) { tsk_fprintf(hFile, "Invalid ACL Header Version: %" PRIu32 "\n", tsk_getu32(fs->endian, acl_head->ver)); continue; } /* The first entry starts after the header */ acl_lo = (ext2fs_pos_acl_entry_lo *) ((uintptr_t) acl_head + sizeof(ext2fs_pos_acl_head)); /* Cycle through the values */ while ((uintptr_t) acl_lo < ((uintptr_t) buf + tsk_getu16(fs->endian, ea_entry->val_off) + tsk_getu32(fs->endian, ea_entry->val_size))) { char perm[64]; int len; /* Make a string from the permissions */ ext2fs_make_acl_str(perm, 64, tsk_getu16(fs->endian, acl_lo->perm)); switch (tsk_getu16(fs->endian, acl_lo->tag)) { case EXT2_PACL_TAG_USERO: tsk_fprintf(hFile, " uid: %" PRIuUID ": %s\n", fs_meta->uid, perm); len = sizeof(ext2fs_pos_acl_entry_sh); break; case EXT2_PACL_TAG_GRPO: tsk_fprintf(hFile, " gid: %" PRIuGID ": %s\n", fs_meta->gid, perm); len = sizeof(ext2fs_pos_acl_entry_sh); break; case EXT2_PACL_TAG_OTHER: tsk_fprintf(hFile, " other: %s\n", perm); len = sizeof(ext2fs_pos_acl_entry_sh); break; case EXT2_PACL_TAG_MASK: tsk_fprintf(hFile, " mask: %s\n", perm); len = sizeof(ext2fs_pos_acl_entry_sh); break; case EXT2_PACL_TAG_GRP: tsk_fprintf(hFile, " gid: %" PRIu32 ": %s\n", tsk_getu32(fs->endian, acl_lo->id), perm); len = sizeof(ext2fs_pos_acl_entry_lo); break; case EXT2_PACL_TAG_USER: tsk_fprintf(hFile, " uid: %" PRIu32 ": %s\n", tsk_getu32(fs->endian, acl_lo->id), perm); len = sizeof(ext2fs_pos_acl_entry_lo); break; default: tsk_fprintf(hFile, "Unknown ACL tag: %d\n", tsk_getu16(fs->endian, acl_lo->tag)); len = sizeof(ext2fs_pos_acl_entry_sh); break; } acl_lo = (ext2fs_pos_acl_entry_lo *) ((uintptr_t) acl_lo + len); } } else { tsk_fprintf(hFile, "Unsupported Extended Attr Type: %d\n", ea_entry->nidx); } } egress_ea: free(buf); } if (sec_skew != 0) { tsk_fprintf(hFile, "\nAdjusted Inode Times:\n"); 
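        /* Temporarily shift the stored times by the requested clock skew for
         * display; the original values are restored once the adjusted times
         * have been printed. */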
if (fs_meta->mtime) fs_meta->mtime -= sec_skew; if (fs_meta->atime) fs_meta->atime -= sec_skew; if (fs_meta->ctime) fs_meta->ctime -= sec_skew; if (fs->ftype == TSK_FS_TYPE_EXT4 && large_inodes) { tsk_fprintf(hFile, "Accessed:\t%s\n", tsk_fs_time_to_str_subsecs(fs_meta->atime, fs_meta->atime_nano, timeBuf)); tsk_fprintf(hFile, "File Modified:\t%s\n", tsk_fs_time_to_str_subsecs(fs_meta->mtime, fs_meta->mtime_nano, timeBuf)); tsk_fprintf(hFile, "Inode Modified:\t%s\n", tsk_fs_time_to_str_subsecs(fs_meta->ctime, fs_meta->ctime_nano, timeBuf)); } else { tsk_fprintf(hFile, "Accessed:\t%s\n", tsk_fs_time_to_str(fs_meta->atime, timeBuf)); tsk_fprintf(hFile, "File Modified:\t%s\n", tsk_fs_time_to_str(fs_meta->mtime, timeBuf)); tsk_fprintf(hFile, "Inode Modified:\t%s\n", tsk_fs_time_to_str(fs_meta->ctime, timeBuf)); } if (fs->ftype == TSK_FS_TYPE_EXT4 && large_inodes) { fs_meta->crtime -= sec_skew; tsk_fprintf(hFile, "File Created:\t%s\n", tsk_fs_time_to_str(fs_meta->crtime, timeBuf)); fs_meta->crtime += sec_skew; } if (fs_meta->time2.ext2.dtime) { fs_meta->time2.ext2.dtime -= sec_skew; tsk_fprintf(hFile, "Deleted:\t%s", tsk_fs_time_to_str(fs_meta->time2.ext2.dtime, timeBuf)); fs_meta->time2.ext2.dtime += sec_skew; } if (fs_meta->mtime) fs_meta->mtime += sec_skew; if (fs_meta->atime) fs_meta->atime += sec_skew; if (fs_meta->ctime) fs_meta->ctime += sec_skew; tsk_fprintf(hFile, "\nOriginal Inode Times:\n"); } else { tsk_fprintf(hFile, "\nInode Times:\n"); } if (fs->ftype == TSK_FS_TYPE_EXT4 && large_inodes) { tsk_fprintf(hFile, "Accessed:\t%s\n", tsk_fs_time_to_str_subsecs(fs_meta->atime, fs_meta->atime_nano, timeBuf)); tsk_fprintf(hFile, "File Modified:\t%s\n", tsk_fs_time_to_str_subsecs(fs_meta->mtime, fs_meta->mtime_nano, timeBuf)); tsk_fprintf(hFile, "Inode Modified:\t%s\n", tsk_fs_time_to_str_subsecs(fs_meta->ctime, fs_meta->ctime_nano, timeBuf)); } else { tsk_fprintf(hFile, "Accessed:\t%s\n", tsk_fs_time_to_str(fs_meta->atime, timeBuf)); tsk_fprintf(hFile, "File Modified:\t%s\n", tsk_fs_time_to_str(fs_meta->mtime, timeBuf)); tsk_fprintf(hFile, "Inode Modified:\t%s\n", tsk_fs_time_to_str(fs_meta->ctime, timeBuf)); } if (fs->ftype == TSK_FS_TYPE_EXT4 && large_inodes) { tsk_fprintf(hFile, "File Created:\t%s\n", tsk_fs_time_to_str_subsecs(fs_meta->crtime, fs_meta->crtime_nano, timeBuf)); } if (fs_meta->time2.ext2.dtime) tsk_fprintf(hFile, "Deleted:\t%s\n", tsk_fs_time_to_str(fs_meta->time2.ext2.dtime, timeBuf)); if (numblock > 0) fs_meta->size = numblock * fs->block_size; tsk_fprintf(hFile, "\nDirect Blocks:\n"); print.idx = 0; print.hFile = hFile; if (tsk_fs_file_walk(fs_file, TSK_FS_FILE_WALK_FLAG_AONLY, print_addr_act, (void *) &print)) { tsk_fprintf(hFile, "\nError reading file: "); tsk_error_print(hFile); tsk_error_reset(); } else if (print.idx != 0) { tsk_fprintf(hFile, "\n"); } if (fs_meta->content_type == TSK_FS_META_CONTENT_TYPE_EXT4_EXTENTS) { const TSK_FS_ATTR *fs_attr_extent = tsk_fs_file_attr_get_type(fs_file, TSK_FS_ATTR_TYPE_UNIX_EXTENT, 0, 0); if (fs_attr_extent) { tsk_fprintf(hFile, "\nExtent Blocks:\n"); print.idx = 0; if (tsk_fs_attr_walk(fs_attr_extent, TSK_FS_FILE_WALK_FLAG_AONLY, print_addr_act, (void *) &print)) { tsk_fprintf(hFile, "\nError reading indirect attribute: "); tsk_error_print(hFile); tsk_error_reset(); } else if (print.idx != 0) { tsk_fprintf(hFile, "\n"); } } } else { fs_attr_indir = tsk_fs_file_attr_get_type(fs_file, TSK_FS_ATTR_TYPE_UNIX_INDIR, 0, 0); if (fs_attr_indir) { tsk_fprintf(hFile, "\nIndirect Blocks:\n"); print.idx = 0; if 
(tsk_fs_attr_walk(fs_attr_indir, TSK_FS_FILE_WALK_FLAG_AONLY, print_addr_act, (void *) &print)) { tsk_fprintf(hFile, "\nError reading indirect attribute: "); tsk_error_print(hFile); tsk_error_reset(); } else if (print.idx != 0) { tsk_fprintf(hFile, "\n"); } } } tsk_fs_file_close(fs_file); if (dino_buf != NULL) free((char *) dino_buf); return 0; } /* ext2fs_close - close an ext2fs file system */ static void ext2fs_close(TSK_FS_INFO * fs) { EXT2FS_INFO *ext2fs = (EXT2FS_INFO *) fs; fs->tag = 0; free((char *) ext2fs->fs); if (ext2fs->grp_buf != NULL) free((char *) ext2fs->grp_buf); if (ext2fs->bmap_buf != NULL) free((char *) ext2fs->bmap_buf); if (ext2fs->imap_buf != NULL) free((char *) ext2fs->imap_buf); tsk_deinit_lock(&ext2fs->lock); tsk_fs_free(fs); } /** * \internal * Open part of a disk image as a Ext2/3 file system. * * @param img_info Disk image to analyze * @param offset Byte offset where file system starts * @param ftype Specific type of file system * @param test NOT USED * @returns NULL on error or if data is not an Ext2/3 file system */ TSK_FS_INFO * ext2fs_open(TSK_IMG_INFO * img_info, TSK_OFF_T offset, TSK_FS_TYPE_ENUM ftype, uint8_t test) { EXT2FS_INFO *ext2fs; unsigned int len; TSK_FS_INFO *fs; ssize_t cnt; // clean up any error messages that are lying around tsk_error_reset(); if (TSK_FS_TYPE_ISEXT(ftype) == 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("Invalid FS Type in ext2fs_open"); return NULL; } if ((ext2fs = (EXT2FS_INFO *) tsk_fs_malloc(sizeof(*ext2fs))) == NULL) return NULL; fs = &(ext2fs->fs_info); fs->ftype = ftype; fs->flags = 0; fs->img_info = img_info; fs->offset = offset; fs->tag = TSK_FS_INFO_TAG; /* * Read the superblock. */ len = sizeof(ext2fs_sb); if ((ext2fs->fs = (ext2fs_sb *) tsk_malloc(len)) == NULL) { fs->tag = 0; tsk_fs_free((TSK_FS_INFO *)ext2fs); return NULL; } cnt = tsk_fs_read(fs, EXT2FS_SBOFF, (char *) ext2fs->fs, len); if (cnt != len) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2("ext2fs_open: superblock"); fs->tag = 0; free(ext2fs->fs); tsk_fs_free((TSK_FS_INFO *)ext2fs); return NULL; } /* * Verify we are looking at an EXTxFS image */ if (tsk_fs_guessu16(fs, ext2fs->fs->s_magic, EXT2FS_FS_MAGIC)) { fs->tag = 0; free(ext2fs->fs); tsk_fs_free((TSK_FS_INFO *)ext2fs); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_MAGIC); tsk_error_set_errstr("not an EXTxFS file system (magic)"); if (tsk_verbose) fprintf(stderr, "ext2fs_open: invalid magic\n"); return NULL; } if (tsk_verbose) { if (tsk_getu32(fs->endian, ext2fs->fs->s_feature_ro_compat) & EXT2FS_FEATURE_RO_COMPAT_SPARSE_SUPER) tsk_fprintf(stderr, "File system has sparse super blocks\n"); tsk_fprintf(stderr, "First data block is %" PRIu32 "\n", tsk_getu32(fs->endian, ext2fs->fs->s_first_data_block)); } /* If autodetect was given, look for the journal */ if (ftype == TSK_FS_TYPE_EXT_DETECT) { if (tsk_getu32(fs->endian, ext2fs->fs->s_feature_incompat) & EXT2FS_FEATURE_INCOMPAT_EXTENTS) { fs->ftype = TSK_FS_TYPE_EXT4; fs->flags |= TSK_FS_INFO_FLAG_HAVE_NANOSEC; } else if (tsk_getu32(fs->endian, ext2fs->fs->s_feature_compat) & EXT2FS_FEATURE_COMPAT_HAS_JOURNAL) fs->ftype = TSK_FS_TYPE_EXT3; else fs->ftype = TSK_FS_TYPE_EXT2; } fs->duname = "Fragment"; /* we need to figure out if dentries are v1 or v2 */ if (tsk_getu32(fs->endian, ext2fs->fs->s_feature_incompat) & EXT2FS_FEATURE_INCOMPAT_FILETYPE) ext2fs->deentry_type = EXT2_DE_V2; else ext2fs->deentry_type = EXT2_DE_V1; /* * Calculate the meta data info */ 
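    /* ExtX inode numbers start at 1 and the root directory is inode 2; one
     * extra virtual inode is reserved at the end of the range for the
     * Orphans directory. */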
fs->inum_count = tsk_getu32(fs->endian, ext2fs->fs->s_inodes_count) + 1; // we are adding 1 in this calc to account for Orphans directory fs->last_inum = fs->inum_count; fs->first_inum = EXT2FS_FIRSTINO; fs->root_inum = EXT2FS_ROOTINO; if (fs->inum_count < 10) { fs->tag = 0; free(ext2fs->fs); tsk_fs_free((TSK_FS_INFO *)ext2fs); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_MAGIC); tsk_error_set_errstr("Not an EXTxFS file system (inum count)"); if (tsk_verbose) fprintf(stderr, "ext2fs_open: two few inodes\n"); return NULL; } /* Set the size of the inode, but default to our data structure * size if it is larger */ ext2fs->inode_size = tsk_getu16(fs->endian, ext2fs->fs->s_inode_size); if (ext2fs->inode_size < sizeof(ext2fs_inode)) { if (tsk_verbose) tsk_fprintf(stderr, "SB inode size is small"); } /* * Calculate the block info */ fs->dev_bsize = img_info->sector_size; if (tsk_getu32(fs->endian, ext2fs->fs-> s_feature_incompat) & EXT2FS_FEATURE_INCOMPAT_64BIT) { // printf("DEBUG fs_open: 64bit file system\n"); fs->block_count = ext4_getu64(fs->endian, ext2fs->fs->s_blocks_count_hi, ext2fs->fs->s_blocks_count); } else { fs->block_count = tsk_getu32(fs->endian, ext2fs->fs->s_blocks_count); } fs->first_block = 0; fs->last_block_act = fs->last_block = fs->block_count - 1; ext2fs->first_data_block = tsk_getu32(fs->endian, ext2fs->fs->s_first_data_block); if (tsk_getu32(fs->endian, ext2fs->fs->s_log_block_size) != tsk_getu32(fs->endian, ext2fs->fs->s_log_frag_size)) { fs->tag = 0; free(ext2fs->fs); tsk_fs_free((TSK_FS_INFO *)ext2fs); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNSUPFUNC); tsk_error_set_errstr ("This file system has fragments that are a different size than blocks, which is not currently supported\nContact brian with details of the system that created this image"); if (tsk_verbose) fprintf(stderr, "ext2fs_open: fragment size not equal to block size\n"); return NULL; } fs->block_size = EXT2FS_MIN_BLOCK_SIZE << tsk_getu32(fs->endian, ext2fs->fs->s_log_block_size); // determine the last block we have in this image if ((TSK_DADDR_T) ((img_info->size - offset) / fs->block_size) < fs->block_count) fs->last_block_act = (img_info->size - offset) / fs->block_size - 1; /* The group descriptors are located in the block following the * super block */ ext2fs->groups_offset = roundup((EXT2FS_SBOFF + sizeof(ext2fs_sb)), fs->block_size); if (tsk_getu32(fs->endian, ext2fs->fs-> s_feature_incompat) & EXT2FS_FEATURE_INCOMPAT_64BIT) { ext2fs->groups_count = (EXT2_GRPNUM_T) ((ext4_getu64(fs->endian, ext2fs->fs->s_blocks_count_hi, ext2fs->fs->s_blocks_count) - ext2fs->first_data_block + tsk_getu32(fs->endian, ext2fs->fs->s_blocks_per_group) - 1) / tsk_getu32(fs->endian, ext2fs->fs->s_blocks_per_group)); } else { ext2fs->groups_count = (EXT2_GRPNUM_T) ((tsk_getu32(fs->endian, ext2fs->fs->s_blocks_count) - ext2fs->first_data_block + tsk_getu32(fs->endian, ext2fs->fs->s_blocks_per_group) - 1) / tsk_getu32(fs->endian, ext2fs->fs->s_blocks_per_group)); } /* Volume ID */ for (fs->fs_id_used = 0; fs->fs_id_used < 16; fs->fs_id_used++) { fs->fs_id[fs->fs_id_used] = ext2fs->fs->s_uuid[fs->fs_id_used]; } /* Set the generic function pointers */ fs->inode_walk = ext2fs_inode_walk; fs->block_walk = ext2fs_block_walk; fs->block_getflags = ext2fs_block_getflags; fs->get_default_attr_type = tsk_fs_unix_get_default_attr_type; //fs->load_attrs = tsk_fs_unix_make_data_run; fs->load_attrs = ext2fs_load_attrs; fs->file_add_meta = ext2fs_inode_lookup; fs->dir_open_meta = ext2fs_dir_open_meta; fs->fsstat = 
ext2fs_fsstat; fs->fscheck = ext2fs_fscheck; fs->istat = ext2fs_istat; fs->name_cmp = tsk_fs_unix_name_cmp; fs->close = ext2fs_close; /* Journal */ fs->journ_inum = tsk_getu32(fs->endian, ext2fs->fs->s_journal_inum); fs->jblk_walk = ext2fs_jblk_walk; fs->jentry_walk = ext2fs_jentry_walk; fs->jopen = ext2fs_jopen; /* initialize the caches */ /* inode map */ ext2fs->imap_buf = NULL; ext2fs->imap_grp_num = 0xffffffff; /* block map */ ext2fs->bmap_buf = NULL; ext2fs->bmap_grp_num = 0xffffffff; /* group descriptor */ ext2fs->grp_buf = NULL; ext2fs->grp_num = 0xffffffff; /* * Print some stats. */ if (tsk_verbose) tsk_fprintf(stderr, "inodes %" PRIu32 " root ino %" PRIuINUM " blocks %" PRIu32 " blocks/group %" PRIu32 "\n", tsk_getu32(fs->endian, ext2fs->fs->s_inodes_count), fs->root_inum, tsk_getu32(fs->endian, ext2fs->fs->s_blocks_count), tsk_getu32(fs->endian, ext2fs->fs->s_blocks_per_group)); tsk_init_lock(&ext2fs->lock); return (fs); } sleuthkit-4.2.0/tsk/fs/ext2fs_dent.c000644 000765 000024 00000027064 12576320700 020140 0ustar00carrierstaff000000 000000 /* ** ext2fs_dent ** The Sleuth Kit ** ** File name layer support for an Ext2 / Ext3 FS ** ** Brian Carrier [carrier sleuthkit [dot] org] ** Copyright (c) 2006-2011 Brian Carrier, Basis Technology. All Rights reserved ** Copyright (c) 2003-2006 Brian Carrier. All rights reserved ** ** TASK ** Copyright (c) 2002 Brian Carrier, @stake Inc. All rights reserved ** ** TCTUTILS ** Copyright (c) 2001 Brian Carrier. All rights reserved ** ** ** This software is distributed under the Common Public License 1.0 ** */ /** * \file ext2fs_dent.c * Contains the internal TSK file name processing code for Ext2 / ext3 */ #include #include "tsk_fs_i.h" #include "tsk_ext2fs.h" static uint8_t ext2fs_dent_copy(EXT2FS_INFO * ext2fs, char *ext2_dent, TSK_FS_NAME * fs_name) { TSK_FS_INFO *fs = &(ext2fs->fs_info); if (ext2fs->deentry_type == EXT2_DE_V1) { ext2fs_dentry1 *dir = (ext2fs_dentry1 *) ext2_dent; fs_name->meta_addr = tsk_getu32(fs->endian, dir->inode); /* ext2 does not null terminate */ if (tsk_getu16(fs->endian, dir->name_len) >= fs_name->name_size) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("ext2fs_dent_copy: Name Space too Small %d %" PRIuSIZE "", tsk_getu16(fs->endian, dir->name_len), fs_name->name_size); return 1; } /* Copy and Null Terminate */ strncpy(fs_name->name, dir->name, tsk_getu16(fs->endian, dir->name_len)); fs_name->name[tsk_getu16(fs->endian, dir->name_len)] = '\0'; fs_name->type = TSK_FS_NAME_TYPE_UNDEF; } else { ext2fs_dentry2 *dir = (ext2fs_dentry2 *) ext2_dent; fs_name->meta_addr = tsk_getu32(fs->endian, dir->inode); /* ext2 does not null terminate */ if (dir->name_len >= fs_name->name_size) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("ext2_dent_copy: Name Space too Small %d %" PRIuSIZE "", dir->name_len, fs_name->name_size); return 1; } /* Copy and Null Terminate */ strncpy(fs_name->name, dir->name, dir->name_len); fs_name->name[dir->name_len] = '\0'; switch (dir->type) { case EXT2_DE_REG: fs_name->type = TSK_FS_NAME_TYPE_REG; break; case EXT2_DE_DIR: fs_name->type = TSK_FS_NAME_TYPE_DIR; break; case EXT2_DE_CHR: fs_name->type = TSK_FS_NAME_TYPE_CHR; break; case EXT2_DE_BLK: fs_name->type = TSK_FS_NAME_TYPE_BLK; break; case EXT2_DE_FIFO: fs_name->type = TSK_FS_NAME_TYPE_FIFO; break; case EXT2_DE_SOCK: fs_name->type = TSK_FS_NAME_TYPE_SOCK; break; case EXT2_DE_LNK: fs_name->type = TSK_FS_NAME_TYPE_LNK; break; case EXT2_DE_UNKNOWN: default: fs_name->type = 
TSK_FS_NAME_TYPE_UNDEF; break; } } fs_name->flags = 0; return 0; } /* * @param a_is_del Set to 1 if block is from a deleted directory. */ static TSK_RETVAL_ENUM ext2fs_dent_parse_block(EXT2FS_INFO * ext2fs, TSK_FS_DIR * a_fs_dir, uint8_t a_is_del, TSK_LIST ** list_seen, char *buf, int len) { TSK_FS_INFO *fs = &(ext2fs->fs_info); int dellen = 0; int idx; uint16_t reclen; uint32_t inode; char *dirPtr; TSK_FS_NAME *fs_name; int minreclen = 4; if ((fs_name = tsk_fs_name_alloc(EXT2FS_MAXNAMLEN + 1, 0)) == NULL) return TSK_ERR; /* update each time by the actual length instead of the ** recorded length so we can view the deleted entries */ for (idx = 0; idx <= len - EXT2FS_DIRSIZ_lcl(1); idx += minreclen) { unsigned int namelen; dirPtr = &buf[idx]; if (ext2fs->deentry_type == EXT2_DE_V1) { ext2fs_dentry1 *dir = (ext2fs_dentry1 *) dirPtr; inode = tsk_getu32(fs->endian, dir->inode); namelen = tsk_getu16(fs->endian, dir->name_len); reclen = tsk_getu16(fs->endian, dir->rec_len); } else { ext2fs_dentry2 *dir = (ext2fs_dentry2 *) dirPtr; inode = tsk_getu32(fs->endian, dir->inode); namelen = dir->name_len; reclen = tsk_getu16(fs->endian, dir->rec_len); } minreclen = EXT2FS_DIRSIZ_lcl(namelen); /* ** Check if we may have a valid directory entry. If we don't, ** then increment to the next word and try again. */ if ((inode > fs->last_inum) || // inode is unsigned (namelen > EXT2FS_MAXNAMLEN) || (namelen == 0) || // namelen is unsigned (reclen < minreclen) || (reclen % 4) || (idx + reclen > len)) { minreclen = 4; if (dellen > 0) dellen -= 4; continue; } /* Before we process an entry in unallocated space, make * sure that it also ends in the unalloc space */ if ((dellen) && (dellen < minreclen)) { minreclen = 4; dellen -= 4; continue; } if (ext2fs_dent_copy(ext2fs, dirPtr, fs_name)) { tsk_fs_name_free(fs_name); return TSK_ERR; } /* Do we have a deleted entry? */ if ((dellen > 0) || (inode == 0) || (a_is_del)) { fs_name->flags = TSK_FS_NAME_FLAG_UNALLOC; if (dellen > 0) dellen -= minreclen; } /* We have a non-deleted entry */ else { fs_name->flags = TSK_FS_NAME_FLAG_ALLOC; } if (tsk_fs_dir_add(a_fs_dir, fs_name)) { tsk_fs_name_free(fs_name); return TSK_ERR; } /* If the actual length is shorter then the ** recorded length, then the next entry(ies) have been ** deleted. Set dellen to the length of data that ** has been deleted ** ** Because we aren't guaranteed with Ext2FS that the next ** entry begins right after this one, we will check to ** see if the difference is less than a possible entry ** before we waste time searching it */ if (dellen <= 0) { if (reclen - minreclen >= EXT2FS_DIRSIZ_lcl(1)) dellen = reclen - minreclen; else minreclen = reclen; } } tsk_fs_name_free(fs_name); return TSK_OK; } /** \internal * Process a directory and load up FS_DIR with the entries. If a pointer to * an already allocated FS_DIR struture is given, it will be cleared. If no existing * FS_DIR structure is passed (i.e. NULL), then a new one will be created. If the return * value is error or corruption, then the FS_DIR structure could * have entries (depending on when the error occured). * * @param a_fs File system to analyze * @param a_fs_dir Pointer to FS_DIR pointer. Can contain an already allocated * structure or a new structure. * @param a_addr Address of directory to process. * @returns error, corruption, ok etc. 
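 *
 * A minimal caller sketch (hypothetical, error handling omitted; callers
 * normally go through the generic tsk_fs_dir_open_meta() wrapper rather
 * than invoking this function directly):
 *
 *     TSK_FS_DIR *fs_dir = tsk_fs_dir_open_meta(fs, fs->root_inum);
 *     if (fs_dir != NULL) {
 *         size_t i;
 *         for (i = 0; i < tsk_fs_dir_getsize(fs_dir); i++) {
 *             TSK_FS_FILE *f = tsk_fs_dir_get(fs_dir, i);
 *             // ... inspect f->name, f->meta ...
 *             tsk_fs_file_close(f);
 *         }
 *         tsk_fs_dir_close(fs_dir);
 *     }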
*/ TSK_RETVAL_ENUM ext2fs_dir_open_meta(TSK_FS_INFO * a_fs, TSK_FS_DIR ** a_fs_dir, TSK_INUM_T a_addr) { EXT2FS_INFO *ext2fs = (EXT2FS_INFO *) a_fs; char *dirbuf, *dirptr; TSK_OFF_T size; TSK_FS_LOAD_FILE load_file; TSK_FS_DIR *fs_dir; TSK_LIST *list_seen = NULL; /* If we get corruption in one of the blocks, then continue processing. * retval_final will change when corruption is detected. Errors are * returned immediately. */ TSK_RETVAL_ENUM retval_tmp; TSK_RETVAL_ENUM retval_final = TSK_OK; if (a_addr < a_fs->first_inum || a_addr > a_fs->last_inum) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); tsk_error_set_errstr("ext2fs_dir_open_meta: inode value: %" PRIuINUM "\n", a_addr); return TSK_ERR; } else if (a_fs_dir == NULL) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("ext2fs_dir_open_meta: NULL fs_attr argument given"); return TSK_ERR; } if (tsk_verbose) { tsk_fprintf(stderr, "ext2fs_dir_open_meta: Processing directory %" PRIuINUM "\n", a_addr); #ifdef Ext4_DBG tsk_fprintf(stderr, "ext2fs_dir_open_meta: $OrphanFiles Inum %" PRIuINUM " == %" PRIuINUM ": %d\n", TSK_FS_ORPHANDIR_INUM(a_fs), a_addr, a_addr == TSK_FS_ORPHANDIR_INUM(a_fs)); #endif } fs_dir = *a_fs_dir; if (fs_dir) { tsk_fs_dir_reset(fs_dir); fs_dir->addr = a_addr; } else { if ((*a_fs_dir = fs_dir = tsk_fs_dir_alloc(a_fs, a_addr, 128)) == NULL) { return TSK_ERR; } } // handle the orphan directory if its contents were requested if (a_addr == TSK_FS_ORPHANDIR_INUM(a_fs)) { #ifdef Ext4_DBG tsk_fprintf(stderr, "DEBUG: Getting ready to process ORPHANS\n"); #endif return tsk_fs_dir_find_orphans(a_fs, fs_dir); } else { #ifdef Ext4_DBG tsk_fprintf(stderr, "DEBUG: not orphan %" PRIuINUM "!=%" PRIuINUM "\n", a_addr, TSK_FS_ORPHANDIR_INUM(a_fs)); #endif } if ((fs_dir->fs_file = tsk_fs_file_open_meta(a_fs, NULL, a_addr)) == NULL) { tsk_error_reset(); tsk_error_errstr2_concat("- ext2fs_dir_open_meta"); return TSK_COR; } size = roundup(fs_dir->fs_file->meta->size, a_fs->block_size); if ((dirbuf = tsk_malloc((size_t) size)) == NULL) { return TSK_ERR; } /* make a copy of the directory contents that we can process */ load_file.left = load_file.total = (size_t) size; load_file.base = load_file.cur = dirbuf; if (tsk_fs_file_walk(fs_dir->fs_file, TSK_FS_FILE_WALK_FLAG_SLACK, tsk_fs_load_file_action, (void *) &load_file)) { tsk_error_reset(); tsk_error_errstr2_concat("- ext2fs_dir_open_meta"); free(dirbuf); return TSK_COR; } /* Not all of the directory was copied, so we exit */ if (load_file.left > 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_FWALK); tsk_error_set_errstr ("ext2fs_dir_open_meta: Error reading directory contents: %" PRIuINUM "\n", a_addr); free(dirbuf); return TSK_COR; } dirptr = dirbuf; while ((int64_t) size > 0) { int len = (a_fs->block_size < size) ? a_fs->block_size : (int) size; retval_tmp = ext2fs_dent_parse_block(ext2fs, fs_dir, (fs_dir->fs_file->meta-> flags & TSK_FS_META_FLAG_UNALLOC) ? 
1 : 0, &list_seen, dirptr, len); if (retval_tmp == TSK_ERR) { retval_final = TSK_ERR; break; } else if (retval_tmp == TSK_COR) { retval_final = TSK_COR; } size -= len; dirptr = (char *) ((uintptr_t) dirptr + len); } free(dirbuf); // if we are listing the root directory, add the Orphan directory entry if (a_addr == a_fs->root_inum) { TSK_FS_NAME *fs_name = tsk_fs_name_alloc(256, 0); if (fs_name == NULL) return TSK_ERR; if (tsk_fs_dir_make_orphan_dir_name(a_fs, fs_name)) { tsk_fs_name_free(fs_name); return TSK_ERR; } if (tsk_fs_dir_add(fs_dir, fs_name)) { tsk_fs_name_free(fs_name); return TSK_ERR; } tsk_fs_name_free(fs_name); } return retval_final; } sleuthkit-4.2.0/tsk/fs/ext2fs_journal.c000644 000765 000024 00000052750 12576320700 020660 0ustar00carrierstaff000000 000000 /* @@@ UNALLOC only if seq is less - alloc can be less than block if it wrapped around ... ** ext2fs_journal ** The Sleuth Kit ** ** Journaling code for TSK_FS_INFO_TYPE_EXT_3 image ** ** Brian Carrier [carrier sleuthkit [dot] org] ** Copyright (c) 2006-2011 Brian Carrier, Basis Technology. All Rights reserved ** Copyright (c) 2004-2005 Brian Carrier. All rights reserved ** ** ** This software is distributed under the Common Public License 1.0 ** */ /** \file ext2fs_journal.c * Contains the internal TSK Ext3 journal walking code. */ #include "tsk_fs_i.h" #include "tsk_ext2fs.h" /* Everything in the journal is in big endian */ #define big_tsk_getu32(x) \ (uint32_t)((((uint8_t *)x)[3] << 0) + \ (((uint8_t *)x)[2] << 8) + \ (((uint8_t *)x)[1] << 16) + \ (((uint8_t *)x)[0] << 24) ) /* * */ static TSK_WALK_RET_ENUM load_sb_action(TSK_FS_FILE * fs_file, TSK_OFF_T a_off, TSK_DADDR_T addr, char *buf, size_t size, TSK_FS_BLOCK_FLAG_ENUM flags, void *ptr) { TSK_FS_INFO *fs = fs_file->fs_info; ext2fs_journ_sb *sb; EXT2FS_INFO *ext2fs = (EXT2FS_INFO *) fs; EXT2FS_JINFO *jinfo = ext2fs->jinfo; if (size < 1024) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNSUPFUNC); tsk_error_set_errstr ("FS block size is less than 1024, not supported in journal yet"); return TSK_WALK_ERROR; } sb = (ext2fs_journ_sb *) buf; if (big_tsk_getu32(sb->magic) != EXT2_JMAGIC) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_MAGIC); tsk_error_set_errstr("Journal inode %" PRIuINUM " does not have a valid magic value: %" PRIx32, jinfo->j_inum, big_tsk_getu32(sb->magic)); return TSK_WALK_ERROR; } jinfo->bsize = big_tsk_getu32(sb->bsize); jinfo->first_block = big_tsk_getu32(sb->first_blk); jinfo->last_block = big_tsk_getu32(sb->num_blk) - 1; jinfo->start_blk = big_tsk_getu32(sb->start_blk); jinfo->start_seq = big_tsk_getu32(sb->start_seq); return TSK_WALK_STOP; } /* Place journal data in *fs * * Return 0 on success and 1 on error * */ uint8_t ext2fs_jopen(TSK_FS_INFO * fs, TSK_INUM_T inum) { EXT2FS_INFO *ext2fs = (EXT2FS_INFO *) fs; EXT2FS_JINFO *jinfo; // clean up any error messages that are lying around tsk_error_reset(); if (!fs) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("ext2fs_jopen: fs is null"); return 1; } ext2fs->jinfo = jinfo = (EXT2FS_JINFO *) tsk_malloc(sizeof(EXT2FS_JINFO)); if (jinfo == NULL) { return 1; } jinfo->j_inum = inum; jinfo->fs_file = tsk_fs_file_open_meta(fs, NULL, inum); if (!jinfo->fs_file) { free(jinfo); return 1; // error("error finding journal inode %" PRIu32, inum); } if (tsk_fs_file_walk(jinfo->fs_file, 0, load_sb_action, NULL)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_FWALK); tsk_error_set_errstr("Error loading ext3 journal"); tsk_fs_file_close(jinfo->fs_file); free(jinfo); 
return 1; } if (tsk_verbose) tsk_fprintf(stderr, "journal opened at inode %" PRIuINUM " bsize: %" PRIu32 " First JBlk: %" PRIuDADDR " Last JBlk: %" PRIuDADDR "\n", inum, jinfo->bsize, jinfo->first_block, jinfo->last_block); return 0; } /* Limitations: does not use the action or any flags * * return 0 on success and 1 on error * */ uint8_t ext2fs_jentry_walk(TSK_FS_INFO * fs, int flags, TSK_FS_JENTRY_WALK_CB action, void *ptr) { EXT2FS_INFO *ext2fs = (EXT2FS_INFO *) fs; EXT2FS_JINFO *jinfo = ext2fs->jinfo; char *journ; TSK_FS_LOAD_FILE buf1; TSK_DADDR_T i; int b_desc_seen = 0; ext2fs_journ_sb *journ_sb = NULL; ext4fs_journ_commit_head *commit_head; // clean up any error messages that are lying around tsk_error_reset(); if ((jinfo == NULL) || (jinfo->fs_file == NULL) || (jinfo->fs_file->meta == NULL)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("ext2fs_jentry_walk: journal is not open"); return 1; } if (jinfo->fs_file->meta->size != (jinfo->last_block + 1) * jinfo->bsize) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("ext2fs_jentry_walk: journal file size is different from \nsize reported in journal super block"); return 1; } /* Load the journal into a buffer */ buf1.left = buf1.total = (size_t) jinfo->fs_file->meta->size; journ = buf1.cur = buf1.base = tsk_malloc(buf1.left); if (journ == NULL) { return 1; } if (tsk_fs_file_walk(jinfo->fs_file, 0, tsk_fs_load_file_action, (void *) &buf1)) { free(journ); return 1; } if (buf1.left > 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_FWALK); tsk_error_set_errstr ("ext2fs_jentry_walk: Buffer not fully copied"); free(journ); return 1; } /* Process the journal * Cycle through each block */ tsk_printf("JBlk\tDescription\n"); /* Note that 'i' is incremented when we find a descriptor block and * process its contents. */ for (i = 0; i < jinfo->last_block; i++) { ext2fs_journ_head *head; /* if there is no magic, then it is a normal block * These should be accounted for when we see its corresponding * descriptor. We get the 'unknown' when its desc has * been reused, it is in the next batch to be overwritten, * or if it has not been used before */ head = (ext2fs_journ_head *) & journ[i * jinfo->bsize]; if (big_tsk_getu32(head->magic) != EXT2_JMAGIC) { if (i < jinfo->first_block) { tsk_printf("%" PRIuDADDR ":\tUnused\n", i); } #if 0 /* For now, we ignore the case of the iitial entries before a descriptor, it is too hard ... */ else if (b_desc_seen == 0) { ext2fs_journ_head *head2 = NULL; TSK_DADDR_T a; int next_head = 0, next_seq = 0; ext2fs_journ_dentry *dentry; /* This occurs when the log cycled around * We need to find out where the descriptor is * and where we need to end */ b_desc_seen = 1; for (a = i; a < jinfo->last_block; a++) { head2 = (ext2fs_journ_head *) & journ[a * jinfo->bsize]; if ((big_tsk_getu32(head2->magic) == EXT2_JMAGIC)) { next_head = a; next_seq = big_tsk_getu32(head2->entry_seq); break; } } if (next_head == 0) { tsk_printf("%" PRIuDADDR ":\tFS Block Unknown\n", i); } /* Find the last descr in the journ */ for (a = jinfo->last_block; a > i; a--) { head2 = (ext2fs_journ_head *) & journ[a * jinfo->bsize]; if ((big_tsk_getu32(head2->magic) == EXT2_JMAGIC) && (big_tsk_getu32(head2->entry_type) == EXT2_J_ETYPE_DESC) && (next_seq == big_tsk_getu32(head2->entry_seq))) { break; // @@@@ We should abort if we reach a commit before descriptor } } /* We did not find a descriptor in the journ! 
* print unknown for the rest of the journ */ if (a == i) { tsk_printf("%" PRIuDADDR ":\tFS Block Unknown\n", i); continue; } dentry = (ext2fs_journ_dentry *) ((uintptr_t) head2 + sizeof(ext2fs_journ_head));; /* Cycle through the descriptor entries */ while ((uintptr_t) dentry <= ((uintptr_t) head2 + jinfo->bsize - sizeof(ext2fs_journ_head))) { /* Only start to look after the index in the desc has looped */ if (++a <= jinfo->last_block) { ext2fs_journ_head *head3; /* Look at the block that this entry refers to */ head3 = (ext2fs_journ_head *) & journ[i * jinfo->bsize]; if ((big_tsk_getu32(head3->magic) == EXT2_JMAGIC)) { i--; break; } /* If it doesn't have the magic, then it is a * journal entry and we print the FS info */ tsk_printf("%" PRIuDADDR ":\tFS Block %" PRIu32 "\n", i, big_tsk_getu32(dentry->fs_blk)); /* Our counter is over the end of the journ */ if (++i > jinfo->last_block) break; } /* Increment to the next */ if (big_tsk_getu32(dentry->flag) & EXT2_J_DENTRY_LAST) break; /* If the SAMEID value is set, then we advance by the size of the entry, otherwise add 16 for the ID */ else if (big_tsk_getu32(dentry->flag) & EXT2_J_DENTRY_SAMEID) dentry = (ext2fs_journ_dentry *) ((uintptr_t) dentry + sizeof(ext2fs_journ_dentry)); else dentry = (ext2fs_journ_dentry *) ((uintptr_t) dentry + sizeof(ext2fs_journ_dentry) + 16); } } #endif else { tsk_printf("%" PRIuDADDR ":\tUnallocated FS Block Unknown\n", i); } } /* The super block */ else if ((big_tsk_getu32(head->entry_type) == EXT2_J_ETYPE_SB1) || (big_tsk_getu32(head->entry_type) == EXT2_J_ETYPE_SB2)) { tsk_printf("%" PRIuDADDR ":\tSuperblock (seq: %" PRIu32 ")\n", i, big_tsk_getu32(head->entry_seq)); journ_sb = head; tsk_printf("sb version: %d\n", big_tsk_getu32(head->entry_type)); tsk_printf("sb version: %d\n", big_tsk_getu32(journ_sb->entrytype)); tsk_printf("sb feature_compat flags 0x%08X\n", big_tsk_getu32(journ_sb->feature_compat)); if (big_tsk_getu32(journ_sb-> feature_compat) & JBD2_FEATURE_COMPAT_CHECKSUM) tsk_printf("\tJOURNAL_CHECKSUMS\n"); tsk_printf("sb feature_incompat flags 0x%08X\n", big_tsk_getu32(journ_sb->feature_incompat)); if (big_tsk_getu32(journ_sb-> feature_incompat) & JBD2_FEATURE_INCOMPAT_REVOKE) tsk_printf("\tJOURNAL_REVOKE\n"); if (big_tsk_getu32(journ_sb-> feature_incompat) & JBD2_FEATURE_INCOMPAT_64BIT) tsk_printf("\tJOURNAL_64BIT\n"); if (big_tsk_getu32(journ_sb-> feature_incompat) & JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT) tsk_printf("\tJOURNAL_ASYNC_COMMIT\n"); tsk_printf("sb feature_ro_incompat flags 0x%08X\n", big_tsk_getu32(journ_sb->feature_ro_incompat)); } /* Revoke Block */ else if (big_tsk_getu32(head->entry_type) == EXT2_J_ETYPE_REV) { tsk_printf("%" PRIuDADDR ":\t%sRevoke Block (seq: %" PRIu32 ")\n", i, ((i < jinfo->start_blk) || (big_tsk_getu32(head->entry_seq) < jinfo->start_seq)) ? "Unallocated " : "Allocated ", big_tsk_getu32(head->entry_seq)); } /* The commit is the end of the entries */ else if (big_tsk_getu32(head->entry_type) == EXT2_J_ETYPE_COM) { tsk_printf("%" PRIuDADDR ":\t%sCommit Block (seq: %" PRIu32, i, ((i < jinfo->start_blk) || (big_tsk_getu32(head->entry_seq) < jinfo->start_seq)) ? 
"Unallocated " : "Allocated ", big_tsk_getu32(head->entry_seq)); commit_head = head; //tsk_printf("commit seq %" PRIu32 "\n", big_tsk_getu32(commit_head->c_header.entry_seq)); if (big_tsk_getu32(journ_sb-> feature_compat) & JBD2_FEATURE_COMPAT_CHECKSUM) { int chksum_type = commit_head->chksum_type; if (chksum_type) { tsk_printf(", checksum_type: %d", commit_head->chksum_type); switch (commit_head->chksum_type) { case JBD2_CRC32_CHKSUM: tsk_printf("-CRC32"); break; case JBD2_MD5_CHKSUM: tsk_printf("-MD5"); break; case JBD2_SHA1_CHKSUM: tsk_printf("-SHA1"); break; default: tsk_printf("-UNKOWN"); break; } tsk_printf(", checksum_size: %d", commit_head->chksum_size); tsk_printf(", chksum: 0x%08X", big_tsk_getu32(commit_head->chksum)); } } tsk_printf(", sec: %llu.%u", tsk_getu64(TSK_BIG_ENDIAN, commit_head->commit_sec), NSEC_PER_SEC / 10 * tsk_getu32(TSK_BIG_ENDIAN, commit_head->commit_nsec)); tsk_printf(")\n"); } /* The descriptor describes the FS blocks that follow it */ else if (big_tsk_getu32(head->entry_type) == EXT2_J_ETYPE_DESC) { ext2fs_journ_dentry *dentry; ext2fs_journ_head *head2; int unalloc = 0; b_desc_seen = 1; /* Is this an unallocated journ block or sequence */ if ((i < jinfo->start_blk) || (big_tsk_getu32(head->entry_seq) < jinfo->start_seq)) unalloc = 1; tsk_printf("%" PRIuDADDR ":\t%sDescriptor Block (seq: %" PRIu32 ")\n", i, (unalloc) ? "Unallocated " : "Allocated ", big_tsk_getu32(head->entry_seq)); dentry = (ext2fs_journ_dentry *) ((uintptr_t) head + sizeof(ext2fs_journ_head));; /* Cycle through the descriptor entries to account for the journal blocks */ while ((uintptr_t) dentry <= ((uintptr_t) head + jinfo->bsize - sizeof(ext2fs_journ_head))) { /* Our counter is over the end of the journ */ if (++i > jinfo->last_block) break; /* Look at the block that this entry refers to */ head2 = (ext2fs_journ_head *) & journ[i * jinfo->bsize]; if ((big_tsk_getu32(head2->magic) == EXT2_JMAGIC) && (big_tsk_getu32(head2->entry_seq) >= big_tsk_getu32(head->entry_seq))) { i--; break; } /* If it doesn't have the magic, then it is a * journal entry and we print the FS info */ tsk_printf("%" PRIuDADDR ":\t%sFS Block %" PRIu32 "\n", i, (unalloc) ? 
"Unallocated " : "Allocated ", big_tsk_getu32(dentry->fs_blk)); /* Increment to the next */ if (big_tsk_getu32(dentry->flag) & EXT2_J_DENTRY_LAST) break; /* If the SAMEID value is set, then we advance by the size of the entry, otherwise add 16 for the ID */ else if (big_tsk_getu32(dentry->flag) & EXT2_J_DENTRY_SAMEID) dentry = (ext2fs_journ_dentry *) ((uintptr_t) dentry + sizeof(ext2fs_journ_dentry)); else dentry = (ext2fs_journ_dentry *) ((uintptr_t) dentry + sizeof(ext2fs_journ_dentry) + 16); } } } free(journ); return 0; } /* * Limitations for 1st version: start must equal end and action is ignored * * Return 0 on success and 1 on error */ uint8_t ext2fs_jblk_walk(TSK_FS_INFO * fs, TSK_DADDR_T start, TSK_DADDR_T end, int flags, TSK_FS_JBLK_WALK_CB action, void *ptr) { EXT2FS_INFO *ext2fs = (EXT2FS_INFO *) fs; EXT2FS_JINFO *jinfo = ext2fs->jinfo; char *journ; TSK_FS_LOAD_FILE buf1; TSK_DADDR_T i; ext2fs_journ_head *head; // clean up any error messages that are lying around tsk_error_reset(); if ((jinfo == NULL) || (jinfo->fs_file == NULL) || (jinfo->fs_file->meta == NULL)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("ext2fs_jblk_walk: journal is not open"); return 1; } if (jinfo->last_block < end) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); tsk_error_set_errstr("ext2fs_jblk_walk: end is too large "); return 1; } if (start != end) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("ext2fs_blk_walk: only start == end is currently supported"); return 1; } if (jinfo->fs_file->meta->size != (jinfo->last_block + 1) * jinfo->bsize) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNSUPFUNC); tsk_error_set_errstr ("ext2fs_jblk_walk: journal file size is different from size reported in journal super block"); return 1; } /* Load into buffer and then process it * Only get the minimum needed */ buf1.left = buf1.total = (size_t) ((end + 1) * jinfo->bsize); journ = buf1.cur = buf1.base = tsk_malloc(buf1.left); if (journ == NULL) { return 1; } if (tsk_fs_file_walk(jinfo->fs_file, 0, tsk_fs_load_file_action, (void *) &buf1)) { free(journ); return 1; } if (buf1.left > 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_FWALK); tsk_error_set_errstr("ext2fs_jblk_walk: Buffer not fully copied"); free(journ); return 1; } head = (ext2fs_journ_head *) & journ[end * jinfo->bsize]; /* Check if our target block is a journal data structure. 
* * If not, * we need to look for its descriptor to see if it has been * escaped */ if (big_tsk_getu32(head->magic) != EXT2_JMAGIC) { /* cycle backwards until we find a desc block */ for (i = end - 1; i >= 0; i--) { ext2fs_journ_dentry *dentry; TSK_DADDR_T diff; head = (ext2fs_journ_head *) & journ[i * jinfo->bsize]; if (big_tsk_getu32(head->magic) != EXT2_JMAGIC) continue; /* If we get a commit, then any desc we find will not * be for our block, so forget about it */ if (big_tsk_getu32(head->entry_type) == EXT2_J_ETYPE_COM) break; /* Skip any other data structure types */ if (big_tsk_getu32(head->entry_type) != EXT2_J_ETYPE_DESC) continue; /* We now have the previous descriptor * * NOTE: We have no clue if this is the correct * descriptor if it is not the current 'run' of * transactions, but this is the best we can do */ diff = end - i; dentry = (ext2fs_journ_dentry *) (&journ[i * jinfo->bsize] + sizeof(ext2fs_journ_head)); while ((uintptr_t) dentry <= ((uintptr_t) & journ[(i + 1) * jinfo->bsize] - sizeof(ext2fs_journ_head))) { if (--diff == 0) { if (big_tsk_getu32(dentry->flag) & EXT2_J_DENTRY_ESC) { journ[end * jinfo->bsize] = 0xC0; journ[end * jinfo->bsize + 1] = 0x3B; journ[end * jinfo->bsize + 2] = 0x39; journ[end * jinfo->bsize + 3] = 0x98; } break; } /* If the SAMEID value is set, then we advance by the size of the entry, otherwise add 16 for the ID */ if (big_tsk_getu32(dentry->flag) & EXT2_J_DENTRY_SAMEID) dentry = (ext2fs_journ_dentry *) ((uintptr_t) dentry + sizeof(ext2fs_journ_dentry)); else dentry = (ext2fs_journ_dentry *) ((uintptr_t) dentry + sizeof(ext2fs_journ_dentry) + 16); } break; } } if (fwrite(&journ[end * jinfo->bsize], jinfo->bsize, 1, stdout) != 1) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_WRITE); tsk_error_set_errstr ("ext2fs_jblk_walk: error writing buffer block"); free(journ); return 1; } free(journ); return 0; } sleuthkit-4.2.0/tsk/fs/fatfs.c000644 000765 000024 00000062516 12576320700 017017 0ustar00carrierstaff000000 000000 /* ** The Sleuth Kit ** ** Copyright (c) 2013 Basis Technology Corp. All rights reserved ** Contact: Brian Carrier [carrier sleuthkit [dot] org] ** ** This software is distributed under the Common Public License 1.0 ** */ /** * \file fatfs.c * Contains the internal TSK FAT file system code to handle basic file system * processing for opening file system, processing sectors, and directory entries. */ #include "tsk_fs_i.h" #include "tsk_fatfs.h" #include "tsk_fatxxfs.h" #include "tsk_exfatfs.h" /** * \internal * Open part of a disk image as a FAT file system. * * @param a_img_info Disk image to analyze * @param a_offset Byte offset where FAT file system starts * @param a_ftype Specific type of FAT file system * @param a_test NOT USED * @returns NULL on error or if data is not a FAT file system */ TSK_FS_INFO * fatfs_open(TSK_IMG_INFO *a_img_info, TSK_OFF_T a_offset, TSK_FS_TYPE_ENUM a_ftype, uint8_t a_test) { const char *func_name = "fatfs_open"; FATFS_INFO *fatfs = NULL; TSK_FS_INFO *fs = NULL; TSK_OFF_T boot_sector_offset = 0; int find_boot_sector_attempt = 0; ssize_t bytes_read = 0; FATFS_MASTER_BOOT_RECORD *bootSector; tsk_error_reset(); if (TSK_FS_TYPE_ISFAT(a_ftype) == 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("%s: Invalid FS Type", func_name); return NULL; } // Allocate an FATFS_INFO and initialize its generic TSK_FS_INFO members. 
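    /* Non-authoritative usage sketch: this internal open routine is normally
     * reached through the generic API, roughly as follows ("image.dd" is a
     * hypothetical raw image; error handling omitted):
     *
     *     TSK_IMG_INFO *img = tsk_img_open_sing(_TSK_T("image.dd"),
     *                                           TSK_IMG_TYPE_DETECT, 0);
     *     TSK_FS_INFO *fs = tsk_fs_open_img(img, 0, TSK_FS_TYPE_FAT_DETECT);
     *     // ... use fs ...
     *     tsk_fs_close(fs);
     *     tsk_img_close(img);
     */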
if ((fatfs = (FATFS_INFO*)tsk_fs_malloc(sizeof(FATFS_INFO))) == NULL) { return NULL; } fs = &(fatfs->fs_info); fs->ftype = a_ftype; fs->img_info = a_img_info; fs->offset = a_offset; fs->dev_bsize = a_img_info->sector_size; fs->journ_inum = 0; fs->tag = TSK_FS_INFO_TAG; // Look for a FAT boot sector. Try up to three times because FAT32 and exFAT file systems have backup boot sectors. for (find_boot_sector_attempt = 0; find_boot_sector_attempt < 3; ++find_boot_sector_attempt) { if (find_boot_sector_attempt == 1) { // The FATXX backup boot sector is located in sector 6, look there. boot_sector_offset = 6 * fs->img_info->sector_size; } else if (find_boot_sector_attempt == 2) { // The exFAT backup boot sector is located in sector 12, look there. boot_sector_offset = 12 * fs->img_info->sector_size; } // Read in the prospective boot sector. bytes_read = tsk_fs_read(fs, boot_sector_offset, fatfs->boot_sector_buffer, FATFS_MASTER_BOOT_RECORD_SIZE); if (bytes_read != FATFS_MASTER_BOOT_RECORD_SIZE) { if (bytes_read >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2("%s: boot sector", func_name); tsk_fs_free((TSK_FS_INFO *)fatfs); return NULL; } // Check it out... bootSector = (FATFS_MASTER_BOOT_RECORD*)fatfs->boot_sector_buffer; if (tsk_fs_guessu16(fs, bootSector->magic, FATFS_FS_MAGIC) != 0) { // No magic, look for a backup boot sector. if ((tsk_getu16(TSK_LIT_ENDIAN, bootSector->magic) == 0) && (find_boot_sector_attempt < 3)) { continue; } else { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_MAGIC); tsk_error_set_errstr("Not a FATFS file system (magic)"); if (tsk_verbose) { fprintf(stderr, "%s: Incorrect FATFS magic\n", func_name); } tsk_fs_free((TSK_FS_INFO *)fatfs); return NULL; } } else { // Found the magic. fatfs->using_backup_boot_sector = boot_sector_offset > 0; if (fatfs->using_backup_boot_sector && tsk_verbose) { fprintf(stderr, "%s: Using backup boot sector\n", func_name); } break; } } // Attempt to open the file system as one of the FAT types. if ((a_ftype == TSK_FS_TYPE_FAT_DETECT && (fatxxfs_open(fatfs) == 0 || exfatfs_open(fatfs) == 0)) || (a_ftype == TSK_FS_TYPE_EXFAT && exfatfs_open(fatfs) == 0) || (fatxxfs_open(fatfs) == 0)) { return (TSK_FS_INFO*)fatfs; } else { tsk_fs_free((TSK_FS_INFO *)fatfs); return NULL; } } /* TTL is 0 if the entry has not been used. TTL of 1 means it was the * most recently used, and TTL of FATFS_FAT_CACHE_N means it was the least * recently used. This function has a LRU replacement algo * * Note: This routine assumes &fatfs->cache_lock is locked by the caller. 
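 * For illustration only (assuming FATFS_FAT_CACHE_N == 2): with TTLs {1, 2}
 * (entry 0 most recently used), a hit on entry 1 updates them to {2, 1}; a
 * later miss then evicts whichever entry has a TTL of 0 or >= FATFS_FAT_CACHE_N
 * (here entry 0) and gives the newly loaded entry a TTL of 1.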
*/ // return -1 on error, or cache index on success (0 to FATFS_FAT_CACHE_N) static int getFATCacheIdx(FATFS_INFO * fatfs, TSK_DADDR_T sect) { int i, cidx; ssize_t cnt; TSK_FS_INFO *fs = (TSK_FS_INFO *) & fatfs->fs_info; // see if we already have it in the cache for (i = 0; i < FATFS_FAT_CACHE_N; i++) { if ((fatfs->fatc_ttl[i] > 0) && (sect >= fatfs->fatc_addr[i]) && (sect < (fatfs->fatc_addr[i] + (FATFS_FAT_CACHE_B >> fatfs->ssize_sh)))) { int a; // update the TTLs to push i to the front for (a = 0; a < FATFS_FAT_CACHE_N; a++) { if (fatfs->fatc_ttl[a] == 0) continue; if (fatfs->fatc_ttl[a] < fatfs->fatc_ttl[i]) fatfs->fatc_ttl[a]++; } fatfs->fatc_ttl[i] = 1; // fprintf(stdout, "FAT Hit: %d\n", sect); // fflush(stdout); return i; } } // fprintf(stdout, "FAT Miss: %d\n", (int)sect); // fflush(stdout); // Look for an unused entry or an entry with a TTL of FATFS_FAT_CACHE_N cidx = 0; for (i = 0; i < FATFS_FAT_CACHE_N; i++) { if ((fatfs->fatc_ttl[i] == 0) || (fatfs->fatc_ttl[i] >= FATFS_FAT_CACHE_N)) { cidx = i; } } // fprintf(stdout, "FAT Removing: %d\n", (int)fatfs->fatc_addr[cidx]); // fflush(stdout); // read the data cnt = tsk_fs_read(fs, sect * fs->block_size, fatfs->fatc_buf[cidx], FATFS_FAT_CACHE_B); if (cnt != FATFS_FAT_CACHE_B) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2("getFATCacheIdx: FAT: %" PRIuDADDR, sect); return -1; } // update the TTLs if (fatfs->fatc_ttl[cidx] == 0) // special case for unused entry fatfs->fatc_ttl[cidx] = FATFS_FAT_CACHE_N + 1; for (i = 0; i < FATFS_FAT_CACHE_N; i++) { if (fatfs->fatc_ttl[i] == 0) continue; if (fatfs->fatc_ttl[i] < fatfs->fatc_ttl[cidx]) fatfs->fatc_ttl[i]++; } fatfs->fatc_ttl[cidx] = 1; fatfs->fatc_addr[cidx] = sect; return cidx; } /* * Set *value to the entry in the File Allocation Table (FAT) * for the given cluster * * *value is in clusters and may need to be coverted to * sectors by the calling function * * Invalid values in the FAT (i.e. greater than the largest * cluster have a value of 0 returned and a 0 return value. * * Return 1 on error and 0 on success */ uint8_t fatfs_getFAT(FATFS_INFO * fatfs, TSK_DADDR_T clust, TSK_DADDR_T * value) { uint8_t *a_ptr; uint16_t tmp16; TSK_FS_INFO *fs = (TSK_FS_INFO *) & fatfs->fs_info; TSK_DADDR_T sect, offs; ssize_t cnt; int cidx; /* Sanity Check */ if (clust > fatfs->lastclust) { /* silently ignore requests for the unclustered sectors... 
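     * (i.e. sectors at the very end of the volume that fall after the last
     * complete cluster and therefore belong to no cluster at all)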
*/ if ((clust == fatfs->lastclust + 1) && ((fatfs->firstclustsect + fatfs->csize * fatfs->clustcnt - 1) != fs->last_block)) { if (tsk_verbose) tsk_fprintf(stderr, "fatfs_getFAT: Ignoring request for non-clustered sector\n"); return 0; } tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("fatfs_getFAT: invalid cluster address: %" PRIuDADDR, clust); return 1; } switch (fatfs->fs_info.ftype) { case TSK_FS_TYPE_FAT12: if (clust & 0xf000) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("fatfs_getFAT: TSK_FS_TYPE_FAT12 Cluster %" PRIuDADDR " too large", clust); return 1; } /* id the sector in the FAT */ sect = fatfs->firstfatsect + ((clust + (clust >> 1)) >> fatfs->ssize_sh); tsk_take_lock(&fatfs->cache_lock); /* Load the FAT if we don't have it */ // see if it is in the cache if (-1 == (cidx = getFATCacheIdx(fatfs, sect))) { tsk_release_lock(&fatfs->cache_lock); return 1; } /* get the offset into the cache */ offs = ((sect - fatfs->fatc_addr[cidx]) << fatfs->ssize_sh) + (clust + (clust >> 1)) % fatfs->ssize; /* special case when the 12-bit value goes across the cache * we load the cache to start at this sect. The cache * size must therefore be at least 2 sectors large */ if (offs == (FATFS_FAT_CACHE_B - 1)) { // read the data -- TTLs will already have been updated cnt = tsk_fs_read(fs, sect * fs->block_size, fatfs->fatc_buf[cidx], FATFS_FAT_CACHE_B); if (cnt != FATFS_FAT_CACHE_B) { tsk_release_lock(&fatfs->cache_lock); if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2 ("fatfs_getFAT: TSK_FS_TYPE_FAT12 FAT overlap: %" PRIuDADDR, sect); return 1; } fatfs->fatc_addr[cidx] = sect; offs = (clust + (clust >> 1)) % fatfs->ssize; } /* get pointer to entry in current buffer */ a_ptr = (uint8_t *) fatfs->fatc_buf[cidx] + offs; tmp16 = tsk_getu16(fs->endian, a_ptr); tsk_release_lock(&fatfs->cache_lock); /* slide it over if it is one of the odd clusters */ if (clust & 1) tmp16 >>= 4; *value = tmp16 & FATFS_12_MASK; /* sanity check */ if ((*value > (fatfs->lastclust)) && (*value < (0x0ffffff7 & FATFS_12_MASK))) { if (tsk_verbose) tsk_fprintf(stderr, "fatfs_getFAT: TSK_FS_TYPE_FAT12 cluster (%" PRIuDADDR ") too large (%" PRIuDADDR ") - resetting\n", clust, *value); *value = 0; } return 0; case TSK_FS_TYPE_FAT16: /* Get sector in FAT for cluster and load it if needed */ sect = fatfs->firstfatsect + ((clust << 1) >> fatfs->ssize_sh); tsk_take_lock(&fatfs->cache_lock); if (-1 == (cidx = getFATCacheIdx(fatfs, sect))) { tsk_release_lock(&fatfs->cache_lock); return 1; } /* get pointer to entry in the cache buffer */ a_ptr = (uint8_t *) fatfs->fatc_buf[cidx] + ((sect - fatfs->fatc_addr[cidx]) << fatfs->ssize_sh) + ((clust << 1) % fatfs->ssize); *value = tsk_getu16(fs->endian, a_ptr) & FATFS_16_MASK; tsk_release_lock(&fatfs->cache_lock); /* sanity check */ if ((*value > (fatfs->lastclust)) && (*value < (0x0ffffff7 & FATFS_16_MASK))) { if (tsk_verbose) tsk_fprintf(stderr, "fatfs_getFAT: contents of TSK_FS_TYPE_FAT16 entry %" PRIuDADDR " too large - resetting\n", clust); *value = 0; } return 0; case TSK_FS_TYPE_FAT32: case TSK_FS_TYPE_EXFAT: /* Get sector in FAT for cluster and load if needed */ sect = fatfs->firstfatsect + ((clust << 2) >> fatfs->ssize_sh); tsk_take_lock(&fatfs->cache_lock); if (-1 == (cidx = getFATCacheIdx(fatfs, sect))) { tsk_release_lock(&fatfs->cache_lock); return 1; } /* get pointer to entry in current buffer */ a_ptr = (uint8_t *) fatfs->fatc_buf[cidx] + ((sect - fatfs->fatc_addr[cidx]) << 
fatfs->ssize_sh) + (clust << 2) % fatfs->ssize; *value = tsk_getu32(fs->endian, a_ptr) & FATFS_32_MASK; tsk_release_lock(&fatfs->cache_lock); /* sanity check */ if ((*value > fatfs->lastclust) && (*value < (0x0ffffff7 & FATFS_32_MASK))) { if (tsk_verbose) tsk_fprintf(stderr, "fatfs_getFAT: contents of entry %" PRIuDADDR " too large - resetting\n", clust); *value = 0; } return 0; default: tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("fatfs_getFAT: Unknown FAT type: %d", fatfs->fs_info.ftype); return 1; } } /************************************************************************** * * BLOCK WALKING * *************************************************************************/ /* ** Walk the sectors of the partition. ** ** NOTE: This is by SECTORS and not CLUSTERS ** _flags: TSK_FS_BLOCK_FLAG_ALLOC, TSK_FS_BLOCK_FLAG_UNALLOC, TSK_FS_BLOCK_FLAG_META ** TSK_FS_BLOCK_FLAG_CONT ** */ uint8_t fatfs_block_walk(TSK_FS_INFO * fs, TSK_DADDR_T a_start_blk, TSK_DADDR_T a_end_blk, TSK_FS_BLOCK_WALK_FLAG_ENUM a_flags, TSK_FS_BLOCK_WALK_CB a_action, void *a_ptr) { char *myname = "fatfs_block_walk"; FATFS_INFO *fatfs = (FATFS_INFO *) fs; char *data_buf = NULL; ssize_t cnt; TSK_FS_BLOCK *fs_block; TSK_DADDR_T addr; int myflags; unsigned int i; // clean up any error messages that are lying around tsk_error_reset(); /* * Sanity checks. */ if (a_start_blk < fs->first_block || a_start_blk > fs->last_block) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); tsk_error_set_errstr("%s: Start block: %" PRIuDADDR "", myname, a_start_blk); return 1; } if (a_end_blk < fs->first_block || a_end_blk > fs->last_block) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); tsk_error_set_errstr("%s: End block: %" PRIuDADDR "", myname, a_end_blk); return 1; } if (tsk_verbose) tsk_fprintf(stderr, "fatfs_block_walk: Block Walking %" PRIuDADDR " to %" PRIuDADDR "\n", a_start_blk, a_end_blk); /* Sanity check on a_flags -- make sure at least one ALLOC is set */ if (((a_flags & TSK_FS_BLOCK_WALK_FLAG_ALLOC) == 0) && ((a_flags & TSK_FS_BLOCK_WALK_FLAG_UNALLOC) == 0)) { a_flags |= (TSK_FS_BLOCK_WALK_FLAG_ALLOC | TSK_FS_BLOCK_WALK_FLAG_UNALLOC); } if (((a_flags & TSK_FS_BLOCK_WALK_FLAG_META) == 0) && ((a_flags & TSK_FS_BLOCK_WALK_FLAG_CONT) == 0)) { a_flags |= (TSK_FS_BLOCK_WALK_FLAG_CONT | TSK_FS_BLOCK_WALK_FLAG_META); } if ((fs_block = tsk_fs_block_alloc(fs)) == NULL) { return 1; } /* cycle through the sectors. We do the sectors before the first * cluster seperate from the data area */ addr = a_start_blk; /* Before the data area beings (FAT, root directory etc.) 
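     * (reserved sectors and the FATs are flagged META below, while the
     * FAT12/16 root directory area that follows them is flagged CONT; all of
     * this pre-cluster area is treated as allocated)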
*/ if ((a_start_blk < fatfs->firstclustsect) && (a_flags & TSK_FS_BLOCK_WALK_FLAG_ALLOC)) { if (tsk_verbose) tsk_fprintf(stderr, "fatfs_block_walk: Walking non-data area (pre %" PRIuDADDR "\n)", fatfs->firstclustsect); if ((data_buf = (char *) tsk_malloc(fs->block_size * 8)) == NULL) { tsk_fs_block_free(fs_block); return 1; } /* Read 8 sectors at a time to be faster */ for (; addr < fatfs->firstclustsect && addr <= a_end_blk;) { if ((a_flags & TSK_FS_BLOCK_WALK_FLAG_AONLY) == 0) { cnt = tsk_fs_read_block(fs, addr, data_buf, fs->block_size * 8); if (cnt != fs->block_size * 8) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2 ("fatfs_block_walk: pre-data area block: %" PRIuDADDR, addr); free(data_buf); tsk_fs_block_free(fs_block); return 1; } } /* Process the sectors until we get to the clusters, * end of target, or end of buffer */ for (i = 0; i < 8 && (addr) <= a_end_blk && (addr) < fatfs->firstclustsect; i++, addr++) { int retval; myflags = TSK_FS_BLOCK_FLAG_ALLOC; /* stuff before the first data sector is the * FAT and boot sector */ if (addr < fatfs->firstdatasect) myflags |= TSK_FS_BLOCK_FLAG_META; /* This must be the root directory for FAT12/16 */ else myflags |= TSK_FS_BLOCK_FLAG_CONT; // test this sector (we already tested ALLOC) if ((myflags & TSK_FS_BLOCK_FLAG_META) && (!(a_flags & TSK_FS_BLOCK_WALK_FLAG_META))) continue; else if ((myflags & TSK_FS_BLOCK_FLAG_CONT) && (!(a_flags & TSK_FS_BLOCK_WALK_FLAG_CONT))) continue; if (a_flags & TSK_FS_BLOCK_WALK_FLAG_AONLY) myflags |= TSK_FS_BLOCK_FLAG_AONLY; tsk_fs_block_set(fs, fs_block, addr, myflags | TSK_FS_BLOCK_FLAG_RAW, &data_buf[i * fs->block_size]); retval = a_action(fs_block, a_ptr); if (retval == TSK_WALK_STOP) { free(data_buf); tsk_fs_block_free(fs_block); return 0; } else if (retval == TSK_WALK_ERROR) { free(data_buf); tsk_fs_block_free(fs_block); return 1; } } } free(data_buf); /* Was that it? 
*/ if (addr >= a_end_blk) { tsk_fs_block_free(fs_block); return 0; } } /* Reset the first sector to the start of the data area if we did * not examine it - the next calculation will screw up otherwise */ else if (addr < fatfs->firstclustsect) { addr = fatfs->firstclustsect; } /* Now we read in the clusters in cluster-sized chunks, * sectors are too small */ /* Determine the base sector of the cluster where the first * sector is located */ addr = FATFS_CLUST_2_SECT(fatfs, (FATFS_SECT_2_CLUST(fatfs, addr))); if ((data_buf = tsk_malloc(fs->block_size * fatfs->csize)) == NULL) { tsk_fs_block_free(fs_block); return 1; } if (tsk_verbose) tsk_fprintf(stderr, "fatfs_block_walk: Walking data area blocks (%" PRIuDADDR " to %" PRIuDADDR ")\n", addr, a_end_blk); for (; addr <= a_end_blk; addr += fatfs->csize) { int retval; size_t read_size; /* Identify its allocation status */ retval = fatfs_is_sectalloc(fatfs, addr); if (retval == -1) { free(data_buf); tsk_fs_block_free(fs_block); return 1; } else if (retval == 1) { myflags = TSK_FS_BLOCK_FLAG_ALLOC; } else { myflags = TSK_FS_BLOCK_FLAG_UNALLOC; } /* At this point, there should be no more meta - just content */ myflags |= TSK_FS_BLOCK_FLAG_CONT; // test if we should call the callback with this one if ((myflags & TSK_FS_BLOCK_FLAG_CONT) && (!(a_flags & TSK_FS_BLOCK_WALK_FLAG_CONT))) continue; else if ((myflags & TSK_FS_BLOCK_FLAG_ALLOC) && (!(a_flags & TSK_FS_BLOCK_WALK_FLAG_ALLOC))) continue; else if ((myflags & TSK_FS_BLOCK_FLAG_UNALLOC) && (!(a_flags & TSK_FS_BLOCK_WALK_FLAG_UNALLOC))) continue; if (a_flags & TSK_FS_BLOCK_WALK_FLAG_AONLY) myflags |= TSK_FS_BLOCK_FLAG_AONLY; /* The final cluster may not be full */ if (a_end_blk - addr + 1 < fatfs->csize) read_size = (size_t) (a_end_blk - addr + 1); else read_size = fatfs->csize; if ((a_flags & TSK_FS_BLOCK_WALK_FLAG_AONLY) == 0) { cnt = tsk_fs_read_block (fs, addr, data_buf, fs->block_size * read_size); if (cnt != fs->block_size * read_size) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2("fatfs_block_walk: block: %" PRIuDADDR, addr); free(data_buf); tsk_fs_block_free(fs_block); return 1; } } /* go through each sector in the cluster */ for (i = 0; i < read_size; i++) { int retval; if (addr + i < a_start_blk) continue; else if (addr + i > a_end_blk) break; tsk_fs_block_set(fs, fs_block, addr + i, myflags | TSK_FS_BLOCK_FLAG_RAW, &data_buf[i * fs->block_size]); retval = a_action(fs_block, a_ptr); if (retval == TSK_WALK_STOP) { free(data_buf); tsk_fs_block_free(fs_block); return 0; } else if (retval == TSK_WALK_ERROR) { free(data_buf); tsk_fs_block_free(fs_block); return 1; } } } free(data_buf); tsk_fs_block_free(fs_block); return 0; } TSK_FS_BLOCK_FLAG_ENUM fatfs_block_getflags(TSK_FS_INFO * a_fs, TSK_DADDR_T a_addr) { FATFS_INFO *fatfs = (FATFS_INFO *) a_fs; int flags = 0; // FATs and boot sector if (a_addr < fatfs->firstdatasect) { flags = TSK_FS_BLOCK_FLAG_META | TSK_FS_BLOCK_FLAG_ALLOC; } // root directory for FAT12/16 else if (a_addr < fatfs->firstclustsect) { flags = TSK_FS_BLOCK_FLAG_CONT | TSK_FS_BLOCK_FLAG_ALLOC; } else { int retval; flags = TSK_FS_BLOCK_FLAG_CONT; /* Identify its allocation status */ retval = fatfs_is_sectalloc(fatfs, a_addr); if (retval != -1) { if (retval == 1) flags |= TSK_FS_BLOCK_FLAG_ALLOC; else flags |= TSK_FS_BLOCK_FLAG_UNALLOC; } } return (TSK_FS_BLOCK_FLAG_ENUM)flags; } /* * Identifies if a sector is allocated * * If it is less than the data area, then it is allocated * else the FAT table is consulted * * Return 1 
if allocated, 0 if unallocated, and -1 if error */ int8_t fatfs_is_sectalloc(FATFS_INFO * fatfs, TSK_DADDR_T sect) { TSK_FS_INFO *fs = (TSK_FS_INFO *) fatfs; /* If less than the first cluster sector, then it is allocated * otherwise check the FAT */ if (sect < fatfs->firstclustsect) return 1; /* If we are in the unused area, then we are "unalloc" */ if ((sect <= fs->last_block) && (sect >= (fatfs->firstclustsect + fatfs->csize * fatfs->clustcnt))) return 0; return fatfs->is_cluster_alloc(fatfs, FATFS_SECT_2_CLUST(fatfs, sect)); } /* return 1 on error and 0 on success */ uint8_t fatfs_jopen(TSK_FS_INFO * fs, TSK_INUM_T inum) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNSUPFUNC); tsk_error_set_errstr("FAT does not have a journal\n"); return 1; } /* return 1 on error and 0 on success */ uint8_t fatfs_fscheck(TSK_FS_INFO * fs, FILE * hFile) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNSUPFUNC); tsk_error_set_errstr("fscheck not implemented for FAT yet"); return 1; /* Check that allocated dentries point to start of allcated cluster chain */ /* Size of file is consistent with cluster chain length */ /* Allocated cluster chains have a corresponding alloc dentry */ /* Non file dentries have no clusters */ /* Only one volume label */ /* Dump Bad Sector Addresses */ /* Dump unused sector addresses * Reserved area, end of FAT, end of Data Area */ } /* return 1 on error and 0 on success */ uint8_t fatfs_jentry_walk(TSK_FS_INFO * fs, int a_flags, TSK_FS_JENTRY_WALK_CB a_action, void *a_ptr) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNSUPFUNC); tsk_error_set_errstr("FAT does not have a journal\n"); return 1; } /* return 1 on error and 0 on success */ uint8_t fatfs_jblk_walk(TSK_FS_INFO * fs, TSK_DADDR_T start, TSK_DADDR_T end, int a_flags, TSK_FS_JBLK_WALK_CB a_action, void *a_ptr) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNSUPFUNC); tsk_error_set_errstr("FAT does not have a journal\n"); return 1; } /* fatfs_close - close an fatfs file system */ void fatfs_close(TSK_FS_INFO *fs) { FATFS_INFO *fatfs = (FATFS_INFO *) fs; fatfs_dir_buf_free(fatfs); fs->tag = 0; memset(fatfs->boot_sector_buffer, 0, FATFS_MASTER_BOOT_RECORD_SIZE); tsk_deinit_lock(&fatfs->cache_lock); tsk_deinit_lock(&fatfs->dir_lock); tsk_fs_free(fs); } sleuthkit-4.2.0/tsk/fs/fatfs_dent.cpp000644 000765 000024 00000027607 12576320700 020373 0ustar00carrierstaff000000 000000 /* ** fatfs_dent ** The Sleuth Kit ** ** file name layer support for the FAT file system ** ** Brian Carrier [carrier sleuthkit [dot] org] ** Copyright (c) 2006-2013 Brian Carrier, Basis Technology. All Rights reserved ** Copyright (c) 2003-2005 Brian Carrier. All rights reserved ** ** TASK ** Copyright (c) 2002 Brian Carrier, @stake Inc. All rights reserved ** ** ** This software is distributed under the Common Public License 1.0 ** ** Unicode added with support from I.D.E.A.L. Technology Corp (Aug '05) ** */ /** * \file fatfs_dent.cpp * Contains the internal TSK FAT file name processing code. */ #include "tsk_fs_i.h" #include "tsk_fatxxfs.h" #include "tsk_exfatfs.h" #include "tsk_fatfs.h" #include /* * DESIGN NOTES * * the basic goal of this code is to parse directory entry structures for * file names. The main function is fatfs_parse_buf, which parses * a buffer and stores the entries in FS_DIR. That structure is then * used by dir_get() or dir_walk() to provide the data back to the user. * * One of the odd aspects of this code is that the 'inode' values are * the 'slot-address'. 
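 * (informally: a slot-address encodes which sector holds the 32-byte
 * directory entry and the entry's position within that sector)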
Refer to the document on how FAT was implemented * for more details. This means that we need to search for the actual * 'inode' address for the '.' and '..' entries though! The search * for '..' is quite painful if this code is called from a random * location. It does save what the parent is though, so the search * only has to be done once per session. */ /* * name_walk callback used when finding the parent directory. It * forces the walking process to stop when we hit a target directory. * A list of directory to parent directory mappings is built up during * the walk and this function is used to stop that building process. */ TSK_WALK_RET_ENUM fatfs_find_parent_act(TSK_FS_FILE * fs_file, const char *a_path, void *ptr) { TSK_INUM_T par_inum = *(TSK_INUM_T *) ptr; if ((fs_file->meta == NULL) || (fs_file->meta->type != TSK_FS_META_TYPE_DIR)) return TSK_WALK_CONT; if (fs_file->meta->addr == par_inum) return TSK_WALK_STOP; return TSK_WALK_CONT; } /** \internal * Casts the void * to a map. This obfuscation is done so that the rest of the library * can remain as C and only this code needs to be C++. * * Assumes that you already have the lock */ static std::map * getParentMap(FATFS_INFO *fatfs) { // allocate it if it hasn't already been if (fatfs->inum2par == NULL) { fatfs->inum2par = new std::map; } return (std::map *)fatfs->inum2par; } /** * Adds an entry to the parent directory map. Used to make further processing * faster. * @param fatfs File system * @param par_inum Parent folder meta data address. * @param dir_inum Sub-folder meta data address. * @returns 0 */ uint8_t fatfs_dir_buf_add(FATFS_INFO * fatfs, TSK_INUM_T par_inum, TSK_INUM_T dir_inum) { tsk_take_lock(&fatfs->dir_lock); std::map *tmpMap = getParentMap(fatfs); (*tmpMap)[dir_inum] = par_inum; tsk_release_lock(&fatfs->dir_lock); return 0; } /** * Looks up the parent meta address for a child from the cached list. * @param fatfs File system * @param dir_inum Inode of sub-directory to look up * @param par_inum [out] Result of lookup * @returns 0 if found and 1 if not. 
*/ uint8_t fatfs_dir_buf_get(FATFS_INFO * fatfs, TSK_INUM_T dir_inum, TSK_INUM_T *par_inum) { uint8_t retval = 1; tsk_take_lock(&fatfs->dir_lock); std::map *tmpMap = getParentMap(fatfs); if (tmpMap->count( dir_inum) > 0) { *par_inum = (*tmpMap)[dir_inum]; retval = 0; } tsk_release_lock(&fatfs->dir_lock); return retval; } /** * Frees the memory associated with the parent map */ void fatfs_dir_buf_free(FATFS_INFO *fatfs) { tsk_take_lock(&fatfs->dir_lock); if (fatfs->inum2par != NULL) { std::map *tmpMap = getParentMap(fatfs); delete tmpMap; fatfs->inum2par = NULL; } tsk_release_lock(&fatfs->dir_lock); } /************************************************************************** * * dent_walk * *************************************************************************/ /* values used to copy the directory contents into a buffer */ typedef struct { /* ptr to the current location in a local buffer */ char *curdirptr; /* number of bytes left in curdirptr */ size_t dirleft; /* ptr to a local buffer for the stack of sector addresses */ TSK_DADDR_T *addrbuf; /* num of entries allocated to addrbuf */ size_t addrsize; /* The current index in the addrbuf stack */ size_t addridx; } FATFS_LOAD_DIR; /** * file walk callback that is used to load directory contents * into a buffer */ static TSK_WALK_RET_ENUM fatfs_dent_action(TSK_FS_FILE * fs_file, TSK_OFF_T a_off, TSK_DADDR_T addr, char *buf, size_t size, TSK_FS_BLOCK_FLAG_ENUM flags, void *ptr) { FATFS_LOAD_DIR *load = (FATFS_LOAD_DIR *) ptr; /* how much of the buffer are we copying */ size_t len = (load->dirleft < size) ? load->dirleft : size; /* Copy the sector into a buffer and increment the pointers */ memcpy(load->curdirptr, buf, len); load->curdirptr = (char *) ((uintptr_t) load->curdirptr + len); load->dirleft -= len; /* fill in the stack of addresses of sectors * * if we are at the last entry, then realloc more */ if (load->addridx == load->addrsize) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("fatfs_dent_walk: Trying to put more sector address in stack than were allocated (%lu)", (long) load->addridx); return TSK_WALK_ERROR; } /* Add this sector to the stack */ load->addrbuf[load->addridx++] = addr; if (load->dirleft) return TSK_WALK_CONT; else return TSK_WALK_STOP; } /** \internal * Process a directory and load up FS_DIR with the entries. If a pointer to * an already allocated FS_DIR struture is given, it will be cleared. If no existing * FS_DIR structure is passed (i.e. NULL), then a new one will be created. If the return * value is error or corruption, then the FS_DIR structure could * have entries (depending on when the error occured). * * @param a_fs File system to analyze * @param a_fs_dir Pointer to FS_DIR pointer. Can contain an already allocated * structure or a new structure. * @param a_addr Address of directory to process. * @returns error, corruption, ok etc. 
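 *
 * Implementation note (a paraphrase of the code below, not additional
 * behavior): the directory contents are loaded sector by sector through
 * fatfs_dent_action(), which also records each sector's address in a stack;
 * the file system's dent_parse_buf() callback later uses those addresses to
 * compute the "slot-address" inode number for every entry it finds.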
*/ TSK_RETVAL_ENUM fatfs_dir_open_meta(TSK_FS_INFO * a_fs, TSK_FS_DIR ** a_fs_dir, TSK_INUM_T a_addr) { const char *func_name = "fatfs_dir_open_meta"; TSK_OFF_T size, len; FATFS_INFO *fatfs = (FATFS_INFO *) a_fs; char *dirbuf; TSK_DADDR_T *addrbuf; FATFS_LOAD_DIR load; TSK_RETVAL_ENUM retval; TSK_FS_DIR *fs_dir; if ((a_addr < a_fs->first_inum) || (a_addr > a_fs->last_inum)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); tsk_error_set_errstr("%s: invalid a_addr value: %" PRIuINUM "\n", func_name, a_addr); return TSK_ERR; } else if (a_fs_dir == NULL) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("%s: NULL fs_attr argument given", func_name); return TSK_ERR; } fs_dir = *a_fs_dir; if (fs_dir) { tsk_fs_dir_reset(fs_dir); fs_dir->addr = a_addr; } else { if ((*a_fs_dir = fs_dir = tsk_fs_dir_alloc(a_fs, a_addr, 128)) == NULL) { return TSK_ERR; } } // handle the orphan directory if its contents were requested if (a_addr == TSK_FS_ORPHANDIR_INUM(a_fs)) { return tsk_fs_dir_find_orphans(a_fs, fs_dir); } fs_dir->fs_file = tsk_fs_file_open_meta(a_fs, NULL, a_addr); if (fs_dir->fs_file == NULL) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_INODE_NUM); tsk_error_set_errstr("%s: %" PRIuINUM " is not a valid inode", func_name, a_addr); return TSK_COR; } size = fs_dir->fs_file->meta->size; len = roundup(size, fatfs->ssize); if (tsk_verbose) tsk_fprintf(stderr, "%s: Processing directory %" PRIuINUM "\n", func_name, a_addr); if (size == 0) { if (tsk_verbose) tsk_fprintf(stderr, "%s: directory has 0 size\n", func_name); return TSK_OK; } /* Make a copy of the directory contents using file_walk */ if ((dirbuf = (char *)tsk_malloc((size_t) len)) == NULL) { return TSK_ERR; } load.curdirptr = dirbuf; load.dirleft = (size_t) size; /* We are going to save the address of each sector in the directory * in a stack - they are needed to determine the inode address. 
*/ load.addrsize = (size_t) (len / fatfs->ssize); addrbuf = (TSK_DADDR_T *) tsk_malloc(load.addrsize * sizeof(TSK_DADDR_T)); if (addrbuf == NULL) { free(dirbuf); return TSK_ERR; } /* Set the variables that are used during the copy */ load.addridx = 0; load.addrbuf = addrbuf; /* save the directory contents into dirbuf */ if (tsk_fs_file_walk(fs_dir->fs_file, TSK_FS_FILE_WALK_FLAG_SLACK, fatfs_dent_action, (void *) &load)) { tsk_error_errstr2_concat("- %s", func_name); free(dirbuf); free(addrbuf); return TSK_COR; } /* We did not copy the entire directory, which occurs if an error occured */ if (load.dirleft > 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_FWALK); tsk_error_set_errstr ("%s: Error reading directory %" PRIuINUM, func_name, a_addr); /* Free the local buffers */ free(dirbuf); free(addrbuf); return TSK_COR; } if (tsk_verbose) fprintf(stderr, "%s: Parsing directory %" PRIuINUM "\n", func_name, a_addr); retval = fatfs->dent_parse_buf(fatfs, fs_dir, dirbuf, len, addrbuf); free(dirbuf); free(addrbuf); // if we are listing the root directory, add the Orphan directory and special FAT file entries if (a_addr == a_fs->root_inum) { TSK_FS_NAME *fs_name = tsk_fs_name_alloc(256, 0); if (fs_name == NULL) return TSK_ERR; // MBR Entry strncpy(fs_name->name, FATFS_MBRNAME, fs_name->name_size); fs_name->meta_addr = fatfs->mbr_virt_inum; fs_name->type = TSK_FS_NAME_TYPE_VIRT; fs_name->flags = TSK_FS_NAME_FLAG_ALLOC; if (tsk_fs_dir_add(fs_dir, fs_name)) { tsk_fs_name_free(fs_name); return TSK_ERR; } // FAT1 Entry strncpy(fs_name->name, FATFS_FAT1NAME, fs_name->name_size); fs_name->meta_addr = fatfs->fat1_virt_inum; fs_name->type = TSK_FS_NAME_TYPE_VIRT; fs_name->flags = TSK_FS_NAME_FLAG_ALLOC; if (tsk_fs_dir_add(fs_dir, fs_name)) { tsk_fs_name_free(fs_name); return TSK_ERR; } // FAT2 Entry if (fatfs->numfat == 2) { strncpy(fs_name->name, FATFS_FAT2NAME, fs_name->name_size); fs_name->meta_addr = fatfs->fat2_virt_inum; fs_name->type = TSK_FS_NAME_TYPE_VIRT; fs_name->flags = TSK_FS_NAME_FLAG_ALLOC; if (tsk_fs_dir_add(fs_dir, fs_name)) { tsk_fs_name_free(fs_name); return TSK_ERR; } } // orphan directory if (tsk_fs_dir_make_orphan_dir_name(a_fs, fs_name)) { tsk_fs_name_free(fs_name); return TSK_ERR; } if (tsk_fs_dir_add(fs_dir, fs_name)) { tsk_fs_name_free(fs_name); return TSK_ERR; } tsk_fs_name_free(fs_name); } return retval; } int fatfs_name_cmp(TSK_FS_INFO * a_fs_info, const char *s1, const char *s2) { return strcasecmp(s1, s2); } sleuthkit-4.2.0/tsk/fs/fatfs_meta.c000644 000765 000024 00000152044 12576320700 020021 0ustar00carrierstaff000000 000000 /* ** fatfs ** The Sleuth Kit ** ** Meta data layer support for the FAT file system. ** ** Brian Carrier [carrier sleuthkit [dot] org] ** Copyright (c) 2006-2013 Brian Carrier, Basis Technology. All Rights reserved ** Copyright (c) 2003-2005 Brian Carrier. All rights reserved ** ** TASK ** Copyright (c) 2002 Brian Carrier, @stake Inc. All rights reserved ** ** ** This software is distributed under the Common Public License 1.0 ** ** Unicode added with support from I.D.E.A.L. Technology Corp (Aug '05) ** */ /** * \file fatfs_meta.c * Meta data layer support for FAT file systems. */ #include "tsk_fatfs.h" #include "tsk_fatxxfs.h" #include "tsk_exfatfs.h" TSK_FS_ATTR_TYPE_ENUM fatfs_get_default_attr_type(const TSK_FS_FILE * a_file) { return TSK_FS_ATTR_TYPE_DEFAULT; } /** * \internal * Create an TSK_FS_META structure for the root directory. 
FAT does * not have a directory entry for the root directory, but this * function collects the data needed to make one. * * @param fatfs File system to analyze. * @param fs_meta Inode structure to copy root directory information into. * @return 1 on error and 0 on success */ static uint8_t fatfs_make_root(FATFS_INFO *a_fatfs, TSK_FS_META *a_fs_meta) { const char *func_name = "fatfs_make_root"; TSK_DADDR_T *first_clust_addr_ptr = NULL; tsk_error_reset(); if (fatfs_ptr_arg_is_null(a_fatfs, "a_fatfs", func_name) || fatfs_ptr_arg_is_null(a_fs_meta, "a_fs_meta", func_name)) { return 1; } /* Manufacture some metadata. */ a_fs_meta->type = TSK_FS_META_TYPE_DIR; a_fs_meta->mode = TSK_FS_META_MODE_UNSPECIFIED; a_fs_meta->nlink = 1; a_fs_meta->addr = FATFS_ROOTINO; a_fs_meta->flags = (TSK_FS_META_FLAG_ENUM)(TSK_FS_META_FLAG_USED | TSK_FS_META_FLAG_ALLOC); a_fs_meta->uid = a_fs_meta->gid = 0; a_fs_meta->mtime = a_fs_meta->atime = a_fs_meta->ctime = a_fs_meta->crtime = 0; a_fs_meta->mtime_nano = a_fs_meta->atime_nano = a_fs_meta->ctime_nano = a_fs_meta->crtime_nano = 0; /* Give the root directory an empty name. */ if (a_fs_meta->name2 == NULL) { if ((a_fs_meta->name2 = (TSK_FS_META_NAME_LIST *) tsk_malloc(sizeof(TSK_FS_META_NAME_LIST))) == NULL) { return 1; } a_fs_meta->name2->next = NULL; } a_fs_meta->name2->name[0] = '\0'; /* Mark the generic attribute list as not in use (in the generic file model * attributes are containers for data or metadata). Population of this * list is done by lazy look up. */ a_fs_meta->attr_state = TSK_FS_META_ATTR_EMPTY; if (a_fs_meta->attr) { tsk_fs_attrlist_markunused(a_fs_meta->attr); } /* Determine the size of the root directory and the address of its * first cluster. */ first_clust_addr_ptr = (TSK_DADDR_T*)a_fs_meta->content_ptr; if (a_fatfs->fs_info.ftype == TSK_FS_TYPE_FAT32 || a_fatfs->fs_info.ftype == TSK_FS_TYPE_EXFAT) { TSK_DADDR_T cnum = 0; TSK_DADDR_T clust = 0; TSK_LIST *list_seen = NULL; /* Convert the address of the first sector of the root directory into * the address of its first cluster. */ clust = FATFS_SECT_2_CLUST(a_fatfs, a_fatfs->rootsect); first_clust_addr_ptr[0] = clust; /* Walk the FAT and count the clusters allocated to the root directory. */ cnum = 0; while ((clust) && (0 == FATFS_ISEOF(clust, FATFS_32_MASK))) { TSK_DADDR_T nxt = 0; /* Make sure we do not get into an infinite loop */ if (tsk_list_find(list_seen, clust)) { if (tsk_verbose) { tsk_fprintf(stderr, "Loop found while determining root directory size\n"); } break; } if (tsk_list_add(&list_seen, clust)) { tsk_list_free(list_seen); list_seen = NULL; return 1; } cnum++; if (fatfs_getFAT(a_fatfs, clust, &nxt)) { break; } else { clust = nxt; } } tsk_list_free(list_seen); list_seen = NULL; /* Calculate the size of the root directory. */ a_fs_meta->size = (cnum * a_fatfs->csize) << a_fatfs->ssize_sh; } else { /* FAT12 and FAT16 don't use the FAT for the root directory, so set * the first cluster address to a distinguished value that other code * will have to check as a special condition. */ first_clust_addr_ptr[0] = 1; /* Set the size equal to the number of bytes between the end of the * FATs and the start of the clusters. */ a_fs_meta->size = (a_fatfs->firstclustsect - a_fatfs->firstdatasect) << a_fatfs->ssize_sh; } return 0; } /** * \internal * Create an TSK_FS_META structure for the master boot record. * * @param fatfs File system to analyze * @param fs_meta Inode structure to copy file information into. 
* @return 1 on error and 0 on success */ static uint8_t fatfs_make_mbr(FATFS_INFO *fatfs, TSK_FS_META *fs_meta) { TSK_DADDR_T *addr_ptr; fs_meta->type = TSK_FS_META_TYPE_VIRT; fs_meta->mode = TSK_FS_META_MODE_UNSPECIFIED; fs_meta->nlink = 1; fs_meta->addr = fatfs->mbr_virt_inum; fs_meta->flags = (TSK_FS_META_FLAG_ENUM) (TSK_FS_META_FLAG_USED | TSK_FS_META_FLAG_ALLOC); fs_meta->uid = fs_meta->gid = 0; fs_meta->mtime = fs_meta->atime = fs_meta->ctime = fs_meta->crtime = 0; fs_meta->mtime_nano = fs_meta->atime_nano = fs_meta->ctime_nano = fs_meta->crtime_nano = 0; if (fs_meta->name2 == NULL) { if ((fs_meta->name2 = (TSK_FS_META_NAME_LIST *) tsk_malloc(sizeof(TSK_FS_META_NAME_LIST))) == NULL) { return 1; } fs_meta->name2->next = NULL; } strncpy(fs_meta->name2->name, FATFS_MBRNAME, TSK_FS_META_NAME_LIST_NSIZE); fs_meta->attr_state = TSK_FS_META_ATTR_EMPTY; if (fs_meta->attr) { tsk_fs_attrlist_markunused(fs_meta->attr); } addr_ptr = (TSK_DADDR_T*)fs_meta->content_ptr; addr_ptr[0] = 0; fs_meta->size = 512; return 0; } /** * \internal * Create an TSK_FS_META structure for the FAT tables. * * @param fatfs File system to analyze * @param a_which 1 or 2 to choose between defining FAT1 or FAT2 * @param fs_meta Inode structure to copy file information into. * @return 1 on error and 0 on success */ static uint8_t fatfs_make_fat(FATFS_INFO *fatfs, uint8_t a_which, TSK_FS_META *fs_meta) { TSK_FS_INFO *fs = (TSK_FS_INFO*)fatfs; TSK_DADDR_T *addr_ptr = (TSK_DADDR_T *)fs_meta->content_ptr; if ((a_which != 1) && (a_which != 2)) { return 1; } if (a_which > fatfs->numfat) { return 1; } fs_meta->type = TSK_FS_META_TYPE_VIRT; fs_meta->mode = TSK_FS_META_MODE_UNSPECIFIED; fs_meta->nlink = 1; fs_meta->flags = (TSK_FS_META_FLAG_ENUM) (TSK_FS_META_FLAG_USED | TSK_FS_META_FLAG_ALLOC); fs_meta->uid = fs_meta->gid = 0; fs_meta->mtime = fs_meta->atime = fs_meta->ctime = fs_meta->crtime = 0; fs_meta->mtime_nano = fs_meta->atime_nano = fs_meta->ctime_nano = fs_meta->crtime_nano = 0; if (fs_meta->name2 == NULL) { if ((fs_meta->name2 = (TSK_FS_META_NAME_LIST *) tsk_malloc(sizeof(TSK_FS_META_NAME_LIST))) == NULL) return 1; fs_meta->name2->next = NULL; } if (a_which == 1) { fs_meta->addr = fatfs->fat1_virt_inum; strncpy(fs_meta->name2->name, FATFS_FAT1NAME, TSK_FS_META_NAME_LIST_NSIZE); addr_ptr[0] = fatfs->firstfatsect; } else { fs_meta->addr = fatfs->fat2_virt_inum; strncpy(fs_meta->name2->name, FATFS_FAT2NAME, TSK_FS_META_NAME_LIST_NSIZE); addr_ptr[0] = fatfs->firstfatsect + fatfs->sectperfat; } fs_meta->attr_state = TSK_FS_META_ATTR_EMPTY; if (fs_meta->attr) { tsk_fs_attrlist_markunused(fs_meta->attr); } fs_meta->size = fatfs->sectperfat * fs->block_size; return 0; } /** * \internal * Load a FATFS_DENTRY structure with the bytes at a given inode address. * * @param [in] a_fs The file system from which to read the bytes. * @param [out] a_de The FATFS_DENTRY. * @param [in] a_inum An inode address. * @return 0 on success, 1 on failure. */ uint8_t fatfs_dentry_load(FATFS_INFO *a_fatfs, FATFS_DENTRY *a_dentry, TSK_INUM_T a_inum) { const char *func_name = "fatfs_dentry_load"; TSK_FS_INFO *fs = (TSK_FS_INFO*)a_fatfs; TSK_DADDR_T sect = 0; size_t off = 0; ssize_t cnt = 0; tsk_error_reset(); if (fatfs_ptr_arg_is_null(a_fatfs, "a_fatfs", func_name) || fatfs_ptr_arg_is_null(a_dentry, "a_dentry", func_name) || !fatfs_inum_arg_is_in_range(a_fatfs, a_inum, func_name)) { return 1; } /* Map the inode address to a sector. 
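 *
 * (Illustrative note, not part of the original source.) Since directory entries are
 * 32 bytes, a 512-byte sector holds 16 of them, so an inode address conceptually
 * splits into "which sector" and "which 32-byte slot within that sector"; the
 * FATFS_INODE_2_SECT() and FATFS_INODE_2_OFF() macros used below encapsulate that
 * arithmetic for the actual on-disk layout.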
*/ sect = FATFS_INODE_2_SECT(a_fatfs, a_inum); if (sect > fs->last_block) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_INODE_NUM); tsk_error_set_errstr("%s: Inode %" PRIuINUM " in sector too big for image: %" PRIuDADDR, func_name, a_inum, sect); return 1; } /* Get the byte offset of the inode address within the sector. */ off = FATFS_INODE_2_OFF(a_fatfs, a_inum); /* Read in the bytes. */ cnt = tsk_fs_read(fs, sect * fs->block_size + off, (char*)a_dentry, sizeof(FATFS_DENTRY)); if (cnt != sizeof(FATFS_DENTRY)) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2("%s: block: %" PRIuDADDR, func_name, sect); return 1; } return 0; } /** * \internal * Populate the TSK_FS_META structure of a TSK_FS_FILE structure for a * given inode address. * * @param [in] a_fs File system that contains the inode. * @param [out] a_fs_file The file corresponding to the inode. * @param [in] a_inum The inode address. * @returns 1 if an error occurs or if the inode address is not * for a valid inode, 0 otherwise. */ uint8_t fatfs_inode_lookup(TSK_FS_INFO *a_fs, TSK_FS_FILE *a_fs_file, TSK_INUM_T a_inum) { const char *func_name = "fatfs_inode_lookup"; FATFS_INFO *fatfs = (FATFS_INFO*)a_fs; tsk_error_reset(); if (fatfs_ptr_arg_is_null(a_fs, "a_fs", func_name) || fatfs_ptr_arg_is_null(a_fs_file, "a_fs_file", func_name) || !fatfs_inum_arg_is_in_range(fatfs, a_inum, func_name)) { return 1; } /* Allocate or reset the TSK_FS_META struct. */ if (a_fs_file->meta == NULL) { if ((a_fs_file->meta = tsk_fs_meta_alloc(FATFS_FILE_CONTENT_LEN)) == NULL) { return 1; } } else { tsk_fs_meta_reset(a_fs_file->meta); } /* Manufacture an inode for the root directory or a FAT virtual file, * or do a look up. */ if (a_inum == a_fs->root_inum) { if (fatfs_make_root(fatfs, a_fs_file->meta)) return 1; else return 0; } else if (a_inum == fatfs->mbr_virt_inum) { if (fatfs_make_mbr(fatfs, a_fs_file->meta)) return 1; else return 0; } else if (a_inum == fatfs->fat1_virt_inum) { if (fatfs_make_fat(fatfs, 1, a_fs_file->meta)) return 1; else return 0; } else if (a_inum == fatfs->fat2_virt_inum && fatfs->numfat == 2) { if (fatfs_make_fat(fatfs, 2, a_fs_file->meta)) return 1; else return 0; } else if (a_inum == TSK_FS_ORPHANDIR_INUM(a_fs)) { if (tsk_fs_dir_make_orphan_dir_meta(a_fs, a_fs_file->meta)) return 1; else return 0; } else { return fatfs->inode_lookup(fatfs, a_fs_file, a_inum); } } /** \internal * Make data runs out of the clusters allocated to a file represented by a * TSK_FS_FILE structure. Each data run will have a starting sector and a * length in sectors. The runs will be stored as a non-resident attribute in * the TSK_FS_ATTRLIST of the TSK_FS_META structure of the TSK_FS_FILE. * * @param a_fs_file A representation of a file. * @return 1 on error and 0 on success */ uint8_t fatfs_make_data_runs(TSK_FS_FILE * a_fs_file) { const char *func_name = "fatfs_make_data_runs"; TSK_FS_INFO *fs = NULL; TSK_FS_META *fs_meta = NULL; FATFS_INFO *fatfs = NULL; TSK_DADDR_T clust = 0; TSK_OFF_T size_remain = 0; TSK_FS_ATTR *fs_attr = NULL; if ((fatfs_ptr_arg_is_null(a_fs_file, "a_fs_file", func_name)) || (fatfs_ptr_arg_is_null(a_fs_file->meta, "a_fs_file->meta", func_name)) || (fatfs_ptr_arg_is_null(a_fs_file->fs_info, "a_fs_file->fs_info", func_name))) { return TSK_ERR; } fs_meta = a_fs_file->meta; fs = a_fs_file->fs_info; fatfs = (FATFS_INFO*)fs; /* Check for an already populated attribute list, since a lazy strategy * is used to fill in attributes. 
If the attribute list is not yet * allocated, do so now. */ if ((fs_meta->attr != NULL) && (fs_meta->attr_state == TSK_FS_META_ATTR_STUDIED)) { return 0; } else if (fs_meta->attr_state == TSK_FS_META_ATTR_ERROR) { return 1; } else if (fs_meta->attr != NULL) { tsk_fs_attrlist_markunused(fs_meta->attr); } else if (fs_meta->attr == NULL) { fs_meta->attr = tsk_fs_attrlist_alloc(); } /* Get the stashed first cluster address of the file. */ clust = ((TSK_DADDR_T*)fs_meta->content_ptr)[0]; if ((clust > (fatfs->lastclust)) && (FATFS_ISEOF(clust, fatfs->mask) == 0)) { fs_meta->attr_state = TSK_FS_META_ATTR_ERROR; tsk_error_reset(); if (a_fs_file->meta->flags & TSK_FS_META_FLAG_UNALLOC) { tsk_error_set_errno(TSK_ERR_FS_RECOVER); } else { tsk_error_set_errno(TSK_ERR_FS_INODE_COR); } tsk_error_set_errstr ("%s: Starting cluster address too large: %" PRIuDADDR, func_name, clust); return 1; } /* Figure out the allocated length of the file in bytes. Because the * allocation unit for FAT file systems is the cluster, round the * size up to a multiple of cluster size. */ size_remain = roundup(fs_meta->size, fatfs->csize * fs->block_size); if ((a_fs_file->meta->addr == fs->root_inum) && (fs->ftype != TSK_FS_TYPE_FAT32) && (fs->ftype != TSK_FS_TYPE_EXFAT) && (clust == 1)) { /* Make a single contiguous data run for a FAT12 or FAT16 root * directory. The root directory for these file systems is not * tracked in the FAT. */ TSK_FS_ATTR_RUN *data_run; if (tsk_verbose) { tsk_fprintf(stderr, "%s: Loading root directory\n", func_name); } /* Allocate the run. */ data_run = tsk_fs_attr_run_alloc(); if (data_run == NULL) { return 1; } /* Set the starting sector address and run length. The run begins with * the first sector of the data area. */ data_run->addr = fatfs->rootsect; data_run->len = fatfs->firstclustsect - fatfs->firstdatasect; /* Allocate a non-resident attribute to hold the run and add it to the attribute list. */ if ((fs_attr = tsk_fs_attrlist_getnew(fs_meta->attr, TSK_FS_ATTR_NONRES)) == NULL) { return 1; } /* Tie everything together. */ if (tsk_fs_attr_set_run(a_fs_file, fs_attr, data_run, NULL, TSK_FS_ATTR_TYPE_DEFAULT, TSK_FS_ATTR_ID_DEFAULT, data_run->len * fs->block_size, data_run->len * fs->block_size, data_run->len * fs->block_size, 0, 0)) { return 1; } fs_meta->attr_state = TSK_FS_META_ATTR_STUDIED; return 0; } else if ((a_fs_file->meta->addr >= fatfs->mbr_virt_inum) && (a_fs_file->meta->addr <= fatfs->mbr_virt_inum + fatfs->numfat)) { /* Make a single contiguous data run for a virtual file (MBR, FAT). */ TSK_FS_ATTR_RUN *data_run; if (tsk_verbose) { tsk_fprintf(stderr, "%s: Loading virtual file: %" PRIuINUM "\n", func_name, a_fs_file->meta->addr); } /* Allocate the run. */ data_run = tsk_fs_attr_run_alloc(); if (data_run == NULL) { return 1; } /* Set the starting sector address and run length. */ data_run->addr = clust; data_run->len = a_fs_file->meta->size / fs->block_size; /* Allocate a non-resident attribute to hold the run and add it to the attribute list. */ if ((fs_attr = tsk_fs_attrlist_getnew(fs_meta->attr, TSK_FS_ATTR_NONRES)) == NULL) { return 1; } /* Tie everything together. */ if (tsk_fs_attr_set_run(a_fs_file, fs_attr, data_run, NULL, TSK_FS_ATTR_TYPE_DEFAULT, TSK_FS_ATTR_ID_DEFAULT, data_run->len * fs->block_size, data_run->len * fs->block_size, data_run->len * fs->block_size, 0, 0)) { return 1; } fs_meta->attr_state = TSK_FS_META_ATTR_STUDIED; return 0; } else if (fs_meta->flags & TSK_FS_META_FLAG_UNALLOC) { /* Make data runs for a deleted file that we want to recover. 
* In this case, we could get a lot of errors because of inconsistent * data. To make it clear that these are from a recovery, we set most * error codes to _RECOVER so that they can be more easily suppressed. */ TSK_DADDR_T sbase; TSK_DADDR_T startclust = clust; TSK_OFF_T recoversize = fs_meta->size; int retval; TSK_FS_ATTR_RUN *data_run = NULL; TSK_FS_ATTR_RUN *data_run_head = NULL; TSK_OFF_T full_len_s = 0; uint8_t canRecover = 1; // set to 0 if recovery is not possible if (tsk_verbose) tsk_fprintf(stderr, "%s: Processing deleted file %" PRIuINUM " in recovery mode\n", func_name, fs_meta->addr); /* We know the size and the starting cluster * * We are going to take the clusters from the starting cluster * onwards and skip the clusters that are current allocated */ /* Quick check for exFAT only * Empty deleted files have a starting cluster of zero, which * causes problems in the exFAT functions since the first data * cluster should be 2. Since a starting cluster of zero indicates * no data, make an empty data run and skip any further processing */ if((fs->ftype == TSK_FS_TYPE_EXFAT) && (startclust == 0)){ // initialize the data run fs_attr = tsk_fs_attrlist_getnew(a_fs_file->meta->attr, TSK_FS_ATTR_NONRES); if (fs_attr == NULL) { a_fs_file->meta->attr_state = TSK_FS_META_ATTR_ERROR; return 1; } // Add the empty data run if (tsk_fs_attr_set_run(a_fs_file, fs_attr, NULL, NULL, TSK_FS_ATTR_TYPE_DEFAULT, TSK_FS_ATTR_ID_DEFAULT, 0, 0, 0, (TSK_FS_ATTR_FLAG_ENUM)0, 0)) { fs_meta->attr_state = TSK_FS_META_ATTR_ERROR; return 1; } fs_meta->attr_state = TSK_FS_META_ATTR_STUDIED; return 0; } /* Sanity checks on the starting cluster */ /* Convert the cluster addr to a sector addr */ sbase = FATFS_CLUST_2_SECT(fatfs, startclust); if (sbase > fs->last_block) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_RECOVER); tsk_error_set_errstr ("%s: Starting cluster address too large (recovery): %" PRIuDADDR, func_name, sbase); fs_meta->attr_state = TSK_FS_META_ATTR_ERROR; return 1; } else { /* If the starting cluster is already allocated then we can't * recover it */ retval = fatfs->is_cluster_alloc(fatfs, startclust); if (retval != 0) { canRecover = 0; } } /* Part 1 is to make sure there are enough unallocated clusters * for the size of the file */ clust = startclust; size_remain = recoversize; // we could make this negative so sign it for the comparison while (((int64_t) size_remain > 0) && (canRecover)) { int retval; sbase = FATFS_CLUST_2_SECT(fatfs, clust); /* Are we past the end of the FS? 
* that means we could not find enough unallocated clusters * for the file size */ if (sbase + fatfs->csize - 1 > fs->last_block) { canRecover = 0; if (tsk_verbose) tsk_fprintf(stderr, "%s: Could not find enough unallocated sectors to recover with - aborting\n", func_name); break; } /* Skip allocated clusters */ retval = fatfs->is_cluster_alloc(fatfs, clust); if (retval == -1) { canRecover = 0; break; } else if (retval == 1) { clust++; continue; } /* We can use this sector */ // see if we need a new run if ((data_run == NULL) || (data_run->addr + data_run->len != sbase)) { TSK_FS_ATTR_RUN *data_run_tmp = tsk_fs_attr_run_alloc(); if (data_run_tmp == NULL) { fs_meta->attr_state = TSK_FS_META_ATTR_ERROR; tsk_fs_attr_run_free(data_run_head); return 1; } if (data_run_head == NULL) { data_run_head = data_run_tmp; data_run_tmp->offset = 0; } else if (data_run != NULL) { data_run->next = data_run_tmp; data_run_tmp->offset = data_run->offset + data_run->len; } data_run = data_run_tmp; data_run->len = 0; data_run->addr = sbase; } data_run->len += fatfs->csize; full_len_s += fatfs->csize; size_remain -= (fatfs->csize << fatfs->ssize_sh); clust++; } // Get a FS_DATA structure and add the runlist to it if ((fs_attr = tsk_fs_attrlist_getnew(fs_meta->attr, TSK_FS_ATTR_NONRES)) == NULL) { fs_meta->attr_state = TSK_FS_META_ATTR_ERROR; return 1; } if (canRecover) { /* We can recover the file */ // initialize the data run if (tsk_fs_attr_set_run(a_fs_file, fs_attr, data_run_head, NULL, TSK_FS_ATTR_TYPE_DEFAULT, TSK_FS_ATTR_ID_DEFAULT, fs_meta->size, fs_meta->size, roundup(fs_meta->size, fatfs->csize * fs->block_size), 0, 0)) { fs_meta->attr_state = TSK_FS_META_ATTR_ERROR; return 1; } fs_meta->attr_state = TSK_FS_META_ATTR_STUDIED; } // create a one cluster run else { TSK_FS_ATTR_RUN *data_run_tmp = tsk_fs_attr_run_alloc(); if (data_run_tmp == NULL) { fs_meta->attr_state = TSK_FS_META_ATTR_ERROR; return 1; } data_run_tmp->addr = sbase; data_run_tmp->len = fatfs->csize; // initialize the data run if (tsk_fs_attr_set_run(a_fs_file, fs_attr, data_run_tmp, NULL, TSK_FS_ATTR_TYPE_DEFAULT, TSK_FS_ATTR_ID_DEFAULT, fs_meta->size, fs_meta->size, roundup(fs_meta->size, fatfs->csize * fs->block_size), 0, 0)) { fs_meta->attr_state = TSK_FS_META_ATTR_ERROR; return 1; } fs_meta->attr_state = TSK_FS_META_ATTR_STUDIED; } return 0; } else { TSK_LIST *list_seen = NULL; TSK_FS_ATTR_RUN *data_run = NULL; TSK_FS_ATTR_RUN *data_run_head = NULL; TSK_OFF_T full_len_s = 0; TSK_DADDR_T sbase; /* Do normal cluster chain walking for a file or directory, including * FAT32 and exFAT root directories. 
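 *
 * (Illustrative note, not part of the original source.) The chain walked here is
 * collapsed into (starting sector, length) runs that are attached to the file's
 * non-resident attribute; once that is done, content can be read through the
 * generic API. A minimal sketch, assuming the standard tsk_fs_file_read() signature:
 *
 *      char buf[512];
 *      ssize_t cnt = tsk_fs_file_read(a_fs_file, 0, buf, sizeof(buf),
 *          TSK_FS_FILE_READ_FLAG_NONE);
 *      // cnt is the number of bytes read, or -1 on error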
*/ if (tsk_verbose) { tsk_fprintf(stderr, "%s: Processing file %" PRIuINUM " in normal mode\n", func_name, fs_meta->addr); } /* Cycle through the cluster chain */ while ((clust & fatfs->mask) > 0 && (int64_t) size_remain > 0 && (0 == FATFS_ISEOF(clust, fatfs->mask))) { /* Convert the cluster addr to a sector addr */ sbase = FATFS_CLUST_2_SECT(fatfs, clust); if (sbase + fatfs->csize - 1 > fs->last_block) { fs_meta->attr_state = TSK_FS_META_ATTR_ERROR; tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_INODE_COR); tsk_error_set_errstr ("%s: Invalid sector address in FAT (too large): %" PRIuDADDR " (plus %d sectors)", func_name, sbase, fatfs->csize); return 1; } // see if we need a new run if ((data_run == NULL) || (data_run->addr + data_run->len != sbase)) { TSK_FS_ATTR_RUN *data_run_tmp = tsk_fs_attr_run_alloc(); if (data_run_tmp == NULL) { tsk_fs_attr_run_free(data_run_head); fs_meta->attr_state = TSK_FS_META_ATTR_ERROR; return 1; } if (data_run_head == NULL) { data_run_head = data_run_tmp; data_run_tmp->offset = 0; } else if (data_run != NULL) { data_run->next = data_run_tmp; data_run_tmp->offset = data_run->offset + data_run->len; } data_run = data_run_tmp; data_run->len = 0; data_run->addr = sbase; } data_run->len += fatfs->csize; full_len_s += fatfs->csize; size_remain -= (fatfs->csize * fs->block_size); if ((int64_t) size_remain > 0) { TSK_DADDR_T nxt; if (fatfs_getFAT(fatfs, clust, &nxt)) { tsk_error_set_errstr2("%s: Inode: %" PRIuINUM " cluster: %" PRIuDADDR, func_name, fs_meta->addr, clust); fs_meta->attr_state = TSK_FS_META_ATTR_ERROR; tsk_fs_attr_run_free(data_run_head); tsk_list_free(list_seen); list_seen = NULL; return 1; } clust = nxt; /* Make sure we do not get into an infinite loop */ if (tsk_list_find(list_seen, clust)) { if (tsk_verbose) tsk_fprintf(stderr, "Loop found while processing file\n"); break; } if (tsk_list_add(&list_seen, clust)) { fs_meta->attr_state = TSK_FS_META_ATTR_ERROR; tsk_list_free(list_seen); list_seen = NULL; return 1; } } } // add the run list to the inode structure if ((fs_attr = tsk_fs_attrlist_getnew(fs_meta->attr, TSK_FS_ATTR_NONRES)) == NULL) { fs_meta->attr_state = TSK_FS_META_ATTR_ERROR; return 1; } // initialize the data run if (tsk_fs_attr_set_run(a_fs_file, fs_attr, data_run_head, NULL, TSK_FS_ATTR_TYPE_DEFAULT, TSK_FS_ATTR_ID_DEFAULT, fs_meta->size, fs_meta->size, roundup(fs_meta->size, fatfs->csize * fs->block_size), 0, 0)) { fs_meta->attr_state = TSK_FS_META_ATTR_ERROR; return 1; } tsk_list_free(list_seen); list_seen = NULL; fs_meta->attr_state = TSK_FS_META_ATTR_STUDIED; return 0; } } /* Used for istat callback */ typedef struct { FILE *hFile; int idx; int istat_seen; } FATFS_PRINT_ADDR; /* Callback a_action for file_walk to print the sector addresses * of a file, used for istat */ static TSK_WALK_RET_ENUM print_addr_act(TSK_FS_FILE * fs_file, TSK_OFF_T a_off, TSK_DADDR_T addr, char *buf, size_t size, TSK_FS_BLOCK_FLAG_ENUM a_flags, void *a_ptr) { FATFS_PRINT_ADDR *print = (FATFS_PRINT_ADDR *) a_ptr; tsk_fprintf(print->hFile, "%" PRIuDADDR " ", addr); if (++(print->idx) == 8) { tsk_fprintf(print->hFile, "\n"); print->idx = 0; } print->istat_seen = 1; return TSK_WALK_CONT; } /** * Print details on a specific file to a file handle. * * @param a_fs File system file is located in. * @param a_hFile File handle to print text to. * @param a_inum Address of file in file system. * @param a_numblock The number of blocks in file to force print (can go beyond file size). * @param a_sec_skew Clock skew in seconds to also print times in. 
* * @returns 1 on error and 0 on success. */ uint8_t fatfs_istat(TSK_FS_INFO *a_fs, FILE *a_hFile, TSK_INUM_T a_inum, TSK_DADDR_T a_numblock, int32_t a_sec_skew) { const char* func_name = "fatfs_istat"; FATFS_INFO *fatfs = (FATFS_INFO*)a_fs; TSK_FS_META *fs_meta = NULL; TSK_FS_FILE *fs_file = NULL; TSK_FS_META_NAME_LIST *fs_name_list = NULL; FATFS_PRINT_ADDR print; char timeBuf[128]; tsk_error_reset(); if (fatfs_ptr_arg_is_null(a_fs, "a_fs", func_name) || fatfs_ptr_arg_is_null(a_hFile, "a_hFile", func_name) || !fatfs_inum_arg_is_in_range(fatfs, a_inum, func_name)) { return 1; } /* Create a TSK_FS_FILE corresponding to the specified inode. */ if ((fs_file = tsk_fs_file_open_meta(a_fs, NULL, a_inum)) == NULL) { return 1; } fs_meta = fs_file->meta; /* Print the inode address. */ tsk_fprintf(a_hFile, "Directory Entry: %" PRIuINUM "\n", a_inum); /* Print the allocation status. */ tsk_fprintf(a_hFile, "%sAllocated\n", (fs_meta->flags & TSK_FS_META_FLAG_UNALLOC) ? "Not " : ""); /* Print the attributes. */ tsk_fprintf(a_hFile, "File Attributes: "); if (a_inum == a_fs->root_inum) { tsk_fprintf(a_hFile, "Root Directory\n"); } else if (fs_meta->type == TSK_FS_META_TYPE_VIRT) { tsk_fprintf(a_hFile, "Virtual File\n"); } else if (fs_meta->addr == TSK_FS_ORPHANDIR_INUM(a_fs)) { tsk_fprintf(a_hFile, "Virtual Directory\n"); } else { if (fatfs->istat_attr_flags(fatfs, a_inum, a_hFile)) { return 1; } } /* Print the file size. */ tsk_fprintf(a_hFile, "Size: %" PRIuOFF "\n", fs_meta->size); /* Print the name. */ if (fs_meta->name2) { fs_name_list = fs_meta->name2; tsk_fprintf(a_hFile, "Name: %s\n", fs_name_list->name); } /* Print the times. */ if (a_sec_skew != 0) { tsk_fprintf(a_hFile, "\nAdjusted Directory Entry Times:\n"); if (fs_meta->mtime) fs_meta->mtime -= a_sec_skew; if (fs_meta->atime) fs_meta->atime -= a_sec_skew; if (fs_meta->crtime) fs_meta->crtime -= a_sec_skew; tsk_fprintf(a_hFile, "Written:\t%s\n", tsk_fs_time_to_str(fs_meta->mtime, timeBuf)); tsk_fprintf(a_hFile, "Accessed:\t%s\n", tsk_fs_time_to_str(fs_meta->atime, timeBuf)); tsk_fprintf(a_hFile, "Created:\t%s\n", tsk_fs_time_to_str(fs_meta->crtime, timeBuf)); if (fs_meta->mtime) fs_meta->mtime += a_sec_skew; if (fs_meta->atime) fs_meta->atime += a_sec_skew; if (fs_meta->crtime) fs_meta->crtime += a_sec_skew; tsk_fprintf(a_hFile, "\nOriginal Directory Entry Times:\n"); } else { tsk_fprintf(a_hFile, "\nDirectory Entry Times:\n"); } tsk_fprintf(a_hFile, "Written:\t%s\n", tsk_fs_time_to_str(fs_meta->mtime, timeBuf)); tsk_fprintf(a_hFile, "Accessed:\t%s\n", tsk_fs_time_to_str(fs_meta->atime, timeBuf)); tsk_fprintf(a_hFile, "Created:\t%s\n", tsk_fs_time_to_str(fs_meta->crtime, timeBuf)); /* Print the specified number of sector addresses. 
*/ tsk_fprintf(a_hFile, "\nSectors:\n"); if (a_numblock > 0) { /* A bad hack to force a specified number of blocks */ fs_meta->size = a_numblock * a_fs->block_size; } print.istat_seen = 0; print.idx = 0; print.hFile = a_hFile; if (tsk_fs_file_walk(fs_file, (TSK_FS_FILE_WALK_FLAG_ENUM)(TSK_FS_FILE_WALK_FLAG_AONLY | TSK_FS_FILE_WALK_FLAG_SLACK), print_addr_act, (void *) &print)) { tsk_fprintf(a_hFile, "\nError reading file\n"); tsk_error_print(a_hFile); tsk_error_reset(); } else if (print.idx != 0) { tsk_fprintf(a_hFile, "\n"); } tsk_fs_file_close(fs_file); return 0; } /* Mark the sector used in the bitmap */ static TSK_WALK_RET_ENUM inode_walk_file_act(TSK_FS_FILE * fs_file, TSK_OFF_T a_off, TSK_DADDR_T addr, char *buf, size_t size, TSK_FS_BLOCK_FLAG_ENUM a_flags, void *a_ptr) { setbit((uint8_t *) a_ptr, addr); return TSK_WALK_CONT; } /* The inode_walk call back for each file. we want only the directories */ static TSK_WALK_RET_ENUM inode_walk_dent_act(TSK_FS_FILE * fs_file, const char *a_path, void *a_ptr) { unsigned int flags = TSK_FS_FILE_WALK_FLAG_SLACK | TSK_FS_FILE_WALK_FLAG_AONLY; if ((fs_file->meta == NULL) || (fs_file->meta->type != TSK_FS_META_TYPE_DIR)) return TSK_WALK_CONT; /* Get the sector addresses & ignore any errors */ if (tsk_fs_file_walk(fs_file, (TSK_FS_FILE_WALK_FLAG_ENUM)flags, inode_walk_file_act, a_ptr)) { tsk_error_reset(); } return TSK_WALK_CONT; } /** * Walk the inodes in a specified range and do a TSK_FS_META_WALK_CB callback * for each inode that satisfies criteria specified by a set of * TSK_FS_META_FLAG_ENUM flags. The following flags are supported: * TSK_FS_META_FLAG_ALLOC, TSK_FS_META_FLAG_UNALLOC, TSK_FS_META_FLAG_ORPHAN, * TSK_FS_META_FLAG_USED (FATXX only), and TSK_FS_META_FLAG_UNUSED * (FATXX only). * * @param [in] a_fs File system that contains the inodes. * @param [in] a_start_inum Inclusive lower bound of inode range. * @param [in] a_end_inum Inclusive upper bound of inode range. * @param [in] a_selection_flags Inode selection criteria. * @param [in] a_action Callback function for selected inodes. * @param [in] a_ptr Private data pointer passed through to callback function. * @return 0 on success, 1 on failure, per TSK convention */ uint8_t fatfs_inode_walk(TSK_FS_INFO *a_fs, TSK_INUM_T a_start_inum, TSK_INUM_T a_end_inum, TSK_FS_META_FLAG_ENUM a_selection_flags, TSK_FS_META_WALK_CB a_action, void *a_ptr) { char *func_name = "fatfs_inode_walk"; FATFS_INFO *fatfs = (FATFS_INFO*)a_fs; unsigned int flags = a_selection_flags; TSK_INUM_T end_inum_tmp = 0; TSK_FS_FILE *fs_file = NULL; TSK_DADDR_T ssect = 0; TSK_DADDR_T lsect = 0; TSK_DADDR_T sect = 0; char *dino_buf = NULL; FATFS_DENTRY *dep = NULL; unsigned int dentry_idx = 0; uint8_t *dir_sectors_bitmap = NULL; ssize_t cnt = 0; uint8_t done = 0; tsk_error_reset(); if (fatfs_ptr_arg_is_null(a_fs, "a_fs", func_name) || fatfs_ptr_arg_is_null(a_action, "a_action", func_name)) { return 1; } if (a_start_inum < a_fs->first_inum || a_start_inum > a_fs->last_inum) { tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); tsk_error_set_errstr("%s: Begin inode out of range: %" PRIuINUM "", func_name, a_start_inum); return 1; } else if (a_end_inum < a_fs->first_inum || a_end_inum > a_fs->last_inum || a_end_inum < a_start_inum) { tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); tsk_error_set_errstr("%s: End inode out of range: %" PRIuINUM "", func_name, a_end_inum); return 1; } /* FAT file systems do not really have the concept of unused inodes. 
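 *
 * (Illustrative note, not part of the original source.) The selection flags handled
 * below are what a caller passes through the generic inode-walk wrapper; a minimal
 * sketch, assuming the standard TSK 4.x tsk_fs_meta_walk() API and callback
 * signature, with fs being an already-open TSK_FS_INFO handle:
 *
 *      static TSK_WALK_RET_ENUM
 *      count_cb(TSK_FS_FILE *fs_file, void *ptr)
 *      {
 *          (*(size_t *) ptr)++;
 *          return TSK_WALK_CONT;
 *      }
 *
 *      size_t n = 0;
 *      tsk_fs_meta_walk(fs, fs->first_inum, fs->last_inum,
 *          TSK_FS_META_FLAG_ALLOC, count_cb, &n);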
*/ if ((flags & TSK_FS_META_FLAG_UNUSED) && !(flags & TSK_FS_META_FLAG_USED)) { return 0; } flags |= TSK_FS_META_FLAG_USED; flags &= ~TSK_FS_META_FLAG_UNUSED; /* Make sure the inode selection flags are set correctly. */ if (flags & TSK_FS_META_FLAG_ORPHAN) { /* If ORPHAN file inodes are wanted, make sure that the UNALLOC * selection flag is set. */ flags |= TSK_FS_META_FLAG_UNALLOC; flags &= ~TSK_FS_META_FLAG_ALLOC; } else { /* If neither of the ALLOC or UNALLOC inode selection flags are set, * then set them both. */ if (((flags & TSK_FS_META_FLAG_ALLOC) == 0) && ((flags & TSK_FS_META_FLAG_UNALLOC) == 0)) { flags |= (TSK_FS_META_FLAG_ALLOC | TSK_FS_META_FLAG_UNALLOC); } } if (tsk_verbose) { tsk_fprintf(stderr, "%s: Inode walking %" PRIuINUM " to %" PRIuINUM "\n", func_name, a_start_inum, a_end_inum); } /* If we are looking for orphan files and have not yet populated * the list of files reachable by name for this file system, do so now. */ if ((flags & TSK_FS_META_FLAG_ORPHAN)) { if (tsk_fs_dir_load_inum_named(a_fs) != TSK_OK) { tsk_error_errstr2_concat( "%s: Identifying orphan inodes", func_name); return 1; } } /* Allocate a TSK_FS_FILE object with a TSK_FS_META object to populate and * pass to the callback function when an inode that fits the inode * selection criteria is found. */ if ((fs_file = tsk_fs_file_alloc(a_fs)) == NULL) { return 1; } if ((fs_file->meta = tsk_fs_meta_alloc(FATFS_FILE_CONTENT_LEN)) == NULL) { return 1; } /* Process the root directory inode, if it's included in the walk. */ if (a_start_inum == a_fs->root_inum) { if (((TSK_FS_META_FLAG_ALLOC & flags) == TSK_FS_META_FLAG_ALLOC) && ((TSK_FS_META_FLAG_ORPHAN & flags) == 0)) { TSK_WALK_RET_ENUM retval = TSK_WALK_CONT; if (fatfs_make_root(fatfs, fs_file->meta)) { tsk_fs_file_close(fs_file); return 1; } retval = a_action(fs_file, a_ptr); if (retval == TSK_WALK_STOP) { tsk_fs_file_close(fs_file); return 0; } else if (retval == TSK_WALK_ERROR) { tsk_fs_file_close(fs_file); return 1; } } a_start_inum++; if (a_start_inum == a_end_inum) { tsk_fs_file_close(fs_file); return 0; } } /* Allocate a bitmap to keep track of which sectors are allocated to * directories. */ if ((dir_sectors_bitmap = (uint8_t*)tsk_malloc((size_t) ((a_fs->block_count + 7) / 8))) == NULL) { tsk_fs_file_close(fs_file); return 1; } /* If not doing an orphan files search, populate the directory sectors * bitmap. The bitmap will be used to make sure that no sector marked as * allocated to a directory is skipped when searching for directory * entries to map to inodes. */ if ((flags & TSK_FS_META_FLAG_ORPHAN) == 0) { if (tsk_verbose) { tsk_fprintf(stderr, "fatfs_inode_walk: Walking directories to collect sector info\n"); } /* Manufacture an inode for the root directory. */ if (fatfs_make_root(fatfs, fs_file->meta)) { tsk_fs_file_close(fs_file); free(dir_sectors_bitmap); return 1; } /* Do a file_walk on the root directory to set the bits in the * directory sectors bitmap for each sector allocated to the root * directory. */ if (tsk_fs_file_walk(fs_file, (TSK_FS_FILE_WALK_FLAG_ENUM)(TSK_FS_FILE_WALK_FLAG_SLACK | TSK_FS_FILE_WALK_FLAG_AONLY), inode_walk_file_act, (void*)dir_sectors_bitmap)) { tsk_fs_file_close(fs_file); free(dir_sectors_bitmap); return 1; } /* Now walk recursively through the entire directory tree to set the * bits in the directory sectors bitmap for each sector allocated to * the children of the root directory. 
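 *
 * (Illustrative note, not part of the original source.) The bitmap devotes one bit
 * per sector; the setbit()/isset() helpers used with it amount to the usual bit
 * arithmetic, roughly:
 *
 *      map[addr / 8] |= (uint8_t)(1 << (addr % 8));        // set
 *      is_set = (map[addr / 8] & (1 << (addr % 8))) != 0;  // test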
*/ if (tsk_fs_dir_walk(a_fs, a_fs->root_inum, (TSK_FS_DIR_WALK_FLAG_ENUM)(TSK_FS_DIR_WALK_FLAG_ALLOC | TSK_FS_DIR_WALK_FLAG_RECURSE | TSK_FS_DIR_WALK_FLAG_NOORPHAN), inode_walk_dent_act, (void *) dir_sectors_bitmap)) { tsk_error_errstr2_concat ("- fatfs_inode_walk: mapping directories"); tsk_fs_file_close(fs_file); free(dir_sectors_bitmap); return 1; } } /* If the end inode is the one of the virtual virtual FAT files or the * virtual orphan files directory, adjust the end inum and handle the * virtual inodes after the main inode walking loop below completes. */ if (a_end_inum > a_fs->last_inum - FATFS_NUM_VIRT_FILES(fatfs)) { end_inum_tmp = a_fs->last_inum - FATFS_NUM_VIRT_FILES(fatfs); } else { end_inum_tmp = a_end_inum; } /* Map the begin and end inodes to the sectors that contain them. * This sets the image level boundaries for the inode walking loop. */ ssect = FATFS_INODE_2_SECT(fatfs, a_start_inum); if (ssect > a_fs->last_block) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); tsk_error_set_errstr ("%s: Begin inode in sector too big for image: %" PRIuDADDR, func_name, ssect); tsk_fs_file_close(fs_file); free(dir_sectors_bitmap); return 1; } lsect = FATFS_INODE_2_SECT(fatfs, end_inum_tmp); if (lsect > a_fs->last_block) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); tsk_error_set_errstr ("%s: End inode in sector too big for image: %" PRIuDADDR, func_name, lsect); tsk_fs_file_close(fs_file); free(dir_sectors_bitmap); return 1; } /* Allocate a buffer big enough to read in a cluster at a time. */ if ((dino_buf = (char*)tsk_malloc(fatfs->csize << fatfs->ssize_sh)) == NULL) { tsk_fs_file_close(fs_file); free(dir_sectors_bitmap); return 1; } /* Walk the inodes. */ sect = ssect; while (sect <= lsect) { int cluster_is_alloc = 0; size_t num_sectors_to_process = 0; size_t sector_idx = 0; uint8_t do_basic_dentry_test = 0; /* Read in a chunk of the image to process on this iteration of the inode * walk. The actual size of the read will depend on whether or not it is * coming from the root directory of a FAT12 or FAT16 file system. As * indicated by the size of the buffer, the data area (exFAT cluster * heap) will for the most part be read in a cluster at a time. * However, the root directory for a FAT12/FAT16 file system precedes * the data area and the read size for it should be a sector, not a * cluster. */ if (sect < fatfs->firstclustsect) { if ((flags & TSK_FS_META_FLAG_ORPHAN) != 0) { /* If orphan file hunting, there are no orphans in the root * directory, so skip ahead to the data area. */ sect = fatfs->firstclustsect; continue; } /* Read in a FAT12/FAT16 root directory sector. */ cnt = tsk_fs_read_block(a_fs, sect, dino_buf, fatfs->ssize); if (cnt != fatfs->ssize) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2 ("%s (root dir): sector: %" PRIuDADDR, func_name, sect); tsk_fs_file_close(fs_file); free(dir_sectors_bitmap); free(dino_buf); return 1; } cluster_is_alloc = 1; num_sectors_to_process = 1; } else { /* The walk has proceeded into the data area (exFAT cluster heap). * It's time to read in a cluster at a time. Get the base sector * for the cluster that contains the current sector. */ sect = FATFS_CLUST_2_SECT(fatfs, (FATFS_SECT_2_CLUST(fatfs, sect))); /* Determine whether the cluster is allocated. Skip it if it is * not allocated and the UNALLOCATED inode selection flag is not * set. 
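 *
 * (Illustrative worked example, not part of the original source.) For a typical FAT
 * layout, where data clusters are numbered starting at 2, FATFS_CLUST_2_SECT() is
 * essentially firstclustsect + (cluster - 2) * csize. With csize = 8 sectors per
 * cluster and firstclustsect = 1000, cluster 5 starts at sector 1024, so converting
 * a sector to its cluster and back (as done above) snaps it to the first sector of
 * its cluster.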
*/ cluster_is_alloc = fatfs_is_sectalloc(fatfs, sect); if ((cluster_is_alloc == 0) && ((flags & TSK_FS_META_FLAG_UNALLOC) == 0)) { sect += fatfs->csize; continue; } else if (cluster_is_alloc == -1) { tsk_fs_file_close(fs_file); free(dir_sectors_bitmap); free(dino_buf); return 1; } /* If the cluster is allocated but is not allocated to a * directory, then skip it. NOTE: This will miss orphan file * entries in the slack space of files. */ if ((cluster_is_alloc == 1) && (isset(dir_sectors_bitmap, sect) == 0)) { sect += fatfs->csize; continue; } /* The final cluster may not be full. */ if (lsect - sect + 1 < fatfs->csize) { num_sectors_to_process = (size_t) (lsect - sect + 1); } else { num_sectors_to_process = fatfs->csize; } /* Read in a cluster. */ cnt = tsk_fs_read_block (a_fs, sect, dino_buf, num_sectors_to_process << fatfs->ssize_sh); if (cnt != (num_sectors_to_process << fatfs->ssize_sh)) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2("%s: sector: %" PRIuDADDR, func_name, sect); tsk_fs_file_close(fs_file); free(dir_sectors_bitmap); free(dino_buf); return 1; } } /* Now that the sectors are read in, prepare to step through them in * directory entry size chunks. Only do a basic test to confirm the * contents of each chunk is a directory entry unless the sector that * contains it is not allocated to a directory or is unallocated.*/ do_basic_dentry_test = 1; if ((isset(dir_sectors_bitmap, sect) == 0) || (cluster_is_alloc == 0)) { do_basic_dentry_test = 0; } /* Walk through the sectors read in. */ for (sector_idx = 0; sector_idx < num_sectors_to_process; sector_idx++) { TSK_INUM_T inum = 0; /* If the last inode in this sector is before the start * inode, skip the sector. */ if (FATFS_SECT_2_INODE(fatfs, sect + 1) < a_start_inum) { sect++; continue; } /* Advance the directory entry pointer to the start of the * sector. */ dep = (FATFS_DENTRY*)(&dino_buf[sector_idx << fatfs->ssize_sh]); /* If the sector is not allocated to a directory and the first * chunk is not a directory entry, skip the sector. */ if (!isset(dir_sectors_bitmap, sect) && !fatfs->is_dentry(fatfs, dep, (FATFS_DATA_UNIT_ALLOC_STATUS_ENUM)cluster_is_alloc, do_basic_dentry_test)) { sect++; continue; } /* Get the base inode address of this sector. */ inum = FATFS_SECT_2_INODE(fatfs, sect); if (tsk_verbose) { tsk_fprintf(stderr, "%s: Processing sector %" PRIuDADDR " starting at inode %" PRIuINUM "\n", func_name, sect, inum); } /* Walk through the potential directory entries in the sector. */ for (dentry_idx = 0; dentry_idx < fatfs->dentry_cnt_se; dentry_idx++, inum++, dep++) { int retval; TSK_RETVAL_ENUM retval2 = TSK_OK; /* If the inode address of the potential entry is less than * the beginning inode address for the inode walk, skip it. */ if (inum < a_start_inum) { continue; } /* If inode address of the potential entry is greater than the * ending inode address for the walk, terminate the inode walk. */ if (inum > end_inum_tmp) { done = 1; break; } /* If the potential entry is likely not an entry, or it is an * entry that is not reported in an inode walk, or it does not * satisfy the inode selection flags, then skip it. 
*/ if (!fatfs->is_dentry(fatfs, dep, (FATFS_DATA_UNIT_ALLOC_STATUS_ENUM)cluster_is_alloc, do_basic_dentry_test) || fatfs->inode_walk_should_skip_dentry(fatfs, inum, dep, flags, cluster_is_alloc)) { continue; } retval2 = fatfs->dinode_copy(fatfs, inum, dep, cluster_is_alloc, fs_file); if (retval2 != TSK_OK) { if (retval2 == TSK_COR) { /* Corrupted, move on to the next chunk. */ if (tsk_verbose) { tsk_error_print(stderr); } tsk_error_reset(); continue; } else { tsk_fs_file_close(fs_file); free(dir_sectors_bitmap); free(dino_buf); return 1; } } if (tsk_verbose) { tsk_fprintf(stderr, "%s: Directory Entry %" PRIuINUM " (%u) at sector %" PRIuDADDR "\n", func_name, inum, dentry_idx, sect); } /* Do the callback. */ retval = a_action(fs_file, a_ptr); if (retval == TSK_WALK_STOP) { tsk_fs_file_close(fs_file); free(dir_sectors_bitmap); free(dino_buf); return 0; } else if (retval == TSK_WALK_ERROR) { tsk_fs_file_close(fs_file); free(dir_sectors_bitmap); free(dino_buf); return 1; } } sect++; if (done) { break; } } if (done) { break; } } free(dir_sectors_bitmap); free(dino_buf); // handle the virtual orphans folder and FAT files if they asked for them if ((a_end_inum > a_fs->last_inum - FATFS_NUM_VIRT_FILES(fatfs)) && (flags & TSK_FS_META_FLAG_ALLOC) && ((flags & TSK_FS_META_FLAG_ORPHAN) == 0)) { TSK_INUM_T inum; // cycle through the special files for (inum = a_fs->last_inum - FATFS_NUM_VIRT_FILES(fatfs) + 1; inum <= a_end_inum; inum++) { int retval; tsk_fs_meta_reset(fs_file->meta); if (inum == fatfs->mbr_virt_inum) { if (fatfs_make_mbr(fatfs, fs_file->meta)) { tsk_fs_file_close(fs_file); return 1; } } else if (inum == fatfs->fat1_virt_inum) { if (fatfs_make_fat(fatfs, 1, fs_file->meta)) { tsk_fs_file_close(fs_file); return 1; } } else if (inum == fatfs->fat2_virt_inum && fatfs->numfat == 2) { if (fatfs_make_fat(fatfs, 2, fs_file->meta)) { tsk_fs_file_close(fs_file); return 1; } } else if (inum == TSK_FS_ORPHANDIR_INUM(a_fs)) { if (tsk_fs_dir_make_orphan_dir_meta(a_fs, fs_file->meta)) { tsk_fs_file_close(fs_file); return 1; } } retval = a_action(fs_file, a_ptr); if (retval == TSK_WALK_STOP) { tsk_fs_file_close(fs_file); return 0; } else if (retval == TSK_WALK_ERROR) { tsk_fs_file_close(fs_file); return 1; } } } tsk_fs_file_close(fs_file); return 0; } sleuthkit-4.2.0/tsk/fs/fatfs_utils.c000755 000765 000024 00000023151 12576320700 020232 0ustar00carrierstaff000000 000000 /* ** The Sleuth Kit ** ** Copyright (c) 2013 Basis Technology Corp. All rights reserved ** Contact: Brian Carrier [carrier sleuthkit [dot] org] ** ** This software is distributed under the Common Public License 1.0 ** */ /** * \file fatfs_utils.c * Contains utility functions for processing FAT file systems. */ #include "tsk_fs_i.h" #include "tsk_fatfs.h" #include /** * \internal * Tests whether a pointer argument is set to NULL. If the pointer is NULL, * sets a TSK_ERR_FS_ARG error with an error message that includes a parameter * name and a function name supplied by the caller. * * @param a_ptr The pointer to test for NULL. * @param a_param_name The name of the parameter for which the pointer was * passed as an argument. * @param a_func_name The name of the function for which a_param is a * parameter. * @return Returns 1 if the pointer is NULL, 0 otherwise. 
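 *
 * Typical use is as a guard clause at the top of a function, as done throughout
 * this file:
 *
 *      if (fatfs_ptr_arg_is_null(a_fatfs, "a_fatfs", func_name)) {
 *          return 1;
 *      }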
 */
uint8_t
fatfs_ptr_arg_is_null(void *a_ptr, const char *a_param_name, const char *a_func_name)
{
    const char *func_name = "fatfs_ptr_arg_is_null";

    assert(a_param_name != NULL);
    assert(a_func_name != NULL);

    if (a_ptr == NULL) {
        tsk_error_reset();
        tsk_error_set_errno(TSK_ERR_FS_ARG);
        if ((a_param_name != NULL) && (a_func_name != NULL)) {
            tsk_error_set_errstr("%s: %s is NULL", a_func_name, a_param_name);
        }
        else {
            tsk_error_set_errstr("%s: NULL pointer", func_name);
        }
        return 1;
    }

    return 0;
}

/**
 * \internal
 * Tests whether an inode address is in the range of valid inode addresses for
 * a given file system.
 *
 * @param a_fatfs Generic FAT file system info structure.
 * @param a_inum An inode address.
 * @return Returns 1 if the address is in range, 0 otherwise.
 */
uint8_t
fatfs_inum_is_in_range(FATFS_INFO *a_fatfs, TSK_INUM_T a_inum)
{
    const char *func_name = "fatfs_inum_is_in_range";
    TSK_FS_INFO *fs = (TSK_FS_INFO*)a_fatfs;

    assert(a_fatfs != NULL);
    if (fatfs_ptr_arg_is_null(a_fatfs, "a_fatfs", func_name)) {
        return 0;
    }

    if ((a_inum < fs->first_inum) || (a_inum > fs->last_inum)) {
        return 0;
    }

    return 1;
}

/**
 * \internal
 * Tests whether an inode address argument is in the range of valid inode
 * addresses for a given file system. If the address is out of range,
 * sets a TSK_ERR_FS_ARG error with an error message that includes the inode
 * address and a function name supplied by the caller.
 *
 * @param a_fatfs Generic FAT file system info structure.
 * @param a_inum An inode address.
 * @param a_func_name The name of the function that received the inode address
 * as an argument.
 * @return Returns 1 if the address is in range, 0 otherwise.
 */
uint8_t
fatfs_inum_arg_is_in_range(FATFS_INFO *a_fatfs, TSK_INUM_T a_inum, const char *a_func_name)
{
    const char *func_name = "fatfs_inum_arg_is_in_range";

    assert(a_fatfs != NULL);
    assert(a_func_name != NULL);

    if (fatfs_ptr_arg_is_null(a_fatfs, "a_fatfs", func_name)) {
        return 0;
    }

    if (!fatfs_inum_is_in_range(a_fatfs, a_inum)) {
        tsk_error_reset();
        tsk_error_set_errno(TSK_ERR_FS_ARG);
        if (a_func_name != NULL) {
            tsk_error_set_errstr("%s: inode address: %" PRIuINUM " out of range", a_func_name, a_inum);
        }
        else {
            tsk_error_set_errstr("%s: inode address: %" PRIuINUM " out of range", func_name, a_inum);
        }
        return 0;
    }

    return 1;
}

/**
 * \internal
 * Convert a DOS time stamp into a UNIX time stamp. A DOS time stamp consists
 * of a date (with the year stored as an offset from 1980) and a time. A UNIX
 * time stamp is seconds since January 1, 1970 in UTC.
 *
 * @param date Date part of a DOS time stamp.
 * @param time Time part of a DOS time stamp.
 * @param timetens Tenths of seconds part of a DOS time stamp, range is 0-199.
 * @return A UNIX time stamp.
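 *
 * (Illustrative worked example, not part of the original source, assuming the
 * standard FAT on-disk layout: year-since-1980 in the top 7 bits of the date,
 * then month and day; hours in the top 5 bits of the time, then minutes and
 * 2-second units.)
 *
 *      date = 0x4761 decodes to year 1980 + 35 = 2015, month 11, day 1
 *      time = 0x7C20 decodes to 15 hours, 33 minutes, 0 seconds
 *
 * so fatfs_dos_2_unix_time(0x4761, 0x7C20, 0) corresponds to 2015-11-01 15:33:00
 * in the local time zone that mktime() applies.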
 */
time_t
fatfs_dos_2_unix_time(uint16_t date, uint16_t time, uint8_t timetens)
{
    struct tm tm1;
    time_t ret;

    if (date == 0)
        return 0;

    memset(&tm1, 0, sizeof(struct tm));

    tm1.tm_sec = ((time & FATFS_SEC_MASK) >> FATFS_SEC_SHIFT) * 2;
    if ((tm1.tm_sec < 0) || (tm1.tm_sec > 60))
        tm1.tm_sec = 0;
    /* The timetens value has a range of 0 to 199 */
    if (timetens > 100)
        tm1.tm_sec++;

    tm1.tm_min = ((time & FATFS_MIN_MASK) >> FATFS_MIN_SHIFT);
    if ((tm1.tm_min < 0) || (tm1.tm_min > 59))
        tm1.tm_min = 0;

    tm1.tm_hour = ((time & FATFS_HOUR_MASK) >> FATFS_HOUR_SHIFT);
    if ((tm1.tm_hour < 0) || (tm1.tm_hour > 23))
        tm1.tm_hour = 0;

    tm1.tm_mday = ((date & FATFS_DAY_MASK) >> FATFS_DAY_SHIFT);
    if ((tm1.tm_mday < 1) || (tm1.tm_mday > 31))
        tm1.tm_mday = 0;

    tm1.tm_mon = ((date & FATFS_MON_MASK) >> FATFS_MON_SHIFT) - 1;
    if ((tm1.tm_mon < 0) || (tm1.tm_mon > 11))
        tm1.tm_mon = 0;

    /* There is a limit to the year because the UNIX time value is
     * a 32-bit value
     * the maximum UNIX time is Tue Jan 19 03:14:07 2038
     */
    tm1.tm_year = ((date & FATFS_YEAR_MASK) >> FATFS_YEAR_SHIFT) + 80;
    if ((tm1.tm_year < 0) || (tm1.tm_year > 137))
        tm1.tm_year = 0;

    /* set the daylight savings variable to -1 so that mktime() figures
     * it out */
    tm1.tm_isdst = -1;

    ret = mktime(&tm1);

    if (ret < 0) {
        if (tsk_verbose)
            tsk_fprintf(stderr,
                "fatfs_dos_2_unix_time: Error running mktime() on: %d:%d:%d %d/%d/%d\n",
                ((time & FATFS_HOUR_MASK) >> FATFS_HOUR_SHIFT),
                ((time & FATFS_MIN_MASK) >> FATFS_MIN_SHIFT),
                ((time & FATFS_SEC_MASK) >> FATFS_SEC_SHIFT) * 2,
                ((date & FATFS_MON_MASK) >> FATFS_MON_SHIFT) - 1,
                ((date & FATFS_DAY_MASK) >> FATFS_DAY_SHIFT),
                ((date & FATFS_YEAR_MASK) >> FATFS_YEAR_SHIFT) + 80);
        return 0;
    }

    return ret;
}

/**
 * \internal
 * Converts the tenths of seconds part of a DOS time stamp into nanoseconds.
 *
 * @param timetens Tenths of seconds part of a DOS time stamp, range is 0-199.
 * @return A duration in nanoseconds.
 */
uint32_t
fatfs_dos_2_nanosec(uint8_t timetens)
{
    timetens %= 100;
    return timetens * 10000000;
}

/**
 * \internal
 * Cleans up a string so that it contains only ASCII characters. Useful when
 * a buffer may contain non-ASCII or otherwise unprintable bytes that should
 * not be passed along as part of a name.
 *
 * @param str The string to clean up.
 */
void
fatfs_cleanup_ascii(char *str)
{
    const char *func_name = "fatfs_cleanup_ascii";

    assert(str != NULL);
    if (!fatfs_ptr_arg_is_null(str, "str", func_name)) {
        int i;
        for (i = 0; str[i] != '\0'; i++) {
            if ((unsigned char) (str[i]) > 0x7e) {
                str[i] = '^';
            }
        }
    }
}

/**
 * \internal
 * Converts a UTF-16 string from an inode into a UTF-8 string. If the
 * conversion fails, sets a TSK_ERR_FS_UNICODE error with an error message
 * that includes the inode address and a description of the UTF-16 string
 * supplied by the caller.
 *
 * @param a_fatfs Generic FAT file system info structure.
 * @param a_src The UTF-16 string.
 * @param a_src_len The length of the UTF-16 string.
 * @param a_dest The buffer for the UTF-8 string.
 * @param a_dest_len The length of the UTF-8 string buffer.
 * @param a_inum The address of the source inode, used if an error message is
 * generated.
 * @param a_desc A description of the source string, used if an error message
 * is generated.
 * @return TSKConversionResult.
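 *
 * (Illustrative usage sketch, not part of the original source; the buffer size and
 * the utf16_name/utf16_len variables are hypothetical, and a_src_len is a count of
 * UTF-16 units rather than bytes.)
 *
 *      UTF8 utf8_name[512];
 *      if (fatfs_utf16_inode_str_2_utf8(a_fatfs, utf16_name, utf16_len,
 *              utf8_name, sizeof(utf8_name), a_inum, "file name") != TSKconversionOK) {
 *          // fall back to a placeholder name
 *      }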
*/ TSKConversionResult fatfs_utf16_inode_str_2_utf8(FATFS_INFO *a_fatfs, UTF16 *a_src, size_t a_src_len, UTF8 *a_dest, size_t a_dest_len, TSK_INUM_T a_inum, const char *a_desc) { const char *func_name = "fatfs_copy_utf16_str"; TSK_FS_INFO *fs = &(a_fatfs->fs_info); TSKConversionResult conv_result = TSKconversionOK; uint32_t i = 0; assert(a_fatfs != NULL); assert(a_src != NULL); assert(a_src_len > 0); assert(a_dest != NULL); assert(a_dest_len > 0); assert(a_desc != NULL); if (fatfs_ptr_arg_is_null(a_fatfs, "a_fatfs", func_name)) { return TSKsourceIllegal; } if (fatfs_ptr_arg_is_null(a_fatfs, "a_src", func_name)) { return TSKsourceExhausted; } if (a_src_len <= 0) { return TSKsourceExhausted; } if (fatfs_ptr_arg_is_null(a_fatfs, "a_dest", func_name)) { return TSKtargetExhausted; } if (a_dest_len <= 0) { return TSKtargetExhausted; } if (fatfs_ptr_arg_is_null(a_fatfs, "a_desc", func_name)) { return TSKsourceIllegal; } conv_result = tsk_UTF16toUTF8(fs->endian, (const UTF16**)&a_src, (UTF16*)&a_src[a_src_len], &a_dest, (UTF8*)&a_dest[a_dest_len], TSKlenientConversion); if (conv_result == TSKconversionOK) { /* Make sure the result is NULL-terminated. */ if ((uintptr_t) a_dest > (uintptr_t)a_dest + sizeof(a_dest)) { a_dest[sizeof(a_dest) - 1] = '\0'; } else { *a_dest = '\0'; } } else { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNICODE); tsk_error_set_errstr("%s: Error converting %s for inum %"PRIuINUM" from UTF16 to UTF8: %d", func_name, a_desc, a_inum, conv_result); *a_dest = '\0'; } /* Clean up non-ASCII characters since the destination buffer is supposed * to be UTF-8 and the encoding of the source is essentially unknown and * may be junk. */ fatfs_cleanup_ascii((char*)a_dest); /* Remove control characters. */ i = 0; while (a_dest[i] != '\0') { if (TSK_IS_CNTRL(a_dest[i])) { a_dest[i] = '^'; } ++i; } return conv_result; }sleuthkit-4.2.0/tsk/fs/fatxxfs.c000755 000765 000024 00000073262 12576320700 017402 0ustar00carrierstaff000000 000000 /* ** fatxxfs ** The Sleuth Kit ** ** Content and meta data layer support for the FATXX file system ** ** Brian Carrier [carrier sleuthkit [dot] org] ** Copyright (c) 2006-2013 Brian Carrier, Basis Technology. All Rights reserved ** Copyright (c) 2003-2005 Brian Carrier. All rights reserved ** ** TASK ** Copyright (c) 2002 Brian Carrier, @stake Inc. All rights reserved ** ** ** This software is distributed under the Common Public License 1.0 ** ** Unicode added with support from I.D.E.A.L. Technology Corp (Aug '05) ** */ /** * \file fatxxfs.c * Contains the internal TSK FATXX (FAT12, FAT16, FAT32) file system code to * handle basic file system processing for opening file system, processing * sectors, and directory entries. */ #include "tsk_fatxxfs.h" /* * Implementation NOTES * * TSK_FS_META contains the first cluster. file_walk will return sector * values though because the cluster numbers do not start until after * the FAT. That makes it very hard to address the first few blocks! * * Inodes numbers do not exist in FAT. To make up for this we will count * directory entries as the inodes. As the root directory does not have * any records in FAT, we will give it times of 0 and call it inode 2 to * keep consistent with UNIX. After that, each 32-byte slot is numbered * as though it were a directory entry (even if it is not). Therefore, * when an inode walk is performed, not all inode values will be displayed * even when '-e' is given for ils. 
* * Progs like 'ils -e' are very slow because we have to look at each * block to see if it is a file system structure. */ /** * Print details about the file system to a file handle. * * @param fs File system to print details on * @param hFile File handle to print text to * * @returns 1 on error and 0 on success */ static uint8_t fatxxfs_fsstat(TSK_FS_INFO * fs, FILE * hFile) { unsigned int i; int a; TSK_DADDR_T next, snext, sstart, send; FATFS_INFO *fatfs = (FATFS_INFO *) fs; FATXXFS_SB *sb = (FATXXFS_SB*)fatfs->boot_sector_buffer; char *data_buf; FATXXFS_DENTRY *vol_label_dentry = NULL; ssize_t cnt; // clean up any error messages that are lying around tsk_error_reset(); if ((data_buf = (char *) tsk_malloc(fs->block_size)) == NULL) { return 1; } /* Read the root directory sector so that we can get the volume * label from it */ cnt = tsk_fs_read_block(fs, fatfs->rootsect, data_buf, fs->block_size); if (cnt != fs->block_size) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2("fatxxfs_fsstat: root directory: %" PRIuDADDR, fatfs->rootsect); free(data_buf); return 1; } /* Find the dentry that is set as the volume label */ vol_label_dentry = NULL; if (fatfs->ssize <= fs->block_size) { FATXXFS_DENTRY *current_entry = (FATXXFS_DENTRY *) data_buf; for (i = 0; i < fatfs->ssize; i += sizeof(*current_entry)) { if (current_entry->attrib == FATFS_ATTR_VOLUME) { vol_label_dentry = current_entry; break; } current_entry++; } } /* Print the general file system information */ tsk_fprintf(hFile, "FILE SYSTEM INFORMATION\n"); tsk_fprintf(hFile, "--------------------------------------------\n"); tsk_fprintf(hFile, "File System Type: FAT"); if (fs->ftype == TSK_FS_TYPE_FAT12) tsk_fprintf(hFile, "12\n"); else if (fs->ftype == TSK_FS_TYPE_FAT16) tsk_fprintf(hFile, "16\n"); else if (fs->ftype == TSK_FS_TYPE_FAT32) tsk_fprintf(hFile, "32\n"); else tsk_fprintf(hFile, "\n"); tsk_fprintf(hFile, "\nOEM Name: %c%c%c%c%c%c%c%c\n", sb->oemname[0], sb->oemname[1], sb->oemname[2], sb->oemname[3], sb->oemname[4], sb->oemname[5], sb->oemname[6], sb->oemname[7]); if (fatfs->fs_info.ftype != TSK_FS_TYPE_FAT32) { tsk_fprintf(hFile, "Volume ID: 0x%" PRIx32 "\n", tsk_getu32(fs->endian, sb->a.f16.vol_id)); tsk_fprintf(hFile, "Volume Label (Boot Sector): %c%c%c%c%c%c%c%c%c%c%c\n", sb->a.f16.vol_lab[0], sb->a.f16.vol_lab[1], sb->a.f16.vol_lab[2], sb->a.f16.vol_lab[3], sb->a.f16.vol_lab[4], sb->a.f16.vol_lab[5], sb->a.f16.vol_lab[6], sb->a.f16.vol_lab[7], sb->a.f16.vol_lab[8], sb->a.f16.vol_lab[9], sb->a.f16.vol_lab[10]); if ((vol_label_dentry) && (vol_label_dentry->name)) { tsk_fprintf(hFile, "Volume Label (Root Directory): %c%c%c%c%c%c%c%c%c%c%c\n", vol_label_dentry->name[0], vol_label_dentry->name[1], vol_label_dentry->name[2], vol_label_dentry->name[3], vol_label_dentry->name[4], vol_label_dentry->name[5], vol_label_dentry->name[6], vol_label_dentry->name[7], vol_label_dentry->ext[0], vol_label_dentry->ext[1], vol_label_dentry->ext[2]); } else { tsk_fprintf(hFile, "Volume Label (Root Directory):\n"); } tsk_fprintf(hFile, "File System Type Label: %c%c%c%c%c%c%c%c\n", sb->a.f16.fs_type[0], sb->a.f16.fs_type[1], sb->a.f16.fs_type[2], sb->a.f16.fs_type[3], sb->a.f16.fs_type[4], sb->a.f16.fs_type[5], sb->a.f16.fs_type[6], sb->a.f16.fs_type[7]); } else { char *fat_fsinfo_buf; FATXXFS_FSINFO *fat_info; if ((fat_fsinfo_buf = (char *) tsk_malloc(sizeof(FATXXFS_FSINFO))) == NULL) { free(data_buf); return 1; } tsk_fprintf(hFile, "Volume ID: 0x%" PRIx32 "\n", tsk_getu32(fs->endian, 
sb->a.f32.vol_id)); tsk_fprintf(hFile, "Volume Label (Boot Sector): %c%c%c%c%c%c%c%c%c%c%c\n", sb->a.f32.vol_lab[0], sb->a.f32.vol_lab[1], sb->a.f32.vol_lab[2], sb->a.f32.vol_lab[3], sb->a.f32.vol_lab[4], sb->a.f32.vol_lab[5], sb->a.f32.vol_lab[6], sb->a.f32.vol_lab[7], sb->a.f32.vol_lab[8], sb->a.f32.vol_lab[9], sb->a.f32.vol_lab[10]); if ((vol_label_dentry) && (vol_label_dentry->name)) { tsk_fprintf(hFile, "Volume Label (Root Directory): %c%c%c%c%c%c%c%c%c%c%c\n", vol_label_dentry->name[0], vol_label_dentry->name[1], vol_label_dentry->name[2], vol_label_dentry->name[3], vol_label_dentry->name[4], vol_label_dentry->name[5], vol_label_dentry->name[6], vol_label_dentry->name[7], vol_label_dentry->ext[0], vol_label_dentry->ext[1], vol_label_dentry->ext[2]); } else { tsk_fprintf(hFile, "Volume Label (Root Directory):\n"); } tsk_fprintf(hFile, "File System Type Label: %c%c%c%c%c%c%c%c\n", sb->a.f32.fs_type[0], sb->a.f32.fs_type[1], sb->a.f32.fs_type[2], sb->a.f32.fs_type[3], sb->a.f32.fs_type[4], sb->a.f32.fs_type[5], sb->a.f32.fs_type[6], sb->a.f32.fs_type[7]); /* Process the FS info */ if (tsk_getu16(fs->endian, sb->a.f32.fsinfo)) { cnt = tsk_fs_read(fs, (TSK_DADDR_T) tsk_getu16(fs->endian, sb->a.f32.fsinfo) * fs->block_size, fat_fsinfo_buf, sizeof(FATXXFS_FSINFO)); if (cnt != sizeof(FATXXFS_FSINFO)) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2 ("fatxxfs_fsstat: TSK_FS_TYPE_FAT32 FSINFO block: %" PRIuDADDR, (TSK_DADDR_T) tsk_getu16(fs->endian, sb->a.f32.fsinfo)); free(data_buf); free(fat_fsinfo_buf); return 1; } fat_info = (FATXXFS_FSINFO *) fat_fsinfo_buf; tsk_fprintf(hFile, "Next Free Sector (FS Info): %" PRIuDADDR "\n", FATFS_CLUST_2_SECT(fatfs, tsk_getu32(fs->endian, fat_info->nextfree))); tsk_fprintf(hFile, "Free Sector Count (FS Info): %" PRIu32 "\n", (tsk_getu32(fs->endian, fat_info->freecnt) * fatfs->csize)); free(fat_fsinfo_buf); } } free(data_buf); tsk_fprintf(hFile, "\nSectors before file system: %" PRIu32 "\n", tsk_getu32(fs->endian, sb->prevsect)); tsk_fprintf(hFile, "\nFile System Layout (in sectors)\n"); tsk_fprintf(hFile, "Total Range: %" PRIuDADDR " - %" PRIuDADDR "\n", fs->first_block, fs->last_block); if (fs->last_block != fs->last_block_act) tsk_fprintf(hFile, "Total Range in Image: %" PRIuDADDR " - %" PRIuDADDR "\n", fs->first_block, fs->last_block_act); tsk_fprintf(hFile, "* Reserved: 0 - %" PRIuDADDR "\n", fatfs->firstfatsect - 1); tsk_fprintf(hFile, "** Boot Sector: 0\n"); if (fatfs->fs_info.ftype == TSK_FS_TYPE_FAT32) { tsk_fprintf(hFile, "** FS Info Sector: %" PRIu16 "\n", tsk_getu16(fs->endian, sb->a.f32.fsinfo)); tsk_fprintf(hFile, "** Backup Boot Sector: %" PRIu16 "\n", tsk_getu16(fs->endian, sb->a.f32.bs_backup)); } for (i = 0; i < fatfs->numfat; i++) { TSK_DADDR_T base = fatfs->firstfatsect + i * (fatfs->sectperfat); tsk_fprintf(hFile, "* FAT %d: %" PRIuDADDR " - %" PRIuDADDR "\n", i, base, (base + fatfs->sectperfat - 1)); } tsk_fprintf(hFile, "* Data Area: %" PRIuDADDR " - %" PRIuDADDR "\n", fatfs->firstdatasect, fs->last_block); if (fatfs->fs_info.ftype != TSK_FS_TYPE_FAT32) { TSK_DADDR_T x = fatfs->csize * fatfs->clustcnt; tsk_fprintf(hFile, "** Root Directory: %" PRIuDADDR " - %" PRIuDADDR "\n", fatfs->firstdatasect, fatfs->firstclustsect - 1); tsk_fprintf(hFile, "** Cluster Area: %" PRIuDADDR " - %" PRIuDADDR "\n", fatfs->firstclustsect, (fatfs->firstclustsect + x - 1)); if ((fatfs->firstclustsect + x - 1) != fs->last_block) { tsk_fprintf(hFile, "** Non-clustered: %" PRIuDADDR " - %" PRIuDADDR "\n", 
(fatfs->firstclustsect + x), fs->last_block); } } else { TSK_LIST *list_seen = NULL; TSK_DADDR_T x = fatfs->csize * (fatfs->lastclust - 1); TSK_DADDR_T clust, clust_p; tsk_fprintf(hFile, "** Cluster Area: %" PRIuDADDR " - %" PRIuDADDR "\n", fatfs->firstclustsect, (fatfs->firstclustsect + x - 1)); clust_p = fatfs->rootsect; clust = FATFS_SECT_2_CLUST(fatfs, fatfs->rootsect); while ((clust) && (0 == FATFS_ISEOF(clust, FATFS_32_MASK))) { TSK_DADDR_T nxt; clust_p = clust; /* Make sure we do not get into an infinite loop */ if (tsk_list_find(list_seen, clust)) { if (tsk_verbose) tsk_fprintf(stderr, "Loop found while determining root directory size\n"); break; } if (tsk_list_add(&list_seen, clust)) { tsk_list_free(list_seen); list_seen = NULL; return 1; } if (fatfs_getFAT(fatfs, clust, &nxt)) break; clust = nxt; } tsk_list_free(list_seen); list_seen = NULL; tsk_fprintf(hFile, "*** Root Directory: %" PRIuDADDR " - %" PRIuDADDR "\n", fatfs->rootsect, (FATFS_CLUST_2_SECT(fatfs, clust_p + 1) - 1)); if ((fatfs->firstclustsect + x - 1) != fs->last_block) { tsk_fprintf(hFile, "** Non-clustered: %" PRIuDADDR " - %" PRIuDADDR "\n", (fatfs->firstclustsect + x), fs->last_block); } } tsk_fprintf(hFile, "\nMETADATA INFORMATION\n"); tsk_fprintf(hFile, "--------------------------------------------\n"); tsk_fprintf(hFile, "Range: %" PRIuINUM " - %" PRIuINUM "\n", fs->first_inum, fs->last_inum); tsk_fprintf(hFile, "Root Directory: %" PRIuINUM "\n", fs->root_inum); tsk_fprintf(hFile, "\nCONTENT INFORMATION\n"); tsk_fprintf(hFile, "--------------------------------------------\n"); tsk_fprintf(hFile, "Sector Size: %" PRIu16 "\n", fatfs->ssize); tsk_fprintf(hFile, "Cluster Size: %" PRIu32 "\n", (uint32_t) fatfs->csize << fatfs->ssize_sh); tsk_fprintf(hFile, "Total Cluster Range: 2 - %" PRIuDADDR "\n", fatfs->lastclust); /* cycle via cluster and look at each cluster in the FAT * for clusters marked as bad */ cnt = 0; for (i = 2; i <= fatfs->lastclust; i++) { TSK_DADDR_T entry; TSK_DADDR_T sect; /* Get the FAT table entry */ if (fatfs_getFAT(fatfs, i, &entry)) break; if (FATFS_ISBAD(entry, fatfs->mask) == 0) { continue; } if (cnt == 0) tsk_fprintf(hFile, "Bad Sectors: "); sect = FATFS_CLUST_2_SECT(fatfs, i); for (a = 0; a < fatfs->csize; a++) { tsk_fprintf(hFile, "%" PRIuDADDR " ", sect + a); if ((++cnt % 8) == 0) tsk_fprintf(hFile, "\n"); } } if ((cnt > 0) && ((cnt % 8) != 0)) tsk_fprintf(hFile, "\n"); /* Display the FAT Table */ tsk_fprintf(hFile, "\nFAT CONTENTS (in sectors)\n"); tsk_fprintf(hFile, "--------------------------------------------\n"); /* 'sstart' marks the first sector of the current run to print */ sstart = fatfs->firstclustsect; /* cycle via cluster and look at each cluster in the FAT to make runs */ for (i = 2; i <= fatfs->lastclust; i++) { /* 'send' marks the end sector of the current run, which will extend * when the current cluster continues to the next */ send = FATFS_CLUST_2_SECT(fatfs, i + 1) - 1; /* get the next cluster */ if (fatfs_getFAT(fatfs, i, &next)) break; snext = FATFS_CLUST_2_SECT(fatfs, next); /* we are also using the next sector (clust) */ if ((next & fatfs->mask) == (i + 1)) { continue; } /* The next clust is either further away or the clust is available, * print it if is further away */ else if ((next & fatfs->mask)) { if (FATFS_ISEOF(next, fatfs->mask)) tsk_fprintf(hFile, "%" PRIuDADDR "-%" PRIuDADDR " (%" PRIuDADDR ") -> EOF\n", sstart, send, send - sstart + 1); else if (FATFS_ISBAD(next, fatfs->mask)) tsk_fprintf(hFile, "%" PRIuDADDR "-%" PRIuDADDR " (%" PRIuDADDR ") -> 
BAD\n", sstart, send, send - sstart + 1); else tsk_fprintf(hFile, "%" PRIuDADDR "-%" PRIuDADDR " (%" PRIuDADDR ") -> %" PRIuDADDR "\n", sstart, send, send - sstart + 1, snext); } /* reset the starting counter */ sstart = send + 1; } return 0; } uint8_t fatxxfs_open(FATFS_INFO *fatfs) { const char *func_name = "fatxxfs_open"; TSK_FS_INFO *fs = &(fatfs->fs_info); FATXXFS_SB *fatsb = (FATXXFS_SB*)(&fatfs->boot_sector_buffer); int i = 0; ssize_t cnt = 0; TSK_DADDR_T sectors = 0; TSK_FS_DIR * test_dir1; // Directories used to try opening the root directory TSK_FS_DIR * test_dir2; // to see if it's the Android FAT version // clean up any error messages that are lying around tsk_error_reset(); /* Calculate block sizes and layout info */ // sector size fatfs->ssize = tsk_getu16(fs->endian, fatsb->ssize); if (fatfs->ssize == 512) { fatfs->ssize_sh = 9; } else if (fatfs->ssize == 1024) { fatfs->ssize_sh = 10; } else if (fatfs->ssize == 2048) { fatfs->ssize_sh = 11; } else if (fatfs->ssize == 4096) { fatfs->ssize_sh = 12; } else { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_MAGIC); tsk_error_set_errstr ("Error: sector size (%d) is not a multiple of device size (%d)\nDo you have a disk image instead of a partition image?", fatfs->ssize, fs->dev_bsize); if (tsk_verbose) fprintf(stderr, "%s: Invalid sector size (%d)\n", func_name, fatfs->ssize); return 1; } // cluster size fatfs->csize = fatsb->csize; if ((fatfs->csize != 0x01) && (fatfs->csize != 0x02) && (fatfs->csize != 0x04) && (fatfs->csize != 0x08) && (fatfs->csize != 0x10) && (fatfs->csize != 0x20) && (fatfs->csize != 0x40) && (fatfs->csize != 0x80)) { if (tsk_verbose) fprintf(stderr, "%s: Invalid cluster size (%d)\n", func_name, fatfs->csize); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_MAGIC); tsk_error_set_errstr("Not a FATXX file system (cluster size)"); return 1; } // number of FAT tables fatfs->numfat = fatsb->numfat; if ((fatfs->numfat == 0) || (fatfs->numfat > 8)) { if (tsk_verbose) fprintf(stderr, "%s: Invalid number of FATS (%d)\n", func_name, fatfs->numfat); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_MAGIC); tsk_error_set_errstr("Not a FATXX file system (number of FATs)"); return 1; } /* We can't do a sanity check on this b.c. 
TSK_FS_TYPE_FAT32 has a value of 0 */ /* num of root entries */ fatfs->numroot = tsk_getu16(fs->endian, fatsb->numroot); /* if sectors16 is 0, then the number of sectors is stored in sectors32 */ if (0 == (sectors = tsk_getu16(fs->endian, fatsb->sectors16))) sectors = tsk_getu32(fs->endian, fatsb->sectors32); /* if secperfat16 is 0, then read sectperfat32 */ if (0 == (fatfs->sectperfat = tsk_getu16(fs->endian, fatsb->sectperfat16))) fatfs->sectperfat = tsk_getu32(fs->endian, fatsb->a.f32.sectperfat32); if (fatfs->sectperfat == 0) { if (tsk_verbose) fprintf(stderr, "%s: Invalid number of sectors per FAT (%d)\n", func_name, fatfs->sectperfat); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_MAGIC); tsk_error_set_errstr ("Not a FATXX file system (invalid sectors per FAT)"); return 1; } fatfs->firstfatsect = tsk_getu16(fs->endian, fatsb->reserved); if ((fatfs->firstfatsect == 0) || (fatfs->firstfatsect > sectors)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); tsk_error_set_errstr ("Not a FATXX file system (invalid first FAT sector %" PRIuDADDR ")", fatfs->firstfatsect); if (tsk_verbose) fprintf(stderr, "%s: Invalid first FAT (%" PRIuDADDR ")\n", func_name, fatfs->firstfatsect); return 1; } /* Calculate the block info * * The sector of the begining of the data area - which is * after all of the FATs * * For TSK_FS_TYPE_FAT12 and TSK_FS_TYPE_FAT16, the data area starts with the root * directory entries and then the first cluster. For TSK_FS_TYPE_FAT32, * the data area starts with clusters and the root directory * is somewhere in the data area */ fatfs->firstdatasect = fatfs->firstfatsect + fatfs->sectperfat * fatfs->numfat; /* The sector where the first cluster is located. It will be used * to translate cluster addresses to sector addresses * * For TSK_FS_TYPE_FAT32, the first cluster is the start of the data area and * it is after the root directory for TSK_FS_TYPE_FAT12 and TSK_FS_TYPE_FAT16. 
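 * As a rough worked example, a typical FAT16 volume reserves 512 root
 * directory entries; with 512-byte sectors that is (512 * 32) / 512 = 32
 * sectors, so the first cluster begins 32 sectors into the data area.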
At this * point in the program, numroot is set to 0 for TSK_FS_TYPE_FAT32 */ fatfs->firstclustsect = fatfs->firstdatasect + ((fatfs->numroot * 32 + fatfs->ssize - 1) / fatfs->ssize); /* total number of clusters */ fatfs->clustcnt = (sectors - fatfs->firstclustsect) / fatfs->csize; /* the first cluster is #2, so the final cluster is: */ fatfs->lastclust = 1 + fatfs->clustcnt; /* identify the FAT type by the total number of data clusters * this calculation is from the MS FAT Overview Doc * * A FAT file system made by another OS could use different values */ if (fatfs->fs_info.ftype == TSK_FS_TYPE_FAT_DETECT) { if (fatfs->clustcnt < 4085) { fatfs->fs_info.ftype = TSK_FS_TYPE_FAT12; } else if (fatfs->clustcnt < 65525) { fatfs->fs_info.ftype = TSK_FS_TYPE_FAT16; } else { fatfs->fs_info.ftype = TSK_FS_TYPE_FAT32; } fatfs->fs_info.ftype = fatfs->fs_info.ftype; } /* Some sanity checks */ else { if ((fatfs->fs_info.ftype == TSK_FS_TYPE_FAT12) && (fatfs->clustcnt >= 4085)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_MAGIC); tsk_error_set_errstr ("Too many sectors for TSK_FS_TYPE_FAT12: try auto-detect mode"); if (tsk_verbose) fprintf(stderr, "%s: Too many sectors for FAT12\n", func_name); return 1; } } if ((fatfs->fs_info.ftype == TSK_FS_TYPE_FAT32) && (fatfs->numroot != 0)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_MAGIC); tsk_error_set_errstr ("Invalid TSK_FS_TYPE_FAT32 image (numroot != 0)"); if (tsk_verbose) fprintf(stderr, "%s: numroom != 0 for FAT32\n", func_name); return 1; } if ((fatfs->fs_info.ftype != TSK_FS_TYPE_FAT32) && (fatfs->numroot == 0)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_MAGIC); tsk_error_set_errstr ("Invalid FAT image (numroot == 0, and not TSK_FS_TYPE_FAT32)"); if (tsk_verbose) fprintf(stderr, "%s: numroom == 0 and not FAT32\n", func_name); return 1; } /* additional sanity checks if we think we are using the backup boot sector. * The scenario to prevent here is if fat_open is called 6 sectors before the real start * of the file system, then we want to detect that it was not a backup that we saw. */ if (fatfs->using_backup_boot_sector) { // only FAT32 has backup boot sectors.. 
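        // The FAT32 backup boot sector normally sits at sector 6 of the
        // volume, so an open that accidentally started 6 sectors early would
        // have picked up the backup copy. The checks below reject that case:
        // the detected type must be FAT32, and the beginnings of the FAT
        // copies are compared, tolerating only a small number of differing
        // bytes.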
if (fatfs->fs_info.ftype != TSK_FS_TYPE_FAT32) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_MAGIC); tsk_error_set_errstr ("Invalid FAT image (Used what we thought was a backup boot sector, but it is not TSK_FS_TYPE_FAT32)"); if (tsk_verbose) fprintf(stderr, "%s: Had to use backup boot sector, but this isn't FAT32\n", func_name); return 1; } if (fatfs->numroot > 1) { uint8_t buf1[512]; uint8_t buf2[512]; int i2; int numDiffs; cnt = tsk_fs_read(fs, fatfs->firstfatsect * fatfs->ssize, (char *) buf1, 512); if (cnt != 512) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2("%s: FAT1", func_name); fs->tag = 0; return 1; } cnt = tsk_fs_read(fs, (fatfs->firstfatsect + fatfs->sectperfat) * fatfs->ssize, (char *) buf2, 512); if (cnt != 512) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2("%s: FAT2", func_name); fs->tag = 0; return 1; } numDiffs = 0; for (i2 = 0; i2 < 512; i2++) { if (buf1[i2] != buf2[i2]) { numDiffs++; } } if (numDiffs > 25) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_MAGIC); tsk_error_set_errstr ("Invalid FAT image (Too many differences between FATS from guessing (%d diffs))", numDiffs); if (tsk_verbose) fprintf(stderr, "%s: Too many differences in FAT from guessing (%d diffs)\n", func_name, numDiffs); return 1; } } } /* Set the mask to use on the cluster values */ if (fatfs->fs_info.ftype == TSK_FS_TYPE_FAT12) { fatfs->mask = FATFS_12_MASK; } else if (fatfs->fs_info.ftype == TSK_FS_TYPE_FAT16) { fatfs->mask = FATFS_16_MASK; } else if (fatfs->fs_info.ftype == TSK_FS_TYPE_FAT32) { fatfs->mask = FATFS_32_MASK; } else { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("Unknown FAT type in %s: %d\n", func_name, fatfs->fs_info.ftype); return 1; } fs->duname = "Sector"; /* the root directories are always after the FAT for TSK_FS_TYPE_FAT12 and TSK_FS_TYPE_FAT16, * but are dynamically located for TSK_FS_TYPE_FAT32 */ if (fatfs->fs_info.ftype == TSK_FS_TYPE_FAT32) fatfs->rootsect = FATFS_CLUST_2_SECT(fatfs, tsk_getu32(fs->endian, fatsb->a.f32.rootclust)); else fatfs->rootsect = fatfs->firstdatasect; for (i = 0; i < FATFS_FAT_CACHE_N; i++) { fatfs->fatc_addr[i] = 0; fatfs->fatc_ttl[i] = 0; } /* * block calculations : although there are no blocks in fat, we will * use these fields for sector calculations */ fs->first_block = 0; fs->block_count = sectors; fs->last_block = fs->last_block_act = fs->block_count - 1; fs->block_size = fatfs->ssize; // determine the last block we have in this image if ((TSK_DADDR_T) ((fatfs->fs_info.img_info->size - fatfs->fs_info.offset) / fs->block_size) < fs->block_count) fs->last_block_act = (fatfs->fs_info.img_info->size - fatfs->fs_info.offset) / fs->block_size - 1; /* * inode calculations */ /* maximum number of dentries in a sector & cluster */ fatfs->dentry_cnt_se = fatfs->ssize / sizeof(FATXXFS_DENTRY); fatfs->dentry_cnt_cl = fatfs->dentry_cnt_se * fatfs->csize; fs->root_inum = FATFS_ROOTINO; fs->first_inum = FATFS_FIRSTINO; /* Calculate inode addresses for the virtual files (MBR, one or two FATS) * and the virtual orphan files directory. 
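 * These are given the highest inode addresses in the file system; with two
 * FATs the last four addresses are used for $MBR, $FAT1, $FAT2, and the
 * virtual $OrphanFiles directory, in that order.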
*/ fs->last_inum = (FATFS_SECT_2_INODE(fatfs, fs->last_block_act + 1) - 1) + FATFS_NUM_VIRT_FILES(fatfs); fatfs->mbr_virt_inum = fs->last_inum - FATFS_NUM_VIRT_FILES(fatfs) + 1; fatfs->fat1_virt_inum = fatfs->mbr_virt_inum + 1; if (fatfs->numfat == 2) { fatfs->fat2_virt_inum = fatfs->fat1_virt_inum + 1; } else { fatfs->fat2_virt_inum = fatfs->fat1_virt_inum; } /* Calculate the total number of inodes. */ fs->inum_count = fs->last_inum - fs->first_inum + 1; /* Volume ID */ for (fs->fs_id_used = 0; fs->fs_id_used < 4; fs->fs_id_used++) { if (fatfs->fs_info.ftype == TSK_FS_TYPE_FAT32) fs->fs_id[fs->fs_id_used] = fatsb->a.f32.vol_id[fs->fs_id_used]; else fs->fs_id[fs->fs_id_used] = fatsb->a.f16.vol_id[fs->fs_id_used]; } /* * Set the function pointers */ fs->block_walk = fatfs_block_walk; fs->block_getflags = fatfs_block_getflags; fs->inode_walk = fatfs_inode_walk; fs->istat = fatfs_istat; fs->file_add_meta = fatfs_inode_lookup; fs->get_default_attr_type = fatfs_get_default_attr_type; fs->load_attrs = fatfs_make_data_runs; fs->dir_open_meta = fatfs_dir_open_meta; fs->name_cmp = fatfs_name_cmp; fs->fsstat = fatxxfs_fsstat; fs->fscheck = fatfs_fscheck; fs->close = fatfs_close; fs->jblk_walk = fatfs_jblk_walk; fs->jentry_walk = fatfs_jentry_walk; fs->jopen = fatfs_jopen; fatfs->is_cluster_alloc = fatxxfs_is_cluster_alloc; fatfs->is_dentry = fatxxfs_is_dentry; fatfs->dinode_copy = fatxxfs_dinode_copy; fatfs->inode_lookup = fatxxfs_inode_lookup; fatfs->inode_walk_should_skip_dentry = fatxxfs_inode_walk_should_skip_dentry; fatfs->istat_attr_flags = fatxxfs_istat_attr_flags; fatfs->dent_parse_buf = fatxxfs_dent_parse_buf; // initialize the caches tsk_init_lock(&fatfs->cache_lock); tsk_init_lock(&fatfs->dir_lock); fatfs->inum2par = NULL; // Test to see if this is the odd Android case where the FAT entries have no short name // // If there are no entries found with the normal short name // and we find more entries by removing the short name test for allocated directories, then assume // this is the case where we have no short names fatfs->subtype = TSK_FATFS_SUBTYPE_SPEC; test_dir1 = tsk_fs_dir_open_meta(fs, fs->root_inum); if(test_dir1->names_used <= 4){ // At most four automatic directores ($MBR, $FAT1, $FAT1, $OrphanFiles) fatfs->subtype = TSK_FATFS_SUBTYPE_ANDROID_1; test_dir2 = tsk_fs_dir_open_meta(fs, fs->root_inum); if(test_dir2->names_used > test_dir1->names_used){ fatfs->subtype = TSK_FATFS_SUBTYPE_ANDROID_1; } else{ fatfs->subtype = TSK_FATFS_SUBTYPE_SPEC; } tsk_fs_dir_close(test_dir2); } tsk_fs_dir_close(test_dir1); return 0; } /* Return 1 if allocated, 0 if unallocated, and -1 if error */ int8_t fatxxfs_is_cluster_alloc(FATFS_INFO *fatfs, TSK_DADDR_T clust) { TSK_DADDR_T content = 0; if (fatfs_getFAT(fatfs, clust, &content)) return -1; else if (content == FATFS_UNALLOC) return 0; else return 1; } sleuthkit-4.2.0/tsk/fs/fatxxfs_dent.c000755 000765 000024 00000041717 12576320700 020414 0ustar00carrierstaff000000 000000 /* ** fatfs_dent ** The Sleuth Kit ** ** file name layer support for the FAT file system ** ** Brian Carrier [carrier sleuthkit [dot] org] ** Copyright (c) 2006-2013 Brian Carrier, Basis Technology. All Rights reserved ** Copyright (c) 2003-2005 Brian Carrier. All rights reserved ** ** TASK ** Copyright (c) 2002 Brian Carrier, @stake Inc. All rights reserved ** ** ** This software is distributed under the Common Public License 1.0 ** ** Unicode added with support from I.D.E.A.L. 
Technology Corp (Aug '05) ** */ /** * \file fatfs_dent.cpp * Contains the internal TSK FAT file name processing code. */ #include "tsk_fs_i.h" #include "tsk_fatxxfs.h" #include /* Special data structure allocated for each directory to hold the long * file name entries until all entries have been found */ typedef struct { uint8_t name[FATFS_MAXNAMLEN_UTF8]; /* buffer for lfn - in reverse order */ uint16_t start; /* current start of name */ uint8_t chk; /* current checksum */ uint8_t seq; /* seq of first entry in lfn */ } FATXXFS_LFN; /** * /internal * Parse a buffer containing the contents of a directory and add TSK_FS_NAME * objects for each named file found to the TSK_FS_DIR representation of the * directory. * * @param fatfs File system information structure for file system that * contains the directory. * @param a_fs_dir Directory structure into to which parsed file metadata will * be added. * @param buf Buffer that contains the directory contents. * @param len Length of buffer in bytes (must be a multiple of sector * size). * @param addrs Array where each element is the original address of * the corresponding sector in a_buf (size of array is number of sectors in * the directory). * @return TSK_RETVAL_ENUM */ TSK_RETVAL_ENUM fatxxfs_dent_parse_buf(FATFS_INFO *fatfs, TSK_FS_DIR *a_fs_dir, char *buf, TSK_OFF_T len, TSK_DADDR_T *addrs) { char *func_name = "fatxxfs_dent_parse_buf"; unsigned int idx = 0; unsigned int sidx = 0; int a = 0; int b = 0; TSK_INUM_T ibase = 0; FATXXFS_DENTRY *dep = NULL; TSK_FS_INFO *fs = (TSK_FS_INFO*)&fatfs->fs_info; int sectalloc = 0; TSK_FS_NAME *fs_name = NULL; FATXXFS_LFN lfninfo; int entrySeenCount = 0; int entryInvalidCount = 0; uint8_t isCorruptDir = 0; tsk_error_reset(); if (fatfs_ptr_arg_is_null(fatfs, "fatfs", func_name) || fatfs_ptr_arg_is_null(a_fs_dir, "a_fs_dir", func_name) || fatfs_ptr_arg_is_null(buf, "buf", func_name) || fatfs_ptr_arg_is_null(addrs, "addrs", func_name)) { return TSK_ERR; } assert(len > 0); if (len < 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("%s: invalid buffer length", func_name); return TSK_ERR; } dep = (FATXXFS_DENTRY*)buf; if ((fs_name = tsk_fs_name_alloc(FATFS_MAXNAMLEN_UTF8, 32)) == NULL) { return TSK_ERR; } memset(&lfninfo, 0, sizeof(FATXXFS_LFN)); lfninfo.start = FATFS_MAXNAMLEN_UTF8 - 1; /* Loop through the sectors in the buffer. */ for (sidx = 0; sidx < (unsigned int) (len / fatfs->ssize); sidx++) { /* Get the base inode for the current sector */ ibase = FATFS_SECT_2_INODE(fatfs, addrs[sidx]); if (ibase > fs->last_inum) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("fatfs_parse: inode address is too large"); tsk_fs_name_free(fs_name); return TSK_COR; } if (tsk_verbose) tsk_fprintf(stderr, "fatfs_dent_parse_buf: Parsing sector %" PRIuDADDR " for dir %" PRIuINUM "\n", addrs[sidx], a_fs_dir->addr); /* Get the allocation status of the current sector. */ if ((sectalloc = fatfs_is_sectalloc(fatfs, addrs[sidx])) == -1) { if (tsk_verbose) { tsk_fprintf(stderr, "fatfs_dent_parse_buf: Error looking up sector allocation: %" PRIuDADDR "\n", addrs[sidx]); tsk_error_print(stderr); } tsk_error_reset(); continue; } /* Loop through the putative directory entries in the current sector. */ for (idx = 0; idx < fatfs->dentry_cnt_se; idx++, dep++) { FATXXFS_DENTRY *dir; TSK_INUM_T inode; entrySeenCount++; /* Is the current entry a valid entry? 
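             * Only the basic sanity checks are requested for entries in
             * allocated sectors until the directory has been flagged as
             * corrupt; entries in unallocated sectors, and all entries once
             * the directory looks corrupt, get the more thorough checks.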
*/ if (0 == fatxxfs_is_dentry(fatfs, (FATFS_DENTRY*)dep, (FATFS_DATA_UNIT_ALLOC_STATUS_ENUM)sectalloc, ((isCorruptDir == 0) && (sectalloc)) ? 1 : 0)) { if (tsk_verbose) tsk_fprintf(stderr, "fatfs_dent_parse_buf: Entry %u is invalid\n", idx); entryInvalidCount++; /* If we have seen four entries and all of them are corrupt, * then test every remaining entry in this folder -- * even if the sector is allocated. The scenario is one * where we are processing a cluster that is allocated * to a file and we happen to get some data that matches * every now and then. */ if ((entrySeenCount == 4) && (entryInvalidCount == 4)) { isCorruptDir = 1; } continue; } dir = dep; /* Compute the inode address corresponding to this directory entry. */ inode = ibase + idx; if ((dir->attrib & FATFS_ATTR_LFN) == FATFS_ATTR_LFN) { /* The current entry is a long file name entry. */ FATXXFS_DENTRY_LFN *dirl = (FATXXFS_DENTRY_LFN *) dir; /* Store the name in dinfo until we get the 8.3 name * Use the checksum to identify a new sequence. */ if (((dirl->seq & FATXXFS_LFN_SEQ_FIRST) && (dirl->seq != FATXXFS_SLOT_DELETED)) || (dirl->chksum != lfninfo.chk)) { // @@@ Do a partial output here /* This is the last long file name entry in a sequence. * Reset the sequence number, check sum, and next char * address. */ lfninfo.seq = dirl->seq & FATXXFS_LFN_SEQ_MASK; lfninfo.chk = dirl->chksum; lfninfo.start = FATFS_MAXNAMLEN_UTF8 - 1; } else if (dirl->seq != lfninfo.seq - 1) { // @@@ Check the sequence number - the checksum is correct though... } /* Copy the UTF16 values starting at end of buffer */ for (a = 3; a >= 0; a--) { if ((lfninfo.start > 0)) lfninfo.name[lfninfo.start--] = dirl->part3[a]; } for (a = 11; a >= 0; a--) { if ((lfninfo.start > 0)) lfninfo.name[lfninfo.start--] = dirl->part2[a]; } for (a = 9; a >= 0; a--) { if ((lfninfo.start > 0)) lfninfo.name[lfninfo.start--] = dirl->part1[a]; } // Skip ahead until we get a new sequence num or the 8.3 name continue; } else if ((dir->attrib & FATFS_ATTR_VOLUME) == FATFS_ATTR_VOLUME) { /* Special case for volume label: name does not have an * extension and we add a note at the end that it is a label */ a = 0; for (b = 0; b < 8; b++) { if ((dir->name[b] >= 0x20) && (dir->name[b] != 0xff)) { fs_name->name[a++] = dir->name[b]; } else { fs_name->name[a++] = '^'; } } for (b = 0; b < 3; b++) { if ((dir->ext[b] >= 0x20) && (dir->ext[b] != 0xff)) { fs_name->name[a++] = dir->ext[b]; } else { fs_name->name[a++] = '^'; } } fs_name->name[a] = '\0'; /* Append a string to show it is a label */ if (a + 22 < FATFS_MAXNAMLEN_UTF8) { const char *volstr = " (Volume Label Entry)"; strncat(fs_name->name, volstr, FATFS_MAXNAMLEN_UTF8 - a); } } else { /* A short (8.3) entry */ char *name_ptr; // The dest location for the short name /* if we have a lfn, copy it into fs_name->name * and put the short name in fs_name->shrt_name */ if (lfninfo.start != FATFS_MAXNAMLEN_UTF8 - 1) { int retVal; /* @@@ Check the checksum */ /* Convert the UTF16 to UTF8 */ UTF16 *name16 = (UTF16 *) ((uintptr_t) & lfninfo. 
name[lfninfo.start + 1]); UTF8 *name8 = (UTF8 *) fs_name->name; retVal = tsk_UTF16toUTF8(fs->endian, (const UTF16 **) &name16, (UTF16 *) & lfninfo.name[FATFS_MAXNAMLEN_UTF8], &name8, (UTF8 *) ((uintptr_t) name8 + FATFS_MAXNAMLEN_UTF8), TSKlenientConversion); if (retVal != TSKconversionOK) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNICODE); tsk_error_set_errstr ("fatfs_parse: Error converting FAT LFN to UTF8: %d", retVal); continue; } /* Make sure it is NULL Terminated */ if ((uintptr_t) name8 > (uintptr_t) fs_name->name + FATFS_MAXNAMLEN_UTF8) fs_name->name[FATFS_MAXNAMLEN_UTF8 - 1] = '\0'; else *name8 = '\0'; lfninfo.start = FATFS_MAXNAMLEN_UTF8 - 1; name_ptr = fs_name->shrt_name; // put 8.3 into shrt_name } /* We don't have a LFN, so put the short name in * fs_name->name */ else { fs_name->shrt_name[0] = '\0'; name_ptr = fs_name->name; // put 8.3 into normal location } /* copy in the short name into the place specified above. * Skip spaces and put in the . */ a = 0; for (b = 0; b < 8; b++) { if ((dir->name[b] != 0) && (dir->name[b] != 0xff) && (dir->name[b] != 0x20)) { if ((b == 0) && (dir->name[0] == FATXXFS_SLOT_DELETED)) { name_ptr[a++] = '_'; } else if ((dir->lowercase & FATXXFS_CASE_LOWER_BASE) && (dir->name[b] >= 'A') && (dir->name[b] <= 'Z')) { name_ptr[a++] = dir->name[b] + 32; } else { name_ptr[a++] = dir->name[b]; } } } for (b = 0; b < 3; b++) { if ((dir->ext[b] != 0) && (dir->ext[b] != 0xff) && (dir->ext[b] != 0x20)) { if (b == 0) name_ptr[a++] = '.'; if ((dir->lowercase & FATXXFS_CASE_LOWER_EXT) && (dir->ext[b] >= 'A') && (dir->ext[b] <= 'Z')) name_ptr[a++] = dir->ext[b] + 32; else name_ptr[a++] = dir->ext[b]; } } name_ptr[a] = '\0'; // make sure that only ASCII is in the short name fatfs_cleanup_ascii(name_ptr); } /* file type: FAT only knows DIR and FILE */ if ((dir->attrib & FATFS_ATTR_DIRECTORY) == FATFS_ATTR_DIRECTORY) fs_name->type = TSK_FS_NAME_TYPE_DIR; else fs_name->type = TSK_FS_NAME_TYPE_REG; /* set the inode */ fs_name->meta_addr = inode; inode = 0; // so that we don't use it anymore -- use only fs_name->meta_addr /* Handle the . and .. entries specially * The current inode 'address' they have is for the current * slot in the cluster, but it needs to refer to the original * slot */ if (TSK_FS_ISDOT(fs_name->name) && (fs_name->type == TSK_FS_NAME_TYPE_DIR) && idx < 2) { if (fs_name->name[1] == '\0') { /* Current directory - "." */ fs_name->meta_addr = a_fs_dir->fs_file->meta->addr; } /* for the parent directory, look up in the list that * is maintained in fafs_info */ else if (fs_name->name[1] == '.') { /* Parent directory - ".." */ uint8_t dir_found = 0; if (fatfs_dir_buf_get(fatfs, a_fs_dir->fs_file->meta->addr, &(fs_name->meta_addr)) == 0) { dir_found = 1; } if ((dir_found == 0) && (addrs[0] == fatfs->firstdatasect)) { /* if we are currently in the root directory, we aren't going to find * a parent. This shouldn't happen, but could result in an infinite loop. */ fs_name->meta_addr = 0; dir_found = 1; } if (dir_found == 0) { if (tsk_verbose) fprintf(stderr, "fatfs_dent_parse_buf: Walking directory to find parent\n"); /* The parent directory is not in the list. We are going to walk * the directory until we hit this directory. 
This process will * populate the buffer table and we will then rescan it */ if (tsk_fs_dir_walk(fs, fs->root_inum, (TSK_FS_DIR_WALK_FLAG_ENUM)(TSK_FS_DIR_WALK_FLAG_ALLOC | TSK_FS_DIR_WALK_FLAG_UNALLOC | TSK_FS_DIR_WALK_FLAG_RECURSE), fatfs_find_parent_act, (void *) &a_fs_dir->fs_file->meta->addr)) { return TSK_OK; } if (tsk_verbose) fprintf(stderr, "fatfs_dent_parse_buf: Finished walking directory to find parent\n"); if (fatfs_dir_buf_get(fatfs, a_fs_dir->fs_file->meta->addr, &(fs_name->meta_addr)) == 0) { dir_found = 1; } // if we did not find it, then it was probably // from the orphan directory... if (dir_found == 0) fs_name->meta_addr = TSK_FS_ORPHANDIR_INUM(fs); } } } else { /* Save the (non-. or ..) directory to parent directory info to local * structures so that we can later fill into the inode * info for '..' entries */ if (fs_name->type == TSK_FS_NAME_TYPE_DIR) { if (fatfs_dir_buf_add(fatfs, a_fs_dir->fs_file->meta->addr, fs_name->meta_addr)) return TSK_ERR; } } /* The allocation status of an entry is based on the allocation * status of the sector it is in and the flag. Deleted directories * do not always clear the flags of each entry */ if (sectalloc == 1) { if(FATXXFS_IS_DELETED(dep->name, fatfs)){ fs_name->flags = TSK_FS_NAME_FLAG_UNALLOC; } else{ fs_name->flags = TSK_FS_NAME_FLAG_ALLOC; } } else { fs_name->flags = TSK_FS_NAME_FLAG_UNALLOC; } tsk_fs_dir_add(a_fs_dir, fs_name); } } tsk_fs_name_free(fs_name); return TSK_OK; } sleuthkit-4.2.0/tsk/fs/fatxxfs_meta.c000755 000765 000024 00000072341 12576320700 020405 0ustar00carrierstaff000000 000000 /* ** fatxxfs ** The Sleuth Kit ** ** Content and meta data layer support for the FATXX file system ** ** Brian Carrier [carrier sleuthkit [dot] org] ** Copyright (c) 2006-2013 Brian Carrier, Basis Technology. All Rights reserved ** Copyright (c) 2003-2005 Brian Carrier. All rights reserved ** ** TASK ** Copyright (c) 2002 Brian Carrier, @stake Inc. All rights reserved ** ** ** This software is distributed under the Common Public License 1.0 ** ** Unicode added with support from I.D.E.A.L. Technology Corp (Aug '05) ** */ /** * \file fatxxfs.c * Contains the internal TSK FATXX (FAT12, FAT16, FAT32) file system code to * handle basic file system processing for opening file system, processing * sectors, and directory entries. */ #include "tsk_fatxxfs.h" #include /* * Identify if the dentry is a valid 8.3 name * * returns 1 if it is, 0 if it does not */ static uint8_t is_83_name(FATXXFS_DENTRY * de) { if (!de) return 0; /* The IS_NAME macro will fail if the value is 0x05, which is only * valid in name[0], similarly with '.' */ if ((de->name[0] != FATXXFS_SLOT_E5) && (de->name[0] != '.') && (FATXXFS_IS_83_NAME(de->name[0]) == 0)) { if (tsk_verbose) fprintf(stderr, "fatfs_is_83_name: name[0] is invalid\n"); return 0; } // the name cannot start with 0x20 else if (de->name[0] == 0x20) { if (tsk_verbose) fprintf(stderr, "fatfs_is_83_name: name[0] has 0x20\n"); return 0; } /* the second name field can only be . if the first one is a . 
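     * (this is what lets the "." and ".." directory entries pass)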
*/ if (de->name[1] == '.') { if (de->name[0] != '.') { if (tsk_verbose) fprintf(stderr, "fatfs_is_83_name: name[1] is .\n"); return 0; } } else if (FATXXFS_IS_83_NAME(de->name[1]) == 0) { if (tsk_verbose) fprintf(stderr, "fatfs_is_83_name: name[1] is invalid\n"); return 0; } if (FATXXFS_IS_83_NAME(de->name[2]) == 0) { if (tsk_verbose) fprintf(stderr, "fatfs_is_83_name: name[2] is invalid\n"); return 0; } else if (FATXXFS_IS_83_NAME(de->name[3]) == 0) { if (tsk_verbose) fprintf(stderr, "fatfs_is_83_name: name[3] is invalid\n"); return 0; } else if (FATXXFS_IS_83_NAME(de->name[4]) == 0) { if (tsk_verbose) fprintf(stderr, "fatfs_is_83_name: name[4] is invalid\n"); return 0; } else if (FATXXFS_IS_83_NAME(de->name[5]) == 0) { if (tsk_verbose) fprintf(stderr, "fatfs_is_83_name: name[5] is invalid\n"); return 0; } else if (FATXXFS_IS_83_NAME(de->name[6]) == 0) { if (tsk_verbose) fprintf(stderr, "fatfs_is_83_name: name[6] is invalid\n"); return 0; } else if (FATXXFS_IS_83_NAME(de->name[7]) == 0) { if (tsk_verbose) fprintf(stderr, "fatfs_is_83_name: name[7] is invalid\n"); return 0; } else if (FATXXFS_IS_83_NAME(de->ext[0]) == 0) { if (tsk_verbose) fprintf(stderr, "fatfs_is_83_name: ext[0] is invalid\n"); return 0; } else if (FATXXFS_IS_83_NAME(de->ext[1]) == 0) { if (tsk_verbose) fprintf(stderr, "fatfs_is_83_name: ext[1] is invalid\n"); return 0; } else if (FATXXFS_IS_83_NAME(de->ext[2]) == 0) { if (tsk_verbose) fprintf(stderr, "fatfs_is_83_name: ext[2] is invalid\n"); return 0; } /* Ensure that if we get a "space", that the rest of the * name is spaces. This is not in the spec, but is how * windows operates and serves as a good check to remove * false positives. We do not do this check for the * volume label though. */ if ((de->attrib & FATFS_ATTR_VOLUME) != FATFS_ATTR_VOLUME) { if (((de->name[1] == 0x20) && (de->name[2] != 0x20)) || ((de->name[2] == 0x20) && (de->name[3] != 0x20)) || ((de->name[3] == 0x20) && (de->name[4] != 0x20)) || ((de->name[4] == 0x20) && (de->name[5] != 0x20)) || ((de->name[5] == 0x20) && (de->name[6] != 0x20)) || ((de->name[6] == 0x20) && (de->name[7] != 0x20)) || ((de->ext[1] == 0x20) && (de->ext[2] != 0x20))) { if (tsk_verbose) fprintf(stderr, "fatfs_is_83_name: space before non-space\n"); return 0; } } return 1; } /** * \internal * Determine whether a buffer likely contains a directory entry. * For the most reliable results, request the in-depth test. * * @param [in] a_fatfs Source file system for the directory entry. * @param [in] a_dentry Buffer that may contain a directory entry. * @param [in] a_cluster_is_alloc The allocation status, possibly unknown, of the * cluster from which the buffer was filled. * @param [in] a_do_basic_tests_only Whether to do basic or in-depth testing. 
* @return 1 if the buffer likely contains a direcotry entry, 0 otherwise */ uint8_t fatxxfs_is_dentry(FATFS_INFO *a_fatfs, FATFS_DENTRY *a_dentry, FATFS_DATA_UNIT_ALLOC_STATUS_ENUM a_cluster_is_alloc, uint8_t a_do_basic_tests_only) { const char *func_name = "fatxxfs_is_dentry"; TSK_FS_INFO *fs = (TSK_FS_INFO *) & a_fatfs->fs_info; FATXXFS_DENTRY *dentry = (FATXXFS_DENTRY*)a_dentry; if (!a_dentry) return 0; /* LFN have their own checks, which are pretty weak since most * fields are UTF16 */ if ((dentry->attrib & FATFS_ATTR_LFN) == FATFS_ATTR_LFN) { FATXXFS_DENTRY_LFN *de_lfn = (FATXXFS_DENTRY_LFN*) dentry; if ((de_lfn->seq > (FATXXFS_LFN_SEQ_FIRST | 0x0f)) && (de_lfn->seq != FATXXFS_SLOT_DELETED)) { if (tsk_verbose) fprintf(stderr, "%s: LFN seq\n", func_name); return 0; } return 1; } else { // the basic test is only for the 'essential data'. if (a_do_basic_tests_only == 0) { if (dentry->lowercase & ~(FATXXFS_CASE_LOWER_ALL)) { if (tsk_verbose) fprintf(stderr, "%s: lower case all\n", func_name); return 0; } else if (dentry->attrib & ~(FATFS_ATTR_ALL)) { if (tsk_verbose) fprintf(stderr, "%s: attribute all\n", func_name); return 0; } // verify we do not have too many flags set if (dentry->attrib & FATFS_ATTR_VOLUME) { if ((dentry->attrib & FATFS_ATTR_DIRECTORY) || (dentry->attrib & FATFS_ATTR_READONLY) || (dentry->attrib & FATFS_ATTR_ARCHIVE)) { if (tsk_verbose) fprintf(stderr, "%s: Vol and Dir/RO/Arch\n", func_name); return 0; } } /* The ctime, cdate, and adate fields are optional and * therefore 0 is a valid value * We have had scenarios where ISDATE and ISTIME return true, * but the unix2dos fail during the conversion. This has been * useful to detect corrupt entries, so we do both. */ if ((tsk_getu16(fs->endian, dentry->ctime) != 0) && (FATFS_ISTIME(tsk_getu16(fs->endian, dentry->ctime)) == 0)) { if (tsk_verbose) fprintf(stderr, "%s: ctime\n", func_name); return 0; } else if ((tsk_getu16(fs->endian, dentry->wtime) != 0) && (FATFS_ISTIME(tsk_getu16(fs->endian, dentry->wtime)) == 0)) { if (tsk_verbose) fprintf(stderr, "%s: wtime\n", func_name); return 0; } else if ((tsk_getu16(fs->endian, dentry->cdate) != 0) && ((FATFS_ISDATE(tsk_getu16(fs->endian, dentry->cdate)) == 0) || (fatfs_dos_2_unix_time(tsk_getu16(fs->endian, dentry->cdate), tsk_getu16(fs->endian, dentry->ctime), dentry->ctimeten) == 0))) { if (tsk_verbose) fprintf(stderr, "%s: cdate\n", func_name); return 0; } else if (dentry->ctimeten > 200) { if (tsk_verbose) fprintf(stderr, "%s: ctimeten\n", func_name); return 0; } else if ((tsk_getu16(fs->endian, dentry->adate) != 0) && ((FATFS_ISDATE(tsk_getu16(fs->endian, dentry->adate)) == 0) || (fatfs_dos_2_unix_time(tsk_getu16(fs->endian, dentry->adate), 0, 0) == 0))) { if (tsk_verbose) fprintf(stderr, "%s: adate\n", func_name); return 0; } else if ((tsk_getu16(fs->endian, dentry->wdate) != 0) && ((FATFS_ISDATE(tsk_getu16(fs->endian, dentry->wdate)) == 0) || (fatfs_dos_2_unix_time(tsk_getu16(fs->endian, dentry->wdate), tsk_getu16(fs->endian, dentry->wtime), 0) == 0))) { if (tsk_verbose) fprintf(stderr, "%s: wdate\n", func_name); return 0; } } /* verify the starting cluster is small enough */ if ((FATXXFS_DENTRY_CLUST(fs, dentry) > (a_fatfs->lastclust)) && (FATFS_ISEOF(FATXXFS_DENTRY_CLUST(fs, dentry), a_fatfs->mask) == 0)) { if (tsk_verbose) fprintf(stderr, "%s: start cluster\n", func_name); return 0; } /* Verify the file size is smaller than the data area */ else if (tsk_getu32(fs->endian, dentry->size) > ((a_fatfs->clustcnt * a_fatfs->csize) << a_fatfs->ssize_sh)) { if (tsk_verbose) 
fprintf(stderr, "%s: size\n", func_name); return 0; } else if ((tsk_getu32(fs->endian, dentry->size) > 0) && (FATXXFS_DENTRY_CLUST(fs, dentry) == 0)) { if (tsk_verbose) fprintf(stderr, "%s: non-zero size and NULL starting cluster\n", func_name); return 0; } else if((a_fatfs->subtype == TSK_FATFS_SUBTYPE_SPEC) && (is_83_name(dentry) == 0)) return 0; // basic sanity check on values else if ((tsk_getu16(fs->endian, dentry->ctime) == 0) && (tsk_getu16(fs->endian, dentry->wtime) == 0) && (tsk_getu16(fs->endian, dentry->cdate) == 0) && (tsk_getu16(fs->endian, dentry->adate) == 0) && (tsk_getu16(fs->endian, dentry->wdate) == 0) && (FATXXFS_DENTRY_CLUST(fs, dentry) == 0) && (tsk_getu32(fs->endian, dentry->size) == 0)) { if (tsk_verbose) fprintf(stderr, "%s: nearly all values zero\n", func_name); return 0; } return 1; } } /* * convert the attribute list in FAT to a UNIX mode */ static TSK_FS_META_TYPE_ENUM attr2type(uint16_t attr) { if (attr & FATFS_ATTR_DIRECTORY) return TSK_FS_META_TYPE_DIR; else return TSK_FS_META_TYPE_REG; } static int attr2mode(uint16_t attr) { int mode; /* every file is executable */ mode = (TSK_FS_META_MODE_IXUSR | TSK_FS_META_MODE_IXGRP | TSK_FS_META_MODE_IXOTH); if ((attr & FATFS_ATTR_READONLY) == 0) mode |= (TSK_FS_META_MODE_IRUSR | TSK_FS_META_MODE_IRGRP | TSK_FS_META_MODE_IROTH); if ((attr & FATFS_ATTR_HIDDEN) == 0) mode |= (TSK_FS_META_MODE_IWUSR | TSK_FS_META_MODE_IWGRP | TSK_FS_META_MODE_IWOTH); return mode; } TSK_RETVAL_ENUM fatxxfs_dinode_copy(FATFS_INFO *a_fatfs, TSK_INUM_T a_inum, FATFS_DENTRY *a_dentry, uint8_t a_cluster_is_alloc, TSK_FS_FILE *a_fs_file) { const char *func_name = "fatxxfs_dinode_copy"; int i; TSK_FS_INFO *fs = (TSK_FS_INFO *) & a_fatfs->fs_info; TSK_FS_META *fs_meta = a_fs_file->meta; FATXXFS_DENTRY *dentry = (FATXXFS_DENTRY*)a_dentry; TSK_DADDR_T *addr_ptr; uint32_t flags = 0; if (fs_meta->content_len < FATFS_FILE_CONTENT_LEN) { if ((fs_meta = tsk_fs_meta_realloc(fs_meta, FATFS_FILE_CONTENT_LEN)) == NULL) { return TSK_ERR; } } fs_meta->attr_state = TSK_FS_META_ATTR_EMPTY; if (fs_meta->attr) { tsk_fs_attrlist_markunused(fs_meta->attr); } fs_meta->mode = (TSK_FS_META_MODE_ENUM)attr2mode(dentry->attrib); fs_meta->type = attr2type(dentry->attrib); fs_meta->addr = a_inum; if (a_cluster_is_alloc) { if(FATXXFS_IS_DELETED(dentry->name, a_fatfs)){ flags = TSK_FS_META_FLAG_UNALLOC; } else{ flags = TSK_FS_META_FLAG_ALLOC; } } else { flags = TSK_FS_META_FLAG_UNALLOC; } fs_meta->flags = (TSK_FS_META_FLAG_ENUM)flags; if ((dentry->attrib & FATFS_ATTR_LFN) == FATFS_ATTR_LFN) { /* LFN entries don't have these values */ fs_meta->nlink = 0; fs_meta->size = 0; fs_meta->mtime = 0; fs_meta->atime = 0; fs_meta->ctime = 0; fs_meta->crtime = 0; fs_meta->mtime_nano = fs_meta->atime_nano = fs_meta->ctime_nano = fs_meta->crtime_nano = 0; } else { /* There is no notion of link in FAT, just deleted or not */ if(FATXXFS_IS_DELETED(dentry->name, a_fatfs)){ fs_meta->nlink = 0; } else{ fs_meta->nlink = 1; } fs_meta->size = (TSK_OFF_T) tsk_getu32(fs->endian, dentry->size); /* If these are valid dates, then convert to a unix date format */ if (FATFS_ISDATE(tsk_getu16(fs->endian, dentry->wdate))) fs_meta->mtime = fatfs_dos_2_unix_time(tsk_getu16(fs->endian, dentry->wdate), tsk_getu16(fs->endian, dentry->wtime), 0); else fs_meta->mtime = 0; fs_meta->mtime_nano = 0; if (FATFS_ISDATE(tsk_getu16(fs->endian, dentry->adate))) fs_meta->atime = fatfs_dos_2_unix_time(tsk_getu16(fs->endian, dentry->adate), 0, 0); else fs_meta->atime = 0; fs_meta->atime_nano = 0; /* cdate is the creation 
date in FAT and there is no change, * so we just put in into change and set create to 0. The other * front-end code knows how to handle it and display it */ if (FATFS_ISDATE(tsk_getu16(fs->endian, dentry->cdate))) { fs_meta->crtime = fatfs_dos_2_unix_time(tsk_getu16(fs->endian, dentry->cdate), tsk_getu16(fs->endian, dentry->ctime), dentry->ctimeten); fs_meta->crtime_nano = fatfs_dos_2_nanosec(dentry->ctimeten); } else { fs_meta->crtime = 0; fs_meta->crtime_nano = 0; } // FAT does not have a changed time fs_meta->ctime = 0; fs_meta->ctime_nano = 0; } /* Values that do not exist in FAT */ fs_meta->uid = 0; fs_meta->gid = 0; fs_meta->seq = 0; /* We will be copying a name, so allocate a structure */ if (fs_meta->name2 == NULL) { if ((fs_meta->name2 = (TSK_FS_META_NAME_LIST *) tsk_malloc(sizeof(TSK_FS_META_NAME_LIST))) == NULL) return TSK_ERR; fs_meta->name2->next = NULL; } /* If we have a LFN entry, then we need to convert the three * parts of the name to UTF-8 and copy it into the name structure . */ if ((dentry->attrib & FATFS_ATTR_LFN) == FATFS_ATTR_LFN) { FATXXFS_DENTRY_LFN *lfn = (FATXXFS_DENTRY_LFN *) dentry; /* Convert the first part of the name */ UTF8 *name8 = (UTF8 *) fs_meta->name2->name; UTF16 *name16 = (UTF16 *) lfn->part1; int retVal = tsk_UTF16toUTF8(fs->endian, (const UTF16 **) &name16, (UTF16 *) & lfn->part1[10], &name8, (UTF8 *) ((uintptr_t) fs_meta->name2->name + sizeof(fs_meta->name2->name)), TSKlenientConversion); if (retVal != TSKconversionOK) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNICODE); tsk_error_set_errstr ("%s: Error converting FAT LFN (1) to UTF8: %d", func_name, retVal); *name8 = '\0'; return TSK_COR; } /* Convert the second part of the name */ name16 = (UTF16 *) lfn->part2; retVal = tsk_UTF16toUTF8(fs->endian, (const UTF16 **) &name16, (UTF16 *) & lfn->part2[12], &name8, (UTF8 *) ((uintptr_t) fs_meta->name2->name + sizeof(fs_meta->name2->name)), TSKlenientConversion); if (retVal != TSKconversionOK) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNICODE); tsk_error_set_errstr ("%s: Error converting FAT LFN (2) to UTF8: %d", func_name, retVal); *name8 = '\0'; return TSK_COR; } /* Convert the third part of the name */ name16 = (UTF16 *) lfn->part3; retVal = tsk_UTF16toUTF8(fs->endian, (const UTF16 **) &name16, (UTF16 *) & lfn->part3[4], &name8, (UTF8 *) ((uintptr_t) fs_meta->name2->name + sizeof(fs_meta->name2->name)), TSKlenientConversion); if (retVal != TSKconversionOK) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNICODE); tsk_error_set_errstr ("%s: Error converting FAT LFN (3) to UTF8: %d", func_name, retVal); *name8 = '\0'; return TSK_COR; } /* Make sure it is NULL Terminated */ if ((uintptr_t) name8 > (uintptr_t) fs_meta->name2->name + sizeof(fs_meta->name2->name)) fs_meta->name2->name[sizeof(fs_meta->name2->name) - 1] = '\0'; else *name8 = '\0'; } /* If the entry is for a volume label, then copy the label. */ else if ((dentry->attrib & FATFS_ATTR_VOLUME) == FATFS_ATTR_VOLUME) { int a; i = 0; for (a = 0; a < 8; a++) { if ((dentry->name[a] != 0x00) && (dentry->name[a] != 0xff)) fs_meta->name2->name[i++] = dentry->name[a]; } for (a = 0; a < 3; a++) { if ((dentry->ext[a] != 0x00) && (dentry->ext[a] != 0xff)) fs_meta->name2->name[i++] = dentry->ext[a]; } fs_meta->name2->name[i] = '\0'; /* clean up non-ASCII because we are * copying it into a buffer that is supposed to be UTF-8 and * we don't know what encoding it is actually in or if it is * simply junk. 
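     * fatfs_cleanup_ascii() below replaces any non-ASCII bytes with a
     * placeholder character so the result can safely be treated as UTF-8.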
*/ fatfs_cleanup_ascii(fs_meta->name2->name); } /* If the entry is a normal short entry, then copy the name * and add the '.' for the extension */ else { for (i = 0; (i < 8) && (dentry->name[i] != 0) && (dentry->name[i] != ' '); i++) { if ((i == 0) && (dentry->name[0] == FATXXFS_SLOT_DELETED)) fs_meta->name2->name[0] = '_'; else if ((dentry->lowercase & FATXXFS_CASE_LOWER_BASE) && (dentry->name[i] >= 'A') && (dentry->name[i] <= 'Z')) fs_meta->name2->name[i] = dentry->name[i] + 32; else fs_meta->name2->name[i] = dentry->name[i]; } if ((dentry->ext[0]) && (dentry->ext[0] != ' ')) { int a; fs_meta->name2->name[i++] = '.'; for (a = 0; (a < 3) && (dentry->ext[a] != 0) && (dentry->ext[a] != ' '); a++, i++) { if ((dentry->lowercase & FATXXFS_CASE_LOWER_EXT) && (dentry->ext[a] >= 'A') && (dentry->ext[a] <= 'Z')) fs_meta->name2->name[i] = dentry->ext[a] + 32; else fs_meta->name2->name[i] = dentry->ext[a]; } } fs_meta->name2->name[i] = '\0'; /* clean up non-ASCII because we are * copying it into a buffer that is supposed to be UTF-8 and * we don't know what encoding it is actually in or if it is * simply junk. */ fatfs_cleanup_ascii(fs_meta->name2->name); } /* Clean up name to remove control characters */ i = 0; while (fs_meta->name2->name[i] != '\0') { if (TSK_IS_CNTRL(fs_meta->name2->name[i])) fs_meta->name2->name[i] = '^'; i++; } /* get the starting cluster */ addr_ptr = (TSK_DADDR_T *) fs_meta->content_ptr; if ((dentry->attrib & FATFS_ATTR_LFN) == FATFS_ATTR_LFN) { addr_ptr[0] = 0; } else { addr_ptr[0] = FATXXFS_DENTRY_CLUST(fs, dentry) & a_fatfs->mask; } /* FAT does not store a size for its directories so make one based * on the number of allocated sectors */ if ((dentry->attrib & FATFS_ATTR_DIRECTORY) && ((dentry->attrib & FATFS_ATTR_LFN) != FATFS_ATTR_LFN)) { if (fs_meta->flags & TSK_FS_META_FLAG_ALLOC) { TSK_LIST *list_seen = NULL; /* count the total number of clusters in this file */ TSK_DADDR_T clust = FATXXFS_DENTRY_CLUST(fs, dentry); int cnum = 0; while ((clust) && (0 == FATFS_ISEOF(clust, a_fatfs->mask))) { TSK_DADDR_T nxt; /* Make sure we do not get into an infinite loop */ if (tsk_list_find(list_seen, clust)) { if (tsk_verbose) tsk_fprintf(stderr, "Loop found while determining directory size\n"); break; } if (tsk_list_add(&list_seen, clust)) { tsk_list_free(list_seen); list_seen = NULL; return TSK_ERR; } cnum++; if (fatfs_getFAT(a_fatfs, clust, &nxt)) break; else clust = nxt; } tsk_list_free(list_seen); list_seen = NULL; fs_meta->size = (TSK_OFF_T) ((cnum * a_fatfs->csize) << a_fatfs->ssize_sh); } /* if the dir is unallocated, then assume 0 or cluster size * Ideally, we would have a smart algo here to do recovery * and look for dentries. However, we do not have that right * now and if we do not add this special check then it can * assume that an allocated file cluster chain belongs to the * directory */ else { // if the first cluster is allocated, then set size to be 0 if (fatxxfs_is_cluster_alloc(a_fatfs, FATXXFS_DENTRY_CLUST(fs, dentry)) == 1) fs_meta->size = 0; else fs_meta->size = a_fatfs->csize << a_fatfs->ssize_sh; } } return TSK_OK; } /** * \internal * Populate the TSK_FS_META object of a TSK_FS_FILE object for a * given inode address. * * @param [in] a_fs File system that contains the inode. * @param [out] a_fs_file The file corresponding to the inode. * @param [in] a_inum The inode address. * @returns 1 if an error occurs or if the inode address is not * for a valid inode, 0 otherwise. 
*/ uint8_t fatxxfs_inode_lookup(FATFS_INFO *a_fatfs, TSK_FS_FILE *a_fs_file, TSK_INUM_T a_inum) { const char *func_name = "fatxxfs_inode_lookup"; TSK_DADDR_T sector = 0; int8_t ret_val = 0; FATFS_DATA_UNIT_ALLOC_STATUS_ENUM sector_alloc_status = FATFS_DATA_UNIT_ALLOC_STATUS_UNKNOWN; FATFS_DENTRY dentry; TSK_RETVAL_ENUM copy_result = TSK_OK; tsk_error_reset(); if (fatfs_ptr_arg_is_null(a_fatfs, "a_fatfs", func_name) || fatfs_ptr_arg_is_null(a_fs_file, "a_fs_file", func_name) || !fatfs_inum_arg_is_in_range(a_fatfs, a_inum, func_name)) { return 1; } sector = FATFS_INODE_2_SECT(a_fatfs, a_inum); if (sector > a_fatfs->fs_info.last_block) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_INODE_NUM); tsk_error_set_errstr("%s: Inode %" PRIuINUM " in sector too big for image: %" PRIuDADDR, func_name, a_inum, sector); return 1; } if (fatfs_dentry_load(a_fatfs, &dentry, a_inum) != 0) { return 1; } ret_val = fatfs_is_sectalloc(a_fatfs, sector); if (ret_val == -1) { return 1; } else { sector_alloc_status = (FATFS_DATA_UNIT_ALLOC_STATUS_ENUM)ret_val; } /* Note that only the sector allocation status is used to choose * between the basic or in-depth version of the inode validity * test. In other places in the code information about whether or not * the sector that contains the inode is part of a folder is used to * make this decision. Here, that information is not available. Thus, * the test here is less reliable and may result in some false * positives. */ if (!fatxxfs_is_dentry(a_fatfs, &dentry, sector_alloc_status, sector_alloc_status)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_INODE_NUM); tsk_error_set_errstr("%s: %" PRIuINUM " is not an inode", func_name, a_inum); return 1; } copy_result = fatxxfs_dinode_copy(a_fatfs, a_inum, &dentry, (uint8_t)sector_alloc_status, a_fs_file); if (copy_result == TSK_OK) { return 0; } else if (copy_result == TSK_COR) { /* If there was a Unicode conversion error, * then still return the inode. */ if (tsk_verbose) { tsk_error_print(stderr); } tsk_error_reset(); return 0; } else { return 1; } } /** * Output the file attributes of an exFAT file directory entry in * human-readable form. * * @param a_fatfs Source file system for the directory entry. * @param a_inum Inode address associated with the directory entry. * @param a_hFile Handle of the file to which to write. 
* @return 0 on success, 1 on failure, per TSK convention */ uint8_t fatxxfs_istat_attr_flags(FATFS_INFO *a_fatfs, TSK_INUM_T a_inum, FILE *a_hFile) { const char *func_name = "fatxxfs_istat_attr_flags"; FATXXFS_DENTRY dentry; tsk_error_reset(); if (fatfs_ptr_arg_is_null(a_fatfs, "a_fatfs", func_name) || fatfs_ptr_arg_is_null(a_hFile, "a_hFile", func_name) || !fatfs_inum_arg_is_in_range(a_fatfs, a_inum, func_name)) { return 1; } if (fatfs_dentry_load(a_fatfs, (FATFS_DENTRY*)(&dentry), a_inum)) { return 1; } if ((dentry.attrib & FATFS_ATTR_LFN) == FATFS_ATTR_LFN) { tsk_fprintf(a_hFile, "Long File Name\n"); } else { if (dentry.attrib & FATFS_ATTR_DIRECTORY) tsk_fprintf(a_hFile, "Directory"); else if (dentry.attrib & FATFS_ATTR_VOLUME) tsk_fprintf(a_hFile, "Volume Label"); else tsk_fprintf(a_hFile, "File"); if (dentry.attrib & FATFS_ATTR_READONLY) tsk_fprintf(a_hFile, ", Read Only"); if (dentry.attrib & FATFS_ATTR_HIDDEN) tsk_fprintf(a_hFile, ", Hidden"); if (dentry.attrib & FATFS_ATTR_SYSTEM) tsk_fprintf(a_hFile, ", System"); if (dentry.attrib & FATFS_ATTR_ARCHIVE) tsk_fprintf(a_hFile, ", Archive"); tsk_fprintf(a_hFile, "\n"); } return 0; } uint8_t fatxxfs_inode_walk_should_skip_dentry(FATFS_INFO *a_fatfs, TSK_INUM_T a_inum, FATFS_DENTRY *a_dentry, unsigned int a_selection_flags, int a_cluster_is_alloc) { const char *func_name = "fatxxfs_inode_walk_should_skip_dentry"; FATXXFS_DENTRY *dentry = (FATXXFS_DENTRY*)a_dentry; unsigned int dentry_flags = 0; assert(a_fatfs != NULL); assert(fatfs_inum_is_in_range(a_fatfs, a_inum)); assert(a_dentry != NULL); tsk_error_reset(); if (fatfs_ptr_arg_is_null(a_fatfs, "a_fatfs", func_name) || !fatfs_inum_arg_is_in_range(a_fatfs, a_inum, func_name) || fatfs_ptr_arg_is_null(a_dentry, "a_dentry", func_name)) { return 1; } /* If this is a long file name entry, then skip it and * wait for the short name. */ if ((dentry->attrib & FATFS_ATTR_LFN) == FATFS_ATTR_LFN) { return 1; } /* Skip the "." and ".." entries because they are redundant. */ if (((dentry->attrib & FATFS_ATTR_DIRECTORY) == FATFS_ATTR_DIRECTORY) && (dentry->name[0] == '.')) { return 1; } /* Compare directory entry allocation status with the inode selection * flags. Allocation status is determined first by the allocation status * of the sector that contains the entry, then by the deleted status of * the file. This is necessary because when a directory is deleted, its * contents are not always marked as unallocated. */ if (a_cluster_is_alloc == 1) { if(FATXXFS_IS_DELETED(dentry->name, a_fatfs)){ dentry_flags = TSK_FS_META_FLAG_UNALLOC; } else{ dentry_flags = TSK_FS_META_FLAG_ALLOC; } } else { dentry_flags = TSK_FS_META_FLAG_UNALLOC; } if ((a_selection_flags & dentry_flags) != dentry_flags) { return 1; } /* If the processing flags call for only processing orphan files, check * whether or not this inode is in list of non-orphan files found via name * walk. */ if ((dentry_flags & TSK_FS_META_FLAG_UNALLOC) && (a_selection_flags & TSK_FS_META_FLAG_ORPHAN) && (tsk_fs_dir_find_inum_named(&(a_fatfs->fs_info), a_inum))) { return 1; } return 0; } sleuthkit-4.2.0/tsk/fs/ffind_lib.c000644 000765 000024 00000006332 12576320700 017622 0ustar00carrierstaff000000 000000 /* ** ffind (file find) ** The Sleuth Kit ** ** Find the file that uses the specified inode (including deleted files) ** ** Brian Carrier [carrier sleuthkit [dot] org] ** Copyright (c) 2006-2011 Brian Carrier, Basis Technology. All Rights reserved ** Copyright (c) 2003-2005 Brian Carrier. 
All rights reserved ** ** TASK ** Copyright (c) 2002 Brian Carrier, @stake Inc. All rights reserved ** ** TCTUTILs ** Copyright (c) 2001 Brian Carrier. All rights reserved ** ** ** This software is distributed under the Common Public License 1.0 ** */ /** * \file ffind_lib.c * Contains the library API functions used by the TSK ffind command * line tool. */ #include "tsk_fs_i.h" #include "tsk_ntfs.h" // NTFS has an optimized version of this function /** \internal * Structure to store data for callbacks. */ typedef struct { TSK_INUM_T inode; uint8_t flags; uint8_t found; } FFIND_DATA; static TSK_WALK_RET_ENUM find_file_act(TSK_FS_FILE * fs_file, const char *a_path, void *ptr) { FFIND_DATA *data = (FFIND_DATA *) ptr; /* We found it! */ if (fs_file->name->meta_addr == data->inode) { data->found = 1; if (fs_file->name->flags & TSK_FS_NAME_FLAG_UNALLOC) tsk_printf("* "); tsk_printf("/%s%s\n", a_path, fs_file->name->name); if (!(data->flags & TSK_FS_FFIND_ALL)) { return TSK_WALK_STOP; } } return TSK_WALK_CONT; } /* Return 0 on success and 1 on error */ uint8_t tsk_fs_ffind(TSK_FS_INFO * fs, TSK_FS_FFIND_FLAG_ENUM lclflags, TSK_INUM_T a_inode, TSK_FS_ATTR_TYPE_ENUM type, uint8_t type_used, uint16_t id, uint8_t id_used, TSK_FS_DIR_WALK_FLAG_ENUM flags) { FFIND_DATA data; data.found = 0; data.flags = lclflags; data.inode = a_inode; /* Since we start the walk on the root inode, then this will not show ** up in the above functions, so do it now */ if (data.inode == fs->root_inum) { if (flags & TSK_FS_DIR_WALK_FLAG_ALLOC) { tsk_printf("/\n"); data.found = 1; if (!(lclflags & TSK_FS_FFIND_ALL)) return 0; } } if (TSK_FS_TYPE_ISNTFS(fs->ftype)) { if (ntfs_find_file(fs, data.inode, type, type_used, id, id_used, flags, find_file_act, &data)) return 1; } else { if (tsk_fs_dir_walk(fs, fs->root_inum, flags, find_file_act, &data)) return 1; } if (data.found == 0) { /* With FAT, we can at least give the name of the file and call * it orphan */ if (TSK_FS_TYPE_ISFAT(fs->ftype)) { TSK_FS_FILE *fs_file = tsk_fs_file_open_meta(fs, NULL, data.inode); if ((fs_file != NULL) && (fs_file->meta != NULL) && (fs_file->meta->name2 != NULL)) { if (fs_file->meta->flags & TSK_FS_META_FLAG_UNALLOC) tsk_printf("* "); tsk_printf("%s/%s\n", TSK_FS_ORPHAN_STR, fs_file->meta->name2->name); } if (fs_file) tsk_fs_file_close(fs_file); } else { tsk_printf("File name not found for inode\n"); } } return 0; } sleuthkit-4.2.0/tsk/fs/ffs.c000644 000765 000024 00000216664 12576320700 016477 0ustar00carrierstaff000000 000000 /* ** The Sleuth Kit ** ** Brian Carrier [carrier sleuthkit [dot] org] ** Copyright (c) 2006-2011 Brian Carrier, Basis Technology. All Rights reserved ** Copyright (c) 2003-2005 Brian Carrier. All rights reserved ** ** TASK ** Copyright (c) 2002-2003 Brian Carrier, @stake Inc. All rights reserved ** ** Copyright (c) 1997,1998,1999, International Business Machines ** Corporation and others. All Rights Reserved. */ /* TCT * LICENSE * This software is distributed under the IBM Public License. * AUTHOR(S) * Wietse Venema * IBM T.J. Watson Research * P.O. Box 704 * Yorktown Heights, NY 10598, USA --*/ /** * \file ffs.c * Contains the internal TSK UFS / FFS file system functions */ #include "tsk_fs_i.h" #include "tsk_ffs.h" /* ffs_group_load - load cylinder group descriptor info into cache * * Note: This routine assumes &ffs->lock is locked by the caller. 
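 * (the lock protects the shared grp_buf, grp_addr, and grp_num cache
 * fields that this routine fills in)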
* * return 1 on error and 0 on success * */ static uint8_t ffs_group_load(FFS_INFO * ffs, FFS_GRPNUM_T grp_num) { TSK_DADDR_T addr; TSK_FS_INFO *fs = (TSK_FS_INFO *) & ffs->fs_info; /* * Sanity check */ if (grp_num < 0 || grp_num >= ffs->groups_count) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("ffs_group_load: invalid cylinder group number: %" PRI_FFSGRP "", grp_num); return 1; } /* * Allocate/read cylinder group info on the fly. Trust that a cylinder * group always fits within a logical disk block (as promised in the * 4.4BSD include file). */ if (ffs->grp_buf == NULL) { if ((ffs->grp_buf = tsk_malloc(ffs->ffsbsize_b)) == NULL) { return 1; } } addr = cgtod_lcl(fs, ffs->fs.sb1, grp_num); if (ffs->grp_addr != addr) { ffs_cgd *cg; ssize_t cnt; cnt = tsk_fs_read_block(fs, addr, ffs->grp_buf, ffs->ffsbsize_b); if (cnt != ffs->ffsbsize_b) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2("ffs_group_load: Group %" PRI_FFSGRP " at %" PRIuDADDR, grp_num, addr); return 1; } ffs->grp_addr = addr; /* Perform a sanity check on the data to make sure offsets are in range */ cg = (ffs_cgd *) ffs->grp_buf; if ((tsk_gets32(fs->endian, cg->cg_iusedoff) > ffs->ffsbsize_b) || (tsk_gets32(fs->endian, cg->cg_freeoff) > ffs->ffsbsize_b)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_CORRUPT); tsk_error_set_errstr2("ffs_group_load: Group %" PRI_FFSGRP " descriptor offsets too large at %" PRIuDADDR, grp_num, addr); return 1; } } ffs->grp_num = grp_num; return 0; } /* * ffs_dinode_load - read disk inode and load the data into ffs_inode structure * * Return 0 on success and 1 on error */ static uint8_t ffs_dinode_load(FFS_INFO * ffs, TSK_INUM_T inum, ffs_inode * dino_buf) { TSK_DADDR_T addr; TSK_OFF_T offs; TSK_FS_INFO *fs = (TSK_FS_INFO *) & ffs->fs_info; /* * Sanity check. * Use last_num-1 to account for virtual Orphan directory in last_inum. */ if (inum < fs->first_inum || inum > fs->last_inum - 1) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_INODE_NUM); tsk_error_set_errstr("ffs_dinode_load: address: %" PRIuINUM, inum); return 1; } /* * Allocate/read the inode table buffer on the fly. */ /* lock access to itbl_buf */ tsk_take_lock(&ffs->lock); if (ffs->itbl_buf == NULL) { if ((ffs->itbl_buf = tsk_malloc(ffs->ffsbsize_b)) == NULL) { tsk_release_lock(&ffs->lock); return 1; } } /* UFS2 is different because it does not initialize all inodes * when the file system is created. 
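(The group descriptor's cg_initediblk field records how many inodes in the group have actually been initialized.)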
Therefore we need to check * the group descriptor to find out if this is in the valid * range */ if (fs->ftype == TSK_FS_TYPE_FFS2) { ffs_cgd2 *cg2; FFS_GRPNUM_T grp_num; if (dino_buf == NULL) { tsk_release_lock(&ffs->lock); return 1; } /* Lookup the cylinder group descriptor if it isn't * cached */ grp_num = (FFS_GRPNUM_T) itog_lcl(fs, ffs->fs.sb1, inum); if (ffs_group_load(ffs, grp_num)) { tsk_release_lock(&ffs->lock); return 1; } cg2 = (ffs_cgd2 *) ffs->grp_buf; /* If the inode is not init, then do not worry about it */ if ((inum - grp_num * tsk_getu32(fs->endian, ffs->fs.sb2->cg_inode_num)) >= tsk_getu32(fs->endian, cg2->cg_initediblk)) { memset((char *) dino_buf, 0, sizeof(ffs_inode2)); } else { ssize_t cnt; /* Get the base and offset addr for the inode in the tbl */ addr = itod_lcl(fs, ffs->fs.sb1, inum); if (ffs->itbl_addr != addr) { cnt = tsk_fs_read_block (fs, addr, ffs->itbl_buf, ffs->ffsbsize_b); if (cnt != ffs->ffsbsize_b) { tsk_release_lock(&ffs->lock); if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2 ("ffs_dinode_load: FFS2 inode table at %" PRIuDADDR, addr); return 1; } ffs->itbl_addr = addr; } offs = itoo_lcl(fs, ffs->fs.sb2, inum) * sizeof(ffs_inode2); memcpy((char *) dino_buf, ffs->itbl_buf + offs, sizeof(ffs_inode2)); } } else { if (dino_buf == NULL) { tsk_release_lock(&ffs->lock); return 1; } addr = itod_lcl(fs, ffs->fs.sb1, inum); if (ffs->itbl_addr != addr) { ssize_t cnt; cnt = tsk_fs_read_block(fs, addr, ffs->itbl_buf, ffs->ffsbsize_b); if (cnt != ffs->ffsbsize_b) { tsk_release_lock(&ffs->lock); if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2 ("ffs_dinode_load: FFS1 inode table at %" PRIuDADDR, addr); return 1; } ffs->itbl_addr = addr; } offs = itoo_lcl(fs, ffs->fs.sb1, inum) * sizeof(ffs_inode1); memcpy((char *) dino_buf, ffs->itbl_buf + offs, sizeof(ffs_inode1)); } tsk_release_lock(&ffs->lock); return 0; } static TSK_FS_META_TYPE_ENUM ffsmode2tsktype(uint16_t a_mode) { switch (a_mode & FFS_IN_FMT) { case FFS_IN_REG: return TSK_FS_META_TYPE_REG; case FFS_IN_DIR: return TSK_FS_META_TYPE_DIR; case FFS_IN_SOCK: return TSK_FS_META_TYPE_SOCK; case FFS_IN_LNK: return TSK_FS_META_TYPE_LNK; case FFS_IN_BLK: return TSK_FS_META_TYPE_BLK; case FFS_IN_CHR: return TSK_FS_META_TYPE_CHR; case FFS_IN_FIFO: return TSK_FS_META_TYPE_FIFO; case FFS_IN_SHAD: return TSK_FS_META_TYPE_SHAD; case FFS_IN_WHT: return TSK_FS_META_TYPE_WHT; default: return TSK_FS_META_TYPE_UNDEF; } } static uint16_t ffsmode2tskmode(uint16_t a_mode) { uint16_t mode = 0; if (a_mode & FFS_IN_ISUID) mode |= TSK_FS_META_MODE_ISUID; if (a_mode & FFS_IN_ISGID) mode |= TSK_FS_META_MODE_ISGID; if (a_mode & FFS_IN_ISVTX) mode |= TSK_FS_META_MODE_ISVTX; if (a_mode & FFS_IN_IRUSR) mode |= TSK_FS_META_MODE_IRUSR; if (a_mode & FFS_IN_IWUSR) mode |= TSK_FS_META_MODE_IWUSR; if (a_mode & FFS_IN_IXUSR) mode |= TSK_FS_META_MODE_IXUSR; if (a_mode & FFS_IN_IRGRP) mode |= TSK_FS_META_MODE_IRGRP; if (a_mode & FFS_IN_IWGRP) mode |= TSK_FS_META_MODE_IWGRP; if (a_mode & FFS_IN_IXGRP) mode |= TSK_FS_META_MODE_IXGRP; if (a_mode & FFS_IN_IROTH) mode |= TSK_FS_META_MODE_IROTH; if (a_mode & FFS_IN_IWOTH) mode |= TSK_FS_META_MODE_IWOTH; if (a_mode & FFS_IN_IXOTH) mode |= TSK_FS_META_MODE_IXOTH; return mode; } /* ffs_dinode_copy - copy cached disk inode to generic inode * * Return 1 on error and 0 on success */ static uint8_t ffs_dinode_copy(FFS_INFO * ffs, TSK_FS_META * fs_meta, TSK_INUM_T dino_inum, const ffs_inode * dino_buf) { int i, j; 
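/* i and j iterate over the block pointer arrays when copying addresses and fast-link bytes; count indexes into the symlink buffer. */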
unsigned int count; TSK_FS_INFO *fs = &(ffs->fs_info); FFS_GRPNUM_T grp_num; ffs_cgd *cg; unsigned char *inosused = NULL; TSK_INUM_T ibase; if (dino_buf == NULL) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("ffs_dinode_copy: dino_buf is NULL"); return 1; } fs_meta->attr_state = TSK_FS_META_ATTR_EMPTY; if (fs_meta->attr) { tsk_fs_attrlist_markunused(fs_meta->attr); } fs_meta->flags = 0; fs_meta->seq = 0; /* If the symlink field is set from a previous run, then free it */ if (fs_meta->link) { free(fs_meta->link); fs_meta->link = NULL; } fs_meta->addr = dino_inum; /* OpenBSD and FreeBSD style */ if (fs->ftype == TSK_FS_TYPE_FFS1) { ffs_inode1 *in = (ffs_inode1 *) dino_buf; TSK_DADDR_T *addr_ptr; fs_meta->mode = ffsmode2tskmode(tsk_getu16(fs->endian, in->di_mode)); fs_meta->type = ffsmode2tsktype(tsk_getu16(fs->endian, in->di_mode)); fs_meta->nlink = tsk_gets16(fs->endian, in->di_nlink); fs_meta->size = tsk_getu64(fs->endian, in->di_size); fs_meta->uid = tsk_getu32(fs->endian, in->di_uid); fs_meta->gid = tsk_getu32(fs->endian, in->di_gid); fs_meta->mtime = tsk_gets32(fs->endian, in->di_mtime); fs_meta->atime = tsk_gets32(fs->endian, in->di_atime); fs_meta->ctime = tsk_gets32(fs->endian, in->di_ctime); fs_meta->crtime = 0; fs_meta->mtime_nano = fs_meta->atime_nano = fs_meta->ctime_nano = fs_meta->crtime_nano = 0; if (fs_meta->content_len < FFS_FILE_CONTENT_LEN) { if ((fs_meta = tsk_fs_meta_realloc(fs_meta, FFS_FILE_CONTENT_LEN)) == NULL) { return 1; } } addr_ptr = (TSK_DADDR_T *) fs_meta->content_ptr; for (i = 0; i < FFS_NDADDR; i++) addr_ptr[i] = tsk_gets32(fs->endian, in->di_db[i]); for (i = 0; i < FFS_NIADDR; i++) addr_ptr[FFS_NDADDR + i] = tsk_gets32(fs->endian, in->di_ib[i]); /* set the link string (if the file is a link) * The size check is a sanity check so that we don't try and allocate * a huge amount of memory for a bad inode value */ if ((fs_meta->type == TSK_FS_META_TYPE_LNK) && (fs_meta->size < FFS_MAXPATHLEN) && (fs_meta->size >= 0)) { int i; fs_meta->link = tsk_malloc((size_t) fs_meta->size + 1); if (fs_meta->link == NULL) { return 1; } count = 0; /* index into the link array */ /* it is located directly in the pointers */ if (fs_meta->size < 4 * (FFS_NDADDR + FFS_NIADDR)) { char *ptr; /* Direct block pointer locations */ for (i = 0; i < FFS_NDADDR && count < fs_meta->size; i++) { ptr = (char *) &in->di_db[i]; for (j = 0; j < 4 && count < fs_meta->size; j++) fs_meta->link[count++] = ptr[j]; } /* indirect block pointers */ for (i = 0; i < FFS_NIADDR && count < fs_meta->size; i++) { ptr = (char *) &in->di_ib[i]; for (j = 0; j < 4 && count < fs_meta->size; j++) fs_meta->link[count++] = ptr[j]; } fs_meta->link[count] = '\0'; /* clear the values to avoid other code from reading them */ memset(fs_meta->content_ptr, 0, fs_meta->content_len); } /* it is in blocks (the regular way) */ else { char *buf; char *ptr = fs_meta->link; if ((buf = (char *) tsk_malloc(fs->block_size)) == NULL) { return 1; } /* there is a max link length of 1000, so we should never * need the indirect blocks */ for (i = 0; i < FFS_NDADDR && count < fs_meta->size; i++) { ssize_t cnt; TSK_DADDR_T *addr_ptr = (TSK_DADDR_T *) fs_meta->content_ptr; /* Do we need the entire block, or just part of it? */ int read_count = (fs_meta->size - count < fs->block_size) ? 
(int) fs_meta->size - count : fs->block_size; cnt = tsk_fs_read_block(fs, addr_ptr[i], buf, fs->block_size); if (cnt != fs->block_size) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2 ("ffs_dinode_copy: FFS1A symlink dest at %" PRIuDADDR, addr_ptr[i]); free(buf); return 1; } memcpy(ptr, buf, read_count); count += read_count; ptr = (char *) ((uintptr_t) ptr + read_count); } /* terminate the string */ *ptr = '\0'; /* Clean up name */ i = 0; while (fs_meta->link[i] != '\0') { if (TSK_IS_CNTRL(fs_meta->link[i])) fs_meta->link[i] = '^'; i++; } free(buf); } } /* end of symlink */ } /* TSK_FS_TYPE_FFS1B - Solaris */ else if (fs->ftype == TSK_FS_TYPE_FFS1B) { ffs_inode1b *in = (ffs_inode1b *) dino_buf; TSK_DADDR_T *addr_ptr; fs_meta->mode = ffsmode2tskmode(tsk_getu16(fs->endian, in->di_mode)); fs_meta->type = ffsmode2tsktype(tsk_getu16(fs->endian, in->di_mode)); fs_meta->nlink = tsk_gets16(fs->endian, in->di_nlink); fs_meta->size = tsk_getu64(fs->endian, in->di_size); fs_meta->uid = tsk_getu32(fs->endian, in->di_uid); fs_meta->gid = tsk_getu32(fs->endian, in->di_gid); fs_meta->mtime = tsk_gets32(fs->endian, in->di_mtime); fs_meta->atime = tsk_gets32(fs->endian, in->di_atime); fs_meta->ctime = tsk_gets32(fs->endian, in->di_ctime); fs_meta->crtime = 0; fs_meta->mtime_nano = fs_meta->atime_nano = fs_meta->ctime_nano = fs_meta->crtime_nano = 0; if (fs_meta->content_len < FFS_FILE_CONTENT_LEN) { if ((fs_meta = tsk_fs_meta_realloc(fs_meta, FFS_FILE_CONTENT_LEN)) == NULL) { return 1; } } addr_ptr = (TSK_DADDR_T *) fs_meta->content_ptr; for (i = 0; i < FFS_NDADDR; i++) addr_ptr[i] = tsk_gets32(fs->endian, in->di_db[i]); for (i = 0; i < FFS_NIADDR; i++) addr_ptr[FFS_NDADDR + i] = tsk_gets32(fs->endian, in->di_ib[i]); if ((fs_meta->type == TSK_FS_META_TYPE_LNK) && (fs_meta->size < FFS_MAXPATHLEN) && (fs_meta->size >= 0)) { count = 0; /* index into the link array */ /* it is located directly in the pointers */ if (fs_meta->size < 4 * (FFS_NDADDR + FFS_NIADDR)) { char *ptr; /* Direct block pointer locations */ for (i = 0; i < FFS_NDADDR && count < fs_meta->size; i++) { ptr = (char *) &in->di_db[i]; for (j = 0; j < 4 && count < fs_meta->size; j++) fs_meta->link[count++] = ptr[j]; } /* indirect block pointers */ for (i = 0; i < FFS_NIADDR && count < fs_meta->size; i++) { ptr = (char *) &in->di_ib[i]; for (j = 0; j < 4 && count < fs_meta->size; j++) fs_meta->link[count++] = ptr[j]; } fs_meta->link[count] = '\0'; /* clear the values to avoid other code from reading them */ memset(fs_meta->content_ptr, 0, fs_meta->content_len); } /* it is in blocks (the regular way) */ else { char *buf; char *ptr; if ((buf = (char *) tsk_malloc(fs->block_size)) == NULL) return 1; fs_meta->link = ptr = tsk_malloc((size_t) fs_meta->size + 1); if (fs_meta->link == NULL) { free(buf); return 1; } /* there is a max link length of 1000, so we should never * need the indirect blocks */ for (i = 0; i < FFS_NDADDR && count < fs_meta->size; i++) { ssize_t cnt; TSK_DADDR_T *addr_ptr = (TSK_DADDR_T *) fs_meta->content_ptr; /* Do we need the entire block, or just part of it? */ int read_count = (fs_meta->size - count < fs->block_size) ? 
(int) fs_meta->size - count : fs->block_size; cnt = tsk_fs_read_block(fs, addr_ptr[i], buf, fs->block_size); if (cnt != fs->block_size) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2 ("ffs_dinode_copy: FFS1B symlink dest at %" PRIuDADDR, addr_ptr[i]); free(buf); return 1; } memcpy(ptr, buf, read_count); count += read_count; ptr = (char *) ((uintptr_t) ptr + read_count); } /* terminate the string */ *ptr = '\0'; free(buf); } } } else if (fs->ftype == TSK_FS_TYPE_FFS2) { ffs_inode2 *in = (ffs_inode2 *) dino_buf; TSK_DADDR_T *addr_ptr; fs_meta->mode = ffsmode2tskmode(tsk_getu16(fs->endian, in->di_mode)); fs_meta->type = ffsmode2tsktype(tsk_getu16(fs->endian, in->di_mode)); fs_meta->nlink = tsk_gets16(fs->endian, in->di_nlink); fs_meta->size = tsk_getu64(fs->endian, in->di_size); fs_meta->uid = tsk_getu32(fs->endian, in->di_uid); fs_meta->gid = tsk_getu32(fs->endian, in->di_gid); fs_meta->mtime = (time_t) tsk_gets64(fs->endian, in->di_mtime); fs_meta->atime = (time_t) tsk_gets64(fs->endian, in->di_atime); fs_meta->ctime = (time_t) tsk_gets64(fs->endian, in->di_ctime); fs_meta->crtime = 0; fs_meta->mtime_nano = tsk_getu32(fs->endian, in->di_mtimensec); fs_meta->atime_nano = tsk_getu32(fs->endian, in->di_atimensec); fs_meta->ctime_nano = tsk_getu32(fs->endian, in->di_ctimensec); fs_meta->crtime_nano = tsk_getu32(fs->endian, in->di_crtimensec); if (fs_meta->content_len < FFS_FILE_CONTENT_LEN) { if ((fs_meta = tsk_fs_meta_realloc(fs_meta, FFS_FILE_CONTENT_LEN)) == NULL) { return 1; } } addr_ptr = (TSK_DADDR_T *) fs_meta->content_ptr; for (i = 0; i < FFS_NDADDR; i++) addr_ptr[i] = tsk_gets64(fs->endian, in->di_db[i]); for (i = 0; i < FFS_NIADDR; i++) addr_ptr[FFS_NDADDR + i] = tsk_gets64(fs->endian, in->di_ib[i]); /* set the link string (if the file is a link) * The size check is a sanity check so that we don't try and allocate * a huge amount of memory for a bad inode value */ if ((fs_meta->type == TSK_FS_META_TYPE_LNK) && (fs_meta->size < FFS_MAXPATHLEN) && (fs_meta->size >= 0)) { fs_meta->link = tsk_malloc((size_t) fs_meta->size + 1); if (fs_meta->link == NULL) { return 1; } count = 0; /* index into the link array */ /* it is located directly in the pointers * Only the new style inode has this "fast link" */ if (fs_meta->size < 8 * (FFS_NDADDR + FFS_NIADDR)) { char *ptr; /* Direct block pointer locations */ for (i = 0; i < FFS_NDADDR && count < fs_meta->size; i++) { ptr = (char *) &in->di_db[i]; for (j = 0; j < 8 && count < fs_meta->size; j++) fs_meta->link[count++] = ptr[j]; } /* indirect block pointers */ for (i = 0; i < FFS_NIADDR && count < fs_meta->size; i++) { ptr = (char *) &in->di_ib[i]; for (j = 0; j < 8 && count < fs_meta->size; j++) fs_meta->link[count++] = ptr[j]; } fs_meta->link[count] = '\0'; /* clear the values to avoid other code from reading them */ memset(fs_meta->content_ptr, 0, fs_meta->content_len); } /* it is in blocks (the regular way) */ else { char *buf; char *ptr = fs_meta->link; if ((buf = (char *) tsk_malloc(fs->block_size)) == NULL) { return 1; } /* there is a max link length of 1000, so we should never * need the indirect blocks */ for (i = 0; i < FFS_NDADDR && count < fs_meta->size; i++) { ssize_t cnt; TSK_DADDR_T *addr_ptr = fs_meta->content_ptr; /* Do we need the entire block, or just part of it? */ int read_count = (fs_meta->size - count < fs->block_size) ? 
(int) fs_meta->size - count : fs->block_size; cnt = tsk_fs_read_block(fs, addr_ptr[i], buf, fs->block_size); if (cnt != fs->block_size) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2 ("ffs_dinode_copy: FFS2 symlink dest at %" PRIuDADDR, addr_ptr[i]); free(buf); return 1; } memcpy(ptr, buf, read_count); count += read_count; ptr = (char *) ((uintptr_t) ptr + read_count); } /* terminate the string */ *ptr = '\0'; free(buf); } } /* end of symlink */ } else { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("ffs_dinode_copy: Unknown FFS Type"); return 1; } /* set the flags */ grp_num = (FFS_GRPNUM_T) itog_lcl(fs, ffs->fs.sb1, dino_inum); tsk_take_lock(&ffs->lock); if (ffs_group_load(ffs, grp_num)) { tsk_release_lock(&ffs->lock); return 1; } cg = (ffs_cgd *) ffs->grp_buf; inosused = (unsigned char *) cg_inosused_lcl(fs, cg); ibase = grp_num * tsk_gets32(fs->endian, ffs->fs.sb1->cg_inode_num); /* get the alloc flag */ fs_meta->flags = (isset(inosused, dino_inum - ibase) ? TSK_FS_META_FLAG_ALLOC : TSK_FS_META_FLAG_UNALLOC); tsk_release_lock(&ffs->lock); /* used/unused */ fs_meta->flags |= (fs_meta->ctime ? TSK_FS_META_FLAG_USED : TSK_FS_META_FLAG_UNUSED); return 0; } /* ffs_inode_lookup - lookup inode, external interface * * Return 1 on error * * */ static uint8_t ffs_inode_lookup(TSK_FS_INFO * fs, TSK_FS_FILE * a_fs_file, TSK_INUM_T inum) { ffs_inode *dino_buf; FFS_INFO *ffs = (FFS_INFO *) fs; if (a_fs_file == NULL) { tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("ffs_inode_lookup: fs_file is NULL"); return 1; } /* copy it to the TSK_FS_META structure */ if (a_fs_file->meta == NULL) { a_fs_file->meta = tsk_fs_meta_alloc(FFS_FILE_CONTENT_LEN); if (a_fs_file->meta == NULL) return 1; } else { tsk_fs_meta_reset(a_fs_file->meta); } // see if they are looking for the special "orphans" directory if (inum == TSK_FS_ORPHANDIR_INUM(fs)) { if (tsk_fs_dir_make_orphan_dir_meta(fs, a_fs_file->meta)) return 1; else return 0; } /* Lookup the inode and store it in ffs */ if ((dino_buf = (ffs_inode *) tsk_malloc(sizeof(ffs_inode2))) == NULL) return 1; if (ffs_dinode_load(ffs, inum, dino_buf)) { free(dino_buf); return 1; } if (ffs_dinode_copy(ffs, a_fs_file->meta, inum, dino_buf)) { free(dino_buf); return 1; } free(dino_buf); return 0; } /************************************************************************** * * INODE WALKING * **************************************************************************/ /* ffs_inode_walk - inode iterator * * flags used: TSK_FS_META_FLAG_USED, TSK_FS_META_FLAG_UNUSED, * TSK_FS_META_FLAG_ALLOC, TSK_FS_META_FLAG_UNALLOC, TSK_FS_META_FLAG_ORPHAN * * return 1 on error and 0 on success */ uint8_t ffs_inode_walk(TSK_FS_INFO * fs, TSK_INUM_T start_inum, TSK_INUM_T end_inum, TSK_FS_META_FLAG_ENUM a_flags, TSK_FS_META_WALK_CB action, void *ptr) { char *myname = "ffs_inode_walk"; FFS_INFO *ffs = (FFS_INFO *) fs; FFS_GRPNUM_T grp_num; ffs_cgd *cg = NULL; TSK_INUM_T inum; unsigned char *inosused = NULL; TSK_FS_FILE *fs_file; int myflags; TSK_INUM_T ibase = 0; TSK_INUM_T end_inum_tmp; ffs_inode *dino_buf; // clean up any error messages that are lying around tsk_error_reset(); /* * Sanity checks. 
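 * Both bounds must fall within [fs->first_inum, fs->last_inum], and
 * end_inum must not precede start_inum.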
*/ if (start_inum < fs->first_inum || start_inum > fs->last_inum) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); tsk_error_set_errstr("%s: Start inode: %" PRIuINUM "", myname, start_inum); return 1; } else if (end_inum < fs->first_inum || end_inum > fs->last_inum || end_inum < start_inum) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); tsk_error_set_errstr("%s: End inode: %" PRIuINUM "", myname, end_inum); return 1; } /* If ORPHAN is wanted, then make sure that the flags are correct */ if (a_flags & TSK_FS_META_FLAG_ORPHAN) { a_flags |= TSK_FS_META_FLAG_UNALLOC; a_flags &= ~TSK_FS_META_FLAG_ALLOC; a_flags |= TSK_FS_META_FLAG_USED; a_flags &= ~TSK_FS_META_FLAG_UNUSED; } else { if (((a_flags & TSK_FS_META_FLAG_ALLOC) == 0) && ((a_flags & TSK_FS_META_FLAG_UNALLOC) == 0)) { a_flags |= (TSK_FS_META_FLAG_ALLOC | TSK_FS_META_FLAG_UNALLOC); } /* If neither of the USED or UNUSED flags are set, then set them * both */ if (((a_flags & TSK_FS_META_FLAG_USED) == 0) && ((a_flags & TSK_FS_META_FLAG_UNUSED) == 0)) { a_flags |= (TSK_FS_META_FLAG_USED | TSK_FS_META_FLAG_UNUSED); } } /* If we are looking for orphan files and have not yet filled * in the list of unalloc inodes that are pointed to, then fill * in the list * */ if ((a_flags & TSK_FS_META_FLAG_ORPHAN)) { if (tsk_fs_dir_load_inum_named(fs) != TSK_OK) { tsk_error_errstr2_concat ("- ffs_inode_walk: identifying inodes allocated by file names"); return 1; } } if ((fs_file = tsk_fs_file_alloc(fs)) == NULL) return 1; if ((fs_file->meta = tsk_fs_meta_alloc(FFS_FILE_CONTENT_LEN)) == NULL) return 1; // we need to handle fs->last_inum specially because it is for the // virtual ORPHANS directory. Handle it outside of the loop. if (end_inum == TSK_FS_ORPHANDIR_INUM(fs)) end_inum_tmp = end_inum - 1; else end_inum_tmp = end_inum; if ((dino_buf = (ffs_inode *) tsk_malloc(sizeof(ffs_inode2))) == NULL) return 1; /* * Iterate. This is easy because inode numbers are contiguous, unlike * data blocks which are interleaved with cylinder group blocks. */ for (inum = start_inum; inum <= end_inum_tmp; inum++) { int retval; /* * Be sure to use the proper cylinder group data. */ grp_num = itog_lcl(fs, ffs->fs.sb1, inum); tsk_take_lock(&ffs->lock); if (ffs_group_load(ffs, grp_num)) { tsk_release_lock(&ffs->lock); free(dino_buf); return 1; } cg = (ffs_cgd *) ffs->grp_buf; inosused = (unsigned char *) cg_inosused_lcl(fs, cg); ibase = grp_num * tsk_gets32(fs->endian, ffs->fs.sb1->cg_inode_num); /* * Apply the allocated/unallocated restriction. */ myflags = (isset(inosused, inum - ibase) ? TSK_FS_META_FLAG_ALLOC : TSK_FS_META_FLAG_UNALLOC); tsk_release_lock(&ffs->lock); if ((a_flags & myflags) != myflags) continue; if (ffs_dinode_load(ffs, inum, dino_buf)) { tsk_fs_file_close(fs_file); free(dino_buf); return 1; } if ((fs->ftype == TSK_FS_TYPE_FFS1) || (fs->ftype == TSK_FS_TYPE_FFS1B)) { /* both inode forms are the same for the required fields */ ffs_inode1 *in1 = (ffs_inode1 *) dino_buf; /* * Apply the used/unused restriction. */ myflags |= (tsk_gets32(fs->endian, in1->di_ctime) ? TSK_FS_META_FLAG_USED : TSK_FS_META_FLAG_UNUSED); if ((a_flags & myflags) != myflags) continue; } else { ffs_inode2 *in2 = (ffs_inode2 *) dino_buf; /* * Apply the used/unused restriction. */ myflags |= (tsk_gets64(fs->endian, in2->di_ctime) ? 
TSK_FS_META_FLAG_USED : TSK_FS_META_FLAG_UNUSED); if ((a_flags & myflags) != myflags) continue; } /* If we want only orphans, then check if this * inode is in the seen list */ if ((myflags & TSK_FS_META_FLAG_UNALLOC) && (a_flags & TSK_FS_META_FLAG_ORPHAN) && (tsk_fs_dir_find_inum_named(fs, inum))) { continue; } /* * Fill in a file system-independent inode structure and pass control * to the application. */ if (ffs_dinode_copy(ffs, fs_file->meta, inum, dino_buf)) { tsk_fs_file_close(fs_file); free(dino_buf); return 1; } retval = action(fs_file, ptr); if (retval == TSK_WALK_STOP) { tsk_fs_file_close(fs_file); free(dino_buf); return 0; } else if (retval == TSK_WALK_ERROR) { tsk_fs_file_close(fs_file); free(dino_buf); return 1; } } // handle the virtual orphans folder if they asked for it if ((end_inum == TSK_FS_ORPHANDIR_INUM(fs)) && (a_flags & TSK_FS_META_FLAG_ALLOC) && (a_flags & TSK_FS_META_FLAG_USED)) { int retval; if (tsk_fs_dir_make_orphan_dir_meta(fs, fs_file->meta)) { tsk_fs_file_close(fs_file); free(dino_buf); return 1; } /* call action */ retval = action(fs_file, ptr); if (retval == TSK_WALK_STOP) { tsk_fs_file_close(fs_file); free(dino_buf); return 0; } else if (retval == TSK_WALK_ERROR) { tsk_fs_file_close(fs_file); free(dino_buf); return 1; } } /* * Cleanup. */ tsk_fs_file_close(fs_file); free(dino_buf); return 0; } TSK_FS_BLOCK_FLAG_ENUM ffs_block_getflags(TSK_FS_INFO * a_fs, TSK_DADDR_T a_addr) { FFS_INFO *ffs = (FFS_INFO *) a_fs; FFS_GRPNUM_T grp_num; ffs_cgd *cg = 0; TSK_DADDR_T frag_base = 0; TSK_DADDR_T dblock_addr = 0; /* first data block in group */ TSK_DADDR_T sblock_addr = 0; /* super block in group */ unsigned char *freeblocks = NULL; int flags; // sparse if (a_addr == 0) return TSK_FS_BLOCK_FLAG_CONT | TSK_FS_BLOCK_FLAG_ALLOC; grp_num = dtog_lcl(a_fs, ffs->fs.sb1, a_addr); tsk_take_lock(&ffs->lock); if (ffs_group_load(ffs, grp_num)) { tsk_release_lock(&ffs->lock); return 0; } cg = (ffs_cgd *) ffs->grp_buf; freeblocks = (unsigned char *) cg_blksfree_lcl(a_fs, cg); // get the base fragment for the group frag_base = cgbase_lcl(a_fs, ffs->fs.sb1, grp_num); // address of first data block in group dblock_addr = cgdmin_lcl(a_fs, ffs->fs.sb1, grp_num); // address of super block in group sblock_addr = cgsblock_lcl(a_fs, ffs->fs.sb1, grp_num); /* get the flags for this fragment * * Beware: FFS stores file data in the blocks between the start of a * cylinder group and the start of its super block. */ flags = (isset(freeblocks, a_addr - frag_base) ? 
TSK_FS_BLOCK_FLAG_UNALLOC : TSK_FS_BLOCK_FLAG_ALLOC); tsk_release_lock(&ffs->lock); if (a_addr >= sblock_addr && a_addr < dblock_addr) flags |= TSK_FS_BLOCK_FLAG_META; else flags |= TSK_FS_BLOCK_FLAG_CONT; return flags; } /************************************************************************** * * BLOCK WALKING * **************************************************************************/ /* ffs_block_walk - block iterator * * flags: TSK_FS_BLOCK_FLAG_ALLOC, TSK_FS_BLOCK_FLAG_UNALLOC, TSK_FS_BLOCK_FLAG_CONT, * TSK_FS_BLOCK_FLAG_META * * return 1 on error and 0 on success */ uint8_t ffs_block_walk(TSK_FS_INFO * fs, TSK_DADDR_T a_start_blk, TSK_DADDR_T a_end_blk, TSK_FS_BLOCK_WALK_FLAG_ENUM a_flags, TSK_FS_BLOCK_WALK_CB action, void *ptr) { char *myname = "ffs_block_walk"; FFS_INFO *ffs = (FFS_INFO *) fs; TSK_FS_BLOCK *fs_block; TSK_DADDR_T addr; char *cache_blk_buf; // buffer used for local read cache TSK_DADDR_T cache_addr; // base address in local cache int cache_len_f; // amount of data read into cache (in fragments) // clean up any error messages that are lying around tsk_error_reset(); /* * Sanity checks on input bounds */ if (a_start_blk < fs->first_block || a_start_blk > fs->last_block) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); tsk_error_set_errstr("%s: Start block: %" PRIuDADDR "", myname, a_start_blk); return 1; } if (a_end_blk < fs->first_block || a_end_blk > fs->last_block || a_end_blk < a_start_blk) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); tsk_error_set_errstr("%s: End block: %" PRIuDADDR "", myname, a_end_blk); return 1; } /* Sanity check on flags -- make sure at least one ALLOC is set */ if (((a_flags & TSK_FS_BLOCK_WALK_FLAG_ALLOC) == 0) && ((a_flags & TSK_FS_BLOCK_WALK_FLAG_UNALLOC) == 0)) { a_flags |= (TSK_FS_BLOCK_WALK_FLAG_ALLOC | TSK_FS_BLOCK_WALK_FLAG_UNALLOC); } if (((a_flags & TSK_FS_BLOCK_WALK_FLAG_META) == 0) && ((a_flags & TSK_FS_BLOCK_WALK_FLAG_CONT) == 0)) { a_flags |= (TSK_FS_BLOCK_WALK_FLAG_CONT | TSK_FS_BLOCK_WALK_FLAG_META); } /* Other initialization */ if ((fs_block = tsk_fs_block_alloc(fs)) == NULL) { return 1; } if ((cache_blk_buf = tsk_malloc(ffs->ffsbsize_b)) == NULL) { return 1; } cache_len_f = 0; cache_addr = 0; /* Cycle through the fragment range requested */ for (addr = a_start_blk; addr <= a_end_blk; addr++) { int retval; size_t cache_offset = 0; int myflags = ffs_block_getflags(fs, addr); if ((tsk_verbose) && (myflags & TSK_FS_BLOCK_FLAG_META) && (myflags & TSK_FS_BLOCK_FLAG_UNALLOC)) tsk_fprintf(stderr, "impossible: unallocated meta block %" PRIuDADDR, addr); // test if we should call the callback with this one if ((myflags & TSK_FS_BLOCK_FLAG_META) && (!(a_flags & TSK_FS_BLOCK_WALK_FLAG_META))) continue; else if ((myflags & TSK_FS_BLOCK_FLAG_CONT) && (!(a_flags & TSK_FS_BLOCK_WALK_FLAG_CONT))) continue; else if ((myflags & TSK_FS_BLOCK_FLAG_ALLOC) && (!(a_flags & TSK_FS_BLOCK_WALK_FLAG_ALLOC))) continue; else if ((myflags & TSK_FS_BLOCK_FLAG_UNALLOC) && (!(a_flags & TSK_FS_BLOCK_WALK_FLAG_UNALLOC))) continue; if ((a_flags & TSK_FS_BLOCK_WALK_FLAG_AONLY) == 0) { /* we read in block-sized chunks and cache the result for later * calls. See if this fragment is in our cache */ if ((cache_len_f == 0) || (addr >= cache_addr + cache_len_f)) { ssize_t cnt; int frags; /* Ideally, we want to read in block sized chunks, verify we can do that */ frags = (a_end_blk > addr + ffs->ffsbsize_f - 1 ? 
ffs->ffsbsize_f : (int) (a_end_blk + 1 - addr)); cnt = tsk_fs_read_block(fs, addr, cache_blk_buf, fs->block_size * frags); if (cnt != fs->block_size * frags) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2("ffs_block_walk: Block %" PRIuDADDR, addr); tsk_fs_block_free(fs_block); free(cache_blk_buf); return 1; } cache_len_f = frags; cache_addr = addr; } cache_offset = (size_t) ((addr - cache_addr) * fs->block_size); } if (a_flags & TSK_FS_BLOCK_WALK_FLAG_AONLY) myflags |= TSK_FS_BLOCK_FLAG_AONLY; // call the callback tsk_fs_block_set(fs, fs_block, addr, myflags | TSK_FS_BLOCK_FLAG_RAW, &cache_blk_buf[cache_offset]); retval = action(fs_block, ptr); if (retval == TSK_WALK_STOP) { break; } else if (retval == TSK_WALK_ERROR) { tsk_fs_block_free(fs_block); free(cache_blk_buf); return 1; } } /* Cleanup */ tsk_fs_block_free(fs_block); free(cache_blk_buf); return 0; } /* * return 1 on error and 0 on success */ static uint8_t ffs_fscheck(TSK_FS_INFO * fs, FILE * hFile) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNSUPFUNC); tsk_error_set_errstr("fscheck not implemented for ffs yet"); return 1; } /** * Print details about the file system to a file handle. * * @param fs File system to print details on * @param hFile File handle to print text to * * @returns 1 on error and 0 on success */ static uint8_t ffs_fsstat(TSK_FS_INFO * fs, FILE * hFile) { unsigned int i; time_t tmptime; ffs_csum1 *csum1 = NULL; ffs_cgd *cgd = NULL; FFS_INFO *ffs = (FFS_INFO *) fs; ffs_sb1 *sb1 = ffs->fs.sb1; ffs_sb2 *sb2 = ffs->fs.sb2; int flags; char timeBuf[128]; // clean up any error messages that are lying around tsk_error_reset(); tsk_fprintf(hFile, "FILE SYSTEM INFORMATION\n"); tsk_fprintf(hFile, "--------------------------------------------\n"); if ((fs->ftype == TSK_FS_TYPE_FFS1) || (fs->ftype == TSK_FS_TYPE_FFS1B)) { tsk_fprintf(hFile, "File System Type: UFS 1\n"); tmptime = tsk_getu32(fs->endian, sb1->wtime); tsk_fprintf(hFile, "Last Written: %s\n", (tmptime > 0) ? tsk_fs_time_to_str(tmptime, timeBuf) : "empty"); tsk_fprintf(hFile, "Last Mount Point: %s\n", sb1->last_mnt); flags = sb1->fs_flags; } else { tsk_fprintf(hFile, "File System Type: UFS 2\n"); tmptime = tsk_getu32(fs->endian, sb2->wtime); tsk_fprintf(hFile, "Last Written: %s\n", (tmptime > 0) ? tsk_fs_time_to_str(tmptime, timeBuf) : "empty"); tsk_fprintf(hFile, "Last Mount Point: %s\n", sb2->last_mnt); tsk_fprintf(hFile, "Volume Name: %s\n", sb2->volname); tsk_fprintf(hFile, "System UID: %" PRIu64 "\n", tsk_getu64(fs->endian, sb2->swuid)); flags = tsk_getu32(fs->endian, sb2->fs_flags); } if (flags) { int cnt = 0; tsk_fprintf(hFile, "Flags: "); if (flags & FFS_SB_FLAG_UNCLEAN) tsk_fprintf(hFile, "%s Unclean", (cnt++ == 0 ? "" : ",")); if (flags & FFS_SB_FLAG_SOFTDEP) tsk_fprintf(hFile, "%s Soft Dependencies", (cnt++ == 0 ? "" : ",")); if (flags & FFS_SB_FLAG_NEEDFSCK) tsk_fprintf(hFile, "%s Needs fsck", (cnt++ == 0 ? "" : ",")); if (flags & FFS_SB_FLAG_INDEXDIR) tsk_fprintf(hFile, "%s Index directories", (cnt++ == 0 ? "" : ",")); if (flags & FFS_SB_FLAG_ACL) tsk_fprintf(hFile, "%s ACLs", (cnt++ == 0 ? "" : ",")); if (flags & FFS_SB_FLAG_MULTILABEL) tsk_fprintf(hFile, "%s TrustedBSD MAC Multi-label", (cnt++ == 0 ? "" : ",")); if (flags & FFS_SB_FLAG_UPDATED) tsk_fprintf(hFile, "%s Updated Flag Location", (cnt++ == 0 ? 
"" : ",")); tsk_fprintf(hFile, "\n"); } tsk_fprintf(hFile, "\nMETADATA INFORMATION\n"); tsk_fprintf(hFile, "--------------------------------------------\n"); tsk_fprintf(hFile, "Inode Range: %" PRIuINUM " - %" PRIuINUM "\n", fs->first_inum, fs->last_inum); tsk_fprintf(hFile, "Root Directory: %" PRIuINUM "\n", fs->root_inum); if ((fs->ftype == TSK_FS_TYPE_FFS1) || (fs->ftype == TSK_FS_TYPE_FFS1B)) { tsk_fprintf(hFile, "Num of Avail Inodes: %" PRIu32 "\n", tsk_getu32(fs->endian, sb1->cstotal.ino_free)); tsk_fprintf(hFile, "Num of Directories: %" PRIu32 "\n", tsk_getu32(fs->endian, sb1->cstotal.dir_num)); } else { tsk_fprintf(hFile, "Num of Avail Inodes: %" PRIu64 "\n", tsk_getu64(fs->endian, sb2->cstotal.ino_free)); tsk_fprintf(hFile, "Num of Directories: %" PRIu64 "\n", tsk_getu64(fs->endian, sb2->cstotal.dir_num)); } tsk_fprintf(hFile, "\nCONTENT INFORMATION\n"); tsk_fprintf(hFile, "--------------------------------------------\n"); tsk_fprintf(hFile, "Fragment Range: %" PRIuDADDR " - %" PRIuDADDR "\n", fs->first_block, fs->last_block); if (fs->last_block != fs->last_block_act) tsk_fprintf(hFile, "Total Range in Image: %" PRIuDADDR " - %" PRIuDADDR "\n", fs->first_block, fs->last_block_act); tsk_fprintf(hFile, "Block Size: %u\n", ffs->ffsbsize_b); tsk_fprintf(hFile, "Fragment Size: %u\n", fs->block_size); if ((fs->ftype == TSK_FS_TYPE_FFS1) || (fs->ftype == TSK_FS_TYPE_FFS1B)) { tsk_fprintf(hFile, "Num of Avail Full Blocks: %" PRIu32 "\n", tsk_getu32(fs->endian, sb1->cstotal.blk_free)); tsk_fprintf(hFile, "Num of Avail Fragments: %" PRIu32 "\n", tsk_getu32(fs->endian, sb1->cstotal.frag_free)); } else { tsk_fprintf(hFile, "Num of Avail Full Blocks: %" PRIu64 "\n", tsk_getu64(fs->endian, sb2->cstotal.blk_free)); tsk_fprintf(hFile, "Num of Avail Fragments: %" PRIu64 "\n", tsk_getu64(fs->endian, sb2->cstotal.frag_free)); } tsk_fprintf(hFile, "\nCYLINDER GROUP INFORMATION\n"); tsk_fprintf(hFile, "--------------------------------------------\n"); tsk_fprintf(hFile, "Number of Cylinder Groups: %" PRIu32 "\n", ffs->groups_count); tsk_fprintf(hFile, "Inodes per group: %" PRId32 "\n", tsk_gets32(fs->endian, sb1->cg_inode_num)); tsk_fprintf(hFile, "Fragments per group: %" PRId32 "\n", tsk_gets32(fs->endian, sb1->cg_frag_num)); /* UFS 1 and 2 use the same ssize field and use the same csum1 */ if (tsk_getu32(fs->endian, sb1->cg_ssize_b)) { ssize_t cnt; csum1 = (ffs_csum1 *) tsk_malloc(tsk_getu32(fs->endian, sb1->cg_ssize_b)); if (csum1 == NULL) return 1; if ((fs->ftype == TSK_FS_TYPE_FFS1) || (fs->ftype == TSK_FS_TYPE_FFS1B)) { cnt = tsk_fs_read_block(fs, (TSK_DADDR_T) tsk_getu32(fs->endian, sb1->cg_saddr), (char *) csum1, tsk_getu32(fs->endian, sb1->cg_ssize_b)); if (cnt != tsk_getu32(fs->endian, sb1->cg_ssize_b)) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2 ("ffs_fsstat: FFS1 group descriptor at %" PRIu32, tsk_getu32(fs->endian, sb1->cg_saddr)); return 1; } } else { cnt = tsk_fs_read_block (fs, (TSK_DADDR_T) tsk_getu64(fs->endian, sb2->cg_saddr), (char *) csum1, tsk_getu32(fs->endian, sb2->cg_ssize_b)); if (cnt != tsk_getu32(fs->endian, sb2->cg_ssize_b)) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2 ("ffs_fsstat: FFS2 group descriptor at %" PRIu64, tsk_getu64(fs->endian, sb2->cg_saddr)); return 1; } } } for (i = 0; i < ffs->groups_count; i++) { tsk_take_lock(&ffs->lock); if (ffs_group_load(ffs, i)) { tsk_release_lock(&ffs->lock); return 1; } cgd = (ffs_cgd *) ffs->grp_buf; tsk_fprintf(hFile, 
"\nGroup %d:\n", i); if (cgd) { if ((fs->ftype == TSK_FS_TYPE_FFS1) || (fs->ftype == TSK_FS_TYPE_FFS1B)) { tmptime = tsk_getu32(fs->endian, cgd->wtime); } else { ffs_cgd2 *cgd2 = (ffs_cgd2 *) cgd; tmptime = (uint32_t) tsk_getu64(fs->endian, cgd2->wtime); } tsk_fprintf(hFile, " Last Written: %s\n", (tmptime > 0) ? tsk_fs_time_to_str(tmptime, timeBuf) : "empty"); } tsk_release_lock(&ffs->lock); tsk_fprintf(hFile, " Inode Range: %" PRIu32 " - %" PRIu32 "\n", (tsk_gets32(fs->endian, sb1->cg_inode_num) * i), ((uint32_t) ((tsk_gets32(fs->endian, sb1->cg_inode_num) * (i + 1)) - 1) < fs->last_inum) ? (uint32_t) ((tsk_gets32(fs->endian, sb1->cg_inode_num) * (i + 1)) - 1) : (uint32_t) fs->last_inum); tsk_fprintf(hFile, " Fragment Range: %" PRIuDADDR " - %" PRIuDADDR "\n", cgbase_lcl(fs, sb1, i), ((cgbase_lcl(fs, sb1, i + 1) - 1) < fs->last_block) ? (cgbase_lcl(fs, sb1, i + 1) - 1) : fs->last_block); /* The first group is special because the first 16 sectors are * reserved for the boot block. * the next contains the primary Super Block */ if (!i) { tsk_fprintf(hFile, " Boot Block: 0 - %" PRIu32 "\n", (uint32_t) (15 * 512 / fs->block_size)); tsk_fprintf(hFile, " Super Block: %" PRIu32 " - %" PRIu32 "\n", (uint32_t) (16 * 512 / fs->block_size), (uint32_t) ((16 * 512 / fs->block_size) + ffs->ffsbsize_f - 1)); } tsk_fprintf(hFile, " Super Block: %" PRIuDADDR " - %" PRIuDADDR "\n", cgsblock_lcl(fs, sb1, i), (cgsblock_lcl(fs, sb1, i) + ffs->ffsbsize_f - 1)); tsk_fprintf(hFile, " Group Desc: %" PRIuDADDR " - %" PRIuDADDR "\n", cgtod_lcl(fs, sb1, i), (cgtod_lcl(fs, sb1, i) + ffs->ffsbsize_f - 1)); if (fs->ftype == TSK_FS_TYPE_FFS2) { tsk_fprintf(hFile, " Inode Table: %" PRIuDADDR " - %" PRIuDADDR "\n", cgimin_lcl(fs, sb1, i), (cgimin_lcl(fs, sb1, i) + ((roundup (tsk_gets32(fs->endian, sb1->cg_inode_num) * sizeof(ffs_inode2), fs->block_size) / fs->block_size) - 1))); } else { tsk_fprintf(hFile, " Inode Table: %" PRIuDADDR " - %" PRIuDADDR "\n", cgimin_lcl(fs, sb1, i), (cgimin_lcl(fs, sb1, i) + ((roundup (tsk_gets32(fs->endian, sb1->cg_inode_num) * sizeof(ffs_inode1), fs->block_size) / fs->block_size) - 1))); } tsk_fprintf(hFile, " Data Fragments: "); /* For all groups besides the first, the space before the * super block is also used for data */ if (i) tsk_fprintf(hFile, "%" PRIuDADDR " - %" PRIuDADDR ", ", cgbase_lcl(fs, sb1, i), cgsblock_lcl(fs, sb1, i) - 1); tsk_fprintf(hFile, "%" PRIuDADDR " - %" PRIuDADDR "\n", cgdmin_lcl(fs, sb1, i), ((cgbase_lcl(fs, sb1, i + 1) - 1) < fs->last_block) ? 
(cgbase_lcl(fs, sb1, i + 1) - 1) : fs->last_block); if ((csum1) && ((i + 1) * sizeof(ffs_csum1) < tsk_getu32(fs->endian, sb1->cg_ssize_b))) { tsk_fprintf(hFile, " Global Summary (from the superblock summary area):\n"); tsk_fprintf(hFile, " Num of Dirs: %" PRIu32 "\n", tsk_getu32(fs->endian, &csum1[i].dir_num)); tsk_fprintf(hFile, " Num of Avail Blocks: %" PRIu32 "\n", tsk_getu32(fs->endian, &csum1[i].blk_free)); tsk_fprintf(hFile, " Num of Avail Inodes: %" PRIu32 "\n", tsk_getu32(fs->endian, &csum1[i].ino_free)); tsk_fprintf(hFile, " Num of Avail Frags: %" PRIu32 "\n", tsk_getu32(fs->endian, &csum1[i].frag_free)); } if (cgd) { tsk_fprintf(hFile, " Local Summary (from the group descriptor):\n"); tsk_fprintf(hFile, " Num of Dirs: %" PRIu32 "\n", tsk_getu32(fs->endian, &cgd->cs.dir_num)); tsk_fprintf(hFile, " Num of Avail Blocks: %" PRIu32 "\n", tsk_getu32(fs->endian, &cgd->cs.blk_free)); tsk_fprintf(hFile, " Num of Avail Inodes: %" PRIu32 "\n", tsk_getu32(fs->endian, &cgd->cs.ino_free)); tsk_fprintf(hFile, " Num of Avail Frags: %" PRIu32 "\n", tsk_getu32(fs->endian, &cgd->cs.frag_free)); tsk_fprintf(hFile, " Last Block Allocated: %" PRIuDADDR "\n", tsk_getu32(fs->endian, &cgd->last_alloc_blk) + cgbase_lcl(fs, sb1, i)); tsk_fprintf(hFile, " Last Fragment Allocated: %" PRIuDADDR "\n", tsk_getu32(fs->endian, &cgd->last_alloc_frag) + cgbase_lcl(fs, sb1, i)); tsk_fprintf(hFile, " Last Inode Allocated: %" PRIu32 "\n", tsk_getu32(fs->endian, &cgd->last_alloc_ino) + (tsk_gets32(fs->endian, sb1->cg_inode_num) * i)); } } return 0; } /************************* istat *******************************/ typedef struct { FILE *hFile; int idx; } FFS_PRINT_ADDR; static TSK_WALK_RET_ENUM print_addr_act(TSK_FS_FILE * fs_file, TSK_OFF_T a_off, TSK_DADDR_T addr, char *buf, size_t size, TSK_FS_BLOCK_FLAG_ENUM a_flags, void *ptr) { TSK_FS_INFO *fs = fs_file->fs_info; FFS_PRINT_ADDR *print = (FFS_PRINT_ADDR *) ptr; if (a_flags & TSK_FS_BLOCK_FLAG_CONT) { int i, s; /* cycle through the fragments if they exist */ for (i = 0, s = (int) size; s > 0; s -= fs->block_size, i++) { /* sparse file */ if (addr) tsk_fprintf(print->hFile, "%" PRIuDADDR " ", addr + i); else tsk_fprintf(print->hFile, "0 "); if (++(print->idx) == 8) { tsk_fprintf(print->hFile, "\n"); print->idx = 0; } } } return TSK_WALK_CONT; } /** * Print details on a specific file to a file handle. * * @param fs File system file is located in * @param hFile File handle to print text to * @param inum Address of file in file system * @param numblock The number of blocks in file to force print (can go beyond file size) * @param sec_skew Clock skew in seconds to also print times in * * @returns 1 on error and 0 on success */ static uint8_t ffs_istat(TSK_FS_INFO * fs, FILE * hFile, TSK_INUM_T inum, TSK_DADDR_T numblock, int32_t sec_skew) { FFS_INFO *ffs = (FFS_INFO *) fs; TSK_FS_META *fs_meta; TSK_FS_FILE *fs_file; char ls[12]; FFS_PRINT_ADDR print; const TSK_FS_ATTR *fs_attr_indir; char *dino_buf; char timeBuf[128]; // clean up any error messages that are lying around tsk_error_reset(); if ((fs_file = tsk_fs_file_open_meta(fs, NULL, inum)) == NULL) { return 1; } fs_meta = fs_file->meta; tsk_fprintf(hFile, "inode: %" PRIuINUM "\n", inum); tsk_fprintf(hFile, "%sAllocated\n", (fs_meta->flags & TSK_FS_META_FLAG_ALLOC) ? 
"" : "Not "); tsk_take_lock(&ffs->lock); tsk_fprintf(hFile, "Group: %" PRI_FFSGRP "\n", ffs->grp_num); tsk_release_lock(&ffs->lock); if (fs_meta->link) tsk_fprintf(hFile, "symbolic link to: %s\n", fs_meta->link); tsk_fprintf(hFile, "uid / gid: %" PRIuUID " / %" PRIuGID "\n", fs_meta->uid, fs_meta->gid); tsk_fs_meta_make_ls(fs_meta, ls, sizeof(ls)); tsk_fprintf(hFile, "mode: %s\n", ls); tsk_fprintf(hFile, "size: %" PRIuOFF "\n", fs_meta->size); tsk_fprintf(hFile, "num of links: %u\n", fs_meta->nlink); if (sec_skew != 0) { tsk_fprintf(hFile, "\nAdjusted Inode Times:\n"); if (fs_meta->mtime) fs_meta->mtime -= sec_skew; if (fs_meta->atime) fs_meta->atime -= sec_skew; if (fs_meta->ctime) fs_meta->ctime -= sec_skew; tsk_fprintf(hFile, "Accessed:\t%s\n", tsk_fs_time_to_str(fs_meta->atime, timeBuf)); tsk_fprintf(hFile, "File Modified:\t%s\n", tsk_fs_time_to_str(fs_meta->mtime, timeBuf)); tsk_fprintf(hFile, "Inode Modified:\t%s\n", tsk_fs_time_to_str(fs_meta->ctime, timeBuf)); if (fs_meta->mtime) fs_meta->mtime += sec_skew; if (fs_meta->atime) fs_meta->atime += sec_skew; if (fs_meta->ctime) fs_meta->ctime += sec_skew; tsk_fprintf(hFile, "\nOriginal Inode Times:\n"); } else { tsk_fprintf(hFile, "\nInode Times:\n"); } tsk_fprintf(hFile, "Accessed:\t%s\n", tsk_fs_time_to_str(fs_meta->atime, timeBuf)); tsk_fprintf(hFile, "File Modified:\t%s\n", tsk_fs_time_to_str(fs_meta->mtime, timeBuf)); tsk_fprintf(hFile, "Inode Modified:\t%s\n", tsk_fs_time_to_str(fs_meta->ctime, timeBuf)); if ((dino_buf = (char *) tsk_malloc(sizeof(ffs_inode2))) == NULL) return 1; // we won't have dino_buf for "virtual" files if ((fs->ftype == TSK_FS_TYPE_FFS2) && (dino_buf)) { ffs_inode2 *in = (ffs_inode2 *) dino_buf; /* Are there extended attributes */ if (tsk_getu32(fs->endian, in->di_extsize) > 0) { ffs_extattr *ea; uint32_t size; char name[257]; char *blk_buf; if ((blk_buf = tsk_malloc(ffs->ffsbsize_b)) == NULL) { tsk_fs_file_close(fs_file); free(dino_buf); return 1; } size = tsk_getu32(fs->endian, in->di_extsize); tsk_fprintf(hFile, "\nExtended Attributes:\n"); tsk_fprintf(hFile, "Size: %" PRIu32 " (%" PRIu64 ", %" PRIu64 ")\n", size, tsk_getu64(fs->endian, in->di_extb[0]), tsk_getu64(fs->endian, in->di_extb[1])); /* Process first block */ // @@@ Incorporate values into this as well if ((tsk_getu64(fs->endian, in->di_extb[0]) >= fs->first_block) && (tsk_getu64(fs->endian, in->di_extb[0]) <= fs->last_block)) { uintptr_t end; ssize_t cnt; cnt = tsk_fs_read_block(fs, tsk_getu64(fs->endian, in->di_extb[0]), blk_buf, ffs->ffsbsize_b); if (cnt != ffs->ffsbsize_b) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2 ("ffs_istat: FFS2 extended attribute 0 at %" PRIu64, tsk_getu64(fs->endian, in->di_extb[0])); tsk_fs_file_close(fs_file); free(blk_buf); free(dino_buf); return 1; } ea = (ffs_extattr *) blk_buf; if (size > ffs->ffsbsize_b) { end = (uintptr_t) ea + ffs->ffsbsize_b; size -= ffs->ffsbsize_b; } else { end = (uintptr_t) ea + size; size = 0; } for (; (uintptr_t) ea < end; ea = (ffs_extattr *) ((uintptr_t) ea + tsk_getu32(fs->endian, ea->reclen))) { memcpy(name, ea->name, ea->nlen); name[ea->nlen] = '\0'; tsk_fprintf(hFile, "%s\n", name); } } if ((tsk_getu64(fs->endian, in->di_extb[1]) >= fs->first_block) && (tsk_getu64(fs->endian, in->di_extb[1]) <= fs->last_block)) { uintptr_t end; ssize_t cnt; cnt = tsk_fs_read_block(fs, tsk_getu64(fs->endian, in->di_extb[1]), blk_buf, ffs->ffsbsize_b); if (cnt != ffs->ffsbsize_b) { if (cnt >= 0) { tsk_error_reset(); 
tsk_error_set_errno(TSK_ERR_FS_INODE_COR); } tsk_error_set_errstr2 ("ffs_istat: FFS2 extended attribute 1 at %" PRIu64, tsk_getu64(fs->endian, in->di_extb[1])); tsk_fs_file_close(fs_file); free(blk_buf); free(dino_buf); return 1; } ea = (ffs_extattr *) blk_buf; if (size > ffs->ffsbsize_b) end = (uintptr_t) ea + ffs->ffsbsize_b; else end = (uintptr_t) ea + size; for (; (uintptr_t) ea < end; ea = (ffs_extattr *) ((uintptr_t) ea + tsk_getu32(fs->endian, ea->reclen))) { memcpy(name, ea->name, ea->nlen); name[ea->nlen] = '\0'; tsk_fprintf(hFile, "%s\n", name); } } free(blk_buf); } free(dino_buf); } /* A bad hack to force a specified number of blocks */ if (numblock > 0) fs_meta->size = numblock * ffs->ffsbsize_b; tsk_fprintf(hFile, "\nDirect Blocks:\n"); print.idx = 0; print.hFile = hFile; if (tsk_fs_file_walk(fs_file, TSK_FS_FILE_WALK_FLAG_AONLY, print_addr_act, (void *) &print)) { tsk_fprintf(hFile, "\nError reading blocks in file\n"); tsk_error_print(hFile); tsk_fs_file_close(fs_file); return 1; } if (print.idx != 0) tsk_fprintf(hFile, "\n"); fs_attr_indir = tsk_fs_file_attr_get_type(fs_file, TSK_FS_ATTR_TYPE_UNIX_INDIR, 0, 0); if (fs_attr_indir) { tsk_fprintf(hFile, "\nIndirect Blocks:\n"); print.idx = 0; if (tsk_fs_attr_walk(fs_attr_indir, TSK_FS_FILE_WALK_FLAG_AONLY, print_addr_act, (void *) &print)) { tsk_fprintf(hFile, "\nError reading indirect attribute: "); tsk_error_print(hFile); tsk_error_reset(); } else if (print.idx != 0) { tsk_fprintf(hFile, "\n"); } } tsk_fs_file_close(fs_file); return 0; } /* Return 1 on error and 0 on success */ uint8_t ffs_jopen(TSK_FS_INFO * fs, TSK_INUM_T inum) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNSUPFUNC); tsk_error_set_errstr("UFS does not have a journal"); return 1; } uint8_t ffs_jentry_walk(TSK_FS_INFO * fs, int a_flags, TSK_FS_JENTRY_WALK_CB action, void *ptr) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNSUPFUNC); tsk_error_set_errstr("UFS does not have a journal"); return 1; } uint8_t ffs_jblk_walk(TSK_FS_INFO * fs, TSK_DADDR_T start, TSK_DADDR_T end, int a_flags, TSK_FS_JBLK_WALK_CB action, void *ptr) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNSUPFUNC); tsk_error_set_errstr("UFS does not have a journal"); return 1; } /* ffs_close - close a fast file system */ static void ffs_close(TSK_FS_INFO * fs) { FFS_INFO *ffs = (FFS_INFO *) fs; fs->tag = 0; if (ffs->grp_buf) free(ffs->grp_buf); if (ffs->itbl_buf) free(ffs->itbl_buf); tsk_deinit_lock(&ffs->lock); free((char *) ffs->fs.sb1); tsk_fs_free(fs); } /** * \internal * Open part of a disk image as a FFS/UFS file system. 
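 *
 * A minimal caller sketch (the image name is hypothetical; in practice this
 * internal function is normally reached through tsk_fs_open_img() or
 * tsk_fs_open_offset(), and TSK_FS_TYPE_FFS_DETECT from tsk_fs.h is assumed
 * to be an acceptable ftype for auto-detection here):
 *
 *   TSK_IMG_INFO *img = tsk_img_open_sing("disk.dd", TSK_IMG_TYPE_DETECT, 0);
 *   TSK_FS_INFO *fs = (img != NULL) ? ffs_open(img, 0, TSK_FS_TYPE_FFS_DETECT) : NULL;
 *   if (fs != NULL) {
 *       fs->fsstat(fs, stdout);
 *       fs->close(fs);
 *   }
 *   if (img != NULL)
 *       tsk_img_close(img);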
* * @param img_info Disk image to analyze * @param offset Byte offset where file system starts * @param ftype Specific type of file system * @returns NULL on error or if data is not a FFS file system */ TSK_FS_INFO * ffs_open(TSK_IMG_INFO * img_info, TSK_OFF_T offset, TSK_FS_TYPE_ENUM ftype) { char *myname = "ffs_open"; FFS_INFO *ffs; unsigned int len; TSK_FS_INFO *fs; ssize_t cnt; // clean up any error messages that are lying around tsk_error_reset(); if (TSK_FS_TYPE_ISFFS(ftype) == 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("Invalid FS Type in ffs_open"); return NULL; } if ((ffs = (FFS_INFO *) tsk_fs_malloc(sizeof(*ffs))) == NULL) return NULL; fs = &(ffs->fs_info); fs->ftype = ftype; fs->flags = 0; fs->duname = "Fragment"; fs->tag = TSK_FS_INFO_TAG; fs->img_info = img_info; fs->offset = offset; /* Both sbs are the same size */ len = roundup(sizeof(ffs_sb1), img_info->sector_size); ffs->fs.sb1 = (ffs_sb1 *) tsk_malloc(len); if (ffs->fs.sb1 == NULL) { fs->tag = 0; tsk_fs_free((TSK_FS_INFO *)ffs); return NULL; } /* check the magic and figure out the endian ordering */ /* Try UFS2 first - I read somewhere that some upgrades * kept the original UFS1 superblock in addition to * the new one */ cnt = tsk_fs_read (fs, (TSK_OFF_T) UFS2_SBOFF, (char *) ffs->fs.sb2, sizeof(ffs_sb2)); if (cnt != sizeof(ffs_sb2)) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr("%s: Superblock at %" PRIuDADDR, myname, (TSK_OFF_T) UFS2_SBOFF); fs->tag = 0; free(ffs->fs.sb1); tsk_fs_free((TSK_FS_INFO *)ffs); return NULL; } /* If that didn't work, try the 256KB UFS2 location */ if (tsk_fs_guessu32(fs, ffs->fs.sb2->magic, UFS2_FS_MAGIC)) { if (tsk_verbose) fprintf(stderr, "ufs_open: Trying 256KB UFS2 location\n"); cnt = tsk_fs_read (fs, (TSK_OFF_T) UFS2_SBOFF2, (char *) ffs->fs.sb2, sizeof(ffs_sb2)); if (cnt != sizeof(ffs_sb2)) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2("%s: Superblock at %" PRIuDADDR, myname, (TSK_OFF_T) UFS2_SBOFF2); fs->tag = 0; free(ffs->fs.sb1); tsk_fs_free((TSK_FS_INFO *)ffs); return NULL; } /* Try UFS1 if that did not work */ if (tsk_fs_guessu32(fs, ffs->fs.sb2->magic, UFS2_FS_MAGIC)) { if (tsk_verbose) fprintf(stderr, "ufs_open: Trying UFS1 location\n"); cnt = tsk_fs_read (fs, (TSK_OFF_T) UFS1_SBOFF, (char *) ffs->fs.sb1, len); if (cnt != len) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2("%s: Superblock at %" PRIuDADDR, myname, (TSK_OFF_T) UFS1_SBOFF); fs->tag = 0; free(ffs->fs.sb1); tsk_fs_free((TSK_FS_INFO *)ffs); return NULL; } if (tsk_fs_guessu32(fs, ffs->fs.sb1->magic, UFS1_FS_MAGIC)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_MAGIC); tsk_error_set_errstr("No UFS Magic Found"); if (tsk_verbose) fprintf(stderr, "ufs_open: No UFS magic found\n"); fs->tag = 0; free(ffs->fs.sb1); tsk_fs_free((TSK_FS_INFO *)ffs); return NULL; } else { // @@@ NEED TO DIFFERENTIATE BETWEEN A & B - UID/GID location in inode fs->ftype = TSK_FS_TYPE_FFS1; } } else { fs->ftype = TSK_FS_TYPE_FFS2; } } else { fs->ftype = TSK_FS_TYPE_FFS2; } /* * Translate some filesystem-specific information to generic form. 
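 * Note that TSK addresses UFS/FFS in fragments: fs->block_size is set to
 * the fragment size below, while ffs->ffsbsize_b holds the full block size.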
*/ if (fs->ftype == TSK_FS_TYPE_FFS2) { fs->block_count = tsk_gets64(fs->endian, ffs->fs.sb2->frag_num); fs->block_size = tsk_gets32(fs->endian, ffs->fs.sb2->fsize_b); ffs->ffsbsize_b = tsk_gets32(fs->endian, ffs->fs.sb2->bsize_b); ffs->ffsbsize_f = tsk_gets32(fs->endian, ffs->fs.sb2->bsize_frag); ffs->groups_count = tsk_gets32(fs->endian, ffs->fs.sb2->cg_num); } else { fs->block_count = tsk_gets32(fs->endian, ffs->fs.sb1->frag_num); fs->block_size = tsk_gets32(fs->endian, ffs->fs.sb1->fsize_b); ffs->ffsbsize_b = tsk_gets32(fs->endian, ffs->fs.sb1->bsize_b); ffs->ffsbsize_f = tsk_gets32(fs->endian, ffs->fs.sb1->bsize_frag); ffs->groups_count = tsk_gets32(fs->endian, ffs->fs.sb1->cg_num); } /* * Block calculations */ fs->first_block = 0; fs->last_block = fs->last_block_act = fs->block_count - 1; fs->dev_bsize = img_info->sector_size; // determine the last block we have in this image if ((TSK_DADDR_T) ((img_info->size - offset) / fs->block_size) < fs->block_count) fs->last_block_act = (img_info->size - offset) / fs->block_size - 1; if ((fs->block_size % 512) || (ffs->ffsbsize_b % 512)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_MAGIC); tsk_error_set_errstr ("Not a UFS FS (invalid fragment or block size)"); if (tsk_verbose) fprintf(stderr, "ufs_open: invalid fragment or block size\n"); fs->tag = 0; free(ffs->fs.sb1); tsk_fs_free((TSK_FS_INFO *)ffs); return NULL; } if ((ffs->ffsbsize_b / fs->block_size) != ffs->ffsbsize_f) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_MAGIC); tsk_error_set_errstr("Not a UFS FS (frag / block size mismatch)"); if (tsk_verbose) fprintf(stderr, "ufs_open: fragment / block size mismatch\n"); fs->tag = 0; free(ffs->fs.sb1); tsk_fs_free((TSK_FS_INFO *)ffs); return NULL; } // Inode / meta data calculations if (fs->ftype == TSK_FS_TYPE_FFS2) { fs->inum_count = ffs->groups_count * tsk_gets32(fs->endian, ffs->fs.sb2->cg_inode_num) + 1; // we are adding 1 in this calc to account for Orphans directory } else { fs->inum_count = ffs->groups_count * tsk_gets32(fs->endian, ffs->fs.sb1->cg_inode_num) + 1; // we are adding 1 in this calc to account for Orphans directory } fs->root_inum = FFS_ROOTINO; fs->first_inum = FFS_FIRSTINO; fs->last_inum = fs->inum_count - 1; /* Volume ID - in the same place for both types */ for (fs->fs_id_used = 0; fs->fs_id_used < 8; fs->fs_id_used++) { fs->fs_id[fs->fs_id_used] = ffs->fs.sb1->fs_id[fs->fs_id_used]; } // set the function pointers fs->inode_walk = ffs_inode_walk; fs->block_walk = ffs_block_walk; fs->block_getflags = ffs_block_getflags; fs->get_default_attr_type = tsk_fs_unix_get_default_attr_type; fs->load_attrs = tsk_fs_unix_make_data_run; fs->name_cmp = tsk_fs_unix_name_cmp; fs->file_add_meta = ffs_inode_lookup; fs->dir_open_meta = ffs_dir_open_meta; fs->fsstat = ffs_fsstat; fs->fscheck = ffs_fscheck; fs->istat = ffs_istat; fs->close = ffs_close; fs->jblk_walk = ffs_jblk_walk; fs->jentry_walk = ffs_jentry_walk; fs->jopen = ffs_jopen; fs->journ_inum = 0; // initialize caches ffs->grp_buf = NULL; ffs->grp_num = 0xffffffff; ffs->grp_addr = 0; ffs->itbl_buf = NULL; ffs->itbl_addr = 0; /* * Print some stats. 
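 * (written to stderr, and only when tsk_verbose is enabled)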
*/ if (tsk_verbose) tsk_fprintf(stderr, "inodes %" PRIuINUM " root ino %" PRIuINUM " cyl groups %" PRId32 " blocks %" PRIuDADDR "\n", fs->inum_count, fs->root_inum, ffs->groups_count, fs->block_count); tsk_init_lock(&ffs->lock); return (fs); } sleuthkit-4.2.0/tsk/fs/ffs_dent.c000644 000765 000024 00000025746 12576320700 017510 0ustar00carrierstaff000000 000000 /* ** ffs_dent ** The Sleuth Kit ** ** File name layer for a FFS/UFS image ** ** Brian Carrier [carrier sleuthkit [dot] org] ** Copyright (c) 2006-2011 Brian Carrier, Basis Technology. All Rights reserved ** Copyright (c) 2003-2006 Brian Carrier. All rights reserved ** ** TASK ** Copyright (c) 2002 Brian Carrier, @stake Inc. All rights reserved ** ** TCTUTILs ** Copyright (c) 2001 Brian Carrier. All rights reserved ** ** ** This software is distributed under the Common Public License 1.0 ** */ /** * \file ffs_dent.c * Contains the internal TSK UFS/FFS file name (directory entry) processing functions */ #include #include "tsk_fs_i.h" #include "tsk_ffs.h" static uint8_t ffs_dent_copy(FFS_INFO * ffs, char *ffs_dent, TSK_FS_NAME * fs_name) { TSK_FS_INFO *a_fs = &(ffs->fs_info); /* this one has the type field */ if ((a_fs->ftype == TSK_FS_TYPE_FFS1) || (a_fs->ftype == TSK_FS_TYPE_FFS2)) { ffs_dentry1 *dir = (ffs_dentry1 *) ffs_dent; fs_name->meta_addr = tsk_getu32(a_fs->endian, dir->d_ino); if (fs_name->name_size != FFS_MAXNAMLEN) { if (tsk_fs_name_realloc(fs_name, FFS_MAXNAMLEN)) return 1; } /* ffs null terminates so we can strncpy */ strncpy(fs_name->name, dir->d_name, fs_name->name_size); switch (dir->d_type) { case FFS_DT_REG: fs_name->type = TSK_FS_NAME_TYPE_REG; break; case FFS_DT_DIR: fs_name->type = TSK_FS_NAME_TYPE_DIR; break; case FFS_DT_CHR: fs_name->type = TSK_FS_NAME_TYPE_CHR; break; case FFS_DT_BLK: fs_name->type = TSK_FS_NAME_TYPE_BLK; break; case FFS_DT_FIFO: fs_name->type = TSK_FS_NAME_TYPE_FIFO; break; case FFS_DT_SOCK: fs_name->type = TSK_FS_NAME_TYPE_SOCK; break; case FFS_DT_LNK: fs_name->type = TSK_FS_NAME_TYPE_LNK; break; case FFS_DT_WHT: fs_name->type = TSK_FS_NAME_TYPE_WHT; break; case FFS_DT_UNKNOWN: default: fs_name->type = TSK_FS_NAME_TYPE_UNDEF; break; } } else if (a_fs->ftype == TSK_FS_TYPE_FFS1B) { ffs_dentry2 *dir = (ffs_dentry2 *) ffs_dent; fs_name->meta_addr = tsk_getu32(a_fs->endian, dir->d_ino); if (fs_name->name_size != FFS_MAXNAMLEN) { if (tsk_fs_name_realloc(fs_name, FFS_MAXNAMLEN)) return 1; } /* ffs null terminates so we can strncpy */ strncpy(fs_name->name, dir->d_name, fs_name->name_size); fs_name->type = TSK_FS_NAME_TYPE_UNDEF; } else { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("ffs_dent_copy: Unknown FS type"); return 1; } fs_name->flags = 0; return 0; } /* * @param a_is_del Set to 1 if block is from a deleted directory. 
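 * Entries found in such a block are reported with the unallocated name flag.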
*/ static TSK_RETVAL_ENUM ffs_dent_parse_block(FFS_INFO * ffs, TSK_FS_DIR * fs_dir, uint8_t a_is_del, char *buf, unsigned int len) { unsigned int idx; unsigned int inode = 0, dellen = 0, reclen = 0; unsigned int minreclen = 4; TSK_FS_INFO *fs = &(ffs->fs_info); char *dirPtr; TSK_FS_NAME *fs_name; if ((fs_name = tsk_fs_name_alloc(FFS_MAXNAMLEN + 1, 0)) == NULL) return TSK_ERR; /* update each time by the actual length instead of the ** recorded length so we can view the deleted entries */ for (idx = 0; idx <= len - FFS_DIRSIZ_lcl(1); idx += minreclen) { unsigned int namelen = 0; dirPtr = (char *) &buf[idx]; /* copy to local variables */ if ((fs->ftype == TSK_FS_TYPE_FFS1) || (fs->ftype == TSK_FS_TYPE_FFS2)) { ffs_dentry1 *dir = (ffs_dentry1 *) dirPtr; inode = tsk_getu32(fs->endian, dir->d_ino); namelen = dir->d_namlen; reclen = tsk_getu16(fs->endian, dir->d_reclen); } /* TSK_FS_TYPE_FFS1B */ else if (fs->ftype == TSK_FS_TYPE_FFS1B) { ffs_dentry2 *dir = (ffs_dentry2 *) dirPtr; inode = tsk_getu32(fs->endian, dir->d_ino); namelen = tsk_getu16(fs->endian, dir->d_namlen); reclen = tsk_getu16(fs->endian, dir->d_reclen); } /* what is the minimum size needed for this entry */ minreclen = FFS_DIRSIZ_lcl(namelen); /* Perform a couple sanity checks ** OpenBSD never zeros the inode number, but solaris ** does. These checks will hopefully catch all non ** entries */ if ((inode > fs->last_inum) || // inode is unsigned (namelen > FFS_MAXNAMLEN) || // namelen is unsigned (namelen == 0) || (reclen < minreclen) || (reclen % 4) || (idx + reclen > len)) { /* we don't have a valid entry, so skip ahead 4 */ minreclen = 4; if (dellen > 0) dellen -= 4; continue; } /* Before we process an entry in unallocated space, make * sure that it also ends in the unalloc space */ if ((dellen) && (dellen < minreclen)) { minreclen = 4; dellen -= 4; continue; } /* the entry is valid */ if (ffs_dent_copy(ffs, dirPtr, fs_name)) { tsk_fs_name_free(fs_name); return TSK_ERR; } /* Do we have a deleted entry? (are we in a deleted space) */ if ((dellen > 0) || (inode == 0) || (a_is_del)) { fs_name->flags = TSK_FS_NAME_FLAG_UNALLOC; if (dellen) dellen -= minreclen; } else { fs_name->flags = TSK_FS_NAME_FLAG_ALLOC; } if (tsk_fs_dir_add(fs_dir, fs_name)) { tsk_fs_name_free(fs_name); return TSK_ERR; } /* If we have some slack and an entry could exist in it, the set dellen */ if (dellen <= 0) { if (reclen - minreclen >= FFS_DIRSIZ_lcl(1)) dellen = reclen - minreclen; else minreclen = reclen; } } tsk_fs_name_free(fs_name); return TSK_OK; } /* end ffs_dent_parse_block */ /** \internal * Process a directory and load up FS_DIR with the entries. If a pointer to * an already allocated FS_DIR struture is given, it will be cleared. If no existing * FS_DIR structure is passed (i.e. NULL), then a new one will be created. If the return * value is error or corruption, then the FS_DIR structure could * have entries (depending on when the error occured). * * @param a_fs File system to analyze * @param a_fs_dir Pointer to FS_DIR pointer. Can contain an already allocated * structure or a new structure. * @param a_addr Address of directory to process. * @returns error, corruption, ok etc. */ TSK_RETVAL_ENUM ffs_dir_open_meta(TSK_FS_INFO * a_fs, TSK_FS_DIR ** a_fs_dir, TSK_INUM_T a_addr) { TSK_OFF_T size; FFS_INFO *ffs = (FFS_INFO *) a_fs; char *dirbuf; int nchnk, cidx; TSK_FS_LOAD_FILE load_file; TSK_FS_DIR *fs_dir; /* If we get corruption in one of the blocks, then continue processing. * retval_final will change when corruption is detected. 
Errors are * returned immediately. */ TSK_RETVAL_ENUM retval_tmp; TSK_RETVAL_ENUM retval_final = TSK_OK; if (a_addr < a_fs->first_inum || a_addr > a_fs->last_inum) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); tsk_error_set_errstr("ffs_dir_open_meta: Invalid inode value: %" PRIuINUM, a_addr); return TSK_ERR; } else if (a_fs_dir == NULL) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("ffs_dir_open_meta: NULL fs_attr argument given"); return TSK_ERR; } if (tsk_verbose) tsk_fprintf(stderr, "ffs_dir_open_meta: Processing directory %" PRIuINUM "\n", a_addr); fs_dir = *a_fs_dir; if (fs_dir) { tsk_fs_dir_reset(fs_dir); fs_dir->addr = a_addr; } else { if ((*a_fs_dir = fs_dir = tsk_fs_dir_alloc(a_fs, a_addr, 128)) == NULL) { return TSK_ERR; } } // handle the orphan directory if its contents were requested if (a_addr == TSK_FS_ORPHANDIR_INUM(a_fs)) { return tsk_fs_dir_find_orphans(a_fs, fs_dir); } if ((fs_dir->fs_file = tsk_fs_file_open_meta(a_fs, NULL, a_addr)) == NULL) { tsk_error_reset(); tsk_error_errstr2_concat("- ffs_dir_open_meta"); return TSK_COR; } /* make a copy of the directory contents that we can process */ /* round up cause we want the slack space too */ size = roundup(fs_dir->fs_file->meta->size, FFS_DIRBLKSIZ); if ((dirbuf = tsk_malloc((size_t) size)) == NULL) { return TSK_ERR; } load_file.total = load_file.left = (size_t) size; load_file.base = load_file.cur = dirbuf; if (tsk_fs_file_walk(fs_dir->fs_file, TSK_FS_FILE_WALK_FLAG_SLACK, tsk_fs_load_file_action, (void *) &load_file)) { tsk_error_reset(); tsk_error_errstr2_concat("- ffs_dir_open_meta"); free(dirbuf); return TSK_COR; } /* Not all of the directory was copied, so we return */ if (load_file.left > 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_FWALK); tsk_error_set_errstr("ffs_dir_open_meta: Error reading directory %" PRIuINUM, a_addr); free(dirbuf); return TSK_COR; } /* Directory entries are written in chunks of DIRBLKSIZ ** determine how many chunks of this size we have to read to ** get a full block ** ** Entries do not cross over the DIRBLKSIZ boundary */ nchnk = (int) (size) / (FFS_DIRBLKSIZ) + 1; for (cidx = 0; cidx < nchnk && (int64_t) size > 0; cidx++) { int len = (FFS_DIRBLKSIZ < size) ? FFS_DIRBLKSIZ : (int) size; retval_tmp = ffs_dent_parse_block(ffs, fs_dir, (fs_dir->fs_file->meta-> flags & TSK_FS_META_FLAG_UNALLOC) ? 1 : 0, dirbuf + cidx * FFS_DIRBLKSIZ, len); if (retval_tmp == TSK_ERR) { retval_final = TSK_ERR; break; } else if (retval_tmp == TSK_COR) { retval_final = TSK_COR; } size -= len; } free(dirbuf); // if we are listing the root directory, add the Orphan directory entry if (a_addr == a_fs->root_inum) { TSK_FS_NAME *fs_name = tsk_fs_name_alloc(256, 0); if (fs_name == NULL) return TSK_ERR; if (tsk_fs_dir_make_orphan_dir_name(a_fs, fs_name)) { tsk_fs_name_free(fs_name); return TSK_ERR; } if (tsk_fs_dir_add(fs_dir, fs_name)) { tsk_fs_name_free(fs_name); return TSK_ERR; } tsk_fs_name_free(fs_name); } return retval_final; } sleuthkit-4.2.0/tsk/fs/fls_lib.c000644 000765 000024 00000020434 12576320700 017317 0ustar00carrierstaff000000 000000 /* ** fls ** The Sleuth Kit ** ** Given an image and directory inode, display the file names and ** directories that exist (both active and deleted) ** ** Brian Carrier [carrier sleuthkit [dot] org] ** Copyright (c) 2006-2011 Brian Carrier, Basis Technology. All Rights reserved ** Copyright (c) 2003-2005 Brian Carier. All rights reserved ** ** TASK ** Copyright (c) 2002 @stake Inc. 
All rights reserved ** ** TCTUTILs ** Copyright (c) 2001 Brian Carrier. All rights reserved ** ** ** This software is distributed under the Common Public License 1.0 ** */ /** \file fls_lib.c * Contains the library code associated with the TSK fls tool to list files in a directory. */ #include "tsk_fs_i.h" /** \internal * Structure to store data for callbacks. */ typedef struct { /* Time skew of the system in seconds */ int32_t sec_skew; /*directory prefix for printing mactime output */ char *macpre; int flags; } FLS_DATA; /* this is a wrapper type function that takes care of the runtime * flags * * fs_attr should be set to NULL for all non-NTFS file systems */ static void printit(TSK_FS_FILE * fs_file, const char *a_path, const TSK_FS_ATTR * fs_attr, const FLS_DATA * fls_data) { TSK_FS_HASH_RESULTS hash_results; unsigned char null_buf[16]; unsigned int i; if ((!(fls_data->flags & TSK_FS_FLS_FULL)) && (a_path)) { uint8_t printed = 0; // lazy way to find out how many dirs there could be for (i = 0; a_path[i] != '\0'; i++) { if ((a_path[i] == '/') && (i != 0)) { tsk_fprintf(stdout, "+"); printed = 1; } } if (printed) tsk_fprintf(stdout, " "); } if (fls_data->flags & TSK_FS_FLS_MAC) { if (fls_data->flags & TSK_FS_FLS_HASH) { if(0 == tsk_fs_file_hash_calc(fs_file, &hash_results, TSK_BASE_HASH_MD5)){ tsk_fs_name_print_mac_md5(stdout, fs_file, a_path, fs_attr, fls_data->macpre, fls_data->sec_skew, hash_results.md5_digest); } else{ // If the hash calculation had errors, pass in a buffer of nulls memset(null_buf, 0, 16); tsk_fs_name_print_mac_md5(stdout, fs_file, a_path, fs_attr, fls_data->macpre, fls_data->sec_skew, null_buf); } } else { tsk_fs_name_print_mac(stdout, fs_file, a_path, fs_attr, fls_data->macpre, fls_data->sec_skew); } } else if (fls_data->flags & TSK_FS_FLS_LONG) { tsk_fs_name_print_long(stdout, fs_file, a_path, fs_file->fs_info, fs_attr, TSK_FS_FLS_FULL & fls_data->flags ? 1 : 0, fls_data->sec_skew); } else { tsk_fs_name_print(stdout, fs_file, a_path, fs_file->fs_info, fs_attr, TSK_FS_FLS_FULL & fls_data->flags ? 1 : 0); tsk_printf("\n"); } } /* * call back action function for dent_walk */ static TSK_WALK_RET_ENUM print_dent_act(TSK_FS_FILE * fs_file, const char *a_path, void *ptr) { FLS_DATA *fls_data = (FLS_DATA *) ptr; /* only print dirs if TSK_FS_FLS_DIR is set and only print everything ** else if TSK_FS_FLS_FILE is set (or we aren't sure what it is) */ if (((fls_data->flags & TSK_FS_FLS_DIR) && ((fs_file->meta) && (fs_file->meta->type == TSK_FS_META_TYPE_DIR))) || ((fls_data->flags & TSK_FS_FLS_FILE) && (((fs_file->meta) && (fs_file->meta->type != TSK_FS_META_TYPE_DIR)) || (!fs_file->meta)))) { /* Make a special case for NTFS so we can identify all of the * alternate data streams! */ if ((TSK_FS_TYPE_ISNTFS(fs_file->fs_info->ftype)) && (fs_file->meta)) { uint8_t printed = 0; int i, cnt; // cycle through the attributes cnt = tsk_fs_file_attr_getsize(fs_file); for (i = 0; i < cnt; i++) { const TSK_FS_ATTR *fs_attr = tsk_fs_file_attr_get_idx(fs_file, i); if (!fs_attr) continue; if (fs_attr->type == TSK_FS_ATTR_TYPE_NTFS_DATA) { printed = 1; if (fs_file->meta->type == TSK_FS_META_TYPE_DIR) { /* we don't want to print the ..:blah stream if * the -a flag was not given */ if ((fs_file->name->name[0] == '.') && (fs_file->name->name[1]) && (fs_file->name->name[2] == '\0') && ((fls_data->flags & TSK_FS_FLS_DOT) == 0)) { continue; } } printit(fs_file, a_path, fs_attr, fls_data); } else if (fs_attr->type == TSK_FS_ATTR_TYPE_NTFS_IDXROOT) { printed = 1; /* If it is . or .. 
only print it if the flags say so, * we continue with other streams though in case the * directory has a data stream */ if (!((TSK_FS_ISDOT(fs_file->name->name)) && ((fls_data->flags & TSK_FS_FLS_DOT) == 0))) printit(fs_file, a_path, fs_attr, fls_data); } /* Print the FILE_NAME times if this is the same attribute * that we collected the times from. */ else if ((fs_attr->type == TSK_FS_ATTR_TYPE_NTFS_FNAME) && (fs_attr->id == fs_file->meta->time2.ntfs.fn_id) && (fls_data->flags & TSK_FS_FLS_MAC)) { /* If it is . or .. only print it if the flags say so, * we continue with other streams though in case the * directory has a data stream */ if (!((TSK_FS_ISDOT(fs_file->name->name)) && ((fls_data->flags & TSK_FS_FLS_DOT) == 0))) printit(fs_file, a_path, fs_attr, fls_data); } } /* A user reported that an allocated file had the standard * attributes, but no $Data. We should print something */ if (printed == 0) { printit(fs_file, a_path, NULL, fls_data); } } else { /* skip it if it is . or .. and we don't want them */ if (!((TSK_FS_ISDOT(fs_file->name->name)) && ((fls_data->flags & TSK_FS_FLS_DOT) == 0))) printit(fs_file, a_path, NULL, fls_data); } } return TSK_WALK_CONT; } /* Returns 0 on success and 1 on error */ uint8_t tsk_fs_fls(TSK_FS_INFO * fs, TSK_FS_FLS_FLAG_ENUM lclflags, TSK_INUM_T inode, TSK_FS_DIR_WALK_FLAG_ENUM flags, TSK_TCHAR * tpre, int32_t skew) { FLS_DATA data; data.flags = lclflags; data.sec_skew = skew; #ifdef TSK_WIN32 { size_t clen; UTF8 *ptr8; UTF16 *ptr16; int retval; if ((tpre != NULL) && (TSTRLEN(tpre) > 0)) { clen = TSTRLEN(tpre) * 4; data.macpre = (char *) tsk_malloc(clen); if (data.macpre == NULL) { return 1; } ptr8 = (UTF8 *) data.macpre; ptr16 = (UTF16 *) tpre; retval = tsk_UTF16toUTF8_lclorder((const UTF16 **) &ptr16, (UTF16 *) & ptr16[TSTRLEN(tpre) + 1], &ptr8, (UTF8 *) ((uintptr_t) ptr8 + clen), TSKlenientConversion); if (retval != TSKconversionOK) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNICODE); tsk_error_set_errstr ("Error converting fls mactime pre-text to UTF-8 %d\n", retval); return 1; } } else { data.macpre = (char *) tsk_malloc(1); if (data.macpre == NULL) { return 1; } data.macpre[0] = '\0'; } retval = tsk_fs_dir_walk(fs, inode, flags, print_dent_act, &data); free(data.macpre); data.macpre = NULL; return retval; } #else data.macpre = tpre; return tsk_fs_dir_walk(fs, inode, flags, print_dent_act, &data); #endif } sleuthkit-4.2.0/tsk/fs/fs_attr.c000644 000765 000024 00000117511 12576320700 017352 0ustar00carrierstaff000000 000000 /* ** fs_attr ** The Sleuth Kit ** ** Brian Carrier [carrier sleuthkit [dot] org] ** Copyright (c) 2006-2011 Brian Carrier, Basis Technology. All Rights reserved ** Copyright (c) 2003-2005 Brian Carrier. All rights reserved ** ** TASK ** Copyright (c) 2002 Brian Carrier, @stake Inc. All rights reserved ** ** ** This software is distributed under the Common Public License 1.0 ** */ /** * \file fs_attr.c * Functions to allocate and add structures to maintain generic file * system attributes and run lists. */ /* * The TSK_FS_ATTR structure is motivated by NTFS. NTFS (and others) allow * one to have more than one data area per file. Furthermore, there is * more than one way to store the data (resident in the MFT entry or * in the Data Area runs). To handle this in * a generic format, the TSK_FS_ATTR structure was created. 
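 *
 * A minimal consumer sketch (everything other than the TSK names is
 * illustrative):
 *
 *   const TSK_FS_ATTR *attr = ...;   // e.g. from tsk_fs_file_attr_get_idx()
 *   if (attr->flags & TSK_FS_ATTR_RES) {
 *       // resident: the data itself is in attr->rd.buf (attr->size bytes)
 *   }
 *   else if (attr->flags & TSK_FS_ATTR_NONRES) {
 *       // non-resident: the data is described by the attr->nrd.run list
 *   }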
* * TSK_FS_ATTR structures have a type and id that describe it and then * a flag identifies it as a resident stream or a non-resident run * They form a linked list and are added to the TSK_FS_META structure */ #include "tsk_fs_i.h" /** * \internal * Allocate a run list entry. * * @returns NULL on error */ TSK_FS_ATTR_RUN * tsk_fs_attr_run_alloc() { TSK_FS_ATTR_RUN *fs_attr_run = (TSK_FS_ATTR_RUN *) tsk_malloc(sizeof(TSK_FS_ATTR_RUN)); if (fs_attr_run == NULL) return NULL; return fs_attr_run; } /** * \internal * Free a list of data_runs * * @param fs_attr_run Head of list to free */ void tsk_fs_attr_run_free(TSK_FS_ATTR_RUN * fs_attr_run) { TSK_FS_ATTR_RUN *fs_attr_run_prev; while (fs_attr_run) { fs_attr_run_prev = fs_attr_run; fs_attr_run = fs_attr_run->next; fs_attr_run_prev->next = NULL; free(fs_attr_run_prev); } } /** * \internal * Allocates and initializes a new structure. * * @param type The type of attribute to create (Resident or Non-resident) * @returns NULL on error */ TSK_FS_ATTR * tsk_fs_attr_alloc(TSK_FS_ATTR_FLAG_ENUM type) { TSK_FS_ATTR *fs_attr = (TSK_FS_ATTR *) tsk_malloc(sizeof(TSK_FS_ATTR)); if (fs_attr == NULL) { return NULL; } fs_attr->name_size = 128; if ((fs_attr->name = (char *) tsk_malloc(fs_attr->name_size)) == NULL) { free(fs_attr); return NULL; } if (type == TSK_FS_ATTR_NONRES) { fs_attr->flags = (TSK_FS_ATTR_NONRES | TSK_FS_ATTR_INUSE); } else if (type == TSK_FS_ATTR_RES) { fs_attr->rd.buf_size = 1024; fs_attr->rd.buf = (uint8_t *) tsk_malloc(fs_attr->rd.buf_size); if (fs_attr->rd.buf == NULL) { free(fs_attr->name); return NULL; } fs_attr->flags = (TSK_FS_ATTR_RES | TSK_FS_ATTR_INUSE); } else { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("tsk_fs_attr_alloc: Invalid Type: %d\n", type); return NULL; } return fs_attr; } /** * \internal * Free a single TSK_FS_ATTR structure. This does not free the linked list. * * @param a_fs_attr Structure to free. */ void tsk_fs_attr_free(TSK_FS_ATTR * a_fs_attr) { if (a_fs_attr == NULL) return; if (a_fs_attr->nrd.run) tsk_fs_attr_run_free(a_fs_attr->nrd.run); a_fs_attr->nrd.run = NULL; if (a_fs_attr->rd.buf) free(a_fs_attr->rd.buf); a_fs_attr->rd.buf = NULL; if (a_fs_attr->name) free(a_fs_attr->name); a_fs_attr->name = NULL; free(a_fs_attr); } /** * \internal * Clear the run_lists fields of a single FS_DATA structure * * @param a_fs_attr Structure to clear for reuse */ void tsk_fs_attr_clear(TSK_FS_ATTR * a_fs_attr) { a_fs_attr->size = a_fs_attr->type = a_fs_attr->id = a_fs_attr->flags = 0; if (a_fs_attr->nrd.run) { tsk_fs_attr_run_free(a_fs_attr->nrd.run); a_fs_attr->nrd.run = NULL; a_fs_attr->nrd.run_end = NULL; a_fs_attr->nrd.allocsize = 0; a_fs_attr->nrd.initsize = 0; } } /** * Add a name to an existing FS_DATA structure. Will reallocate * space for the name if needed. * * @param fs_attr Structure to add name to * @param name UTF-8 name to add * * @return 1 on error and 0 on success */ static uint8_t fs_attr_put_name(TSK_FS_ATTR * fs_attr, const char *name) { if ((name == NULL) || (strlen(name) == 0)) { if (fs_attr->name_size > 0) { free(fs_attr->name); fs_attr->name_size = 0; } fs_attr->name = NULL; return 0; } if (fs_attr->name_size < (strlen(name) + 1)) { fs_attr->name = tsk_realloc(fs_attr->name, strlen(name) + 1); if (fs_attr->name == NULL) return 1; fs_attr->name_size = strlen(name) + 1; } strncpy(fs_attr->name, name, fs_attr->name_size); return 0; } /** * \internal * Copy resident data to an attribute. 
* * @param a_fs_attr Attribute to add data to (cannot be NULL) * @param name Name of the attribute to set * @param type Type of the attribute to set * @param id Id of the attribute to set * @param res_data Pointer to where resident data is located (data will * be copied from here into FS_DATA) * @param len Length of resident data * @return 1 on error and 0 on success */ uint8_t tsk_fs_attr_set_str(TSK_FS_FILE * a_fs_file, TSK_FS_ATTR * a_fs_attr, const char *name, TSK_FS_ATTR_TYPE_ENUM type, uint16_t id, void *res_data, size_t len) { if (a_fs_attr == NULL) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("Null fs_attr in tsk_fs_attr_set_str"); return 1; } a_fs_attr->fs_file = a_fs_file; a_fs_attr->flags = (TSK_FS_ATTR_INUSE | TSK_FS_ATTR_RES); a_fs_attr->type = type; a_fs_attr->id = id; a_fs_attr->nrd.compsize = 0; if (fs_attr_put_name(a_fs_attr, name)) { return 1; } if (a_fs_attr->rd.buf_size < len) { a_fs_attr->rd.buf = (uint8_t *) tsk_realloc((char *) a_fs_attr->rd.buf, len); if (a_fs_attr->rd.buf == NULL) return 1; a_fs_attr->rd.buf_size = len; } memset(a_fs_attr->rd.buf, 0, a_fs_attr->rd.buf_size); memcpy(a_fs_attr->rd.buf, res_data, len); a_fs_attr->size = len; return 0; } /** * \internal * Set the needed fields along with an initial run list for a data attribute. To add more * runs, use ...._add_run(). * * @param a_fs_file File to add attribute to * @param a_fs_attr The data attribute to initialize and add the run to * @param a_data_run_new The set of runs to add (can be NULL). * @param name Name of the attribute (can be NULL) * @param type Type of attribute to add run to * @param id Id of attribute to add run to * @param size Total size of the attribute (can be larger than length of initial run being added) * @param init_size Number of bytes in attribute that have been initialized (less then or equal to size) * (note that this sets the initialized size for the attribute and it will not be updated as more runs are added). * @param alloc_size Allocated size of the attribute (>= size). Identifies the slack space. * (note that this sets the allocated size for the attribute and it will not be updated as more runs are added). * @param flags Flags about compression, sparse etc. 
of data * @param compsize Compression unit size (in case it needs to be created) * * @returns 1 on error and 0 on success */ uint8_t tsk_fs_attr_set_run(TSK_FS_FILE * a_fs_file, TSK_FS_ATTR * a_fs_attr, TSK_FS_ATTR_RUN * a_data_run_new, const char *name, TSK_FS_ATTR_TYPE_ENUM type, uint16_t id, TSK_OFF_T size, TSK_OFF_T init_size, TSK_OFF_T alloc_size, TSK_FS_ATTR_FLAG_ENUM flags, uint32_t compsize) { if ((a_fs_file == NULL) || (a_fs_file->meta == NULL)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("Null fs_file in tsk_fs_attr_set_run"); return 1; } if (a_fs_attr == NULL) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("Null fs_attr in tsk_fs_attr_set_run"); return 1; } if (alloc_size < size) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("tsk_fs_attr_set_run: alloc_size (%" PRIuOFF ") is less than size (%" PRIuOFF ")", alloc_size, size); return 1; } a_fs_attr->fs_file = a_fs_file; a_fs_attr->flags = (TSK_FS_ATTR_INUSE | TSK_FS_ATTR_NONRES | flags); a_fs_attr->type = type; a_fs_attr->id = id; a_fs_attr->size = size; a_fs_attr->nrd.allocsize = alloc_size; a_fs_attr->nrd.initsize = init_size; a_fs_attr->nrd.compsize = compsize; if (fs_attr_put_name(a_fs_attr, name)) { return 1; } /* Add the a_data_run_new to the attribute. */ /* We support the ODD case where the run is NULL. In this case, * we set the attribute size info, but set everything else to NULL. */ if (a_data_run_new == NULL) { a_fs_attr->nrd.run = NULL; a_fs_attr->nrd.run_end = NULL; return 0; } /* * If this is not in the begining, then we need to make a filler * to account for the cluster numbers we haven't seen yet * * This commonly happens when we process an MFT entry that * is not a base entry and it is referenced in an $ATTR_LIST * * The $DATA attribute in the non-base have a non-zero * a_data_run_new->offset. */ if (a_data_run_new->offset != 0) { TSK_FS_ATTR_RUN *fill_run = tsk_fs_attr_run_alloc(); fill_run->flags = TSK_FS_ATTR_RUN_FLAG_FILLER; fill_run->offset = 0; fill_run->addr = 0; fill_run->len = a_data_run_new->offset; fill_run->next = a_data_run_new; a_data_run_new = fill_run; } a_fs_attr->nrd.run = a_data_run_new; // update the pointer to the end of the list a_fs_attr->nrd.run_end = a_data_run_new; while (a_fs_attr->nrd.run_end->next) { a_fs_attr->nrd.run_end = a_fs_attr->nrd.run_end->next; } return 0; } static void dump_attr(TSK_FS_ATTR * a_fs_attr) { TSK_FS_ATTR_RUN *cur_run; cur_run = a_fs_attr->nrd.run; fprintf(stderr, "Attribute Run Dump:\n"); for (cur_run = a_fs_attr->nrd.run; cur_run; cur_run = cur_run->next) { fprintf(stderr, " %" PRIuDADDR " to %" PRIuDADDR " %sFiller\n", cur_run->offset, cur_run->offset + cur_run->len - 1, (cur_run->flags & TSK_FS_ATTR_RUN_FLAG_FILLER) ? "" : "Not"); } } /** * \internal * Add a set of consecutive runs to an attribute. This will add and remove FILLER entries * as needed and update internal variables. * * @param a_fs File system run is from * @param fs_attr Attribute to add run to * @param a_data_run_new The set of runs to add. 
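 *
 * Typical usage sketch (variables are illustrative): the attribute is first
 * established with tsk_fs_attr_set_run() and later runs are appended here as
 * they are discovered:
 *
 *   tsk_fs_attr_set_run(fs_file, fs_attr, first_run, NULL, type, id,
 *       size, size, alloc_size, 0, 0);
 *   ...
 *   tsk_fs_attr_add_run(fs, fs_attr, next_run);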
* * @returns 1 on error and 0 on succes */ uint8_t tsk_fs_attr_add_run(TSK_FS_INFO * a_fs, TSK_FS_ATTR * a_fs_attr, TSK_FS_ATTR_RUN * a_data_run_new) { TSK_FS_ATTR_RUN *data_run_cur, *data_run_prev; TSK_DADDR_T run_len; tsk_error_reset(); if (a_fs_attr == NULL) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("tsk_fs_attr_add_run: Error, a_fs_attr is NULL"); return 1; } // we only support the case of a null run if it is the only run... if (a_data_run_new == NULL) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("tsk_fs_attr_add_run: Error, a_data_run_new is NULL (%" PRIuINUM ")", a_fs_attr->fs_file->meta->addr); return 1; } run_len = 0; data_run_cur = a_data_run_new; while (data_run_cur) { run_len += data_run_cur->len; data_run_cur = data_run_cur->next; } /* First thing, is to check if we can just add it to the end */ if ((a_fs_attr->nrd.run_end) && (a_fs_attr->nrd.run_end->offset + a_fs_attr->nrd.run_end->len == a_data_run_new->offset)) { a_fs_attr->nrd.run_end->next = a_data_run_new; // update the pointer to the end of the list while (a_fs_attr->nrd.run_end->next) a_fs_attr->nrd.run_end = a_fs_attr->nrd.run_end->next; /* return head of a_fs_attr list */ return 0; } // cycle through existing runs and see if we can add this into a filler spot data_run_cur = a_fs_attr->nrd.run; data_run_prev = NULL; while (data_run_cur) { if (tsk_verbose) tsk_fprintf(stderr, "tsk_fs_attr_add: %" PRIuOFF "@%" PRIuOFF " (Filler: %s)\n", data_run_cur->offset, data_run_cur->len, (data_run_cur->flags & TSK_FS_ATTR_RUN_FLAG_FILLER) ? "Yes" : "No"); /* Do we replace this filler spot? */ if (data_run_cur->flags & TSK_FS_ATTR_RUN_FLAG_FILLER) { /* This should never happen because we always add * the filler to start from VCN 0 */ if (data_run_cur->offset > a_data_run_new->offset) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr ("tsk_fs_attr_add_run: could not add data_run b.c. offset (%" PRIuOFF ") is larger than FILLER (%" PRIuOFF ") (%" PRIuINUM ")", a_data_run_new->offset, data_run_cur->offset, a_fs_attr->fs_file->meta->addr); dump_attr(a_fs_attr); return 1; } /* Check if the new run starts inside of this filler. 
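 * (The earlier check guarantees data_run_cur->offset <= a_data_run_new->offset,
 * so "starts inside" reduces to the test below:
 * a_data_run_new->offset < data_run_cur->offset + data_run_cur->len.)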
*/ if (data_run_cur->offset + data_run_cur->len > a_data_run_new->offset) { TSK_FS_ATTR_RUN *endrun; /* if the new starts at the same as the filler, * replace the pointer */ if (data_run_cur->offset == a_data_run_new->offset) { if (data_run_prev) data_run_prev->next = a_data_run_new; else a_fs_attr->nrd.run = a_data_run_new; } /* The new run does not start at the begining of * the filler, so make a new start filler */ else { TSK_FS_ATTR_RUN *newfill = tsk_fs_attr_run_alloc(); if (newfill == NULL) return 1; if (data_run_prev) data_run_prev->next = newfill; else a_fs_attr->nrd.run = newfill; newfill->next = a_data_run_new; newfill->len = a_data_run_new->offset - data_run_cur->offset; newfill->offset = data_run_cur->offset; newfill->flags = TSK_FS_ATTR_RUN_FLAG_FILLER; data_run_cur->len -= newfill->len; } /* get to the end of the run that we are trying to add */ endrun = a_data_run_new; while (endrun->next) endrun = endrun->next; /* if the filler is the same size as the * new one, replace it */ if (run_len == data_run_cur->len) { endrun->next = data_run_cur->next; // update the pointer to the end of the list (if we are the end) if (endrun->next == NULL) a_fs_attr->nrd.run_end = endrun; free(data_run_cur); } /* else adjust the last filler entry */ else { endrun->next = data_run_cur; data_run_cur->len -= run_len; data_run_cur->offset = a_data_run_new->offset + a_data_run_new->len; } return 0; } } data_run_prev = data_run_cur; data_run_cur = data_run_cur->next; } /* * There is no filler holding the location of this run, so * we will add it to the end of the list * * we got here because it did not fit in the current list or * because the current list is NULL * * At this point data_run_prev is the end of the existing list or * 0 if there is no list */ /* This is an error condition. * It means that we cycled through the existing runs, * ended at a VCN that is larger than what we are adding, * and never found a filler entry to insert it into... */ if ((data_run_prev) && (data_run_prev->offset + data_run_prev->len > a_data_run_new->offset)) { /* MAYBE this is because of a duplicate entry .. */ if ((data_run_prev->addr == a_data_run_new->addr) && (data_run_prev->len == a_data_run_new->len)) { // @@@ Sould be we freeing this....? What if the caller tries to write to it? tsk_fs_attr_run_free(a_data_run_new); return 0; } tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr ("fs_attr_add_run: error adding additional run (%" PRIuINUM "): No filler entry for %" PRIuDADDR ". 
Final: %" PRIuDADDR, a_fs_attr->fs_file->meta->addr, a_data_run_new->offset, data_run_prev->offset + data_run_prev->len); dump_attr(a_fs_attr); return 1; } /* we should add it right here */ else if (((data_run_prev) && (data_run_prev->offset + data_run_prev->len == a_data_run_new->offset)) || (a_data_run_new->offset == 0)) { if (data_run_prev) data_run_prev->next = a_data_run_new; else a_fs_attr->nrd.run = a_data_run_new; } /* we need to make a filler before it */ else { TSK_FS_ATTR_RUN *tmprun = tsk_fs_attr_run_alloc(); if (tmprun == NULL) return 1; if (data_run_prev) { data_run_prev->next = tmprun; tmprun->offset = data_run_prev->offset + data_run_prev->len; } else { a_fs_attr->nrd.run = tmprun; } tmprun->len = a_data_run_new->offset - tmprun->offset; tmprun->flags = TSK_FS_ATTR_RUN_FLAG_FILLER; tmprun->next = a_data_run_new; } // update the pointer to the end of the list a_fs_attr->nrd.run_end = a_data_run_new; while (a_fs_attr->nrd.run_end->next) a_fs_attr->nrd.run_end = a_fs_attr->nrd.run_end->next; return 0; } /** * Append a data run to the end of the attribute and update its offset * value. This ignores the offset in the data run and blindly appends. * * @param a_fs File system run is from * @param a_fs_attr Data attribute to append to * @param a_data_run Data run to append. */ void tsk_fs_attr_append_run(TSK_FS_INFO * a_fs, TSK_FS_ATTR * a_fs_attr, TSK_FS_ATTR_RUN * a_data_run) { TSK_FS_ATTR_RUN *data_run_cur; if ((a_fs_attr == NULL) || (a_data_run == NULL)) { return; } if (a_fs_attr->nrd.run == NULL) { a_fs_attr->nrd.run = a_data_run; a_data_run->offset = 0; } else { // just in case this was not updated if ((a_fs_attr->nrd.run_end == NULL) || (a_fs_attr->nrd.run_end->next != NULL)) { data_run_cur = a_fs_attr->nrd.run; while (data_run_cur->next) { data_run_cur = data_run_cur->next; } a_fs_attr->nrd.run_end = data_run_cur; } a_fs_attr->nrd.run_end->next = a_data_run; a_data_run->offset = a_fs_attr->nrd.run_end->offset + a_fs_attr->nrd.run_end->len; } // update the rest of the offsets in the run (if any exist) data_run_cur = a_data_run; while (data_run_cur->next) { data_run_cur->next->offset = data_run_cur->offset + data_run_cur->len; a_fs_attr->nrd.run_end = data_run_cur->next; data_run_cur = data_run_cur->next; } } /** \internal * Processes a resident TSK_FS_ATTR structure and calls the callback with the associated * data. The size of the buffer in the callback will be block_size at max. 
* * @param a_fs File system being analyzed * @param fs_attr Resident data structure to be walked * @param a_flags Flags for walking * @param a_action Callback action * @param a_ptr Pointer to data that is passed to callback * @returns 1 on error or 0 on success */ static uint8_t tsk_fs_attr_walk_res(const TSK_FS_ATTR * fs_attr, TSK_FS_FILE_WALK_FLAG_ENUM a_flags, TSK_FS_FILE_WALK_CB a_action, void *a_ptr) { char *buf = NULL; int myflags; int retval; size_t buf_len = 0; TSK_OFF_T off; size_t read_len; TSK_FS_INFO *fs; fs = fs_attr->fs_file->fs_info; if ((fs_attr->flags & TSK_FS_ATTR_RES) == 0) { tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("tsk_fs_file_walk_res: called with non-resident data"); return 1; } /* Allocate a buffer that is at most a block size in length */ buf_len = (size_t) fs_attr->size; if (buf_len > fs->block_size) buf_len = fs->block_size; if ((a_flags & TSK_FS_FILE_WALK_FLAG_AONLY) == 0) { if ((buf = tsk_malloc(buf_len)) == NULL) { return 1; } } myflags = TSK_FS_BLOCK_FLAG_CONT | TSK_FS_BLOCK_FLAG_ALLOC | TSK_FS_BLOCK_FLAG_RES; // Call the callback in (at max) block-sized chunks. retval = TSK_WALK_CONT; for (off = 0; off < fs_attr->size; off += read_len) { if (fs_attr->size - off > buf_len) read_len = buf_len; else read_len = (size_t) (fs_attr->size - off); if (buf) { // wipe rest of buffer if we are not going to read into all of it if (read_len != buf_len) memset(&buf[read_len], 0, buf_len - read_len); memcpy(buf, &fs_attr->rd.buf[off], read_len); } retval = a_action(fs_attr->fs_file, off, 0, buf, read_len, myflags, a_ptr); if (retval != TSK_WALK_CONT) break; } if (buf) free(buf); if (retval == TSK_WALK_ERROR) return 1; else return 0; } /** \internal * Processes a non-resident TSK_FS_ATTR structure and calls the callback with the associated * data. 
* * @param fs_attr Resident data structure to be walked * @param a_flags Flags for walking * @param a_action Callback action * @param a_ptr Pointer to data that is passed to callback * @returns 1 on error or 0 on success */ static uint8_t tsk_fs_attr_walk_nonres(const TSK_FS_ATTR * fs_attr, TSK_FS_FILE_WALK_FLAG_ENUM a_flags, TSK_FS_FILE_WALK_CB a_action, void *a_ptr) { char *buf = NULL; TSK_OFF_T tot_size; TSK_OFF_T off = 0; TSK_FS_ATTR_RUN *fs_attr_run; int retval; uint32_t skip_remain; TSK_FS_INFO *fs = fs_attr->fs_file->fs_info; uint8_t stop_loop = 0; if ((fs_attr->flags & TSK_FS_ATTR_NONRES) == 0) { tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("tsk_fs_file_walk_nonres: called with non-non-resident data"); return 1; } /* if we want the slack space too, then use the allocsize */ if (a_flags & TSK_FS_FILE_WALK_FLAG_SLACK) tot_size = fs_attr->nrd.allocsize; else tot_size = fs_attr->size; skip_remain = fs_attr->nrd.skiplen; if ((a_flags & TSK_FS_FILE_WALK_FLAG_AONLY) == 0) { if ((buf = (char *) tsk_malloc(fs->block_size)) == NULL) { return 1; } } /* cycle through the number of runs we have */ retval = TSK_WALK_CONT; for (fs_attr_run = fs_attr->nrd.run; fs_attr_run; fs_attr_run = fs_attr_run->next) { TSK_DADDR_T addr, len_idx; addr = fs_attr_run->addr; /* cycle through each block in the run */ for (len_idx = 0; len_idx < fs_attr_run->len; len_idx++) { TSK_FS_BLOCK_FLAG_ENUM myflags; /* If the address is too large then give an error */ if (addr + len_idx > fs->last_block) { if (fs_attr->fs_file-> meta->flags & TSK_FS_META_FLAG_UNALLOC) tsk_error_set_errno(TSK_ERR_FS_RECOVER); else tsk_error_set_errno(TSK_ERR_FS_BLK_NUM); tsk_error_set_errstr ("Invalid address in run (too large): %" PRIuDADDR "", addr + len_idx); return 1; } // load the buffer if they want more than just the address if ((a_flags & TSK_FS_FILE_WALK_FLAG_AONLY) == 0) { /* sparse files just get 0s */ if (fs_attr_run->flags & TSK_FS_ATTR_RUN_FLAG_SPARSE) { memset(buf, 0, fs->block_size); } /* FILLER entries exist when the source file system can store run * info out of order and we did not get all of the run info. We * return 0s if data is read from this type of run. */ else if (fs_attr_run->flags & TSK_FS_ATTR_RUN_FLAG_FILLER) { memset(buf, 0, fs->block_size); if (tsk_verbose) fprintf(stderr, "tsk_fs_attr_walk_nonres: File %" PRIuINUM " has FILLER entry, using 0s\n", fs_attr->fs_file->meta->addr); } // we return 0s for reads past the initsize else if ((off >= fs_attr->nrd.initsize) && ((a_flags & TSK_FS_FILE_READ_FLAG_SLACK) == 0)) { memset(buf, 0, fs->block_size); } else { ssize_t cnt; cnt = tsk_fs_read_block (fs, addr + len_idx, buf, fs->block_size); if (cnt != fs->block_size) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2 ("tsk_fs_file_walk: Error reading block at %" PRIuDADDR, addr + len_idx); return 1; } if ((off + fs->block_size > fs_attr->nrd.initsize) && ((a_flags & TSK_FS_FILE_READ_FLAG_SLACK) == 0)) { memset(&buf[fs_attr->nrd.initsize - off], 0, fs->block_size - (size_t) (fs_attr->nrd.initsize - off)); } } } /* Need to account for the skip length, which is the number of bytes * in the start of the attribute that are skipped and that are not * included in the overall length. We will seek past those and not * return those in the action. We just read a block size so check * if there is data to be returned in this buffer. 
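 * For example (illustrative numbers), with block_size = 1024 and
 * nrd.skiplen = 1536: the first block is skipped entirely (skip_remain goes
 * from 1536 to 512) and only the last 512 bytes of the second block are
 * passed to the callback.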
*/ if (skip_remain >= fs->block_size) { skip_remain -= fs->block_size; } else { size_t ret_len; /* Do we want to return a full block, or just the end? */ if ((TSK_OFF_T) fs->block_size - skip_remain < tot_size - off) ret_len = fs->block_size - skip_remain; else ret_len = (size_t) (tot_size - off); /* Only do sparse or FILLER clusters if NOSPARSE is not set */ if ((fs_attr_run->flags & TSK_FS_ATTR_RUN_FLAG_SPARSE) || (fs_attr_run->flags & TSK_FS_ATTR_RUN_FLAG_FILLER) || (off > fs_attr->nrd.initsize)) { myflags = fs->block_getflags(fs, 0); myflags |= TSK_FS_BLOCK_FLAG_SPARSE; if ((a_flags & TSK_FS_FILE_WALK_FLAG_NOSPARSE) == 0) { retval = a_action(fs_attr->fs_file, off, 0, &buf[skip_remain], ret_len, myflags, a_ptr); } } else { myflags = fs->block_getflags(fs, addr + len_idx); myflags |= TSK_FS_BLOCK_FLAG_RAW; retval = a_action(fs_attr->fs_file, off, addr + len_idx, &buf[skip_remain], ret_len, myflags, a_ptr); } off += ret_len; skip_remain = 0; if (retval != TSK_WALK_CONT) { stop_loop = 1; break; } if (off >= tot_size) { stop_loop = 1; break; } } } if (stop_loop) break; } if (buf) free(buf); if (retval == TSK_WALK_ERROR) return 1; else return 0; } /** * \ingroup fslib * Process an attribute and call a callback function with its contents. The callback will be * called with chunks of data that are fs->block_size or less. The address given in the callback * will be correct only for raw files (when the raw file contents were stored in the block). For * compressed and sparse attributes, the address may be zero. * * @param a_fs_attr Attribute to process * @param a_flags Flags to use while processing attribute * @param a_action Callback action to call with content * @param a_ptr Pointer that will passed to callback * @returns 1 on error and 0 on success. */ uint8_t tsk_fs_attr_walk(const TSK_FS_ATTR * a_fs_attr, TSK_FS_FILE_WALK_FLAG_ENUM a_flags, TSK_FS_FILE_WALK_CB a_action, void *a_ptr) { TSK_FS_INFO *fs; // clean up any error messages that are lying around tsk_error_reset(); // check the FS_INFO, FS_FILE structures if ((a_fs_attr == NULL) || (a_fs_attr->fs_file == NULL) || (a_fs_attr->fs_file->meta == NULL) || (a_fs_attr->fs_file->fs_info == NULL)) { tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("tsk_fs_attr_walk: called with NULL pointers"); return 1; } fs = a_fs_attr->fs_file->fs_info; if (fs->tag != TSK_FS_INFO_TAG) { // || (a_fs_attr->id != TSK_FS_ATTR_ID)) { tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("tsk_fs_attr_walk: called with unallocated structures"); return 1; } if (a_fs_attr->flags & TSK_FS_ATTR_COMP) { if (a_fs_attr->w == NULL) { tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("tsk_fs_attr_walk: compressed attribute found, but special function not defined"); return 1; } return a_fs_attr->w(a_fs_attr, a_flags, a_action, a_ptr); } // resident data if (a_fs_attr->flags & TSK_FS_ATTR_RES) { fflush(stderr); return tsk_fs_attr_walk_res(a_fs_attr, a_flags, a_action, a_ptr); } // non-resident data else if (a_fs_attr->flags & TSK_FS_ATTR_NONRES) { fflush(stderr); return tsk_fs_attr_walk_nonres(a_fs_attr, a_flags, a_action, a_ptr); } tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("tsk_fs_attr_walk: called with unknown attribute type: %x", a_fs_attr->flags); return 1; } /** * \ingroup fslib * Read the contents of a given attribute using a typical read() type interface. * 0s are returned for missing runs. * * @param a_fs_attr The attribute to read. * @param a_offset The byte offset to start reading from. 
* @param a_buf The buffer to read the data into. * @param a_len The number of bytes to read from the file. * @param a_flags Flags to use while reading * @returns The number of bytes read or -1 on error (incl if offset is past end of file). */ ssize_t tsk_fs_attr_read(const TSK_FS_ATTR * a_fs_attr, TSK_OFF_T a_offset, char *a_buf, size_t a_len, TSK_FS_FILE_READ_FLAG_ENUM a_flags) { TSK_FS_INFO *fs; if ((a_fs_attr == NULL) || (a_fs_attr->fs_file == NULL) || (a_fs_attr->fs_file->fs_info == NULL)) { tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("tsk_fs_attr_read: Attribute has null pointers."); return -1; } fs = a_fs_attr->fs_file->fs_info; /* for compressed data, call the specialized function */ if (a_fs_attr->flags & TSK_FS_ATTR_COMP) { if (a_fs_attr->r == NULL) { tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("tsk_fs_attr_read: Attribute has compressed type set, but no compressed read function defined"); return -1; } return a_fs_attr->r(a_fs_attr, a_offset, a_buf, a_len); } /* For resident data, copy data from the local buffer */ else if (a_fs_attr->flags & TSK_FS_ATTR_RES) { size_t len_toread; if (a_offset >= a_fs_attr->size) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ_OFF); tsk_error_set_errstr("tsk_fs_attr_read - %" PRIuOFF, a_offset); return -1; } len_toread = a_len; if (a_offset + a_len > a_fs_attr->size) { len_toread = (size_t) (a_fs_attr->size - a_offset); memset(&a_buf[len_toread], 0, a_len - len_toread); } memcpy(a_buf, &a_fs_attr->rd.buf[a_offset], len_toread); return (ssize_t) len_toread; } /* For non-resident data, load the needed block and copy the data */ else if (a_fs_attr->flags & TSK_FS_ATTR_NONRES) { TSK_FS_ATTR_RUN *data_run_cur; TSK_DADDR_T blkoffset_toread; // block offset of where we want to start reading from size_t byteoffset_toread; // byte offset in blkoffset_toread of where we want to start reading from size_t len_remain; // length remaining to copy size_t len_toread; // length total to copy if (((a_flags & TSK_FS_FILE_READ_FLAG_SLACK) && (a_offset >= a_fs_attr->nrd.allocsize)) || (!(a_flags & TSK_FS_FILE_READ_FLAG_SLACK) && (a_offset >= a_fs_attr->size))) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ_OFF); tsk_error_set_errstr("tsk_fs_attr_read - %" PRIuOFF, a_offset); return -1; } blkoffset_toread = a_offset / fs->block_size; byteoffset_toread = (size_t) (a_offset % fs->block_size); // determine how many bytes we can copy len_toread = a_len; if (a_flags & TSK_FS_FILE_READ_FLAG_SLACK) { if (a_offset + a_len > a_fs_attr->nrd.allocsize) len_toread = (size_t) (a_fs_attr->nrd.allocsize - a_offset); } else { if (a_offset + a_len > a_fs_attr->size) len_toread = (size_t) (a_fs_attr->size - a_offset); } // wipe the buffer we won't read into if (len_toread < a_len) memset(&a_buf[len_toread], 0, a_len - len_toread); len_remain = len_toread; // cycle through the run until we find where we can start to process the clusters for (data_run_cur = a_fs_attr->nrd.run; data_run_cur; data_run_cur = data_run_cur->next) { TSK_DADDR_T blkoffset_inrun; size_t len_inrun; // we are done if (len_remain <= 0) break; // See if this run contains the starting offset they requested if (data_run_cur->offset + data_run_cur->len <= blkoffset_toread) continue; // block offset into this run if (data_run_cur->offset < blkoffset_toread) blkoffset_inrun = blkoffset_toread - data_run_cur->offset; else blkoffset_inrun = 0; // see if we need to read the rest of this run and into the next or if it is all here len_inrun = len_remain; if ((data_run_cur->len - 
blkoffset_inrun) * fs->block_size - byteoffset_toread < len_remain) len_inrun = (size_t) ((data_run_cur->len - blkoffset_inrun) * fs->block_size - byteoffset_toread); /* sparse files/runs just get 0s */ if (data_run_cur->flags & TSK_FS_ATTR_RUN_FLAG_SPARSE) { memset(&a_buf[len_toread - len_remain], 0, len_inrun); } /* FILLER entries exist when the source file system can store run * info out of order and we did not get all of the run info. We * return 0s if data is read from this type of run. */ else if (data_run_cur->flags & TSK_FS_ATTR_RUN_FLAG_FILLER) { memset(&a_buf[len_toread - len_remain], 0, len_inrun); if (tsk_verbose) fprintf(stderr, "tsk_fs_attr_read_type: File %" PRIuINUM " has FILLER entry, using 0s\n", (a_fs_attr->fs_file->meta) ? a_fs_attr-> fs_file->meta->addr : 0); } // we return 0s for reads past the initsize (unless they want slack space) else if (((TSK_OFF_T) ((data_run_cur->offset + blkoffset_inrun) * fs->block_size + byteoffset_toread) >= a_fs_attr->nrd.initsize) && ((a_flags & TSK_FS_FILE_READ_FLAG_SLACK) == 0)) { memset(&a_buf[len_toread - len_remain], 0, len_inrun); if (tsk_verbose) fprintf(stderr, "tsk_fs_attr_read: Returning 0s for read past end of initsize (%" PRIuINUM ")\n", ((a_fs_attr->fs_file) && (a_fs_attr->fs_file-> meta)) ? a_fs_attr->fs_file->meta-> addr : 0); } else { TSK_OFF_T fs_offset_b; ssize_t cnt; // calcuate the byte offset in the file system fs_offset_b = (data_run_cur->addr + blkoffset_inrun) * fs->block_size; // add the byte offset in the block fs_offset_b += byteoffset_toread; // reset this in case we need to also read from the next run byteoffset_toread = 0; cnt = tsk_fs_read(fs, fs_offset_b, &a_buf[len_toread - len_remain], len_inrun); if (cnt != len_inrun) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2 ("tsk_fs_attr_read_type: offset: %" PRIuOFF " Len: %" PRIuSIZE "", fs_offset_b, len_inrun); return cnt; } // see if part of the data is in the non-initialized space if (((TSK_OFF_T) ((data_run_cur->offset + blkoffset_inrun) * fs->block_size + byteoffset_toread + len_inrun) > a_fs_attr->nrd.initsize) && ((a_flags & TSK_FS_FILE_READ_FLAG_SLACK) == 0)) { size_t uninit_off = (size_t) (a_fs_attr->nrd.initsize - ((data_run_cur->offset + blkoffset_inrun) * fs->block_size + byteoffset_toread)); memset(&a_buf[len_toread - len_remain + uninit_off], 0, len_inrun - uninit_off); } } len_remain -= len_inrun; } return (ssize_t) (len_toread - len_remain); } tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("tsk_fs_attr_read: Unknown attribute type: %x", a_fs_attr->flags); return -1; } sleuthkit-4.2.0/tsk/fs/fs_attrlist.c000644 000765 000024 00000030172 12576320700 020243 0ustar00carrierstaff000000 000000 /* ** fs_attrlist ** The Sleuth Kit ** ** Brian Carrier [carrier sleuthkit [dot] org] ** Copyright (c) 2008-2011 Brian Carrier. All Rights reserved ** ** This software is distributed under the Common Public License 1.0 */ /** * \file fs_attrlist.c * File that contains functions to process TSK_FS_ATTRLIST structures, which * hold a linked list of TSK_FS_ATTR attribute structures. 
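 *
 * The list is normally reached through the tsk_fs_file_attr_*() wrappers, but
 * a direct iteration sketch looks like this (list is illustrative):
 *
 *   int cnt = tsk_fs_attrlist_get_len(list);
 *   for (int i = 0; i < cnt; i++) {
 *       const TSK_FS_ATTR *attr = tsk_fs_attrlist_get_idx(list, i);
 *       ...
 *   }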
*/ #include "tsk_fs_i.h" /** \internal * Allocate a new data list structure * * @returns Pointer to new list structure or NULL on error */ TSK_FS_ATTRLIST * tsk_fs_attrlist_alloc() { TSK_FS_ATTRLIST *fs_attrlist; if ((fs_attrlist = (TSK_FS_ATTRLIST *) tsk_malloc(sizeof(TSK_FS_ATTRLIST))) == NULL) return NULL; return fs_attrlist; } /** \internal * Free a list and the attributes inside of it */ void tsk_fs_attrlist_free(TSK_FS_ATTRLIST * a_fs_attrlist) { TSK_FS_ATTR *fs_attr_cur, *fs_attr_tmp; if (a_fs_attrlist == NULL) return; fs_attr_cur = a_fs_attrlist->head; while (fs_attr_cur) { fs_attr_tmp = fs_attr_cur->next; tsk_fs_attr_free(fs_attr_cur); fs_attr_cur = fs_attr_tmp; } free(a_fs_attrlist); } /** \internal * Add a new attribute to the list. * * @param a_fs_attrlist List structure to add to * @param a_fs_attr Data attribute to add * @returns 1 on error and 0 on success. Caller must free memory on error. */ uint8_t tsk_fs_attrlist_add(TSK_FS_ATTRLIST * a_fs_attrlist, TSK_FS_ATTR * a_fs_attr) { if (a_fs_attrlist == NULL) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("Null list in tsk_fs_attrlist_add"); return 1; } // verify INUSE is set a_fs_attr->flags |= TSK_FS_ATTR_INUSE; if (a_fs_attrlist->head == NULL) { a_fs_attrlist->head = a_fs_attr; } else { TSK_FS_ATTR *fs_attr_cur; fs_attr_cur = a_fs_attrlist->head; while (fs_attr_cur) { // check if it already exists if ((fs_attr_cur->type == a_fs_attr->type) && (fs_attr_cur->id == a_fs_attr->id)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("datalist_add: Type %d and Id %d already in list", a_fs_attr->type, a_fs_attr->id); return 1; } if (fs_attr_cur->next == NULL) { fs_attr_cur->next = a_fs_attr; break; } fs_attr_cur = fs_attr_cur->next; } } return 0; } /** * \internal * Return either an empty element in the list or create a new one at the end * * Preference is given to finding one of the same type to prevent * excessive malloc's, but if one is not found then a different * type is used: type = [TSK_FS_ATTR_NONRES | TSK_FS_ATTR_RES] * * @param a_fs_attrlist Attribute list to search * @param a_atype Preference for attribute type to reuse * @return NULL on error or attribute in list to use */ TSK_FS_ATTR * tsk_fs_attrlist_getnew(TSK_FS_ATTRLIST * a_fs_attrlist, TSK_FS_ATTR_FLAG_ENUM a_atype) { TSK_FS_ATTR *fs_attr_cur; TSK_FS_ATTR *fs_attr_ok = NULL; if (a_fs_attrlist == NULL) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("Null list in tsk_fs_attrlist_getnew()"); return NULL; } if ((a_atype != TSK_FS_ATTR_NONRES) && (a_atype != TSK_FS_ATTR_RES)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("Invalid Type in tsk_fs_attrlist_getnew()"); return NULL; } for (fs_attr_cur = a_fs_attrlist->head; fs_attr_cur; fs_attr_cur = fs_attr_cur->next) { if (fs_attr_cur->flags == 0) { if (a_atype == TSK_FS_ATTR_NONRES) { if (fs_attr_cur->nrd.run) break; else if (!fs_attr_ok) fs_attr_ok = fs_attr_cur; } /* we want one with an allocated buf */ else { if (fs_attr_cur->rd.buf_size) break; else if (!fs_attr_ok) fs_attr_ok = fs_attr_cur; } } } /* if we fell out then check fs_attr_tmp */ if (!fs_attr_cur) { if (fs_attr_ok) fs_attr_cur = fs_attr_ok; else { /* make a new one */ if ((fs_attr_cur = tsk_fs_attr_alloc(a_atype)) == NULL) return NULL; // add it to the list if (tsk_fs_attrlist_add(a_fs_attrlist, fs_attr_cur)) { tsk_fs_attr_free(fs_attr_cur); return NULL; } } } fs_attr_cur->flags = (TSK_FS_ATTR_INUSE | a_atype); return fs_attr_cur; } 
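/* Example (sketch): file system code typically obtains a reusable attribute
 * from a file's list and then fills it in, rather than allocating one itself.
 * fs_meta->attr is illustrative here; any TSK_FS_ATTRLIST works:
 *
 *   TSK_FS_ATTR *fs_attr = tsk_fs_attrlist_getnew(fs_meta->attr, TSK_FS_ATTR_NONRES);
 *   if (fs_attr == NULL)
 *       return 1;
 *   // then initialize it with tsk_fs_attr_set_run() or tsk_fs_attr_set_str()
 */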
/** \internal * Cycle through the attributes and mark them as unused. Does not free anything. * @param a_fs_attrlist Attribute list too mark. */ void tsk_fs_attrlist_markunused(TSK_FS_ATTRLIST * a_fs_attrlist) { TSK_FS_ATTR *fs_attr_cur; if (a_fs_attrlist == NULL) return; fs_attr_cur = a_fs_attrlist->head; while (fs_attr_cur) { tsk_fs_attr_clear(fs_attr_cur); fs_attr_cur = fs_attr_cur->next; } } /** * \internal * Search the attribute list of TSK_FS_ATTR structures for an entry with a given * type (no ID). If more than one entry with the same type exists, the one with * the lowest ID will be returned. * * @param a_fs_attrlist Data list structure to search in * @param a_type Type of attribute to find * * @return NULL is returned on error and if an entry could not be found. * tsk_errno will be set to TSK_ERR_FS_ATTR_NOTFOUND if entry could not be found. */ const TSK_FS_ATTR * tsk_fs_attrlist_get(const TSK_FS_ATTRLIST * a_fs_attrlist, TSK_FS_ATTR_TYPE_ENUM a_type) { TSK_FS_ATTR *fs_attr_cur; TSK_FS_ATTR *fs_attr_ok = NULL; if (!a_fs_attrlist) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("tsk_fs_attrlist_get: Null list pointer"); return NULL; } for (fs_attr_cur = a_fs_attrlist->head; fs_attr_cur; fs_attr_cur = fs_attr_cur->next) { if ((fs_attr_cur->flags & TSK_FS_ATTR_INUSE) && (fs_attr_cur->type == a_type)) { /* If we are looking for NTFS $Data, * then return default when we see it */ if ((fs_attr_cur->type == TSK_FS_ATTR_TYPE_NTFS_DATA) && (fs_attr_cur->name == NULL)) { return fs_attr_cur; } // make sure we return the lowest if multiple exist if ((fs_attr_ok == NULL) || (fs_attr_ok->id > fs_attr_cur->id)) fs_attr_ok = fs_attr_cur; } } if (!fs_attr_ok) { tsk_error_set_errno(TSK_ERR_FS_ATTR_NOTFOUND); tsk_error_set_errstr("tsk_fs_attrlist_get: Attribute %d not found", a_type); return NULL; } else { return fs_attr_ok; } } /** * \internal * Search the attribute list of TSK_FS_ATTR structures for an entry with a given * type and id. * * @param a_fs_attrlist Data list structure to search in * @param a_type Type of attribute to find * @param a_id Id of attribute to find. * * @return NULL is returned on error and if an entry could not be found. * tsk_errno will be set to TSK_ERR_FS_ATTR_NOTFOUND if entry could not be found. */ const TSK_FS_ATTR * tsk_fs_attrlist_get_id(const TSK_FS_ATTRLIST * a_fs_attrlist, TSK_FS_ATTR_TYPE_ENUM a_type, uint16_t a_id) { TSK_FS_ATTR *fs_attr_cur; if (!a_fs_attrlist) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("tsk_fs_attrlist_get_id: Null list pointer"); return NULL; } for (fs_attr_cur = a_fs_attrlist->head; fs_attr_cur; fs_attr_cur = fs_attr_cur->next) { if ((fs_attr_cur->flags & TSK_FS_ATTR_INUSE) && (fs_attr_cur->type == a_type) && (fs_attr_cur->id == a_id)) return fs_attr_cur; } tsk_error_set_errno(TSK_ERR_FS_ATTR_NOTFOUND); tsk_error_set_errstr ("tsk_fs_attrlist_get_id: Attribute %d-%d not found", a_type, a_id); return NULL; } /** * \internal * Search the attribute list of TSK_FS_ATTR structures for an entry with a given * type (no ID) and a given name. If more than one entry with the same type exists, * the one with the lowest ID will be returned. * * @param a_fs_attrlist Data list structure to search in * @param a_type Type of attribute to find * @param name Name of the attribute to find (NULL for an entry with no name) * * @return NULL is returned on error and if an entry could not be found. * tsk_errno will be set to TSK_ERR_FS_ATTR_NOTFOUND if entry could not be found. 
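 *
 * For example (sketch), the unnamed NTFS $DATA attribute of a file would be
 * looked up as:
 *   tsk_fs_attrlist_get_name_type(list, TSK_FS_ATTR_TYPE_NTFS_DATA, NULL);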
*/ const TSK_FS_ATTR * tsk_fs_attrlist_get_name_type(const TSK_FS_ATTRLIST * a_fs_attrlist, TSK_FS_ATTR_TYPE_ENUM a_type, const char *name) { TSK_FS_ATTR *fs_attr_cur; TSK_FS_ATTR *fs_attr_ok = NULL; if (!a_fs_attrlist) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("tsk_fs_attrlist_get_name_type: Null list pointer"); return NULL; } for (fs_attr_cur = a_fs_attrlist->head; fs_attr_cur; fs_attr_cur = fs_attr_cur->next) { if ((fs_attr_cur->flags & TSK_FS_ATTR_INUSE) && (fs_attr_cur->type == a_type)) { if (((name == NULL) && (fs_attr_cur->name == NULL)) || ((name) && (fs_attr_cur->name) && (!strcmp(fs_attr_cur->name, name)))) { /* If we are looking for NTFS $Data, * then return default when we see it */ if ((fs_attr_cur->type == TSK_FS_ATTR_TYPE_NTFS_DATA) && (fs_attr_cur->name == NULL)) { return fs_attr_cur; } // make sure we return the lowest if multiple exist if ((fs_attr_ok == NULL) || (fs_attr_ok->id > fs_attr_cur->id)) fs_attr_ok = fs_attr_cur; } } } if (!fs_attr_ok) { tsk_error_set_errno(TSK_ERR_FS_ATTR_NOTFOUND); tsk_error_set_errstr("tsk_fs_attrlist_get: Attribute %d not found", a_type); return NULL; } else { return fs_attr_ok; } } /** * \internal * Return the a_idx'th attribute in the attribute list. * * @param a_fs_attrlist Data list structure to search in * @param a_idx 0-based index of attribute to return * * @return NULL is returned on error and if an entry could not be found. * tsk_errno will be set to TSK_ERR_FS_ATTR_NOTFOUND if entry could not be found. */ const TSK_FS_ATTR * tsk_fs_attrlist_get_idx(const TSK_FS_ATTRLIST * a_fs_attrlist, int a_idx) { TSK_FS_ATTR *fs_attr_cur; int i = 0; if (!a_fs_attrlist) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("tsk_fs_attrlist_get_idx: Null list pointer"); return NULL; } for (fs_attr_cur = a_fs_attrlist->head; fs_attr_cur; fs_attr_cur = fs_attr_cur->next) { if (fs_attr_cur->flags & TSK_FS_ATTR_INUSE) { if (i == a_idx) { return fs_attr_cur; } i++; } } tsk_error_set_errno(TSK_ERR_FS_ATTR_NOTFOUND); tsk_error_set_errstr ("tsk_fs_attrlist_get_idx: Attribute index %d not found", a_idx); return NULL; } /** * \internal * Return the number of attributes in the attribute list * * @param a_fs_attrlist Data list structure to analyze * * @return the number of attributes and 0 if error (if argument is NULL) */ int tsk_fs_attrlist_get_len(const TSK_FS_ATTRLIST * a_fs_attrlist) { TSK_FS_ATTR *fs_attr_cur; int len = 0; if (!a_fs_attrlist) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("tsk_fs_attrlist_get_len: Null list pointer"); return 0; } for (fs_attr_cur = a_fs_attrlist->head; fs_attr_cur; fs_attr_cur = fs_attr_cur->next) { if (fs_attr_cur->flags & TSK_FS_ATTR_INUSE) len++; } return len; } sleuthkit-4.2.0/tsk/fs/fs_block.c000644 000765 000024 00000014251 12576320700 017467 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2008-2011 Brian Carrier, Basis Technology. All Rights reserved * * This software is distributed under the Common Public License 1.0 */ /** \file fs_block.c * Contains functions to allocate, free, and read data into a TSK_FS_BLOCK structure. */ #include #include "tsk_fs_i.h" /** * \internal * Allocate a TSK_FS_BLOCK structure. 
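 *
 * A block can also be obtained by passing NULL for the block argument of
 * tsk_fs_block_get(), which calls this function internally. Typical cycle
 * (sketch; addr is illustrative, error handling omitted):
 *
 *   TSK_FS_BLOCK *blk = tsk_fs_block_get(fs, NULL, addr);
 *   ... // blk->buf holds fs->block_size bytes, blk->flags gives its status
 *   tsk_fs_block_free(blk);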
* @param a_fs File system to create block for * @returns NULL on error */ TSK_FS_BLOCK * tsk_fs_block_alloc(TSK_FS_INFO * a_fs) { TSK_FS_BLOCK *fs_block; fs_block = (TSK_FS_BLOCK *) tsk_malloc(sizeof(TSK_FS_BLOCK)); if (fs_block == NULL) return NULL; fs_block->buf = (char *) tsk_malloc(a_fs->block_size); if (fs_block->buf == NULL) { free(fs_block); return NULL; } fs_block->tag = TSK_FS_BLOCK_TAG; fs_block->addr = 0; fs_block->flags = 0; fs_block->fs_info = a_fs; return fs_block; } /** * \ingroup fslib * Free the memory associated with the TSK_FS_BLOCK structure. * @param a_fs_block Block to free */ void tsk_fs_block_free(TSK_FS_BLOCK * a_fs_block) { if (a_fs_block->buf) free(a_fs_block->buf); a_fs_block->tag = 0; free(a_fs_block); } TSK_FS_BLOCK * tsk_fs_block_get(TSK_FS_INFO * a_fs, TSK_FS_BLOCK * a_fs_block, TSK_DADDR_T a_addr) { return tsk_fs_block_get_flag(a_fs, a_fs_block, a_addr, a_fs->block_getflags(a_fs, a_addr)); } /** * \ingroup fslib * Get the contents and flags of a specific file system block. Note that if the block contains * compressed data, then this function will return the compressed data with the RAW flag set. * The uncompressed data can be obtained only from the file-level functions. * * @param a_fs The file system to read the block from. * @param a_fs_block The structure to write the block data into or NULL to have one created. * @param a_addr The file system address to read. * @param a_flags Flag to assign to the returned TSK_FS_BLOCK (use if you already have it as part of a block_walk-type scenario) * @return The TSK_FS_BLOCK with the data or NULL on error. (If a_fs_block was not NULL, this will * be the same structure). */ TSK_FS_BLOCK * tsk_fs_block_get_flag(TSK_FS_INFO * a_fs, TSK_FS_BLOCK * a_fs_block, TSK_DADDR_T a_addr, TSK_FS_BLOCK_FLAG_ENUM a_flags) { TSK_OFF_T offs; ssize_t cnt; size_t len; if (a_fs == NULL) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); tsk_error_set_errstr("tsk_fs_block_get: fs unallocated"); return NULL; } if (a_fs_block == NULL) { a_fs_block = tsk_fs_block_alloc(a_fs); } else if ((a_fs_block->tag != TSK_FS_BLOCK_TAG) || (a_fs_block->buf == NULL)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); tsk_error_set_errstr("tsk_fs_block_get: fs_block unallocated"); return NULL; } len = a_fs->block_size; if (a_addr > a_fs->last_block_act) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); if (a_addr <= a_fs->last_block) tsk_error_set_errstr ("tsk_fs_block_get: Address missing in partial image: %" PRIuDADDR ")", a_addr); else tsk_error_set_errstr ("tsk_fs_block_get: Address is too large for image: %" PRIuDADDR ")", a_addr); return NULL; } a_fs_block->fs_info = a_fs; a_fs_block->addr = a_addr; a_fs_block->flags = a_flags; a_fs_block->flags |= TSK_FS_BLOCK_FLAG_RAW; offs = (TSK_OFF_T) a_addr *a_fs->block_size; if ((a_fs_block->flags & TSK_FS_BLOCK_FLAG_AONLY) == 0) { cnt = tsk_img_read(a_fs->img_info, a_fs->offset + offs, a_fs_block->buf, len); if (cnt != len) { return NULL; } } return a_fs_block; } /** * \internal * Set the fields of a FS_BLOCk structure. This is internally used to set the data from a * larger buffer so that larger disk reads can occur. 
* * @param a_fs File system * @param a_fs_block Block to load data into * @param a_addr Address where data is from * @param a_flags Flags for data * @param a_buf Buffer to copy data from and into a_fs_block (block_size will be copied) * @returns 1 on error, 0 on success */ int tsk_fs_block_set(TSK_FS_INFO * a_fs, TSK_FS_BLOCK * a_fs_block, TSK_DADDR_T a_addr, TSK_FS_BLOCK_FLAG_ENUM a_flags, char *a_buf) { if ((a_fs == NULL) || (a_fs->tag != TSK_FS_INFO_TAG)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); tsk_error_set_errstr("tsk_fs_block_set: fs_info unallocated"); return 1; } if ((a_fs_block->tag != TSK_FS_BLOCK_TAG) || (a_fs_block->buf == NULL)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); tsk_error_set_errstr("tsk_fs_block_set: fs_block unallocated"); return 1; } a_fs_block->fs_info = a_fs; if ((a_flags & TSK_FS_BLOCK_FLAG_AONLY) == 0) memcpy(a_fs_block->buf, a_buf, a_fs->block_size); a_fs_block->addr = a_addr; a_fs_block->flags = a_flags; return 0; } /** * \ingroup fslib * * Cycle through a range of file system blocks and call the callback function * with the contents and allocation status of each. * * @param a_fs File system to analyze * @param a_start_blk Block address to start walking from * @param a_end_blk Block address to walk to * @param a_flags Flags used during walk to determine which blocks to call callback with * @param a_action Callback function * @param a_ptr Pointer that will be passed to callback * @returns 1 on error and 0 on success */ uint8_t tsk_fs_block_walk(TSK_FS_INFO * a_fs, TSK_DADDR_T a_start_blk, TSK_DADDR_T a_end_blk, TSK_FS_BLOCK_WALK_FLAG_ENUM a_flags, TSK_FS_BLOCK_WALK_CB a_action, void *a_ptr) { if ((a_fs == NULL) || (a_fs->tag != TSK_FS_INFO_TAG)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("tsk_fs_block_walk: FS_INFO structure is not allocated"); return 1; } return a_fs->block_walk(a_fs, a_start_blk, a_end_blk, a_flags, a_action, a_ptr); } sleuthkit-4.2.0/tsk/fs/fs_dir.c000644 000765 000024 00000116203 12576320700 017153 0ustar00carrierstaff000000 000000 /* * fs_dir * The Sleuth Kit * * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2008-2011 Brian Carrier. All Rights reserved * * This software is distributed under the Common Public License 1.0 * */ /** * \file fs_dir.c * Create, manage, etc. the TSK_FS_DIR structures. */ #include "tsk_fs_i.h" #include "tsk_fatfs.h" /** \internal * Allocate a FS_DIR structure to load names into. * * @param a_addr Address of this directory. * @param a_cnt target number of FS_DENT entries to fit in * @returns NULL on error */ TSK_FS_DIR * tsk_fs_dir_alloc(TSK_FS_INFO * a_fs, TSK_INUM_T a_addr, size_t a_cnt) { TSK_FS_DIR *fs_dir; size_t i; // allocate and initialize the structure if ((fs_dir = (TSK_FS_DIR *) tsk_malloc(sizeof(TSK_FS_DIR))) == NULL) { return NULL; } fs_dir->names_alloc = a_cnt; fs_dir->names_used = 0; if ((fs_dir->names = (TSK_FS_NAME *) tsk_malloc(sizeof(TSK_FS_NAME) * fs_dir->names_alloc)) == NULL) { free(fs_dir); return NULL; } fs_dir->fs_info = a_fs; fs_dir->addr = a_addr; fs_dir->tag = TSK_FS_DIR_TAG; for (i = 0; i < a_cnt; i++) { fs_dir->names[i].tag = TSK_FS_NAME_TAG; } return fs_dir; } /** \internal * Make the buffer in the FS_DIR structure larger. 
* * @param a_fs_dir Structure to enhance * @param a_cnt target number of FS_DENT entries to fit in * @returns 1 on error and 0 on success */ uint8_t tsk_fs_dir_realloc(TSK_FS_DIR * a_fs_dir, size_t a_cnt) { size_t prev_cnt, i; if ((a_fs_dir == NULL) || (a_fs_dir->tag != TSK_FS_DIR_TAG)) return 1; if (a_fs_dir->names_alloc >= a_cnt) return 0; prev_cnt = a_fs_dir->names_alloc; a_fs_dir->names_alloc = a_cnt; if ((a_fs_dir->names = (TSK_FS_NAME *) tsk_realloc((void *) a_fs_dir->names, sizeof(TSK_FS_NAME) * a_fs_dir->names_alloc)) == NULL) { return 1; } memset(&a_fs_dir->names[prev_cnt], 0, (a_cnt - prev_cnt) * sizeof(TSK_FS_NAME)); for (i = prev_cnt; i < a_cnt; i++) { a_fs_dir->names[i].tag = TSK_FS_NAME_TAG; } return 0; } /** \internal * Reset the structures in a FS_DIR so that it can be reused. * @param a_fs_dir FS_DIR structure to re-use */ void tsk_fs_dir_reset(TSK_FS_DIR * a_fs_dir) { if ((a_fs_dir == NULL) || (a_fs_dir->tag != TSK_FS_DIR_TAG)) return; if (a_fs_dir->fs_file) { tsk_fs_file_close(a_fs_dir->fs_file); a_fs_dir->fs_file = NULL; } a_fs_dir->names_used = 0; a_fs_dir->addr = 0; a_fs_dir->seq = 0; } /** \internal * Copy the contents of one directory structure to another. * Note that this currently does not copy the FS_FILE info. * It is only used to make a copy of the orphan directory. * It does not check for duplicate entries. * @returns 1 on error */ static uint8_t tsk_fs_dir_copy(const TSK_FS_DIR * a_src_dir, TSK_FS_DIR * a_dst_dir) { size_t i; a_dst_dir->names_used = 0; // make sure we got the room if (a_src_dir->names_used > a_dst_dir->names_alloc) { if (tsk_fs_dir_realloc(a_dst_dir, a_src_dir->names_used)) return 1; } for (i = 0; i < a_src_dir->names_used; i++) { if (tsk_fs_name_copy(&a_dst_dir->names[i], &a_src_dir->names[i])) return 1; } a_dst_dir->names_used = a_src_dir->names_used; a_dst_dir->addr = a_src_dir->addr; a_dst_dir->seq = a_src_dir->seq; return 0; } /** \internal * Add a FS_DENT structure to a FS_DIR structure by copying its * contents into the internal buffer. Checks for * duplicates and expands buffer as needed. * @param a_fs_dir DIR to add to * @param a_fs_name DENT to add * @returns 1 on error (memory allocation problems) and 0 on success */ uint8_t tsk_fs_dir_add(TSK_FS_DIR * a_fs_dir, const TSK_FS_NAME * a_fs_name) { TSK_FS_NAME *fs_name_dest = NULL; size_t i; /* see if we already have it in the buffer / queue * We skip this check for FAT because it will always fail because two entries * never have the same meta address. */ // @@@ We could do something more effecient here too with orphan files because we do not // need to check the contents of that directory either and this takes a lot of time on those // large images. 
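    /* The loop below scans the names collected so far for an entry with the
     * same metadata address and name string.  If a duplicate is found and the
     * stored copy is unallocated while the new entry is allocated, the stored
     * copy is recycled as the destination for the new data; otherwise the new
     * entry is silently dropped. */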
if (TSK_FS_TYPE_ISFAT(a_fs_dir->fs_info->ftype) == 0) { for (i = 0; i < a_fs_dir->names_used; i++) { if ((a_fs_name->meta_addr == a_fs_dir->names[i].meta_addr) && (strcmp(a_fs_name->name, a_fs_dir->names[i].name) == 0)) { if (tsk_verbose) tsk_fprintf(stderr, "tsk_fs_dir_add: removing duplicate entry: %s (%" PRIuINUM ")\n", a_fs_name->name, a_fs_name->meta_addr); /* We do not check type because then we cannot detect NTFS orphan file * duplicates that are added as "-/r" while a similar entry exists as "r/r" (a_fs_name->type == a_fs_dir->names[i].type)) { */ // if the one in the list is unalloc and we have an alloc, replace it if ((a_fs_dir->names[i].flags & TSK_FS_NAME_FLAG_UNALLOC) && (a_fs_name->flags & TSK_FS_NAME_FLAG_ALLOC)) { fs_name_dest = &a_fs_dir->names[i]; // free the memory - not the most effecient, but prevents // duplicate code. if (fs_name_dest->name) { free(fs_name_dest->name); fs_name_dest->name = NULL; fs_name_dest->name_size = 0; } if (fs_name_dest->shrt_name) { free(fs_name_dest->shrt_name); fs_name_dest->shrt_name = NULL; fs_name_dest->shrt_name_size = 0; } break; } else { return 0; } } } } if (fs_name_dest == NULL) { // make sure we got the room if (a_fs_dir->names_used >= a_fs_dir->names_alloc) { if (tsk_fs_dir_realloc(a_fs_dir, a_fs_dir->names_used + 512)) return 1; } fs_name_dest = &a_fs_dir->names[a_fs_dir->names_used++]; } if (tsk_fs_name_copy(fs_name_dest, a_fs_name)) return 1; // add the parent address if (a_fs_dir->addr) { fs_name_dest->par_addr = a_fs_dir->addr; fs_name_dest->par_seq = a_fs_dir->seq; } return 0; } /** \ingroup fslib * Open a directory (using its metadata addr) so that each of the files in it can be accessed. * @param a_fs File system to analyze * @param a_addr Metadata address of the directory to open * @returns NULL on error */ TSK_FS_DIR * tsk_fs_dir_open_meta(TSK_FS_INFO * a_fs, TSK_INUM_T a_addr) { TSK_FS_DIR *fs_dir = NULL; TSK_RETVAL_ENUM retval; if ((a_fs == NULL) || (a_fs->tag != TSK_FS_INFO_TAG) || (a_fs->dir_open_meta == NULL)) { tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("tsk_fs_dir_open_meta: called with NULL or unallocated structures"); return NULL; } retval = a_fs->dir_open_meta(a_fs, &fs_dir, a_addr); if (retval != TSK_OK) { tsk_fs_dir_close(fs_dir); return NULL; } return fs_dir; } /** \ingroup fslib * Open a directory (using its path) so that each of the files in it can be accessed. 
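*
* A minimal usage sketch (the path and the "fs" handle are examples only and
* error handling is trimmed):
* \code
*     TSK_FS_DIR *dir = tsk_fs_dir_open(fs, "/Windows/System32");
*     if (dir != NULL) {
*         size_t i;
*         for (i = 0; i < tsk_fs_dir_getsize(dir); i++) {
*             TSK_FS_FILE *file = tsk_fs_dir_get(dir, i);
*             if (file == NULL)
*                 continue;
*             if (file->name)
*                 printf("%s\n", file->name->name);
*             tsk_fs_file_close(file);
*         }
*         tsk_fs_dir_close(dir);
*     }
* \endcode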
* @param a_fs File system to analyze * @param a_dir Path of the directory to open * @returns NULL on error */ TSK_FS_DIR * tsk_fs_dir_open(TSK_FS_INFO * a_fs, const char *a_dir) { TSK_INUM_T inum; int8_t retval; TSK_FS_DIR *fs_dir; TSK_FS_NAME *fs_name; if ((a_fs == NULL) || (a_fs->tag != TSK_FS_INFO_TAG)) { tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("tsk_fs_dir_open: called with NULL or unallocated structures"); return NULL; } // allocate a structure to store the name in if ((fs_name = tsk_fs_name_alloc(128, 32)) == NULL) { return NULL; } retval = tsk_fs_path2inum(a_fs, a_dir, &inum, fs_name); if (retval == -1) { tsk_fs_name_free(fs_name); return NULL; } else if (retval == 1) { tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("tsk_fs_dir_open: path not found: %s", a_dir); tsk_fs_name_free(fs_name); return NULL; } fs_dir = tsk_fs_dir_open_meta(a_fs, inum); // add the name structure on to it if ((fs_dir) && (fs_dir->fs_file)) fs_dir->fs_file->name = fs_name; return fs_dir; } /** \ingroup fslib * Close the directory that was opened with tsk_fs_dir_open() * @param a_fs_dir Directory to close */ void tsk_fs_dir_close(TSK_FS_DIR * a_fs_dir) { size_t i; if ((a_fs_dir == NULL) || (a_fs_dir->tag != TSK_FS_DIR_TAG)) { return; } for (i = 0; i < a_fs_dir->names_used; i++) { if (a_fs_dir->names[i].name) { free(a_fs_dir->names[i].name); a_fs_dir->names[i].name = NULL; a_fs_dir->names[i].name_size = 0; } if (a_fs_dir->names[i].shrt_name) { free(a_fs_dir->names[i].shrt_name); a_fs_dir->names[i].shrt_name = NULL; a_fs_dir->names[i].shrt_name_size = 0; } } free(a_fs_dir->names); if (a_fs_dir->fs_file) { tsk_fs_file_close(a_fs_dir->fs_file); a_fs_dir->fs_file = NULL; } a_fs_dir->tag = 0; free(a_fs_dir); } /** \ingroup fslib * Returns the number of files and subdirectories in a directory. * @param a_fs_dir Directory to get information about * @returns Number of files and subdirectories (or 0 on error) */ size_t tsk_fs_dir_getsize(const TSK_FS_DIR * a_fs_dir) { if ((a_fs_dir == NULL) || (a_fs_dir->tag != TSK_FS_DIR_TAG)) { tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("tsk_fs_dir_getsize: called with NULL or unallocated structures"); return 0; } return a_fs_dir->names_used; } /** \ingroup fslib * Return a specific file or subdirectory from an open directory. * @param a_fs_dir Directory to analyze * @param a_idx Index of file in directory to open (0-based) * @returns NULL on error */ TSK_FS_FILE * tsk_fs_dir_get(const TSK_FS_DIR * a_fs_dir, size_t a_idx) { TSK_FS_NAME *fs_name; TSK_FS_FILE *fs_file; if ((a_fs_dir == NULL) || (a_fs_dir->tag != TSK_FS_DIR_TAG) || (a_fs_dir->fs_info == NULL)) { tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("tsk_fs_dir_get: called with NULL or unallocated structures"); return NULL; } if (a_fs_dir->names_used <= a_idx) { tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("tsk_fs_dir_get: Index (%" PRIuSIZE ") too large (%" PRIuSIZE ")", a_idx, a_fs_dir->names_used); return NULL; } // allocate a structure to return if ((fs_file = tsk_fs_file_alloc(a_fs_dir->fs_info)) == NULL) return NULL; fs_name = &(a_fs_dir->names[a_idx]); // copy the name into another structure that we can return and later free if ((fs_file->name = tsk_fs_name_alloc(fs_name->name ? strlen(fs_name->name) + 1 : 0, fs_name->shrt_name ? strlen(fs_name->shrt_name) + 1 : 0)) == NULL) { return NULL; } if (tsk_fs_name_copy(fs_file->name, fs_name)) return NULL; /* load the fs_meta structure if possible. 
* Must have non-zero inode addr or have allocated name (if inode is 0) */ if (((fs_name->meta_addr) || (fs_name->flags & TSK_FS_NAME_FLAG_ALLOC))) { if (a_fs_dir->fs_info->file_add_meta(a_fs_dir->fs_info, fs_file, fs_name->meta_addr)) { if (tsk_verbose) tsk_error_print(stderr); tsk_error_reset(); } // if the sequence numbers don't match, then don't load the meta // should ideally have sequence in previous lookup, but it isn't // in all APIs yet if ((fs_file->meta) && (fs_file->meta->seq != fs_name->meta_seq)) { tsk_fs_meta_close(fs_file->meta); fs_file->meta = NULL; } } return fs_file; } /** \ingroup fslib * Return only the name for a file or subdirectory from an open directory. * Useful when wanting to find files of a given name and you don't need the * additional metadata. * * @param a_fs_dir Directory to analyze * @param a_idx Index of file in directory to open (0-based) * @returns NULL on error */ const TSK_FS_NAME * tsk_fs_dir_get_name(const TSK_FS_DIR * a_fs_dir, size_t a_idx) { if ((a_fs_dir == NULL) || (a_fs_dir->tag != TSK_FS_DIR_TAG) || (a_fs_dir->fs_info == NULL)) { tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("tsk_fs_dir_get: called with NULL or unallocated structures"); return NULL; } if (a_fs_dir->names_used <= a_idx) { tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("tsk_fs_dir_get: Index (%" PRIuSIZE ") too large (%" PRIuSIZE ")", a_idx, a_fs_dir->names_used); return NULL; } return &(a_fs_dir->names[a_idx]); } #define MAX_DEPTH 128 #define DIR_STRSZ 4096 /** \internal * used to keep state between calls to dir_walk_lcl */ typedef struct { /* Recursive path stuff */ /* how deep in the directory tree are we */ unsigned int depth; /* pointer in dirs string to where '/' is for given depth */ char *didx[MAX_DEPTH]; /* The current directory name string */ char dirs[DIR_STRSZ]; TSK_STACK *stack_seen; /* Set to one to collect inode info that can be used for orphan listing */ uint8_t save_inum_named; /* We keep list_inum_named inside DENT_DINFO so different threads * have their own copies. On successful completion of the dir * walk we reassigned ownership of this pointer into the shared * TSK_FS_INFO list_inum_named field. We're trading off the extra * work in each thread for cleaner locking code. */ TSK_LIST *list_inum_named; } DENT_DINFO; /* dir_walk local function that is used for recursive calls. Callers * should initially call the non-local version. */ static TSK_WALK_RET_ENUM tsk_fs_dir_walk_lcl(TSK_FS_INFO * a_fs, DENT_DINFO * a_dinfo, TSK_INUM_T a_addr, TSK_FS_DIR_WALK_FLAG_ENUM a_flags, TSK_FS_DIR_WALK_CB a_action, void *a_ptr) { TSK_FS_DIR *fs_dir; TSK_FS_FILE *fs_file; size_t i; // get the list of entries in the directory if ((fs_dir = tsk_fs_dir_open_meta(a_fs, a_addr)) == NULL) { return TSK_WALK_ERROR; } /* Allocate a file structure for the callbacks. We * will allocate fs_meta structures as needed and * point into the fs_dir structure for the names. */ if ((fs_file = tsk_fs_file_alloc(a_fs)) == NULL) { tsk_fs_dir_close(fs_dir); return TSK_WALK_ERROR; } for (i = 0; i < fs_dir->names_used; i++) { TSK_WALK_RET_ENUM retval; /* Point name to the buffer of names. We need to be * careful about resetting this before we free fs_file */ fs_file->name = (TSK_FS_NAME *) & fs_dir->names[i]; /* load the fs_meta structure if possible. 
* Must have non-zero inode addr or have allocated name (if inode is 0) */ if (((fs_file->name->meta_addr) || (fs_file->name->flags & TSK_FS_NAME_FLAG_ALLOC))) { /* Note that the NTFS code behind here has a slight hack to use the * correct sequence number based on the data in fs_file->name */ if (a_fs->file_add_meta(a_fs, fs_file, fs_file->name->meta_addr)) { if (tsk_verbose) tsk_error_print(stderr); tsk_error_reset(); } } // call the action if we have the right flags. if ((fs_file->name->flags & a_flags) == fs_file->name->flags) { retval = a_action(fs_file, a_dinfo->dirs, a_ptr); if (retval == TSK_WALK_STOP) { tsk_fs_dir_close(fs_dir); fs_file->name = NULL; tsk_fs_file_close(fs_file); /* free the list -- fs_dir_walk has no way * of knowing that we stopped early w/out error. */ if (a_dinfo->save_inum_named) { tsk_list_free(a_dinfo->list_inum_named); a_dinfo->list_inum_named = NULL; a_dinfo->save_inum_named = 0; } return TSK_WALK_STOP; } else if (retval == TSK_WALK_ERROR) { tsk_fs_dir_close(fs_dir); fs_file->name = NULL; tsk_fs_file_close(fs_file); return TSK_WALK_ERROR; } } // save the inode info for orphan finding - if requested if ((a_dinfo->save_inum_named) && (fs_file->meta) && (fs_file->meta->flags & TSK_FS_META_FLAG_UNALLOC)) { if (tsk_list_add(&a_dinfo->list_inum_named, fs_file->meta->addr)) { // if there is an error, then clear the list tsk_list_free(a_dinfo->list_inum_named); a_dinfo->list_inum_named = NULL; a_dinfo->save_inum_named = 0; } } /* Recurse into a directory if: * - Both dir entry and inode have DIR type (or name is undefined) * - Recurse flag is set * - dir entry is allocated OR both are unallocated * - not one of the '.' or '..' entries * - A Non-Orphan Dir or the Orphan Dir with the NOORPHAN flag not set. */ if (((fs_file->name->type == TSK_FS_NAME_TYPE_DIR) || (fs_file->name->type == TSK_FS_NAME_TYPE_UNDEF)) && (fs_file->meta) && (fs_file->meta->type == TSK_FS_META_TYPE_DIR) && (a_flags & TSK_FS_DIR_WALK_FLAG_RECURSE) && ((fs_file->name->flags & TSK_FS_NAME_FLAG_ALLOC) || ((fs_file->name->flags & TSK_FS_NAME_FLAG_UNALLOC) && (fs_file->meta->flags & TSK_FS_META_FLAG_UNALLOC)) ) && (!TSK_FS_ISDOT(fs_file->name->name)) && ((fs_file->name->meta_addr != TSK_FS_ORPHANDIR_INUM(a_fs)) || ((a_flags & TSK_FS_DIR_WALK_FLAG_NOORPHAN) == 0)) ) { /* Make sure we do not get into an infinite loop */ if (0 == tsk_stack_find(a_dinfo->stack_seen, fs_file->name->meta_addr)) { int depth_added = 0; uint8_t save_bak = 0; if (tsk_stack_push(a_dinfo->stack_seen, fs_file->name->meta_addr)) { tsk_fs_dir_close(fs_dir); fs_file->name = NULL; tsk_fs_file_close(fs_file); return TSK_WALK_ERROR; } if ((a_dinfo->depth < MAX_DEPTH) && (DIR_STRSZ > strlen(a_dinfo->dirs) + strlen(fs_file->name->name))) { a_dinfo->didx[a_dinfo->depth] = &a_dinfo->dirs[strlen(a_dinfo->dirs)]; strncpy(a_dinfo->didx[a_dinfo->depth], fs_file->name->name, DIR_STRSZ - strlen(a_dinfo->dirs)); strncat(a_dinfo->dirs, "/", DIR_STRSZ); depth_added = 1; } a_dinfo->depth++; /* We do not want to save info about named unalloc files * when we go into the Orphan directory (because then we have * no orphans). So, disable it for this recursion. 
*/ if (fs_file->name->meta_addr == TSK_FS_ORPHANDIR_INUM(a_fs)) { save_bak = a_dinfo->save_inum_named; a_dinfo->save_inum_named = 0; } retval = tsk_fs_dir_walk_lcl(a_fs, a_dinfo, fs_file->name->meta_addr, a_flags, a_action, a_ptr); if (retval == TSK_WALK_ERROR) { /* If this fails because the directory could not be * loaded, then we still continue */ if (tsk_verbose) { tsk_fprintf(stderr, "tsk_fs_dir_walk_lcl: error reading directory: %" PRIuINUM "\n", fs_file->name->meta_addr); tsk_error_print(stderr); } tsk_error_reset(); } else if (retval == TSK_WALK_STOP) { tsk_fs_dir_close(fs_dir); fs_file->name = NULL; tsk_fs_file_close(fs_file); return TSK_WALK_STOP; } // reset the save status if (fs_file->name->meta_addr == TSK_FS_ORPHANDIR_INUM(a_fs)) { a_dinfo->save_inum_named = save_bak; } tsk_stack_pop(a_dinfo->stack_seen); a_dinfo->depth--; if (depth_added) *a_dinfo->didx[a_dinfo->depth] = '\0'; } else { if (tsk_verbose) fprintf(stderr, "tsk_fs_dir_walk_lcl: Loop detected with address %" PRIuINUM, fs_file->name->meta_addr); } } // remove the pointer to name buffer fs_file->name = NULL; // free the metadata if we allocated it if (fs_file->meta) { tsk_fs_meta_close(fs_file->meta); fs_file->meta = NULL; } } tsk_fs_dir_close(fs_dir); fs_file->name = NULL; tsk_fs_file_close(fs_file); return TSK_WALK_CONT; } /** \ingroup fslib * Walk the file names in a directory and obtain the details of the files via a callback. * * @param a_fs File system to analyze * @param a_addr Metadata address of the directory to analyze * @param a_flags Flags used during analysis * @param a_action Callback function that is called for each file name * @param a_ptr Pointer to data that is passed to the callback function each time * @returns 1 on error and 0 on success */ uint8_t tsk_fs_dir_walk(TSK_FS_INFO * a_fs, TSK_INUM_T a_addr, TSK_FS_DIR_WALK_FLAG_ENUM a_flags, TSK_FS_DIR_WALK_CB a_action, void *a_ptr) { DENT_DINFO dinfo; TSK_WALK_RET_ENUM retval; if ((a_fs == NULL) || (a_fs->tag != TSK_FS_INFO_TAG)) { tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("tsk_fs_dir_walk: called with NULL or unallocated structures"); return 1; } memset(&dinfo, 0, sizeof(DENT_DINFO)); if ((dinfo.stack_seen = tsk_stack_create()) == NULL) return 1; /* Sanity check on flags -- make sure at least one ALLOC is set */ if (((a_flags & TSK_FS_DIR_WALK_FLAG_ALLOC) == 0) && ((a_flags & TSK_FS_DIR_WALK_FLAG_UNALLOC) == 0)) { a_flags |= (TSK_FS_DIR_WALK_FLAG_ALLOC | TSK_FS_DIR_WALK_FLAG_UNALLOC); } /* if the flags are right, we can collect info that may be needed * for an orphan walk. If the walk fails or stops, the code that * calls the action will clear this stuff. */ tsk_take_lock(&a_fs->list_inum_named_lock); if ((a_fs->list_inum_named == NULL) && (a_addr == a_fs->root_inum) && (a_flags & TSK_FS_DIR_WALK_FLAG_RECURSE)) { dinfo.save_inum_named = 1; } tsk_release_lock(&a_fs->list_inum_named_lock); retval = tsk_fs_dir_walk_lcl(a_fs, &dinfo, a_addr, a_flags, a_action, a_ptr); if (dinfo.save_inum_named == 1) { if (retval != TSK_WALK_CONT) { /* There was an error and we stopped early, so we should get * rid of the partial list we were making. */ tsk_list_free(dinfo.list_inum_named); dinfo.list_inum_named = NULL; } else { /* We finished the dir walk successfully, so reassign * ownership of the dinfo's list_inum_named to the shared * list_inum_named in TSK_FS_INFO, under a lock, if * another thread hasn't already done so. 
*/ tsk_take_lock(&a_fs->list_inum_named_lock); if (a_fs->list_inum_named == NULL) { a_fs->list_inum_named = dinfo.list_inum_named; } else { tsk_list_free(dinfo.list_inum_named); } tsk_release_lock(&a_fs->list_inum_named_lock); dinfo.list_inum_named = NULL; } } tsk_stack_free(dinfo.stack_seen); if (retval == TSK_WALK_ERROR) return 1; else return 0; } /** \internal * Create a dummy NAME entry for the Orphan file virtual directory. * @param a_fs File system directory is for * @param a_fs_name NAME structure to populate with data * @returns 1 on error */ uint8_t tsk_fs_dir_make_orphan_dir_name(TSK_FS_INFO * a_fs, TSK_FS_NAME * a_fs_name) { snprintf(a_fs_name->name, a_fs_name->name_size, "$OrphanFiles"); if (a_fs_name->shrt_name_size > 0) a_fs_name->shrt_name[0] = '\0'; a_fs_name->meta_addr = TSK_FS_ORPHANDIR_INUM(a_fs); a_fs_name->flags = TSK_FS_NAME_FLAG_ALLOC; a_fs_name->type = TSK_FS_NAME_TYPE_DIR; return 0; } /** \internal * Create a dummy META entry for the Orphan file virtual directory. * @param a_fs File system directory is for * @param a_fs_meta META structure to populate with data * @returns 1 on error */ uint8_t tsk_fs_dir_make_orphan_dir_meta(TSK_FS_INFO * a_fs, TSK_FS_META * a_fs_meta) { a_fs_meta->type = TSK_FS_META_TYPE_DIR; a_fs_meta->mode = 0; a_fs_meta->nlink = 1; a_fs_meta->flags = (TSK_FS_META_FLAG_USED | TSK_FS_META_FLAG_ALLOC); a_fs_meta->uid = a_fs_meta->gid = 0; a_fs_meta->mtime = a_fs_meta->atime = a_fs_meta->ctime = a_fs_meta->crtime = 0; a_fs_meta->mtime_nano = a_fs_meta->atime_nano = a_fs_meta->ctime_nano = a_fs_meta->crtime_nano = 0; if (a_fs_meta->name2 == NULL) { if ((a_fs_meta->name2 = (TSK_FS_META_NAME_LIST *) tsk_malloc(sizeof(TSK_FS_META_NAME_LIST))) == NULL) return 1; a_fs_meta->name2->next = NULL; } a_fs_meta->attr_state = TSK_FS_META_ATTR_EMPTY; if (a_fs_meta->attr) { tsk_fs_attrlist_markunused(a_fs_meta->attr); } a_fs_meta->addr = TSK_FS_ORPHANDIR_INUM(a_fs); strncpy(a_fs_meta->name2->name, "$OrphanFiles", TSK_FS_META_NAME_LIST_NSIZE); if (a_fs_meta->content_len) { TSK_DADDR_T *addr_ptr = (TSK_DADDR_T *) a_fs_meta->content_ptr; addr_ptr[0] = 0; } a_fs_meta->size = 0; return 0; } /** \internal * Searches the list of metadata addresses that are pointed to * by unallocated names. Used to find orphan files. * @param a_fs File system being analyzed. * @param a_inum Metadata address to lookup in list. * @returns 1 if metadata address is pointed to by an unallocated * file name or 0 if not. */ uint8_t tsk_fs_dir_find_inum_named(TSK_FS_INFO * a_fs, TSK_INUM_T a_inum) { uint8_t retval = 0; tsk_take_lock(&a_fs->list_inum_named_lock); // list can be null if no unallocated file names exist if (a_fs->list_inum_named) retval = tsk_list_find(a_fs->list_inum_named, a_inum); tsk_release_lock(&a_fs->list_inum_named_lock); return retval; } /* callback that is used by tsk_fs_dir_load_inum_named. It does nothing * because each file system has the code needed to make caller happy. */ static TSK_WALK_RET_ENUM load_named_dir_walk_cb(TSK_FS_FILE * a_fs_file, const char *a_path, void *a_ptr) { return TSK_WALK_CONT; } /** \internal * Proces a file system and populate a list of the metadata structures * that are reachable by file names. This is used to find orphan files. * Each file system has code that does the populating. 
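* The resulting list is stored in TSK_FS_INFO::list_inum_named (protected by
* list_inum_named_lock) and is later consulted by tsk_fs_dir_find_inum_named()
* when orphan files are being identified.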
*/ TSK_RETVAL_ENUM tsk_fs_dir_load_inum_named(TSK_FS_INFO * a_fs) { tsk_take_lock(&a_fs->list_inum_named_lock); if (a_fs->list_inum_named != NULL) { tsk_release_lock(&a_fs->list_inum_named_lock); if (tsk_verbose) fprintf(stderr, "tsk_fs_dir_load_inum_named: List already populated. Skipping walk.\n"); return TSK_OK; } tsk_release_lock(&a_fs->list_inum_named_lock); if (tsk_verbose) fprintf(stderr, "tsk_fs_dir_load_inum_named: Performing dir walk to find named files\n"); /* Do a dir_walk. There is internal caching code that will populate * the structure. The callback is really a dummy call. This could * be made more effecient in the future (not do callbacks...). We * specify UNALLOC only as a flag on the assumption that there will * be fewer callbacks for UNALLOC than ALLOC. */ if (tsk_fs_dir_walk(a_fs, a_fs->root_inum, TSK_FS_NAME_FLAG_UNALLOC | TSK_FS_DIR_WALK_FLAG_RECURSE | TSK_FS_DIR_WALK_FLAG_NOORPHAN, load_named_dir_walk_cb, NULL)) { tsk_error_errstr2_concat ("- tsk_fs_dir_load_inum_named: identifying inodes allocated by file names"); return TSK_ERR; } return TSK_OK; } /* Used to keep state while populating the orphan directory */ typedef struct { TSK_FS_NAME *fs_name; // temp name structure used when adding entries to fs_dir TSK_FS_DIR *fs_dir; // unique names are added to this. represents contents of OrphanFiles directory TSK_LIST *orphan_subdir_list; // keep track of files that can already be accessed via orphan directory } FIND_ORPHAN_DATA; /* Used to process orphan directories and make sure that their contents * are now marked as reachable */ static TSK_WALK_RET_ENUM load_orphan_dir_walk_cb(TSK_FS_FILE * a_fs_file, const char *a_path, void *a_ptr) { FIND_ORPHAN_DATA *data = (FIND_ORPHAN_DATA *) a_ptr; if( a_fs_file == NULL ) { return TSK_WALK_ERROR; } // ignore DOT entries if ((a_fs_file->name) && (a_fs_file->name->name) && (TSK_FS_ISDOT(a_fs_file->name->name))) return TSK_WALK_CONT; // add this entry to the orphan list if (a_fs_file->meta) { /* Stop if we hit an allocated entry. We shouldn't get these, but did * have some trouble images that went into allocated clusters on * a FAT file system. */ if (a_fs_file->meta->flags & TSK_FS_META_FLAG_ALLOC) { if (tsk_verbose) { tsk_fprintf(stderr, "load_orphan_dir_walk_cb: Skipping an allocated file (ID: %" PRIuINUM ")\n", a_fs_file->meta->addr); } return TSK_WALK_STOP; } /* check if we have already added it as an orphan (in a subdirectory) * Not entirely sure how possible this is, but it was added while * debugging an infinite loop problem. */ if (tsk_list_find(data->orphan_subdir_list, a_fs_file->meta->addr)) { if (tsk_verbose) fprintf(stderr, "load_orphan_dir_walk_cb: Detected loop with address %" PRIuINUM, a_fs_file->meta->addr); return TSK_WALK_STOP; } tsk_list_add(&data->orphan_subdir_list, a_fs_file->meta->addr); /* FAT file systems spend a lot of time hunting for parent * directory addresses, so we put this code in here to save * the info when we have it. 
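* (fatfs_dir_buf_add() stores the parent/child metadata address pair,
* presumably so that later parent-address lookups can reuse the saved mapping
* instead of re-scanning directories.)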
*/ if ((a_fs_file->meta->type == TSK_FS_META_TYPE_DIR) && (TSK_FS_TYPE_ISFAT(a_fs_file->fs_info->ftype))) { // Make sure a_fs_file->name->par_addr is not accessed when // a_fs_file->name is NULL if ((a_fs_file->name) && (fatfs_dir_buf_add((FATFS_INFO *) a_fs_file->fs_info, a_fs_file->name->par_addr, a_fs_file->meta->addr))) return TSK_WALK_ERROR; } } return TSK_WALK_CONT; } /* used to identify the unnamed metadata structures */ static TSK_WALK_RET_ENUM find_orphan_meta_walk_cb(TSK_FS_FILE * a_fs_file, void *a_ptr) { FIND_ORPHAN_DATA *data = (FIND_ORPHAN_DATA *) a_ptr; TSK_FS_INFO *fs = a_fs_file->fs_info; /* We want only orphans, then check if this * inode is in the seen list */ tsk_take_lock(&fs->list_inum_named_lock); if ((fs->list_inum_named) && (tsk_list_find(fs->list_inum_named, a_fs_file->meta->addr))) { tsk_release_lock(&fs->list_inum_named_lock); return TSK_WALK_CONT; } tsk_release_lock(&fs->list_inum_named_lock); // check if we have already added it as an orphan (in a subdirectory) if (tsk_list_find(data->orphan_subdir_list, a_fs_file->meta->addr)) { return TSK_WALK_CONT; } // use their name if they have one if (a_fs_file->meta->name2 != NULL && a_fs_file->meta->name2->name != NULL && strlen(a_fs_file->meta->name2->name) > 0) { strncpy(data->fs_name->name, a_fs_file->meta->name2->name, data->fs_name->name_size); } else { snprintf(data->fs_name->name, data->fs_name->name_size, "OrphanFile-%" PRIuINUM, a_fs_file->meta->addr); } data->fs_name->meta_addr = a_fs_file->meta->addr; /* unalloc MFT entries have their sequence number incremented * when they are unallocated. Decrement it in the file name so * that it matches the typical situation where the name is one * less. */ data->fs_name->meta_seq = a_fs_file->meta->seq - 1; data->fs_name->flags = TSK_FS_NAME_FLAG_UNALLOC; data->fs_name->type = TSK_FS_NAME_TYPE_UNDEF; if (tsk_fs_dir_add(data->fs_dir, data->fs_name)) return TSK_WALK_ERROR; /* FAT file systems spend a lot of time hunting for parent * directory addresses, so we put this code in here to save * the info when we have it. */ if (TSK_FS_TYPE_ISFAT(fs->ftype)) { if (fatfs_dir_buf_add((FATFS_INFO *) fs, TSK_FS_ORPHANDIR_INUM(fs), a_fs_file->meta->addr)) return TSK_WALK_ERROR; } /* Go into directories to mark their contents as "seen" */ if (a_fs_file->meta->type == TSK_FS_META_TYPE_DIR) { if (tsk_verbose) fprintf(stderr, "find_orphan_meta_walk_cb: Going into directory %" PRIuINUM " to mark contents as seen\n", a_fs_file->meta->addr); if (tsk_fs_dir_walk(fs, a_fs_file->meta->addr, TSK_FS_DIR_WALK_FLAG_UNALLOC | TSK_FS_DIR_WALK_FLAG_RECURSE | TSK_FS_DIR_WALK_FLAG_NOORPHAN, load_orphan_dir_walk_cb, data)) { tsk_error_errstr2_concat (" - find_orphan_meta_walk_cb: identifying inodes allocated by file names"); return TSK_WALK_ERROR; } } return TSK_WALK_CONT; } /** \internal * Adds the fake metadata entry in the FS_DIR->fs_file struct for the orphan files directory * * @returns 1 on error */ static uint8_t tsk_fs_dir_add_orphan_dir_meta(TSK_FS_INFO * a_fs, TSK_FS_DIR * a_fs_dir) { // populate the fake FS_FILE structure for the "Orphan Directory" if ((a_fs_dir->fs_file = tsk_fs_file_alloc(a_fs)) == NULL) { return 1; } if ((a_fs_dir->fs_file->meta = tsk_fs_meta_alloc(sizeof(TSK_DADDR_T))) == NULL) { return 1; } if (tsk_fs_dir_make_orphan_dir_meta(a_fs, a_fs_dir->fs_file->meta)) { return 1; } return 0; } /** \internal * Search the file system for orphan files and create the orphan file directory. 
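* In outline: if an orphan directory was already built, the cached copy in
* TSK_FS_INFO::orphan_dir is reused.  Otherwise the code (1) calls
* tsk_fs_dir_load_inum_named() to learn which metadata addresses are already
* reachable by a file name, (2) walks the used but unallocated metadata
* entries and adds every address not in that list to the orphan directory
* (named "OrphanFile-<address>" unless the metadata carries its own name),
* (3) removes entries that turn out to be reachable through an orphaned
* subdirectory, and finally caches the result for later calls.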
* @param a_fs File system to search * @param a_fs_dir Structure to store the orphan file directory info in. */ TSK_RETVAL_ENUM tsk_fs_dir_find_orphans(TSK_FS_INFO * a_fs, TSK_FS_DIR * a_fs_dir) { FIND_ORPHAN_DATA data; size_t i; tsk_take_lock(&a_fs->orphan_dir_lock); if (a_fs->orphan_dir != NULL) { if (tsk_fs_dir_copy(a_fs->orphan_dir, a_fs_dir)) { tsk_release_lock(&a_fs->orphan_dir_lock); return TSK_ERR; } if (tsk_fs_dir_add_orphan_dir_meta(a_fs, a_fs_dir)) { tsk_release_lock(&a_fs->orphan_dir_lock); return TSK_ERR; } tsk_release_lock(&a_fs->orphan_dir_lock); return TSK_OK; } if (tsk_verbose) fprintf(stderr, "tsk_fs_dir_find_orphans: Searching for orphan files\n"); memset(&data, 0, sizeof(FIND_ORPHAN_DATA)); /* We first need to determine which of the unallocated meta structures * have a name pointing to them. We cache this data, so see if it is * already known. */ if (tsk_fs_dir_load_inum_named(a_fs) != TSK_OK) { tsk_release_lock(&a_fs->orphan_dir_lock); return TSK_ERR; } // note that list_inum_named could still be NULL if there are no deleted names. /* Now we walk the unallocated metadata structures and find ones that are * not named. The callback will add the names to the FS_DIR structure. */ data.fs_dir = a_fs_dir; // allocate a name once so that we will reuse for each name we add to FS_DIR if ((data.fs_name = tsk_fs_name_alloc(256, 0)) == NULL) { tsk_release_lock(&a_fs->orphan_dir_lock); return TSK_ERR; } if (tsk_verbose) fprintf(stderr, "tsk_fs_dir_find_orphans: Performing inode_walk to find unnamed metadata structures\n"); if (tsk_fs_meta_walk(a_fs, a_fs->first_inum, a_fs->last_inum, TSK_FS_META_FLAG_UNALLOC | TSK_FS_META_FLAG_USED, find_orphan_meta_walk_cb, &data)) { tsk_fs_name_free(data.fs_name); tsk_release_lock(&a_fs->orphan_dir_lock); return TSK_ERR; } tsk_fs_name_free(data.fs_name); data.fs_name = NULL; if (tsk_verbose) fprintf(stderr, "tsk_fs_dir_find_orphans: De-duping orphan files and directories\n"); /* do some cleanup on the final list. This cleanup will compare the * entries in the root orphan directory with files that can be accessed * from subdirectories of the orphan directory. These entries will exist if * they were added before their parent directory was added to the orphan directory. */ for (i = 0; i < a_fs_dir->names_used; i++) { if (tsk_list_find(data.orphan_subdir_list, a_fs_dir->names[i].meta_addr)) { if (a_fs_dir->names_used > 1) { tsk_fs_name_copy(&a_fs_dir->names[i], &a_fs_dir->names[a_fs_dir->names_used - 1]); } a_fs_dir->names_used--; } } if (data.orphan_subdir_list) { tsk_list_free(data.orphan_subdir_list); data.orphan_subdir_list = NULL; } // make copy of this so that we don't need to do it again. if ((a_fs->orphan_dir = tsk_fs_dir_alloc(a_fs, a_fs_dir->addr, a_fs_dir->names_used)) == NULL) { tsk_release_lock(&a_fs->orphan_dir_lock); return TSK_ERR; } if (tsk_fs_dir_copy(a_fs_dir, a_fs->orphan_dir)) { tsk_release_lock(&a_fs->orphan_dir_lock); return TSK_ERR; } // populate the fake FS_FILE structure in the struct to be returned for the "Orphan Directory" if (tsk_fs_dir_add_orphan_dir_meta(a_fs, a_fs_dir)) { tsk_release_lock(&a_fs->orphan_dir_lock); return TSK_ERR; } tsk_release_lock(&a_fs->orphan_dir_lock); return TSK_OK; } sleuthkit-4.2.0/tsk/fs/fs_file.c000644 000765 000024 00000047774 12576320700 017334 0ustar00carrierstaff000000 000000 /* * fs_file * The Sleuth Kit * * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2008-2011 Brian Carrier. 
All Rights reserved * * This software is distributed under the Common Public License 1.0 * */ /** * \file fs_file.c * Create, manage, etc. the TSK_FS_FILE structures. */ #include "tsk_fs_i.h" /** * \internal * Allocate a new FS_FILE structure * @param a_fs File system fiel will be in. * @returns NULL on error */ TSK_FS_FILE * tsk_fs_file_alloc(TSK_FS_INFO * a_fs) { TSK_FS_FILE *fs_file; fs_file = (TSK_FS_FILE *) tsk_malloc(sizeof(TSK_FS_FILE)); if (fs_file == NULL) return NULL; fs_file->fs_info = a_fs; fs_file->tag = TSK_FS_FILE_TAG; return fs_file; } /** \internal * * Reset the meta and name structures. * @param a_fs_file File to reset */ void tsk_fs_file_reset(TSK_FS_FILE * a_fs_file) { if (a_fs_file->meta) tsk_fs_meta_reset(a_fs_file->meta); if (a_fs_file->name) tsk_fs_name_reset(a_fs_file->name); } /** * \ingroup fslib * Close an open file. * @param a_fs_file Pointer to open file */ void tsk_fs_file_close(TSK_FS_FILE * a_fs_file) { if ((a_fs_file == NULL) || (a_fs_file->tag != TSK_FS_FILE_TAG)) return; a_fs_file->tag = 0; if (a_fs_file->meta) { tsk_fs_meta_close(a_fs_file->meta); a_fs_file->meta = NULL; } if (a_fs_file->name) { tsk_fs_name_free(a_fs_file->name); a_fs_file->name = NULL; } free(a_fs_file); } /** * \ingroup fslib * * Open a file given its metadata address. This function loads the metadata * and returns a handle that can be used to read and process the file. Note * that the returned TSK_FS_FILE structure will not have the file name set because * it was not used to load the file and this function does not search the * directory structure to find the name that points to the address. In general, * if you know the metadata address of a file, this function is more effecient * then tsk_fs_file_open, which first maps a file name to the metadata address * and then opens the file using this function. * * @param a_fs File system to analyze * @param a_fs_file Structure to store file data in or NULL to have one allocated. * @param a_addr Metadata address of file to lookup * @returns NULL on error */ TSK_FS_FILE * tsk_fs_file_open_meta(TSK_FS_INFO * a_fs, TSK_FS_FILE * a_fs_file, TSK_INUM_T a_addr) { TSK_FS_FILE *fs_file; if ((a_fs == NULL) || (a_fs->tag != TSK_FS_INFO_TAG)) { tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("tsk_fs_file_open_meta: called with NULL or unallocated structures"); return NULL; } fs_file = a_fs_file; if (fs_file == NULL) { if ((fs_file = tsk_fs_file_alloc(a_fs)) == NULL) return NULL; } else { /* if the structure passed has a name structure, free it * because we won't use it. */ if (fs_file->name) { tsk_fs_name_free(fs_file->name); fs_file->name = NULL; } // reset the rest of it tsk_fs_file_reset(fs_file); } if (a_fs->file_add_meta(a_fs, fs_file, a_addr)) { if (a_fs_file == NULL) free(fs_file); return NULL; } return fs_file; } /** * \ingroup fslib * Return the handle structure for a specific file, given its full path. Note that * if you have the metadata address fo the file, then tsk_fs_file_open_meta() is a * more effecient approach. * * @param a_fs File system to analyze * @param a_fs_file Structure to store file data in or NULL to have one allocated. 
* @param a_path Path of file to open * @returns NULL on error */ TSK_FS_FILE * tsk_fs_file_open(TSK_FS_INFO * a_fs, TSK_FS_FILE * a_fs_file, const char *a_path) { TSK_INUM_T inum; int8_t retval; TSK_FS_FILE *fs_file = NULL; TSK_FS_NAME *fs_name = NULL; if ((a_fs == NULL) || (a_fs->tag != TSK_FS_INFO_TAG)) { tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("tsk_fs_file_open: called with NULL or unallocated structures"); return NULL; } // allocate a structure to store the name in if ((fs_name = tsk_fs_name_alloc(128, 32)) == NULL) { return NULL; } retval = tsk_fs_path2inum(a_fs, a_path, &inum, fs_name); if (retval == -1) { tsk_fs_name_free(fs_name); return NULL; } else if (retval == 1) { tsk_fs_name_free(fs_name); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("tsk_fs_file_open: path not found: %s", a_path); return NULL; } fs_file = tsk_fs_file_open_meta(a_fs, a_fs_file, inum); if (fs_file) { // Add the name to the structure fs_file->name = fs_name; // path2inum did not put this in there... fs_name->meta_seq = fs_file->meta->seq; } else { tsk_fs_name_free(fs_name); } return fs_file; } /** \internal * Check the arguments for the tsk_fs_file_attr_XXX functions * and load the attributes if needed. * @param a_fs_file File argument to check. * @param a_func Name of function that this is checking for (for error messages) * @returns 1 on error */ static int tsk_fs_file_attr_check(TSK_FS_FILE * a_fs_file, char *a_func) { TSK_FS_INFO *fs; // check the FS_INFO, FS_FILE structures if ((a_fs_file == NULL) || (a_fs_file->meta == NULL) || (a_fs_file->fs_info == NULL)) { tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("%s: called with NULL pointers", a_func); return 1; } else if (a_fs_file->meta->tag != TSK_FS_META_TAG) { tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("%s: called with unallocated structures", a_func); return 1; } fs = a_fs_file->fs_info; // If the attributes haven't been loaded, then load them. if (a_fs_file->meta->attr_state == TSK_FS_META_ATTR_ERROR) { tsk_error_set_errno(TSK_ERR_FS_INODE_COR); tsk_error_set_errstr("%s: called for file with corrupt data", a_func); return 1; } else if ((a_fs_file->meta->attr_state != TSK_FS_META_ATTR_STUDIED) || (a_fs_file->meta->attr == NULL)) { if (fs->load_attrs(a_fs_file)) { return 1; } } return 0; } /** \ingroup fslib * Return the number of attributes in the file. * * @param a_fs_file File to return attribute count for * @returns number of attributes in file */ int tsk_fs_file_attr_getsize(TSK_FS_FILE * a_fs_file) { if (tsk_fs_file_attr_check(a_fs_file, "tsk_fs_file_attr_getsize")) { // @@@ Not sure if we should be ignoring this error or not... // Just added the reset because we were returning 0 with error codes set tsk_error_reset(); return 0; } return tsk_fs_attrlist_get_len(a_fs_file->meta->attr); } /** \ingroup fslib * Get a file's attribute based on the 0-based index in the list (and not type, id pair). * @param a_fs_file File to get attributes from. * @param a_idx 0-based index of attribute to return. 
* @returns Pointer to attribute or NULL on error */ const TSK_FS_ATTR * tsk_fs_file_attr_get_idx(TSK_FS_FILE * a_fs_file, int a_idx) { if (tsk_fs_file_attr_check(a_fs_file, "tsk_fs_file_attr_get_idx")) return NULL; return tsk_fs_attrlist_get_idx(a_fs_file->meta->attr, a_idx); } /** \ingroup fslib * Return the default attribute for the file * @param a_fs_file File to get data from * @returns NULL on error */ const TSK_FS_ATTR * tsk_fs_file_attr_get(TSK_FS_FILE * a_fs_file) { TSK_FS_ATTR_TYPE_ENUM type; TSK_FS_INFO *fs; if (tsk_fs_file_attr_check(a_fs_file, "tsk_fs_file_attr_get")) return NULL; // since they did not give us a type, get the default for the file fs = a_fs_file->fs_info; type = fs->get_default_attr_type(a_fs_file); return tsk_fs_attrlist_get(a_fs_file->meta->attr, type); } /** \ingroup fslib * Return a specific type and id attribute for the file. * @param a_fs_file File to get data from * @param a_type Type of attribute to load * @param a_id Id of attribute to load * @param a_id_used Set to 1 if ID is actually set or 0 to use default * @returns NULL on error */ const TSK_FS_ATTR * tsk_fs_file_attr_get_type(TSK_FS_FILE * a_fs_file, TSK_FS_ATTR_TYPE_ENUM a_type, uint16_t a_id, uint8_t a_id_used) { if (tsk_fs_file_attr_check(a_fs_file, "tsk_fs_file_attr_get_type")) return NULL; if (a_id_used) return tsk_fs_attrlist_get_id(a_fs_file->meta->attr, a_type, a_id); else return tsk_fs_attrlist_get(a_fs_file->meta->attr, a_type); } /** \ingroup fslib * Return a specific attribute by its ID for the file. * @param a_fs_file File to get data from * @param a_id Id of attribute to load * @returns NULL on error */ const TSK_FS_ATTR * tsk_fs_file_attr_get_id(TSK_FS_FILE * a_fs_file, uint16_t a_id) { int i, size; if (tsk_fs_file_attr_check(a_fs_file, "tsk_fs_file_attr_get_type")) return NULL; size = tsk_fs_file_attr_getsize(a_fs_file); for (i = 0; i < size; i++) { const TSK_FS_ATTR *fs_attr = tsk_fs_file_attr_get_idx(a_fs_file, i); if (fs_attr == NULL) return NULL; if (fs_attr->id == a_id) return fs_attr; } tsk_error_set_errno(TSK_ERR_FS_ATTR_NOTFOUND); tsk_error_set_errstr ("tsk_fs_attr_get_id: Attribute ID %d not found", a_id); return NULL; } /** * \ingroup fslib * Process a specific attribute in a file and call a callback function with the file contents. The callback will be * called with chunks of data that are fs->block_size or less. The address given in the callback * will be correct only for raw files (when the raw file contents were stored in the block). For * compressed and sparse files, the address may be zero. If the file system you are analyzing does * not have multiple attributes per file, then you can use tsk_fs_file_walk(). For incomplete or * corrupt files, some missing runs will be identified as SPARSE and zeros will be returned in the content. * * @param a_fs_file File to process * @param a_type Attribute type to process * @param a_id Id if attribute to process * @param a_flags Flags to use while processing file * @param a_action Callback action to call with content * @param a_ptr Pointer that will passed to callback * @returns 1 on error and 0 on success. 
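*
* A minimal callback sketch (names are examples only; the same callback shape
* also works with tsk_fs_file_walk()):
* \code
*     static TSK_WALK_RET_ENUM
*     count_cb(TSK_FS_FILE *a_fs_file, TSK_OFF_T a_off, TSK_DADDR_T a_addr,
*         char *a_buf, size_t a_len, TSK_FS_BLOCK_FLAG_ENUM a_flags, void *a_ptr)
*     {
*         size_t *total = (size_t *) a_ptr;   // caller-supplied byte counter
*         *total += a_len;
*         return TSK_WALK_CONT;
*     }
*
*     size_t total = 0;
*     if (tsk_fs_file_walk(fs_file, TSK_FS_FILE_WALK_FLAG_NONE, count_cb, &total)) {
*         // handle error
*     }
* \endcode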
*/ uint8_t tsk_fs_file_walk_type(TSK_FS_FILE * a_fs_file, TSK_FS_ATTR_TYPE_ENUM a_type, uint16_t a_id, TSK_FS_FILE_WALK_FLAG_ENUM a_flags, TSK_FS_FILE_WALK_CB a_action, void *a_ptr) { const TSK_FS_ATTR *fs_attr; // clean up any error messages that are lying around tsk_error_reset(); // check the FS_INFO, FS_FILE structures if ((a_fs_file == NULL) || (a_fs_file->meta == NULL) || (a_fs_file->fs_info == NULL)) { tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("tsk_fs_file_walk: called with NULL pointers"); return 1; } else if ((a_fs_file->fs_info->tag != TSK_FS_INFO_TAG) || (a_fs_file->meta->tag != TSK_FS_META_TAG)) { tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("tsk_fs_file_walk: called with unallocated structures"); return 1; } if (tsk_verbose) tsk_fprintf(stderr, "tsk_fs_file_walk: Processing file %" PRIuINUM "\n", a_fs_file->meta->addr); if ((fs_attr = tsk_fs_file_attr_get_type(a_fs_file, a_type, a_id, (a_flags & TSK_FS_FILE_WALK_FLAG_NOID) ? 0 : 1)) == NULL) return 1; return tsk_fs_attr_walk(fs_attr, a_flags, a_action, a_ptr); } /** * \ingroup fslib * Process a file and call a callback function with the file contents. The callback will be * called with chunks of data that are fs->block_size or less. The address given in the callback * will be correct only for raw files (when the raw file contents were stored in the block). For * compressed and sparse files, the address may be zero. If a file has multiple attributes, * such as NTFS files, this function uses the default one ($DATA for files, $IDX_ROOT for directories). * Use tsk_fs_file_walk_type to specify an attribute. * * @param a_fs_file File to process * @param a_flags Flags to use while processing file * @param a_action Callback action to call with content * @param a_ptr Pointer that will passed to callback * @returns 1 on error and 0 on success. */ uint8_t tsk_fs_file_walk(TSK_FS_FILE * a_fs_file, TSK_FS_FILE_WALK_FLAG_ENUM a_flags, TSK_FS_FILE_WALK_CB a_action, void *a_ptr) { const TSK_FS_ATTR *fs_attr; // clean up any error messages that are lying around tsk_error_reset(); // check the FS_INFO, FS_FILE structures if ((a_fs_file == NULL) || (a_fs_file->meta == NULL) || (a_fs_file->fs_info == NULL)) { tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("tsk_fs_file_walk: called with NULL pointers"); return 1; } else if ((a_fs_file->fs_info->tag != TSK_FS_INFO_TAG) || (a_fs_file->meta->tag != TSK_FS_META_TAG)) { tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("tsk_fs_file_walk: called with unallocated structures"); return 1; } if (tsk_verbose) tsk_fprintf(stderr, "tsk_fs_file_walk: Processing file %" PRIuINUM "\n", a_fs_file->meta->addr); if ((fs_attr = tsk_fs_file_attr_get(a_fs_file)) == NULL) return 1; return tsk_fs_attr_walk(fs_attr, a_flags, a_action, a_ptr); } /** * \ingroup fslib * Read the contents of a specific attribute of a file using a typical read() type interface and be * able specify a specific attribute to read (applies only to file systems with multiple attributes * per file, such as NTFS). 0s are returned for missing runs of files. * * @param a_fs_file The file to read from * @param a_type The type of attribute to load * @param a_id The id of attribute to load (use 0 and set a_flags if you do not care) * @param a_offset The byte offset to start reading from. * @param a_buf The buffer to read the data into. * @param a_len The number of bytes to read from the file. 
* @param a_flags Flags to use while reading * @returns The number of bytes read or -1 on error (incl if offset is past EOF). */ ssize_t tsk_fs_file_read_type(TSK_FS_FILE * a_fs_file, TSK_FS_ATTR_TYPE_ENUM a_type, uint16_t a_id, TSK_OFF_T a_offset, char *a_buf, size_t a_len, TSK_FS_FILE_READ_FLAG_ENUM a_flags) { const TSK_FS_ATTR *fs_attr; // clean up any error messages that are lying around tsk_error_reset(); // check the FS_INFO, FS_FILE structures if ((a_fs_file == NULL) || (a_fs_file->meta == NULL) || (a_fs_file->fs_info == NULL)) { tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("tsk_fs_file_read: called with NULL pointers"); return -1; } else if ((a_fs_file->fs_info->tag != TSK_FS_INFO_TAG) || (a_fs_file->meta->tag != TSK_FS_META_TAG)) { tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("tsk_fs_file_read: called with unallocated structures"); return -1; } if ((fs_attr = tsk_fs_file_attr_get_type(a_fs_file, a_type, a_id, (a_flags & TSK_FS_FILE_READ_FLAG_NOID) ? 0 : 1)) == NULL) { return -1; } return tsk_fs_attr_read(fs_attr, a_offset, a_buf, a_len, a_flags); } /** * \ingroup fslib * Read the contents of a specific attribute of a file using a typical read() type interface. * 0s are returned for missing runs of files. * * @param a_fs_file The inode structure of the file to read. * @param a_offset The byte offset to start reading from. * @param a_buf The buffer to read the data into. * @param a_len The number of bytes to read from the file. * @param a_flags Flags to use while reading * @returns The number of bytes read or -1 on error (incl if offset is past EOF). */ ssize_t tsk_fs_file_read(TSK_FS_FILE * a_fs_file, TSK_OFF_T a_offset, char *a_buf, size_t a_len, TSK_FS_FILE_READ_FLAG_ENUM a_flags) { const TSK_FS_ATTR *fs_attr; if ((a_fs_file == NULL) || (a_fs_file->fs_info == NULL)) { tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("tsk_fs_file_read: fs_info is NULL"); return -1; } if ((fs_attr = tsk_fs_file_attr_get(a_fs_file)) == NULL) { return -1; } return tsk_fs_attr_read(fs_attr, a_offset, a_buf, a_len, a_flags); } /** * Returns a string representation of the security attributes of a file. * * @param a_fs_file The file to get security info about. * @param sid_str A pointer to a pointer that will contain the SID string. This function will allocate the string and the caller must free it. * @returns 0 on success or 1 on error. */ uint8_t tsk_fs_file_get_owner_sid(TSK_FS_FILE * a_fs_file, char **sid_str) { if ((a_fs_file == NULL) || (a_fs_file->fs_info == NULL) || (a_fs_file->meta == NULL) || (sid_str == NULL)) { tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("tsk_fs_file_get_owner_sid: fs_info is NULL"); return 1; } // Make sure the function pointer is not NULL. // This function will only work on NTFS filesystems. 
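    /* A rough caller sketch (assuming fs_file refers to a file on an NTFS
     * volume; names are examples only):
     *
     *     char *sid_str = NULL;
     *     if (tsk_fs_file_get_owner_sid(fs_file, &sid_str) == 0) {
     *         printf("Owner SID: %s\n", sid_str);
     *         free(sid_str);   // the function allocates the string; caller frees it
     *     }
     */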
if (!a_fs_file->fs_info->fread_owner_sid) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNSUPFUNC); tsk_error_set_errstr("Unsupported function"); return 1; } return a_fs_file->fs_info->fread_owner_sid(a_fs_file, sid_str); } /** * Internal struct used for hash calculations */ typedef struct { TSK_BASE_HASH_ENUM flags; TSK_MD5_CTX md5_context; TSK_SHA_CTX sha1_context; } TSK_FS_HASH_DATA; /** * Helper callback for tsk_fs_file_hash_calc() */ TSK_WALK_RET_ENUM tsk_fs_file_hash_calc_callback(TSK_FS_FILE * file, TSK_OFF_T offset, TSK_DADDR_T addr, char *buf, size_t size, TSK_FS_BLOCK_FLAG_ENUM a_flags, void *ptr) { TSK_FS_HASH_DATA *hash_data = (TSK_FS_HASH_DATA *) ptr; if (hash_data == NULL) return TSK_WALK_CONT; if (hash_data->flags & TSK_BASE_HASH_MD5) { TSK_MD5_Update(&(hash_data->md5_context), (unsigned char *) buf, (unsigned int) size); } if (hash_data->flags & TSK_BASE_HASH_SHA1) { TSK_SHA_Update(&(hash_data->sha1_context), (unsigned char *) buf, (unsigned int) size); } return TSK_WALK_CONT; } /** * Calculates the requested hash(es) of the given file and stores the results in a_hash_results * * @param a_fs_file The file to calculate the hash of * @param a_hash_results The results will be stored here (must be allocated beforehand) * @param a_flags Indicates which hash algorithm(s) to use * @returns 0 on success or 1 on error */ extern uint8_t tsk_fs_file_hash_calc(TSK_FS_FILE * a_fs_file, TSK_FS_HASH_RESULTS * a_hash_results, TSK_BASE_HASH_ENUM a_flags) { TSK_FS_HASH_DATA hash_data; if ((a_fs_file == NULL) || (a_fs_file->fs_info == NULL) || (a_fs_file->meta == NULL)) { tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("tsk_fs_file_hash_calc: fs_info is NULL"); return 1; } if (a_hash_results == NULL) { tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("tsk_fs_file_hash_calc: hash_results is NULL"); return 1; } if (a_flags & TSK_BASE_HASH_MD5) { TSK_MD5_Init(&(hash_data.md5_context)); } if (a_flags & TSK_BASE_HASH_SHA1) { TSK_SHA_Init(&(hash_data.sha1_context)); } hash_data.flags = a_flags; if (tsk_fs_file_walk(a_fs_file, TSK_FS_FILE_WALK_FLAG_NONE, tsk_fs_file_hash_calc_callback, (void *) &hash_data)) { tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("tsk_fs_file_hash_calc: error in file walk"); return 1; } a_hash_results->flags = a_flags; if (a_flags & TSK_BASE_HASH_MD5) { TSK_MD5_Final(a_hash_results->md5_digest, &(hash_data.md5_context)); } if (a_flags & TSK_BASE_HASH_SHA1) { TSK_SHA_Final(a_hash_results->sha1_digest, &(hash_data.sha1_context)); } return 0; } sleuthkit-4.2.0/tsk/fs/fs_inode.c000644 000765 000024 00000011712 12576320700 017472 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * * Copyright (c) 2006-2011 Brian Carrier, Basis Technology. All Rights reserved * * LICENSE * This software is distributed under the IBM Public License. * AUTHOR(S) * Wietse Venema * IBM T.J. Watson Research * P.O. Box 704 * Yorktown Heights, NY 10598, USA --*/ /** * \file fs_inode.c * Contains functions to allocate, free, and process the generic inode * structures */ #include "tsk_fs_i.h" /** * Contains the short (1 character) name of the file type */ char tsk_fs_meta_type_str[TSK_FS_META_TYPE_STR_MAX][2] = { "-", "r", "d", "p", "c", "b", "l", "s", "h", "w", "v" }; /** * \internal * Allocates a generic inode / metadata structure. * * @param a_buf_len Number of bytes needed to store fs-specific data regarding where content is stored. 
* @returns NULL on error */ TSK_FS_META * tsk_fs_meta_alloc(size_t a_buf_len) { TSK_FS_META *fs_meta; if ((fs_meta = (TSK_FS_META *) tsk_malloc(sizeof(TSK_FS_META))) == NULL) return NULL; fs_meta->attr_state = TSK_FS_META_ATTR_EMPTY; if (a_buf_len > 0) { if ((fs_meta->content_ptr = tsk_malloc(a_buf_len)) == NULL) { free(fs_meta); return NULL; } fs_meta->content_len = a_buf_len; } // assign the id so we know the structure is still alloc fs_meta->tag = TSK_FS_META_TAG; return (fs_meta); } /** * \internal * Resize an existing TSK_FS_META structure -- changes the number of * block pointers. * * @param fs_meta Structure to resize * @param a_buf_len Size of file system specific data that is used to store references to file content * @return NULL on error */ TSK_FS_META * tsk_fs_meta_realloc(TSK_FS_META * a_fs_meta, size_t a_buf_len) { if (a_fs_meta->content_len != a_buf_len) { a_fs_meta->content_len = a_buf_len; a_fs_meta->content_ptr = tsk_realloc((char *) a_fs_meta->content_ptr, a_buf_len); if (a_fs_meta->content_ptr == NULL) { return NULL; } } return (a_fs_meta); } /** * \internal * Free the memory allocated to the TSK_FS_META structure. * * @param fs_meta Structure to free */ void tsk_fs_meta_close(TSK_FS_META * fs_meta) { TSK_FS_META_NAME_LIST *fs_name, *fs_name2; if ((!fs_meta) || (fs_meta->tag != TSK_FS_META_TAG)) return; // clear the tag so we know the structure isn't alloc fs_meta->tag = 0; if (fs_meta->content_ptr) free((char *) fs_meta->content_ptr); fs_meta->content_ptr = NULL; fs_meta->content_len = 0; if (fs_meta->attr) tsk_fs_attrlist_free(fs_meta->attr); fs_meta->attr = NULL; if (fs_meta->link) free(fs_meta->link); fs_meta->link = NULL; fs_name = fs_meta->name2; while (fs_name) { fs_name2 = fs_name->next; fs_name->next = NULL; free(fs_name); fs_name = fs_name2; } free((char *) fs_meta); } /** \internal * Reset the contents of a TSK_FS_META structure. * @param a_fs_meta Structure to reset */ void tsk_fs_meta_reset(TSK_FS_META * a_fs_meta) { void *content_ptr_tmp; size_t content_len_tmp; TSK_FS_ATTRLIST *attr_tmp; TSK_FS_META_NAME_LIST *name2_tmp; char *link_tmp; // backup pointers content_ptr_tmp = a_fs_meta->content_ptr; content_len_tmp = a_fs_meta->content_len; attr_tmp = a_fs_meta->attr; name2_tmp = a_fs_meta->name2; link_tmp = a_fs_meta->link; // clear all data memset(a_fs_meta, 0, sizeof(TSK_FS_META)); a_fs_meta->tag = TSK_FS_META_TAG; // restore and clear the pointers a_fs_meta->content_ptr = content_ptr_tmp; a_fs_meta->content_len = content_len_tmp; a_fs_meta->attr = attr_tmp; a_fs_meta->attr_state = TSK_FS_META_ATTR_EMPTY; a_fs_meta->name2 = name2_tmp; a_fs_meta->link = link_tmp; if (a_fs_meta->link) a_fs_meta->link[0] = '\0'; if (a_fs_meta->name2) { name2_tmp = a_fs_meta->name2; while (name2_tmp) { name2_tmp->name[0] = '\0'; name2_tmp->par_inode = 0; name2_tmp->par_seq = 0; name2_tmp = name2_tmp->next; } } } /** * \ingroup fslib * Walk a range of metadata structures and call a callback for each * structure that matches the flags supplied. For example, it can * call the callback on only allocated or unallocated entries. 
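*
* A minimal sketch (assuming "fs" is an open TSK_FS_INFO handle and the usual
* TSK headers are included; the callback name is an example only):
* \code
*     static TSK_WALK_RET_ENUM
*     meta_cb(TSK_FS_FILE *a_fs_file, void *a_ptr)
*     {
*         printf("inum: %" PRIuINUM "\n", a_fs_file->meta->addr);
*         return TSK_WALK_CONT;
*     }
*
*     tsk_fs_meta_walk(fs, fs->first_inum, fs->last_inum,
*         TSK_FS_META_FLAG_UNALLOC | TSK_FS_META_FLAG_USED, meta_cb, NULL);
* \endcode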
* * @param a_fs File system to process * @param a_start Metadata address to start walking from * @param a_end Metadata address to walk to * @param a_flags Flags that specify the desired metadata features * @param a_cb Callback function to call * @param a_ptr Pointer to pass to the callback * @returns 1 on error and 0 on success */ uint8_t tsk_fs_meta_walk(TSK_FS_INFO * a_fs, TSK_INUM_T a_start, TSK_INUM_T a_end, TSK_FS_META_FLAG_ENUM a_flags, TSK_FS_META_WALK_CB a_cb, void *a_ptr) { if ((a_fs == NULL) || (a_fs->tag != TSK_FS_INFO_TAG)) return 1; return a_fs->inode_walk(a_fs, a_start, a_end, a_flags, a_cb, a_ptr); } sleuthkit-4.2.0/tsk/fs/fs_io.c000644 000765 000024 00000012650 12576320700 017005 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2006-2011 Brian Carrier, Basis Technology. All Rights reserved * Copyright (c) 2003-2005 Brian Carrier. All rights reserved * * Copyright (c) 1997,1998,1999, International Business Machines * Corporation and others. All Rights Reserved. * * * LICENSE * This software is distributed under the IBM Public License. * AUTHOR(S) * Wietse Venema * IBM T.J. Watson Research * P.O. Box 704 * Yorktown Heights, NY 10598, USA --*/ /** \file fs_io.c * Contains functions to read data from a disk image and wrapper functions to read file content. */ #include #include "tsk_fs_i.h" /** \internal * Internal method to deal with calculating correct offset when we have pre and post bytes * in teh file system blocks (i.e. RAW Cds) * @param a_fs File system being analyzed * @param a_off Byte offset into file system (i.e. not offset into image) * @param a_buf Buffer to write data into * @param a_len Number of bytes to read * @retuns Number of bytes read or -1 on error */ static ssize_t fs_prepost_read(TSK_FS_INFO * a_fs, TSK_OFF_T a_off, char *a_buf, size_t a_len) { TSK_OFF_T cur_off = a_off; TSK_OFF_T end_off = a_off + a_len; ssize_t cur_idx = 0; // we need to read block by block so that we can skip the needed pre and post bytes while (cur_off < end_off) { TSK_OFF_T read_off; ssize_t retval2 = 0; TSK_DADDR_T blk = cur_off / a_fs->block_size; size_t read_len = a_fs->block_size - cur_off % a_fs->block_size; if (read_len + cur_off > end_off) read_len = (size_t) (end_off - cur_off); read_off = a_fs->offset + cur_off + blk * (a_fs->block_pre_size + a_fs->block_post_size) + a_fs->block_pre_size; if (tsk_verbose) fprintf(stderr, "fs_prepost_read: Mapped %" PRIuOFF " to %" PRIuOFF "\n", cur_off, read_off); retval2 = tsk_img_read(a_fs->img_info, read_off, &a_buf[cur_idx], read_len); if (retval2 == -1) return -1; else if (retval2 == 0) break; cur_idx += retval2; cur_off += retval2; } return cur_idx; } /** * \ingroup fslib * Read arbitrary data from inside of the file system. * @param a_fs The file system handle. * @param a_off The byte offset to start reading from (relative to start of file system) * @param a_buf The buffer to store the block in. * @param a_len The number of bytes to read * @return The number of bytes read or -1 on error. */ ssize_t tsk_fs_read(TSK_FS_INFO * a_fs, TSK_OFF_T a_off, char *a_buf, size_t a_len) { // do a sanity check on the read bounds, but only if the block // value has been set. // note that this could prevent us from viewing the FS slack... 
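    // The check below uses last_block_act (the last block actually present in
    // the image) rather than last_block, so reads into the truncated portion
    // of a partial image are rejected with a specific error message.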
if ((a_fs->last_block_act > 0) && ((TSK_DADDR_T) a_off >= ((a_fs->last_block_act + 1) * a_fs->block_size))) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); if ((TSK_DADDR_T) a_off < ((a_fs->last_block + 1) * a_fs->block_size)) tsk_error_set_errstr ("tsk_fs_read: Offset missing in partial image: %" PRIuDADDR ")", a_off); else tsk_error_set_errstr ("tsk_fs_read: Offset is too large for image: %" PRIuDADDR ")", a_off); return -1; } if (((a_fs->block_pre_size) || (a_fs->block_post_size)) && (a_fs->block_size)) { return fs_prepost_read(a_fs, a_off, a_buf, a_len); } else { return tsk_img_read(a_fs->img_info, a_off + a_fs->offset, a_buf, a_len); } } /** * \ingroup fslib * Read a file system block into a char* buffer. * This is actually a wrapper around the fs_read_random function, * but it allows the starting location to be specified as a block address. * * @param a_fs The file system structure. * @param a_addr The starting block file system address. * @param a_buf The char * buffer to store the block data in. * @param a_len The number of bytes to read (must be a multiple of the block size) * @return The number of bytes read or -1 on error. */ ssize_t tsk_fs_read_block(TSK_FS_INFO * a_fs, TSK_DADDR_T a_addr, char *a_buf, size_t a_len) { if (a_len % a_fs->block_size) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); tsk_error_set_errstr("tsk_fs_read_block: length %" PRIuSIZE "" " not a multiple of %d", a_len, a_fs->block_size); return -1; } if (a_addr > a_fs->last_block_act) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); if (a_addr <= a_fs->last_block) tsk_error_set_errstr ("tsk_fs_read_block: Address missing in partial image: %" PRIuDADDR ")", a_addr); else tsk_error_set_errstr ("tsk_fs_read_block: Address is too large for image: %" PRIuDADDR ")", a_addr); return -1; } if ((a_fs->block_pre_size == 0) && (a_fs->block_post_size == 0)) { TSK_OFF_T off = a_fs->offset + (TSK_OFF_T) (a_addr) * a_fs->block_size; return tsk_img_read(a_fs->img_info, off, a_buf, a_len); } else { TSK_OFF_T off = (TSK_OFF_T) (a_addr) * a_fs->block_size; return fs_prepost_read(a_fs, off, a_buf, a_len); } } sleuthkit-4.2.0/tsk/fs/fs_load.c000644 000765 000024 00000001771 12576320700 017317 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2005-2011 Brian Carrier. All rights reserved * * This software is distributed under the Common Public License 1.0 * */ /** \file fs_load.c * Contains a general file walk callback that can be * used to load file content into a buffer. */ #include "tsk_fs_i.h" /* File Walk Action to load the journal * TSK_FS_LOAD_FILE is defined in fs_tools.h */ TSK_WALK_RET_ENUM tsk_fs_load_file_action(TSK_FS_FILE * fs_file, TSK_OFF_T a_off, TSK_DADDR_T addr, char *buf, size_t size, TSK_FS_BLOCK_FLAG_ENUM flags, void *ptr) { TSK_FS_LOAD_FILE *buf1 = (TSK_FS_LOAD_FILE *) ptr; size_t cp_size; if (size > buf1->left) cp_size = buf1->left; else cp_size = size; memcpy(buf1->cur, buf, cp_size); buf1->left -= cp_size; buf1->cur = (char *) ((uintptr_t) buf1->cur + cp_size); if (buf1->left > 0) return TSK_WALK_CONT; else return TSK_WALK_STOP; } sleuthkit-4.2.0/tsk/fs/fs_name.c000644 000765 000024 00000056760 12576320700 017330 0ustar00carrierstaff000000 000000 /* ** fs_name ** The Sleuth Kit ** ** Display and manipulate directory entries ** This file contains generic functions that call the appropriate function ** depending on the file system type. 
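/* Illustrative sketch (not part of the original source): reading one file
 * system block with tsk_fs_read_block() shown above. The request length must
 * be a multiple of fs->block_size, per the check in that function;
 * read_one_block is a hypothetical helper name. */
static char *
read_one_block(TSK_FS_INFO * fs, TSK_DADDR_T addr)
{
    char *buf = (char *) tsk_malloc(fs->block_size);
    if (buf == NULL)
        return NULL;
    if (tsk_fs_read_block(fs, addr, buf, fs->block_size) !=
        (ssize_t) fs->block_size) {
        /* the TSK error state describes what went wrong */
        free(buf);
        return NULL;
    }
    return buf;                 /* caller frees */
}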
** ** Brian Carrier [carrier sleuthkit [dot] org] ** Copyright (c) 2006-2013 Brian Carrier. All Rights reserved ** Copyright (c) 2003-2005 Brian Carrier. All rights reserved ** ** TASK ** Copyright (c) 2002 Brian Carrier, @stake Inc. All rights reserved ** ** TCTUTILs ** Copyright (c) 2001 Brian Carrier. All rights reserved ** ** This software is distributed under the Common Public License 1.0 */ /** * \file fs_name.c * Code to allocate and free the TSK_FS_NAME structures. */ #include "tsk_fs_i.h" char tsk_fs_name_type_str[TSK_FS_NAME_TYPE_STR_MAX][2] = { "-", "p", "c", "d", "b", "r", "l", "s", "h", "w", "v" }; /** * \internal * Allocate a fs_name structure */ TSK_FS_NAME * tsk_fs_name_alloc(size_t norm_namelen, size_t shrt_namelen) { TSK_FS_NAME *fs_name; fs_name = (TSK_FS_NAME *) tsk_malloc(sizeof(*fs_name)); if (fs_name == NULL) return NULL; fs_name->name = (char *) tsk_malloc(norm_namelen + 1); if (fs_name->name == NULL) { free(fs_name); return NULL; } fs_name->name_size = norm_namelen; fs_name->flags = 0; fs_name->shrt_name_size = shrt_namelen; if (shrt_namelen == 0) { fs_name->shrt_name = NULL; } else { fs_name->shrt_name = (char *) tsk_malloc(shrt_namelen + 1); if (fs_name->shrt_name == NULL) { free(fs_name->name); free(fs_name); return NULL; } } fs_name->type = TSK_FS_NAME_TYPE_UNDEF; fs_name->tag = TSK_FS_NAME_TAG; return fs_name; } /** * \internal * returns 1 on error */ uint8_t tsk_fs_name_realloc(TSK_FS_NAME * fs_name, size_t namelen) { if ((fs_name == NULL) || (fs_name->tag != TSK_FS_NAME_TAG)) return 1; if (fs_name->name_size >= namelen) return 0; fs_name->name = (char *) tsk_realloc(fs_name->name, namelen + 1); if (fs_name->name == NULL) { fs_name->name_size = 0; return 1; } fs_name->type = TSK_FS_NAME_TYPE_UNDEF; fs_name->name_size = namelen; return 0; } /** * \internal * reset the values in the TSK_FS_NAME structure * @param a_fs_name Name structure to reset */ void tsk_fs_name_reset(TSK_FS_NAME * a_fs_name) { if (a_fs_name->name) a_fs_name->name[0] = '\0'; if (a_fs_name->shrt_name) a_fs_name->shrt_name[0] = '\0'; a_fs_name->meta_addr = 0; a_fs_name->meta_seq = 0; a_fs_name->par_addr = 0; a_fs_name->par_seq = 0; a_fs_name->type = 0; a_fs_name->flags = 0; } /** * \internal */ void tsk_fs_name_free(TSK_FS_NAME * fs_name) { if ((!fs_name) || (fs_name->tag != TSK_FS_NAME_TAG)) return; if (fs_name->name) { free(fs_name->name); fs_name->name = NULL; } if (fs_name->shrt_name) { free(fs_name->shrt_name); fs_name->shrt_name = NULL; } free(fs_name); } /** \internal * Copy the contents of a TSK_FS_NAME structure to another * structure. 
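/* Illustrative sketch (not part of the original source): how these internal
 * helpers are chained inside the library. Allocate a TSK_FS_NAME, copy
 * another one into it, and free it on failure. clone_name is a hypothetical
 * name; the initial buffer sizes are only starting capacities because
 * tsk_fs_name_copy() (below) grows the destination as needed. */
static uint8_t
clone_name(const TSK_FS_NAME * src, TSK_FS_NAME ** a_out)
{
    TSK_FS_NAME *dst = tsk_fs_name_alloc(64, 0);        /* no short name buffer */
    if (dst == NULL)
        return 1;
    if (tsk_fs_name_copy(dst, src)) {
        tsk_fs_name_free(dst);
        return 1;
    }
    *a_out = dst;
    return 0;
}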
* @param a_fs_name_to Destination structure to copy to * @param a_fs_name_from Source structure to copy from * @returns 1 on error */ uint8_t tsk_fs_name_copy(TSK_FS_NAME * a_fs_name_to, const TSK_FS_NAME * a_fs_name_from) { if ((a_fs_name_to == NULL) || (a_fs_name_from == NULL)) return 1; /* If the source has a full name, copy it */ if (a_fs_name_from->name) { // make sure there is enough space if (strlen(a_fs_name_from->name) >= a_fs_name_to->name_size) { a_fs_name_to->name_size = strlen(a_fs_name_from->name) + 16; a_fs_name_to->name = (char *) tsk_realloc(a_fs_name_to->name, a_fs_name_to->name_size); if (a_fs_name_to->name == NULL) return 1; } strncpy(a_fs_name_to->name, a_fs_name_from->name, a_fs_name_to->name_size); } else { if (a_fs_name_to->name_size > 0) a_fs_name_to->name[0] = '\0'; else a_fs_name_to->name = NULL; } // copy the short name, if one exists if (a_fs_name_from->shrt_name) { if (strlen(a_fs_name_from->shrt_name) >= a_fs_name_to->shrt_name_size) { a_fs_name_to->shrt_name_size = strlen(a_fs_name_from->shrt_name) + 16; a_fs_name_to->shrt_name = (char *) tsk_realloc(a_fs_name_to->shrt_name, a_fs_name_to->shrt_name_size); if (a_fs_name_to->shrt_name == NULL) return 1; } strncpy(a_fs_name_to->shrt_name, a_fs_name_from->shrt_name, a_fs_name_to->shrt_name_size); } else { if (a_fs_name_to->shrt_name_size > 0) a_fs_name_to->shrt_name[0] = '\0'; else a_fs_name_to->shrt_name = NULL; } a_fs_name_to->meta_addr = a_fs_name_from->meta_addr; a_fs_name_to->meta_seq = a_fs_name_from->meta_seq; a_fs_name_to->par_addr = a_fs_name_from->par_addr; a_fs_name_to->par_seq = a_fs_name_from->par_seq; a_fs_name_to->type = a_fs_name_from->type; a_fs_name_to->flags = a_fs_name_from->flags; return 0; } /*********************************************************************** * Printing functions ***********************************************************************/ /** * \ingroup fslib * Makes the "ls -l" permissions string for a file. * * @param a_fs_meta File to be processed * @param a_buf [out] Buffer to write results to (must be 12 bytes or longer) * @param a_len Length of buffer */ uint8_t tsk_fs_meta_make_ls(const TSK_FS_META * a_fs_meta, char *a_buf, size_t a_len) { if (a_len < 12) { return 1; } /* put the default values in */ strcpy(a_buf, "----------"); if (a_fs_meta->type < TSK_FS_META_TYPE_STR_MAX) a_buf[0] = tsk_fs_meta_type_str[a_fs_meta->type][0]; /* user perms */ if (a_fs_meta->mode & TSK_FS_META_MODE_IRUSR) a_buf[1] = 'r'; if (a_fs_meta->mode & TSK_FS_META_MODE_IWUSR) a_buf[2] = 'w'; /* set uid */ if (a_fs_meta->mode & TSK_FS_META_MODE_ISUID) { if (a_fs_meta->mode & TSK_FS_META_MODE_IXUSR) a_buf[3] = 's'; else a_buf[3] = 'S'; } else if (a_fs_meta->mode & TSK_FS_META_MODE_IXUSR) a_buf[3] = 'x'; /* group perms */ if (a_fs_meta->mode & TSK_FS_META_MODE_IRGRP) a_buf[4] = 'r'; if (a_fs_meta->mode & TSK_FS_META_MODE_IWGRP) a_buf[5] = 'w'; /* set gid */ if (a_fs_meta->mode & TSK_FS_META_MODE_ISGID) { if (a_fs_meta->mode & TSK_FS_META_MODE_IXGRP) a_buf[6] = 's'; else a_buf[6] = 'S'; } else if (a_fs_meta->mode & TSK_FS_META_MODE_IXGRP) a_buf[6] = 'x'; /* other perms */ if (a_fs_meta->mode & TSK_FS_META_MODE_IROTH) a_buf[7] = 'r'; if (a_fs_meta->mode & TSK_FS_META_MODE_IWOTH) a_buf[8] = 'w'; /* sticky bit */ if (a_fs_meta->mode & TSK_FS_META_MODE_ISVTX) { if (a_fs_meta->mode & TSK_FS_META_MODE_IXOTH) a_buf[9] = 't'; else a_buf[9] = 'T'; } else if (a_fs_meta->mode & TSK_FS_META_MODE_IXOTH) a_buf[9] = 'x'; return 0; } /** \ingroup fslib * Converts a time value to a string representation. 
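/* Illustrative sketch (not part of the original source): formatting an
 * "ls -l" style mode string with tsk_fs_meta_make_ls() above. The output
 * buffer must be at least 12 bytes, per that function's check;
 * print_mode_string is a hypothetical helper and fs_file is assumed to come
 * from an open-file call such as tsk_fs_file_open_meta(). */
static void
print_mode_string(FILE * hFile, const TSK_FS_FILE * fs_file)
{
    char mode[12];

    if (fs_file->meta == NULL)
        return;
    if (tsk_fs_meta_make_ls(fs_file->meta, mode, sizeof(mode)) == 0)
        tsk_fprintf(hFile, "%s\n", mode);
}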
Prints * all zero values instead of 1970 if time is 0. * @param time Time to be displayed. * @param buf Buffer to print into (must be 128 byes or larger) * @returns Pointer to buffer that was passed in. */ char * tsk_fs_time_to_str(time_t time, char buf[128]) { buf[0] = '\0'; if (time <= 0) { strncpy(buf, "0000-00-00 00:00:00 (UTC)", 128); } else { struct tm *tmTime = localtime(&time); snprintf(buf, 128, "%.4d-%.2d-%.2d %.2d:%.2d:%.2d (%s)", (int) tmTime->tm_year + 1900, (int) tmTime->tm_mon + 1, (int) tmTime->tm_mday, tmTime->tm_hour, (int) tmTime->tm_min, (int) tmTime->tm_sec, tzname[(tmTime->tm_isdst == 0) ? 0 : 1]); } return buf; } /** \ingroup fslib * Converts a time value to a string representation. Prints * all zero values instead of 1970 if time is 0. * @param time Time to be displayed. * @param buf Buffer to print into (must b 64 bytes or larger) * @param subsecs Subseconds to be printed * @returns Pointer to buffer that was passed in. */ char * tsk_fs_time_to_str_subsecs(time_t time, unsigned int subsecs, char buf[128]) { buf[0] = '\0'; if (time <= 0) { strncpy(buf, "0000-00-00 00:00:00 (UTC)", 32); } else { struct tm *tmTime = localtime(&time); snprintf(buf, 64, "%.4d-%.2d-%.2d %.2d:%.2d:%.2d.%.9d (%s)", (int) tmTime->tm_year + 1900, (int) tmTime->tm_mon + 1, (int) tmTime->tm_mday, tmTime->tm_hour, (int) tmTime->tm_min, (int) tmTime->tm_sec, subsecs, tzname[(tmTime->tm_isdst == 0) ? 0 : 1]); } return buf; } static void tsk_fs_print_time(FILE * hFile, time_t time) { char foo[128]; tsk_fs_time_to_str(time, foo); tsk_fprintf(hFile, "%s", foo); } // @@@ We could merge this with the tsk_fs_time_to_str in // the future when the feature to include time resolution // is added to TSK_FS_META (and then that value would be // passed in and tsk_fs_time_to_str would decide what to // round up/down to /** * The only difference with this one is that the time is always * 00:00:00, which is applicable for the A-Time in FAT, which does * not have a time and if we do it normally it gets messed up because * of the timezone conversion */ static void tsk_fs_print_day(FILE * hFile, time_t time) { if (time <= 0) { tsk_fprintf(hFile, "0000-00-00 00:00:00 (UTC)"); } else { struct tm *tmTime = localtime(&time); tsk_fprintf(hFile, "%.4d-%.2d-%.2d 00:00:00 (%s)", (int) tmTime->tm_year + 1900, (int) tmTime->tm_mon + 1, (int) tmTime->tm_mday, tzname[(tmTime->tm_isdst == 0) ? 0 : 1]); } } /** * \internal * Simple print of dentry type / inode type, deleted, inode, and * name * * fs_attr is used for alternate data streams in NTFS, set to NULL * for all other file systems * * A newline is not printed at the end * * If path is NULL, then skip else use. it has the full directory name * It needs to end with "/" */ void tsk_fs_name_print(FILE * hFile, const TSK_FS_FILE * fs_file, const char *a_path, TSK_FS_INFO * fs, const TSK_FS_ATTR * fs_attr, uint8_t print_path) { size_t i; char *buf; /* type of file - based on dentry type */ if (fs_file->name->type < TSK_FS_NAME_TYPE_STR_MAX) tsk_fprintf(hFile, "%s/", tsk_fs_name_type_str[fs_file->name->type]); else tsk_fprintf(hFile, "-/"); /* type of file - based on inode type: we want letters though for * regular files so we use the dent_str though */ if (fs_file->meta) { /* * An NTFS directory can have a Data stream, in which * case it would be printed with modes of a * directory, although it is really a file * So, to avoid confusion we will set the modes * to a file so it is printed that way. 
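/* Illustrative sketch (not part of the original source): rendering a file's
 * modification time with tsk_fs_time_to_str() above. The buffer must be at
 * least 128 bytes, as documented there, and the string uses the local
 * timezone; print_mtime is a hypothetical helper name. */
static void
print_mtime(FILE * hFile, const TSK_FS_FILE * fs_file)
{
    char timeBuf[128];

    if (fs_file->meta == NULL)
        return;
    tsk_fprintf(hFile, "mtime: %s\n",
        tsk_fs_time_to_str(fs_file->meta->mtime, timeBuf));
}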
The * entry for the directory itself will still be * printed as a directory */ if ((fs_attr) && (fs_attr->type == TSK_FS_ATTR_TYPE_NTFS_DATA) && (fs_file->meta->type == TSK_FS_META_TYPE_DIR)) { tsk_fprintf(hFile, "r "); } else { if (fs_file->meta->type < TSK_FS_META_TYPE_STR_MAX) tsk_fprintf(hFile, "%s ", tsk_fs_meta_type_str[fs_file->meta->type]); else tsk_fprintf(hFile, "- "); } } else { tsk_fprintf(hFile, "- "); } /* print a * if it is deleted */ if (fs_file->name->flags & TSK_FS_NAME_FLAG_UNALLOC) tsk_fprintf(hFile, "* "); tsk_fprintf(hFile, "%" PRIuINUM "", fs_file->name->meta_addr); /* print the id and type if we have fs_attr (NTFS) */ if (fs_attr) tsk_fprintf(hFile, "-%" PRIu32 "-%" PRIu16 "", fs_attr->type, fs_attr->id); tsk_fprintf(hFile, "%s:\t", ((fs_file->meta) && (fs_file->meta->flags & TSK_FS_META_FLAG_ALLOC) && (fs_file->name-> flags & TSK_FS_NAME_FLAG_UNALLOC)) ? "(realloc)" : ""); if ((print_path) && (a_path != NULL)) { buf = malloc(strlen(a_path) + 1); strcpy(buf, a_path); for (i = 0; i < strlen(buf); i++) { if (TSK_IS_CNTRL(buf[i])) buf[i] = '^'; } tsk_fprintf(hFile, "%s", buf); free(buf); } buf = malloc(strlen(fs_file->name->name) + 1); strcpy(buf, fs_file->name->name); for (i = 0; i < strlen(buf); i++) { if (TSK_IS_CNTRL(buf[i])) buf[i] = '^'; } tsk_fprintf(hFile, "%s", buf); free(buf); /* This will add the short name in parentheses if (fs_file->name->shrt_name != NULL && fs_file->name->shrt_name[0] != '\0') tsk_fprintf(hFile, " (%s)", fs_file->name->shrt_name); */ /* print the data stream name if we the non-data NTFS stream */ if ((fs_attr) && (fs_attr->name)) { if ((fs_attr->type != TSK_FS_ATTR_TYPE_NTFS_IDXROOT) || (strcmp(fs_attr->name, "$I30") != 0)) { tsk_fprintf(hFile, ":"); buf = malloc(strlen(fs_attr->name) + 1); strcpy(buf, fs_attr->name); for (i = 0; i < strlen(buf); i++) { if (TSK_IS_CNTRL(buf[i])) buf[i] = '^'; } tsk_fprintf(hFile, "%s", buf); free(buf); } } return; } /** * \internal * Print contents of fs_name entry format like ls -l ** ** All elements are tab delimited ** ** If path is NULL, then skip else use. 
it has the full directory name ** It needs to end with "/" */ void tsk_fs_name_print_long(FILE * hFile, const TSK_FS_FILE * fs_file, const char *a_path, TSK_FS_INFO * fs, const TSK_FS_ATTR * fs_attr, uint8_t print_path, int32_t sec_skew) { tsk_fs_name_print(hFile, fs_file, a_path, fs, fs_attr, print_path); if ((fs == NULL) || (fs_file->meta == NULL)) { tsk_fprintf(hFile, "\t"); tsk_fs_print_time(hFile, 0); // mtime tsk_fprintf(hFile, "\t"); tsk_fs_print_time(hFile, 0); // atime tsk_fprintf(hFile, "\t"); tsk_fs_print_time(hFile, 0); // ctime tsk_fprintf(hFile, "\t"); tsk_fs_print_time(hFile, 0); // crtime // size, uid, gid tsk_fprintf(hFile, "\t0\t0\t0\n"); } else { /* MAC times */ tsk_fprintf(hFile, "\t"); if (fs_file->meta->mtime) tsk_fs_print_time(hFile, fs_file->meta->mtime - sec_skew); else tsk_fs_print_time(hFile, fs_file->meta->mtime); tsk_fprintf(hFile, "\t"); /* FAT only gives the day of last access */ if ((TSK_FS_TYPE_ISFAT(fs->ftype)) || (fs_file->meta->atime == 0)) tsk_fs_print_day(hFile, fs_file->meta->atime); else tsk_fs_print_time(hFile, fs_file->meta->atime - sec_skew); tsk_fprintf(hFile, "\t"); if (fs_file->meta->ctime) tsk_fs_print_time(hFile, fs_file->meta->ctime - sec_skew); else tsk_fs_print_time(hFile, fs_file->meta->ctime); tsk_fprintf(hFile, "\t"); if (fs_file->meta->crtime) tsk_fs_print_time(hFile, fs_file->meta->crtime - sec_skew); else tsk_fs_print_time(hFile, fs_file->meta->crtime); /* use the stream size if one was given */ if (fs_attr) tsk_fprintf(hFile, "\t%" PRIuOFF, fs_attr->size); else tsk_fprintf(hFile, "\t%" PRIuOFF, fs_file->meta->size); tsk_fprintf(hFile, "\t%" PRIuGID "\t%" PRIuUID "\n", fs_file->meta->gid, fs_file->meta->uid); } return; } /** * \internal * ** Print output in the format that mactime reads. ** ** If the flags in the fs_file->meta structure are set to FS_FLAG_ALLOC ** then it is assumed that the inode has been reallocated and the ** contents are not displayed ** ** fs is not required (only used for block size). * @param hFile handle to print results to * @param fs_file File to print details about * @param a_path Parent directory of file (needs to end with "/") * @param fs_attr Attribute in file that is being called for (NULL for non-NTFS) * @param prefix Path of mounting point for image * @param time_skew number of seconds skew to adjust time */ void tsk_fs_name_print_mac(FILE * hFile, const TSK_FS_FILE * fs_file, const char *a_path, const TSK_FS_ATTR * fs_attr, const char *prefix, int32_t time_skew) { tsk_fs_name_print_mac_md5(hFile, fs_file, a_path, fs_attr, prefix, time_skew, NULL); } /** * \internal * ** Print output in the format that mactime reads. ** ** If the flags in the fs_file->meta structure are set to FS_FLAG_ALLOC ** then it is assumed that the inode has been reallocated and the ** contents are not displayed ** ** fs is not required (only used for block size). 
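/* Illustrative sketch (not part of the original source): how a tool can
 * drive the print helpers above from a directory walk to produce fls-style
 * long listings. It assumes the tsk_fs_dir_walk() API and its callback shape
 * (TSK_FS_FILE pointer, parent path, context pointer) as declared in
 * libtsk.h; list_cb and list_long are hypothetical names, and no NTFS
 * attribute or time skew is supplied (NULL and 0). */
static TSK_WALK_RET_ENUM
list_cb(TSK_FS_FILE * fs_file, const char *a_path, void *ptr)
{
    TSK_FS_INFO *fs = (TSK_FS_INFO *) ptr;
    tsk_fs_name_print_long(stdout, fs_file, a_path, fs, NULL, 1, 0);
    return TSK_WALK_CONT;
}

static uint8_t
list_long(TSK_FS_INFO * fs)
{
    return tsk_fs_dir_walk(fs, fs->root_inum,
        TSK_FS_DIR_WALK_FLAG_ALLOC | TSK_FS_DIR_WALK_FLAG_RECURSE,
        list_cb, fs);
}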
* @param hFile handle to print results to * @param fs_file File to print details about * @param a_path Parent directory of file (needs to end with "/") * @param fs_attr Attribute in file that is being called for (NULL for non-NTFS) * @param prefix Path of mounting point for image * @param time_skew number of seconds skew to adjust time * @param hash_results Holds the calculated md5 hash */ void tsk_fs_name_print_mac_md5(FILE * hFile, const TSK_FS_FILE * fs_file, const char *a_path, const TSK_FS_ATTR * fs_attr, const char *prefix, int32_t time_skew, const unsigned char *hash_results) { char ls[12]; size_t i; uint8_t isADS = 0; char *buf; if ((!hFile) || (!fs_file)) return; /* see if we are going to be printing the name of the attribute * We don't do it for FNAME attributes, which we handle specially below. */ if ((fs_attr) && (fs_attr->name) && (fs_attr->type != TSK_FS_ATTR_TYPE_NTFS_FNAME) && ((fs_attr->type != TSK_FS_ATTR_TYPE_NTFS_IDXROOT) || (strcmp(fs_attr->name, "$I30") != 0))) { isADS = 1; } /* hash * Print out the hash buffer (if not null) */ if (hash_results == NULL) { tsk_fprintf(hFile, "0|"); } else { for (i = 0; i < 16; i++) { tsk_fprintf(hFile, "%02x", hash_results[i]); } tsk_fprintf(hFile, "|"); } /* file name */ tsk_fprintf(hFile, "%s", prefix); // remove any control chars as we print the names if (a_path != NULL) { buf = malloc(strlen(a_path) + 1); strcpy(buf, a_path); for (i = 0; i < strlen(buf); i++) { if (TSK_IS_CNTRL(buf[i])) buf[i] = '^'; } tsk_fprintf(hFile, "%s", buf); free(buf); } buf = malloc(strlen(fs_file->name->name) + 1); strcpy(buf, fs_file->name->name); for (i = 0; i < strlen(buf); i++) { if (TSK_IS_CNTRL(buf[i])) buf[i] = '^'; } tsk_fprintf(hFile, "%s", buf); free(buf); /* print the data stream name if it exists and is not the default NTFS */ if (isADS) { tsk_fprintf(hFile, ":"); buf = malloc(strlen(fs_attr->name) + 1); strcpy(buf, fs_attr->name); for (i = 0; i < strlen(buf); i++) { if (TSK_IS_CNTRL(buf[i])) buf[i] = '^'; } tsk_fprintf(hFile, "%s", buf); free(buf); } // special label if FNAME if ((fs_attr) && (fs_attr->type == TSK_FS_ATTR_TYPE_NTFS_FNAME)) { tsk_fprintf(hFile, " ($FILE_NAME)"); } if ((fs_file->meta) && (fs_file->meta->type == TSK_FS_META_TYPE_LNK) && (fs_file->meta->link)) { tsk_fprintf(hFile, " -> %s", fs_file->meta->link); } /* if filename is deleted add a comment and if the inode is now * allocated, then add realloc comment */ if (fs_file->name->flags & TSK_FS_NAME_FLAG_UNALLOC) tsk_fprintf(hFile, " (deleted%s)", ((fs_file->meta) && (fs_file->meta-> flags & TSK_FS_META_FLAG_ALLOC)) ? 
"-realloc" : ""); /* inode */ tsk_fprintf(hFile, "|%" PRIuINUM, fs_file->name->meta_addr); if (fs_attr) tsk_fprintf(hFile, "-%" PRIu32 "-%" PRIu16 "", fs_attr->type, fs_attr->id); tsk_fprintf(hFile, "|"); /* TYPE as specified in the directory entry */ if (fs_file->name->type < TSK_FS_NAME_TYPE_STR_MAX) tsk_fprintf(hFile, "%s/", tsk_fs_name_type_str[fs_file->name->type]); else tsk_fprintf(hFile, "-/"); if (!fs_file->meta) { tsk_fprintf(hFile, "----------|0|0|0|"); } else { /* mode as string */ tsk_fs_meta_make_ls(fs_file->meta, ls, sizeof(ls)); tsk_fprintf(hFile, "%s|", ls); /* uid, gid */ tsk_fprintf(hFile, "%" PRIuUID "|%" PRIuGID "|", fs_file->meta->uid, fs_file->meta->gid); /* size - use data stream if we have it */ if (fs_attr) tsk_fprintf(hFile, "%" PRIuOFF "|", fs_attr->size); else tsk_fprintf(hFile, "%" PRIuOFF "|", fs_file->meta->size); } if (!fs_file->meta) { tsk_fprintf(hFile, "0|0|0|0\n"); } else { // special case for NTFS FILE_NAME attribute if ((fs_attr) && (fs_attr->type == TSK_FS_ATTR_TYPE_NTFS_FNAME)) { /* atime, mtime, ctime, crtime */ if (fs_file->meta->time2.ntfs.fn_atime) tsk_fprintf(hFile, "%" PRIu32 "|", fs_file->meta->time2.ntfs.fn_atime - time_skew); else tsk_fprintf(hFile, "%" PRIu32 "|", fs_file->meta->time2.ntfs.fn_atime); if (fs_file->meta->time2.ntfs.fn_mtime) tsk_fprintf(hFile, "%" PRIu32 "|", fs_file->meta->time2.ntfs.fn_mtime - time_skew); else tsk_fprintf(hFile, "%" PRIu32 "|", fs_file->meta->time2.ntfs.fn_mtime); if (fs_file->meta->time2.ntfs.fn_ctime) tsk_fprintf(hFile, "%" PRIu32 "|", fs_file->meta->time2.ntfs.fn_ctime - time_skew); else tsk_fprintf(hFile, "%" PRIu32 "|", fs_file->meta->time2.ntfs.fn_ctime); if (fs_file->meta->time2.ntfs.fn_crtime) tsk_fprintf(hFile, "%" PRIu32 "\n", fs_file->meta->time2.ntfs.fn_crtime - time_skew); else tsk_fprintf(hFile, "%" PRIu32 "\n", fs_file->meta->time2.ntfs.fn_crtime); } else { /* atime, mtime, ctime, crtime */ if (fs_file->meta->atime) tsk_fprintf(hFile, "%" PRIu32 "|", fs_file->meta->atime - time_skew); else tsk_fprintf(hFile, "%" PRIu32 "|", fs_file->meta->atime); if (fs_file->meta->mtime) tsk_fprintf(hFile, "%" PRIu32 "|", fs_file->meta->mtime - time_skew); else tsk_fprintf(hFile, "%" PRIu32 "|", fs_file->meta->mtime); if (fs_file->meta->ctime) tsk_fprintf(hFile, "%" PRIu32 "|", fs_file->meta->ctime - time_skew); else tsk_fprintf(hFile, "%" PRIu32 "|", fs_file->meta->ctime); if (fs_file->meta->crtime) tsk_fprintf(hFile, "%" PRIu32 "\n", fs_file->meta->crtime - time_skew); else tsk_fprintf(hFile, "%" PRIu32 "\n", fs_file->meta->crtime); } } } sleuthkit-4.2.0/tsk/fs/fs_open.c000644 000765 000024 00000022442 12576320700 017337 0ustar00carrierstaff000000 000000 /* ** tsk_fs_open_img ** The Sleuth Kit ** ** Brian Carrier [carrier sleuthkit [dot] org] ** Copyright (c) 2006-2011 Brian Carrier, Basis Technology. All Rights reserved ** Copyright (c) 2003-2005 Brian Carrier. All rights reserved ** ** TASK ** Copyright (c) 2002 Brian Carrier, @stake Inc. All rights reserved ** ** Copyright (c) 1997,1998,1999, International Business Machines ** Corporation and others. All Rights Reserved. */ /* TCT */ /*++ * LICENSE * This software is distributed under the IBM Public License. * AUTHOR(S) * Wietse Venema * IBM T.J. Watson Research * P.O. Box 704 * Yorktown Heights, NY 10598, USA --*/ #include "tsk_fs_i.h" /** * \file fs_open.c * Contains the general code to open a file system -- this calls * the file system -specific opening routines. */ /** * \ingroup fslib * Tries to process data in a volume as a file system. 
* Returns a structure that can be used for analysis and reporting. * * @param a_part_info Open volume to read from and analyze * @param a_ftype Type of file system (or autodetect) * * @return NULL on error */ TSK_FS_INFO * tsk_fs_open_vol(const TSK_VS_PART_INFO * a_part_info, TSK_FS_TYPE_ENUM a_ftype) { TSK_OFF_T offset; if (a_part_info == NULL) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("tsk_fs_open_vol: Null vpart handle"); return NULL; } else if ((a_part_info->vs == NULL) || (a_part_info->vs->tag != TSK_VS_INFO_TAG)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("tsk_fs_open_vol: Null vs handle"); return NULL; } offset = a_part_info->start * a_part_info->vs->block_size + a_part_info->vs->offset; return tsk_fs_open_img(a_part_info->vs->img_info, offset, a_ftype); } /** * \ingroup fslib * Tries to process data in a disk image at a given offset as a file system. * Returns a structure that can be used for analysis and reporting. * * @param a_img_info Disk image to analyze * @param a_offset Byte offset to start analyzing from * @param a_ftype Type of file system (or autodetect) * * @return NULL on error */ TSK_FS_INFO * tsk_fs_open_img(TSK_IMG_INFO * a_img_info, TSK_OFF_T a_offset, TSK_FS_TYPE_ENUM a_ftype) { if (a_img_info == NULL) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("tsk_fs_open_img: Null image handle"); return NULL; } /* We will try different file systems ... * We need to try all of them in case more than one matches */ if (a_ftype == TSK_FS_TYPE_DETECT) { TSK_FS_INFO *fs_info, *fs_set = NULL; char *set = NULL; if (tsk_verbose) tsk_fprintf(stderr, "fsopen: Auto detection mode at offset %" PRIuOFF "\n", a_offset); if ((fs_info = ntfs_open(a_img_info, a_offset, TSK_FS_TYPE_NTFS_DETECT, 1)) != NULL) { set = "NTFS"; fs_set = fs_info; } else { tsk_error_reset(); } if ((fs_info = fatfs_open(a_img_info, a_offset, TSK_FS_TYPE_FAT_DETECT, 1)) != NULL) { if (set == NULL) { set = "FAT"; fs_set = fs_info; } else { fs_set->close(fs_set); fs_info->close(fs_info); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNKTYPE); tsk_error_set_errstr("FAT or %s", set); return NULL; } } else { tsk_error_reset(); } if ((fs_info = ext2fs_open(a_img_info, a_offset, TSK_FS_TYPE_EXT_DETECT, 1)) != NULL) { if (set == NULL) { set = "EXT2/3"; fs_set = fs_info; } else { fs_set->close(fs_set); fs_info->close(fs_info); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNKTYPE); tsk_error_set_errstr("EXT2/3 or %s", set); return NULL; } } else { tsk_error_reset(); } if ((fs_info = ffs_open(a_img_info, a_offset, TSK_FS_TYPE_FFS_DETECT)) != NULL) { if (set == NULL) { set = "UFS"; fs_set = fs_info; } else { fs_set->close(fs_set); fs_info->close(fs_info); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNKTYPE); tsk_error_set_errstr("UFS or %s", set); return NULL; } } else { tsk_error_reset(); } if ((fs_info = yaffs2_open(a_img_info, a_offset, TSK_FS_TYPE_YAFFS2_DETECT, 1)) != NULL) { if (set == NULL) { set = "YAFFS2"; fs_set = fs_info; } else { fs_set->close(fs_set); fs_info->close(fs_info); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNKTYPE); tsk_error_set_errstr("YAFFS2 or %s", set); return NULL; } } else { tsk_error_reset(); } #if TSK_USE_HFS if ((fs_info = hfs_open(a_img_info, a_offset, TSK_FS_TYPE_HFS_DETECT, 1)) != NULL) { if (set == NULL) { set = "HFS"; fs_set = fs_info; } else { fs_set->close(fs_set); fs_info->close(fs_info); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNKTYPE); 
tsk_error_set_errstr("HFS or %s", set); return NULL; } } else { tsk_error_reset(); } #endif if ((fs_info = iso9660_open(a_img_info, a_offset, TSK_FS_TYPE_ISO9660_DETECT, 1)) != NULL) { if (set != NULL) { fs_set->close(fs_set); fs_info->close(fs_info); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNKTYPE); tsk_error_set_errstr("ISO9660 or %s", set); return NULL; } fs_set = fs_info; } else { tsk_error_reset(); } if (fs_set == NULL) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNKTYPE); return NULL; } return fs_set; } else { if (TSK_FS_TYPE_ISNTFS(a_ftype)) return ntfs_open(a_img_info, a_offset, a_ftype, 0); else if (TSK_FS_TYPE_ISFAT(a_ftype)) return fatfs_open(a_img_info, a_offset, a_ftype, 0); else if (TSK_FS_TYPE_ISFFS(a_ftype)) return ffs_open(a_img_info, a_offset, a_ftype); else if (TSK_FS_TYPE_ISEXT(a_ftype)) return ext2fs_open(a_img_info, a_offset, a_ftype, 0); else if (TSK_FS_TYPE_ISHFS(a_ftype)) return hfs_open(a_img_info, a_offset, a_ftype, 0); else if (TSK_FS_TYPE_ISISO9660(a_ftype)) return iso9660_open(a_img_info, a_offset, a_ftype, 0); else if (TSK_FS_TYPE_ISRAW(a_ftype)) return rawfs_open(a_img_info, a_offset); else if (TSK_FS_TYPE_ISSWAP(a_ftype)) return swapfs_open(a_img_info, a_offset); else if (TSK_FS_TYPE_ISYAFFS2(a_ftype)) return yaffs2_open(a_img_info, a_offset, a_ftype, 0); else { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNSUPTYPE); tsk_error_set_errstr("%X", (int) a_ftype); return NULL; } } } /** * \ingroup fslib * Close an open file system. * @param a_fs File system to close. */ void tsk_fs_close(TSK_FS_INFO * a_fs) { if ((a_fs == NULL) || (a_fs->tag != TSK_FS_INFO_TAG)) return; // each file system is supposed to call tsk_fs_free() a_fs->close(a_fs); } /* tsk_fs_malloc - init lock after tsk_malloc * This is for fs module and all it's inheritances */ TSK_FS_INFO * tsk_fs_malloc(size_t a_len) { TSK_FS_INFO *fs_info; if ((fs_info = (TSK_FS_INFO *) tsk_malloc(a_len)) == NULL) return NULL; tsk_init_lock(&fs_info->list_inum_named_lock); tsk_init_lock(&fs_info->orphan_dir_lock); fs_info->list_inum_named = NULL; return fs_info; } /* tsk_fs_free - deinit lock before free memory * This is for fs module and all it's inheritances */ void tsk_fs_free(TSK_FS_INFO * a_fs_info) { if (a_fs_info->list_inum_named) { tsk_list_free(a_fs_info->list_inum_named); a_fs_info->list_inum_named = NULL; } /* we should probably get the lock, but we're * about to kill the entire object so there are * bigger problems if another thread is still * using the fs */ if (a_fs_info->orphan_dir) { tsk_fs_dir_close(a_fs_info->orphan_dir); a_fs_info->orphan_dir = NULL; } tsk_deinit_lock(&a_fs_info->list_inum_named_lock); tsk_deinit_lock(&a_fs_info->orphan_dir_lock); free(a_fs_info); } sleuthkit-4.2.0/tsk/fs/fs_parse.c000644 000765 000024 00000005774 12576320700 017521 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2005-2011 Brian Carrier. All Rights reserved * * This software is distributed under the Common Public License 1.0 */ #include "tsk_fs_i.h" /** * \file fs_parse.c * Contains code to parse specific types of data from * the command line */ /** * \ingroup fslib * Parse a TSK_TCHAR string of an inode, type, and id pair (not all parts * need to be there). This assumes the string is either: * INUM, INUM-TYPE, or INUM-TYPE-ID. Return the values in integer form. * * @param [in] str Input string to parse * @param [out] inum Pointer to location where inode can be stored. 
* @param [out] type Pointer to location where type can be stored (or NULL) * @param [out] type_used Pointer to location where the value can be set * to 1 if the type was set (to differentiate between meanings of 0) (or NULL). * @param [out] id Pointer to location where id can be stored (or NULL) * @param [out] id_used Pointer to location where the value can be set * to 1 if the id was set (to differentiate between meanings of 0) (or NULL). * * @return 1 on error or if not an inode and 0 on success */ int tsk_fs_parse_inum(const TSK_TCHAR * str, TSK_INUM_T * inum, TSK_FS_ATTR_TYPE_ENUM * type, uint8_t * type_used, uint16_t * id, uint8_t * id_used) { TSK_TCHAR *cp; TSK_TCHAR *tdash = NULL; TSK_TCHAR *tmpstr; if (*str == 0) return 1; if (type) *type = TSK_FS_ATTR_TYPE_DEFAULT; if (type_used) *type_used = 0; if (id) *id = TSK_FS_ATTR_ID_DEFAULT; if (id_used) *id_used = 0; /* Make a copy of the input string */ tmpstr = (TSK_TCHAR *) tsk_malloc((TSTRLEN(str) + 1) * sizeof(TSK_TCHAR)); if (tmpstr == NULL) return 1; TSTRNCPY(tmpstr, str, TSTRLEN(str) + 1); if ((tdash = TSTRCHR(tmpstr, _TSK_T('-'))) != NULL) { *tdash = '\0'; tdash++; } *inum = TSTRTOULL(tmpstr, &cp, 10); if (*cp || *cp == *tmpstr) { free(tmpstr); return 1; } // if there was a dash, verify what follows is numbers if (tdash) { TSK_TCHAR *idash = NULL; uint32_t ttmp; if ((idash = TSTRCHR(tdash, _TSK_T('-'))) != NULL) { *idash = '\0'; idash++; } ttmp = (uint32_t) TSTRTOUL(tdash, &cp, 10); if (*cp || *cp == *tdash) { free(tmpstr); return 1; } if (type != NULL) { *type = ttmp; if (type_used) *type_used = 1; } // if there was a dash after type, verify it is a number after it if (idash) { uint16_t itmp; itmp = (uint16_t) TSTRTOUL(idash, &cp, 0); if (*cp || *cp == *idash) { free(tmpstr); return 1; } if (id) *id = itmp; if (id_used) *id_used = 1; } } free(tmpstr); return 0; } sleuthkit-4.2.0/tsk/fs/fs_types.c000644 000765 000024 00000010610 12576320700 017534 0ustar00carrierstaff000000 000000 /* ** fs_types ** The Sleuth Kit ** ** Identify the type of file system being used ** ** Brian Carrier [carrier sleuthkit [dot] org] ** Copyright (c) 2006-2011 Brian Carrier, Basis Technology. All Rights reserved ** Copyright (c) 2003-2005 Brian Carrier. All rights reserved ** ** TASK ** Copyright (c) 2002 Brian Carrier, @stake Inc. All rights reserved ** ** This software is distributed under the Common Public License 1.0 ** */ /** * \file fs_types.c * Contains TSK functions that deal with parsing and printing file system type strings. 
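/* Illustrative sketch (not part of the original source): feeding a
 * command-line metadata address such as "12-128-4" (inode 12, attribute
 * type 128, attribute id 4) through tsk_fs_parse_inum() above;
 * parse_example is a hypothetical helper name. */
static void
parse_example(const TSK_TCHAR * arg)
{
    TSK_INUM_T inum;
    TSK_FS_ATTR_TYPE_ENUM type;
    uint16_t id;
    uint8_t type_used, id_used;

    if (tsk_fs_parse_inum(arg, &inum, &type, &type_used, &id, &id_used)) {
        tsk_fprintf(stderr, "Invalid metadata address\n");
        return;
    }
    tsk_fprintf(stdout, "inum: %" PRIuINUM "\n", inum);
    if (type_used)
        tsk_fprintf(stdout, "attribute type: %" PRIu32 "\n", (uint32_t) type);
    if (id_used)
        tsk_fprintf(stdout, "attribute id: %" PRIu16 "\n", id);
}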
*/ #include "tsk_fs_i.h" /** * \internal */ typedef struct { char *name; TSK_FS_TYPE_ENUM code; char *comment; } FS_TYPES; /** \internal * The table used to parse input strings - supports * legacy strings - in order of expected usage */ static FS_TYPES fs_type_table[] = { {"ntfs", TSK_FS_TYPE_NTFS_DETECT, "NTFS"}, {"fat", TSK_FS_TYPE_FAT_DETECT, "FAT (Auto Detection)"}, {"ext", TSK_FS_TYPE_EXT_DETECT, "ExtX (Auto Detection)"}, {"iso9660", TSK_FS_TYPE_ISO9660_DETECT, "ISO9660 CD"}, #if TSK_USE_HFS {"hfs", TSK_FS_TYPE_HFS_DETECT, "HFS+"}, #endif {"ufs", TSK_FS_TYPE_FFS_DETECT, "UFS (Auto Detection)"}, {"raw", TSK_FS_TYPE_RAW_DETECT, "Raw Data"}, {"swap", TSK_FS_TYPE_SWAP_DETECT, "Swap Space"}, {"fat12", TSK_FS_TYPE_FAT12, "FAT12"}, {"fat16", TSK_FS_TYPE_FAT16, "FAT16"}, {"fat32", TSK_FS_TYPE_FAT32, "FAT32"}, {"exfat", TSK_FS_TYPE_EXFAT, "exFAT"}, {"ext2", TSK_FS_TYPE_EXT2, "Ext2"}, {"ext3", TSK_FS_TYPE_EXT3, "Ext3"}, {"ext4", TSK_FS_TYPE_EXT4, "Ext4"}, {"ufs1", TSK_FS_TYPE_FFS1, "UFS1"}, {"ufs2", TSK_FS_TYPE_FFS2, "UFS2"}, {"yaffs2", TSK_FS_TYPE_YAFFS2, "YAFFS2"}, {0}, }; static FS_TYPES fs_legacy_type_table[] = { // legacy CLI arg names {"linux-ext", TSK_FS_TYPE_EXT_DETECT, "auto-detect Linux EXTxFS"}, {"linux-ext2", TSK_FS_TYPE_EXT2, "Linux TSK_FS_TYPE_EXT_2"}, {"linux-ext3", TSK_FS_TYPE_EXT3, "Linux TSK_FS_TYPE_EXT_3"}, {"linux-ext4", TSK_FS_TYPE_EXT4, "Linux TSK_FS_TYPE_EXT_4"}, {"bsdi", TSK_FS_TYPE_FFS1, "BSDi FFS"}, {"freebsd", TSK_FS_TYPE_FFS1, "FreeBSD FFS"}, {"netbsd", TSK_FS_TYPE_FFS1, "NetBSD FFS"}, {"openbsd", TSK_FS_TYPE_FFS1, "OpenBSD FFS"}, {"solaris", TSK_FS_TYPE_FFS1B, "Solaris FFS"}, {0}, }; /** * \ingroup fslib * Parse a string with the file system type and return its internal ID. * * @param str String to parse, always UTF-8. * @returns ID of string (or unsupported if the name is unknown) */ TSK_FS_TYPE_ENUM tsk_fs_type_toid_utf8(const char *str) { FS_TYPES *sp; for (sp = fs_type_table; sp->name; sp++) { if (strcmp(str, sp->name) == 0) { return sp->code; } } // look at the legacy names for (sp = fs_legacy_type_table; sp->name; sp++) { if (strcmp(str, sp->name) == 0) { return sp->code; } } return TSK_FS_TYPE_UNSUPP; } /** * \ingroup fslib * Parse a string with the file system type and return its internal ID. * * @param str String to parse. * @returns ID of string (or unsupported if the name is unknown) */ TSK_FS_TYPE_ENUM tsk_fs_type_toid(const TSK_TCHAR * str) { char tmp[16]; int i; // convert to char for (i = 0; i < 15 && str[i] != '\0'; i++) { tmp[i] = (char) str[i]; } tmp[i] = '\0'; return tsk_fs_type_toid_utf8(tmp); } /** * \ingroup fslib * Print the supported file system types to a file handle * @param hFile File handle to print to */ void tsk_fs_type_print(FILE * hFile) { FS_TYPES *sp; tsk_fprintf(hFile, "Supported file system types:\n"); for (sp = fs_type_table; sp->name; sp++) tsk_fprintf(hFile, "\t%s (%s)\n", sp->name, sp->comment); } /** * \ingroup fslib * Return the string name of a file system type id. * @param ftype File system type id * @returns Name or NULL on error */ const char * tsk_fs_type_toname(TSK_FS_TYPE_ENUM ftype) { FS_TYPES *sp; for (sp = fs_type_table; sp->name; sp++) if (sp->code == ftype) return sp->name; return NULL; } /** * \ingroup fslib * Return the supported file system types. * @returns The bit in the return value is 1 if the type is supported. 
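/* Illustrative sketch (not part of the original source): opening an image,
 * auto-detecting the file system, and reporting its type with the helpers
 * above. It assumes the image-layer calls tsk_img_open_sing() and
 * tsk_img_close() from the public libtsk.h header; detect_and_report is a
 * hypothetical name. */
static uint8_t
detect_and_report(const TSK_TCHAR * image_path, TSK_OFF_T fs_offset)
{
    TSK_IMG_INFO *img;
    TSK_FS_INFO *fs;

    if ((img = tsk_img_open_sing(image_path, TSK_IMG_TYPE_DETECT, 0)) == NULL)
        return 1;
    if ((fs = tsk_fs_open_img(img, fs_offset, TSK_FS_TYPE_DETECT)) == NULL) {
        tsk_img_close(img);
        return 1;
    }
    tsk_fprintf(stdout, "Detected file system: %s\n",
        tsk_fs_type_toname(fs->ftype));
    tsk_fs_close(fs);
    tsk_img_close(img);
    return 0;
}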
*/ TSK_FS_TYPE_ENUM tsk_fs_type_supported() { TSK_FS_TYPE_ENUM sup_types = 0; FS_TYPES *types; for (types = fs_type_table; types->name; types++) { sup_types |= types->code; } return sup_types; } sleuthkit-4.2.0/tsk/fs/hfs.c000644 000765 000024 00000700371 12576320700 016472 0ustar00carrierstaff000000 000000 /* ** The Sleuth Kit ** ** This software is subject to the IBM Public License ver. 1.0, ** which was displayed prior to download and is included in the readme.txt ** file accompanying the Sleuth Kit files. It may also be requested from: ** Crucial Security Inc. ** 14900 Conference Center Drive ** Chantilly, VA 20151 ** ** Copyright (c) 2009 Brian Carrier. All rights reserved. ** ** Judson Powers [jpowers@atc-nycorp.com] ** Matt Stillerman [matt@atc-nycorp.com] ** Rob Joyce [rob@atc-nycorp.com] ** Copyright (c) 2008, 2012 ATC-NY. All rights reserved. ** This file contains data developed with support from the National ** Institute of Justice, Office of Justice Programs, U.S. Department of Justice. ** ** Wyatt Banks [wbanks@crucialsecurity.com] ** Copyright (c) 2005 Crucial Security Inc. All rights reserved. ** ** Brian Carrier [carrier@sleuthkit.org] ** Copyright (c) 2003-2005 Brian Carrier. All rights reserved ** ** Copyright (c) 1997,1998,1999, International Business Machines ** Corporation and others. All Rights Reserved. */ /* TCT * LICENSE * This software is distributed under the IBM Public License. * AUTHOR(S) * Wietse Venema * IBM T.J. Watson Research * P.O. Box 704 * Yorktown Heights, NY 10598, USA --*/ /* ** You may distribute the Sleuth Kit, or other software that incorporates ** part of all of the Sleuth Kit, in object code form under a license agreement, ** provided that: ** a) you comply with the terms and conditions of the IBM Public License ** ver 1.0; and ** b) the license agreement ** i) effectively disclaims on behalf of all Contributors all warranties ** and conditions, express and implied, including warranties or ** conditions of title and non-infringement, and implied warranties ** or conditions of merchantability and fitness for a particular ** purpose. ** ii) effectively excludes on behalf of all Contributors liability for ** damages, including direct, indirect, special, incidental and ** consequential damages such as lost profits. ** iii) states that any provisions which differ from IBM Public License ** ver. 1.0 are offered by that Contributor alone and not by any ** other party; and ** iv) states that the source code for the program is available from you, ** and informs licensees how to obtain it in a reasonable manner on or ** through a medium customarily used for software exchange. ** ** When the Sleuth Kit or other software that incorporates part or all of ** the Sleuth Kit is made available in source code form: ** a) it must be made available under IBM Public License ver. 1.0; and ** b) a copy of the IBM Public License ver. 1.0 must be included with ** each copy of the program. 
*/ /** \file hfs.c * Contains the general internal TSK HFS metadata and data unit code */ #include "tsk_fs_i.h" #include "tsk_hfs.h" #include #ifdef TSK_WIN32 #include #else #include #endif // Compression Stuff #ifdef HAVE_LIBZ #include #endif #define XSWAP(a,b) { a ^= b; b ^= a; a ^= b; } // Forward declarations: static uint8_t hfs_load_attrs(TSK_FS_FILE * fs_file); static uint8_t hfs_load_extended_attrs(TSK_FS_FILE * file, unsigned char *isCompressed, unsigned char *compDataInRSRC, uint64_t * uncSize); void error_detected(uint32_t errnum, char *errstr, ...); void error_returned(char *errstr, ...); #ifdef HAVE_LIBZ /***************** ZLIB stuff *******************************/ // Adapted from zpipe.c (part of zlib) at http://zlib.net/zpipe.c #define CHUNK 16384 /* * Invokes the zlib library to inflate (uncompress) data. * * Returns and error code. Places the uncompressed data in a buffer supplied by the caller. Also * returns the uncompressed length, and the number of compressed bytes consumed. * * Will stop short of the end of compressed data, if a natural end of a compression unit is reached. Using * bytesConsumed, the caller can then advance the source pointer, and re-invoke the function. This will then * inflate the next following compression unit in the data stream. * * @param source - buffer of compressed data * @param sourceLen - length of the compressed data. * @param dest -- buffer to hold the uncompressed results * @param destLen -- length of the dest buffer * @param uncompressedLength -- return of the length of the uncompressed data found. * @param bytesConsumed -- return of the number of input bytes of compressed data used. * @return 0 on success, a negative number on error */ static int zlib_inflate(char *source, uint64_t sourceLen, char *dest, uint64_t destLen, uint64_t * uncompressedLength, unsigned long *bytesConsumed) // this is unsigned long because that's what zlib uses. { int ret; unsigned have; z_stream strm; unsigned char in[CHUNK]; unsigned char out[CHUNK]; // Some vars to help with copying bytes into "in" char *srcPtr = source; char *destPtr = dest; uint64_t srcAvail = sourceLen; //uint64_t uint64_t amtToCopy; uint64_t copiedSoFar = 0; /* allocate inflate state */ strm.zalloc = Z_NULL; strm.zfree = Z_NULL; strm.opaque = Z_NULL; strm.avail_in = 0; strm.next_in = Z_NULL; ret = inflateInit(&strm); if (ret != Z_OK) { error_detected(TSK_ERR_FS_READ, "zlib_inflate: failed to initialize inflation engine (%d)", ret); return ret; } /* decompress until deflate stream ends or end of file */ do { // Copy up to CHUNK bytes into "in" from source, advancing the pointer, and // setting strm.avail_in equal to the number of bytes copied. if (srcAvail >= CHUNK) { amtToCopy = CHUNK; srcAvail -= CHUNK; } else { amtToCopy = srcAvail; srcAvail = 0; } // wipe out any previous value, copy in the bytes, advance the pointer, record number of bytes. 
memset(in, 0, CHUNK); if (amtToCopy > SIZE_MAX || amtToCopy > UINT_MAX) { error_detected(TSK_ERR_FS_READ, "zlib_inflate: amtToCopy in one chunk is too large"); return -100; } memcpy(in, srcPtr, (size_t) amtToCopy); // cast OK because of above test srcPtr += amtToCopy; strm.avail_in = (uInt) amtToCopy; // cast OK because of above test if (strm.avail_in == 0) break; strm.next_in = in; /* run inflate() on input until output buffer not full */ do { strm.avail_out = CHUNK; strm.next_out = out; ret = inflate(&strm, Z_NO_FLUSH); if (ret == Z_NEED_DICT) ret = Z_DATA_ERROR; // we don't have a custom dict if (ret < 0 && ret != Z_BUF_ERROR) { // Z_BUF_ERROR is not fatal error_detected(TSK_ERR_FS_READ, " zlib_inflate: zlib returned error %d (%s)", ret, strm.msg); (void) inflateEnd(&strm); return ret; } have = CHUNK - strm.avail_out; // Is there enough space in dest to copy the current chunk? if (copiedSoFar + have > destLen) { // There is not enough space, so better return an error error_detected(TSK_ERR_FS_READ, " zlib_inflate: not enough space in inflation destination\n"); (void) inflateEnd(&strm); return -200; } // Copy "have" bytes from out to destPtr, advance destPtr memcpy(destPtr, out, have); destPtr += have; copiedSoFar += have; } while ((strm.avail_out == 0) && (ret != Z_STREAM_END)); /* done when inflate() says it's done */ } while (ret != Z_STREAM_END); if (ret == Z_STREAM_END) *uncompressedLength = copiedSoFar; *bytesConsumed = strm.total_in; /* clean up and return */ (void) inflateEnd(&strm); return ret == Z_STREAM_END ? Z_OK : Z_DATA_ERROR; } #endif /* may set error up to string 1 * returns 0 on success, 1 on failure */ uint8_t hfs_checked_read_random(TSK_FS_INFO * fs, char *buf, size_t len, TSK_OFF_T offs) { ssize_t r; r = tsk_fs_read(fs, offs, buf, len); if (r != len) { if (r >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } return 1; } return 0; } /********************************************************************** * * MISC FUNCS * **********************************************************************/ /* convert the HFS Time (seconds from 1/1/1904) * to UNIX (UTC seconds from 1/1/1970) * The number is borrowed from linux HFS driver source */ uint32_t hfs_convert_2_unix_time(uint32_t hfsdate) { if (hfsdate < NSEC_BTWN_1904_1970) return 0; return (uint32_t) (hfsdate - NSEC_BTWN_1904_1970); } /** * Convert a cnid (metadata address) to big endian array. * This is used to create the key for tree lookups. * @param cnid Metadata address to convert * @param array [out] Array to write data into. */ static void cnid_to_array(uint32_t cnid, uint8_t array[4]) { array[3] = (cnid >> 0) & 0xff; array[2] = (cnid >> 8) & 0xff; array[1] = (cnid >> 16) & 0xff; array[0] = (cnid >> 24) & 0xff; } /********************************************************************** * * Lookup Functions * **********************************************************************/ /* Compares the given HFS+ Extents B-tree key to key constructed * for finding the beginning of the data fork extents for the given * CNID. (That is, the search key uses the given CNID and has * fork = 0 and start_block = 0.) 
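/* Illustrative sketch (not part of the original source): turning an HFS+
 * catalog timestamp (seconds since 1/1/1904) into a printable string by
 * combining hfs_convert_2_unix_time() above with tsk_fs_time_to_str();
 * print_hfs_date is a hypothetical helper name. */
static void
print_hfs_date(FILE * hFile, uint32_t hfsdate)
{
    char timeBuf[128];
    time_t unix_time = (time_t) hfs_convert_2_unix_time(hfsdate);

    tsk_fprintf(hFile, "%s\n", tsk_fs_time_to_str(unix_time, timeBuf));
}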
*/ static int hfs_ext_compare_keys(HFS_INFO * hfs, uint32_t cnid, const hfs_btree_key_ext * key) { TSK_FS_INFO *fs = (TSK_FS_INFO *) & (hfs->fs_info); uint32_t key_cnid; key_cnid = tsk_getu32(fs->endian, key->file_id); if (key_cnid < cnid) return -1; if (key_cnid > cnid) return 1; /* referring to the same cnids */ /* we are always looking for the data fork */ if (key->fork_type != HFS_EXT_KEY_TYPE_DATA) return 1; /* we are always looking for a start_block of zero (interested in the beginning of the extents, regardless of what the start_block is); all files except the bad blocks file should have a start_block greater than zero */ if (tsk_getu32(fs->endian, key->start_block) == 0) return 0; return 1; } /** \internal * Returns the length of an HFS+ B-tree INDEX key based on the tree header * structure and the length claimed in the record. With some trees, * the length given in the record is not used. * Note that this neither detects nor correctly handles 8-bit keys * (which should not be present in HFS+). * * This does not give the right answer for the Attributes File B-tree, for some * HFS+ file systems produced by the Apple OS, while it works for others. For * the Attributes file, INDEX keys should always be as stated in the record itself, * never the "maxKeyLen" of the B-tree header. * * In this software, this function is only invoked when dealing with the Extents file. In * that usage, it is not sufficiently well tested to know if it always gives the right * answer or not. We can only test that with a highly fragmented disk. * @param hfs File System * @param keylen Length of key as given in record * @param header Tree header * @returns Length of key */ uint16_t hfs_get_idxkeylen(HFS_INFO * hfs, uint16_t keylen, const hfs_btree_header_record * header) { TSK_FS_INFO *fs = (TSK_FS_INFO *) & (hfs->fs_info); // if the flag is set, use the length given in the record if (tsk_getu32(fs->endian, header->attr) & HFS_BT_HEAD_ATTR_VARIDXKEYS) return keylen; else return tsk_getu16(fs->endian, header->maxKeyLen); } /** * Convert the extents runs to TSK_FS_ATTR_RUN runs. * * @param a_fs File system to analyze * @param a_extents Raw extents to process (in an array of 8) * @param a_start_off Starting block offset of these runs * @returns NULL on error or if no runs are in extents (test tsk_errno) */ static TSK_FS_ATTR_RUN * hfs_extents_to_attr(TSK_FS_INFO * a_fs, const hfs_ext_desc * a_extents, TSK_OFF_T a_start_off) { TSK_FS_ATTR_RUN *head_run = NULL; TSK_FS_ATTR_RUN *prev_run = NULL; int i; TSK_OFF_T cur_off = a_start_off; // since tsk_errno is checked as a return value, make sure it is clean. tsk_error_reset(); if (tsk_verbose) tsk_fprintf(stderr, "hfs_extents_to_attr: Converting extents from offset %" PRIuOFF " to runlist\n", a_start_off); for (i = 0; i < 8; i++) { TSK_FS_ATTR_RUN *cur_run; uint32_t addr = tsk_getu32(a_fs->endian, a_extents[i].start_blk); uint32_t len = tsk_getu32(a_fs->endian, a_extents[i].blk_cnt); if (tsk_verbose) tsk_fprintf(stderr, "hfs_extents_to_attr: run %i at addr %" PRIu32 " with len %" PRIu32 "\n", i, addr, len); if ((addr == 0) && (len == 0)) { break; } // make a non-resident run if ((cur_run = tsk_fs_attr_run_alloc()) == NULL) { error_returned(" - hfs_extents_to_attr"); return NULL; } cur_run->addr = addr; cur_run->len = len; cur_run->offset = cur_off; if (head_run == NULL) head_run = cur_run; if (prev_run != NULL) prev_run->next = cur_run; cur_off += cur_run->len; prev_run = cur_run; } return head_run; } /** * Look in the extents catalog for entries for a given file. 
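/* Illustrative sketch (not part of the original source): the offset
 * bookkeeping that hfs_extents_to_attr() above performs. Each descriptor
 * contributes a run of blk_cnt blocks whose logical offset is the running
 * total of the lengths seen so far, and an all-zero descriptor ends the
 * list; print_extent_layout is a hypothetical helper operating on already
 * byte-swapped values. */
static void
print_extent_layout(FILE * hFile, const uint32_t start_blk[8],
    const uint32_t blk_cnt[8])
{
    uint32_t log_off = 0;       /* logical offset within the fork, in blocks */
    int i;

    for (i = 0; i < 8; i++) {
        if ((start_blk[i] == 0) && (blk_cnt[i] == 0))
            break;
        tsk_fprintf(hFile, "run %d: start %" PRIu32 " len %" PRIu32
            " logical offset %" PRIu32 "\n",
            i, start_blk[i], blk_cnt[i], log_off);
        log_off += blk_cnt[i];
    }
}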
Add the runs * to the passed attribute structure. * * @param hfs File system being analyzed * @param cnid file id of file to search for * @param a_attr Attribute to add extents runs to * @param dataForkQ if true, then find extents for the data fork. If false, then find extents for the Resource fork. * @returns 1 on error and 0 on success */ static uint8_t hfs_ext_find_extent_record_attr(HFS_INFO * hfs, uint32_t cnid, TSK_FS_ATTR * a_attr, unsigned char dataForkQ) { TSK_FS_INFO *fs = (TSK_FS_INFO *) & (hfs->fs_info); uint16_t nodesize; /* size of nodes (all, regardless of the name) */ uint32_t cur_node; /* node id of the current node */ char *node = NULL; uint8_t is_done; uint8_t desiredType; tsk_error_reset(); if (tsk_verbose) tsk_fprintf(stderr, "hfs_ext_find_extent_record_attr: Looking for extents for file %" PRIu32 " %s\n", cnid, dataForkQ ? "data fork" : "resource fork"); if (!hfs->has_extents_file) { // No extents file (which is optional), and so, no further extents are possible. return 0; } // Are we looking for extents of the data fork or the resource fork? desiredType = dataForkQ ? HFS_EXT_KEY_TYPE_DATA : HFS_EXT_KEY_TYPE_RSRC; // Load the extents attribute, if it has not been done so yet. if (hfs->extents_file == NULL) { ssize_t cnt; if ((hfs->extents_file = tsk_fs_file_open_meta(fs, NULL, HFS_EXTENTS_FILE_ID)) == NULL) { return 1; } /* cache the data attribute */ hfs->extents_attr = tsk_fs_attrlist_get(hfs->extents_file->meta->attr, TSK_FS_ATTR_TYPE_DEFAULT); if (!hfs->extents_attr) { tsk_error_errstr2_concat (" - Default Attribute not found in Extents File"); return 1; } // cache the extents file header cnt = tsk_fs_attr_read(hfs->extents_attr, 14, (char *) &(hfs->extents_header), sizeof(hfs_btree_header_record), 0); if (cnt != sizeof(hfs_btree_header_record)) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2 ("hfs_ext_find_extent_record_attr: Error reading header"); return 1; } } // allocate a node buffer nodesize = tsk_getu16(fs->endian, hfs->extents_header.nodesize); if ((node = (char *) tsk_malloc(nodesize)) == NULL) { return 1; } /* start at root node */ cur_node = tsk_getu32(fs->endian, hfs->extents_header.rootNode); /* if the root node is zero, then the extents btree is empty */ /* if no files have overflow extents, the Extents B-tree still exists on disk, but is an empty B-tree containing only the header node */ if (cur_node == 0) { if (tsk_verbose) tsk_fprintf(stderr, "hfs_ext_find_extent_record: " "empty extents btree\n"); free(node); return 0; } if (tsk_verbose) tsk_fprintf(stderr, "hfs_ext_find_extent_record: starting at " "root node %" PRIu32 "; nodesize = %" PRIu16 "\n", cur_node, nodesize); /* Recurse down to the needed leaf nodes and then go forward */ is_done = 0; while (is_done == 0) { TSK_OFF_T cur_off; /* start address of cur_node */ uint16_t num_rec; /* number of records in this node */ ssize_t cnt; hfs_btree_node *node_desc; // sanity check if (cur_node > tsk_getu32(fs->endian, hfs->extents_header.totalNodes)) { tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr ("hfs_ext_find_extent_record_attr: Node %d too large for file", cur_node); free(node); return 1; } // read the current node cur_off = cur_node * nodesize; if (tsk_verbose) tsk_fprintf(stderr, "hfs_ext_find_extent_record: reading node %" PRIu32 " at offset %" PRIuOFF "\n", cur_node, cur_off); cnt = tsk_fs_attr_read(hfs->extents_attr, cur_off, node, nodesize, 0); if (cnt != nodesize) { if (cnt >= 0) { tsk_error_reset(); 
tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2 ("hfs_ext_find_extent_record_attr: Error reading node %d at offset %" PRIuOFF, cur_node, cur_off); free(node); return 1; } // process the header / descriptor node_desc = (hfs_btree_node *) node; num_rec = tsk_getu16(fs->endian, node_desc->num_rec); if (num_rec == 0) { tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr ("hfs_ext_find_extent_record: zero records in node %" PRIu32, cur_node); free(node); return 1; } /* With an index node, find the record with the largest key that is smaller * to or equal to cnid */ if (node_desc->type == HFS_BT_NODE_TYPE_IDX) { uint32_t next_node = 0; int rec; if (tsk_verbose) tsk_fprintf(stderr, "hfs_ext_find_extent_record: Index node %" PRIu32 " @ %" PRIu64 " has %" PRIu16 " records\n", cur_node, cur_off, num_rec); for (rec = 0; rec < num_rec; rec++) { int cmp; size_t rec_off; hfs_btree_key_ext *key; // get the record offset in the node rec_off = tsk_getu16(fs->endian, &node[nodesize - (rec + 1) * 2]); if (rec_off > nodesize) { tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr ("hfs_ext_find_extent_record_attr: offset of record %d in index node %d too large (%d vs %" PRIu16 ")", rec, cur_node, (int) rec_off, nodesize); free(node); return 1; } key = (hfs_btree_key_ext *) & node[rec_off]; cmp = hfs_ext_compare_keys(hfs, cnid, key); if (tsk_verbose) tsk_fprintf(stderr, "hfs_ext_find_extent_record: record %" PRIu16 " ; keylen %" PRIu16 " (FileId: %" PRIu32 ", ForkType: %" PRIu8 ", StartBlk: %" PRIu32 "); compare: %d\n", rec, tsk_getu16(fs->endian, key->key_len), tsk_getu32(fs->endian, key->file_id), key->fork_type, tsk_getu32(fs->endian, key->start_block), cmp); /* save the info from this record unless it is bigger than cnid */ if ((cmp <= 0) || (next_node == 0)) { hfs_btree_index_record *idx_rec; int keylen = 2 + hfs_get_idxkeylen(hfs, tsk_getu16(fs->endian, key->key_len), &(hfs->extents_header)); if (rec_off + keylen > nodesize) { tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr ("hfs_ext_find_extent_record_attr: offset and keylenth of record %d in index node %d too large (%d vs %" PRIu16 ")", rec, cur_node, (int) rec_off + keylen, nodesize); free(node); return 1; } idx_rec = (hfs_btree_index_record *) & node[rec_off + keylen]; next_node = tsk_getu32(fs->endian, idx_rec->childNode); } // we are bigger than cnid, so move on to the next node if (cmp > 0) { break; } } // check if we found a relevant node, if not stop. if (next_node == 0) { if (tsk_verbose) tsk_fprintf(stderr, "hfs_ext_find_extent_record_attr: did not find any keys for %d in index node %d", cnid, cur_node); is_done = 1; break; } cur_node = next_node; } /* with a leaf, we process until we are past cnid. 
We move right too if we can */ else if (node_desc->type == HFS_BT_NODE_TYPE_LEAF) { int rec; if (tsk_verbose) tsk_fprintf(stderr, "hfs_ext_find_extent_record: Leaf node %" PRIu32 " @ %" PRIu64 " has %" PRIu16 " records\n", cur_node, cur_off, num_rec); for (rec = 0; rec < num_rec; rec++) { size_t rec_off; hfs_btree_key_ext *key; uint32_t rec_cnid; hfs_extents *extents; TSK_OFF_T ext_off = 0; int keylen; TSK_FS_ATTR_RUN *attr_run; // get the record offset in the node rec_off = tsk_getu16(fs->endian, &node[nodesize - (rec + 1) * 2]); if (rec_off > nodesize) { tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr ("hfs_ext_find_extent_record_attr: offset of record %d in leaf node %d too large (%d vs %" PRIu16 ")", rec, cur_node, (int) rec_off, nodesize); free(node); return 1; } key = (hfs_btree_key_ext *) & node[rec_off]; if (tsk_verbose) tsk_fprintf(stderr, "hfs_ext_find_extent_record: record %" PRIu16 "; keylen %" PRIu16 " (%" PRIu32 ", %" PRIu8 ", %" PRIu32 ")\n", rec, tsk_getu16(fs->endian, key->key_len), tsk_getu32(fs->endian, key->file_id), key->fork_type, tsk_getu32(fs->endian, key->start_block)); rec_cnid = tsk_getu32(fs->endian, key->file_id); // see if this record is for our file // OLD logic, just handles the DATA fork // if (rec_cnid < cnid) { // continue; // } // else if ((rec_cnid > cnid) // || (key->fork_type != HFS_EXT_KEY_TYPE_DATA)) { // is_done = 1; // break; // } // NEW logic, handles both DATA and RSRC forks. if (rec_cnid < cnid) { continue; } if (rec_cnid > cnid) { is_done = 1; break; } if (key->fork_type != desiredType) { if (dataForkQ) { is_done = 1; break; } else continue; } // OK, this is one of the extents records that we are seeking, so save it. keylen = 2 + tsk_getu16(fs->endian, key->key_len); if (rec_off + keylen > nodesize) { tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr ("hfs_ext_find_extent_record_attr: offset and keylenth of record %d in leaf node %d too large (%d vs %" PRIu16 ")", rec, cur_node, (int) rec_off + keylen, nodesize); free(node); return 1; } // get the starting offset of this extent ext_off = tsk_getu32(fs->endian, key->start_block); // convert the extents to the TSK format extents = (hfs_extents *) & node[rec_off + keylen]; attr_run = hfs_extents_to_attr(fs, extents->extents, ext_off); if ((attr_run == NULL) && (tsk_error_get_errno() != 0)) { tsk_error_errstr2_concat (" - hfs_ext_find_extent_record_attr"); free(node); return 1; } if (tsk_fs_attr_add_run(fs, a_attr, attr_run)) { tsk_error_errstr2_concat (" - hfs_ext_find_extent_record_attr"); free(node); return 1; } } cur_node = tsk_getu32(fs->endian, node_desc->flink); if (cur_node == 0) { is_done = 1; break; } } else { tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr("hfs_ext_find_extent_record: btree node %" PRIu32 " (%" PRIuOFF ") is neither index nor leaf (%" PRIu8 ")", cur_node, cur_off, node_desc->type); free(node); return 1; } } free(node); return 0; } /** \internal * Compares two Catalog B-tree keys. 
* @param hfs File System being analyzed * @param key1 Key 1 to compare * @param key2 Key 2 to compare * @returns -1 if key1 is smaller, 0 if equal, and 1 if key1 is larger */ int hfs_cat_compare_keys(HFS_INFO * hfs, const hfs_btree_key_cat * key1, const hfs_btree_key_cat * key2) { TSK_FS_INFO *fs = (TSK_FS_INFO *) & (hfs->fs_info); uint32_t cnid1, cnid2; cnid1 = tsk_getu32(fs->endian, key1->parent_cnid); cnid2 = tsk_getu32(fs->endian, key2->parent_cnid); if (cnid1 < cnid2) return -1; if (cnid1 > cnid2) return 1; return hfs_unicode_compare(hfs, &key1->name, &key2->name); } /** \internal * @param hfs File system * @param targ_data can be null * @param a_cb callback * @param ptr Pointer to pass to callback * @returns 1 on error */ uint8_t hfs_cat_traverse(HFS_INFO * hfs, const void *targ_data, TSK_HFS_BTREE_CB a_cb, void *ptr) { TSK_FS_INFO *fs = &(hfs->fs_info); uint32_t cur_node; /* node id of the current node */ char *node; uint16_t nodesize; uint8_t is_done = 0; tsk_error_reset(); nodesize = tsk_getu16(fs->endian, hfs->catalog_header.nodesize); if ((node = (char *) tsk_malloc(nodesize)) == NULL) return 1; /* start at root node */ cur_node = tsk_getu32(fs->endian, hfs->catalog_header.rootNode); /* if the root node is zero, then the extents btree is empty */ /* if no files have overflow extents, the Extents B-tree still exists on disk, but is an empty B-tree containing only the header node */ if (cur_node == 0) { if (tsk_verbose) tsk_fprintf(stderr, "hfs_cat_traverse: " "empty extents btree\n"); free(node); return 1; } if (tsk_verbose) tsk_fprintf(stderr, "hfs_cat_traverse: starting at " "root node %" PRIu32 "; nodesize = %" PRIu16 "\n", cur_node, nodesize); /* Recurse down to the needed leaf nodes and then go forward */ is_done = 0; while (is_done == 0) { TSK_OFF_T cur_off; /* start address of cur_node */ uint16_t num_rec; /* number of records in this node */ ssize_t cnt; hfs_btree_node *node_desc; // sanity check if (cur_node > tsk_getu32(fs->endian, hfs->catalog_header.totalNodes)) { tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr ("hfs_cat_traverse: Node %d too large for file", cur_node); free(node); return 1; } // read the current node cur_off = cur_node * nodesize; cnt = tsk_fs_attr_read(hfs->catalog_attr, cur_off, node, nodesize, 0); if (cnt != nodesize) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2 ("hfs_cat_traverse: Error reading node %d at offset %" PRIuOFF, cur_node, cur_off); free(node); return 1; } // process the header / descriptor node_desc = (hfs_btree_node *) node; num_rec = tsk_getu16(fs->endian, node_desc->num_rec); if (tsk_verbose) tsk_fprintf(stderr, "hfs_cat_traverse: node %" PRIu32 " @ %" PRIu64 " has %" PRIu16 " records\n", cur_node, cur_off, num_rec); if (num_rec == 0) { tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr("hfs_cat_traverse: zero records in node %" PRIu32, cur_node); free(node); return 1; } /* With an index node, find the record with the largest key that is smaller * to or equal to cnid */ if (node_desc->type == HFS_BT_NODE_TYPE_IDX) { uint32_t next_node = 0; int rec; for (rec = 0; rec < num_rec; rec++) { size_t rec_off; hfs_btree_key_cat *key; uint8_t retval; // get the record offset in the node rec_off = tsk_getu16(fs->endian, &node[nodesize - (rec + 1) * 2]); if (rec_off > nodesize) { tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr ("hfs_cat_traverse: offset of record %d in index node %d too large (%d vs %" PRIu16 ")", rec, cur_node, (int) rec_off, nodesize); 
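/* Aside (editorial sketch): how a record is located inside a B-tree node.
 * Record offsets are big-endian 16-bit values packed at the very end of the
 * node, last slot first, so record 'rec' has its offset slot at
 * nodesize - 2 * (rec + 1); the bounds check above guards against a corrupt
 * slot pointing past the node.  The helper name below is illustrative only
 * and is not part of the TSK API. */
static uint16_t
hfs_btree_rec_off_sketch(TSK_ENDIAN_ENUM endian, const char *node,
    uint16_t nodesize, int rec)
{
    // read the 16-bit offset slot that counts backwards from the node's end
    return tsk_getu16(endian, &node[nodesize - (rec + 1) * 2]);
}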
free(node); return 1; } key = (hfs_btree_key_cat *) & node[rec_off]; /* if (tsk_verbose) tsk_fprintf(stderr, "hfs_cat_traverse: record %" PRIu16 " ; keylen %" PRIu16 " (%" PRIu32 ")\n", rec, tsk_getu16(fs->endian, key->key_len), tsk_getu32(fs->endian, key->parent_cnid)); */ /* save the info from this record unless it is too big */ retval = a_cb(hfs, HFS_BT_NODE_TYPE_IDX, targ_data, key, cur_off + rec_off, ptr); if (retval == HFS_BTREE_CB_ERR) { tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr2 ("hfs_cat_traverse: Callback returned error"); free(node); return 1; } // record the closest entry else if ((retval == HFS_BTREE_CB_IDX_LT) || (next_node == 0)) { hfs_btree_index_record *idx_rec; int keylen = 2 + hfs_get_idxkeylen(hfs, tsk_getu16(fs->endian, key->key_len), &(hfs->catalog_header)); if (rec_off + keylen > nodesize) { tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr ("hfs_cat_traverse: offset of record and keylength %d in index node %d too large (%d vs %" PRIu16 ")", rec, cur_node, (int) rec_off + keylen, nodesize); free(node); return 1; } idx_rec = (hfs_btree_index_record *) & node[rec_off + keylen]; next_node = tsk_getu32(fs->endian, idx_rec->childNode); } if (retval == HFS_BTREE_CB_IDX_EQGT) { // move down to the next node break; } } // check if we found a relevant node if (next_node == 0) { tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr ("hfs_cat_traverse: did not find any keys in index node %d", cur_node); is_done = 1; break; } // TODO: Handle multinode loops if (next_node == cur_node) { tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr ("hfs_cat_traverse: node %d references itself as next node", cur_node); is_done = 1; break; } cur_node = next_node; } /* With a leaf, we look for the specific record. 
*/ else if (node_desc->type == HFS_BT_NODE_TYPE_LEAF) { int rec; for (rec = 0; rec < num_rec; rec++) { size_t rec_off; hfs_btree_key_cat *key; uint8_t retval; // get the record offset in the node rec_off = tsk_getu16(fs->endian, &node[nodesize - (rec + 1) * 2]); if (rec_off > nodesize) { tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr ("hfs_cat_traverse: offset of record %d in leaf node %d too large (%d vs %" PRIu16 ")", rec, cur_node, (int) rec_off, nodesize); free(node); return 1; } key = (hfs_btree_key_cat *) & node[rec_off]; /* if (tsk_verbose) tsk_fprintf(stderr, "hfs_cat_traverse: record %" PRIu16 "; keylen %" PRIu16 " (%" PRIu32 ")\n", rec, tsk_getu16(fs->endian, key->key_len), tsk_getu32(fs->endian, key->parent_cnid)); */ // rec_cnid = tsk_getu32(fs->endian, key->file_id); retval = a_cb(hfs, HFS_BT_NODE_TYPE_LEAF, targ_data, key, cur_off + rec_off, ptr); if (retval == HFS_BTREE_CB_LEAF_STOP) { is_done = 1; break; } else if (retval == HFS_BTREE_CB_ERR) { tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr2 ("hfs_cat_traverse: Callback returned error"); free(node); return 1; } } // move right to the next node if we got this far if (is_done == 0) { cur_node = tsk_getu32(fs->endian, node_desc->flink); if (cur_node == 0) { is_done = 1; } if (tsk_verbose) tsk_fprintf(stderr, "hfs_cat_traverse: moving forward to next leaf"); } } else { tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr("hfs_cat_traverse: btree node %" PRIu32 " (%" PRIu64 ") is neither index nor leaf (%" PRIu8 ")", cur_node, cur_off, node_desc->type); free(node); return 1; } } free(node); return 0; } static uint8_t hfs_cat_get_record_offset_cb(HFS_INFO * hfs, int8_t level_type, const void *targ_data, const hfs_btree_key_cat * cur_key, TSK_OFF_T key_off, void *ptr) { const hfs_btree_key_cat *targ_key = (hfs_btree_key_cat *) targ_data; if (tsk_verbose) tsk_fprintf(stderr, "hfs_cat_get_record_offset_cb: %s node want: %" PRIu32 " vs have: %" PRIu32 "\n", (level_type == HFS_BT_NODE_TYPE_IDX) ? "Index" : "Leaf", tsk_getu32(hfs->fs_info.endian, targ_key->parent_cnid), tsk_getu32(hfs->fs_info.endian, cur_key->parent_cnid)); if (level_type == HFS_BT_NODE_TYPE_IDX) { int diff = hfs_cat_compare_keys(hfs, cur_key, targ_key); if (diff < 0) return HFS_BTREE_CB_IDX_LT; else return HFS_BTREE_CB_IDX_EQGT; } else { int diff = hfs_cat_compare_keys(hfs, cur_key, targ_key); // see if this record is for our file or if we passed the interesting entries if (diff < 0) { return HFS_BTREE_CB_LEAF_GO; } else if (diff == 0) { TSK_OFF_T *off = (TSK_OFF_T *) ptr; *off = key_off + 2 + tsk_getu16(hfs->fs_info.endian, cur_key->key_len); } return HFS_BTREE_CB_LEAF_STOP; } } /** \internal * Find the byte offset (from the start of the catalog file) to a record * in the catalog file. * @param hfs File System being analyzed * @param needle Key to search for * @returns Byte offset or 0 on error. 0 is also returned if catalog * record was not found. Check tsk_errno to determine if error occured. */ static TSK_OFF_T hfs_cat_get_record_offset(HFS_INFO * hfs, const hfs_btree_key_cat * needle) { TSK_OFF_T off = 0; if (hfs_cat_traverse(hfs, needle, hfs_cat_get_record_offset_cb, &off)) { return 0; } return off; } /** \internal * Given a byte offset to a leaf record in teh catalog file, read the data as * a thread record. This will zero the buffer and read in the size of the thread * data. * @param hfs File System * @param off Byte offset of record in catalog file (not including key) * @param thread [out] Buffer to write thread data into. 
* @returns 0 on success, 1 on failure; sets up to error string 1 */ uint8_t hfs_cat_read_thread_record(HFS_INFO * hfs, TSK_OFF_T off, hfs_thread * thread) { TSK_FS_INFO *fs = (TSK_FS_INFO *) & (hfs->fs_info); uint16_t uni_len; size_t cnt; memset(thread, 0, sizeof(hfs_thread)); cnt = tsk_fs_attr_read(hfs->catalog_attr, off, (char *) thread, 10, 0); if (cnt != 10) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2 ("hfs_cat_read_thread_record: Error reading catalog offset %" PRIuOFF " (header)", off); return 1; } if ((tsk_getu16(fs->endian, thread->rec_type) != HFS_FOLDER_THREAD) && (tsk_getu16(fs->endian, thread->rec_type) != HFS_FILE_THREAD)) { tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr ("hfs_cat_read_thread_record: unexpected record type %" PRIu16, tsk_getu16(fs->endian, thread->rec_type)); return 1; } uni_len = tsk_getu16(fs->endian, thread->name.length); if (uni_len > 255) { tsk_error_set_errno(TSK_ERR_FS_INODE_COR); tsk_error_set_errstr ("hfs_cat_read_thread_record: invalid string length (%" PRIu16 ")", uni_len); return 1; } cnt = tsk_fs_attr_read(hfs->catalog_attr, off + 10, (char *) thread->name.unicode, uni_len * 2, 0); if (cnt != uni_len * 2) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2 ("hfs_cat_read_thread_record: Error reading catalog offset %" PRIuOFF " (name)", off + 10); return 1; } return 0; } /** \internal * Read a catalog record into a local data structure. This reads the * correct amount, depending on if it is a file or folder. * @param hfs File system being analyzed * @param off Byte offset (in catalog file) of record (not including key) * @param record [out] Structure to read data into * @returns 1 on error */ uint8_t hfs_cat_read_file_folder_record(HFS_INFO * hfs, TSK_OFF_T off, hfs_file_folder * record) { TSK_FS_INFO *fs = (TSK_FS_INFO *) & (hfs->fs_info); size_t cnt; char rec_type[2]; memset(record, 0, sizeof(hfs_file_folder)); cnt = tsk_fs_attr_read(hfs->catalog_attr, off, rec_type, 2, 0); if (cnt != 2) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2 ("hfs_cat_read_file_folder_record: Error reading record type from catalog offset %" PRIuOFF " (header)", off); return 1; } if (tsk_getu16(fs->endian, rec_type) == HFS_FOLDER_RECORD) { cnt = tsk_fs_attr_read(hfs->catalog_attr, off, (char *) record, sizeof(hfs_folder), 0); if (cnt != sizeof(hfs_folder)) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2 ("hfs_cat_read_file_folder_record: Error reading catalog offset %" PRIuOFF " (folder)", off); return 1; } } else if (tsk_getu16(fs->endian, rec_type) == HFS_FILE_RECORD) { cnt = tsk_fs_attr_read(hfs->catalog_attr, off, (char *) record, sizeof(hfs_file), 0); if (cnt != sizeof(hfs_file)) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2 ("hfs_cat_read_file_folder_record: Error reading catalog offset %" PRIuOFF " (file)", off); return 1; } } else { tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr ("hfs_cat_read_file_folder_record: unexpected record type %" PRIu16, tsk_getu16(fs->endian, rec_type)); return 1; } return 0; } static TSK_INUM_T hfs_lookup_hard_link(HFS_INFO * hfs, TSK_INUM_T linknum, unsigned char is_directory) { char fBuff[30]; TSK_FS_DIR *mdir; size_t indx; TSK_FS_INFO *fs = (TSK_FS_INFO *) hfs; memset(fBuff, 0, 30); if (is_directory) { tsk_take_lock(&(hfs->metadata_dir_cache_lock)); if 
(hfs->dir_meta_dir == NULL) { hfs->dir_meta_dir = tsk_fs_dir_open_meta(fs, hfs->meta_dir_inum); } tsk_release_lock(&(hfs->metadata_dir_cache_lock)); if (hfs->dir_meta_dir == NULL) { error_returned ("hfs_lookup_hard_link: could not open the dir metadata directory"); return 0; } else { mdir = hfs->dir_meta_dir; } snprintf(fBuff, 30, "dir_%" PRIuINUM, linknum); } else { tsk_take_lock(&(hfs->metadata_dir_cache_lock)); if (hfs->meta_dir == NULL) { hfs->meta_dir = tsk_fs_dir_open_meta(fs, hfs->meta_inum); } tsk_release_lock(&(hfs->metadata_dir_cache_lock)); if (hfs->meta_dir == NULL) { error_returned ("hfs_lookup_hard_link: could not open file metadata directory"); return 0; } else { mdir = hfs->meta_dir; } snprintf(fBuff, 30, "iNode%" PRIuINUM, linknum); } for (indx = 0; indx < tsk_fs_dir_getsize(mdir); indx++) { if ((mdir->names != NULL) && mdir->names[indx].name && (fs->name_cmp(fs, mdir->names[indx].name, fBuff) == 0)) { // OK this is the one return mdir->names[indx].meta_addr; } } // OK, we did not find that linknum return 0; } /* * Given a catalog entry, will test that entry to see if it is a hard link. * If it is a hard link, the function returns the inum (or cnid) of the target file. * If it is NOT a hard link, then then function returns the inum of the given entry. * In both cases, the parameter is_error is set to zero. * * If an ERROR occurs, if it is a mild error, then is_error is set to 1, and the * inum of the given entry is returned. This signals that hard link detection cannot * be carried out. * * If the error is serious, then is_error is set to 2 or 3, depending on the kind of error, and * the TSK error code is set, and the function returns zero. is_error==2 means that an error * occured in looking up the target file in the Catalog. is_error==3 means that the given * entry appears to be a hard link, but the target file does not exist in the Catalog. * * @param hfs The file system * @param entry The catalog entry to check * @param is_error A Boolean that is returned indicating an error, or no error.\ * @return The inum (or cnid) of the hard link target, or of the given catalog entry, or zero. */ TSK_INUM_T hfs_follow_hard_link(HFS_INFO * hfs, hfs_file * cat, unsigned char *is_error) { TSK_FS_INFO *fs = (TSK_FS_INFO *) hfs; TSK_INUM_T cnid; time_t crtime; uint32_t file_type; uint32_t file_creator; *is_error = 0; // default, not an error if (cat == NULL) { error_detected(TSK_ERR_FS_ARG, "hfs_follow_hard_link: Pointer to Catalog entry (2nd arg) is null"); return 0; } cnid = tsk_getu32(fs->endian, cat->std.cnid); if (cnid < HFS_FIRST_USER_CNID) { // Can't be a hard link. And, cannot look up in Catalog file either! return cnid; } crtime = (time_t) hfs_convert_2_unix_time(tsk_getu32(fs->endian, cat->std.crtime)); file_type = tsk_getu32(fs->endian, cat->std.u_info.file_type); file_creator = tsk_getu32(fs->endian, cat->std.u_info.file_cr); // Only proceed with the rest of this if the flags etc are right if (file_type == HFS_HARDLINK_FILE_TYPE && file_creator == HFS_HARDLINK_FILE_CREATOR) { // see if we have the HFS+ Private Data dir for file links; // if not, it can't be a hard link. (We could warn the user, but // we also rely on this when finding the HFS+ Private Data dir in // the first place and we don't want a warning on every hfs_open.) if (hfs->meta_inum == 0) return cnid; // For this to work, we need the FS creation times. Is at least one of these set? 
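/* Aside (editorial sketch): the hard-link test being set up here reduces to
 * two conditions before any lookup is done: (1) the Finder type/creator pair
 * marks the record as a hard link, and (2) the record's creation time matches
 * one of the volume-level creation times; only then is the link number
 * resolved through the private metadata directory.  The helper below
 * condenses the file-link case; its name is illustrative only. */
static int
hfs_is_file_hard_link_sketch(const HFS_INFO * hfs, time_t crtime,
    uint32_t file_type, uint32_t file_creator)
{
    if ((file_type != HFS_HARDLINK_FILE_TYPE)
        || (file_creator != HFS_HARDLINK_FILE_CREATOR))
        return 0;               // not marked as a file hard link
    // the crtime of a link record equals one of the file system creation times
    return (hfs->has_meta_crtime && (crtime == hfs->meta_crtime))
        || (hfs->has_meta_dir_crtime && (crtime == hfs->metadir_crtime))
        || (hfs->has_root_crtime && (crtime == hfs->root_crtime));
}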
if ((!hfs->has_root_crtime) && (!hfs->has_meta_dir_crtime) && (!hfs->has_meta_crtime)) { uint32_t linkNum = tsk_getu32(fs->endian, cat->std.perm.special.inum); *is_error = 1; if (tsk_verbose) tsk_fprintf(stderr, "WARNING: hfs_follow_hard_link: File system creation times are not set. " "Cannot test inode for hard link. File type and creator indicate that this" " is a hard link (file), with LINK ID = %" PRIu32 "\n", linkNum); return cnid; } if ((!hfs->has_root_crtime) || (!hfs->has_meta_crtime)) { if (tsk_verbose) tsk_fprintf(stderr, "WARNING: hfs_follow_hard_link: Either the root folder or the" " file metadata folder is not accessible. Testing this potential hard link" " may be impaired.\n"); } // Now we need to check the creation time against the three FS creation times if ((hfs->has_meta_crtime && (crtime == hfs->meta_crtime)) || (hfs->has_meta_dir_crtime && (crtime == hfs->metadir_crtime)) || (hfs->has_root_crtime && (crtime == hfs->root_crtime))) { // OK, this is a hard link to a file. uint32_t linkNum = tsk_getu32(fs->endian, cat->std.perm.special.inum); TSK_INUM_T target_cnid; // This is the real CNID of the file. target_cnid = hfs_lookup_hard_link(hfs, linkNum, FALSE); if (target_cnid != 0) { // Succeeded in finding that target_cnid in the Catalog file return target_cnid; } else { // This should be a hard link, BUT... // Did not find the target_cnid in the Catalog file. error_returned ("hfs_follow_hard_link: got an error looking up the target of a file link"); *is_error = 2; return 0; } } } else if (file_type == HFS_LINKDIR_FILE_TYPE && file_creator == HFS_LINKDIR_FILE_CREATOR) { // see if we have the HFS+ Private Directory Data dir for links; // if not, it can't be a hard link. (We could warn the user, but // we also rely on this when finding the HFS+ Private Directory Data dir in // the first place and we don't want a warning on every hfs_open.) if (hfs->meta_dir_inum == 0) return cnid; // For this to work, we need the FS creation times. Is at least one of these set? if ((!hfs->has_root_crtime) && (!hfs->has_meta_dir_crtime) && (!hfs->has_meta_crtime)) { uint32_t linkNum = tsk_getu32(fs->endian, cat->std.perm.special.inum); *is_error = 1; if (tsk_verbose) tsk_fprintf(stderr, "WARNING: hfs_follow_hard_link: File system creation times are not set. " "Cannot test inode for hard link. File type and creator indicate that this" " is a hard link (directory), with LINK ID = %" PRIu32 "\n", linkNum); return cnid; } if ((!hfs->has_root_crtime) || (!hfs->has_meta_crtime) || (!hfs->has_meta_dir_crtime)) { if (tsk_verbose) tsk_fprintf(stderr, "WARNING: hfs_follow_hard_link: Either the root folder or the" " file metadata folder or the directory metatdata folder is" " not accessible. Testing this potential hard linked folder " "may be impaired.\n"); } // Now we need to check the creation time against the three FS creation times if ((hfs->has_meta_crtime && (crtime == hfs->meta_crtime)) || (hfs->has_meta_dir_crtime && (crtime == hfs->metadir_crtime)) || (hfs->has_root_crtime && (crtime == hfs->root_crtime))) { // OK, this is a hard link to a directory. uint32_t linkNum = tsk_getu32(fs->endian, cat->std.perm.special.inum); TSK_INUM_T target_cnid; // This is the real CNID of the file. target_cnid = hfs_lookup_hard_link(hfs, linkNum, TRUE); if (target_cnid != 0) { // Succeeded in finding that target_cnid in the Catalog file return target_cnid; } else { // This should be a hard link, BUT... // Did not find the target_cnid in the Catalog file. 
error_returned ("hfs_follow_hard_link: got an error looking up the target of a dir link"); *is_error = 2; return 0; } } } // It cannot be a hard link (file or directory) return cnid; } /** \internal * Lookup an entry in the catalog file and save it into the entry. Do not * call this for the special files that do not have an entry in the catalog. * data structure. * @param hfs File system being analyzed * @param inum Address (cnid) of file to open * @param entry [out] Structure to read data into * @returns 1 on error or not found, 0 on success. Check tsk_errno * to differentiate between error and not found. If it is not found, then the * errno will be TSK_ERR_FS_INODE_NUM. Else, it will be some other value. */ uint8_t hfs_cat_file_lookup(HFS_INFO * hfs, TSK_INUM_T inum, HFS_ENTRY * entry, unsigned char follow_hard_link) { TSK_FS_INFO *fs = (TSK_FS_INFO *) & (hfs->fs_info); hfs_btree_key_cat key; /* current catalog key */ hfs_thread thread; /* thread record */ hfs_file_folder record; /* file/folder record */ TSK_OFF_T off; tsk_error_reset(); if (tsk_verbose) tsk_fprintf(stderr, "hfs_cat_file_lookup: called for inum %" PRIuINUM "\n", inum); // Test if this is a special file that is not located in the catalog if ((inum == HFS_EXTENTS_FILE_ID) || (inum == HFS_CATALOG_FILE_ID) || (inum == HFS_ALLOCATION_FILE_ID) || (inum == HFS_STARTUP_FILE_ID) || (inum == HFS_ATTRIBUTES_FILE_ID)) { tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr ("hfs_cat_file_lookup: Called on special file: %" PRIuINUM, inum); return 1; } /* first look up the thread record for the item we're searching for */ /* set up the thread record key */ memset((char *) &key, 0, sizeof(hfs_btree_key_cat)); cnid_to_array((uint32_t) inum, key.parent_cnid); if (tsk_verbose) tsk_fprintf(stderr, "hfs_cat_file_lookup: Looking up thread record (%" PRIuINUM ")\n", inum); /* look up the thread record */ off = hfs_cat_get_record_offset(hfs, &key); if (off == 0) { // no parsing error, just not found if (tsk_error_get_errno() == 0) { tsk_error_set_errno(TSK_ERR_FS_INODE_NUM); tsk_error_set_errstr ("hfs_cat_file_lookup: Error finding thread node for file (%" PRIuINUM ")", inum); } else { tsk_error_set_errstr2 (" hfs_cat_file_lookup: thread for file (%" PRIuINUM ")", inum); } return 1; } /* read the thread record */ if (hfs_cat_read_thread_record(hfs, off, &thread)) { tsk_error_set_errstr2(" hfs_cat_file_lookup: file (%" PRIuINUM ")", inum); return 1; } /* now look up the actual file/folder record */ /* build key */ memset((char *) &key, 0, sizeof(hfs_btree_key_cat)); memcpy((char *) key.parent_cnid, (char *) thread.parent_cnid, sizeof(key.parent_cnid)); memcpy((char *) &key.name, (char *) &thread.name, sizeof(key.name)); if (tsk_verbose) tsk_fprintf(stderr, "hfs_cat_file_lookup: Looking up file record (parent: %" PRIuINUM ")\n", (uint64_t) tsk_getu32(fs->endian, key.parent_cnid)); /* look up the record */ off = hfs_cat_get_record_offset(hfs, &key); if (off == 0) { // no parsing error, just not found if (tsk_error_get_errno() == 0) { tsk_error_set_errno(TSK_ERR_FS_INODE_NUM); tsk_error_set_errstr ("hfs_cat_file_lookup: Error finding record node %" PRIuINUM, inum); } else { tsk_error_set_errstr2(" hfs_cat_file_lookup: file (%" PRIuINUM ")", inum); } return 1; } /* read the record */ if (hfs_cat_read_file_folder_record(hfs, off, &record)) { tsk_error_set_errstr2(" hfs_cat_file_lookup: file (%" PRIuINUM ")", inum); return 1; } /* these memcpy can be gotten rid of, really */ if (tsk_getu16(fs->endian, record.file.std.rec_type) == 
HFS_FOLDER_RECORD) { if (tsk_verbose) tsk_fprintf(stderr, "hfs_cat_file_lookup: found folder record valence %" PRIu32 ", cnid %" PRIu32 "\n", tsk_getu32(fs->endian, record.folder.std.valence), tsk_getu32(fs->endian, record.folder.std.cnid)); memcpy((char *) &entry->cat, (char *) &record, sizeof(hfs_folder)); } else if (tsk_getu16(fs->endian, record.file.std.rec_type) == HFS_FILE_RECORD) { if (tsk_verbose) tsk_fprintf(stderr, "hfs_cat_file_lookup: found file record cnid %" PRIu32 "\n", tsk_getu32(fs->endian, record.file.std.cnid)); memcpy((char *) &entry->cat, (char *) &record, sizeof(hfs_file)); } /* other cases already caught by hfs_cat_read_file_folder_record */ memcpy((char *) &entry->thread, (char *) &thread, sizeof(hfs_thread)); entry->flags = TSK_FS_META_FLAG_ALLOC | TSK_FS_META_FLAG_USED; entry->inum = inum; if (follow_hard_link) { // TEST to see if this is a hard link unsigned char is_err; TSK_INUM_T target_cnid = hfs_follow_hard_link(hfs, &(entry->cat), &is_err); if (is_err > 1) { error_returned ("hfs_cat_file_lookup: error occurred while following a possible hard link for " "inum (cnid) = %" PRIuINUM, inum); return 1; } if (target_cnid != inum) { // This is a hard link, and we have got the cnid of the target file, so look it up. uint8_t res = hfs_cat_file_lookup(hfs, target_cnid, entry, FALSE); if (res != 0) { error_returned ("hfs_cat_file_lookup: error occurred while looking up the Catalog entry for " "the target of inum (cnid) = %" PRIuINUM " target", inum); } return 1; } // Target is NOT a hard link, so fall through to the non-hard link exit. } if (tsk_verbose) tsk_fprintf(stderr, "hfs_cat_file_lookup exiting\n"); return 0; } /** \internal * Returns the largest inode number in file system * @param hfs File system being analyzed * @returns largest metadata address */ static TSK_INUM_T hfs_find_highest_inum(HFS_INFO * hfs) { // @@@ get actual number from Catalog file (go to far right) (we can't always trust the vol header) /* I haven't gotten looking at the end of the Catalog B-Tree to work properly. A fast method: if HFS_VH_ATTR_CNIDS_REUSED is set, then the maximum CNID is 2^32-1; if it's not set, then nextCatalogId is supposed to be larger than all CNIDs on disk. 
*/ TSK_FS_INFO *fs = (TSK_FS_INFO *) & (hfs->fs_info); if (tsk_getu32(fs->endian, hfs->fs->attr) & HFS_VH_ATTR_CNIDS_REUSED) return (TSK_INUM_T) 0xffffffff; else return (TSK_INUM_T) tsk_getu32(fs->endian, hfs->fs->next_cat_id) - 1; } static TSK_FS_META_MODE_ENUM hfs_mode_to_tsk_mode(uint16_t a_mode) { TSK_FS_META_MODE_ENUM mode = 0; if (a_mode & HFS_IN_ISUID) mode |= TSK_FS_META_MODE_ISUID; if (a_mode & HFS_IN_ISGID) mode |= TSK_FS_META_MODE_ISGID; if (a_mode & HFS_IN_ISVTX) mode |= TSK_FS_META_MODE_ISVTX; if (a_mode & HFS_IN_IRUSR) mode |= TSK_FS_META_MODE_IRUSR; if (a_mode & HFS_IN_IWUSR) mode |= TSK_FS_META_MODE_IWUSR; if (a_mode & HFS_IN_IXUSR) mode |= TSK_FS_META_MODE_IXUSR; if (a_mode & HFS_IN_IRGRP) mode |= TSK_FS_META_MODE_IRGRP; if (a_mode & HFS_IN_IWGRP) mode |= TSK_FS_META_MODE_IWGRP; if (a_mode & HFS_IN_IXGRP) mode |= TSK_FS_META_MODE_IXGRP; if (a_mode & HFS_IN_IROTH) mode |= TSK_FS_META_MODE_IROTH; if (a_mode & HFS_IN_IWOTH) mode |= TSK_FS_META_MODE_IWOTH; if (a_mode & HFS_IN_IXOTH) mode |= TSK_FS_META_MODE_IXOTH; return mode; } static TSK_FS_META_TYPE_ENUM hfs_mode_to_tsk_meta_type(uint16_t a_mode) { switch (a_mode & HFS_IN_IFMT) { case HFS_IN_IFIFO: return TSK_FS_META_TYPE_FIFO; case HFS_IN_IFCHR: return TSK_FS_META_TYPE_CHR; case HFS_IN_IFDIR: return TSK_FS_META_TYPE_DIR; case HFS_IN_IFBLK: return TSK_FS_META_TYPE_BLK; case HFS_IN_IFREG: return TSK_FS_META_TYPE_REG; case HFS_IN_IFLNK: return TSK_FS_META_TYPE_LNK; case HFS_IN_IFSOCK: return TSK_FS_META_TYPE_SOCK; case HFS_IFWHT: return TSK_FS_META_TYPE_WHT; case HFS_IFXATTR: return TSK_FS_META_TYPE_UNDEF; default: /* error */ return TSK_FS_META_TYPE_UNDEF; } } static uint8_t hfs_make_specialbase(TSK_FS_FILE * fs_file) { fs_file->meta->type = TSK_FS_META_TYPE_REG; fs_file->meta->mode = 0; fs_file->meta->nlink = 1; fs_file->meta->flags = (TSK_FS_META_FLAG_USED | TSK_FS_META_FLAG_ALLOC); fs_file->meta->uid = fs_file->meta->gid = 0; fs_file->meta->mtime = fs_file->meta->atime = fs_file->meta->ctime = fs_file->meta->crtime = 0; fs_file->meta->mtime_nano = fs_file->meta->atime_nano = fs_file->meta->ctime_nano = fs_file->meta->crtime_nano = 0; if (fs_file->meta->name2 == NULL) { if ((fs_file->meta->name2 = (TSK_FS_META_NAME_LIST *) tsk_malloc(sizeof(TSK_FS_META_NAME_LIST))) == NULL) { error_returned (" - hfs_make_specialbase, couldn't malloc space for a name list"); return 1; } fs_file->meta->name2->next = NULL; } if (fs_file->meta->attr != NULL) { tsk_fs_attrlist_markunused(fs_file->meta->attr); } else { fs_file->meta->attr = tsk_fs_attrlist_alloc(); } return 0; } /** * \internal * Create an FS_INODE structure for the catalog file. * * @param hfs File system to analyze * @param fs_file Structure to copy file information into. 
* @return 1 on error and 0 on success */ static uint8_t hfs_make_catalog(HFS_INFO * hfs, TSK_FS_FILE * fs_file) { TSK_FS_INFO *fs = (TSK_FS_INFO *) hfs; TSK_FS_ATTR *fs_attr; TSK_FS_ATTR_RUN *attr_run; unsigned char dummy1, dummy2; uint64_t dummy3; uint8_t result; if (tsk_verbose) tsk_fprintf(stderr, "hfs_make_catalog: Making virtual catalog file\n"); if (hfs_make_specialbase(fs_file)) { error_returned(" - hfs_make_catalog"); return 1; } fs_file->meta->addr = HFS_CATALOG_FILE_ID; strncpy(fs_file->meta->name2->name, HFS_CATALOGNAME, TSK_FS_META_NAME_LIST_NSIZE); fs_file->meta->size = tsk_getu64(fs->endian, hfs->fs->cat_file.logic_sz); // convert the runs in the volume header to attribute runs if (((attr_run = hfs_extents_to_attr(fs, hfs->fs->cat_file.extents, 0)) == NULL) && (tsk_error_get_errno() != 0)) { error_returned(" - hfs_make_catalog"); return 1; } if ((fs_attr = tsk_fs_attrlist_getnew(fs_file->meta->attr, TSK_FS_ATTR_NONRES)) == NULL) { error_returned(" - hfs_make_catalog"); tsk_fs_attr_run_free(attr_run); return 1; } // initialize the data run if (tsk_fs_attr_set_run(fs_file, fs_attr, attr_run, NULL, TSK_FS_ATTR_TYPE_DEFAULT, HFS_FS_ATTR_ID_DATA, tsk_getu64(fs->endian, hfs->fs->cat_file.logic_sz), tsk_getu64(fs->endian, hfs->fs->cat_file.logic_sz), tsk_getu64(fs->endian, hfs->fs->cat_file.logic_sz), 0, 0)) { error_returned(" - hfs_make_catalog"); tsk_fs_attr_run_free(attr_run); return 1; } // see if catalog file has additional runs if (hfs_ext_find_extent_record_attr(hfs, HFS_CATALOG_FILE_ID, fs_attr, TRUE)) { error_returned(" - hfs_make_catalog"); fs_file->meta->attr_state = TSK_FS_META_ATTR_ERROR; return 1; } result = hfs_load_extended_attrs(fs_file, &dummy1, &dummy2, &dummy3); if (result != 0) { if (tsk_verbose) tsk_fprintf(stderr, "WARNING: Extended attributes failed to load for the Catalog file.\n"); tsk_error_reset(); } fs_file->meta->attr_state = TSK_FS_META_ATTR_STUDIED; return 0; } /** * \internal * Create an FS_FILE for the extents file * * @param hfs File system to analyze * @param fs_file Structure to copy file information into. 
* @return 1 on error and 0 on success */ static uint8_t hfs_make_extents(HFS_INFO * hfs, TSK_FS_FILE * fs_file) { TSK_FS_INFO *fs = (TSK_FS_INFO *) hfs; TSK_FS_ATTR *fs_attr; TSK_FS_ATTR_RUN *attr_run; if (tsk_verbose) tsk_fprintf(stderr, "hfs_make_extents: Making virtual extents file\n"); if (hfs_make_specialbase(fs_file)) { error_returned(" - hfs_make_extents"); return 1; } fs_file->meta->addr = HFS_EXTENTS_FILE_ID; strncpy(fs_file->meta->name2->name, HFS_EXTENTSNAME, TSK_FS_META_NAME_LIST_NSIZE); fs_file->meta->size = tsk_getu64(fs->endian, hfs->fs->ext_file.logic_sz); if (((attr_run = hfs_extents_to_attr(fs, hfs->fs->ext_file.extents, 0)) == NULL) && (tsk_error_get_errno() != 0)) { error_returned(" - hfs_make_extents"); return 1; } if ((fs_attr = tsk_fs_attrlist_getnew(fs_file->meta->attr, TSK_FS_ATTR_NONRES)) == NULL) { error_returned(" - hfs_make_extents"); tsk_fs_attr_run_free(attr_run); return 1; } // initialize the data run if (tsk_fs_attr_set_run(fs_file, fs_attr, attr_run, NULL, TSK_FS_ATTR_TYPE_DEFAULT, HFS_FS_ATTR_ID_DATA, tsk_getu64(fs->endian, hfs->fs->ext_file.logic_sz), tsk_getu64(fs->endian, hfs->fs->ext_file.logic_sz), tsk_getu64(fs->endian, hfs->fs->ext_file.logic_sz), 0, 0)) { error_returned(" - hfs_make_extents"); tsk_fs_attr_run_free(attr_run); return 1; } //hfs_load_extended_attrs(fs_file); // Extents doesn't have an entry in itself fs_file->meta->attr_state = TSK_FS_META_ATTR_STUDIED; return 0; } /** * \internal * Create an FS_INODE structure for the blockmap / allocation file. * * @param hfs File system to analyze * @param fs_file Structure to copy file information into. * @return 1 on error and 0 on success */ static uint8_t hfs_make_blockmap(HFS_INFO * hfs, TSK_FS_FILE * fs_file) { TSK_FS_INFO *fs = (TSK_FS_INFO *) hfs; TSK_FS_ATTR *fs_attr; TSK_FS_ATTR_RUN *attr_run; unsigned char dummy1, dummy2; uint64_t dummy3; uint8_t result; if (tsk_verbose) tsk_fprintf(stderr, "hfs_make_blockmap: Making virtual blockmap file\n"); if (hfs_make_specialbase(fs_file)) { error_returned(" - hfs_make_blockmap"); return 1; } fs_file->meta->addr = HFS_ALLOCATION_FILE_ID; strncpy(fs_file->meta->name2->name, HFS_ALLOCATIONNAME, TSK_FS_META_NAME_LIST_NSIZE); fs_file->meta->size = tsk_getu64(fs->endian, hfs->fs->alloc_file.logic_sz); if (((attr_run = hfs_extents_to_attr(fs, hfs->fs->alloc_file.extents, 0)) == NULL) && (tsk_error_get_errno() != 0)) { error_returned(" - hfs_make_blockmap"); return 1; } if ((fs_attr = tsk_fs_attrlist_getnew(fs_file->meta->attr, TSK_FS_ATTR_NONRES)) == NULL) { error_returned(" - hfs_make_blockmap"); tsk_fs_attr_run_free(attr_run); return 1; } // initialize the data run if (tsk_fs_attr_set_run(fs_file, fs_attr, attr_run, NULL, TSK_FS_ATTR_TYPE_DEFAULT, HFS_FS_ATTR_ID_DATA, tsk_getu64(fs->endian, hfs->fs->alloc_file.logic_sz), tsk_getu64(fs->endian, hfs->fs->alloc_file.logic_sz), tsk_getu64(fs->endian, hfs->fs->alloc_file.logic_sz), 0, 0)) { error_returned(" - hfs_make_blockmap"); tsk_fs_attr_run_free(attr_run); return 1; } // see if catalog file has additional runs if (hfs_ext_find_extent_record_attr(hfs, HFS_ALLOCATION_FILE_ID, fs_attr, TRUE)) { error_returned(" - hfs_make_blockmap"); fs_file->meta->attr_state = TSK_FS_META_ATTR_ERROR; return 1; } result = hfs_load_extended_attrs(fs_file, &dummy1, &dummy2, &dummy3); if (result != 0) { if (tsk_verbose) tsk_fprintf(stderr, "WARNING: Extended attributes failed to load for the Allocation file.\n"); tsk_error_reset(); } fs_file->meta->attr_state = TSK_FS_META_ATTR_STUDIED; return 0; } /** * \internal * Create 
an FS_INODE structure for the startup / boot file. * * @param hfs File system to analyze * @param fs_file Structure to copy file information into. * @return 1 on error and 0 on success */ static uint8_t hfs_make_startfile(HFS_INFO * hfs, TSK_FS_FILE * fs_file) { TSK_FS_INFO *fs = (TSK_FS_INFO *) hfs; TSK_FS_ATTR *fs_attr; TSK_FS_ATTR_RUN *attr_run; unsigned char dummy1, dummy2; uint64_t dummy3; uint8_t result; if (tsk_verbose) tsk_fprintf(stderr, "hfs_make_startfile: Making virtual startup file\n"); if (hfs_make_specialbase(fs_file)) { error_returned(" - hfs_make_startfile"); return 1; } fs_file->meta->addr = HFS_STARTUP_FILE_ID; strncpy(fs_file->meta->name2->name, HFS_STARTUPNAME, TSK_FS_META_NAME_LIST_NSIZE); fs_file->meta->size = tsk_getu64(fs->endian, hfs->fs->start_file.logic_sz); if (((attr_run = hfs_extents_to_attr(fs, hfs->fs->start_file.extents, 0)) == NULL) && (tsk_error_get_errno() != 0)) { error_returned(" - hfs_make_startfile"); return 1; } if ((fs_attr = tsk_fs_attrlist_getnew(fs_file->meta->attr, TSK_FS_ATTR_NONRES)) == NULL) { error_returned(" - hfs_make_startfile"); tsk_fs_attr_run_free(attr_run); return 1; } // initialize the data run if (tsk_fs_attr_set_run(fs_file, fs_attr, attr_run, NULL, TSK_FS_ATTR_TYPE_DEFAULT, HFS_FS_ATTR_ID_DATA, tsk_getu64(fs->endian, hfs->fs->start_file.logic_sz), tsk_getu64(fs->endian, hfs->fs->start_file.logic_sz), tsk_getu64(fs->endian, hfs->fs->start_file.logic_sz), 0, 0)) { error_returned(" - hfs_make_startfile"); tsk_fs_attr_run_free(attr_run); return 1; } // see if catalog file has additional runs if (hfs_ext_find_extent_record_attr(hfs, HFS_STARTUP_FILE_ID, fs_attr, TRUE)) { error_returned(" - hfs_make_startfile"); fs_file->meta->attr_state = TSK_FS_META_ATTR_ERROR; return 1; } result = hfs_load_extended_attrs(fs_file, &dummy1, &dummy2, &dummy3); if (result != 0) { if (tsk_verbose) tsk_fprintf(stderr, "WARNING: Extended attributes failed to load for the Start file.\n"); tsk_error_reset(); } fs_file->meta->attr_state = TSK_FS_META_ATTR_STUDIED; return 0; } /** * \internal * Create an FS_INODE structure for the attributes file. * * @param hfs File system to analyze * @param fs_file Structure to copy file information into. 
* @return 1 on error and 0 on success */ static uint8_t hfs_make_attrfile(HFS_INFO * hfs, TSK_FS_FILE * fs_file) { TSK_FS_INFO *fs = (TSK_FS_INFO *) hfs; TSK_FS_ATTR *fs_attr; TSK_FS_ATTR_RUN *attr_run; if (tsk_verbose) tsk_fprintf(stderr, "hfs_make_attrfile: Making virtual attributes file\n"); if (hfs_make_specialbase(fs_file)) { error_returned(" - hfs_make_attrfile"); return 1; } fs_file->meta->addr = HFS_ATTRIBUTES_FILE_ID; strncpy(fs_file->meta->name2->name, HFS_ATTRIBUTESNAME, TSK_FS_META_NAME_LIST_NSIZE); fs_file->meta->size = tsk_getu64(fs->endian, hfs->fs->attr_file.logic_sz); if (((attr_run = hfs_extents_to_attr(fs, hfs->fs->attr_file.extents, 0)) == NULL) && (tsk_error_get_errno() != 0)) { error_returned(" - hfs_make_attrfile"); return 1; } if ((fs_attr = tsk_fs_attrlist_getnew(fs_file->meta->attr, TSK_FS_ATTR_NONRES)) == NULL) { error_returned(" - hfs_make_attrfile"); tsk_fs_attr_run_free(attr_run); return 1; } // initialize the data run if (tsk_fs_attr_set_run(fs_file, fs_attr, attr_run, NULL, TSK_FS_ATTR_TYPE_DEFAULT, HFS_FS_ATTR_ID_DATA, tsk_getu64(fs->endian, hfs->fs->attr_file.logic_sz), tsk_getu64(fs->endian, hfs->fs->attr_file.logic_sz), tsk_getu64(fs->endian, hfs->fs->attr_file.logic_sz), 0, 0)) { error_returned(" - hfs_make_attrfile"); tsk_fs_attr_run_free(attr_run); return 1; } // see if catalog file has additional runs if (hfs_ext_find_extent_record_attr(hfs, HFS_ATTRIBUTES_FILE_ID, fs_attr, TRUE)) { error_returned(" - hfs_make_attrfile"); fs_file->meta->attr_state = TSK_FS_META_ATTR_ERROR; return 1; } //hfs_load_extended_attrs(fs_file); fs_file->meta->attr_state = TSK_FS_META_ATTR_STUDIED; return 0; } /** * \internal * Create an FS_FILE structure for the BadBlocks file. * * @param hfs File system to analyze * @param fs_file Structure to copy file information into. * @return 1 on error and 0 on success */ static uint8_t hfs_make_badblockfile(HFS_INFO * hfs, TSK_FS_FILE * fs_file) { TSK_FS_ATTR *fs_attr; unsigned char dummy1, dummy2; uint64_t dummy3; uint8_t result; if (tsk_verbose) tsk_fprintf(stderr, "hfs_make_badblockfile: Making virtual badblock file\n"); if (hfs_make_specialbase(fs_file)) { error_returned(" - hfs_make_badblockfile"); return 1; } fs_file->meta->addr = HFS_BAD_BLOCK_FILE_ID; strncpy(fs_file->meta->name2->name, HFS_BAD_BLOCK_FILE_NAME, TSK_FS_META_NAME_LIST_NSIZE); fs_file->meta->size = 0; if ((fs_attr = tsk_fs_attrlist_getnew(fs_file->meta->attr, TSK_FS_ATTR_NONRES)) == NULL) { error_returned(" - hfs_make_badblockfile"); return 1; } // add the run to the file. if (tsk_fs_attr_set_run(fs_file, fs_attr, NULL, NULL, TSK_FS_ATTR_TYPE_DEFAULT, HFS_FS_ATTR_ID_DATA, fs_file->meta->size, fs_file->meta->size, fs_file->meta->size, 0, 0)) { error_returned(" - hfs_make_badblockfile"); return 1; } // see if file has additional runs if (hfs_ext_find_extent_record_attr(hfs, HFS_BAD_BLOCK_FILE_ID, fs_attr, TRUE)) { error_returned(" - hfs_make_badblockfile"); fs_file->meta->attr_state = TSK_FS_META_ATTR_ERROR; return 1; } /* @@@ We have a chicken and egg problem here... The current design of * fs_attr_set() requires the size to be set, but we dont' know the size * until we look into the extents file (which adds to an attribute...). * This does not seem to be the best design... neeed a way to test this. 
*/ fs_file->meta->size = fs_attr->nrd.initsize; fs_attr->size = fs_file->meta->size; fs_attr->nrd.allocsize = fs_file->meta->size; result = hfs_load_extended_attrs(fs_file, &dummy1, &dummy2, &dummy3); if (result != 0) { if (tsk_verbose) tsk_fprintf(stderr, "WARNING: Extended attributes failed to load for the BadBlocks file.\n"); tsk_error_reset(); } fs_file->meta->attr_state = TSK_FS_META_ATTR_STUDIED; return 0; } /** \internal * Copy the catalog file or folder record entry into a TSK data structure. * @param a_hfs File system being analyzed * @param a_hfs_entry Catalog record entry (HFS_ENTRY *) * @param a_fs_file Structure to copy data into (TSK_FS_FILE *) * Returns 1 on error. */ static uint8_t hfs_dinode_copy(HFS_INFO * a_hfs, const HFS_ENTRY * a_hfs_entry, TSK_FS_FILE * a_fs_file) { // Note, a_hfs_entry->cat is really of type hfs_file. But, hfs_file_folder is a union // of that type with hfs_folder. Both of hfs_file and hfs_folder have the same first member. // So, this cast is appropriate. const hfs_file_folder *a_entry = (hfs_file_folder *) & (a_hfs_entry->cat); const hfs_file_fold_std *std; TSK_FS_META *a_fs_meta = a_fs_file->meta; TSK_FS_INFO *fs; uint16_t hfsmode; TSK_INUM_T iStd; // the inum (or CNID) that occurs in the standard file metadata if (a_entry == NULL) { error_detected(TSK_ERR_FS_ARG, "hfs_dinode_copy: a_entry = a_hfs_entry->cat is NULL"); return 1; } fs = (TSK_FS_INFO *) & a_hfs->fs_info; // Just a sanity check. The inum (or cnid) occurs in two places in the // entry data structure. iStd = tsk_getu32(fs->endian, a_entry->file.std.cnid); if (iStd != a_hfs_entry->inum) { if (tsk_verbose) tsk_fprintf(stderr, "WARNING: hfs_dinode_copy: HFS_ENTRY with conflicting values for inum (or cnid).\n"); } if (a_fs_meta == NULL) { tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("hfs_dinode_copy: a_fs_meta is NULL"); return 1; } // both files and folders start off the same std = &(a_entry->file.std); if (tsk_verbose) tsk_fprintf(stderr, "hfs_dinode_copy: called for file/folder %" PRIu32 "\n", tsk_getu32(fs->endian, std->cnid)); if (a_fs_meta->content_len < HFS_FILE_CONTENT_LEN) { if ((a_fs_meta = tsk_fs_meta_realloc(a_fs_meta, HFS_FILE_CONTENT_LEN)) == NULL) { return 1; } } a_fs_meta->attr_state = TSK_FS_META_ATTR_EMPTY; if (a_fs_meta->attr) { tsk_fs_attrlist_markunused(a_fs_meta->attr); } /* * Copy the file type specific stuff first */ hfsmode = tsk_getu16(fs->endian, std->perm.mode); if (tsk_getu16(fs->endian, std->rec_type) == HFS_FOLDER_RECORD) { // set the type of mode is not set if ((hfsmode & HFS_IN_IFMT) == 0) a_fs_meta->type = TSK_FS_META_TYPE_DIR; a_fs_meta->size = 0; memset(a_fs_meta->content_ptr, 0, HFS_FILE_CONTENT_LEN); } else if (tsk_getu16(fs->endian, std->rec_type) == HFS_FILE_RECORD) { hfs_fork *fork; // set the type of mode is not set if ((hfsmode & HFS_IN_IFMT) == 0) a_fs_meta->type = TSK_FS_META_TYPE_REG; a_fs_meta->size = tsk_getu64(fs->endian, a_entry->file.data.logic_sz); // copy the data and resource forks fork = (hfs_fork *) a_fs_meta->content_ptr; memcpy(fork, &(a_entry->file.data), sizeof(hfs_fork)); memcpy(&fork[1], &(a_entry->file.resource), sizeof(hfs_fork)); } else { if (tsk_verbose) tsk_fprintf(stderr, "hfs_dinode_copy error: catalog entry is neither file nor folder\n"); return 1; } /* * Copy the standard stuff. * Use default values (as defined in spec) if mode is not defined. 
*/ if ((hfsmode & HFS_IN_IFMT) == 0) { a_fs_meta->mode = 0; a_fs_meta->uid = 99; a_fs_meta->gid = 99; } else { a_fs_meta->mode = hfs_mode_to_tsk_mode(hfsmode); a_fs_meta->type = hfs_mode_to_tsk_meta_type(hfsmode); a_fs_meta->uid = tsk_getu32(fs->endian, std->perm.owner); a_fs_meta->gid = tsk_getu32(fs->endian, std->perm.group); } // this field is set only for "indirect" entries if (tsk_getu32(fs->endian, std->perm.special.nlink)) a_fs_meta->nlink = tsk_getu32(fs->endian, std->perm.special.nlink); else a_fs_meta->nlink = 1; a_fs_meta->mtime = hfs_convert_2_unix_time(tsk_getu32(fs->endian, std->cmtime)); a_fs_meta->atime = hfs_convert_2_unix_time(tsk_getu32(fs->endian, std->atime)); a_fs_meta->crtime = hfs_convert_2_unix_time(tsk_getu32(fs->endian, std->crtime)); a_fs_meta->ctime = hfs_convert_2_unix_time(tsk_getu32(fs->endian, std->amtime)); a_fs_meta->time2.hfs.bkup_time = hfs_convert_2_unix_time(tsk_getu32(fs->endian, std->bkup_date)); a_fs_meta->mtime_nano = a_fs_meta->atime_nano = a_fs_meta->ctime_nano = a_fs_meta->crtime_nano = 0; a_fs_meta->time2.hfs.bkup_time_nano = 0; a_fs_meta->addr = tsk_getu32(fs->endian, std->cnid); // All entries here are used. a_fs_meta->flags = TSK_FS_META_FLAG_ALLOC | TSK_FS_META_FLAG_USED; if (std->perm.o_flags & HFS_PERM_OFLAG_COMPRESSED) a_fs_meta->flags |= TSK_FS_META_FLAG_COMP; // We copy this inum (or cnid) here, because this file *might* have been a hard link. In // that case, we want to make sure that a_fs_file points consistently to the target of the // link. if (a_fs_file->name != NULL) { a_fs_file->name->meta_addr = a_fs_meta->addr; } /* TODO @@@ could fill in name2 with this entry's name and parent inode from Catalog entry */ /* set the link string (if the file is a link) * The size check is a sanity check so that we don't try to allocate * a huge amount of memory for a bad inode value */ if ((a_fs_meta->type == TSK_FS_META_TYPE_LNK) && (a_fs_meta->size >= 0) && (a_fs_meta->size < HFS_MAXPATHLEN)) { ssize_t bytes_read; a_fs_meta->link = tsk_malloc((size_t) a_fs_meta->size + 1); if (a_fs_meta->link == NULL) return 1; bytes_read = tsk_fs_file_read(a_fs_file, (TSK_OFF_T) 0, a_fs_meta->link, (size_t) a_fs_meta->size, TSK_FS_FILE_READ_FLAG_NONE); a_fs_meta->link[a_fs_meta->size] = '\0'; if (bytes_read != a_fs_meta->size) { if (tsk_verbose) tsk_fprintf(stderr, "hfs_dinode_copy: failed to read contents of symbolic link; " "expected %u bytes but tsk_fs_file_read() returned %u\n", a_fs_meta->size, bytes_read); free(a_fs_meta->link); a_fs_meta->link = NULL; return 1; } } return 0; } /** \internal * Load a catalog file entry and save it in the TSK_FS_FILE structure. * * @param fs File system to read from. * @param a_fs_file Structure to read into. 
* @param inum File address to load * @returns 1 on error */ static uint8_t hfs_inode_lookup(TSK_FS_INFO * fs, TSK_FS_FILE * a_fs_file, TSK_INUM_T inum) { HFS_INFO *hfs = (HFS_INFO *) fs; HFS_ENTRY entry; if (a_fs_file == NULL) { tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("hfs_inode_lookup: fs_file is NULL"); return 1; } if (a_fs_file->meta == NULL) { a_fs_file->meta = tsk_fs_meta_alloc(HFS_FILE_CONTENT_LEN); } if (a_fs_file->meta == NULL) { return 1; } else { tsk_fs_meta_reset(a_fs_file->meta); } if (tsk_verbose) tsk_fprintf(stderr, "hfs_inode_lookup: looking up %" PRIuINUM "\n", inum); // @@@ Will need to add orphan stuff here too /* First see if this is a special entry * the special ones have their metadata stored in the volume header */ if (inum == HFS_EXTENTS_FILE_ID) { if (!hfs->has_extents_file) { error_detected(TSK_ERR_FS_INODE_NUM, "Extents File not present"); return 1; } if (hfs_make_extents(hfs, a_fs_file)) { return 1; } else { return 0; } } else if (inum == HFS_CATALOG_FILE_ID) { if (hfs_make_catalog(hfs, a_fs_file)) { return 1; } else { return 0; } } else if (inum == HFS_BAD_BLOCK_FILE_ID) { // Note: the Extents file and the BadBlocks file are really the same. if (!hfs->has_extents_file) { error_detected(TSK_ERR_FS_INODE_NUM, "BadBlocks File not present"); return 1; } if (hfs_make_badblockfile(hfs, a_fs_file)) { return 1; } else { return 0; } } else if (inum == HFS_ALLOCATION_FILE_ID) { if (hfs_make_blockmap(hfs, a_fs_file)) { return 1; } else { return 0; } } else if (inum == HFS_STARTUP_FILE_ID) { if (!hfs->has_startup_file) { error_detected(TSK_ERR_FS_INODE_NUM, "Startup File not present"); return 1; } if (hfs_make_startfile(hfs, a_fs_file)) { return 1; } else { return 0; } } else if (inum == HFS_ATTRIBUTES_FILE_ID) { if (!hfs->has_attributes_file) { error_detected(TSK_ERR_FS_INODE_NUM, "Attributes File not present"); return 1; } if (hfs_make_attrfile(hfs, a_fs_file)) { return 1; } else { return 0; } } /* Lookup inode and store it in the HFS structure */ if (hfs_cat_file_lookup(hfs, inum, &entry, TRUE)) { return 1; } /* Copy the structure in hfs to generic fs_inode */ if (hfs_dinode_copy(hfs, &entry, a_fs_file)) { return 1; } /* If this is potentially a compressed file, its * actual size is unknown until we examine the * extended attributes */ if ((a_fs_file->meta->size == 0) && (a_fs_file->meta->type == TSK_FS_META_TYPE_REG) && (a_fs_file->meta->attr_state != TSK_FS_META_ATTR_ERROR) && ((a_fs_file->meta->attr_state != TSK_FS_META_ATTR_STUDIED) || (a_fs_file->meta->attr == NULL))) { hfs_load_attrs(a_fs_file); } return 0; } #ifdef HAVE_LIBZ typedef struct { uint32_t offset; uint32_t length; } CMP_OFFSET_ENTRY; uint8_t hfs_attr_walk_special(const TSK_FS_ATTR * fs_attr, int flags, TSK_FS_FILE_WALK_CB a_action, void *ptr) { TSK_FS_INFO *fs; TSK_ENDIAN_ENUM endian; TSK_FS_FILE *fs_file; const TSK_FS_ATTR *rAttr; // resource fork attribute char *rawBuf = NULL; // compressed data char *uncBuf = NULL; // uncompressed data hfs_resource_fork_header rfHeader; int attrReadResult; uint32_t offsetTableOffset; char fourBytes[4]; // Will hold the number of table entries, little endian uint32_t tableSize; // The number of table entries hfs_resource_fork_header *resHead; uint32_t dataOffset; char *offsetTableData; CMP_OFFSET_ENTRY *offsetTable; size_t indx; // index for looping over the offset table TSK_OFF_T off = 0; // the offset in the uncompressed data stream consumed thus far if (tsk_verbose) tsk_fprintf(stderr, "hfs_attr_walk_special: Entered, because this is a compressed 
file with compressed data in the resource fork\n"); // clean up any error messages that are lying around tsk_error_reset(); if ((fs_attr == NULL) || (fs_attr->fs_file == NULL) || (fs_attr->fs_file->meta == NULL) || (fs_attr->fs_file->fs_info == NULL)) { tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("ntfs_attr_walk_special: Null arguments given\n"); return 1; } // Check that the ATTR being read is the main DATA resource, 128-0, because this is the // only one that can be compressed in HFS+ if ((fs_attr->id != HFS_FS_ATTR_ID_DATA) || (fs_attr->type != TSK_FS_ATTR_TYPE_HFS_DATA)) { error_detected(TSK_ERR_FS_ARG, "hfs_attr_walk_special: arg specified an attribute %u-%u that is not the data fork, " "Only the data fork can be compressed.", fs_attr->type, fs_attr->id); return 1; } fs = fs_attr->fs_file->fs_info; //hfs = (HFS_INFO *) fs; endian = fs->endian; /* This MUST be a compressed attribute */ if (!(fs_attr->flags & TSK_FS_ATTR_COMP)) { error_detected(TSK_ERR_FS_FWALK, "hfs_attr_walk_special: called with non-special attribute: %x", fs_attr->flags); return 1; } /******** Open the Resource Fork ***********/ // The file fs_file = fs_attr->fs_file; // find the attribute for the resource fork rAttr = tsk_fs_file_attr_get_type(fs_file, TSK_FS_ATTR_TYPE_HFS_RSRC, HFS_FS_ATTR_ID_RSRC, TRUE); if (rAttr == NULL) { error_returned (" hfs_attr_walk_special: could not get the attribute for the resource fork of the file"); return 1; } // Read the resource fork header attrReadResult = tsk_fs_attr_read(rAttr, 0, (char *) &rfHeader, sizeof(hfs_resource_fork_header), TSK_FS_FILE_READ_FLAG_NONE); if (attrReadResult != sizeof(hfs_resource_fork_header)) { error_returned (" hfs_attr_walk_special: trying to read the resource fork header"); return 1; } // Begin to parse the resource fork. For now, we just need the data offset. But // eventually we'll want the other quantities as well. // We are assuming that there is exactly one resource, and that this contains the compressed // data. This assumption is true in all examples we have seen. More general code would // parse the Resource Fork map, and find the appropriate entry, then jump to THAT data offset. resHead = &rfHeader; dataOffset = tsk_getu32(endian, resHead->dataOffset); //uint32_t mapOffset = tsk_getu32(endian, resHead->mapOffset); //uint32_t dataLength = tsk_getu32(endian, resHead->dataLength); //uint32_t mapLength = tsk_getu32(endian, resHead->mapLength); // The resource's data begins with an offset table, which defines blocks // of (optionally) zlib-compressed data (so that the OS can do file seeks // efficiently; each uncompressed block is 64KB). 
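/* Aside (editorial sketch): layout of that offset table.  A 32-bit,
 * little-endian entry count sits at dataOffset + 4; it is followed by that
 * many 8-byte entries, each a little-endian (offset, length) pair whose
 * offset is relative to the start of the table.  Note that these values are
 * little-endian, unlike most HFS+ structures.  Decoding one entry from an
 * already-read table buffer looks like this (helper name illustrative only): */
static void
hfs_cmp_offset_entry_sketch(const char *tableData, size_t indx,
    uint32_t * offset, uint32_t * length)
{
    *offset = tsk_getu32(TSK_LIT_ENDIAN, tableData + indx * 8);         // start of unit indx
    *length = tsk_getu32(TSK_LIT_ENDIAN, tableData + indx * 8 + 4);     // stored length of unit indx
}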
offsetTableOffset = dataOffset + 4; // read 4 bytes, the number of table entries, little endian attrReadResult = tsk_fs_attr_read(rAttr, offsetTableOffset, fourBytes, 4, TSK_FS_FILE_READ_FLAG_NONE); if (attrReadResult != 4) { error_returned (" hfs_attr_walk_special: trying to read the offset table size, " "return value of %u should have been 4", attrReadResult); return 1; } tableSize = tsk_getu32(TSK_LIT_ENDIAN, fourBytes); // Each table entry is 8 bytes long offsetTableData = (char *) tsk_malloc(tableSize * 8); if (offsetTableData == NULL) { error_returned (" hfs_attr_walk_special: space for the offset table raw data"); return 1; } offsetTable = (CMP_OFFSET_ENTRY *) tsk_malloc(tableSize * sizeof(CMP_OFFSET_ENTRY)); if (offsetTable == NULL) { error_returned (" hfs_attr_walk_special: space for the offset table"); free(offsetTableData); return 1; } attrReadResult = tsk_fs_attr_read(rAttr, offsetTableOffset + 4, offsetTableData, tableSize * 8, TSK_FS_FILE_READ_FLAG_NONE); if (attrReadResult != tableSize * 8) { error_returned (" hfs_attr_walk_special: reading in the compression offset table, " "return value %u should have been %u", attrReadResult, tableSize * 8); free(offsetTableData); free(offsetTable); return 1; } for (indx = 0; indx < tableSize; indx++) { offsetTable[indx].offset = tsk_getu32(TSK_LIT_ENDIAN, offsetTableData + indx * 8); offsetTable[indx].length = tsk_getu32(TSK_LIT_ENDIAN, offsetTableData + indx * 8 + 4); } // Allocate two buffers for the raw and uncompressed data /* Raw data can be COMPRESSSION_UNIT_SIZE+1 if the data is not * compressed and there is a 1-byte flag that indicates that * the data is not compressed. */ rawBuf = (char *) tsk_malloc(COMPRESSION_UNIT_SIZE + 1); if (rawBuf == NULL) { error_returned (" hfs_attr_walk_special: buffers for reading and uncompressing"); return 1; } uncBuf = (char *) tsk_malloc(COMPRESSION_UNIT_SIZE); if (uncBuf == NULL) { error_returned (" hfs_attr_walk_special: buffers for reading and uncompressing"); free(rawBuf); return 1; } // FOR entry in the table DO for (indx = 0; indx < tableSize; indx++) { uint32_t offset = offsetTableOffset + offsetTable[indx].offset; uint32_t len = offsetTable[indx].length; uint64_t uncLen; // uncompressed length unsigned int blockSize; uint64_t lumpSize; uint64_t remaining; char *lumpStart; if (tsk_verbose) tsk_fprintf(stderr, "hfs_attr_walk_special: reading one compression unit, number %d, length %d\n", indx, len); /* Github #383 referenced that if len is 0, then the below code causes * problems. Added this check, but I don't have data to verify this on. * it looks like it should at least not crash, but it isn't clear if it * will also do the right thing and if should actually break here instead. 
*/ if (len == 0) { continue; } if (len > COMPRESSION_UNIT_SIZE + 1) { error_detected(TSK_ERR_FS_READ, "hfs_attr_walk_special: block size is too large: %u", len); free(offsetTableData); free(offsetTable); free(rawBuf); free(uncBuf); return 1; } // Read in the chunk of (potentially) compressed data attrReadResult = tsk_fs_attr_read(rAttr, offset, rawBuf, len, TSK_FS_FILE_READ_FLAG_NONE); if (attrReadResult != len) { if (attrReadResult < 0) error_returned (" hfs_attr_walk_special: reading in the compression offset table, " "return value %u should have been %u", attrReadResult, len); else error_detected(TSK_ERR_FS_READ, "hfs_attr_walk_special: reading in the compression offset table, " "return value %u should have been %u", attrReadResult, len); free(offsetTableData); free(offsetTable); free(rawBuf); free(uncBuf); return 1; } // see if this block is compressed if ((len > 0) && ((rawBuf[0] & 0x0F) != 0x0F)) { unsigned long bytesConsumed; int infResult; // Uncompress the chunk of data if (tsk_verbose) tsk_fprintf(stderr, "hfs_attr_walk_special: Inflating the compression unit\n"); infResult = zlib_inflate(rawBuf, (uint64_t) len, uncBuf, (uint64_t) COMPRESSION_UNIT_SIZE, &uncLen, &bytesConsumed); if (infResult != 0) { error_returned (" hfs_attr_walk_special: zlib inflation (uncompression) failed", infResult); free(offsetTableData); free(offsetTable); free(rawBuf); free(uncBuf); return 1; } } else { // actually an uncompressed block of data; just copy if (tsk_verbose) tsk_fprintf(stderr, "hfs_attr_walk_special: Copying an uncompressed compression unit\n"); if ((len - 1) > COMPRESSION_UNIT_SIZE) { error_detected(TSK_ERR_FS_READ, "hfs_attr_walk_special: uncompressed block length %u is longer " "than compression unit size %u", len - 1, COMPRESSION_UNIT_SIZE); free(offsetTableData); free(offsetTable); free(rawBuf); free(uncBuf); return 1; } memcpy(uncBuf, rawBuf + 1, len - 1); uncLen = len - 1; } // Call the a_action callback with "Lumps" that are at most the block size. blockSize = fs->block_size; remaining = uncLen; lumpStart = uncBuf; while (remaining > 0) { int retval; // action return value if (remaining <= blockSize) lumpSize = remaining; else lumpSize = blockSize; // Apply the callback function if (tsk_verbose) tsk_fprintf(stderr, "hfs_attr_walk_special: Calling action on lump of size %" PRIu64 " offset %" PRIu64 " in the compression unit\n", lumpSize, uncLen - remaining); if (lumpSize > SIZE_MAX) { error_detected(TSK_ERR_FS_FWALK, " hfs_attr_walk_special: lumpSize is too large for the action"); free(offsetTableData); free(offsetTable); free(rawBuf); free(uncBuf); return 1; } retval = a_action(fs_attr->fs_file, off, 0, lumpStart, (size_t) lumpSize, // cast OK because of above test TSK_FS_BLOCK_FLAG_COMP, ptr); if (retval == TSK_WALK_ERROR) { error_detected(TSK_ERR_FS | 201, "hfs_attr_walk_special: callback returned an error"); free(offsetTableData); free(offsetTable); free(rawBuf); free(uncBuf); return 1; } if (retval == TSK_WALK_STOP) break; // Find the next lump off += lumpSize; remaining -= lumpSize; lumpStart += lumpSize; } } // Done, so free up the allocated resources. 
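/* Aside (editorial sketch): the per-unit decision made in the loop above,
 * condensed.  A unit whose first byte has 0x0F in its low nibble is stored
 * raw behind that single flag byte; any other unit is a zlib stream that
 * inflates to at most COMPRESSION_UNIT_SIZE (64 KB).  This reuses the
 * zlib_inflate() helper called above; the wrapper name is illustrative only
 * and assumes len >= 1. */
static int
hfs_expand_comp_unit_sketch(char *rawBuf, uint32_t len, char *uncBuf,
    uint64_t * uncLen)
{
    unsigned long bytesConsumed;

    if ((rawBuf[0] & 0x0F) == 0x0F) {
        // stored uncompressed: copy the payload, skipping the 1-byte flag
        memcpy(uncBuf, rawBuf + 1, len - 1);
        *uncLen = len - 1;
        return 0;
    }
    // zlib-compressed: inflate into the 64 KB output buffer
    return zlib_inflate(rawBuf, (uint64_t) len, uncBuf,
        (uint64_t) COMPRESSION_UNIT_SIZE, uncLen, &bytesConsumed);
}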
free(offsetTableData); free(offsetTable); free(rawBuf); free(uncBuf); return 0; } /** \internal * * @returns number of bytes read or -1 on error (incl if offset is past EOF) */ ssize_t hfs_file_read_special(const TSK_FS_ATTR * a_fs_attr, TSK_OFF_T a_offset, char *a_buf, size_t a_len) { TSK_FS_INFO *fs = NULL; TSK_ENDIAN_ENUM endian; TSK_FS_FILE *fs_file; const TSK_FS_ATTR *rAttr; char *rawBuf = NULL; char *uncBuf = NULL; hfs_resource_fork_header rfHeader; int attrReadResult; hfs_resource_fork_header *resHead; uint32_t dataOffset; uint32_t offsetTableOffset; char fourBytes[4]; // Size of the offset table, little endian uint32_t tableSize; // Size of the offset table char *offsetTableData; CMP_OFFSET_ENTRY *offsetTable; size_t indx; // index for looping over the offset table uint64_t sizeUpperBound; uint64_t cummulativeSize = 0; uint32_t startUnit = 0; uint32_t startUnitOffset = 0; uint32_t endUnit = 0; uint64_t bytesCopied; if (tsk_verbose) tsk_fprintf(stderr, "hfs_file_read_special: called because this file is compressed, with data in the resource fork\n"); // Reading zero bytes? OK at any offset, I say! if (a_len == 0) return 0; if (a_offset < 0 || a_len < 0) { error_detected(TSK_ERR_FS_ARG, "hfs_file_read_special: reading from file at a negative offset, or negative length"); return -1; } if (a_len > SIZE_MAX / 2) { error_detected(TSK_ERR_FS_ARG, "hfs_file_read_special: trying to read more than SIZE_MAX/2 is not supported."); return -1; } if ((a_fs_attr == NULL) || (a_fs_attr->fs_file == NULL) || (a_fs_attr->fs_file->meta == NULL) || (a_fs_attr->fs_file->fs_info == NULL)) { error_detected(TSK_ERR_FS_ARG, "hfs_file_read_special: NULL parameters passed"); return -1; } fs = a_fs_attr->fs_file->fs_info; //hfs = (HFS_INFO *) fs; endian = fs->endian; // This should be a compressed file. If not, that's an error! if (!(a_fs_attr->flags & TSK_FS_ATTR_COMP)) { error_detected(TSK_ERR_FS_ARG, "hfs_file_read_special: called with non-special attribute: %x", a_fs_attr->flags); return -1; } // Check that the ATTR being read is the main DATA resource, 4352-0, because this is the // only one that can be compressed in HFS+ if ((a_fs_attr->id != HFS_FS_ATTR_ID_DATA) || (a_fs_attr->type != TSK_FS_ATTR_TYPE_HFS_DATA)) { error_detected(TSK_ERR_FS_ARG, "hfs_file_read_special: arg specified an attribute %u-%u that is not the data fork, " "Only the data fork can be compressed.", a_fs_attr->type, a_fs_attr->id); return -1; } /******** Open the Resource Fork ***********/ // The file fs_file = a_fs_attr->fs_file; // find the attribute for the resource fork rAttr = tsk_fs_file_attr_get_type(fs_file, TSK_FS_ATTR_TYPE_HFS_RSRC, HFS_FS_ATTR_ID_RSRC, TRUE); if (rAttr == NULL) { error_returned (" hfs_file_read_special: could not get the attribute for the resource fork of the file"); return -1; } // Read the resource fork header attrReadResult = tsk_fs_attr_read(rAttr, 0, (char *) &rfHeader, sizeof(hfs_resource_fork_header), TSK_FS_FILE_READ_FLAG_NONE); if (attrReadResult != sizeof(hfs_resource_fork_header)) { error_returned (" hfs_file_read_special: trying to read the resource fork header"); return -1; } // Begin to parse the resource fork. For now, we just need the data offset. But // eventually we'll want the other quantities as well. 
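/* Illustrative note (layout assumption, per the classic resource fork
 * format): hfs_resource_fork_header holds four big-endian 32-bit fields,
 * which is why sizeof(hfs_resource_fork_header) bytes are read at offset 0
 * above and parsed with tsk_getu32(endian, ...):
 *
 *   offset 0   uint32  dataOffset   -- start of the resource data area
 *   offset 4   uint32  mapOffset    -- start of the resource map
 *   offset 8   uint32  dataLength   -- length of the data area
 *   offset 12  uint32  mapLength    -- length of the map
 *
 * For a decmpfs type-4 (resource fork) compressed file only dataOffset is
 * needed here: the chunk table that follows it drives the reads below.
 */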
resHead = &rfHeader; dataOffset = tsk_getu32(endian, resHead->dataOffset); //uint32_t mapOffset = tsk_getu32(endian, resHead->mapOffset); //uint32_t dataLength = tsk_getu32(endian, resHead->dataLength); //uint32_t mapLength = tsk_getu32(endian, resHead->mapLength); // The resource's data begins with an offset table, which defines blocks // of (optionally) zlib-compressed data (so that the OS can do file seeks // efficiently; each uncompressed block is 64KB). offsetTableOffset = dataOffset + 4; // read 4 bytes, the number of table entries, little endian attrReadResult = tsk_fs_attr_read(rAttr, offsetTableOffset, fourBytes, 4, TSK_FS_FILE_READ_FLAG_NONE); if (attrReadResult != 4) { error_returned (" hfs_file_read_special: trying to read the offset table size, " "return value of %u should have been 4", attrReadResult); return -1; } tableSize = tsk_getu32(TSK_LIT_ENDIAN, fourBytes); // Each table entry is 8 bytes long offsetTableData = tsk_malloc(tableSize * 8); if (offsetTableData == NULL) { error_returned (" hfs_file_read_special: space for the offset table raw data"); return -1; } offsetTable = (CMP_OFFSET_ENTRY *) tsk_malloc(tableSize * sizeof(CMP_OFFSET_ENTRY)); if (offsetTable == NULL) { error_returned (" hfs_file_read_special: space for the offset table"); free(offsetTableData); return -1; } attrReadResult = tsk_fs_attr_read(rAttr, offsetTableOffset + 4, offsetTableData, tableSize * 8, TSK_FS_FILE_READ_FLAG_NONE); if (attrReadResult != tableSize * 8) { error_returned (" hfs_file_read_special: reading in the compression offset table, " "return value %u should have been %u", attrReadResult, tableSize * 8); free(offsetTableData); free(offsetTable); return -1; } for (indx = 0; indx < tableSize; indx++) { offsetTable[indx].offset = tsk_getu32(TSK_LIT_ENDIAN, offsetTableData + indx * 8); offsetTable[indx].length = tsk_getu32(TSK_LIT_ENDIAN, offsetTableData + indx * 8 + 4); } sizeUpperBound = tableSize * COMPRESSION_UNIT_SIZE; // cast is OK because both a_offset and a_len are >= 0 if ((uint64_t) (a_offset + a_len) > sizeUpperBound) { error_detected(TSK_ERR_FS_ARG, "hfs_file_read_special: range of bytes requested %lld - %lld falls outside of the length upper bound of the uncompressed stream %llu\n", a_offset, a_offset + a_len, sizeUpperBound); free(offsetTableData); free(offsetTable); return -1; } // Compute the range of compression units needed for the request for (indx = 0; indx < tableSize; indx++) { if (cummulativeSize <= (uint64_t) a_offset && // casts OK because a_offset >= 0 (cummulativeSize + COMPRESSION_UNIT_SIZE > (uint64_t) a_offset)) { startUnit = indx; startUnitOffset = (uint32_t) (a_offset - cummulativeSize); // This cast is OK, result can't be too large, // due to enclosing test. } if ((cummulativeSize < (uint64_t) (a_offset + a_len)) && // casts OK because a_offset and a_len > 0 (cummulativeSize + COMPRESSION_UNIT_SIZE >= (uint64_t) (a_offset + a_len))) { endUnit = indx; } cummulativeSize += COMPRESSION_UNIT_SIZE; } if (tsk_verbose) tsk_fprintf(stderr, "hfs_file_read_special: reading compression units: %" PRIu32 " to %" PRIu32 "\n", startUnit, endUnit); bytesCopied = 0; // Allocate buffers for the raw and uncompressed data /* Raw data can be COMPRESSSION_UNIT_SIZE+1 if the data is not * compressed and there is a 1-byte flag that indicates that * the data is not compressed. 
*/ rawBuf = (char *) tsk_malloc(COMPRESSION_UNIT_SIZE + 1); if (rawBuf == NULL) { error_returned (" hfs_file_read_special: buffers for reading and uncompressing"); return -1; } uncBuf = (char *) tsk_malloc(COMPRESSION_UNIT_SIZE); if (uncBuf == NULL) { error_returned (" hfs_file_read_special: buffers for reading and uncompressing"); free(rawBuf); return -1; } // Read from the indicated comp units for (indx = startUnit; indx <= endUnit; indx++) { uint32_t offset = offsetTableOffset + offsetTable[indx].offset; uint32_t len = offsetTable[indx].length; uint64_t uncLen; char *uncBufPtr = uncBuf; size_t bytesToCopy; if (tsk_verbose) tsk_fprintf(stderr, "hfs_file_read_special: Reading compression unit %" PRIu32 "\n", indx); /* Github #383 referenced that if len is 0, then the below code causes * problems. Added this check, but I don't have data to verify this on. * it looks like it should at least not crash, but it isn't clear if it * will also do the right thing and if should actually break here instead. */ if (len == 0) { continue; } if (len > COMPRESSION_UNIT_SIZE + 1) { error_detected(TSK_ERR_FS_READ, "hfs_file_read_special: block size is too large: %u", len); free(offsetTableData); free(offsetTable); free(rawBuf); free(uncBuf); return -1; } // Read in the chunk of compressed data attrReadResult = tsk_fs_attr_read(rAttr, offset, rawBuf, len, TSK_FS_FILE_READ_FLAG_NONE); if (attrReadResult != len) { if (attrReadResult < 0) error_returned (" hfs_file_read_special: reading in the compression offset table, " "return value %u should have been %u", attrReadResult, len); else error_detected(TSK_ERR_FS_READ, "hfs_file_read_special: reading in the compression offset table, " "return value %u should have been %u", attrReadResult, len); free(offsetTableData); free(offsetTable); free(rawBuf); free(uncBuf); return -1; } // see if this block is compressed if ((len > 0) && ((rawBuf[0] & 0x0F) != 0x0F)) { unsigned long bytesConsumed; int infResult; // Uncompress the chunk of data if (tsk_verbose) tsk_fprintf(stderr, "hfs_attr_read_special: Inflating the compression unit\n"); infResult = zlib_inflate(rawBuf, (uint64_t) len, uncBuf, (uint64_t) COMPRESSION_UNIT_SIZE, &uncLen, &bytesConsumed); if (infResult != 0) { error_returned (" hfs_attr_walk_special: zlib inflation (uncompression) failed", infResult); free(offsetTableData); free(offsetTable); free(rawBuf); free(uncBuf); return -1; } } else { // actually an uncompressed block of data; just copy if (tsk_verbose) tsk_fprintf(stderr, "hfs_attr_read_special: Copying an uncompressed compression unit\n"); if ((len - 1) > COMPRESSION_UNIT_SIZE) { error_detected(TSK_ERR_FS_READ, "hfs_attr_read_special: uncompressed block length %u is longer " "than compression unit size %u", len - 1, COMPRESSION_UNIT_SIZE); free(offsetTableData); free(offsetTable); free(rawBuf); free(uncBuf); return -1; } memcpy(uncBuf, rawBuf + 1, len - 1); uncLen = len - 1; } // There are now uncLen bytes of uncompressed data available from this comp unit. // If this is the first comp unit, then we must skip over the startUnitOffset bytes. if (indx == startUnit) { uncLen -= startUnitOffset; uncBufPtr += startUnitOffset; } // How many bytes to copy from this compression unit? 
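/* Illustrative sketch (equivalent arithmetic, not additional logic): the
 * startUnit/startUnitOffset/endUnit values computed earlier in this
 * function are just fixed-size chunk arithmetic on 64 KB compression
 * units.  Assuming COMPRESSION_UNIT_SIZE is 65536 and a_len > 0 (both
 * enforced earlier):
 *
 *   startUnit       = a_offset / COMPRESSION_UNIT_SIZE;
 *   startUnitOffset = a_offset % COMPRESSION_UNIT_SIZE;
 *   endUnit         = (a_offset + a_len - 1) / COMPRESSION_UNIT_SIZE;
 *
 * Worked example: a_offset = 100000, a_len = 50000 reads bytes
 * 100000..149999, so startUnit = 1, startUnitOffset = 34464 and
 * endUnit = 2; the copy logic skips 34464 bytes of unit 1 and stops part
 * way through unit 2.
 */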
if (bytesCopied + uncLen < (uint64_t) a_len) // cast OK because a_len > 0 bytesToCopy = (size_t) uncLen; // uncLen <= size of compression unit, which is small, so cast is OK else bytesToCopy = (size_t) (((uint64_t) a_len) - bytesCopied); // diff <= compression unit size, so cast is OK // Copy into the output buffer, and update bookkeeping. memcpy(a_buf + bytesCopied, uncBufPtr, bytesToCopy); bytesCopied += bytesToCopy; } // Well, we don't know (without a lot of work) what the // true uncompressed size of the stream is. All we know is the "upper bound" which // assumes that all of the compression units expand to their full size. If we did // know the true size, then we could reject requests that go beyond the end of the // stream. Instead, we treat the stream as if it is padded out to the full size of // the last compression unit with zeros. // Have we read and copied all of the bytes requested? if (bytesCopied < a_len) { // set the remaining bytes to zero memset(a_buf + bytesCopied, 0, a_len - (size_t) bytesCopied); // cast OK because diff must be < compression unit size } free(offsetTableData); free(offsetTable); free(rawBuf); free(uncBuf); return (ssize_t) bytesCopied; // cast OK, cannot be greater than a_len which cannot be // greater than SIZE_MAX/2 (rounded down). } #endif typedef struct { TSK_FS_INFO *fs; // the HFS file system TSK_FS_FILE *file; // the Attributes file, if open hfs_btree_header_record *header; // the Attributes btree header record. // For Convenience, unpacked values. TSK_ENDIAN_ENUM endian; uint32_t rootNode; uint16_t nodeSize; uint16_t maxKeyLen; } ATTR_FILE_T; /** \internal * Open the Attributes file, and read the btree header record. Fill in the fields of the ATTR_FILE_T struct. * * @param fs -- the HFS file system * @param header -- the header record struct * * @return 1 on error, 0 on success */ static uint8_t open_attr_file(TSK_FS_INFO * fs, ATTR_FILE_T * attr_file) { int cnt; // will hold bytes read hfs_btree_header_record *hrec; // clean up any error messages that are lying around tsk_error_reset(); if (fs == NULL) { tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("open_attr_file: fs is NULL"); return 1; } if (attr_file == NULL) { tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("open_attr_file: attr_file is NULL"); return 1; } // Open the Attributes File attr_file->file = tsk_fs_file_open_meta(fs, NULL, HFS_ATTRIBUTES_FILE_ID); if (attr_file->file == NULL) { tsk_error_set_errno(TSK_ERR_FS_READ); tsk_error_set_errstr ("open_attr_file: could not open the Attributes file"); return 1; } // Allocate some space for the Attributes btree header record (which // is passed back to the caller) hrec = (hfs_btree_header_record *) malloc(sizeof(hfs_btree_header_record)); if (hrec == NULL) { tsk_error_set_errno(TSK_ERR_FS); tsk_error_set_errstr ("open_attr_file: could not malloc space for Attributes header record"); return 1; } // Read the btree header record cnt = tsk_fs_file_read(attr_file->file, 14, (char *) hrec, sizeof(hfs_btree_header_record), (TSK_FS_FILE_READ_FLAG_ENUM) 0); if (cnt != sizeof(hfs_btree_header_record)) { tsk_error_set_errno(TSK_ERR_FS_READ); tsk_error_set_errstr ("open_attr_file: could not open the Attributes file"); tsk_fs_file_close(attr_file->file); free(hrec); return 1; } // Fill in the fields of the attr_file struct (which was passed in by the caller) attr_file->fs = fs; attr_file->header = hrec; attr_file->endian = fs->endian; attr_file->nodeSize = tsk_getu16(attr_file->endian, hrec->nodesize); attr_file->rootNode = 
tsk_getu32(attr_file->endian, hrec->rootNode); attr_file->maxKeyLen = tsk_getu16(attr_file->endian, hrec->maxKeyLen); return 0; } /** \internal * Closes and frees the data structures associated with ATTR_FILE_T */ static uint8_t close_attr_file(ATTR_FILE_T * attr_file) { if (attr_file == NULL) { tsk_error_set_errno(TSK_ERR_FS_READ); tsk_error_set_errstr("close_attr_file: NULL attr_file arg"); return 1; } if (attr_file->file != NULL) { tsk_fs_file_close(attr_file->file); attr_file->file = NULL; } if (attr_file->header != NULL) { free(attr_file->header); attr_file->header = NULL; } attr_file->rootNode = 0; attr_file->nodeSize = 0; // Note that we leave the fs component alone. return 0; } static const char * hfs_attrTypeName(uint32_t typeNum) { switch (typeNum) { case TSK_FS_ATTR_TYPE_HFS_DEFAULT: return "DFLT"; case TSK_FS_ATTR_TYPE_HFS_DATA: return "DATA"; case TSK_FS_ATTR_TYPE_HFS_EXT_ATTR: return "ExATTR"; case TSK_FS_ATTR_TYPE_HFS_COMP_REC: return "CMPF"; case TSK_FS_ATTR_TYPE_HFS_RSRC: return "RSRC"; default: return "UNKN"; } } // TODO: Function description missing here no idea what it is supposed to return // in which circumstances. static uint8_t hfs_load_extended_attrs(TSK_FS_FILE * fs_file, unsigned char *isCompressed, unsigned char *compDataInRSRC, uint64_t * uncompressedSize) { TSK_FS_INFO *fs = fs_file->fs_info; uint64_t fileID; ATTR_FILE_T attrFile; int cnt; // count of chars read from file. uint8_t *nodeData; TSK_ENDIAN_ENUM endian; hfs_btree_node *nodeDescriptor; // The node descriptor uint32_t nodeID; // The number or ID of the Attributes file node to read. hfs_btree_key_attr *keyB; // ptr to the key of the Attr file record. unsigned char done; // Flag to indicate that we are done looping over leaf nodes uint16_t attribute_counter = 2; // The ID of the next attribute to be loaded. HFS_INFO *hfs; char *buffer = NULL; // buffer to hold the attribute tsk_error_reset(); // The CNID (or inode number) of the file // Note that in TSK such numbers are 64 bits, but in HFS+ they are only 32 bits. fileID = fs_file->meta->addr; if (fs == NULL) { error_detected(TSK_ERR_FS_ARG, "hfs_load_extended_attrs: NULL fs arg"); return 1; } hfs = (HFS_INFO *) fs; if (!hfs->has_attributes_file) { // No attributes file, and so, no extended attributes return 0; } if (tsk_verbose) { tsk_fprintf(stderr, "hfs_load_extended_attrs: Processing file %" PRIuINUM "\n", fileID); } // Open the Attributes File if (open_attr_file(fs, &attrFile)) { error_returned ("hfs_load_extended_attrs: could not open Attributes file"); return 1; } // Is the Attributes file empty? if (attrFile.rootNode == 0) { if (tsk_verbose) tsk_fprintf(stderr, "hfs_load_extended_attrs: Attributes file is empty\n"); close_attr_file(&attrFile); *isCompressed = FALSE; *compDataInRSRC = FALSE; return 0; } // A place to hold one node worth of data nodeData = (uint8_t *) malloc(attrFile.nodeSize); if (nodeData == NULL) { error_detected(TSK_ERR_AUX_MALLOC, "hfs_load_extended_attrs: Could not malloc space for an Attributes file node"); goto on_error; } // Initialize these *isCompressed = FALSE; *compDataInRSRC = FALSE; endian = attrFile.fs->endian; // Start with the root node nodeID = attrFile.rootNode; // While loop, over nodes in path from root node to the correct LEAF node. 
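/* Illustrative sketch of the on-disk node layout that the descent below
 * relies on (field names and offsets taken from Apple's HFS+
 * documentation/headers; treat this as a reference sketch, not the TSK
 * struct definitions).  Every Attributes File node starts with a 14-byte
 * descriptor -- which is why open_attr_file() reads the header record at
 * offset 14 -- and keeps its record offsets as big-endian uint16 values
 * growing backwards from the end of the node:
 *
 *   +0   uint32  fLink       next node of the same kind (0 if none)
 *   +4   uint32  bLink       previous node
 *   +8   uint8   kind        leaf / index / header / map
 *   +9   uint8   height
 *   +10  uint16  numRecords
 *   +12  uint16  reserved
 *   ...  records ...
 *   nodeSize - 2*(i+1)  uint16  offset of record i within the node
 *
 * Each Attributes File record begins with a key of the form
 * { keyLength u16, pad u16, fileID u32, startBlock u32, nameLength u16,
 *   name[] UTF-16BE }, and the record data starts keyLength + 2 bytes in
 * (keyLength does not count its own field), rounded up to an even offset.
 */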
while (1) { uint16_t numRec; // Number of records in the node int recIndx; // index for looping over records if (tsk_verbose) { tsk_fprintf(stderr, "hfs_load_extended_attrs: Reading Attributes File node with ID %" PRIu32 "\n", nodeID); } cnt = tsk_fs_file_read(attrFile.file, nodeID * attrFile.nodeSize, (char *) nodeData, attrFile.nodeSize, (TSK_FS_FILE_READ_FLAG_ENUM) 0); if (cnt != attrFile.nodeSize) { error_returned ("hfs_load_extended_attrs: Could not read in a node from the Attributes File"); goto on_error; } // Parse the Node header nodeDescriptor = (hfs_btree_node *) nodeData; // If we are at a leaf node, then we have found the right node if (nodeDescriptor->type == HFS_ATTR_NODE_LEAF) { break; } // This had better be an INDEX node, if not its an error if (nodeDescriptor->type != HFS_ATTR_NODE_INDEX) { error_detected(TSK_ERR_FS_READ, "hfs_load_extended_attrs: Reached a non-INDEX and non-LEAF node in searching the Attributes File"); goto on_error; } // OK, we are in an INDEX node. loop over the records to find the last one whose key is // smaller than or equal to the desired key numRec = tsk_getu16(endian, nodeDescriptor->num_rec); if (numRec == 0) { // This is wrong, there must always be at least 1 record in an INDEX node. error_detected(TSK_ERR_FS_READ, "hfs_load_extended_attrs:Attributes File index node %" PRIu32 " has zero records", nodeID); goto on_error; } for (recIndx = 0; recIndx < numRec; recIndx++) { uint16_t keyLength; int comp; // comparison result char *compStr; // comparison result, as a string uint8_t *recData; // pointer to the data part of the record uint32_t keyFileID; int diff; // difference in bytes between the record start and the record data // Offset of the record uint8_t *recOffsetData = &nodeData[attrFile.nodeSize - 2 * (recIndx + 1)]; // data describing where this record is uint16_t recOffset = tsk_getu16(endian, recOffsetData); //uint8_t * nextRecOffsetData = &nodeData[attrFile.nodeSize - 2* (recIndx+2)]; // Pointer to first byte of record uint8_t *record = &nodeData[recOffset]; // Cast that to the Attributes file key (n.b., the key is the first thing in the record) keyB = (hfs_btree_key_attr *) record; keyLength = tsk_getu16(endian, keyB->key_len); // Is this key less than what we are seeking? //int comp = comp_attr_key(endian, keyB, fileID, attrName, startBlock); keyFileID = tsk_getu32(endian, keyB->file_id); if (keyFileID < fileID) { comp = -1; compStr = "less than"; } else if (keyFileID > fileID) { comp = 1; compStr = "greater than"; } else { comp = 0; compStr = "equal to"; } if (tsk_verbose) tsk_fprintf(stderr, "hfs_load_extended_attrs: INDEX record %d, fileID %" PRIu32 " is %s the file ID we are seeking, %" PRIu32 ".\n", recIndx, keyFileID, compStr, fileID); if (comp > 0) { // The key of this record is greater than what we are seeking if (recIndx == 0) { // This is the first record, so no records are appropriate // Nothing in this btree will match. We can stop right here. goto on_exit; } // This is not the first record, so, the previous record's child is the one we want. break; } // CASE: key in this record matches the key we are seeking. The previous record's child // is the one we want. However, if this is the first record, then we want THIS record's child. if (comp == 0 && recIndx != 0) { break; } // Extract the child node ID from the data of the record recData = &record[keyLength + 2]; // This is +2 because key_len does not include the // length of the key_len field itself. // Data must start on an even offset from the beginning of the record. 
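/* Illustrative example of the even-offset rule applied just below: the
 * record data begins at record + keyLength + 2, and keyLength excludes its
 * own 2-byte field.  If keyLength is odd -- say 13 -- the data would start
 * at offset 15, so it is bumped to 16; if keyLength is even, no adjustment
 * is needed.  The same adjustment appears again in the leaf-record loop
 * further down.
 */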
// So, correct this if needed. diff = recData - record; if (2 * (diff / 2) != diff) { recData += 1; } // The next four bytes should be the Node ID of the child of this node. nodeID = tsk_getu32(endian, recData); // At this point, either comp<0 or comp=0 && recIndx=0. In the latter case we want to // descend to the child of this node, so we break. if (recIndx == 0 && comp == 0) { break; } // CASE: key in this record is less than key we seek. comp < 0 // So, continue looping over records in this node. } // END loop over records } // END while loop over Nodes in path from root to LEAF node // At this point nodeData holds the contents of a LEAF node with the right range of keys // and nodeDescriptor points to the descriptor of that node. // Loop over successive LEAF nodes, starting with this one done = FALSE; while (!done) { uint16_t numRec; // number of records int recIndx; // index for looping over records if (tsk_verbose) tsk_fprintf(stderr, "hfs_load_extended_attrs: Attributes File LEAF Node %" PRIu32 ".\n", nodeID); numRec = tsk_getu16(endian, nodeDescriptor->num_rec); // Note, leaf node could have one (or maybe zero) records // Loop over the records in this node for (recIndx = 0; recIndx < numRec; recIndx++) { // Offset of the record uint8_t *recOffsetData = &nodeData[attrFile.nodeSize - 2 * (recIndx + 1)]; // data describing where this record is uint16_t recOffset = tsk_getu16(endian, recOffsetData); uint16_t keyLength; int comp; // comparison result char *compStr; // comparison result as a string uint32_t keyFileID; // Pointer to first byte of record uint8_t *record = &nodeData[recOffset]; // Cast that to the Attributes file key keyB = (hfs_btree_key_attr *) record; keyLength = tsk_getu16(endian, keyB->key_len); // Compare record key to the key that we are seeking keyFileID = tsk_getu32(endian, keyB->file_id); //fprintf(stdout, " Key file ID = %lu\n", keyFileID); if (keyFileID < fileID) { comp = -1; compStr = "less than"; } else if (keyFileID > fileID) { comp = 1; compStr = "greater than"; } else { comp = 0; compStr = "equal to"; } if (tsk_verbose) tsk_fprintf(stderr, "hfs_load_extended_attrs: LEAF Record key file ID %" PRIu32 " is %s the desired file ID %" PRIu32 "\n", keyFileID, compStr, fileID); // Are they the same? if (comp == 0) { // Yes, so load this attribute uint8_t *recData; // pointer to the data part of the record hfs_attr_data *attrData; uint32_t attributeLength; int diff; // Difference in bytes between the start of the record and the start of data. int conversionResult; char nameBuff[MAX_ATTR_NAME_LENGTH]; TSK_FS_ATTR_TYPE_ENUM attrType; TSK_FS_ATTR *fs_attr; // Points to the attribute to be loaded. recData = &record[keyLength + 2]; // Data must start on an even offset from the beginning of the record. // So, correct this if needed. diff = recData - record; if (2 * (diff / 2) != diff) { recData += 1; } attrData = (hfs_attr_data *) recData; // This is the length of the useful data, not including the record header attributeLength = tsk_getu32(endian, attrData->attr_size); buffer = malloc(attributeLength); if (buffer == NULL) { error_detected(TSK_ERR_AUX_MALLOC, "hfs_load_extended_attrs: Could not malloc space for the attribute."); goto on_error; } memcpy(buffer, attrData->attr_data, attributeLength); // Use the "attr_name" part of the key as the attribute name // but must convert to UTF8. Unfortunately, there does not seem to // be any easy way to determine how long the converted string will // be because UTF8 is a variable length encoding. 
However, the longest // it will be is 3 * the max number of UTF16 code units. Add one for null // termination. (thanks Judson!) conversionResult = hfs_UTF16toUTF8(fs, keyB->attr_name, tsk_getu16(endian, keyB->attr_name_len), nameBuff, MAX_ATTR_NAME_LENGTH, 0); if (conversionResult != 0) { error_returned ("-- hfs_load_extended_attrs could not convert the attr_name in the btree key into a UTF8 attribute name"); goto on_error; } // What is the type of this attribute? If it is a compression record, then // use TSK_FS_ATTR_TYPE_HFS_COMP_REC. Else, use TSK_FS_ATTR_TYPE_HFS_EXT_ATTR // Only "inline data" kind of record is handled. if (strcmp(nameBuff, "com.apple.decmpfs") == 0 && tsk_getu32(endian, attrData->record_type) == HFS_ATTR_RECORD_INLINE_DATA) { // Now, look at the compression record DECMPFS_DISK_HEADER *cmph = (DECMPFS_DISK_HEADER *) buffer; uint32_t cmpType = tsk_getu32(TSK_LIT_ENDIAN, cmph->compression_type); uint64_t uncSize = tsk_getu64(TSK_LIT_ENDIAN, cmph->uncompressed_size); if (tsk_verbose) tsk_fprintf(stderr, "hfs_load_extended_attrs: This attribute is a compression record.\n"); attrType = TSK_FS_ATTR_TYPE_HFS_COMP_REC; *isCompressed = TRUE; // The data is governed by a compression record (but might not be compressed) *uncompressedSize = uncSize; if (cmpType == 3) { // Data is inline. We will load the uncompressed data as a resident attribute. TSK_FS_ATTR *fs_attr_unc; if (tsk_verbose) tsk_fprintf(stderr, "hfs_load_extended_attrs: Compressed data is inline in the attribute, will load this as the default DATA attribute.\n"); if (attributeLength <= 16) { if (tsk_verbose) tsk_fprintf(stderr, "hfs_load_extended_attrs: WARNING, Compression Record of type 3 is not followed by" " compressed data. No data will be loaded into the DATA attribute.\n"); } else { // There is data following the compression record, as there should be. if ((fs_attr_unc = tsk_fs_attrlist_getnew(fs_file->meta-> attr, TSK_FS_ATTR_RES)) == NULL) { error_returned (" - hfs_load_extended_attrs, FS_ATTR for uncompressed data"); goto on_error; } if ((cmph->attr_bytes[0] & 0x0F) == 0x0F) { if (tsk_verbose) tsk_fprintf(stderr, "hfs_load_extended_attrs: Leading byte, 0x0F, indicates that the data is not really compressed.\n" "hfs_load_extended_attrs: Loading the default DATA attribute."); //cmpSize = attributeLength - 17; // subtr. size of header + 1 indicator byte // Load the remainder of the attribute as 128-0 // set the details in the fs_attr structure. Note, we are loading this // as a RESIDENT attribute. if (tsk_fs_attr_set_str(fs_file, fs_attr_unc, "DATA", TSK_FS_ATTR_TYPE_HFS_DATA, HFS_FS_ATTR_ID_DATA, (void *) (buffer + 17), (size_t) uncSize)) { error_returned (" - hfs_load_extended_attrs"); goto on_error; } } else { // Leading byte is not 0x0F #ifdef HAVE_LIBZ char *uncBuf; uint64_t uLen; unsigned long bytesConsumed; int infResult; if (tsk_verbose) tsk_fprintf(stderr, "hfs_load_extended_attrs: Uncompressing (inflating) data."); // Uncompress the remainder of the attribute, and load as 128-0 // Note: cast is OK because uncSize will be quite modest, less than 4000. 
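/* Illustrative sketch of the decmpfs header being parsed here (fields are
 * little endian on disk; offsets are inferred from the 16/17-byte
 * arithmetic in this function, so treat this as a sketch rather than a
 * specification):
 *
 *   +0   char[4]  magic              "fpmc" as stored on disk
 *   +4   uint32   compression_type   3 = data inline in this xattr,
 *                                    4 = data in the resource fork
 *   +8   uint64   uncompressed_size
 *   +16  payload (type 3 only): either a zlib stream starting at offset
 *        16, or, when byte 16 has 0x0F in its low nibble, uncompressed
 *        data starting at offset 17
 */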
uncBuf = (char *) tsk_malloc((size_t) uncSize + 100); // add some extra space if (uncBuf == NULL) { error_returned (" - hfs_load_extended_attrs, space for the uncompressed attr"); goto on_error; } infResult = zlib_inflate(buffer + 16, (uint64_t) (attributeLength - 16), // source, srcLen uncBuf, (uint64_t) (uncSize + 100), // dest, destLen &uLen, &bytesConsumed); // returned by the function if (infResult != 0) { error_returned (" hfs_load_extended_attrs, zlib could not uncompress attr"); goto on_error; } if (bytesConsumed != attributeLength - 16) { error_detected(TSK_ERR_FS_READ, " hfs_load_extended_attrs, zlib did not consumed the whole compressed data"); goto on_error; } if (uLen != uncSize) { error_detected(TSK_ERR_FS_READ, " hfs_load_extended_attrs, actual uncompressed size not equal to the size in the compression record"); goto on_error; } if (tsk_verbose) tsk_fprintf(stderr, "hfs_load_extended_attrs: Loading inflated data as default DATA attribute."); // set the details in the fs_attr structure. Note, we are loading this // as a RESIDENT attribute. if (tsk_fs_attr_set_str(fs_file, fs_attr_unc, "DATA", TSK_FS_ATTR_TYPE_HFS_DATA, HFS_FS_ATTR_ID_DATA, uncBuf, (size_t) uncSize)) { error_returned (" - hfs_load_extended_attrs"); goto on_error; } #else // ZLIB compression library is not available, so we will load a zero-length // default DATA attribute. Without this, icat may misbehave. // This is one byte long, so the ptr is not null, but only loading zero bytes. uint8_t uncBuf[1]; if (tsk_verbose) tsk_fprintf(stderr, "hfs_load_extended_attrs: ZLIB not available, so loading an empty default DATA attribute.\n"); if (tsk_fs_attr_set_str(fs_file, fs_attr_unc, "DATA", TSK_FS_ATTR_TYPE_HFS_DATA, HFS_FS_ATTR_ID_DATA, uncBuf, (size_t) 0)) { error_returned (" - hfs_load_extended_attrs"); goto on_error; } #endif } // END if leading byte is 0x0F ELSE clause } // END if attributeLength <= 16 ELSE clause } else if (cmpType == 4) { // Data is compressed in the resource fork *compDataInRSRC = TRUE; // The compressed data is in the RSRC fork if (tsk_verbose) tsk_fprintf(stderr, "hfs_load_extended_attrs: Compressed data is in the file Resource Fork.\n"); } } else { // Attrbute name is NOT com.apple.decmpfs attrType = TSK_FS_ATTR_TYPE_HFS_EXT_ATTR; } // END if attribute name is com.apple.decmpfs ELSE clause if ((fs_attr = tsk_fs_attrlist_getnew(fs_file->meta->attr, TSK_FS_ATTR_RES)) == NULL) { error_returned(" - hfs_load_extended_attrs"); goto on_error; } if (tsk_verbose) { tsk_fprintf(stderr, "hfs_load_extended_attrs: loading attribute %s, type %u (%s)\n", nameBuff, (uint32_t) attrType, hfs_attrTypeName((uint32_t) attrType)); } // set the details in the fs_attr structure if (tsk_fs_attr_set_str(fs_file, fs_attr, nameBuff, attrType, attribute_counter, (void *) buffer, attributeLength)) { error_returned(" - hfs_load_extended_attrs"); goto on_error; } // TODO: does the previous function take ownership of buffer? // or does it need to be freed here? buffer = NULL; attribute_counter++; } // END if comp == 0 if (comp == 1) { // since this record key is greater than our search key, all // subsequent records will also be greater. done = TRUE; break; } } // END loop over records in one LEAF node /* * We get to this point if either: * * 1. We finish the loop over records and we are still loading attributes * for the given file. In this case we are NOT done, and must read in * the next leaf node, and process its records. The following code * loads the next leaf node before we return to the top of the loop. * * 2. 
We "broke" out of the loop over records because we found a key that * whose file ID is greater than the one we are working on. In that case * we are done. The following code does not run, and we exit the * while loop over successive leaf nodes. */ if (!done) { // We did not finish loading the attributes when we got to the end of that node, // so we must get the next node, and continue. // First determine the nodeID of the next LEAF node uint32_t newNodeID = tsk_getu32(endian, nodeDescriptor->flink); //fprintf(stdout, "Next Node ID = %u\n", newNodeID); if (tsk_verbose) tsk_fprintf(stderr, "hfs_load_extended_attrs: Processed last record of THIS node, still gathering attributes.\n"); // If we are at the very last leaf node in the btree, then // this "flink" will be zero. We break out of this loop over LEAF nodes. if (newNodeID == 0) { if (tsk_verbose) tsk_fprintf(stderr, "hfs_load_extended_attrs: But, there are no more leaf nodes, so we are done.\n"); break; } if (tsk_verbose) tsk_fprintf(stderr, "hfs_load_extended_attrs: Reading the next LEAF node %" PRIu32 ".\n", nodeID); nodeID = newNodeID; cnt = tsk_fs_file_read(attrFile.file, nodeID * attrFile.nodeSize, (char *) nodeData, attrFile.nodeSize, (TSK_FS_FILE_READ_FLAG_ENUM) 0); if (cnt != attrFile.nodeSize) { error_returned ("hfs_load_extended_attrs: Could not read in the next LEAF node from the Attributes File btree"); goto on_error; } // Parse the Node header nodeDescriptor = (hfs_btree_node *) nodeData; // If we are NOT leaf node, then this is an error if (nodeDescriptor->type != HFS_ATTR_NODE_LEAF) { error_detected(TSK_ERR_FS_CORRUPT, "hfs_load_extended_attrs: found a non-LEAF node as a successor to a LEAF node"); goto on_error; } } // END if(! done) } // END while(! done) loop over successive LEAF nodes on_exit: free(nodeData); close_attr_file(&attrFile); return 0; on_error: if( buffer != NULL ) { free( buffer ); } if( nodeData != NULL ) { free( nodeData ); } close_attr_file(&attrFile); return 1; } typedef struct RES_DESCRIPTOR { char type[5]; // type is really 4 chars, but we will null-terminate uint16_t id; uint32_t offset; uint32_t length; char *name; // NULL if a name is not defined for this resource struct RES_DESCRIPTOR *next; } RES_DESCRIPTOR; void free_res_descriptor(RES_DESCRIPTOR * rd) { RES_DESCRIPTOR *nxt; if (rd == NULL) return; nxt = rd->next; if (rd->name != NULL) free(rd->name); free(rd); free_res_descriptor(nxt); // tail recursive } /** * The purpose of this function is to parse the resource fork of a file, and to return * a data structure that is, in effect, a table of contents for the resource fork. The * data structure is a null-terminated linked list of entries. Each one describes one * resource. If the resource fork is empty, or if there is not a resource fork at all, * or an error occurs, this function returns NULL. * * A non-NULL answer should be freed by the caller, using free_res_descriptor. 
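 *
 * Illustrative layout sketch (classic Resource Manager format; the offsets
 * given here are an assumption for reference, the authoritative parsing is
 * the code below).  A resource fork contains a 16-byte header of
 * big-endian uint32s (dataOffset, mapOffset, dataLength, mapLength), a
 * data area in which every resource is stored as a 4-byte big-endian
 * length followed by that many bytes, and a map:
 *
 *   map + 0                copy of the header, handles, attributes
 *   map + typeListOffset   uint16 typeCount-1, then per type:
 *                            char type[4], uint16 count-1,
 *                            uint16 refListOffset (from the type list)
 *   reference list items   uint16 resID, int16 resNameOffset (from the
 *                          name list, -1 if unnamed), uint8 attributes,
 *                          3-byte resDataOffset (from dataOffset),
 *                          uint32 handle
 *   map + nameListOffset   Pascal-style names (length byte + bytes)
 *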
* */ static RES_DESCRIPTOR * hfs_parse_resource_fork(TSK_FS_FILE * fs_file) { RES_DESCRIPTOR *result = NULL; RES_DESCRIPTOR *last = NULL; TSK_FS_INFO *fs_info; hfs_fork *fork_info; hfs_fork *resForkInfo; uint64_t resSize; const TSK_FS_ATTR *rAttr; hfs_resource_fork_header rfHeader; hfs_resource_fork_header *resHead; uint32_t dataOffset; uint32_t mapOffset; uint32_t mapLength; char *map; int attrReadResult; int attrReadResult1; int attrReadResult2; hfs_resource_fork_map_header *mapHdr; uint16_t typeListOffset; uint16_t nameListOffset; unsigned char hasNameList; char *nameListBegin = NULL; hfs_resource_type_list *typeList; uint16_t numTypes; hfs_resource_type_list_item *tlItem; int mindx; // index for looping over resource types if (fs_file == NULL) { error_detected(TSK_ERR_FS_ARG, "hfs_parse_resource_fork: null fs_file"); return NULL; } if (fs_file->meta == NULL) { error_detected(TSK_ERR_FS_ARG, "hfs_parse_resource_fork: fs_file has null metadata"); return NULL; } if (fs_file->meta->content_ptr == NULL) { if (tsk_verbose) fprintf(stderr, "hfs_parse_resource_fork: fs_file has null fork data structures, so no resources.\n"); return NULL; } // Extract the fs fs_info = fs_file->fs_info; if (fs_info == NULL) { error_detected(TSK_ERR_FS_ARG, "hfs_parse_resource_fork: null fs within fs_info"); return NULL; } // Try to look at the Resource Fork for an HFS+ file // Should be able to cast this to hfs_fork * fork_info = (hfs_fork *) fs_file->meta->content_ptr; // The data fork // The resource fork is the second one. resForkInfo = &fork_info[1]; resSize = tsk_getu64(fs_info->endian, resForkInfo->logic_sz); //uint32_t numBlocks = tsk_getu32(fs_info->endian, resForkInfo->total_blk); //uint32_t clmpSize = tsk_getu32(fs_info->endian, resForkInfo->clmp_sz); // Hmm, certainly no resources here! 
if (resSize == 0) { return NULL; } // OK, resource size must be > 0 // find the attribute for the resource fork rAttr = tsk_fs_file_attr_get_type(fs_file, TSK_FS_ATTR_TYPE_HFS_RSRC, HFS_FS_ATTR_ID_RSRC, TRUE); if (rAttr == NULL) { error_returned ("hfs_parse_resource_fork: could not get the resource fork attribute"); return NULL; } // JUST read the resource fork header attrReadResult1 = tsk_fs_attr_read(rAttr, 0, (char *) &rfHeader, sizeof(hfs_resource_fork_header), TSK_FS_FILE_READ_FLAG_NONE); if (attrReadResult1 < 0 || attrReadResult1 != sizeof(hfs_resource_fork_header)) { error_returned (" hfs_parse_resource_fork: trying to read the resource fork header"); return NULL; } // Begin to parse the resource fork resHead = &rfHeader; dataOffset = tsk_getu32(fs_info->endian, resHead->dataOffset); mapOffset = tsk_getu32(fs_info->endian, resHead->mapOffset); //uint32_t dataLength = tsk_getu32(fs_info->endian, resHead->dataLength); mapLength = tsk_getu32(fs_info->endian, resHead->mapLength); // Read in the WHOLE map map = (char *) tsk_malloc(mapLength); if (map == NULL) { error_returned ("- hfs_parse_resource_fork: could not allocate space for the resource fork map"); return NULL; } attrReadResult = tsk_fs_attr_read(rAttr, (uint64_t) mapOffset, map, (size_t) mapLength, TSK_FS_FILE_READ_FLAG_NONE); if (attrReadResult < 0 || attrReadResult != mapLength) { error_returned ("- hfs_parse_resource_fork: could not read the map"); free(map); return NULL; } mapHdr = (hfs_resource_fork_map_header *) map; typeListOffset = tsk_getu16(fs_info->endian, mapHdr->typeListOffset); nameListOffset = tsk_getu16(fs_info->endian, mapHdr->nameListOffset); if (nameListOffset >= mapLength || nameListOffset == 0) { hasNameList = FALSE; } else { hasNameList = TRUE; nameListBegin = map + nameListOffset; } typeList = (hfs_resource_type_list *) (map + typeListOffset); numTypes = tsk_getu16(fs_info->endian, typeList->typeCount) + 1; for (mindx = 0; mindx < numTypes; mindx++) { uint16_t numRes; uint16_t refOff; int pindx; // index for looping over resources uint16_t rID; uint32_t rOffset; tlItem = &(typeList->type[mindx]); numRes = tsk_getu16(fs_info->endian, tlItem->count) + 1; refOff = tsk_getu16(fs_info->endian, tlItem->offset); for (pindx = 0; pindx < numRes; pindx++) { int16_t nameOffset; char *nameBuffer; RES_DESCRIPTOR *rsrc; char lenBuff[4]; // first 4 bytes of a resource encodes its length uint32_t rLen; // Resource length hfs_resource_refListItem *item = ((hfs_resource_refListItem *) (((uint8_t *) typeList) + refOff)) + pindx; nameOffset = tsk_gets16(fs_info->endian, item->resNameOffset); nameBuffer = NULL; if (hasNameList && nameOffset != -1) { char *name = nameListBegin + nameOffset; uint8_t nameLen = (uint8_t) name[0]; nameBuffer = tsk_malloc(nameLen + 1); if (nameBuffer == NULL) { error_returned ("hfs_parse_resource_fork: allocating space for the name of a resource"); free_res_descriptor(result); return NULL; } memcpy(nameBuffer, name + 1, nameLen); nameBuffer[nameLen] = (char) 0; } else { nameBuffer = tsk_malloc(7); if (nameBuffer == NULL) { error_returned ("hfs_parse_resource_fork: allocating space for the (null) name of a resource"); free_res_descriptor(result); return NULL; } memcpy(nameBuffer, "", 6); nameBuffer[6] = (char) 0; } rsrc = (RES_DESCRIPTOR *) tsk_malloc(sizeof(RES_DESCRIPTOR)); if (rsrc == NULL) { error_returned ("hfs_parse_resource_fork: space for a resource descriptor"); free_res_descriptor(result); return NULL; } // Build the linked list if (result == NULL) result = rsrc; if (last != NULL) 
last->next = rsrc; last = rsrc; rsrc->next = NULL; rID = tsk_getu16(fs_info->endian, item->resID); rOffset = tsk_getu24(fs_info->endian, item->resDataOffset) + dataOffset; // Just read the first four bytes of the resource to get its length. It MUST // be at least 4 bytes long attrReadResult2 = tsk_fs_attr_read(rAttr, (uint64_t) rOffset, lenBuff, (size_t) 4, TSK_FS_FILE_READ_FLAG_NONE); if (attrReadResult2 != 4) { error_returned ("- hfs_parse_resource_fork: could not read the 4-byte length at beginning of resource"); free_res_descriptor(result); return NULL; } rLen = tsk_getu32(TSK_BIG_ENDIAN, lenBuff); //TODO rsrc->id = rID; rsrc->offset = rOffset + 4; memcpy(rsrc->type, tlItem->type, 4); rsrc->type[4] = (char) 0; rsrc->length = rLen; rsrc->name = nameBuffer; } // END loop over resources of one type } // END loop over resource types return result; } static uint8_t hfs_load_attrs(TSK_FS_FILE * fs_file) { TSK_FS_INFO *fs; HFS_INFO *hfs; TSK_FS_ATTR *fs_attr; TSK_FS_ATTR_RUN *attr_run; hfs_fork *forkx; unsigned char resource_fork_has_contents = FALSE; unsigned char compression_flag = FALSE; unsigned char isCompressed = FALSE; unsigned char compDataInRSRCFork = FALSE; uint64_t uncompressedSize; uint64_t logicalSize; // of a fork // clean up any error messages that are lying around tsk_error_reset(); if ((fs_file == NULL) || (fs_file->meta == NULL) || (fs_file->fs_info == NULL)) { error_detected(TSK_ERR_FS_ARG, "hfs_load_attrs: fs_file or meta is NULL"); return 1; } fs = (TSK_FS_INFO *) fs_file->fs_info; hfs = (HFS_INFO *) fs; if (tsk_verbose) tsk_fprintf(stderr, "hfs_load_attrs: Processing file %" PRIuINUM "\n", fs_file->meta->addr); // see if we have already loaded the runs if (fs_file->meta->attr_state == TSK_FS_META_ATTR_STUDIED) { if (tsk_verbose) tsk_fprintf(stderr, "hfs_load_attrs: Attributes already loaded\n"); return 0; } else if (fs_file->meta->attr_state == TSK_FS_META_ATTR_ERROR) { if (tsk_verbose) tsk_fprintf(stderr, "hfs_load_attrs: Previous attempt to load attributes resulted in error\n"); return 1; } // Now (re)-initialize the attrlist that will hold the list of attributes if (fs_file->meta->attr != NULL) { tsk_fs_attrlist_markunused(fs_file->meta->attr); } else if (fs_file->meta->attr == NULL) { fs_file->meta->attr = tsk_fs_attrlist_alloc(); } /****************** EXTENDED ATTRIBUTES *******************************/ // We do these first, so that we can detect the mode of compression, if // any. We need to know that mode in order to handle the forks. if (tsk_verbose) tsk_fprintf(stderr, "hfs_load_attrs: loading the HFS+ extended attributes\n"); if (hfs_load_extended_attrs(fs_file, &isCompressed, &compDataInRSRCFork, &uncompressedSize)) { error_returned(" - hfs_load_attrs A"); fs_file->meta->attr_state = TSK_FS_META_ATTR_ERROR; return 1; } if (isCompressed) { fs_file->meta->size = uncompressedSize; } // This is the flag indicating compression, from the Catalog File record. compression_flag = (fs_file->meta->flags & TSK_FS_META_FLAG_COMP) != 0; if (compression_flag && !isCompressed) { if (tsk_verbose) tsk_fprintf(stderr, "hfs_load_attrs: WARNING, HFS marks this as a" " compressed file, but no compression record was found.\n"); } if (isCompressed && !compression_flag) { if (tsk_verbose) tsk_fprintf(stderr, "hfs_load_attrs: WARNING, this file has a compression" " record, but the HFS compression flag is not set.\n"); } /************* FORKS (both) ************************************/ // Process the data and resource forks. 
We only do this if the // fork data structures are non-null, so test that: if (fs_file->meta->content_ptr != NULL) { /************** DATA FORK STUFF ***************************/ // Get the data fork data-structure forkx = (hfs_fork *) fs_file->meta->content_ptr; // If this is a compressed file, then either this attribute is already loaded // because the data was in the compression record, OR // the compressed data is in the resource fork. We will load those runs when // we handle the resource fork. if (!isCompressed) { // We only load this attribute if this fork has non-zero length // or if this is a REG or LNK file. Otherwise, we skip logicalSize = tsk_getu64(fs->endian, forkx->logic_sz); if (logicalSize > 0 || fs_file->meta->type == TSK_FS_META_TYPE_REG || fs_file->meta->type == TSK_FS_META_TYPE_LNK) { if (tsk_verbose) tsk_fprintf(stderr, "hfs_load_attrs: loading the data fork attribute\n"); // get an attribute structure to store the data in if ((fs_attr = tsk_fs_attrlist_getnew(fs_file->meta->attr, TSK_FS_ATTR_NONRES)) == NULL) { error_returned(" - hfs_load_attrs"); return 1; } /* NOTE that fs_attr is now tied to fs_file->meta->attr. * that means that we do not need to free it if we abort in the * following code (and doing so will cause double free errors). */ if (logicalSize > 0) { // Convert runs of blocks to the TSK internal form if (((attr_run = hfs_extents_to_attr(fs, forkx->extents, 0)) == NULL) && (tsk_error_get_errno() != 0)) { error_returned(" - hfs_load_attrs"); return 1; } // add the runs to the attribute and the attribute to the file. if (tsk_fs_attr_set_run(fs_file, fs_attr, attr_run, "", TSK_FS_ATTR_TYPE_HFS_DATA, HFS_FS_ATTR_ID_DATA, logicalSize, logicalSize, (TSK_OFF_T) tsk_getu32(fs->endian, forkx->total_blk) * fs->block_size, 0, 0)) { error_returned(" - hfs_load_attrs (DATA)"); tsk_fs_attr_run_free(attr_run); return 1; } // see if extents file has additional runs if (hfs_ext_find_extent_record_attr(hfs, (uint32_t) fs_file->meta->addr, fs_attr, TRUE)) { error_returned(" - hfs_load_attrs B"); fs_file->meta->attr_state = TSK_FS_META_ATTR_ERROR; return 1; } } else { // logicalSize == 0, but this is either a REG or LNK file // so, it should have a DATA fork attribute of zero length. if (tsk_fs_attr_set_run(fs_file, fs_attr, NULL, "", TSK_FS_ATTR_TYPE_HFS_DATA, HFS_FS_ATTR_ID_DATA, 0, 0, 0, 0, 0)) { error_returned(" - hfs_load_attrs (non-file)"); return 1; } } } // END logicalSize>0 or REG or LNK file type } // END if not Compressed /************** RESOURCE FORK STUFF ************************************/ // Get the resource fork. //Note that content_ptr points to an array of two // hfs_fork data structures, the second of which // describes the blocks of the resource fork. forkx = &((hfs_fork *) fs_file->meta->content_ptr)[1]; logicalSize = tsk_getu64(fs->endian, forkx->logic_sz); // Skip if the length of the resource fork is zero if (logicalSize > 0) { if (tsk_verbose) tsk_fprintf(stderr, "hfs_load_attrs: loading the resource fork\n"); resource_fork_has_contents = TRUE; // get an attribute structure to store the resource fork data in. We will // reuse the fs_attr variable, since we are done with the data fork. if ((fs_attr = tsk_fs_attrlist_getnew(fs_file->meta->attr, TSK_FS_ATTR_NONRES)) == NULL) { error_returned(" - hfs_load_attrs (RSRC)"); return 1; } /* NOTE that fs_attr is now tied to fs_file->meta->attr. * that means that we do not need to free it if we abort in the * following code (and doing so will cause double free errors). 
*/ // convert the resource fork to the TSK format if (((attr_run = hfs_extents_to_attr(fs, forkx->extents, 0)) == NULL) && (tsk_error_get_errno() != 0)) { error_returned(" - hfs_load_attrs"); return 1; } // add the runs to the attribute and the attribute to the file. if (tsk_fs_attr_set_run(fs_file, fs_attr, attr_run, "RSRC", TSK_FS_ATTR_TYPE_HFS_RSRC, HFS_FS_ATTR_ID_RSRC, tsk_getu64(fs->endian, forkx->logic_sz), tsk_getu64(fs->endian, forkx->logic_sz), (TSK_OFF_T) tsk_getu32(fs->endian, forkx->total_blk) * fs->block_size, 0, 0)) { error_returned(" - hfs_load_attrs (RSRC)"); tsk_fs_attr_run_free(attr_run); return 1; } // see if extents file has additional runs for the resource fork. if (hfs_ext_find_extent_record_attr(hfs, (uint32_t) fs_file->meta->addr, fs_attr, FALSE)) { error_returned(" - hfs_load_attrs C"); fs_file->meta->attr_state = TSK_FS_META_ATTR_ERROR; return 1; } if (isCompressed && compDataInRSRCFork) { // OK, we are going to load those same resource fork blocks as the "DATA" // attribute, but will mark it as compressed. // get an attribute structure to store the resource fork data in. We will // reuse the fs_attr variable, since we are done with the data fork. if (tsk_verbose) tsk_fprintf(stderr, "File is compressed with data in the resource fork. " "Loading the default DATA attribute.\n"); if ((fs_attr = tsk_fs_attrlist_getnew(fs_file->meta->attr, TSK_FS_ATTR_NONRES)) == NULL) { error_returned (" - hfs_load_attrs (RSRC loading as DATA)"); return 1; } /* NOTE that fs_attr is now tied to fs_file->meta->attr. * that means that we do not need to free it if we abort in the * following code (and doing so will cause double free errors). */ #ifdef HAVE_LIBZ // convert the resource fork to the TSK format if (((attr_run = hfs_extents_to_attr(fs, forkx->extents, 0)) == NULL) && (tsk_error_get_errno() != 0)) { error_returned (" - hfs_load_attrs, RSRC fork as DATA fork"); return 1; } if (tsk_verbose) tsk_fprintf(stderr, "hfs_load_attrs: Loading RSRC fork block runs as the default DATA attribute.\n"); // add the runs to the attribute and the attribute to the file. if (tsk_fs_attr_set_run(fs_file, fs_attr, attr_run, "DECOMP", TSK_FS_ATTR_TYPE_HFS_DATA, HFS_FS_ATTR_ID_DATA, logicalSize, logicalSize, (TSK_OFF_T) tsk_getu32(fs->endian, forkx->total_blk) * fs->block_size, TSK_FS_ATTR_COMP | TSK_FS_ATTR_NONRES, 0)) { error_returned (" - hfs_load_attrs (RSRC loading as DATA)"); tsk_fs_attr_run_free(attr_run); return 1; } // see if extents file has additional runs for the resource fork. if (hfs_ext_find_extent_record_attr(hfs, (uint32_t) fs_file->meta->addr, fs_attr, FALSE)) { error_returned (" - hfs_load_attrs (RSRC loading as DATA"); fs_file->meta->attr_state = TSK_FS_META_ATTR_ERROR; return 1; } if (tsk_verbose) tsk_fprintf(stderr, "hfs_load_attrs: setting the \"special\" function pointers to inflate compressed data.\n"); fs_attr->w = hfs_attr_walk_special; fs_attr->r = hfs_file_read_special; #else // We don't have zlib, so the uncompressed data is not available to us, // however, we must have a default DATA attribute, or icat will misbehave. 
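/* Illustrative usage sketch (assumes HAVE_LIBZ; error handling omitted):
 * because the HAVE_LIBZ branch above points fs_attr->w at
 * hfs_attr_walk_special and fs_attr->r at hfs_file_read_special, callers
 * need no compression-specific code -- a generic read of the default DATA
 * attribute returns decompressed bytes:
 *
 *   const TSK_FS_ATTR *data = tsk_fs_file_attr_get_type(fs_file,
 *       TSK_FS_ATTR_TYPE_HFS_DATA, HFS_FS_ATTR_ID_DATA, 1);
 *   char buf[4096];
 *   ssize_t n = tsk_fs_attr_read(data, 0, buf, sizeof(buf),
 *       TSK_FS_FILE_READ_FLAG_NONE);  // routed to hfs_file_read_special
 */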
if (tsk_verbose) tsk_fprintf(stderr, "hfs_load_attrs: No zlib compression library, so setting a zero-length default DATA attribute.\n"); if (tsk_fs_attr_set_run(fs_file, fs_attr, NULL, "DATA", TSK_FS_ATTR_TYPE_HFS_DATA, HFS_FS_ATTR_ID_DATA, 0, 0, 0, 0, 0)) { error_returned(" - hfs_load_attrs (non-file)"); return 1; } #endif } } // END resource fork size > 0 } // END the fork data structures are non-NULL if (isCompressed && compDataInRSRCFork && !resource_fork_has_contents) { if (tsk_verbose) tsk_fprintf(stderr, "hfs_load_attrs: WARNING, compression record claims that compressed data" " is in the Resource Fork, but that fork is empty or non-existent.\n"); } // Finish up. fs_file->meta->attr_state = TSK_FS_META_ATTR_STUDIED; return 0; } /** \internal * Get allocation status of file system block. * adapted from IsAllocationBlockUsed from: * http://developer.apple.com/technotes/tn/tn1150.html * * @param hfs File system being analyzed * @param b Block address * @returns 1 if allocated, 0 if not, -1 on error */ static int8_t hfs_block_is_alloc(HFS_INFO * hfs, TSK_DADDR_T a_addr) { TSK_FS_INFO *fs = &(hfs->fs_info); TSK_OFF_T b; size_t b2; // lazy loading if (hfs->blockmap_file == NULL) { if ((hfs->blockmap_file = tsk_fs_file_open_meta(fs, NULL, HFS_ALLOCATION_FILE_ID)) == NULL) { tsk_error_errstr2_concat(" - Loading blockmap file"); return -1; } /* cache the data attribute */ hfs->blockmap_attr = tsk_fs_attrlist_get(hfs->blockmap_file->meta->attr, TSK_FS_ATTR_TYPE_DEFAULT); if (!hfs->blockmap_attr) { tsk_error_errstr2_concat (" - Data Attribute not found in Blockmap File"); return -1; } hfs->blockmap_cache_start = -1; hfs->blockmap_cache_len = 0; } // get the byte offset b = (TSK_OFF_T) a_addr / 8; if (b > hfs->blockmap_file->meta->size) { tsk_error_set_errno(TSK_ERR_FS_CORRUPT); tsk_error_set_errstr("hfs_block_is_alloc: block %" PRIuDADDR " is too large for bitmap (%" PRIuOFF ")", a_addr, hfs->blockmap_file->meta->size); return -1; } // see if it is in the cache if ((hfs->blockmap_cache_start == -1) || (hfs->blockmap_cache_start > b) || (hfs->blockmap_cache_start + hfs->blockmap_cache_len <= b)) { size_t cnt = tsk_fs_attr_read(hfs->blockmap_attr, b, hfs->blockmap_cache, sizeof(hfs->blockmap_cache), 0); if (cnt < 1) { tsk_error_set_errstr2 ("hfs_block_is_alloc: Error reading block bitmap at offset %" PRIuOFF, b); return -1; } hfs->blockmap_cache_start = b; hfs->blockmap_cache_len = cnt; } b2 = (size_t) (b - hfs->blockmap_cache_start); return (hfs->blockmap_cache[b2] & (1 << (7 - (a_addr % 8)))) != 0; } TSK_FS_BLOCK_FLAG_ENUM hfs_block_getflags(TSK_FS_INFO * a_fs, TSK_DADDR_T a_addr) { return (hfs_block_is_alloc((HFS_INFO *) a_fs, a_addr) == 1) ? TSK_FS_BLOCK_FLAG_ALLOC : TSK_FS_BLOCK_FLAG_UNALLOC; } static uint8_t hfs_block_walk(TSK_FS_INFO * fs, TSK_DADDR_T start_blk, TSK_DADDR_T end_blk, TSK_FS_BLOCK_WALK_FLAG_ENUM flags, TSK_FS_BLOCK_WALK_CB action, void *ptr) { char *myname = "hfs_block_walk"; HFS_INFO *hfs = (HFS_INFO *) fs; TSK_FS_BLOCK *fs_block; TSK_DADDR_T addr; if (tsk_verbose) tsk_fprintf(stderr, "%s: start_blk: %" PRIuDADDR " end_blk: %" PRIuDADDR " flags: %" PRIu32 "\n", myname, start_blk, end_blk, flags); // clean up any error messages that are lying around tsk_error_reset(); /* * Sanity checks. 
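 *
 * (Aside on hfs_block_is_alloc() above, as a worked example: the bitmap in
 * the Allocation File is MSB-first, so block a_addr is tested in byte
 * a_addr / 8 against the mask 1 << (7 - (a_addr % 8)).  Block 10, for
 * instance, maps to byte 1 and mask 0x20.)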
*/ if (start_blk < fs->first_block || start_blk > fs->last_block) { tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); tsk_error_set_errstr("%s: invalid start block number: %" PRIuDADDR "", myname, start_blk); return 1; } if (end_blk < fs->first_block || end_blk > fs->last_block) { tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); tsk_error_set_errstr("%s: invalid last block number: %" PRIuDADDR "", myname, end_blk); return 1; } if (start_blk > end_blk) XSWAP(start_blk, end_blk); /* Sanity check on flags -- make sure at least one ALLOC is set */ if (((flags & TSK_FS_BLOCK_WALK_FLAG_ALLOC) == 0) && ((flags & TSK_FS_BLOCK_WALK_FLAG_UNALLOC) == 0)) { flags |= (TSK_FS_BLOCK_WALK_FLAG_ALLOC | TSK_FS_BLOCK_WALK_FLAG_UNALLOC); } if (((flags & TSK_FS_BLOCK_WALK_FLAG_META) == 0) && ((flags & TSK_FS_BLOCK_WALK_FLAG_CONT) == 0)) { flags |= (TSK_FS_BLOCK_WALK_FLAG_CONT | TSK_FS_BLOCK_WALK_FLAG_META); } if ((fs_block = tsk_fs_block_alloc(fs)) == NULL) { return 1; } /* * Iterate */ for (addr = start_blk; addr <= end_blk; addr++) { int retval; int myflags; /* identify if the block is allocated or not */ myflags = hfs_block_is_alloc(hfs, addr) ? TSK_FS_BLOCK_FLAG_ALLOC : TSK_FS_BLOCK_FLAG_UNALLOC; // test if we should call the callback with this one if ((myflags & TSK_FS_BLOCK_FLAG_ALLOC) && (!(flags & TSK_FS_BLOCK_WALK_FLAG_ALLOC))) continue; else if ((myflags & TSK_FS_BLOCK_FLAG_UNALLOC) && (!(flags & TSK_FS_BLOCK_WALK_FLAG_UNALLOC))) continue; if (flags & TSK_FS_BLOCK_WALK_FLAG_AONLY) myflags |= TSK_FS_BLOCK_FLAG_AONLY; if (tsk_fs_block_get_flag(fs, fs_block, addr, (TSK_FS_BLOCK_FLAG_ENUM) myflags) == NULL) { tsk_fs_block_free(fs_block); return 1; } retval = action(fs_block, ptr); if (TSK_WALK_STOP == retval) { break; } else if (TSK_WALK_ERROR == retval) { tsk_fs_block_free(fs_block); return 1; } } tsk_fs_block_free(fs_block); return 0; } uint8_t hfs_inode_walk(TSK_FS_INFO * fs, TSK_INUM_T start_inum, TSK_INUM_T end_inum, TSK_FS_META_FLAG_ENUM flags, TSK_FS_META_WALK_CB action, void *ptr) { TSK_INUM_T inum; TSK_FS_FILE *fs_file; if (tsk_verbose) tsk_fprintf(stderr, "hfs_inode_walk: start_inum: %" PRIuINUM " end_inum: %" PRIuINUM " flags: %" PRIu32 "\n", start_inum, end_inum, flags); /* * Sanity checks. 
*/ if (start_inum < fs->first_inum || start_inum > fs->last_inum) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); tsk_error_set_errstr("inode_walk: Start inode: %" PRIuINUM "", start_inum); return 1; } else if (end_inum < fs->first_inum || end_inum > fs->last_inum || end_inum < start_inum) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); tsk_error_set_errstr("inode_walk: End inode: %" PRIuINUM "", end_inum); return 1; } /* If ORPHAN is wanted, then make sure that the flags are correct */ if (flags & TSK_FS_META_FLAG_ORPHAN) { flags |= TSK_FS_META_FLAG_UNALLOC; flags &= ~TSK_FS_META_FLAG_ALLOC; flags |= TSK_FS_META_FLAG_USED; flags &= ~TSK_FS_META_FLAG_UNUSED; } else { if (((flags & TSK_FS_META_FLAG_ALLOC) == 0) && ((flags & TSK_FS_META_FLAG_UNALLOC) == 0)) { flags |= (TSK_FS_META_FLAG_ALLOC | TSK_FS_META_FLAG_UNALLOC); } /* If neither of the USED or UNUSED flags are set, then set them * both */ if (((flags & TSK_FS_META_FLAG_USED) == 0) && ((flags & TSK_FS_META_FLAG_UNUSED) == 0)) { flags |= (TSK_FS_META_FLAG_USED | TSK_FS_META_FLAG_UNUSED); } } if ((fs_file = tsk_fs_file_alloc(fs)) == NULL) return 1; if ((fs_file->meta = tsk_fs_meta_alloc(HFS_FILE_CONTENT_LEN)) == NULL) return 1; if (start_inum > end_inum) XSWAP(start_inum, end_inum); for (inum = start_inum; inum <= end_inum; inum++) { int retval; if (hfs_inode_lookup(fs, fs_file, inum)) { // deleted files may not exist in the catalog if (tsk_error_get_errno() == TSK_ERR_FS_INODE_NUM) { tsk_error_reset(); continue; } else { return 1; } } if ((fs_file->meta->flags & flags) != fs_file->meta->flags) continue; /* call action */ retval = action(fs_file, ptr); if (retval == TSK_WALK_STOP) { tsk_fs_file_close(fs_file); return 0; } else if (retval == TSK_WALK_ERROR) { tsk_fs_file_close(fs_file); return 1; } } tsk_fs_file_close(fs_file); return 0; } /* return the name of a file at a given inode * in a newly-allocated string, or NULL on error */ char * hfs_get_inode_name(TSK_FS_INFO * fs, TSK_INUM_T inum) { HFS_INFO *hfs = (HFS_INFO *) fs; HFS_ENTRY entry; char *fn = NULL; if (hfs_cat_file_lookup(hfs, inum, &entry, FALSE)) return NULL; fn = malloc(HFS_MAXNAMLEN + 1); if (fn == NULL) return NULL; if (hfs_UTF16toUTF8(fs, entry.thread.name.unicode, tsk_getu16(fs->endian, entry.thread.name.length), fn, HFS_MAXNAMLEN + 1, HFS_U16U8_FLAG_REPLACE_SLASH)) { free(fn); return NULL; } return fn; } /* print the name of a file at a given inode * returns 0 on success, 1 on error */ static uint8_t print_inode_name(FILE * hFile, TSK_FS_INFO * fs, TSK_INUM_T inum) { HFS_INFO *hfs = (HFS_INFO *) fs; char fn[HFS_MAXNAMLEN + 1]; HFS_ENTRY entry; if (hfs_cat_file_lookup(hfs, inum, &entry, FALSE)) return 1; if (hfs_UTF16toUTF8(fs, entry.thread.name.unicode, tsk_getu16(fs->endian, entry.thread.name.length), fn, HFS_MAXNAMLEN + 1, HFS_U16U8_FLAG_REPLACE_SLASH)) return 1; tsk_fprintf(hFile, "%s", fn); return 0; } /* tail recursive function to print a path... prints the parent path, then * appends / and the name of the given inode. 
prints nothing for root * returns 0 on success, 1 on failure */ static uint8_t print_parent_path(FILE * hFile, TSK_FS_INFO * fs, TSK_INUM_T inum) { HFS_INFO *hfs = (HFS_INFO *) fs; char fn[HFS_MAXNAMLEN + 1]; HFS_ENTRY entry; if (inum == HFS_ROOT_INUM) return 0; if (inum <= HFS_ROOT_INUM) { tsk_error_set_errno(TSK_ERR_FS_INODE_NUM); tsk_error_set_errstr("print_parent_path: out-of-range inode %" PRIuINUM, inum); return 1; } if (hfs_cat_file_lookup(hfs, inum, &entry, FALSE)) return 1; if (hfs_UTF16toUTF8(fs, entry.thread.name.unicode, tsk_getu16(fs->endian, entry.thread.name.length), fn, HFS_MAXNAMLEN + 1, HFS_U16U8_FLAG_REPLACE_SLASH | HFS_U16U8_FLAG_REPLACE_CONTROL)) return 1; if (print_parent_path(hFile, fs, (TSK_INUM_T) tsk_getu32(fs->endian, entry.thread.parent_cnid))) return 1; tsk_fprintf(hFile, "/%s", fn); return 0; } /* print the file name corresponding to an inode, in brackets after a space. * uses Unix path conventions, and does not include the volume name. * returns 0 on success, 1 on failure */ static uint8_t print_inode_file(FILE * hFile, TSK_FS_INFO * fs, TSK_INUM_T inum) { tsk_fprintf(hFile, " ["); if (inum == HFS_ROOT_INUM) tsk_fprintf(hFile, "/"); else { if (print_parent_path(hFile, fs, inum)) { tsk_fprintf(hFile, "unknown]"); return 1; } } tsk_fprintf(hFile, "]"); return 0; } static uint8_t hfs_fscheck(TSK_FS_INFO * fs, FILE * hFile) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNSUPFUNC); tsk_error_set_errstr("fscheck not implemented for HFS yet"); return 1; } static uint8_t hfs_fsstat(TSK_FS_INFO * fs, FILE * hFile) { // char *myname = "hfs_fsstat"; HFS_INFO *hfs = (HFS_INFO *) fs; hfs_plus_vh *sb = hfs->fs; time_t mac_time; TSK_INUM_T inode; char timeBuf[128]; if (tsk_verbose) tsk_fprintf(stderr, "hfs_fstat: called\n"); tsk_fprintf(hFile, "FILE SYSTEM INFORMATION\n"); tsk_fprintf(hFile, "--------------------------------------------\n"); tsk_fprintf(hFile, "File System Type: "); if (tsk_getu16(fs->endian, hfs->fs->signature) == HFS_VH_SIG_HFSPLUS) tsk_fprintf(hFile, "HFS+\n"); else if (tsk_getu16(fs->endian, hfs->fs->signature) == HFS_VH_SIG_HFSX) tsk_fprintf(hFile, "HFSX\n"); else tsk_fprintf(hFile, "Unknown\n"); // print name and number of version tsk_fprintf(hFile, "File System Version: "); switch (tsk_getu16(fs->endian, hfs->fs->version)) { case 4: tsk_fprintf(hFile, "HFS+\n"); break; case 5: tsk_fprintf(hFile, "HFSX\n"); break; default: tsk_fprintf(hFile, "Unknown (%" PRIu16 ")\n", tsk_getu16(fs->endian, hfs->fs->version)); break; } if (tsk_getu16(fs->endian, hfs->fs->signature) == HFS_VH_SIG_HFSX) { tsk_fprintf(hFile, "Case Sensitive: %s\n", hfs->is_case_sensitive ? 
"yes" : "no"); } if (hfs->hfs_wrapper_offset > 0) { tsk_fprintf(hFile, "File system is embedded in an HFS wrapper at offset %" PRIuOFF "\n", hfs->hfs_wrapper_offset); } tsk_fprintf(hFile, "\nVolume Name: "); if (print_inode_name(hFile, fs, HFS_ROOT_INUM)) return 1; tsk_fprintf(hFile, "\n"); tsk_fprintf(hFile, "Volume Identifier: %08" PRIx32 "%08" PRIx32 "\n", tsk_getu32(fs->endian, sb->finder_info[HFS_VH_FI_ID1]), tsk_getu32(fs->endian, sb->finder_info[HFS_VH_FI_ID2])); // print last mounted info tsk_fprintf(hFile, "\nLast Mounted By: "); if (tsk_getu32(fs->endian, sb->last_mnt_ver) == HFS_VH_MVER_HFSPLUS) tsk_fprintf(hFile, "Mac OS X\n"); else if (tsk_getu32(fs->endian, sb->last_mnt_ver) == HFS_VH_MVER_HFSJ) tsk_fprintf(hFile, "Mac OS X, Journaled\n"); else if (tsk_getu32(fs->endian, sb->last_mnt_ver) == HFS_VH_MVER_FSK) tsk_fprintf(hFile, "failed journal replay\n"); else if (tsk_getu32(fs->endian, sb->last_mnt_ver) == HFS_VH_MVER_FSCK) tsk_fprintf(hFile, "fsck_hfs\n"); else if (tsk_getu32(fs->endian, sb->last_mnt_ver) == HFS_VH_MVER_OS89) tsk_fprintf(hFile, "Mac OS 8.1 - 9.2.2\n"); else tsk_fprintf(hFile, "Unknown (%" PRIx32 "\n", tsk_getu32(fs->endian, sb->last_mnt_ver)); /* State of the file system */ if ((tsk_getu32(fs->endian, hfs->fs->attr) & HFS_VH_ATTR_UNMOUNTED) && (!(tsk_getu32(fs->endian, hfs->fs->attr) & HFS_VH_ATTR_INCONSISTENT))) tsk_fprintf(hFile, "Volume Unmounted Properly\n"); else tsk_fprintf(hFile, "Volume Unmounted Improperly\n"); tsk_fprintf(hFile, "Mount Count: %" PRIu32 "\n", tsk_getu32(fs->endian, sb->write_cnt)); // Dates // (creation date is in local time zone, not UTC, according to TN 1150) mac_time = hfs_convert_2_unix_time(tsk_getu32(fs->endian, hfs->fs->cr_date)); tsk_fprintf(hFile, "\nCreation Date: \t%s\n", tsk_fs_time_to_str(mktime(gmtime(&mac_time)), timeBuf)); mac_time = hfs_convert_2_unix_time(tsk_getu32(fs->endian, hfs->fs->m_date)); tsk_fprintf(hFile, "Last Written Date: \t%s\n", tsk_fs_time_to_str(mac_time, timeBuf)); mac_time = hfs_convert_2_unix_time(tsk_getu32(fs->endian, hfs->fs->bkup_date)); tsk_fprintf(hFile, "Last Backup Date: \t%s\n", tsk_fs_time_to_str(mac_time, timeBuf)); mac_time = hfs_convert_2_unix_time(tsk_getu32(fs->endian, hfs->fs->chk_date)); tsk_fprintf(hFile, "Last Checked Date: \t%s\n", tsk_fs_time_to_str(mac_time, timeBuf)); if (tsk_getu32(fs->endian, hfs->fs->attr) & HFS_VH_ATTR_SOFTWARE_LOCK) tsk_fprintf(hFile, "Software write protect enabled\n"); /* Print journal information */ if (tsk_getu32(fs->endian, sb->attr) & HFS_VH_ATTR_JOURNALED) { tsk_fprintf(hFile, "\nJournal Info Block: %" PRIu32 "\n", tsk_getu32(fs->endian, sb->jinfo_blk)); } tsk_fprintf(hFile, "\nMETADATA INFORMATION\n"); tsk_fprintf(hFile, "--------------------------------------------\n"); tsk_fprintf(hFile, "Range: %" PRIuINUM " - %" PRIuINUM "\n", fs->first_inum, fs->last_inum); inode = tsk_getu32(fs->endian, sb->finder_info[HFS_VH_FI_BOOT]); tsk_fprintf(hFile, "Bootable Folder ID: %" PRIuINUM, inode); if (inode > 0) print_inode_file(hFile, fs, inode); tsk_fprintf(hFile, "\n"); inode = tsk_getu32(fs->endian, sb->finder_info[HFS_VH_FI_START]); tsk_fprintf(hFile, "Startup App ID: %" PRIuINUM, inode); if (inode > 0) print_inode_file(hFile, fs, inode); tsk_fprintf(hFile, "\n"); inode = tsk_getu32(fs->endian, sb->finder_info[HFS_VH_FI_OPEN]); tsk_fprintf(hFile, "Startup Open Folder ID: %" PRIuINUM, inode); if (inode > 0) print_inode_file(hFile, fs, inode); tsk_fprintf(hFile, "\n"); inode = tsk_getu32(fs->endian, sb->finder_info[HFS_VH_FI_BOOT9]); tsk_fprintf(hFile, 
"Mac OS 8/9 Blessed System Folder ID: %" PRIuINUM, inode); if (inode > 0) print_inode_file(hFile, fs, inode); tsk_fprintf(hFile, "\n"); inode = tsk_getu32(fs->endian, sb->finder_info[HFS_VH_FI_BOOTX]); tsk_fprintf(hFile, "Mac OS X Blessed System Folder ID: %" PRIuINUM, inode); if (inode > 0) print_inode_file(hFile, fs, inode); tsk_fprintf(hFile, "\n"); tsk_fprintf(hFile, "Number of files: %" PRIu32 "\n", tsk_getu32(fs->endian, sb->file_cnt)); tsk_fprintf(hFile, "Number of folders: %" PRIu32 "\n", tsk_getu32(fs->endian, sb->fldr_cnt)); tsk_fprintf(hFile, "\nCONTENT INFORMATION\n"); tsk_fprintf(hFile, "--------------------------------------------\n"); tsk_fprintf(hFile, "Block Range: %" PRIuDADDR " - %" PRIuDADDR "\n", fs->first_block, fs->last_block); if (fs->last_block != fs->last_block_act) tsk_fprintf(hFile, "Total Range in Image: %" PRIuDADDR " - %" PRIuDADDR "\n", fs->first_block, fs->last_block_act); tsk_fprintf(hFile, "Allocation Block Size: %u\n", fs->block_size); tsk_fprintf(hFile, "Number of Free Blocks: %" PRIu32 "\n", tsk_getu32(fs->endian, sb->free_blks)); if (tsk_getu32(fs->endian, hfs->fs->attr) & HFS_VH_ATTR_BADBLOCKS) tsk_fprintf(hFile, "Volume has bad blocks\n"); return 0; } /************************* istat *******************************/ /** * Text encoding names defined in TN1150, Table 2. */ static char * text_encoding_name(uint32_t enc) { switch (enc) { case 0: return "MacRoman"; case 1: return "MacJapanese"; case 2: return "MacChineseTrad"; case 4: return "MacKorean"; case 5: return "MacArabic"; case 6: return "MacHebrew"; case 7: return "MacGreek"; case 8: return "MacCyrillic"; case 9: return "MacDevanagari"; case 10: return "MacGurmukhi"; case 11: return "MacGujarati"; case 12: return "MacOriya"; case 13: return "MacBengali"; case 14: return "MacTamil"; case 15: return "Telugu"; case 16: return "MacKannada"; case 17: return "MacMalayalam"; case 18: return "MacSinhalese"; case 19: return "MacBurmese"; case 20: return "MacKhmer"; case 21: return "MacThai"; case 22: return "MacLaotian"; case 23: return "MacGeorgian"; case 24: return "MacArmenian"; case 25: return "MacChineseSimp"; case 26: return "MacTibetan"; case 27: return "MacMongolian"; case 28: return "MacEthiopic"; case 29: return "MacCentralEurRoman"; case 30: return "MacVietnamese"; case 31: return "MacExtArabic"; case 33: return "MacSymbol"; case 34: return "MacDingbats"; case 35: return "MacTurkish"; case 36: return "MacCroatian"; case 37: return "MacIcelandic"; case 38: return "MacRomanian"; case 49: case 140: return "MacFarsi"; case 48: case 152: return "MacUkrainian"; default: return "Unknown encoding"; } } #define HFS_PRINT_WIDTH 8 typedef struct { FILE *hFile; int idx; TSK_DADDR_T startBlock; uint32_t blockCount; unsigned char accumulating; } HFS_PRINT_ADDR; static void output_print_addr(HFS_PRINT_ADDR * print) { if (!print->accumulating) return; if (print->blockCount == 1) { tsk_fprintf(print->hFile, "%" PRIuDADDR " ", print->startBlock); print->idx += 1; } else if (print->blockCount > 1) { tsk_fprintf(print->hFile, "%" PRIuDADDR "-%" PRIuDADDR " ", print->startBlock, print->startBlock + print->blockCount - 1); print->idx += 2; } if (print->idx >= HFS_PRINT_WIDTH) { tsk_fprintf(print->hFile, "\n"); print->idx = 0; } } static TSK_WALK_RET_ENUM print_addr_act(TSK_FS_FILE * fs_file, TSK_OFF_T a_off, TSK_DADDR_T addr, char *buf, size_t size, TSK_FS_BLOCK_FLAG_ENUM flags, void *ptr) { HFS_PRINT_ADDR *print = (HFS_PRINT_ADDR *) ptr; if (print->accumulating) { if (addr == print->startBlock + 
print->blockCount) { print->blockCount++; } else { output_print_addr(print); print->startBlock = addr; print->blockCount = 1; } } else { print->startBlock = addr; print->blockCount = 1; print->accumulating = TRUE; } return TSK_WALK_CONT; } /** * Print details on a specific file to a file handle. * * @param fs File system file is located in * @param hFile File name to print text to * @param inum Address of file in file system * @param numblock The number of blocks in file to force print (can go beyond file size) * @param sec_skew Clock skew in seconds to also print times in * * @returns 1 on error and 0 on success */ static uint8_t hfs_istat(TSK_FS_INFO * fs, FILE * hFile, TSK_INUM_T inum, TSK_DADDR_T numblock, int32_t sec_skew) { HFS_INFO *hfs = (HFS_INFO *) fs; TSK_FS_FILE *fs_file; char hfs_mode[12]; HFS_PRINT_ADDR print; HFS_ENTRY entry; char timeBuf[128]; // Compression ATTR, if there is one: const TSK_FS_ATTR *compressionAttr = NULL; RES_DESCRIPTOR *rd; // descriptor of a resource tsk_error_reset(); if (tsk_verbose) tsk_fprintf(stderr, "hfs_istat: inum: %" PRIuINUM " numblock: %" PRIu32 "\n", inum, numblock); if ((fs_file = tsk_fs_file_open_meta(fs, NULL, inum)) == NULL) { error_returned("hfs_istat: getting metadata for the file"); return 1; } if (inum >= HFS_FIRST_USER_CNID) { int rslt; tsk_fprintf(hFile, "File Path: "); rslt = print_parent_path(hFile, fs, inum); if (rslt != 0) tsk_fprintf(hFile, " Error in printing path\n"); else tsk_fprintf(hFile, "\n"); } else { // All of the files in this inum range have names without nulls, // slashes or control characters. So, it is OK to print this UTF8 // string this way. if (fs_file->meta->name2 != NULL) tsk_fprintf(hFile, "File Name: %s\n", fs_file->meta->name2->name); } tsk_fprintf(hFile, "Catalog Record: %" PRIuINUM "\n", inum); tsk_fprintf(hFile, "%sAllocated\n", (fs_file->meta->flags & TSK_FS_META_FLAG_UNALLOC) ? "Not " : ""); tsk_fprintf(hFile, "Type:\t"); if (fs_file->meta->type == TSK_FS_META_TYPE_REG) tsk_fprintf(hFile, "File\n"); else if (fs_file->meta->type == TSK_FS_META_TYPE_DIR) tsk_fprintf(hFile, "Folder\n"); else tsk_fprintf(hFile, "\n"); tsk_fs_meta_make_ls(fs_file->meta, hfs_mode, sizeof(hfs_mode)); tsk_fprintf(hFile, "Mode:\t%s\n", hfs_mode); tsk_fprintf(hFile, "Size:\t%" PRIuOFF "\n", fs_file->meta->size); if (fs_file->meta->link) tsk_fprintf(hFile, "Symbolic link to:\t%s\n", fs_file->meta->link); tsk_fprintf(hFile, "uid / gid: %" PRIuUID " / %" PRIuGID "\n", fs_file->meta->uid, fs_file->meta->gid); tsk_fprintf(hFile, "Link count:\t%d\n", fs_file->meta->nlink); if (hfs_cat_file_lookup(hfs, inum, &entry, TRUE) == 0) { hfs_uni_str *nm = &entry.thread.name; char name_buf[HFS_MAXNAMLEN + 1]; TSK_INUM_T par_cnid; // parent CNID tsk_fprintf(hFile, "\n"); hfs_UTF16toUTF8(fs, nm->unicode, (int) tsk_getu16(fs->endian, nm->length), &name_buf[0], HFS_MAXNAMLEN + 1, HFS_U16U8_FLAG_REPLACE_SLASH | HFS_U16U8_FLAG_REPLACE_CONTROL); tsk_fprintf(hFile, "File Name: %s\n", name_buf); // Test here to see if this is a hard link. 
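// The check below looks for hard links: a parent CNID equal to the
// "HFS+ Private Data" folder with a name beginning with "iNode" indicates a
// file hard link, and a parent CNID equal to the ".HFS+ Private Directory
// Data" folder with a name beginning with "dir_" indicates a folder hard link.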
par_cnid = tsk_getu32(fs->endian, &(entry.thread.parent_cnid)); if ((hfs->has_meta_dir_crtime && par_cnid == hfs->meta_dir_inum) || (hfs->has_meta_crtime && par_cnid == hfs->meta_inum)) { int instr = strncmp(name_buf, "iNode", 5); int drstr = strncmp(name_buf, "dir_", 4); if (instr == 0 && hfs->has_meta_crtime && par_cnid == hfs->meta_inum) { tsk_fprintf(hFile, "This is a hard link to a file\n"); } else if (drstr == 0 && hfs->has_meta_dir_crtime && par_cnid == hfs->meta_dir_inum) { tsk_fprintf(hFile, "This is a hard link to a folder.\n"); } } /* The cat.perm union contains file-type specific values. * Print them if they are relevant. */ if ((fs_file->meta->type == TSK_FS_META_TYPE_CHR) || (fs_file->meta->type == TSK_FS_META_TYPE_BLK)) { tsk_fprintf(hFile, "Device ID:\t%" PRIu32 "\n", tsk_getu32(fs->endian, entry.cat.std.perm.special.raw)); } else if ((tsk_getu32(fs->endian, entry.cat.std.u_info.file_type) == HFS_HARDLINK_FILE_TYPE) && (tsk_getu32(fs->endian, entry.cat.std.u_info.file_cr) == HFS_HARDLINK_FILE_CREATOR)) { // technically, the creation date of this item should be the same as either the // creation date of the "HFS+ Private Data" folder or the creation date of the root folder tsk_fprintf(hFile, "Hard link inode number\t %" PRIu32 "\n", tsk_getu32(fs->endian, entry.cat.std.perm.special.inum)); } tsk_fprintf(hFile, "Admin flags: %" PRIu8, entry.cat.std.perm.a_flags); if (entry.cat.std.perm.a_flags != 0) { tsk_fprintf(hFile, " - "); if (entry.cat.std.perm.a_flags & HFS_PERM_AFLAG_ARCHIVED) tsk_fprintf(hFile, "archived "); if (entry.cat.std.perm.a_flags & HFS_PERM_AFLAG_IMMUTABLE) tsk_fprintf(hFile, "immutable "); if (entry.cat.std.perm.a_flags & HFS_PERM_AFLAG_APPEND) tsk_fprintf(hFile, "append-only "); } tsk_fprintf(hFile, "\n"); tsk_fprintf(hFile, "Owner flags: %" PRIu8, entry.cat.std.perm.o_flags); if (entry.cat.std.perm.o_flags != 0) { tsk_fprintf(hFile, " - "); if (entry.cat.std.perm.o_flags & HFS_PERM_OFLAG_NODUMP) tsk_fprintf(hFile, "no-dump "); if (entry.cat.std.perm.o_flags & HFS_PERM_OFLAG_IMMUTABLE) tsk_fprintf(hFile, "immutable "); if (entry.cat.std.perm.o_flags & HFS_PERM_OFLAG_APPEND) tsk_fprintf(hFile, "append-only "); if (entry.cat.std.perm.o_flags & HFS_PERM_OFLAG_OPAQUE) tsk_fprintf(hFile, "opaque "); if (entry.cat.std.perm.o_flags & HFS_PERM_OFLAG_COMPRESSED) tsk_fprintf(hFile, "compressed "); } tsk_fprintf(hFile, "\n"); if (tsk_getu16(fs->endian, entry.cat.std.flags) & HFS_FILE_FLAG_LOCKED) tsk_fprintf(hFile, "Locked\n"); if (tsk_getu16(fs->endian, entry.cat.std.flags) & HFS_FILE_FLAG_ATTR) tsk_fprintf(hFile, "Has extended attributes\n"); if (tsk_getu16(fs->endian, entry.cat.std.flags) & HFS_FILE_FLAG_ACL) tsk_fprintf(hFile, "Has security data (ACLs)\n"); // File_type and file_cr are not relevant for Folders if (fs_file->meta->type != TSK_FS_META_TYPE_DIR) { int windx; // loop index tsk_fprintf(hFile, "File type:\t%04" PRIx32 " ", tsk_getu32(fs->endian, entry.cat.std.u_info.file_type)); for (windx = 0; windx < 4; windx++) { uint8_t cu = entry.cat.std.u_info.file_type[windx]; if (cu >= 32 && cu <= 126) tsk_fprintf(hFile, "%c", (char) cu); else tsk_fprintf(hFile, " "); } tsk_fprintf(hFile, "\n"); tsk_fprintf(hFile, "File creator:\t%04" PRIx32 " ", tsk_getu32(fs->endian, entry.cat.std.u_info.file_cr)); for (windx = 0; windx < 4; windx++) { uint8_t cu = entry.cat.std.u_info.file_cr[windx]; if (cu >= 32 && cu <= 126) tsk_fprintf(hFile, "%c", (char) cu); else tsk_fprintf(hFile, " "); } tsk_fprintf(hFile, "\n"); } // END if(not folder) if (tsk_getu16(fs->endian, 
entry.cat.std.u_info.flags) & HFS_FINDER_FLAG_NAME_LOCKED) tsk_fprintf(hFile, "Name locked\n"); if (tsk_getu16(fs->endian, entry.cat.std.u_info.flags) & HFS_FINDER_FLAG_HAS_BUNDLE) tsk_fprintf(hFile, "Has bundle\n"); if (tsk_getu16(fs->endian, entry.cat.std.u_info.flags) & HFS_FINDER_FLAG_IS_INVISIBLE) tsk_fprintf(hFile, "Is invisible\n"); if (tsk_getu16(fs->endian, entry.cat.std.u_info.flags) & HFS_FINDER_FLAG_IS_ALIAS) tsk_fprintf(hFile, "Is alias\n"); tsk_fprintf(hFile, "Text encoding:\t%" PRIx32 " = %s\n", tsk_getu32(fs->endian, entry.cat.std.text_enc), text_encoding_name(tsk_getu32(fs->endian, entry.cat.std.text_enc))); if (tsk_getu16(fs->endian, entry.cat.std.rec_type) == HFS_FILE_RECORD) { tsk_fprintf(hFile, "Resource fork size:\t%" PRIu64 "\n", tsk_getu64(fs->endian, entry.cat.resource.logic_sz)); } } if (sec_skew != 0) { tsk_fprintf(hFile, "\nAdjusted times:\n"); if (fs_file->meta->mtime) fs_file->meta->mtime -= sec_skew; if (fs_file->meta->atime) fs_file->meta->atime -= sec_skew; if (fs_file->meta->ctime) fs_file->meta->ctime -= sec_skew; if (fs_file->meta->crtime) fs_file->meta->crtime -= sec_skew; if (fs_file->meta->time2.hfs.bkup_time) fs_file->meta->time2.hfs.bkup_time -= sec_skew; tsk_fprintf(hFile, "Created:\t%s\n", tsk_fs_time_to_str(fs_file->meta->crtime, timeBuf)); tsk_fprintf(hFile, "Content Modified:\t%s\n", tsk_fs_time_to_str(fs_file->meta->mtime, timeBuf)); tsk_fprintf(hFile, "Attributes Modified:\t%s\n", tsk_fs_time_to_str(fs_file->meta->ctime, timeBuf)); tsk_fprintf(hFile, "Accessed:\t%s\n", tsk_fs_time_to_str(fs_file->meta->atime, timeBuf)); tsk_fprintf(hFile, "Backed Up:\t%s\n", tsk_fs_time_to_str(fs_file->meta->time2.hfs.bkup_time, timeBuf)); if (fs_file->meta->mtime) fs_file->meta->mtime += sec_skew; if (fs_file->meta->atime) fs_file->meta->atime += sec_skew; if (fs_file->meta->ctime) fs_file->meta->ctime += sec_skew; if (fs_file->meta->crtime) fs_file->meta->crtime += sec_skew; if (fs_file->meta->time2.hfs.bkup_time) fs_file->meta->time2.hfs.bkup_time += sec_skew; tsk_fprintf(hFile, "\nOriginal times:\n"); } else { tsk_fprintf(hFile, "\nTimes:\n"); } tsk_fprintf(hFile, "Created:\t%s\n", tsk_fs_time_to_str(fs_file->meta->crtime, timeBuf)); tsk_fprintf(hFile, "Content Modified:\t%s\n", tsk_fs_time_to_str(fs_file->meta->mtime, timeBuf)); tsk_fprintf(hFile, "Attributes Modified:\t%s\n", tsk_fs_time_to_str(fs_file->meta->ctime, timeBuf)); tsk_fprintf(hFile, "Accessed:\t%s\n", tsk_fs_time_to_str(fs_file->meta->atime, timeBuf)); tsk_fprintf(hFile, "Backed Up:\t%s\n", tsk_fs_time_to_str(fs_file->meta->time2.hfs.bkup_time, timeBuf)); // IF this is a regular file, then print out the blocks of the DATA and RSRC forks. if (tsk_getu16(fs->endian, entry.cat.std.rec_type) == HFS_FILE_RECORD) { // Only print DATA fork blocks if this file is NOT compressed // N.B., a compressed file has no data fork, and tsk_fs_file_walk() will // do the wrong thing! 
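// (Compressed files are handled further below: a decmpfs record of type 3
// stores the data inline in the CMPF attribute, while type 4 stores it
// zlib-compressed in the resource fork.)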
if (!(entry.cat.std.perm.o_flags & HFS_PERM_OFLAG_COMPRESSED)) { tsk_fprintf(hFile, "\nData Fork Blocks:\n"); print.idx = 0; print.hFile = hFile; print.accumulating = FALSE; print.startBlock = 0; print.blockCount = 0; if (tsk_fs_file_walk_type(fs_file, TSK_FS_ATTR_TYPE_HFS_DATA, HFS_FS_ATTR_ID_DATA, (TSK_FS_FILE_WALK_FLAG_AONLY | TSK_FS_FILE_WALK_FLAG_SLACK), print_addr_act, (void *) &print)) { tsk_fprintf(hFile, "\nError reading file data fork\n"); tsk_error_print(hFile); tsk_error_reset(); } else { output_print_addr(&print); if (print.idx != 0) tsk_fprintf(hFile, "\n"); } } // Only print out the blocks of the Resource fork if it has nonzero size if (tsk_getu64(fs->endian, entry.cat.resource.logic_sz) > 0) { tsk_fprintf(hFile, "\nResource Fork Blocks:\n"); print.idx = 0; print.hFile = hFile; print.accumulating = FALSE; print.startBlock = 0; print.blockCount = 0; if (tsk_fs_file_walk_type(fs_file, TSK_FS_ATTR_TYPE_HFS_RSRC, HFS_FS_ATTR_ID_RSRC, (TSK_FS_FILE_WALK_FLAG_AONLY | TSK_FS_FILE_WALK_FLAG_SLACK), print_addr_act, (void *) &print)) { tsk_fprintf(hFile, "\nError reading file resource fork\n"); tsk_error_print(hFile); tsk_error_reset(); } else { output_print_addr(&print); if (print.idx != 0) tsk_fprintf(hFile, "\n"); } } } // Force the loading of all attributes. (void) tsk_fs_file_attr_get(fs_file); /* Print all of the attributes */ tsk_fprintf(hFile, "\nAttributes: \n"); if (fs_file->meta->attr) { int cnt, i; // cycle through the attributes cnt = tsk_fs_file_attr_getsize(fs_file); for (i = 0; i < cnt; i++) { const char *type; // type of the attribute as a string const TSK_FS_ATTR *fs_attr = tsk_fs_file_attr_get_idx(fs_file, i); if (!fs_attr) continue; type = hfs_attrTypeName((uint32_t) fs_attr->type); // We will need to do something better than this, in the end. //type = "Data"; /* print the layout if it is non-resident and not "special" */ if (fs_attr->flags & TSK_FS_ATTR_NONRES) { //NTFS_PRINT_ADDR print_addr; tsk_fprintf(hFile, "Type: %s (%" PRIu32 "-%" PRIu16 ") Name: %s Non-Resident%s%s%s size: %" PRIuOFF " init_size: %" PRIuOFF "\n", type, fs_attr->type, fs_attr->id, (fs_attr->name) ? fs_attr->name : "N/A", (fs_attr->flags & TSK_FS_ATTR_ENC) ? ", Encrypted" : "", (fs_attr->flags & TSK_FS_ATTR_COMP) ? ", Compressed" : "", (fs_attr->flags & TSK_FS_ATTR_SPARSE) ? ", Sparse" : "", fs_attr->size, fs_attr->nrd.initsize); } // END: non-resident attribute case else { tsk_fprintf(hFile, "Type: %s (%" PRIu32 "-%" PRIu16 ") Name: %s Resident%s%s%s size: %" PRIuOFF "\n", type, fs_attr->type, fs_attr->id, (fs_attr->name) ? fs_attr->name : "N/A", (fs_attr->flags & TSK_FS_ATTR_ENC) ? ", Encrypted" : "", (fs_attr->flags & TSK_FS_ATTR_COMP) ? ", Compressed" : "", (fs_attr->flags & TSK_FS_ATTR_SPARSE) ? 
", Sparse" : "", fs_attr->size); if (fs_attr->type == TSK_FS_ATTR_TYPE_HFS_COMP_REC) { if (compressionAttr == NULL) { compressionAttr = fs_attr; } else { // Problem: there is more than one compression attribute error_detected(TSK_ERR_FS_CORRUPT, "hfs_istat: more than one compression attribute"); return 1; } } } // END: else (RESIDENT attribute case) } // END: for(;;) loop over attributes } // END: if(fs_file->meta->attr is non-NULL) if ((entry.cat.std.perm.o_flags & HFS_PERM_OFLAG_COMPRESSED) && (compressionAttr == NULL)) tsk_fprintf(hFile, "WARNING: Compression Flag is set, but there" " is no compression record for this file.\n"); if (((entry.cat.std.perm.o_flags & HFS_PERM_OFLAG_COMPRESSED) == 0) && (compressionAttr != NULL)) tsk_fprintf(hFile, "WARNING: Compression Flag is NOT set, but there" " is a compression record for this file.\n"); // IF this is a compressed file if (compressionAttr != NULL) { const TSK_FS_ATTR *fs_attr = compressionAttr; int attrReadResult; DECMPFS_DISK_HEADER *cmph; uint32_t cmpType; uint64_t uncSize; unsigned char reallyCompressed = FALSE; uint64_t cmpSize = 0; // Read the attribute. It cannot be too large because it is stored in // a btree node char *aBuf = (char *) tsk_malloc((size_t) fs_attr->size); if (aBuf == NULL) { error_returned("hfs_istat: space for a compression attribute"); return 1; } attrReadResult = tsk_fs_attr_read(fs_attr, (TSK_OFF_T) 0, aBuf, (size_t) fs_attr->size, (TSK_FS_FILE_READ_FLAG_ENUM) 0x00); if (attrReadResult == -1) { error_returned("hfs_istat: reading the compression attribute"); free(aBuf); return 1; } else if (attrReadResult < fs_attr->size) { error_detected(TSK_ERR_FS_READ, "hfs_istat: could not read the whole compression attribute"); free(aBuf); return 1; } // Now, cast the attr into a compression header cmph = (DECMPFS_DISK_HEADER *) aBuf; cmpType = tsk_getu32(TSK_LIT_ENDIAN, cmph->compression_type); uncSize = tsk_getu64(TSK_LIT_ENDIAN, cmph->uncompressed_size); if (cmpType == 3) { // Data is inline if ((cmph->attr_bytes[0] & 0x0F) == 0x0F) { reallyCompressed = FALSE; cmpSize = fs_attr->size - 17; // subtr. 
size of header + 1 indicator byte } else { reallyCompressed = TRUE; cmpSize = fs_attr->size - 16; // subt size of header } } else if (cmpType == 4) { // Data is compressed in the resource fork reallyCompressed = TRUE; } tsk_fprintf(hFile, "\nCompressed File:\n"); tsk_fprintf(hFile, " Uncompressed size: %llu\n", uncSize); if (cmpType == 4) { tsk_fprintf(hFile, " Data is zlib compressed in the resource fork\n"); } else if (cmpType == 3) { tsk_fprintf(hFile, " Data follows compression record in the CMPF attribute\n"); tsk_fprintf(hFile, " %" PRIu64 " bytes of data at offset ", cmpSize); if (reallyCompressed) tsk_fprintf(hFile, "16, zlib compressed\n"); else tsk_fprintf(hFile, "17, not compressed\n"); } else { tsk_fprintf(hFile, " Compression type is UNKNOWN\n"); } free(aBuf); if (cmpType == 4 && (tsk_getu64(fs->endian, entry.cat.resource.logic_sz) == 0)) tsk_fprintf(hFile, "WARNING: Compression record indicates compressed data" " in the RSRC Fork, but that fork is empty.\n"); } // This will return NULL if there is an error, or if there are no resources rd = hfs_parse_resource_fork(fs_file); // TODO: Should check the errnum here to see if there was an error if (rd != NULL) { tsk_fprintf(hFile, "\nResources:\n"); while (rd) { tsk_fprintf(hFile, " Type: %s \tID: %-5u \tOffset: %-5u \tSize: %-5u \tName: %s\n", rd->type, rd->id, rd->offset, rd->length, rd->name); rd = rd->next; } } // This is OK to call with NULL free_res_descriptor(rd); tsk_fs_file_close(fs_file); return 0; } static TSK_FS_ATTR_TYPE_ENUM hfs_get_default_attr_type(const TSK_FS_FILE * a_file) { // The HFS+ special files have a default attr type of "Default" TSK_INUM_T inum = a_file->meta->addr; if (inum == 3 || // Extents File inum == 4 || // Catalog File inum == 5 || // Bad Blocks File inum == 6 || // Block Map (Allocation File) inum == 7 || // Startup File inum == 8 || // Attributes File inum == 14 || // Not sure if these two will actually work. I don't see inum == 15) // any code to load the attrs of these files, if they exist. return TSK_FS_ATTR_TYPE_DEFAULT; // The "regular" files and symbolic links have a DATA fork with type "DATA" if (a_file->meta->type == TSK_FS_META_TYPE_REG || a_file->meta->type == TSK_FS_META_TYPE_LNK) // This should be an HFS-specific type. return TSK_FS_ATTR_TYPE_HFS_DATA; // We've got to return *something* for every file, so we return this. return TSK_FS_ATTR_TYPE_DEFAULT; } static void hfs_close(TSK_FS_INFO * fs) { HFS_INFO *hfs = (HFS_INFO *) fs; // We'll grab this lock a bit early. 
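// (Presumably so that a concurrent metadata-directory cache lookup cannot
// touch hfs->meta_dir or hfs->dir_meta_dir while they are freed below.)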
tsk_take_lock(&(hfs->metadata_dir_cache_lock)); fs->tag = 0; free(hfs->fs); tsk_fs_file_close(hfs->catalog_file); hfs->catalog_attr = NULL; if (hfs->blockmap_file) { tsk_fs_file_close(hfs->blockmap_file); hfs->blockmap_attr = NULL; } if (hfs->meta_dir) { tsk_fs_dir_close(hfs->meta_dir); hfs->meta_dir = NULL; } if (hfs->dir_meta_dir) { tsk_fs_dir_close(hfs->dir_meta_dir); hfs->dir_meta_dir = NULL; } if (hfs->extents_file) { tsk_fs_file_close(hfs->extents_file); hfs->extents_file = NULL; } tsk_release_lock(&(hfs->metadata_dir_cache_lock)); tsk_deinit_lock(&(hfs->metadata_dir_cache_lock)); tsk_fs_free((TSK_FS_INFO *)hfs); } /* hfs_open - open an hfs file system * * Return NULL on error (or not an HFS or HFS+ file system) * */ TSK_FS_INFO * hfs_open(TSK_IMG_INFO * img_info, TSK_OFF_T offset, TSK_FS_TYPE_ENUM ftype, uint8_t test) { HFS_INFO *hfs; unsigned int len; TSK_FS_INFO *fs; ssize_t cnt; TSK_FS_FILE *file; // The root directory, or the metadata directories TSK_INUM_T inum; // The inum (or CNID) of the metadata directories int8_t result; // of tsk_fs_path2inum() tsk_error_reset(); if (TSK_FS_TYPE_ISHFS(ftype) == 0) { tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("Invalid FS Type in hfs_open"); return NULL; } if ((hfs = (HFS_INFO *) tsk_fs_malloc(sizeof(HFS_INFO))) == NULL) return NULL; fs = &(hfs->fs_info); fs->ftype = TSK_FS_TYPE_HFS; fs->duname = "Allocation Block"; fs->tag = TSK_FS_INFO_TAG; fs->flags = 0; fs->img_info = img_info; fs->offset = offset; /* * Read the superblock. */ len = sizeof(hfs_plus_vh); if ((hfs->fs = (hfs_plus_vh *) tsk_malloc(len)) == NULL) { fs->tag = 0; tsk_fs_free((TSK_FS_INFO *)hfs); return NULL; } if (hfs_checked_read_random(fs, (char *) hfs->fs, len, (TSK_OFF_T) HFS_VH_OFF)) { tsk_error_set_errstr2("hfs_open: superblock"); fs->tag = 0; free(hfs->fs); tsk_fs_free((TSK_FS_INFO *)hfs); return NULL; } /* * Verify we are looking at an HFS+ image */ if (tsk_fs_guessu16(fs, hfs->fs->signature, HFS_VH_SIG_HFSPLUS) && tsk_fs_guessu16(fs, hfs->fs->signature, HFS_VH_SIG_HFSX) && tsk_fs_guessu16(fs, hfs->fs->signature, HFS_VH_SIG_HFS)) { fs->tag = 0; free(hfs->fs); tsk_fs_free((TSK_FS_INFO *)hfs); tsk_error_set_errno(TSK_ERR_FS_MAGIC); tsk_error_set_errstr("not an HFS+ file system (magic)"); return NULL; } /* * Handle an HFS-wrapped HFS+ image, which is a HFS volume that contains * the HFS+ volume inside of it. */ if (tsk_getu16(fs->endian, hfs->fs->signature) == HFS_VH_SIG_HFS) { hfs_mdb *wrapper_sb = (hfs_mdb *) hfs->fs; // Verify that we are setting a wrapper and not a normal HFS volume if ((tsk_getu16(fs->endian, wrapper_sb->drEmbedSigWord) == HFS_VH_SIG_HFSPLUS) || (tsk_getu16(fs->endian, wrapper_sb->drEmbedSigWord) == HFS_VH_SIG_HFSX)) { TSK_FS_INFO *fs_info2; // offset in sectors to start of first HFS block uint16_t drAlBlSt = tsk_getu16(fs->endian, wrapper_sb->drAlBlSt); // size of each HFS block uint32_t drAlBlkSiz = tsk_getu32(fs->endian, wrapper_sb->drAlBlkSiz); // start of embedded FS uint16_t startBlock = tsk_getu16(fs->endian, wrapper_sb->drEmbedExtent_startBlock); // calculate the offset; 512 here is intentional. 
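// For example (hypothetical values): drAlBlSt = 28, drAlBlkSiz = 4096 and
// startBlock = 10 would give hfsplus_offset = 28*512 + 10*4096 = 55296 bytes.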
// TN1150 says "The drAlBlSt field contains the offset, in // 512-byte blocks, of the wrapper's allocation block 0 relative // to the start of the volume" TSK_OFF_T hfsplus_offset = (drAlBlSt * (TSK_OFF_T) 512) + (drAlBlkSiz * (TSK_OFF_T) startBlock); if (tsk_verbose) tsk_fprintf(stderr, "hfs_open: HFS+/HFSX within HFS wrapper at byte offset %" PRIuOFF "\n", hfsplus_offset); fs->tag = 0; free(hfs->fs); tsk_fs_free((TSK_FS_INFO *)hfs); /* just re-open with the new offset, then record the offset */ if (hfsplus_offset == 0) { tsk_error_set_errno(TSK_ERR_FS_CORRUPT); tsk_error_set_errstr("HFS+ offset is zero"); return NULL; } fs_info2 = hfs_open(img_info, offset + hfsplus_offset, ftype, test); if (fs_info2) ((HFS_INFO *) fs_info2)->hfs_wrapper_offset = hfsplus_offset; return fs_info2; } else { fs->tag = 0; free(hfs->fs); tsk_fs_free((TSK_FS_INFO *)hfs); tsk_error_set_errno(TSK_ERR_FS_MAGIC); tsk_error_set_errstr ("HFS file systems (other than wrappers HFS+/HFSX file systems) are not supported"); return NULL; } } fs->block_count = tsk_getu32(fs->endian, hfs->fs->blk_cnt); fs->first_block = 0; fs->last_block = fs->last_block_act = fs->block_count - 1; /* this isn't really accurate; fs->block_size reports only the size of the allocation block; the size of the device block has to be found from the device (allocation block size should always be larger than device block size and an even multiple of the device block size) */ fs->dev_bsize = fs->block_size = tsk_getu32(fs->endian, hfs->fs->blk_sz); // determine the last block we have in this image if (fs->block_size <= 1) { tsk_error_set_errno(TSK_ERR_FS_CORRUPT); tsk_error_set_errstr("HFS+ allocation block size too small"); return NULL; } if ((TSK_DADDR_T) ((img_info->size - offset) / fs->block_size) < fs->block_count) fs->last_block_act = (img_info->size - offset) / fs->block_size - 1; // Initialize the lock tsk_init_lock(&(hfs->metadata_dir_cache_lock)); /* * Set function pointers */ fs->inode_walk = hfs_inode_walk; fs->block_walk = hfs_block_walk; fs->block_getflags = hfs_block_getflags; fs->load_attrs = hfs_load_attrs; fs->get_default_attr_type = hfs_get_default_attr_type; fs->file_add_meta = hfs_inode_lookup; fs->dir_open_meta = hfs_dir_open_meta; fs->fsstat = hfs_fsstat; fs->fscheck = hfs_fscheck; fs->istat = hfs_istat; fs->close = hfs_close; // lazy loading of block map hfs->blockmap_file = NULL; hfs->blockmap_attr = NULL; hfs->blockmap_cache_start = -1; hfs->blockmap_cache_len = 0; fs->first_inum = HFS_ROOT_INUM; fs->root_inum = HFS_ROOT_INUM; fs->last_inum = HFS_FIRST_USER_CNID - 1; // we will later increase this fs->inum_count = fs->last_inum - fs->first_inum + 1; /* We will load the extents file data when we need it */ hfs->extents_file = NULL; hfs->extents_attr = NULL; if (tsk_getu32(fs->endian, hfs->fs->start_file.extents[0].blk_cnt) == 0) { if (tsk_verbose) tsk_fprintf(stderr, "hfs_open: Optional Startup File is not present.\n"); hfs->has_startup_file = FALSE; } else { if (tsk_verbose) tsk_fprintf(stderr, "hfs_open: Startup File is present.\n"); hfs->has_startup_file = TRUE; } if (tsk_getu32(fs->endian, hfs->fs->ext_file.extents[0].blk_cnt) == 0) { if (tsk_verbose) tsk_fprintf(stderr, "hfs_open: Optional Extents File (and Badblocks File) is not present.\n"); hfs->has_extents_file = FALSE; } else { if (tsk_verbose) tsk_fprintf(stderr, "hfs_open: Extents File (and BadBlocks File) is present.\n"); hfs->has_extents_file = TRUE; } if (tsk_getu32(fs->endian, hfs->fs->attr_file.extents[0].blk_cnt) == 0) { if (tsk_verbose) tsk_fprintf(stderr, 
"hfs_open: Optional Attributes File is not present.\n"); hfs->has_attributes_file = FALSE; } else { if (tsk_verbose) tsk_fprintf(stderr, "hfs_open: Attributes File is present.\n"); hfs->has_attributes_file = TRUE; } /* Load the catalog file though */ if ((hfs->catalog_file = tsk_fs_file_open_meta(fs, NULL, HFS_CATALOG_FILE_ID)) == NULL) { fs->tag = 0; free(hfs->fs); tsk_fs_free((TSK_FS_INFO *)hfs); return NULL; } /* cache the data attribute */ hfs->catalog_attr = tsk_fs_attrlist_get(hfs->catalog_file->meta->attr, TSK_FS_ATTR_TYPE_DEFAULT); if (!hfs->catalog_attr) { fs->tag = 0; tsk_fs_file_close(hfs->catalog_file); free(hfs->fs); tsk_fs_free((TSK_FS_INFO *)hfs); tsk_error_errstr2_concat (" - Data Attribute not found in Catalog File"); return NULL; } // cache the catalog file header cnt = tsk_fs_attr_read(hfs->catalog_attr, 14, (char *) &(hfs->catalog_header), sizeof(hfs_btree_header_record), 0); if (cnt != sizeof(hfs_btree_header_record)) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2("hfs_open: Error reading catalog header"); fs->tag = 0; free(hfs->fs); tsk_fs_free((TSK_FS_INFO *)hfs); return NULL; } if (tsk_getu16(fs->endian, hfs->fs->version) == HFS_VH_VER_HFSPLUS) hfs->is_case_sensitive = 0; else if (tsk_getu16(fs->endian, hfs->fs->version) == HFS_VH_VER_HFSX) { if (hfs->catalog_header.compType == HFS_BT_HEAD_COMP_SENS) hfs->is_case_sensitive = 1; else if (hfs->catalog_header.compType == HFS_BT_HEAD_COMP_INSENS) hfs->is_case_sensitive = 0; else { if (tsk_verbose) tsk_fprintf(stderr, "hfs_open: invalid value (0x%02" PRIx8 ") for key compare type; using case-insensitive\n", hfs->catalog_header.compType); hfs->is_case_sensitive = 0; } } else { if (tsk_verbose) tsk_fprintf(stderr, "hfs_open: unknown HFS+/HFSX version (%" PRIu16 "\n", tsk_getu16(fs->endian, hfs->fs->version)); hfs->is_case_sensitive = 0; } // update the numbers. fs->last_inum = hfs_find_highest_inum(hfs); fs->inum_count = fs->last_inum + 1; snprintf((char *) fs->fs_id, 17, "%08" PRIx32 "%08" PRIx32, tsk_getu32(fs->endian, hfs->fs->finder_info[HFS_VH_FI_ID1]), tsk_getu32(fs->endian, hfs->fs->finder_info[HFS_VH_FI_ID2])); fs->fs_id_used = 16; /* journal */ fs->jblk_walk = hfs_jblk_walk; fs->jentry_walk = hfs_jentry_walk; fs->jopen = hfs_jopen; fs->name_cmp = hfs_name_cmp; fs->journ_inum = 0; /* Creation Times */ // First, the root file = tsk_fs_file_open_meta(fs, NULL, 2); if (file != NULL) { hfs->root_crtime = file->meta->crtime; hfs->has_root_crtime = TRUE; tsk_fs_file_close(file); } else { hfs->has_root_crtime = FALSE; } file = NULL; // disable hard link traversal while finding the hard // link directories themselves (to prevent problems if // there are hard links in the root directory) hfs->meta_inum = 0; hfs->meta_dir_inum = 0; // Now the (file) metadata directory // The metadata directory is a sub-directory of the root. Its name begins with four nulls, followed // by "HFS+ Private Data". The file system parsing code replaces nulls in filenames with UTF8_NULL_REPLACE. // In the released version of TSK, this replacement is the character '^'. // NOTE: There is a standard Unicode replacement which is 0xfffd in UTF16 and 0xEF 0xBF 0xBD in UTF8. 
// Systems that require the standard definition can redefine UTF8_NULL_REPLACE and UTF16_NULL_REPLACE // in tsk_hfs.h hfs->has_meta_crtime = FALSE; result = tsk_fs_path2inum(fs, "/" UTF8_NULL_REPLACE UTF8_NULL_REPLACE UTF8_NULL_REPLACE UTF8_NULL_REPLACE "HFS+ Private Data", &inum, NULL); if (result == 0) { TSK_FS_FILE *file_tmp = tsk_fs_file_open_meta(fs, NULL, inum); if (file_tmp != NULL) { hfs->meta_crtime = file_tmp->meta->crtime; hfs->has_meta_crtime = TRUE; hfs->meta_inum = inum; tsk_fs_file_close(file_tmp); } } // Now, the directory metadata directory // The "directory" metadata directory, where hardlinked directories actually live, is a subdirectory // of the root. The beginning of the name of this directory is ".HFS+ Private Directory Data" which // is followed by a carriage return (ASCII 13). hfs->has_meta_dir_crtime = FALSE; result = tsk_fs_path2inum(fs, "/.HFS+ Private Directory Data\r", &inum, NULL); if (result == 0) { TSK_FS_FILE *file_tmp = tsk_fs_file_open_meta(fs, NULL, inum); if (file_tmp != NULL) { hfs->metadir_crtime = file_tmp->meta->crtime; hfs->has_meta_dir_crtime = TRUE; hfs->meta_dir_inum = inum; tsk_fs_file_close(file_tmp); } } if (hfs->has_root_crtime && hfs->has_meta_crtime && hfs->has_meta_dir_crtime) { if (tsk_verbose) tsk_fprintf(stderr, "hfs_open: Creation times for key folders have been read and cached.\n"); } if (!hfs->has_root_crtime) { if (tsk_verbose) tsk_fprintf(stderr, "hfs_open: Warning: Could not open the root directory. " "Hard link detection and some other functions will be impaired\n"); } else if (tsk_verbose) { tsk_fprintf(stderr, "hfs_open: The root directory is accessible.\n"); } if (tsk_verbose) { if (hfs->has_meta_crtime) tsk_fprintf(stderr, "hfs_open: \"/^^^^HFS+ Private Data\" metadata folder is accessible.\n"); else tsk_fprintf(stderr, "hfs_open: Optional \"^^^^HFS+ Private Data\" metadata folder is not accessible, or does not exist.\n"); if (hfs->has_meta_dir_crtime) tsk_fprintf(stderr, "hfs_open: \"/HFS+ Private Directory Data^\" metadata folder is accessible.\n"); else tsk_fprintf(stderr, "hfs_open: Optional \"/HFS+ Private Directory Data^\" metadata folder is not accessible, or does not exist.\n"); } // These caches will be set, if they are needed. hfs->meta_dir = NULL; hfs->dir_meta_dir = NULL; return fs; } /* * Error Handling */ /** * Call this when an error is first detected. It sets the error code and it also * sets the primary error string, describing the lowest level of error. (Actually, * it appends to the error string.) * * If the error code is already set, then this appends to the primary error * string an hex representation of the new error code, plus the new error message. * * @param errnum The desired error code * @param errstr The format string for the error message */ void error_detected(uint32_t errnum, char *errstr, ...) { va_list args; va_start(args, errstr); { TSK_ERROR_INFO *errInfo = tsk_error_get_info(); char *loc_errstr = errInfo->errstr; if (errInfo->t_errno == 0) errInfo->t_errno = errnum; else { //This should not happen! We don't want to wipe out the existing error //code, so we write the new code into the error string, in hex. int sl = strlen(errstr); snprintf(loc_errstr + sl, TSK_ERROR_STRING_MAX_LENGTH - sl, " Next errnum: 0x%x ", errnum); } if (errstr != NULL) { int sl = strlen(loc_errstr); vsnprintf(loc_errstr + sl, TSK_ERROR_STRING_MAX_LENGTH - sl, errstr, args); } } va_end(args); } /** * Call this when a called TSK function returns an error. 
Presumably, that * function will have set the error code and the primary error string. This * *appends* to the secondary error string. It should be called to describe * the context of the call. If no error code has been set, then this sets a * default code so that it is not zero. * * @param errstr The format string for the error message */ void error_returned(char *errstr, ...) { va_list args; va_start(args, errstr); { TSK_ERROR_INFO *errInfo = tsk_error_get_info(); char *loc_errstr2 = errInfo->errstr2; if (errInfo->t_errno == 0) errInfo->t_errno = TSK_ERR_AUX_GENERIC; if (errstr != NULL) { int sl = strlen(loc_errstr2); vsnprintf(loc_errstr2 + sl, TSK_ERROR_STRING_MAX_LENGTH - sl, errstr, args); } } va_end(args); } sleuthkit-4.2.0/tsk/fs/hfs_dent.c000644 000765 000024 00000043227 12576320700 017504 0ustar00carrierstaff000000 000000 /* ** The Sleuth Kit ** ** This software is subject to the IBM Public License ver. 1.0, ** which was displayed prior to download and is included in the readme.txt ** file accompanying the Sleuth Kit files. It may also be requested from: ** Crucial Security Inc. ** 14900 Conference Center Drive ** Chantilly, VA 20151 ** ** Copyright (c) 2009-2011 Brian Carrier. All rights reserved. ** ** Judson Powers [jpowers@atc-nycorp.com] ** Matt Stillerman [matt@atc-nycorp.com] ** Copyright (c) 2008, 2012 ATC-NY. All rights reserved. ** This file contains data developed with support from the National ** Institute of Justice, Office of Justice Programs, U.S. Department of Justice. ** ** Wyatt Banks [wbanks@crucialsecurity.com] ** Copyright (c) 2005 Crucial Security Inc. All rights reserved. ** ** Brian Carrier [carrier@sleuthkit.org] ** Copyright (c) 2003-2005 Brian Carrier. All rights reserved ** ** Copyright (c) 1997,1998,1999, International Business Machines ** Corporation and others. All Rights Reserved. */ /* TCT * LICENSE * This software is distributed under the IBM Public License. * AUTHOR(S) * Wietse Venema * IBM T.J. Watson Research * P.O. Box 704 * Yorktown Heights, NY 10598, USA --*/ /* ** You may distribute the Sleuth Kit, or other software that incorporates ** part of all of the Sleuth Kit, in object code form under a license agreement, ** provided that: ** a) you comply with the terms and conditions of the IBM Public License ** ver 1.0; and ** b) the license agreement ** i) effectively disclaims on behalf of all Contributors all warranties ** and conditions, express and implied, including warranties or ** conditions of title and non-infringement, and implied warranties ** or conditions of merchantability and fitness for a particular ** purpose. ** ii) effectively excludes on behalf of all Contributors liability for ** damages, including direct, indirect, special, incidental and ** consequential damages such as lost profits. ** iii) states that any provisions which differ from IBM Public License ** ver. 1.0 are offered by that Contributor alone and not by any ** other party; and ** iv) states that the source code for the program is available from you, ** and informs licensees how to obtain it in a reasonable manner on or ** through a medium customarily used for software exchange. ** ** When the Sleuth Kit or other software that incorporates part or all of ** the Sleuth Kit is made available in source code form: ** a) it must be made available under IBM Public License ver. 1.0; and ** b) a copy of the IBM Public License ver. 1.0 must be included with ** each copy of the program. 
*/ /** \file hfs_dent.c * Contains the file name layer code for HFS+ file systems -- not included in * code by default. */ #include "tsk_fs_i.h" #include "tsk_hfs.h" /* convert HFS+'s UTF16 to UTF8 * replaces null characters with another character (0xfffd) * replaces slashes (permitted by HFS+ but causes problems with TSK) * with colons (generally not allowed by Mac OS X) * note that at least one directory on HFS+ volumes begins with * four nulls, so we do need to handle nulls; also, Apple chooses * to encode nulls as UTF8 \xC0\x80, which is not a valid UTF8 sequence * * @param fs the file system * @param uni the UTF16 string as a sequence of bytes * @param ulen then length of the UTF16 string in characters * @param asc a buffer to hold the UTF8 result * @param alen the length of that buffer * @param flags control some aspects of the conversion * @return 0 on success, 1 on failure; sets up to error string 1 * * HFS_U16U8_FLAG_REPLACE_SLASH if this flag is set, then slashes will be replaced * by colons. Otherwise, they will not be replaced. * * HFS_U16U8_FLAG_REPLACE_CONTROL if this flag is set, then all control characters * will be replaced by the UTF16_NULL_REPLACE character. N.B., always replaces * null characters regardless of this flag. */ uint8_t hfs_UTF16toUTF8(TSK_FS_INFO * fs, uint8_t * uni, int ulen, char *asc, int alen, uint32_t flags) { UTF8 *ptr8; uint8_t *uniclean; UTF16 *ptr16; int i; TSKConversionResult r; // remove nulls from the Unicode string // convert / to : uniclean = (uint8_t *) tsk_malloc(ulen * 2); if (!uniclean) return 1; memcpy(uniclean, uni, ulen * 2); for (i = 0; i < ulen; ++i) { uint16_t uc = tsk_getu16(fs->endian, uniclean + i * 2); int changed = 0; if (uc == UTF16_NULL) { uc = UTF16_NULL_REPLACE; changed = 1; } else if ((flags & HFS_U16U8_FLAG_REPLACE_SLASH) && uc == UTF16_SLASH) { uc = UTF16_COLON; changed = 1; } else if ((flags & HFS_U16U8_FLAG_REPLACE_CONTROL) && uc < UTF16_LEAST_PRINTABLE) { uc = (uint16_t) UTF16_NULL_REPLACE; changed = 1; } if (changed) *((uint16_t *) (uniclean + i * 2)) = tsk_getu16(fs->endian, (uint8_t *) & uc); } // convert to UTF-8 memset(asc, 0, alen); ptr8 = (UTF8 *) asc; ptr16 = (UTF16 *) uniclean; r = tsk_UTF16toUTF8(fs->endian, (const UTF16 **) &ptr16, (const UTF16 *) (&uniclean[ulen * 2]), &ptr8, (UTF8 *) & asc[alen], TSKstrictConversion); free(uniclean); if (r != TSKconversionOK) { tsk_error_set_errno(TSK_ERR_FS_UNICODE); tsk_error_set_errstr ("hfs_UTF16toUTF8: unicode conversion failed (%d)", (int) r); return 1; } return 0; } static TSK_FS_NAME_TYPE_ENUM hfsmode2tsknametype(uint16_t a_mode) { switch (a_mode & HFS_IN_IFMT) { case HFS_IN_IFIFO: return TSK_FS_NAME_TYPE_FIFO; case HFS_IN_IFCHR: return TSK_FS_NAME_TYPE_CHR; case HFS_IN_IFDIR: return TSK_FS_NAME_TYPE_DIR; case HFS_IN_IFBLK: return TSK_FS_NAME_TYPE_BLK; case HFS_IN_IFREG: return TSK_FS_NAME_TYPE_REG; case HFS_IN_IFLNK: return TSK_FS_NAME_TYPE_LNK; case HFS_IN_IFSOCK: return TSK_FS_NAME_TYPE_SOCK; case HFS_IFWHT: return TSK_FS_NAME_TYPE_WHT; case HFS_IFXATTR: return TSK_FS_NAME_TYPE_UNDEF; default: /* error */ return TSK_FS_NAME_TYPE_UNDEF; } } // used to pass data to the callback typedef struct { TSK_FS_DIR *fs_dir; TSK_FS_NAME *fs_name; } HFS_DIR_OPEN_META_INFO; static uint8_t hfs_dir_open_meta_cb(HFS_INFO * hfs, int8_t level_type, const void *targ_data, const hfs_btree_key_cat * cur_key, TSK_OFF_T key_off, void *ptr) { uint32_t *cnid_p = (uint32_t *) targ_data; HFS_DIR_OPEN_META_INFO *info = (HFS_DIR_OPEN_META_INFO *) ptr; TSK_FS_INFO *fs = &hfs->fs_info; if 
(tsk_verbose) fprintf(stderr, "hfs_dir_open_meta_cb: want %" PRIu32 " vs got %" PRIu32 " (%s node)\n", *cnid_p, tsk_getu32(hfs->fs_info.endian, cur_key->parent_cnid), (level_type == HFS_BT_NODE_TYPE_IDX) ? "Index" : "Leaf"); if (level_type == HFS_BT_NODE_TYPE_IDX) { if (tsk_getu32(hfs->fs_info.endian, cur_key->parent_cnid) < *cnid_p) { return HFS_BTREE_CB_IDX_LT; } else { return HFS_BTREE_CB_IDX_EQGT; } } else { uint8_t *rec_buf = (uint8_t *) cur_key; uint16_t rec_type; size_t rec_off2; if (tsk_getu32(hfs->fs_info.endian, cur_key->parent_cnid) < *cnid_p) { return HFS_BTREE_CB_LEAF_GO; } else if (tsk_getu32(hfs->fs_info.endian, cur_key->parent_cnid) > *cnid_p) { return HFS_BTREE_CB_LEAF_STOP; } rec_off2 = 2 + tsk_getu16(hfs->fs_info.endian, cur_key->key_len); // @@@ NEED TO REPLACE THIS SOMEHOW, but need to figure out the max length /* if (rec_off2 > nodesize) { tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr( "hfs_dir_open_meta: offset of record+keylen %d in leaf node %d too large (%"PRIuSIZE" vs %" PRIu16 ")", rec, cur_node, rec_off2, nodesize); tsk_fs_name_free(fs_name); free(node); return TSK_COR; } */ rec_type = tsk_getu16(hfs->fs_info.endian, &rec_buf[rec_off2]); // Catalog entry is for a file if (rec_type == HFS_FILE_THREAD) { tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr("hfs_dir_open_meta: Entry" " is a file, not a folder"); return HFS_BTREE_CB_ERR; } /* This will link the folder to its parent, which is the ".." entry */ else if (rec_type == HFS_FOLDER_THREAD) { hfs_thread *thread = (hfs_thread *) & rec_buf[rec_off2]; strcpy(info->fs_name->name, ".."); info->fs_name->meta_addr = tsk_getu32(hfs->fs_info.endian, thread->parent_cnid); info->fs_name->type = TSK_FS_NAME_TYPE_DIR; info->fs_name->flags = TSK_FS_NAME_FLAG_ALLOC; } /* This is a folder in the folder */ else if (rec_type == HFS_FOLDER_RECORD) { hfs_folder *folder = (hfs_folder *) & rec_buf[rec_off2]; info->fs_name->meta_addr = tsk_getu32(hfs->fs_info.endian, folder->std.cnid); info->fs_name->type = TSK_FS_NAME_TYPE_DIR; info->fs_name->flags = TSK_FS_NAME_FLAG_ALLOC; if (hfs_UTF16toUTF8(fs, (uint8_t *) cur_key->name.unicode, tsk_getu16(hfs->fs_info.endian, cur_key->name.length), info->fs_name->name, HFS_MAXNAMLEN + 1, HFS_U16U8_FLAG_REPLACE_SLASH)) { return HFS_BTREE_CB_ERR; } } /* This is a normal file in the folder */ else if (rec_type == HFS_FILE_RECORD) { hfs_file *file = (hfs_file *) & rec_buf[rec_off2]; // This could be a hard link. We need to test this CNID, and follow it if necessary. unsigned char is_err; TSK_INUM_T file_cnid = tsk_getu32(hfs->fs_info.endian, file->std.cnid); TSK_INUM_T target_cnid = hfs_follow_hard_link(hfs, file, &is_err); if (is_err > 1) { error_returned ("hfs_dir_open_meta_cb: trying to follow a possible hard link in the directory"); return HFS_BTREE_CB_ERR; } if (target_cnid != file_cnid) { HFS_ENTRY entry; uint8_t lkup; // lookup result // This is a hard link. We need to fill in the name->type and name->meta_addr from the target info->fs_name->meta_addr = target_cnid; // get the Catalog entry for the target CNID lkup = hfs_cat_file_lookup(hfs, target_cnid, &entry, FALSE); if (lkup != 0) { error_returned ("hfs_dir_open_meta_cb: retrieving the catalog entry for the target of a hard link"); return HFS_BTREE_CB_ERR; } info->fs_name->type = hfsmode2tsknametype(tsk_getu16(hfs->fs_info.endian, entry.cat.std.perm.mode)); } else { // This is NOT a hard link. 
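// Use the CNID and mode straight from this catalog record.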
info->fs_name->meta_addr = tsk_getu32(hfs->fs_info.endian, file->std.cnid); info->fs_name->type = hfsmode2tsknametype(tsk_getu16(hfs->fs_info.endian, file->std.perm.mode)); } info->fs_name->flags = TSK_FS_NAME_FLAG_ALLOC; if (hfs_UTF16toUTF8(fs, (uint8_t *) cur_key->name.unicode, tsk_getu16(hfs->fs_info.endian, cur_key->name.length), info->fs_name->name, HFS_MAXNAMLEN + 1, HFS_U16U8_FLAG_REPLACE_SLASH)) { return HFS_BTREE_CB_ERR; } } else { tsk_error_set_errno(TSK_ERR_FS_GENFS); // @@@ MAY NEED TO IMPROVE BELOW MESSAGE tsk_error_set_errstr ("hfs_dir_open_meta: Unknown record type %d in leaf node", rec_type); return HFS_BTREE_CB_ERR; } if (tsk_fs_dir_add(info->fs_dir, info->fs_name)) { return HFS_BTREE_CB_ERR; } return HFS_BTREE_CB_LEAF_GO; } } /** \internal * Process a directory and load up FS_DIR with the entries. If a pointer to * an already allocated FS_DIR struture is given, it will be cleared. If no existing * FS_DIR structure is passed (i.e. NULL), then a new one will be created. If the return * value is error or corruption, then the FS_DIR structure could * have entries (depending on when the error occured). * * @param a_fs File system to analyze * @param a_fs_dir Pointer to FS_DIR pointer. Can contain an already allocated * structure or a new structure. * @param a_addr Address of directory to process. * @returns error, corruption, ok etc. */ TSK_RETVAL_ENUM hfs_dir_open_meta(TSK_FS_INFO * fs, TSK_FS_DIR ** a_fs_dir, TSK_INUM_T a_addr) { HFS_INFO *hfs = (HFS_INFO *) fs; uint32_t cnid; /* catalog node ID of the entry (= inum) */ TSK_FS_DIR *fs_dir; TSK_FS_NAME *fs_name; HFS_DIR_OPEN_META_INFO info; tsk_error_reset(); cnid = (uint32_t) a_addr; if (tsk_verbose) fprintf(stderr, "hfs_dir_open_meta: called for directory %" PRIu32 "\n", cnid); if (a_addr < fs->first_inum || a_addr > fs->last_inum) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); tsk_error_set_errstr("hfs_dir_open_meta: Invalid inode value: %" PRIuINUM, a_addr); return TSK_ERR; } else if (a_fs_dir == NULL) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("hfs_dir_open_meta: NULL fs_dir argument given"); return TSK_ERR; } if (tsk_verbose) tsk_fprintf(stderr, "hfs_dir_open_meta: Processing directory %" PRIuINUM "\n", a_addr); fs_dir = *a_fs_dir; if (fs_dir) { tsk_fs_dir_reset(fs_dir); fs_dir->addr = a_addr; } else if ((*a_fs_dir = fs_dir = tsk_fs_dir_alloc(fs, a_addr, 128)) == NULL) { return TSK_ERR; } if ((fs_name = tsk_fs_name_alloc(HFS_MAXNAMLEN, 0)) == NULL) { return TSK_ERR; } info.fs_dir = fs_dir; info.fs_name = fs_name; if ((fs_dir->fs_file = tsk_fs_file_open_meta(fs, NULL, a_addr)) == NULL) { tsk_error_errstr2_concat(" - hfs_dir_open_meta"); tsk_fs_name_free(fs_name); return TSK_ERR; } // if we are listing the root directory, add the Orphan directory and special HFS file entries if (a_addr == fs->root_inum) { int i; for (i = 0; i < 6; i++) { switch (i) { case 0: if (!hfs->has_extents_file) continue; strncpy(fs_name->name, HFS_EXTENTS_FILE_NAME, fs_name->name_size); fs_name->meta_addr = HFS_EXTENTS_FILE_ID; break; case 1: strncpy(fs_name->name, HFS_CATALOG_FILE_NAME, fs_name->name_size); fs_name->meta_addr = HFS_CATALOG_FILE_ID; break; case 2: // Note: the Extents file and the BadBlocks file are really the same. 
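// Its listing is therefore gated on the same has_extents_file flag as case 0.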
if (!hfs->has_extents_file) continue; strncpy(fs_name->name, HFS_BAD_BLOCK_FILE_NAME, fs_name->name_size); fs_name->meta_addr = HFS_BAD_BLOCK_FILE_ID; break; case 3: strncpy(fs_name->name, HFS_ALLOCATION_FILE_NAME, fs_name->name_size); fs_name->meta_addr = HFS_ALLOCATION_FILE_ID; break; case 4: if (!hfs->has_startup_file) continue; strncpy(fs_name->name, HFS_STARTUP_FILE_NAME, fs_name->name_size); fs_name->meta_addr = HFS_STARTUP_FILE_ID; break; case 5: if (!hfs->has_attributes_file) continue; strncpy(fs_name->name, HFS_ATTRIBUTES_FILE_NAME, fs_name->name_size); fs_name->meta_addr = HFS_ATTRIBUTES_FILE_ID; break; /* case 6: strncpy(fs_name->name, HFS_REPAIR_CATALOG_FILE_NAME, fs_name->name_size); fs_name->meta_addr = HFS_REPAIR_CATALOG_FILE_ID; break; case 7: strncpy(fs_name->name, HFS_BOGUS_EXTENT_FILE_NAME, fs_name->name_size); fs_name->meta_addr = HFS_BOGUS_EXTENT_FILE_ID; break; */ } fs_name->type = TSK_FS_NAME_TYPE_REG; fs_name->flags = TSK_FS_NAME_FLAG_ALLOC; if (tsk_fs_dir_add(fs_dir, fs_name)) { tsk_fs_name_free(fs_name); return TSK_ERR; } } } if (hfs_cat_traverse(hfs, &cnid, hfs_dir_open_meta_cb, &info)) { tsk_fs_name_free(fs_name); return TSK_ERR; } tsk_fs_name_free(fs_name); return TSK_OK; } int hfs_name_cmp(TSK_FS_INFO * a_fs_info, const char *s1, const char *s2) { HFS_INFO *hfs = (HFS_INFO *) a_fs_info; if (hfs->is_case_sensitive) return strcmp(s1, s2); else return strcasecmp(s1, s2); } sleuthkit-4.2.0/tsk/fs/hfs_journal.c000644 000765 000024 00000001432 12576320700 020214 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * * This software is distributed under the Common Public License 1.0 */ /** * \file hfs_journal.c * Contains the internal TSK HFS+ journal code -- not included in code by default. */ #include "tsk_fs_i.h" #include "tsk_hfs.h" uint8_t hfs_jopen(TSK_FS_INFO * fs, TSK_INUM_T inum) { tsk_fprintf(stderr, "jopen not implemented for HFS yet"); return 0; } uint8_t hfs_jentry_walk(TSK_FS_INFO * fs, int flags, TSK_FS_JENTRY_WALK_CB action, void *ptr) { tsk_fprintf(stderr, "jentry_walk not implemented for HFS yet"); return 0; } uint8_t hfs_jblk_walk(TSK_FS_INFO * fs, TSK_DADDR_T start, TSK_DADDR_T end, int flags, TSK_FS_JBLK_WALK_CB action, void *ptr) { tsk_fprintf(stderr, "jblk_walk not implemented for HFS yet"); return 0; } sleuthkit-4.2.0/tsk/fs/hfs_unicompare.c000644 000765 000024 00000102743 12576320700 020713 0ustar00carrierstaff000000 000000 /* ** The Sleuth Kit ** ** This software is subject to the IBM Public License ver. 1.0, ** which was displayed prior to download and is included in the readme.txt ** file accompanying the Sleuth Kit files. It may also be requested from: ** Crucial Security Inc. ** 14900 Conference Center Drive ** Chantilly, VA 20151 ** ** Judson Powers [jpowers@atc-nycorp.com] ** Copyright (c) 2008 ATC-NY. All rights reserved. ** This file contains data developed with support from the National ** Institute of Justice, Office of Justice Programs, U.S. Department of Justice. 
*/ /* ** You may distribute the Sleuth Kit, or other software that incorporates ** part of all of the Sleuth Kit, in object code form under a license agreement, ** provided that: ** a) you comply with the terms and conditions of the IBM Public License ** ver 1.0; and ** b) the license agreement ** i) effectively disclaims on behalf of all Contributors all warranties ** and conditions, express and implied, including warranties or ** conditions of title and non-infringement, and implied warranties ** or conditions of merchantability and fitness for a particular ** purpose. ** ii) effectively excludes on behalf of all Contributors liability for ** damages, including direct, indirect, special, incidental and ** consequential damages such as lost profits. ** iii) states that any provisions which differ from IBM Public License ** ver. 1.0 are offered by that Contributor alone and not by any ** other party; and ** iv) states that the source code for the program is available from you, ** and informs licensees how to obtain it in a reasonable manner on or ** through a medium customarily used for software exchange. ** ** When the Sleuth Kit or other software that incorporates part or all of ** the Sleuth Kit is made available in source code form: ** a) it must be made available under IBM Public License ver. 1.0; and ** b) a copy of the IBM Public License ver. 1.0 must be included with ** each copy of the program. */ // Adapted from Apple's Tech Note 1150 // // FastUnicodeCompare - Compare two Unicode strings; produce a relative ordering // // IF RESULT // -------------------------- // str1 < str2 => -1 // str1 = str2 => 0 // str1 > str2 => +1 // // The lower case table starts with 256 entries (one for each of the upper bytes // of the original Unicode char). If that entry is zero, then all characters with // that upper byte are already case folded. If the entry is non-zero, then it is // the _index_ (not byte offset) of the start of the sub-table for the characters // with that upper byte. All ignorable characters are folded to the value zero. // // In pseudocode: // // Let c = source Unicode character // Let table[] = lower case table // // lower = table[highbyte(c)] // if (lower == 0) // lower = c // else // lower = table[lower+lowbyte(c)] // // if (lower == 0) // ignore this character // // To handle ignorable characters, we now need a loop to find the next valid // character. Also, we can't pre-compute the number of characters to compare; // the string length might be larger than the number of non-ignorable characters. // Further, we must be able to handle ignorable characters at any point in the // string, including as the first or last characters. We use a zero value as a // sentinel to detect both end-of-string and ignorable characters. Since the File // Manager doesn't prevent the NUL character (value zero) as part of a filename, // the case mapping table is assumed to map u+0000 to some non-zero value (like // 0xFFFF, which is an invalid Unicode character). // // Pseudocode: // // while (1) { // c1 = GetNextValidChar(str1) // returns zero if at end of string // c2 = GetNextValidChar(str2) // // if (c1 != c2) break // found a difference // // if (c1 == 0) // reached end of string on both // // strings at once? // return 0; // yes, so strings are equal // } // // // When we get here, c1 != c2. So, we just need to determine which one is // // less. 
// if (c1 < c2) // return -1; // else // return 1; // #include "tsk_fs_i.h" #include "tsk_hfs.h" static int hfs_unicode_compare_int(uint16_t endian, const hfs_uni_str * uni1, const hfs_uni_str * uni2); /** * This function compares two unicode strings (as hfs_uni_str) as either case-sensitive * or case-insensitive, based on the case-sensitivity of the file system. The file * system is supplied as a parameter. We need this to compare file names that appear in * the catalog file (b-tree) keys. * * I (Matt S.) think that this is wrong. The two bytes unicode characters should always be * treated as big endian (TSK_BIG_ENDIAN) because they are documented to be "in canonical * order". */ int hfs_unicode_compare(HFS_INFO * hfs, const hfs_uni_str * uni1, const hfs_uni_str * uni2) { if (hfs->is_case_sensitive) { uint16_t l1, l2; const uint8_t *s1, *s2; uint16_t c1, c2; l1 = tsk_getu16(hfs->fs_info.endian, uni1->length); l2 = tsk_getu16(hfs->fs_info.endian, uni2->length); s1 = uni1->unicode; s2 = uni2->unicode; while (1) { if ((l1 == 0) && (l2 == 0)) return 0; if (l1 == 0) return -1; if (l2 == 0) return 1; c1 = tsk_getu16(hfs->fs_info.endian, s1); c2 = tsk_getu16(hfs->fs_info.endian, s2); if (c1 < c2) return -1; if (c1 > c2) return 1; s1 += 2; s2 += 2; --l1; --l2; } return 0; } else return hfs_unicode_compare_int(hfs->fs_info.endian, uni1, uni2); } extern uint16_t gLowerCaseTable[]; /** * This performs the case-insensitive comparison of unicode strings. Here, again, * I think that the two bytes of each unicode character should always be treated as * big-endian, rather than using the file system endian-ness. */ static int hfs_unicode_compare_int(uint16_t endian, const hfs_uni_str * uni1, const hfs_uni_str * uni2) { uint16_t c1, c2; uint16_t temp; uint16_t *lowerCaseTable; const uint8_t *str1 = uni1->unicode; const uint8_t *str2 = uni2->unicode; uint16_t length1 = tsk_getu16(endian, uni1->length); uint16_t length2 = tsk_getu16(endian, uni2->length); lowerCaseTable = gLowerCaseTable; while (1) { // Set default values for c1, c2 in case there are no more valid chars c1 = 0; c2 = 0; // Find next non-ignorable char from str1, or zero if no more while (length1 && c1 == 0) { c1 = tsk_getu16(endian, str1); str1 += 2; --length1; if ((temp = lowerCaseTable[c1 >> 8]) != 0) // is there a subtable // for this upper byte? c1 = lowerCaseTable[temp + (c1 & 0x00FF)]; // yes, so fold the char } // Find next non-ignorable char from str2, or zero if no more while (length2 && c2 == 0) { c2 = tsk_getu16(endian, str2); str2 += 2; --length2; if ((temp = lowerCaseTable[c2 >> 8]) != 0) // is there a subtable // for this upper byte? c2 = lowerCaseTable[temp + (c2 & 0x00FF)]; // yes, so fold the char } if (c1 != c2) // found a difference, so stop looping break; if (c1 == 0) // did we reach the end of both strings at the same time? return 0; // yes, so strings are equal } if (c1 < c2) return -1; else return 1; } /* The lower case table consists of a 256-entry high-byte table followed by some number of 256-entry subtables. The high-byte table contains either an offset to the subtable for characters with that high byte or zero, which means that there are no case mappings or ignored characters in that block. Ignored characters are mapped to zero. 
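As a worked example drawn from the tables below: to fold 'A' (u+0041), the high-byte entry for 0x00 is 0x0100, so the folded value is gLowerCaseTable[0x0100 + 0x41] = 0x0061 ('a'). For u+200C, the high-byte entry for 0x20 is 0x0700 and gLowerCaseTable[0x0700 + 0x0C] = 0x0000, so the character is ignored. For a character such as u+4E2D, the high-byte entry for 0x4E is zero, so the character is used unchanged.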
*/ uint16_t gLowerCaseTable[] = { // High-byte indices ( == 0 iff no case mapping and no ignorables ) /* 0 */ 0x0100, 0x0200, 0x0000, 0x0300, 0x0400, 0x0500, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /* 1 */ 0x0600, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /* 2 */ 0x0700, 0x0800, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /* 3 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /* 4 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /* 5 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /* 6 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /* 7 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /* 8 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /* 9 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /* A */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /* B */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /* C */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /* D */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /* E */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /* F */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0900, 0x0A00, // Table 1 (for high byte 0x00) /* 0 */ 0xFFFF, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000A, 0x000B, 0x000C, 0x000D, 0x000E, 0x000F, /* 1 */ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001A, 0x001B, 0x001C, 0x001D, 0x001E, 0x001F, /* 2 */ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002A, 0x002B, 0x002C, 0x002D, 0x002E, 0x002F, /* 3 */ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003A, 0x003B, 0x003C, 0x003D, 0x003E, 0x003F, /* 4 */ 0x0040, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006A, 0x006B, 0x006C, 0x006D, 0x006E, 0x006F, /* 5 */ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007A, 0x005B, 0x005C, 0x005D, 0x005E, 0x005F, /* 6 */ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006A, 0x006B, 0x006C, 0x006D, 0x006E, 0x006F, /* 7 */ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007A, 0x007B, 0x007C, 0x007D, 0x007E, 0x007F, /* 8 */ 0x0080, 0x0081, 0x0082, 0x0083, 0x0084, 0x0085, 0x0086, 0x0087, 0x0088, 0x0089, 0x008A, 0x008B, 0x008C, 0x008D, 0x008E, 0x008F, /* 9 */ 0x0090, 
0x0091, 0x0092, 0x0093, 0x0094, 0x0095, 0x0096, 0x0097, 0x0098, 0x0099, 0x009A, 0x009B, 0x009C, 0x009D, 0x009E, 0x009F, /* A */ 0x00A0, 0x00A1, 0x00A2, 0x00A3, 0x00A4, 0x00A5, 0x00A6, 0x00A7, 0x00A8, 0x00A9, 0x00AA, 0x00AB, 0x00AC, 0x00AD, 0x00AE, 0x00AF, /* B */ 0x00B0, 0x00B1, 0x00B2, 0x00B3, 0x00B4, 0x00B5, 0x00B6, 0x00B7, 0x00B8, 0x00B9, 0x00BA, 0x00BB, 0x00BC, 0x00BD, 0x00BE, 0x00BF, /* C */ 0x00C0, 0x00C1, 0x00C2, 0x00C3, 0x00C4, 0x00C5, 0x00E6, 0x00C7, 0x00C8, 0x00C9, 0x00CA, 0x00CB, 0x00CC, 0x00CD, 0x00CE, 0x00CF, /* D */ 0x00F0, 0x00D1, 0x00D2, 0x00D3, 0x00D4, 0x00D5, 0x00D6, 0x00D7, 0x00F8, 0x00D9, 0x00DA, 0x00DB, 0x00DC, 0x00DD, 0x00FE, 0x00DF, /* E */ 0x00E0, 0x00E1, 0x00E2, 0x00E3, 0x00E4, 0x00E5, 0x00E6, 0x00E7, 0x00E8, 0x00E9, 0x00EA, 0x00EB, 0x00EC, 0x00ED, 0x00EE, 0x00EF, /* F */ 0x00F0, 0x00F1, 0x00F2, 0x00F3, 0x00F4, 0x00F5, 0x00F6, 0x00F7, 0x00F8, 0x00F9, 0x00FA, 0x00FB, 0x00FC, 0x00FD, 0x00FE, 0x00FF, // Table 2 (for high byte 0x01) /* 0 */ 0x0100, 0x0101, 0x0102, 0x0103, 0x0104, 0x0105, 0x0106, 0x0107, 0x0108, 0x0109, 0x010A, 0x010B, 0x010C, 0x010D, 0x010E, 0x010F, /* 1 */ 0x0111, 0x0111, 0x0112, 0x0113, 0x0114, 0x0115, 0x0116, 0x0117, 0x0118, 0x0119, 0x011A, 0x011B, 0x011C, 0x011D, 0x011E, 0x011F, /* 2 */ 0x0120, 0x0121, 0x0122, 0x0123, 0x0124, 0x0125, 0x0127, 0x0127, 0x0128, 0x0129, 0x012A, 0x012B, 0x012C, 0x012D, 0x012E, 0x012F, /* 3 */ 0x0130, 0x0131, 0x0133, 0x0133, 0x0134, 0x0135, 0x0136, 0x0137, 0x0138, 0x0139, 0x013A, 0x013B, 0x013C, 0x013D, 0x013E, 0x0140, /* 4 */ 0x0140, 0x0142, 0x0142, 0x0143, 0x0144, 0x0145, 0x0146, 0x0147, 0x0148, 0x0149, 0x014B, 0x014B, 0x014C, 0x014D, 0x014E, 0x014F, /* 5 */ 0x0150, 0x0151, 0x0153, 0x0153, 0x0154, 0x0155, 0x0156, 0x0157, 0x0158, 0x0159, 0x015A, 0x015B, 0x015C, 0x015D, 0x015E, 0x015F, /* 6 */ 0x0160, 0x0161, 0x0162, 0x0163, 0x0164, 0x0165, 0x0167, 0x0167, 0x0168, 0x0169, 0x016A, 0x016B, 0x016C, 0x016D, 0x016E, 0x016F, /* 7 */ 0x0170, 0x0171, 0x0172, 0x0173, 0x0174, 0x0175, 0x0176, 0x0177, 0x0178, 0x0179, 0x017A, 0x017B, 0x017C, 0x017D, 0x017E, 0x017F, /* 8 */ 0x0180, 0x0253, 0x0183, 0x0183, 0x0185, 0x0185, 0x0254, 0x0188, 0x0188, 0x0256, 0x0257, 0x018C, 0x018C, 0x018D, 0x01DD, 0x0259, /* 9 */ 0x025B, 0x0192, 0x0192, 0x0260, 0x0263, 0x0195, 0x0269, 0x0268, 0x0199, 0x0199, 0x019A, 0x019B, 0x026F, 0x0272, 0x019E, 0x0275, /* A */ 0x01A0, 0x01A1, 0x01A3, 0x01A3, 0x01A5, 0x01A5, 0x01A6, 0x01A8, 0x01A8, 0x0283, 0x01AA, 0x01AB, 0x01AD, 0x01AD, 0x0288, 0x01AF, /* B */ 0x01B0, 0x028A, 0x028B, 0x01B4, 0x01B4, 0x01B6, 0x01B6, 0x0292, 0x01B9, 0x01B9, 0x01BA, 0x01BB, 0x01BD, 0x01BD, 0x01BE, 0x01BF, /* C */ 0x01C0, 0x01C1, 0x01C2, 0x01C3, 0x01C6, 0x01C6, 0x01C6, 0x01C9, 0x01C9, 0x01C9, 0x01CC, 0x01CC, 0x01CC, 0x01CD, 0x01CE, 0x01CF, /* D */ 0x01D0, 0x01D1, 0x01D2, 0x01D3, 0x01D4, 0x01D5, 0x01D6, 0x01D7, 0x01D8, 0x01D9, 0x01DA, 0x01DB, 0x01DC, 0x01DD, 0x01DE, 0x01DF, /* E */ 0x01E0, 0x01E1, 0x01E2, 0x01E3, 0x01E5, 0x01E5, 0x01E6, 0x01E7, 0x01E8, 0x01E9, 0x01EA, 0x01EB, 0x01EC, 0x01ED, 0x01EE, 0x01EF, /* F */ 0x01F0, 0x01F3, 0x01F3, 0x01F3, 0x01F4, 0x01F5, 0x01F6, 0x01F7, 0x01F8, 0x01F9, 0x01FA, 0x01FB, 0x01FC, 0x01FD, 0x01FE, 0x01FF, // Table 3 (for high byte 0x03) /* 0 */ 0x0300, 0x0301, 0x0302, 0x0303, 0x0304, 0x0305, 0x0306, 0x0307, 0x0308, 0x0309, 0x030A, 0x030B, 0x030C, 0x030D, 0x030E, 0x030F, /* 1 */ 0x0310, 0x0311, 0x0312, 0x0313, 0x0314, 0x0315, 0x0316, 0x0317, 0x0318, 0x0319, 0x031A, 0x031B, 0x031C, 0x031D, 0x031E, 0x031F, /* 2 */ 0x0320, 0x0321, 0x0322, 0x0323, 0x0324, 0x0325, 0x0326, 0x0327, 0x0328, 0x0329, 0x032A, 0x032B, 
0x032C, 0x032D, 0x032E, 0x032F, /* 3 */ 0x0330, 0x0331, 0x0332, 0x0333, 0x0334, 0x0335, 0x0336, 0x0337, 0x0338, 0x0339, 0x033A, 0x033B, 0x033C, 0x033D, 0x033E, 0x033F, /* 4 */ 0x0340, 0x0341, 0x0342, 0x0343, 0x0344, 0x0345, 0x0346, 0x0347, 0x0348, 0x0349, 0x034A, 0x034B, 0x034C, 0x034D, 0x034E, 0x034F, /* 5 */ 0x0350, 0x0351, 0x0352, 0x0353, 0x0354, 0x0355, 0x0356, 0x0357, 0x0358, 0x0359, 0x035A, 0x035B, 0x035C, 0x035D, 0x035E, 0x035F, /* 6 */ 0x0360, 0x0361, 0x0362, 0x0363, 0x0364, 0x0365, 0x0366, 0x0367, 0x0368, 0x0369, 0x036A, 0x036B, 0x036C, 0x036D, 0x036E, 0x036F, /* 7 */ 0x0370, 0x0371, 0x0372, 0x0373, 0x0374, 0x0375, 0x0376, 0x0377, 0x0378, 0x0379, 0x037A, 0x037B, 0x037C, 0x037D, 0x037E, 0x037F, /* 8 */ 0x0380, 0x0381, 0x0382, 0x0383, 0x0384, 0x0385, 0x0386, 0x0387, 0x0388, 0x0389, 0x038A, 0x038B, 0x038C, 0x038D, 0x038E, 0x038F, /* 9 */ 0x0390, 0x03B1, 0x03B2, 0x03B3, 0x03B4, 0x03B5, 0x03B6, 0x03B7, 0x03B8, 0x03B9, 0x03BA, 0x03BB, 0x03BC, 0x03BD, 0x03BE, 0x03BF, /* A */ 0x03C0, 0x03C1, 0x03A2, 0x03C3, 0x03C4, 0x03C5, 0x03C6, 0x03C7, 0x03C8, 0x03C9, 0x03AA, 0x03AB, 0x03AC, 0x03AD, 0x03AE, 0x03AF, /* B */ 0x03B0, 0x03B1, 0x03B2, 0x03B3, 0x03B4, 0x03B5, 0x03B6, 0x03B7, 0x03B8, 0x03B9, 0x03BA, 0x03BB, 0x03BC, 0x03BD, 0x03BE, 0x03BF, /* C */ 0x03C0, 0x03C1, 0x03C2, 0x03C3, 0x03C4, 0x03C5, 0x03C6, 0x03C7, 0x03C8, 0x03C9, 0x03CA, 0x03CB, 0x03CC, 0x03CD, 0x03CE, 0x03CF, /* D */ 0x03D0, 0x03D1, 0x03D2, 0x03D3, 0x03D4, 0x03D5, 0x03D6, 0x03D7, 0x03D8, 0x03D9, 0x03DA, 0x03DB, 0x03DC, 0x03DD, 0x03DE, 0x03DF, /* E */ 0x03E0, 0x03E1, 0x03E3, 0x03E3, 0x03E5, 0x03E5, 0x03E7, 0x03E7, 0x03E9, 0x03E9, 0x03EB, 0x03EB, 0x03ED, 0x03ED, 0x03EF, 0x03EF, /* F */ 0x03F0, 0x03F1, 0x03F2, 0x03F3, 0x03F4, 0x03F5, 0x03F6, 0x03F7, 0x03F8, 0x03F9, 0x03FA, 0x03FB, 0x03FC, 0x03FD, 0x03FE, 0x03FF, // Table 4 (for high byte 0x04) /* 0 */ 0x0400, 0x0401, 0x0452, 0x0403, 0x0454, 0x0455, 0x0456, 0x0407, 0x0458, 0x0459, 0x045A, 0x045B, 0x040C, 0x040D, 0x040E, 0x045F, /* 1 */ 0x0430, 0x0431, 0x0432, 0x0433, 0x0434, 0x0435, 0x0436, 0x0437, 0x0438, 0x0419, 0x043A, 0x043B, 0x043C, 0x043D, 0x043E, 0x043F, /* 2 */ 0x0440, 0x0441, 0x0442, 0x0443, 0x0444, 0x0445, 0x0446, 0x0447, 0x0448, 0x0449, 0x044A, 0x044B, 0x044C, 0x044D, 0x044E, 0x044F, /* 3 */ 0x0430, 0x0431, 0x0432, 0x0433, 0x0434, 0x0435, 0x0436, 0x0437, 0x0438, 0x0439, 0x043A, 0x043B, 0x043C, 0x043D, 0x043E, 0x043F, /* 4 */ 0x0440, 0x0441, 0x0442, 0x0443, 0x0444, 0x0445, 0x0446, 0x0447, 0x0448, 0x0449, 0x044A, 0x044B, 0x044C, 0x044D, 0x044E, 0x044F, /* 5 */ 0x0450, 0x0451, 0x0452, 0x0453, 0x0454, 0x0455, 0x0456, 0x0457, 0x0458, 0x0459, 0x045A, 0x045B, 0x045C, 0x045D, 0x045E, 0x045F, /* 6 */ 0x0461, 0x0461, 0x0463, 0x0463, 0x0465, 0x0465, 0x0467, 0x0467, 0x0469, 0x0469, 0x046B, 0x046B, 0x046D, 0x046D, 0x046F, 0x046F, /* 7 */ 0x0471, 0x0471, 0x0473, 0x0473, 0x0475, 0x0475, 0x0476, 0x0477, 0x0479, 0x0479, 0x047B, 0x047B, 0x047D, 0x047D, 0x047F, 0x047F, /* 8 */ 0x0481, 0x0481, 0x0482, 0x0483, 0x0484, 0x0485, 0x0486, 0x0487, 0x0488, 0x0489, 0x048A, 0x048B, 0x048C, 0x048D, 0x048E, 0x048F, /* 9 */ 0x0491, 0x0491, 0x0493, 0x0493, 0x0495, 0x0495, 0x0497, 0x0497, 0x0499, 0x0499, 0x049B, 0x049B, 0x049D, 0x049D, 0x049F, 0x049F, /* A */ 0x04A1, 0x04A1, 0x04A3, 0x04A3, 0x04A5, 0x04A5, 0x04A7, 0x04A7, 0x04A9, 0x04A9, 0x04AB, 0x04AB, 0x04AD, 0x04AD, 0x04AF, 0x04AF, /* B */ 0x04B1, 0x04B1, 0x04B3, 0x04B3, 0x04B5, 0x04B5, 0x04B7, 0x04B7, 0x04B9, 0x04B9, 0x04BB, 0x04BB, 0x04BD, 0x04BD, 0x04BF, 0x04BF, /* C */ 0x04C0, 0x04C1, 0x04C2, 0x04C4, 0x04C4, 0x04C5, 0x04C6, 0x04C8, 0x04C8, 0x04C9, 
0x04CA, 0x04CC, 0x04CC, 0x04CD, 0x04CE, 0x04CF, /* D */ 0x04D0, 0x04D1, 0x04D2, 0x04D3, 0x04D4, 0x04D5, 0x04D6, 0x04D7, 0x04D8, 0x04D9, 0x04DA, 0x04DB, 0x04DC, 0x04DD, 0x04DE, 0x04DF, /* E */ 0x04E0, 0x04E1, 0x04E2, 0x04E3, 0x04E4, 0x04E5, 0x04E6, 0x04E7, 0x04E8, 0x04E9, 0x04EA, 0x04EB, 0x04EC, 0x04ED, 0x04EE, 0x04EF, /* F */ 0x04F0, 0x04F1, 0x04F2, 0x04F3, 0x04F4, 0x04F5, 0x04F6, 0x04F7, 0x04F8, 0x04F9, 0x04FA, 0x04FB, 0x04FC, 0x04FD, 0x04FE, 0x04FF, // Table 5 (for high byte 0x05) /* 0 */ 0x0500, 0x0501, 0x0502, 0x0503, 0x0504, 0x0505, 0x0506, 0x0507, 0x0508, 0x0509, 0x050A, 0x050B, 0x050C, 0x050D, 0x050E, 0x050F, /* 1 */ 0x0510, 0x0511, 0x0512, 0x0513, 0x0514, 0x0515, 0x0516, 0x0517, 0x0518, 0x0519, 0x051A, 0x051B, 0x051C, 0x051D, 0x051E, 0x051F, /* 2 */ 0x0520, 0x0521, 0x0522, 0x0523, 0x0524, 0x0525, 0x0526, 0x0527, 0x0528, 0x0529, 0x052A, 0x052B, 0x052C, 0x052D, 0x052E, 0x052F, /* 3 */ 0x0530, 0x0561, 0x0562, 0x0563, 0x0564, 0x0565, 0x0566, 0x0567, 0x0568, 0x0569, 0x056A, 0x056B, 0x056C, 0x056D, 0x056E, 0x056F, /* 4 */ 0x0570, 0x0571, 0x0572, 0x0573, 0x0574, 0x0575, 0x0576, 0x0577, 0x0578, 0x0579, 0x057A, 0x057B, 0x057C, 0x057D, 0x057E, 0x057F, /* 5 */ 0x0580, 0x0581, 0x0582, 0x0583, 0x0584, 0x0585, 0x0586, 0x0557, 0x0558, 0x0559, 0x055A, 0x055B, 0x055C, 0x055D, 0x055E, 0x055F, /* 6 */ 0x0560, 0x0561, 0x0562, 0x0563, 0x0564, 0x0565, 0x0566, 0x0567, 0x0568, 0x0569, 0x056A, 0x056B, 0x056C, 0x056D, 0x056E, 0x056F, /* 7 */ 0x0570, 0x0571, 0x0572, 0x0573, 0x0574, 0x0575, 0x0576, 0x0577, 0x0578, 0x0579, 0x057A, 0x057B, 0x057C, 0x057D, 0x057E, 0x057F, /* 8 */ 0x0580, 0x0581, 0x0582, 0x0583, 0x0584, 0x0585, 0x0586, 0x0587, 0x0588, 0x0589, 0x058A, 0x058B, 0x058C, 0x058D, 0x058E, 0x058F, /* 9 */ 0x0590, 0x0591, 0x0592, 0x0593, 0x0594, 0x0595, 0x0596, 0x0597, 0x0598, 0x0599, 0x059A, 0x059B, 0x059C, 0x059D, 0x059E, 0x059F, /* A */ 0x05A0, 0x05A1, 0x05A2, 0x05A3, 0x05A4, 0x05A5, 0x05A6, 0x05A7, 0x05A8, 0x05A9, 0x05AA, 0x05AB, 0x05AC, 0x05AD, 0x05AE, 0x05AF, /* B */ 0x05B0, 0x05B1, 0x05B2, 0x05B3, 0x05B4, 0x05B5, 0x05B6, 0x05B7, 0x05B8, 0x05B9, 0x05BA, 0x05BB, 0x05BC, 0x05BD, 0x05BE, 0x05BF, /* C */ 0x05C0, 0x05C1, 0x05C2, 0x05C3, 0x05C4, 0x05C5, 0x05C6, 0x05C7, 0x05C8, 0x05C9, 0x05CA, 0x05CB, 0x05CC, 0x05CD, 0x05CE, 0x05CF, /* D */ 0x05D0, 0x05D1, 0x05D2, 0x05D3, 0x05D4, 0x05D5, 0x05D6, 0x05D7, 0x05D8, 0x05D9, 0x05DA, 0x05DB, 0x05DC, 0x05DD, 0x05DE, 0x05DF, /* E */ 0x05E0, 0x05E1, 0x05E2, 0x05E3, 0x05E4, 0x05E5, 0x05E6, 0x05E7, 0x05E8, 0x05E9, 0x05EA, 0x05EB, 0x05EC, 0x05ED, 0x05EE, 0x05EF, /* F */ 0x05F0, 0x05F1, 0x05F2, 0x05F3, 0x05F4, 0x05F5, 0x05F6, 0x05F7, 0x05F8, 0x05F9, 0x05FA, 0x05FB, 0x05FC, 0x05FD, 0x05FE, 0x05FF, // Table 6 (for high byte 0x10) /* 0 */ 0x1000, 0x1001, 0x1002, 0x1003, 0x1004, 0x1005, 0x1006, 0x1007, 0x1008, 0x1009, 0x100A, 0x100B, 0x100C, 0x100D, 0x100E, 0x100F, /* 1 */ 0x1010, 0x1011, 0x1012, 0x1013, 0x1014, 0x1015, 0x1016, 0x1017, 0x1018, 0x1019, 0x101A, 0x101B, 0x101C, 0x101D, 0x101E, 0x101F, /* 2 */ 0x1020, 0x1021, 0x1022, 0x1023, 0x1024, 0x1025, 0x1026, 0x1027, 0x1028, 0x1029, 0x102A, 0x102B, 0x102C, 0x102D, 0x102E, 0x102F, /* 3 */ 0x1030, 0x1031, 0x1032, 0x1033, 0x1034, 0x1035, 0x1036, 0x1037, 0x1038, 0x1039, 0x103A, 0x103B, 0x103C, 0x103D, 0x103E, 0x103F, /* 4 */ 0x1040, 0x1041, 0x1042, 0x1043, 0x1044, 0x1045, 0x1046, 0x1047, 0x1048, 0x1049, 0x104A, 0x104B, 0x104C, 0x104D, 0x104E, 0x104F, /* 5 */ 0x1050, 0x1051, 0x1052, 0x1053, 0x1054, 0x1055, 0x1056, 0x1057, 0x1058, 0x1059, 0x105A, 0x105B, 0x105C, 0x105D, 0x105E, 0x105F, /* 6 */ 0x1060, 0x1061, 0x1062, 0x1063, 
0x1064, 0x1065, 0x1066, 0x1067, 0x1068, 0x1069, 0x106A, 0x106B, 0x106C, 0x106D, 0x106E, 0x106F, /* 7 */ 0x1070, 0x1071, 0x1072, 0x1073, 0x1074, 0x1075, 0x1076, 0x1077, 0x1078, 0x1079, 0x107A, 0x107B, 0x107C, 0x107D, 0x107E, 0x107F, /* 8 */ 0x1080, 0x1081, 0x1082, 0x1083, 0x1084, 0x1085, 0x1086, 0x1087, 0x1088, 0x1089, 0x108A, 0x108B, 0x108C, 0x108D, 0x108E, 0x108F, /* 9 */ 0x1090, 0x1091, 0x1092, 0x1093, 0x1094, 0x1095, 0x1096, 0x1097, 0x1098, 0x1099, 0x109A, 0x109B, 0x109C, 0x109D, 0x109E, 0x109F, /* A */ 0x10D0, 0x10D1, 0x10D2, 0x10D3, 0x10D4, 0x10D5, 0x10D6, 0x10D7, 0x10D8, 0x10D9, 0x10DA, 0x10DB, 0x10DC, 0x10DD, 0x10DE, 0x10DF, /* B */ 0x10E0, 0x10E1, 0x10E2, 0x10E3, 0x10E4, 0x10E5, 0x10E6, 0x10E7, 0x10E8, 0x10E9, 0x10EA, 0x10EB, 0x10EC, 0x10ED, 0x10EE, 0x10EF, /* C */ 0x10F0, 0x10F1, 0x10F2, 0x10F3, 0x10F4, 0x10F5, 0x10C6, 0x10C7, 0x10C8, 0x10C9, 0x10CA, 0x10CB, 0x10CC, 0x10CD, 0x10CE, 0x10CF, /* D */ 0x10D0, 0x10D1, 0x10D2, 0x10D3, 0x10D4, 0x10D5, 0x10D6, 0x10D7, 0x10D8, 0x10D9, 0x10DA, 0x10DB, 0x10DC, 0x10DD, 0x10DE, 0x10DF, /* E */ 0x10E0, 0x10E1, 0x10E2, 0x10E3, 0x10E4, 0x10E5, 0x10E6, 0x10E7, 0x10E8, 0x10E9, 0x10EA, 0x10EB, 0x10EC, 0x10ED, 0x10EE, 0x10EF, /* F */ 0x10F0, 0x10F1, 0x10F2, 0x10F3, 0x10F4, 0x10F5, 0x10F6, 0x10F7, 0x10F8, 0x10F9, 0x10FA, 0x10FB, 0x10FC, 0x10FD, 0x10FE, 0x10FF, // Table 7 (for high byte 0x20) /* 0 */ 0x2000, 0x2001, 0x2002, 0x2003, 0x2004, 0x2005, 0x2006, 0x2007, 0x2008, 0x2009, 0x200A, 0x200B, 0x0000, 0x0000, 0x0000, 0x0000, /* 1 */ 0x2010, 0x2011, 0x2012, 0x2013, 0x2014, 0x2015, 0x2016, 0x2017, 0x2018, 0x2019, 0x201A, 0x201B, 0x201C, 0x201D, 0x201E, 0x201F, /* 2 */ 0x2020, 0x2021, 0x2022, 0x2023, 0x2024, 0x2025, 0x2026, 0x2027, 0x2028, 0x2029, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x202F, /* 3 */ 0x2030, 0x2031, 0x2032, 0x2033, 0x2034, 0x2035, 0x2036, 0x2037, 0x2038, 0x2039, 0x203A, 0x203B, 0x203C, 0x203D, 0x203E, 0x203F, /* 4 */ 0x2040, 0x2041, 0x2042, 0x2043, 0x2044, 0x2045, 0x2046, 0x2047, 0x2048, 0x2049, 0x204A, 0x204B, 0x204C, 0x204D, 0x204E, 0x204F, /* 5 */ 0x2050, 0x2051, 0x2052, 0x2053, 0x2054, 0x2055, 0x2056, 0x2057, 0x2058, 0x2059, 0x205A, 0x205B, 0x205C, 0x205D, 0x205E, 0x205F, /* 6 */ 0x2060, 0x2061, 0x2062, 0x2063, 0x2064, 0x2065, 0x2066, 0x2067, 0x2068, 0x2069, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, /* 7 */ 0x2070, 0x2071, 0x2072, 0x2073, 0x2074, 0x2075, 0x2076, 0x2077, 0x2078, 0x2079, 0x207A, 0x207B, 0x207C, 0x207D, 0x207E, 0x207F, /* 8 */ 0x2080, 0x2081, 0x2082, 0x2083, 0x2084, 0x2085, 0x2086, 0x2087, 0x2088, 0x2089, 0x208A, 0x208B, 0x208C, 0x208D, 0x208E, 0x208F, /* 9 */ 0x2090, 0x2091, 0x2092, 0x2093, 0x2094, 0x2095, 0x2096, 0x2097, 0x2098, 0x2099, 0x209A, 0x209B, 0x209C, 0x209D, 0x209E, 0x209F, /* A */ 0x20A0, 0x20A1, 0x20A2, 0x20A3, 0x20A4, 0x20A5, 0x20A6, 0x20A7, 0x20A8, 0x20A9, 0x20AA, 0x20AB, 0x20AC, 0x20AD, 0x20AE, 0x20AF, /* B */ 0x20B0, 0x20B1, 0x20B2, 0x20B3, 0x20B4, 0x20B5, 0x20B6, 0x20B7, 0x20B8, 0x20B9, 0x20BA, 0x20BB, 0x20BC, 0x20BD, 0x20BE, 0x20BF, /* C */ 0x20C0, 0x20C1, 0x20C2, 0x20C3, 0x20C4, 0x20C5, 0x20C6, 0x20C7, 0x20C8, 0x20C9, 0x20CA, 0x20CB, 0x20CC, 0x20CD, 0x20CE, 0x20CF, /* D */ 0x20D0, 0x20D1, 0x20D2, 0x20D3, 0x20D4, 0x20D5, 0x20D6, 0x20D7, 0x20D8, 0x20D9, 0x20DA, 0x20DB, 0x20DC, 0x20DD, 0x20DE, 0x20DF, /* E */ 0x20E0, 0x20E1, 0x20E2, 0x20E3, 0x20E4, 0x20E5, 0x20E6, 0x20E7, 0x20E8, 0x20E9, 0x20EA, 0x20EB, 0x20EC, 0x20ED, 0x20EE, 0x20EF, /* F */ 0x20F0, 0x20F1, 0x20F2, 0x20F3, 0x20F4, 0x20F5, 0x20F6, 0x20F7, 0x20F8, 0x20F9, 0x20FA, 0x20FB, 0x20FC, 0x20FD, 0x20FE, 0x20FF, // Table 8 (for high byte 
0x21) /* 0 */ 0x2100, 0x2101, 0x2102, 0x2103, 0x2104, 0x2105, 0x2106, 0x2107, 0x2108, 0x2109, 0x210A, 0x210B, 0x210C, 0x210D, 0x210E, 0x210F, /* 1 */ 0x2110, 0x2111, 0x2112, 0x2113, 0x2114, 0x2115, 0x2116, 0x2117, 0x2118, 0x2119, 0x211A, 0x211B, 0x211C, 0x211D, 0x211E, 0x211F, /* 2 */ 0x2120, 0x2121, 0x2122, 0x2123, 0x2124, 0x2125, 0x2126, 0x2127, 0x2128, 0x2129, 0x212A, 0x212B, 0x212C, 0x212D, 0x212E, 0x212F, /* 3 */ 0x2130, 0x2131, 0x2132, 0x2133, 0x2134, 0x2135, 0x2136, 0x2137, 0x2138, 0x2139, 0x213A, 0x213B, 0x213C, 0x213D, 0x213E, 0x213F, /* 4 */ 0x2140, 0x2141, 0x2142, 0x2143, 0x2144, 0x2145, 0x2146, 0x2147, 0x2148, 0x2149, 0x214A, 0x214B, 0x214C, 0x214D, 0x214E, 0x214F, /* 5 */ 0x2150, 0x2151, 0x2152, 0x2153, 0x2154, 0x2155, 0x2156, 0x2157, 0x2158, 0x2159, 0x215A, 0x215B, 0x215C, 0x215D, 0x215E, 0x215F, /* 6 */ 0x2170, 0x2171, 0x2172, 0x2173, 0x2174, 0x2175, 0x2176, 0x2177, 0x2178, 0x2179, 0x217A, 0x217B, 0x217C, 0x217D, 0x217E, 0x217F, /* 7 */ 0x2170, 0x2171, 0x2172, 0x2173, 0x2174, 0x2175, 0x2176, 0x2177, 0x2178, 0x2179, 0x217A, 0x217B, 0x217C, 0x217D, 0x217E, 0x217F, /* 8 */ 0x2180, 0x2181, 0x2182, 0x2183, 0x2184, 0x2185, 0x2186, 0x2187, 0x2188, 0x2189, 0x218A, 0x218B, 0x218C, 0x218D, 0x218E, 0x218F, /* 9 */ 0x2190, 0x2191, 0x2192, 0x2193, 0x2194, 0x2195, 0x2196, 0x2197, 0x2198, 0x2199, 0x219A, 0x219B, 0x219C, 0x219D, 0x219E, 0x219F, /* A */ 0x21A0, 0x21A1, 0x21A2, 0x21A3, 0x21A4, 0x21A5, 0x21A6, 0x21A7, 0x21A8, 0x21A9, 0x21AA, 0x21AB, 0x21AC, 0x21AD, 0x21AE, 0x21AF, /* B */ 0x21B0, 0x21B1, 0x21B2, 0x21B3, 0x21B4, 0x21B5, 0x21B6, 0x21B7, 0x21B8, 0x21B9, 0x21BA, 0x21BB, 0x21BC, 0x21BD, 0x21BE, 0x21BF, /* C */ 0x21C0, 0x21C1, 0x21C2, 0x21C3, 0x21C4, 0x21C5, 0x21C6, 0x21C7, 0x21C8, 0x21C9, 0x21CA, 0x21CB, 0x21CC, 0x21CD, 0x21CE, 0x21CF, /* D */ 0x21D0, 0x21D1, 0x21D2, 0x21D3, 0x21D4, 0x21D5, 0x21D6, 0x21D7, 0x21D8, 0x21D9, 0x21DA, 0x21DB, 0x21DC, 0x21DD, 0x21DE, 0x21DF, /* E */ 0x21E0, 0x21E1, 0x21E2, 0x21E3, 0x21E4, 0x21E5, 0x21E6, 0x21E7, 0x21E8, 0x21E9, 0x21EA, 0x21EB, 0x21EC, 0x21ED, 0x21EE, 0x21EF, /* F */ 0x21F0, 0x21F1, 0x21F2, 0x21F3, 0x21F4, 0x21F5, 0x21F6, 0x21F7, 0x21F8, 0x21F9, 0x21FA, 0x21FB, 0x21FC, 0x21FD, 0x21FE, 0x21FF, // Table 9 (for high byte 0xFE) /* 0 */ 0xFE00, 0xFE01, 0xFE02, 0xFE03, 0xFE04, 0xFE05, 0xFE06, 0xFE07, 0xFE08, 0xFE09, 0xFE0A, 0xFE0B, 0xFE0C, 0xFE0D, 0xFE0E, 0xFE0F, /* 1 */ 0xFE10, 0xFE11, 0xFE12, 0xFE13, 0xFE14, 0xFE15, 0xFE16, 0xFE17, 0xFE18, 0xFE19, 0xFE1A, 0xFE1B, 0xFE1C, 0xFE1D, 0xFE1E, 0xFE1F, /* 2 */ 0xFE20, 0xFE21, 0xFE22, 0xFE23, 0xFE24, 0xFE25, 0xFE26, 0xFE27, 0xFE28, 0xFE29, 0xFE2A, 0xFE2B, 0xFE2C, 0xFE2D, 0xFE2E, 0xFE2F, /* 3 */ 0xFE30, 0xFE31, 0xFE32, 0xFE33, 0xFE34, 0xFE35, 0xFE36, 0xFE37, 0xFE38, 0xFE39, 0xFE3A, 0xFE3B, 0xFE3C, 0xFE3D, 0xFE3E, 0xFE3F, /* 4 */ 0xFE40, 0xFE41, 0xFE42, 0xFE43, 0xFE44, 0xFE45, 0xFE46, 0xFE47, 0xFE48, 0xFE49, 0xFE4A, 0xFE4B, 0xFE4C, 0xFE4D, 0xFE4E, 0xFE4F, /* 5 */ 0xFE50, 0xFE51, 0xFE52, 0xFE53, 0xFE54, 0xFE55, 0xFE56, 0xFE57, 0xFE58, 0xFE59, 0xFE5A, 0xFE5B, 0xFE5C, 0xFE5D, 0xFE5E, 0xFE5F, /* 6 */ 0xFE60, 0xFE61, 0xFE62, 0xFE63, 0xFE64, 0xFE65, 0xFE66, 0xFE67, 0xFE68, 0xFE69, 0xFE6A, 0xFE6B, 0xFE6C, 0xFE6D, 0xFE6E, 0xFE6F, /* 7 */ 0xFE70, 0xFE71, 0xFE72, 0xFE73, 0xFE74, 0xFE75, 0xFE76, 0xFE77, 0xFE78, 0xFE79, 0xFE7A, 0xFE7B, 0xFE7C, 0xFE7D, 0xFE7E, 0xFE7F, /* 8 */ 0xFE80, 0xFE81, 0xFE82, 0xFE83, 0xFE84, 0xFE85, 0xFE86, 0xFE87, 0xFE88, 0xFE89, 0xFE8A, 0xFE8B, 0xFE8C, 0xFE8D, 0xFE8E, 0xFE8F, /* 9 */ 0xFE90, 0xFE91, 0xFE92, 0xFE93, 0xFE94, 0xFE95, 0xFE96, 0xFE97, 0xFE98, 0xFE99, 0xFE9A, 0xFE9B, 0xFE9C, 
0xFE9D, 0xFE9E, 0xFE9F, /* A */ 0xFEA0, 0xFEA1, 0xFEA2, 0xFEA3, 0xFEA4, 0xFEA5, 0xFEA6, 0xFEA7, 0xFEA8, 0xFEA9, 0xFEAA, 0xFEAB, 0xFEAC, 0xFEAD, 0xFEAE, 0xFEAF, /* B */ 0xFEB0, 0xFEB1, 0xFEB2, 0xFEB3, 0xFEB4, 0xFEB5, 0xFEB6, 0xFEB7, 0xFEB8, 0xFEB9, 0xFEBA, 0xFEBB, 0xFEBC, 0xFEBD, 0xFEBE, 0xFEBF, /* C */ 0xFEC0, 0xFEC1, 0xFEC2, 0xFEC3, 0xFEC4, 0xFEC5, 0xFEC6, 0xFEC7, 0xFEC8, 0xFEC9, 0xFECA, 0xFECB, 0xFECC, 0xFECD, 0xFECE, 0xFECF, /* D */ 0xFED0, 0xFED1, 0xFED2, 0xFED3, 0xFED4, 0xFED5, 0xFED6, 0xFED7, 0xFED8, 0xFED9, 0xFEDA, 0xFEDB, 0xFEDC, 0xFEDD, 0xFEDE, 0xFEDF, /* E */ 0xFEE0, 0xFEE1, 0xFEE2, 0xFEE3, 0xFEE4, 0xFEE5, 0xFEE6, 0xFEE7, 0xFEE8, 0xFEE9, 0xFEEA, 0xFEEB, 0xFEEC, 0xFEED, 0xFEEE, 0xFEEF, /* F */ 0xFEF0, 0xFEF1, 0xFEF2, 0xFEF3, 0xFEF4, 0xFEF5, 0xFEF6, 0xFEF7, 0xFEF8, 0xFEF9, 0xFEFA, 0xFEFB, 0xFEFC, 0xFEFD, 0xFEFE, 0x0000, // Table 10 (for high byte 0xFF) /* 0 */ 0xFF00, 0xFF01, 0xFF02, 0xFF03, 0xFF04, 0xFF05, 0xFF06, 0xFF07, 0xFF08, 0xFF09, 0xFF0A, 0xFF0B, 0xFF0C, 0xFF0D, 0xFF0E, 0xFF0F, /* 1 */ 0xFF10, 0xFF11, 0xFF12, 0xFF13, 0xFF14, 0xFF15, 0xFF16, 0xFF17, 0xFF18, 0xFF19, 0xFF1A, 0xFF1B, 0xFF1C, 0xFF1D, 0xFF1E, 0xFF1F, /* 2 */ 0xFF20, 0xFF41, 0xFF42, 0xFF43, 0xFF44, 0xFF45, 0xFF46, 0xFF47, 0xFF48, 0xFF49, 0xFF4A, 0xFF4B, 0xFF4C, 0xFF4D, 0xFF4E, 0xFF4F, /* 3 */ 0xFF50, 0xFF51, 0xFF52, 0xFF53, 0xFF54, 0xFF55, 0xFF56, 0xFF57, 0xFF58, 0xFF59, 0xFF5A, 0xFF3B, 0xFF3C, 0xFF3D, 0xFF3E, 0xFF3F, /* 4 */ 0xFF40, 0xFF41, 0xFF42, 0xFF43, 0xFF44, 0xFF45, 0xFF46, 0xFF47, 0xFF48, 0xFF49, 0xFF4A, 0xFF4B, 0xFF4C, 0xFF4D, 0xFF4E, 0xFF4F, /* 5 */ 0xFF50, 0xFF51, 0xFF52, 0xFF53, 0xFF54, 0xFF55, 0xFF56, 0xFF57, 0xFF58, 0xFF59, 0xFF5A, 0xFF5B, 0xFF5C, 0xFF5D, 0xFF5E, 0xFF5F, /* 6 */ 0xFF60, 0xFF61, 0xFF62, 0xFF63, 0xFF64, 0xFF65, 0xFF66, 0xFF67, 0xFF68, 0xFF69, 0xFF6A, 0xFF6B, 0xFF6C, 0xFF6D, 0xFF6E, 0xFF6F, /* 7 */ 0xFF70, 0xFF71, 0xFF72, 0xFF73, 0xFF74, 0xFF75, 0xFF76, 0xFF77, 0xFF78, 0xFF79, 0xFF7A, 0xFF7B, 0xFF7C, 0xFF7D, 0xFF7E, 0xFF7F, /* 8 */ 0xFF80, 0xFF81, 0xFF82, 0xFF83, 0xFF84, 0xFF85, 0xFF86, 0xFF87, 0xFF88, 0xFF89, 0xFF8A, 0xFF8B, 0xFF8C, 0xFF8D, 0xFF8E, 0xFF8F, /* 9 */ 0xFF90, 0xFF91, 0xFF92, 0xFF93, 0xFF94, 0xFF95, 0xFF96, 0xFF97, 0xFF98, 0xFF99, 0xFF9A, 0xFF9B, 0xFF9C, 0xFF9D, 0xFF9E, 0xFF9F, /* A */ 0xFFA0, 0xFFA1, 0xFFA2, 0xFFA3, 0xFFA4, 0xFFA5, 0xFFA6, 0xFFA7, 0xFFA8, 0xFFA9, 0xFFAA, 0xFFAB, 0xFFAC, 0xFFAD, 0xFFAE, 0xFFAF, /* B */ 0xFFB0, 0xFFB1, 0xFFB2, 0xFFB3, 0xFFB4, 0xFFB5, 0xFFB6, 0xFFB7, 0xFFB8, 0xFFB9, 0xFFBA, 0xFFBB, 0xFFBC, 0xFFBD, 0xFFBE, 0xFFBF, /* C */ 0xFFC0, 0xFFC1, 0xFFC2, 0xFFC3, 0xFFC4, 0xFFC5, 0xFFC6, 0xFFC7, 0xFFC8, 0xFFC9, 0xFFCA, 0xFFCB, 0xFFCC, 0xFFCD, 0xFFCE, 0xFFCF, /* D */ 0xFFD0, 0xFFD1, 0xFFD2, 0xFFD3, 0xFFD4, 0xFFD5, 0xFFD6, 0xFFD7, 0xFFD8, 0xFFD9, 0xFFDA, 0xFFDB, 0xFFDC, 0xFFDD, 0xFFDE, 0xFFDF, /* E */ 0xFFE0, 0xFFE1, 0xFFE2, 0xFFE3, 0xFFE4, 0xFFE5, 0xFFE6, 0xFFE7, 0xFFE8, 0xFFE9, 0xFFEA, 0xFFEB, 0xFFEC, 0xFFED, 0xFFEE, 0xFFEF, /* F */ 0xFFF0, 0xFFF1, 0xFFF2, 0xFFF3, 0xFFF4, 0xFFF5, 0xFFF6, 0xFFF7, 0xFFF8, 0xFFF9, 0xFFFA, 0xFFFB, 0xFFFC, 0xFFFD, 0xFFFE, 0xFFFF, }; sleuthkit-4.2.0/tsk/fs/icat_lib.c000644 000765 000024 00000004611 12576320700 017452 0ustar00carrierstaff000000 000000 /* ** icat_lib ** The Sleuth Kit ** ** Brian Carrier [carrier sleuthkit [dot] org] ** Copyright (c) 2006-2011 Brian Carrier, Basis Technology. All Rights reserved ** Copyright (c) 2003-2005 Brian Carrier. All rights reserved ** ** TASK ** Copyright (c) 2002 Brian Carrier, @stake Inc. 
All rights reserved ** ** Copyright (c) 1997,1998,1999, International Business Machines ** Corporation and others. All Rights Reserved. * LICENSE * This software is distributed under the IBM Public License. * AUTHOR(S) * Wietse Venema * IBM T.J. Watson Research * P.O. Box 704 * Yorktown Heights, NY 10598, USA --*/ /** * \file icat_lib.c * Contains the library API functions used by the TSK icat command * line tool. */ #include "tsk_fs_i.h" /* Call back action for file_walk */ static TSK_WALK_RET_ENUM icat_action(TSK_FS_FILE * fs_file, TSK_OFF_T a_off, TSK_DADDR_T addr, char *buf, size_t size, TSK_FS_BLOCK_FLAG_ENUM flags, void *ptr) { if (size == 0) return TSK_WALK_CONT; if (fwrite(buf, size, 1, stdout) != 1) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_WRITE); tsk_error_set_errstr("icat_action: error writing to stdout: %s", strerror(errno)); return TSK_WALK_ERROR; } return TSK_WALK_CONT; } /* Return 1 on error and 0 on success */ uint8_t tsk_fs_icat(TSK_FS_INFO * fs, TSK_INUM_T inum, TSK_FS_ATTR_TYPE_ENUM type, uint8_t type_used, uint16_t id, uint8_t id_used, TSK_FS_FILE_WALK_FLAG_ENUM flags) { TSK_FS_FILE *fs_file; #ifdef TSK_WIN32 if (-1 == _setmode(_fileno(stdout), _O_BINARY)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_WRITE); tsk_error_set_errstr ("icat_lib: error setting stdout to binary: %s", strerror(errno)); return 1; } #endif fs_file = tsk_fs_file_open_meta(fs, NULL, inum); if (!fs_file) { return 1; } if (type_used) { if (id_used == 0) { flags |= TSK_FS_FILE_WALK_FLAG_NOID; } if (tsk_fs_file_walk_type(fs_file, type, id, flags, icat_action, NULL)) { tsk_fs_file_close(fs_file); return 1; } } else { if (tsk_fs_file_walk(fs_file, flags, icat_action, NULL)) { tsk_fs_file_close(fs_file); return 1; } } tsk_fs_file_close(fs_file); return 0; } sleuthkit-4.2.0/tsk/fs/ifind_lib.c000644 000765 000024 00000044154 12576320700 017631 0ustar00carrierstaff000000 000000 /* ** ifind (inode find) ** The Sleuth Kit ** ** Given an image and block number, identify which inode it is used by ** ** Brian Carrier [carrier sleuthkit [dot] org] ** Copyright (c) 2006-2011 Brian Carrier, Basis Technology. All Rights reserved ** Copyright (c) 2003-2005 Brian Carrier. All rights reserved ** ** TASK ** Copyright (c) 2002 Brian Carrier, @stake Inc. All rights reserved ** ** TCTUTILs ** Copyright (c) 2001 Brian Carrier. All rights reserved ** ** ** This software is distributed under the Common Public License 1.0 ** */ /** * \file ifind_lib.c * Contains the library API functions used by the TSK ifind command * line tool. */ #include "tsk_fs_i.h" #include "tsk_hfs.h" /******************************************************************************* * Find an unallocated NTFS MFT entry based on its parent directory */ typedef struct { TSK_INUM_T parinode; TSK_FS_IFIND_FLAG_ENUM flags; uint8_t found; } IFIND_PAR_DATA; /* inode walk call back for tsk_fs_ifind_par to find unallocated files * based on parent directory */ static TSK_WALK_RET_ENUM ifind_par_act(TSK_FS_FILE * fs_file, void *ptr) { IFIND_PAR_DATA *data = (IFIND_PAR_DATA *) ptr; TSK_FS_META_NAME_LIST *fs_name_list; /* go through each file name attribute for this file */ fs_name_list = fs_file->meta->name2; while (fs_name_list) { /* we found a file that has the target parent directory. * Make a FS_NAME structure and print it. 
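 * The parent address compared here comes from the file's NTFS FILE_NAME attribute(s), which TSK exposes through fs_file->meta->name2.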
*/ if (fs_name_list->par_inode == data->parinode) { int i, cnt; uint8_t printed; TSK_FS_NAME *fs_name; if ((fs_name = tsk_fs_name_alloc(256, 0)) == NULL) return TSK_WALK_ERROR; /* Fill in the basics of the fs_name entry * so we can print in the fls formats */ fs_name->meta_addr = fs_file->meta->addr; fs_name->flags = TSK_FS_NAME_FLAG_UNALLOC; strncpy(fs_name->name, fs_name_list->name, fs_name->name_size); // now look for the $Data and $IDXROOT attributes fs_file->name = fs_name; printed = 0; // cycle through the attributes cnt = tsk_fs_file_attr_getsize(fs_file); for (i = 0; i < cnt; i++) { const TSK_FS_ATTR *fs_attr = tsk_fs_file_attr_get_idx(fs_file, i); if (!fs_attr) continue; if ((fs_attr->type == TSK_FS_ATTR_TYPE_NTFS_DATA) || (fs_attr->type == TSK_FS_ATTR_TYPE_NTFS_IDXROOT)) { if (data->flags & TSK_FS_IFIND_PAR_LONG) { tsk_fs_name_print_long(stdout, fs_file, NULL, fs_file->fs_info, fs_attr, 0, 0); } else { tsk_fs_name_print(stdout, fs_file, NULL, fs_file->fs_info, fs_attr, 0); tsk_printf("\n"); } printed = 1; } } // if there were no attributes, print what we got if (printed == 0) { if (data->flags & TSK_FS_IFIND_PAR_LONG) { tsk_fs_name_print_long(stdout, fs_file, NULL, fs_file->fs_info, NULL, 0, 0); } else { tsk_fs_name_print(stdout, fs_file, NULL, fs_file->fs_info, NULL, 0); tsk_printf("\n"); } } tsk_fs_name_free(fs_name); data->found = 1; } fs_name_list = fs_name_list->next; } return TSK_WALK_CONT; } /** * Searches for unallocated MFT entries that have a given * MFT entry as their parent directory (as reported in FILE_NAME). * @param fs File system to search * @param lclflags Flags * @param par Parent directory MFT entry address * @returns 1 on error and 0 on success */ uint8_t tsk_fs_ifind_par(TSK_FS_INFO * fs, TSK_FS_IFIND_FLAG_ENUM lclflags, TSK_INUM_T par) { IFIND_PAR_DATA data; data.found = 0; data.flags = lclflags; data.parinode = par; /* Walk unallocated MFT entries */ if (fs->inode_walk(fs, fs->first_inum, fs->last_inum, TSK_FS_META_FLAG_UNALLOC, ifind_par_act, &data)) { return 1; } return 0; } /** * \ingroup fslib * * Find the meta data address for a given file name (UTF-8). * The basic idea of the function is to break the given name into its * subdirectories and start looking for each (starting in the root * directory). * * @param a_fs FS to analyze * @param a_path UTF-8 path of file to search for * @param [out] a_result Meta data address of file * @param [out] a_fs_name Copy of name details (or NULL if details not wanted) * @returns -1 on (system) error, 0 if found, and 1 if not found */ int8_t tsk_fs_path2inum(TSK_FS_INFO * a_fs, const char *a_path, TSK_INUM_T * a_result, TSK_FS_NAME * a_fs_name) { char *cpath; size_t clen; char *cur_dir; // The "current" directory or file we are looking for char *cur_attr; // The "current" attribute of the dir we are looking for TSK_INUM_T next_meta; uint8_t is_done; char *strtok_last; *a_result = 0; // copy path to a buffer that we can modify clen = strlen(a_path) + 1; if ((cpath = (char *) tsk_malloc(clen)) == NULL) { return -1; } strncpy(cpath, a_path, clen); // Get the first part of the directory path. 
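// e.g., for a path of "/dir1/file.txt" (a hypothetical example), successive strtok_r() calls return "dir1" and then "file.txt"; the leading '/' does not produce an empty token.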
cur_dir = (char *) strtok_r(cpath, "/", &strtok_last); cur_attr = NULL; /* If there is no token, then only a '/' was given */ if (cur_dir == NULL) { free(cpath); *a_result = a_fs->root_inum; // create the dummy entry if needed if (a_fs_name) { a_fs_name->meta_addr = a_fs->root_inum; // Note that we are not filling in meta_seq -- we could, we just aren't a_fs_name->type = TSK_FS_NAME_TYPE_DIR; a_fs_name->flags = TSK_FS_NAME_FLAG_ALLOC; if (a_fs_name->name) a_fs_name->name[0] = '\0'; if (a_fs_name->shrt_name) a_fs_name->shrt_name[0] = '\0'; } return 0; } /* If this is NTFS, seperate out the attribute of the current directory */ if (TSK_FS_TYPE_ISNTFS(a_fs->ftype) && ((cur_attr = strchr(cur_dir, ':')) != NULL)) { *(cur_attr) = '\0'; cur_attr++; } if (tsk_verbose) tsk_fprintf(stderr, "Looking for %s\n", cur_dir); // initialize the first place to look, the root dir next_meta = a_fs->root_inum; // we loop until we know the outcome and then exit. // everything should return from inside the loop. is_done = 0; while (is_done == 0) { size_t i; TSK_FS_FILE *fs_file_alloc = NULL; // set to the allocated file that is our target TSK_FS_FILE *fs_file_del = NULL; // set to an unallocated file that matches our criteria TSK_FS_DIR *fs_dir = NULL; // open the next directory in the recursion if ((fs_dir = tsk_fs_dir_open_meta(a_fs, next_meta)) == NULL) { free(cpath); return -1; } /* Verify this is indeed a directory. We had one reported * problem where a file was a disk image and opening it as * a directory found the directory entries inside of the file * and this caused problems... */ if (fs_dir->fs_file->meta->type != TSK_FS_META_TYPE_DIR) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr("Address %" PRIuINUM " is not for a directory\n", next_meta); free(cpath); return -1; } // cycle through each entry for (i = 0; i < tsk_fs_dir_getsize(fs_dir); i++) { TSK_FS_FILE *fs_file; uint8_t found_name = 0; if ((fs_file = tsk_fs_dir_get(fs_dir, i)) == NULL) { tsk_fs_dir_close(fs_dir); free(cpath); return -1; } /* * Check if this is the name that we are currently looking for, * as identified in 'cur_dir' */ /* FAT is a special case because we check the short name */ if (TSK_FS_TYPE_ISFAT(a_fs->ftype)) { if ((fs_file->name->name) && (a_fs->name_cmp(a_fs, fs_file->name->name, cur_dir) == 0)) { found_name = 1; } else if ((fs_file->name->shrt_name) && (a_fs->name_cmp(a_fs, fs_file->name->shrt_name, cur_dir) == 0)) { found_name = 1; } } /* NTFS gets a case insensitive comparison */ else if (TSK_FS_TYPE_ISNTFS(a_fs->ftype)) { if ((fs_file->name->name) && (a_fs->name_cmp(a_fs, fs_file->name->name, cur_dir) == 0)) { /* ensure we have the right attribute name */ if (cur_attr == NULL) { found_name = 1; } else { if (fs_file->meta) { int cnt, i; // cycle through the attributes cnt = tsk_fs_file_attr_getsize(fs_file); for (i = 0; i < cnt; i++) { const TSK_FS_ATTR *fs_attr = tsk_fs_file_attr_get_idx(fs_file, i); if (!fs_attr) continue; if ((fs_attr->name) && (a_fs->name_cmp(a_fs, fs_attr->name, cur_attr) == 0)) { found_name = 1; } } } } } } else { if ((fs_file->name->name) && (a_fs->name_cmp(a_fs, fs_file->name->name, cur_dir) == 0)) { found_name = 1; } } if (found_name) { /* If we found our file and it is allocated, then stop. 
If * it is unallocated, keep on going to see if we can get * an allocated hit */ if (fs_file->name->flags & TSK_FS_NAME_FLAG_ALLOC) { fs_file_alloc = fs_file; break; } else { // if we already have an unalloc and its addr is 0, then use the new one if ((fs_file_del) && (fs_file_del->name->meta_addr == 0)) { tsk_fs_file_close(fs_file_del); } fs_file_del = fs_file; } } // close the file if we did not save it for future analysis. else { tsk_fs_file_close(fs_file); fs_file = NULL; } } // we found a directory, go into it if ((fs_file_alloc) || (fs_file_del)) { const char *pname; TSK_FS_FILE *fs_file_tmp; // choose the alloc one first (if they both exist) if (fs_file_alloc) fs_file_tmp = fs_file_alloc; else fs_file_tmp = fs_file_del; pname = cur_dir; // save a copy of the current name pointer // advance to the next name cur_dir = (char *) strtok_r(NULL, "/", &(strtok_last)); cur_attr = NULL; if (tsk_verbose) tsk_fprintf(stderr, "Found it (%s), now looking for %s\n", pname, cur_dir); /* That was the last name in the path -- we found the file! */ if (cur_dir == NULL) { *a_result = fs_file_tmp->name->meta_addr; // make a copy if one was requested if (a_fs_name) { tsk_fs_name_copy(a_fs_name, fs_file_tmp->name); } tsk_fs_dir_close(fs_dir); free(cpath); return 0; } // update the attribute field, if needed if (TSK_FS_TYPE_ISNTFS(a_fs->ftype) && ((cur_attr = strchr(cur_dir, ':')) != NULL)) { *(cur_attr) = '\0'; cur_attr++; } // update the value for the next directory to open next_meta = fs_file_tmp->name->meta_addr; if (fs_file_alloc) { tsk_fs_file_close(fs_file_alloc); fs_file_alloc = NULL; } if (fs_file_del) { tsk_fs_file_close(fs_file_del); fs_file_del = NULL; } } // no hit in directory else { is_done = 1; } tsk_fs_dir_close(fs_dir); fs_dir = NULL; } free(cpath); return 1; } /** * Find the meta data address for a given file TCHAR name * * @param fs FS to analyze * @param tpath Path of file to search for * @param [out] result Meta data address of file * @returns -1 on error, 0 if found, and 1 if not found */ int8_t tsk_fs_ifind_path(TSK_FS_INFO * fs, TSK_TCHAR * tpath, TSK_INUM_T * result) { #ifdef TSK_WIN32 // Convert the UTF-16 path to UTF-8 { size_t clen; UTF8 *ptr8; UTF16 *ptr16; int retval; char *cpath; clen = TSTRLEN(tpath) * 4; if ((cpath = (char *) tsk_malloc(clen)) == NULL) { return -1; } ptr8 = (UTF8 *) cpath; ptr16 = (UTF16 *) tpath; retval = tsk_UTF16toUTF8_lclorder((const UTF16 **) &ptr16, (UTF16 *) & ptr16[TSTRLEN(tpath) + 1], &ptr8, (UTF8 *) ((uintptr_t) ptr8 + clen), TSKlenientConversion); if (retval != TSKconversionOK) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNICODE); tsk_error_set_errstr ("tsk_fs_ifind_path: Error converting path to UTF-8: %d", retval); free(cpath); return -1; } return tsk_fs_path2inum(fs, cpath, result, NULL); } #else return tsk_fs_path2inum(fs, (const char *) tpath, result, NULL); #endif } /******************************************************************************* * Find an inode given a data unit */ typedef struct { TSK_DADDR_T block; /* the block to find */ TSK_FS_IFIND_FLAG_ENUM flags; uint8_t found; TSK_INUM_T curinode; /* the inode being analyzed */ uint32_t curtype; /* the type currently being analyzed: NTFS */ uint16_t curid; } IFIND_DATA_DATA; /* * file_walk action for non-ntfs */ static TSK_WALK_RET_ENUM ifind_data_file_act(TSK_FS_FILE * fs_file, TSK_OFF_T a_off, TSK_DADDR_T addr, char *buf, size_t size, TSK_FS_BLOCK_FLAG_ENUM flags, void *ptr) { TSK_FS_INFO *fs = fs_file->fs_info; IFIND_DATA_DATA *data = (IFIND_DATA_DATA *) ptr; /* Ignore 
sparse blocks because they do not reside on disk */ if (flags & TSK_FS_BLOCK_FLAG_SPARSE) return TSK_WALK_CONT; if (addr == data->block) { if (TSK_FS_TYPE_ISNTFS(fs->ftype)) tsk_printf("%" PRIuINUM "-%" PRIu32 "-%" PRIu16 "\n", data->curinode, data->curtype, data->curid); else tsk_printf("%" PRIuINUM "\n", data->curinode); data->found = 1; return TSK_WALK_STOP; } return TSK_WALK_CONT; } /* ** find_inode ** ** Callback action for inode_walk */ static TSK_WALK_RET_ENUM ifind_data_act(TSK_FS_FILE * fs_file, void *ptr) { IFIND_DATA_DATA *data = (IFIND_DATA_DATA *) ptr; int file_flags = (TSK_FS_FILE_WALK_FLAG_AONLY | TSK_FS_FILE_WALK_FLAG_SLACK); int i, cnt; data->curinode = fs_file->meta->addr; /* Search all attrributes */ cnt = tsk_fs_file_attr_getsize(fs_file); for (i = 0; i < cnt; i++) { const TSK_FS_ATTR *fs_attr = tsk_fs_file_attr_get_idx(fs_file, i); if (!fs_attr) continue; data->curtype = fs_attr->type; data->curid = fs_attr->id; if (fs_attr->flags & TSK_FS_ATTR_NONRES) { if (tsk_fs_attr_walk(fs_attr, file_flags, ifind_data_file_act, ptr)) { if (tsk_verbose) tsk_fprintf(stderr, "Error walking file %" PRIuINUM " Attribute: %i", fs_file->meta->addr, i); /* Ignore these errors */ tsk_error_reset(); } // stop if we only want one hit if ((data->found) && (!(data->flags & TSK_FS_IFIND_ALL))) break; } } if ((data->found) && (!(data->flags & TSK_FS_IFIND_ALL))) return TSK_WALK_STOP; else return TSK_WALK_CONT; } /* * Find the inode that has allocated block blk * Return 1 on error, 0 if no error */ uint8_t tsk_fs_ifind_data(TSK_FS_INFO * fs, TSK_FS_IFIND_FLAG_ENUM lclflags, TSK_DADDR_T blk) { IFIND_DATA_DATA data; memset(&data, 0, sizeof(IFIND_DATA_DATA)); data.flags = lclflags; data.block = blk; if (fs->inode_walk(fs, fs->first_inum, fs->last_inum, TSK_FS_META_FLAG_ALLOC | TSK_FS_META_FLAG_UNALLOC, ifind_data_act, &data)) { return 1; } /* If we did not find an inode yet, get the block's * flags so we can identify it as a meta data block */ if (!data.found) { TSK_FS_BLOCK *fs_block; if ((fs_block = tsk_fs_block_get(fs, NULL, blk)) != NULL) { if (fs_block->flags & TSK_FS_BLOCK_FLAG_META) { tsk_printf("Meta Data\n"); data.found = 1; } tsk_fs_block_free(fs_block); } } if (!data.found) { tsk_printf("Inode not found\n"); } return 0; } sleuthkit-4.2.0/tsk/fs/ils_lib.c000644 000765 000024 00000017232 12576320700 017324 0ustar00carrierstaff000000 000000 /* ** The Sleuth Kit ** ** Brian Carrier [carrier sleuthkit [dot] org] ** Copyright (c) 2006-2011 Brian Carrier, Basis Technology. All Rights reserved ** Copyright (c) 2003-2005 Brian Carrier. All rights reserved ** ** TASK ** Copyright (c) 2002 Brian Carrier, @stake Inc. All rights reserved ** ** Copyright (c) 1997,1998,1999, International Business Machines ** Corporation and others. All Rights Reserved. */ /** * \file ils_lib.c * Library functionality of the TSK ils tool. */ /* TCT */ /*++ * LICENSE * This software is distributed under the IBM Public License. * AUTHOR(S) * Wietse Venema * IBM T.J. Watson Research * P.O. 
Box 704 * Yorktown Heights, NY 10598, USA --*/ #include "tsk_fs_i.h" #ifdef TSK_WIN32 #include #endif typedef struct { const TSK_TCHAR *image; /* number of seconds time skew of system * if the system was 100 seconds fast, the value should be +100 */ int32_t sec_skew; TSK_FS_ILS_FLAG_ENUM flags; } ILS_DATA; /* print_header - print time machine header */ static void print_header(TSK_FS_INFO * fs) { char hostnamebuf[BUFSIZ]; time_t now; #ifndef TSK_WIN32 if (gethostname(hostnamebuf, sizeof(hostnamebuf) - 1) < 0) { if (tsk_verbose) tsk_fprintf(stderr, "error getting host by name\n"); strcpy(hostnamebuf, "unknown"); } hostnamebuf[sizeof(hostnamebuf) - 1] = 0; #else strcpy(hostnamebuf, "unknown"); #endif now = time((time_t *) 0); /* * Identify table type and table origin. */ tsk_printf("class|host|device|start_time\n"); tsk_printf("ils|%s||%" PRIu64 "\n", hostnamebuf, (uint64_t) now); /* * Identify the fields in the data that follow. */ tsk_printf ("st_ino|st_alloc|st_uid|st_gid|st_mtime|st_atime|st_ctime|st_crtime"); tsk_printf("|st_mode|st_nlink|st_size\n"); } static void print_header_mac() { char hostnamebuf[BUFSIZ]; #ifndef TSK_WIN32 if (gethostname(hostnamebuf, sizeof(hostnamebuf) - 1) < 0) { if (tsk_verbose) tsk_fprintf(stderr, "Error getting host by name\n"); strcpy(hostnamebuf, "unknown"); } hostnamebuf[sizeof(hostnamebuf) - 1] = 0; #else strcpy(hostnamebuf, "unknown"); #endif /* * Identify the fields in the data that follow. */ tsk_printf ("md5|file|st_ino|st_ls|st_uid|st_gid|st_size|st_atime|st_mtime|st_ctime|st_crtime\n"); return; } /* print_inode - list generic inode */ static TSK_WALK_RET_ENUM ils_act(TSK_FS_FILE * fs_file, void *ptr) { ILS_DATA *data = (ILS_DATA *) ptr; // if we have no link count and want open files -- exit if ((fs_file->meta->nlink == 0) && ((data->flags & TSK_FS_ILS_OPEN) != 0)) { return TSK_WALK_CONT; } // verify link flags if ((fs_file->meta->nlink == 0) && ((data->flags & TSK_FS_ILS_UNLINK) == 0)) { return TSK_WALK_CONT; } else if ((fs_file->meta->nlink > 0) && ((data->flags & TSK_FS_ILS_LINK) == 0)) { return TSK_WALK_CONT; } if (data->sec_skew != 0) { fs_file->meta->mtime -= data->sec_skew; fs_file->meta->atime -= data->sec_skew; fs_file->meta->ctime -= data->sec_skew; fs_file->meta->crtime -= data->sec_skew; } tsk_printf("%" PRIuINUM "|%c|%" PRIuUID "|%" PRIuGID "|%" PRIu32 "|%" PRIu32 "|%" PRIu32 "|%" PRIu32, fs_file->meta->addr, (fs_file->meta->flags & TSK_FS_META_FLAG_ALLOC) ? 
'a' : 'f', fs_file->meta->uid, fs_file->meta->gid, (uint32_t) fs_file->meta->mtime, (uint32_t) fs_file->meta->atime, (uint32_t) fs_file->meta->ctime, (uint32_t) fs_file->meta->crtime); if (data->sec_skew != 0) { fs_file->meta->mtime += data->sec_skew; fs_file->meta->atime += data->sec_skew; fs_file->meta->ctime += data->sec_skew; fs_file->meta->crtime += data->sec_skew; } tsk_printf("|%lo|%d|%" PRIuOFF "\n", (unsigned long) fs_file->meta->mode, fs_file->meta->nlink, fs_file->meta->size); return TSK_WALK_CONT; } /* * Print the inode information in the format that the mactimes program expects */ static TSK_WALK_RET_ENUM ils_mac_act(TSK_FS_FILE * fs_file, void *ptr) { char ls[12]; ILS_DATA *data = (ILS_DATA *) ptr; if ((fs_file->meta->nlink == 0) && ((data->flags & TSK_FS_ILS_UNLINK) == 0)) { return TSK_WALK_CONT; } else if ((fs_file->meta->nlink > 0) && ((data->flags & TSK_FS_ILS_LINK) == 0)) { return TSK_WALK_CONT; } /* ADD image and file name (if we have one) */ TFPRINTF(stdout, _TSK_T("0|<%s-"), data->image); tsk_printf("%s%s%s-%" PRIuINUM ">|%" PRIuINUM "|", (fs_file->meta->name2) ? fs_file->meta->name2->name : "", (fs_file->meta->name2) ? "-" : "", (fs_file->meta->flags & TSK_FS_META_FLAG_ALLOC) ? "alive" : "dead", fs_file->meta->addr, fs_file->meta->addr); /* Print the "ls" mode in ascii format */ tsk_fs_meta_make_ls(fs_file->meta, ls, sizeof(ls)); if (data->sec_skew != 0) { fs_file->meta->mtime -= data->sec_skew; fs_file->meta->atime -= data->sec_skew; fs_file->meta->ctime -= data->sec_skew; fs_file->meta->crtime -= data->sec_skew; } tsk_printf("-/%s|%" PRIuUID "|%" PRIuGID "|%" PRIuOFF "|%" PRIu32 "|%" PRIu32 "|%" PRIu32 "|%" PRIu32 "\n", ls, fs_file->meta->uid, fs_file->meta->gid, fs_file->meta->size, (uint32_t) fs_file->meta->atime, (uint32_t) fs_file->meta->mtime, (uint32_t) fs_file->meta->ctime, (uint32_t) fs_file->meta->crtime); if (data->sec_skew != 0) { fs_file->meta->mtime -= data->sec_skew; fs_file->meta->atime -= data->sec_skew; fs_file->meta->ctime -= data->sec_skew; fs_file->meta->crtime -= data->sec_skew; } return TSK_WALK_CONT; } /** * Library API for inode walking. 
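 * Output goes to stdout: one pipe-delimited record per inode, or, when TSK_FS_ILS_MAC is set in lclflags, the format that the mactimes program expects.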
* * @param fs File system to analyze * @param lclflags TSK_FS_ILS_XXX flag settings * @param istart Starting inode address * @param ilast Ending inode address * @param flags Inode walk flags * @param skew clock skew in seconds * @param img Path to disk image name for header * * @returns 1 on error and 0 on success */ uint8_t tsk_fs_ils(TSK_FS_INFO * fs, TSK_FS_ILS_FLAG_ENUM lclflags, TSK_INUM_T istart, TSK_INUM_T ilast, TSK_FS_META_FLAG_ENUM flags, int32_t skew, const TSK_TCHAR * img) { ILS_DATA data; /* If orphan is desired, then make sure LINK flags are set */ if (flags & TSK_FS_META_FLAG_ORPHAN) { lclflags |= (TSK_FS_ILS_LINK | TSK_FS_ILS_UNLINK); } /* if OPEN lcl flag is given, then make sure ALLOC is not and UNALLOC is */ if (lclflags & TSK_FS_ILS_OPEN) { flags |= TSK_FS_META_FLAG_UNALLOC; flags &= ~TSK_FS_META_FLAG_ALLOC; lclflags |= TSK_FS_ILS_LINK; lclflags &= ~TSK_FS_ILS_UNLINK; } else { /* If LINK is not set at all, then set them */ if (((lclflags & TSK_FS_ILS_LINK) == 0) && ((lclflags & TSK_FS_ILS_UNLINK) == 0)) lclflags |= (TSK_FS_ILS_LINK | TSK_FS_ILS_UNLINK); } data.flags = lclflags; data.sec_skew = skew; /* Print the data */ if (lclflags & TSK_FS_ILS_MAC) { TSK_TCHAR *tmpptr; data.image = img; #ifdef TSK_WIN32 tmpptr = TSTRCHR(data.image, '\\'); #else tmpptr = strrchr(data.image, '/'); #endif if (tmpptr) data.image = ++tmpptr; print_header_mac(); if (fs->inode_walk(fs, istart, ilast, flags, ils_mac_act, &data)) return 1; } else { print_header(fs); if (fs->inode_walk(fs, istart, ilast, flags, ils_act, &data)) return 1; } return 0; } sleuthkit-4.2.0/tsk/fs/iso9660.c000644 000765 000024 00000252163 12576320700 017032 0ustar00carrierstaff000000 000000 /* ** The Sleuth Kit ** ** This software is subject to the IBM Public License ver. 1.0, ** which was displayed prior to download and is included in the readme.txt ** file accompanying the Sleuth Kit files. It may also be requested from: ** Crucial Security Inc. ** 14900 Conference Center Drive ** Chantilly, VA 20151 ** ** Wyatt Banks [wbanks@crucialsecurity.com] ** Copyright (c) 2005 Crucial Security Inc. All rights reserved. ** ** Brian Carrier [carrier sleuthkit [dot] org] ** Copyright (c) 2003-2005 Brian Carrier. All rights reserved ** Copyright (c) 2007-2011 Brian Carrier. All rights reserved ** ** Copyright (c) 1997,1998,1999, International Business Machines ** Corporation and others. All Rights Reserved. */ /* TCT * LICENSE * This software is distributed under the IBM Public License. * AUTHOR(S) * Wietse Venema * IBM T.J. Watson Research * P.O. Box 704 * Yorktown Heights, NY 10598, USA --*/ /* ** You may distribute the Sleuth Kit, or other software that incorporates ** part of all of the Sleuth Kit, in object code form under a license agreement, ** provided that: ** a) you comply with the terms and conditions of the IBM Public License ** ver 1.0; and ** b) the license agreement ** i) effectively disclaims on behalf of all Contributors all warranties ** and conditions, express and implied, including warranties or ** conditions of title and non-infringement, and implied warranties ** or conditions of merchantability and fitness for a particular ** purpose. ** ii) effectively excludes on behalf of all Contributors liability for ** damages, including direct, indirect, special, incidental and ** consequential damages such as lost profits. ** iii) states that any provisions which differ from IBM Public License ** ver. 
1.0 are offered by that Contributor alone and not by any ** other party; and ** iv) states that the source code for the program is available from you, ** and informs licensees how to obtain it in a reasonable manner on or ** through a medium customarily used for software exchange. ** ** When the Sleuth Kit or other software that incorporates part or all of ** the Sleuth Kit is made available in source code form: ** a) it must be made available under IBM Public License ver. 1.0; and ** b) a copy of the IBM Public License ver. 1.0 must be included with ** each copy of the program. */ /** * \file iso9660.c * Contains the internal TSK ISO9660 file system code to handle basic file * system processing for opening file system, processing sectors, and directory entries. */ #include "tsk_fs_i.h" #include "tsk_iso9660.h" #include /* free all memory used by inode linked list */ static void iso9660_inode_list_free(TSK_FS_INFO * fs) { ISO_INFO *iso = (ISO_INFO *) fs; iso9660_inode_node *tmp; while (iso->in_list) { tmp = iso->in_list; iso->in_list = iso->in_list->next; free(tmp); } iso->in_list = NULL; } /** * Process the System Use Sharing Protocol (SUSP) data. Typically, * rockridge data are stored in this. * * @param fs File system to process * @param buf Buffer of data to process * @param count Length of buffer in bytes. * @param hFile File handle to print details to (or NULL for no printing) * @returns NULL on error */ static rockridge_ext * parse_susp(TSK_FS_INFO * fs, char *buf, int count, FILE * hFile) { rockridge_ext *rr; ISO_INFO *iso = (ISO_INFO *) fs; char *end = buf + count - 1; if (tsk_verbose) tsk_fprintf(stderr, "parse_susp: count is: %d\n", count); // allocate the output data structure rr = (rockridge_ext *) tsk_malloc(sizeof(rockridge_ext)); if (rr == NULL) { return NULL; } while ((uintptr_t)buf + sizeof(iso9660_susp_head) <= (uintptr_t)end) { iso9660_susp_head *head = (iso9660_susp_head *) buf; if (buf + head->len - 1 > end) break; /* Identify the entry type -- listed in the order * that they are listed in the specs */ // SUSP Continuation Entry if ((head->sig[0] == 'C') && (head->sig[1] == 'E')) { iso9660_susp_ce *ce = (iso9660_susp_ce *) buf; if ((uintptr_t)buf + sizeof(iso9660_susp_ce) - 1 > (uintptr_t)end) { if (tsk_verbose) tsk_fprintf(stderr, "parse_susp: not enough room for CE structure\n"); break; } if (hFile) { fprintf(hFile, "CE Entry\n"); fprintf(hFile, "* Block: %" PRIu32 "\n", tsk_getu32(fs->endian, ce->blk_m)); fprintf(hFile, "* Offset: %" PRIu32 "\n", tsk_getu32(fs->endian, ce->offset_m)); fprintf(hFile, "* Len: %" PRIu32 "\n", tsk_getu32(fs->endian, ce->celen_m)); } // read the continued buffer and parse it if ((tsk_getu32(fs->endian, ce->blk_m) < fs->last_block) && (tsk_getu32(fs->endian, ce->offset_m) < fs->block_size)) { ssize_t cnt; TSK_OFF_T off; char *buf2; off = tsk_getu32(fs->endian, ce->blk_m) * fs->block_size + tsk_getu32(fs->endian, ce->offset_m); buf2 = (char *) tsk_malloc(tsk_getu32(fs->endian, ce->celen_m)); if (buf2 != NULL) { cnt = tsk_fs_read(fs, off, buf2, tsk_getu32(fs->endian, ce->celen_m)); if (cnt == tsk_getu32(fs->endian, ce->celen_m)) { parse_susp(fs, buf2, (int) cnt, hFile); } else if (tsk_verbose) { fprintf(stderr, "parse_susp: error reading CE entry\n"); tsk_error_print(stderr); tsk_error_reset(); } free(buf2); } else { if (tsk_verbose) fprintf(stderr, "parse_susp: error allocating memory to process CE entry\n"); tsk_error_reset(); } } else { if (tsk_verbose) fprintf(stderr, "parse_susp: CE offset or block too large to process\n"); } buf += 
head->len; } // SUSP Padding Entry else if ((head->sig[0] == 'P') && (head->sig[1] == 'D')) { if (hFile) { fprintf(hFile, "PD Entry\n"); } buf += head->len; } // SUSP Sharing Protocol Entry -- we ignore else if ((head->sig[0] == 'S') && (head->sig[1] == 'P')) { iso9660_susp_sp *sp = (iso9660_susp_sp *) buf; if (hFile) { fprintf(hFile, "SP Entry\n"); fprintf(hFile, "* SKip Len: %d\n", sp->skip); } buf += head->len; } // SUSP System Terminator else if ((head->sig[0] == 'S') && (head->sig[1] == 'T')) { if (hFile) { fprintf(hFile, "ST Entry\n"); } buf += head->len; } // SUSP Extention Registration -- not used else if ((head->sig[0] == 'E') && (head->sig[1] == 'R')) { iso9660_susp_er *er = (iso9660_susp_er *) buf; if (hFile) { char buf[258]; fprintf(hFile, "ER Entry\n"); memcpy(buf, er->ext_id, er->len_id); buf[er->len_id] = '\0'; fprintf(hFile, "* Extension ID: %s\n", buf); memcpy(buf, er->ext_id + er->len_id, er->len_des); buf[er->len_des] = '\0'; fprintf(hFile, "* Extension Descriptor: %s\n", buf); memcpy(buf, er->ext_id + er->len_id + er->len_des, er->len_src); buf[er->len_src] = '\0'; fprintf(hFile, "* Extension Spec Source: %s\n", buf); } buf += head->len; } // SUSP Extention Sigs -- not used else if ((head->sig[0] == 'E') && (head->sig[1] == 'S')) { if (hFile) { fprintf(hFile, "ES Entry\n"); } buf += head->len; } /* * Rock Ridge Extensions */ /* POSIX file attributes */ else if ((head->sig[0] == 'P') && (head->sig[1] == 'X')) { iso9660_rr_px_entry *rr_px; if ((uintptr_t)buf + sizeof(iso9660_rr_px_entry) - 1> (uintptr_t)end) { if (tsk_verbose) tsk_fprintf(stderr, "parse_susp: not enough room for POSIX structure\n"); break; } rr_px = (iso9660_rr_px_entry *) buf; rr->uid = tsk_getu32(fs->endian, rr_px->uid_m); rr->gid = tsk_getu32(fs->endian, rr_px->gid_m); rr->mode = tsk_getu16(fs->endian, rr_px->mode_m); rr->nlink = tsk_getu32(fs->endian, rr_px->links_m); if (hFile) { fprintf(hFile, "PX Entry\n"); fprintf(hFile, "* UID: %" PRIuUID "\n", rr->uid); fprintf(hFile, "* GID: %" PRIuGID "\n", rr->gid); fprintf(hFile, "* Mode: %d\n", rr->mode); fprintf(hFile, "* Links: %" PRIu32 "\n", rr->nlink); } buf += head->len; } // RR - device information else if ((head->sig[0] == 'P') && (head->sig[1] == 'N')) { iso9660_rr_pn_entry *rr_pn = (iso9660_rr_pn_entry *) buf; if (hFile) { fprintf(hFile, "PN Entry\n"); fprintf(hFile, "* Device ID High: %" PRIu32 "\n", tsk_getu32(fs->endian, rr_pn->dev_h_m)); fprintf(hFile, "* Device ID Low: %" PRIu32 "\n", tsk_getu32(fs->endian, rr_pn->dev_l_m)); } buf += head->len; } // RR - symbolic link else if ((head->sig[0] == 'S') && (head->sig[1] == 'L')) { //iso9660_rr_sl_entry *rr_sl = (iso9660_rr_sl_entry *) buf; if (hFile) { fprintf(hFile, "SL Entry\n"); } buf += head->len; } // RR -- alternative name else if ((head->sig[0] == 'N') && (head->sig[1] == 'M')) { iso9660_rr_nm_entry *rr_nm; if ((uintptr_t)buf + sizeof(iso9660_rr_nm_entry) - 1> (uintptr_t)end) { if (tsk_verbose) tsk_fprintf(stderr, "parse_susp: not enough room for RR alternative name structure\n"); break; } rr_nm = (iso9660_rr_nm_entry *) buf; if ((uintptr_t)&rr_nm->name[0] + (int) rr_nm->len - 5 - 1> (uintptr_t)end) { if (tsk_verbose) tsk_fprintf(stderr, "parse_susp: not enough room for RR alternative name\n"); break; } strncpy(rr->fn, &rr_nm->name[0], (int) rr_nm->len - 5); rr->fn[(int) rr_nm->len - 5] = '\0'; if (hFile) { fprintf(hFile, "NM Entry\n"); fprintf(hFile, "* %s\n", rr->fn); } buf += head->len; } // RR - relocated directory else if ((head->sig[0] == 'C') && (head->sig[1] == 'L')) { if (hFile) { 
fprintf(hFile, "CL Entry\n"); } buf += head->len; } // RR - parent of relocated directory else if ((head->sig[0] == 'P') && (head->sig[1] == 'L')) { if (hFile) { fprintf(hFile, "PL Entry\n"); } buf += head->len; } // RR - relocation signal else if ((head->sig[0] == 'R') && (head->sig[1] == 'E')) { if (hFile) { fprintf(hFile, "RE Entry\n"); } buf += head->len; } // RR - time stamps else if ((head->sig[0] == 'T') && (head->sig[1] == 'F')) { if (hFile) { fprintf(hFile, "TF Entry\n"); } buf += head->len; } // RR - sparse file else if ((head->sig[0] == 'S') && (head->sig[1] == 'F')) { if (hFile) { fprintf(hFile, "SF Entry\n"); } buf += head->len; } /* RR is a system use field indicating RockRidge, but not part of RockRidge */ else if ((head->sig[0] == 'R') && (head->sig[1] == 'R')) { iso->rr_found = 1; if (hFile) { fprintf(hFile, "RR Entry\n"); } buf += head->len; } else { buf += 2; if ((uintptr_t) buf % 2) buf--; } } return rr; } /////////////////////////////////////////////////////////////////////////// // The following functions are responsible for loading all of the file metadata into memory. // The process is that the Path table is processed first. It contains an entry for each // directory. That info is then used to locate the directory contents and those contents // are then processed. // // Files do not have a corresponding metadata entry, so we assign them based // on the order that they are loaded. /////////////////////////////////////////////////////////////////////////// /* XXX Instead of loading all of the file metadata, we could instead save a mapping * between inode number and the byte offset of the metadata (and any other data * needed for fast lookups). */ /** \internal * Process the contents of a directory and load the * information about files in that directory into ISO_INFO. This is called * by the methods that process the path table (which contains pointers to the * various directories). The results in ISO_INFO are used to identify the * inode address of files found from dent_walk and for file lookups. * * Type: ISO9660_TYPE_PVD for primary volume descriptor, ISO9660_TYPE_SVD for * supplementary volume descriptor (do Joliet utf-8 conversion). * * @param fs File system to analyze * @param a_offs Byte offset of directory start * @param count previous file count * @param ctype Character set used for the names * @param a_fn Name of the directory to use for the "." 
entry (in UTF-8) * * @returns total number of files or -1 on error */ static int iso9660_load_inodes_dir(TSK_FS_INFO * fs, TSK_OFF_T a_offs, int count, int ctype, const char *a_fn, uint8_t is_first) { ISO_INFO *iso = (ISO_INFO *) fs; int s_cnt = 1; // count of sectors needed for dir TSK_OFF_T s_offs = a_offs; // offset for sector reads int i; if (tsk_verbose) tsk_fprintf(stderr, "iso9660_load_inodes_dir: offs: %" PRIuOFF " count: %d ctype: %d fn: %s\n", a_offs, count, ctype, a_fn); // cycle through each sector -- entries will not cross them for (i = 0; i < s_cnt; i++) { ssize_t cnt1; int b_offs; // offset in buffer char buf[ISO9660_SSIZE_B]; cnt1 = tsk_fs_read(fs, s_offs, buf, ISO9660_SSIZE_B); if (cnt1 != ISO9660_SSIZE_B) { if (cnt1 >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2("iso_get_dentries"); return -1; } /* process the directory entries */ for (b_offs = 0; b_offs < ISO9660_SSIZE_B;) { iso9660_inode_node *in_node = NULL; iso9660_dentry *dentry; dentry = (iso9660_dentry *) & buf[b_offs]; if (dentry->entry_len == 0) { b_offs += 2; continue; } // sanity checks on entry_len else if (dentry->entry_len < sizeof(iso9660_dentry)) { if (tsk_verbose) tsk_fprintf(stderr, "iso9660_load_inodes_dir: entry length is shorter than dentry, bailing\n"); break; } else if (b_offs + dentry->entry_len > ISO9660_SSIZE_B) { if (tsk_verbose) tsk_fprintf(stderr, "iso9660_load_inodes_dir: entry is longer than sector, bailing\n"); break; } /* when processing the other volume descriptor directories, we ignore the * directories because we have no way of detecting if it is a duplicate of * a directory from the other volume descriptor (they use different blocks). * We will see the contents of this directory from the path table anyway. */ if ((dentry->flags & ISO9660_FLAG_DIR) && (is_first == 0)) { b_offs += dentry->entry_len; continue; } // allocate a node for this entry in_node = (iso9660_inode_node *) tsk_malloc(sizeof(iso9660_inode_node)); if (in_node == NULL) { return -1; } // the first entry is for the current directory if ((i == 0) && (b_offs == 0)) { // should have no name or '.' if (dentry->fi_len > 1) { if (tsk_verbose) tsk_fprintf(stderr, "iso9660_load_inodes_dir: first entry has name length > 1\n"); free(in_node); in_node = NULL; b_offs += dentry->entry_len; continue; } /* find how many more sectors are in the directory */ s_cnt = tsk_getu32(fs->endian, dentry->data_len_m) / ISO9660_SSIZE_B; if (tsk_verbose) tsk_fprintf(stderr, "iso9660_load_inodes_dir: %d number of additional sectors\n", s_cnt); // @@@ Should have a sanity check here on s_cnt, but I'm not sure what it would be... /* use the specified name instead of "." */ if (strlen(a_fn) > ISO9660_MAXNAMLEN_STD) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("iso9660_load_inodes_dir: Name argument specified is too long"); return -1; } strncpy(in_node->inode.fn, a_fn, ISO9660_MAXNAMLEN_STD + 1); /* for all directories except the root, we skip processing the "." and ".." entries because * they duplicate the other entires and the dent_walk code will rely on the offset * for the entry in the parent directory. 
*/ if (count != 0) { free(in_node); in_node = NULL; b_offs += dentry->entry_len; dentry = (iso9660_dentry *) & buf[b_offs]; b_offs += dentry->entry_len; continue; } } else { char *file_ver; // the entry has a UTF-16 name if (ctype == ISO9660_CTYPE_UTF16) { UTF16 *name16; UTF8 *name8; int retVal; if (dentry->entry_len < sizeof(iso9660_dentry) + dentry->fi_len) { if (tsk_verbose) tsk_fprintf(stderr, "iso9660_load_inodes_dir: UTF-16 name length is too large, bailing\n"); break; } name16 = (UTF16 *) & buf[b_offs + sizeof(iso9660_dentry)]; // the name is in UTF-16 BE -- convert to LE if needed if (fs->endian & TSK_LIT_ENDIAN) { int a; for (a = 0; a < dentry->fi_len / 2; a++) { name16[a] = ((name16[a] & 0xff) << 8) + ((name16[a] & 0xff00) >> 8); } } name8 = (UTF8 *) in_node->inode.fn; retVal = tsk_UTF16toUTF8(fs->endian, (const UTF16 **) &name16, (UTF16 *) & buf[b_offs + sizeof(iso9660_dentry) + dentry->fi_len], &name8, (UTF8 *) ((uintptr_t) & in_node->inode. fn[ISO9660_MAXNAMLEN_STD]), TSKlenientConversion); if (retVal != TSKconversionOK) { if (tsk_verbose) tsk_fprintf(stderr, "iso9660_load_inodes_dir: Error converting Joliet name to UTF8: %d", retVal); in_node->inode.fn[0] = '\0'; } *name8 = '\0'; } else if (ctype == ISO9660_CTYPE_ASCII) { int readlen; readlen = dentry->fi_len; if (readlen > ISO9660_MAXNAMLEN_STD) readlen = ISO9660_MAXNAMLEN_STD; if (dentry->entry_len < sizeof(iso9660_dentry) + dentry->fi_len) { if (tsk_verbose) tsk_fprintf(stderr, "iso9660_load_inodes_dir: ASCII name length is too large, bailing\n"); break; } memcpy(in_node->inode.fn, &buf[b_offs + sizeof(iso9660_dentry)], readlen); in_node->inode.fn[readlen] = '\0'; } else { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("Invalid ctype in iso9660_load_inodes_dir"); return -1; } // the version is embedded in the name file_ver = strchr(in_node->inode.fn, ';'); if (file_ver) { in_node->inode.version = atoi(file_ver + 1); *file_ver = '\0'; file_ver = NULL; } // if no extension, remove the final '.' if (in_node->inode.fn[strlen(in_node->inode.fn) - 1] == '.') in_node->inode.fn[strlen(in_node->inode.fn) - 1] = '\0'; if (strlen(in_node->inode.fn) == 0) { if (tsk_verbose) tsk_fprintf(stderr, "iso9660_load_inodes_dir: length of name after processing is 0. bailing\n"); break; } } // copy the raw dentry data into the node memcpy(&(in_node->inode.dr), dentry, sizeof(iso9660_dentry)); in_node->inode.ea = NULL; // sanity checks if (tsk_getu32(fs->endian, dentry->ext_loc_m) > fs->last_block) { if (tsk_verbose) tsk_fprintf(stderr, "iso9660_load_inodes_dir: file starts past end of image (%"PRIu32"). bailing\n", tsk_getu32(fs->endian, dentry->ext_loc_m)); break; } in_node->offset = tsk_getu32(fs->endian, dentry->ext_loc_m) * fs->block_size; if (tsk_getu32(fs->endian, in_node->inode.dr.data_len_m) + in_node->offset > fs->block_count * fs->block_size) { if (tsk_verbose) tsk_fprintf(stderr, "iso9660_load_inodes_dir: file ends past end of image (%"PRIu32" bytes). bailing\n", tsk_getu32(fs->endian, in_node->inode.dr.data_len_m) + in_node->offset); break; } /* record size to make sure fifos show up as unique files */ in_node->size = tsk_getu32(fs->endian, in_node->inode.dr.data_len_m); in_node->ea_size = dentry->ext_len; in_node->dentry_offset = s_offs + b_offs; if (is_first) in_node->inode.is_orphan = 0; else in_node->inode.is_orphan = 1; in_node->inum = count++; /* RockRidge data is located after the name. See if it is there. 
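             * The System Use area, when present, occupies whatever bytes of the
             * directory record remain after the fixed header and the file
             * identifier; that remainder is handed to parse_susp() below.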
*/ if ((int) (dentry->entry_len - sizeof(iso9660_dentry) - dentry->fi_len) > 1) { int extra_bytes = dentry->entry_len - sizeof(iso9660_dentry) - dentry->fi_len; in_node->inode.rr = parse_susp(fs, &buf[b_offs + sizeof(iso9660_dentry) + dentry->fi_len], extra_bytes, NULL); if (in_node->inode.rr == NULL) { if (tsk_verbose) tsk_fprintf(stderr, "iso9660_load_inodes_dir: parse_susp returned error (%s). bailing\n", tsk_error_get()); break; } in_node->inode.susp_off = b_offs + sizeof(iso9660_dentry) + dentry->fi_len + s_offs; in_node->inode.susp_len = extra_bytes; } else { in_node->inode.rr = NULL; in_node->inode.susp_off = 0; in_node->inode.susp_len = 0; } /* add inode to the list */ if (iso->in_list) { iso9660_inode_node *tmp, *prev_tmp; for (tmp = iso->in_list; tmp; tmp = tmp->next) { /* When processing the "first" volume descriptor, all entries get added to the list. * for the later ones, we skip duplicate ones that have content (blocks) that overlaps * with entries from a previous volume descriptor. */ if ((in_node->offset == tmp->offset) && (in_node->size == tmp->size) && (in_node->size) && (is_first == 0)) { // if we found rockridge, then update original if needed. if (in_node->inode.rr) { if (tmp->inode.rr == NULL) { tmp->inode.rr = in_node->inode.rr; tmp->inode.susp_off = in_node->inode.susp_off; tmp->inode.susp_len = in_node->inode.susp_len; in_node->inode.rr = NULL; } else { free(in_node->inode.rr); in_node->inode.rr = NULL; } } if (tsk_verbose) tsk_fprintf(stderr, "iso9660_load_inodes_dir: Removing duplicate entry for: %s (orig name: %s start: %d size: %d)\n", in_node->inode.fn, tmp->inode.fn, in_node->offset, in_node->size); free(in_node); in_node = NULL; count--; break; } prev_tmp = tmp; } // add it to the end (if we didn't get rid of it above) if (in_node) { prev_tmp->next = in_node; in_node->next = NULL; } } else { iso->in_list = in_node; in_node->next = NULL; } // skip two entries if this was the root directory (the . and ..). if ((i == 0) && (b_offs == 0) && (count == 1)) { b_offs += dentry->entry_len; dentry = (iso9660_dentry *) & buf[b_offs]; } b_offs += dentry->entry_len; } s_offs += cnt1; } return count; } /** * Process the path table for a joliet secondary volume descriptor * and load all of the files pointed to it. * The path table contains an entry for each directory. This code * then locates each of the diretories and proceses the contents. 
* * @param fs File system to process * @param svd Pointer to the secondary volume descriptor * @param count Current count of inodes * @returns updated count of inodes or -1 on error */ static int iso9660_load_inodes_pt_joliet(TSK_FS_INFO * fs, iso9660_svd * svd, int count, uint8_t is_first) { TSK_OFF_T pt_offs; /* offset of where we are in path table */ size_t pt_len; /* bytes left in path table */ // get the location of the path table pt_offs = (TSK_OFF_T) (tsk_getu32(fs->endian, svd->pt_loc_m) * fs->block_size); pt_len = tsk_getu32(fs->endian, svd->pt_size_m); while (pt_len > 0) { char utf16_buf[ISO9660_MAXNAMLEN_JOL + 1]; // UTF-16 name from img char utf8buf[2 * ISO9660_MAXNAMLEN_JOL + 1]; // UTF-8 version of name int readlen; TSK_OFF_T extent; /* offset of extent for current directory */ path_table_rec dir; int retVal; ssize_t cnt; UTF16 *name16; UTF8 *name8; // Read the path table entry cnt = tsk_fs_read(fs, pt_offs, (char *) &dir, (int) sizeof(dir)); if (cnt != sizeof(dir)) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2("iso9660_load_inodes_pt"); return -1; } pt_len -= cnt; pt_offs += (TSK_OFF_T) cnt; readlen = dir.len_di; if (dir.len_di > ISO9660_MAXNAMLEN_JOL) readlen = ISO9660_MAXNAMLEN_JOL; memset(utf16_buf, 0, ISO9660_MAXNAMLEN_JOL); /* get UCS-2 filename for the entry */ cnt = tsk_fs_read(fs, pt_offs, (char *) utf16_buf, readlen); if (cnt != dir.len_di) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2("iso_find_inodes"); return -1; } pt_len -= cnt; pt_offs += (TSK_OFF_T) cnt; // ISO stores UTF-16 in BE -- convert to local if we need to if (fs->endian & TSK_LIT_ENDIAN) { int i; for (i = 0; i < cnt; i += 2) { char t = utf16_buf[i]; utf16_buf[i] = utf16_buf[i + 1]; utf16_buf[i] = t; } } name16 = (UTF16 *) utf16_buf; name8 = (UTF8 *) utf8buf; retVal = tsk_UTF16toUTF8(fs->endian, (const UTF16 **) &name16, (UTF16 *) ((uintptr_t) & utf16_buf[cnt + 1]), &name8, (UTF8 *) ((uintptr_t) & utf8buf[2 * ISO9660_MAXNAMLEN_JOL]), TSKlenientConversion); if (retVal != TSKconversionOK) { if (tsk_verbose) tsk_fprintf(stderr, "fsstat: Error converting Joliet name to UTF8: %d", retVal); utf8buf[0] = '\0'; } *name8 = '\0'; /* padding byte is there if strlen(file name) is odd */ if (dir.len_di % 2) { pt_len--; pt_offs++; } extent = (TSK_OFF_T) (tsk_getu32(fs->endian, dir.ext_loc) * fs->block_size); // process the directory contents count = iso9660_load_inodes_dir(fs, extent, count, ISO9660_CTYPE_UTF16, utf8buf, is_first); if (count == -1) { return -1; } } return count; } /** * Proces the path table and the directories that are listed in it. * The files in each directory will be stored in ISO_INFO. * * @param iso File system to analyze and store results in * @returns -1 on error or count of inodes found. 
*/ static int iso9660_load_inodes_pt(ISO_INFO * iso) { TSK_FS_INFO *fs = (TSK_FS_INFO *) & iso->fs_info; int count = 0; iso9660_svd_node *s; iso9660_pvd_node *p; char fn[ISO9660_MAXNAMLEN_STD + 1]; /* store current directory name */ path_table_rec dir; TSK_OFF_T pt_offs; /* offset of where we are in path table */ size_t pt_len; /* bytes left in path table */ TSK_OFF_T extent; /* offset of extent for current directory */ ssize_t cnt; uint8_t is_first = 1; if (tsk_verbose) tsk_fprintf(stderr, "iso9660_load_inodes_pt\n"); /* initialize in case repeatedly called */ iso9660_inode_list_free(fs); iso->in_list = NULL; /* The secondary volume descriptor table will contain the * longer / unicode files, so we process it first to give them * a higher priority */ for (s = iso->svd; s != NULL; s = s->next) { /* Check if this is Joliet -- there are three possible signatures */ if ((s->svd.esc_seq[0] == 0x25) && (s->svd.esc_seq[1] == 0x2F) && ((s->svd.esc_seq[2] == 0x40) || (s->svd.esc_seq[2] == 0x43) || (s->svd.esc_seq[2] == 0x45))) { count = iso9660_load_inodes_pt_joliet(fs, &(s->svd), count, is_first); if (count == -1) { return -1; } is_first = 0; } } /* Now look for unique files in the primary descriptors */ for (p = iso->pvd; p != NULL; p = p->next) { pt_offs = (TSK_OFF_T) (tsk_getu32(fs->endian, p->pvd.pt_loc_m) * fs->block_size); pt_len = tsk_getu32(fs->endian, p->pvd.pt_size_m); while (pt_len > 0) { int readlen; /* get next path table entry... */ cnt = tsk_fs_read(fs, pt_offs, (char *) &dir, sizeof(dir)); if (cnt != sizeof(dir)) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2("iso_find_inodes"); return -1; } pt_len -= cnt; pt_offs += (TSK_OFF_T) cnt; readlen = dir.len_di; if (readlen > ISO9660_MAXNAMLEN_STD) readlen = ISO9660_MAXNAMLEN_STD; /* get directory name, this is the only chance */ cnt = tsk_fs_read(fs, pt_offs, fn, readlen); if (cnt != readlen) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2("iso_find_inodes"); return -1; } fn[cnt] = '\0'; pt_len -= cnt; pt_offs += (TSK_OFF_T) cnt; /* padding byte is there if strlen(file name) is odd */ if (dir.len_di % 2) { pt_len--; pt_offs++; } extent = (TSK_OFF_T) (tsk_getu32(fs->endian, dir.ext_loc) * fs->block_size); // process the directory contents count = iso9660_load_inodes_dir(fs, extent, count, ISO9660_CTYPE_ASCII, fn, is_first); if (count == -1) { return -1; } } } return count; } /** * Load the raw "inode" into the cached buffer (iso->dinode) * * dinode_load (for now) does not check for extended attribute records... 
* my issue is I dont have an iso9660 image with extended attr recs, so I * can't test/debug, etc * * @returns 1 if not found and 0 on succuss */ uint8_t iso9660_dinode_load(ISO_INFO * iso, TSK_INUM_T inum, iso9660_inode * dinode) { iso9660_inode_node *n; n = iso->in_list; while (n && (n->inum != inum)) n = n->next; if (n) { memcpy(dinode, &n->inode, sizeof(iso9660_inode)); return 0; } else { return 1; } } static uint16_t isomode2tskmode(uint16_t a_mode) { uint16_t mode = 0; if (a_mode & ISO_EA_IRUSR) mode |= TSK_FS_META_MODE_IRUSR; if (a_mode & ISO_EA_IWUSR) mode |= TSK_FS_META_MODE_IWUSR; if (a_mode & ISO_EA_IXUSR) mode |= TSK_FS_META_MODE_IXUSR; if (a_mode & ISO_EA_IRGRP) mode |= TSK_FS_META_MODE_IRGRP; if (a_mode & ISO_EA_IWGRP) mode |= TSK_FS_META_MODE_IWGRP; if (a_mode & ISO_EA_IXGRP) mode |= TSK_FS_META_MODE_IXGRP; if (a_mode & ISO_EA_IROTH) mode |= TSK_FS_META_MODE_IROTH; if (a_mode & ISO_EA_IWOTH) mode |= TSK_FS_META_MODE_IWOTH; if (a_mode & ISO_EA_IXOTH) mode |= TSK_FS_META_MODE_IXOTH; return mode; } /** * Copies cached disk inode into generic structure. * * @returns 1 on error and 0 on success */ static uint8_t iso9660_dinode_copy(ISO_INFO * iso, TSK_FS_META * fs_meta, TSK_INUM_T inum, iso9660_inode * dinode) { TSK_FS_INFO *fs = (TSK_FS_INFO *) & iso->fs_info; struct tm t; if (fs_meta == NULL) { tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("iso9660_dinode_copy: fs_file or meta is NULL"); return 1; } fs_meta->attr_state = TSK_FS_META_ATTR_EMPTY; if (fs_meta->attr) { tsk_fs_attrlist_markunused(fs_meta->attr); } if (fs_meta->content_len < ISO9660_FILE_CONTENT_LEN) { if ((fs_meta = tsk_fs_meta_realloc(fs_meta, ISO9660_FILE_CONTENT_LEN)) == NULL) { return 1; } } fs_meta->addr = inum; fs_meta->size = tsk_getu32(fs->endian, dinode->dr.data_len_m); memset(&t, 0, sizeof(struct tm)); t.tm_sec = dinode->dr.rec_time.sec; t.tm_min = dinode->dr.rec_time.min; t.tm_hour = dinode->dr.rec_time.hour; t.tm_mday = dinode->dr.rec_time.day; t.tm_mon = dinode->dr.rec_time.month - 1; t.tm_year = dinode->dr.rec_time.year; //gmt_hrdiff = iso->dinode->dr.rec_time.gmt_off * 15 / 60; fs_meta->crtime = mktime(&t); fs_meta->mtime = fs_meta->atime = fs_meta->ctime = 0; fs_meta->crtime_nano = fs_meta->mtime_nano = fs_meta->atime_nano = fs_meta->ctime_nano = 0; if (dinode->dr.flags & ISO9660_FLAG_DIR) fs_meta->type = TSK_FS_META_TYPE_DIR; else fs_meta->type = TSK_FS_META_TYPE_REG; if (dinode->ea) { fs_meta->uid = tsk_getu32(fs->endian, dinode->ea->uid); fs_meta->gid = tsk_getu32(fs->endian, dinode->ea->gid); fs_meta->mode = isomode2tskmode(tsk_getu16(fs->endian, dinode->ea->mode)); fs_meta->nlink = 1; } else { fs_meta->uid = 0; fs_meta->gid = 0; fs_meta->mode = 0; fs_meta->nlink = 1; } ((TSK_DADDR_T *) fs_meta->content_ptr)[0] = (TSK_DADDR_T) tsk_getu32(fs->endian, dinode->dr.ext_loc_m); // mark files that were found from other volume descriptors as unalloc so that they // come up as orphan files. 
if (dinode->is_orphan) fs_meta->flags = TSK_FS_META_FLAG_UNALLOC | TSK_FS_META_FLAG_USED; else fs_meta->flags = TSK_FS_META_FLAG_ALLOC | TSK_FS_META_FLAG_USED; return 0; } static void iso9660_close(TSK_FS_INFO * fs) { ISO_INFO *iso = (ISO_INFO *) fs; iso9660_pvd_node *p; iso9660_svd_node *s; fs->tag = 0; while (iso->pvd != NULL) { p = iso->pvd; iso->pvd = iso->pvd->next; free(p); } while (iso->svd != NULL) { s = iso->svd; iso->svd = iso->svd->next; free(s); } tsk_fs_free(fs); } static uint8_t iso9660_inode_lookup(TSK_FS_INFO * fs, TSK_FS_FILE * a_fs_file, TSK_INUM_T inum) { ISO_INFO *iso = (ISO_INFO *) fs; iso9660_inode *dinode; if (tsk_verbose) tsk_fprintf(stderr, "iso9660_inode_lookup: iso:" " inum: %" PRIuINUM "\n", inum); if (a_fs_file == NULL) { tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("iso9660_inode_lookup: fs_file is NULL"); return 1; } if (a_fs_file->meta == NULL) { if ((a_fs_file->meta = tsk_fs_meta_alloc(ISO9660_FILE_CONTENT_LEN)) == NULL) return 1; } else { tsk_fs_meta_reset(a_fs_file->meta); } // see if they are looking for the special "orphans" directory if (inum == TSK_FS_ORPHANDIR_INUM(fs)) { if (tsk_fs_dir_make_orphan_dir_meta(fs, a_fs_file->meta)) { return 1; } else { return 0; } } else { /* allocate cache buffers */ /* dinode */ dinode = (iso9660_inode *) tsk_malloc(sizeof(iso9660_inode)); if (dinode == NULL) { fs->tag = 0; iso9660_close(fs); return 1; } // load the inode into the ISO buffer if (iso9660_dinode_load(iso, inum, dinode)) { free(dinode); return 1; } // copy into the FS_META structure if (iso9660_dinode_copy(iso, a_fs_file->meta, inum, dinode)) { free(dinode); return 1; } } free(dinode); return 0; } static uint8_t iso9660_inode_walk(TSK_FS_INFO * fs, TSK_INUM_T start, TSK_INUM_T last, TSK_FS_META_FLAG_ENUM flags, TSK_FS_META_WALK_CB action, void *ptr) { char *myname = "iso9660_inode_walk"; ISO_INFO *iso = (ISO_INFO *) fs; TSK_INUM_T inum, end_inum_tmp; TSK_FS_FILE *fs_file; int myflags; iso9660_inode *dinode; // clean up any error messages that are lying around tsk_error_reset(); if (tsk_verbose) tsk_fprintf(stderr, "iso9660_inode_walk: " " start: %" PRIuINUM " last: %" PRIuINUM " flags: %d" " action: %" PRIu64 " ptr: %" PRIu64 "\n", start, last, flags, (uint64_t) action, (uint64_t) ptr); myflags = TSK_FS_META_FLAG_ALLOC; /* * Sanity checks. 
*/ if (start < fs->first_inum || start > fs->last_inum) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); tsk_error_set_errstr("%s: Start inode: %" PRIuINUM "", myname, start); return 1; } if (last < fs->first_inum || last > fs->last_inum || last < start) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); tsk_error_set_errstr("%s: End inode: %" PRIuINUM "", myname, last); return 1; } /* If ORPHAN is wanted, then make sure that the flags are correct */ if (flags & TSK_FS_META_FLAG_ORPHAN) { flags |= TSK_FS_META_FLAG_UNALLOC; flags &= ~TSK_FS_META_FLAG_ALLOC; flags |= TSK_FS_META_FLAG_USED; flags &= ~TSK_FS_META_FLAG_UNUSED; } else if (((flags & TSK_FS_META_FLAG_ALLOC) == 0) && ((flags & TSK_FS_META_FLAG_UNALLOC) == 0)) { flags |= (TSK_FS_META_FLAG_ALLOC | TSK_FS_META_FLAG_UNALLOC); } /* If neither of the USED or UNUSED flags are set, then set them * both */ if (((flags & TSK_FS_META_FLAG_USED) == 0) && ((flags & TSK_FS_META_FLAG_UNUSED) == 0)) { flags |= (TSK_FS_META_FLAG_USED | TSK_FS_META_FLAG_UNUSED); } /* If we are looking for orphan files and have not yet filled * in the list of unalloc inodes that are pointed to, then fill * in the list * */ if ((flags & TSK_FS_META_FLAG_ORPHAN)) { if (tsk_fs_dir_load_inum_named(fs) != TSK_OK) { tsk_error_errstr2_concat ("- iso9660_inode_walk: identifying inodes allocated by file names"); return 1; } } if ((fs_file = tsk_fs_file_alloc(fs)) == NULL) return 1; if ((fs_file->meta = tsk_fs_meta_alloc(ISO9660_FILE_CONTENT_LEN)) == NULL) return 1; // we need to handle fs->last_inum specially because it is for the // virtual ORPHANS directory. Handle it outside of the loop. if (last == TSK_FS_ORPHANDIR_INUM(fs)) end_inum_tmp = last - 1; else end_inum_tmp = last; /* allocate cache buffers */ /* dinode */ dinode = (iso9660_inode *) tsk_malloc(sizeof(iso9660_inode)); if (dinode == NULL) { fs->tag = 0; iso9660_close(fs); return 1; } /* * Iterate. */ for (inum = start; inum <= end_inum_tmp; inum++) { int retval; if (iso9660_dinode_load(iso, inum, dinode)) { tsk_fs_file_close(fs_file); free(dinode); return 1; } if (iso9660_dinode_copy(iso, fs_file->meta, inum, dinode)) { free(dinode); return 1; } myflags = fs_file->meta->flags; if ((flags & myflags) != myflags) continue; /* If we want only orphans, then check if this * inode is in the seen list * */ if ((myflags & TSK_FS_META_FLAG_UNALLOC) && (flags & TSK_FS_META_FLAG_ORPHAN) && (tsk_fs_dir_find_inum_named(fs, inum))) { continue; } retval = action(fs_file, ptr); if (retval == TSK_WALK_ERROR) { tsk_fs_file_close(fs_file); free(dinode); return 1; } else if (retval == TSK_WALK_STOP) { break; } } // handle the virtual orphans folder if they asked for it if ((last == TSK_FS_ORPHANDIR_INUM(fs)) && (flags & TSK_FS_META_FLAG_ALLOC) && (flags & TSK_FS_META_FLAG_USED)) { int retval; if (tsk_fs_dir_make_orphan_dir_meta(fs, fs_file->meta)) { tsk_fs_file_close(fs_file); free(dinode); return 1; } /* call action */ retval = action(fs_file, ptr); if (retval == TSK_WALK_STOP) { tsk_fs_file_close(fs_file); free(dinode); return 0; } else if (retval == TSK_WALK_ERROR) { tsk_fs_file_close(fs_file); free(dinode); return 1; } } /* * Cleanup. */ tsk_fs_file_close(fs_file); if (dinode != NULL) free((char *) dinode); return 0; } // @@@ Doesn' thit seem to ignore interleave? 
/* return 1 if block is allocated in a file's extent, return 0 otherwise */ static int iso9660_is_block_alloc(TSK_FS_INFO * fs, TSK_DADDR_T blk_num) { ISO_INFO *iso = (ISO_INFO *) fs; iso9660_inode_node *in_node; if (tsk_verbose) tsk_fprintf(stderr, "iso9660_is_block_alloc: " " blk_num: %" PRIuDADDR "\n", blk_num); for (in_node = iso->in_list; in_node; in_node = in_node->next) { TSK_DADDR_T first_block = in_node->offset / fs->block_size; TSK_DADDR_T file_size = tsk_getu32(fs->endian, in_node->inode.dr.data_len_m); TSK_DADDR_T last_block = first_block + (file_size / fs->block_size); if (file_size % fs->block_size) last_block++; if ((blk_num >= first_block) && (blk_num <= last_block)) return 1; } return 0; } TSK_FS_BLOCK_FLAG_ENUM static iso9660_block_getflags(TSK_FS_INFO * a_fs, TSK_DADDR_T a_addr) { return (iso9660_is_block_alloc(a_fs, a_addr)) ? TSK_FS_BLOCK_FLAG_ALLOC : TSK_FS_BLOCK_FLAG_UNALLOC; } /* flags: TSK_FS_BLOCK_FLAG_ALLOC and FS_FLAG_UNALLOC * ISO9660 has a LOT of very sparse meta, so in this function a block is only * checked to see if it is part of an inode's extent */ static uint8_t iso9660_block_walk(TSK_FS_INFO * fs, TSK_DADDR_T start, TSK_DADDR_T last, TSK_FS_BLOCK_WALK_FLAG_ENUM flags, TSK_FS_BLOCK_WALK_CB action, void *ptr) { char *myname = "iso9660_block_walk"; TSK_DADDR_T addr; TSK_FS_BLOCK *fs_block; // clean up any error messages that are lying around tsk_error_reset(); if (tsk_verbose) tsk_fprintf(stderr, "iso9660_block_walk: " " start: %" PRIuDADDR " last: %" PRIuDADDR " flags: %d" " action: %" PRIu64 " ptr: %" PRIu64 "\n", start, last, flags, (uint64_t) action, (uint64_t) ptr); /* * Sanity checks. */ if (start < fs->first_block || start > fs->last_block) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); tsk_error_set_errstr("%s: Start block: %" PRIuDADDR "", myname, start); return 1; } if (last < fs->first_block || last > fs->last_block) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); tsk_error_set_errstr("%s: End block: %" PRIuDADDR "", myname, last); return 1; } /* Sanity check on flags -- make sure at least one ALLOC is set */ if (((flags & TSK_FS_BLOCK_WALK_FLAG_ALLOC) == 0) && ((flags & TSK_FS_BLOCK_WALK_FLAG_UNALLOC) == 0)) { flags |= (TSK_FS_BLOCK_WALK_FLAG_ALLOC | TSK_FS_BLOCK_WALK_FLAG_UNALLOC); } if (((flags & TSK_FS_BLOCK_WALK_FLAG_META) == 0) && ((flags & TSK_FS_BLOCK_WALK_FLAG_CONT) == 0)) { flags |= (TSK_FS_BLOCK_WALK_FLAG_CONT | TSK_FS_BLOCK_WALK_FLAG_META); } if ((fs_block = tsk_fs_block_alloc(fs)) == NULL) { return 1; } if (tsk_verbose) tsk_fprintf(stderr, "isofs_block_walk: Block Walking %" PRIuDADDR " to %" PRIuDADDR "\n", start, last); /* cycle through block addresses */ for (addr = start; addr <= last; addr++) { int retval; int myflags = iso9660_block_getflags(fs, addr); // test if we should call the callback with this one if ((myflags & TSK_FS_BLOCK_FLAG_ALLOC) && (!(flags & TSK_FS_BLOCK_WALK_FLAG_ALLOC))) continue; else if ((myflags & TSK_FS_BLOCK_FLAG_UNALLOC) && (!(flags & TSK_FS_BLOCK_WALK_FLAG_UNALLOC))) continue; if (flags & TSK_FS_BLOCK_WALK_FLAG_AONLY) myflags |= TSK_FS_BLOCK_FLAG_AONLY; if (tsk_fs_block_get_flag(fs, fs_block, addr, myflags) == NULL) { tsk_error_set_errstr2("iso_block_walk"); tsk_fs_block_free(fs_block); return 1; } retval = action(fs_block, ptr); if (retval == TSK_WALK_ERROR) { tsk_fs_block_free(fs_block); return 1; } else if (retval == TSK_WALK_STOP) { break; } } tsk_fs_block_free(fs_block); return 0; } static uint8_t iso9660_make_data_run(TSK_FS_FILE * a_fs_file) { ISO_INFO *iso; 
iso9660_dentry dd; TSK_FS_INFO *fs = NULL; TSK_FS_ATTR *fs_attr = NULL; TSK_FS_ATTR_RUN *data_run = NULL; iso9660_inode *dinode; // clean up any error messages that are lying around tsk_error_reset(); if ((a_fs_file == NULL) || (a_fs_file->meta == NULL) || (a_fs_file->fs_info == NULL)) { tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("iso9660_make_data_run: fs_file or meta is NULL"); return 1; } fs = a_fs_file->fs_info; iso = (ISO_INFO *) fs; // see if we have already loaded the runs if ((a_fs_file->meta->attr != NULL) && (a_fs_file->meta->attr_state == TSK_FS_META_ATTR_STUDIED)) { return 0; } else if (a_fs_file->meta->attr_state == TSK_FS_META_ATTR_ERROR) { return 1; } // not sure why this would ever happen, but... else if (a_fs_file->meta->attr != NULL) { tsk_fs_attrlist_markunused(a_fs_file->meta->attr); } else if (a_fs_file->meta->attr == NULL) { a_fs_file->meta->attr = tsk_fs_attrlist_alloc(); } /* allocate cache buffers */ /* dinode */ if ((dinode = (iso9660_inode *) tsk_malloc(sizeof(iso9660_inode))) == NULL) { fs->tag = 0; iso9660_close(fs); return 1; } // copy the raw data if (iso9660_dinode_load(iso, a_fs_file->meta->addr, dinode)) { tsk_error_set_errstr2("iso9660_make_data_run"); a_fs_file->meta->attr_state = TSK_FS_META_ATTR_ERROR; free(dinode); return 1; } memcpy(&dd, &dinode->dr, sizeof(iso9660_dentry)); free(dinode); dinode = NULL; if (dd.gap_sz) { a_fs_file->meta->attr_state = TSK_FS_META_ATTR_ERROR; tsk_error_set_errno(TSK_ERR_FS_UNSUPFUNC); tsk_error_set_errstr("file %" PRIuINUM " has an interleave gap -- not supported", a_fs_file->meta->addr); return 1; } if ((fs_attr = tsk_fs_attrlist_getnew(a_fs_file->meta->attr, TSK_FS_ATTR_NONRES)) == NULL) { return 1; } // make a non-resident run data_run = tsk_fs_attr_run_alloc(); if (data_run == NULL) { return -1; } data_run->addr = ((TSK_DADDR_T *) a_fs_file->meta->content_ptr)[0]; data_run->len = (a_fs_file->meta->size + fs->block_size - 1) / fs->block_size; data_run->offset = 0; // initialize the data run if (tsk_fs_attr_set_run(a_fs_file, fs_attr, data_run, NULL, TSK_FS_ATTR_TYPE_DEFAULT, TSK_FS_ATTR_ID_DEFAULT, a_fs_file->meta->size, a_fs_file->meta->size, roundup(a_fs_file->meta->size + dd.ext_len, fs->block_size) - dd.ext_len, 0, 0)) { return 1; } // the first bytes in the run could be allocated for the extended attribute. fs_attr->nrd.skiplen = dd.ext_len; a_fs_file->meta->attr_state = TSK_FS_META_ATTR_STUDIED; return 0; } static uint8_t iso9660_fscheck(TSK_FS_INFO * fs, FILE * hFile) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNSUPFUNC); tsk_error_set_errstr("fscheck not implemented for iso9660 yet"); return 1; } /** * Print details about the file system to a file handle. 
* * @param fs File system to print details on * @param hFile File handle to print text to * * @returns 1 on error and 0 on success */ static uint8_t iso9660_fsstat(TSK_FS_INFO * fs, FILE * hFile) { char str[129]; /* store name of publisher/preparer/etc */ ISO_INFO *iso = (ISO_INFO *) fs; char *cp; int i; iso9660_pvd_node *p = iso->pvd; iso9660_svd_node *s; // clean up any error messages that are lying around tsk_error_reset(); if (tsk_verbose) tsk_fprintf(stderr, "iso9660_fsstat:\n"); i = 0; for (p = iso->pvd; p != NULL; p = p->next) { i++; tsk_fprintf(hFile, "\n=== PRIMARY VOLUME DESCRIPTOR %d ===\n", i); tsk_fprintf(hFile, "FILE SYSTEM INFORMATION\n"); tsk_fprintf(hFile, "--------------------------------------------\n"); tsk_fprintf(hFile, "File System Type: ISO9660\n"); tsk_fprintf(hFile, "Volume Name: %s\n", p->pvd.vol_id); tsk_fprintf(hFile, "Volume Set Size: %d\n", tsk_getu16(fs->endian, p->pvd.vol_set_m)); tsk_fprintf(hFile, "Volume Set Sequence: %d\n", tsk_getu16(fs->endian, p->pvd.vol_seq_m)); /* print publisher */ if (p->pvd.pub_id[0] == 0x5f) /* publisher is in a file. TODO: handle this properly */ snprintf(str, 8, "In file\n"); else snprintf(str, 128, "%s", p->pvd.pub_id); cp = &str[127]; /* find last printable non space character */ while ((!isprint(*cp) || isspace(*cp)) && (cp != str)) cp--; *++cp = '\0'; tsk_fprintf(hFile, "Publisher: %s\n", str); memset(str, ' ', 128); /* print data preparer */ if (p->pvd.prep_id[0] == 0x5f) /* preparer is in a file. TODO: handle this properly */ snprintf(str, 8, "In file\n"); else snprintf(str, 128, "%s", p->pvd.prep_id); cp = &str[127]; while ((!isprint(*cp) || isspace(*cp)) && (cp != str)) cp--; *++cp = '\0'; tsk_fprintf(hFile, "Data Preparer: %s\n", str); memset(str, ' ', 128); /* print recording application */ if (p->pvd.app_id[0] == 0x5f) /* application is in a file. TODO: handle this properly */ snprintf(str, 8, "In file\n"); else snprintf(str, 128, "%s", p->pvd.app_id); cp = &str[127]; while ((!isprint(*cp) || isspace(*cp)) && (cp != str)) cp--; *++cp = '\0'; tsk_fprintf(hFile, "Recording Application: %s\n", str); memset(str, ' ', 128); /* print copyright */ if (p->pvd.copy_id[0] == 0x5f) /* copyright is in a file. 
TODO: handle this properly */ snprintf(str, 8, "In file\n"); else snprintf(str, 37, "%s", p->pvd.copy_id); cp = &str[36]; while ((!isprint(*cp) || isspace(*cp)) && (cp != str)) cp--; *++cp = '\0'; tsk_fprintf(hFile, "Copyright: %s\n", str); memset(str, ' ', 37); tsk_fprintf(hFile, "\nMETADATA INFORMATION\n"); tsk_fprintf(hFile, "--------------------------------------------\n"); tsk_fprintf(hFile, "Path Table Location: %" PRIu32 "-%" PRIu32 "\n", tsk_getu32(fs->endian, p->pvd.pt_loc_m), tsk_getu32(fs->endian, p->pvd.pt_loc_m) + tsk_getu32(fs->endian, p->pvd.pt_size_m) / fs->block_size); tsk_fprintf(hFile, "Inode Range: %" PRIuINUM " - %" PRIuINUM "\n", fs->first_inum, fs->last_inum); tsk_fprintf(hFile, "Root Directory Block: %" PRIuDADDR "\n", tsk_getu32(fs->endian, p->pvd.dir_rec.ext_loc_m)); tsk_fprintf(hFile, "\nCONTENT INFORMATION\n"); tsk_fprintf(hFile, "--------------------------------------------\n"); tsk_fprintf(hFile, "Sector Size: %d\n", ISO9660_SSIZE_B); tsk_fprintf(hFile, "Block Size: %d\n", tsk_getu16(fs->endian, p->pvd.blk_sz_m)); if (fs->block_pre_size) { tsk_fprintf(hFile, "Raw CD pre-block size: %d\n", fs->block_pre_size); tsk_fprintf(hFile, "Raw CD post-block size: %d\n", fs->block_post_size); } tsk_fprintf(hFile, "Total Sector Range: 0 - %d\n", (int) ((fs->block_size / ISO9660_SSIZE_B) * (fs->block_count - 1))); /* get image slack, ignore how big the image claims itself to be */ tsk_fprintf(hFile, "Total Block Range: 0 - %d\n", (int) fs->block_count - 1); } i = 0; for (s = iso->svd; s != NULL; s = s->next) { i++; tsk_fprintf(hFile, "\n=== SUPPLEMENTARY VOLUME DESCRIPTOR %d ===\n", i); tsk_fprintf(hFile, "FILE SYSTEM INFORMATION\n"); tsk_fprintf(hFile, "--------------------------------------------\n"); tsk_fprintf(hFile, "File System Type: ISO9660\n"); tsk_fprintf(hFile, "Volume Name: %s\n", s->svd.vol_id); tsk_fprintf(hFile, "Volume Set Size: %d\n", tsk_getu16(fs->endian, s->svd.vol_set_m)); tsk_fprintf(hFile, "Volume Set Sequence: %d\n", tsk_getu16(fs->endian, s->svd.vol_seq_m)); /* print publisher */ if (s->svd.pub_id[0] == 0x5f) /* publisher is in a file. TODO: handle this properly */ snprintf(str, 8, "In file\n"); else snprintf(str, 128, "%s", s->svd.pub_id); cp = &str[127]; /* find last printable non space character */ while ((!isprint(*cp) || isspace(*cp)) && (cp != str)) cp--; *++cp = '\0'; tsk_fprintf(hFile, "Publisher: %s\n", str); memset(str, ' ', 128); /* print data preparer */ if (s->svd.prep_id[0] == 0x5f) /* preparer is in a file. TODO: handle this properly */ snprintf(str, 8, "In file\n"); else snprintf(str, 128, "%s", s->svd.prep_id); cp = &str[127]; while ((!isprint(*cp) || isspace(*cp)) && (cp != str)) cp--; *++cp = '\0'; tsk_fprintf(hFile, "Data Preparer: %s\n", str); memset(str, ' ', 128); /* print recording application */ if (s->svd.app_id[0] == 0x5f) /* application is in a file. TODO: handle this properly */ snprintf(str, 8, "In file\n"); else snprintf(str, 128, "%s", s->svd.app_id); cp = &str[127]; while ((!isprint(*cp) || isspace(*cp)) && (cp != str)) cp--; *++cp = '\0'; tsk_fprintf(hFile, "Recording Application: %s\n", str); memset(str, ' ', 128); /* print copyright */ if (s->svd.copy_id[0] == 0x5f) /* copyright is in a file. 
TODO: handle this properly */ snprintf(str, 8, "In file\n"); else snprintf(str, 37, "%s\n", s->svd.copy_id); cp = &str[36]; while ((!isprint(*cp) || isspace(*cp)) && (cp != str)) cp--; *++cp = '\0'; tsk_fprintf(hFile, "Copyright: %s\n", str); memset(str, ' ', 37); tsk_fprintf(hFile, "\nMETADATA INFORMATION\n"); tsk_fprintf(hFile, "--------------------------------------------\n"); tsk_fprintf(hFile, "Path Table Location: %" PRIu32 "-%" PRIu32 "\n", tsk_getu32(fs->endian, s->svd.pt_loc_m), tsk_getu32(fs->endian, s->svd.pt_loc_m) + tsk_getu32(fs->endian, s->svd.pt_size_m) / fs->block_size); tsk_fprintf(hFile, "Root Directory Block: %" PRIuDADDR "\n", tsk_getu32(fs->endian, s->svd.dir_rec.ext_loc_m)); /* learn joliet level (1-3) */ if (!strncmp((char *) s->svd.esc_seq, "%/E", 3)) tsk_fprintf(hFile, "Joliet Name Encoding: UCS-2 Level 3\n"); if (!strncmp((char *) s->svd.esc_seq, "%/C", 3)) tsk_fprintf(hFile, "Joliet Name Encoding: UCS-2 Level 2\n"); if (!strncmp((char *) s->svd.esc_seq, "%/@", 3)) tsk_fprintf(hFile, "Joliet Name Encoding: UCS-2 Level 1\n"); if (iso->rr_found) tsk_fprintf(hFile, "RockRidge Extensions present\n"); tsk_fprintf(hFile, "\nCONTENT INFORMATION\n"); tsk_fprintf(hFile, "--------------------------------------------\n"); tsk_fprintf(hFile, "Sector Size: %d\n", ISO9660_SSIZE_B); tsk_fprintf(hFile, "Block Size: %d\n", fs->block_size); tsk_fprintf(hFile, "Total Sector Range: 0 - %d\n", (int) ((fs->block_size / ISO9660_SSIZE_B) * (fs->block_count - 1))); /* get image slack, ignore how big the image claims itself to be */ tsk_fprintf(hFile, "Total Block Range: 0 - %d\n", (int) fs->block_count - 1); } return 0; } /** * Make a unix-style permissions string based the flags in dentry and * the cached inode in fs, storing results in perm. Caller must * ensure perm can hold 10 chars plus one null char. 
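 * For example, a directory record with no extended attribute record comes
 * back as "dr-xr-xr-x".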
*/ static char * make_unix_perm(TSK_FS_INFO * fs, iso9660_dentry * dd, iso9660_inode * dinode, char *perm) { if (tsk_verbose) tsk_fprintf(stderr, "make_unix_perm: fs: %" PRIu64 " dd: %" PRIu64 "\n", (uint64_t) fs, (uint64_t) dd); memset(perm, '-', 10); perm[10] = '\0'; if (dd->flags & ISO9660_FLAG_DIR) perm[0] = 'd'; if (dinode->ea) { if (tsk_getu16(fs->endian, dinode->ea->mode) & ISO9660_BIT_UR) perm[1] = 'r'; if (tsk_getu16(fs->endian, dinode->ea->mode) & ISO9660_BIT_UX) perm[3] = 'x'; if (tsk_getu16(fs->endian, dinode->ea->mode) & ISO9660_BIT_GR) perm[4] = 'r'; if (tsk_getu16(fs->endian, dinode->ea->mode) & ISO9660_BIT_GX) perm[6] = 'x'; if (tsk_getu16(fs->endian, dinode->ea->mode) & ISO9660_BIT_AR) perm[7] = 'r'; if (tsk_getu16(fs->endian, dinode->ea->mode) & ISO9660_BIT_AX) perm[9] = 'x'; } else { strcpy(&perm[1], "r-xr-xr-x"); } return perm; } #if 0 static void iso9660_print_rockridge(FILE * hFile, rockridge_ext * rr) { char mode_buf[11]; tsk_fprintf(hFile, "\nROCKRIDGE EXTENSIONS\n"); tsk_fprintf(hFile, "Owner-ID: "); tsk_fprintf(hFile, "%d\t", (int) rr->uid); tsk_fprintf(hFile, "Group-ID: "); tsk_fprintf(hFile, "%d\n", (int) rr->gid); tsk_fprintf(hFile, "Mode: "); memset(mode_buf, '-', 11); mode_buf[10] = '\0'; /* file type */ /* note: socket and symbolic link are multi bit fields */ if ((rr->mode & MODE_IFSOCK) == MODE_IFSOCK) mode_buf[0] = 's'; else if ((rr->mode & MODE_IFLNK) == MODE_IFLNK) mode_buf[0] = 'l'; else if (rr->mode & MODE_IFDIR) mode_buf[0] = 'd'; else if (rr->mode & MODE_IFIFO) mode_buf[0] = 'p'; else if (rr->mode & MODE_IFBLK) mode_buf[0] = 'b'; else if (rr->mode & MODE_IFCHR) mode_buf[0] = 'c'; /* owner permissions */ if (rr->mode & TSK_FS_META_MODE_IRUSR) mode_buf[1] = 'r'; if (rr->mode & TSK_FS_META_MODE_IWUSR) mode_buf[2] = 'w'; if ((rr->mode & TSK_FS_META_MODE_IXUSR) && (rr->mode & TSK_FS_META_MODE_ISUID)) mode_buf[3] = 's'; else if (rr->mode & TSK_FS_META_MODE_IXUSR) mode_buf[3] = 'x'; else if (rr->mode & TSK_FS_META_MODE_ISUID) mode_buf[3] = 'S'; /* group permissions */ if (rr->mode & TSK_FS_META_MODE_IRGRP) mode_buf[4] = 'r'; if (rr->mode & TSK_FS_META_MODE_IWGRP) mode_buf[5] = 'w'; if ((rr->mode & TSK_FS_META_MODE_IXGRP) && (rr->mode & TSK_FS_META_MODE_ISGID)) mode_buf[6] = 's'; else if (rr->mode & TSK_FS_META_MODE_IXGRP) mode_buf[6] = 'x'; else if (rr->mode & TSK_FS_META_MODE_ISGID) mode_buf[6] = 'S'; /* other permissions */ if (rr->mode & TSK_FS_META_MODE_IROTH) mode_buf[7] = 'r'; if (rr->mode & TSK_FS_META_MODE_IWOTH) mode_buf[8] = 'w'; if ((rr->mode & TSK_FS_META_MODE_IXOTH) && (rr->mode & TSK_FS_META_MODE_ISVTX)) mode_buf[9] = 't'; else if (rr->mode & TSK_FS_META_MODE_IXOTH) mode_buf[9] = 'x'; else if (rr->mode & TSK_FS_META_MODE_ISVTX) mode_buf[9] = 'T'; tsk_fprintf(hFile, "%s\n", mode_buf); tsk_fprintf(hFile, "Number links: %" PRIu32 "\n", rr->nlink); tsk_fprintf(hFile, "Alternate name: %s\n", rr->fn); tsk_fprintf(hFile, "\n"); } #endif /** * Print details on a specific file to a file handle. 
* * @param fs File system file is located in * @param hFile File handle to print text to * @param inum Address of file in file system * @param numblock The number of blocks in file to force print (can go beyond file size) * @param sec_skew Clock skew in seconds to also print times in * * @returns 1 on error and 0 on success */ static uint8_t iso9660_istat(TSK_FS_INFO * fs, FILE * hFile, TSK_INUM_T inum, TSK_DADDR_T numblock, int32_t sec_skew) { ISO_INFO *iso = (ISO_INFO *) fs; TSK_FS_FILE *fs_file; iso9660_dentry dd; iso9660_inode *dinode; char timeBuf[128]; // clean up any error messages that are lying around tsk_error_reset(); if ((fs_file = tsk_fs_file_open_meta(fs, NULL, inum)) == NULL) return 1; tsk_fprintf(hFile, "Entry: %" PRIuINUM "\n", inum); /* allocate cache buffers */ /* dinode */ dinode = (iso9660_inode *) tsk_malloc(sizeof(iso9660_inode)); if (dinode == NULL) { fs->tag = 0; iso9660_close(fs); return 1; } if (iso9660_dinode_load(iso, inum, dinode)) { tsk_error_set_errstr2("iso9660_istat"); tsk_fs_file_close(fs_file); free(dinode); return 1; } memcpy(&dd, &dinode->dr, sizeof(iso9660_dentry)); tsk_fprintf(hFile, "Type: "); if (dd.flags & ISO9660_FLAG_DIR) tsk_fprintf(hFile, "Directory\n"); else tsk_fprintf(hFile, "File\n"); tsk_fprintf(hFile, "Links: %d\n", fs_file->meta->nlink); if (dd.gap_sz > 0) { tsk_fprintf(hFile, "Interleave Gap Size: %d\n", dd.gap_sz); tsk_fprintf(hFile, "Interleave File Unit Size: %d\n", dd.unit_sz); } tsk_fprintf(hFile, "Flags: "); if (dd.flags & ISO9660_FLAG_HIDE) tsk_fprintf(hFile, "Hidden, "); if (dd.flags & ISO9660_FLAG_ASSOC) tsk_fprintf(hFile, "Associated, "); if (dd.flags & ISO9660_FLAG_RECORD) tsk_fprintf(hFile, "Record Format, "); if (dd.flags & ISO9660_FLAG_PROT) tsk_fprintf(hFile, "Protected, "); /* check if reserved bits are set, be suspicious */ if (dd.flags & ISO9660_FLAG_RES1) tsk_fprintf(hFile, "Reserved1, "); if (dd.flags & ISO9660_FLAG_RES2) tsk_fprintf(hFile, "Reserved2, "); if (dd.flags & ISO9660_FLAG_MULT) tsk_fprintf(hFile, "Non-final multi-extent entry"); putchar('\n'); tsk_fprintf(hFile, "Name: %s\n", dinode->fn); tsk_fprintf(hFile, "Size: %" PRIu32 "\n", tsk_getu32(fs->endian, dinode->dr.data_len_m)); if (dinode->ea) { char perm_buf[11]; tsk_fprintf(hFile, "\nEXTENDED ATTRIBUTE INFO\n"); tsk_fprintf(hFile, "Owner-ID: %" PRIu32 "\n", tsk_getu32(fs->endian, dinode->ea->uid)); tsk_fprintf(hFile, "Group-ID: %" PRIu32 "\n", tsk_getu32(fs->endian, dinode->ea->gid)); tsk_fprintf(hFile, "Mode: %s\n", make_unix_perm(fs, &dd, dinode, perm_buf)); } else if (dinode->susp_off) { char *buf2 = (char *) tsk_malloc((size_t) dinode->susp_len); if (buf2 != NULL) { ssize_t cnt; fprintf(hFile, "\nRock Ridge Extension Data\n"); cnt = tsk_fs_read(fs, dinode->susp_off, buf2, (size_t) dinode->susp_len); if (cnt == dinode->susp_len) { parse_susp(fs, buf2, (int) cnt, hFile); } else { fprintf(hFile, "Error reading Rock Ridge Location\n"); if (tsk_verbose) { fprintf(stderr, "istat: error reading rock ridge entry\n"); tsk_error_print(stderr); } tsk_error_reset(); } free(buf2); } else { if (tsk_verbose) fprintf(stderr, "istat: error allocating memory to process rock ridge entry\n"); tsk_error_reset(); } } //else if (iso->dinode->rr) { // iso9660_print_rockridge(hFile, iso->dinode->rr); //} else { char perm_buf[11]; tsk_fprintf(hFile, "Owner-ID: 0\n"); tsk_fprintf(hFile, "Group-ID: 0\n"); tsk_fprintf(hFile, "Mode: %s\n", make_unix_perm(fs, &dd, dinode, perm_buf)); } if (sec_skew != 0) { tsk_fprintf(hFile, "\nAdjusted File Times:\n"); if (fs_file->meta->mtime) 
fs_file->meta->mtime -= sec_skew; if (fs_file->meta->atime) fs_file->meta->atime -= sec_skew; if (fs_file->meta->crtime) fs_file->meta->crtime -= sec_skew; tsk_fprintf(hFile, "Written:\t%s\n", tsk_fs_time_to_str(fs_file->meta->mtime, timeBuf)); tsk_fprintf(hFile, "Accessed:\t%s\n", tsk_fs_time_to_str(fs_file->meta->atime, timeBuf)); tsk_fprintf(hFile, "Created:\t%s\n", tsk_fs_time_to_str(fs_file->meta->crtime, timeBuf)); if (fs_file->meta->mtime) fs_file->meta->mtime += sec_skew; if (fs_file->meta->atime) fs_file->meta->atime += sec_skew; if (fs_file->meta->crtime) fs_file->meta->crtime += sec_skew; tsk_fprintf(hFile, "\nOriginal File Times:\n"); } else { tsk_fprintf(hFile, "\nFile Times:\n"); } tsk_fprintf(hFile, "Created:\t%s\n", tsk_fs_time_to_str(fs_file->meta->crtime, timeBuf)); tsk_fprintf(hFile, "File Modified:\t%s\n", tsk_fs_time_to_str(fs_file->meta->mtime, timeBuf)); tsk_fprintf(hFile, "Accessed:\t%s\n", tsk_fs_time_to_str(fs_file->meta->atime, timeBuf)); tsk_fprintf(hFile, "\nSectors:\n"); /* since blocks are all contiguous, print them here to simplify file_walk */ { int block = tsk_getu32(fs->endian, dinode->dr.ext_loc_m); TSK_OFF_T size = fs_file->meta->size; int rowcount = 0; while ((int64_t) size > 0) { tsk_fprintf(hFile, "%d ", block++); size -= fs->block_size; rowcount++; if (rowcount == 8) { rowcount = 0; tsk_fprintf(hFile, "\n"); } } tsk_fprintf(hFile, "\n"); } tsk_fs_file_close(fs_file); if (dinode != NULL) free((char *) dinode); return 0; } static uint8_t iso9660_jopen(TSK_FS_INFO * fs, TSK_INUM_T inum) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNSUPFUNC); tsk_error_set_errstr("ISO9660 does not have a journal"); return 1; } static uint8_t iso9660_jentry_walk(TSK_FS_INFO * fs, int flags, TSK_FS_JENTRY_WALK_CB action, void *ptr) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNSUPFUNC); tsk_error_set_errstr("ISO9660 does not have a journal"); return 1; } static uint8_t iso9660_jblk_walk(TSK_FS_INFO * fs, TSK_DADDR_T start, TSK_DADDR_T end, int flags, TSK_FS_JBLK_WALK_CB action, void *ptr) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNSUPFUNC); tsk_error_set_errstr("ISO9660 does not have a journal"); return 1; } static TSK_FS_ATTR_TYPE_ENUM iso9660_get_default_attr_type(const TSK_FS_FILE * a_file) { return TSK_FS_ATTR_TYPE_DEFAULT; } /** Load the volume descriptors into save the raw data structures in * the file system state structure (fs). Also determines the block size. * * This is useful for discs which may have 2 volumes on them (no, not * multisession CD-R/CD-RW). * Design note: If path table address is the same, then you have the same image. * Only store unique image info. * Uses a linked list even though Ecma-119 says there is only 1 primary vol * desc, consider possibility of more. 
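 * Reading starts at ISO9660_SBOFF (sector 16 of the volume) and continues one
 * descriptor at a time until the volume descriptor set terminator is found.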
* * Returns -1 on error and 0 on success */ static int load_vol_desc(TSK_FS_INFO * fs) { int count = 0; ISO_INFO *iso = (ISO_INFO *) fs; TSK_OFF_T offs; char *myname = "iso_load_vol_desc"; ssize_t cnt; iso9660_pvd_node *p; iso9660_svd_node *s; uint8_t magic_seen = 0; iso->pvd = NULL; iso->svd = NULL; //fs->block_size = 0; fs->dev_bsize = fs->img_info->sector_size; #if 0 b = (iso_bootrec *) tsk_malloc(sizeof(iso_bootrec)); if (b == NULL) { return -1; } #endif // @@@ Technically, we should seek ahea 16 * sector size for (offs = ISO9660_SBOFF;; offs += sizeof(iso9660_gvd)) { iso9660_gvd *vd; // allocate a buffer the size of the nodes in the linked list // this will be stored in ISO_INFO, so it is not always freed here if ((vd = (iso9660_gvd *) tsk_malloc(sizeof(iso9660_pvd_node))) == NULL) { return -1; } ISO_RETRY_MAGIC: // read the full descriptor cnt = tsk_fs_read(fs, offs, (char *) vd, sizeof(iso9660_gvd)); if (cnt != sizeof(iso9660_gvd)) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2("iso_load_vol_desc: Error reading"); free(vd); return -1; } // verify the magic value if (strncmp(vd->magic, ISO9660_MAGIC, 5)) { if (tsk_verbose) tsk_fprintf(stderr, "%s: Bad volume descriptor: Magic number is not CD001\n", myname); // see if we have a RAW image if (magic_seen == 0) { if (fs->block_pre_size == 0) { if (tsk_verbose) tsk_fprintf(stderr, "Trying RAW ISO9660 with 16-byte pre-block size\n"); fs->block_pre_size = 16; fs->block_post_size = 288; goto ISO_RETRY_MAGIC; } else if (fs->block_pre_size == 16) { if (tsk_verbose) tsk_fprintf(stderr, "Trying RAW ISO9660 with 24-byte pre-block size\n"); fs->block_pre_size = 24; fs->block_post_size = 280; goto ISO_RETRY_MAGIC; } else { fs->block_pre_size = 0; fs->block_post_size = 0; } } free(vd); return -1; } magic_seen = 1; // see if we are done if (vd->type == ISO9660_VOL_DESC_SET_TERM) break; switch (vd->type) { case ISO9660_PRIM_VOL_DESC: p = (iso9660_pvd_node *) vd; /* list not empty */ if (iso->pvd) { iso9660_pvd_node *ptmp = iso->pvd; /* append to list if path table address not found in list */ while ((p->pvd.pt_loc_l != ptmp->pvd.pt_loc_l) && (ptmp->next)) ptmp = ptmp->next; // we already have it if (p->pvd.pt_loc_l == ptmp->pvd.pt_loc_l) { free(vd); p = NULL; vd = NULL; } else { ptmp->next = p; p->next = NULL; count++; } } /* list empty, insert */ else { iso->pvd = p; p->next = NULL; count++; } break; case ISO9660_SUPP_VOL_DESC: s = (iso9660_svd_node *) vd; /* list not empty */ if (iso->svd) { iso9660_svd_node *stmp = iso->svd; /* append to list if path table address not found in list */ while ((s->svd.pt_loc_l != stmp->svd.pt_loc_l) && (stmp->next)) stmp = stmp->next; // we already have it if (s->svd.pt_loc_l == stmp->svd.pt_loc_l) { free(vd); s = NULL; vd = NULL; } else { stmp->next = s; s->next = NULL; count++; } } /* list empty, insert */ else { iso->svd = s; s->next = NULL; count++; } break; /* boot records are just read and discarded for now... */ case ISO9660_BOOT_RECORD: #if 0 cnt = tsk_fs_read(fs, offs, (char *) b, sizeof(iso_bootrec)); if (cnt != sizeof(iso_bootrec)) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2("iso_load_vol_desc: Error reading"); return -1; } offs += sizeof(iso_bootrec); #endif break; } } /* now that we have all primary and supplementary volume descs, we should cull the list of */ /* primary that match up with supplems, since supplem has all info primary has plus more. 
*/ /* this will make jobs such as searching all volumes easier later */ for (s = iso->svd; s != NULL; s = s->next) { for (p = iso->pvd; p != NULL; p = p->next) { // see if they have the same starting address if (tsk_getu32(fs->endian, p->pvd.pt_loc_m) == tsk_getu32(fs->endian, s->svd.pt_loc_m)) { // see if it is the head of the list if (p == iso->pvd) { iso->pvd = p->next; } else { iso9660_pvd_node *ptmp = iso->pvd; while (ptmp->next != p) ptmp = ptmp->next; ptmp->next = p->next; } p->next = NULL; free(p); p = NULL; count--; break; } } } if ((iso->pvd == NULL) && (iso->svd == NULL)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_MAGIC); tsk_error_set_errstr ("load_vol_desc: primary and secondary volume descriptors null"); return -1; } return 0; } /* iso9660_open - * opens an iso9660 filesystem. * Design note: This function doesn't read a superblock, since iso9660 doesnt * really have one. Volume info is read in with a call to load_vol_descs(). */ TSK_FS_INFO * iso9660_open(TSK_IMG_INFO * img_info, TSK_OFF_T offset, TSK_FS_TYPE_ENUM ftype, uint8_t test) { ISO_INFO *iso; TSK_FS_INFO *fs; uint8_t tmpguess[4]; if (TSK_FS_TYPE_ISISO9660(ftype) == 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("Invalid FS type in iso9660_open"); return NULL; } if (tsk_verbose) { tsk_fprintf(stderr, "iso9660_open img_info: %" PRIu64 " ftype: %" PRIu8 " test: %" PRIu8 "\n", (uint64_t) img_info, ftype, test); } if ((iso = (ISO_INFO *) tsk_fs_malloc(sizeof(ISO_INFO))) == NULL) { return NULL; } fs = &(iso->fs_info); iso->rr_found = 0; iso->in_list = NULL; fs->ftype = TSK_FS_TYPE_ISO9660; fs->duname = "Block"; fs->flags = 0; fs->tag = TSK_FS_INFO_TAG; fs->img_info = img_info; fs->offset = offset; /* ISO has no magic to calibrate the endian ordering on and it * stores all numbers in big and small endian. We will use the big * endian order so load up a 4-byte array and flags. We could hardwire * the definition, but I would rather use guessu32 in case I later add * other initialization data to that function (since all other FSs use * it) */ tmpguess[0] = 0; tmpguess[1] = 0; tmpguess[2] = 0; tmpguess[3] = 1; tsk_fs_guessu32(fs, tmpguess, 1); // we need a value here to test for RAW images. So, start with 2048 fs->block_size = 2048; /* load_vol_descs checks magic value */ if (load_vol_desc(fs) == -1) { fs->tag = 0; iso9660_close(fs); if (tsk_verbose) fprintf(stderr, "iso9660_open: Error loading volume descriptor\n"); if (test) return NULL; else { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_MAGIC); tsk_error_set_errstr("Invalid FS type in iso9660_open"); return NULL; } } if (iso->pvd) { fs->block_size = tsk_getu16(fs->endian, iso->pvd->pvd.blk_sz_m); fs->block_count = tsk_getu32(fs->endian, iso->pvd->pvd.vs_sz_m); /* Volume ID */ for (fs->fs_id_used = 0; fs->fs_id_used < 32; fs->fs_id_used++) { fs->fs_id[fs->fs_id_used] = iso->pvd->pvd.vol_id[fs->fs_id_used]; } } else { fs->block_size = tsk_getu16(fs->endian, iso->svd->svd.blk_sz_m); fs->block_count = tsk_getu32(fs->endian, iso->svd->svd.vs_sz_m); /* Volume ID */ for (fs->fs_id_used = 0; fs->fs_id_used < 32; fs->fs_id_used++) { fs->fs_id[fs->fs_id_used] = iso->svd->svd.vol_id[fs->fs_id_used]; } } /* We have seen this case on an image that seemed to be only * setting blk_siz_l instead of both blk_sz_m and _l. We should * support both in the future, but this prevents a crash later * on when we divide by block_size. 
*/ if (fs->block_size == 0) { fs->tag = 0; iso9660_close(fs); if (tsk_verbose) fprintf(stderr, "iso9660_open: Block size is 0\n"); if (test) return NULL; else { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_MAGIC); tsk_error_set_errstr("Block size is 0"); return NULL; } } fs->first_block = 0; fs->last_block = fs->last_block_act = fs->block_count - 1; // determine the last block we have in this image if ((TSK_DADDR_T) ((img_info->size - offset) / fs->block_size) < fs->block_count) fs->last_block_act = (img_info->size - offset) / fs->block_size - 1; fs->inum_count = iso9660_load_inodes_pt(iso); if ((int) fs->inum_count == -1) { fs->tag = 0; iso9660_close(fs); if (tsk_verbose) fprintf(stderr, "iso9660_open: Error loading primary table\n"); return NULL; } fs->inum_count++; // account for the orphan directory fs->last_inum = fs->inum_count - 1; fs->first_inum = ISO9660_FIRSTINO; fs->root_inum = ISO9660_ROOTINO; fs->inode_walk = iso9660_inode_walk; fs->block_walk = iso9660_block_walk; fs->block_getflags = iso9660_block_getflags; fs->get_default_attr_type = iso9660_get_default_attr_type; fs->load_attrs = iso9660_make_data_run; fs->file_add_meta = iso9660_inode_lookup; fs->dir_open_meta = iso9660_dir_open_meta; fs->fsstat = iso9660_fsstat; fs->fscheck = iso9660_fscheck; fs->istat = iso9660_istat; fs->close = iso9660_close; fs->name_cmp = iso9660_name_cmp; fs->jblk_walk = iso9660_jblk_walk; fs->jentry_walk = iso9660_jentry_walk; fs->jopen = iso9660_jopen; return fs; } sleuthkit-4.2.0/tsk/fs/iso9660_dent.c000644 000765 000024 00000024651 12576320700 020043 0ustar00carrierstaff000000 000000 /* ** The Sleuth Kit ** ** Brian Carrier [carrier sleuthkit [dot] org] ** Copyright (c)2007 Brian Carrier. All righs reserved. ** ** ** This software is subject to the IBM Public License ver. 1.0, ** which was displayed prior to download and is included in the readme.txt ** file accompanying the Sleuth Kit files. It may also be requested from: ** Crucial Security Inc. ** 14900 Conference Center Drive ** Chantilly, VA 20151 ** ** Copyright (c) 2007-2011 Brian Carrier. All rights reserved ** ** Wyatt Banks [wbanks@crucialsecurity.com] ** Copyright (c) 2005 Crucial Security Inc. All rights reserved. ** ** Brian Carrier [carrier sleuthkit [dot] org] ** Copyright (c) 2003-2005 Brian Carrier. All rights reserved ** ** Copyright (c) 1997,1998,1999, International Business Machines ** Corporation and others. All Rights Reserved. */ /* TCT * LICENSE * This software is distributed under the IBM Public License. * AUTHOR(S) * Wietse Venema * IBM T.J. Watson Research * P.O. Box 704 * Yorktown Heights, NY 10598, USA --*/ /* ** You may distribute the Sleuth Kit, or other software that incorporates ** part of all of the Sleuth Kit, in object code form under a license agreement, ** provided that: ** a) you comply with the terms and conditions of the IBM Public License ** ver 1.0; and ** b) the license agreement ** i) effectively disclaims on behalf of all Contributors all warranties ** and conditions, express and implied, including warranties or ** conditions of title and non-infringement, and implied warranties ** or conditions of merchantability and fitness for a particular ** purpose. ** ii) effectively excludes on behalf of all Contributors liability for ** damages, including direct, indirect, special, incidental and ** consequential damages such as lost profits. ** iii) states that any provisions which differ from IBM Public License ** ver. 
1.0 are offered by that Contributor alone and not by any ** other party; and ** iv) states that the source code for the program is available from you, ** and informs licensees how to obtain it in a reasonable manner on or ** through a medium customarily used for software exchange. ** ** When the Sleuth Kit or other software that incorporates part or all of ** the Sleuth Kit is made available in source code form: ** a) it must be made available under IBM Public License ver. 1.0; and ** b) a copy of the IBM Public License ver. 1.0 must be included with ** each copy of the program. */ /** * \file iso9660_dent.c * Contains the internal TSK ISO9660 file system code to handle the parsing of * file names and directory structures. */ #include "tsk_fs_i.h" #include "tsk_iso9660.h" /** \internal * process the data from inside of a directory and load the corresponding * file data into a TSK_FS_DIR structure. * * @param a_fs File system * @param a_fs_dir Structore to store file names into * @param buf Buffer that contains the directory content * @param a_length Number of bytes in buffer * @param a_addr The metadata address for the directory being processed * @param a_dir_addr The block offset where this directory starts * @returns TSK_ERR on error and TSK_OK otherwise */ static uint8_t iso9660_proc_dir(TSK_FS_INFO * a_fs, TSK_FS_DIR * a_fs_dir, char *buf, size_t a_length, TSK_INUM_T a_addr, TSK_OFF_T a_dir_addr) { ISO_INFO *iso = (ISO_INFO *) a_fs; TSK_FS_NAME *fs_name; size_t buf_idx; iso9660_dentry *dd; /* directory descriptor */ iso9660_inode_node *in; TSK_OFF_T dir_offs = a_dir_addr * a_fs->block_size; if ((fs_name = tsk_fs_name_alloc(ISO9660_MAXNAMLEN + 1, 0)) == NULL) return TSK_ERR; buf_idx = 0; dd = (iso9660_dentry *) & buf[buf_idx]; /* handle "." entry */ fs_name->meta_addr = a_addr; strcpy(fs_name->name, "."); fs_name->type = TSK_FS_NAME_TYPE_DIR; fs_name->flags = TSK_FS_NAME_FLAG_ALLOC; tsk_fs_dir_add(a_fs_dir, fs_name); buf_idx += dd->entry_len; if (buf_idx > a_length - sizeof(iso9660_dentry)) { free(buf); tsk_fs_name_free(fs_name); return TSK_OK; } dd = (iso9660_dentry *) & buf[buf_idx]; /* handle ".." entry */ in = iso->in_list; while (in && (tsk_getu32(a_fs->endian, in->inode.dr.ext_loc_m) != tsk_getu32(a_fs->endian, dd->ext_loc_m))) in = in->next; if (in) { fs_name->meta_addr = in->inum; strcpy(fs_name->name, ".."); fs_name->type = TSK_FS_NAME_TYPE_DIR; fs_name->flags = TSK_FS_NAME_FLAG_ALLOC; tsk_fs_dir_add(a_fs_dir, fs_name); } buf_idx += dd->entry_len; // process the rest of the entries in the directory while (buf_idx < a_length - sizeof(iso9660_dentry)) { dd = (iso9660_dentry *) & buf[buf_idx]; // process the entry (if it has a defined and valid length) if ((dd->entry_len) && (buf_idx + dd->entry_len < a_length)) { /* We need to find the data in the pre-processed list because that * contains the meta data address that TSK assigned to this file. * We find the entry by looking for one that was stored at the same * byte offset that we now are. We used to use the extent location, but * we found an image * that had a file with 0 bytes with the same starting block as another * file. */ for (in = iso->in_list; in; in = in->next) { if (in->dentry_offset == dir_offs + buf_idx) break; } // we may have not found it because we are reading corrupt data... 
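            // if so, skip a single byte and keep scanning instead of giving up on the directory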
if (!in) { buf_idx++; continue; } // copy the data in fs_name for loading fs_name->meta_addr = in->inum; strncpy(fs_name->name, in->inode.fn, ISO9660_MAXNAMLEN); if (dd->flags & ISO9660_FLAG_DIR) fs_name->type = TSK_FS_NAME_TYPE_DIR; else fs_name->type = TSK_FS_NAME_TYPE_REG; fs_name->flags = TSK_FS_NAME_FLAG_ALLOC; tsk_fs_dir_add(a_fs_dir, fs_name); buf_idx += dd->entry_len; } /* If the length was not defined, we are probably in a hole in the * directory. The contents are block aligned. So, we * scan ahead until we get either a non-zero entry or the block boundary */ else { buf_idx++; for (; buf_idx < a_length - sizeof(iso9660_dentry); buf_idx++) { if (buf[buf_idx] != 0) { dd = (iso9660_dentry *) & buf[buf_idx]; if ((dd->entry_len) && (buf_idx + dd->entry_len < a_length)) break; } if (buf_idx % a_fs->block_size == 0) break; } } } free(buf); tsk_fs_name_free(fs_name); return TSK_OK; } /** \internal * Process a directory and load up FS_DIR with the entries. If a pointer to * an already allocated FS_DIR struture is given, it will be cleared. If no existing * FS_DIR structure is passed (i.e. NULL), then a new one will be created. If the return * value is error or corruption, then the FS_DIR structure could * have entries (depending on when the error occured). * * @param a_fs File system to analyze * @param a_fs_dir Pointer to FS_DIR pointer. Can contain an already allocated * structure or a new structure. * @param a_addr Address of directory to process. * @returns error, corruption, ok etc. */ TSK_RETVAL_ENUM iso9660_dir_open_meta(TSK_FS_INFO * a_fs, TSK_FS_DIR ** a_fs_dir, TSK_INUM_T a_addr) { TSK_RETVAL_ENUM retval; TSK_FS_DIR *fs_dir; ssize_t cnt; char *buf; size_t length; if (a_addr < a_fs->first_inum || a_addr > a_fs->last_inum) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); tsk_error_set_errstr ("iso9660_dir_open_meta: Invalid inode value: %" PRIuINUM, a_addr); return TSK_ERR; } else if (a_fs_dir == NULL) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("iso9660_dir_open_meta: NULL fs_attr argument given"); return TSK_ERR; } if (tsk_verbose) tsk_fprintf(stderr, "iso9660_dir_open_meta: Processing directory %" PRIuINUM "\n", a_addr); fs_dir = *a_fs_dir; if (fs_dir) { tsk_fs_dir_reset(fs_dir); fs_dir->addr = a_addr; } else { if ((*a_fs_dir = fs_dir = tsk_fs_dir_alloc(a_fs, a_addr, 128)) == NULL) { return TSK_ERR; } } // handle the orphan directory if its contents were requested if (a_addr == TSK_FS_ORPHANDIR_INUM(a_fs)) { return tsk_fs_dir_find_orphans(a_fs, fs_dir); } fs_dir->fs_file = tsk_fs_file_open_meta(a_fs, NULL, a_addr); if (fs_dir->fs_file == NULL) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_INODE_NUM); tsk_error_set_errstr("iso9660_dir_open_meta: %" PRIuINUM " is not a valid inode", a_addr); return TSK_COR; } /* read directory extent into memory */ length = (size_t) fs_dir->fs_file->meta->size; if ((buf = tsk_malloc(length)) == NULL) return TSK_ERR; cnt = tsk_fs_file_read(fs_dir->fs_file, 0, buf, length, 0); if (cnt != length) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2("iso9660_dir_open_meta"); return TSK_ERR; } // process the contents retval = iso9660_proc_dir(a_fs, fs_dir, buf, length, a_addr, fs_dir->fs_file->meta->attr->head->nrd.run->addr); // if we are listing the root directory, add the Orphan directory entry if (a_addr == a_fs->root_inum) { TSK_FS_NAME *fs_name = tsk_fs_name_alloc(256, 0); if (fs_name == NULL) return TSK_ERR; if 
(tsk_fs_dir_make_orphan_dir_name(a_fs, fs_name)) { tsk_fs_name_free(fs_name); return TSK_ERR; } if (tsk_fs_dir_add(fs_dir, fs_name)) { tsk_fs_name_free(fs_name); return TSK_ERR; } tsk_fs_name_free(fs_name); } return retval; } int iso9660_name_cmp(TSK_FS_INFO * a_fs_info, const char *s1, const char *s2) { return strcmp(s1, s2); } sleuthkit-4.2.0/tsk/fs/Makefile.am000644 000765 000024 00000001612 12576320700 017572 0ustar00carrierstaff000000 000000 AM_CPPFLAGS = -I../.. -I$(srcdir)/../.. -Wall EXTRA_DIST = .indent.pro noinst_LTLIBRARIES = libtskfs.la # Note that the .h files are in the top-level Makefile libtskfs_la_SOURCES = tsk_fs_i.h fs_inode.c fs_io.c fs_block.c fs_open.c \ fs_name.c fs_dir.c fs_types.c fs_attr.c fs_attrlist.c fs_load.c \ fs_parse.c fs_file.c \ unix_misc.c nofs_misc.c \ ffs.c ffs_dent.c ext2fs.c ext2fs_dent.c ext2fs_journal.c \ fatfs.c fatfs_meta.c fatfs_dent.cpp \ fatxxfs.c fatxxfs_meta.c fatxxfs_dent.c \ exfatfs.c exfatfs_meta.c exfatfs_dent.c \ fatfs_utils.c \ ntfs.c ntfs_dent.cpp swapfs.c rawfs.c \ iso9660.c iso9660_dent.c \ hfs.c hfs_dent.c hfs_journal.c hfs_unicompare.c \ dcalc_lib.c dcat_lib.c dls_lib.c dstat_lib.c ffind_lib.c \ fls_lib.c icat_lib.c ifind_lib.c ils_lib.c \ walk_cpp.cpp yaffs.cpp indent: indent *.c *.h clean-local: -rm -f *.c~ *.h~ sleuthkit-4.2.0/tsk/fs/Makefile.in000644 000765 000024 00000061422 12576326300 017612 0ustar00carrierstaff000000 000000 # Makefile.in generated by automake 1.15 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2014 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) 
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) pkgdatadir = $(datadir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ pkglibexecdir = $(libexecdir)/@PACKAGE@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = tsk/fs ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ax_pthread.m4 \ $(top_srcdir)/m4/cppunit.m4 \ $(top_srcdir)/m4/ax_jni_include_dir.m4 \ $(top_srcdir)/m4/ac_prog_javac_works.m4 \ $(top_srcdir)/m4/ac_prog_javac.m4 \ $(top_srcdir)/m4/ac_prog_java_works.m4 \ $(top_srcdir)/m4/ac_prog_java.m4 $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/tsk/tsk_config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) libtskfs_la_LIBADD = am_libtskfs_la_OBJECTS = fs_inode.lo fs_io.lo fs_block.lo fs_open.lo \ fs_name.lo fs_dir.lo fs_types.lo fs_attr.lo fs_attrlist.lo \ fs_load.lo fs_parse.lo fs_file.lo unix_misc.lo nofs_misc.lo \ ffs.lo ffs_dent.lo ext2fs.lo ext2fs_dent.lo ext2fs_journal.lo \ fatfs.lo fatfs_meta.lo fatfs_dent.lo fatxxfs.lo \ fatxxfs_meta.lo fatxxfs_dent.lo exfatfs.lo exfatfs_meta.lo \ exfatfs_dent.lo fatfs_utils.lo ntfs.lo ntfs_dent.lo swapfs.lo \ rawfs.lo iso9660.lo iso9660_dent.lo hfs.lo hfs_dent.lo \ hfs_journal.lo hfs_unicompare.lo dcalc_lib.lo dcat_lib.lo \ dls_lib.lo dstat_lib.lo ffind_lib.lo fls_lib.lo icat_lib.lo \ ifind_lib.lo ils_lib.lo walk_cpp.lo yaffs.lo libtskfs_la_OBJECTS = $(am_libtskfs_la_OBJECTS) AM_V_lt = $(am__v_lt_@AM_V@) am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) am__v_lt_0 = --silent am__v_lt_1 = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir)/tsk depcomp 
= $(SHELL) $(top_srcdir)/config/depcomp am__depfiles_maybe = depfiles am__mv = mv -f COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CFLAGS) $(CFLAGS) AM_V_CC = $(am__v_CC_@AM_V@) am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@) am__v_CC_0 = @echo " CC " $@; am__v_CC_1 = CCLD = $(CC) LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CCLD = $(am__v_CCLD_@AM_V@) am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@) am__v_CCLD_0 = @echo " CCLD " $@; am__v_CCLD_1 = CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CXXFLAGS) $(CXXFLAGS) AM_V_CXX = $(am__v_CXX_@AM_V@) am__v_CXX_ = $(am__v_CXX_@AM_DEFAULT_V@) am__v_CXX_0 = @echo " CXX " $@; am__v_CXX_1 = CXXLD = $(CXX) CXXLINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CXXLD = $(am__v_CXXLD_@AM_V@) am__v_CXXLD_ = $(am__v_CXXLD_@AM_DEFAULT_V@) am__v_CXXLD_0 = @echo " CXXLD " $@; am__v_CXXLD_1 = SOURCES = $(libtskfs_la_SOURCES) DIST_SOURCES = $(libtskfs_la_SOURCES) am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. 
am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/config/depcomp DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) ACLOCAL = @ACLOCAL@ ALLOCA = @ALLOCA@ AMTAR = @AMTAR@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ ANT_FOUND = @ANT_FOUND@ AR = @AR@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ JAVA = @JAVA@ JAVAC = @JAVAC@ JNI_CPPFLAGS = @JNI_CPPFLAGS@ LD = @LD@ LDFLAGS = @LDFLAGS@ LIBOBJS = @LIBOBJS@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBTSK_LDFLAGS = @LIBTSK_LDFLAGS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAINT = @MAINT@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ NM = @NM@ NMEDIT = @NMEDIT@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PERL = @PERL@ PTHREAD_CC = @PTHREAD_CC@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ PTHREAD_LIBS = @PTHREAD_LIBS@ RANLIB = @RANLIB@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ STRIP = @STRIP@ VERSION = @VERSION@ Z_PATH = @Z_PATH@ _ACJNI_JAVAC = @_ACJNI_JAVAC@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ ax_pthread_config = @ax_pthread_config@ bindir = @bindir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ datadir = @datadir@ datarootdir = @datarootdir@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ prefix = @prefix@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = 
@top_builddir@ top_srcdir = @top_srcdir@ uudecode = @uudecode@ AM_CPPFLAGS = -I../.. -I$(srcdir)/../.. -Wall EXTRA_DIST = .indent.pro noinst_LTLIBRARIES = libtskfs.la # Note that the .h files are in the top-level Makefile libtskfs_la_SOURCES = tsk_fs_i.h fs_inode.c fs_io.c fs_block.c fs_open.c \ fs_name.c fs_dir.c fs_types.c fs_attr.c fs_attrlist.c fs_load.c \ fs_parse.c fs_file.c \ unix_misc.c nofs_misc.c \ ffs.c ffs_dent.c ext2fs.c ext2fs_dent.c ext2fs_journal.c \ fatfs.c fatfs_meta.c fatfs_dent.cpp \ fatxxfs.c fatxxfs_meta.c fatxxfs_dent.c \ exfatfs.c exfatfs_meta.c exfatfs_dent.c \ fatfs_utils.c \ ntfs.c ntfs_dent.cpp swapfs.c rawfs.c \ iso9660.c iso9660_dent.c \ hfs.c hfs_dent.c hfs_journal.c hfs_unicompare.c \ dcalc_lib.c dcat_lib.c dls_lib.c dstat_lib.c ffind_lib.c \ fls_lib.c icat_lib.c ifind_lib.c ils_lib.c \ walk_cpp.cpp yaffs.cpp all: all-am .SUFFIXES: .SUFFIXES: .c .cpp .lo .o .obj $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign tsk/fs/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign tsk/fs/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; \ locs=`for p in $$list; do echo $$p; done | \ sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \ sort -u`; \ test -z "$$locs" || { \ echo rm -f $${locs}; \ rm -f $${locs}; \ } libtskfs.la: $(libtskfs_la_OBJECTS) $(libtskfs_la_DEPENDENCIES) $(EXTRA_libtskfs_la_DEPENDENCIES) $(AM_V_CXXLD)$(CXXLINK) $(libtskfs_la_OBJECTS) $(libtskfs_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/dcalc_lib.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/dcat_lib.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/dls_lib.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/dstat_lib.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/exfatfs.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/exfatfs_dent.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/exfatfs_meta.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ext2fs.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ext2fs_dent.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ext2fs_journal.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/fatfs.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/fatfs_dent.Plo@am__quote@ 
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/fatfs_meta.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/fatfs_utils.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/fatxxfs.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/fatxxfs_dent.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/fatxxfs_meta.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ffind_lib.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ffs.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ffs_dent.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/fls_lib.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/fs_attr.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/fs_attrlist.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/fs_block.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/fs_dir.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/fs_file.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/fs_inode.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/fs_io.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/fs_load.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/fs_name.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/fs_open.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/fs_parse.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/fs_types.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/hfs.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/hfs_dent.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/hfs_journal.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/hfs_unicompare.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/icat_lib.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ifind_lib.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ils_lib.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/iso9660.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/iso9660_dent.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/nofs_misc.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ntfs.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ntfs_dent.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/rawfs.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/swapfs.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/unix_misc.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/walk_cpp.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/yaffs.Plo@am__quote@ .c.o: @am__fastdepCC_TRUE@ $(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ $< .c.obj: @am__fastdepCC_TRUE@ $(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) 
$(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .c.lo: @am__fastdepCC_TRUE@ $(AM_V_CC)$(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LTCOMPILE) -c -o $@ $< .cpp.o: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LTCXXCOMPILE) -c -o $@ $< mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-am TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-am CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-am cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) 
$(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool clean-local \ clean-noinstLTLIBRARIES mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: CTAGS GTAGS TAGS all all-am check check-am clean clean-generic \ clean-libtool clean-local clean-noinstLTLIBRARIES \ cscopelist-am ctags ctags-am distclean distclean-compile \ distclean-generic distclean-libtool distclean-tags distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-pdf \ install-pdf-am install-ps install-ps-am install-strip \ installcheck installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags tags-am uninstall uninstall-am .PRECIOUS: Makefile indent: indent *.c *.h clean-local: -rm -f *.c~ *.h~ # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: sleuthkit-4.2.0/tsk/fs/nofs_misc.c000644 000765 000024 00000014757 12576320700 017700 0ustar00carrierstaff000000 000000 /* ** The Sleuth Kit ** ** Brian Carrier [carrier sleuthkit [dot] org] ** Copyright (c) 2006-2011 Brian Carrier, Basis Technology. All Rights reserved ** Copyright (c) 2004-2005 Brian Carrier. All rights reserved ** ** ** This software is distributed under the Common Public License 1.0 ** */ #include "tsk_fs_i.h" /** *\file nofs_misc.c * Contains internal functions that are common to the "non-file system" file systems, such as * raw and swap. Many of the functions in this file simply give an error that the functionality * is not supported for the given file system type. */ /** \internal * Print details about the file system to a file handle. * * @param a_fs File system to print details on * @param hFile File handle to print text to * * @returns 1 on error and 0 on success */ uint8_t tsk_fs_nofs_fsstat(TSK_FS_INFO * a_fs, FILE * hFile) { tsk_fprintf(hFile, "%s Data\n", tsk_fs_type_toname(a_fs->ftype)); tsk_fprintf(hFile, "Block Size: %d\n", a_fs->block_size); tsk_fprintf(hFile, "Block Range: 0 - %" PRIuDADDR "\n", a_fs->last_block); return 0; } /** \internal */ TSK_FS_ATTR_TYPE_ENUM tsk_fs_nofs_get_default_attr_type(const TSK_FS_FILE * a_fs_file) { return TSK_FS_ATTR_TYPE_DEFAULT; } /** \internal */ uint8_t tsk_fs_nofs_make_data_run(TSK_FS_FILE * a_fs_file) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNSUPFUNC); tsk_error_set_errstr("Illegal analysis method for %s data ", (a_fs_file->fs_info) ? 
tsk_fs_type_toname(a_fs_file-> fs_info->ftype) : ""); return 1; } /** \internal */ void tsk_fs_nofs_close(TSK_FS_INFO * a_fs) { a_fs->tag = 0; tsk_fs_free(a_fs); } /************* BLOCKS *************/ /** \internal */ TSK_FS_BLOCK_FLAG_ENUM tsk_fs_nofs_block_getflags(TSK_FS_INFO * a_fs, TSK_DADDR_T a_addr) { return TSK_FS_BLOCK_FLAG_ALLOC | TSK_FS_BLOCK_FLAG_CONT; } /** \internal * * return 1 on error and 0 on success */ uint8_t tsk_fs_nofs_block_walk(TSK_FS_INFO * fs, TSK_DADDR_T a_start_blk, TSK_DADDR_T a_end_blk, TSK_FS_BLOCK_WALK_FLAG_ENUM a_flags, TSK_FS_BLOCK_WALK_CB a_action, void *a_ptr) { TSK_FS_BLOCK *fs_block; TSK_DADDR_T addr; // clean up any error messages that are lying around tsk_error_reset(); /* * Sanity checks. */ if (a_start_blk < fs->first_block || a_start_blk > fs->last_block) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); tsk_error_set_errstr("nofs_block_walk: Start block number: %" PRIuDADDR, a_start_blk); return 1; } if (a_end_blk < fs->first_block || a_end_blk > fs->last_block || a_end_blk < a_start_blk) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); tsk_error_set_errstr("nofs_block_walk: Last block number: %" PRIuDADDR, a_end_blk); return 1; } /* Sanity check on a_flags -- make sure at least one ALLOC is set */ if (((a_flags & TSK_FS_BLOCK_WALK_FLAG_ALLOC) == 0) && ((a_flags & TSK_FS_BLOCK_WALK_FLAG_UNALLOC) == 0)) { a_flags |= (TSK_FS_BLOCK_WALK_FLAG_ALLOC | TSK_FS_BLOCK_WALK_FLAG_UNALLOC); } /* All swap has is allocated blocks... exit if not wanted */ if (!(a_flags & TSK_FS_BLOCK_FLAG_ALLOC)) { return 0; } if ((fs_block = tsk_fs_block_alloc(fs)) == NULL) { return 1; } for (addr = a_start_blk; addr <= a_end_blk; addr++) { int retval; if (tsk_fs_block_get(fs, fs_block, addr) == NULL) { tsk_error_set_errstr2("nofs_block_walk: Block %" PRIuDADDR, addr); tsk_fs_block_free(fs_block); return 1; } retval = a_action(fs_block, a_ptr); if (retval == TSK_WALK_STOP) { break; } else if (retval == TSK_WALK_ERROR) { tsk_fs_block_free(fs_block); return 1; } } /* * Cleanup. 
*/ tsk_fs_block_free(fs_block); return 0; } /************ META / FILES ************/ /** \internal */ uint8_t tsk_fs_nofs_inode_walk(TSK_FS_INFO * a_fs, TSK_INUM_T a_start_inum, TSK_INUM_T a_end_inum, TSK_FS_META_FLAG_ENUM a_flags, TSK_FS_META_WALK_CB a_action, void *a_ptr) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNSUPFUNC); tsk_error_set_errstr("Illegal analysis method for %s data ", tsk_fs_type_toname(a_fs->ftype)); return 1; } /** \internal */ uint8_t tsk_fs_nofs_file_add_meta(TSK_FS_INFO * a_fs, TSK_FS_FILE * a_fs_file, TSK_INUM_T inum) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNSUPFUNC); tsk_error_set_errstr("Illegal analysis method for %s data ", tsk_fs_type_toname(a_fs->ftype)); return 1; } /** \internal */ uint8_t tsk_fs_nofs_istat(TSK_FS_INFO * a_fs, FILE * hFile, TSK_INUM_T inum, TSK_DADDR_T numblock, int32_t sec_skew) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNSUPFUNC); tsk_error_set_errstr("Illegal analysis method for %s data ", tsk_fs_type_toname(a_fs->ftype)); return 1; } /************ DIR **************/ /** \internal */ TSK_RETVAL_ENUM tsk_fs_nofs_dir_open_meta(TSK_FS_INFO * a_fs, TSK_FS_DIR ** a_fs_dir, TSK_INUM_T a_addr) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNSUPFUNC); tsk_error_set_errstr("Illegal analysis method for %s data ", tsk_fs_type_toname(a_fs->ftype)); return TSK_ERR; } /******** JOURNAL **********/ /** \internal */ uint8_t tsk_fs_nofs_jopen(TSK_FS_INFO * a_fs, TSK_INUM_T inum) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNSUPFUNC); tsk_error_set_errstr("Illegal analysis method for %s data ", tsk_fs_type_toname(a_fs->ftype)); return 1; } /** \internal */ uint8_t tsk_fs_nofs_jentry_walk(TSK_FS_INFO * a_fs, int a_flags, TSK_FS_JENTRY_WALK_CB a_action, void *a_ptr) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNSUPFUNC); tsk_error_set_errstr("Illegal analysis method for %s data ", tsk_fs_type_toname(a_fs->ftype)); return 1; } /** \internal */ uint8_t tsk_fs_nofs_jblk_walk(TSK_FS_INFO * a_fs, TSK_INUM_T start, TSK_INUM_T end, int a_flags, TSK_FS_JBLK_WALK_CB a_action, void *a_ptr) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNSUPFUNC); tsk_error_set_errstr("Illegal analysis method for %s data ", tsk_fs_type_toname(a_fs->ftype)); return 1; } int tsk_fs_nofs_name_cmp(TSK_FS_INFO * a_fs_info, const char *s1, const char *s2) { return strcmp(s1, s2); } sleuthkit-4.2.0/tsk/fs/ntfs.c000755 000765 000024 00000532711 12576320700 016670 0ustar00carrierstaff000000 000000 /* ** ntfs ** The Sleuth Kit ** ** Content and meta data layer support for the NTFS file system ** ** Brian Carrier [carrier sleuthkit [dot] org] ** Copyright (c) 2006-2011 Brian Carrier, Basis Technology. All Rights reserved ** Copyright (c) 2003-2005 Brian Carrier. All rights reserved ** ** TASK ** Copyright (c) 2002 Brian Carrier, @stake Inc. All rights reserved ** ** This software is distributed under the Common Public License 1.0 ** ** Unicode added with support from I.D.E.A.L. Technology Corp (Aug '05) ** */ #include "tsk_fs_i.h" #include "tsk_ntfs.h" #include /** * \file ntfs.c * Contains the TSK internal general NTFS processing code */ /* * NOTES TO SELF: * * - multiple ".." entries may exist */ /* * How are we to handle the META flag? Is the MFT $Data Attribute META? */ /* Macro to pass in both the epoch time value and the nano time value */ #define WITHNANO(x) x, x##_nano /* mini-design note: * The MFT has entries for every file and dir in the fs. 
* The first entry ($MFT) is for the MFT itself and it is used to find * the location of the entire table because it can become fragmented. * Therefore, the $Data attribute of $MFT is saved in the NTFS_INFO * structure for easy access. We also use the size of the MFT as * a way to calculate the maximum MFT entry number (last_inum). * * Ok, that is simple, but getting the full $Data attribute can be tough * because $MFT may not fit into one MFT entry (i.e. an attribute list). * We need to process the attribute list attribute to find out which * other entries to process. But, the attribute list attribute comes * before any $Data attribute (so it could refer to an MFT that has not * yet been 'defined'). Although, the $Data attribute seems to always * exist and define at least the run for the entry in the attribute list. * * So, the way this is solved is that generic mft_lookup is used to get * any MFT entry, even $MFT. If $MFT is not cached then we calculate * the address of where to read based on mutliplication and guessing. * When we are loading the $MFT, we set 'loading_the_MFT' to 1 so * that we can update things as we go along. When we read $MFT we * read all the attributes and save info about the $Data one. If * there is an attribute list, we will have the location of the * additional MFT in the cached $Data location, which will be * updated as we process the attribute list. After each MFT * entry that we process while loading the MFT, the 'final_inum' * value is updated to reflect what we can currently load so * that the sanity checks still work. */ /********************************************************************** * * MISC FUNCS * **********************************************************************/ /* convert the NT Time (UTC hundred nanoseconds from 1/1/1601) * to UNIX (UTC seconds from 1/1/1970) * * The basic calculation is to remove the nanoseconds and then * subtract the number of seconds between 1601 and 1970 * i.e. TIME - DELTA * */ uint32_t nt2unixtime(uint64_t ntdate) { // (369*365 + 89) * 24 * 3600 * 10000000 #define NSEC_BTWN_1601_1970 (uint64_t)(116444736000000000ULL) ntdate -= (uint64_t) NSEC_BTWN_1601_1970; ntdate /= (uint64_t) 10000000; return (uint32_t) ntdate; } /* convert the NT Time (UTC hundred nanoseconds from 1/1/1601) * to only the nanoseconds * */ static uint32_t nt2nano(uint64_t ntdate) { return (uint32_t) (ntdate % 10000000)*100; } /********************************************************************** * * Lookup Functions * **********************************************************************/ /** * Read an MFT entry and save it in raw form in the given buffer. * NOTE: This will remove the update sequence integrity checks in the * structure. * * @param a_ntfs File system to read from * @param a_buf Buffer to save raw data to. 
Must be of size NTFS_INFO.mft_rsize_b * @param a_mftnum Address of MFT entry to read * * @returns Error value */ TSK_RETVAL_ENUM ntfs_dinode_lookup(NTFS_INFO * a_ntfs, char *a_buf, TSK_INUM_T a_mftnum) { TSK_OFF_T mftaddr_b, mftaddr2_b, offset; size_t mftaddr_len = 0; int i; TSK_FS_INFO *fs = (TSK_FS_INFO *) & a_ntfs->fs_info; TSK_FS_ATTR_RUN *data_run; ntfs_upd *upd; uint16_t sig_seq; ntfs_mft *mft; /* sanity checks */ if (!a_buf) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("mft_lookup: null mft buffer"); return TSK_ERR; } if (a_mftnum < fs->first_inum) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("mft_lookup: inode number is too small (%" PRIuINUM ")", a_mftnum); return TSK_ERR; } /* Because this code reads teh actual MFT, we need to make sure we * decrement the last_inum because the last value is a special value * for the ORPHANS directory */ if (a_mftnum > fs->last_inum - 1) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("mft_lookup: inode number is too large (%" PRIuINUM ")", a_mftnum); return TSK_ERR; } if (tsk_verbose) tsk_fprintf(stderr, "ntfs_dinode_lookup: Processing MFT %" PRIuINUM "\n", a_mftnum); /* If mft_data (the cached $Data attribute of $MFT) is not there yet, * then we have not started to load $MFT yet. In that case, we will * 'cheat' and calculate where it goes. This should only be for * $MFT itself, in which case the calculation is easy */ if (!a_ntfs->mft_data) { /* This is just a random check with the assumption being that * we don't want to just do a guess calculation for a very large * MFT entry */ if (a_mftnum > NTFS_LAST_DEFAULT_INO) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("Error trying to load a high MFT entry when the MFT itself has not been loaded (%" PRIuINUM ")", a_mftnum); return TSK_ERR; } mftaddr_b = a_ntfs->root_mft_addr + a_mftnum * a_ntfs->mft_rsize_b; mftaddr2_b = 0; } else { /* The MFT may not be in consecutive clusters, so we need to use its * data attribute run list to find out what address to read * * This is why we cached it */ // will be set to the address of the MFT entry mftaddr_b = mftaddr2_b = 0; /* The byte offset within the $Data stream */ offset = a_mftnum * a_ntfs->mft_rsize_b; /* NOTE: data_run values are in clusters * * cycle through the runs in $Data and identify which * has the MFT entry that we want */ for (data_run = a_ntfs->mft_data->nrd.run; data_run != NULL; data_run = data_run->next) { /* The length of this specific run */ TSK_OFF_T run_len = data_run->len * a_ntfs->csize_b; /* Is our MFT entry is in this run somewhere ? 
*/ if (offset < run_len) { if (tsk_verbose) tsk_fprintf(stderr, "ntfs_dinode_lookup: Found in offset: %" PRIuDADDR " size: %" PRIuDADDR " at offset: %" PRIuOFF "\n", data_run->addr, data_run->len, offset); /* special case where the MFT entry crosses * a run (only happens when cluster size is 512-bytes * and there are an odd number of clusters in the run) */ if (run_len < offset + a_ntfs->mft_rsize_b) { if (tsk_verbose) tsk_fprintf(stderr, "ntfs_dinode_lookup: Entry crosses run border\n"); if (data_run->next == NULL) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_INODE_COR); tsk_error_set_errstr ("mft_lookup: MFT entry crosses a cluster and there are no more clusters!"); return TSK_COR; } /* Assign address where the remainder of the entry is */ mftaddr2_b = data_run->next->addr * a_ntfs->csize_b; /* this should always be 512, but just in case */ mftaddr_len = (size_t) (run_len - offset); } /* Assign address of where the MFT entry starts */ mftaddr_b = data_run->addr * a_ntfs->csize_b + offset; if (tsk_verbose) tsk_fprintf(stderr, "ntfs_dinode_lookup: Entry address at: %" PRIuOFF "\n", mftaddr_b); break; } /* decrement the offset we are looking for */ offset -= run_len; } /* Did we find it? */ if (!mftaddr_b) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_INODE_NUM); tsk_error_set_errstr("mft_lookup: Error finding MFT entry %" PRIuINUM " in $MFT", a_mftnum); return TSK_ERR; } } /* can we do just one read or do we need multiple? */ if (mftaddr2_b) { ssize_t cnt; /* read the first part into mft */ cnt = tsk_fs_read(&a_ntfs->fs_info, mftaddr_b, a_buf, mftaddr_len); if (cnt != mftaddr_len) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2 ("ntfs_dinode_lookup: Error reading MFT Entry (part 1) at %" PRIuOFF, mftaddr_b); return TSK_ERR; } /* read the second part into mft */ cnt = tsk_fs_read (&a_ntfs->fs_info, mftaddr2_b, (char *) ((uintptr_t) a_buf + (uintptr_t) mftaddr_len), a_ntfs->mft_rsize_b - mftaddr_len); if (cnt != a_ntfs->mft_rsize_b - mftaddr_len) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2 ("ntfs_dinode_lookup: Error reading MFT Entry (part 2) at %" PRIuOFF, mftaddr2_b); return TSK_ERR; } } else { ssize_t cnt; /* read the raw entry into mft */ cnt = tsk_fs_read(&a_ntfs->fs_info, mftaddr_b, a_buf, a_ntfs->mft_rsize_b); if (cnt != a_ntfs->mft_rsize_b) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2 ("ntfs_dinode_lookup: Error reading MFT Entry at %" PRIuOFF, mftaddr_b); return TSK_ERR; } } /* Sanity Check */ #if 0 /* This is no longer applied because it caused too many problems * with images that had 0 and 1 etc. as values. Testing shows that * even Windows XP doesn't care if entries have an invalid entry, so * this is no longer checked. The update sequence check should find * corrupt entries * */ if ((tsk_getu32(fs->endian, mft->magic) != NTFS_MFT_MAGIC) && (tsk_getu32(fs->endian, mft->magic) != NTFS_MFT_MAGIC_BAAD) && (tsk_getu32(fs->endian, mft->magic) != NTFS_MFT_MAGIC_ZERO)) { tsk_error_set_errno(TSK_ERR_FS_INODE_COR); tsk_error_set_errstr("entry %d has an invalid MFT magic: %x", mftnum, tsk_getu32(fs->endian, mft->magic)); return 1; } #endif /* The MFT entries have error and integrity checks in them * called update sequences. They must be checked and removed * so that later functions can process the data as normal. 
* They are located in the last 2 bytes of each 512-byte sector * * We first verify that the the 2-byte value is a give value and * then replace it with what should be there */ /* sanity check so we don't run over in the next loop */ mft = (ntfs_mft *) a_buf; if ((tsk_getu16(fs->endian, mft->upd_cnt) > 0) && (((uint32_t) (tsk_getu16(fs->endian, mft->upd_cnt) - 1) * a_ntfs->ssize_b) > a_ntfs->mft_rsize_b)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_INODE_COR); tsk_error_set_errstr ("dinode_lookup: More Update Sequence Entries than MFT size"); return TSK_COR; } if (tsk_getu16(fs->endian, mft->upd_off) > a_ntfs->mft_rsize_b) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_INODE_COR); tsk_error_set_errstr ("dinode_lookup: Update sequence offset larger than MFT size"); return TSK_COR; } /* Apply the update sequence structure template */ upd = (ntfs_upd *) ((uintptr_t) a_buf + tsk_getu16(fs->endian, mft->upd_off)); /* Get the sequence value that each 16-bit value should be */ sig_seq = tsk_getu16(fs->endian, upd->upd_val); /* cycle through each sector */ for (i = 1; i < tsk_getu16(fs->endian, mft->upd_cnt); i++) { uint8_t *new_val, *old_val; /* The offset into the buffer of the value to analyze */ size_t offset = i * a_ntfs->ssize_b - 2; /* get the current sequence value */ uint16_t cur_seq = tsk_getu16(fs->endian, (uintptr_t) a_buf + offset); if (cur_seq != sig_seq) { /* get the replacement value */ uint16_t cur_repl = tsk_getu16(fs->endian, &upd->upd_seq + (i - 1) * 2); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr ("Incorrect update sequence value in MFT entry\nSignature Value: 0x%" PRIx16 " Actual Value: 0x%" PRIx16 " Replacement Value: 0x%" PRIx16 "\nThis is typically because of a corrupted entry", sig_seq, cur_seq, cur_repl); return TSK_COR; } new_val = &upd->upd_seq + (i - 1) * 2; old_val = (uint8_t *) ((uintptr_t) a_buf + offset); /* if (tsk_verbose) tsk_fprintf(stderr, "ntfs_dinode_lookup: upd_seq %i Replacing: %.4" PRIx16 " With: %.4" PRIx16 "\n", i, tsk_getu16(fs->endian, old_val), tsk_getu16(fs->endian, new_val)); */ *old_val++ = *new_val++; *old_val = *new_val; } return TSK_OK; } /* * given a cluster, return the allocation status or * -1 if an error occurs */ static int is_clustalloc(NTFS_INFO * ntfs, TSK_DADDR_T addr) { int bits_p_clust, b; TSK_DADDR_T base; int8_t ret; bits_p_clust = 8 * ntfs->fs_info.block_size; /* While we are loading the MFT, assume that everything * is allocated. This should only be needed when we are * dealing with an attribute list ... */ if (ntfs->loading_the_MFT == 1) { return 1; } else if (ntfs->bmap == NULL) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("is_clustalloc: Bitmap pointer is null: %" PRIuDADDR "\n", addr); return -1; } /* Is the cluster too big? */ if (addr > ntfs->fs_info.last_block) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_INODE_COR); tsk_error_set_errstr("is_clustalloc: cluster too large"); return -1; } /* identify the base cluster in the bitmap file */ base = addr / bits_p_clust; b = (int) (addr % bits_p_clust); tsk_take_lock(&ntfs->lock); /* is this the same as in the cached buffer? 
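     * if not, walk the $Bitmap run list to locate the needed cluster and read it into the cache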
*/ if (base != ntfs->bmap_buf_off) { TSK_DADDR_T c = base; TSK_FS_ATTR_RUN *run; TSK_DADDR_T fsaddr = 0; ssize_t cnt; /* get the file system address of the bitmap cluster */ for (run = ntfs->bmap; run; run = run->next) { if (run->len <= c) { c -= run->len; } else { fsaddr = run->addr + c; break; } } if (fsaddr == 0) { tsk_release_lock(&ntfs->lock); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_BLK_NUM); tsk_error_set_errstr ("is_clustalloc: cluster not found in bitmap: %" PRIuDADDR "", c); return -1; } if (fsaddr > ntfs->fs_info.last_block) { tsk_release_lock(&ntfs->lock); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_BLK_NUM); tsk_error_set_errstr ("is_clustalloc: Cluster in bitmap too large for image: %" PRIuDADDR, fsaddr); return -1; } ntfs->bmap_buf_off = base; cnt = tsk_fs_read_block (&ntfs->fs_info, fsaddr, ntfs->bmap_buf, ntfs->fs_info.block_size); if (cnt != ntfs->fs_info.block_size) { tsk_release_lock(&ntfs->lock); if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2 ("is_clustalloc: Error reading bitmap at %" PRIuDADDR, fsaddr); return -1; } } /* identify if the cluster is allocated or not */ ret = (isset(ntfs->bmap_buf, b)) ? 1 : 0; tsk_release_lock(&ntfs->lock); return ret; } /********************************************************************** * * TSK_FS_ATTR functions * **********************************************************************/ /** * Process a non-resident runlist and convert its contents into the generic fs_attr_run * structure. * @param ntfs File system that attribute is located in. * @param start_vcn The starting VCN for this run. * @param runlist The raw runlist data from the MFT entry. * @param a_data_run_head [out] Pointer to pointer of run that is created. (NULL on error and for $BadClust - special case because it is a sparse file for the entire FS). 
* @param totlen [out] Pointer to location where total length of run (in bytes) can be returned (or NULL) * @param mnum MFT entry address * * @returns Return status of error, corrupt, or OK (note a_data_run can be NULL even when OK is returned if $BadClust is encountered) */ static TSK_RETVAL_ENUM ntfs_make_data_run(NTFS_INFO * ntfs, TSK_OFF_T start_vcn, ntfs_runlist * runlist_head, TSK_FS_ATTR_RUN ** a_data_run_head, TSK_OFF_T * totlen, TSK_INUM_T mnum) { TSK_FS_INFO *fs = (TSK_FS_INFO *) ntfs; ntfs_runlist *run; TSK_FS_ATTR_RUN *data_run, *data_run_prev = NULL; unsigned int i, idx; TSK_DADDR_T prev_addr = 0; TSK_OFF_T file_offset = start_vcn; run = runlist_head; *a_data_run_head = NULL; /* initialize if non-NULL */ if (totlen) *totlen = 0; /* Cycle through each run in the runlist * We go until we find an entry with no length * An entry with offset of 0 is for a sparse run */ while (NTFS_RUNL_LENSZ(run) != 0) { int64_t addr_offset = 0; /* allocate a new tsk_fs_attr_run */ if ((data_run = tsk_fs_attr_run_alloc()) == NULL) { tsk_fs_attr_run_free(*a_data_run_head); *a_data_run_head = NULL; return TSK_ERR; } /* make the list, unless its the first pass & then we set the head */ if (data_run_prev) data_run_prev->next = data_run; else *a_data_run_head = data_run; data_run_prev = data_run; /* These fields are a variable number of bytes long * these for loops are the equivalent of the getuX macros */ idx = 0; /* Get the length of this run */ for (i = 0, data_run->len = 0; i < NTFS_RUNL_LENSZ(run); i++) { data_run->len |= (run->buf[idx++] << (i * 8)); if (tsk_verbose) tsk_fprintf(stderr, "ntfs_make_data_run: Len idx: %i cur: %" PRIu8 " (%" PRIx8 ") tot: %" PRIuDADDR " (%" PRIxDADDR ")\n", i, run->buf[idx - 1], run->buf[idx - 1], data_run->len, data_run->len); } /* Sanity check on length */ if (data_run->len > fs->block_count) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_INODE_COR); tsk_error_set_errstr ("ntfs_make_run: Run length is larger than file system"); tsk_fs_attr_run_free(*a_data_run_head); *a_data_run_head = NULL; return TSK_COR; } data_run->offset = file_offset; file_offset += data_run->len; /* Update the length if we were passed a value */ if (totlen) *totlen += (data_run->len * ntfs->csize_b); /* Get the address of this run */ for (i = 0, data_run->addr = 0; i < NTFS_RUNL_OFFSZ(run); i++) { //data_run->addr |= (run->buf[idx++] << (i * 8)); addr_offset |= (run->buf[idx++] << (i * 8)); if (tsk_verbose) tsk_fprintf(stderr, "ntfs_make_data_run: Off idx: %i cur: %" PRIu8 " (%" PRIx8 ") tot: %" PRIuDADDR " (%" PRIxDADDR ")\n", i, run->buf[idx - 1], run->buf[idx - 1], addr_offset, addr_offset); } /* addr_offset value is signed so extend it to 64-bits */ if ((int8_t) run->buf[idx - 1] < 0) { for (; i < sizeof(addr_offset); i++) addr_offset |= (int64_t) ((int64_t) 0xff << (i * 8)); } if (tsk_verbose) tsk_fprintf(stderr, "ntfs_make_data_run: Signed addr_offset: %" PRIdDADDR " Previous address: %" PRIdDADDR "\n", addr_offset, prev_addr); /* The NT 4.0 version of NTFS uses an offset of -1 to represent * a hole, so add the sparse flag and make it look like the 2K * version with a offset of 0 * * A user reported an issue where the $Bad file started with * its offset as -1 and it was not NT (maybe a conversion) * Change the check now to not limit to NT, but make sure * that it is the first run */ if (((addr_offset == -1) && (prev_addr == 0)) || ((addr_offset == -1) && (ntfs->ver == NTFS_VINFO_NT))) { data_run->flags |= TSK_FS_ATTR_RUN_FLAG_SPARSE; data_run->addr = 0; if (tsk_verbose) 
tsk_fprintf(stderr, "ntfs_make_data_run: Sparse Run\n"); } /* A Sparse file has a run with an offset of 0 * there is a special case though of the BOOT MFT entry which * is the super block and has a legit offset of 0. * * The value given is a delta of the previous offset, so add * them for non-sparse files * * For sparse files the next run will have its offset relative * to the current "prev_addr" so skip that code */ // @@@ BC: we'll need to pass in an inode value for this check else if ((addr_offset) || (mnum == NTFS_MFT_BOOT)) { data_run->addr = prev_addr + addr_offset; prev_addr = data_run->addr; /* Sanity check on length and offset */ if (data_run->addr + data_run->len > fs->block_count) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_INODE_COR); tsk_error_set_errstr ("ntfs_make_run: Run offset and length is larger than file system"); tsk_fs_attr_run_free(*a_data_run_head); *a_data_run_head = NULL; return TSK_COR; } } else { data_run->flags |= TSK_FS_ATTR_RUN_FLAG_SPARSE; if (tsk_verbose) tsk_fprintf(stderr, "ntfs_make_data_run: Sparse Run\n"); } /* Advance run */ run = (ntfs_runlist *) ((uintptr_t) run + (1 + NTFS_RUNL_LENSZ(run) + NTFS_RUNL_OFFSZ(run))); } /* special case for $BADCLUST, which is a sparse file whose size is * the entire file system. * * If there is only one run entry and it is sparse, then there are no * bad blocks, so get rid of it. */ if ((*a_data_run_head != NULL) && ((*a_data_run_head)->next == NULL) && ((*a_data_run_head)->flags & TSK_FS_ATTR_RUN_FLAG_SPARSE) && ((*a_data_run_head)->len == fs->last_block + 1)) { tsk_fs_attr_run_free(*a_data_run_head); *a_data_run_head = NULL; } return TSK_OK; } /*********** UNCOMPRESSION CODE *************/ /* * NTFS breaks compressed data into compression units, which are * typically 16 clusters in size. If the data in the comp unit * compresses to something smaller than 16 clusters then the * compressed data is stored and the rest of the compression unit * is filled with sparse clusters. The entire compression unit * can also be sparse. * * When the data is compressed, it is broken up into 4k blocks. Each * of the blocks is compressed and the resulting data is stored with * a 2-byte header that identifies the compressed size. The * compressed data contains token groups, which contain a 1-byte * header followed by 8 variable length tokens. There are two types * of tokens. Symbol tokens are 1 byte in length and the 1-byte value * should be directly copied into the uncompressed data. Phrase tokens * identify a run of data in the same compression unit that should be * copied to the current location. Each bit in the 1-byte header identifies * the type of one of the 8 tokens in the group. * */ /* Variables used for ntfs_uncompress() method */ typedef struct { char *uncomp_buf; // Buffer for uncompressed data char *comp_buf; // buffer for compressed data size_t comp_len; // number of bytes used in compressed data size_t uncomp_idx; // Index into buffer for next byte size_t buf_size_b; // size of buffer in bytes (1 compression unit) } NTFS_COMP_INFO; /** * Reset the values in the NTFS_COMP_INFO structure. We need to * do this in between every compression unit that we process in the file. * * @param comp Structure to reset */ static void ntfs_uncompress_reset(NTFS_COMP_INFO * comp) { memset(comp->uncomp_buf, 0, comp->buf_size_b); comp->uncomp_idx = 0; memset(comp->comp_buf, 0, comp->buf_size_b); comp->comp_len = 0; } /** * Set up the NTFS_COMP_INFO structure with a buffer and * initialize the basic settings.
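 *
 * For example, with the typical 16-cluster compression unit and a
 * 4096-byte cluster, each of the two buffers is 16 * 4096 = 65536 bytes.
 *
 * A minimal usage sketch (hypothetical caller; error handling and the
 * read of the compressed clusters are omitted, and compsize comes from
 * the attribute header as in ntfs_attr_walk_special):
 *
 *   NTFS_COMP_INFO comp;
 *   if (ntfs_uncompress_setup(fs, &comp, fs_attr->nrd.compsize) == 0) {
 *       // fill comp.comp_buf and comp.comp_len with one compression
 *       // unit of raw clusters, then decompress it into comp.uncomp_buf
 *       ntfs_uncompress_compunit(&comp);
 *       ntfs_uncompress_done(&comp);
 *   }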
* * @param fs File system state information * @param comp Compression state information to initialize * @param compunit_size_c The size (in clusters) of a compression * unit * @return 1 on error and 0 on success */ static int ntfs_uncompress_setup(TSK_FS_INFO * fs, NTFS_COMP_INFO * comp, uint32_t compunit_size_c) { comp->buf_size_b = fs->block_size * compunit_size_c; if ((comp->uncomp_buf = tsk_malloc(comp->buf_size_b)) == NULL) { comp->buf_size_b = 0; return 1; } if ((comp->comp_buf = tsk_malloc(comp->buf_size_b)) == NULL) { free(comp->uncomp_buf); comp->uncomp_buf = NULL; comp->buf_size_b = 0; return 1; } ntfs_uncompress_reset(comp); return 0; } static void ntfs_uncompress_done(NTFS_COMP_INFO * comp) { if (comp->uncomp_buf) free(comp->uncomp_buf); comp->uncomp_buf = NULL; if (comp->comp_buf) free(comp->comp_buf); comp->comp_buf = NULL; comp->buf_size_b = 0; } /** * Uncompress the block of data in comp->comp_buf, * which has a size of comp->comp_len. * Store the result in the comp->uncomp_buf. * * @param comp Compression unit structure * * @returns 1 on error and 0 on success */ static uint8_t ntfs_uncompress_compunit(NTFS_COMP_INFO * comp) { size_t cl_index; tsk_error_reset(); comp->uncomp_idx = 0; /* Cycle through the compressed data * We maintain state using different levels of loops. * We use +1 here because the size value at start of block is 2 bytes. */ for (cl_index = 0; cl_index + 1 < comp->comp_len;) { size_t blk_end; // index into the buffer to where block ends size_t blk_size; // size of the current block uint8_t iscomp; // set to 1 if block is compressed size_t blk_st_uncomp; // index into uncompressed buffer where block started /* The first two bytes of each block contain the size * information.*/ blk_size = ((((unsigned char) comp->comp_buf[cl_index + 1] << 8) | ((unsigned char) comp->comp_buf[cl_index])) & 0x0FFF) + 3; // this seems to indicate end of block if (blk_size == 3) break; blk_end = cl_index + blk_size; if (blk_end > comp->comp_len) { tsk_error_set_errno(TSK_ERR_FS_FWALK); tsk_error_set_errstr ("ntfs_uncompress_compunit: Block length longer than buffer length: %" PRIuSIZE "", blk_end); return 1; } if (tsk_verbose) tsk_fprintf(stderr, "ntfs_uncompress_compunit: Block size is %" PRIuSIZE "\n", blk_size); /* Bit 15 of the 2-byte header (the MSB of the high byte) identifies if the block is compressed */ if (((unsigned char) comp->comp_buf[cl_index + 1] & 0x80) == 0) iscomp = 0; else iscomp = 1; // keep track of where this block started in the buffer blk_st_uncomp = comp->uncomp_idx; cl_index += 2; // the 4096 size seems to occur at the same time as no compression if ((iscomp) || (blk_size - 2 != 4096)) { // cycle through the block while (cl_index < blk_end) { int a; // get the token group header unsigned char header = comp->comp_buf[cl_index]; cl_index++; if (tsk_verbose) tsk_fprintf(stderr, "ntfs_uncompress_compunit: New Tag: %x\n", header); for (a = 0; a < 8 && cl_index < blk_end; a++) { /* Determine token type and parse appropriately.
* * A symbol token is the symbol itself, so copy it * into the uncompressed buffer */ if ((header & NTFS_TOKEN_MASK) == NTFS_SYMBOL_TOKEN) { if (tsk_verbose) tsk_fprintf(stderr, "ntfs_uncompress_compunit: Symbol Token: %" PRIuSIZE "\n", cl_index); if (comp->uncomp_idx >= comp->buf_size_b) { tsk_error_set_errno(TSK_ERR_FS_FWALK); tsk_error_set_errstr ("ntfs_uncompress_compunit: Trying to write past end of uncompression buffer: %" PRIuSIZE "", comp->uncomp_idx); return 1; } comp->uncomp_buf[comp->uncomp_idx++] = comp->comp_buf[cl_index]; cl_index++; } /* Otherwise, it is a phrase token, which points back * to a previous sequence of bytes. */ else { size_t i; int shift; size_t start_position_index = 0; size_t end_position_index = 0; unsigned int offset = 0; unsigned int length = 0; uint16_t pheader; if (cl_index + 1 >= blk_end) { tsk_error_set_errno(TSK_ERR_FS_FWALK); tsk_error_set_errstr ("ntfs_uncompress_compunit: Phrase token index is past end of block: %d", a); return 1; } pheader = ((((comp->comp_buf[cl_index + 1]) << 8) & 0xFF00) | (comp->comp_buf[cl_index] & 0xFF)); cl_index += 2; /* The number of bits for the start and length * in the 2-byte header change depending on the * location in the compression unit. This identifies * how many bits each has */ shift = 0; for (i = comp->uncomp_idx - blk_st_uncomp - 1; i >= 0x10; i >>= 1) { shift++; } //tsk_fprintf(stderr, "Start: %X Shift: %d UnComp_IDX %d BlkStart: %lu BlkIdx: %d BlkSize: %d\n", (int)(comp->uncomp_idx - comp->blk_st - 1), shift, comp->uncomp_idx, comp->blk_st, comp->blk_idx, comp->blk_size); offset = (pheader >> (12 - shift)) + 1; length = (pheader & (0xFFF >> shift)) + 2; start_position_index = comp->uncomp_idx - offset; end_position_index = start_position_index + length; if (tsk_verbose) tsk_fprintf(stderr, "ntfs_uncompress_compunit: Phrase Token: %" PRIuSIZE "\t%d\t%d\t%x\n", cl_index, length, offset, pheader); /* Sanity checks on values */ if (offset > comp->uncomp_idx) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_FWALK); tsk_error_set_errstr ("ntfs_uncompress_compunit: Phrase token offset is too large: %d (max: %" PRIuSIZE ")", offset, comp->uncomp_idx); return 1; } else if (length + start_position_index > comp->buf_size_b) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_FWALK); tsk_error_set_errstr ("ntfs_uncompress_compunit: Phrase token length is too large: %d (max: %"PRIuSIZE")", length, comp->buf_size_b - start_position_index); return 1; } else if (end_position_index - start_position_index + 1 > comp->buf_size_b - comp->uncomp_idx) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_FWALK); tsk_error_set_errstr ("ntfs_uncompress_compunit: Phrase token length is too large for rest of uncomp buf: %"PRIuSIZE" (max: %" PRIuSIZE ")", end_position_index - start_position_index + 1, comp->buf_size_b - comp->uncomp_idx); return 1; } for (; start_position_index <= end_position_index && comp->uncomp_idx < comp->buf_size_b; start_position_index++) { // Copy the previous data to the current position comp->uncomp_buf[comp->uncomp_idx++] = comp->uncomp_buf[start_position_index]; } } header >>= 1; } // end of loop inside of token group } // end of loop inside of block } // this block contains uncompressed data else { while (cl_index < blk_end && cl_index < comp->comp_len) { /* This seems to happen only with corrupt data -- such as * when an unallocated file is being processed...
*/ if (comp->uncomp_idx >= comp->buf_size_b) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_FWALK); tsk_error_set_errstr ("ntfs_uncompress_compunit: Trying to write past end of uncompression buffer (1) -- corrupt data?)"); return 1; } // Place data in uncompression_buffer comp->uncomp_buf[comp->uncomp_idx++] = comp->comp_buf[cl_index++]; } } } // end of loop inside of compression unit return 0; } /** * Process a compression unit and return the decompressed data in a buffer in comp. * * @param ntfs File system * @param comp Compression state info (output will be stored in here) * @param comp_unit List of addresses that store compressed data * @param comp_unit_size Number of addresses in comp_unit * @returns 1 on error and 0 on success */ static uint8_t ntfs_proc_compunit(NTFS_INFO * ntfs, NTFS_COMP_INFO * comp, TSK_DADDR_T * comp_unit, uint32_t comp_unit_size) { TSK_FS_INFO *fs = (TSK_FS_INFO *) ntfs; int sparse; uint64_t a; /* With compressed attributes, there are three scenarios. * 1: The compression unit is not compressed, * 2: The compression unit is sparse * 3: The compression unit is compressed */ /* Check if the entire compression unit is sparse */ sparse = 1; for (a = 0; a < comp_unit_size && sparse == 1; a++) { if (comp_unit[a]) { sparse = 0; break; } } /* Entire comp unit is sparse... */ if (sparse) { if (tsk_verbose) tsk_fprintf(stderr, "ntfs_proc_compunit: Unit is fully sparse\n"); memset(comp->uncomp_buf, 0, comp->buf_size_b); comp->uncomp_idx = comp->buf_size_b; } /* Check if the end of the unit is sparse, which means the * unit is compressed */ else if (comp_unit[comp_unit_size - 1] == 0) { if (tsk_verbose) tsk_fprintf(stderr, "ntfs_proc_compunit: Unit is compressed\n"); // load up the compressed buffer so we can decompress it ntfs_uncompress_reset(comp); for (a = 0; a < comp_unit_size; a++) { ssize_t cnt; if (comp_unit[a] == 0) break; /* To get the uncompressed size, we must uncompress the * data -- even if addresses are only needed */ cnt = tsk_fs_read_block(fs, comp_unit[a], &comp->comp_buf[comp->comp_len], fs->block_size); if (cnt != fs->block_size) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2 ("ntfs_proc_compunit: Error reading block at %" PRIuDADDR, comp_unit[a]); return 1; } comp->comp_len += fs->block_size; } if (ntfs_uncompress_compunit(comp)) { return 1; } } /* Uncompressed data */ else { if (tsk_verbose) tsk_fprintf(stderr, "ntfs_proc_compunit: Unit is not compressed\n"); comp->uncomp_idx = 0; for (a = 0; a < comp_unit_size; a++) { ssize_t cnt; cnt = tsk_fs_read_block(fs, comp_unit[a], &comp->uncomp_buf[comp->uncomp_idx], fs->block_size); if (cnt != fs->block_size) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2 ("ntfs_proc_compunit: Error reading block at %" PRIuDADDR, comp_unit[a]); return 1; } comp->uncomp_idx += fs->block_size; } } return 0; } /** * Currently ignores the SPARSE flag */ static uint8_t ntfs_attr_walk_special(const TSK_FS_ATTR * fs_attr, int flags, TSK_FS_FILE_WALK_CB a_action, void *ptr) { TSK_FS_INFO *fs; NTFS_INFO *ntfs; // clean up any error messages that are lying around tsk_error_reset(); if ((fs_attr == NULL) || (fs_attr->fs_file == NULL) || (fs_attr->fs_file->meta == NULL) || (fs_attr->fs_file->fs_info == NULL)) { tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("ntfs_attr_walk_special: Null arguments given\n"); return 1; } fs = fs_attr->fs_file->fs_info; ntfs = (NTFS_INFO *) fs; /* Process the compressed buffer * * The 
compsize value equal to 0 can occur if we are processing an * isolated entry that is part of an attribute list. The first * sequence of the attribute has the compsize and the latter ones * do not. So, if one of the non-base MFT entries is processed by * itself, we have that case. I tried to assume it was 16, but it * caused decompression problems -- likely because this sequence * did not start on a compression unit boundary. So, now we just * dump the compressed data instead of giving an error. */ if (fs_attr->flags & TSK_FS_ATTR_COMP) { TSK_DADDR_T addr; TSK_FS_ATTR_RUN *fs_attr_run; TSK_DADDR_T *comp_unit; uint32_t comp_unit_idx = 0; NTFS_COMP_INFO comp; TSK_OFF_T off = 0; int retval; uint8_t stop_loop = 0; if (fs_attr->nrd.compsize <= 0) { tsk_error_set_errno(TSK_ERR_FS_FWALK); tsk_error_set_errstr ("ntfs_attrwalk_special: Compressed attribute has compsize of 0 (%" PRIuINUM ")", fs_attr->fs_file->meta->addr); return 1; } /* Allocate the buffers and state structure */ if (ntfs_uncompress_setup(fs, &comp, fs_attr->nrd.compsize)) { return 1; } comp_unit = (TSK_DADDR_T *) tsk_malloc(fs_attr->nrd.compsize * sizeof(TSK_DADDR_T)); if (comp_unit == NULL) { ntfs_uncompress_done(&comp); return 1; } retval = TSK_WALK_CONT; /* cycle through the number of runs we have */ for (fs_attr_run = fs_attr->nrd.run; fs_attr_run; fs_attr_run = fs_attr_run->next) { size_t len_idx; /* We may get a FILLER entry at the beginning of the run * if we are processing a non-base file record since * this $DATA attribute could not be the first sequence in the * attribute. Therefore, do not error if it starts at 0 */ if (fs_attr_run->flags & TSK_FS_ATTR_RUN_FLAG_FILLER) { if (fs_attr_run->addr != 0) { tsk_error_reset(); if (fs_attr->fs_file->meta-> flags & TSK_FS_META_FLAG_UNALLOC) tsk_error_set_errno(TSK_ERR_FS_RECOVER); else tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr ("ntfs_attr_walk_special: Filler Entry exists in fs_attr_run %" PRIuDADDR "@%" PRIuDADDR " - type: %" PRIu32 " id: %d Meta: %" PRIuINUM " Status: %s", fs_attr_run->len, fs_attr_run->addr, fs_attr->type, fs_attr->id, fs_attr->fs_file->meta->addr, (fs_attr->fs_file->meta-> flags & TSK_FS_META_FLAG_ALLOC) ? "Allocated" : "Deleted"); free(comp_unit); ntfs_uncompress_done(&comp); return 1; } else { off += (fs_attr_run->len * fs->block_size); continue; } } addr = fs_attr_run->addr; /* cycle through each cluster in the run */ for (len_idx = 0; len_idx < fs_attr_run->len; len_idx++) { if (addr > fs->last_block) { tsk_error_reset(); if (fs_attr->fs_file->meta-> flags & TSK_FS_META_FLAG_UNALLOC) tsk_error_set_errno(TSK_ERR_FS_RECOVER); else tsk_error_set_errno(TSK_ERR_FS_BLK_NUM); tsk_error_set_errstr ("ntfs_attr_walk_special: Invalid address in run (too large): %" PRIuDADDR " Meta: %" PRIuINUM " Status: %s", addr, fs_attr->fs_file->meta->addr, (fs_attr->fs_file->meta-> flags & TSK_FS_META_FLAG_ALLOC) ? "Allocated" : "Deleted"); free(comp_unit); ntfs_uncompress_done(&comp); return 1; } // queue up the addresses until we get a full unit comp_unit[comp_unit_idx++] = addr; // time to decompress (if queue is full or this is the last block) if ((comp_unit_idx == fs_attr->nrd.compsize) || ((len_idx == fs_attr_run->len - 1) && (fs_attr_run->next == NULL))) { size_t i; // decompress the unit if (ntfs_proc_compunit(ntfs, &comp, comp_unit, comp_unit_idx)) { tsk_error_set_errstr2("%" PRIuINUM " - type: %" PRIu32 " id: %d Status: %s", fs_attr->fs_file->meta->addr, fs_attr->type, fs_attr->id, (fs_attr->fs_file->meta-> flags & TSK_FS_META_FLAG_ALLOC) ? 
"Allocated" : "Deleted"); free(comp_unit); ntfs_uncompress_done(&comp); return 1; } // now call the callback with the uncompressed data for (i = 0; i < comp_unit_idx; i++) { int myflags; size_t read_len; myflags = TSK_FS_BLOCK_FLAG_CONT | TSK_FS_BLOCK_FLAG_COMP; retval = is_clustalloc(ntfs, comp_unit[i]); if (retval == -1) { if (fs_attr->fs_file->meta-> flags & TSK_FS_META_FLAG_UNALLOC) tsk_error_set_errno(TSK_ERR_FS_RECOVER); free(comp_unit); ntfs_uncompress_done(&comp); return 1; } else if (retval == 1) { myflags |= TSK_FS_BLOCK_FLAG_ALLOC; } else if (retval == 0) { myflags |= TSK_FS_BLOCK_FLAG_UNALLOC; } if (fs_attr->size - off > fs->block_size) read_len = fs->block_size; else read_len = (size_t) (fs_attr->size - off); if (i * fs->block_size + read_len > comp.uncomp_idx) { tsk_error_set_errno(TSK_ERR_FS_FWALK); tsk_error_set_errstr ("ntfs_attrwalk_special: Trying to read past end of uncompressed buffer: %" PRIuSIZE " %" PRIuSIZE " Meta: %" PRIuINUM " Status: %s", i * fs->block_size + read_len, comp.uncomp_idx, fs_attr->fs_file->meta->addr, (fs_attr->fs_file->meta-> flags & TSK_FS_META_FLAG_ALLOC) ? "Allocated" : "Deleted"); free(comp_unit); ntfs_uncompress_done(&comp); return 1; } // call the callback retval = a_action(fs_attr->fs_file, off, comp_unit[i], &comp.uncomp_buf[i * fs->block_size], read_len, myflags, ptr); off += read_len; if (off >= fs_attr->size) { stop_loop = 1; break; } if (retval != TSK_WALK_CONT) { stop_loop = 1; break; } } comp_unit_idx = 0; } if (stop_loop) break; /* If it is a sparse run, don't increment the addr so that * it remains 0 */ if (((fs_attr_run->flags & TSK_FS_ATTR_RUN_FLAG_SPARSE) == 0) && ((fs_attr_run->flags & TSK_FS_ATTR_RUN_FLAG_FILLER) == 0)) addr++; } if (stop_loop) break; } ntfs_uncompress_done(&comp); free(comp_unit); if (retval == TSK_WALK_ERROR) return 1; else return 0; } else { tsk_error_set_errno(TSK_ERR_FS_FWALK); tsk_error_set_errstr ("ntfs_attrwalk_special: called with non-special attribute: %x", fs_attr->flags); return 1; } } /** \internal * * @returns number of bytes read or -1 on error (incl if offset is past EOF) */ static ssize_t ntfs_file_read_special(const TSK_FS_ATTR * a_fs_attr, TSK_OFF_T a_offset, char *a_buf, size_t a_len) { TSK_FS_INFO *fs = NULL; NTFS_INFO *ntfs = NULL; if ((a_fs_attr == NULL) || (a_fs_attr->fs_file == NULL) || (a_fs_attr->fs_file->meta == NULL) || (a_fs_attr->fs_file->fs_info == NULL)) { tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("ntfs_file_read_special: NULL parameters passed"); return -1; } fs = a_fs_attr->fs_file->fs_info; ntfs = (NTFS_INFO *) fs; if (a_fs_attr->flags & TSK_FS_ATTR_COMP) { TSK_FS_ATTR_RUN *data_run_cur; TSK_OFF_T cu_blkoffset; // block offset of starting compression unit to start reading from size_t byteoffset; // byte offset in compression unit of where we want to start reading from TSK_DADDR_T *comp_unit; uint32_t comp_unit_idx = 0; NTFS_COMP_INFO comp; size_t buf_idx = 0; if (a_fs_attr->nrd.compsize <= 0) { tsk_error_set_errno(TSK_ERR_FS_FWALK); tsk_error_set_errstr ("ntfs_file_read_special: Compressed attribute has compsize of 0"); return -1; } if (a_offset >= a_fs_attr->size) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ_OFF); tsk_error_set_errstr("ntfs_file_read_special - %" PRIuOFF " Meta: %" PRIuINUM, a_offset, a_fs_attr->fs_file->meta->addr); return -1; } // we return 0s for reads past the initsize if (a_offset >= a_fs_attr->nrd.initsize) { ssize_t len; if (tsk_verbose) fprintf(stderr, "ntfs_file_read_special: Returning 0s for read past end of initsize 
(%" PRIuINUM ")\n", a_fs_attr->fs_file->meta->addr); if (a_offset + a_len > a_fs_attr->nrd.allocsize) len = (ssize_t) (a_fs_attr->nrd.allocsize - a_offset); else len = (ssize_t) a_len; memset(a_buf, 0, a_len); return len; } /* Allocate the buffers and state structure */ if (ntfs_uncompress_setup(fs, &comp, a_fs_attr->nrd.compsize)) { return -1; } comp_unit = (TSK_DADDR_T *) tsk_malloc(a_fs_attr->nrd.compsize * sizeof(TSK_DADDR_T)); if (comp_unit == NULL) { ntfs_uncompress_done(&comp); return -1; } // figure out the needed offsets cu_blkoffset = a_offset / fs->block_size; if (cu_blkoffset) { cu_blkoffset /= a_fs_attr->nrd.compsize; cu_blkoffset *= a_fs_attr->nrd.compsize; } byteoffset = (size_t) (a_offset - cu_blkoffset * fs->block_size); // cycle through the run until we find where we can start to process the clusters for (data_run_cur = a_fs_attr->nrd.run; (data_run_cur) && (buf_idx < a_len); data_run_cur = data_run_cur->next) { TSK_DADDR_T addr; size_t a; // See if this run contains the starting offset they requested if (data_run_cur->offset + data_run_cur->len < (TSK_DADDR_T) cu_blkoffset) continue; // seek to the start of where we want to read (we may need to read several runs) if (data_run_cur->offset > (TSK_DADDR_T) cu_blkoffset) a = 0; else a = (size_t) (cu_blkoffset - data_run_cur->offset); addr = data_run_cur->addr; // don't increment addr if it is 0 -- sparse if (addr) addr += a; /* cycle through the relevant in the run */ for (; a < data_run_cur->len && buf_idx < a_len; a++) { // queue up the addresses until we get a full unit comp_unit[comp_unit_idx++] = addr; // time to decompress (if queue is full or this is the last block) if ((comp_unit_idx == a_fs_attr->nrd.compsize) || ((a == data_run_cur->len - 1) && (data_run_cur->next == NULL))) { size_t cpylen; // decompress the unit if (ntfs_proc_compunit(ntfs, &comp, comp_unit, comp_unit_idx)) { tsk_error_set_errstr2("%" PRIuINUM " - type: %" PRIu32 " id: %d Status: %s", a_fs_attr->fs_file->meta->addr, a_fs_attr->type, a_fs_attr->id, (a_fs_attr->fs_file->meta-> flags & TSK_FS_META_FLAG_ALLOC) ? "Allocated" : "Deleted"); free(comp_unit); ntfs_uncompress_done(&comp); return -1; } // copy uncompressed data to the output buffer if (comp.uncomp_idx < byteoffset) { // @@ ERROR free(comp_unit); ntfs_uncompress_done(&comp); return -1; } else if (comp.uncomp_idx - byteoffset < a_len - buf_idx) { cpylen = comp.uncomp_idx - byteoffset; } else { cpylen = a_len - buf_idx; } // Make sure not to return more bytes than are in the file if (cpylen > (a_fs_attr->size - (a_offset + buf_idx))) cpylen = (size_t) (a_fs_attr->size - (a_offset + buf_idx)); memcpy(&a_buf[buf_idx], &comp.uncomp_buf[byteoffset], cpylen); // reset this in case we need to also read from the next run byteoffset = 0; buf_idx += cpylen; comp_unit_idx = 0; } /* If it is a sparse run, don't increment the addr so that * it remains 0 */ if (((data_run_cur->flags & TSK_FS_ATTR_RUN_FLAG_SPARSE) == 0) && ((data_run_cur->flags & TSK_FS_ATTR_RUN_FLAG_FILLER) == 0)) addr++; } } free(comp_unit); ntfs_uncompress_done(&comp); return (ssize_t) buf_idx; } else { tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("ntfs_file_read_special: called with non-special attribute: %x", a_fs_attr->flags); return -1; } } /* needs to be predefined for proc_attrseq */ static uint8_t ntfs_proc_attrlist(NTFS_INFO *, TSK_FS_FILE *, const TSK_FS_ATTR *); /* This structure is used when processing attrlist attributes. * The Id part of the MFTNUM-TYPE-ID triple is unique only to a given * MFTNUM. 
With the case of attribute lists, a file may use multiple * MFT entires and therefore have multiple attributes with the same * type and id pair (if they are in different MFT entries). This map * is created by proc_attrlist when it assigns unique IDs to the * other entries. proc_attrseq uses this when it adds the attributes. */ typedef struct { int num_used; TSK_INUM_T extMft[256]; uint32_t type[256]; uint32_t extId[256]; uint8_t name[256][512]; uint32_t newId[256]; } NTFS_ATTRLIST_MAP; /* * Process an NTFS attribute sequence and load the data into data * structures. * An attribute sequence is a linked list of the attributes in an MFT entry. * This is called by copy_inode and proc_attrlist. * * @param ntfs File system to analyze * @param fs_file Generic metadata structure to add the attribute info to * @param attrseq Start of the attribute sequence to analyze * @param len Length of the attribute sequence buffer * @param a_attrinum MFT entry address that the attribute sequence came from (diff from fs_file for attribute lists) * @param a_attr_map List that maps to new IDs that were assigned by processing * the attribute list attribute (if it exists) or NULL if there is no attrlist. * @returns Error code */ static TSK_RETVAL_ENUM ntfs_proc_attrseq(NTFS_INFO * ntfs, TSK_FS_FILE * fs_file, const ntfs_attr * a_attrseq, size_t len, TSK_INUM_T a_attrinum, const NTFS_ATTRLIST_MAP * a_attr_map) { const ntfs_attr *attr; const TSK_FS_ATTR *fs_attr_attrl = NULL; char name[NTFS_MAXNAMLEN_UTF8 + 1]; TSK_FS_INFO *fs = (TSK_FS_INFO *) & ntfs->fs_info; if (tsk_verbose) tsk_fprintf(stderr, "ntfs_proc_attrseq: Processing extended entry for primary entry %" PRIuINUM "\n", fs_file->meta->addr); if (fs_file->meta->attr == NULL) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("Null attribute list in ntfs_proc_attrseq"); return TSK_ERR; } /* Cycle through the list of attributes */ for (attr = a_attrseq; ((uintptr_t) attr >= (uintptr_t) a_attrseq) && ((uintptr_t) attr <= ((uintptr_t) a_attrseq + len)) && (tsk_getu32(fs->endian, attr->len) > 0 && (tsk_getu32(fs->endian, attr->type) != 0xffffffff)); attr = (ntfs_attr *) ((uintptr_t) attr + tsk_getu32(fs->endian, attr->len))) { int retVal; int i; /* Get the type of this attribute */ uint32_t type = tsk_getu32(fs->endian, attr->type); uint16_t id = tsk_getu16(fs->endian, attr->id); uint16_t id_new = id; /* If the map was supplied, search through it to see if this * entry is in there. Use that ID instead so that we always have * unique IDs for each attribute -- even if it spans multiple MFT entries. 
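 *
 * For example, if ntfs_proc_attrlist mapped a $DATA attribute that lives
 * in an extension record to newId 5, the copy of that attribute seen here
 * is added under id 5 rather than the (possibly colliding) id stored on
 * disk; the id value here is purely illustrative.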
*/ if (a_attr_map) { for (i = 0; i < a_attr_map->num_used; i++) { if ((a_attr_map->type[i] == type) && (memcmp(a_attr_map->name[i], (void *) ((uintptr_t) attr + tsk_getu16(fs->endian, attr->name_off)), attr->nlen * 2) == 0)) { id_new = a_attr_map->newId[i]; break; } } } /* Copy the name and convert it to UTF8 */ if ((attr->nlen) && (tsk_getu16(fs->endian, attr->name_off) + attr->nlen * 2 < tsk_getu32(fs->endian, attr->len))) { int i; UTF8 *name8; UTF16 *name16; name8 = (UTF8 *) name; name16 = (UTF16 *) ((uintptr_t) attr + tsk_getu16(fs->endian, attr->name_off)); retVal = tsk_UTF16toUTF8(fs->endian, (const UTF16 **) &name16, (UTF16 *) ((uintptr_t) name16 + attr->nlen * 2), &name8, (UTF8 *) ((uintptr_t) name8 + sizeof(name)), TSKlenientConversion); if (retVal != TSKconversionOK) { if (tsk_verbose) tsk_fprintf(stderr, "ntfs_proc_attrseq: Error converting NTFS attribute name to UTF8: %d %" PRIuINUM, retVal, fs_file->meta->addr); *name = '\0'; } /* Make sure it is NULL Terminated */ else if ((uintptr_t) name8 >= (uintptr_t) name + sizeof(name)) name[sizeof(name) - 1] = '\0'; else *name8 = '\0'; /* Clean up name */ i = 0; while (name[i] != '\0') { if (TSK_IS_CNTRL(name[i])) name[i] = '^'; i++; } } else { name[0] = '\0'; } /* For resident attributes, we will copy the buffer into * a TSK_FS_ATTR buffer, which is stored in the TSK_FS_META * structure */ if (attr->res == NTFS_MFT_RES) { TSK_FS_ATTR *fs_attr; if (tsk_verbose) tsk_fprintf(stderr, "ntfs_proc_attrseq: Resident Attribute in Type: %" PRIu32 " Id: %" PRIu16 " IdNew: %" PRIu16 " Name: %s\n", type, id, id_new, name); /* Validate the offset lengths */ if (((tsk_getu16(fs->endian, attr->c.r.soff) + (uintptr_t) attr) > ((uintptr_t) a_attrseq + len)) || ((tsk_getu16(fs->endian, attr->c.r.soff) + tsk_getu32(fs->endian, attr->c.r.ssize) + (uintptr_t) attr) > ((uintptr_t) a_attrseq + len))) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_CORRUPT); tsk_error_set_errstr("ntfs_attr_walk: Resident attribute %" PRIuINUM "-%" PRIu32 " starting offset and length too large", fs_file->meta->addr, type); return TSK_COR; } // Get a free fs_attr structure if ((fs_attr = tsk_fs_attrlist_getnew(fs_file->meta->attr, TSK_FS_ATTR_RES)) == NULL) { tsk_error_errstr2_concat(" - proc_attrseq"); return TSK_ERR; } // set the details in the fs_attr structure if (tsk_fs_attr_set_str(fs_file, fs_attr, name, type, id_new, (void *) ((uintptr_t) attr + tsk_getu16(fs->endian, attr->c.r.soff)), tsk_getu32(fs->endian, attr->c.r.ssize))) { tsk_error_errstr2_concat("- proc_attrseq"); return TSK_ERR; } // set the meta size if we find the relevant attribute if ((fs_file->meta->type == TSK_FS_META_TYPE_DIR) && (type == NTFS_ATYPE_IDXROOT)) { fs_file->meta->size = tsk_getu32(fs->endian, attr->c.r.ssize); } else if ((fs_file->meta->type == TSK_FS_META_TYPE_REG) && (type == NTFS_ATYPE_DATA) && (name[0] == '\0')) { fs_file->meta->size = tsk_getu32(fs->endian, attr->c.r.ssize); } } /* For non-resident attributes, we will copy the runlist * to the generic form and then save it in the TSK_FS_META->attr * list */ else { TSK_FS_ATTR *fs_attr = NULL; TSK_FS_ATTR_RUN *fs_attr_run = NULL; uint8_t data_flag = 0; uint32_t compsize = 0; TSK_RETVAL_ENUM retval; if (tsk_verbose) tsk_fprintf(stderr, "ntfs_proc_attrseq: Non-Resident Attribute Type: %" PRIu32 " Id: %" PRIu16 " IdNew: %" PRIu16 " Name: %s Start VCN: %" PRIu64 "\n", type, id, id_new, name, tsk_getu64(fs->endian, attr->c.nr.start_vcn)); /* convert the run to generic form */ retval = ntfs_make_data_run(ntfs, tsk_getu64(fs->endian, 
attr->c.nr.start_vcn), (ntfs_runlist *) ((uintptr_t) attr + tsk_getu16(fs->endian, attr->c.nr.run_off)), &fs_attr_run, NULL, a_attrinum); if (retval != TSK_OK) { tsk_error_errstr2_concat(" - proc_attrseq"); return retval; } /* Determine the flags based on compression and stuff */ data_flag = 0; if (tsk_getu16(fs->endian, attr->flags) & NTFS_ATTR_FLAG_COMP) { data_flag |= TSK_FS_ATTR_COMP; fs_file->meta->flags |= TSK_FS_META_FLAG_COMP; } if (tsk_getu16(fs->endian, attr->flags) & NTFS_ATTR_FLAG_ENC) data_flag |= TSK_FS_ATTR_ENC; if (tsk_getu16(fs->endian, attr->flags) & NTFS_ATTR_FLAG_SPAR) data_flag |= TSK_FS_ATTR_SPARSE; /* SPECIAL CASE * We are in non-res section, so we know this * isn't $STD_INFO and $FNAME * * When we are processing a non-base entry, we may * find an attribute with an id of 0 and it is an * extention of a previous run (i.e. non-zero start VCN) * * We will lookup if we already have such an attribute * and get its ID * * We could also check for a start_vcn if this does * not fix the problem. * * NOTE: This should not be needed now that TSK assigns * unique ID values to the extended attributes. */ if (id_new == 0) { int cnt, i; // cycle through the attributes cnt = tsk_fs_file_attr_getsize(fs_file); for (i = 0; i < cnt; i++) { const TSK_FS_ATTR *fs_attr2 = tsk_fs_file_attr_get_idx(fs_file, i); if (!fs_attr2) continue; /* We found an attribute with the same name and type */ if (fs_attr2->type == type) { if (((name[0] == '\0') && (fs_attr2->name == NULL)) || ((fs_attr2->name) && (strcmp(fs_attr2->name, name) == 0))) { id_new = fs_attr2->id; if (tsk_verbose) tsk_fprintf(stderr, "ntfs_proc_attrseq: Updating id from 0 to %" PRIu16 "\n", id_new); break; } } } } /* the compression unit size is stored in the header * it is stored as the power of 2 (if it is not 0) */ if (tsk_getu16(fs->endian, attr->c.nr.compusize) > 0) { compsize = 1 << (tsk_getu16(fs->endian, attr->c.nr.compusize)); } else { compsize = 0; /* if this is 0, be sure to cancel out the COMP flag. * This occurs when we process an extended attribute * that has compressed data -- the attributes in the * latter MFT entries do not have compsize set. */ if (data_flag & TSK_FS_ATTR_COMP) { if (tsk_verbose) fprintf(stderr, "ntfs_proc_attrseq: Clearing compression setting for attribute %" PRIuINUM "-%d because compsize is 0\n", fs_file->meta->addr, type); data_flag &= ~TSK_FS_ATTR_COMP; } } /* Add the run to the list */ // see if this attribute has already been partially defined // @@@ This is bad design, we are casting away the const... fs_attr = (TSK_FS_ATTR *) tsk_fs_attrlist_get_id(fs_file->meta->attr, type, id_new); if (fs_attr == NULL) { uint64_t ssize; // size uint64_t alen; // allocated length if ((fs_attr = tsk_fs_attrlist_getnew(fs_file->meta->attr, TSK_FS_ATTR_RES)) == NULL) { tsk_error_errstr2_concat(" - proc_attrseq: getnew"); // JRB: Coverity found leak. if (fs_attr_run) tsk_fs_attr_run_free(fs_attr_run); fs_attr_run = NULL; return TSK_ERR; } ssize = tsk_getu64(fs->endian, attr->c.nr.ssize); /* This can happen with extended attributes, so * we set it based on what we currently have. * fs_attr_run can be NULL for $BadClust file. 
*/ if ((ssize == 0) && (fs_attr_run)) { TSK_FS_ATTR_RUN *fs_attr_run_tmp; ssize = fs_attr_run->offset * fs->block_size; fs_attr_run_tmp = fs_attr_run; while (fs_attr_run_tmp) { ssize += (fs_attr_run_tmp->len * fs->block_size); fs_attr_run_tmp = fs_attr_run_tmp->next; } } // update the meta->size value if this is the default $Data attribute if ((fs_file->meta->type == TSK_FS_META_TYPE_REG) && (type == NTFS_ATYPE_DATA) && (name[0] == '\0')) { fs_file->meta->size = ssize; } alen = tsk_getu64(fs->endian, attr->c.nr.alen); /* This can also happen with extended attributes. * set it to what we know about */ if (alen == 0) { alen = ssize; } if (tsk_fs_attr_set_run(fs_file, fs_attr, fs_attr_run, name, type, id_new, ssize, tsk_getu64(fs->endian, attr->c.nr.initsize), alen, data_flag, compsize)) { tsk_error_errstr2_concat("- proc_attrseq: set run"); return TSK_ERR; } // set the special functions if (fs_file->meta->flags & TSK_FS_META_FLAG_COMP) { fs_attr->w = ntfs_attr_walk_special; fs_attr->r = ntfs_file_read_special; } } else { if (tsk_fs_attr_add_run(fs, fs_attr, fs_attr_run)) { tsk_error_errstr2_concat(" - proc_attrseq: put run"); return TSK_ERR; } } } /* * Special Cases, where we grab additional information * regardless if they are resident or not */ /* Standard Information (is always resident) */ if (type == NTFS_ATYPE_SI) { ntfs_attr_si *si; if (attr->res != NTFS_MFT_RES) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_INODE_COR); tsk_error_set_errstr ("proc_attrseq: Standard Information Attribute is not resident!"); return TSK_COR; } si = (ntfs_attr_si *) ((uintptr_t) attr + tsk_getu16(fs->endian, attr->c.r.soff)); fs_file->meta->mtime = nt2unixtime(tsk_getu64(fs->endian, si->mtime)); fs_file->meta->mtime_nano = nt2nano(tsk_getu64(fs->endian, si->mtime)); fs_file->meta->atime = nt2unixtime(tsk_getu64(fs->endian, si->atime)); fs_file->meta->atime_nano = nt2nano(tsk_getu64(fs->endian, si->atime)); fs_file->meta->ctime = nt2unixtime(tsk_getu64(fs->endian, si->ctime)); fs_file->meta->ctime_nano = nt2nano(tsk_getu64(fs->endian, si->ctime)); fs_file->meta->crtime = nt2unixtime(tsk_getu64(fs->endian, si->crtime)); fs_file->meta->crtime_nano = nt2nano(tsk_getu64(fs->endian, si->crtime)); fs_file->meta->uid = tsk_getu32(fs->endian, si->own_id); fs_file->meta->mode |= (TSK_FS_META_MODE_IXUSR | TSK_FS_META_MODE_IXGRP | TSK_FS_META_MODE_IXOTH); if ((tsk_getu32(fs->endian, si->dos) & NTFS_SI_RO) == 0) fs_file->meta->mode |= (TSK_FS_META_MODE_IRUSR | TSK_FS_META_MODE_IRGRP | TSK_FS_META_MODE_IROTH); if ((tsk_getu32(fs->endian, si->dos) & NTFS_SI_HID) == 0) fs_file->meta->mode |= (TSK_FS_META_MODE_IWUSR | TSK_FS_META_MODE_IWGRP | TSK_FS_META_MODE_IWOTH); } /* File Name (always resident) */ else if (type == NTFS_ATYPE_FNAME) { ntfs_attr_fname *fname; TSK_FS_META_NAME_LIST *fs_name; UTF16 *name16; UTF8 *name8; if (attr->res != NTFS_MFT_RES) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_INODE_COR); tsk_error_set_errstr ("proc_attr_seq: File Name Attribute is not resident!"); return TSK_COR; } fname = (ntfs_attr_fname *) ((uintptr_t) attr + tsk_getu16(fs->endian, attr->c.r.soff)); if (fname->nspace == NTFS_FNAME_DOS) { continue; } fs_file->meta->time2.ntfs.fn_mtime = nt2unixtime(tsk_getu64(fs->endian, fname->mtime)); fs_file->meta->time2.ntfs.fn_mtime_nano = nt2nano(tsk_getu64(fs->endian, fname->mtime)); fs_file->meta->time2.ntfs.fn_atime = nt2unixtime(tsk_getu64(fs->endian, fname->atime)); fs_file->meta->time2.ntfs.fn_atime_nano = nt2nano(tsk_getu64(fs->endian, fname->atime)); 
fs_file->meta->time2.ntfs.fn_ctime = nt2unixtime(tsk_getu64(fs->endian, fname->ctime)); fs_file->meta->time2.ntfs.fn_ctime_nano = nt2nano(tsk_getu64(fs->endian, fname->ctime)); fs_file->meta->time2.ntfs.fn_crtime = nt2unixtime(tsk_getu64(fs->endian, fname->crtime)); fs_file->meta->time2.ntfs.fn_crtime_nano = nt2nano(tsk_getu64(fs->endian, fname->crtime)); fs_file->meta->time2.ntfs.fn_id = id; /* Seek to the end of the fs_name structures in TSK_FS_META */ if (fs_file->meta->name2) { for (fs_name = fs_file->meta->name2; (fs_name) && (fs_name->next != NULL); fs_name = fs_name->next) { } /* add to the end of the existing list */ fs_name->next = (TSK_FS_META_NAME_LIST *) tsk_malloc(sizeof(TSK_FS_META_NAME_LIST)); if (fs_name->next == NULL) { return TSK_ERR; } fs_name = fs_name->next; fs_name->next = NULL; } else { /* First name, so we start a list */ fs_file->meta->name2 = fs_name = (TSK_FS_META_NAME_LIST *) tsk_malloc(sizeof(TSK_FS_META_NAME_LIST)); if (fs_name == NULL) { return TSK_ERR; } fs_name->next = NULL; } name16 = (UTF16 *) & fname->name; name8 = (UTF8 *) fs_name->name; retVal = tsk_UTF16toUTF8(fs->endian, (const UTF16 **) &name16, (UTF16 *) ((uintptr_t) name16 + fname->nlen * 2), &name8, (UTF8 *) ((uintptr_t) name8 + sizeof(fs_name->name)), TSKlenientConversion); if (retVal != TSKconversionOK) { if (tsk_verbose) tsk_fprintf(stderr, "proc_attr_seq: Error converting NTFS name in $FNAME to UTF8: %d", retVal); *name8 = '\0'; } /* Make sure it is NULL Terminated */ else if ((uintptr_t) name8 >= (uintptr_t) fs_name->name + sizeof(fs_name->name)) fs_name->name[sizeof(fs_name->name) - 1] = '\0'; else *name8 = '\0'; fs_name->par_inode = tsk_getu48(fs->endian, fname->par_ref); fs_name->par_seq = tsk_getu16(fs->endian, fname->par_seq); } /* If this is an attribute list then we need to process * it to get the list of other entries to read. But, because * of the weird scenario of the $MFT having an attribute list * and not knowing where the other MFT entries are yet, we wait * until the end of the attrseq to process the list and then * we should have the $Data attribute loaded */ else if (type == NTFS_ATYPE_ATTRLIST) { if (fs_attr_attrl) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNSUPFUNC); tsk_error_set_errstr ("Multiple instances of attribute lists in the same MFT\n" "I didn't realize that could happen, contact the developers"); return TSK_ERR; } fs_attr_attrl = tsk_fs_attrlist_get_id(fs_file->meta->attr, NTFS_ATYPE_ATTRLIST, id_new); if (fs_attr_attrl == NULL) { tsk_error_errstr2_concat ("- proc_attrseq: getting attribute list"); return TSK_ERR; } } } /* Are we currently in the process of loading $MFT?
*/ if (ntfs->loading_the_MFT == 1) { /* If we don't even have a mini cached version, get it now * Even if we are not done because of attribute lists, then we * should at least have the head of the list */ if (!ntfs->mft_data) { int cnt, i; // cycle through the attributes cnt = tsk_fs_file_attr_getsize(fs_file); for (i = 0; i < cnt; i++) { const TSK_FS_ATTR *fs_attr = tsk_fs_file_attr_get_idx(fs_file, i); if (!fs_attr) continue; // get the default attribute if ((fs_attr->type == NTFS_ATYPE_DATA) && (fs_attr->name == NULL)) { ntfs->mft_data = fs_attr; break; } } // @@@ Is this needed here -- maybe it should be only in _open if (!ntfs->mft_data) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr ("$Data not found while loading the MFT"); return TSK_ERR; } } /* Update the inode count based on the current size * IF $MFT has an attribute list, this value will increase each * time */ fs->inum_count = ntfs->mft_data->size / ntfs->mft_rsize_b; fs->last_inum = fs->inum_count - 1; } /* If there was an attribute list, process it now, we wait because * the list can contain MFT entries that are described in $Data * of this MFT entry. For example, part of the $DATA attribute * could follow the ATTRLIST entry, so we read it first and then * process the attribute list */ if (fs_attr_attrl) { if (ntfs_proc_attrlist(ntfs, fs_file, fs_attr_attrl)) { return TSK_ERR; } } fs_file->meta->attr_state = TSK_FS_META_ATTR_STUDIED; return TSK_OK; } /******** Attribute List Action and Function ***********/ /* * Attribute lists are used when all of the attribute headers can not * fit into one MFT entry. This contains an entry for every attribute * and where they are located. We process this to get the locations * and then call proc_attrseq on each of those, which adds the data * to the fs_file structure. * * @param ntfs File system being analyzed * @param fs_file Main file that will have attributes added to it. * @param fs_attr_attrlist Attrlist attribute that needs to be parsed. 
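 *
 * As a small worked example of the ID remapping done below (the numbers
 * are illustrative): if the attribute list attribute itself has id 3,
 * no id named in the list is larger than 3, and the list names two
 * attributes that live in extension records, those two attributes are
 * remapped to ids 4 and 5 before ntfs_proc_attrseq is called on the
 * extension records.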
* * Return 1 on error and 0 on success */ static uint8_t ntfs_proc_attrlist(NTFS_INFO * ntfs, TSK_FS_FILE * fs_file, const TSK_FS_ATTR * fs_attr_attrlist) { ntfs_attrlist *list; char *buf; uintptr_t endaddr; TSK_FS_INFO *fs = (TSK_FS_INFO *) & ntfs->fs_info; ntfs_mft *mft; TSK_FS_LOAD_FILE load_file; TSK_INUM_T mftToDo[256]; uint16_t mftToDoCnt = 0; NTFS_ATTRLIST_MAP *map; uint16_t nextid = 0; int a; if (tsk_verbose) tsk_fprintf(stderr, "ntfs_proc_attrlist: Processing entry %" PRIuINUM "\n", fs_file->meta->addr); if ((mft = (ntfs_mft *) tsk_malloc(ntfs->mft_rsize_b)) == NULL) { return 1; } if ((map = (NTFS_ATTRLIST_MAP *) tsk_malloc(sizeof(NTFS_ATTRLIST_MAP))) == NULL) { free(mft); return 1; } /* Clear the contents of the todo buffer */ memset(mftToDo, 0, sizeof(mftToDo)); /* Get a copy of the attribute list stream using the above action */ load_file.left = load_file.total = (size_t) fs_attr_attrlist->size; load_file.base = load_file.cur = buf = tsk_malloc((size_t) fs_attr_attrlist->size); if (buf == NULL) { free(mft); free(map); return 1; } endaddr = (uintptr_t) buf + (uintptr_t) fs_attr_attrlist->size; if (tsk_fs_attr_walk(fs_attr_attrlist, 0, tsk_fs_load_file_action, (void *) &load_file)) { tsk_error_errstr2_concat("- processing attrlist"); free(mft); free(map); return 1; } /* this value should be zero, if not then we didn't read all of the * buffer */ if (load_file.left > 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_FWALK); tsk_error_set_errstr2("processing attrlist of entry %" PRIuINUM, fs_file->meta->addr); free(mft); free(buf); free(map); return 1; } /* The TSK design requres that each attribute have its own ID. * Therefore, we need to identify all of the unique attributes * so that we can assign a unique ID to them. * In this process, we will also identify the unique MFT entries to * process. */ nextid = fs_attr_attrlist->id; // we won't see this entry in the list for (list = (ntfs_attrlist *) buf; (list) && ((uintptr_t) list < endaddr) && (tsk_getu16(fs->endian, list->len) > 0); list = (ntfs_attrlist *) ((uintptr_t) list + tsk_getu16(fs->endian, list->len))) { uint8_t found; int i; TSK_INUM_T mftnum = tsk_getu48(fs->endian, list->file_ref); uint32_t type = tsk_getu32(fs->endian, list->type); uint16_t id = tsk_getu16(fs->endian, list->id); if (tsk_verbose) tsk_fprintf(stderr, "ntfs_proc_attrlist: mft: %" PRIuINUM " type %" PRIu32 " id %" PRIu16 " VCN: %" PRIu64 "\n", mftnum, type, id, tsk_getu64(fs->endian, list->start_vcn)); // keep track of the biggest ID that we saw. if (id > nextid) nextid = id; /* First identify the unique attributes. * we can have duplicate entries at different VCNs. Ignore those. */ found = 0; for (i = 0; i < map->num_used; i++) { if ((map->type[i] == type) && (memcmp(map->name[i], &list->name, list->nlen * 2) == 0)) { found = 1; break; } } // add it to the list if (found == 0) { map->extMft[map->num_used] = mftnum; map->type[map->num_used] = type; map->extId[map->num_used] = id; memcpy(map->name[map->num_used], &list->name, list->nlen * 2); if (map->num_used < 255) map->num_used++; } /* also check the todo list -- skip the base entry * the goal here is to get a unique list of MFT entries * to later process. 
*/ if (mftnum != fs_file->meta->addr) { found = 0; for (i = 0; i < mftToDoCnt; i++) { if (mftToDo[i] == mftnum) { found = 1; break; } } if ((found == 0) && (mftToDoCnt < 256)) { mftToDo[mftToDoCnt++] = mftnum; } } } // update the map and assign unique IDs for (a = 0; a < map->num_used; a++) { // skip the base entry attributes -- they have unique attribute IDs if (map->extMft[a] == fs_file->meta->addr) continue; map->newId[a] = ++nextid; } /* Process the ToDo list & and call ntfs_proc_attr */ for (a = 0; a < mftToDoCnt; a++) { TSK_RETVAL_ENUM retval; /* Sanity check. */ if (mftToDo[a] < ntfs->fs_info.first_inum || mftToDo[a] > ntfs->fs_info.last_inum) { if (tsk_verbose) { /* this case can easily occur if the attribute list was non-resident and the cluster has been reallocated */ tsk_fprintf(stderr, "Invalid MFT file reference (%" PRIuINUM ") in the unallocated attribute list of MFT %" PRIuINUM "", mftToDo[a], fs_file->meta->addr); } continue; } if ((retval = ntfs_dinode_lookup(ntfs, (char *) mft, mftToDo[a])) != TSK_OK) { // if the entry is corrupt, then continue if (retval == TSK_COR) { if (tsk_verbose) tsk_error_print(stderr); tsk_error_reset(); continue; } free(mft); free(map); free(buf); tsk_error_errstr2_concat(" - proc_attrlist"); return 1; } /* verify that this entry refers to the original one */ if (tsk_getu48(fs->endian, mft->base_ref) != fs_file->meta->addr) { /* Before we raise alarms, check if the original was * unallocated. If so, then the list entry could * have been reallocated, so we will just ignore it */ if (((tsk_getu16(fs->endian, mft->flags) & NTFS_MFT_INUSE) == 0) || (fs_file->meta->flags & TSK_FS_META_FLAG_UNALLOC)) { continue; } else { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_INODE_COR); tsk_error_set_errstr("Extension record %" PRIuINUM " (file ref = %" PRIuINUM ") is not for attribute list of %" PRIuINUM "", mftToDo[a], tsk_getu48(fs->endian, mft->base_ref), fs_file->meta->addr); free(mft); free(map); free(buf); return 1; } } /* Process the attribute seq for this MFT entry and add them * to the TSK_FS_META structure */ if ((retval = ntfs_proc_attrseq(ntfs, fs_file, (ntfs_attr *) ((uintptr_t) mft + tsk_getu16(fs->endian, mft->attr_off)), ntfs->mft_rsize_b - tsk_getu16(fs->endian, mft->attr_off), mftToDo[a], map)) != TSK_OK) { if (retval == TSK_COR) { if (tsk_verbose) tsk_error_print(stderr); tsk_error_reset(); continue; } tsk_error_errstr2_concat("- proc_attrlist"); free(mft); free(map); free(buf); return 1; } } free(mft); free(map); free(buf); return 0; } /** * Copy the MFT entry saved in a_buf to the generic structure. * * @param ntfs File system structure that contains entry to copy * @param fs_file Structure to copy processed data to. * @param a_buf MFT structure to copy from. 
Must be of size NTFS_INFO.mft_rsize_b * @param a_mnum MFT entry address * * @returns error code */ static TSK_RETVAL_ENUM ntfs_dinode_copy(NTFS_INFO * ntfs, TSK_FS_FILE * a_fs_file, char *a_buf, TSK_INUM_T a_mnum) { ntfs_attr *attr; TSK_FS_INFO *fs = (TSK_FS_INFO *) & ntfs->fs_info; TSK_RETVAL_ENUM retval; ntfs_mft *mft = (ntfs_mft *) a_buf; if ((a_fs_file == NULL) || (a_fs_file->meta == NULL)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("ntfs_dinode_copy: NULL fs_file given"); return TSK_ERR; } /* if the attributes list has been used previously, then make sure the * flags are cleared */ if (a_fs_file->meta->attr) { tsk_fs_attrlist_markunused(a_fs_file->meta->attr); } else { a_fs_file->meta->attr = tsk_fs_attrlist_alloc(); if (a_fs_file->meta->attr == NULL) return TSK_ERR; } a_fs_file->meta->attr_state = TSK_FS_META_ATTR_EMPTY; /* If there are any name structures allocated, then free 'em */ if (a_fs_file->meta->name2) { TSK_FS_META_NAME_LIST *fs_name1, *fs_name2; fs_name1 = a_fs_file->meta->name2; while (fs_name1) { fs_name2 = fs_name1->next; free(fs_name1); fs_name1 = fs_name2; } a_fs_file->meta->name2 = NULL; } /* Set the a_fs_file->meta values from mft */ a_fs_file->meta->nlink = tsk_getu16(fs->endian, mft->link); a_fs_file->meta->seq = tsk_getu16(fs->endian, mft->seq); a_fs_file->meta->addr = a_mnum; /* Set the mode for file or directory */ if (tsk_getu16(fs->endian, mft->flags) & NTFS_MFT_DIR) a_fs_file->meta->type = TSK_FS_META_TYPE_DIR; else a_fs_file->meta->type = TSK_FS_META_TYPE_REG; a_fs_file->meta->mode = 0; // will be set by proc_attrseq /* the following will be changed once we find the correct attribute, * but initialize them now just in case */ a_fs_file->meta->uid = 0; a_fs_file->meta->gid = 0; a_fs_file->meta->size = 0; a_fs_file->meta->mtime = 0; a_fs_file->meta->mtime_nano = 0; a_fs_file->meta->atime = 0; a_fs_file->meta->atime_nano = 0; a_fs_file->meta->ctime = 0; a_fs_file->meta->ctime_nano = 0; a_fs_file->meta->crtime = 0; a_fs_file->meta->crtime_nano = 0; a_fs_file->meta->time2.ntfs.fn_mtime = 0; a_fs_file->meta->time2.ntfs.fn_mtime_nano = 0; a_fs_file->meta->time2.ntfs.fn_atime = 0; a_fs_file->meta->time2.ntfs.fn_atime_nano = 0; a_fs_file->meta->time2.ntfs.fn_ctime = 0; a_fs_file->meta->time2.ntfs.fn_ctime_nano = 0; a_fs_file->meta->time2.ntfs.fn_crtime = 0; a_fs_file->meta->time2.ntfs.fn_crtime_nano = 0; a_fs_file->meta->time2.ntfs.fn_id = 0; /* add the flags */ a_fs_file->meta->flags = ((tsk_getu16(fs->endian, mft->flags) & NTFS_MFT_INUSE) ? TSK_FS_META_FLAG_ALLOC : TSK_FS_META_FLAG_UNALLOC); /* Process the attribute sequence to fill in the fs_meta->attr * list and the other info such as size and times */ attr = (ntfs_attr *) ((uintptr_t) mft + tsk_getu16(fs->endian, mft->attr_off)); if ((retval = ntfs_proc_attrseq(ntfs, a_fs_file, attr, ntfs->mft_rsize_b - tsk_getu16(fs->endian, mft->attr_off), a_fs_file->meta->addr, NULL)) != TSK_OK) { return retval; } /* The entry has been 'used' if it has attributes */ if ((a_fs_file->meta->attr == NULL) || (a_fs_file->meta->attr->head == NULL) || ((a_fs_file->meta->attr->head->flags & TSK_FS_ATTR_INUSE) == 0)) a_fs_file->meta->flags |= TSK_FS_META_FLAG_UNUSED; else a_fs_file->meta->flags |= TSK_FS_META_FLAG_USED; return 0; } /** \internal * Load the attributes. In NTFS, the attributes are already loaded * so return error values based on current state. * @param a_fs_file File to load attributes for. 
* @returns 1 on error */ static uint8_t ntfs_load_attrs(TSK_FS_FILE * a_fs_file) { if ((a_fs_file == NULL) || (a_fs_file->meta == NULL)) { tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("ntfs_load_attrs: called with NULL pointers"); return 1; } /* Verify the file has attributes */ if (a_fs_file->meta->attr == NULL) { if (a_fs_file->meta->flags & TSK_FS_META_FLAG_UNALLOC) tsk_error_set_errno(TSK_ERR_FS_RECOVER); else tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("ntfs_load_attrs: attributes are NULL"); return 1; } return 0; } /** * Read an MFT entry and save it in the generic TSK_FS_META format. * * @param fs File system to read from. * @param mftnum Address of mft entry to read * @returns 1 on error */ static uint8_t ntfs_inode_lookup(TSK_FS_INFO * fs, TSK_FS_FILE * a_fs_file, TSK_INUM_T mftnum) { NTFS_INFO *ntfs = (NTFS_INFO *) fs; char *mft; uint8_t allocedMeta = 0; // clean up any error messages that are lying around tsk_error_reset(); if (a_fs_file == NULL) { tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("ntfs_inode_lookup: fs_file is NULL"); return 1; } if (a_fs_file->meta == NULL) { a_fs_file->meta = tsk_fs_meta_alloc(NTFS_FILE_CONTENT_LEN); if (a_fs_file->meta == NULL) return 1; allocedMeta = 1; } else { tsk_fs_meta_reset(a_fs_file->meta); } // see if they are looking for the special "orphans" directory if (mftnum == TSK_FS_ORPHANDIR_INUM(fs)) { if (tsk_fs_dir_make_orphan_dir_meta(fs, a_fs_file->meta)) return 1; else return 0; } if ((mft = (char *) tsk_malloc(ntfs->mft_rsize_b)) == NULL) { return 1; } /* Lookup inode and store it in the ntfs structure */ if (ntfs_dinode_lookup(ntfs, mft, mftnum) != TSK_OK) { free(mft); return 1; } /* Copy the structure in ntfs to generic a_fs_file->meta */ if (ntfs_dinode_copy(ntfs, a_fs_file, mft, mftnum) != TSK_OK) { free(mft); return 1; } /* Check if the metadata is the same sequence as the name - if it was already set. * Note that this is not as effecient and elegant as desired, but works for now. * Better design would be to pass sequence into dinode_lookup and have a more * obvious way to pass the desired sequence in. fs_dir_walk_lcl sets the name * before calling this, which motivated this quick fix. */ if ((a_fs_file->name != NULL) && (a_fs_file->name->meta_addr == mftnum)) { /* NTFS Updates the sequence when an entry is deleted and not when * it is allocated. So, if we have a deleted MFT entry, then use * its previous sequence number to compare with the name so that we * still match them up (until the entry is allocated again). */ uint16_t seqToCmp = a_fs_file->meta->seq; if (a_fs_file->meta->flags & TSK_FS_META_FLAG_UNALLOC) { if (a_fs_file->meta->seq > 0) seqToCmp--; } if (a_fs_file->name->meta_seq != seqToCmp) { if (allocedMeta) { tsk_fs_meta_close(a_fs_file->meta); a_fs_file->meta = NULL; } else { tsk_fs_meta_reset(a_fs_file->meta); } } } free((char *) mft); return 0; } /********************************************************************** * * Load special MFT structures into the NTFS_INFO structure * **********************************************************************/ /* The attrdef structure defines the types of attributes and gives a * name value to the type number. 
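 * For example, the standard $AttrDef entries map type 0x10 to the label
 * $STANDARD_INFORMATION and type 0x80 to $DATA; ntfs_attrname_lookup()
 * below returns those labels for the corresponding type numbers.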
* * We currently do not use this during the analysis (Because it has not * historically changed, but we do display it in fsstat * * Return 1 on error and 0 on success */ static uint8_t ntfs_load_attrdef(NTFS_INFO * ntfs) { TSK_FS_FILE *fs_file; const TSK_FS_ATTR *fs_attr; TSK_FS_INFO *fs = &ntfs->fs_info; TSK_FS_LOAD_FILE load_file; /* if already loaded, return now */ if (ntfs->attrdef) return 1; if ((fs_file = tsk_fs_file_open_meta(fs, NULL, NTFS_MFT_ATTR)) == NULL) return 1; fs_attr = tsk_fs_attrlist_get(fs_file->meta->attr, NTFS_ATYPE_DATA); if (!fs_attr) { //("Data attribute not found in $Attr"); tsk_fs_file_close(fs_file); return 1; } // @@@ We need to do a sanity check on the size of fs_attr->size /* Get a copy of the attribute list stream using the above action */ load_file.left = load_file.total = (size_t) fs_attr->size; load_file.base = load_file.cur = tsk_malloc((size_t) fs_attr->size); if (load_file.cur == NULL) { tsk_fs_file_close(fs_file); return 1; } ntfs->attrdef = (ntfs_attrdef *) load_file.base; if (tsk_fs_attr_walk(fs_attr, 0, tsk_fs_load_file_action, (void *) &load_file)) { tsk_error_errstr2_concat(" - load_attrdef"); tsk_fs_file_close(fs_file); free(ntfs->attrdef); ntfs->attrdef = NULL; return 1; } else if (load_file.left > 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_FWALK); tsk_error_set_errstr ("load_attrdef: space still left after walking $Attr data"); tsk_fs_file_close(fs_file); free(ntfs->attrdef); ntfs->attrdef = NULL; return 1; } ntfs->attrdef_len = (size_t) fs_attr->size; tsk_fs_file_close(fs_file); return 0; } /* * return the name of the attribute type. If the attribute has not * been loaded yet, it will be. * * Return 1 on error and 0 on success */ uint8_t ntfs_attrname_lookup(TSK_FS_INFO * fs, uint16_t type, char *name, int len) { NTFS_INFO *ntfs = (NTFS_INFO *) fs; ntfs_attrdef *attrdef; if (!ntfs->attrdef) { if (ntfs_load_attrdef(ntfs)) return 1; } attrdef = ntfs->attrdef; while ( (((uintptr_t) attrdef - (uintptr_t) ntfs->attrdef + sizeof(ntfs_attrdef)) < ntfs->attrdef_len) && (tsk_getu32(fs->endian, attrdef->type))) { if (tsk_getu32(fs->endian, attrdef->type) == type) { UTF16 *name16 = (UTF16 *) attrdef->label; UTF8 *name8 = (UTF8 *) name; int retVal; retVal = tsk_UTF16toUTF8(fs->endian, (const UTF16 **) &name16, (UTF16 *) ((uintptr_t) name16 + sizeof(attrdef->label)), &name8, (UTF8 *) ((uintptr_t) name8 + len), TSKlenientConversion); if (retVal != TSKconversionOK) { if (tsk_verbose) tsk_fprintf(stderr, "attrname_lookup: Error converting NTFS attribute def label to UTF8: %d", retVal); break; } /* Make sure it is NULL Terminated */ else if ((uintptr_t) name8 >= (uintptr_t) name + len) name[len - 1] = '\0'; else *name8 = '\0'; return 0; } attrdef++; } /* If we didn't find it, then call it '?' 
*/ snprintf(name, len, "?"); return 0; } /* Load the block bitmap $Data run and allocate a buffer for a cache * * return 1 on error and 0 on success * */ static uint8_t ntfs_load_bmap(NTFS_INFO * ntfs) { ssize_t cnt = 0; ntfs_attr *attr = NULL; TSK_FS_INFO *fs = NULL; ntfs_mft *mft = NULL; if( ntfs == NULL ) { goto on_error; } fs = &ntfs->fs_info; if ((mft = (ntfs_mft *) tsk_malloc(ntfs->mft_rsize_b)) == NULL) { goto on_error; } /* Get data on the bitmap */ if (ntfs_dinode_lookup(ntfs, (char *) mft, NTFS_MFT_BMAP) != TSK_OK) { goto on_error; } attr = (ntfs_attr *) ((uintptr_t) mft + tsk_getu16(fs->endian, mft->attr_off)); /* cycle through them */ while (((uintptr_t) attr >= (uintptr_t) mft) && ((uintptr_t) attr <= ((uintptr_t) mft + (uintptr_t) ntfs->mft_rsize_b)) && (tsk_getu32(fs->endian, attr->len) > 0 && (tsk_getu32(fs->endian, attr->type) != 0xffffffff) && (tsk_getu32(fs->endian, attr->type) != NTFS_ATYPE_DATA))) { attr = (ntfs_attr *) ((uintptr_t) attr + tsk_getu32(fs->endian, attr->len)); } /* did we get it? */ if (tsk_getu32(fs->endian, attr->type) != NTFS_ATYPE_DATA) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_INODE_COR); tsk_error_set_errstr("Error Finding Bitmap Data Attribute"); goto on_error; } /* convert to generic form */ if ((ntfs_make_data_run(ntfs, tsk_getu64(fs->endian, attr->c.nr.start_vcn), (ntfs_runlist *) ((uintptr_t) attr + tsk_getu16(fs->endian, attr->c.nr.run_off)), &(ntfs->bmap), NULL, NTFS_MFT_BMAP)) != TSK_OK) { goto on_error; } ntfs->bmap_buf = (char *) tsk_malloc(fs->block_size); if (ntfs->bmap_buf == NULL) { goto on_error; } /* Load the first cluster so that we have something there */ ntfs->bmap_buf_off = 0; // Check ntfs->bmap before it is accessed. if( ntfs->bmap == NULL ) { goto on_error; } if (ntfs->bmap->addr > fs->last_block) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr ("ntfs_load_bmap: Bitmap too large for image size: %" PRIuDADDR "", ntfs->bmap->addr); goto on_error; } cnt = tsk_fs_read_block(fs, ntfs->bmap->addr, ntfs->bmap_buf, fs->block_size); if (cnt != fs->block_size) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2("ntfs_load_bmap: Error reading block at %" PRIuDADDR, ntfs->bmap->addr); goto on_error; } free( mft ); return 0; on_error: if( mft != NULL ) { free( mft ); } return 1; } /* * Load the VOLUME MFT entry and the VINFO attribute so that we * can identify the volume version of this. 
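 * The mapping applied below is: $VOLUME_INFORMATION major.minor 1.2 is
 * Windows NT, 3.0 is Windows 2000, and 3.1 is Windows XP (later Windows
 * releases also report 3.1).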
* * Return 1 on error and 0 on success */ static uint8_t ntfs_load_ver(NTFS_INFO * ntfs) { TSK_FS_INFO *fs = (TSK_FS_INFO *) & ntfs->fs_info; TSK_FS_FILE *fs_file; const TSK_FS_ATTR *fs_attr; if ((fs_file = tsk_fs_file_open_meta(fs, NULL, NTFS_MFT_VOL)) == NULL) { return 1; } /* cache the data attribute */ fs_attr = tsk_fs_attrlist_get(fs_file->meta->attr, NTFS_ATYPE_VINFO); if (!fs_attr) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_INODE_COR); tsk_error_set_errstr("Volume Info attribute not found in $Volume"); tsk_fs_file_close(fs_file); return 1; } if ((fs_attr->flags & TSK_FS_ATTR_RES) && (fs_attr->size)) { ntfs_attr_vinfo *vinfo = (ntfs_attr_vinfo *) fs_attr->rd.buf; if ((vinfo->maj_ver == 1) && (vinfo->min_ver == 2)) { ntfs->ver = NTFS_VINFO_NT; } else if ((vinfo->maj_ver == 3) && (vinfo->min_ver == 0)) { ntfs->ver = NTFS_VINFO_2K; } else if ((vinfo->maj_ver == 3) && (vinfo->min_ver == 1)) { ntfs->ver = NTFS_VINFO_XP; } else { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr("unknown version: %d.%d\n", vinfo->maj_ver, vinfo->min_ver); tsk_fs_file_close(fs_file); return 1; } } else { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr ("load_version: VINFO is a non-resident attribute"); return 1; } tsk_fs_file_close(fs_file); return 0; } #if TSK_USE_SID /** \internal * Prints the value of sds into the a_sidstr string in ASCII form. This will allocate a new buffer for the * string, so a_sidstr should not point to a buffer. Output is in format of: * S-R-I-S-S... with 'R' being revision, 'I' being the identifier authority, and 'S' being subauthority values. * * @param a_fs File system * @param a_sds SDS * @param a_sidstr [out] Pointer that will be assigned to the buffer allocated by this function to store the string. * @returns 1 on error, 0 on success */ static uint8_t ntfs_sds_to_str(TSK_FS_INFO * a_fs, const ntfs_attr_sds * a_sds, char **a_sidstr) { ntfs_sid *sid = NULL; uint32_t owner_offset; *a_sidstr = NULL; if ((a_fs == NULL) || (a_sds == NULL) || (a_sidstr == NULL)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("Invalid argument"); return 1; } owner_offset = tsk_getu32(a_fs->endian, a_sds->self_rel_sec_desc.owner); if (((uintptr_t) & a_sds->self_rel_sec_desc + owner_offset) > ((uintptr_t) a_sds + tsk_getu32(a_fs->endian, a_sds->ent_size))) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_INODE_COR); tsk_error_set_errstr ("ntfs_sds_to_str: owner offset larger than a_sds length"); return 1; } sid = (ntfs_sid *) ((uint8_t *) & a_sds->self_rel_sec_desc + owner_offset); //tsk_fprintf(stderr, "Revision: %i\n", sid->revision); // This check helps not process invalid data, which was noticed while testing // a failing harddrive if (sid->revision == 1) { uint64_t authority = 0; int i, len; char *sid_str_offset = NULL; char *sid_str = NULL; unsigned int sid_str_len; //tsk_fprintf(stderr, "Sub-Authority Count: %i\n", sid->sub_auth_count); authority = 0; for (i = 0; i < 6; i++) authority += (uint64_t) sid->ident_auth[i] << ((5 - i) * 8); //tsk_fprintf(stderr, "NT Authority: %" PRIu64 "\n", authority); // "S-1-AUTH-SUBAUTH-SUBAUTH..." sid_str_len = 4 + 13 + (1 + 10) * sid->sub_auth_count + 1; // Allocate the buffer for the string representation of the SID. 
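        // A worked example of the sizing above (hypothetical SID S-1-5-32-544,
        // i.e. authority 5 with two 32-bit sub-authorities):
        //   4 bytes for "S-1-"
        //   13 bytes reserved for the authority digits
        //   2 * (1 + 10) bytes for "-" plus up to 10 digits per sub-authority
        //   1 byte for the NUL terminator
        // giving sid_str_len = 4 + 13 + 2 * 11 + 1 = 40 bytes.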
if ((sid_str = (char *) tsk_malloc(sid_str_len)) == NULL) { return 1; } len = sprintf(sid_str, "S-1-%" PRIu64, authority); sid_str_offset = sid_str + len; for (i = 0; i < sid->sub_auth_count; i++) { len = sprintf(sid_str_offset, "-%" PRIu32, sid->sub_auth[i]); sid_str_offset += len; } *a_sidstr = sid_str; //tsk_fprintf(stderr, "SID: %s\n", sid_str); } else { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr("ntfs_sds_to_str: Invalid SID revision (%d)", sid->revision); return 1; // Invalid revision number in the SID. } return 0; } /** \internal * Maps a security id value from a file to its SDS structure * * Note: This routine assumes &ntfs->sid_lock is locked by the caller. * * @param fs File system * @param secid Security Id to find SDS for. * @returns NULL on error */ static const ntfs_attr_sds * ntfs_get_sds(TSK_FS_INFO * fs, uint32_t secid) { uint32_t i = 0; NTFS_INFO *ntfs = (NTFS_INFO *) fs; ntfs_attr_sii *sii = NULL; ntfs_attr_sds *sds = NULL; uint32_t sii_secid = 0; uint32_t sds_secid = 0; uint32_t sii_sechash = 0; uint32_t sds_sechash = 0; uint64_t sds_file_off = 0; //uint32_t sds_ent_size = 0; uint64_t sii_sds_file_off = 0; uint32_t sii_sds_ent_size = 0; if ((fs == NULL) || (secid == 0)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("Invalid argument"); return NULL; } // Loop through all the SII entries looking for the security id matching that found in the file. // This lookup is obviously O(n^2) for all n files. However, since so many files have the exact // same security identifier, it is not really that bad. In reality, 100,000 files may only map to // 10,000 security identifiers. Since SII entries are 0x28 bytes each and security identifiers // increase incrementally, we could go directly to the entry in question ((secid * 0x28) + 256). // SII entries started at 256 on Vista; however, I did not look at the starting secid for other // versions of NTFS. for (i = 0; i < ntfs->sii_data.used; i++) { if (tsk_getu32(fs->endian, ((ntfs_attr_sii *) (ntfs->sii_data.buffer))[i]. key_sec_id) == secid) { sii = &((ntfs_attr_sii *) (ntfs->sii_data.buffer))[i]; break; } } if (sii == NULL) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr("ntfs_get_sds: SII entry not found (%" PRIu32 ")", secid); return NULL; } sii_secid = tsk_getu32(fs->endian, sii->key_sec_id); sii_sechash = tsk_getu32(fs->endian, sii->data_hash_sec_desc); sii_sds_file_off = tsk_getu64(fs->endian, sii->sec_desc_off); sii_sds_ent_size = tsk_getu32(fs->endian, sii->sec_desc_size); // Check that we do not go out of bounds. if ((uint32_t) sii_sds_file_off > ntfs->sds_data.size) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr("ntfs_get_sds: SII offset too large (%" PRIu64 ")", sii_sds_file_off); return NULL; } else if (!sii_sds_ent_size) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr("ntfs_get_sds: SII entry size is invalid (%" PRIu32 ")", sii_sds_ent_size); return NULL; } sds = (ntfs_attr_sds *) ((uint8_t *) ntfs->sds_data.buffer + sii_sds_file_off); sds_secid = tsk_getu32(fs->endian, sds->sec_id); sds_sechash = tsk_getu32(fs->endian, sds->hash_sec_desc); sds_file_off = tsk_getu64(fs->endian, sds->file_off); //sds_ent_size = tsk_getu32(fs->endian, sds->ent_size); // Sanity check to make sure the $SII entry points to // the correct $SDS entry. 
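    // Both structures carry the same identifying fields: the $SII index entry
    // stores the security id, the hash of the descriptor, and the offset and
    // size of the descriptor in $SDS, while the $SDS entry header repeats the
    // id, the hash, and its own file offset.  If the two copies disagree, the
    // index entry is stale or corrupt and the lookup fails below.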
if ((sds_secid == sii_secid) && (sds_sechash == sii_sechash) && (sds_file_off == sii_sds_file_off) //&& (sds_ent_size == sii_sds_ent_size) ) { return sds; } else { if (tsk_verbose) tsk_fprintf(stderr, "ntfs_get_sds: entry found was for wrong Security ID (%" PRIu32 " vs %" PRIu32 ")\n", sds_secid, sii_secid); // if (sii_secid != 0) { // There is obviously a mismatch between the information in the SII entry and that in the SDS entry. // After looking at these mismatches, it appears there is not a pattern. Perhaps some entries have been reused. //printf("\nsecid %d hash %x offset %I64x size %x\n", sii_secid, sii_sechash, sii_sds_file_off, sii_sds_ent_size); //printf("secid %d hash %x offset %I64x size %x\n", sds_secid, sds_sechash, sds_file_off, sds_ent_size); // } } tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr("ntfs_get_sds: Got to end w/out data"); return NULL; } #endif /** \internal * NTFS-specific function (pointed to in FS_INFO) that maps a security ID * to an ASCII printable string. * Read the contents of the STANDARD_INFORMATION attribute of a file * to get the security id. Once we have the security id, we will * search $Secure:$SII to find a matching security id. That $SII entry * will contain the offset within the $SDS stream for the $SDS entry, * which contains the owner SID * * @param a_fs_file File to get security info on * @param sid_str [out] location where string representation of security info will be stored. Caller must free the string. * @returns 1 on error */ static uint8_t ntfs_file_get_sidstr(TSK_FS_FILE * a_fs_file, char **sid_str) { #if TSK_USE_SID const TSK_FS_ATTR *fs_data; ntfs_attr_si *si; const ntfs_attr_sds *sds; NTFS_INFO *ntfs = (NTFS_INFO *) a_fs_file->fs_info; *sid_str = NULL; if (!a_fs_file->meta->attr) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr ("ntfs_file_get_sidstr: file argument has no meta data"); return 1; } // Read STANDARD_INFORMATION attribute for the security id of the file. fs_data = tsk_fs_attrlist_get(a_fs_file->meta->attr, TSK_FS_ATTR_TYPE_NTFS_SI); if (!fs_data) { tsk_error_set_errstr2("- ntfs_file_get_sidstr:SI attribute"); return 1; } si = (ntfs_attr_si *) fs_data->rd.buf; if (!si) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_GENFS); tsk_error_set_errstr("ntfs_file_get_sidstr: SI buf is NULL"); return 1; } tsk_take_lock(&ntfs->sid_lock); // sds points inside ntfs->sds_data, which we've just locked sds = ntfs_get_sds(a_fs_file->fs_info, tsk_getu32(a_fs_file->fs_info->endian, si->sec_id)); if (!sds) { tsk_release_lock(&ntfs->sid_lock); tsk_error_set_errstr2("- ntfs_file_get_sidstr:SI attribute"); return 1; } if (ntfs_sds_to_str(a_fs_file->fs_info, sds, sid_str)) { tsk_release_lock(&ntfs->sid_lock); tsk_error_set_errstr2("- ntfs_file_get_sidstr:SI attribute"); return 1; } tsk_release_lock(&ntfs->sid_lock); return 0; #else *sid_str = NULL; tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNSUPFUNC); tsk_error_set_errstr("Unsupported function"); return 1; #endif } #if TSK_USE_SID /** \internal * Process all the $SII entries into a single array by removing all the Attribute Headers. * Note: This routine assumes &ntfs->sid_lock is locked by the caller. 
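 *
 * The raw stream is a sequence of index records, each ntfs->idx_rsize_b bytes
 * long.  Each record is walked from list.begin_off to list.bufend_off and the
 * fixed-size ntfs_attr_sii entries are copied back to back into
 * ntfs->sii_data.buffer, so afterwards the buffer can be scanned as a flat
 * array of sii_data.used entries.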
* @param fs File system structure to store results into * @param sii_buffer Buffer of raw $SII entries to parse */ static void ntfs_proc_sii(TSK_FS_INFO * fs, NTFS_SXX_BUFFER * sii_buffer) { unsigned int total_bytes_processed = 0; unsigned int idx_buffer_length = 0; unsigned int sii_buffer_offset = 0; NTFS_INFO *ntfs = (NTFS_INFO *) fs; ntfs_attr_sii *sii; if ((fs == NULL) || (sii_buffer == NULL) || (ntfs->sii_data.buffer == NULL)) return; /* Loop by cluster size */ for (sii_buffer_offset = 0; sii_buffer_offset < sii_buffer->size; sii_buffer_offset += ntfs->idx_rsize_b) { ntfs_idxrec *idxrec = (ntfs_idxrec *) & sii_buffer->buffer[sii_buffer_offset]; idx_buffer_length = tsk_getu32(fs->endian, idxrec->list.bufend_off); // get pointer to first record sii = (ntfs_attr_sii *) ((uintptr_t) & idxrec->list + tsk_getu32(fs->endian, idxrec->list.begin_off)); total_bytes_processed = (uint8_t) ((uintptr_t) sii - (uintptr_t) idxrec); // copy records into NTFS_INFO do { /* if ((tsk_getu16(fs->endian,sii->size) == 0x14) && (tsk_getu16(fs->endian,sii->data_off) == 0x14) && (tsk_getu16(fs->endian,sii->ent_size) == 0x28) ) { */ memcpy(ntfs->sii_data.buffer + (ntfs->sii_data.used * sizeof(ntfs_attr_sii)), sii, sizeof(ntfs_attr_sii)); ntfs->sii_data.used++; /* printf("Security id %d is at offset 0x%I64x for 0x%x bytes\n", tsk_getu32(fs->endian,sii->key_sec_id), tsk_getu64(fs->endian,sii->sec_desc_off), tsk_getu32(fs->endian,sii->sec_desc_size)); } else { printf("\n\tOffset to data %x Size of data %x Size of Index entry %x\n", tsk_getu16(fs->endian,sii->data_off), tsk_getu16(fs->endian,sii->size), tsk_getu16(fs->endian,sii->ent_size)); printf("\tSecurity id %d is at offset 0x%I64x for 0x%x bytes\n\n", tsk_getu32(fs->endian,sii->key_sec_id), tsk_getu64(fs->endian,sii->sec_desc_off), tsk_getu32(fs->endian,sii->sec_desc_size)); } */ sii++; total_bytes_processed += sizeof(ntfs_attr_sii); } while (total_bytes_processed + sizeof(ntfs_attr_sii) <= idx_buffer_length); } } /* * Load the $Secure attributes so that we can identify the user. * * Note: This routine is called only from ntfs_open and therefore does * not need to lock ntfs->sid_lock. * * @returns 1 on error (which occurs only if malloc or other system error). */ static uint8_t ntfs_load_secure(NTFS_INFO * ntfs) { TSK_FS_INFO *fs = (TSK_FS_INFO *) & ntfs->fs_info; TSK_FS_META *fs_meta = NULL; const TSK_FS_ATTR *fs_attr_sds = NULL; const TSK_FS_ATTR *fs_attr_sii = NULL; NTFS_SXX_BUFFER sii_buffer; TSK_FS_FILE *secure = NULL; ssize_t cnt; ntfs->sii_data.buffer = NULL; ntfs->sii_data.size = 0; ntfs->sii_data.used = 0; ntfs->sds_data.buffer = NULL; ntfs->sds_data.size = 0; ntfs->sds_data.used = 0; // Open $Secure. The $SDS stream contains all the security descriptors // and is indexed by $SII and $SDH. secure = tsk_fs_file_open_meta(fs, NULL, NTFS_MFT_SECURE); if (!secure) { if (tsk_verbose) tsk_fprintf(stderr, "ntfs_load_secure: error opening $Secure file: %s\n", tsk_error_get_errstr()); tsk_error_reset(); return 0; } // Make sure the TSK_FS_META is not NULL. We need it to get the // $SII and $SDH attributes. fs_meta = secure->meta; if (!fs_meta) { if (tsk_verbose) tsk_fprintf(stderr, "ntfs_load_secure: $Secure file has no attributes\n"); tsk_error_reset(); tsk_fs_file_close(secure); return 0; } // Get the $SII attribute. 
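    // $Secure keeps two indexes over the descriptors: $SII (keyed by security
    // id) and $SDH (keyed by descriptor hash).  Lookups in this code are
    // always by the security id stored in $STANDARD_INFORMATION, so only the
    // $SII index allocation stream is loaded.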
fs_attr_sii = tsk_fs_attrlist_get_name_type(fs_meta->attr, NTFS_ATYPE_IDXALLOC, "$SII\0"); if (!fs_attr_sii) { if (tsk_verbose) tsk_fprintf(stderr, "ntfs_load_secure: error getting $Secure:$SII IDX_ALLOC attribute\n"); tsk_error_reset(); tsk_fs_file_close(secure); return 0; } // Get the $SDS attribute. fs_attr_sds = tsk_fs_attrlist_get(fs_meta->attr, NTFS_ATYPE_DATA); if (!fs_attr_sds) { if (tsk_verbose) tsk_fprintf(stderr, "ntfs_load_secure: error getting $Secure:$SDS $Data attribute\n"); tsk_error_reset(); tsk_fs_file_close(secure); return 0; } /* First we read in $SII to a local buffer adn then process it into NTFS_INFO */ // Allocate local space for the entire $SII stream. sii_buffer.size = (size_t) roundup(fs_attr_sii->size, fs->block_size); sii_buffer.used = 0; // arbitrary check because we had problems before with alloc too much memory if (sii_buffer.size > 64000000) { if (tsk_verbose) tsk_fprintf(stderr, "ntfs_load_secure: sii_buffer.size is too large: %z\n", sii_buffer.size); return 0; } if ((sii_buffer.buffer = tsk_malloc(sii_buffer.size)) == NULL) { return 1; } // Read in the raw $SII stream. cnt = tsk_fs_attr_read(fs_attr_sii, 0, sii_buffer.buffer, sii_buffer.size, TSK_FS_FILE_READ_FLAG_NONE); if (cnt != sii_buffer.size) { if (tsk_verbose) tsk_fprintf(stderr, "ntfs_load_secure: error reading $Secure:$SII attribute: %s\n", tsk_error_get_errstr()); tsk_error_reset(); free(sii_buffer.buffer); tsk_fs_file_close(secure); return 0; } // allocate the structure for the processed version of the data ntfs->sii_data.used = 0; // use this to count the number of $SII entries if ((ntfs->sii_data.buffer = (char *) tsk_malloc(sii_buffer.size)) == NULL) { free(sii_buffer.buffer); tsk_fs_file_close(secure); return 1; } ntfs->sii_data.size = sii_buffer.size; // parse sii_buffer into ntfs->sii_data. ntfs_proc_sii(fs, &sii_buffer); free(sii_buffer.buffer); /* Now we copy $SDS into NTFS_INFO. We do not do any processing in this step. */ // Allocate space for the entire $SDS stream with all the security // descriptors. We should be able to use the $SII offset to index // into the $SDS stream. ntfs->sds_data.size = (size_t) fs_attr_sds->size; // arbitrary check because we had problems before with alloc too much memory if (ntfs->sds_data.size > 64000000) { if (tsk_verbose) tsk_fprintf(stderr, "ntfs_load_secure: ntfs->sds_data.size is too large: %z\n", ntfs->sds_data.size); free(ntfs->sii_data.buffer); ntfs->sii_data.buffer = NULL; ntfs->sii_data.used = 0; ntfs->sii_data.size = 0; tsk_fs_file_close(secure); return 0; } ntfs->sds_data.used = 0; if ((ntfs->sds_data.buffer = (char *) tsk_malloc(ntfs->sds_data.size)) == NULL) { free(ntfs->sii_data.buffer); ntfs->sii_data.buffer = NULL; ntfs->sii_data.used = 0; ntfs->sii_data.size = 0; tsk_fs_file_close(secure); return 1; } // Read in the raw $SDS ($DATA) stream. 
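    // The whole unnamed $DATA stream is copied into memory so that
    // ntfs_get_sds() can index straight into it with the byte offset stored
    // in the matching $SII entry.  A short or failed read is non-fatal: SID
    // lookups are simply disabled and ntfs_open() continues.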
cnt = tsk_fs_attr_read(fs_attr_sds, 0, ntfs->sds_data.buffer, ntfs->sds_data.size, TSK_FS_FILE_READ_FLAG_NONE); if (cnt != ntfs->sds_data.size) { if (tsk_verbose) tsk_fprintf(stderr, "ntfs_load_secure: error reading $Secure:$SDS attribute: %s\n", tsk_error_get_errstr()); tsk_error_reset(); free(ntfs->sii_data.buffer); ntfs->sii_data.buffer = NULL; ntfs->sii_data.used = 0; ntfs->sii_data.size = 0; free(ntfs->sds_data.buffer); ntfs->sds_data.buffer = NULL; ntfs->sds_data.used = 0; ntfs->sds_data.size = 0; tsk_fs_file_close(secure); return 0; } tsk_fs_file_close(secure); return 0; } #endif /********************************************************************** * * Exported Walk Functions * **********************************************************************/ static TSK_FS_BLOCK_FLAG_ENUM ntfs_block_getflags(TSK_FS_INFO * a_fs, TSK_DADDR_T a_addr) { NTFS_INFO *ntfs = (NTFS_INFO *) a_fs; int retval; int flags = 0; /* identify if the cluster is allocated or not */ retval = is_clustalloc(ntfs, a_addr); if (retval == 1) flags = TSK_FS_BLOCK_FLAG_ALLOC; else if (retval == 0) flags = TSK_FS_BLOCK_FLAG_UNALLOC; return flags; } /* * flags: TSK_FS_BLOCK_FLAG_ALLOC and FS_FLAG_UNALLOC * * @@@ We should probably consider some data META, but it is tough with * the NTFS design ... */ static uint8_t ntfs_block_walk(TSK_FS_INFO * fs, TSK_DADDR_T a_start_blk, TSK_DADDR_T a_end_blk, TSK_FS_BLOCK_WALK_FLAG_ENUM a_flags, TSK_FS_BLOCK_WALK_CB a_action, void *a_ptr) { char *myname = "ntfs_block_walk"; NTFS_INFO *ntfs = (NTFS_INFO *) fs; TSK_DADDR_T addr; TSK_FS_BLOCK *fs_block; // clean up any error messages that are lying around tsk_error_reset(); /* * Sanity checks. */ if (a_start_blk < fs->first_block || a_start_blk > fs->last_block) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); tsk_error_set_errstr("%s: start block: %" PRIuDADDR "", myname, a_start_blk); return 1; } else if (a_end_blk < fs->first_block || a_end_blk > fs->last_block) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); tsk_error_set_errstr("%s: last block: %" PRIuDADDR "", myname, a_end_blk); return 1; } /* Sanity check on a_flags -- make sure at least one ALLOC is set */ if (((a_flags & TSK_FS_BLOCK_WALK_FLAG_ALLOC) == 0) && ((a_flags & TSK_FS_BLOCK_WALK_FLAG_UNALLOC) == 0)) { a_flags |= (TSK_FS_BLOCK_WALK_FLAG_ALLOC | TSK_FS_BLOCK_WALK_FLAG_UNALLOC); } if (((a_flags & TSK_FS_BLOCK_WALK_FLAG_META) == 0) && ((a_flags & TSK_FS_BLOCK_WALK_FLAG_CONT) == 0)) { a_flags |= (TSK_FS_BLOCK_WALK_FLAG_CONT | TSK_FS_BLOCK_WALK_FLAG_META); } if ((fs_block = tsk_fs_block_alloc(fs)) == NULL) { return 1; } /* Cycle through the blocks */ for (addr = a_start_blk; addr <= a_end_blk; addr++) { int retval; int myflags; /* identify if the cluster is allocated or not */ retval = is_clustalloc(ntfs, addr); if (retval == -1) { tsk_fs_block_free(fs_block); return 1; } else if (retval == 1) { myflags = TSK_FS_BLOCK_FLAG_ALLOC; } else { myflags = TSK_FS_BLOCK_FLAG_UNALLOC; } // test if we should call the callback with this one if ((myflags & TSK_FS_BLOCK_FLAG_ALLOC) && (!(a_flags & TSK_FS_BLOCK_WALK_FLAG_ALLOC))) continue; else if ((myflags & TSK_FS_BLOCK_FLAG_UNALLOC) && (!(a_flags & TSK_FS_BLOCK_WALK_FLAG_UNALLOC))) continue; if (a_flags & TSK_FS_BLOCK_WALK_FLAG_AONLY) myflags |= TSK_FS_BLOCK_FLAG_AONLY; if (tsk_fs_block_get_flag(fs, fs_block, addr, (TSK_FS_BLOCK_FLAG_ENUM) myflags) == NULL) { tsk_error_set_errstr2 ("ntfs_block_walk: Error reading block at %" PRIuDADDR, addr); tsk_fs_block_free(fs_block); return 1; } retval = 
a_action(fs_block, a_ptr); if (retval == TSK_WALK_STOP) { break; } else if (retval == TSK_WALK_ERROR) { tsk_fs_block_free(fs_block); return 1; } } tsk_fs_block_free(fs_block); return 0; } /* * inode_walk * * Flags: TSK_FS_META_FLAG_ALLOC, TSK_FS_META_FLAG_UNALLOC, * TSK_FS_META_FLAG_USED, TSK_FS_META_FLAG_UNUSED, TSK_FS_META_FLAG_ORPHAN * * Note that with ORPHAN, entries will be found that can also be * found by searching based on parent directories (if parent directory is * known) */ static uint8_t ntfs_inode_walk(TSK_FS_INFO * fs, TSK_INUM_T start_inum, TSK_INUM_T end_inum, TSK_FS_META_FLAG_ENUM flags, TSK_FS_META_WALK_CB a_action, void *ptr) { NTFS_INFO *ntfs = (NTFS_INFO *) fs; int myflags; TSK_INUM_T mftnum; TSK_FS_FILE *fs_file; TSK_INUM_T end_inum_tmp; ntfs_mft *mft; /* * Sanity checks. */ if (start_inum < fs->first_inum) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); tsk_error_set_errstr ("inode_walk: Starting inode number is too small (%" PRIuINUM ")", start_inum); return 1; } if (start_inum > fs->last_inum) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); tsk_error_set_errstr ("inode_walk: Starting inode number is too large (%" PRIuINUM ")", start_inum); return 1; } if (end_inum < fs->first_inum) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); tsk_error_set_errstr ("inode_walk: Ending inode number is too small (%" PRIuINUM ")", end_inum); return 1; } if (end_inum > fs->last_inum) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); tsk_error_set_errstr("Ending inode number is too large (%" PRIuINUM ")", end_inum); return 1; } /* If ORPHAN is wanted, then make sure that the flags are correct */ if (flags & TSK_FS_META_FLAG_ORPHAN) { flags |= TSK_FS_META_FLAG_UNALLOC; flags &= ~TSK_FS_META_FLAG_ALLOC; flags |= TSK_FS_META_FLAG_USED; flags &= ~TSK_FS_META_FLAG_UNUSED; } else { if (((flags & TSK_FS_META_FLAG_ALLOC) == 0) && ((flags & TSK_FS_META_FLAG_UNALLOC) == 0)) { flags |= (TSK_FS_META_FLAG_ALLOC | TSK_FS_META_FLAG_UNALLOC); } /* If neither of the USED or UNUSED flags are set, then set them * both */ if (((flags & TSK_FS_META_FLAG_USED) == 0) && ((flags & TSK_FS_META_FLAG_UNUSED) == 0)) { flags |= (TSK_FS_META_FLAG_USED | TSK_FS_META_FLAG_UNUSED); } } /* If we are looking for orphan files and have not yet filled * in the list of unalloc inodes that are pointed to, then fill * in the list * */ if ((flags & TSK_FS_META_FLAG_ORPHAN)) { if (tsk_fs_dir_load_inum_named(fs) != TSK_OK) { tsk_error_errstr2_concat ("- ntfs_inode_walk: identifying inodes allocated by file names"); return 1; } } if ((fs_file = tsk_fs_file_alloc(fs)) == NULL) return 1; if ((fs_file->meta = tsk_fs_meta_alloc(NTFS_FILE_CONTENT_LEN)) == NULL) { // JRB: Coverity CID: 348 if (fs_file) tsk_fs_file_close(fs_file); return 1; } if ((mft = (ntfs_mft *) tsk_malloc(ntfs->mft_rsize_b)) == NULL) { tsk_fs_file_close(fs_file); return 1; } // we need to handle fs->last_inum specially because it is for the // virtual ORPHANS directory. Handle it outside of the loop. 
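    // For example (hypothetical numbers): if fs->last_inum is 10000 and the
    // caller walks up through 10000, entry 10000 is the virtual orphan-files
    // directory rather than a real MFT record, so the loop below stops at
    // 9999 and the orphan entry is reported afterwards if the flags allow it.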
if (end_inum == TSK_FS_ORPHANDIR_INUM(fs)) end_inum_tmp = end_inum - 1; else end_inum_tmp = end_inum; for (mftnum = start_inum; mftnum <= end_inum_tmp; mftnum++) { int retval; TSK_RETVAL_ENUM retval2; /* read MFT entry in to NTFS_INFO */ if ((retval2 = ntfs_dinode_lookup(ntfs, (char *) mft, mftnum)) != TSK_OK) { // if the entry is corrupt, then skip to the next one if (retval2 == TSK_COR) { if (tsk_verbose) tsk_error_print(stderr); tsk_error_reset(); continue; } tsk_fs_file_close(fs_file); free(mft); return 1; } /* we only want to look at base file records * (extended are because the base could not fit into one) */ if (tsk_getu48(fs->endian, mft->base_ref) != NTFS_MFT_BASE) continue; /* NOTE: We could add a sanity check here with the MFT bitmap * to validate of the INUSE flag and bitmap are in agreement */ /* check flags */ myflags = ((tsk_getu16(fs->endian, mft->flags) & NTFS_MFT_INUSE) ? TSK_FS_META_FLAG_ALLOC : TSK_FS_META_FLAG_UNALLOC); /* If we want only orphans, then check if this * inode is in the seen list * */ if ((myflags & TSK_FS_META_FLAG_UNALLOC) && (flags & TSK_FS_META_FLAG_ORPHAN) && (tsk_fs_dir_find_inum_named(fs, mftnum))) { continue; } /* copy into generic format */ if ((retval = ntfs_dinode_copy(ntfs, fs_file, (char *) mft, mftnum)) != TSK_OK) { // continue on if there were only corruption problems if (retval == TSK_COR) { if (tsk_verbose) tsk_error_print(stderr); tsk_error_reset(); continue; } tsk_fs_file_close(fs_file); free(mft); return 1; } myflags |= (fs_file->meta->flags & (TSK_FS_META_FLAG_USED | TSK_FS_META_FLAG_UNUSED)); if ((flags & myflags) != myflags) continue; /* call action */ retval = a_action(fs_file, ptr); if (retval == TSK_WALK_STOP) { tsk_fs_file_close(fs_file); free(mft); return 0; } else if (retval == TSK_WALK_ERROR) { tsk_fs_file_close(fs_file); free(mft); return 1; } } // handle the virtual orphans folder if they asked for it if ((end_inum == TSK_FS_ORPHANDIR_INUM(fs)) && (flags & TSK_FS_META_FLAG_ALLOC) && (flags & TSK_FS_META_FLAG_USED)) { int retval; if (tsk_fs_dir_make_orphan_dir_meta(fs, fs_file->meta)) { tsk_fs_file_close(fs_file); free(mft); return 1; } /* call action */ retval = a_action(fs_file, ptr); if (retval == TSK_WALK_STOP) { tsk_fs_file_close(fs_file); free(mft); return 0; } else if (retval == TSK_WALK_ERROR) { tsk_fs_file_close(fs_file); free(mft); return 1; } } tsk_fs_file_close(fs_file); free((char *) mft); return 0; } static uint8_t ntfs_fscheck(TSK_FS_INFO * fs, FILE * hFile) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNSUPFUNC); tsk_error_set_errstr("fscheck not implemented for NTFS yet"); return 1; } /** * Print details about the file system to a file handle. 
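 * The output is plain text: the volume serial number, OEM name, volume label
 * and NTFS version, the MFT layout, the cluster and sector sizes, and a dump
 * of the $AttrDef table (attribute names, type numbers, size limits and
 * flags).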
* * @param fs File system to print details on * @param hFile File handle to print text to * * @returns 1 on error and 0 on success */ static uint8_t ntfs_fsstat(TSK_FS_INFO * fs, FILE * hFile) { NTFS_INFO *ntfs = (NTFS_INFO *) fs; TSK_FS_FILE *fs_file; const TSK_FS_ATTR *fs_attr; char asc[512]; ntfs_attrdef *attrdeftmp; tsk_fprintf(hFile, "FILE SYSTEM INFORMATION\n"); tsk_fprintf(hFile, "--------------------------------------------\n"); tsk_fprintf(hFile, "File System Type: NTFS\n"); tsk_fprintf(hFile, "Volume Serial Number: %.16" PRIX64 "\n", tsk_getu64(fs->endian, ntfs->fs->serial)); tsk_fprintf(hFile, "OEM Name: %c%c%c%c%c%c%c%c\n", ntfs->fs->oemname[0], ntfs->fs->oemname[1], ntfs->fs->oemname[2], ntfs->fs->oemname[3], ntfs->fs->oemname[4], ntfs->fs->oemname[5], ntfs->fs->oemname[6], ntfs->fs->oemname[7]); /* * Volume */ if ((fs_file = tsk_fs_file_open_meta(fs, NULL, NTFS_MFT_VOL)) == NULL) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_INODE_NUM); tsk_error_errstr2_concat (" - fsstat: Error finding Volume MFT Entry"); return 1; } fs_attr = tsk_fs_attrlist_get(fs_file->meta->attr, NTFS_ATYPE_VNAME); if (!fs_attr) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_INODE_COR); tsk_error_set_errstr("Volume Name attribute not found in $Volume"); return 1; } if ((fs_attr->flags & TSK_FS_ATTR_RES) && (fs_attr->size)) { UTF16 *name16 = (UTF16 *) fs_attr->rd.buf; UTF8 *name8 = (UTF8 *) asc; int retVal; retVal = tsk_UTF16toUTF8(fs->endian, (const UTF16 **) &name16, (UTF16 *) ((uintptr_t) name16 + (int) fs_attr->size), &name8, (UTF8 *) ((uintptr_t) name8 + sizeof(asc)), TSKlenientConversion); if (retVal != TSKconversionOK) { if (tsk_verbose) tsk_fprintf(stderr, "fsstat: Error converting NTFS Volume label to UTF8: %d", retVal); *name8 = '\0'; } /* Make sure it is NULL Terminated */ else if ((uintptr_t) name8 >= (uintptr_t) asc + sizeof(asc)) asc[sizeof(asc) - 1] = '\0'; else *name8 = '\0'; tsk_fprintf(hFile, "Volume Name: %s\n", asc); } tsk_fs_file_close(fs_file); fs_file = NULL; fs_attr = NULL; if (ntfs->ver == NTFS_VINFO_NT) tsk_fprintf(hFile, "Version: Windows NT\n"); else if (ntfs->ver == NTFS_VINFO_2K) tsk_fprintf(hFile, "Version: Windows 2000\n"); else if (ntfs->ver == NTFS_VINFO_XP) tsk_fprintf(hFile, "Version: Windows XP\n"); tsk_fprintf(hFile, "\nMETADATA INFORMATION\n"); tsk_fprintf(hFile, "--------------------------------------------\n"); tsk_fprintf(hFile, "First Cluster of MFT: %" PRIu64 "\n", tsk_getu64(fs->endian, ntfs->fs->mft_clust)); tsk_fprintf(hFile, "First Cluster of MFT Mirror: %" PRIu64 "\n", tsk_getu64(fs->endian, ntfs->fs->mftm_clust)); tsk_fprintf(hFile, "Size of MFT Entries: %" PRIu16 " bytes\n", ntfs->mft_rsize_b); tsk_fprintf(hFile, "Size of Index Records: %" PRIu16 " bytes\n", ntfs->idx_rsize_b); tsk_fprintf(hFile, "Range: %" PRIuINUM " - %" PRIuINUM "\n", fs->first_inum, fs->last_inum); tsk_fprintf(hFile, "Root Directory: %" PRIuINUM "\n", fs->root_inum); tsk_fprintf(hFile, "\nCONTENT INFORMATION\n"); tsk_fprintf(hFile, "--------------------------------------------\n"); tsk_fprintf(hFile, "Sector Size: %" PRIu16 "\n", ntfs->ssize_b); tsk_fprintf(hFile, "Cluster Size: %" PRIu16 "\n", ntfs->csize_b); tsk_fprintf(hFile, "Total Cluster Range: %" PRIuDADDR " - %" PRIuDADDR "\n", fs->first_block, fs->last_block); if (fs->last_block != fs->last_block_act) tsk_fprintf(hFile, "Total Range in Image: %" PRIuDADDR " - %" PRIuDADDR "\n", fs->first_block, fs->last_block_act); tsk_fprintf(hFile, "Total Sector Range: 0 - %" PRIu64 "\n", tsk_getu64(fs->endian, 
ntfs->fs->vol_size_s) - 1); /* * Attrdef Info */ tsk_fprintf(hFile, "\n$AttrDef Attribute Values:\n"); if (!ntfs->attrdef) { if (ntfs_load_attrdef(ntfs)) { tsk_fprintf(hFile, "Error loading attribute definitions\n"); goto attrdef_egress; } } attrdeftmp = ntfs->attrdef; while ((((uintptr_t) attrdeftmp - (uintptr_t) ntfs->attrdef + sizeof(ntfs_attrdef)) < ntfs->attrdef_len) && (tsk_getu32(fs->endian, attrdeftmp->type))) { UTF16 *name16 = (UTF16 *) attrdeftmp->label; UTF8 *name8 = (UTF8 *) asc; int retVal; retVal = tsk_UTF16toUTF8(fs->endian, (const UTF16 **) &name16, (UTF16 *) ((uintptr_t) name16 + sizeof(attrdeftmp->label)), &name8, (UTF8 *) ((uintptr_t) name8 + sizeof(asc)), TSKlenientConversion); if (retVal != TSKconversionOK) { if (tsk_verbose) tsk_fprintf(stderr, "fsstat: Error converting NTFS attribute def label to UTF8: %d", retVal); *name8 = '\0'; } /* Make sure it is NULL Terminated */ else if ((uintptr_t) name8 >= (uintptr_t) asc + sizeof(asc)) asc[sizeof(asc) - 1] = '\0'; else *name8 = '\0'; tsk_fprintf(hFile, "%s (%" PRIu32 ") ", asc, tsk_getu32(fs->endian, attrdeftmp->type)); if ((tsk_getu64(fs->endian, attrdeftmp->minsize) == 0) && (tsk_getu64(fs->endian, attrdeftmp->maxsize) == 0xffffffffffffffffULL)) { tsk_fprintf(hFile, "Size: No Limit"); } else { tsk_fprintf(hFile, "Size: %" PRIu64 "-%" PRIu64, tsk_getu64(fs->endian, attrdeftmp->minsize), tsk_getu64(fs->endian, attrdeftmp->maxsize)); } tsk_fprintf(hFile, " Flags: %s%s%s\n", (tsk_getu32(fs->endian, attrdeftmp->flags) & NTFS_ATTRDEF_FLAGS_RES ? "Resident" : ""), (tsk_getu32(fs->endian, attrdeftmp->flags) & NTFS_ATTRDEF_FLAGS_NONRES ? "Non-resident" : ""), (tsk_getu32(fs->endian, attrdeftmp->flags) & NTFS_ATTRDEF_FLAGS_IDX ? ",Index" : "")); attrdeftmp++; } attrdef_egress: return 0; } /************************* istat *******************************/ #define NTFS_PRINT_WIDTH 8 typedef struct { FILE *hFile; int idx; } NTFS_PRINT_ADDR; static TSK_WALK_RET_ENUM print_addr_act(TSK_FS_FILE * fs_file, TSK_OFF_T a_off, TSK_DADDR_T addr, char *buf, size_t size, TSK_FS_BLOCK_FLAG_ENUM flags, void *ptr) { NTFS_PRINT_ADDR *print = (NTFS_PRINT_ADDR *) ptr; tsk_fprintf(print->hFile, "%" PRIuDADDR " ", addr); if (++(print->idx) == NTFS_PRINT_WIDTH) { tsk_fprintf(print->hFile, "\n"); print->idx = 0; } return TSK_WALK_CONT; } /** * Print details on a specific file to a file handle. 
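 * The report covers the MFT entry header (sequence, $LogFile sequence number,
 * link count), the $STANDARD_INFORMATION and $FILE_NAME flags and timestamps
 * (optionally skew-adjusted), $OBJECT_ID and $ATTRIBUTE_LIST contents when
 * present, and a per-attribute listing with cluster runs for non-resident
 * attributes.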
* * @param fs File system file is located in * @param hFile File name to print text to * @param inum Address of file in file system * @param numblock The number of blocks in file to force print (can go beyond file size) * @param sec_skew Clock skew in seconds to also print times in * * @returns 1 on error and 0 on success */ static uint8_t ntfs_istat(TSK_FS_INFO * fs, FILE * hFile, TSK_INUM_T inum, TSK_DADDR_T numblock, int32_t sec_skew) { TSK_FS_FILE *fs_file; const TSK_FS_ATTR *fs_attr; NTFS_INFO *ntfs = (NTFS_INFO *) fs; ntfs_mft *mft; char timeBuf[128]; // clean up any error messages that are lying around tsk_error_reset(); if ((mft = (ntfs_mft *) tsk_malloc(ntfs->mft_rsize_b)) == NULL) { return 1; } if (ntfs_dinode_lookup(ntfs, (char *) mft, inum)) { free(mft); return 1; } if ((fs_file = tsk_fs_file_open_meta(fs, NULL, inum)) == NULL) { tsk_error_errstr2_concat(" - istat"); free(mft); return 1; } tsk_fprintf(hFile, "MFT Entry Header Values:\n"); tsk_fprintf(hFile, "Entry: %" PRIuINUM " Sequence: %" PRIu32 "\n", inum, fs_file->meta->seq); if (tsk_getu48(fs->endian, mft->base_ref) != 0) { tsk_fprintf(hFile, "Base File Record: %" PRIu64 "\n", (uint64_t) tsk_getu48(fs->endian, mft->base_ref)); } tsk_fprintf(hFile, "$LogFile Sequence Number: %" PRIu64 "\n", tsk_getu64(fs->endian, mft->lsn)); tsk_fprintf(hFile, "%sAllocated %s\n", (fs_file->meta->flags & TSK_FS_META_FLAG_ALLOC) ? "" : "Not ", (fs_file->meta->type == TSK_FS_META_TYPE_DIR) ? "Directory" : "File"); tsk_fprintf(hFile, "Links: %u\n", fs_file->meta->nlink); /* STANDARD_INFORMATION info */ fs_attr = tsk_fs_attrlist_get(fs_file->meta->attr, NTFS_ATYPE_SI); if (fs_attr) { ntfs_attr_si *si = (ntfs_attr_si *) fs_attr->rd.buf; char *sid_str; int a = 0; tsk_fprintf(hFile, "\n$STANDARD_INFORMATION Attribute Values:\n"); tsk_fprintf(hFile, "Flags: "); if (tsk_getu32(fs->endian, si->dos) & NTFS_SI_RO) tsk_fprintf(hFile, "%sRead Only", a++ == 0 ? "" : ", "); if (tsk_getu32(fs->endian, si->dos) & NTFS_SI_HID) tsk_fprintf(hFile, "%sHidden", a++ == 0 ? "" : ", "); if (tsk_getu32(fs->endian, si->dos) & NTFS_SI_SYS) tsk_fprintf(hFile, "%sSystem", a++ == 0 ? "" : ", "); if (tsk_getu32(fs->endian, si->dos) & NTFS_SI_ARCH) tsk_fprintf(hFile, "%sArchive", a++ == 0 ? "" : ", "); if (tsk_getu32(fs->endian, si->dos) & NTFS_SI_DEV) tsk_fprintf(hFile, "%sDevice", a++ == 0 ? "" : ", "); if (tsk_getu32(fs->endian, si->dos) & NTFS_SI_NORM) tsk_fprintf(hFile, "%sNormal", a++ == 0 ? "" : ", "); if (tsk_getu32(fs->endian, si->dos) & NTFS_SI_TEMP) tsk_fprintf(hFile, "%sTemporary", a++ == 0 ? "" : ", "); if (tsk_getu32(fs->endian, si->dos) & NTFS_SI_SPAR) tsk_fprintf(hFile, "%sSparse", a++ == 0 ? "" : ", "); if (tsk_getu32(fs->endian, si->dos) & NTFS_SI_REP) tsk_fprintf(hFile, "%sReparse Point", a++ == 0 ? "" : ", "); if (tsk_getu32(fs->endian, si->dos) & NTFS_SI_COMP) tsk_fprintf(hFile, "%sCompressed", a++ == 0 ? "" : ", "); if (tsk_getu32(fs->endian, si->dos) & NTFS_SI_OFF) tsk_fprintf(hFile, "%sOffline", a++ == 0 ? "" : ", "); if (tsk_getu32(fs->endian, si->dos) & NTFS_SI_NOIDX) tsk_fprintf(hFile, "%sNot Content Indexed", a++ == 0 ? "" : ", "); if (tsk_getu32(fs->endian, si->dos) & NTFS_SI_ENC) tsk_fprintf(hFile, "%sEncrypted", a++ == 0 ? "" : ", "); tsk_fprintf(hFile, "\n"); tsk_fprintf(hFile, "Owner ID: %" PRIu32 "\n", tsk_getu32(fs->endian, si->own_id)); #if TSK_USE_SID ntfs_file_get_sidstr(fs_file, &sid_str); tsk_fprintf(hFile, "Security ID: %" PRIu32 " (%s)\n", tsk_getu32(fs->endian, si->sec_id), sid_str ? 
sid_str : ""); free(sid_str); sid_str = NULL; #endif if (tsk_getu32(fs->endian, si->maxver) != 0) { tsk_fprintf(hFile, "Version %" PRIu32 " of %" PRIu32 "\n", tsk_getu32(fs->endian, si->ver), tsk_getu32(fs->endian, si->maxver)); } if (tsk_getu64(fs->endian, si->quota) != 0) { tsk_fprintf(hFile, "Quota Charged: %" PRIu64 "\n", tsk_getu64(fs->endian, si->quota)); } if (tsk_getu64(fs->endian, si->usn) != 0) { tsk_fprintf(hFile, "Last User Journal Update Sequence Number: %" PRIu64 "\n", tsk_getu64(fs->endian, si->usn)); } /* Times - take it from fs_file->meta instead of redoing the work */ if (sec_skew != 0) { tsk_fprintf(hFile, "\nAdjusted times:\n"); if (fs_file->meta->mtime) fs_file->meta->mtime -= sec_skew; if (fs_file->meta->atime) fs_file->meta->atime -= sec_skew; if (fs_file->meta->ctime) fs_file->meta->ctime -= sec_skew; if (fs_file->meta->crtime) fs_file->meta->crtime -= sec_skew; tsk_fprintf(hFile, "Created:\t%s\n", tsk_fs_time_to_str_subsecs(WITHNANO(fs_file->meta->crtime), timeBuf)); tsk_fprintf(hFile, "File Modified:\t%s\n", tsk_fs_time_to_str_subsecs(WITHNANO(fs_file->meta->mtime), timeBuf)); tsk_fprintf(hFile, "MFT Modified:\t%s\n", tsk_fs_time_to_str_subsecs(WITHNANO(fs_file->meta->ctime), timeBuf)); tsk_fprintf(hFile, "Accessed:\t%s\n", tsk_fs_time_to_str_subsecs(WITHNANO(fs_file->meta->atime), timeBuf)); if (fs_file->meta->mtime) fs_file->meta->mtime += sec_skew; if (fs_file->meta->atime) fs_file->meta->atime += sec_skew; if (fs_file->meta->ctime) fs_file->meta->ctime += sec_skew; if (fs_file->meta->crtime) fs_file->meta->crtime += sec_skew; tsk_fprintf(hFile, "\nOriginal times:\n"); } tsk_fprintf(hFile, "Created:\t%s\n", tsk_fs_time_to_str_subsecs(WITHNANO(fs_file->meta->crtime), timeBuf)); tsk_fprintf(hFile, "File Modified:\t%s\n", tsk_fs_time_to_str_subsecs(WITHNANO(fs_file->meta->mtime), timeBuf)); tsk_fprintf(hFile, "MFT Modified:\t%s\n", tsk_fs_time_to_str_subsecs(WITHNANO(fs_file->meta->ctime), timeBuf)); tsk_fprintf(hFile, "Accessed:\t%s\n", tsk_fs_time_to_str_subsecs(WITHNANO(fs_file->meta->atime), timeBuf)); } /* $FILE_NAME Information */ fs_attr = tsk_fs_attrlist_get(fs_file->meta->attr, NTFS_ATYPE_FNAME); if (fs_attr) { ntfs_attr_fname *fname = (ntfs_attr_fname *) fs_attr->rd.buf; uint64_t flags; int a = 0; tsk_fprintf(hFile, "\n$FILE_NAME Attribute Values:\n"); flags = tsk_getu64(fs->endian, fname->flags); tsk_fprintf(hFile, "Flags: "); if (flags & NTFS_FNAME_FLAGS_DIR) tsk_fprintf(hFile, "%sDirectory", a++ == 0 ? "" : ", "); if (flags & NTFS_FNAME_FLAGS_DEV) tsk_fprintf(hFile, "%sDevice", a++ == 0 ? "" : ", "); if (flags & NTFS_FNAME_FLAGS_NORM) tsk_fprintf(hFile, "%sNormal", a++ == 0 ? "" : ", "); if (flags & NTFS_FNAME_FLAGS_RO) tsk_fprintf(hFile, "%sRead Only", a++ == 0 ? "" : ", "); if (flags & NTFS_FNAME_FLAGS_HID) tsk_fprintf(hFile, "%sHidden", a++ == 0 ? "" : ", "); if (flags & NTFS_FNAME_FLAGS_SYS) tsk_fprintf(hFile, "%sSystem", a++ == 0 ? "" : ", "); if (flags & NTFS_FNAME_FLAGS_ARCH) tsk_fprintf(hFile, "%sArchive", a++ == 0 ? "" : ", "); if (flags & NTFS_FNAME_FLAGS_TEMP) tsk_fprintf(hFile, "%sTemp", a++ == 0 ? "" : ", "); if (flags & NTFS_FNAME_FLAGS_SPAR) tsk_fprintf(hFile, "%sSparse", a++ == 0 ? "" : ", "); if (flags & NTFS_FNAME_FLAGS_REP) tsk_fprintf(hFile, "%sReparse Point", a++ == 0 ? "" : ", "); if (flags & NTFS_FNAME_FLAGS_COMP) tsk_fprintf(hFile, "%sCompressed", a++ == 0 ? "" : ", "); if (flags & NTFS_FNAME_FLAGS_ENC) tsk_fprintf(hFile, "%sEncrypted", a++ == 0 ? 
"" : ", "); if (flags & NTFS_FNAME_FLAGS_OFF) tsk_fprintf(hFile, "%sOffline", a++ == 0 ? "" : ", "); if (flags & NTFS_FNAME_FLAGS_NOIDX) tsk_fprintf(hFile, "%sNot Content Indexed", a++ == 0 ? "" : ", "); if (flags & NTFS_FNAME_FLAGS_IDXVIEW) tsk_fprintf(hFile, "%sIndex View", a++ == 0 ? "" : ", "); tsk_fprintf(hFile, "\n"); /* We could look this up in the attribute, but we already did * the work */ if (fs_file->meta->name2) { TSK_FS_META_NAME_LIST *fs_name = fs_file->meta->name2; tsk_fprintf(hFile, "Name: "); while (fs_name) { tsk_fprintf(hFile, "%s", fs_name->name); fs_name = fs_name->next; if (fs_name) tsk_fprintf(hFile, ", "); else tsk_fprintf(hFile, "\n"); } } tsk_fprintf(hFile, "Parent MFT Entry: %" PRIu64 " \tSequence: %" PRIu16 "\n", (uint64_t) tsk_getu48(fs->endian, fname->par_ref), tsk_getu16(fs->endian, fname->par_seq)); tsk_fprintf(hFile, "Allocated Size: %" PRIu64 " \tActual Size: %" PRIu64 "\n", tsk_getu64(fs->endian, fname->alloc_fsize), tsk_getu64(fs->endian, fname->real_fsize)); /* * Times */ /* Times - take it from fs_file->meta instead of redoing the work */ if (sec_skew != 0) { tsk_fprintf(hFile, "\nAdjusted times:\n"); if (fs_file->meta->time2.ntfs.fn_mtime) fs_file->meta->time2.ntfs.fn_mtime -= sec_skew; if (fs_file->meta->time2.ntfs.fn_atime) fs_file->meta->time2.ntfs.fn_atime -= sec_skew; if (fs_file->meta->time2.ntfs.fn_ctime) fs_file->meta->time2.ntfs.fn_ctime -= sec_skew; if (fs_file->meta->time2.ntfs.fn_crtime) fs_file->meta->time2.ntfs.fn_crtime -= sec_skew; tsk_fprintf(hFile, "Created:\t%s\n", tsk_fs_time_to_str_subsecs(WITHNANO(fs_file->meta->time2.ntfs.fn_crtime), timeBuf)); tsk_fprintf(hFile, "File Modified:\t%s\n", tsk_fs_time_to_str_subsecs(WITHNANO(fs_file->meta->time2.ntfs.fn_mtime), timeBuf)); tsk_fprintf(hFile, "MFT Modified:\t%s\n", tsk_fs_time_to_str_subsecs(WITHNANO(fs_file->meta->time2.ntfs.fn_ctime), timeBuf)); tsk_fprintf(hFile, "Accessed:\t%s\n", tsk_fs_time_to_str_subsecs(WITHNANO(fs_file->meta->time2.ntfs.fn_atime), timeBuf)); if (fs_file->meta->time2.ntfs.fn_mtime) fs_file->meta->time2.ntfs.fn_mtime += sec_skew; if (fs_file->meta->time2.ntfs.fn_atime) fs_file->meta->time2.ntfs.fn_atime += sec_skew; if (fs_file->meta->time2.ntfs.fn_ctime) fs_file->meta->time2.ntfs.fn_ctime += sec_skew; if (fs_file->meta->time2.ntfs.fn_crtime) fs_file->meta->time2.ntfs.fn_crtime += sec_skew; tsk_fprintf(hFile, "\nOriginal times:\n"); } tsk_fprintf(hFile, "Created:\t%s\n", tsk_fs_time_to_str_subsecs(WITHNANO(fs_file->meta->time2.ntfs.fn_crtime), timeBuf)); tsk_fprintf(hFile, "File Modified:\t%s\n", tsk_fs_time_to_str_subsecs(WITHNANO(fs_file->meta->time2.ntfs.fn_mtime), timeBuf)); tsk_fprintf(hFile, "MFT Modified:\t%s\n", tsk_fs_time_to_str_subsecs(WITHNANO(fs_file->meta->time2.ntfs.fn_ctime), timeBuf)); tsk_fprintf(hFile, "Accessed:\t%s\n", tsk_fs_time_to_str_subsecs(WITHNANO(fs_file->meta->time2.ntfs.fn_atime), timeBuf)); } /* $OBJECT_ID Information */ fs_attr = tsk_fs_attrlist_get(fs_file->meta->attr, NTFS_ATYPE_OBJID); if (fs_attr) { ntfs_attr_objid *objid = (ntfs_attr_objid *) fs_attr->rd.buf; uint64_t id1, id2; tsk_fprintf(hFile, "\n$OBJECT_ID Attribute Values:\n"); id1 = tsk_getu64(fs->endian, objid->objid1); id2 = tsk_getu64(fs->endian, objid->objid2); tsk_fprintf(hFile, "Object Id: %.8" PRIx32 "-%.4" PRIx16 "-%.4" PRIx16 "-%.4" PRIx16 "-%.12" PRIx64 "\n", (uint32_t) (id2 >> 32) & 0xffffffff, (uint16_t) (id2 >> 16) & 0xffff, (uint16_t) (id2 & 0xffff), (uint16_t) (id1 >> 48) & 0xffff, (uint64_t) (id1 & (uint64_t) 0x0000ffffffffffffULL)); /* The rest of 
the fields do not always exist. Check the attr size */ if (fs_attr->size > 16) { id1 = tsk_getu64(fs->endian, objid->orig_volid1); id2 = tsk_getu64(fs->endian, objid->orig_volid2); tsk_fprintf(hFile, "Birth Volume Id: %.8" PRIx32 "-%.4" PRIx16 "-%.4" PRIx16 "-%.4" PRIx16 "-%.12" PRIx64 "\n", (uint32_t) (id2 >> 32) & 0xffffffff, (uint16_t) (id2 >> 16) & 0xffff, (uint16_t) (id2 & 0xffff), (uint16_t) (id1 >> 48) & 0xffff, (uint64_t) (id1 & (uint64_t) 0x0000ffffffffffffULL)); } if (fs_attr->size > 32) { id1 = tsk_getu64(fs->endian, objid->orig_objid1); id2 = tsk_getu64(fs->endian, objid->orig_objid2); tsk_fprintf(hFile, "Birth Object Id: %.8" PRIx32 "-%.4" PRIx16 "-%.4" PRIx16 "-%.4" PRIx16 "-%.12" PRIx64 "\n", (uint32_t) (id2 >> 32) & 0xffffffff, (uint16_t) (id2 >> 16) & 0xffff, (uint16_t) (id2 & 0xffff), (uint16_t) (id1 >> 48) & 0xffff, (uint64_t) (id1 & (uint64_t) 0x0000ffffffffffffULL)); } if (fs_attr->size > 48) { id1 = tsk_getu64(fs->endian, objid->orig_domid1); id2 = tsk_getu64(fs->endian, objid->orig_domid2); tsk_fprintf(hFile, "Birth Domain Id: %.8" PRIx32 "-%.4" PRIx16 "-%.4" PRIx16 "-%.4" PRIx16 "-%.12" PRIx64 "\n", (uint32_t) (id2 >> 32) & 0xffffffff, (uint16_t) (id2 >> 16) & 0xffff, (uint16_t) (id2 & 0xffff), (uint16_t) (id1 >> 48) & 0xffff, (uint64_t) (id1 & (uint64_t) 0x0000ffffffffffffULL)); } } /* Attribute List Information */ fs_attr = tsk_fs_attrlist_get(fs_file->meta->attr, NTFS_ATYPE_ATTRLIST); if (fs_attr) { char *buf; ntfs_attrlist *list; uintptr_t endaddr; TSK_FS_LOAD_FILE load_file; tsk_fprintf(hFile, "\n$ATTRIBUTE_LIST Attribute Values:\n"); /* Get a copy of the attribute list stream */ load_file.total = load_file.left = (size_t) fs_attr->size; load_file.cur = load_file.base = buf = tsk_malloc((size_t) fs_attr->size); if (buf == NULL) { free(mft); return 1; } endaddr = (uintptr_t) buf + (uintptr_t) fs_attr->size; if (tsk_fs_attr_walk(fs_attr, 0, tsk_fs_load_file_action, (void *) &load_file)) { tsk_fprintf(hFile, "error reading attribute list buffer\n"); tsk_error_reset(); goto egress; } /* this value should be zero, if not then we didn't read all of the * buffer */ if (load_file.left > 0) { tsk_fprintf(hFile, "error reading attribute list buffer\n"); goto egress; } /* Process the list & print the details */ for (list = (ntfs_attrlist *) buf; (list) && ((uintptr_t) list < endaddr) && (tsk_getu16(fs->endian, list->len) > 0); list = (ntfs_attrlist *) ((uintptr_t) list + tsk_getu16(fs->endian, list->len))) { tsk_fprintf(hFile, "Type: %" PRIu32 "-%" PRIu16 " \tMFT Entry: %" PRIu64 " \tVCN: %" PRIu64 "\n", tsk_getu32(fs->endian, list->type), tsk_getu16(fs->endian, list->id), (uint64_t) tsk_getu48(fs->endian, list->file_ref), tsk_getu64(fs->endian, list->start_vcn)); } egress: free(buf); } /* Print all of the attributes */ tsk_fprintf(hFile, "\nAttributes: \n"); if (fs_file->meta->attr) { int cnt, i; // cycle through the attributes cnt = tsk_fs_file_attr_getsize(fs_file); for (i = 0; i < cnt; i++) { char type[512]; const TSK_FS_ATTR *fs_attr = tsk_fs_file_attr_get_idx(fs_file, i); if (!fs_attr) continue; if (ntfs_attrname_lookup(fs, fs_attr->type, type, 512)) { tsk_fprintf(hFile, "error looking attribute name\n"); break; } /* print the layout if it is non-resident and not "special" */ if (fs_attr->flags & TSK_FS_ATTR_NONRES) { NTFS_PRINT_ADDR print_addr; tsk_fprintf(hFile, "Type: %s (%" PRIu32 "-%" PRIu16 ") Name: %s Non-Resident%s%s%s size: %" PRIuOFF " init_size: %" PRIuOFF "\n", type, fs_attr->type, fs_attr->id, (fs_attr->name) ? 
fs_attr->name : "N/A", (fs_attr->flags & TSK_FS_ATTR_ENC) ? ", Encrypted" : "", (fs_attr->flags & TSK_FS_ATTR_COMP) ? ", Compressed" : "", (fs_attr->flags & TSK_FS_ATTR_SPARSE) ? ", Sparse" : "", fs_attr->size, fs_attr->nrd.initsize); print_addr.idx = 0; print_addr.hFile = hFile; if (tsk_fs_file_walk_type(fs_file, fs_attr->type, fs_attr->id, (TSK_FS_FILE_WALK_FLAG_AONLY | TSK_FS_FILE_WALK_FLAG_SLACK), print_addr_act, (void *) &print_addr)) { tsk_fprintf(hFile, "\nError walking file\n"); tsk_error_print(hFile); tsk_error_reset(); } if (print_addr.idx != 0) tsk_fprintf(hFile, "\n"); } else { tsk_fprintf(hFile, "Type: %s (%" PRIu32 "-%" PRIu16 ") Name: %s Resident%s%s%s size: %" PRIuOFF "\n", type, fs_attr->type, fs_attr->id, (fs_attr->name) ? fs_attr->name : "N/A", (fs_attr->flags & TSK_FS_ATTR_ENC) ? ", Encrypted" : "", (fs_attr->flags & TSK_FS_ATTR_COMP) ? ", Compressed" : "", (fs_attr->flags & TSK_FS_ATTR_SPARSE) ? ", Sparse" : "", fs_attr->size); } } } tsk_fs_file_close(fs_file); free(mft); return 0; } /* JOURNAL CODE - MOVE TO NEW FILE AT SOME POINT */ static uint8_t ntfs_jopen(TSK_FS_INFO * fs, TSK_INUM_T inum) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNSUPFUNC); tsk_error_set_errstr("NTFS Journal is not yet supported\n"); return 1; } static uint8_t ntfs_jentry_walk(TSK_FS_INFO * fs, int flags, TSK_FS_JENTRY_WALK_CB a_action, void *ptr) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNSUPFUNC); tsk_error_set_errstr("NTFS Journal is not yet supported\n"); return 1; } static uint8_t ntfs_jblk_walk(TSK_FS_INFO * fs, TSK_DADDR_T start, TSK_DADDR_T end, int flags, TSK_FS_JBLK_WALK_CB a_action, void *ptr) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNSUPFUNC); tsk_error_set_errstr("NTFS Journal is not yet supported\n"); return 1; } static TSK_FS_ATTR_TYPE_ENUM ntfs_get_default_attr_type(const TSK_FS_FILE * a_file) { if ((a_file == NULL) || (a_file->meta == NULL)) return TSK_FS_ATTR_TYPE_DEFAULT; /* Use DATA for files and IDXROOT for dirs */ if (a_file->meta->type == TSK_FS_META_TYPE_DIR) return TSK_FS_ATTR_TYPE_NTFS_IDXROOT; else return TSK_FS_ATTR_TYPE_NTFS_DATA; } static void ntfs_close(TSK_FS_INFO * fs) { NTFS_INFO *ntfs = (NTFS_INFO *) fs; if (fs == NULL) return; #if TSK_USE_SID if (ntfs->sii_data.buffer) free(ntfs->sii_data.buffer); ntfs->sii_data.buffer = NULL; if (ntfs->sds_data.buffer) free(ntfs->sds_data.buffer); ntfs->sds_data.buffer = NULL; #endif fs->tag = 0; free((char *) ntfs->fs); tsk_fs_attr_run_free(ntfs->bmap); free(ntfs->bmap_buf); tsk_fs_file_close(ntfs->mft_file); if (ntfs->orphan_map) ntfs_orphan_map_free(ntfs); tsk_deinit_lock(&ntfs->lock); tsk_deinit_lock(&ntfs->orphan_map_lock); #if TSK_USE_SID tsk_deinit_lock(&ntfs->sid_lock); #endif tsk_fs_free(fs); } /** * Open part of a disk image as an NTFS file system. 
* * @param img_info Disk image to analyze * @param offset Byte offset where NTFS file system starts * @param ftype Specific type of NTFS file system * @param test NOT USED * @returns NULL on error or if data is not an NTFS file system */ TSK_FS_INFO * ntfs_open(TSK_IMG_INFO * img_info, TSK_OFF_T offset, TSK_FS_TYPE_ENUM ftype, uint8_t test) { char *myname = "ntfs_open"; NTFS_INFO *ntfs = NULL; TSK_FS_INFO *fs = NULL; unsigned int len = 0; ssize_t cnt = 0; // clean up any error messages that are lying around tsk_error_reset(); if (TSK_FS_TYPE_ISNTFS(ftype) == 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("Invalid FS type in ntfs_open"); return NULL; } if ((ntfs = (NTFS_INFO *) tsk_fs_malloc(sizeof(*ntfs))) == NULL) { goto on_error; } fs = &(ntfs->fs_info); fs->ftype = TSK_FS_TYPE_NTFS; fs->duname = "Cluster"; fs->flags = TSK_FS_INFO_FLAG_HAVE_SEQ; fs->tag = TSK_FS_INFO_TAG; fs->img_info = img_info; fs->offset = offset; ntfs->loading_the_MFT = 0; ntfs->bmap = NULL; ntfs->bmap_buf = NULL; /* Read the boot sector */ len = roundup(sizeof(ntfs_sb), img_info->sector_size); ntfs->fs = (ntfs_sb *) tsk_malloc(len); if (ntfs->fs == NULL) { goto on_error; } cnt = tsk_fs_read(fs, (TSK_OFF_T) 0, (char *) ntfs->fs, len); if (cnt != len) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2("%s: Error reading boot sector.", myname); goto on_error; } /* Check the magic value */ if (tsk_fs_guessu16(fs, ntfs->fs->magic, NTFS_FS_MAGIC)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_MAGIC); tsk_error_set_errstr("Not a NTFS file system (magic)"); if (tsk_verbose) fprintf(stderr, "ntfs_open: Incorrect NTFS magic\n"); goto on_error; } /* * block calculations : although there are no blocks in ntfs, * we are using a cluster as a "block" */ ntfs->ssize_b = tsk_getu16(fs->endian, ntfs->fs->ssize); if ((ntfs->ssize_b == 0) || (ntfs->ssize_b % 512)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_MAGIC); tsk_error_set_errstr ("Not a NTFS file system (invalid sector size %d))", ntfs->ssize_b); if (tsk_verbose) fprintf(stderr, "ntfs_open: invalid sector size: %d\n", ntfs->ssize_b); goto on_error; } if ((ntfs->fs->csize != 0x01) && (ntfs->fs->csize != 0x02) && (ntfs->fs->csize != 0x04) && (ntfs->fs->csize != 0x08) && (ntfs->fs->csize != 0x10) && (ntfs->fs->csize != 0x20) && (ntfs->fs->csize != 0x40) && (ntfs->fs->csize != 0x80)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_MAGIC); tsk_error_set_errstr ("Not a NTFS file system (invalid cluster size %d)", ntfs->fs->csize); if (tsk_verbose) fprintf(stderr, "ntfs_open: invalid cluster size: %d\n", ntfs->fs->csize); goto on_error; } ntfs->csize_b = ntfs->fs->csize * ntfs->ssize_b; fs->first_block = 0; /* This field is defined as 64-bits but according to the * NTFS drivers in Linux, windows only uses 32-bits */ fs->block_count = (TSK_DADDR_T) tsk_getu32(fs->endian, ntfs->fs->vol_size_s) / ntfs->fs->csize; if (fs->block_count == 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_MAGIC); tsk_error_set_errstr("Not a NTFS file system (volume size is 0)"); if (tsk_verbose) fprintf(stderr, "ntfs_open: invalid volume size: 0\n"); goto on_error; } fs->last_block = fs->last_block_act = fs->block_count - 1; fs->block_size = ntfs->csize_b; fs->dev_bsize = img_info->sector_size; // determine the last block we have in this image if ((TSK_DADDR_T) ((img_info->size - offset) / fs->block_size) < fs->block_count) fs->last_block_act = (img_info->size - offset) / fs->block_size - 1; if 
(ntfs->fs->mft_rsize_c > 0) ntfs->mft_rsize_b = ntfs->fs->mft_rsize_c * ntfs->csize_b; else /* if the mft_rsize_c is not > 0, then it is -log2(rsize_b) */ ntfs->mft_rsize_b = 1 << -ntfs->fs->mft_rsize_c; if ((ntfs->mft_rsize_b == 0) || (ntfs->mft_rsize_b % 512)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_MAGIC); tsk_error_set_errstr ("Not a NTFS file system (invalid MFT entry size)"); if (tsk_verbose) fprintf(stderr, "ntfs_open: invalid MFT entry size\n"); goto on_error; } if (ntfs->fs->idx_rsize_c > 0) ntfs->idx_rsize_b = ntfs->fs->idx_rsize_c * ntfs->csize_b; else /* if the idx_rsize_c is not > 0, then it is -log2(rsize_b) */ ntfs->idx_rsize_b = 1 << -ntfs->fs->idx_rsize_c; if ((ntfs->idx_rsize_b == 0) || (ntfs->idx_rsize_b % 512)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_MAGIC); tsk_error_set_errstr ("Not a NTFS file system (invalid idx record size %d)", ntfs->idx_rsize_b); if (tsk_verbose) fprintf(stderr, "ntfs_open: invalid idx record size %d\n", ntfs->idx_rsize_b); goto on_error; } ntfs->root_mft_addr = tsk_getu64(fs->endian, ntfs->fs->mft_clust) * ntfs->csize_b; if (tsk_getu64(fs->endian, ntfs->fs->mft_clust) > fs->last_block) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_MAGIC); tsk_error_set_errstr ("Not a NTFS file system (invalid starting MFT clust)"); if (tsk_verbose) fprintf(stderr, "ntfs_open: invalid starting MFT cluster\n"); goto on_error; } /* * Set the function pointers (before we start calling internal functions) */ fs->inode_walk = ntfs_inode_walk; fs->block_walk = ntfs_block_walk; fs->block_getflags = ntfs_block_getflags; fs->get_default_attr_type = ntfs_get_default_attr_type; fs->load_attrs = ntfs_load_attrs; fs->file_add_meta = ntfs_inode_lookup; fs->dir_open_meta = ntfs_dir_open_meta; fs->fsstat = ntfs_fsstat; fs->fscheck = ntfs_fscheck; fs->istat = ntfs_istat; fs->close = ntfs_close; fs->name_cmp = ntfs_name_cmp; fs->fread_owner_sid = ntfs_file_get_sidstr; fs->jblk_walk = ntfs_jblk_walk; fs->jentry_walk = ntfs_jentry_walk; fs->jopen = ntfs_jopen; fs->journ_inum = 0; // set up locks tsk_init_lock(&ntfs->lock); tsk_init_lock(&ntfs->orphan_map_lock); #if TSK_USE_SID tsk_init_lock(&ntfs->sid_lock); #endif /* * inode */ fs->root_inum = NTFS_ROOTINO; fs->first_inum = NTFS_FIRSTINO; fs->last_inum = NTFS_LAST_DEFAULT_INO; ntfs->mft_data = NULL; /* load the data run for the MFT table into ntfs->mft */ ntfs->loading_the_MFT = 1; if ((ntfs->mft_file = tsk_fs_file_open_meta(fs, NULL, NTFS_MFT_MFT)) == NULL) { if (tsk_verbose) fprintf(stderr, "ntfs_open: Error opening $MFT (%s)\n", tsk_error_get()); goto on_error; } /* cache the data attribute * * This will likely be done already by proc_attrseq, but this * should be quick */ ntfs->mft_data = tsk_fs_attrlist_get(ntfs->mft_file->meta->attr, NTFS_ATYPE_DATA); if (!ntfs->mft_data) { tsk_fs_file_close(ntfs->mft_file); tsk_error_errstr2_concat(" - Data Attribute not found in $MFT"); if (tsk_verbose) fprintf(stderr, "ntfs_open: Data attribute not found in $MFT (%s)\n", tsk_error_get()); goto on_error; } /* Get the inode count based on the table size */ fs->inum_count = ntfs->mft_data->size / ntfs->mft_rsize_b + 1; // we are adding 1 in this calc to account for Orphans directory fs->last_inum = fs->inum_count - 1; /* reset the flag that we are no longer loading $MFT */ ntfs->loading_the_MFT = 0; /* Volume ID */ for (fs->fs_id_used = 0; fs->fs_id_used < 8; fs->fs_id_used++) { fs->fs_id[fs->fs_id_used] = ntfs->fs->serial[fs->fs_id_used]; } /* load the version of the file system */ if (ntfs_load_ver(ntfs)) { 
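        // Reading the version out of $Volume failed; release the cached $MFT
        // handle before the shared on_error cleanup frees the rest.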
tsk_fs_file_close(ntfs->mft_file); if (tsk_verbose) fprintf(stderr, "ntfs_open: Error loading file system version ((%s)\n", tsk_error_get()); goto on_error; } /* load the data block bitmap data run into ntfs_info */ if (ntfs_load_bmap(ntfs)) { tsk_fs_file_close(ntfs->mft_file); if (tsk_verbose) fprintf(stderr, "ntfs_open: Error loading block bitmap (%s)\n", tsk_error_get()); goto on_error; } /* load the SID data into ntfs_info ($Secure - $SDS, $SDH, $SII */ #if TSK_USE_SID if (ntfs_load_secure(ntfs)) { tsk_fs_file_close(ntfs->mft_file); if (tsk_verbose) fprintf(stderr, "ntfs_open: Error loading Secure Info (%s)\n", tsk_error_get()); goto on_error; } #endif // initialize the caches ntfs->attrdef = NULL; ntfs->orphan_map = NULL; if (tsk_verbose) { tsk_fprintf(stderr, "ssize: %" PRIu16 " csize: %d serial: %" PRIx64 "\n", tsk_getu16(fs->endian, ntfs->fs->ssize), ntfs->fs->csize, tsk_getu64(fs->endian, ntfs->fs->serial)); tsk_fprintf(stderr, "mft_rsize: %d idx_rsize: %d vol: %d mft: %" PRIu64 " mft_mir: %" PRIu64 "\n", ntfs->mft_rsize_b, ntfs->idx_rsize_b, (int) fs->block_count, tsk_getu64(fs->endian, ntfs->fs->mft_clust), tsk_getu64(fs->endian, ntfs->fs->mftm_clust)); } return fs; on_error: if( fs != NULL ) { // Since fs->tag is ntfs->fs_info.tag why is this value set to 0 // and the memory is freed directly afterwards? fs->tag = 0; } if( ntfs != NULL ) { if( ntfs->fs != NULL ) { free( ntfs->fs ); } free( ntfs ); } return NULL; } sleuthkit-4.2.0/tsk/fs/ntfs_dent.cpp000644 000765 000024 00000147271 12576320700 020242 0ustar00carrierstaff000000 000000 /* ** ntfs_dent ** The Sleuth Kit ** ** name layer support for the NTFS file system ** ** Brian Carrier [carrier sleuthkit [dot] org] ** Copyright (c) 2006-2011 Brian Carrier, Basis Technology. All Rights reserved ** Copyright (c) 2003-2005 Brian Carrier. All rights reserved ** ** TASK ** Copyright (c) 2002 Brian Carrier, @stake Inc. All rights reserved ** ** ** This software is distributed under the Common Public License 1.0 ** ** Unicode added with support from I.D.E.A.L. Technology Corp (Aug '05) ** */ #include "tsk_fs_i.h" #include "tsk_ntfs.h" /** * \file ntfs_dent.cpp * NTFS file name processing internal functions. */ #include #include /** * Class to hold the pair of MFT entry and sequence. */ class NTFS_META_ADDR { private: uint64_t addr; ///< MFT entry uint32_t seq; ///< Sequence public: NTFS_META_ADDR(uint64_t a_addr, uint32_t a_seq) { addr = a_addr; seq = a_seq; } uint64_t getAddr() { return addr; } uint32_t getSeq() { return seq; } }; /* When we list a directory, we need to also look at MFT entries and what * they list as their parents. We used to do this only for orphan files, but * we were pointed to a case whereby allocated files were not in IDX_ALLOC, but were * shown in Windows (when mounted). They must have been found via the MFT entry, so * we now load all parent to child relationships into the map. * * One of these classes is created per parent folder */ class NTFS_PAR_MAP { private: // maps sequence number to list of inums for the folder at that seq. std::map > seq2addrs; public: /** * Add a child to this parent. * @param seq Sequence of the parent that this child belonged to * @param inum Address of child in the folder. * @param seq Sequence of child in the folder */ void add (uint32_t parSeq, TSK_INUM_T inum, uint32_t seq) { NTFS_META_ADDR addr(inum, seq); seq2addrs[parSeq].push_back(addr); } /** * Test if there are any children for this directory at a given sequence. * @param seq Sequence to test. 
* @returns true if children exist */ bool exists (uint32_t seq) { if (seq2addrs.count(seq) > 0) return true; else return false; } /** * Get the children for this folder at a given sequence. Use exists first. * @param seq Sequence number to retrieve children for. * @returns list of INUMS for children. */ std::vector &get (uint32_t seq) { return seq2addrs[seq]; } }; /** \internal * Casts the void * to a map. This obfuscation is done so that the rest of the library * can remain as C and only this code needs to be C++. * * Assumes that you already have the lock */ static std::map * getParentMap(NTFS_INFO *ntfs) { // allocate it if it hasn't already been if (ntfs->orphan_map == NULL) { ntfs->orphan_map = new std::map; } return (std::map *)ntfs->orphan_map; } /** \internal * Add a parent and child pair to the map stored in NTFS_INFO * * Note: This routine assumes &ntfs->orhpan_map_lock is locked by the caller. * * @param ntfs structure to add the pair to * @param par Parent address * @param child_meta Child to add * @returns 1 on error */ static uint8_t ntfs_parent_map_add(NTFS_INFO * ntfs, TSK_FS_META_NAME_LIST *name_list, TSK_FS_META *child_meta) { std::map *tmpParentMap = getParentMap(ntfs); NTFS_PAR_MAP &tmpParMap = (*tmpParentMap)[name_list->par_inode]; tmpParMap.add(name_list->par_seq, child_meta->addr, child_meta->seq); return 0; } /** \internal * Returns if a parent has children or not. * * Note: This routine assumes &ntfs->orhpan_map_lock is locked by the caller. * * @param ntfs File system that has already been analyzed * @param par Parent inode to find child files for * @seq seq Sequence of parent folder * @returns true if parent has children. */ static bool ntfs_parent_map_exists(NTFS_INFO *ntfs, TSK_INUM_T par, uint32_t seq) { std::map *tmpParentMap = getParentMap(ntfs); if (tmpParentMap->count(par) > 0) { NTFS_PAR_MAP &tmpParMap = (*tmpParentMap)[par]; if (tmpParMap.exists(seq)) return true; } return false; } /** \internal * Look up a map entry by the parent address. You should call ntfs_parent_map_exists() before this, otherwise * an empty entry could be created. * * Note: This routine assumes &ntfs->orhpan_map_lock is locked by the caller. * * @param ntfs File system that has already been analyzed * @param par Parent inode to find child files for * @param seq Sequence of parent inode * @returns address of children files in the parent directory */ static std::vector & ntfs_parent_map_get(NTFS_INFO * ntfs, TSK_INUM_T par, uint32_t seq) { std::map *tmpParentMap = getParentMap(ntfs); NTFS_PAR_MAP &tmpParMap = (*tmpParentMap)[par]; return tmpParMap.get(seq); } // note that for consistency, this should be called parent_map_free, but // that would have required an API change in a point release and this better // matches the name in NTFS_INFO void ntfs_orphan_map_free(NTFS_INFO * a_ntfs) { // This routine is only called from ntfs_close, so it wouldn't // normally need a lock. However, it's an extern function, so be // safe in case someone else calls it. (Perhaps it's extern by // mistake?) 
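/*
 * Rough sketch of the idea behind the parent map built above: every child
 * is filed under the (parent inode, parent sequence) pair that its
 * $FILE_NAME attribute points to, so a listing of a reused directory does
 * not pick up children from an earlier incarnation.  The real code keeps
 * this in the std::map returned by getParentMap(); the flat table below is
 * purely illustrative plain C.
 */
#include <stdint.h>
#include <stddef.h>

struct par_child_sketch {
    uint64_t par_inum;     /* parent MFT entry */
    uint32_t par_seq;      /* parent sequence recorded in the child's name */
    uint64_t child_inum;   /* child MFT entry */
    uint32_t child_seq;    /* child sequence number */
};

#define PAR_MAP_SKETCH_MAX 1024
static struct par_child_sketch par_map_sketch[PAR_MAP_SKETCH_MAX];
static size_t par_map_sketch_len = 0;

static int par_map_sketch_add(uint64_t par, uint32_t pseq, uint64_t child, uint32_t cseq)
{
    if (par_map_sketch_len >= PAR_MAP_SKETCH_MAX)
        return 1;                                      /* table full */
    par_map_sketch[par_map_sketch_len].par_inum = par;
    par_map_sketch[par_map_sketch_len].par_seq = pseq;
    par_map_sketch[par_map_sketch_len].child_inum = child;
    par_map_sketch[par_map_sketch_len].child_seq = cseq;
    par_map_sketch_len++;
    return 0;
}

/* Count the children recorded for one specific incarnation of a directory. */
static size_t par_map_sketch_count(uint64_t par, uint32_t pseq)
{
    size_t i, n = 0;
    for (i = 0; i < par_map_sketch_len; i++)
        if (par_map_sketch[i].par_inum == par && par_map_sketch[i].par_seq == pseq)
            n++;
    return n;
}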
tsk_take_lock(&a_ntfs->orphan_map_lock); if (a_ntfs->orphan_map == NULL) { tsk_release_lock(&a_ntfs->orphan_map_lock); return; } std::map *tmpParentMap = getParentMap(a_ntfs); delete tmpParentMap; a_ntfs->orphan_map = NULL; tsk_release_lock(&a_ntfs->orphan_map_lock); } /* inode_walk callback that is used to populate the orphan_map * structure in NTFS_INFO */ static TSK_WALK_RET_ENUM ntfs_parent_act(TSK_FS_FILE * fs_file, void *ptr) { NTFS_INFO *ntfs = (NTFS_INFO *) fs_file->fs_info; TSK_FS_META_NAME_LIST *fs_name_list; /* go through each file name structure */ fs_name_list = fs_file->meta->name2; while (fs_name_list) { if (ntfs_parent_map_add(ntfs, fs_name_list, fs_file->meta)) return TSK_WALK_ERROR; fs_name_list = fs_name_list->next; } return TSK_WALK_CONT; } /****************/ static uint8_t ntfs_dent_copy(NTFS_INFO * ntfs, ntfs_idxentry * idxe, TSK_FS_NAME * fs_name) { ntfs_attr_fname *fname = (ntfs_attr_fname *) & idxe->stream; TSK_FS_INFO *fs = (TSK_FS_INFO *) & ntfs->fs_info; UTF16 *name16; UTF8 *name8; int retVal; fs_name->meta_addr = tsk_getu48(fs->endian, idxe->file_ref); fs_name->meta_seq = tsk_getu16(fs->endian, idxe->seq_num); name16 = (UTF16 *) & fname->name; name8 = (UTF8 *) fs_name->name; retVal = tsk_UTF16toUTF8(fs->endian, (const UTF16 **) &name16, (UTF16 *) ((uintptr_t) name16 + fname->nlen * 2), &name8, (UTF8 *) ((uintptr_t) name8 + fs_name->name_size), TSKlenientConversion); if (retVal != TSKconversionOK) { *name8 = '\0'; if (tsk_verbose) tsk_fprintf(stderr, "Error converting NTFS name to UTF8: %d %" PRIuINUM, retVal, fs_name->meta_addr); } /* Make sure it is NULL Terminated */ if ((uintptr_t) name8 > (uintptr_t) fs_name->name + fs_name->name_size) fs_name->name[fs_name->name_size] = '\0'; else *name8 = '\0'; if (tsk_getu64(fs->endian, fname->flags) & NTFS_FNAME_FLAGS_DIR) fs_name->type = TSK_FS_NAME_TYPE_DIR; else fs_name->type = TSK_FS_NAME_TYPE_REG; fs_name->flags = (TSK_FS_NAME_FLAG_ENUM)0; return 0; } /* This is a sanity check to see if the time is valid * it is divided by 100 to keep it in a 32-bit integer */ static uint8_t is_time(uint64_t t) { #define SEC_BTWN_1601_1970_DIV100 ((369*365 + 89) * 24 * 36) #define SEC_BTWN_1601_2020_DIV100 (SEC_BTWN_1601_1970_DIV100 + (50*365 + 6) * 24 * 36) t /= 1000000000; /* put the time in seconds div by additional 100 */ if (!t) return 0; if (t < SEC_BTWN_1601_1970_DIV100) return 0; if (t > SEC_BTWN_1601_2020_DIV100) return 0; return 1; } /** * Process a lsit of index entries and add to FS_DIR * * @param a_is_del Set to 1 if these entries are for a deleted directory * @param idxe Buffer with index entries to process * @param idxe_len Length of idxe buffer (in bytes) * @param used_len Length of data as reported by idexlist header (everything * after which and less then idxe_len is considered deleted) * * @returns 1 to stop, 0 on success, and -1 on error */ // @@@ Should make a_idxe const and use internal pointer in function loop static TSK_RETVAL_ENUM ntfs_proc_idxentry(NTFS_INFO * a_ntfs, TSK_FS_DIR * a_fs_dir, uint8_t a_is_del, ntfs_idxentry * a_idxe, uint32_t a_idxe_len, uint32_t a_used_len) { uintptr_t endaddr, endaddr_alloc; TSK_FS_NAME *fs_name; TSK_FS_INFO *fs = (TSK_FS_INFO *) & a_ntfs->fs_info; if ((fs_name = tsk_fs_name_alloc(NTFS_MAXNAMLEN_UTF8, 0)) == NULL) { return TSK_ERR; } if (tsk_verbose) tsk_fprintf(stderr, "ntfs_proc_idxentry: Processing index entry: %" PRIu64 " Size: %" PRIu32 " Len: %" PRIu32 "\n", (uint64_t) ((uintptr_t) a_idxe), a_idxe_len, a_used_len); /* Sanity check */ if (a_idxe_len < a_used_len) { 
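/*
 * Standalone sketch of the timestamp plausibility test used by is_time():
 * a $FILE_NAME time is a count of 100 ns ticks since 1601-01-01 UTC, so
 * dividing by 10^7 gives whole seconds and subtracting the 1601-to-1970
 * offset gives a Unix time.  The constant below uses the same
 * 369-years-plus-89-leap-days arithmetic as the macros above; the bounds
 * and names are illustrative.
 */
#include <stdint.h>

#define SECS_1601_TO_1970 ((uint64_t)(369 * 365 + 89) * 24 * 3600)  /* 11644473600 */

static int filetime_plausible(uint64_t ft, int64_t *unix_secs)
{
    uint64_t secs = ft / 10000000ULL;          /* 100 ns ticks -> whole seconds */

    if (secs == 0 || secs < SECS_1601_TO_1970)
        return 0;                              /* zero or before the Unix epoch: reject */
    /* is_time() also rejects anything after 2020 on the theory that a far
     * future date is more likely a corrupt entry than a real timestamp. */
    if (unix_secs)
        *unix_secs = (int64_t)(secs - SECS_1601_TO_1970);
    return 1;
}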
tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("ntfs_proc_idxentry: Allocated length of index entries is larger than buffer length"); return TSK_ERR; } /* where is the end of the buffer */ endaddr = ((uintptr_t) a_idxe + a_idxe_len); /* where is the end of the allocated data */ endaddr_alloc = ((uintptr_t) a_idxe + a_used_len); /* cycle through the index entries, based on provided size */ while (((uintptr_t) & (a_idxe->stream) + sizeof(ntfs_attr_fname)) < endaddr) { ntfs_attr_fname *fname = (ntfs_attr_fname *) & a_idxe->stream; if (tsk_verbose) tsk_fprintf(stderr, "ntfs_proc_idxentry: New IdxEnt: %" PRIu64 " $FILE_NAME Entry: %" PRIu64 " File Ref: %" PRIu64 " IdxEnt Len: %" PRIu16 " StrLen: %" PRIu16 "\n", (uint64_t) ((uintptr_t) a_idxe), (uint64_t) ((uintptr_t) fname), (uint64_t) tsk_getu48(fs->endian, a_idxe->file_ref), tsk_getu16(fs->endian, a_idxe->idxlen), tsk_getu16(fs->endian, a_idxe->strlen)); /* perform some sanity checks on index buffer head * and advance by 4-bytes if invalid */ if ((tsk_getu48(fs->endian, a_idxe->file_ref) > fs->last_inum) || (tsk_getu48(fs->endian, a_idxe->file_ref) < fs->first_inum) || (tsk_getu16(fs->endian, a_idxe->idxlen) <= tsk_getu16(fs->endian, a_idxe->strlen)) || (tsk_getu16(fs->endian, a_idxe->idxlen) % 4) || (tsk_getu16(fs->endian, a_idxe->idxlen) > a_idxe_len)) { a_idxe = (ntfs_idxentry *) ((uintptr_t) a_idxe + 4); continue; } #if 0 // @@@ BC: This hid a lot of entries in test images. They were // only partial images, but they were not junk and the idea was // that this check would strip out chunk. Commented it out and // keeping it here as a reminder in case I think about doing it // again. // verify name length would fit in stream if (fname->nlen > tsk_getu16(fs->endian, a_idxe->strlen)) { a_idxe = (ntfs_idxentry *) ((uintptr_t) a_idxe + 4); if (tsk_verbose) tsk_fprintf(stderr, "ntfs_proc_idxentry: Skipping because name is longer than stream\n"); continue; } #endif // verify it has the correct parent address if (tsk_getu48(fs->endian, fname->par_ref) != a_fs_dir->addr) { a_idxe = (ntfs_idxentry *) ((uintptr_t) a_idxe + 4); if (tsk_verbose) tsk_fprintf(stderr, "ntfs_proc_idxentry: Skipping because of wrong parent address\n"); continue; } /* do some sanity checks on the deleted entries */ if ((tsk_getu16(fs->endian, a_idxe->strlen) == 0) || (((uintptr_t) a_idxe + tsk_getu16(fs->endian, a_idxe->idxlen)) > endaddr_alloc)) { /* name space checks */ if ((fname->nspace != NTFS_FNAME_POSIX) && (fname->nspace != NTFS_FNAME_WIN32) && (fname->nspace != NTFS_FNAME_DOS) && (fname->nspace != NTFS_FNAME_WINDOS)) { a_idxe = (ntfs_idxentry *) ((uintptr_t) a_idxe + 4); if (tsk_verbose) tsk_fprintf(stderr, "ntfs_proc_idxentry: Skipping because of invalid name space\n"); continue; } if ((tsk_getu64(fs->endian, fname->alloc_fsize) < tsk_getu64(fs->endian, fname->real_fsize)) || (fname->nlen == 0) || (*(uint8_t *) & fname->name == 0)) { a_idxe = (ntfs_idxentry *) ((uintptr_t) a_idxe + 4); if (tsk_verbose) tsk_fprintf(stderr, "ntfs_proc_idxentry: Skipping because of reported file sizes, name length, or NULL name\n"); continue; } if ((is_time(tsk_getu64(fs->endian, fname->crtime)) == 0) || (is_time(tsk_getu64(fs->endian, fname->atime)) == 0) || (is_time(tsk_getu64(fs->endian, fname->mtime)) == 0)) { a_idxe = (ntfs_idxentry *) ((uintptr_t) a_idxe + 4); if (tsk_verbose) tsk_fprintf(stderr, "ntfs_proc_idxentry: Skipping because of invalid times\n"); continue; } } /* For all fname entries, there will exist a DOS style 8.3 * entry. 
We don't process those because we already processed * them before in their full version. If the type is * full POSIX or WIN32 that does not satisfy DOS, then a * type NTFS_FNAME_DOS will exist. If the name is WIN32, * but already satisfies DOS, then a type NTFS_FNAME_WINDOS * will exist * * Note that we could be missing some info from deleted files * if the windows version was deleted and the DOS wasn't... * * @@@ This should be added to the shrt_name entry of TSK_FS_NAME. The short * name entry typically comes after the long name */ if (fname->nspace == NTFS_FNAME_DOS) { if (tsk_verbose) tsk_fprintf(stderr, "ntfs_proc_idxentry: Skipping because of name space: %d\n", fname->nspace); goto incr_entry; } /* Copy it into the generic form */ if (ntfs_dent_copy(a_ntfs, a_idxe, fs_name)) { if (tsk_verbose) tsk_fprintf(stderr, "ntfs_proc_idxentry: Skipping because error copying dent_entry\n"); goto incr_entry; } /* * Check if this entry is deleted * * The final check is to see if the end of this entry is * within the space that the idxallocbuf claimed was valid OR * if the parent directory is deleted */ if ((a_is_del == 1) || (tsk_getu16(fs->endian, a_idxe->strlen) == 0) || (((uintptr_t) a_idxe + tsk_getu16(fs->endian, a_idxe->idxlen)) > endaddr_alloc)) { fs_name->flags = TSK_FS_NAME_FLAG_UNALLOC; } else { fs_name->flags = TSK_FS_NAME_FLAG_ALLOC; } if (tsk_verbose) tsk_fprintf(stderr, "ntfs_proc_idxentry: Entry Details of %s: Str Len: %" PRIu16 " Len to end after current: %" PRIu64 " flags: %x\n", fs_name->name, tsk_getu16(fs->endian, a_idxe->strlen), (uint64_t) (endaddr_alloc - (uintptr_t) a_idxe - tsk_getu16(fs->endian, a_idxe->idxlen)), fs_name->flags); if (tsk_fs_dir_add(a_fs_dir, fs_name)) { tsk_fs_name_free(fs_name); return TSK_ERR; } incr_entry: /* the theory here is that deleted entries have strlen == 0 and * have been found to have idxlen == 16 * * if the strlen is 0, then guess how much the indexlen was * before it was deleted */ /* 16: size of idxentry before stream * 66: size of fname before name * 2*nlen: size of name (in unicode) */ if (tsk_getu16(fs->endian, a_idxe->strlen) == 0) { a_idxe = (ntfs_idxentry *) ((((uintptr_t) a_idxe + 16 + 66 + 2 * fname->nlen + 3) / 4) * 4); } else { a_idxe = (ntfs_idxentry *) ((uintptr_t) a_idxe + tsk_getu16(fs->endian, a_idxe->idxlen)); } } /* end of loop of index entries */ tsk_fs_name_free(fs_name); return TSK_OK; } /* * remove the update sequence values that are changed in the last two * bytes of each sector * * return 1 on error and 0 on success */ static uint8_t ntfs_fix_idxrec(NTFS_INFO * ntfs, ntfs_idxrec * idxrec, uint32_t len) { int i; uint16_t orig_seq; TSK_FS_INFO *fs = (TSK_FS_INFO *) & ntfs->fs_info; ntfs_upd *upd; if (tsk_verbose) tsk_fprintf(stderr, "ntfs_fix_idxrec: Fixing idxrec: %" PRIu64 " Len: %" PRIu32 "\n", (uint64_t) ((uintptr_t) idxrec), len); /* sanity check so we don't run over in the next loop */ if ((unsigned int) ((tsk_getu16(fs->endian, idxrec->upd_cnt) - 1) * ntfs->ssize_b) > len) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_INODE_COR); tsk_error_set_errstr ("fix_idxrec: More Update Sequence Entries than idx record size"); return 1; } /* Apply the update sequence structure template */ upd = (ntfs_upd *) ((uintptr_t) idxrec + tsk_getu16(fs->endian, idxrec->upd_off)); /* Get the sequence value that each 16-bit value should be */ orig_seq = tsk_getu16(fs->endian, upd->upd_val); /* cycle through each sector */ for (i = 1; i < tsk_getu16(fs->endian, idxrec->upd_cnt); i++) { /* The offset into the buffer of the value 
to analyze */ int offset = i * ntfs->ssize_b - 2; uint8_t *new_val, *old_val; /* get the current sequence value */ uint16_t cur_seq = tsk_getu16(fs->endian, (uintptr_t) idxrec + offset); if (cur_seq != orig_seq) { /* get the replacement value */ uint16_t cur_repl = tsk_getu16(fs->endian, &upd->upd_seq + (i - 1) * 2); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_INODE_COR); tsk_error_set_errstr ("fix_idxrec: Incorrect update sequence value in index buffer\nUpdate Value: 0x%" PRIx16 " Actual Value: 0x%" PRIx16 " Replacement Value: 0x%" PRIx16 "\nThis is typically because of a corrupted entry", orig_seq, cur_seq, cur_repl); return 1; } new_val = &upd->upd_seq + (i - 1) * 2; old_val = (uint8_t *) ((uintptr_t) idxrec + offset); if (tsk_verbose) tsk_fprintf(stderr, "ntfs_fix_idxrec: upd_seq %i Replacing: %.4" PRIx16 " With: %.4" PRIx16 "\n", i, tsk_getu16(fs->endian, old_val), tsk_getu16(fs->endian, new_val)); *old_val++ = *new_val++; *old_val = *new_val; } return 0; } /** \internal * Process a directory and load up FS_DIR with the entries. If a pointer to * an already allocated FS_DIR struture is given, it will be cleared. If no existing * FS_DIR structure is passed (i.e. NULL), then a new one will be created. If the return * value is error or corruption, then the FS_DIR structure could * have entries (depending on when the error occured). * * @param a_fs File system to analyze * @param a_fs_dir Pointer to FS_DIR pointer. Can contain an already allocated * structure or a new structure. * @param a_addr Address of directory to process. * @returns error, corruption, ok etc. */ TSK_RETVAL_ENUM ntfs_dir_open_meta(TSK_FS_INFO * a_fs, TSK_FS_DIR ** a_fs_dir, TSK_INUM_T a_addr) { NTFS_INFO *ntfs = (NTFS_INFO *) a_fs; TSK_FS_DIR *fs_dir; const TSK_FS_ATTR *fs_attr_root = NULL; const TSK_FS_ATTR *fs_attr_idx; char *idxalloc; ntfs_idxentry *idxe; ntfs_idxroot *idxroot; ntfs_idxelist *idxelist; ntfs_idxrec *idxrec_p, *idxrec; int off; TSK_OFF_T idxalloc_len; TSK_FS_LOAD_FILE load_file; /* In this function, we will return immediately if we get an error. * If we get corruption though, we will record that in 'retval_final' * and continue processing. 
*/ TSK_RETVAL_ENUM retval_final = TSK_OK; TSK_RETVAL_ENUM retval_tmp; /* sanity check */ if (a_addr < a_fs->first_inum || a_addr > a_fs->last_inum) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); tsk_error_set_errstr("ntfs_dir_open_meta: inode value: %" PRIuINUM "\n", a_addr); return TSK_ERR; } else if (a_fs_dir == NULL) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("ntfs_dir_open_meta: NULL fs_attr argument given"); return TSK_ERR; } if (tsk_verbose) tsk_fprintf(stderr, "ntfs_open_dir: Processing directory %" PRIuINUM "\n", a_addr); fs_dir = *a_fs_dir; if (fs_dir) { tsk_fs_dir_reset(fs_dir); fs_dir->addr = a_addr; } else { if ((*a_fs_dir = fs_dir = tsk_fs_dir_alloc(a_fs, a_addr, 128)) == NULL) { return TSK_ERR; } } // handle the orphan directory if its contents were requested if (a_addr == TSK_FS_ORPHANDIR_INUM(a_fs)) { return tsk_fs_dir_find_orphans(a_fs, fs_dir); } /* Get the inode and verify it has attributes */ if ((fs_dir->fs_file = tsk_fs_file_open_meta(a_fs, NULL, a_addr)) == NULL) { tsk_error_errstr2_concat("- ntfs_dir_open_meta"); return TSK_COR; } if (!(fs_dir->fs_file->meta->attr)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_INODE_COR); tsk_error_set_errstr("dent_walk: Error: Directory address %" PRIuINUM " has no attributes", a_addr); return TSK_COR; } // Update with the sequence number fs_dir->seq = fs_dir->fs_file->meta->seq; /* * Read the Index Root Attribute -- we do some sanity checking here * to report errors before we start to make up data for the "." and ".." * entries */ fs_attr_root = tsk_fs_attrlist_get(fs_dir->fs_file->meta->attr, TSK_FS_ATTR_TYPE_NTFS_IDXROOT); if (!fs_attr_root) { tsk_error_errstr2_concat(" - dent_walk: $IDX_ROOT not found"); return TSK_COR; } if (fs_attr_root->flags & TSK_FS_ATTR_NONRES) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_INODE_COR); tsk_error_set_errstr ("dent_walk: $IDX_ROOT is not resident - it should be"); return TSK_COR; } idxroot = (ntfs_idxroot *) fs_attr_root->rd.buf; /* Verify that the attribute type is $FILE_NAME */ if (tsk_getu32(a_fs->endian, idxroot->type) == 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_INODE_COR); tsk_error_set_errstr ("dent_walk: Attribute type in index root is 0"); return TSK_COR; } else if (tsk_getu32(a_fs->endian, idxroot->type) != NTFS_ATYPE_FNAME) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_INODE_COR); tsk_error_set_errstr("ERROR: Directory index is sorted by type: %" PRIu32 ".\nOnly $FNAME is currently supported", tsk_getu32(a_fs->endian, idxroot->type)); return TSK_COR; } /* Get the header of the index entry list */ idxelist = &idxroot->list; /* Get the offset to the start of the index entry list */ idxe = (ntfs_idxentry *) ((uintptr_t) idxelist + tsk_getu32(a_fs->endian, idxelist->begin_off)); /* * NTFS does not have "." and ".." entries in the index trees * (except for a "." entry in the root directory) * * So, we'll make 'em up by making a TSK_FS_NAME structure for * a '.' and '..' entry and call the action */ if (a_addr != a_fs->root_inum) { // && (flags & TSK_FS_NAME_FLAG_ALLOC)) { TSK_FS_NAME *fs_name; TSK_FS_META_NAME_LIST *fs_name_list; if (tsk_verbose) tsk_fprintf(stderr, "ntfs_dir_open_meta: Creating . and .. entries\n"); if ((fs_name = tsk_fs_name_alloc(16, 0)) == NULL) { return TSK_ERR; } /* * "." 
*/ fs_name->type = TSK_FS_NAME_TYPE_DIR; strcpy(fs_name->name, "."); fs_name->meta_addr = a_addr; if (fs_dir->fs_file->meta->flags & TSK_FS_META_FLAG_UNALLOC) { fs_name->flags = TSK_FS_NAME_FLAG_UNALLOC; /* If the folder was deleted, the MFT entry sequence will have been incremented. * File name entries are not incremented on delete, so make it one less to * be consistent. */ fs_name->meta_seq = fs_dir->fs_file->meta->seq - 1; } else { fs_name->flags = TSK_FS_NAME_FLAG_ALLOC; fs_name->meta_seq = fs_dir->fs_file->meta->seq; } if (tsk_fs_dir_add(fs_dir, fs_name)) { tsk_fs_name_free(fs_name); return TSK_ERR; } /* * ".." */ strcpy(fs_name->name, ".."); fs_name->type = TSK_FS_NAME_TYPE_DIR; /* The fs_name structure holds the parent inode value, so we * just cycle using those */ for (fs_name_list = fs_dir->fs_file->meta->name2; fs_name_list != NULL; fs_name_list = fs_name_list->next) { fs_name->meta_addr = fs_name_list->par_inode; fs_name->meta_seq = fs_name_list->par_seq; if (tsk_fs_dir_add(fs_dir, fs_name)) { tsk_fs_name_free(fs_name); return TSK_ERR; } } tsk_fs_name_free(fs_name); fs_name = NULL; } /* Now we return to processing the Index Root Attribute */ if (tsk_verbose) tsk_fprintf(stderr, "ntfs_dir_open_meta: Processing $IDX_ROOT of inum %" PRIuINUM "\n", a_addr); /* Verify the offset pointers */ if ((tsk_getu32(a_fs->endian, idxelist->seqend_off) < tsk_getu32(a_fs->endian, idxelist->begin_off)) || (tsk_getu32(a_fs->endian, idxelist->bufend_off) < tsk_getu32(a_fs->endian, idxelist->seqend_off)) || (((uintptr_t) idxe + tsk_getu32(a_fs->endian, idxelist->bufend_off)) > ((uintptr_t) fs_attr_root->rd.buf + fs_attr_root->rd.buf_size))) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_INODE_COR); tsk_error_set_errstr ("Error: Index list offsets are invalid on entry: %" PRIuINUM, fs_dir->fs_file->meta->addr); return TSK_COR; } retval_tmp = ntfs_proc_idxentry(ntfs, fs_dir, (fs_dir->fs_file->meta->flags & TSK_FS_META_FLAG_UNALLOC) ? 
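/*
 * Sketch of how the two lengths handed to ntfs_proc_idxentry() fall out of
 * the index element list header checked above: entries between begin_off
 * and seqend_off are in use, while the slack up to bufend_off may still
 * hold deleted entries.  Field names mirror the idxelist usage above; the
 * struct itself is illustrative, not the on-disk layout.
 */
#include <stdint.h>

struct idxelist_sketch {
    uint32_t begin_off;    /* offset from the header to the first index entry */
    uint32_t seqend_off;   /* offset to the end of the entries in use */
    uint32_t bufend_off;   /* offset to the end of the allocated buffer */
};

static int idxlist_lengths(const struct idxelist_sketch *h,
    uint32_t *total_len, uint32_t *used_len)
{
    /* Same ordering requirement as the checks above: begin <= seqend <= bufend. */
    if (h->seqend_off < h->begin_off || h->bufend_off < h->seqend_off)
        return 1;                                  /* corrupt header */
    *total_len = h->bufend_off - h->begin_off;     /* includes slack past the in-use area */
    *used_len = h->seqend_off - h->begin_off;      /* in-use portion; entries found beyond
                                                    * this are reported as unallocated */
    return 0;
}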
1 : 0, idxe, tsk_getu32(a_fs->endian, idxelist->bufend_off) - tsk_getu32(a_fs->endian, idxelist->begin_off), tsk_getu32(a_fs->endian, idxelist->seqend_off) - tsk_getu32(a_fs->endian, idxelist->begin_off)); // stop if we get an error, continue if we got corruption if (retval_tmp == TSK_ERR) { return TSK_ERR; } else if (retval_tmp == TSK_COR) { retval_final = TSK_COR; } /* * get the index allocation attribute if it exists (it doesn't for * small directories */ fs_attr_idx = tsk_fs_attrlist_get(fs_dir->fs_file->meta->attr, TSK_FS_ATTR_TYPE_NTFS_IDXALLOC); /* if we don't have an index alloc then return, we have processed * all of the entries */ if (!fs_attr_idx) { if (tsk_getu32(a_fs->endian, idxelist->flags) & NTFS_IDXELIST_CHILD) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_INODE_COR); tsk_error_set_errstr ("Error: $IDX_ROOT says there should be children, but there isn't"); return TSK_COR; } } else { if (fs_attr_idx->flags & TSK_FS_ATTR_RES) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_INODE_COR); tsk_error_set_errstr ("$IDX_ALLOC is Resident - it shouldn't be"); return TSK_COR; } /* * Copy the index allocation run into a big buffer */ idxalloc_len = fs_attr_idx->nrd.allocsize; if ((idxalloc = (char *)tsk_malloc((size_t) idxalloc_len)) == NULL) { return TSK_ERR; } /* Fill in the loading data structure */ load_file.total = load_file.left = (size_t) idxalloc_len; load_file.cur = load_file.base = idxalloc; if (tsk_verbose) tsk_fprintf(stderr, "ntfs_dir_open_meta: Copying $IDX_ALLOC into buffer\n"); if (tsk_fs_attr_walk(fs_attr_idx, TSK_FS_FILE_WALK_FLAG_SLACK, tsk_fs_load_file_action, (void *) &load_file)) { free(idxalloc); tsk_error_errstr2_concat(" - ntfs_dir_open_meta"); return TSK_COR; // this could be an error though } /* Not all of the directory was copied, so we exit */ if (load_file.left > 0) { free(idxalloc); tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_FWALK); tsk_error_set_errstr("Error reading directory contents: %" PRIuINUM "\n", a_addr); return TSK_COR; } /* * The idxalloc is a big buffer that contains one or more * idx buffer structures. Each idxrec is a node in the B-Tree. * We do not process the tree as a tree because then we could * not find the deleted file names. * * Therefore, we scan the big buffer looking for the index record * structures. We save a pointer to the known beginning (idxrec_p). * Then we scan for the beginning of the next one (idxrec) and process * everything in the middle as an ntfs_idxrec. We can't use the * size given because then we wouldn't see the deleted names */ /* Set the previous pointer to NULL */ idxrec_p = idxrec = NULL; /* Loop by cluster size */ for (off = 0; off < idxalloc_len; off += ntfs->csize_b) { uint32_t list_len, rec_len; idxrec = (ntfs_idxrec *) & idxalloc[off]; if (tsk_verbose) tsk_fprintf(stderr, "ntfs_dir_open_meta: Index Buffer Offset: %d Magic: %" PRIx32 "\n", off, tsk_getu32(a_fs->endian, idxrec->magic)); /* Is this the begining of an index record? 
*/ if (tsk_getu32(a_fs->endian, idxrec->magic) != NTFS_IDXREC_MAGIC) continue; /* idxrec_p is only NULL for the first time * Set it and start again to find the next one */ if (idxrec_p == NULL) { idxrec_p = idxrec; continue; } /* Process the previous structure */ /* idxrec points to the next idxrec structure, idxrec_p * points to the one we are going to process */ rec_len = (uint32_t) ((uintptr_t) idxrec - (uintptr_t) idxrec_p); if (tsk_verbose) tsk_fprintf(stderr, "ntfs_dir_open_meta: Processing previous index record (len: %" PRIu32 ")\n", rec_len); /* remove the update sequence in the index record */ if (ntfs_fix_idxrec(ntfs, idxrec_p, rec_len)) { free(idxalloc); return TSK_COR; } /* Locate the start of the index entry list */ idxelist = &idxrec_p->list; idxe = (ntfs_idxentry *) ((uintptr_t) idxelist + tsk_getu32(a_fs->endian, idxelist->begin_off)); /* the length from the start of the next record to where our * list starts. * This should be the same as bufend_off in idxelist, but we don't * trust it. */ list_len = (uint32_t) ((uintptr_t) idxrec - (uintptr_t) idxe); /* Verify the offset pointers */ if (((uintptr_t) idxe > (uintptr_t) idxrec) || ((uintptr_t) idxelist + tsk_getu32(a_fs->endian, idxelist->seqend_off) > (uintptr_t) idxrec)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_INODE_COR); tsk_error_set_errstr ("Error: Index list offsets are invalid on entry: %" PRIuINUM, fs_dir->fs_file->meta->addr); free(idxalloc); return TSK_COR; } /* process the list of index entries */ retval_tmp = ntfs_proc_idxentry(ntfs, fs_dir, (fs_dir->fs_file->meta-> flags & TSK_FS_META_FLAG_UNALLOC) ? 1 : 0, idxe, list_len, tsk_getu32(a_fs->endian, idxelist->seqend_off) - tsk_getu32(a_fs->endian, idxelist->begin_off)); // stop if we get an error, record if we get corruption if (retval_tmp == TSK_ERR) { free(idxalloc); return TSK_ERR; } else if (retval_tmp == TSK_COR) { retval_final = TSK_COR; } /* reset the pointer to the next record */ idxrec_p = idxrec; } /* end of cluster loop */ /* Process the final record */ if (idxrec_p) { uint32_t list_len, rec_len; /* Length from end of attribute to start of this */ rec_len = (uint32_t) (idxalloc_len - (uintptr_t) idxrec_p - (uintptr_t) idxalloc); if (tsk_verbose) tsk_fprintf(stderr, "ntfs_dir_open_meta: Processing final index record (len: %" PRIu32 ")\n", rec_len); /* remove the update sequence */ if (ntfs_fix_idxrec(ntfs, idxrec_p, rec_len)) { free(idxalloc); return TSK_COR; } idxelist = &idxrec_p->list; idxe = (ntfs_idxentry *) ((uintptr_t) idxelist + tsk_getu32(a_fs->endian, idxelist->begin_off)); /* This is the length of the idx entries */ list_len = (uint32_t) (((uintptr_t) idxalloc + idxalloc_len) - (uintptr_t) idxe); /* Verify the offset pointers */ if ((list_len > rec_len) || ((uintptr_t) idxelist + tsk_getu32(a_fs->endian, idxelist->seqend_off) > (uintptr_t) idxalloc + idxalloc_len)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_INODE_COR); tsk_error_set_errstr ("Error: Index list offsets are invalid on entry: %" PRIuINUM, fs_dir->fs_file->meta->addr); free(idxalloc); return TSK_COR; } /* process the list of index entries */ retval_tmp = ntfs_proc_idxentry(ntfs, fs_dir, (fs_dir->fs_file->meta-> flags & TSK_FS_META_FLAG_UNALLOC) ? 
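/*
 * Standalone sketch of the scanning pattern used on $IDX_ALLOC above: walk
 * the buffer one cluster at a time looking for the index-record magic and
 * hand the whole span between two consecutive records to a callback, so
 * that slack (and therefore deleted names) is processed as well.  The
 * callback, magic bytes, and cluster size are supplied by the caller and
 * are placeholders here.
 */
#include <stdint.h>
#include <stddef.h>
#include <string.h>

static void scan_records_sketch(const uint8_t *buf, size_t buf_len,
    size_t cluster_bytes,                     /* must be non-zero */
    const uint8_t magic[4],                   /* record signature as stored on disk */
    void (*process)(const uint8_t *rec, size_t rec_len))
{
    size_t off;
    const uint8_t *prev = NULL;

    for (off = 0; off + 4 <= buf_len; off += cluster_bytes) {
        if (memcmp(buf + off, magic, 4) != 0)
            continue;                         /* not the start of a record */
        if (prev != NULL)                     /* process the previous record's full span */
            process(prev, (size_t)(buf + off - prev));
        prev = buf + off;
    }
    if (prev != NULL)                         /* the final record runs to the end of the buffer */
        process(prev, (size_t)(buf + buf_len - prev));
}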
1 : 0, idxe, list_len, tsk_getu32(a_fs->endian, idxelist->seqend_off) - tsk_getu32(a_fs->endian, idxelist->begin_off)); // stop if we get an error, record if we get corruption if (retval_tmp == TSK_ERR) { free(idxalloc); return TSK_ERR; } else if (retval_tmp == TSK_COR) { retval_final = TSK_COR; } } free(idxalloc); } // get the orphan files // load and cache the map if it has not already been done tsk_take_lock(&ntfs->orphan_map_lock); if (ntfs->orphan_map == NULL) { // we do this to make it non-NULL. WE had some images that // had no orphan files and it repeatedly did inode_walks // because orphan_map was always NULL getParentMap(ntfs); if (a_fs->inode_walk(a_fs, a_fs->first_inum, a_fs->last_inum, (TSK_FS_META_FLAG_ENUM)(TSK_FS_META_FLAG_UNALLOC | TSK_FS_META_FLAG_ALLOC), ntfs_parent_act, NULL)) { tsk_release_lock(&ntfs->orphan_map_lock); return TSK_ERR; } } /* see if there are any entries in MFT for this dir that we didn't see. * Need to make sure it is for this version (sequence) though. * NTFS Updates the sequence when a directory is deleted and not when * it is allocated. So, if we have a deleted directory, then use * its previous sequence number to find the files that were in it when * it was allocated. */ uint16_t seqToSrch = fs_dir->fs_file->meta->seq; if (fs_dir->fs_file->meta->flags & TSK_FS_META_FLAG_UNALLOC) { if (seqToSrch > 0) seqToSrch--; else // I can't imagine how we get here or what we should do except maybe not do the search. seqToSrch = 0; } if (ntfs_parent_map_exists(ntfs, a_addr, seqToSrch)) { TSK_FS_NAME *fs_name; std::vector &childFiles = ntfs_parent_map_get(ntfs, a_addr, seqToSrch); if ((fs_name = tsk_fs_name_alloc(256, 0)) == NULL) return TSK_ERR; fs_name->type = TSK_FS_NAME_TYPE_UNDEF; fs_name->par_addr = a_addr; fs_name->par_seq = fs_dir->fs_file->meta->seq; for (size_t a = 0; a < childFiles.size(); a++) { TSK_FS_FILE *fs_file_orp = NULL; /* Fill in the basics of the fs_name entry * so we can print in the fls formats */ fs_name->meta_addr = childFiles[a].getAddr(); fs_name->meta_seq = childFiles[a].getSeq(); // lookup the file to get more info (we did not cache that) fs_file_orp = tsk_fs_file_open_meta(a_fs, fs_file_orp, fs_name->meta_addr); if (fs_file_orp) { if (fs_file_orp->meta) { if (fs_file_orp->meta->flags & TSK_FS_META_FLAG_ALLOC) { fs_name->flags = TSK_FS_NAME_FLAG_ALLOC; } else { fs_name->flags = TSK_FS_NAME_FLAG_UNALLOC; /* This sequence is the MFT entry, which gets * incremented when it is unallocated. So, * decrement it back down so that it is more * similar to the usual situation, where the * name sequence is 1 smaller than the meta * sequence. 
*/ fs_name->meta_seq--; } if (fs_file_orp->meta->name2) { TSK_FS_META_NAME_LIST *n2 = fs_file_orp->meta->name2; while (n2) { if (n2->par_inode == a_addr) { strncpy(fs_name->name, n2->name, fs_name->name_size); tsk_fs_dir_add(fs_dir, fs_name); } n2 = n2->next; } } } tsk_fs_file_close(fs_file_orp); } } tsk_fs_name_free(fs_name); } tsk_release_lock(&ntfs->orphan_map_lock); // if we are listing the root directory, add the Orphan directory entry if (a_addr == a_fs->root_inum) { TSK_FS_NAME *fs_name; if ((fs_name = tsk_fs_name_alloc(256, 0)) == NULL) return TSK_ERR; if (tsk_fs_dir_make_orphan_dir_name(a_fs, fs_name)) { tsk_fs_name_free(fs_name); return TSK_ERR; } if (tsk_fs_dir_add(fs_dir, fs_name)) { tsk_fs_name_free(fs_name); return TSK_ERR; } tsk_fs_name_free(fs_name); } return retval_final; } /**************************************************************************** * FIND_FILE ROUTINES * */ #define MAX_DEPTH 128 #define DIR_STRSZ 4096 typedef struct { /* Recursive path stuff */ /* how deep in the directory tree are we */ unsigned int depth; /* pointer in dirs string to where '/' is for given depth */ char *didx[MAX_DEPTH]; /* The current directory name string */ char dirs[DIR_STRSZ]; } NTFS_DINFO; /* * Looks up the parent inode described in fs_name. * * fs_name was filled in by ntfs_find_file and will get the final path * added to it before action is called * * return 1 on error and 0 on success */ static uint8_t ntfs_find_file_rec(TSK_FS_INFO * fs, NTFS_DINFO * dinfo, TSK_FS_FILE * fs_file, TSK_FS_META_NAME_LIST * fs_name_list, TSK_FS_DIR_WALK_CB action, void *ptr) { TSK_FS_FILE *fs_file_par; TSK_FS_META_NAME_LIST *fs_name_list_par; uint8_t decrem = 0; size_t len = 0, i; char *begin = NULL; int retval; if (fs_name_list->par_inode < fs->first_inum || fs_name_list->par_inode > fs->last_inum) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("invalid inode value: %" PRIuINUM "\n", fs_name_list->par_inode); return 1; } fs_file_par = tsk_fs_file_open_meta(fs, NULL, fs_name_list->par_inode); if (fs_file_par == NULL) { tsk_error_errstr2_concat(" - ntfs_find_file_rec"); return 1; } /* * Orphan File * This occurs when the file is deleted and either: * - The parent is no longer a directory * - The sequence number of the parent is no longer correct */ if ((fs_file_par->meta->type != TSK_FS_META_TYPE_DIR) || (fs_file_par->meta->seq != fs_name_list->par_seq)) { const char *str = TSK_FS_ORPHAN_STR; len = strlen(str); /* @@@ There should be a sanity check here to verify that the * previous name was unallocated ... but how do I get it again? */ if ((((uintptr_t) dinfo->didx[dinfo->depth - 1] - len) >= (uintptr_t) & dinfo->dirs[0]) && (dinfo->depth < MAX_DEPTH)) { begin = dinfo->didx[dinfo->depth] = (char *) ((uintptr_t) dinfo->didx[dinfo->depth - 1] - len); dinfo->depth++; decrem = 1; for (i = 0; i < len; i++) begin[i] = str[i]; } retval = action(fs_file, begin, ptr); if (decrem) dinfo->depth--; tsk_fs_file_close(fs_file_par); return (retval == TSK_WALK_ERROR) ? 
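/*
 * Sketch of the path-building strategy used by ntfs_find_file_rec() and the
 * dinfo->dirs buffer: parent names are prepended in front of the previous
 * component, writing backwards from the end of a fixed buffer.  The buffer
 * size and example names below are illustrative only.
 */
#include <stdio.h>
#include <string.h>

#define PATH_SKETCH_BUF 256

static void prepend_component(char *buf, char **cursor, const char *name)
{
    size_t len = strlen(name);

    /* Give up if "/<name>" would run past the front of the buffer, the same
     * way the real code stops when the dirs[] array is exhausted. */
    if ((size_t)(*cursor - buf) < len + 1)
        return;
    *cursor -= len + 1;
    (*cursor)[0] = '/';
    memcpy(*cursor + 1, name, len);
}

static void path_sketch(void)
{
    char buf[PATH_SKETCH_BUF];
    char *cur = &buf[PATH_SKETCH_BUF - 1];

    buf[PATH_SKETCH_BUF - 1] = '\0';          /* start from a terminated, empty tail */
    prepend_component(buf, &cur, "report.txt");
    prepend_component(buf, &cur, "cases");
    prepend_component(buf, &cur, "Documents");
    printf("%s\n", cur);                      /* -> /Documents/cases/report.txt */
}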
1 : 0; } for (fs_name_list_par = fs_file_par->meta->name2; fs_name_list_par != NULL; fs_name_list_par = fs_name_list_par->next) { len = strlen(fs_name_list_par->name); /* do some length checks on the dir structure * if we can't fit it then forget about it */ if ((((uintptr_t) dinfo->didx[dinfo->depth - 1] - len - 1) >= (uintptr_t) & dinfo->dirs[0]) && (dinfo->depth < MAX_DEPTH)) { begin = dinfo->didx[dinfo->depth] = (char *) ((uintptr_t) dinfo->didx[dinfo->depth - 1] - len - 1); dinfo->depth++; decrem = 1; *begin = '/'; for (i = 0; i < len; i++) begin[i + 1] = fs_name_list_par->name[i]; } else { begin = dinfo->didx[dinfo->depth]; decrem = 0; } /* if we are at the root, then fill out the rest of fs_name with * the full path and call the action */ if (fs_name_list_par->par_inode == NTFS_ROOTINO) { /* increase the path by one so that we do not pass the '/' * if we do then the printed result will have '//' at * the beginning */ if (TSK_WALK_ERROR == action(fs_file, (const char *) ((uintptr_t) begin + 1), ptr)) { tsk_fs_file_close(fs_file_par); return 1; } } /* otherwise, recurse some more */ else { if (ntfs_find_file_rec(fs, dinfo, fs_file, fs_name_list_par, action, ptr)) { tsk_fs_file_close(fs_file_par); return 1; } } /* if we incremented before, then decrement the depth now */ if (decrem) dinfo->depth--; } tsk_fs_file_close(fs_file_par); return 0; } /* \ingroup fslib * NTFS can map a meta address to its name much faster than in other file systems * because each entry stores the address of its parent. * * This can not be called with dent_walk because the path * structure will get messed up! * * @param fs File system being analyzed * @param inode_toid Address of file to find the name for. * @param type_toid Attribute type to find the more specific name for (if you want more than just the base file name) * @param type_used 1 if the type_toid value was passed a valid value. 0 otherwise. * @param id_toid Attribute id to find the more specific name for (if you want more than just the base file name) * @param id_used 1 if the id_toid value was passed a valid value. 0 otherwise. * @param dir_walk_flags Flags to use during search * @param action Callback that will be called for each name that uses the specified addresses. 
* @param ptr Pointer that will be passed into action when it is called (so that you can pass in other data) * @returns 1 on error, 0 on sucess */ uint8_t ntfs_find_file(TSK_FS_INFO * fs, TSK_INUM_T inode_toid, uint32_t type_toid, uint8_t type_used, uint16_t id_toid, uint8_t id_used, TSK_FS_DIR_WALK_FLAG_ENUM dir_walk_flags, TSK_FS_DIR_WALK_CB action, void *ptr) { TSK_FS_META_NAME_LIST *fs_name_list; char *attr = NULL; NTFS_DINFO dinfo; TSK_FS_FILE *fs_file; ntfs_mft *mft; TSK_RETVAL_ENUM r_enum; NTFS_INFO *ntfs = (NTFS_INFO *) fs; /* sanity check */ if (inode_toid < fs->first_inum || inode_toid > fs->last_inum) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("ntfs_find_file: invalid inode value: %" PRIuINUM "\n", inode_toid); return 1; } if ((mft = (ntfs_mft *) tsk_malloc(ntfs->mft_rsize_b)) == NULL) { return 1; } r_enum = ntfs_dinode_lookup(ntfs, (char *) mft, inode_toid); if (r_enum == TSK_ERR) { free(mft); return 1; } // open the file to ID fs_file = tsk_fs_file_open_meta(fs, NULL, inode_toid); if (fs_file == NULL) { tsk_error_errstr2_concat("- ntfs_find_file"); tsk_fs_file_close(fs_file); free(mft); return 1; } // see if its allocation status meets the callback needs if ((fs_file->meta->flags & TSK_FS_META_FLAG_ALLOC) && ((dir_walk_flags & TSK_FS_DIR_WALK_FLAG_ALLOC) == 0)) { tsk_fs_file_close(fs_file); free(mft); return 1; } else if ((fs_file->meta->flags & TSK_FS_META_FLAG_UNALLOC) && ((dir_walk_flags & TSK_FS_DIR_WALK_FLAG_UNALLOC) == 0)) { tsk_fs_file_close(fs_file); free(mft); return 1; } /* Allocate a name and fill in some details */ if ((fs_file->name = tsk_fs_name_alloc(NTFS_MAXNAMLEN_UTF8, 0)) == NULL) { free(mft); return 1; } fs_file->name->meta_addr = inode_toid; fs_file->name->meta_seq = 0; fs_file->name->flags = ((tsk_getu16(fs->endian, mft->flags) & NTFS_MFT_INUSE) ? TSK_FS_NAME_FLAG_ALLOC : TSK_FS_NAME_FLAG_UNALLOC); memset(&dinfo, 0, sizeof(NTFS_DINFO)); /* in this function, we use the dinfo->dirs array in the opposite order. 
* we set the end of it to NULL and then prepend the * directories to it * * dinfo->didx[dinfo->depth] will point to where the current level started their * dir name */ dinfo.dirs[DIR_STRSZ - 2] = '/'; dinfo.dirs[DIR_STRSZ - 1] = '\0'; dinfo.didx[0] = &dinfo.dirs[DIR_STRSZ - 2]; dinfo.depth = 1; /* Get the name for the attribute - if specified */ if (type_used) { const TSK_FS_ATTR *fs_attr; if (id_used) fs_attr = tsk_fs_attrlist_get_id(fs_file->meta->attr, (TSK_FS_ATTR_TYPE_ENUM)type_toid, id_toid); else fs_attr = tsk_fs_attrlist_get(fs_file->meta->attr, (TSK_FS_ATTR_TYPE_ENUM)type_toid); if (!fs_attr) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_INODE_COR); tsk_error_set_errstr("find_file: Type %" PRIu32 " Id %" PRIu16 " not found in MFT %" PRIuINUM "", type_toid, id_toid, inode_toid); tsk_fs_file_close(fs_file); free(mft); return 1; } /* only add the attribute name if it is the non-default data stream */ if (fs_attr->name != NULL) attr = fs_attr->name; } /* loop through all the names it may have */ for (fs_name_list = fs_file->meta->name2; fs_name_list != NULL; fs_name_list = fs_name_list->next) { int retval; /* Append on the attribute name, if it exists */ if (attr != NULL) { snprintf(fs_file->name->name, fs_file->name->name_size, "%s:%s", fs_name_list->name, attr); } else { strncpy(fs_file->name->name, fs_name_list->name, fs_file->name->name_size); } /* if this is in the root directory, then call back */ if (fs_name_list->par_inode == NTFS_ROOTINO) { retval = action(fs_file, dinfo.didx[0], ptr); if (retval == TSK_WALK_STOP) { tsk_fs_file_close(fs_file); free(mft); return 0; } else if (retval == TSK_WALK_ERROR) { tsk_fs_file_close(fs_file); free(mft); return 1; } } /* call the recursive function on the parent to get the full path */ else { if (ntfs_find_file_rec(fs, &dinfo, fs_file, fs_name_list, action, ptr)) { tsk_fs_file_close(fs_file); free(mft); return 1; } } } /* end of name loop */ tsk_fs_file_close(fs_file); free(mft); return 0; } int ntfs_name_cmp(TSK_FS_INFO * a_fs_info, const char *s1, const char *s2) { return strcasecmp(s1, s2); } sleuthkit-4.2.0/tsk/fs/rawfs.c000644 000765 000024 00000005176 12576320700 017035 0ustar00carrierstaff000000 000000 /* ** The Sleuth Kit ** ** Brian Carrier [carrier sleuthkit [dot] org] ** Copyright (c) 2006-2011 Brian Carrier, Basis Technology. All Rights reserved ** Copyright (c) 2004-2005 Brian Carrier. All rights reserved ** ** ** This software is distributed under the Common Public License 1.0 ** */ #include "tsk_fs_i.h" /** *\file rawfs.c * Contains internal "raw" specific file system functions. The raw file system is used to process * an arbitrary chunk of data as 512-byte sectors that have no other structure. * This means that you can use the data-level tools, but that is it. Because raw and swapfs * are very similar implementations, they share many of the tsk_fs_nofs_XXX functions, such as * tsk_fs_nofs_close(); */ /** \internal * Open part of a disk image as a raw file system -- which basically means that it has no file system structure. * The data is considered to be in 512-byte sectors. * * @param img_info Disk image to analyze * @param offset Byte offset where "file system" starts * @returns NULL on error */ TSK_FS_INFO * rawfs_open(TSK_IMG_INFO * img_info, TSK_OFF_T offset) { TSK_OFF_T len; TSK_FS_INFO *fs; // clean up any error messages that are lying around tsk_error_reset(); fs = (TSK_FS_INFO *) tsk_fs_malloc(sizeof(TSK_FS_INFO)); if (fs == NULL) return NULL; /* All we need to set are the block sizes and max block size etc. 
*/ fs->img_info = img_info; fs->offset = offset; fs->ftype = TSK_FS_TYPE_RAW; fs->duname = "Sector"; fs->flags = 0; fs->tag = TSK_FS_INFO_TAG; fs->inum_count = 0; fs->root_inum = 0; fs->first_inum = 0; fs->last_inum = 0; len = img_info->size; fs->block_size = 512; fs->block_count = len / fs->block_size; if (len % fs->block_size) fs->block_count++; fs->first_block = 0; fs->last_block = fs->last_block_act = fs->block_count - 1; fs->dev_bsize = img_info->sector_size; /* Pointer to functions */ fs->close = tsk_fs_nofs_close; fs->fsstat = tsk_fs_nofs_fsstat; fs->block_walk = tsk_fs_nofs_block_walk; fs->block_getflags = tsk_fs_nofs_block_getflags; fs->inode_walk = tsk_fs_nofs_inode_walk; fs->file_add_meta = tsk_fs_nofs_file_add_meta; fs->istat = tsk_fs_nofs_istat; fs->get_default_attr_type = tsk_fs_nofs_get_default_attr_type; fs->load_attrs = tsk_fs_nofs_make_data_run; fs->dir_open_meta = tsk_fs_nofs_dir_open_meta; fs->name_cmp = tsk_fs_nofs_name_cmp; fs->jblk_walk = tsk_fs_nofs_jblk_walk; fs->jentry_walk = tsk_fs_nofs_jentry_walk; fs->jopen = tsk_fs_nofs_jopen; fs->journ_inum = 0; return (fs); } sleuthkit-4.2.0/tsk/fs/swapfs.c000644 000765 000024 00000005270 12576320700 017211 0ustar00carrierstaff000000 000000 /* ** The Sleuth Kit ** ** Brian Carrier [carrier sleuthkit [dot] org] ** Copyright (c) 2006-2011 Brian Carrier, Basis Technology. All Rights reserved ** Copyright (c) 2004-2005 Brian Carrier. All rights reserved ** ** ** This software is distributed under the Common Public License 1.0 ** */ #include "tsk_fs_i.h" /** *\file swapfs.c * Contains the internal "swapfs" specific functions. The "swap" file system is used to process * an arbitrary chunk of data as 4096-byte pages that have no other structure. * This means that you can use the data-level tools, but that is it. This is similar to * the rawfs code, but a different block size. This is primarily intended for Unix systems * that have a swap space partition. Much of the code for swap and rawfs are similar and therefore * share tsk_fs_nofs_XXXX functions, such as tsk_fs_nofs_close() */ /** \internal * Open part of a disk image as "swap" space. This assumes no structure exists. * Data are organized into 4096-byte pages. * * @param img_info Disk image to analyze * @param offset Byte offset where swap space starts. * @returns NULL on error */ TSK_FS_INFO * swapfs_open(TSK_IMG_INFO * img_info, TSK_OFF_T offset) { TSK_OFF_T len; TSK_FS_INFO *fs; // clean up any error messages that are lying around tsk_error_reset(); fs = (TSK_FS_INFO *) tsk_fs_malloc(sizeof(*fs)); if (fs == NULL) return NULL; /* All we need to set are the block sizes and max bloc size etc. 
*/ fs->img_info = img_info; fs->offset = offset; fs->ftype = TSK_FS_TYPE_SWAP; fs->duname = "Page"; fs->flags = 0; fs->tag = TSK_FS_INFO_TAG; fs->inum_count = 0; fs->root_inum = 0; fs->first_inum = 0; fs->last_inum = 0; len = img_info->size; fs->block_count = len / 4096; if (len % 4096) fs->block_count++; fs->first_block = 0; fs->last_block = fs->last_block_act = fs->block_count - 1; fs->block_size = 4096; fs->dev_bsize = img_info->sector_size; /* Pointers to functions */ fs->close = tsk_fs_nofs_close; fs->fsstat = tsk_fs_nofs_fsstat; fs->block_walk = tsk_fs_nofs_block_walk; fs->block_getflags = tsk_fs_nofs_block_getflags; fs->inode_walk = tsk_fs_nofs_inode_walk; fs->istat = tsk_fs_nofs_istat; fs->file_add_meta = tsk_fs_nofs_file_add_meta; fs->get_default_attr_type = tsk_fs_nofs_get_default_attr_type; fs->load_attrs = tsk_fs_nofs_make_data_run; fs->dir_open_meta = tsk_fs_nofs_dir_open_meta; fs->name_cmp = tsk_fs_nofs_name_cmp; fs->jblk_walk = tsk_fs_nofs_jblk_walk; fs->jentry_walk = tsk_fs_nofs_jentry_walk; fs->jopen = tsk_fs_nofs_jopen; fs->journ_inum = 0; return (fs); } sleuthkit-4.2.0/tsk/fs/tsk_exfatfs.h000755 000765 000024 00000040252 12576320700 020236 0ustar00carrierstaff000000 000000 /* ** The Sleuth Kit ** ** Copyright (c) 2013 Basis Technology Corp. All rights reserved ** Contact: Brian Carrier [carrier sleuthkit [dot] org] ** ** This software is distributed under the Common Public License 1.0 ** */ /* * This code makes use of research presented in the following paper: * "Reverse Engineering the exFAT File System" by Robert Shullich * Retrieved May 2013 from: * http://www.sans.org/reading_room/whitepapers/forensics/reverse-engineering-microsoft-exfat-file-system_33274 * * Some additional details concerning TexFAT were obtained in May 2013 * from: * http://msdn.microsoft.com/en-us/library/ee490643(v=winembedded.60).aspx */ /** * \file tsk_exfatfs.h * Contains declarations of structures and functions specific to TSK exFAT * file system support. */ #ifndef _TSK_EXFATFS_H #define _TSK_EXFATFS_H #include "tsk_fs_i.h" #include "tsk_fatfs.h" /** * \internal * The first cluster of an exFAT cluster heap (data area) is cluster #2. */ #define EXFATFS_FIRST_CLUSTER 2 /** * \internal * An exFAT volume name directory entry includes from 0 to 15 UTF-16 * characters. */ #define EXFATFS_MAX_VOLUME_LABEL_LEN 15 /** * \internal * AnS exFAT file entry set consists of a file directory entry followed by a * file stream directory entry and at least one file name directory entry. * The file stream and file name entries are the secondary entries. */ #define EXFATFS_MIN_FILE_SECONDARY_DENTRIES_COUNT 2 /** * \internal * An exFAT file entry set consists of a file directory entry followed by a * file stream directory entry and up to seventeen file name directory entries. * The file stream and file name entries are the secondary entries. */ #define EXFATFS_MAX_FILE_SECONDARY_DENTRIES_COUNT 18 /** * \internal * An exFAT file name directory entry includes from 1 to 15 UTF-16 characters. */ #define EXFATFS_MAX_FILE_NAME_SEGMENT_LENGTH 15 /** * \internal * In an exFAT file stream directory entry, the second bit of the general * secondary flags byte is set if there is no FAT chain for a file, i.e., the * file is not fragmented. */ #define EXFATFS_INVALID_FAT_CHAIN_MASK 0x02 /** * Name for an exFAT volume label directory entry that has an empty label, with * the "$" prefix that is used to indicate "special file" directory entries and * non-file directory entries. 
*/ #define EXFATFS_EMPTY_VOLUME_LABEL_DENTRY_NAME "$EMPTY_VOLUME_LABEL" /** * Name for an exFAT volume GUID directory entry, with the "$" prefix that is * used to indicate "special file" directory entries and non-file directory * entries. */ #define EXFATFS_VOLUME_GUID_DENTRY_NAME "$VOLUME_GUID" /** * Name for an exFAT allocation bitmap directory entry, with the "$" prefix * that is used to indicate "special file" directory entries and non-file * directory entries. */ #define EXFATFS_ALLOC_BITMAP_DENTRY_NAME "$ALLOC_BITMAP" /** * Name for an exFAT upcase table directory entry, with the "$" prefix that is * used to indicate "special file" directory entries and non-file directory * entries. */ #define EXFATFS_UPCASE_TABLE_DENTRY_NAME "$UPCASE_TABLE" /** * Name for an exFAT TexFAT directory entry, with the "$" prefix that is used * to indicate "special file" directory entries and non-file directory entries. */ #define EXFATFS_TEX_FAT_DENTRY_NAME "$TEX_FAT" /** * Name for an exFAT access control table directory entry, with the "$" prefix * that is used to indicate "special file" directory entries and non-file * directory entries. */ #define EXFATFS_ACT_DENTRY_NAME "$ACCESS_CONTROL_TABLE" #ifdef __cplusplus extern "C" { #endif /** * \internal * Master boot record (MBR) structure for exFAT file systems. The MBR will * be at least 512 bytes in length, but may be padded for larger sector * sizes. It is part of a larger structure called the volume boot record * (VBR) that includes OEM parameters, reserved space, and a hash value. * There should be both a primary and a backup VBR, so there is a primary * MBR and a backup MBR. */ typedef struct { uint8_t jump_to_boot_code[3]; ///< 0xEB7690 uint8_t fs_name[8]; ///< "EXFAT " uint8_t must_be_zeros[53]; ///< @brief Must be 0x00 uint8_t partition_offset[8]; ///< @brief Sector address uint8_t vol_len_in_sectors[8]; ///< @brief Size of total volume in sectors uint8_t fat_offset[4]; ///< Sector address of first FAT uint8_t fat_len_in_sectors[4]; ///< Size of FAT in sectors uint8_t cluster_heap_offset[4]; ///< Sector address of the data region uint8_t cluster_cnt[4]; ///< Number of clusters in the cluster heap uint8_t root_dir_cluster[4]; ///< Cluster address of the root directory uint8_t vol_serial_no[4]; ///< Volume serial number uint8_t fs_revision[2]; ///< VV.MM uint8_t vol_flags[2]; ///< Flags: ActiveFAT, Volume Dirty, Media Failure, Clear to Zero, and Reserved uint8_t bytes_per_sector; ///< Power of 2. Minimum 2^9 = 512 bytes, maximum 2^12 = 4096 bytes uint8_t sectors_per_cluster; ///< Power of 2. Minimum 2^1 = 2. 
Maximum is dependant on the fact that the max cluster size is 32 MiB uint8_t num_fats; ///< 1 or 2 (only 2 if TexFAT is in use) uint8_t drive_select; ///< Used by INT 13 uint8_t percent_of_cluster_heap_in_use; ///< Percentage of the heap in use uint8_t reserved[7]; ///< Reserved uint8_t boot_code[390]; ///< Boot program uint8_t signature[2]; ///< 0xAA55 } EXFATFS_MASTER_BOOT_REC; /** * exFAT directory entry type byte, containing both the type and * the allocation status */ typedef uint8_t EXFATFS_DIR_ENTRY_TYPE; /** * exFAT directory entry types, the first byte of a directory entry minus the * high order bit (which gives allocation status) */ enum EXFATFS_DIR_ENTRY_TYPE_ENUM { EXFATFS_DIR_ENTRY_TYPE_NONE = 0x00, ///< 0x00 EXFATFS_DIR_ENTRY_TYPE_VOLUME_LABEL = 0x03, ///< 0x03 EXFATFS_DIR_ENTRY_TYPE_VOLUME_GUID = 0x20, ///< 0x20 EXFATFS_DIR_ENTRY_TYPE_ALLOC_BITMAP = 0x01, ///< 0x01 EXFATFS_DIR_ENTRY_TYPE_UPCASE_TABLE = 0x02, ///< 0x02 EXFATFS_DIR_ENTRY_TYPE_TEXFAT = 0x21, ///< 0x21 EXFATFS_DIR_ENTRY_TYPE_ACT = 0x62, ///< 0x62 EXFATFS_DIR_ENTRY_TYPE_FILE = 0x05, ///< 0x05 EXFATFS_DIR_ENTRY_TYPE_FILE_STREAM = 0x40, ///< 0x40 EXFATFS_DIR_ENTRY_TYPE_FILE_NAME = 0x41 ///< 0x41 }; typedef enum EXFATFS_DIR_ENTRY_TYPE_ENUM EXFATFS_DIR_ENTRY_TYPE_ENUM; /** * Volume label directory entry structure for exFAT file systems. This * type of entry should be found only in the root directory. */ typedef struct { uint8_t entry_type; ///< 0x83 normally, 0x03 if the media was formatted without a volume label uint8_t utf16_char_count; ///< Number of characters in the volume label (max 11) uint8_t volume_label[30]; ///< Volume label in UTF16 } EXFATFS_VOL_LABEL_DIR_ENTRY; /** * Volume GUID directory entry structure for exFAT file systems. This type * of entry should be found only in the root directory. */ typedef struct { uint8_t entry_type; ///< 0xA0 uint8_t secondary_entries_count; ///< Always zero uint8_t check_sum[2]; ///< Set checksum uint8_t flags[2]; ///< Flags: Allocation possible, no FAT chain, custom uint8_t volume_guid[16]; ///< Volume GUID uint8_t reserved[10]; ///< Reserved } EXFATFS_VOL_GUID_DIR_ENTRY; /** * Allocation bitmap directory entry structure for exFAT file systems. * There will be one allocation bitmap for exFAT and two for TexFAT * (transactional exFAT). Bit zero of the flags byte is 0 in the directory * entry for the first bitmap and 1 in the directory entry for the second * bitmap. This type of entry should be found only in the root directory. */ typedef struct { uint8_t entry_type; ///< 0x81 uint8_t flags; ///< 0x00 for first bitmap, 0x01 for the second uint8_t reserved[18]; ///< Reserved uint8_t first_cluster_of_bitmap[4]; ///< Cluster address of first data block uint8_t length_of_alloc_bitmap_in_bytes[8]; ///< Length of the data } EXFATFS_ALLOC_BITMAP_DIR_ENTRY; /** * UP-Case table directory entry structure for exFAT file systems. * The UP-Case table is used to convert file names to upper case when * required. This type of entry should be found only in the root directory. */ typedef struct { uint8_t entry_type; ///< 0x82 uint8_t reserved1[3]; ///< Reserved uint8_t table_check_sum[4]; ///< UP-Case table checksum uint8_t reserved2[12]; ///< Reserved uint8_t first_cluster_of_table[4]; ///< Cluster address of first data block uint8_t table_length_in_bytes[8]; ///< Length of the data } EXFATFS_UPCASE_TABLE_DIR_ENTRY; /** * TexFAT (transactional exFAT) directory entry structure for exFAT file * systems. This type of entry should be found only in the root directory. 
*/ typedef struct { uint8_t entry_type; ///< 0xA1 uint8_t reserved[31]; ///< Reserved } EXFATFS_TEXFAT_DIR_ENTRY; /** * Access control table directory entry structure for exFAT file systems. * This type of entry should be found only in the root directory. */ typedef struct { uint8_t entry_type; ///< 0xE2 uint8_t reserved[31]; ///< Reserved } EXFATFS_ACCESS_CTRL_TABLE_DIR_ENTRY; /** * \internal * It will be followed by a stream directory entry and 1-17 file name * entries. The stream and file name entries are secondary entries. A file * entry and its stream and file name entries constitute a file directory * entry set. */ typedef struct { uint8_t entry_type; ///< 0x85 if allocated, 0x05 if deleted uint8_t secondary_entries_count; ///< Number of entries following the primary directory entry (Range: 2 to 18) uint8_t check_sum[2]; ///< Set checksum uint8_t attrs[2]; ///< File attributes uint8_t reserved1[2]; ///< Reserved uint8_t created_time[2]; ///< Time part of DOS time stamp uint8_t created_date[2]; ///< Date part of DOS time stamp uint8_t modified_time[2]; ///< Time part of DOS time stamp uint8_t modified_date[2]; ///< Date part of DOS time stamp uint8_t accessed_time[2]; ///< Time part of DOS time stamp uint8_t accessed_date[2]; ///< Date part of DOS time stamp uint8_t created_time_tenths_of_sec; ///< Tenths of seconds part of a DOS time stamp, range is 0-199 uint8_t modified_time_tenths_of_sec; ///< Tenths of seconds part of a DOS time stamp, range is 0-199 uint8_t created_time_time_zone_offset; ///< Time zone difference to UTC in 15 minute increments uint8_t modified_time_time_zone_offset; ///< Time zone difference to UTC in 15 minute increments uint8_t accessed_time_time_zone_offset; ///< Time zone difference to UTC in 15 minute increments uint8_t reserved2[7]; ///< Reserved } EXFATFS_FILE_DIR_ENTRY; /** * Stream extension directory entry structure for exFAT file systems. * It will be preceded by a file directory entry and followed by 1-17 * file name directory entries. The stream and file name entries are * secondary entries. A file entry and its stream and file name entries * constitute a file directory entry set. */ typedef struct { uint8_t entry_type; ///< 0xC0 if allocated, 0x40 if deleted uint8_t flags; ///< Flags: Allocation possible, no FAT chain, custom uint8_t reserved1; ///< Reserved uint8_t file_name_length; ///< Length of UTF16 name contained in following file name directory entries uint8_t file_name_hash[2]; ///< Hash of up-cased file name uint8_t reserved2[2]; ///< Reserved uint8_t valid_data_length[8]; ///< How much actual data has been written to the file. Must be less than data_length uint8_t reserved3[4]; ///< Reserved uint8_t first_cluster_addr[4]; ///< Cluster address of first data block uint8_t data_length[8]; ///< Length of the data. Max 256M for directories } EXFATFS_FILE_STREAM_DIR_ENTRY; /** * File name directory entry structure for exFAT file systems. * It will be preceded by 0-16 file name entries, a stream entry, and * a file entry. A file entry and its stream and file name entries * constitute a file directory entry set. Note that file names are not * null-terminated. The length of a file name is stored in the file stream * entry of the file directory entry set. 
*/ typedef struct { uint8_t entry_type; ///< 0xC1 if allocated, 0x41 if deleted uint8_t flags; ///< Flags: Allocation possible, no FAT chain, custom uint8_t utf16_name_chars[30]; ///< UTF16 part of file name, max 15 characters } EXFATFS_FILE_NAME_DIR_ENTRY; extern uint8_t exfatfs_open(FATFS_INFO *a_fatfs); extern int8_t exfatfs_is_cluster_alloc(FATFS_INFO *a_fatfs, TSK_DADDR_T a_cluster_addr); extern uint8_t exfatfs_fsstat(TSK_FS_INFO *a_fs, FILE *a_hFile); extern uint8_t exfatfs_is_dentry(FATFS_INFO *a_fatfs, FATFS_DENTRY *a_dentry, FATFS_DATA_UNIT_ALLOC_STATUS_ENUM a_cluster_is_alloc, uint8_t a_do_basic_tests_only); extern uint8_t exfatfs_is_vol_label_dentry(FATFS_DENTRY *a_dentry, FATFS_DATA_UNIT_ALLOC_STATUS_ENUM a_cluster_is_alloc); extern uint8_t exfatfs_is_vol_guid_dentry(FATFS_DENTRY *a_dentry, FATFS_DATA_UNIT_ALLOC_STATUS_ENUM a_alloc_status); extern uint8_t exfatfs_is_alloc_bitmap_dentry(FATFS_DENTRY *a_dentry, FATFS_DATA_UNIT_ALLOC_STATUS_ENUM a_alloc_status, FATFS_INFO *a_fatfs); extern uint8_t exfatfs_is_upcase_table_dentry(FATFS_DENTRY *a_dentry, FATFS_DATA_UNIT_ALLOC_STATUS_ENUM a_alloc_status, FATFS_INFO *a_fatfs); extern uint8_t exfatfs_is_texfat_dentry(FATFS_DENTRY *a_dentry, FATFS_DATA_UNIT_ALLOC_STATUS_ENUM a_alloc_status); extern uint8_t exfatfs_is_access_ctrl_table_dentry(FATFS_DENTRY *a_dentry, FATFS_DATA_UNIT_ALLOC_STATUS_ENUM a_alloc_status); extern uint8_t exfatfs_is_file_dentry(FATFS_DENTRY *a_dentry, FATFS_INFO *a_fatfs); extern uint8_t exfatfs_is_file_dentry_standalone(FATFS_DENTRY *a_dentry, TSK_ENDIAN_ENUM a_endian); extern uint8_t exfatfs_is_file_stream_dentry(FATFS_DENTRY *a_dentry, FATFS_INFO *a_fatfs); extern uint8_t exfatfs_is_file_stream_dentry_standalone(FATFS_DENTRY *a_dentry, TSK_ENDIAN_ENUM a_endian, uint64_t a_cluster_heap_size, TSK_DADDR_T a_last_cluster); extern uint8_t exfatfs_find_file_stream_dentry(FATFS_INFO *a_fatfs, TSK_INUM_T a_file_entry_inum, TSK_DADDR_T a_sector, uint8_t a_sector_is_alloc, EXFATFS_DIR_ENTRY_TYPE a_file_dentry_type, FATFS_DENTRY *a_stream_dentry); extern uint8_t exfatfs_is_file_name_dentry(FATFS_DENTRY *a_dentry); extern TSK_RETVAL_ENUM exfatfs_dinode_copy(FATFS_INFO *a_fatfs, TSK_INUM_T a_inum, FATFS_DENTRY *a_dentry, uint8_t a_is_alloc, TSK_FS_FILE *a_fs_file); extern uint8_t exfatfs_inode_lookup(FATFS_INFO *a_fatfs, TSK_FS_FILE *a_fs_file, TSK_INUM_T a_inum); extern uint8_t exfatfs_istat_attr_flags(FATFS_INFO *a_fatfs, TSK_INUM_T a_inum, FILE *a_hFile); extern uint8_t exfatfs_inode_walk_should_skip_dentry(FATFS_INFO *a_fatfs, TSK_INUM_T a_inum, FATFS_DENTRY *a_dentry, unsigned int a_selection_flags, int a_cluster_is_alloc); extern uint8_t exfatfs_get_alloc_status_from_type(EXFATFS_DIR_ENTRY_TYPE a_dir_entry_type); extern EXFATFS_DIR_ENTRY_TYPE_ENUM exfatfs_get_enum_from_type(EXFATFS_DIR_ENTRY_TYPE a_dir_entry_type); extern TSK_RETVAL_ENUM exfatfs_dent_parse_buf(FATFS_INFO *a_fatfs, TSK_FS_DIR *a_fs_dir, char *a_buf, TSK_OFF_T a_buf_len, TSK_DADDR_T *a_sector_addrs); #ifdef __cplusplus } #endif #endif sleuthkit-4.2.0/tsk/fs/tsk_ext2fs.h000644 000765 000024 00000057564 12576320700 020024 0ustar00carrierstaff000000 000000 /* ** The Sleuth Kit ** ** Brian Carrier [carrier sleuthkit [dot] org] ** Copyright (c) 2003-2011 Brian Carrier. All rights reserved ** ** TASK ** Copyright (c) 2002 Brian Carrier, @stake Inc. All rights reserved ** ** This software is distributed under the Common Public License 1.0 */ /* * Contains the structures and function APIs for EXT2FS file system support. 
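 *
 * As an illustrative sketch (not part of the original header), the split
 * high/low byte arrays used throughout these structures are combined with the
 * ext4_getu48() and ext4_getu64() macros defined below, e.g. the 48-bit
 * starting block of an extent (see ext2fs_extent below):
 *
 *   TSK_DADDR_T blk = ext4_getu48(fs->endian, extent->ee_start_hi, extent->ee_start_lo);
 *
 * where fs is assumed to be a TSK_FS_INFO pointer and extent an ext2fs_extent pointer.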
*/ #ifndef _TSK_EXT2FS_H #define _TSK_EXT2FS_H #ifdef __cplusplus extern "C" { #endif typedef uint64_t EXT2_GRPNUM_T; #define PRI_EXT2GRP PRIu64 /** \internal * Read a 48-bit unsigned value. * @param endian Flag that identifies local ordering. * @param x 16-bit MSB byte array to read from * @param y 32-bit byte array to read from * @returns 48-bit unsigned value */ #define ext4_getu48(endian, x, y) \ (uint64_t)( ((endian) == TSK_LIT_ENDIAN) ? \ ((uint64_t) \ ((uint64_t)((uint8_t *)(y))[0] << 0) + \ ((uint64_t)((uint8_t *)(y))[1] << 8) + \ ((uint64_t)((uint8_t *)(y))[2] << 16) + \ ((uint64_t)((uint8_t *)(y))[3] << 24) + \ ((uint64_t)((uint8_t *)(x))[0] << 32) + \ ((uint64_t)((uint8_t *)(x))[1] << 40)) \ : \ ((uint64_t) \ ((uint64_t)((uint8_t *)(y))[3] << 0) + \ ((uint64_t)((uint8_t *)(y))[2] << 8) + \ ((uint64_t)((uint8_t *)(y))[1] << 16) + \ ((uint64_t)((uint8_t *)(y))[0] << 24) + \ ((uint64_t)((uint8_t *)(x))[1] << 32) + \ ((uint64_t)((uint8_t *)(x))[0] << 40)) )\ /** \internal * Read a 48-bit unsigned value. * @param endian Flag that identifies local ordering. * @param x 32-bit MSB byte array to read from * @param y 32-bit byte array to read from * @returns 48-bit unsigned value */ #define ext4_getu64(endian, x, y) \ (uint64_t)( ((endian) == TSK_LIT_ENDIAN) ? \ ((uint64_t) \ ((uint64_t)((uint8_t *)(y))[0] << 0) + \ ((uint64_t)((uint8_t *)(y))[1] << 8) + \ ((uint64_t)((uint8_t *)(y))[2] << 16) + \ ((uint64_t)((uint8_t *)(y))[3] << 24) + \ ((uint64_t)((uint8_t *)(x))[0] << 32) + \ ((uint64_t)((uint8_t *)(x))[1] << 40) + \ ((uint64_t)((uint8_t *)(x))[2] << 48) + \ ((uint64_t)((uint8_t *)(x))[3] << 56))\ : \ ((uint64_t) \ ((uint64_t)((uint8_t *)(y))[3] << 0) + \ ((uint64_t)((uint8_t *)(y))[2] << 8) + \ ((uint64_t)((uint8_t *)(y))[1] << 16) + \ ((uint64_t)((uint8_t *)(y))[0] << 24) + \ ((uint64_t)((uint8_t *)(x))[3] << 32) + \ ((uint64_t)((uint8_t *)(x))[2] << 40) + \ ((uint64_t)((uint8_t *)(x))[1] << 48) + \ ((uint64_t)((uint8_t *)(x))[0] << 56)) )\ /* ** Constants */ #define EXT2FS_FIRSTINO 1 /* inode 1 contains the bad blocks */ #define EXT2FS_ROOTINO 2 /* location of root directory inode */ #define EXT2FS_NDADDR 12 #define EXT2FS_NIADDR 3 #define EXT2FS_SBOFF 1024 #define EXT2FS_FS_MAGIC 0xef53 #define EXT2FS_MAXNAMLEN 255 #define EXT2FS_MAXPATHLEN 4096 #define EXT2FS_MIN_BLOCK_SIZE 1024 #define EXT2FS_MAX_BLOCK_SIZE 4096 #define EXT2FS_FILE_CONTENT_LEN ((EXT2FS_NDADDR + EXT2FS_NIADDR) * sizeof(TSK_DADDR_T)) /* ** Super Block */ typedef struct { uint8_t s_inodes_count[4]; /* u32 */ uint8_t s_blocks_count[4]; /* u32 */ uint8_t s_r_blocks_count[4]; uint8_t s_free_blocks_count[4]; /* u32 */ uint8_t s_free_inode_count[4]; /* u32 */ uint8_t s_first_data_block[4]; /* u32 */ uint8_t s_log_block_size[4]; /* u32 */ uint8_t s_log_frag_size[4]; /* s32 */ uint8_t s_blocks_per_group[4]; /* u32 */ uint8_t s_frags_per_group[4]; /* u32 */ uint8_t s_inodes_per_group[4]; /* u32 */ uint8_t s_mtime[4]; /* u32 *//* mount time */ uint8_t s_wtime[4]; /* u32 *//* write time */ uint8_t s_mnt_count[2]; /* u16 *//* mount count */ uint8_t s_max_mnt_count[2]; /* s16 */ uint8_t s_magic[2]; /* u16 */ uint8_t s_state[2]; /* u16 *//* fs state */ uint8_t s_errors[2]; /* u16 */ uint8_t s_minor_rev_level[2]; /* u16 */ uint8_t s_lastcheck[4]; /* u32 */ uint8_t s_checkinterval[4]; /* u32 */ uint8_t s_creator_os[4]; /* u32 */ uint8_t s_rev_level[4]; /* u32 */ uint8_t s_def_resuid[2]; /* u16 */ uint8_t s_def_resgid[2]; /* u16 */ uint8_t s_first_ino[4]; /* u32 */ uint8_t s_inode_size[2]; /* u16 */ uint8_t s_block_group_nr[2]; /* 
u16 */ uint8_t s_feature_compat[4]; /* u32 */ uint8_t s_feature_incompat[4]; /* u32 */ uint8_t s_feature_ro_compat[4]; /* u32 */ uint8_t s_uuid[16]; /* u8[16] */ char s_volume_name[16]; char s_last_mounted[64]; uint8_t s_algorithm_usage_bitmap[4]; /* u32 */ uint8_t s_prealloc_blocks; /* u8 */ uint8_t s_prealloc_dir_blocks; /* u8 */ union pad_or_gdt { uint8_t s_padding1[2]; /* u16 */ uint8_t s_reserved_gdt_blocks[2]; /*u16 */ } pad_or_gdt; /* Valid if EXT2_FEATURE_COMPAT_HAS_JOURNAL */ uint8_t s_journal_uuid[16]; /* u8[16] */ uint8_t s_journal_inum[4]; /* u32 */ uint8_t s_journal_dev[4]; /* u32 */ uint8_t s_last_orphan[4]; /* u32 */ uint8_t s_hash_seed[16]; /* u32[4] */ uint8_t s_def_hash_version; /* u8 */ uint8_t s_jnl_backup_type; /* u8 */ uint8_t s_desc_size[2]; /* u16 */ uint8_t s_default_mount_opts[4]; /* u32 */ uint8_t s_first_meta_bg[4]; /* u32 */ uint8_t s_mkfs_time[4]; /* u32 */ uint8_t s_jnl_blocks[17 * 4]; /* u32[17] */ /* Valid if EXT4_FEATURE_INCOMPAT_64BIT*/ uint8_t s_blocks_count_hi[4]; /* u32 */ uint8_t s_r_blocks_count_hi[4]; /* u32 */ uint8_t s_free_blocks_count_hi[4]; /* u32 */ uint8_t s_min_extra_isize[2]; /* u16 */ uint8_t s_want_extra_isize[2]; /* u16 */ uint8_t s_flags[4]; /* u32 */ uint8_t s_raid_stride[2]; /* u16 */ uint8_t s_mmp_interval[2]; /* u16 */ uint8_t s_mmp_block[8]; /* u64 */ uint8_t s_raid_stripe_width[4]; /* u32 */ uint8_t s_log_groups_per_flex; /* u8 */ uint8_t s_reserved_char_pad; /* u8 */ uint8_t s_reserved_pad[2]; /* u16 */ uint8_t s_kbytes_written[8]; /* u64 */ uint8_t s_snapshot_inum[4]; /* u32 */ uint8_t s_snapshot_id[4]; /* u32 */ uint8_t s_snapshot_r_blocks_count[8]; /* u64 */ uint8_t s_snapshot_list[4]; /* u32 */ uint8_t s_error_count[4]; /* u32 */ uint8_t s_first_error_time[4]; /* u32 */ uint8_t s_first_error_ino[4]; /* u32 */ uint8_t s_first_error_block[8]; /* u64 */ uint8_t s_first_error_func[32]; /* u8[32] */ uint8_t s_first_error_line[4]; /* u32 */ uint8_t s_last_error_time[4]; /* u32 */ uint8_t s_last_error_ino[4]; /* u32 */ uint8_t s_last_error_line[4]; /* u32 */ uint8_t s_last_error_block[8]; /* u64 */ uint8_t s_last_error_func[32]; /* u8[32] */ uint8_t s_mount_opts[64]; /* u8[64] */ uint8_t s_usr_quota_inum[4]; /* u32 */ uint8_t s_grp_quota_inum[4]; /* u32 */ uint8_t s_overhead_clusters[4]; /* u32 */ uint8_t s_padding[109 * 4]; } ext2fs_sb; /* File system State Values */ #define EXT2FS_STATE_VALID 0x0001 /* unmounted correctly */ #define EXT2FS_STATE_ERROR 0x0002 /* errors detected */ /* Operating System Codes */ #define EXT2FS_OS_LINUX 0 #define EXT2FS_OS_HURD 1 #define EXT2FS_OS_MASIX 2 #define EXT2FS_OS_FREEBSD 3 #define EXT2FS_OS_LITES 4 /* Revision Levels */ #define EXT2FS_REV_ORIG 0 #define EXT2FS_REV_DYN 1 /* feature flags */ #define EXT2FS_HAS_COMPAT_FEATURE(fs,sb,mask)\ ((tsk_getu32(fs->endian,sb->s_feature_compat) & mask) != 0) #define EXT2FS_FEATURE_COMPAT_DIR_PREALLOC 0x0001 #define EXT2FS_FEATURE_COMPAT_IMAGIC_INODES 0x0002 #define EXT2FS_FEATURE_COMPAT_HAS_JOURNAL 0x0004 #define EXT2FS_FEATURE_COMPAT_EXT_ATTR 0x0008 #define EXT2FS_FEATURE_COMPAT_RESIZE_INO 0x0010 #define EXT2FS_FEATURE_COMPAT_DIR_INDEX 0x0020 #define EXT2FS_HAS_INCOMPAT_FEATURE(fs,sb,mask)\ ((tsk_getu32(fs->endian,sb->s_feature_incompat) & mask) != 0) #define EXT2FS_FEATURE_INCOMPAT_COMPRESSION 0x0001 #define EXT2FS_FEATURE_INCOMPAT_FILETYPE 0x0002 #define EXT2FS_FEATURE_INCOMPAT_RECOVER 0x0004 #define EXT2FS_FEATURE_INCOMPAT_JOURNAL_DEV 0x0008 #define EXT2FS_FEATURE_INCOMPAT_META_BG 0x0010 #define EXT2FS_FEATURE_INCOMPAT_EXTENTS 0x0040 #define 
EXT2FS_FEATURE_INCOMPAT_64BIT 0x0080 #define EXT2FS_FEATURE_INCOMPAT_MMP 0x0100 #define EXT2FS_FEATURE_INCOMPAT_FLEX_BG 0x0200 #define EXT2FS_FEATURE_INCOMPAT_EA_INODE 0x0400 #define EXT2FS_FEATURE_INCOMPAT_DIRDATA 0x1000 #define EXT4FS_FEATURE_INCOMPAT_INLINEDATA 0x2000 /* data in inode */ #define EXT4FS_FEATURE_INCOMPAT_LARGEDIR 0x4000 /* >2GB or 3-lvl htree */ #define EXT2FS_HAS_RO_COMPAT_FEATURE(fs,sb,mask)\ ((tsk_getu32(fs->endian,sb->s_feature_ro_compat) & mask) != 0) #define EXT2FS_FEATURE_RO_COMPAT_SPARSE_SUPER 0x0001 #define EXT2FS_FEATURE_RO_COMPAT_LARGE_FILE 0x0002 #define EXT2FS_FEATURE_RO_COMPAT_BTREE_DIR 0x0004 #define EXT2FS_FEATURE_RO_COMPAT_HUGE_FILE 0x0008 #define EXT2FS_FEATURE_RO_COMPAT_GDT_CSUM 0x0010 #define EXT2FS_FEATURE_RO_COMPAT_DIR_NLINK 0x0020 #define EXT2FS_FEATURE_RO_COMPAT_EXTRA_ISIZE 0x0040 #define EXT4FS_FEATURE_RO_COMPAT_QUOTA 0x0100 #define EXT4FS_FEATURE_RO_COMPAT_BIGALLOC 0x0200 #define EXT4FS_FEATURE_RO_COMPAT_METADATA_CSUM 0x0400 /* * Group Descriptor */ typedef struct ext2fs_gd { uint8_t bg_block_bitmap[4]; /* u32: block of blocks bitmap */ uint8_t bg_inode_bitmap[4]; /* u32: block of inodes bitmap */ uint8_t bg_inode_table[4]; /* u32: block of inodes table */ uint8_t bg_free_blocks_count[2]; /* u16: num of free blocks */ uint8_t bg_free_inodes_count[2]; /* u16: num of free inodes */ uint8_t bg_used_dirs_count[2]; /* u16: num of use directories */ uint8_t f1[14]; } ext2fs_gd; #define EXT4_BG_INODE_UNINIT 0x0001 /* Inode table/bitmap not in use */ #define EXT4_BG_BLOCK_UNINIT 0x0002 /* Block bitmap not in use */ #define EXT4_BG_INODE_ZEROED 0x0004 /* On-disk itable initialized to zero */ #define EXT4BG_HAS_FLAG(fs,gd,flag)\ ((tsk_getu16(fs->endian,gd->bg_flags) & flag) != 0) typedef struct ext4fs_gd { uint8_t bg_block_bitmap_lo[4]; /* u32 */ uint8_t bg_inode_bitmap_lo[4]; /* u32 */ uint8_t bg_inode_table_lo[4]; /* u32 */ uint8_t bg_free_blocks_count_lo[2]; /* u16 */ uint8_t bg_free_inodes_count_lo[2]; /* u16 */ uint8_t bg_used_dirs_count_lo[2]; /* u16 */ uint8_t bg_flags[2]; /* u16 */ uint8_t bg_reserved[4 * 2]; /* u32 */ uint8_t bg_itable_unused_lo[2]; /* u16 */ uint8_t bg_checksum[2]; /* u16 */ uint8_t bg_block_bitmap_hi[4]; /* u32 */ uint8_t bg_inode_bitmap_hi[4]; /* u32 */ uint8_t bg_inode_table_hi[4]; /* u32 */ uint8_t bg_free_blocks_count_hi[2]; /* u16 */ uint8_t bg_free_inodes_count_hi[2]; /* u16 */ uint8_t bg_used_dirs_count_hi[2]; /* u16 */ uint8_t bg_itable_unused_hi[2]; /* u16 */ uint8_t bg_reserved2[4 * 3]; /* u32 */ } ext4fs_gd; /* data address to group number */ #define ext2_dtog_lcl(fsi, fs, d) \ (EXT2_GRPNUM_T)(((d) - tsk_getu32(fsi->endian, fs->s_first_data_block)) / \ tsk_getu32(fsi->endian, fs->s_blocks_per_group)) /* first fragment of group */ #define ext2_cgbase_lcl(fsi, fs, c) \ ((TSK_DADDR_T)((tsk_getu32(fsi->endian, fs->s_blocks_per_group) * (c)) + \ tsk_getu32(fsi->endian, fs->s_first_data_block))) #define ext4_cgbase_lcl(fsi, fs, c) \ ((TSK_DADDR_T)((uint64_t)(tsk_getu32(fsi->endian, fs->s_blocks_per_group) * (uint64_t)(c)) + \ (uint64_t)tsk_getu32(fsi->endian, fs->s_first_data_block))) /* * Inode */ typedef struct { uint8_t i_mode[2]; /* u16 */ uint8_t i_uid[2]; /* u16 */ uint8_t i_size[4]; /* u32 */ uint8_t i_atime[4]; /* u32 */ uint8_t i_ctime[4]; /* u32 */ uint8_t i_mtime[4]; /* u32 */ uint8_t i_dtime[4]; /* u32 */ uint8_t i_gid[2]; /* u16 */ uint8_t i_nlink[2]; /* u16 */ uint8_t i_nblk[4]; uint8_t i_flags[4]; uint8_t i_f5[4]; uint8_t i_block[15][4]; /*s32 */ uint8_t i_generation[4]; uint8_t i_file_acl[4]; uint8_t 
i_size_high[4]; /* u32 - also i_dir_acl for non-regular */ uint8_t i_faddr[4]; uint8_t i_frag; uint8_t i_fsize; uint8_t f1[2]; uint8_t i_uid_high[2]; /* u16 */ uint8_t i_gid_high[2]; /* u16 */ uint8_t f7[4]; /* u32 */ uint8_t i_extra_isize[2]; /* u16 */ uint8_t i_pad1[2]; /* u16 */ uint8_t i_ctime_extra[4]; /* u32 */ uint8_t i_mtime_extra[4]; /* u32 */ uint8_t i_atime_extra[4]; /* u32 */ uint8_t i_crtime[4]; /* u32 */ uint8_t i_crtime_extra[4]; /* u32 */ uint8_t i_version_hi[4]; /* u32 */ } ext2fs_inode; typedef struct ext2fs_extent { uint8_t ee_block[4]; /* u32 */ uint8_t ee_len[2]; /* u16 */ uint8_t ee_start_hi[2]; /* u16 */ uint8_t ee_start_lo[4]; /* u32 */ } ext2fs_extent; typedef struct ext2fs_extent_idx { uint8_t ei_block[4]; /* u32 */ uint8_t ei_leaf_lo[4]; /* u32 */ uint8_t ei_leaf_hi[2]; /* u16 */ uint8_t ei_unused[2]; /* u16 */ } ext2fs_extent_idx; typedef struct ext2fs_extent_header { uint8_t eh_magic[2]; /* u16 */ uint8_t eh_entries[2]; /* u16 */ uint8_t eh_max[2]; /* u16 */ uint8_t eh_depth[2]; /* u16 */ uint8_t eh_generation[4]; /* u32 */ } ext2fs_extent_header; /* MODE */ #define EXT2_IN_FMT 0170000 #define EXT2_IN_SOCK 0140000 #define EXT2_IN_LNK 0120000 #define EXT2_IN_REG 0100000 #define EXT2_IN_BLK 0060000 #define EXT2_IN_DIR 0040000 #define EXT2_IN_CHR 0020000 #define EXT2_IN_FIFO 0010000 #define EXT2_IN_ISUID 0004000 #define EXT2_IN_ISGID 0002000 #define EXT2_IN_ISVTX 0001000 #define EXT2_IN_IRUSR 0000400 #define EXT2_IN_IWUSR 0000200 #define EXT2_IN_IXUSR 0000100 #define EXT2_IN_IRGRP 0000040 #define EXT2_IN_IWGRP 0000020 #define EXT2_IN_IXGRP 0000010 #define EXT2_IN_IROTH 0000004 #define EXT2_IN_IWOTH 0000002 #define EXT2_IN_IXOTH 0000001 #define EXT2_IN_SECDEL 0x00000001 /* Secure deletion */ #define EXT2_IN_UNRM 0x00000002 /* Undelete */ #define EXT2_IN_COMP 0x00000004 /* Compress file */ #define EXT2_IN_SYNC 0x00000008 /* Synchronous updates */ #define EXT2_IN_IMM 0x00000010 /* Immutable file */ #define EXT2_IN_APPEND 0x00000020 /* writes to file may only append */ #define EXT2_IN_NODUMP 0x00000040 /* do not dump file */ #define EXT2_IN_NOA 0x00000080 /* do not update atime */ #define EXT2_IN_DIRTY 0x00000100 #define EXT2_IN_COMPRBLK 0x00000200 /* One or more compressed clusters */ #define EXT2_IN_NOCOMPR 0x00000400 /* Don't compress */ #define EXT2_IN_ECOMPR 0x00000800 /* Compression error */ #define EXT2_IN_INDEX 0x00001000 /* hash-indexed directory */ #define EXT2_IN_IMAGIC 0x00002000 /* AFS directory */ #define EXT2_IN_JOURNAL_DATA 0x00004000 /* file data should be journaled */ #define EXT2_IN_NOTAIL 0x00008000 /* file tail should not be merged */ #define EXT2_IN_DIRSYNC 0x00010000 /* dirsync behaviour (directories only) */ #define EXT2_IN_TOPDIR 0x00020000 /* Top of directory hierarchies */ #define EXT2_IN_HUGE_FILE 0x00040000 /* Set to each huge file */ #define EXT2_IN_EXTENTS 0x00080000 /* Inode uses extents */ #define EXT2_IN_EA_INODE 0x00200000 /* Inode used for large EA */ #define EXT2_IN_EOFBLOCKS 0x00400000 /* Blocks allocated beyond EOF */ #define EXT2_IN_RESERVED 0x80000000 /* reserved for ext4 lib */ #define EXT2_IN_USER_VISIBLE 0x004BDFFF /* User visible flags */ #define EXT2_IN_USER_MODIFIABLE 0x004B80FF /* User modifiable flags */ /* * directory entries */ typedef struct { uint8_t inode[4]; /* u32 */ uint8_t rec_len[2]; /* u16 */ uint8_t name_len[2]; /* u16 */ char name[EXT2FS_MAXNAMLEN]; } ext2fs_dentry1; /* new structure starting at 2.2 */ typedef struct { uint8_t inode[4]; /* u32 */ uint8_t rec_len[2]; /* u16 */ uint8_t name_len; uint8_t type; 
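/* Illustrative note (not part of the original header): directory entries are
 * variable-length records; name below holds only name_len bytes (not the full
 * EXT2FS_MAXNAMLEN array) and the next entry starts rec_len bytes after this
 * one, e.g. (sketch, assuming buf points to a directory block):
 *
 *   ext2fs_dentry2 *de = (ext2fs_dentry2 *) buf;
 *   uint16_t rlen = tsk_getu16(fs->endian, de->rec_len);
 *   ext2fs_dentry2 *next = (ext2fs_dentry2 *) ((char *) de + rlen);
 */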
char name[EXT2FS_MAXNAMLEN]; } ext2fs_dentry2; #define EXT2FS_DIRSIZ_lcl(len) \ ((len + 8 + 3) & ~(3)) /* Ext2 directory file types */ #define EXT2_DE_UNKNOWN 0 #define EXT2_DE_REG 1 #define EXT2_DE_DIR 2 #define EXT2_DE_CHR 3 #define EXT2_DE_BLK 4 #define EXT2_DE_FIFO 5 #define EXT2_DE_SOCK 6 #define EXT2_DE_LNK 7 #define EXT2_DE_MAX 8 #define EXT2_DE_V1 1 #define EXT2_DE_V2 2 /* Extended Attributes */ #define EXT2_EA_MAGIC 0xEA020000 typedef struct { uint8_t magic[4]; uint8_t refcount[4]; uint8_t blocks[4]; uint8_t hash[4]; uint8_t f1[16]; uint8_t entry; } ext2fs_ea_header; #define EXT2_EA_IDX_USER 1 #define EXT2_EA_IDX_POSIX_ACL_ACCESS 2 #define EXT2_EA_IDX_POSIX_ACL_DEFAULT 3 #define EXT2_EA_IDX_TRUSTED 4 #define EXT2_EA_IDX_LUSTRE 5 #define EXT2_EA_IDX_SECURITY 6 /* Entries follow the header and are aligned to 4-byte boundaries * the value of the attribute is stored at the bottom of the block */ typedef struct { uint8_t nlen; uint8_t nidx; uint8_t val_off[2]; uint8_t val_blk[4]; uint8_t val_size[4]; uint8_t hash[4]; uint8_t name; } ext2fs_ea_entry; #define EXT2_EA_LEN(nlen) \ ((((nlen) + 19 ) / 4) * 4) typedef struct { uint8_t ver[4]; } ext2fs_pos_acl_head; #define EXT2_PACL_TAG_USERO 0x01 #define EXT2_PACL_TAG_USER 0x02 #define EXT2_PACL_TAG_GRPO 0x04 #define EXT2_PACL_TAG_GRP 0x08 #define EXT2_PACL_TAG_MASK 0x10 #define EXT2_PACL_TAG_OTHER 0x20 #define EXT2_PACL_PERM_EXEC 0x01 #define EXT2_PACL_PERM_WRITE 0x02 #define EXT2_PACL_PERM_READ 0x04 typedef struct { uint8_t tag[2]; uint8_t perm[2]; } ext2fs_pos_acl_entry_sh; typedef struct { uint8_t tag[2]; uint8_t perm[2]; uint8_t id[4]; } ext2fs_pos_acl_entry_lo; /************** JOURNAL ******************/ /* These values are always in big endian */ #define EXT2_JMAGIC 0xC03b3998 /*JBD2 Feature Flags */ #define JBD2_FEATURE_COMPAT_CHECKSUM 0x00000001 #define JBD2_FEATURE_INCOMPAT_REVOKE 0x00000001 #define JBD2_FEATURE_INCOMPAT_64BIT 0x00000002 #define JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT 0x00000004 typedef struct { uint8_t magic[4]; uint8_t entrytype[4]; uint8_t entryseq[4]; /* sequence of this entry */ uint8_t bsize[4]; /* size of block */ uint8_t num_blk[4]; /* num of blks in journal */ uint8_t first_blk[4]; /* bl where log starts */ uint8_t start_seq[4]; /* first commit ID in log */ uint8_t start_blk[4]; /* journ blk for 1st valid entry */ uint8_t j_errno[4]; /* signed error number */ /* the rest are not valid for v1 sb */ uint8_t feature_compat[4]; uint8_t feature_incompat[4]; uint8_t feature_ro_incompat[4]; uint8_t uuid[16]; uint8_t num_fs[4]; /* num of fs sharing log */ uint8_t dynsuper[4]; /* fs block of sb copy */ uint8_t max_trans[4]; /* limit of blk per trans */ uint8_t max_trans_data[4]; /* limit of data blocks per */ uint8_t reserved[176]; uint8_t id_fs[16][48]; /* Ids of fs sharing log */ } ext2fs_journ_sb; #define EXT2_J_ETYPE_DESC 1 /* descriptor block */ #define EXT2_J_ETYPE_COM 2 /* commit */ #define EXT2_J_ETYPE_SB1 3 /* super block v1 */ #define EXT2_J_ETYPE_SB2 4 /* sb v2 */ #define EXT2_J_ETYPE_REV 5 /* revoke */ /* Header that is used for all structures */ typedef struct { uint8_t magic[4]; uint8_t entry_type[4]; uint8_t entry_seq[4]; } ext2fs_journ_head; /* JBD2 Checksum types */ #define JBD2_CRC32_CHKSUM 1 #define JBD2_MD5_CHKSUM 2 #define JBD2_SHA1_CHKSUM 3 #define JBD2_CRC32_CHKSUM_SIZE 4 #define JBD2_CHECKSUM_BYTES (32/ sizeof(unsigned int)) #define NSEC_PER_SEC 1000000000L /* Header for ext4 commit blocks */ typedef struct { ext2fs_journ_head c_header; uint8_t chksum_type; uint8_t chksum_size; uint8_t 
padding[2]; uint8_t chksum[4 * JBD2_CHECKSUM_BYTES]; uint8_t commit_sec[8]; uint8_t commit_nsec[4]; } ext4fs_journ_commit_head; /* dentry flags */ #define EXT2_J_DENTRY_ESC 1 /* The orig block starts with magic */ #define EXT2_J_DENTRY_SAMEID 2 /* Entry is for same id, so do not skip 16 ahead */ #define EXT2_J_DENTRY_DEL 4 /* not currently used in src */ #define EXT2_J_DENTRY_LAST 8 /* Last tag */ /* Entry in the descriptor table */ typedef struct { uint8_t fs_blk[4]; uint8_t flag[4]; } ext2fs_journ_dentry; /* Journal Info */ typedef struct { TSK_FS_FILE *fs_file; TSK_INUM_T j_inum; uint32_t bsize; TSK_DADDR_T first_block; TSK_DADDR_T last_block; uint32_t start_seq; TSK_DADDR_T start_blk; } EXT2FS_JINFO; /* * Structure of an ext2fs file system handle. */ typedef struct { TSK_FS_INFO fs_info; /* super class */ ext2fs_sb *fs; /* super block */ /* lock protects grp_buf, grp_num, bmap_buf, bmap_grp_num, imap_buf, imap_grp_num */ tsk_lock_t lock; void *v_grp_buf; ext4fs_gd *ext4_grp_buf; ext2fs_gd *grp_buf; /* cached group descriptor r/w shared - lock */ EXT2_GRPNUM_T grp_num; /* cached group number r/w shared - lock */ uint8_t *bmap_buf; /* cached block allocation bitmap r/w shared - lock */ EXT2_GRPNUM_T bmap_grp_num; /* cached block bitmap nr r/w shared - lock */ uint8_t *imap_buf; /* cached inode allocation bitmap r/w shared - lock */ EXT2_GRPNUM_T imap_grp_num; /* cached inode bitmap nr r/w shared - lock */ TSK_OFF_T groups_offset; /* offset to first group desc */ EXT2_GRPNUM_T groups_count; /* nr of descriptor group blocks */ uint8_t deentry_type; /* v1 or v2 of dentry */ uint16_t inode_size; /* size of each inode */ TSK_DADDR_T first_data_block; EXT2FS_JINFO *jinfo; } EXT2FS_INFO; extern TSK_RETVAL_ENUM ext2fs_dir_open_meta(TSK_FS_INFO * a_fs, TSK_FS_DIR ** a_fs_dir, TSK_INUM_T a_addr); extern uint8_t ext2fs_jentry_walk(TSK_FS_INFO *, int, TSK_FS_JENTRY_WALK_CB, void *); extern uint8_t ext2fs_jblk_walk(TSK_FS_INFO *, TSK_DADDR_T, TSK_DADDR_T, int, TSK_FS_JBLK_WALK_CB, void *); extern uint8_t ext2fs_jopen(TSK_FS_INFO *, TSK_INUM_T); #ifdef __cplusplus } #endif #endif sleuthkit-4.2.0/tsk/fs/tsk_fatfs.h000644 000765 000024 00000031157 12576320700 017702 0ustar00carrierstaff000000 000000 /* ** The Sleuth Kit ** ** Copyright (c) 2013 Basis Technology Corp. All rights reserved ** Contact: Brian Carrier [carrier sleuthkit [dot] org] ** ** This software is distributed under the Common Public License 1.0 ** */ /** * \file tsk_fatfs.h * Contains the structures and function APIs for TSK FAT (FAT12, FAT16, FAT32, * exFAT) file system support. */ #ifndef _TSK_FATFS_H #define _TSK_FATFS_H #include "tsk_fs_i.h" /** * \internal * * Per TSK convention, FAT file system functions return integer 0 on success * and integer 1 on failure. */ #define FATFS_OK 0 /** * \internal * * Per TSK convention, FAT file system functions return integer 0 on success * and integer 1 on failure. 
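 *
 * For example (illustrative only):
 *
 *   if (exfatfs_open(fatfs) == FATFS_FAIL) {
 *       // handle the failure
 *   }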
*/ #define FATFS_FAIL 1 #define FATFS_FS_MAGIC 0xaa55 #define FATFS_FIRST_CLUSTER_ADDR 2 #define FATFS_FIRSTINO 2 #define FATFS_ROOTINO 2 /* location of root directory inode */ #define FATFS_FIRST_NORMINO 3 #define FATFS_ROOTNAME "$ROOT" #define FATFS_MBRNAME "$MBR" #define FATFS_FAT1NAME "$FAT1" #define FATFS_FAT2NAME "$FAT2" #define FATFS_NUM_VIRT_FILES(fatfs) \ (fatfs->numfat + 2) /* size of FAT to read into FATFS_INFO each time */ /* This must be at least 1024 bytes or else fat12 will get messed up */ #define FATFS_FAT_CACHE_N 4 // number of caches #define FATFS_FAT_CACHE_B 4096 #define FATFS_MASTER_BOOT_RECORD_SIZE 512 /** * Directory entries for all FAT file systems are currently 32 bytes long. */ #define FATFS_DENTRY_SIZE 32 /* MASK values for FAT entries */ #define FATFS_12_MASK 0x00000fff #define FATFS_16_MASK 0x0000ffff #define FATFS_32_MASK 0x0fffffff #define EXFATFS_MASK 0x0fffffff #define FATFS_FILE_CONTENT_LEN sizeof(TSK_DADDR_T) // we will store the starting cluster /* flags for attributes field */ #define FATFS_ATTR_NORMAL 0x00 /* normal file */ #define FATFS_ATTR_READONLY 0x01 /* file is readonly */ #define FATFS_ATTR_HIDDEN 0x02 /* file is hidden */ #define FATFS_ATTR_SYSTEM 0x04 /* file is a system file */ #define FATFS_ATTR_VOLUME 0x08 /* entry is a volume label */ #define FATFS_ATTR_DIRECTORY 0x10 /* entry is a directory name */ #define FATFS_ATTR_ARCHIVE 0x20 /* file is new or modified */ #define FATFS_ATTR_LFN 0x0f /* A long file name entry */ #define FATFS_ATTR_ALL 0x3f /* all flags set */ #define FATFS_CLUST_2_SECT(fatfs, c) \ (TSK_DADDR_T)(fatfs->firstclustsect + ((((c) & fatfs->mask) - 2) * fatfs->csize)) #define FATFS_SECT_2_CLUST(fatfs, s) \ (TSK_DADDR_T)(2 + ((s) - fatfs->firstclustsect) / fatfs->csize) /* given an inode address, determine in which sector it is located * i must be larger than 3 (2 is the root and it doesn't have a sector) */ #define FATFS_INODE_2_SECT(fatfs, i) \ (TSK_DADDR_T)((i - FATFS_FIRST_NORMINO)/(fatfs->dentry_cnt_se) + fatfs->firstdatasect) #define FATFS_INODE_2_OFF(fatfs, i) \ (size_t)(((i - FATFS_FIRST_NORMINO) % fatfs->dentry_cnt_se) * sizeof(FATFS_DENTRY)) /* given a sector IN THE DATA AREA, return the base inode for it */ #define FATFS_SECT_2_INODE(fatfs, s) \ (TSK_INUM_T)((s - fatfs->firstdatasect) * fatfs->dentry_cnt_se + FATFS_FIRST_NORMINO) /* Constants for the FAT entry */ #define FATFS_UNALLOC 0 #define FATFS_BAD 0x0ffffff7 #define FATFS_EOFS 0x0ffffff8 #define FATFS_EOFE 0x0fffffff /* macro to identify if the FAT value is End of File * returns 1 if it is and 0 if it is not */ #define FATFS_ISEOF(val, mask) \ ((val >= (FATFS_EOFS & mask)) && (val <= (FATFS_EOFE))) #define FATFS_ISBAD(val, mask) \ ((val) == (FATFS_BAD & mask)) #define FATFS_SEC_MASK 0x1f /* number of seconds div by 2 */ #define FATFS_SEC_SHIFT 0 #define FATFS_SEC_MIN 0 #define FATFS_SEC_MAX 30 #define FATFS_MIN_MASK 0x7e0 /* number of minutes 0-59 */ #define FATFS_MIN_SHIFT 5 #define FATFS_MIN_MIN 0 #define FATFS_MIN_MAX 59 #define FATFS_HOUR_MASK 0xf800 /* number of hours 0-23 */ #define FATFS_HOUR_SHIFT 11 #define FATFS_HOUR_MIN 0 #define FATFS_HOUR_MAX 23 /* return 1 if x is a valid FAT time */ #define FATFS_ISTIME(x) \ (((((x & FATFS_SEC_MASK) >> FATFS_SEC_SHIFT) > FATFS_SEC_MAX) || \ (((x & FATFS_MIN_MASK) >> FATFS_MIN_SHIFT) > FATFS_MIN_MAX) || \ (((x & FATFS_HOUR_MASK) >> FATFS_HOUR_SHIFT) > FATFS_HOUR_MAX) ) == 0) #define FATFS_DAY_MASK 0x1f /* day of month 1-31 */ #define FATFS_DAY_SHIFT 0 #define FATFS_DAY_MIN 1 #define FATFS_DAY_MAX 31 #define 
FATFS_MON_MASK 0x1e0 /* month 1-12 */ #define FATFS_MON_SHIFT 5 #define FATFS_MON_MIN 1 #define FATFS_MON_MAX 12 #define FATFS_YEAR_MASK 0xfe00 /* year, from 1980 0-127 */ #define FATFS_YEAR_SHIFT 9 #define FATFS_YEAR_MIN 0 #define FATFS_YEAR_MAX 127 /* return 1 if x is a valid FAT date */ #define FATFS_ISDATE(x) \ (((((x & FATFS_DAY_MASK) >> FATFS_DAY_SHIFT) > FATFS_DAY_MAX) || \ (((x & FATFS_DAY_MASK) >> FATFS_DAY_SHIFT) < FATFS_DAY_MIN) || \ (((x & FATFS_MON_MASK) >> FATFS_MON_SHIFT) > FATFS_MON_MAX) || \ (((x & FATFS_MON_MASK) >> FATFS_MON_SHIFT) < FATFS_MON_MIN) || \ (((x & FATFS_YEAR_MASK) >> FATFS_YEAR_SHIFT) > FATFS_YEAR_MAX) ) == 0) /** * \internal * Buffer size for conversion of exFAT UTF-16 strings to UTF-8 strings. */ #define FATFS_MAXNAMLEN_UTF8 1024 #ifdef __cplusplus extern "C" { #endif typedef struct FATFS_INFO FATFS_INFO; enum FATFS_DATA_UNIT_ALLOC_STATUS_ENUM { FATFS_DATA_UNIT_ALLOC_STATUS_UNALLOC = 0, FATFS_DATA_UNIT_ALLOC_STATUS_ALLOC = 1, FATFS_DATA_UNIT_ALLOC_STATUS_UNKNOWN = 2 }; typedef enum FATFS_DATA_UNIT_ALLOC_STATUS_ENUM FATFS_DATA_UNIT_ALLOC_STATUS_ENUM; typedef enum { TSK_FATFS_SUBTYPE_SPEC = 0, TSK_FATFS_SUBTYPE_ANDROID_1 = 1 } TSK_FATFS_SUBTYPE_ENUM; typedef struct { uint8_t data[FATFS_MASTER_BOOT_RECORD_SIZE - 2]; uint8_t magic[2]; } FATFS_MASTER_BOOT_RECORD; /** * Generic directory entry structure for FAT file systems. */ typedef struct { uint8_t data[FATFS_DENTRY_SIZE]; } FATFS_DENTRY; /* * Internal TSK_FS_INFO derived structure for FATXX and exFAT file systems. */ struct FATFS_INFO { TSK_FS_INFO fs_info; /* super class */ /* FAT cache */ /* cache_lock protects fatc_buf, fatc_addr, fatc_ttl */ tsk_lock_t cache_lock; char fatc_buf[FATFS_FAT_CACHE_N][FATFS_FAT_CACHE_B]; //r/w shared - lock TSK_DADDR_T fatc_addr[FATFS_FAT_CACHE_N]; // r/w shared - lock uint8_t fatc_ttl[FATFS_FAT_CACHE_N]; //r/w shared - lock /* First sector of FAT */ TSK_DADDR_T firstfatsect; /* First sector after FAT - For TSK_FS_INFO_TYPE_FAT_12 and TSK_FS_INFO_TYPE_FAT_16, this is where the * root directory entries are. For TSK_FS_INFO_TYPE_FAT_32, this is the the first * cluster */ TSK_DADDR_T firstdatasect; /* The sector number were cluster 2 (the first one) is * for TSK_FS_INFO_TYPE_FAT_32, it will be the same as firstdatasect, but for TSK_FS_INFO_TYPE_FAT_12 & 16 * it will be the first sector after the Root directory */ TSK_DADDR_T firstclustsect; /* size of data area in clusters, starting at firstdatasect */ TSK_DADDR_T clustcnt; TSK_DADDR_T lastclust; /* sector where the root directory is located */ TSK_DADDR_T rootsect; uint32_t dentry_cnt_se; /* max number of dentries per sector */ uint32_t dentry_cnt_cl; /* max number of dentries per cluster */ uint16_t ssize; /* size of sectors in bytes */ uint16_t ssize_sh; /* power of 2 for size of sectors. >> to divide by sector size. << to multiply by sector size */ uint8_t csize; /* size of clusters in sectors */ uint8_t numfat; /* number of fat tables */ uint32_t sectperfat; /* sectors per fat table */ uint16_t numroot; /* number of 32-byte dentries in root dir */ uint32_t mask; /* the mask to use for the sectors */ TSK_INUM_T mbr_virt_inum; TSK_INUM_T fat1_virt_inum; TSK_INUM_T fat2_virt_inum; tsk_lock_t dir_lock; //< Lock that protects inum2par. void *inum2par; //< Maps subfolder metadata address to parent folder metadata addresses. 
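/* Illustrative note (not part of the original header): the geometry fields
 * above are what FATFS_CLUST_2_SECT() combines to map a cluster address to a
 * sector address, roughly:
 *
 *   TSK_DADDR_T sect = fatfs->firstclustsect +
 *       ((clust & fatfs->mask) - FATFS_FIRST_CLUSTER_ADDR) * fatfs->csize;
 */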
char boot_sector_buffer[FATFS_MASTER_BOOT_RECORD_SIZE]; int using_backup_boot_sector; TSK_FATFS_SUBTYPE_ENUM subtype; // Identifies any variations on the standard FAT format int8_t (*is_cluster_alloc)(FATFS_INFO *fatfs, TSK_DADDR_T clust); uint8_t (*is_dentry)(FATFS_INFO *a_fatfs, FATFS_DENTRY *a_dentry, FATFS_DATA_UNIT_ALLOC_STATUS_ENUM a_cluster_is_alloc, uint8_t a_do_basic_tests_only); uint8_t (*inode_lookup)(FATFS_INFO *a_fatfs, TSK_FS_FILE *a_fs_file, TSK_INUM_T a_inum); uint8_t (*inode_walk_should_skip_dentry)(FATFS_INFO *a_fatfs, TSK_INUM_T a_inum, FATFS_DENTRY *a_dentry, unsigned int a_selection_flags, int a_cluster_is_alloc); uint8_t (*istat_attr_flags) (FATFS_INFO *a_fatfs, TSK_INUM_T a_inum, FILE *a_hFile); TSK_RETVAL_ENUM (*dent_parse_buf)(FATFS_INFO *a_fatfs, TSK_FS_DIR *a_fs_dir, char *a_buf, TSK_OFF_T a_buf_len, TSK_DADDR_T *a_sector_addrs); TSK_RETVAL_ENUM (*dinode_copy)(FATFS_INFO *a_fatfs, TSK_INUM_T a_inum, FATFS_DENTRY *a_dentry, uint8_t a_cluster_is_alloc, TSK_FS_FILE *a_fs_file); struct { uint64_t first_sector_of_alloc_bitmap; uint64_t length_of_alloc_bitmap_in_bytes; } EXFATFS_INFO; }; extern uint8_t fatfs_inum_is_in_range(FATFS_INFO *a_fatfs, TSK_INUM_T a_inum); extern uint8_t fatfs_ptr_arg_is_null(void *ptr, const char *param_name, const char *func_name); extern uint8_t fatfs_inum_arg_is_in_range(FATFS_INFO *a_fatfs, TSK_INUM_T a_inum, const char *func_name); extern time_t fatfs_dos_2_unix_time(uint16_t date, uint16_t time, uint8_t timetens); extern uint32_t fatfs_dos_2_nanosec(uint8_t timetens); extern TSKConversionResult fatfs_utf16_inode_str_2_utf8(FATFS_INFO *a_fatfs, UTF16 *src, size_t src_len, UTF8 *dest, size_t dest_len, TSK_INUM_T a_inum, const char *a_desc); extern void fatfs_cleanup_ascii(char *); extern TSK_FS_INFO *fatfs_open(TSK_IMG_INFO *a_img_info, TSK_OFF_T a_offset, TSK_FS_TYPE_ENUM a_ftype, uint8_t a_test); extern uint8_t fatfs_fscheck(TSK_FS_INFO *fs, FILE *hFile); extern int8_t fatfs_is_sectalloc(FATFS_INFO *, TSK_DADDR_T); extern uint8_t fatfs_block_walk(TSK_FS_INFO * fs, TSK_DADDR_T a_start_blk, TSK_DADDR_T a_end_blk, TSK_FS_BLOCK_WALK_FLAG_ENUM a_flags, TSK_FS_BLOCK_WALK_CB a_action, void *a_ptr); extern TSK_FS_BLOCK_FLAG_ENUM fatfs_block_getflags(TSK_FS_INFO * a_fs, TSK_DADDR_T a_addr); extern TSK_FS_ATTR_TYPE_ENUM fatfs_get_default_attr_type(const TSK_FS_FILE * a_file); extern uint8_t fatfs_make_data_runs(TSK_FS_FILE * a_fs_file); extern uint8_t fatfs_getFAT(FATFS_INFO * fatfs, TSK_DADDR_T clust, TSK_DADDR_T * value); extern uint8_t fatfs_dir_buf_add(FATFS_INFO * fatfs, TSK_INUM_T par_inum, TSK_INUM_T dir_inum); extern uint8_t fatfs_dir_buf_get(FATFS_INFO * fatfs, TSK_INUM_T dir_inum, TSK_INUM_T *par_inum); extern TSK_WALK_RET_ENUM fatfs_find_parent_act(TSK_FS_FILE * fs_file, const char *a_path, void *ptr); extern uint8_t fatfs_istat(TSK_FS_INFO * fs, FILE * hFile, TSK_INUM_T inum, TSK_DADDR_T numblock, int32_t sec_skew); extern uint8_t fatfs_inode_walk(TSK_FS_INFO * fs, TSK_INUM_T start_inum, TSK_INUM_T end_inum, TSK_FS_META_FLAG_ENUM a_flags, TSK_FS_META_WALK_CB a_action, void *a_ptr); extern uint8_t fatfs_inode_lookup(TSK_FS_INFO *a_fs, TSK_FS_FILE *a_fs_file, TSK_INUM_T a_inum); extern uint8_t fatfs_dentry_load(FATFS_INFO *a_fatfs, FATFS_DENTRY *a_dentry, TSK_INUM_T a_inum); extern TSK_RETVAL_ENUM fatfs_dir_open_meta(TSK_FS_INFO * a_fs, TSK_FS_DIR ** a_fs_dir, TSK_INUM_T a_addr); extern int fatfs_name_cmp(TSK_FS_INFO *, const char *, const char *); extern uint8_t fatfs_dir_buf_add(FATFS_INFO * fatfs, TSK_INUM_T par_inum, TSK_INUM_T 
dir_inum); extern void fatfs_cleanup_ascii(char *); extern void fatfs_dir_buf_free(FATFS_INFO * fatfs); extern uint8_t fatfs_jopen(TSK_FS_INFO * fs, TSK_INUM_T inum); extern uint8_t fatfs_jentry_walk(TSK_FS_INFO * fs, int a_flags, TSK_FS_JENTRY_WALK_CB a_action, void *a_ptr); extern uint8_t fatfs_jblk_walk(TSK_FS_INFO * fs, TSK_DADDR_T start, TSK_DADDR_T end, int a_flags, TSK_FS_JBLK_WALK_CB a_action, void *a_ptr); extern void fatfs_close(TSK_FS_INFO *fs); #ifdef __cplusplus } #endif #endif sleuthkit-4.2.0/tsk/fs/tsk_fatxxfs.h000755 000765 000024 00000014635 12576320700 020267 0ustar00carrierstaff000000 000000 /* ** The Sleuth Kit ** ** Brian Carrier [carrier sleuthkit [dot] org] ** Copyright (c) 2003-2013 Brian Carrier. All rights reserved ** ** TASK ** Copyright (c) 2002 @stake Inc. All rights reserved ** ** This software is distributed under the Common Public License 1.0 ** */ /** * \file tsk_fatxxfs.h * Contains the structures and function APIs for TSK FATXX (FAT12, FAT16, * FAT32) file system support. */ #ifndef _TSK_FATXXFS_H #define _TSK_FATXXFS_H #include "tsk_fs_i.h" #include "tsk_fatfs.h" /* Macro to combine the upper and lower 2-byte parts of the starting * cluster */ #define FATXXFS_DENTRY_CLUST(fsi, de) \ (TSK_DADDR_T)((tsk_getu16(fsi->endian, de->startclust)) | (tsk_getu16(fsi->endian, de->highclust)<<16)) /* constants for first byte of name[] */ #define FATXXFS_SLOT_E5 0x05 /* actual value is 0xe5 */ #define FATXXFS_SLOT_DELETED 0xe5 /* Macro to test allocation status * Have seen FAT image that uses non-standard flags in the short name (00 00 -> unallocated, 20 00 -> allocated) */ #define FATXXFS_IS_DELETED(name, fatfs) \ (fatfs->subtype == TSK_FATFS_SUBTYPE_ANDROID_1) ? \ ((name[0] == 0) && (name[1] == 0)) : \ (name[0] == FATXXFS_SLOT_DELETED) /* *Return 1 if c is an valid charactor for a short file name * * NOTE: 0x05 is allowed in name[0], and 0x2e (".") is allowed for name[0] * and name[1] and 0xe5 is allowed for name[0] */ #define FATXXFS_IS_83_NAME(c) \ ((((c) < 0x20) || \ ((c) == 0x22) || \ (((c) >= 0x2a) && ((c) <= 0x2c)) || \ ((c) == 0x2e) || \ ((c) == 0x2f) || \ (((c) >= 0x3a) && ((c) <= 0x3f)) || \ (((c) >= 0x5b) && ((c) <= 0x5d)) || \ ((c) == 0x7c)) == 0) // extensions are to be ascii / latin #define FATXXFS_IS_83_EXT(c) \ (FATXXFS_IS_83_NAME((c)) && ((c) < 0x7f)) /* flags for lowercase field */ #define FATXXFS_CASE_LOWER_BASE 0x08 /* base is lower case */ #define FATXXFS_CASE_LOWER_EXT 0x10 /* extension is lower case */ #define FATXXFS_CASE_LOWER_ALL 0x18 /* both are lower */ /* flags for seq field */ #define FATXXFS_LFN_SEQ_FIRST 0x40 /* This bit is set for the first lfn entry */ #define FATXXFS_LFN_SEQ_MASK 0x3f /* These bits are a mask for the decreasing * sequence number for the entries */ #ifdef __cplusplus extern "C" { #endif /* * Boot sector structure for FATXX file systems (TSK_FS_INFO_TYPE_FAT_12, * TSK_FS_INFO_TYPE_FAT_16, and TSK_FS_INFO_TYPE_FAT_32). 
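 *
 * A minimal sketch (an assumption, not taken from the library documentation)
 * of reading the total sector count from this structure, falling back to the
 * 32-bit field when the 16-bit field is zero:
 *
 *   uint32_t sectors = tsk_getu16(fs->endian, sb->sectors16);
 *   if (sectors == 0)
 *       sectors = tsk_getu32(fs->endian, sb->sectors32);
 *
 * where sb is assumed to point to a FATXXFS_SB and fs to the TSK_FS_INFO.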
*/ typedef struct { uint8_t f1[3]; char oemname[8]; uint8_t ssize[2]; /* sector size in bytes */ uint8_t csize; /* cluster size in sectors */ uint8_t reserved[2]; /* number of reserved sectors for boot sectors */ uint8_t numfat; /* Number of FATs */ uint8_t numroot[2]; /* Number of Root dentries */ uint8_t sectors16[2]; /* number of sectors in FS */ uint8_t f2[1]; uint8_t sectperfat16[2]; /* size of FAT */ uint8_t f3[4]; uint8_t prevsect[4]; /* number of sectors before FS partition */ uint8_t sectors32[4]; /* 32-bit value of number of FS sectors */ /* The following are different for fat12/fat16 and fat32 */ union { struct { uint8_t f5[3]; uint8_t vol_id[4]; uint8_t vol_lab[11]; uint8_t fs_type[8]; uint8_t f6[448]; } f16; struct { uint8_t sectperfat32[4]; uint8_t ext_flag[2]; uint8_t fs_ver[2]; uint8_t rootclust[4]; /* cluster where root directory is stored */ uint8_t fsinfo[2]; /* TSK_FS_INFO Location */ uint8_t bs_backup[2]; /* sector of backup of boot sector */ uint8_t f5[12]; uint8_t drvnum; uint8_t f6[2]; uint8_t vol_id[4]; uint8_t vol_lab[11]; uint8_t fs_type[8]; uint8_t f7[420]; } f32; } a; uint8_t magic[2]; /* MAGIC for all versions */ } FATXXFS_SB; typedef struct { uint8_t magic1[4]; /* 41615252 */ uint8_t f1[480]; uint8_t magic2[4]; /* 61417272 */ uint8_t freecnt[4]; /* free clusters 0xfffffffff if unknown */ uint8_t nextfree[4]; /* next free cluster */ uint8_t f2[12]; uint8_t magic3[4]; /* AA550000 */ } FATXXFS_FSINFO; /* directory entry short name structure */ typedef struct { uint8_t name[8]; uint8_t ext[3]; uint8_t attrib; uint8_t lowercase; uint8_t ctimeten; /* create times (ctimeten is 0-199) */ uint8_t ctime[2]; uint8_t cdate[2]; uint8_t adate[2]; /* access time */ uint8_t highclust[2]; uint8_t wtime[2]; /* last write time */ uint8_t wdate[2]; uint8_t startclust[2]; uint8_t size[4]; } FATXXFS_DENTRY; /* * Long file name support for windows * * Contents of this are in UNICODE, not ASCII */ typedef struct { uint8_t seq; uint8_t part1[10]; uint8_t attributes; uint8_t reserved1; uint8_t chksum; uint8_t part2[12]; uint8_t reserved2[2]; uint8_t part3[4]; } FATXXFS_DENTRY_LFN; extern uint8_t fatxxfs_open(FATFS_INFO *fatfs); extern int8_t fatxxfs_is_cluster_alloc(FATFS_INFO *fatfs, TSK_DADDR_T clust); extern uint8_t fatxxfs_is_dentry(FATFS_INFO *a_fatfs, FATFS_DENTRY *a_dentry, FATFS_DATA_UNIT_ALLOC_STATUS_ENUM a_cluster_is_alloc, uint8_t a_do_basic_tests_only); extern TSK_RETVAL_ENUM fatxxfs_dinode_copy(FATFS_INFO *a_fatfs, TSK_INUM_T a_inum, FATFS_DENTRY *a_dentry, uint8_t a_cluster_is_alloc, TSK_FS_FILE *a_fs_file); extern uint8_t fatxxfs_inode_lookup(FATFS_INFO *a_fatfs, TSK_FS_FILE *a_fs_file, TSK_INUM_T a_inum); extern uint8_t fatxxfs_istat_attr_flags(FATFS_INFO *a_fatfs, TSK_INUM_T a_inum, FILE *a_hFile); extern uint8_t fatxxfs_inode_walk_should_skip_dentry(FATFS_INFO *a_fatfs, TSK_INUM_T a_inum, FATFS_DENTRY *a_dentry, unsigned int a_selection_flags, int a_cluster_is_alloc); extern TSK_RETVAL_ENUM fatxxfs_dent_parse_buf(FATFS_INFO * fatfs, TSK_FS_DIR * a_fs_dir, char *buf, TSK_OFF_T len, TSK_DADDR_T * addrs); #ifdef __cplusplus } #endif #endif sleuthkit-4.2.0/tsk/fs/tsk_ffs.h000644 000765 000024 00000036246 12576320700 017361 0ustar00carrierstaff000000 000000 /* ** The Sleuth Kit ** ** Brian Carrier [carrier sleuthkit [dot] org] ** Copyright (c) 2003-2011 Brian Carrier. All rights reserved ** ** TASK ** Copyright (c) 2002 @stake Inc. 
All rights reserved ** ** This software is distributed under the Common Public License 1.0 */ /* * Contains the structures and function APIs for FFS file system support. */ #ifndef _TSK_FFS_H #define _TSK_FFS_H #ifdef __cplusplus extern "C" { #endif typedef uint32_t FFS_GRPNUM_T; #define PRI_FFSGRP PRIu32 /* ** CONSTANTS **/ #define FFS_FIRSTINO 0 /* 0 & 1 are reserved (1 was bad blocks) */ #define FFS_ROOTINO 2 /* location of root directory inode */ #define FFS_NDADDR 12 #define FFS_NIADDR 3 #define UFS1_SBOFF 8192 #define UFS2_SBOFF 65536 #define UFS2_SBOFF2 262144 #define UFS1_FS_MAGIC 0x011954 #define UFS2_FS_MAGIC 0x19540119 #define FFS_MAXNAMLEN 255 #define FFS_MAXPATHLEN 1024 #define FFS_DIRBLKSIZ 512 #define FFS_FILE_CONTENT_LEN ((FFS_NDADDR + FFS_NIADDR) * sizeof(TSK_DADDR_T)) typedef struct { uint8_t dir_num[4]; uint8_t blk_free[4]; uint8_t ino_free[4]; uint8_t frag_free[4]; } ffs_csum1; typedef struct { uint8_t dir_num[8]; uint8_t blk_free[8]; uint8_t ino_free[8]; uint8_t frag_free[8]; uint8_t clust_free[8]; uint8_t f1[24]; } ffs_csum2; /* * Super Block Structure */ // UFS 1 typedef struct { uint8_t f1[8]; /* Offsets in each cylinder group */ uint8_t sb_off[4]; /* s32 */ uint8_t gd_off[4]; /* s32 */ uint8_t ino_off[4]; /* s32 */ uint8_t dat_off[4]; /* s32 */ /* How much the base of the admin data in each cyl group changes */ uint8_t cg_delta[4]; /* s32 */ uint8_t cg_cyc_mask[4]; /* s32 */ uint8_t wtime[4]; /* u32 : last written time */ uint8_t frag_num[4]; /* s32 - number of fragments in FS */ uint8_t data_frag_num[4]; /* s32 - number of frags not being used for admin data */ uint8_t cg_num[4]; /* s32 - number of cyl grps in FS */ uint8_t bsize_b[4]; /* s32 - size of block */ uint8_t fsize_b[4]; /* s32 - size of fragment */ uint8_t bsize_frag[4]; /* s32 - num of frag in block */ uint8_t f5[36]; uint8_t fs_fragshift[4]; /* s32 */ uint8_t f6[20]; uint8_t fs_inopb[4]; /* s32 */ uint8_t f7[20]; uint8_t fs_id[8]; uint8_t cg_saddr[4]; /* s32 */ uint8_t cg_ssize_b[4]; /* s32 */ uint8_t fs_cgsize[4]; /* s32 */ uint8_t f7c[12]; uint8_t fs_ncyl[4]; /* s32 */ uint8_t fs_cpg[4]; /* s32 */ uint8_t cg_inode_num[4]; /* s32 */ uint8_t cg_frag_num[4]; /* s32 */ ffs_csum1 cstotal; uint8_t fs_fmod; uint8_t fs_clean; uint8_t fs_ronly; uint8_t fs_flags; uint8_t last_mnt[512]; uint8_t f8[648]; uint8_t magic[4]; /* s32 */ uint8_t f9[160]; /* filler so it is a multiple of 512 */ } ffs_sb1; // UFS 2 typedef struct { uint8_t f0[8]; /* Offsets in each cylinder group */ uint8_t sb_off[4]; /* s32 */ uint8_t gd_off[4]; /* s32 */ uint8_t ino_off[4]; /* s32 */ uint8_t dat_off[4]; /* s32 */ uint8_t f1[20]; /* s32 */ uint8_t cg_num[4]; /* s32 - number of cyl grps in FS */ uint8_t bsize_b[4]; /* s32 - size of block */ uint8_t fsize_b[4]; /* s32 - size of fragment */ uint8_t bsize_frag[4]; /* s32 - num of frag in block */ uint8_t f2[36]; uint8_t fs_fragshift[4]; /* s32 */ uint8_t f3[20]; uint8_t fs_inopb[4]; /* s32 */ uint8_t f4[32]; uint8_t cg_ssize_b[4]; /* s32 */ uint8_t fs_cgsize[4]; /* s32 */ uint8_t f5[20]; uint8_t cg_inode_num[4]; /* s32 */ uint8_t cg_frag_num[4]; /* s32 - fs_fpg */ uint8_t f6[16]; uint8_t fs_fmod; uint8_t fs_clean; uint8_t fs_ronly; uint8_t f7; uint8_t last_mnt[468]; uint8_t volname[32]; uint8_t swuid[8]; uint8_t f8[288]; ffs_csum2 cstotal; uint8_t wtime[8]; /* u32 : last written time */ uint8_t frag_num[8]; /* s32 - number of fragments in FS */ uint8_t blk_num[8]; /* s32 - number of blocks in FS */ uint8_t cg_saddr[8]; uint8_t f9[208]; uint8_t fs_flags[4]; uint8_t f10[56]; uint8_t 
magic[4]; /* s32 */ uint8_t f11[160]; /* filler so it is a multiple of 512 */ } ffs_sb2; #define FFS_SB_FLAG_UNCLEAN 0x01 #define FFS_SB_FLAG_SOFTDEP 0x02 #define FFS_SB_FLAG_NEEDFSCK 0x04 #define FFS_SB_FLAG_INDEXDIR 0x08 #define FFS_SB_FLAG_ACL 0x10 #define FFS_SB_FLAG_MULTILABEL 0x20 #define FFS_SB_FLAG_UPDATED 0x80 /* How the file system is optimized */ #define FFS_SB_OPT_TIME 0 #define FFS_SB_OPT_SPACE 1 /* * Cylinder Group Descriptor * * UFS1 and UFS2 are the same for the data that we care about unless we * want the wtime for 'fsstat'. */ typedef struct { uint8_t f1[4]; uint8_t magic[4]; /* 0x090255 */ uint8_t wtime[4]; /* last written time */ uint8_t cg_cgx[4]; /* s32 - my group number */ uint8_t cyl_num[2]; /* number of cyl in this group */ uint8_t ino_num[2]; /* number of inodes in this group */ uint8_t frag_num[4]; /* number of fragments in this group */ ffs_csum1 cs; uint8_t last_alloc_blk[4]; /* last allocated blk relative to start */ uint8_t last_alloc_frag[4]; /* last alloc frag relative to start */ uint8_t last_alloc_ino[4]; uint8_t avail_frag[8][4]; uint8_t f2b[8]; uint8_t cg_iusedoff[4]; /* s32 */ uint8_t cg_freeoff[4]; /* s32 */ uint8_t f3[72]; } ffs_cgd; typedef struct { uint8_t f1[4]; uint8_t magic[4]; /* 0x090255 */ uint8_t f2[4]; uint8_t cg_cgx[4]; /* s32 - my group number */ uint8_t f2a[4]; /* number of cyl in this group */ uint8_t frag_num[4]; /* number of fragments in this group */ ffs_csum1 cs; uint8_t last_alloc_blk[4]; /* last allocated blk relative to start */ uint8_t last_alloc_frag[4]; /* last alloc frag relative to start */ uint8_t last_alloc_ino[4]; uint8_t avail_frag[8][4]; uint8_t f2b[8]; uint8_t cg_iusedoff[4]; /* s32 */ uint8_t cg_freeoff[4]; /* s32 */ uint8_t cg_nextfreeoff[4]; uint8_t cg_clustersumoff[4]; uint8_t cg_clusteroff[4]; uint8_t cg_nclustersblks[4]; uint8_t cg_niblk[4]; uint8_t cg_initediblk[4]; uint8_t f3a[12]; uint8_t wtime[8]; uint8_t f3[24]; } ffs_cgd2; /* * inode */ /* ffs_inode1: OpenBSD & FreeBSD etc. 
*/ typedef struct { uint8_t di_mode[2]; /* u16 */ uint8_t di_nlink[2]; /* s16 */ uint8_t f1[4]; uint8_t di_size[8]; /* u64 */ uint8_t di_atime[4]; /* s32 */ uint8_t di_atimensec[4]; uint8_t di_mtime[4]; /* s32 */ uint8_t di_mtimensec[4]; uint8_t di_ctime[4]; /* s32 */ uint8_t di_ctimensec[4]; uint8_t di_db[12][4]; /* s32 */ uint8_t di_ib[3][4]; /* s32 */ uint8_t f5[8]; uint8_t gen[4]; uint8_t di_uid[4]; /* u32 */ uint8_t di_gid[4]; /* u32 */ uint8_t f6[8]; } ffs_inode1; /* ffs_inode1b: Solaris */ typedef struct { uint8_t di_mode[2]; /* u16 */ uint8_t di_nlink[2]; /* s16 */ uint8_t f1[4]; uint8_t di_size[8]; /* u64 */ uint8_t di_atime[4]; /* s32 */ uint8_t f2[4]; uint8_t di_mtime[4]; /* s32 */ uint8_t f3[4]; uint8_t di_ctime[4]; /* s32 */ uint8_t f4[4]; uint8_t di_db[12][4]; /* s32 */ uint8_t di_ib[3][4]; /* s32 */ uint8_t f5[16]; uint8_t di_uid[4]; /* u32 */ uint8_t di_gid[4]; /* u32 */ uint8_t f6[4]; } ffs_inode1b; typedef struct { uint8_t di_mode[2]; /* u16 */ uint8_t di_nlink[2]; /* s16 */ uint8_t di_uid[4]; uint8_t di_gid[4]; uint8_t di_blksize[4]; /* u32 inode block size */ uint8_t di_size[8]; /* u64 */ uint8_t di_blocks[8]; /* u64 - bytes held */ uint8_t di_atime[8]; /* s64 */ uint8_t di_mtime[8]; /* s64 */ uint8_t di_ctime[8]; /* s64 */ uint8_t di_crtime[8]; /* s64 */ uint8_t di_mtimensec[4]; /* s32 */ uint8_t di_atimensec[4]; uint8_t di_ctimensec[4]; uint8_t di_crtimensec[4]; uint8_t di_gen[4]; /* s32 generation number */ uint8_t di_kflags[4]; /* u32 kernel flags */ uint8_t di_flags[4]; /* u32 flags */ uint8_t di_extsize[4]; /* s32 size of ext attributes block */ uint8_t di_extb[2][8]; /* Address of ext attribute blocks */ uint8_t di_db[12][8]; /* s32 */ uint8_t di_ib[3][8]; /* s32 */ uint8_t f2[24]; /* s32 */ } ffs_inode2; typedef struct { union { ffs_inode1 in1; ffs_inode1b in1b; ffs_inode2 in2; } in; } ffs_inode; #define FFS_IN_FMT 0170000 /* Mask of file type. */ #define FFS_IN_FIFO 0010000 /* Named pipe (fifo). */ #define FFS_IN_CHR 0020000 /* Character device. */ #define FFS_IN_DIR 0040000 /* Directory file. */ #define FFS_IN_BLK 0060000 /* Block device. */ #define FFS_IN_REG 0100000 /* Regular file. */ #define FFS_IN_LNK 0120000 /* Symbolic link. */ #define FFS_IN_SHAD 0130000 /* SOLARIS ONLY */ #define FFS_IN_SOCK 0140000 /* UNIX domain socket. */ #define FFS_IN_WHT 0160000 /* Whiteout. */ #define FFS_IN_ISUID 0004000 #define FFS_IN_ISGID 0002000 #define FFS_IN_ISVTX 0001000 #define FFS_IN_IRUSR 0000400 #define FFS_IN_IWUSR 0000200 #define FFS_IN_IXUSR 0000100 #define FFS_IN_IRGRP 0000040 #define FFS_IN_IWGRP 0000020 #define FFS_IN_IXGRP 0000010 #define FFS_IN_IROTH 0000004 #define FFS_IN_IWOTH 0000002 #define FFS_IN_IXOTH 0000001 typedef struct { uint8_t reclen[4]; uint8_t nspace; uint8_t contpad; uint8_t nlen; uint8_t name[1]; /* of length nlen and padded so contents are on 8-byte boundary */ } ffs_extattr; #define FFS_ATTR_CONT(x) \ ((((x) + 7 + 7) / 8) * 2) /* * Directory Entries */ /* ffs_dentry1: new OpenBSD & FreeBSD etc. 
*/ typedef struct { uint8_t d_ino[4]; /* u32 */ uint8_t d_reclen[2]; /* u16 */ uint8_t d_type; /* u8 */ uint8_t d_namlen; /* u8 */ char d_name[256]; } ffs_dentry1; /* type field values */ #define FFS_DT_UNKNOWN 0 #define FFS_DT_FIFO 1 #define FFS_DT_CHR 2 #define FFS_DT_DIR 4 #define FFS_DT_BLK 6 #define FFS_DT_REG 8 #define FFS_DT_LNK 10 #define FFS_DT_SOCK 12 #define FFS_DT_WHT 14 /* ffs_dentry2: Solaris and old xBSDs (no type field) */ typedef struct { uint8_t d_ino[4]; /* u32 */ uint8_t d_reclen[2]; /* u16 */ uint8_t d_namlen[2]; /* u16 */ char d_name[256]; } ffs_dentry2; #define FFS_DIRSIZ_lcl(len) \ ((len + 8 + 3) & ~(3)) /* Return the base fragment for group c */ #define cgbase_lcl(fsi, fs, c) \ ((TSK_DADDR_T)(tsk_gets32(fsi->endian, (fs)->cg_frag_num) * (c))) /* Macros to calc the locations of structures in cyl groups */ #define cgstart_lcl(fsi, fs, c) \ ((TSK_DADDR_T)((tsk_getu32((fsi)->endian, (fs)->magic) == UFS2_FS_MAGIC) ? \ (cgbase_lcl(fsi, fs, c)) : \ (cgbase_lcl(fsi, fs, c) + tsk_gets32((fsi)->endian, (fs)->cg_delta) * \ ((c) & ~(tsk_gets32((fsi)->endian, (fs)->cg_cyc_mask)))) )) /* cyl grp block */ #define cgtod_lcl(fsi, fs, c) \ ((TSK_DADDR_T)(cgstart_lcl(fsi, fs, c) + tsk_gets32(fsi->endian, (fs)->gd_off))) /* Offset to inode table in cylinder group */ #define cgimin_lcl(fsi, fs, c) \ ((TSK_DADDR_T)(cgstart_lcl(fsi, fs, c) + tsk_gets32(fsi->endian, (fs)->ino_off))) /* 1st data block in cyl grp*/ #define cgdmin_lcl(fsi, fs, c) \ ((TSK_DADDR_T)(cgstart_lcl(fsi, fs, c) + tsk_gets32(fsi->endian, (fs)->dat_off))) /* super blk in cyl grp*/ #define cgsblock_lcl(fsi, fs, c) \ ((TSK_DADDR_T)(cgstart_lcl(fsi, fs, c) + tsk_gets32(fsi->endian, (fs)->sb_off))) /* original: ** blkstofrags(fs, blks) ** ((blks) << (fs)->fs_fragshift) */ #define blkstofrags_lcl(fsi, fs, blks) \ ((blks) << tsk_gets32(fsi->endian, (fs)->fs_fragshift)) /* original: ** itod(fs, x) \ ** ((TSK_DADDR_T)(cgimin(fs, itog(fs, x)) + \ ** (blkstofrags((fs), (((x)%(ulong_t)(fs)->cg_inode_num)/(ulong_t)INOPB(fs)))))) */ #define itod_lcl(fsi, fs, x) \ ((TSK_DADDR_T)(cgimin_lcl(fsi, fs, itog_lcl(fsi, fs, x)) + \ (blkstofrags_lcl(fsi, (fs), (((x)%(TSK_DADDR_T)tsk_gets32(fsi->endian, (fs)->cg_inode_num))/ \ (TSK_DADDR_T)tsk_gets32(fsi->endian, (fs)->fs_inopb)))))) /* original: ** itoo(fs, x) ((x) % (uint32_t)INOPB(fs)) */ #define itoo_lcl(fsi, fs, x) \ ((x) % (uint32_t)tsk_getu32(fsi->endian, (fs)->fs_inopb)) /* original: ** #define itog(fs, x) ((x) / (fs)->fs_cg_inode_num) */ #define itog_lcl(fsi, fs, x) \ (FFS_GRPNUM_T)((x) / tsk_gets32(fsi->endian, (fs)->cg_inode_num)) /* original: ** dtog(fs, d) ((d) / (fs)->fs_cg_frag_num) */ #define dtog_lcl(fsi, fs, d) \ (FFS_GRPNUM_T)((d) / tsk_gets32(fsi->endian, (fs)->cg_frag_num)) #define cg_inosused_lcl(fsi, cgp) \ ((uint8_t *)((uint8_t *)(cgp) + tsk_gets32(fsi->endian, (cgp)->cg_iusedoff))) #define cg_blksfree_lcl(fsi, cgp) \ ((uint8_t *)((uint8_t *)(cgp) + tsk_gets32(fsi->endian, (cgp)->cg_freeoff))) /* * Structure of a fast file system handle. 
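 *
 * Illustrative sketch (not part of the original header) of how the cylinder
 * group macros above locate an inode, assuming fsi is the TSK_FS_INFO pointer
 * and fs the UFS1 superblock (ffs_sb1):
 *
 *   FFS_GRPNUM_T grp = itog_lcl(fsi, fs, inum);  // cylinder group holding the inode
 *   TSK_DADDR_T addr = itod_lcl(fsi, fs, inum);  // fragment address of its inode block
 *   uint32_t idx = itoo_lcl(fsi, fs, inum);      // index of the inode within that block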
*/ typedef struct { TSK_FS_INFO fs_info; /* super class */ union { ffs_sb1 *sb1; /* super block buffer */ ffs_sb2 *sb2; /* super block buffer */ } fs; /* lock protects itbl_buf, itbl_addr, grp_buf, grp_num, grp_addr */ tsk_lock_t lock; char *itbl_buf; ///< Cached inode block buffer (r/w shared - lock) TSK_DADDR_T itbl_addr; ///< Address where inode block buf was read from (r/w shared - lock) char *grp_buf; ///< Cached cylinder group buffer (r/w shared - lock) FFS_GRPNUM_T grp_num; ///< Cyl grp num that is cached (r/w shared - lock) TSK_DADDR_T grp_addr; ///< Address where cached cyl grp data was read from (r/w shared - lock) FFS_GRPNUM_T groups_count; /* nr of descriptor group blocks */ unsigned int ffsbsize_f; /* num of frags in an FFS block */ unsigned int ffsbsize_b; /* size of an FFS block in bytes */ } FFS_INFO; extern TSK_RETVAL_ENUM ffs_dir_open_meta(TSK_FS_INFO * a_fs, TSK_FS_DIR ** a_fs_dir, TSK_INUM_T a_addr); #ifdef __cplusplus } #endif #endif /* _FFS_H */ sleuthkit-4.2.0/tsk/fs/tsk_fs.h000644 000765 000024 00000327152 12576320700 017212 0ustar00carrierstaff000000 000000 /* ** The Sleuth Kit ** ** Brian Carrier [carrier sleuthkit [dot] org] ** Copyright (c) 2003-2012 Brian Carrier. All rights reserved ** ** Matt Stillerman [matt@atc-nycorp.com] ** Copyright (c) 2012 ATC-NY. All rights reserved. ** This file contains data developed with support from the National ** Institute of Justice, Office of Justice Programs, U.S. Department of Justice. ** ** TASK ** Copyright (c) 2002 @stake Inc. All rights reserved ** ** Copyright (c) 1997,1998,1999, International Business Machines ** Corporation and others. All Rights Reserved. */ /** \file tsk_fs.h * External header file for file system support. * Note that this file is not meant to be directly included. * It is included by both libtsk.h and tsk_fs_i.h. */ /* LICENSE * .ad * .fi * This software is distributed under the IBM Public License. * AUTHOR(S) * Wietse Venema * IBM T.J. Watson Research * P.O. Box 704 * Yorktown Heights, NY 10598, USA --*/ /** * \defgroup fslib C File System Functions * \defgroup fslib_cpp C++ File System Classes */ #ifndef _TSK_FS_H #define _TSK_FS_H #include #ifdef __cplusplus extern "C" { #endif typedef struct TSK_FS_INFO TSK_FS_INFO; typedef struct TSK_FS_FILE TSK_FS_FILE; /**************** BLOCK Structure *******************/ /** \name Generic File System Block Data Structure */ //@{ /** Flags that are used in TSK_FS_BLOCK and in callback of file_walk. * Note that some of these are dependent. A block can be either TSK_FS_BLOCK_FLAG_ALLOC * or TSK_FS_BLOCK_FLAG_UNALLOC. It can be one of TSK_FS_BLOCK_FLAG_RAW, TSK_FS_BLOCK_FLAG_BAD, * TSK_FS_BLOCK_FLAG_RES, TSK_FS_BLOCK_FLAG_SPARSE, or TSK_FS_BLOCK_FLAG_COMP. Note that some of * these are set only by file_walk because they are file-level details, such as compression and sparse. 
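 *
 * For example (illustrative sketch), a block walk callback might select
 * unallocated content blocks with:
 *
 *   if ((flags & TSK_FS_BLOCK_FLAG_UNALLOC) && (flags & TSK_FS_BLOCK_FLAG_CONT)) {
 *       // block is unallocated and could hold deleted file content
 *   }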
*/ enum TSK_FS_BLOCK_FLAG_ENUM { TSK_FS_BLOCK_FLAG_UNUSED = 0x0000, ///< Used to show that TSK_FS_BLOCK structure has no data in it TSK_FS_BLOCK_FLAG_ALLOC = 0x0001, ///< Block is allocated (and not TSK_FS_BLOCK_FLAG_UNALLOC) TSK_FS_BLOCK_FLAG_UNALLOC = 0x0002, ///< Block is unallocated (and not TSK_FS_BLOCK_FLAG_ALLOC) TSK_FS_BLOCK_FLAG_CONT = 0x0004, ///< Block (could) contain file content (and not TSK_FS_BLOCK_FLAG_META) TSK_FS_BLOCK_FLAG_META = 0x0008, ///< Block (could) contain file system metadata (and not TSK_FS_BLOCK_FLAG_CONT) TSK_FS_BLOCK_FLAG_BAD = 0x0010, ///< Block has been marked as bad by the file system TSK_FS_BLOCK_FLAG_RAW = 0x0020, ///< The data has been read raw from the disk (and not COMP or SPARSE) TSK_FS_BLOCK_FLAG_SPARSE = 0x0040, ///< The data passed in the file_walk calback was stored as sparse (all zeros) (and not RAW or COMP) TSK_FS_BLOCK_FLAG_COMP = 0x0080, ///< The data passed in the file_walk callback was stored in a compressed form (and not RAW or SPARSE) TSK_FS_BLOCK_FLAG_RES = 0x0100, ///< The data passed in the file_walk callback is from an NTFS resident file TSK_FS_BLOCK_FLAG_AONLY = 0x0200 /// < The buffer in TSK_FS_BLOCK has no content (it could be non-empty, but should be ignored), but the flags and such are accurate }; typedef enum TSK_FS_BLOCK_FLAG_ENUM TSK_FS_BLOCK_FLAG_ENUM; /** * Flags that are used to specify which blocks to call the tsk_fs_block_walk() callback function with. */ enum TSK_FS_BLOCK_WALK_FLAG_ENUM { TSK_FS_BLOCK_WALK_FLAG_NONE = 0x00, ///< No Flags TSK_FS_BLOCK_WALK_FLAG_ALLOC = 0x01, ///< Allocated blocks TSK_FS_BLOCK_WALK_FLAG_UNALLOC = 0x02, ///< Unallocated blocks TSK_FS_BLOCK_WALK_FLAG_CONT = 0x04, ///< Blocks that could store file content TSK_FS_BLOCK_WALK_FLAG_META = 0x08, ///< Blocks that could store file system metadata TSK_FS_BLOCK_WALK_FLAG_AONLY = 0x10 ///< Do not include content in callback only address and allocation status }; typedef enum TSK_FS_BLOCK_WALK_FLAG_ENUM TSK_FS_BLOCK_WALK_FLAG_ENUM; #define TSK_FS_BLOCK_TAG 0x1b7c3f4a /** * Generic data structure to hold block data with metadata */ typedef struct { int tag; ///< \internal Will be set to TSK_FS_BLOCK_TAG if structure is valid / allocated TSK_FS_INFO *fs_info; ///< Pointer to file system that block is from char *buf; ///< Buffer with block data (of size TSK_FS_INFO::block_size) TSK_DADDR_T addr; ///< Address of block TSK_FS_BLOCK_FLAG_ENUM flags; /// < Flags for block (alloc or unalloc) } TSK_FS_BLOCK; /** * Function definition used for callback to tsk_fs_block_walk(). 
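 *
 * A minimal usage sketch (not part of the original header; count_cb and count
 * are hypothetical names):
 *
 *   static TSK_WALK_RET_ENUM count_cb(const TSK_FS_BLOCK *a_block, void *a_ptr) {
 *       (*(TSK_DADDR_T *) a_ptr)++;   // count each block passed to the callback
 *       return TSK_WALK_CONT;
 *   }
 *
 *   TSK_DADDR_T count = 0;
 *   tsk_fs_block_walk(fs, fs->first_block, fs->last_block,
 *       (TSK_FS_BLOCK_WALK_FLAG_ENUM) (TSK_FS_BLOCK_WALK_FLAG_ALLOC | TSK_FS_BLOCK_WALK_FLAG_CONT),
 *       count_cb, &count);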
* * @param a_block Pointer to block structure that holds block content and flags * @param a_ptr Pointer that was supplied by the caller who called tsk_fs_block_walk * @returns Value to identify if walk should continue, stop, or stop because of error */ typedef TSK_WALK_RET_ENUM(*TSK_FS_BLOCK_WALK_CB) (const TSK_FS_BLOCK * a_block, void *a_ptr); // external block-level functions extern void tsk_fs_block_free(TSK_FS_BLOCK * a_fs_block); extern TSK_FS_BLOCK *tsk_fs_block_get(TSK_FS_INFO * fs, TSK_FS_BLOCK * fs_block, TSK_DADDR_T addr); extern TSK_FS_BLOCK *tsk_fs_block_get_flag(TSK_FS_INFO * a_fs, TSK_FS_BLOCK * a_fs_block, TSK_DADDR_T a_addr, TSK_FS_BLOCK_FLAG_ENUM a_flags); extern uint8_t tsk_fs_block_walk(TSK_FS_INFO * a_fs, TSK_DADDR_T a_start_blk, TSK_DADDR_T a_end_blk, TSK_FS_BLOCK_WALK_FLAG_ENUM a_flags, TSK_FS_BLOCK_WALK_CB a_action, void *a_ptr); //@} /**************** DATA and DATA_LIST Structures ************/ /** \name Generic File System File Content Data Structures */ //@{ /* The location of "most" file content is stored in the generic TSK * data structures as runs (starting address and length). */ /** * Flags used for a TSK_FS_ATTR_RUN entry. */ typedef enum { TSK_FS_ATTR_RUN_FLAG_NONE = 0x00, ///< No Flag TSK_FS_ATTR_RUN_FLAG_FILLER = 0x01, ///< Entry is a filler for a run that has not been seen yet in the processing (or has been lost) TSK_FS_ATTR_RUN_FLAG_SPARSE = 0x02 ///< Entry is a sparse run where all data in the run is zeros } TSK_FS_ATTR_RUN_FLAG_ENUM; typedef struct TSK_FS_ATTR_RUN TSK_FS_ATTR_RUN; /** * Holds information about a single data run, which has a starting address and length. * A run describes a consecutive list of blocks that have been allocated to a file. * A file may have many such runs and they are stringed together in a linked list. * The entries in the list must be stored in sequential order (based on offset in file). */ struct TSK_FS_ATTR_RUN { TSK_FS_ATTR_RUN *next; ///< Pointer to the next run in the attribute (or NULL) TSK_DADDR_T offset; ///< Offset (in blocks) of this run in the file TSK_DADDR_T addr; ///< Starting block address (in file system) of run TSK_DADDR_T len; ///< Number of blocks in run (0 when entry is not in use) TSK_FS_ATTR_RUN_FLAG_ENUM flags; ///< Flags for run }; /** * Flags used for the TSK_FS_ATTR structure, which is used to * store file content metadata. */ typedef enum { TSK_FS_ATTR_FLAG_NONE = 0x00, ///< No Flag TSK_FS_ATTR_INUSE = 0x01, ///< data structure is in use TSK_FS_ATTR_NONRES = 0x02, ///< Contains non-resident data (i.e. located in blocks) TSK_FS_ATTR_RES = 0x04, ///< Contains resident data (i.e. in a small buffer) TSK_FS_ATTR_ENC = 0x10, ///< Contains encrypted data TSK_FS_ATTR_COMP = 0x20, ///< Contains compressed data TSK_FS_ATTR_SPARSE = 0x40, ///< Contains sparse data TSK_FS_ATTR_RECOVERY = 0x80, ///< Data was determined in file recovery mode } TSK_FS_ATTR_FLAG_ENUM; /** * File walk callback function definition. This is called for * chunks of content in the file being processed. 
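*
* A short callback sketch that appends each chunk of content to an open stdio
* stream, assuming an already-opened TSK_FS_FILE *file and FILE *out:
* \verbatim
* static TSK_WALK_RET_ENUM
* dump_cb(TSK_FS_FILE *a_fs_file, TSK_OFF_T a_off, TSK_DADDR_T a_addr,
*     char *a_buf, size_t a_len, TSK_FS_BLOCK_FLAG_ENUM a_flags, void *a_ptr)
* {
*     // a_ptr is the FILE* given to tsk_fs_file_walk() below
*     if (fwrite(a_buf, 1, a_len, (FILE *) a_ptr) != a_len)
*         return TSK_WALK_ERROR;     // abort the walk on a write error
*     return TSK_WALK_CONT;
* }
*
* if (tsk_fs_file_walk(file, TSK_FS_FILE_WALK_FLAG_NONE, dump_cb, out)) {
*     // non-zero return value means the walk failed
* }
* \endverbatim
*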
* @param a_fs_file Pointer to file being processed * @param a_off Byte offset in file that this data is for * @param a_addr Address of data being passed (valid only if a_flags have RAW set) * @param a_buf Pointer to buffer with file content * @param a_len Size of data in buffer (in bytes) * @param a_flags Flags about the file content * @param a_ptr Pointer that was specified by caller to inode_walk * @returns Value that tells file walk to continue or stop */ typedef TSK_WALK_RET_ENUM(*TSK_FS_FILE_WALK_CB) (TSK_FS_FILE * a_fs_file, TSK_OFF_T a_off, TSK_DADDR_T a_addr, char *a_buf, size_t a_len, TSK_FS_BLOCK_FLAG_ENUM a_flags, void *a_ptr); /** * Flags used by tsk_fs_file_walk to determine when the callback function should * be used. */ typedef enum { TSK_FS_FILE_WALK_FLAG_NONE = 0x00, ///< No Flag TSK_FS_FILE_WALK_FLAG_SLACK = 0x01, ///< Include the file's slack space in the callback. TSK_FS_FILE_WALK_FLAG_NOID = 0x02, ///< Ignore the Id argument given in the API (use only the type) TSK_FS_FILE_WALK_FLAG_AONLY = 0x04, ///< Provide callback with only addresses and no file content. TSK_FS_FILE_WALK_FLAG_NOSPARSE = 0x08, ///< Do not include sparse blocks in the callback. } TSK_FS_FILE_WALK_FLAG_ENUM; /** * These are based on the NTFS type values. * Added types for HFS+. * NOTE: Update bindings/java/src/org/sleuthkit/datamodel/TskData.java * with any changes. */ typedef enum { TSK_FS_ATTR_TYPE_NOT_FOUND = 0x00, // 0 TSK_FS_ATTR_TYPE_DEFAULT = 0x01, // 1 TSK_FS_ATTR_TYPE_NTFS_SI = 0x10, // 16 TSK_FS_ATTR_TYPE_NTFS_ATTRLIST = 0x20, // 32 TSK_FS_ATTR_TYPE_NTFS_FNAME = 0x30, // 48 TSK_FS_ATTR_TYPE_NTFS_VVER = 0x40, // 64 (NT) TSK_FS_ATTR_TYPE_NTFS_OBJID = 0x40, // 64 (2K) TSK_FS_ATTR_TYPE_NTFS_SEC = 0x50, // 80 TSK_FS_ATTR_TYPE_NTFS_VNAME = 0x60, // 96 TSK_FS_ATTR_TYPE_NTFS_VINFO = 0x70, // 112 TSK_FS_ATTR_TYPE_NTFS_DATA = 0x80, // 128 TSK_FS_ATTR_TYPE_NTFS_IDXROOT = 0x90, // 144 TSK_FS_ATTR_TYPE_NTFS_IDXALLOC = 0xA0, // 160 TSK_FS_ATTR_TYPE_NTFS_BITMAP = 0xB0, // 176 TSK_FS_ATTR_TYPE_NTFS_SYMLNK = 0xC0, // 192 (NT) TSK_FS_ATTR_TYPE_NTFS_REPARSE = 0xC0, // 192 (2K) TSK_FS_ATTR_TYPE_NTFS_EAINFO = 0xD0, // 208 TSK_FS_ATTR_TYPE_NTFS_EA = 0xE0, // 224 TSK_FS_ATTR_TYPE_NTFS_PROP = 0xF0, // (NT) TSK_FS_ATTR_TYPE_NTFS_LOG = 0x100, // (2K) TSK_FS_ATTR_TYPE_UNIX_INDIR = 0x1001, // Indirect blocks for UFS and ExtX file systems TSK_FS_ATTR_TYPE_UNIX_EXTENT = 0x1002, // Extents for Ext4 file system // Types for HFS+ File Attributes TSK_FS_ATTR_TYPE_HFS_DEFAULT = 0x01, // 1 Data fork of fs special files and misc TSK_FS_ATTR_TYPE_HFS_DATA = 0x1100, // 4352 Data fork of regular files TSK_FS_ATTR_TYPE_HFS_RSRC = 0x1101, // 4353 Resource fork of regular files TSK_FS_ATTR_TYPE_HFS_EXT_ATTR = 0x1102, // 4354 Extended Attributes, except compression records TSK_FS_ATTR_TYPE_HFS_COMP_REC = 0x1103, // 4355 Compression records } TSK_FS_ATTR_TYPE_ENUM; #define TSK_FS_ATTR_ID_DEFAULT 0 ///< Default Data ID used if file system does not assign one. typedef struct TSK_FS_ATTR TSK_FS_ATTR; /** * Holds information about the location of file content (or a file attribute). For most file systems, a file * has only a single attribute that stores the file content. * Other file systems, such as NTFS, have multiple * attributes. If multiple attributes exist, they are stored in a linked list. * Attributes can be "resident", which means the data is stored * in a small buffer instead of being stored in a full file system block. 
* "Non-resident" attributes store data in blocks and they are stored in * the data structure as a series of runs. * This structure is used to represent both of these cases. * * The non-resident data has several size values. * \verbatim * |--------------------------------------------------------------------| * |skiplen|---------------allocsize------------------------------------| * |skiplen|---------------size-----------------------------------| * |skiplen|---------------initsize------------| * \endverbatim */ struct TSK_FS_ATTR { TSK_FS_ATTR *next; ///< Pointer to next attribute in list TSK_FS_FILE *fs_file; ///< Pointer to the file that this is from TSK_FS_ATTR_FLAG_ENUM flags; ///< Flags for attribute char *name; ///< Name of attribute (in UTF-8). Will be NULL if attribute doesn't have a name. size_t name_size; ///< Number of bytes allocated to name TSK_FS_ATTR_TYPE_ENUM type; ///< Type of attribute uint16_t id; ///< Id of attribute TSK_OFF_T size; ///< Size in bytes of the attribute resident and non-resident content (does not include skiplen for non-resident attributes) /** * Data associated with a non-resident file / attribute. * The data is stored in one or more data runs. */ struct { TSK_FS_ATTR_RUN *run; ///< Linked list of runs for non-resident attributes TSK_FS_ATTR_RUN *run_end; ///< Pointer to final run in the list uint32_t skiplen; ///< Number of initial bytes in run to skip before content begins. The size field does not include this length. TSK_OFF_T allocsize; ///< Number of bytes that are allocated in all clusters of non-resident run (will be larger than size - does not include skiplen). This is defined when the attribute is created and used to determine slack space. TSK_OFF_T initsize; ///< Number of bytes (starting from offset 0) that have data (including FILLER) saved for them (smaller then or equal to size). This is defined when the attribute is created. uint32_t compsize; ///< Size of compression units (needed only if NTFS file is compressed) } nrd; /** * Data associated with a resident attribute / file. * The data is stored in a buffer. */ struct { uint8_t *buf; ///< Buffer for resident data size_t buf_size; ///< Number of bytes allocated to buf TSK_OFF_T offset; ///< Starting offset in bytes relative to start of file system (NOT YET IMPLEMENTED) } rd; /* Special file (compressed, encrypted, etc.) */ ssize_t(*r) (const TSK_FS_ATTR * fs_attr, TSK_OFF_T a_offset, char *a_buf, size_t a_len); uint8_t(*w) (const TSK_FS_ATTR * fs_attr, int flags, TSK_FS_FILE_WALK_CB, void *); }; /** * Structure used as the head of an attribute list. */ typedef struct { TSK_FS_ATTR *head; } TSK_FS_ATTRLIST; extern uint8_t tsk_fs_attr_walk(const TSK_FS_ATTR * a_fs_attr, TSK_FS_FILE_WALK_FLAG_ENUM a_flags, TSK_FS_FILE_WALK_CB a_action, void *a_ptr); //@} /**************** META_NAME_LIST Structure *******************/ /** \name Generic File System File Metadata Data Structures */ //@{ /** * Size of name array in TSK_FS_META_NAME_LIST structure */ #define TSK_FS_META_NAME_LIST_NSIZE 512 typedef struct TSK_FS_META_NAME_LIST TSK_FS_META_NAME_LIST; /** * Relatively generic structure to hold file names that are stored with * the file metadata. Note that this is different from the * file name stored in the directory heirarchy, which is * part of the tsk_fs_name_... code. This is currently * used for NTFS and FAT file systems only. 
*/ struct TSK_FS_META_NAME_LIST { TSK_FS_META_NAME_LIST *next; ///< Pointer to next name (or NULL) char name[TSK_FS_META_NAME_LIST_NSIZE]; ///< Name in UTF-8 (does not include parent directory name) TSK_INUM_T par_inode; ///< Inode address of parent directory (NTFS only) uint32_t par_seq; ///< Sequence number of parent directory (NTFS only) }; /****************** META Structure ***************/ /** * Metadata flags used in TSK_FS_META.flags and in request to inode_walk */ enum TSK_FS_META_FLAG_ENUM { TSK_FS_META_FLAG_ALLOC = 0x01, ///< Metadata structure is currently in an allocated state TSK_FS_META_FLAG_UNALLOC = 0x02, ///< Metadata structure is currently in an unallocated state TSK_FS_META_FLAG_USED = 0x04, ///< Metadata structure has been allocated at least once TSK_FS_META_FLAG_UNUSED = 0x08, ///< Metadata structure has never been allocated. TSK_FS_META_FLAG_COMP = 0x10, ///< The file contents are compressed. TSK_FS_META_FLAG_ORPHAN = 0x20, ///< Return only metadata structures that have no file name pointing to the (inode_walk flag only) }; typedef enum TSK_FS_META_FLAG_ENUM TSK_FS_META_FLAG_ENUM; enum TSK_FS_META_ATTR_FLAG_ENUM { TSK_FS_META_ATTR_EMPTY, ///< The data in the attributes (if any) is not for this file TSK_FS_META_ATTR_STUDIED, ///< The data in the attributes are for this file TSK_FS_META_ATTR_ERROR, ///< The attributes for this file could not be loaded }; typedef enum TSK_FS_META_ATTR_FLAG_ENUM TSK_FS_META_ATTR_FLAG_ENUM; /** * Values for the mode field -- which identifies the file type * and permissions. */ enum TSK_FS_META_TYPE_ENUM { TSK_FS_META_TYPE_UNDEF = 0x00, TSK_FS_META_TYPE_REG = 0x01, ///< Regular file TSK_FS_META_TYPE_DIR = 0x02, ///< Directory file TSK_FS_META_TYPE_FIFO = 0x03, ///< Named pipe (fifo) TSK_FS_META_TYPE_CHR = 0x04, ///< Character device TSK_FS_META_TYPE_BLK = 0x05, ///< Block device TSK_FS_META_TYPE_LNK = 0x06, ///< Symbolic link TSK_FS_META_TYPE_SHAD = 0x07, ///< SOLARIS ONLY TSK_FS_META_TYPE_SOCK = 0x08, ///< UNIX domain socket TSK_FS_META_TYPE_WHT = 0x09, ///< Whiteout TSK_FS_META_TYPE_VIRT = 0x0a, ///< "Virtual File" created by TSK for file system areas }; typedef enum TSK_FS_META_TYPE_ENUM TSK_FS_META_TYPE_ENUM; #define TSK_FS_META_TYPE_STR_MAX 0x0b ///< Number of file types in shortname array extern char tsk_fs_meta_type_str[TSK_FS_META_TYPE_STR_MAX][2]; enum TSK_FS_META_MODE_ENUM { /* The following describe the file permissions */ TSK_FS_META_MODE_UNSPECIFIED = 0000000, ///< unspecified TSK_FS_META_MODE_ISUID = 0004000, ///< set user id on execution TSK_FS_META_MODE_ISGID = 0002000, ///< set group id on execution TSK_FS_META_MODE_ISVTX = 0001000, ///< sticky bit TSK_FS_META_MODE_IRUSR = 0000400, ///< R for owner TSK_FS_META_MODE_IWUSR = 0000200, ///< W for owner TSK_FS_META_MODE_IXUSR = 0000100, ///< X for owner TSK_FS_META_MODE_IRGRP = 0000040, ///< R for group TSK_FS_META_MODE_IWGRP = 0000020, ///< W for group TSK_FS_META_MODE_IXGRP = 0000010, ///< X for group TSK_FS_META_MODE_IROTH = 0000004, ///< R for other TSK_FS_META_MODE_IWOTH = 0000002, ///< W for other TSK_FS_META_MODE_IXOTH = 0000001 ///< X for other }; typedef enum TSK_FS_META_MODE_ENUM TSK_FS_META_MODE_ENUM; typedef enum TSK_FS_META_CONTENT_TYPE_ENUM { TSK_FS_META_CONTENT_TYPE_DEFAULT = 0x0, TSK_FS_META_CONTENT_TYPE_EXT4_EXTENTS = 0x1 ///< Ext4 with extents instead of individual pointers } TSK_FS_META_CONTENT_TYPE_ENUM; #define TSK_FS_META_TAG 0x13524635 /** * TSK data structure to store general file and directory metadata. 
* Note that the file in the file * system may have more metadata than is stored here. * For performance reasons, the run list of the file content is not always known * when the file is loaded. It may be loaded only when needed by the internal code. * The TSK_FS_META::content_ptr pointer contains file system-specific data that will be * used to determine the full run. After it has been loaded, the TSK_FS_META::attr field * will contain that info. */ typedef struct { int tag; ///< \internal Will be set to TSK_FS_META_TAG if structure is allocated TSK_FS_META_FLAG_ENUM flags; ///< Flags for this file for its allocation status etc. TSK_INUM_T addr; ///< Address of the meta data structure for this file TSK_FS_META_TYPE_ENUM type; ///< File type TSK_FS_META_MODE_ENUM mode; ///< Unix-style permissions int nlink; ///< link count (number of file names pointing to this) TSK_OFF_T size; ///< file size (in bytes) TSK_UID_T uid; ///< owner id TSK_GID_T gid; ///< group id /* @@@ Need to make these 64-bits ... ? */ time_t mtime; ///< last file content modification time (stored in number of seconds since Jan 1, 1970 UTC) uint32_t mtime_nano; ///< nano-second resolution in addition to m_time time_t atime; ///< last file content accessed time (stored in number of seconds since Jan 1, 1970 UTC) uint32_t atime_nano; ///< nano-second resolution in addition to a_time time_t ctime; ///< last file / metadata status change time (stored in number of seconds since Jan 1, 1970 UTC) uint32_t ctime_nano; ///< nano-second resolution in addition to c_time time_t crtime; ///< Created time (stored in number of seconds since Jan 1, 1970 UTC) uint32_t crtime_nano; ///< nano-second resolution in addition to cr_time /* filesystem specific times */ union { struct { time_t dtime; ///< Linux deletion time uint32_t dtime_nano; ///< nano-second resolution in addition to d_time } ext2; struct { time_t bkup_time; ///< HFS+ backup time uint32_t bkup_time_nano; ///< nano-second resolution in addition to bkup_time } hfs; struct { time_t fn_crtime; ///< NTFS Created time stored in FILE_NAME time_t fn_crtime_nano; ///< NTFS Created time stored in FILE_NAME in nano-second resolution time_t fn_mtime; ///< NTFS mod (content) stored in FILE_NAME time_t fn_mtime_nano; ///< NTFS mod time stored in FILE_NAME in nano-second resolution time_t fn_atime; ///< NTFS access time stored in FILE_NAME time_t fn_atime_nano; ///< NTFS access time stored in FILE_NAME in nano-second resolution time_t fn_ctime; ///< NTFS change (MFT Entry) time stored in FILE_NAME time_t fn_ctime_nano; ///< NTFS change (MFT Entry) time stored in FILE_NAME in nano-second resolution uint16_t fn_id; ///< Attribute ID used to populate FN times. } ntfs; } time2; void *content_ptr; ///< Pointer to file system specific data that is used to store references to file content size_t content_len; ///< size of content buffer TSK_FS_META_CONTENT_TYPE_ENUM content_type; ///< File system-specific and describes type of data in content_ptr in case file systems have multiple ways of storing things. uint32_t seq; ///< Sequence number for file (NTFS only, is incremented when entry is reallocated) /** Contains run data on the file content (specific locations where content is stored). * Check attr_state to determine if data in here is valid because not all file systems * load this data when a file is loaded. It may not be loaded until needed by one * of the APIs. Most file systems will have only one attribute, but NTFS will have several. 
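*
* A short enumeration sketch using the attribute accessors declared later in
* this header, assuming an already-opened TSK_FS_FILE *fs_file:
* \verbatim
* int i, cnt = tsk_fs_file_attr_getsize(fs_file);  // number of attributes
* for (i = 0; i < cnt; i++) {
*     const TSK_FS_ATTR *a = tsk_fs_file_attr_get_idx(fs_file, i);
*     if (a == NULL)
*         continue;
*     printf("type %d, id %u, size %lld, name %s\n",
*         (int) a->type, (unsigned) a->id, (long long) a->size,
*         a->name != NULL ? a->name : "(none)");
* }
* \endverbatim
*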
*/ TSK_FS_ATTRLIST *attr; TSK_FS_META_ATTR_FLAG_ENUM attr_state; ///< State of the data in the TSK_FS_META::attr structure TSK_FS_META_NAME_LIST *name2; ///< Name of file stored in metadata (FATXX and NTFS Only) char *link; ///< Name of target file if this is a symbolic link } TSK_FS_META; /** String that is prepended to orphan FAT & NTFS files when the file * name is known, but the parent is not */ #define TSK_FS_ORPHAN_STR "-ORPHAN_FILE-" /* we are using the last inode as the special inode for the orphan directory. Note that this * macro is defined to abstract this convention, but there are many places in the code where * there is implied logic about this convention. For example, inode_walks will stop before * this value so that special handling can occur. */ #define TSK_FS_ORPHANDIR_INUM(fs_info) \ (fs_info->last_inum) /** * inode walk callback function definition. This is called for every file * that meets the critera specified when inode_walk was called. * @param a_fs_file Pointer to the current file * @param a_ptr Pointer that was specified by caller to inode_walk * @returns Value that tells inode walk to continue or stop */ typedef TSK_WALK_RET_ENUM(*TSK_FS_META_WALK_CB) (TSK_FS_FILE * a_fs_file, void *a_ptr); extern uint8_t tsk_fs_meta_walk(TSK_FS_INFO * a_fs, TSK_INUM_T a_start, TSK_INUM_T a_end, TSK_FS_META_FLAG_ENUM a_flags, TSK_FS_META_WALK_CB a_cb, void *a_ptr); extern uint8_t tsk_fs_meta_make_ls(const TSK_FS_META * a_fs_meta, char *a_buf, size_t a_len); //@} /************* NAME / DIR structures **********/ /** \name Generic File System File Name Data Structures */ //@{ /** * File name flags that are used when specifying the status of * a name in the TSK_FS_NAME structure */ typedef enum { TSK_FS_NAME_FLAG_ALLOC = 0x01, ///< Name is in an allocated state TSK_FS_NAME_FLAG_UNALLOC = 0x02, ///< Name is in an unallocated state } TSK_FS_NAME_FLAG_ENUM; /** * File type values -- as specified in the directory entry structure. */ typedef enum { TSK_FS_NAME_TYPE_UNDEF = 0, ///< Unknown type TSK_FS_NAME_TYPE_FIFO = 1, ///< Named pipe TSK_FS_NAME_TYPE_CHR = 2, ///< Character device TSK_FS_NAME_TYPE_DIR = 3, ///< Directory TSK_FS_NAME_TYPE_BLK = 4, ///< Block device TSK_FS_NAME_TYPE_REG = 5, ///< Regular file TSK_FS_NAME_TYPE_LNK = 6, ///< Symbolic link TSK_FS_NAME_TYPE_SOCK = 7, ///< Socket TSK_FS_NAME_TYPE_SHAD = 8, ///< Shadow inode (solaris) TSK_FS_NAME_TYPE_WHT = 9, ///< Whiteout (openbsd) TSK_FS_NAME_TYPE_VIRT = 10, ///< Special (TSK added "Virtual" files) } TSK_FS_NAME_TYPE_ENUM; #define TSK_FS_NAME_TYPE_STR_MAX 11 ///< Number of types that have a short string name /* ascii representation of above types */ extern char tsk_fs_name_type_str[TSK_FS_NAME_TYPE_STR_MAX][2]; #define TSK_FS_NAME_TAG 0x23147869 /** * Generic structure to store the file name information that is stored in * a directory. Most file systems seperate the file name from the metadata, but * some do not (such as FAT). This structure contains the name and address of the * metadata. */ typedef struct { int tag; ///< \internal Set to TSK_FS_NAME_ID if allocated, 0 if not char *name; ///< The name of the file (in UTF-8) size_t name_size; ///< The number of bytes allocated to name char *shrt_name; ///< The short name of the file or null (in UTF-8) size_t shrt_name_size; ///< The number of bytes allocated to shrt_name TSK_INUM_T meta_addr; ///< Address of the metadata structure that the name points to. 
uint32_t meta_seq; ///< Sequence number for metadata structure (NTFS only) TSK_INUM_T par_addr; ///< Metadata address of parent directory (equal to meta_addr if this entry is for root directory). uint32_t par_seq; ///< Sequence number for parent directory (NTFS only) TSK_FS_NAME_TYPE_ENUM type; ///< File type information (directory, file, etc.) TSK_FS_NAME_FLAG_ENUM flags; ///< Flags that describe allocation status etc. } TSK_FS_NAME; /** * Definition of callback function that is used by tsk_fs_dir_walk(). This is * is called for each file in a directory. * @param a_fs_file Pointer to the current file in the directory * @param a_path Path of the file * @param a_ptr Pointer that was originally passed by caller to tsk_fs_dir_walk. * @returns Value to signal if tsk_fs_dir_walk should stop or continue. */ typedef TSK_WALK_RET_ENUM(*TSK_FS_DIR_WALK_CB) (TSK_FS_FILE * a_fs_file, const char *a_path, void *a_ptr); #define TSK_FS_DIR_TAG 0x97531246 /** * A handle to a directory so that its files can be individually accessed. */ typedef struct { int tag; ///< \internal Will be set to TSK_FS_DIR_TAG if structure is still allocated, 0 if not TSK_FS_FILE *fs_file; ///< Pointer to the file structure for the directory. TSK_FS_NAME *names; ///< Pointer to list of names in directory. size_t names_used; ///< Number of name structures in queue being used size_t names_alloc; ///< Number of name structures that were allocated TSK_INUM_T addr; ///< Metadata address of this directory uint32_t seq; ///< Metadata address sequence (NTFS Only) TSK_FS_INFO *fs_info; ///< Pointer to file system the directory is located in } TSK_FS_DIR; /** * Flags that are used when walking names in directories. These are used to identify * which files to call the callback function on. */ typedef enum { TSK_FS_DIR_WALK_FLAG_NONE = 0x00, ///< No Flags TSK_FS_DIR_WALK_FLAG_ALLOC = 0x01, ///< Return allocated names in callback TSK_FS_DIR_WALK_FLAG_UNALLOC = 0x02, ///< Return unallocated names in callback TSK_FS_DIR_WALK_FLAG_RECURSE = 0x04, ///< Recurse into sub-directories TSK_FS_DIR_WALK_FLAG_NOORPHAN = 0x08, ///< Do not return (or recurse into) the special Orphan directory } TSK_FS_DIR_WALK_FLAG_ENUM; extern TSK_FS_DIR *tsk_fs_dir_open_meta(TSK_FS_INFO * a_fs, TSK_INUM_T a_addr); extern TSK_FS_DIR *tsk_fs_dir_open(TSK_FS_INFO * a_fs, const char *a_dir); extern uint8_t tsk_fs_dir_walk(TSK_FS_INFO * a_fs, TSK_INUM_T a_inode, TSK_FS_DIR_WALK_FLAG_ENUM a_flags, TSK_FS_DIR_WALK_CB a_action, void *a_ptr); extern size_t tsk_fs_dir_getsize(const TSK_FS_DIR *); extern TSK_FS_FILE *tsk_fs_dir_get(const TSK_FS_DIR *, size_t); extern const TSK_FS_NAME *tsk_fs_dir_get_name(const TSK_FS_DIR * a_fs_dir, size_t a_idx); extern void tsk_fs_dir_close(TSK_FS_DIR *); extern int8_t tsk_fs_path2inum(TSK_FS_INFO * a_fs, const char *a_path, TSK_INUM_T * a_result, TSK_FS_NAME * a_fs_name); //@} /********************* FILE Structure *************************/ /** \name Generic File System File Data Structures */ //@{ #define TSK_FS_FILE_TAG 0x11212212 /** * Generic structure used to refer to files in the file system. A file will * typically have a name and metadata. This structure holds that type of information. * When deleted files are being processed, this structure may have the name defined * but not metadata because it no longer exists. Or, if you are calling meta_walk * and are not processing at the name level, then the name will not be defined. * always check these to make sure they are not null before they are read. 
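*
* A typical open-and-check sketch, assuming an already-opened TSK_FS_INFO *fs
* (the path shown is only an example):
* \verbatim
* TSK_FS_FILE *fs_file = tsk_fs_file_open(fs, NULL, "/docs/report.txt");
* if (fs_file != NULL) {
*     if (fs_file->meta != NULL)
*         printf("size: %lld\n", (long long) fs_file->meta->size);
*     if (fs_file->name != NULL)
*         printf("name: %s\n", fs_file->name->name);
*     tsk_fs_file_close(fs_file);
* }
* \endverbatim
*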
*/ struct TSK_FS_FILE { int tag; ///< \internal Will be set to TSK_FS_FILE_TAG if structure is allocated TSK_FS_NAME *name; ///< Pointer to name of file (or NULL if file was opened using metadata address) TSK_FS_META *meta; ///< Pointer to metadata of file (or NULL if name has invalid metadata address) TSK_FS_INFO *fs_info; ///< Pointer to file system that the file is located in. }; /** * Flags used by tsk_fs_file_read */ typedef enum { TSK_FS_FILE_READ_FLAG_NONE = 0x00, ///< No Flags TSK_FS_FILE_READ_FLAG_SLACK = 0x01, ///< Allow read access into slack space TSK_FS_FILE_READ_FLAG_NOID = 0x02, ///< Ignore the Id argument given in the API (use only the type) } TSK_FS_FILE_READ_FLAG_ENUM; extern void tsk_fs_file_close(TSK_FS_FILE * a_fs_file); extern TSK_FS_FILE *tsk_fs_file_open(TSK_FS_INFO * a_fs, TSK_FS_FILE * a_fs_file, const char *a_path); extern TSK_FS_FILE *tsk_fs_file_open_meta(TSK_FS_INFO * fs, TSK_FS_FILE * fs_file, TSK_INUM_T addr); extern ssize_t tsk_fs_file_read(TSK_FS_FILE *, TSK_OFF_T, char *, size_t, TSK_FS_FILE_READ_FLAG_ENUM); extern ssize_t tsk_fs_file_read_type(TSK_FS_FILE *, TSK_FS_ATTR_TYPE_ENUM, uint16_t, TSK_OFF_T, char *, size_t, TSK_FS_FILE_READ_FLAG_ENUM); extern const TSK_FS_ATTR *tsk_fs_file_attr_get(TSK_FS_FILE * a_fs_file); extern int tsk_fs_file_attr_getsize(TSK_FS_FILE * a_fs_file); extern const TSK_FS_ATTR *tsk_fs_file_attr_get_idx(TSK_FS_FILE * a_fs_file, int a_idx); extern const TSK_FS_ATTR *tsk_fs_file_attr_get_type(TSK_FS_FILE * a_fs_file, TSK_FS_ATTR_TYPE_ENUM, uint16_t, uint8_t); extern const TSK_FS_ATTR *tsk_fs_file_attr_get_id(TSK_FS_FILE * a_fs_file, uint16_t); extern uint8_t tsk_fs_file_walk(TSK_FS_FILE * a_fs_file, TSK_FS_FILE_WALK_FLAG_ENUM a_flags, TSK_FS_FILE_WALK_CB a_action, void *a_ptr); extern uint8_t tsk_fs_file_walk_type(TSK_FS_FILE * a_fs_file, TSK_FS_ATTR_TYPE_ENUM a_type, uint16_t a_id, TSK_FS_FILE_WALK_FLAG_ENUM a_flags, TSK_FS_FILE_WALK_CB a_action, void *a_ptr); extern ssize_t tsk_fs_attr_read(const TSK_FS_ATTR * a_fs_attr, TSK_OFF_T a_offset, char *a_buf, size_t a_len, TSK_FS_FILE_READ_FLAG_ENUM a_flags); extern uint8_t tsk_fs_file_get_owner_sid(TSK_FS_FILE *, char **); typedef struct { TSK_BASE_HASH_ENUM flags; unsigned char md5_digest[16]; unsigned char sha1_digest[20]; } TSK_FS_HASH_RESULTS; extern uint8_t tsk_fs_file_hash_calc(TSK_FS_FILE *, TSK_FS_HASH_RESULTS *, TSK_BASE_HASH_ENUM); //@} /****************** Journal Structures *************/ /** \name Generic File System Journal Data Structures */ //@{ typedef struct { TSK_DADDR_T jblk; /* journal block address */ TSK_DADDR_T fsblk; /* fs block that journal entry is about */ } TSK_FS_JENTRY; typedef TSK_WALK_RET_ENUM(*TSK_FS_JBLK_WALK_CB) (TSK_FS_INFO *, char *, int, void *); typedef TSK_WALK_RET_ENUM(*TSK_FS_JENTRY_WALK_CB) (TSK_FS_INFO *, TSK_FS_JENTRY *, int, void *); //@} /******************************* TSK_FS_INFO ******************/ /** \name Generic File System Handle Data Structure */ //@{ /** * Values for the file system type. Each bit corresponds to a file * system. 
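*
* A caller can map a user-supplied name to one of these values and back; a
* minimal sketch using the UTF-8 helpers declared later in this header:
* \verbatim
* TSK_FS_TYPE_ENUM ftype = tsk_fs_type_toid_utf8("ntfs");
* if (ftype == TSK_FS_TYPE_UNSUPP) {
*     tsk_fs_type_print(stderr);     // list the supported type names
* }
* else {
*     printf("using %s\n", tsk_fs_type_toname(ftype));
*     // pass ftype (or TSK_FS_TYPE_DETECT) to tsk_fs_open_img() / tsk_fs_open_vol()
* }
* \endverbatim
*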
*/ enum TSK_FS_TYPE_ENUM { TSK_FS_TYPE_DETECT = 0x00000000, ///< Use autodetection methods TSK_FS_TYPE_NTFS = 0x00000001, ///< NTFS file system TSK_FS_TYPE_NTFS_DETECT = 0x00000001, ///< NTFS auto detection TSK_FS_TYPE_FAT12 = 0x00000002, ///< FAT12 file system TSK_FS_TYPE_FAT16 = 0x00000004, ///< FAT16 file system TSK_FS_TYPE_FAT32 = 0x00000008, ///< FAT32 file system TSK_FS_TYPE_EXFAT = 0x0000000a, ///< exFAT file system TSK_FS_TYPE_FAT_DETECT = 0x0000000e, ///< FAT auto detection TSK_FS_TYPE_FFS1 = 0x00000010, ///< UFS1 (FreeBSD, OpenBSD, BSDI ...) TSK_FS_TYPE_FFS1B = 0x00000020, ///< UFS1b (Solaris - has no type) TSK_FS_TYPE_FFS2 = 0x00000040, ///< UFS2 - FreeBSD, NetBSD TSK_FS_TYPE_FFS_DETECT = 0x00000070, ///< UFS auto detection TSK_FS_TYPE_EXT2 = 0x00000080, ///< Ext2 file system TSK_FS_TYPE_EXT3 = 0x00000100, ///< Ext3 file system TSK_FS_TYPE_EXT_DETECT = 0x00002180, ///< ExtX auto detection TSK_FS_TYPE_SWAP = 0x00000200, ///< SWAP file system TSK_FS_TYPE_SWAP_DETECT = 0x00000200, ///< SWAP auto detection TSK_FS_TYPE_RAW = 0x00000400, ///< RAW file system TSK_FS_TYPE_RAW_DETECT = 0x00000400, ///< RAW auto detection TSK_FS_TYPE_ISO9660 = 0x00000800, ///< ISO9660 file system TSK_FS_TYPE_ISO9660_DETECT = 0x00000800, ///< ISO9660 auto detection TSK_FS_TYPE_HFS = 0x00001000, ///< HFS file system TSK_FS_TYPE_HFS_DETECT = 0x00001000, ///< HFS auto detection TSK_FS_TYPE_EXT4 = 0x00002000, ///< Ext4 file system TSK_FS_TYPE_YAFFS2 = 0x00004000, ///< YAFFS2 file system TSK_FS_TYPE_YAFFS2_DETECT = 0x00004000, ///< YAFFS2 auto detection TSK_FS_TYPE_UNSUPP = 0xffffffff, ///< Unsupported file system }; /* NOTE: Update bindings/java/src/org/sleuthkit/datamodel/TskData.java * with any changes. */ typedef enum TSK_FS_TYPE_ENUM TSK_FS_TYPE_ENUM; /** * \ingroup fslib * Macro that takes a file system type and returns 1 if the type * is for an NTFS file system. */ #define TSK_FS_TYPE_ISNTFS(ftype) \ (((ftype) & TSK_FS_TYPE_NTFS_DETECT)?1:0) /** * \ingroup fslib * Macro that takes a file system type and returns 1 if the type * is for a FAT file system. */ #define TSK_FS_TYPE_ISFAT(ftype) \ (((ftype) & TSK_FS_TYPE_FAT_DETECT)?1:0) /** * \ingroup fslib * Macro that takes a file system type and returns 1 if the type * is for a FFS file system. */ #define TSK_FS_TYPE_ISFFS(ftype) \ (((ftype) & TSK_FS_TYPE_FFS_DETECT)?1:0) /** * \ingroup fslib * Macro that takes a file system type and returns 1 if the type * is for a ExtX file system. */ #define TSK_FS_TYPE_ISEXT(ftype) \ (((ftype) & TSK_FS_TYPE_EXT_DETECT)?1:0) /** * \ingroup fslib * Macro that takes a file system type and returns 1 if the type * is for a ISO9660 file system. */ #define TSK_FS_TYPE_ISISO9660(ftype) \ (((ftype) & TSK_FS_TYPE_ISO9660_DETECT)?1:0) /** * \ingroup fslib * Macro that takes a file system type and returns 1 if the type * is for a HFS file system. */ #define TSK_FS_TYPE_ISHFS(ftype) \ (((ftype) & TSK_FS_TYPE_HFS_DETECT)?1:0) /** * \ingroup fslib * Macro that takes a file system type and returns 1 if the type * is for a swap "file system". */ #define TSK_FS_TYPE_ISSWAP(ftype) \ (((ftype) & TSK_FS_TYPE_SWAP_DETECT)?1:0) /** * \ingroup fslib * Macro that takes a file system type and returns 1 if the type * is for a YAFFS2 file system. */ #define TSK_FS_TYPE_ISYAFFS2(ftype) \ (((ftype) & TSK_FS_TYPE_YAFFS2_DETECT)?1:0) /** * \ingroup fslib * Macro that takes a file system type and returns 1 if the type * is for a raw "file system". 
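*
* Like the other TSK_FS_TYPE_ISxxx macros above, this is typically used to
* branch on TSK_FS_INFO::ftype; a small sketch, assuming an open
* TSK_FS_INFO *fs:
* \verbatim
* if (TSK_FS_TYPE_ISNTFS(fs->ftype))
*     printf("NTFS\n");
* else if (TSK_FS_TYPE_ISFAT(fs->ftype))
*     printf("FAT\n");
* else if (TSK_FS_TYPE_ISRAW(fs->ftype) || TSK_FS_TYPE_ISSWAP(fs->ftype))
*     printf("raw/swap pseudo file system\n");
* \endverbatim
*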
*/ #define TSK_FS_TYPE_ISRAW(ftype) \ (((ftype) & TSK_FS_TYPE_RAW_DETECT)?1:0) /** * Flags for the FS_INFO structure */ enum TSK_FS_INFO_FLAG_ENUM { TSK_FS_INFO_FLAG_NONE = 0x00, ///< No Flags TSK_FS_INFO_FLAG_HAVE_SEQ = 0x01, ///< File system has sequence numbers in the inode addresses. TSK_FS_INFO_FLAG_HAVE_NANOSEC = 0x02 ///< Nano second field in times will be set. }; typedef enum TSK_FS_INFO_FLAG_ENUM TSK_FS_INFO_FLAG_ENUM; #define TSK_FS_INFO_TAG 0x10101010 #define TSK_FS_INFO_FS_ID_LEN 32 // set based on largest file system / volume ID supported /** * Stores state information for an open file system. * One of these are generated for each open files system and it contains * file system-type specific data. These values are all filled in by * the file system code and not the caller functions. This struct * (and its subclasses) should be allocated only by tsk_fs_malloc * and deallocated only by tsk_fs_free, which handle init/deinit * of the locks. */ struct TSK_FS_INFO { int tag; ///< \internal Will be set to TSK_FS_INFO_TAG if structure is still allocated, 0 if not TSK_IMG_INFO *img_info; ///< Pointer to the image layer state TSK_OFF_T offset; ///< Byte offset into img_info that fs starts /* meta data */ TSK_INUM_T inum_count; ///< Number of metadata addresses TSK_INUM_T root_inum; ///< Metadata address of root directory TSK_INUM_T first_inum; ///< First valid metadata address TSK_INUM_T last_inum; ///< Last valid metadata address /* content */ TSK_DADDR_T block_count; ///< Number of blocks in fs TSK_DADDR_T first_block; ///< Address of first block TSK_DADDR_T last_block; ///< Address of last block as reported by file system (could be larger than last_block in image if end of image does not exist) TSK_DADDR_T last_block_act; ///< Address of last block -- adjusted so that it is equal to the last block in the image or volume (if image is not complete) unsigned int block_size; ///< Size of each block (in bytes) unsigned int dev_bsize; ///< Size of device block (typically always 512) /* The following are used for really RAW images that contain data before and after the actual user sector. For example, a raw cd image may have 16 bytes before the start of each sector. */ unsigned int block_pre_size; ///< Number of bytes that preceed each block (currently only used for RAW CDs) unsigned int block_post_size; ///< Number of bytes that follow each block (currently only used for RAW CDs) /* Journal */ TSK_INUM_T journ_inum; ///< Address of journal inode TSK_FS_TYPE_ENUM ftype; ///< type of file system const char *duname; ///< string "name" of data unit type TSK_FS_INFO_FLAG_ENUM flags; ///< flags for file system uint8_t fs_id[TSK_FS_INFO_FS_ID_LEN]; ///< File system id (as reported in boot sector) size_t fs_id_used; ///< Number of bytes in fs_id that are being used TSK_ENDIAN_ENUM endian; ///< Endian order of data /* list_inum_named_lock protects list_inum_named */ tsk_lock_t list_inum_named_lock; // taken when r/w the list_inum_named list TSK_LIST *list_inum_named; /**< List of unallocated inodes that * are pointed to by a file name -- * Used to find orphan files. Is filled * after looking for orphans * or afer a full name_walk is performed. * (r/w shared - lock) */ /* orphan_hunt_lock protects orphan_dir */ tsk_lock_t orphan_dir_lock; // taken for the duration of orphan hunting (not just when updating orphan_dir) TSK_FS_DIR *orphan_dir; ///< Files and dirs in the top level of the $OrphanFiles directory. NULL if orphans have not been hunted for yet. 
(r/w shared - lock) uint8_t(*block_walk) (TSK_FS_INFO * fs, TSK_DADDR_T start, TSK_DADDR_T end, TSK_FS_BLOCK_WALK_FLAG_ENUM flags, TSK_FS_BLOCK_WALK_CB cb, void *ptr); ///< FS-specific function: Call tsk_fs_block_walk() instead. TSK_FS_BLOCK_FLAG_ENUM(*block_getflags) (TSK_FS_INFO * a_fs, TSK_DADDR_T a_addr); ///< \internal uint8_t(*inode_walk) (TSK_FS_INFO * fs, TSK_INUM_T start, TSK_INUM_T end, TSK_FS_META_FLAG_ENUM flags, TSK_FS_META_WALK_CB cb, void *ptr); ///< FS-specific function: Call tsk_fs_meta_walk() instead. uint8_t(*file_add_meta) (TSK_FS_INFO * fs, TSK_FS_FILE * fs_file, TSK_INUM_T addr); ///< \internal TSK_FS_ATTR_TYPE_ENUM(*get_default_attr_type) (const TSK_FS_FILE *); ///< \internal uint8_t(*load_attrs) (TSK_FS_FILE *); ///< \internal /** * Pointer to file system specific function that prints details on a specific file to a file handle. * * @param fs File system file is located in * @param hFile File handle to print text to * @param inum Address of file in file system * @param numblock The number of blocks in file to force print (can go beyond file size) * @param sec_skew Clock skew in seconds to also print times in * * @returns 1 on error and 0 on success */ uint8_t(*istat) (TSK_FS_INFO * fs, FILE * hFile, TSK_INUM_T inum, TSK_DADDR_T numblock, int32_t sec_skew); TSK_RETVAL_ENUM(*dir_open_meta) (TSK_FS_INFO * fs, TSK_FS_DIR ** a_fs_dir, TSK_INUM_T inode); ///< \internal Call tsk_fs_dir_open_meta() instead. uint8_t(*jopen) (TSK_FS_INFO *, TSK_INUM_T); ///< \internal uint8_t(*jblk_walk) (TSK_FS_INFO *, TSK_DADDR_T, TSK_DADDR_T, int, TSK_FS_JBLK_WALK_CB, void *); ///< \internal uint8_t(*jentry_walk) (TSK_FS_INFO *, int, TSK_FS_JENTRY_WALK_CB, void *); ///< \internal uint8_t(*fsstat) (TSK_FS_INFO * fs, FILE * hFile); ///< \internal int (*name_cmp) (TSK_FS_INFO *, const char *, const char *); ///< \internal uint8_t(*fscheck) (TSK_FS_INFO *, FILE *); ///< \internal void (*close) (TSK_FS_INFO * fs); ///< FS-specific function: Call tsk_fs_close() instead. uint8_t(*fread_owner_sid) (TSK_FS_FILE *, char **); // FS-specific function. Call tsk_fs_file_get_owner_sid() instead. 
}; /* File system level */ extern TSK_FS_INFO *tsk_fs_open_img(TSK_IMG_INFO *, TSK_OFF_T, TSK_FS_TYPE_ENUM); extern TSK_FS_INFO *tsk_fs_open_vol(const TSK_VS_PART_INFO *, TSK_FS_TYPE_ENUM); extern void tsk_fs_close(TSK_FS_INFO *); extern TSK_FS_TYPE_ENUM tsk_fs_type_toid_utf8(const char *); extern TSK_FS_TYPE_ENUM tsk_fs_type_toid(const TSK_TCHAR *); extern void tsk_fs_type_print(FILE *); extern const char *tsk_fs_type_toname(TSK_FS_TYPE_ENUM); extern TSK_FS_TYPE_ENUM tsk_fs_type_supported(); extern ssize_t tsk_fs_read(TSK_FS_INFO * a_fs, TSK_OFF_T a_off, char *a_buf, size_t a_len); extern ssize_t tsk_fs_read_block(TSK_FS_INFO * a_fs, TSK_DADDR_T a_addr, char *a_buf, size_t a_len); //@} /***** LIBRARY ROUTINES FOR COMMAND LINE FUNCTIONS */ enum TSK_FS_BLKCALC_FLAG_ENUM { TSK_FS_BLKCALC_DD = 0x01, TSK_FS_BLKCALC_BLKLS = 0x02, TSK_FS_BLKCALC_SLACK = 0x04 }; typedef enum TSK_FS_BLKCALC_FLAG_ENUM TSK_FS_BLKCALC_FLAG_ENUM; extern int8_t tsk_fs_blkcalc(TSK_FS_INFO * fs, TSK_FS_BLKCALC_FLAG_ENUM flags, TSK_DADDR_T cnt); enum TSK_FS_BLKCAT_FLAG_ENUM { TSK_FS_BLKCAT_NONE = 0x00, TSK_FS_BLKCAT_HEX = 0x01, TSK_FS_BLKCAT_ASCII = 0x02, TSK_FS_BLKCAT_HTML = 0x04, TSK_FS_BLKCAT_STAT = 0x08 }; typedef enum TSK_FS_BLKCAT_FLAG_ENUM TSK_FS_BLKCAT_FLAG_ENUM; extern uint8_t tsk_fs_blkcat(TSK_FS_INFO * fs, TSK_FS_BLKCAT_FLAG_ENUM flags, TSK_DADDR_T addr, TSK_DADDR_T read_num_units); enum TSK_FS_BLKLS_FLAG_ENUM { TSK_FS_BLKLS_NONE = 0x00, TSK_FS_BLKLS_CAT = 0x01, TSK_FS_BLKLS_LIST = 0x02, TSK_FS_BLKLS_SLACK = 0x04, }; typedef enum TSK_FS_BLKLS_FLAG_ENUM TSK_FS_BLKLS_FLAG_ENUM; extern uint8_t tsk_fs_blkls(TSK_FS_INFO * fs, TSK_FS_BLKLS_FLAG_ENUM lclflags, TSK_DADDR_T bstart, TSK_DADDR_T bend, TSK_FS_BLOCK_WALK_FLAG_ENUM flags); extern uint8_t tsk_fs_blkstat(TSK_FS_INFO * fs, TSK_DADDR_T addr); enum TSK_FS_FFIND_FLAG_ENUM { TSK_FS_FFIND_ALL = 0x01, }; typedef enum TSK_FS_FFIND_FLAG_ENUM TSK_FS_FFIND_FLAG_ENUM; extern uint8_t tsk_fs_ffind(TSK_FS_INFO * fs, TSK_FS_FFIND_FLAG_ENUM lclflags, TSK_INUM_T inode, TSK_FS_ATTR_TYPE_ENUM type, uint8_t type_used, uint16_t id, uint8_t id_used, TSK_FS_DIR_WALK_FLAG_ENUM flags); enum TSK_FS_FLS_FLAG_ENUM { TSK_FS_FLS_NONE = 0x00, TSK_FS_FLS_DOT = 0x01, TSK_FS_FLS_LONG = 0x02, TSK_FS_FLS_FILE = 0x04, TSK_FS_FLS_DIR = 0x08, TSK_FS_FLS_FULL = 0x10, TSK_FS_FLS_MAC = 0x20, TSK_FS_FLS_HASH = 0x40 }; typedef enum TSK_FS_FLS_FLAG_ENUM TSK_FS_FLS_FLAG_ENUM; extern uint8_t tsk_fs_fls(TSK_FS_INFO * fs, TSK_FS_FLS_FLAG_ENUM lclflags, TSK_INUM_T inode, TSK_FS_DIR_WALK_FLAG_ENUM flags, TSK_TCHAR * pre, int32_t skew); extern uint8_t tsk_fs_icat(TSK_FS_INFO * fs, TSK_INUM_T inum, TSK_FS_ATTR_TYPE_ENUM type, uint8_t type_used, uint16_t id, uint8_t id_used, TSK_FS_FILE_WALK_FLAG_ENUM flags); enum TSK_FS_IFIND_FLAG_ENUM { TSK_FS_IFIND_NONE = 0x00, TSK_FS_IFIND_ALL = 0x01, TSK_FS_IFIND_PAR_LONG = 0x02, }; typedef enum TSK_FS_IFIND_FLAG_ENUM TSK_FS_IFIND_FLAG_ENUM; extern int8_t tsk_fs_ifind_path(TSK_FS_INFO * fs, TSK_TCHAR * path, TSK_INUM_T * result); extern uint8_t tsk_fs_ifind_data(TSK_FS_INFO * fs, TSK_FS_IFIND_FLAG_ENUM flags, TSK_DADDR_T blk); extern uint8_t tsk_fs_ifind_par(TSK_FS_INFO * fs, TSK_FS_IFIND_FLAG_ENUM flags, TSK_INUM_T par); enum TSK_FS_ILS_FLAG_ENUM { TSK_FS_ILS_NONE = 0x00, TSK_FS_ILS_OPEN = 0x01, TSK_FS_ILS_MAC = 0x02, TSK_FS_ILS_LINK = 0x04, TSK_FS_ILS_UNLINK = 0x08, }; typedef enum TSK_FS_ILS_FLAG_ENUM TSK_FS_ILS_FLAG_ENUM; extern uint8_t tsk_fs_ils(TSK_FS_INFO * fs, TSK_FS_ILS_FLAG_ENUM lclflags, TSK_INUM_T istart, TSK_INUM_T ilast, TSK_FS_META_FLAG_ENUM flags, int32_t 
skew, const TSK_TCHAR * img); /* ** Is this string a "." or ".." */ #define TSK_FS_ISDOT(str) ( ((str[0] == '.') && \ ( ((str[1] == '.') && (str[2] == '\0')) || (str[1] == '\0') ) ) ? 1 : 0 ) extern int tsk_fs_parse_inum(const TSK_TCHAR * str, TSK_INUM_T *, TSK_FS_ATTR_TYPE_ENUM *, uint8_t *, uint16_t *, uint8_t *); #ifdef __cplusplus } #endif #ifdef __cplusplus /** * \ingroup fslib_cpp */ class TskFsJEntry { private: TSK_FS_JENTRY * m_jEntry; TskFsJEntry(const TskFsJEntry & rhs); TskFsJEntry & operator=(const TskFsJEntry & rhs); public: TskFsJEntry(TSK_FS_JENTRY * a_jEntry) { m_jEntry = a_jEntry; }; ~TskFsJEntry() { }; }; /** * \ingroup fslib_cpp * Contains information about a single data run, which has a starting address and length. * A run describes a consecutive list of blocks that have been allocated to a file. * A file may have many such runs and they are stringed together in a linked list. * The entries in the list must be stored in sequential order (based on offset in file). * See TSK_FS_ATTR_RUN for more details. */ class TskFsAttrRun { private: TSK_FS_ATTR_RUN * m_fsAttrRun; TskFsAttrRun(const TskFsAttrRun & rhs); TskFsAttrRun & operator=(const TskFsAttrRun & rhs); public: /** * construct a TskFsAttrRun object. * @param a_fsAttrRun pointer of TSK_FS_ATTR_RUN. If NULL, then the * getX() method return values are undefined. */ TskFsAttrRun(TSK_FS_ATTR_RUN * a_fsAttrRun) { m_fsAttrRun = a_fsAttrRun; }; ~TskFsAttrRun() { }; /** * get offset (in blocks) of this run in the file * @return offset of run */ TSK_DADDR_T getOffset() const { if (m_fsAttrRun != NULL) return m_fsAttrRun->offset; else return 0; }; /** * get starting block address (in file system) of run * @return starting block address */ TSK_DADDR_T getAddr() const { if (m_fsAttrRun != NULL) return m_fsAttrRun->addr; else return 0; }; /** * get number of blocks in run (0 when entry is not in use) * @return offset */ TSK_DADDR_T length() const { if (m_fsAttrRun != NULL) return m_fsAttrRun->len; else return 0; }; /** * get flags for run * @return flags for run */ TSK_FS_ATTR_RUN_FLAG_ENUM getFlags() const { if (m_fsAttrRun != NULL) return m_fsAttrRun->flags; else return (TSK_FS_ATTR_RUN_FLAG_ENUM) 0; }; }; //TskFsAttrRun /** * \ingroup fslib_cpp * Stores the file name information that is stored in * a directory. Most file systems seperate the file name from the metadata, but * some do not (such as FAT). This structure contains the file name and the * address of the metadata. See TSK_FS_NAME for more details. */ class TskFsName { friend class TskFsInfo; private: TSK_FS_NAME * m_fsName; TskFsName(const TskFsName & rhs); TskFsName & operator=(const TskFsName & rhs); public: /** * construct a TskFsName object * @param a_fsName a pointer of TSK_FS_NAME. If NULL, the getX() return values are undefined. */ TskFsName(TSK_FS_NAME * a_fsName) { m_fsName = a_fsName; }; ~TskFsName() { }; /** * Return the name of the file (in UTF-8) * @return the name of the file */ const char *getName() const { if (m_fsName != NULL) return m_fsName->name; else return NULL; }; /** * Return the short name of the file or null (in UTF-8) * @return the short name of the file */ const char *getShortName() const { if (m_fsName != NULL) return m_fsName->shrt_name; else return NULL; }; /** * Return the address of the metadata structure that the name points to. 
* @return address of the metadata structure that the name points to */ TSK_INUM_T getMetaAddr() const { if (m_fsName != NULL) return m_fsName->meta_addr; else return 0; }; /** * Return the sequence number for metadata structure (NTFS only) * @return sequence number for metadata structure */ uint32_t getMetaSeq() const { if (m_fsName != NULL) return m_fsName->meta_seq; else return 0; }; /** * Return the metadata address of the parent directory (equal to meta_addr if this entry is for root directory). * @return metadata address of parent directory */ TSK_INUM_T getParentAddr() const { if (m_fsName != NULL) return m_fsName->par_addr; else return 0; }; /** * Return the file type information (directory, file, etc.) * @return file type information */ TSK_FS_NAME_TYPE_ENUM getType() const { if (m_fsName != NULL) return m_fsName->type; else return (TSK_FS_NAME_TYPE_ENUM) 0; }; /** * Return flags that describe allocation status etc. * @return flags that describe allocation status */ TSK_FS_NAME_FLAG_ENUM getFlags() const { if (m_fsName != NULL) return m_fsName->flags; else return (TSK_FS_NAME_FLAG_ENUM) 0; }; }; class TskFsFile; /** * File walk callback function definition. This is called for * chunks of content in the file being processed. * @param a_fs_file Pointer to file being processed * @param a_off Byte offset in file that this data is for * @param a_addr Address of data being passed (valid only if a_flags have RAW set) * @param a_buf Pointer to buffer with file content * @param a_len Size of data in buffer (in bytes) * @param a_flags Flags about the file content * @param a_ptr Pointer that was specified by caller to inode_walk * @returns Value that tells file walk to continue or stop */ typedef TSK_WALK_RET_ENUM(*TSK_FS_FILE_WALK_CPP_CB) (TskFsFile * a_fs_file, TSK_OFF_T a_off, TSK_DADDR_T a_addr, char *a_buf, size_t a_len, TSK_FS_BLOCK_FLAG_ENUM a_flags, void *a_ptr); /** \internal * Internal structure to pass C++ file walk data into C file walk call back. */ typedef struct { TSK_FS_FILE_WALK_CPP_CB cppAction; // pointer C++ callback void *cPtr; // pointer to data that was passed into C++ walk method } TSK_FS_FILE_WALK_CPP_DATA; /** \internal * Internal function used to call C++ file Walk callback from C callback. */ extern TSK_WALK_RET_ENUM tsk_fs_file_cpp_c_cb(TSK_FS_FILE * a_file, TSK_OFF_T a_off, TSK_DADDR_T a_addr, char *a_buf, size_t a_len, TSK_FS_BLOCK_FLAG_ENUM a_flags, void *a_ptr); /** * \ingroup fslib_cpp * Stores information about a file attribute. File attributes store data for a file. * Most files have at least one attribute that stores the file content. See TSK_FS_ATTR for * details on attributes. */ class TskFsAttribute { private: const TSK_FS_ATTR *m_fsAttr; TskFsAttribute(const TskFsAttribute & rhs); TskFsAttribute & operator=(const TskFsAttribute & rhs); public: /** * construct a TskFsAttribute object * @param a_fsAttr a pointer of TSK_FS_ATTR. If NULL, the getX() return values are undefi ned. */ TskFsAttribute(const TSK_FS_ATTR * a_fsAttr) { m_fsAttr = a_fsAttr; }; ~TskFsAttribute() { }; /** * Process an attribute and call a callback function with its contents. The callback will be * called with chunks of data that are fs->block_size or less. The address given in the callback * will be correct only for raw files (when the raw file contents were stored in the block). For * compressed and sparse attributes, the address may be zero. 
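*
* This wraps tsk_fs_attr_walk(); a minimal C sketch, assuming an open
* TSK_FS_FILE *fs_file and a TSK_FS_FILE_WALK_CB callback named my_cb
* (hypothetical):
* \verbatim
* const TSK_FS_ATTR *attr = tsk_fs_file_attr_get(fs_file); // default attribute
* if ((attr == NULL) ||
*     tsk_fs_attr_walk(attr, TSK_FS_FILE_WALK_FLAG_NONE, my_cb, NULL)) {
*     // attribute missing or the walk failed
* }
* \endverbatim
*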
* * See tsk_fs_attr_walk() for details * @param a_flags Flags to use while processing attribute * @param a_action Callback action to call with content * @param a_ptr Pointer that will passed to callback * @returns 1 on error and 0 on success. */ uint8_t walk(TSK_FS_FILE_WALK_FLAG_ENUM a_flags, TSK_FS_FILE_WALK_CPP_CB a_action, void *a_ptr) { TSK_FS_FILE_WALK_CPP_DATA fileData; fileData.cppAction = a_action; fileData.cPtr = a_ptr; if (m_fsAttr) return tsk_fs_attr_walk(m_fsAttr, a_flags, tsk_fs_file_cpp_c_cb, &fileData); else return 1; }; /** * Read the contents of this attribute using a typical read() type interface. * 0s are returned for missing runs. * * See tsk_fs_attr_read() for details * @param a_offset The byte offset to start reading from. * @param a_buf The buffer to read the data into. * @param a_len The number of bytes to read from the file. * @param a_flags Flags to use while reading * @returns The number of bytes read or -1 on error (incl if offset is past end of file). */ ssize_t read(TSK_OFF_T a_offset, char *a_buf, size_t a_len, TSK_FS_FILE_READ_FLAG_ENUM a_flags) { if (m_fsAttr != NULL) return tsk_fs_attr_read(m_fsAttr, a_offset, a_buf, a_len, a_flags); else return -1; }; /** * get the attribute's flags * @return flags for attribute */ TSK_FS_ATTR_FLAG_ENUM getFlags() const { if (m_fsAttr != NULL) return m_fsAttr->flags; else return (TSK_FS_ATTR_FLAG_ENUM) 0; }; /** * get the attributes's name (in UTF-8). * @return name of attribute (or NULL if attribute doesn't have one) */ const char *getName() const { if (m_fsAttr != NULL) return m_fsAttr->name; else return NULL; }; /** * get type of attribute * @return type of attribute */ TSK_FS_ATTR_TYPE_ENUM getType() const { if (m_fsAttr != NULL) return m_fsAttr->type; else return (TSK_FS_ATTR_TYPE_ENUM) 0; }; /** * get id of attribute * @return id of attribute */ uint16_t getId() const { if (m_fsAttr != NULL) return m_fsAttr->id; else return 0; }; /** * get size in bytes of attribute (does not include skiplen for non-resident) * @return size in bytes of attribute */ TSK_OFF_T getSize() const { if (m_fsAttr != NULL) return m_fsAttr->size; else return 0; }; /** * get a run for a non-resident attribute. * It's caller's responsibility to free memory of TskFsAttrRun * @param a_idx The index of the run to return. * @return A run in the attribute. */ const TskFsAttrRun *getRun(int a_idx) const { if (m_fsAttr != NULL) { TSK_FS_ATTR_RUN *run = m_fsAttr->nrd.run; int i = 0; while (run != NULL) { if (i == a_idx) return new TskFsAttrRun(run); i++; run = run->next; }} return NULL; }; /** * gets the number of runs in a non-resident attribute. * @return number of runs. */ int getRunCount() const { int size = 0; if (m_fsAttr != NULL) { TSK_FS_ATTR_RUN *run = m_fsAttr->nrd.run; while (run != NULL) { size++; run = run->next; }} return size; } /** * get number of initial bytes in run to skip before content begins. * The size field does not include this length. * @return number of initial bytes in run to skip before content begins */ uint32_t getSkipLen() const { if (m_fsAttr != NULL) return m_fsAttr->nrd.skiplen; else return 0; }; /** * get number of bytes that are allocated in all clusters of non-resident run * (will be larger than size - does not include skiplen). * This is defined when the attribute is created and used to determine slack space. 
* @return number of bytes that are allocated in all clusters of non-resident run */ TSK_OFF_T getAllocSize() const { if (m_fsAttr != NULL) return m_fsAttr->nrd.allocsize; else return 0; }; /** * get number of bytes (starting from offset 0) that have data * (including FILLER) saved for them (smaller then or equal to size). * This is defined when the attribute is created. * @return number of bytes (starting from offset 0) that have data */ TSK_OFF_T getInitSize() const { if (m_fsAttr != NULL) return m_fsAttr->nrd.initsize; else return 0; }; /** * get size of compression units (needed only if NTFS file is compressed) * @return size of compression units (needed only if NTFS file is compressed) */ uint32_t getCompSize() const { if (m_fsAttr != NULL) return m_fsAttr->nrd.compsize; return 0; }; /** * Pointer to buffer with resident data. Only getSize() bytes will be valid. * @return pointer to buffer with resident data. */ const uint8_t *getBuf() const { if (m_fsAttr != NULL) return m_fsAttr->rd.buf; else return NULL; }; }; //TskfsAttr class TskFsBlock; class TskFsInfo; /** * Function definition used for callback to blockWalk(). * * @param a_block Pointer to TskFsBlock object that holds block content and flags * @param a_ptr Pointer that was supplied by the caller who called tsk_fs_block_walk * @returns Value to identify if walk should continue, stop, or stop because of error */ typedef TSK_WALK_RET_ENUM(*TSK_FS_BLOCK_WALK_CPP_CB) (const TskFsBlock * a_block, void *a_ptr); /** \internal * Internal structure to pass C++ block walk data into C block walk call back. */ typedef struct { TSK_FS_BLOCK_WALK_CPP_CB cppAction; // pointer C++ callback void *cPtr; // pointer to data that was passed into C++ walk method } TSK_FS_BLOCK_WALK_CPP_DATA; /** \internal * Internal function used to call C++ Block Walk callback from C callback. */ extern TSK_WALK_RET_ENUM tsk_fs_block_cpp_c_cb(const TSK_FS_BLOCK * a_block, void *a_ptr); /** * Function definition for callback in TskFsInfo.jblkWalk(). * * @param a_fsInfo File system being analyzed * @param a_string * @param a_num * @param a_ptr Pointer that was supplied by the caller * @returns Value to identify if walk should continue, stop, or stop because of error */ typedef TSK_WALK_RET_ENUM(*TSK_FS_JBLK_WALK_CPP_CB) (TskFsInfo * a_fsInfo, char *a_string, int a_num, void *a_ptr); /** \internal * Internal structure to pass C++ JBLK walk data into C JBLK walk call back. */ typedef struct { TSK_FS_JBLK_WALK_CPP_CB cppAction; // pointer C++ callback void *cPtr; // pointer to data that was passed into C++ walk method } TSK_FS_JBLK_WALK_CPP_DATA; /** \internal * Internal function used to call C++ JBLK Walk callback from C callback. */ extern TSK_WALK_RET_ENUM tsk_fs_jblk_walk_cpp_c_cb(TSK_FS_INFO * a_fsInfo, char *a_string, int a_num, void *a_ptr); /** * Function definition for callback in TskFsInfo.jentryWalk(). * * @param a_fsInfo File system being analyzed * @param a_jentry journal entry * @param a_num * @param a_ptr Pointer that was supplied by the caller. * @returns Value to identify if walk should continue, stop, or stop because of error */ typedef TSK_WALK_RET_ENUM(*TSK_FS_JENTRY_WALK_CPP_CB) (TskFsInfo * a_fsInfo, TskFsJEntry * a_jentry, int a_num, void *a_ptr); /** \internal * Internal structure to pass C++ JENTRY walk data into C JENTRY walk call back. 
*/ typedef struct { TSK_FS_JENTRY_WALK_CPP_CB cppAction; // pointer C++ callback void *cPtr; // pointer to data that was passed into C++ walk method } TSK_FS_JENTRY_WALK_CPP_DATA; /** \internal * Internal function used to call C++ JENTRY Walk callback from C callback. */ extern TSK_WALK_RET_ENUM tsk_fs_jentry_walk_cpp_c_cb(TSK_FS_INFO * a_fsInfo, TSK_FS_JENTRY * a_jentry, int a_num, void *a_ptr); /** * inode walk callback function definition. This is called for every file * that meets the critera specified when inode_walk was called. * @param a_fs_file Pointer to the current file * @param a_ptr Pointer that was specified by caller to inode_walk * @returns Value that tells inode walk to continue or stop */ typedef TSK_WALK_RET_ENUM(*TSK_FS_META_WALK_CPP_CB) (TskFsFile * a_fs_file, void *a_ptr); /** \internal * Internal structure to pass C++ metadata walk data into C metadata walk call back. */ typedef struct { TSK_FS_META_WALK_CPP_CB cppAction; // pointer C++ callback void *cPtr; // pointer to data that was passed into C++ walk method } TSK_FS_META_WALK_CPP_DATA; /** \internal * Internal function used to call C++ Meta Walk callback from C callback. */ extern TSK_WALK_RET_ENUM tsk_fs_meta_walk_cpp_c_cb(TSK_FS_FILE * a_file, void *a_ptr); /** * Definition of callback function that is used by tsk_fs_dir_walk(). This is * is called for each file in a directory. * @param a_fs_file Pointer to the current file in the directory * @param a_path Path of the file * @param a_ptr Pointer that was originally passed by caller to tsk_fs_dir_walk. * @returns Value to signal if tsk_fs_dir_walk should stop or continue. */ typedef TSK_WALK_RET_ENUM(*TSK_FS_DIR_WALK_CPP_CB) (TskFsFile * a_fs_file, const char *a_path, void *a_ptr); /** \internal * Internal structure to pass C++ dir walk data into C block walk call back. */ typedef struct { TSK_FS_DIR_WALK_CPP_CB cppAction; // pointer C++ callback void *cPtr; // pointer to data that was passed into C++ walk method } TSK_FS_DIR_WALK_CPP_DATA; /** \internal * Internal function used to call C++ Dir Walk callback from C callback. */ extern TSK_WALK_RET_ENUM tsk_fs_dir_walk_cpp_c_cb(TSK_FS_FILE * a_file, const char *a_path, void *a_ptr); /** * \ingroup fslib_cpp * Stores information about an open file system. One of the open() * commands needs to be used before any of the getX() or read() methods will return * valid data. See TSK_FS_INFO for more details. */ class TskFsInfo { friend class TskFsBlock; friend class TskFsFile; friend class TskFsDir; private: TSK_FS_INFO * m_fsInfo; TskFsInfo(const TskFsInfo & rhs); TskFsInfo & operator=(const TskFsInfo & rhs); public: TskFsInfo(TSK_FS_INFO * a_fsInfo) { m_fsInfo = a_fsInfo; }; TskFsInfo() { m_fsInfo = NULL; }; ~TskFsInfo() { close(); } /** * Read arbitrary data from inside of the file system. * See tsk_fs_block_free() for details * @param a_off The byte offset to start reading from (relative to start of file system) * @param a_buf The buffer to store the block in. * @param a_len The number of bytes to read * @return The number of bytes read or -1 on error. */ ssize_t read(TSK_OFF_T a_off, char *a_buf, size_t a_len) { if (m_fsInfo) return tsk_fs_read(m_fsInfo, a_off, a_buf, a_len); else return -1; }; /** * Read a file system block. * See tsk_fs_read_block() for details * @param a_addr The starting block file system address. * @param a_buf The char * buffer to store the block data in. * @param a_len The number of bytes to read (must be a multiple of the block size) * @return The number of bytes read or -1 on error. 
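*
* A minimal sketch of the underlying C call, assuming an open
* TSK_FS_INFO *fs and a block address addr of interest (hypothetical); the
* read length must be a multiple of fs->block_size:
* \verbatim
* char *buf = (char *) malloc(fs->block_size);
* if (buf != NULL) {
*     if (tsk_fs_read_block(fs, addr, buf, fs->block_size)
*             != (ssize_t) fs->block_size) {
*         // short read or error
*     }
*     free(buf);
* }
* \endverbatim
*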
*/ ssize_t readBlock(TSK_DADDR_T a_addr, char *a_buf, size_t a_len) { if (m_fsInfo) return tsk_fs_read_block(m_fsInfo, a_addr, a_buf, a_len); else return -1; }; /** * Walk a range of metadata structures and call a callback for each * structure that matches the flags supplied. For example, it can * call the callback on only allocated or unallocated entries. * See tsk_fs_meta_walk() for details * @param a_start Metadata address to start walking from * @param a_end Metadata address to walk to * @param a_flags Flags that specify the desired metadata features * @param a_cb Callback function to call * @param a_ptr Pointer to pass to the callback * @returns 1 on error and 0 on success */ uint8_t metaWalk(TSK_INUM_T a_start, TSK_INUM_T a_end, TSK_FS_META_FLAG_ENUM a_flags, TSK_FS_META_WALK_CPP_CB a_cb, void *a_ptr) { TSK_FS_META_WALK_CPP_DATA metaData; metaData.cppAction = a_cb; metaData.cPtr = a_ptr; if (m_fsInfo) return tsk_fs_meta_walk(m_fsInfo, a_start, a_end, a_flags, tsk_fs_meta_walk_cpp_c_cb, &metaData); else return 1; }; /* * Walk the file names in a directory and obtain the details of the files via a callback. * See tsk_fs_dir_walk() for details * @param a_addr Metadata address of the directory to analyze * @param a_flags Flags used during analysis * @param a_action Callback function that is called for each file name * @param a_ptr Pointer to data that is passed to the callback function each time * @returns 1 on error and 0 on success */ uint8_t dirWalk(TSK_INUM_T a_addr, TSK_FS_DIR_WALK_FLAG_ENUM a_flags, TSK_FS_DIR_WALK_CPP_CB a_action, void *a_ptr) { TSK_FS_DIR_WALK_CPP_DATA dirData; dirData.cppAction = a_action; dirData.cPtr = a_ptr; if (m_fsInfo != NULL) return tsk_fs_dir_walk(m_fsInfo, a_addr, a_flags, tsk_fs_dir_walk_cpp_c_cb, &dirData); else return 1; }; /** * * Walk a range of file system blocks and call the callback function * with the contents and allocation status of each. * See tsk_fs_block_walk() for details. * @param a_start_blk Block address to start walking from * @param a_end_blk Block address to walk to * @param a_flags Flags used during walk to determine which blocks to call callback with * @param a_action Callback function * @param a_ptr Pointer that will be passed to callback * @returns 1 on error and 0 on success */ uint8_t blockWalk(TSK_DADDR_T a_start_blk, TSK_DADDR_T a_end_blk, TSK_FS_BLOCK_WALK_FLAG_ENUM a_flags, TSK_FS_BLOCK_WALK_CPP_CB a_action, void *a_ptr) { TSK_FS_BLOCK_WALK_CPP_DATA blockData; blockData.cppAction = a_action; blockData.cPtr = a_ptr; return tsk_fs_block_walk(m_fsInfo, a_start_blk, a_end_blk, a_flags, tsk_fs_block_cpp_c_cb, &blockData); //tsk_fs_block_walk will check the input data }; /** * Opens a file system that is inside of a Volume. * Returns a structure that can be used for analysis and reporting. * See tsk_fs_open_vol() for details * @param a_part_info Open volume to read from and analyze * @param a_ftype Type of file system (or autodetect) * * @return 1 on error 0 on success. */ uint8_t open(const TskVsPartInfo * a_part_info, TSK_FS_TYPE_ENUM a_ftype) { if ((m_fsInfo = tsk_fs_open_vol(a_part_info->m_vsPartInfo, a_ftype)) != NULL) return 0; return 1; }; /** * Opens a file system at a given offset in a disk image. * Returns a structure that can be used for analysis and reporting. * See tsk_fs_open_img() for details * @param a_img_info Disk image to analyze * @param a_offset Byte offset to start analyzing from * @param a_ftype Type of file system (or autodetect) * * @return 1 on error 0 on success. 
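*
* A minimal sketch of the underlying C calls, assuming img is a TSK_IMG_INFO*
* obtained from the image layer (e.g. tsk_img_open_sing()) and off is the
* byte offset of the file system in that image (hypothetical values):
* \verbatim
* TSK_FS_INFO *fs = tsk_fs_open_img(img, off, TSK_FS_TYPE_DETECT);
* if (fs != NULL) {
*     printf("%s: %llu blocks of %u bytes\n",
*         tsk_fs_type_toname(fs->ftype),
*         (unsigned long long) fs->block_count, fs->block_size);
*     tsk_fs_close(fs);
* }
* \endverbatim
*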
*/ uint8_t open(TskImgInfo * a_img_info, TSK_OFF_T a_offset, TSK_FS_TYPE_ENUM a_ftype) { if ((m_fsInfo = tsk_fs_open_img(a_img_info->m_imgInfo, a_offset, a_ftype)) != NULL) return 0; return 1; }; /** * \internal */ uint8_t jopen(TSK_INUM_T a_inum) { if (m_fsInfo == NULL) return 0; return m_fsInfo->jopen(m_fsInfo, a_inum); } /** * \internal */ uint8_t jblkWalk(TSK_DADDR_T a_addr1, TSK_DADDR_T a_addr2, int a_num, TSK_FS_JBLK_WALK_CPP_CB a_action, void *a_ptr) { if (m_fsInfo == NULL) return 0; TSK_FS_JBLK_WALK_CPP_DATA jblkData; jblkData.cppAction = a_action; jblkData.cPtr = a_ptr; return m_fsInfo->jblk_walk(m_fsInfo, a_addr1, a_addr2, a_num, tsk_fs_jblk_walk_cpp_c_cb, &jblkData); }; /** * \internal */ uint8_t jentryWalk(int a_num, TSK_FS_JENTRY_WALK_CPP_CB a_action, void *a_ptr) { if (m_fsInfo == NULL) return 0; TSK_FS_JENTRY_WALK_CPP_DATA jentryData; jentryData.cppAction = a_action; jentryData.cPtr = a_ptr; return m_fsInfo->jentry_walk(m_fsInfo, a_num, tsk_fs_jentry_walk_cpp_c_cb, &jentryData); }; /** * Parse a string with the file system type and return its internal ID. * See tsk_fs_type_toid() for details * @param a_str String to parse. * @returns ID of string (or unsupported if the name is unknown) */ static TSK_FS_TYPE_ENUM typeToId(const TSK_TCHAR * a_str) { return tsk_fs_type_toid(a_str); }; /** * Return the string name of a file system type id. * See tsk_fs_type_toname() for details * @param a_ftype File system type id * @returns Name or NULL on error */ static const char *typeToName(TSK_FS_TYPE_ENUM a_ftype) { return tsk_fs_type_toname(a_ftype); }; /** * Return the supported file system types. * See tsk_fs_type_supported() for details * @returns The bit in the return value is 1 if the type is supported. */ static TSK_FS_TYPE_ENUM typeSupported() { return tsk_fs_type_supported(); }; /** * Print the supported file system types to a file handle * See tsk_fs_type_print() for details * @param a_hFile File handle to print to */ static void typePrint(FILE * a_hFile) { tsk_fs_type_print(a_hFile); }; /** * * Find the meta data address for a given file name (UTF-8). * See tsk_fs_path2inum() for details * @param a_path UTF-8 path of file to search for * @param [out] a_result Meta data address of file * @param [out] a_fs_name Copy of name details (or NULL if details not wanted) * @returns -1 on (system) error, 0 if found, and 1 if not found */ int8_t path2INum(const char *a_path, TSK_INUM_T * a_result, TskFsName * a_fs_name) { if (m_fsInfo != NULL) return tsk_fs_path2inum(m_fsInfo, a_path, a_result, a_fs_name->m_fsName); else return -1; }; /** * Parse a TSK_TCHAR string of an inode, type, and id pair (not all parts * need to be there). This assumes the string is either: * INUM, INUM-TYPE, or INUM-TYPE-ID. Return the values in integer form. * See tsk_fs_parse_inum() for details * @param [in] a_str Input string to parse * @param [out] a_inum Pointer to location where inode can be stored. * @param [out] a_type Pointer to location where type can be stored (or NULL) * @param [out] a_type_used Pointer to location where the value can be set * to 1 if the type was set (to differentiate between meanings of 0) (or NULL). * @param [out] a_id Pointer to location where id can be stored (or NULL) * @param [out] a_id_used Pointer to location where the value can be set * to 1 if the id was set (to differentiate between meanings of 0) (or NULL). 
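 *
 * A small sketch (the "50-128-2" literal is only an example; on Windows
 * builds TSK_TCHAR is wide, so a wide-character literal would be needed):
 * \code
 *   TSK_INUM_T inum;
 *   TSK_FS_ATTR_TYPE_ENUM type;
 *   uint16_t id;
 *   uint8_t type_used, id_used;
 *   if (TskFsInfo::parseINum("50-128-2", &inum, &type, &type_used,
 *           &id, &id_used) == 0) {
 *       // inum == 50, type == 128, id == 2 for this input
 *   }
 * \endcode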
* * @return 1 on error or if not an inode and 0 on success */ static int parseINum(const TSK_TCHAR * a_str, TSK_INUM_T * a_inum, TSK_FS_ATTR_TYPE_ENUM * a_type, uint8_t * a_type_used, uint16_t * a_id, uint8_t * a_id_used) { return tsk_fs_parse_inum(a_str, a_inum, a_type, a_type_used, a_id, a_id_used); }; /** * return byte offset in image that fs starts * @return offset in bytes. */ TSK_OFF_T getOffset() const { if (m_fsInfo != NULL) return m_fsInfo->offset; else return 0; }; /** * return number of metadata addresses in FS * @return number of metatdata addresses */ TSK_INUM_T getINumCount() const { if (m_fsInfo != NULL) return m_fsInfo->inum_count; else return 0; }; /** * return metadata address of root directory * @return metadata address of root directory */ TSK_INUM_T getRootINum() const { if (m_fsInfo != NULL) return m_fsInfo->root_inum; else return 0; }; /** * return first valid metadata address * @return first valid metadata address */ TSK_INUM_T getFirstINum() const { if (m_fsInfo != NULL) return m_fsInfo->first_inum; else return 0; }; /** * return last valid metadata address * @return last valid metadata address */ TSK_INUM_T getLastINum() const { if (m_fsInfo != NULL) return m_fsInfo->last_inum; else return 0; }; /** * return address of journal inode * @return address of journal inode */ TSK_INUM_T getJournalINum() const { if (m_fsInfo != NULL) return m_fsInfo->journ_inum; else return 0; }; /** * return number of blocks in fs * @return number of blocks in fs */ TSK_DADDR_T getBlockCount() const { if (m_fsInfo != NULL) return m_fsInfo->block_count; else return 0; }; /** * return address of first block * @return address of first block */ TSK_DADDR_T getFirstBlock() const { if (m_fsInfo != NULL) return m_fsInfo->first_block; else return 0; }; /** * return address of last block as reported by file system * (it is equal to the last block in the image or volume (if image is not complete) * @return address of last block */ TSK_DADDR_T getLastBlockAct() const { if (m_fsInfo != NULL) return m_fsInfo->last_block_act; else return 0; }; /** * return address of last block that is adjusted so that * (could be larger than last_block in image if end of image does not exist) * @return address of last block */ TSK_DADDR_T getLastBlock() const { if (m_fsInfo != NULL) return m_fsInfo->last_block; else return 0; }; /** * return size of each file system block (in bytes) * @return size of each block */ unsigned int getBlockSize() const { if (m_fsInfo != NULL) return m_fsInfo->block_size; else return 0; }; /** * return size of device block (typically always 512) * @return size of device block */ unsigned int getDeviceSize() const { if (m_fsInfo != NULL) return m_fsInfo->dev_bsize; else return 0; }; /** * return type of file system * @return type of file system */ TSK_FS_TYPE_ENUM getFsType() const { if (m_fsInfo != NULL) return m_fsInfo->ftype; else return (TSK_FS_TYPE_ENUM) 0; }; /** * return the "name" of data unit type as a string ("Cluster", for example) * @return string "name" of data unit type */ const char *getDataUnitName() const { if (m_fsInfo != NULL) return m_fsInfo->duname; else return NULL; }; /** * return flags for file system * @return flags for file system */ TSK_FS_INFO_FLAG_ENUM getFlags() const { if (m_fsInfo != NULL) return m_fsInfo->flags; else return (TSK_FS_INFO_FLAG_ENUM) 0; }; /** * return file system id (as reported in boot sector). Use getFsIdLen() to determine how many byts in buffer are used. 
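 *
 * For example (a sketch; "fs" is an already opened TskFsInfo and stdio is
 * assumed to be available):
 * \code
 *   const uint8_t *id = fs.getFsId();
 *   for (size_t i = 0; i < fs.getFsIdLen(); i++)
 *       printf("%02x", id[i]);
 * \endcode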
* @return Buffer with file system id */ const uint8_t *getFsId() const { if (m_fsInfo != NULL) return m_fsInfo->fs_id; else return 0; }; /** * return the number of bytes used in the buffer returned by getFsId(). * @return number of bytes used. */ size_t getFsIdLen() const { if (m_fsInfo == NULL) return 0; return m_fsInfo->fs_id_used; }; /** * Close an open file system. See tsk_fs_close() for details. */ void close() { tsk_fs_close(m_fsInfo); }; private: const TSK_IMG_INFO *getTskImgInfo() const { if (m_fsInfo != NULL) return m_fsInfo->img_info; else return NULL; }}; //TskFsInfo /** * \ingroup fslib_cpp * Stores information about a file system block. Must be created by either * allocating an empty block and opening one or by passing in a TSK_FS_BLOCK struct. * If NULL is passed to the contstructor and open() is not called, the other methods * return undefined data. See TSK_FS_BLOCK for more details. */ class TskFsBlock { private: TSK_FS_BLOCK * m_fsBlock; bool m_opened; // true if open() was called and we need to free it TskFsBlock(const TskFsBlock & rhs); TskFsBlock & operator=(const TskFsBlock & rhs); public: /** * constuct a TskFsBlock using a TSK_FS_BLOCK structure * @param a_fsBlock a pointer of TSK_FS_BLOCK. If NULL, the getX() methods return undefined data. */ TskFsBlock(const TSK_FS_BLOCK * a_fsBlock) { m_fsBlock = const_cast < TSK_FS_BLOCK * >(a_fsBlock); m_opened = false; }; /** * default constructor to constuct a TskFsBlock. Must call open() before using other methods. */ TskFsBlock() { m_fsBlock = NULL; }; /** * Free the memory associated with the TSK_FS_BLOCK structure. * See tsk_fs_block_free() for details */ ~TskFsBlock() { if (m_opened) tsk_fs_block_free(m_fsBlock); m_fsBlock = NULL; }; /** * Open a block (use only if created with default constructor). * * @param a_fs The file system to read the block from. * @param a_addr The file system address to read. * @return 1 on error and 0 on success. */ uint8_t open(TskFsInfo * a_fs, TSK_DADDR_T a_addr) { if (m_fsBlock) return 1; if ((m_fsBlock = tsk_fs_block_get(a_fs->m_fsInfo, m_fsBlock, a_addr)) != NULL) { m_opened = true; return 0; } else { return 1; } }; /** * Get buffer with block data (of size TSK_FS_INFO::block_size) * * @return buffer with block data */ const char *getBuf() const { if (m_fsBlock != NULL) return m_fsBlock->buf; else return NULL; }; /** * Get address of block * @return address of block */ TSK_DADDR_T getAddr() const { if (m_fsBlock != NULL) return m_fsBlock->addr; else return 0; }; /** * Get flags for block (alloc or unalloc) * @return flags for block */ TSK_FS_BLOCK_FLAG_ENUM getFlags() const { if (m_fsBlock != NULL) return m_fsBlock->flags; else return (TSK_FS_BLOCK_FLAG_ENUM) 0; }; private: /** * Get pointer to file system that block is from * @return pointer to file system that block is from */ const TSK_FS_INFO *getFsInfo() const { if (m_fsBlock != NULL) return m_fsBlock->fs_info; else return NULL; }; }; /** * \ingroup fslib_cpp * Stores information about names that are located in metadata structures. See * TSK_FS_META_NAME for more details. */ class TskFsMetaName { private: TSK_FS_META_NAME_LIST * m_fsMetaNameList; TskFsMetaName(const TskFsMetaName & rhs); TskFsMetaName & operator=(const TskFsMetaName & rhs); public: /** * Allocates an object based on a C struct. * @param a_fsMetaNameList C struct of name list. If NULL, get() methods return undefined values. 
*/ TskFsMetaName(TSK_FS_META_NAME_LIST * a_fsMetaNameList) { m_fsMetaNameList = a_fsMetaNameList; }; /** * Get the text name in UTF-8 (does not include parent directory name). * @returns name of file. */ const char *getName() const { if (m_fsMetaNameList != NULL) return m_fsMetaNameList->name; else return NULL; }; /** * Get the parent inode (NTFS Only) * @return Address of parent directory. */ TSK_INUM_T getParInode() const { if (m_fsMetaNameList != NULL) return m_fsMetaNameList->par_inode; else return 0; }; /** * get the parent sequence (NTFS Only) * @return Sequence of parent directory. */ uint32_t getParSeq() const { return m_fsMetaNameList->par_seq; }; }; /** * \ingroup fslib_cpp * Stores metadata about a file. See TSK_FS_META for more details. */ class TskFsMeta { private: TSK_FS_META * m_fsMeta; TskFsMeta(const TskFsMeta & rhs); TskFsMeta & operator=(const TskFsMeta & rhs); public: /** * construct a TskFsMeta object. If NULL is passed as an argument, the getX() behavior * is not defined. * @param a_fsMeta a pointer of TSK_FS_META */ TskFsMeta(TSK_FS_META * a_fsMeta) { m_fsMeta = a_fsMeta; #if 0 if (m_fsMeta != NULL) { m_nameList = m_fsMeta->name2; size_t numOfList = 0; TSK_FS_META_NAME_LIST *nameList = m_nameList; while (nameList != NULL) { nameList = nameList->next; numOfList += 1; } m_nameListLen = numOfList; } else { m_nameList = NULL; m_nameListLen = 0; } #endif }; ~TskFsMeta() { }; /** * Makes the "ls -l" permissions string for a file. * See tsk_fs_meta_make_ls() for details * @param a_buf [out] Buffer to write results to (must be 12 bytes or longer) * @param a_len Length of buffer */ uint8_t getLs(char *a_buf, size_t a_len) const { if (m_fsMeta != NULL) return tsk_fs_meta_make_ls(m_fsMeta, a_buf, a_len); else return 0; }; /** * get flags for this file for its allocation status etc. 
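 *
 * For instance, a sketch of testing whether the file is unallocated
 * ("meta" is a hypothetical TskFsMeta pointer, e.g. from TskFsFile::getMeta()):
 * \code
 *   if (meta->getFlags() & TSK_FS_META_FLAG_UNALLOC) {
 *       // metadata entry is not allocated
 *   }
 * \endcode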
* @return flags for this file */ TSK_FS_META_FLAG_ENUM getFlags() const { if (m_fsMeta != NULL) return m_fsMeta->flags; else return (TSK_FS_META_FLAG_ENUM) 0; } /** * get address of the meta data structure for this file * @return address of the meta data structure for this file */ TSK_INUM_T getAddr() const { if (m_fsMeta != NULL) return m_fsMeta->addr; else return 0; }; /** * get file type * @return file type */ TSK_FS_META_TYPE_ENUM getType() const { if (m_fsMeta != NULL) return m_fsMeta->type; else return (TSK_FS_META_TYPE_ENUM) 0; }; /** * get Unix-style permissions * @return Unix-style permissions mode */ TSK_FS_META_MODE_ENUM getMode() const { if (m_fsMeta != NULL) return m_fsMeta->mode; else return (TSK_FS_META_MODE_ENUM) 0; }; /** * get link count (number of file names pointing to this) * @return link count */ int getNLink() const { if (m_fsMeta != NULL) return m_fsMeta->nlink; else return 0; }; /** * get file size (in bytes) * @return file size */ TSK_OFF_T getSize() const { if (m_fsMeta != NULL) return m_fsMeta->size; else return 0; }; /** * get owner id * @return owner id */ TSK_UID_T getUid() const { if (m_fsMeta != NULL) return m_fsMeta->uid; else return 0; }; /** * get group id * @return group id */ TSK_GID_T getGid() const { if (m_fsMeta != NULL) return m_fsMeta->gid; else return 0; }; /** * get last file content modification time (stored in number of seconds since Jan 1, 1970 UTC) * @return last file content modification time */ time_t getMTime() const { if (m_fsMeta != NULL) return m_fsMeta->mtime; else return 0; }; /** * get nano-second resolution of modification time * @return nano-second resolution of modification time */ uint32_t getMTimeNano() const { if (m_fsMeta != NULL) return m_fsMeta->mtime_nano; else return 0; }; /** * get last file content accessed time (stored in number of seconds since Jan 1, 1970 UTC) * @return last file content accessed time */ time_t getATime() const { if (m_fsMeta != NULL) return m_fsMeta->atime; else return 0; }; /** * get nano-second resolution of accessed time * @return nano-second resolution of accessed time */ uint32_t getATimeNano() const { if (m_fsMeta != NULL) return m_fsMeta->atime_nano; else return 0; }; /** * get last file / metadata status change time (stored in number of seconds since Jan 1, 1970 UTC) * @return last file / metadata status change time */ time_t getCTime() const { if (m_fsMeta != NULL) return m_fsMeta->ctime; else return 0; }; /** * get nano-second resolution of change time * @return nano-second resolution of change time */ uint32_t getCTimeNano() const { if (m_fsMeta != NULL) return m_fsMeta->ctime_nano; else return 0; }; /** * get created time (stored in number of seconds since Jan 1, 1970 UTC) * @return created time */ time_t getCrTime() const { if (m_fsMeta != NULL) return m_fsMeta->crtime; else return 0; }; /** * get nano-second resolution of created time * @return nano-second resolution of created time */ uint32_t getCrTimeNano() const { if (m_fsMeta != NULL) return m_fsMeta->crtime_nano; else return 0; }; /** * get linux deletion time * @return linux deletion time */ time_t getDTime() const { if (m_fsMeta != NULL) return m_fsMeta->time2.ext2.dtime; else return 0; }; /** * get nano-second resolution of deletion time * @return nano-second resolution of deletion time */ uint32_t getDTimeNano() const { if (m_fsMeta != NULL) return m_fsMeta->time2.ext2.dtime_nano; else return 0; }; /** * get HFS+ backup time * @return HFS+ backup time */ time_t getBackUpTime() const { if (m_fsMeta != NULL) return 
m_fsMeta->time2.hfs.bkup_time; else return 0; }; /** * get nano-second resolution of HFS+ backup time * @return nano-second resolution of HFS+ backup time */ uint32_t getBackUpTimeNano() const { if (m_fsMeta != NULL) return m_fsMeta->time2.hfs.bkup_time_nano; else return 0; }; /** * get sequence number for file (NTFS only, is incremented when entry is reallocated) * @return sequence number for file, or 0xFFFF on error. */ uint32_t getSeq() const { if (m_fsMeta != NULL) return m_fsMeta->seq; return 0xFFFF; }; /** * get name of target file if this is a symbolic link * @return name of target file if this is a symbolic link */ const char *getLink() const { if (m_fsMeta != NULL) return m_fsMeta->link; else return NULL; }; /** * Return the number of names that are stored in the metadata. * @returns number of names. */ int getName2Count() const { int size = 0; if (m_fsMeta != NULL) { TSK_FS_META_NAME_LIST *name = m_fsMeta->name2; while (name != NULL) { size++; name = name->next; }} return size; }; /** * Return a name that is stored in the metadata. * @param a_idx Index of the name to return * @returns NULL on error. Caller must free this memory. */ const TskFsMetaName *getName2(int a_idx) const { if (m_fsMeta != NULL) { TSK_FS_META_NAME_LIST *name = m_fsMeta->name2; int i = 0; while (name != NULL) { if (i == a_idx) return new TskFsMetaName(name); i++; name = name->next; }} return NULL; }; private: /** * get structure used as the head of an attribute list * @return structure used as the head of an attribute list */ const TSK_FS_ATTRLIST *getAttr() const { if (m_fsMeta != NULL) return m_fsMeta->attr; else return NULL; }; }; /** * \ingroup fslib_cpp * Class that represents an allocated or deleted file. The non-default constructor or * open method must be called first. otherwise, the results of the getX() methods are * undefined. See TSK_FS_FILE for more details. */ class TskFsFile { friend class TskFsDir; private: TSK_FS_FILE * m_fsFile; bool m_opened; TskFsFile(const TskFsFile & rhs); TskFsFile & operator=(const TskFsFile & rhs); public: /** * Construct a TskFsFile object from a C struct * @param a_fsFile a pointer of TSK_FS_FILE */ TskFsFile(TSK_FS_FILE * a_fsFile) { m_fsFile = a_fsFile; m_opened = false; }; /** * default constructor to construct a TskFsFile object */ TskFsFile() { m_fsFile = NULL; m_opened = false; }; /** * Close an open file. */ ~TskFsFile() { close(); }; /** * Close an open file. * See tsk_fs_file_close() for details. */ void close() { if (m_opened) tsk_fs_file_close(m_fsFile); m_fsFile = NULL; }; /** * * Open a file given its metadata address. This function loads the metadata * and returns a handle that can be used to read and process the file. Note * that the returned class will not have the file name set because * it was not used to load the file and this function does not search the * directory structure to find the name that points to the address. In general, * if you know the metadata address of a file, this function is more effecient * then tsk_fs_file_open, which first maps a file name to the metadata address * and then open the file using this function. * See tsk_fs_file_open_meta() for details * @param a_fs File system to analyze * @param a_fs_file object to store file data in or NULL to have one allocated. * @param a_addr Metadata address of file to lookup * @returns 1 on error and 0 on success. 
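 *
 * A brief usage sketch ("fs" is an already opened TskFsInfo; the root
 * directory address is used only as an example of a known metadata address,
 * and error handling is omitted):
 * \code
 *   TskFsFile file;
 *   if (file.open(&fs, &file, fs.getRootINum()) == 0) {
 *       char buf[512];
 *       ssize_t cnt = file.read(0, buf, sizeof(buf), TSK_FS_FILE_READ_FLAG_NONE);
 *       // use the cnt bytes in buf ...
 *   }
 * \endcode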
*/ uint8_t open(TskFsInfo * a_fs, TskFsFile * a_fs_file, TSK_INUM_T a_addr) { if ((m_fsFile = tsk_fs_file_open_meta(a_fs->m_fsInfo, a_fs_file->m_fsFile, a_addr)) != NULL) { m_opened = true; return 0; } else { return 1; } }; /** * Return the handle structure for a specific file, given its full path. Note that * if you have the metadata address fo the file, then tsk_fs_file_open_meta() is a * more effecient approach. * See tsk_fs_file_open() for details * @param a_fs File system to analyze * @param a_fs_file Structure to store file data in or NULL to have one allocated. * @param a_path Path of file to open * @returns 1 on error and 0 on success. */ uint8_t open(TskFsInfo * a_fs, TskFsFile * a_fs_file, const char *a_path) { if ((m_fsFile = tsk_fs_file_open(a_fs->m_fsInfo, a_fs_file->m_fsFile, a_path)) != NULL) { m_opened = true; return 0; } else { return 1; } }; /* * Return the number of attributes in the file. * See tsk_fs_file_attr_getsize() for details * @returns number of attributes in file */ int getAttrSize() { return tsk_fs_file_attr_getsize(m_fsFile); //m_fsFile is checked by this C function }; /* * Get a file's attribute based on the 0-based index in the list (and not type, id pair). * It's caller's responsibility to free TskFsAttribute* * See tsk_fs_file_attr_get_idx() for details * @param a_idx 0-based index of attribute to return. * @returns Pointer to attribute or NULL on error */ const TskFsAttribute *getAttr(int a_idx) { TskFsAttribute *fsAttr = new TskFsAttribute(tsk_fs_file_attr_get_idx(m_fsFile, a_idx)); //m_fsFile is checked by this C function return fsAttr; }; /* * Return the default attribute for the file * It's caller's responsibility to free TskFsAttribute* * See tsk_fs_file_attr_get() for details * @returns Pointer to attribute or NULL on error */ const TskFsAttribute *getAttrDefault() { TskFsAttribute *fsAttr = new TskFsAttribute(tsk_fs_file_attr_get(m_fsFile)); //m_fsFile is checked by this C function return fsAttr; }; /* * Return a specific type and id attribute for the file. * It's caller's responsibility to free TskFsAttribute* * See tsk_fs_file_attr_get_type() for details * @param a_type Type of attribute to load * @param a_id Id of attribute to load * @param a_id_used Set to 1 if ID is actually set or 0 to use default * @returns Pointer to attribute or NULL on error */ const TskFsAttribute *getAttr(TSK_FS_ATTR_TYPE_ENUM a_type, uint16_t a_id, uint8_t a_id_used) { TskFsAttribute *fsAttr = new TskFsAttribute(tsk_fs_file_attr_get_type(m_fsFile, //m_fsFile is checked by this C function a_type, a_id, a_id_used)); return fsAttr; }; /** * Process a specific attribute in a file and call a callback function with the file contents. * See tsk_fs_file_walk_type() for details * @param a_type Attribute type to process * @param a_id Id if attribute to process * @param a_flags Flags to use while processing file * @param a_action Callback action to call with content * @param a_ptr Pointer that will passed to callback * @returns 1 on error and 0 on success. */ uint8_t walk(TSK_FS_ATTR_TYPE_ENUM a_type, uint16_t a_id, TSK_FS_FILE_WALK_FLAG_ENUM a_flags, TSK_FS_FILE_WALK_CPP_CB a_action, void *a_ptr) { TSK_FS_FILE_WALK_CPP_DATA fileData; fileData.cppAction = a_action; fileData.cPtr = a_ptr; return tsk_fs_file_walk_type(m_fsFile, a_type, a_id, a_flags, tsk_fs_file_cpp_c_cb, &fileData); //m_fsFile is checked by this C function }; /** * Process the default attribute for the file and call a callback function with the file contents. 
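 *
 * A sketch of a content callback and the corresponding call (hedged: the
 * callback parameter list assumes TSK_FS_FILE_WALK_CPP_CB mirrors the C
 * TSK_FS_FILE_WALK_CB callback; see the typedef earlier in this header):
 * \code
 *   static TSK_WALK_RET_ENUM
 *   content_cb(TskFsFile *f, TSK_OFF_T off, TSK_DADDR_T addr, char *buf,
 *       size_t len, TSK_FS_BLOCK_FLAG_ENUM flags, void *ptr)
 *   {
 *       // consume len bytes at buf (hash them, write them out, etc.)
 *       return TSK_WALK_CONT;
 *   }
 *
 *   // later, with an open TskFsFile named file:
 *   file.walk(TSK_FS_FILE_WALK_FLAG_NONE, content_cb, NULL);
 * \endcode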
* See tsk_fs_file_walk_type() for details * @param a_flags Flags to use while processing file * @param a_action Callback action to call with content * @param a_ptr Pointer that will passed to callback * @returns 1 on error and 0 on success. */ uint8_t walk(TSK_FS_FILE_WALK_FLAG_ENUM a_flags, TSK_FS_FILE_WALK_CPP_CB a_action, void *a_ptr) { TSK_FS_FILE_WALK_CPP_DATA fileData; fileData.cppAction = a_action; fileData.cPtr = a_ptr; return tsk_fs_file_walk(m_fsFile, a_flags, tsk_fs_file_cpp_c_cb, &fileData); //m_fsFile is checked by this C function }; /** * Read the contents of a specific attribute of a file using a typical read() type interface. * 0s are returned for missing runs of files. * See tsk_fs_file_read_type() for details * @param a_type The type of attribute to load * @param a_id The id of attribute to load (use 0 and set a_flags if you do not care) * @param a_offset The byte offset to start reading from. * @param a_buf The buffer to read the data into. * @param a_len The number of bytes to read from the file. * @param a_flags Flags to use while reading * @returns The number of bytes read or -1 on error (incl if offset is past EOF). */ ssize_t read(TSK_FS_ATTR_TYPE_ENUM a_type, uint16_t a_id, TSK_OFF_T a_offset, char *a_buf, size_t a_len, TSK_FS_FILE_READ_FLAG_ENUM a_flags) { return tsk_fs_file_read_type(m_fsFile, a_type, a_id, a_offset, a_buf, a_len, a_flags); //m_fsFile is checked by this C function }; /** * Read the contents of the default attribute of a file using a typical read() type interface. * 0s are returned for missing runs of files. * See tsk_fs_file_read() for details * @param a_offset The byte offset to start reading from. * @param a_buf The buffer to read the data into. * @param a_len The number of bytes to read from the file. * @param a_flags Flags to use while reading * @returns The number of bytes read or -1 on error (incl if offset is past EOF). */ ssize_t read(TSK_OFF_T a_offset, char *a_buf, size_t a_len, TSK_FS_FILE_READ_FLAG_ENUM a_flags) { return tsk_fs_file_read(m_fsFile, a_offset, a_buf, a_len, a_flags); //m_fsFile is checked by this C function }; /** * Return pointer to the file's name (or NULL if file was opened using metadata address) * @returns pointer to name of file. It is the caller's responsibility to free this. */ TskFsName *getName() { if (m_fsFile != NULL) return new TskFsName(m_fsFile->name); else return NULL; }; /** * Return pointer to the file's metadata (or NULL if name has invalid metadata address) * @returns pointer metadata of file. It is the caller's responsibility to free this. */ TskFsMeta *getMeta() { if (m_fsFile != NULL) return new TskFsMeta(m_fsFile->meta); else return NULL; }; /** * Return pointer file system that the file is located in. * @returns pointer to file system that the file is located in. */ TskFsInfo *getFsInfo() { if (m_fsFile != NULL) return new TskFsInfo(m_fsFile->fs_info); else return NULL; }; }; //TskFsFile /** * \ingroup fslib_cpp * Stores information about a directory in the file system. The open() method * must be called before any of hte other methods return defined data. See * TSK_FS_DIR for more details. */ class TskFsDir { private: TSK_FS_DIR * m_fsDir; TskFsDir(const TskFsDir & rhs); TskFsDir & operator=(const TskFsDir & rhs); public: TskFsDir() { m_fsDir = NULL; }; /* * Close the directory that was opened with tsk_fs_dir_open() */ ~TskFsDir() { close(); } /* * Open a directory (using its metadata addr) so that each of the files in it can be accessed. * See for tsk_fs_dir_open_meta() details. 
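 *
 * A minimal sketch of listing the root directory ("fs" is an already opened
 * TskFsInfo; TskFsName::getName() is the accessor declared earlier in this
 * header; the caller owns and must delete the returned objects):
 * \code
 *   TskFsDir dir;
 *   if (dir.open(&fs, fs.getRootINum()) == 0) {
 *       for (size_t i = 0; i < dir.getSize(); i++) {
 *           TskFsFile *f = dir.getFile(i);
 *           if (f == NULL)
 *               continue;
 *           TskFsName *n = f->getName();
 *           if (n != NULL)
 *               printf("%s\n", n->getName());
 *           delete n;
 *           delete f;
 *       }
 *   }
 * \endcode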
* @param a_fs File system to analyze * @param a_addr Metadata address of the directory to open * @returns 1 on error and 0 on success */ uint8_t open(TskFsInfo * a_fs, TSK_INUM_T a_addr) { if ((m_fsDir = tsk_fs_dir_open_meta(a_fs->m_fsInfo, a_addr)) != NULL) return 0; else return 1; }; /* * Open a directory (using its path) so that each of the files in it can be accessed. * See for tsk_fs_dir_open() details. * @param a_fs File system to analyze * @param a_dir Path of the directory to open * @returns 1 on error and 0 on success */ uint8_t open(TskFsInfo * a_fs, const char *a_dir) { if ((m_fsDir = tsk_fs_dir_open(a_fs->m_fsInfo, a_dir)) != NULL) return 0; else return 1; }; /* * Close the directory that was opened with tsk_fs_dir_open() * See tsk_fs_dir_close() for details */ void close() { tsk_fs_dir_close(m_fsDir); }; /* * Returns the number of files and subdirectories in a directory. * See tsk_fs_dir_getsize() for details * @returns Number of files and subdirectories (or 0 on error) */ size_t getSize() const { return tsk_fs_dir_getsize(m_fsDir); //m_fsDir is checked by this C function }; /* * Return a specific file or subdirectory from an open directory. * It's caller's responsibility to free TskFsFile* * See tsk_fs_dir_getsize() for details * @param a_idx Index of file in directory to open (0-based) * @returns NULL on error */ TskFsFile *getFile(size_t a_idx) const { TSK_FS_FILE *fs_file = tsk_fs_dir_get(m_fsDir, a_idx); if (fs_file != NULL) { TskFsFile *f = new TskFsFile(fs_file); f->m_opened = true; return f; } else return NULL; }; /* * Return metadata address of this directory * @returns metadata address of this directory */ TSK_INUM_T getMetaAddr() const { if (m_fsDir != NULL) return m_fsDir->addr; else return 0; }; /* * Return pointer to the file structure for the directory. * @returns NULL on error. it is the caller's responsibility to free this object. */ const TskFsFile *getFsFile() const { if (m_fsDir != NULL) return new TskFsFile(m_fsDir->fs_file); else return NULL; }; private: /* * Return pointer to file system the directory is located in * @returns NULL on error */ const TSK_FS_INFO *getFsInfo() const { if (m_fsDir != NULL) return m_fsDir->fs_info; else return NULL; }; }; #endif #endif sleuthkit-4.2.0/tsk/fs/tsk_fs_i.h000644 000765 000024 00000023027 12576320700 017514 0ustar00carrierstaff000000 000000 /* ** The Sleuth Kit ** ** Brian Carrier [carrier sleuthkit [dot] org] ** Copyright (c) 2003-2011 Brian Carrier. All rights reserved ** ** TASK ** Copyright (c) 2002 @stake Inc. All rights reserved ** ** Copyright (c) 1997,1998,1999, International Business Machines ** Corporation and others. All Rights Reserved. */ /* LICENSE * .ad * .fi * This software is distributed under the IBM Public License. * AUTHOR(S) * Wietse Venema * IBM T.J. Watson Research * P.O. Box 704 * Yorktown Heights, NY 10598, USA --*/ /** * \file tsk_fs_i.h * Contains the internal library definitions for the file system functions. This should * be included by the code in the file system library. 
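 *
 * For example, a file system module in this library typically starts with
 * (a sketch only):
 * \code
 *   #include "tsk_fs_i.h"  // pulls in tsk_base_i.h, tsk_img_i.h, tsk_vs_i.h and tsk_fs.h
 * \endcode
 * and then uses the internal helpers declared below (tsk_fs_attr_*,
 * tsk_fs_dir_*, tsk_fs_name_*, etc.).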
*/ #ifndef _TSK_FS_I_H #define _TSK_FS_I_H // Include the other internal TSK header files #include "tsk/base/tsk_base_i.h" #include "tsk/img/tsk_img_i.h" #include "tsk/vs/tsk_vs_i.h" // Include the external file #include "tsk_fs.h" #include #include #ifdef __cplusplus extern "C" { #endif /* */ #if !defined (TSK_WIN32) #include #include #endif /* */ // set to 1 to open HFS+ file systems -- which is not fully tested #ifndef TSK_USE_HFS #define TSK_USE_HFS 1 #endif /* */ #ifndef NBBY #define NBBY 8 #endif /* */ #ifndef isset #define isset(a,i) (((uint8_t *)(a))[(i)/NBBY] & (1<<((i)%NBBY))) #endif /* */ #ifndef setbit #define setbit(a,i) (((uint8_t *)(a))[(i)/NBBY] |= (1<<((i)%NBBY))) #endif /* */ /* Data structure and action to internally load a file */ typedef struct { char *base; char *cur; size_t total; size_t left; } TSK_FS_LOAD_FILE; extern TSK_WALK_RET_ENUM tsk_fs_load_file_action(TSK_FS_FILE * fs_file, TSK_OFF_T, TSK_DADDR_T, char *, size_t, TSK_FS_BLOCK_FLAG_ENUM, void *); /* BLOCK */ extern TSK_FS_BLOCK *tsk_fs_block_alloc(TSK_FS_INFO * fs); extern int tsk_fs_block_set(TSK_FS_INFO * fs, TSK_FS_BLOCK * fs_block, TSK_DADDR_T a_addr, TSK_FS_BLOCK_FLAG_ENUM a_flags, char *a_buf); /* FS_DATA */ extern TSK_FS_ATTR *tsk_fs_attr_alloc(TSK_FS_ATTR_FLAG_ENUM); extern void tsk_fs_attr_free(TSK_FS_ATTR *); extern void tsk_fs_attr_clear(TSK_FS_ATTR *); extern uint8_t tsk_fs_attr_set_str(TSK_FS_FILE *, TSK_FS_ATTR *, const char *, TSK_FS_ATTR_TYPE_ENUM, uint16_t, void *, size_t); extern uint8_t tsk_fs_attr_set_run(TSK_FS_FILE *, TSK_FS_ATTR * a_fs_attr, TSK_FS_ATTR_RUN * data_run_new, const char *name, TSK_FS_ATTR_TYPE_ENUM type, uint16_t id, TSK_OFF_T size, TSK_OFF_T initsize, TSK_OFF_T allocsize, TSK_FS_ATTR_FLAG_ENUM flags, uint32_t compsize); extern uint8_t tsk_fs_attr_add_run(TSK_FS_INFO * fs, TSK_FS_ATTR * a_fs_attr, TSK_FS_ATTR_RUN * data_run_new); extern void tsk_fs_attr_append_run(TSK_FS_INFO * fs, TSK_FS_ATTR * a_fs_attr, TSK_FS_ATTR_RUN * a_data_run); /* FS_DATALIST */ extern TSK_FS_ATTRLIST *tsk_fs_attrlist_alloc(); extern void tsk_fs_attrlist_free(TSK_FS_ATTRLIST *); extern uint8_t tsk_fs_attrlist_add(TSK_FS_ATTRLIST *, TSK_FS_ATTR *); extern TSK_FS_ATTR *tsk_fs_attrlist_getnew(TSK_FS_ATTRLIST *, TSK_FS_ATTR_FLAG_ENUM a_atype); extern void tsk_fs_attrlist_markunused(TSK_FS_ATTRLIST *); extern const TSK_FS_ATTR *tsk_fs_attrlist_get(const TSK_FS_ATTRLIST *, TSK_FS_ATTR_TYPE_ENUM); extern const TSK_FS_ATTR *tsk_fs_attrlist_get_id(const TSK_FS_ATTRLIST *, TSK_FS_ATTR_TYPE_ENUM, uint16_t); extern const TSK_FS_ATTR *tsk_fs_attrlist_get_name_type(const TSK_FS_ATTRLIST *, TSK_FS_ATTR_TYPE_ENUM, const char *); extern const TSK_FS_ATTR *tsk_fs_attrlist_get_idx(const TSK_FS_ATTRLIST *, int); extern int tsk_fs_attrlist_get_len(const TSK_FS_ATTRLIST * a_fs_attrlist); /* FS_DATA_RUN */ extern TSK_FS_ATTR_RUN *tsk_fs_attr_run_alloc(); extern void tsk_fs_attr_run_free(TSK_FS_ATTR_RUN *); /* FS_META */ extern TSK_FS_META *tsk_fs_meta_alloc(size_t); extern TSK_FS_META *tsk_fs_meta_realloc(TSK_FS_META *, size_t); extern void tsk_fs_meta_reset(TSK_FS_META *); extern void tsk_fs_meta_close(TSK_FS_META * fs_meta); /* FS_FILE */ extern TSK_FS_FILE *tsk_fs_file_alloc(TSK_FS_INFO *); /* FS_DIR */ extern TSK_FS_DIR *tsk_fs_dir_alloc(TSK_FS_INFO * a_fs, TSK_INUM_T a_addr, size_t a_cnt); extern uint8_t tsk_fs_dir_realloc(TSK_FS_DIR * a_fs_dir, size_t a_cnt); extern uint8_t tsk_fs_dir_add(TSK_FS_DIR * a_fs_dir, const TSK_FS_NAME * a_fs_dent); extern void tsk_fs_dir_reset(TSK_FS_DIR * a_fs_dir); /* Orphan 
Directory Support */ TSK_RETVAL_ENUM tsk_fs_dir_load_inum_named(TSK_FS_INFO * a_fs); uint8_t tsk_fs_dir_find_inum_named(TSK_FS_INFO * a_fs, TSK_INUM_T a_inum); extern uint8_t tsk_fs_dir_make_orphan_dir_meta(TSK_FS_INFO * a_fs, TSK_FS_META * a_fs_meta); extern uint8_t tsk_fs_dir_make_orphan_dir_name(TSK_FS_INFO * a_fs, TSK_FS_NAME * a_fs_name); extern TSK_RETVAL_ENUM tsk_fs_dir_find_orphans(TSK_FS_INFO * a_fs, TSK_FS_DIR * a_fs_dir); /* FS_DENT */ extern TSK_FS_NAME *tsk_fs_name_alloc(size_t, size_t); extern uint8_t tsk_fs_name_realloc(TSK_FS_NAME *, size_t); extern void tsk_fs_name_free(TSK_FS_NAME *); extern void tsk_fs_name_print(FILE *, const TSK_FS_FILE *, const char *, TSK_FS_INFO *, const TSK_FS_ATTR *, uint8_t); extern void tsk_fs_name_print_long(FILE *, const TSK_FS_FILE *, const char *, TSK_FS_INFO *, const TSK_FS_ATTR *, uint8_t, int32_t); extern void tsk_fs_name_print_mac(FILE *, const TSK_FS_FILE *, const char *, const TSK_FS_ATTR * fs_attr, const char *, int32_t); extern void tsk_fs_name_print_mac_md5(FILE *, const TSK_FS_FILE *, const char *, const TSK_FS_ATTR * fs_attr, const char *, int32_t, const unsigned char *); extern uint8_t tsk_fs_name_copy(TSK_FS_NAME * a_fs_name_to, const TSK_FS_NAME * a_fs_name_from); extern void tsk_fs_name_reset(TSK_FS_NAME * a_fs_name); extern char *tsk_fs_time_to_str(time_t, char buf[128]); extern char *tsk_fs_time_to_str_subsecs(time_t, unsigned int subsecs, char buf[128]); /* Utilities */ extern uint8_t tsk_fs_unix_make_data_run(TSK_FS_FILE * fs_file); extern TSK_FS_ATTR_TYPE_ENUM tsk_fs_unix_get_default_attr_type(const TSK_FS_FILE * a_file); extern int tsk_fs_unix_name_cmp(TSK_FS_INFO * a_fs_info, const char *s1, const char *s2); /* Specific file system routines */ extern TSK_FS_INFO *ext2fs_open(TSK_IMG_INFO *, TSK_OFF_T, TSK_FS_TYPE_ENUM, uint8_t); extern TSK_FS_INFO *fatfs_open(TSK_IMG_INFO *, TSK_OFF_T, TSK_FS_TYPE_ENUM, uint8_t); extern TSK_FS_INFO *ffs_open(TSK_IMG_INFO *, TSK_OFF_T, TSK_FS_TYPE_ENUM); extern TSK_FS_INFO *ntfs_open(TSK_IMG_INFO *, TSK_OFF_T, TSK_FS_TYPE_ENUM, uint8_t); extern TSK_FS_INFO *rawfs_open(TSK_IMG_INFO *, TSK_OFF_T); extern TSK_FS_INFO *swapfs_open(TSK_IMG_INFO *, TSK_OFF_T); extern TSK_FS_INFO *iso9660_open(TSK_IMG_INFO *, TSK_OFF_T, TSK_FS_TYPE_ENUM, uint8_t); extern TSK_FS_INFO *hfs_open(TSK_IMG_INFO *, TSK_OFF_T, TSK_FS_TYPE_ENUM, uint8_t); extern TSK_FS_INFO *yaffs2_open(TSK_IMG_INFO *, TSK_OFF_T, TSK_FS_TYPE_ENUM, uint8_t); /* Generic functions for swap and raw -- many say "not supported" */ extern uint8_t tsk_fs_nofs_fsstat(TSK_FS_INFO * fs, FILE * hFile); extern void tsk_fs_nofs_close(TSK_FS_INFO * fs); extern TSK_FS_ATTR_TYPE_ENUM tsk_fs_nofs_get_default_attr_type(const TSK_FS_FILE * a_file); extern uint8_t tsk_fs_nofs_make_data_run(TSK_FS_FILE *); extern int tsk_fs_nofs_name_cmp(TSK_FS_INFO *, const char *, const char *); extern TSK_FS_BLOCK_FLAG_ENUM tsk_fs_nofs_block_getflags(TSK_FS_INFO * a_fs, TSK_DADDR_T a_addr); extern uint8_t tsk_fs_nofs_block_walk(TSK_FS_INFO * fs, TSK_DADDR_T a_start_blk, TSK_DADDR_T a_end_blk, TSK_FS_BLOCK_WALK_FLAG_ENUM a_flags, TSK_FS_BLOCK_WALK_CB a_action, void *a_ptr); extern uint8_t tsk_fs_nofs_file_add_meta(TSK_FS_INFO * fs, TSK_FS_FILE * a_fs_file, TSK_INUM_T inum); extern uint8_t tsk_fs_nofs_inode_walk(TSK_FS_INFO * fs, TSK_INUM_T a_start_inum, TSK_INUM_T a_end_inum, TSK_FS_META_FLAG_ENUM a_flags, TSK_FS_META_WALK_CB a_action, void *a_ptr); extern uint8_t tsk_fs_nofs_istat(TSK_FS_INFO * a_fs, FILE * hFile, TSK_INUM_T inum, TSK_DADDR_T numblock, int32_t sec_skew); 
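/* Usage note (a sketch, not a required pattern): a minimal module such as
 * rawfs or swapfs wires these generic/"not supported" handlers into the
 * function pointers of its TSK_FS_INFO inside its open routine, roughly:
 *
 *   fs->block_walk = tsk_fs_nofs_block_walk;
 *   fs->inode_walk = tsk_fs_nofs_inode_walk;
 *   fs->fsstat = tsk_fs_nofs_fsstat;
 *   fs->close = tsk_fs_nofs_close;
 *
 * See TSK_FS_INFO in tsk_fs.h for the full set of callbacks a module fills in.
 */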
extern TSK_RETVAL_ENUM tsk_fs_nofs_dir_open_meta(TSK_FS_INFO * a_fs, TSK_FS_DIR ** a_fs_dir, TSK_INUM_T a_addr); extern uint8_t tsk_fs_nofs_jopen(TSK_FS_INFO * a_fs, TSK_INUM_T inum); extern uint8_t tsk_fs_nofs_jentry_walk(TSK_FS_INFO * a_fs, int a_flags, TSK_FS_JENTRY_WALK_CB a_action, void *a_ptr); extern uint8_t tsk_fs_nofs_jblk_walk(TSK_FS_INFO * a_fs, TSK_INUM_T start, TSK_INUM_T end, int a_flags, TSK_FS_JBLK_WALK_CB a_action, void *a_ptr); /* malloc/free with lock init/deinit */ extern TSK_FS_INFO *tsk_fs_malloc(size_t); extern void tsk_fs_free(TSK_FS_INFO *); // Endian macros - actual functions in misc/ #define tsk_fs_guessu16(fs, x, mag) \ tsk_guess_end_u16(&(fs->endian), (x), (mag)) #define tsk_fs_guessu32(fs, x, mag) \ tsk_guess_end_u32(&(fs->endian), (x), (mag)) #ifdef __cplusplus } #endif /* */ #endif /* */ sleuthkit-4.2.0/tsk/fs/tsk_hfs.h000644 000765 000024 00000075723 12576320700 017366 0ustar00carrierstaff000000 000000 /* ** The Sleuth Kit ** ** This software is subject to the IBM Public License ver. 1.0, ** which was displayed prior to download and is included in the readme.txt ** file accompanying the Sleuth Kit files. It may also be requested from: ** Crucial Security Inc. ** 14900 Conference Center Drive ** Chantilly, VA 20151 ** ** ** Copyright (c) 2009-2011 Brian Carrier. All rights reserved. ** ** Judson Powers [jpowers@atc-nycorp.com] ** Matt Stillerman [matt@atc-nycorp.com] ** Copyright (c) 2008, 2012 ATC-NY. All rights reserved. ** This file contains data developed with support from the National ** Institute of Justice, Office of Justice Programs, U.S. Department of Justice. ** ** Wyatt Banks [wbanks@crucialsecurity.com] ** Copyright (c) 2005 Crucial Security Inc. All rights reserved. ** ** Brian Carrier [carrier@sleuthkit.org] ** Copyright (c) 2003-2005 Brian Carrier. All rights reserved ** ** Copyright (c) 1997,1998,1999, International Business Machines ** Corporation and others. All Rights Reserved. */ /* TCT * LICENSE * This software is distributed under the IBM Public License. * AUTHOR(S) * Wietse Venema * IBM T.J. Watson Research * P.O. Box 704 * Yorktown Heights, NY 10598, USA --*/ /* ** You may distribute the Sleuth Kit, or other software that incorporates ** part of all of the Sleuth Kit, in object code form under a license agreement, ** provided that: ** a) you comply with the terms and conditions of the IBM Public License ** ver 1.0; and ** b) the license agreement ** i) effectively disclaims on behalf of all Contributors all warranties ** and conditions, express and implied, including warranties or ** conditions of title and non-infringement, and implied warranties ** or conditions of merchantability and fitness for a particular ** purpose. ** ii) effectively excludes on behalf of all Contributors liability for ** damages, including direct, indirect, special, incidental and ** consequential damages such as lost profits. ** iii) states that any provisions which differ from IBM Public License ** ver. 1.0 are offered by that Contributor alone and not by any ** other party; and ** iv) states that the source code for the program is available from you, ** and informs licensees how to obtain it in a reasonable manner on or ** through a medium customarily used for software exchange. ** ** When the Sleuth Kit or other software that incorporates part or all of ** the Sleuth Kit is made available in source code form: ** a) it must be made available under IBM Public License ver. 1.0; and ** b) a copy of the IBM Public License ver. 
1.0 must be included with ** each copy of the program. */ /** * Contains the structures and function APIs for HFS+ file system support. */ #ifndef _TSK_HFS_H #define _TSK_HFS_H /* * Some compilers do not have the boolean type. */ #ifndef TRUE #define TRUE ((unsigned char)1) #endif #ifndef FALSE #define FALSE ((unsigned char)0) #endif /* * All structures created using technote 1150 from Apple.com * http://developer.apple.com/technotes/tn/tn1150.html */ /* * Constants */ #define HFS_FILE_CONTENT_LEN 160 /* size of two hfs_fork data structures */ #define HFS_MAXNAMLEN 765 /* maximum HFS+ name length in bytes, when encoded in UTF8, not including terminating null */ #define HFS_MAXPATHLEN 1024 /* HFS+ can have paths longer than this, but Apple's implementation limits certain items to this value (e.g., symlink targets) */ /* * HFS uses its own time system, which is seconds since Jan 1 1904 * instead of the typical Jan 1 1970. This number is the seconds between * 1 Jan 1904 and 1 Jan 1970 which will make ctime(3) work instead of * re-writing the Apple library function to convert this time. */ #define NSEC_BTWN_1904_1970 (uint32_t) 2082844800U /** * These two constants are the "ID" of the data fork and resource fork as TSK attributes. By the way, * those attributes both have type TSK_FS_ATTR_TYPE_NTFS_DATA, which is a bit counter-intuitive. */ #define HFS_FS_ATTR_ID_DATA 0 #define HFS_FS_ATTR_ID_RSRC 1 /* predefined files */ #define HFS_ROOT_PARENT_ID 1 #define HFS_ROOT_FOLDER_ID 2 #define HFS_EXTENTS_FILE_ID 3 // extents overflow file #define HFS_EXTENTS_FILE_NAME "$ExtentsFile" #define HFS_CATALOG_FILE_ID 4 // catalog file #define HFS_CATALOG_FILE_NAME "$CatalogFile" #define HFS_BAD_BLOCK_FILE_ID 5 #define HFS_BAD_BLOCK_FILE_NAME "$BadBlockFile" #define HFS_ALLOCATION_FILE_ID 6 // allocation file (HFS+) #define HFS_ALLOCATION_FILE_NAME "$AllocationFile" #define HFS_STARTUP_FILE_ID 7 // startup file (HFS+) #define HFS_STARTUP_FILE_NAME "$StartupFile" #define HFS_ATTRIBUTES_FILE_ID 8 // Attributes file (HFS+) #define HFS_ATTRIBUTES_FILE_NAME "$AttributesFile" #define HFS_REPAIR_CATALOG_FILE_ID 14 // Temp file during fsck #define HFS_REPAIR_CATALOG_FILE_NAME "$RepairCatalogFile" #define HFS_BOGUS_EXTENT_FILE_ID 15 // Temp file during fsck #define HFS_BOGUS_EXTENT_FILE_NAME "$BogusExtentFile" #define HFS_FIRST_USER_CNID 16 #define HFS_ROOT_INUM HFS_ROOT_FOLDER_ID #define HFS_HARDLINK_FILE_TYPE 0x686C6E6B /* hlnk */ #define HFS_HARDLINK_FILE_CREATOR 0x6866732B /* hfs+ */ #define HFS_LINKDIR_FILE_TYPE 0x66647270 /* fdrp */ #define HFS_LINKDIR_FILE_CREATOR 0x4D414353 /* MACS */ #define UTF16_NULL 0x0000 #define UTF16_NULL_REPLACE 0x005e // This is the standard Unicode replacement character in UTF16 //#define UTF16_NULL_REPLACE 0xfffd #define UTF16_SLASH 0x002f #define UTF16_COLON 0x003a #define UTF16_LEAST_PRINTABLE 0x0020 #define UTF8_NULL_REPLACE "^" // This is the standard Unicode replacement character in UTF8 //#define UTF8_NULL_REPLACE "\xef\xbf\xbd" #define HFS_CATALOGNAME "$CatalogFile" #define HFS_EXTENTSNAME "$ExtentsFile" #define HFS_ALLOCATIONNAME "$BitMapFile" #define HFS_STARTUPNAME "$BootFile" #define HFS_ATTRIBUTESNAME "$AttributesFile" /** * B-Tree Node Types */ #define HFS_ATTR_NODE_LEAF -1 #define HFS_ATTR_NODE_HEADER 1 #define HFS_ATTR_NODE_INDEX 0 #define HFS_ATTR_NODE_MAP 2 /* * HFS structures */ /* File and Folder name struct */ typedef struct { uint8_t length[2]; uint8_t unicode[510]; } hfs_uni_str; /* access permissions */ // admin flag values #define HFS_PERM_AFLAG_ARCHIVED 
0x01 /* file has been archived */ #define HFS_PERM_AFLAG_IMMUTABLE 0x02 /* file may not be changed */ #define HFS_PERM_AFLAG_APPEND 0x04 /* writes to file may only append */ // owner flag values #define HFS_PERM_OFLAG_NODUMP 0x01 /* do not dump (back up or archive) this file */ #define HFS_PERM_OFLAG_IMMUTABLE 0x02 /* file may not be changed */ #define HFS_PERM_OFLAG_APPEND 0x04 /* writes to file may only append */ #define HFS_PERM_OFLAG_OPAQUE 0x08 /* directory is opaque */ #define HFS_PERM_OFLAG_COMPRESSED 0x20 /* file is HFS-compressed (see 10.6 sys/stat.h) */ // mode flag values #define HFS_IN_ISUID 0004000 /* set user id */ #define HFS_IN_ISGID 0002000 /* set group id */ #define HFS_IN_ISVTX 0001000 /* sticky bit (directories only) */ #define HFS_IN_IRUSR 0000400 /* R for user */ #define HFS_IN_IWUSR 0000200 /* W for user */ #define HFS_IN_IXUSR 0000100 /* X for user */ #define HFS_IN_IRGRP 0000040 /* R for group */ #define HFS_IN_IWGRP 0000020 /* W for group */ #define HFS_IN_IXGRP 0000010 /* X for group */ #define HFS_IN_IROTH 0000004 /* R for other */ #define HFS_IN_IWOTH 0000002 /* W for other */ #define HFS_IN_IXOTH 0000001 /* X for other */ #define HFS_IN_IFMT 0170000 /* filetype mask */ #define HFS_IN_IFIFO 0010000 /* named pipe */ #define HFS_IN_IFCHR 0020000 /* character special */ #define HFS_IN_IFDIR 0040000 /* directory */ #define HFS_IN_IFBLK 0060000 /* block special */ #define HFS_IN_IFREG 0100000 /* regular file */ #define HFS_IN_IFLNK 0120000 /* symbolic link */ #define HFS_IN_IFSOCK 0140000 /* socket */ #define HFS_IFWHT 0160000 /* whiteout */ #define HFS_IFXATTR 0200000 /* extended attributes */ typedef struct { uint8_t owner[4]; /* file owner */ uint8_t group[4]; /* file group */ uint8_t a_flags; /* admin flags */ uint8_t o_flags; /* owner flags */ uint8_t mode[2]; /* file mode */ union { uint8_t inum[4]; /* inode number (for hard link files) */ uint8_t nlink[4]; /* link count (for direct node files) */ uint8_t raw[4]; /* device id (for block and char device files) */ } special; } hfs_access_perm; /* HFS extent descriptor */ typedef struct { uint8_t start_blk[4]; /* start block */ uint8_t blk_cnt[4]; /* block count */ } hfs_ext_desc; /* Structre used in the extents tree */ typedef struct { hfs_ext_desc extents[8]; } hfs_extents; /* Fork data structure. This is used in both the volume header and catalog tree. */ typedef struct { uint8_t logic_sz[8]; /* The size (in bytes) of the fork */ uint8_t clmp_sz[4]; /* For "special files" in volume header, clump size. For * catalog files, this is number of blocks read or not used. 
*/ uint8_t total_blk[4]; /* total blocks in all extents of the fork */ hfs_ext_desc extents[8]; } hfs_fork; /**************************************************** * Super block / volume header */ #define HFS_VH_OFF 1024 // byte offset in volume to volume header // signature values #define HFS_VH_SIG_HFS 0x4244 /* BD in big endian */ #define HFS_VH_SIG_HFSPLUS 0x482b /* H+ in big endian */ #define HFS_VH_SIG_HFSX 0x4858 /* HX in big endian */ // version values #define HFS_VH_VER_HFSPLUS 0x0004 /* all HFS+ volumes are version 4 */ #define HFS_VH_VER_HFSX 0x0005 /* HFSX volumes start with version 5 */ // attr values ( // bits 0 to 7 are reserved #define HFS_VH_ATTR_UNMOUNTED (uint32_t)(1<<8) /* set if the volume was unmounted properly; as per TN 1150, modern Macintosh OSes always leave this bit set for the boot volume */ #define HFS_VH_ATTR_BADBLOCKS (uint32_t)(1<<9) /* set if there are any bad blocks for this volume (in the Extents B-tree) */ #define HFS_VH_ATTR_NOCACHE (uint32_t)(1<<10) /* set if volume should not be cached */ #define HFS_VH_ATTR_INCONSISTENT (uint32_t)(1<<11) /* cleared if the volume was unmounted properly */ #define HFS_VH_ATTR_CNIDS_REUSED (uint32_t)(1<<12) /* set if CNIDs have wrapped around past the maximum value and are being reused; in this case, there are CNIDs on the disk larger than the nextCatalogId field */ #define HFS_VH_ATTR_JOURNALED (uint32_t)(1<<13) // 14 is reserved #define HFS_VH_ATTR_SOFTWARE_LOCK (uint32_t)(1 << 15) /* set if volume should be write-protected in software */ // 16 to 31 are reserved // last_mnt_ver values #define HFS_VH_MVER_HFSPLUS 0x31302e30 /* '10.0' for Mac OS X */ #define HFS_VH_MVER_HFSJ 0x4846534a /* 'HFSJ' for journaled HFS+ on Mac OS X */ #define HFS_VH_MVER_FSK 0x46534b21 /* 'FSK!' for failed journal replay */ #define HFS_VH_MVER_FSCK 0x6673636b /* 'fsck' for fsck_hfs */ #define HFS_VH_MVER_OS89 0x382e3130 /* '8.10' for Mac OS 8.1-9.2.2 */ /* Index values for finder_info array */ #define HFS_VH_FI_BOOT 0 /*Directory ID of bootable directory */ #define HFS_VH_FI_START 1 /* Parent dir ID of startup app */ #define HFS_VH_FI_OPEN 2 /* Directory to open when volume is mounted */ #define HFS_VH_FI_BOOT9 3 /* Directory ID of OS 8 or 9 bootable sys folder */ #define HFS_VH_FI_RESV1 4 #define HFS_VH_FI_BOOTX 5 /* Directory ID of OS X bootable system (CoreServices dir) */ #define HFS_VH_FI_ID1 6 /* OS X Volume ID part 1 */ #define HFS_VH_FI_ID2 7 /* OS X Volume ID part 2 */ /** * Flags to control hfs_UTF16toUTF8() */ // If this flag is set, the function will replace fwd slash with colon, as // required in HFS+ filenames. #define HFS_U16U8_FLAG_REPLACE_SLASH 0x00000001 #define HFS_U16U8_FLAG_REPLACE_CONTROL 0x00000002 /* ** HFS+/HFSX Super Block */ typedef struct { uint8_t signature[2]; /* "H+" for HFS+, "HX" for HFSX */ uint8_t version[2]; /* 4 for HFS+, 5 for HFSX */ uint8_t attr[4]; /* volume attributes */ uint8_t last_mnt_ver[4]; /* last mounted version */ uint8_t jinfo_blk[4]; /* journal info block */ uint8_t cr_date[4]; /* volume creation date (NOT in GMT) */ uint8_t m_date[4]; /* volume last modified date (GMT) */ uint8_t bkup_date[4]; /* volume last backup date (GMT) */ uint8_t chk_date[4]; /* date of last consistency check (GMT) */ uint8_t file_cnt[4]; /* number of files on volume (not incl. special files) */ uint8_t fldr_cnt[4]; /* number of folders on volume (not incl. 
root dir) */ uint8_t blk_sz[4]; /* allocation block size (in bytes) */ uint8_t blk_cnt[4]; /* number of blocks on disk */ uint8_t free_blks[4]; /* unused block count */ uint8_t next_alloc[4]; /* block addr to start allocation search from */ uint8_t rsrc_clmp_sz[4]; /* default clump size for resource forks (in bytes) */ uint8_t data_clmp_sz[4]; /* default clump size for data forks (in bytes) */ uint8_t next_cat_id[4]; /* next catalog id for allocation */ uint8_t write_cnt[4]; /* write count: incremented each time it is mounted and modified */ uint8_t enc_bmp[8]; /* encoding bitmap (identifies which encodings were used in FS) */ uint8_t finder_info[8][4]; /* Special finder details */ hfs_fork alloc_file; /* location and size of allocation bitmap file */ hfs_fork ext_file; /* location and size of extents file */ hfs_fork cat_file; /* location and size of catalog file */ hfs_fork attr_file; /* location and size of attributes file */ hfs_fork start_file; /* location and size of startup file */ } hfs_plus_vh; /* ** HFS (non-Plus) Master Directory Block (volume header-like) (used with wrapped HFS+/HFSX file systems) */ typedef struct { uint8_t drSigWord[2]; /* "BD" for HFS (same location as hfs_plus_vh.signature) */ uint8_t drCrDate[4]; /* volume creation date */ uint8_t drLsMod[4]; /* volume last modified date */ uint8_t drAtrb[2]; /* volume attributes */ uint8_t drNmFls[2]; /* number of files on volume */ uint8_t drVBMSt[2]; /* starting block for volume bitmap */ uint8_t drAllocPtr[2]; /* start of next allocation search */ uint8_t drNmAlBlks[2]; /* number of blocks on disk */ uint8_t drAlBlkSiz[4]; /* size in bytes of each allocation block */ uint8_t drClpSiz[4]; /* default clump size for volume */ uint8_t drAlBlSt[2]; /* first allocation block, in 512-byte sectors */ uint8_t drNxtCNID[4]; /* next unused catalog node ID */ uint8_t drFreeBlks[2]; /* number of unused allocation blocks */ uint8_t drVN[28]; /* volume name, where first byte is length */ uint8_t drVolBkUp[4]; /* volume last backup date */ uint8_t drVSeqNum[2]; /* volume sequence number */ uint8_t drWrCnt[4]; /* write count */ uint8_t drXTClpSiz[4]; /* clump size for extents overflow file */ uint8_t drCTClpSiz[4]; /* clump size for catalog file */ uint8_t drNmRtDirs[2]; /* number of folders in root directory */ uint8_t drFilCnt[4]; /* number of files on volume */ uint8_t drDirCnt[4]; /* number of directories on volume */ uint8_t drFndrInfo[32]; /* Finder info */ uint8_t drEmbedSigWord[2]; /* signature of the embedded HFS+ volume (eg, "H+") - 0x7c offset */ uint8_t drEmbedExtent_startBlock[2]; /* extent descriptor for start of embedded volume */ uint8_t drEmbedExtent_blockCount[2]; /* extent descriptor for num of blks in of embedded volume */ uint8_t drXTFlSize[4]; /* size of the extents overflow file */ uint8_t drXTExtRec[12]; /* extent record with size and location of extents overflow file */ uint8_t drCTFlSize[4]; /* size of the catalog file */ uint8_t drCTExtRec[12]; /* extent record with size and location of catalog file */ } hfs_mdb; /********* B-Tree data structures **********/ /* Node descriptor that starts each node in a B-tree */ // type values #define HFS_BT_NODE_TYPE_LEAF -1 #define HFS_BT_NODE_TYPE_IDX 0 #define HFS_BT_NODE_TYPE_HEAD 1 #define HFS_BT_NODE_TYPE_MAP 2 // header that starts every B-tree node typedef struct { uint8_t flink[4]; /* node num of next node of same type */ uint8_t blink[4]; /* node num of prev node of same type */ int8_t type; /* type of this node */ uint8_t height; /* level in B-tree (0 for root, 1 
for leaf) */ uint8_t num_rec[2]; /* number of records this node */ uint8_t res[2]; /* reserved */ } hfs_btree_node; /*****************/ // structure for the 1st record in the B-Tree header node // type values #define HFS_BT_HEAD_TYPE_CNTL 0 // control file (catalog, extents, attributes) #define HFS_BT_HEAD_TYPE_USER 128 // hot file #define HFS_BT_HEAD_TYPE_RSV 255 // compType values #define HFS_BT_HEAD_COMP_SENS 0xBC // case sensitive #define HFS_BT_HEAD_COMP_INSENS 0xC7 // case insensitive // attr values #define HFS_BT_HEAD_ATTR_BIGKEYS 0x00000002 /* key length field is 16 bits (req'd for HFS+) */ #define HFS_BT_HEAD_ATTR_VARIDXKEYS 0x00000004 /* Keys in INDEX nodes are variable length */ // NOTE: VARIDXKEYS is required for the Catalog B-tree and cleared for the Extents B-tree typedef struct { uint8_t depth[2]; /* current depth of btree */ uint8_t rootNode[4]; /* node number of root node */ uint8_t leafRecords[4]; /* number of records in leaf nodes */ uint8_t firstLeafNode[4]; /* number of first leaf node (0 if no leafs) */ uint8_t lastLeafNode[4]; /* number of last leaf node (0 if no leafs) */ uint8_t nodesize[2]; /* byte size of each node (512..32768) */ uint8_t maxKeyLen[2]; /* max key length in an index or leaf node */ uint8_t totalNodes[4]; /* number of nodes in btree (free or in use) */ uint8_t freeNodes[4]; /* unused nodes in btree */ uint8_t res[2]; /* reserved */ uint8_t clumpSize[4]; /* clump size */ uint8_t type; /* btree type (control or user) */ uint8_t compType; /* HFSX Only: identifies of key comparisons are case sensitive */ uint8_t attr[4]; /* attributes */ uint8_t res2[64]; /* reserved */ } hfs_btree_header_record; /* key for catalog records */ typedef struct { uint8_t key_len[2]; // length of key minus 2 uint8_t parent_cnid[4]; hfs_uni_str name; } hfs_btree_key_cat; /* Key for extents records */ // fork_type values #define HFS_EXT_KEY_TYPE_DATA 0x00 // extents key is for data fork #define HFS_EXT_KEY_TYPE_RSRC 0xFF // extents key is for resource fork typedef struct { uint8_t key_len[2]; // length of key minus 2 (should always be 10) uint8_t fork_type; // data or resource fork uint8_t pad; // reserved uint8_t file_id[4]; // the cnid that this key is for uint8_t start_block[4]; // the offset in the file (in blocks) that this run is for } hfs_btree_key_ext; /* Record contents for index record after key */ typedef struct { uint8_t childNode[4]; } hfs_btree_index_record; /***************** ATTRIBUTES FILE ******************************/ typedef struct { uint8_t key_len[2]; uint8_t pad[2]; uint8_t file_id[4]; uint8_t start_block[4]; uint8_t attr_name_len[2]; uint8_t attr_name[254]; } hfs_btree_key_attr; typedef struct { uint8_t record_type[4]; // HFS_ATTRIBUTE_RECORD_INLINE_DATA uint8_t reserved[8]; uint8_t attr_size[4]; uint8_t attr_data[2]; /* variable length data */ } hfs_attr_data; /* Each leaf record in the Attributes file has one of these types. However, * only "INLINE_DATA" is ever used by Apple. We check the value of the flag, * but count it as an error if either of the other two values is found. */ #define HFS_ATTR_RECORD_INLINE_DATA 0x10 #define HFS_ATTR_RECORD_FORK_DATA 0x20 #define HFS_ATTR_RECORD_EXTENTS 0x30 // Maximum UTF8 size of an attribute name = 127 * 3 + 1; // 382 #define MAX_ATTR_NAME_LENGTH 382 /* * If a file is compressed, then it will have an extended attribute * with name com.apple.decmpfs. The value of that attribute is a data * structure, arranged as shown in the following struct, possibly followed * by some actual compressed data. 
* * If compression_type = 3, then data follows this compression header, in-line. * If the first byte of that data is 0xF, then the data is not really compressed, so * the following bytes are the data. Otherwise, the data following the compression * header is zlib-compressed. * * If the compression_type = 4, then compressed data is stored in the file's resource * fork, in a resource of type CMPF. There will be a single resource in the fork, and * it will have this type. The beginning of the resource is a table of offsets for * successive compression units within the resource. */ typedef struct { /* this structure represents the xattr on disk; the fields below are little-endian */ uint8_t compression_magic[4]; uint8_t compression_type[4]; uint8_t uncompressed_size[8]; unsigned char attr_bytes[0]; /* the bytes of the attribute after the header, if any. */ } DECMPFS_DISK_HEADER; #define COMPRESSION_UNIT_SIZE 65536U /********* CATALOG Record structures *********/ typedef struct { int8_t v[2]; int8_t h[2]; } hfs_point; #define HFS_FINDER_FLAG_NAME_LOCKED 0x1000 #define HFS_FINDER_FLAG_HAS_BUNDLE 0x2000 #define HFS_FINDER_FLAG_IS_INVISIBLE 0x4000 #define HFS_FINDER_FLAG_IS_ALIAS 0x8000 // Finder info stored in file and folder structures typedef struct { uint8_t file_type[4]; /* file type */ uint8_t file_cr[4]; /* file creator */ uint8_t flags[2]; /* finder flags */ hfs_point loc; /* location in the folder */ uint8_t res[2]; /* reserved */ } hfs_fileinfo; typedef struct { uint8_t res1[8]; /* reserved 1 */ uint8_t extflags[2]; /* extended finder flags */ uint8_t res2[2]; /* reserved 2 */ uint8_t folderid[4]; /* putaway folder id */ } hfs_extendedfileinfo; /* Note that the file, folder, and thread structures all * start with a 2-byte type field. */ // values for rec_type Record Type fields #define HFS_FOLDER_RECORD 0x0001 #define HFS_FILE_RECORD 0X0002 #define HFS_FOLDER_THREAD 0x0003 #define HFS_FILE_THREAD 0x0004 // the start of the folder and file catalog entries are the same typedef struct { uint8_t rec_type[2]; /* record type */ uint8_t flags[2]; /* Flags (reserved (0) for folders) */ uint8_t valence[4]; /* valence - items in this folder (folders only) */ uint8_t cnid[4]; /* CNID of this file or folder */ uint8_t crtime[4]; /* create date */ uint8_t cmtime[4]; /* content modification date (m-time) */ uint8_t amtime[4]; /* attribute mod date (c-time) */ uint8_t atime[4]; /* access date */ uint8_t bkup_date[4]; /* backup date */ hfs_access_perm perm; /* permissions */ hfs_fileinfo u_info; /* user info (Used by Finder) */ hfs_extendedfileinfo f_info; /* finder info */ uint8_t text_enc[4]; /* text encoding hint for file name */ uint8_t res2[4]; /* reserved 2 */ } hfs_file_fold_std; // structure for folder data in catalog leaf records typedef struct { hfs_file_fold_std std; /* standard data that files and folders share */ } hfs_folder; // value for flags in hfs_file #define HFS_FILE_FLAG_LOCKED 0x0001 /* file is locked */ #define HFS_FILE_FLAG_THREAD 0x0002 /* File has a thread entry */ // @@@ BC: I Can't find a reference to these values... 
#define HFS_FILE_FLAG_ATTR 0x0004 /* file has extended attributes */ #define HFS_FILE_FLAG_ACL 0x0008 /* file has security data (ACLs) */ // structure for file data in catalog leaf records typedef struct { hfs_file_fold_std std; /* standard data that files and folders share */ hfs_fork data; /* data fork */ hfs_fork resource; /* resource fork */ } hfs_file; // structure for thread data in catalog leaf records typedef struct { uint8_t rec_type[2]; /* == kHFSPlusFolderThreadRecord or kHFSPlusFileThreadRecord */ uint8_t res[2]; /* reserved - initialized as zero */ uint8_t parent_cnid[4]; /* parent ID for this catalog node */ hfs_uni_str name; /* name of this catalog node (variable length) */ } hfs_thread; // internally used structure to pass around both files and folders typedef union { hfs_folder folder; hfs_file file; } hfs_file_folder; typedef struct { TSK_FS_INFO fs_info; /* SUPER CLASS */ hfs_plus_vh *fs; /* cached superblock */ char is_case_sensitive; /* lock protects blockmap_file, blockmap_attr, blockmap_cache, blockmap_cache_start, blockmap_cache_len */ tsk_lock_t lock; TSK_FS_FILE *blockmap_file; //(r/w shared - lock) const TSK_FS_ATTR *blockmap_attr; // (r/w shared - lock) char blockmap_cache[4096]; ///< Cache for blockmap (r/w shared - lock) TSK_OFF_T blockmap_cache_start; ///< Byte offset of blockmap where cache starts (r/w shared - lock) size_t blockmap_cache_len; ///< Length of cache that is being used (r/w shared - lock) TSK_FS_FILE *catalog_file; const TSK_FS_ATTR *catalog_attr; hfs_btree_header_record catalog_header; TSK_FS_FILE *extents_file; const TSK_FS_ATTR *extents_attr; hfs_btree_header_record extents_header; TSK_OFF_T hfs_wrapper_offset; /* byte offset of this FS within an HFS wrapper */ /* Creation times needed for hard link recognition */ time_t root_crtime; // creation time of the root directory, cnid = 2 time_t meta_crtime; // creation time of the dir with path /^^^^HFS+ Private Data (those are nulls) time_t metadir_crtime; // creation time of dir with path /.HFS+ Private Directory Data^ (that's a carriage return) unsigned char has_root_crtime; // Boolean -- are the crtime fields set? unsigned char has_meta_crtime; unsigned char has_meta_dir_crtime; TSK_INUM_T meta_inum; TSK_INUM_T meta_dir_inum; // We cache the two metadata directory structures here, to speed up hard link resolution TSK_FS_DIR *meta_dir; TSK_FS_DIR *dir_meta_dir; // We need a lock to protect the two metadata directory caches (if this is multi-threaded) // and will also use this to protect the rest of the HFS_INFO struct. tsk_lock_t metadata_dir_cache_lock; // These special files are optional. unsigned char has_extents_file; // and also the Bad Blocks file unsigned char has_startup_file; unsigned char has_attributes_file; } HFS_INFO; typedef struct { hfs_file cat; /* on-disk catalog record (either hfs_file or hfs_folder) */ int flags; /* flags for on-disk record */ TSK_INUM_T inum; /* cnid */ hfs_thread thread; /* thread record */ } HFS_ENTRY; /****************** Resource File Structures *****************/ typedef struct { uint8_t dataOffset[4]; uint8_t mapOffset[4]; uint8_t dataLength[4]; uint8_t mapLength[4]; } hfs_resource_fork_header; typedef struct { uint8_t length[4]; uint8_t data[2]; // Variable length } hfs_resource; typedef struct { uint8_t reserved1[16]; // copy of resource fork header uint8_t reserved2[4]; //handle to next resource map uint8_t reserved3[2]; // file reference number uint8_t fork_attributes[2]; //?? 
uint8_t typeListOffset[2]; // Actually, points to a 2-byte count of types (minus 1) uint8_t nameListOffset[2]; // could be the end of the fork or zero, if there is no name list. } hfs_resource_fork_map_header; typedef struct { unsigned char type[4]; uint8_t count[2]; // number of resources of this type, minus 1 uint8_t offset[2]; // offset from beginning of type list to reference list for this type. } hfs_resource_type_list_item; typedef struct { uint8_t typeCount[2]; // number of types minus one hfs_resource_type_list_item type[]; // Variable length } hfs_resource_type_list; typedef struct { uint8_t resID[2]; uint8_t resNameOffset[2]; //SIGNED offset from beginning of name list, or -1 uint8_t resAttributes[1]; // ?? uint8_t resDataOffset[3]; // from beginning of resource data to data for this resource uint8_t reserved[4]; // handle to resource } hfs_resource_refListItem; /************** JOURNAL ******************/ /* HFS Journal Info Block */ typedef struct { uint8_t flags[4]; uint8_t dev_sig[32]; uint8_t offs[8]; uint8_t size[8]; uint8_t res[128]; } hfs_journ_sb; /* * Prototypes */ extern uint8_t hfs_checked_read_random(TSK_FS_INFO *, char *, size_t, TSK_OFF_T); //extern uint8_t hfs_uni2ascii(TSK_FS_INFO *, uint8_t *, int, char *, int); // replaced by: extern uint8_t hfs_UTF16toUTF8(TSK_FS_INFO *, uint8_t *, int, char *, int, uint32_t); extern int hfs_unicode_compare(HFS_INFO *, const hfs_uni_str *, const hfs_uni_str *); extern uint16_t hfs_get_idxkeylen(HFS_INFO * hfs, uint16_t keylen, const hfs_btree_header_record * header); extern TSK_RETVAL_ENUM hfs_dir_open_meta(TSK_FS_INFO *, TSK_FS_DIR **, TSK_INUM_T); extern int hfs_name_cmp(TSK_FS_INFO *, const char *, const char *); extern uint8_t hfs_jopen(TSK_FS_INFO *, TSK_INUM_T); extern uint8_t hfs_jblk_walk(TSK_FS_INFO *, TSK_DADDR_T, TSK_DADDR_T, int, TSK_FS_JBLK_WALK_CB, void *); extern uint8_t hfs_jentry_walk(TSK_FS_INFO *, int, TSK_FS_JENTRY_WALK_CB, void *); extern TSK_INUM_T hfs_follow_hard_link(HFS_INFO * hfs, hfs_file * entry, unsigned char *is_error); extern uint8_t hfs_cat_file_lookup(HFS_INFO * hfs, TSK_INUM_T inum, HFS_ENTRY * entry, unsigned char follow_hard_link); extern void error_returned(char *errstr, ...); extern void error_detected(uint32_t errnum, char *errstr, ...); typedef uint8_t(*TSK_HFS_BTREE_CB) (HFS_INFO *, int8_t level_type, const void *targ_key, const hfs_btree_key_cat * cur_key, TSK_OFF_T key_off, void *); // return values for callback #define HFS_BTREE_CB_IDX_LT 1 // current key is less than target (keeps looking in node) #define HFS_BTREE_CB_IDX_EQGT 2 // current key is equal or greater than target (stops) #define HFS_BTREE_CB_LEAF_GO 3 // keep on going to the next key in the leaf node #define HFS_BTREE_CB_LEAF_STOP 4 // stop processing keys in the leaf node #define HFS_BTREE_CB_ERR 5 extern uint8_t hfs_cat_traverse(HFS_INFO * hfs, const void *targ_data, TSK_HFS_BTREE_CB a_cb, void *ptr); #endif sleuthkit-4.2.0/tsk/fs/tsk_iso9660.h000644 000765 000024 00000055440 12576320700 017717 0ustar00carrierstaff000000 000000 /* ** The Sleuth Kit ** ** This software is subject to the IBM Public License ver. 1.0, ** which was displayed prior to download and is included in the readme.txt ** file accompanying the Sleuth Kit files. It may also be requested from: ** Crucial Security Inc. ** 14900 Conference Center Drive ** Chantilly, VA 20151 ** ** Wyatt Banks [wbanks@crucialsecurity.com] ** Copyright (c) 2005 Crucial Security Inc. All rights reserved. 
** ** Brian Carrier [carrier sleuthkit [dot] org] ** Copyright (c) 2003-2011 Brian Carrier. All rights reserved ** ** Copyright (c) 1997,1998,1999, International Business Machines ** Corporation and others. All Rights Reserved. */ /* TCT * LICENSE * This software is distributed under the IBM Public License. * AUTHOR(S) * Wietse Venema * IBM T.J. Watson Research * P.O. Box 704 * Yorktown Heights, NY 10598, USA --*/ /* ** You may distribute the Sleuth Kit, or other software that incorporates ** part of all of the Sleuth Kit, in object code form under a license agreement, ** provided that: ** a) you comply with the terms and conditions of the IBM Public License ** ver 1.0; and ** b) the license agreement ** i) effectively disclaims on behalf of all Contributors all warranties ** and conditions, express and implied, including warranties or ** conditions of title and non-infringement, and implied warranties ** or conditions of merchantability and fitness for a particular ** purpose. ** ii) effectively excludes on behalf of all Contributors liability for ** damages, including direct, indirect, special, incidental and ** consequential damages such as lost profits. ** iii) states that any provisions which differ from IBM Public License ** ver. 1.0 are offered by that Contributor alone and not by any ** other party; and ** iv) states that the source code for the program is available from you, ** and informs licensees how to obtain it in a reasonable manner on or ** through a medium customarily used for software exchange. ** ** When the Sleuth Kit or other software that incorporates part or all of ** the Sleuth Kit is made available in source code form: ** a) it must be made available under IBM Public License ver. 1.0; and ** b) a copy of the IBM Public License ver. 1.0 must be included with ** each copy of the program. */ /* * Contains the structures and function APIs for ISO9660 file system support. */ /* refernece documents used: * IEEE P1281 - System Use Sharing Protocol, version 1.12 * IEEE P1282 - Rock Ridge Interchange Protocol, version 1.12 * ECMA-119 - Volume and File Structure of CDROM for Information Interchange, * 2nd Edition */ #ifndef _TSK_ISO9660_H #define _TSK_ISO9660_H /* This part borrowed from the bsd386 isofs */ #define ISODCL(from, to) (to - from + 1) /* * Constants */ #define ISO9660_FIRSTINO 0 #define ISO9660_ROOTINO 0 #define ISO9660_FILE_CONTENT_LEN sizeof(TSK_DADDR_T) #define ISO9660_SBOFF 32768 #define ISO9660_SSIZE_B 2048 #define ISO9660_MIN_BLOCK_SIZE 512 #define ISO9660_MAX_BLOCK_SIZE 2048 #define ISO9660_MAGIC "CD001" /* values used in volume descriptor type */ #define ISO9660_BOOT_RECORD 0 /* boot record */ #define ISO9660_PRIM_VOL_DESC 1 /* primary volume descriptor */ #define ISO9660_SUPP_VOL_DESC 2 /* supplementary volume descriptor */ #define ISO9660_VOL_PART_DESC 3 /* volume partition descriptor */ #define ISO9660_RESERVE_FLOOR 4 /* 4-254 are reserved */ #define ISO9660_RESERVE_CEIL 254 #define ISO9660_VOL_DESC_SET_TERM 255 /* volume descriptor set terminator */ #define ISO9660_MAXNAMLEN_STD 128 ///< Maximum length of standard name #define ISO9660_MAXNAMLEN_JOL 128 ///< maximum UTF-16 joliet name (in bytes) #define ISO9660_MAXNAMLEN (ISO9660_MAXNAMLEN_JOL << 1) // mult jol by 2 to account for UTF-16 to UTF-8 conversion /* Bits in permissions used in extended attribute records. 
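 * These six values also appear among the fuller ISO_EA_* permission
 * defines later in this header.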
*/ #define ISO9660_BIT_UR 0x0010 #define ISO9660_BIT_UX 0x0040 #define ISO9660_BIT_GR 0x0100 #define ISO9660_BIT_GX 0x0400 #define ISO9660_BIT_AR 0x1000 #define ISO9660_BIT_AX 0x4000 /* directory descriptor flags */ #define ISO9660_FLAG_HIDE 0x01 /* Hide file -- called EXISTENCE */ #define ISO9660_FLAG_DIR 0x02 /* Directory */ #define ISO9660_FLAG_ASSOC 0x04 /* File is associated */ #define ISO9660_FLAG_RECORD 0X08 /* Record format in extended attr */ #define ISO9660_FLAG_PROT 0X10 /* No read / exec perm in ext attr */ #define ISO9660_FLAG_RES1 0X20 /* reserved */ #define ISO9660_FLAG_RES2 0x40 /* reserved */ #define ISO9660_FLAG_MULT 0X80 /* not final entry of mult ext file */ /* POSIX modes used in ISO9660 not already defined */ #define MODE_IFSOCK 0140000 /* socket */ #define MODE_IFLNK 0120000 /* symbolic link */ #define MODE_IFDIR 0040000 /* directory */ #define MODE_IFIFO 0010000 /* pipe or fifo */ #define MODE_IFBLK 0060000 /* block special */ #define MODE_IFCHR 0020000 /* character special */ /* used to determine if get directory entry function needs to handle Joliet */ #define ISO9660_TYPE_PVD 0 #define ISO9660_TYPE_SVD 1 #define ISO9660_CTYPE_ASCII 0 #define ISO9660_CTYPE_UTF16 1 /* recording date and time */ typedef struct { uint8_t year; /* years since 1900 */ uint8_t month; /* 1-12 */ uint8_t day; /* 1-31 */ uint8_t hour; /* 0-23 */ uint8_t min; /* 0-59 */ uint8_t sec; /* 0-59 */ int8_t gmt_off; /* greenwich mean time offset (in 15 minute intervals) */ } record_data; /* iso 9660 directory record */ typedef struct { uint8_t entry_len; /* length of directory record */ uint8_t ext_len; /* extended attribute record length */ uint8_t ext_loc_l[4]; /* location of extent - le */ uint8_t ext_loc_m[4]; /* location of extent - be */ uint8_t data_len_l[4]; /* data length - le */ uint8_t data_len_m[4]; /* data length - be */ record_data rec_time; /* recording date and time (7 bytes) */ int8_t flags; /* file flags */ uint8_t unit_sz; /* file unit size */ uint8_t gap_sz; /* interleave gap size */ uint8_t seq[4]; /* volume sequence number (2|16) */ uint8_t fi_len; /* length of file identifier in bytes */ } iso9660_dentry; /* This is a dummy struct used to make reading an entire PVD easier, * due to the fact that the root directory has a 1 byte name that * wouldn't be worth adding to the regular struct. */ typedef struct { uint8_t length; /* length of directory record */ uint8_t ext_len; /* extended attribute record length */ uint8_t ext_loc_l[4]; /* location of extent - le */ uint8_t ext_loc_m[4]; /* location of extent - be */ uint8_t data_len_l[4]; /* data length - le */ uint8_t data_len_m[4]; /* data length - be */ record_data rec; /* recording date and time */ int8_t flags; /* file flags */ uint8_t unit_sz; /* file unit size */ uint8_t gap_sz; /* interleave gap size */ uint8_t seq[4]; /* volume sequence number (2|16) */ uint8_t len; /* length of file identifier */ char name; } iso9660_root_dentry; /* data and time format * all are stored as "digits" according to specifications for iso9660 */ typedef struct { uint8_t year[4]; /* 1 to 9999 */ uint8_t month[2]; /* 1 to 12 */ uint8_t day[2]; /* 1 to 31 */ uint8_t hour[2]; /* 0 to 23 */ uint8_t min[2]; /* 0 to 59 */ uint8_t sec[2]; /* 0 to 59 */ uint8_t hun[2]; /* hundredths of a second */ uint8_t gmt_off; /* GMT offset */ } date_time; /* generic volume descriptor */ typedef struct { uint8_t type; ///< volume descriptor type char magic[ISODCL(2, 6)]; ///< magic number. 
"CD001" char ver[ISODCL(7, 7)]; ///< volume descriptor version char x[ISODCL(8, 2048)]; ///< Depends on descriptor type } iso9660_gvd; /* primary volume descriptor */ typedef struct { char unused1[ISODCL(1, 8)]; /* should be 0. unused. */ char sys_id[ISODCL(9, 40)]; /* system identifier */ char vol_id[ISODCL(41, 72)]; /* volume identifier */ char unused2[ISODCL(73, 80)]; /* should be 0. unused. */ uint8_t vs_sz_l[ISODCL(81, 84)]; /* volume space size in blocks - le */ uint8_t vs_sz_m[ISODCL(85, 88)]; /* volume space size in blocks - be */ char unused3[ISODCL(89, 120)]; /* should be 0. unused. */ uint8_t vol_set_l[ISODCL(121, 122)]; /* volume set size - le */ uint8_t vol_set_m[ISODCL(123, 124)]; /* volume set size - be */ uint8_t vol_seq_l[ISODCL(125, 126)]; /* volume sequence number -le */ uint8_t vol_seq_m[ISODCL(127, 128)]; /* volume sequence number - be */ uint8_t blk_sz_l[ISODCL(129, 130)]; /* logical block size - le */ uint8_t blk_sz_m[ISODCL(131, 132)]; /* logical block size - be */ uint8_t pt_size_l[ISODCL(133, 136)]; /* path table size in bytes - le */ uint8_t pt_size_m[ISODCL(137, 140)]; /* path table size in bytes - be */ uint8_t pt_loc_l[ISODCL(141, 144)]; /* log block addr of type L path tbl. */ uint8_t pt_opt_loc_l[ISODCL(145, 148)]; /* log block addr of optional L path tbl */ uint8_t pt_loc_m[ISODCL(149, 152)]; /* log block addr of type M path tbl. */ uint8_t pt_opt_loc_m[ISODCL(153, 156)]; /* log block addr of optional M path tbl */ iso9660_root_dentry dir_rec; /* directory record for root dir */ char vol_setid[ISODCL(191, 318)]; /* volume set identifier */ unsigned char pub_id[ISODCL(319, 446)]; /* publisher identifier */ unsigned char prep_id[ISODCL(447, 574)]; /* data preparer identifier */ unsigned char app_id[ISODCL(575, 702)]; /* application identifier */ unsigned char copy_id[ISODCL(703, 739)]; /* copyright file identifier */ unsigned char abs_id[ISODCL(740, 776)]; /* abstract file identifier */ unsigned char bib_id[ISODCL(777, 813)]; /* bibliographic file identifier */ date_time make_date; /* volume creation date/time */ date_time mod_date; /* volume modification date/time */ date_time exp_date; /* volume expiration date/time */ date_time ef_date; /* volume effective date/time */ uint8_t fs_ver; /* file structure version */ char res[ISODCL(883, 883)]; /* reserved */ char app_use[ISODCL(884, 1395)]; /* application use */ char reserv[ISODCL(1396, 2048)]; /* reserved */ } iso9660_pvd; /* supplementary volume descriptor */ typedef struct { uint8_t flags[ISODCL(1, 8)]; /* volume flags */ char sys_id[ISODCL(9, 40)]; /* system identifier */ char vol_id[ISODCL(41, 72)]; /* volume identifier */ char unused2[ISODCL(73, 80)]; /* should be 0. unused. */ uint8_t vs_sz_l[ISODCL(81, 84)]; /* volume space size in blocks - le */ uint8_t vs_sz_m[ISODCL(85, 88)]; /* volume space size in blocks - be */ uint8_t esc_seq[ISODCL(89, 120)]; /* escape sequences */ uint8_t vol_set_l[ISODCL(121, 122)]; /* volume set size - le */ uint8_t vol_set_m[ISODCL(123, 124)]; /* volume set size - be */ uint8_t vol_seq_l[ISODCL(125, 126)]; /* volume sequence number -le */ uint8_t vol_seq_m[ISODCL(127, 128)]; /* volume sequence number - be */ uint8_t blk_sz_l[ISODCL(129, 130)]; /* logical block size - le */ uint8_t blk_sz_m[ISODCL(131, 132)]; /* logical block size - be */ uint8_t pt_size_l[ISODCL(133, 136)]; /* path table size in bytes - le */ uint8_t pt_size_m[ISODCL(137, 140)]; /* path table size in bytes - be */ uint8_t pt_loc_l[ISODCL(141, 144)]; /* log block addr of type L path tbl. 
*/ uint8_t pt_opt_loc_l[ISODCL(145, 148)]; /* log block addr of optional type L path tbl. */ uint8_t pt_loc_m[ISODCL(149, 152)]; /* log block addr of type M path tbl. */ uint8_t pt_opt_loc_m[ISODCL(153, 156)]; /* log block addr of optional type M path tbl. */ iso9660_root_dentry dir_rec; /* directory record for root dir */ char vol_setid[ISODCL(191, 318)]; /* volume set identifier */ unsigned char pub_id[ISODCL(319, 446)]; /* publisher identifier */ unsigned char prep_id[ISODCL(447, 574)]; /* data preparer identifier */ unsigned char app_id[ISODCL(575, 702)]; /* application identifier */ unsigned char copy_id[ISODCL(703, 739)]; /* copyright file identifier */ unsigned char abs_id[ISODCL(740, 776)]; /* abstract file identifier */ unsigned char bib_id[ISODCL(777, 813)]; /* bibliographic file identifier */ date_time make_date; /* volume creation date/time */ date_time mod_date; /* volume modification date/time */ date_time exp_date; /* volume expiration date/time */ date_time ef_date; /* volume effective date/time */ char fs_ver[ISODCL(882, 882)]; /* file structure version */ char res[ISODCL(883, 883)]; /* reserved */ char app_use[ISODCL(884, 1395)]; /* application use */ char reserv[ISODCL(1396, 2048)]; /* reserved */ } iso9660_svd; /* iso 9660 boot record */ typedef struct { char boot_sys_id[ISODCL(8, 39)]; /* boot system identifier */ char boot_id[ISODCL(40, 71)]; /* boot identifier */ char system_use[ISODCL(72, 2048)]; /* system use */ } iso_bootrec; /* path table record */ typedef struct { uint8_t len_di; /* length of directory identifier */ uint8_t attr_len; /* extended attribute record length */ uint8_t ext_loc[4]; /* location of extent */ uint8_t par_dir[2]; /* parent directory number (its entry in the path table) */ } path_table_rec; /* extended attribute record */ typedef struct { uint8_t uid[ISODCL(1, 4)]; /* owner identification */ uint8_t gid[ISODCL(5, 8)]; /* group identification */ uint8_t mode[ISODCL(9, 10)]; /* permissions */ uint8_t cre[ISODCL(11, 27)]; /* file creation date/time */ uint8_t mod[ISODCL(28, 44)]; /* file modification d/t */ uint8_t exp[ISODCL(45, 61)]; /* file expiration d/t */ uint8_t eff[ISODCL(62, 78)]; /* file effective d/t */ uint8_t fmt[ISODCL(79, 79)]; /* record format */ uint8_t attr[ISODCL(80, 80)]; /* record attributes */ uint8_t len[ISODCL(81, 84)]; /* record length */ uint8_t sys_id[ISODCL(85, 116)]; /* system identifier */ uint8_t uns[ISODCL(117, 180)]; /* system use, not specified */ uint8_t e_ver[ISODCL(181, 181)]; /* extended attribute record version */ uint8_t len_esc[ISODCL(182, 182)]; /* length of escape sequences */ } ext_attr_rec; #define ISO_EA_IRSYS 0x0001 #define ISO_EA_IWSYS 0x0002 #define ISO_EA_IXSYS 0x0004 #define ISO_EA_IRUSR 0x0010 #define ISO_EA_IWUSR 0x0020 #define ISO_EA_IXUSR 0x0040 #define ISO_EA_IRGRP 0x0100 #define ISO_EA_IWGRP 0x0200 #define ISO_EA_IXGRP 0x0400 #define ISO_EA_IROTH 0x1000 #define ISO_EA_IWOTH 0x2000 #define ISO_EA_IXOTH 0x4000 /* primary volume descriptor linked list node */ typedef struct iso9660_pvd_node { iso9660_pvd pvd; struct iso9660_pvd_node *next; } iso9660_pvd_node; /* supplementary volume descriptor linked list node */ typedef struct iso9660_svd_node { iso9660_svd svd; struct iso9660_svd_node *next; } iso9660_svd_node; /* RockRidge extension info */ typedef struct { TSK_UID_T uid /* owner */ ; TSK_GID_T gid; /* group */ uint16_t mode; /* posix file mode */ uint32_t nlink; /* number of links */ char fn[ISO9660_MAXNAMLEN_STD]; /* alternate filename */ } rockridge_ext; /** \internal * Internally 
used structure to hold basic inode information. */ typedef struct { iso9660_dentry dr; /* directory record */ ext_attr_rec *ea; /* extended attribute record */ char fn[ISO9660_MAXNAMLEN + 1]; /* file name */ rockridge_ext *rr; /* RockRidge Extensions */ int version; uint8_t is_orphan; /* 1 if the file was found from processing other volume descriptors besides the first one, 0 otherwise */ TSK_OFF_T susp_off; ///< Byte offset in image of SUSP (or NULL) TSK_OFF_T susp_len; ///< Length in bytes of SUSP } iso9660_inode; /* inode linked list node */ typedef struct iso9660_inode_node { iso9660_inode inode; TSK_OFF_T offset; /* byte offset of first block of file in file system */ TSK_OFF_T dentry_offset; /* byte offset of directory entry structure in file system */ TSK_INUM_T inum; /* identifier of inode (assigned by TSK) */ int size; /* number of bytes in file */ int ea_size; /* length of ext attributes */ struct iso9660_inode_node *next; } iso9660_inode_node; /* The all important ISO_INFO struct */ typedef struct { TSK_FS_INFO fs_info; /* SUPER CLASS */ uint32_t path_tab_addr; /* address of path table */ uint32_t root_addr; /* address of root dir extent */ iso9660_pvd_node *pvd; ///< Head of primary volume descriptor list (there should be only one...) iso9660_svd_node *svd; ///< Head of secondary volume descriptor list iso9660_inode_node *in_list; /* list of inodes */ uint8_t rr_found; /* 1 if rockridge found */ } ISO_INFO; extern TSK_RETVAL_ENUM iso9660_dir_open_meta(TSK_FS_INFO * a_fs, TSK_FS_DIR ** a_fs_dir, TSK_INUM_T a_addr); extern uint8_t iso9660_dinode_load(ISO_INFO * iso, TSK_INUM_T inum, iso9660_inode * dinode); extern int iso9660_name_cmp(TSK_FS_INFO *, const char *, const char *); /********************************************************** * * RockRidge Extensions * **********************************************************/ typedef struct { char sig[2]; uint8_t len; char ver; } iso9660_susp_head; /** \internal * SUSP Continuation Entry (CE) */ typedef struct { char sig[2]; uint8_t len; char ver; uint8_t blk_l[4]; ///< Block location of continuation area uint8_t blk_m[4]; uint8_t offset_l[4]; ///< Offset to start of continuation area (in bytes) uint8_t offset_m[4]; uint8_t celen_l[4]; ///< Length of continuation area (in bytes) uint8_t celen_m[4]; } iso9660_susp_ce; /** \internal * SUSP SP entry */ typedef struct { char sig[2]; uint8_t len; char ver; uint8_t chk[2]; uint8_t skip; } iso9660_susp_sp; typedef struct { char sig[2]; uint8_t len; char ver; uint8_t len_id; ///< length of extension id (in bytes) uint8_t len_des; ///< length of extension desc (in bytes) uint8_t len_src; ///< Length of extension spec source (in bytes) uint8_t ext_ver; ///< Version id char ext_id[1]; ///< Extension ID text (with length of len_id); // next is the extension descriptor text // next is the extension source text } iso9660_susp_er; /* Rockridge ISO9660 system use field entry */ typedef struct { char sig[ISODCL(1, 2)]; /* signature, should be "RR" */ uint8_t len[ISODCL(3, 3)]; /* length of system use entry */ uint8_t ver[ISODCL(4, 4)]; /* system use entry version */ uint8_t foo[ISODCL(5, 5)]; /* foo */ } rr_sys_use; /* Rockridge PX entry */ typedef struct { char sig[ISODCL(1, 2)]; /* signature, should be "PX" */ uint8_t len; /* length, should be 44 */ uint8_t ver; /* system use entry version (1) */ uint8_t mode_l[ISODCL(5, 8)]; /* POSIX file mode - le */ uint8_t mode_m[ISODCL(9, 12)]; /* POSIX file mode - be */ uint8_t links_l[ISODCL(13, 16)]; /* POSIX file links - le */ uint8_t links_m[ISODCL(17, 
20)]; /* POSIX file links - be */ uint8_t uid_l[ISODCL(21, 24)]; /* POSIX user id - le */ uint8_t uid_m[ISODCL(25, 28)]; /* POSIX user id - be */ uint8_t gid_l[ISODCL(29, 32)]; /* POSIX group id - le */ uint8_t gid_m[ISODCL(23, 36)]; /* POSIX group id - be */ /* rockridge docs say this is here, k3b disagrees... hmmmm */ // uint8_t serial[ISODCL(37,44)]; /* POSIX file serial number */ } iso9660_rr_px_entry; /* Rockridge PN entry */ typedef struct { char sig[ISODCL(1, 2)]; /* signature, should be "PN" */ uint8_t len; /* length, should be 20 */ uint8_t ver; /* system use entry version (1) */ uint8_t dev_h_l[ISODCL(5, 8)]; /* top 32 bits of device # */ uint8_t dev_h_m[ISODCL(9, 12)]; /* top 32 bits of device # */ uint8_t dev_l_l[ISODCL(13, 16)]; /* low 32 bits of device # */ uint8_t dev_l_m[ISODCL(17, 20)]; /* low 32 bits of device # */ } iso9660_rr_pn_entry; /* Rockridge SL entry */ typedef struct { char sig[ISODCL(1, 2)]; /* signature, should be "SL" */ uint8_t len; /* length */ uint8_t ver; /* system use entry version (1) */ uint8_t flags; /* flags */ } iso9660_rr_sl_entry; /* Rockridge NM entry */ typedef struct { char sig[ISODCL(1, 2)]; /* signature, should be "NM" */ uint8_t len; /* length of alternate name */ uint8_t ver[ISODCL(4, 4)]; /* system use entry version (1) */ uint8_t flags[ISODCL(5, 5)]; /* flags */ char name[1]; // start of the name } iso9660_rr_nm_entry; /* Rockridge CL entry */ typedef struct { char sig[ISODCL(1, 2)]; /* signature, should be "CL" */ uint8_t len[ISODCL(3, 3)]; /* length, should be 12 */ uint8_t ver[ISODCL(4, 4)]; /* system use entry version (1) */ uint8_t par_loc[ISODCL(5, 12)]; /* location of parent directory */ } iso9660_rr_cl_entry; /* Rockridge RE entry */ typedef struct { char sig[ISODCL(1, 2)]; /* signature, should be "RE" */ uint8_t len[ISODCL(3, 3)]; /* length, should be 4 */ uint8_t ver[ISODCL(4, 4)]; /* system use entry version (1) */ } iso9660_rr_re_entry; /* Rockridge TF entry */ typedef struct { char sig[ISODCL(1, 2)]; /* signature, should be "TF" */ uint8_t len[ISODCL(3, 3)]; /* length of TF entry */ uint8_t ver[ISODCL(4, 4)]; /* system use entry version (1) */ uint8_t flags[ISODCL(5, 5)]; /* flags */ } iso9660_rr_tf_entry; /* Rockridge SF entry */ typedef struct { char sig[ISODCL(1, 2)]; /* signature, should be "SF" */ uint8_t len[ISODCL(3, 3)]; /* length, should be 21 */ uint8_t ver[ISODCL(4, 4)]; /* system use entry version (1) */ uint8_t vfs_h[ISODCL(5, 12)]; /* virtual file size high */ uint8_t vfs_l[ISODCL(13, 20)]; /* virtual file size low */ uint8_t depth[ISODCL(21, 21)]; /* table depth */ } iso9660_rr_sf_entry; #endif sleuthkit-4.2.0/tsk/fs/tsk_ntfs.h000644 000765 000024 00000055122 12576320700 017547 0ustar00carrierstaff000000 000000 /* ** The Sleuth Kit ** ** Brian Carrier [carrier sleuthkit [dot] org] ** Copyright (c) 2003-2011 Brian Carrier. All rights reserved ** ** TASK ** Copyright (c) 2002 @stake Inc. All rights reserved ** ** This software is distributed under the Common Public License 1.0 */ /* * Contains the structures and function APIs for NTFS file system support. */ #ifndef _TSK_NTFS_H #define _TSK_NTFS_H #ifdef __cplusplus extern "C" { #endif // the SID code has been buggy on some systems and byitself it does // not provide much security info. It is being disabled until fixed. 
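// Note that TSK_USE_SID is nevertheless defined to 1 below, so the SID and
// security-descriptor structures later in this header are still compiled in.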
#define TSK_USE_SID 1 //#define NTFS_FS_MAGIC 0x5346544E /* "NTFS" in little endian */ #define NTFS_FS_MAGIC 0xAA55 #define NTFS_MAXNAMLEN 256 #define NTFS_MAXNAMLEN_UTF8 4 * NTFS_MAXNAMLEN /* location of the Root Directory inode */ #define NTFS_ROOTINO NTFS_MFT_ROOT #define NTFS_FIRSTINO 0 /* location of the $Mft Record */ #define NTFS_LAST_DEFAULT_INO 16 /* A guess for right now */ #define NTFS_FILE_CONTENT_LEN 0 /* uncompression values */ #define NTFS_TOKEN_MASK 1 #define NTFS_SYMBOL_TOKEN 0 #define NTFS_TOKEN_LENGTH 8 /* (64 * 1024) = 65536 */ #define NTFS_MAX_UNCOMPRESSION_BUFFER_SIZE 65536 /************************************************************************ * Update sequence structure. This is located at upd_off from the * begining of the original structure */ typedef struct { uint8_t upd_val[2]; // what they should be uint8_t upd_seq; // array of size 2*(upd_cnt-1) w/orig vals } ntfs_upd; /************************************************************************ * bootsector * * located in sector 0 in $Boot */ typedef struct { uint8_t f1[3]; // 0 char oemname[8]; // 3 uint8_t ssize[2]; // 11 /* sector size in bytes */ uint8_t csize; // 13 /* sectors per cluster */ uint8_t f2[26]; // 14 uint8_t vol_size_s[8]; // 40 /*size of volume in sectors */ uint8_t mft_clust[8]; // 48 /* location of MFT */ uint8_t mftm_clust[8]; // 56 /* location of MFT mirror */ int8_t mft_rsize_c; // 64 /* number of clusters per mft record */ uint8_t f3[3]; int8_t idx_rsize_c; // 68 /* number of clus per idx rec */ uint8_t f4[3]; uint8_t serial[8]; // 72 /* serial number */ uint8_t f5[430]; //80 uint8_t magic[2]; } ntfs_sb; /************************************************************************ * MFT Entry * * One entry in the MFT - there exists one for each file */ typedef struct { uint8_t magic[4]; uint8_t upd_off[2]; // 4 uint8_t upd_cnt[2]; // 6 size+1 uint8_t lsn[8]; // 8 $LogFile Sequence Number uint8_t seq[2]; // 16 uint8_t link[2]; // 18 uint8_t attr_off[2]; // 20 uint8_t flags[2]; // 22 uint8_t size[4]; // 24 uint8_t alloc_size[4]; //28 uint8_t base_ref[6]; // 32 uint8_t base_seq[2]; // 38 uint8_t next_attrid[2]; // 40 The next id to be assigned uint8_t f1[2]; // XP Only uint8_t entry[4]; // XP Only - Number of this entry } ntfs_mft; /* Magic values for each MFT entry */ #define NTFS_MFT_MAGIC 0x454c4946 #define NTFS_MFT_MAGIC_BAAD 0x44414142 #define NTFS_MFT_MAGIC_ZERO 0x00000000 /* MFT entry flags */ #define NTFS_MFT_INUSE 0x0001 #define NTFS_MFT_DIR 0x0002 /* flags for file_ref */ #define NTFS_MFT_BASE 0 /* set when the base file record */ /* Mask when not zero, which indicates the base file record */ #define NTFS_MFT_FILE_REC 0x00ffffffffffffff /* DEFINED MFT entries - file system metadata files */ #define NTFS_MFT_MFT 0x0 #define NTFS_MFT_MFTMIR 0x1 #define NTFS_MFT_LOG 0x2 #define NTFS_MFT_VOL 0x3 #define NTFS_MFT_ATTR 0x4 #define NTFS_MFT_ROOT 0x5 #define NTFS_MFT_BMAP 0x6 #define NTFS_MFT_BOOT 0x7 #define NTFS_MFT_BAD 0x8 //#define NTFS_MFT_QUOT 0x9 #define NTFS_MFT_SECURE 0x9 #define NTFS_MFT_UPCASE 0xA /************************************************************************ * Attribute Header for resident and non-resident attributes */ typedef struct { uint8_t type[4]; uint8_t len[4]; // 4 - length including header uint8_t res; // 8 - resident flag uint8_t nlen; // 9 - name length uint8_t name_off[2]; // 10 - offset to name uint8_t flags[2]; // 12 uint8_t id[2]; // 14 - unique identifier union { /* Resident Values */ struct { uint8_t ssize[4]; // 16 - size of content uint8_t soff[2]; 
// 20 - offset to content (after name) uint8_t idxflag[2]; // 22 - indexed flag } r; /* Non-resident Values */ struct { uint8_t start_vcn[8]; // 16 - starting VCN of this attribute uint8_t last_vcn[8]; // 24 uint8_t run_off[2]; // 32 - offset to the data runs (after name) uint8_t compusize[2]; // 34 - compression unit size (2^x) uint8_t f1[4]; // 36 uint8_t alen[8]; // 40 allocated size of stream uint8_t ssize[8]; // 48 actual size of stream uint8_t initsize[8]; // 56 initialized steam size } nr; } c; } ntfs_attr; /* values for the res field */ #define NTFS_MFT_RES 0 /* resident */ #define NTFS_MFT_NONRES 1 /* non-resident */ /* Values for flag field * should only exist for $DATA attributes */ #define NTFS_ATTR_FLAG_COMP 0x0001 /* compressed */ #define NTFS_ATTR_FLAG_ENC 0x4000 /* encrypted */ #define NTFS_ATTR_FLAG_SPAR 0x8000 /* sparse */ /* values for the type field */ /* NOTE that the default TSK attribute types are based on these. * Any changes to these should be merged with the defines in tsk_fs.h */ #define NTFS_ATYPE_SI 0x10 // 16 #define NTFS_ATYPE_ATTRLIST 0x20 // 32 #define NTFS_ATYPE_FNAME 0x30 // 48 #define NTFS_ATYPE_VVER 0x40 // 64 (NT) #define NTFS_ATYPE_OBJID 0x40 // 64 (2K) #define NTFS_ATYPE_SEC 0x50 // 80 #define NTFS_ATYPE_VNAME 0x60 // 96 #define NTFS_ATYPE_VINFO 0x70 // 112 #define NTFS_ATYPE_DATA 0x80 // 128 #define NTFS_ATYPE_IDXROOT 0x90 // 144 #define NTFS_ATYPE_IDXALLOC 0xA0 // 160 #define NTFS_ATYPE_BITMAP 0xB0 // 176 #define NTFS_ATYPE_SYMLNK 0xC0 // 192 (NT) #define NTFS_ATYPE_REPARSE 0xC0 // 192 (2K) #define NTFS_ATYPE_EAINFO 0xD0 // 208 #define NTFS_ATYPE_EA 0xE0 // 224 #define NTFS_ATYPE_PROP 0xF0 // (NT) #define NTFS_ATYPE_LOG 0x100 // (2K) /************************************************************************ * File Name Attribute */ typedef struct { uint8_t par_ref[6]; /* file reference to base File Record of parent */ uint8_t par_seq[2]; /* seq num to base File Record of parent */ uint8_t crtime[8]; /* file creation */ uint8_t mtime[8]; /* file altered */ uint8_t ctime[8]; /* mod time for FILE record (MFT Entry) */ uint8_t atime[8]; /* access time */ uint8_t alloc_fsize[8]; uint8_t real_fsize[8]; uint8_t flags[8]; uint8_t nlen; /* length of file name */ uint8_t nspace; uint8_t name; /* in unicode */ } ntfs_attr_fname; /* values for the flags field of attr_fname */ #define NTFS_FNAME_FLAGS_RO 0x0000000000000001 #define NTFS_FNAME_FLAGS_HID 0x0000000000000002 #define NTFS_FNAME_FLAGS_SYS 0x0000000000000004 #define NTFS_FNAME_FLAGS_ARCH 0x0000000000000020 #define NTFS_FNAME_FLAGS_DEV 0x0000000000000040 #define NTFS_FNAME_FLAGS_NORM 0x0000000000000080 #define NTFS_FNAME_FLAGS_TEMP 0x0000000000000100 #define NTFS_FNAME_FLAGS_SPAR 0x0000000000000200 #define NTFS_FNAME_FLAGS_REP 0x0000000000000400 #define NTFS_FNAME_FLAGS_COMP 0x0000000000000800 #define NTFS_FNAME_FLAGS_OFF 0x0000000000001000 #define NTFS_FNAME_FLAGS_NOIDX 0x0000000000002000 #define NTFS_FNAME_FLAGS_ENC 0x0000000000004000 #define NTFS_FNAME_FLAGS_DIR 0x0000000010000000 #define NTFS_FNAME_FLAGS_IDXVIEW 0x0000000020000000 /* values for the name space values of nspace */ #define NTFS_FNAME_POSIX 0 /* case sensitive and any but NULL and \ */ #define NTFS_FNAME_WIN32 1 // insensitive and restricted #define NTFS_FNAME_DOS 2 // 8.3 format of 8-bit chars in uppercase #define NTFS_FNAME_WINDOS 3 // name in WIN32 space that is already DOS /************************************************************************ * Standard Information Attribute */ typedef struct { uint8_t crtime[8]; /* creation date 
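          (stored as a Windows FILETIME: 100-nanosecond intervals since
          Jan 1, 1601 UTC; nt2unixtime(), declared near the end of this
          header, converts these to Unix time)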
*/ uint8_t mtime[8]; /* file altered */ uint8_t ctime[8]; /* MFT Changed */ uint8_t atime[8]; /* last access (read) */ uint8_t dos[4]; /* permissions in DOS Format */ uint8_t maxver[4]; uint8_t ver[4]; uint8_t class_id[4]; uint8_t own_id[4]; uint8_t sec_id[4]; uint8_t quota[8]; uint8_t usn[8]; } ntfs_attr_si; /* DOS Flags values */ #define NTFS_SI_RO 0x0001 #define NTFS_SI_HID 0x0002 #define NTFS_SI_SYS 0x0004 #define NTFS_SI_ARCH 0x0020 #define NTFS_SI_DEV 0x0040 #define NTFS_SI_NORM 0x0080 #define NTFS_SI_TEMP 0x0100 #define NTFS_SI_SPAR 0x0200 #define NTFS_SI_REP 0x0400 #define NTFS_SI_COMP 0x0800 #define NTFS_SI_OFF 0x1000 #define NTFS_SI_NOIDX 0x2000 #define NTFS_SI_ENC 0x4000 /************************************************************************ * Volume Info Attribute */ typedef struct { uint8_t f1[8]; uint8_t maj_ver; uint8_t min_ver; uint8_t flags[2]; uint8_t f2[4]; } ntfs_attr_vinfo; #define NTFS_VINFO_DIRTY 0x0001 // Dirty #define NTFS_VINFO_RESLOG 0x0002 // Resize LogFile #define NTFS_VINFO_UPGRAD 0x0004 // Upgrade on Mount #define NTFS_VINFO_MNTNT4 0x0008 // Mounted on NT4 #define NTFS_VINFO_DELUSN 0x0010 // Delete USN Underway #define NTFS_VINFO_REPOBJ 0x0020 // Repair Object Ids #define NTFS_VINFO_MODCHK 0x8000 // Modified by chkdsk /* versions * NT = Maj=1 Min=2 * 2k = Maj=3 Min=0 * xp = Maj=3 Min=1 */ #define NTFS_VINFO_NT 0x21 #define NTFS_VINFO_2K 0x03 #define NTFS_VINFO_XP 0x13 /************************************************************************ * attribute list */ typedef struct { uint8_t type[4]; // Attribute Type uint8_t len[2]; // length of entry uint8_t nlen; // number of chars in name uint8_t f1; // 7 uint8_t start_vcn[8]; // starting VCN or NTFS_ATTRL_RES uint8_t file_ref[6]; // file reference to new MFT entry uint8_t seq[2]; // 22 uint8_t id[2]; // id (also in the attribute header) uint8_t name; // 26 name in unicode } ntfs_attrlist; #define NTFS_ATTRL_RES 0 /************************************************************************ * runlist * * Used to store the non-resident runs for an attribute. * It is located in the MFT and pointed to by the run_off in the header */ typedef struct { /* lsb 4 bits: num of bytes in run length field * msb 4 bits: num of bytes in run offset field - (LCN) */ uint8_t len; uint8_t buf[32]; } ntfs_runlist; #define NTFS_RUNL_LENSZ(runl) \ (uint8_t)(runl->len & 0x0f) #define NTFS_RUNL_OFFSZ(runl) \ (uint8_t)((runl->len & 0xf0) >> 4) /************************************************************************ * Index root for directories * * the attribute has two parts. The header is general to all index entries * and applies to $IDX_ALLOC as well. The buffer part contains the * index entries that are allocated to $IDX_ROOT. 
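 *
 * A minimal sketch of walking those entries (an illustration only, assuming
 * that begin_off and seqend_off are relative to the start of the
 * ntfs_idxelist header, which is how the NTFS index header is normally
 * interpreted; "fs", "idxroot" and "child_vcn" are hypothetical locals,
 * and the types and macros used are defined just below):
 *
 *   uintptr_t base = (uintptr_t) &idxroot->list;
 *   ntfs_idxentry *e = (ntfs_idxentry *) (base +
 *       tsk_getu32(fs->endian, idxroot->list.begin_off));
 *   uintptr_t stop = base + tsk_getu32(fs->endian, idxroot->list.seqend_off);
 *   while ((uintptr_t) e < stop) {
 *       if (e->flags & NTFS_IDX_SUB)        // entry points to a child node
 *           child_vcn = GET_IDXENTRY_SUB(fs, e);
 *       if (e->flags & NTFS_IDX_LAST)       // last entry has no name/stream
 *           break;
 *       e = (ntfs_idxentry *) ((uintptr_t) e +
 *           tsk_getu16(fs->endian, e->idxlen));
 *   }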
* */ /* * Starting at begin_off is a stream of ntfs_idxentry structures */ typedef struct { uint8_t begin_off[4]; /* offset to start of seq of idx entries */ uint8_t seqend_off[4]; /* offset to end of seq of idx entries */ uint8_t bufend_off[4]; /* offset to end of idx buffer */ uint8_t flags[4]; } ntfs_idxelist; /* value for flags */ #define NTFS_IDXELIST_CHILD 0x1 /* children exist below this node */ /* This is general index information and applies to $IDX_ALLOC as well */ typedef struct { uint8_t type[4]; /* ATYPE that tree is sorted by */ uint8_t collation_rule[4]; uint8_t idxalloc_size_b[4]; /* index alloc size in bytes */ uint8_t idx_size_c; /* index alloc size in clusters */ uint8_t pad[3]; ntfs_idxelist list; } ntfs_idxroot; /************************************************************************ * idxrec * * this is structure for the nodes of the B+ index trees * It contains a list of index entry data structures. Each * buffer corresponds to one node. The $IDX_ALLOC attribute * is an array of these data structures */ typedef struct { uint8_t magic[4]; /* INDX */ uint8_t upd_off[2]; uint8_t upd_cnt[2]; /* size + 1 */ uint8_t lsn[8]; /* $LogFile Sequence Number */ uint8_t idx_vcn[8]; /* vcn in idx alloc attr */ ntfs_idxelist list; } ntfs_idxrec; #define NTFS_IDXREC_MAGIC 0x58444e49 /* INDX */ /************************************************************************ * This structure exists for each file and directory in the tree */ typedef struct { uint8_t file_ref[6]; /* file reference (invalid for last entry) */ uint8_t seq_num[2]; /* file reference (invalid for last entry) */ uint8_t idxlen[2]; /* length of the index entry */ uint8_t strlen[2]; /* length of stream */ uint8_t flags; uint8_t f1[3]; uint8_t stream; /* length of strlen - invalid for last entry */ /* loc of subnode is found in last 8-bytes * of idx entry (idxlen - 8). use macro */ } ntfs_idxentry; #define NTFS_IDX_SUB 0x01 /* Entry points to a sub-node */ #define NTFS_IDX_LAST 0x02 /* last indx entry in the node */ /* return the address of the subnode entry, it is located in the last * 8 bytes of the structure */ #define GET_IDXENTRY_SUB(fs, e) \ (tsk_getu64(fs->endian, (int)e + tsk_getu16(fs->endian, e->idxlen) - 8)) /************************************************************************ */ typedef struct { char label[128]; /* label in unicode */ uint8_t type[4]; uint8_t disp[4]; /* display rule */ uint8_t coll[4]; /* collation rule */ uint8_t flags[4]; uint8_t minsize[8]; /* minimum size */ uint8_t maxsize[8]; /* maximum size */ } ntfs_attrdef; #define NTFS_ATTRDEF_FLAGS_IDX 0x02 #define NTFS_ATTRDEF_FLAGS_RES 0x40 /* always resident */ #define NTFS_ATTRDEF_FLAGS_NONRES 0x80 /* allowed to be non-resident */ /************************************************************************ * OBJECT_ID attribute */ typedef struct { uint8_t objid1[8]; /* object id of file or directory */ uint8_t objid2[8]; uint8_t orig_volid1[8]; /* id of "birth" volume */ uint8_t orig_volid2[8]; uint8_t orig_objid1[8]; /* original object id */ uint8_t orig_objid2[8]; uint8_t orig_domid1[8]; /* id of "birth" domain */ uint8_t orig_domid2[8]; } ntfs_attr_objid; #if TSK_USE_SID /************************************************************************ * Self-relative security descriptor */ typedef struct { uint8_t revision; /* Revision level of the security descriptor. */ uint8_t pad; uint8_t control[2]; /* Flags qualifying the type of the descriptor as well as the following fields. 
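          Because the descriptor is self-relative, the owner, group, sacl
          and dacl fields that follow are byte offsets from the start of
          this structure rather than memory pointers.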
*/ uint8_t owner[4]; /* Byte offset to a SID representing an object's owner. If this is NULL, no owner SID is present in the descriptor. */ uint8_t group[4]; /* Byte offset to a SID representing an object's primary group. If this is NULL, no primary group SID is present in the descriptor. */ uint8_t sacl[4]; /* Byte offset to a system ACL. Only valid, if SE_SACL_PRESENT is set in the control field. If SE_SACL_PRESENT is set but sacl is NULL, a NULL ACL is specified. */ uint8_t dacl[4]; /* Byte offset to a discretionary ACL. Only valid, if SE_DACL_PRESENT is set in the control field. If SE_DACL_PRESENT is set but dacl is NULL, a NULL ACL (unconditionally granting access) is specified. */ } ntfs_self_relative_security_descriptor; /************************************************************************ * Structure used in Security Descriptor lookups */ typedef struct { char *buffer; ///< Buffer to store data in size_t size; ///< Number of bytes in buffer size_t used; ///< Number of records used in the buffer (size depends on type of data stored) } NTFS_SXX_BUFFER; /************************************************************************ * SID attribute */ typedef struct { uint8_t revision; /* Revision */ uint8_t sub_auth_count; /* Sub Authority Count */ uint8_t ident_auth[6]; /* NT Authority ::NOTE:: big endian number */ uint32_t sub_auth[1]; /* At least one sub_auth */ } ntfs_sid; /************************************************************************ * SDS attribute */ typedef struct { uint8_t hash_sec_desc[4]; /* Hash of Security Descriptor */ uint8_t sec_id[4]; /* Security ID */ uint8_t file_off[8]; /* Offset of this entry in this file */ uint8_t ent_size[4]; /* Size of this entry */ ntfs_self_relative_security_descriptor self_rel_sec_desc; /* Self-relative Security Descriptor */ } ntfs_attr_sds; /************************************************************************ * SDH attribute */ typedef struct { uint8_t data_off[2]; /* Offset to data */ uint8_t size[2]; /* Size of data */ uint8_t pad1[4]; /* Padding */ uint8_t ent_size[2]; /* Size of Index Entry */ uint8_t key_size[2]; /* Size of Index Key */ uint8_t flags[2]; /* Flags */ uint8_t pad2[2]; /* Padding */ uint8_t key_hash_sec_desc[4]; /* Hash of Security Descriptor */ uint8_t key_sec_id[4]; /* Security ID */ uint8_t data_hash_sec_desc[4]; /* Hash of Security Descriptor */ uint8_t data_sec_id[4]; /* Security ID */ uint8_t sec_desc_off[8]; /* Offset to Security Descriptor (in $SDS) */ uint8_t sec_desc_size[4]; /* Size of Security Descriptor (in $SDS) */ uint8_t pad3[4]; /* Padding */ } ntfs_attr_sdh; /************************************************************************ * SII attribute */ typedef struct { uint8_t data_off[2]; /* Offset to data */ uint8_t size[2]; /* Size of data */ uint8_t pad1[4]; /* Padding */ uint8_t ent_size[2]; /* Size of Index Entry */ uint8_t key_size[2]; /* Size of Index Key */ uint8_t flags[2]; /* Flags */ uint8_t pad2[2]; /* Padding */ uint8_t key_sec_id[4]; /* Security ID */ uint8_t data_hash_sec_desc[4]; /* Hash of Security Descriptor */ uint8_t data_sec_id[4]; /* Security ID */ uint8_t sec_desc_off[8]; /* Offset to Security Descriptor (in $SDS) */ uint8_t sec_desc_size[4]; /* Size of Security Descriptor (in $SDS) */ } ntfs_attr_sii; #endif /************************************************************************ */ typedef struct { TSK_FS_INFO fs_info; /* super class */ ntfs_sb *fs; uint8_t ver; /* version of NTFS - uses the VINFO flag */ TSK_FS_FILE *mft_file; /* contains the data for the mft entry 
for the mft */ const TSK_FS_ATTR *mft_data; /* Data run for MFT entry for MFT */ uint32_t csize_b; /* number of bytes in a cluster */ uint16_t ssize_b; /* number of bytes in a sector */ uint32_t mft_rsize_b; /* number of bytes per mft record */ uint32_t idx_rsize_b; /* number of bytes per idx record */ TSK_DADDR_T root_mft_addr; /* address of first mft entry */ uint8_t loading_the_MFT; /* set to 1 when initializing the setup */ TSK_FS_ATTR_RUN *bmap; /* Run of bitmap for clusters (linked list) */ /* lock protects bmap_buf, bmap_buf_off */ tsk_lock_t lock; char *bmap_buf; /* buffer to hold cached copy of bitmap (r/w shared - lock) */ TSK_DADDR_T bmap_buf_off; /* offset cluster in cached bitmap (r/w shared - lock) */ ntfs_attrdef *attrdef; // buffer of attrdef file contents size_t attrdef_len; // length of addrdef buffer /* orphan_map_lock protects orphan_map */ tsk_lock_t orphan_map_lock; void *orphan_map; // map that lists par directory to its orphans. (r/w shared - lock) #if TSK_USE_SID /* sid_lock protects sii_data, sds_data */ tsk_lock_t sid_lock; NTFS_SXX_BUFFER sii_data; // (r/w shared - lock) NTFS_SXX_BUFFER sds_data; // (r/w shared - lock) #endif } NTFS_INFO; extern uint32_t nt2unixtime(uint64_t ntdate); extern uint8_t ntfs_attrname_lookup(TSK_FS_INFO *, uint16_t, char *, int); extern TSK_RETVAL_ENUM ntfs_dinode_lookup(NTFS_INFO *, char *, TSK_INUM_T); extern TSK_RETVAL_ENUM ntfs_dir_open_meta(TSK_FS_INFO * a_fs, TSK_FS_DIR ** a_fs_dir, TSK_INUM_T a_addr); extern void ntfs_orphan_map_free(NTFS_INFO * a_ntfs); extern int ntfs_name_cmp(TSK_FS_INFO *, const char *, const char *); extern uint8_t ntfs_find_file(TSK_FS_INFO * fs, TSK_INUM_T inode_toid, uint32_t type_toid, uint8_t type_used, uint16_t id_toid, uint8_t id_used, TSK_FS_DIR_WALK_FLAG_ENUM dir_walk_flags, TSK_FS_DIR_WALK_CB action, void *ptr); #ifdef __cplusplus } #endif #endif sleuthkit-4.2.0/tsk/fs/tsk_yaffs.h000644 000765 000024 00000014724 12576320700 017710 0ustar00carrierstaff000000 000000 /* ** The Sleuth Kit ** ** Brian Carrier [carrier sleuthkit [dot] org] ** Copyright (c) 2003-2011 Brian Carrier. All rights reserved ** ** TASK ** Copyright (c) 2002 Brian Carrier, @stake Inc. All rights reserved ** ** This software is distributed under the Common Public License 1.0 */ /* * Contains the structures and function APIs for YAFFSFS file system support. 
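 *
 * Yaffs2 stores data in fixed-size chunks (flash pages), each with an
 * out-of-band spare area.  The YaffsHeader and YaffsSpare structures below
 * describe how TSK interprets those two regions, and the YAFFS_CONFIG_*
 * strings name the page/spare layout parameters that a "-yaffs2.config"
 * file can supply in place of auto-detection.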
*/ #ifndef _TSK_YAFFSFS_H #define _TSK_YAFFSFS_H #include #include #ifdef __cplusplus extern "C" { #endif /* ** Constants */ #define YAFFSFS_MAXNAMLEN 255 #define YAFFS_DEFAULT_PAGE_SIZE 2048 #define YAFFS_DEFAULT_SPARE_SIZE 64 #define YAFFS_DEFAULT_MAX_TEST_BLOCKS 400 // Maximum number of blocks to test looking for Yaffs2 spare under auto-detect #define YAFFS_HELP_MESSAGE "See http://wiki.sleuthkit.org/index.php?title=YAFFS2 for help on Yaffs2 configuration" /* * Yaffs config file constants and return values */ #ifdef TSK_WIN32 #define YAFFS_CONFIG_FILE_SUFFIX L"-yaffs2.config" #else #define YAFFS_CONFIG_FILE_SUFFIX "-yaffs2.config" #endif #define YAFFS_CONFIG_SEQ_NUM_STR "spare_seq_num_offset" #define YAFFS_CONFIG_OBJ_ID_STR "spare_obj_id_offset" #define YAFFS_CONFIG_CHUNK_ID_STR "spare_chunk_id_offset" #define YAFFS_CONFIG_PAGE_SIZE_STR "flash_page_size" #define YAFFS_CONFIG_SPARE_SIZE_STR "flash_spare_size" #define YAFFS_CONFIG_CHUNKS_PER_BLOCK_STR "flash_chunks_per_block" typedef enum { YAFFS_CONFIG_OK, YAFFS_CONFIG_FILE_NOT_FOUND, YAFFS_CONFIG_ERROR } YAFFS_CONFIG_STATUS; /* ** Yaffs Object Flags */ typedef enum { NONE, YAFFS_HEADER, YAFFS_CHUNK, YAFFS_PAGES, YAFFS_SPARES, YAFFS_PAGES_AND_SPARES, UNKNOWN } YAFFS_OBJECT_FLAGS; /* ** Yaffs2 Header Object */ #define YAFFS_HEADER_NAME_LENGTH 256 #define YAFFS_HEADER_ALIAS_LENGTH 160 typedef struct yaffsObj_header { uint32_t obj_type; uint32_t parent_id; char name[YAFFS_HEADER_NAME_LENGTH]; uint32_t file_mode; uint32_t user_id; uint32_t group_id; uint32_t atime; uint32_t mtime; uint32_t ctime; uint32_t file_size; uint32_t equivalent_id; char alias[YAFFS_HEADER_ALIAS_LENGTH]; uint32_t rdev_mode; uint32_t win_ctime[2]; uint32_t win_atime[2]; uint32_t win_mtime[2]; uint32_t inband_obj_id; uint32_t inband_is_shrink; uint32_t file_size_high; uint32_t reserved[1]; int shadows_obj; uint32_t is_shrink; } YaffsHeader; /* ** Spare object - this is subject to change... */ #define YAFFS_OBJECT_SPACE 0x40000 #define YAFFS_MAX_OBJECT_ID (YAFFS_OBJECT_SPACE - 1) #define YAFFS_LOWEST_SEQUENCE_NUMBER 0x00001000 #define YAFFS_HIGHEST_SEQUENCE_NUMBER 0xefffff00 #define YAFFS_SPARE_FLAGS_IS_HEADER 0x80000000 #define YAFFS_SPARE_PARENT_ID_MASK 0x0fffffff #define YAFFS_SPARE_OBJECT_TYPE_SHIFT 28 #define YAFFS_SPARE_OBJECT_TYPE_MASK 0xf0000000 typedef struct yaffsObj_spare { uint32_t seq_number; uint32_t object_id; uint32_t chunk_id; uint32_t has_extra_fields; uint32_t extra_object_type; uint32_t extra_parent_id; } YaffsSpare; /* ** Holds the metadata for a single YAFFS2 chunk. */ typedef enum { YAFFS_CHUNK_DEAD, /* Either bad or unallocated */ YAFFS_CHUNK_META, /* Contains a header */ YAFFS_CHUNK_DATA /* Contains file data */ } YaffsChunkType; typedef struct _YaffsChunk { YaffsChunkType type; YaffsSpare *spare; YaffsHeader *header; } YaffsChunk; /* File system State Values */ #define YAFFSFS_STATE_VALID 0x0001 /* unmounted correctly */ #define YAFFSFS_STATE_ERROR 0x0002 /* errors detected */ /* * Special File Objects for the YAFFS2 File system */ #define YAFFS_OBJECT_ROOT 1 #define YAFFS_OBJECT_FIRST 1 #define YAFFS_OBJECT_LOSTNFOUND 2 #define YAFFS_OBJECT_UNLINKED 3 #define YAFFS_OBJECT_DELETED 4 #define YAFFS_OBJECT_ROOT_NAME "" #define YAFFS_OBJECT_LOSTNFOUND_NAME "lost+found" #define YAFFS_OBJECT_UNLINKED_NAME "" #define YAFFS_OBJECT_DELETED_NAME "" /* * Yaffs File Types... 
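 * (these values are what appears in the obj_type field of YaffsHeader and
 * in the extra_object_type field recovered from the spare)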
*/ #define YAFFS_TYPE_UNKNOWN 0 #define YAFFS_TYPE_FILE 1 #define YAFFS_TYPE_SOFTLINK 2 #define YAFFS_TYPE_DIRECTORY 3 #define YAFFS_TYPE_HARDLINK 4 #define YAFFS_TYPE_SPECIAL 5 struct _YaffsCacheVersion; struct _YaffsCacheChunk; typedef struct _YaffsCacheObject { struct _YaffsCacheObject *yco_next; uint32_t yco_obj_id; struct _YaffsCacheVersion *yco_latest; } YaffsCacheObject; #define YAFFS_OBJECT_ID_MASK 0x0003ffff #define YAFFS_VERSION_NUM_SHIFT 18 #define YAFFS_VERSION_NUM_MASK 0x00003fff typedef struct _YaffsCacheVersion { struct _YaffsCacheVersion *ycv_prior; uint32_t ycv_version; uint32_t ycv_seq_number; struct _YaffsCacheChunk *ycv_header_chunk; struct _YaffsCacheChunk *ycv_first_chunk; struct _YaffsCacheChunk *ycv_last_chunk; } YaffsCacheVersion; typedef struct _YaffsCacheChunk { struct _YaffsCacheChunk *ycc_next; struct _YaffsCacheChunk *ycc_prev; TSK_OFF_T ycc_offset; uint32_t ycc_seq_number; uint32_t ycc_obj_id; uint32_t ycc_chunk_id; uint32_t ycc_parent_id; uint32_t ycc_n_bytes; } YaffsCacheChunk; typedef struct _YaffsCacheChunkGroup { YaffsCacheChunk *cache_chunks_head; YaffsCacheChunk *cache_chunks_tail; } YaffsCacheChunkGroup; /* * Structure of an yaffsfs file system handle. */ typedef struct { TSK_FS_INFO fs_info; /* super class */ unsigned int page_size; unsigned int spare_size; unsigned int chunks_per_block; uint32_t max_obj_id; uint32_t max_version; // Offsets into the spare area unsigned int spare_seq_offset; unsigned int spare_obj_id_offset; unsigned int spare_chunk_id_offset; unsigned int spare_nbytes_offset; tsk_lock_t cache_lock; YaffsCacheObject *cache_objects; std::map < uint32_t, YaffsCacheChunkGroup > *chunkMap; // If the user specified that the image is YAFFS2, print out additional verbose error messages int autoDetect; } YAFFSFS_INFO; #define YAFFS_FILE_CONTENT_LEN 0 #ifdef __cplusplus } #endif #endif sleuthkit-4.2.0/tsk/fs/unix_misc.c000644 000765 000024 00000033234 12576320700 017705 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2008-2011 Brian Carrier. All Rights reserved * * This software is distributed under the Common Public License 1.0 */ /** * \file unix_misc.c * Contains code that is common to both UFS1/2 and Ext2/3 file systems. */ #include "tsk_fs_i.h" #include "tsk_ffs.h" #include "tsk_ext2fs.h" /*********** MAKE DATA RUNS ***************/ /** \internal * Process an array of addresses and turn them into runs * * @param fs File system to analyze * @param fs_attr Data attribute to add runs to * @param addrs Buffer of addresses to process and turn into runs * @param addr_len Number of addresses in buffer * @param length Length of file remaining * * @returns the number of bytes processed and -1 if an error occurred */ static TSK_OFF_T unix_make_data_run_direct(TSK_FS_INFO * fs, TSK_FS_ATTR * fs_attr, TSK_DADDR_T * addrs, size_t addr_len, TSK_OFF_T length) { TSK_DADDR_T run_start = 0; TSK_DADDR_T run_len = 0; TSK_DADDR_T blks_processed = 0; size_t i; size_t fs_blen; // how big is each "block" (in fragments) if (addr_len == 0) { return 0; } // block_size is a fragment size in UFS, so we need to maintain length in fragments if (TSK_FS_TYPE_ISFFS(fs->ftype)) { FFS_INFO *ffs = (FFS_INFO *) fs; fs_blen = ffs->ffsbsize_f; } else { fs_blen = 1; } run_start = addrs[0]; run_len = fs_blen; /* Note that we are lazy about length. We stop only when a run is past length, * we do not end exactly at length -- although that should happen anyway. 
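     *
     * For example, with fs_blen == 1 the address array {100, 101, 102, 0, 0, 200}
     * produces three runs: {addr 100, len 3}, {addr 0, len 2, SPARSE} and
     * {addr 200, len 1}.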
*/ for (i = 0; i < addr_len; i++) { /* Make a new run if: * - This is the last addresss in the buffer * - The next address is not part of the current run * -- special case for sparse since they use 0 as an address */ if ((i + 1 == addr_len) || ((run_start + run_len != addrs[i + 1]) && (run_start != 0)) || ((run_start == 0) && (addrs[i + 1] != 0))) { TSK_FS_ATTR_RUN *data_run; // make a non-resident run data_run = tsk_fs_attr_run_alloc(); if (data_run == NULL) return -1; data_run->addr = run_start; data_run->len = run_len; if (run_start == 0) data_run->flags = TSK_FS_ATTR_RUN_FLAG_SPARSE; // save the run tsk_fs_attr_append_run(fs, fs_attr, data_run); // get ready for the next run if (i + 1 != addr_len) run_start = addrs[i + 1]; run_len = 0; // stop if we are past the length requested if (blks_processed * fs->block_size > (TSK_DADDR_T) length) break; } run_len += fs_blen; blks_processed += fs_blen; } return blks_processed * fs->block_size; } /** \internal * Read an indirect block and process the contents to make a runlist from the pointers. * * @param fs File system to analyze * @param fs_attr Structure to save run data into * @param fs_attr_indir Structure to save addresses of indirect block pointers in * @param buf Buffers to read block data into (0 is block sized, 1+ are DADDR_T arrays based on FS type) * @param level Indirection level that this will process at (1+) * @param addr Address of block to read * @param length Length of file remaining * * @returns the number of bytes processed during call and -1 if an error occurred */ static TSK_OFF_T unix_make_data_run_indirect(TSK_FS_INFO * fs, TSK_FS_ATTR * fs_attr, TSK_FS_ATTR * fs_attr_indir, char *buf[], int level, TSK_DADDR_T addr, TSK_OFF_T length) { char *myname = "unix_make_data_run_indirect"; size_t addr_cnt = 0; TSK_DADDR_T *myaddrs = (TSK_DADDR_T *) buf[level]; TSK_OFF_T length_remain = length; TSK_OFF_T retval; size_t fs_bufsize; size_t fs_blen; TSK_FS_ATTR_RUN *data_run; if (tsk_verbose) tsk_fprintf(stderr, "%s: level %d block %" PRIuDADDR "\n", myname, level, addr); // block_size is a fragment size in UFS, so we need to maintain length in fragments if (TSK_FS_TYPE_ISFFS(fs->ftype)) { FFS_INFO *ffs = (FFS_INFO *) fs; fs_blen = ffs->ffsbsize_f; fs_bufsize = ffs->ffsbsize_b; } else { fs_blen = 1; fs_bufsize = fs->block_size; } if (addr > fs->last_block) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_INODE_COR); tsk_error_set_errstr("unix: Indirect block address too large: %" PRIuDADDR "", addr); return -1; } // make a non-resident run data_run = tsk_fs_attr_run_alloc(); if (data_run == NULL) return -1; data_run->addr = addr; data_run->len = fs_blen; /* * Read a block of disk addresses. 
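     * Each such block holds fs_bufsize / 4 addresses on ext2/3 and UFS1
     * (fs_bufsize / 8 on UFS2); with 4096-byte blocks that is 1024 pointers,
     * so one double-indirect block ultimately maps 1024 * 1024 data blocks,
     * about 4 GiB of file content.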
*/ // sparse if (addr == 0) { memset(buf[0], 0, fs_bufsize); data_run->flags = TSK_FS_ATTR_RUN_FLAG_SPARSE; } else { ssize_t cnt; // read the data into the scratch buffer cnt = tsk_fs_read_block(fs, addr, buf[0], fs_bufsize); if (cnt != fs_bufsize) { if (cnt >= 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_READ); } tsk_error_set_errstr2("unix_make_data_run_indir: Block %" PRIuDADDR, addr); return -1; } } // save the run tsk_fs_attr_append_run(fs, fs_attr_indir, data_run); // convert the raw addresses to the correct endian ordering if ((fs->ftype == TSK_FS_TYPE_FFS1) || (fs->ftype == TSK_FS_TYPE_FFS1B) || (TSK_FS_TYPE_ISEXT(fs->ftype))) { size_t n; uint32_t *iaddr = (uint32_t *) buf[0]; addr_cnt = fs_bufsize / sizeof(*iaddr); for (n = 0; n < addr_cnt; n++) { myaddrs[n] = tsk_getu32(fs->endian, (uint8_t *) & iaddr[n]); } } else if (fs->ftype == TSK_FS_TYPE_FFS2) { size_t n; uint64_t *iaddr = (uint64_t *) buf[0]; addr_cnt = fs_bufsize / sizeof(*iaddr); for (n = 0; n < addr_cnt; n++) { myaddrs[n] = tsk_getu64(fs->endian, (uint8_t *) & iaddr[n]); } } // pass the addresses to the next level if (level == 1) { retval = unix_make_data_run_direct(fs, fs_attr, myaddrs, addr_cnt, length_remain); if (retval != -1) { length_remain -= retval; } } else { size_t i; retval = 0; for (i = 0; i < addr_cnt && retval != -1; i++) { retval = unix_make_data_run_indirect(fs, fs_attr, fs_attr_indir, buf, level - 1, myaddrs[i], length_remain); if (retval == -1) { break; } else { length_remain -= retval; } } } if (retval == -1) return -1; else return length - length_remain; } /** \internal * * @returns 1 on error and 0 on success */ uint8_t tsk_fs_unix_make_data_run(TSK_FS_FILE * fs_file) { TSK_OFF_T length = 0; TSK_OFF_T read_b = 0; TSK_FS_ATTR *fs_attr; TSK_FS_ATTR *fs_attr_indir; TSK_FS_META *fs_meta = fs_file->meta; TSK_FS_INFO *fs = fs_file->fs_info; // clean up any error messages that are lying around tsk_error_reset(); if (tsk_verbose) tsk_fprintf(stderr, "unix_make_data_run: Processing file %" PRIuINUM "\n", fs_meta->addr); // see if we have already loaded the runs if ((fs_meta->attr != NULL) && (fs_meta->attr_state == TSK_FS_META_ATTR_STUDIED)) { return 0; } else if (fs_meta->attr_state == TSK_FS_META_ATTR_ERROR) { return 1; } // not sure why this would ever happen, but... 
else if (fs_meta->attr != NULL) { tsk_fs_attrlist_markunused(fs_meta->attr); } else if (fs_meta->attr == NULL) { fs_meta->attr = tsk_fs_attrlist_alloc(); } if ((TSK_FS_TYPE_ISFFS(fs->ftype) == 0) && (TSK_FS_TYPE_ISEXT(fs->ftype) == 0)) { tsk_error_set_errno(TSK_ERR_FS_INODE_COR); tsk_error_set_errstr ("unix_make_run: Called with non-Unix file system: %x", fs->ftype); return 1; } length = roundup(fs_meta->size, fs->block_size); if ((fs_attr = tsk_fs_attrlist_getnew(fs_meta->attr, TSK_FS_ATTR_NONRES)) == NULL) { return 1; } // initialize the data run if (tsk_fs_attr_set_run(fs_file, fs_attr, NULL, NULL, TSK_FS_ATTR_TYPE_DEFAULT, TSK_FS_ATTR_ID_DEFAULT, fs_meta->size, fs_meta->size, roundup(fs_meta->size, fs->block_size), 0, 0)) { return 1; } read_b = unix_make_data_run_direct(fs, fs_attr, (TSK_DADDR_T *) fs_meta->content_ptr, 12, length); if (read_b == -1) { fs_meta->attr_state = TSK_FS_META_ATTR_ERROR; if (fs_meta->flags & TSK_FS_META_FLAG_UNALLOC) tsk_error_set_errno(TSK_ERR_FS_RECOVER); return 1; } length -= read_b; /* if there is still data left, read the indirect */ if (length > 0) { int level; char **buf; size_t fs_bufsize0; size_t fs_bufsize1; size_t ptrsperblock; int numBlocks = 0; int numSingIndirect = 0; int numDblIndirect = 0; int numTripIndirect = 0; /* With FFS/UFS a full block contains the addresses, but block_size is * only a fragment. Figure out the scratch buffer size and the buffers to * store the cleaned addresses (endian converted) */ if (TSK_FS_TYPE_ISFFS(fs->ftype)) { FFS_INFO *ffs = (FFS_INFO *) fs; fs_bufsize0 = ffs->ffsbsize_b; if ((fs->ftype == TSK_FS_TYPE_FFS1) || (fs->ftype == TSK_FS_TYPE_FFS1B)) { ptrsperblock = fs_bufsize0 / 4; } else { ptrsperblock = fs_bufsize0 / 8; } } else { fs_bufsize0 = fs->block_size; ptrsperblock = fs_bufsize0 / 4; } fs_bufsize1 = sizeof(TSK_DADDR_T) * ptrsperblock; /* * Initialize a buffer for the 3 levels of indirection that are supported by * this inode. Each level of indirection will have a buffer to store * addresses in. buf[0] is a special scratch buffer that is used to store * raw data from the image (before endian conversions are applied). It is * equal to one block size. The others will store TSK_DADDR_T structures * and will have a size depending on the FS type. */ if ((buf = (char **) tsk_malloc(sizeof(char *) * 4)) == NULL) return 1; if ((buf[0] = (char *) tsk_malloc(fs_bufsize0)) == NULL) { free(buf); return 1; } if ((fs_attr_indir = tsk_fs_attrlist_getnew(fs_meta->attr, TSK_FS_ATTR_NONRES)) == NULL) { free(buf); return 1; } // determine number of indirect lbocks needed for file size... numBlocks = (int) (((fs_meta->size + fs_bufsize0 - 1) / fs_bufsize0) - 12); numSingIndirect = (int) ((numBlocks + ptrsperblock - 1) / ptrsperblock); numDblIndirect = 0; numTripIndirect = 0; // double block pointer? 
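/*
 * A rough worked example of the estimate computed below, assuming a
 * 1024-byte address block (fs_bufsize0) and 4-byte pointers, i.e.
 * ptrsperblock = 256 (real values depend on the file system). For a
 * 2 MiB file:
 *
 *     ptrsperblock    = 1024 / 4;                     // 256
 *     numBlocks       = (2097152 + 1023) / 1024 - 12; // 2048 - 12 = 2036
 *     numSingIndirect = (2036 + 255) / 256;           // 8
 *     numDblIndirect  = (8 - 1 + 255) / 256;          // 1 (since numSingIndirect > 1)
 *     numTripIndirect = 0;                            // only if numDblIndirect > 1
 *
 * These counts are only used to size the TSK_FS_ATTR_TYPE_UNIX_INDIR
 * attribute; the actual pointer blocks are discovered by walking them
 * in the level loop further down.
 */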
if (numSingIndirect > 1) { numDblIndirect = (int) ((numSingIndirect - 1 + ptrsperblock - 1) / ptrsperblock); if (numDblIndirect > 1) { numTripIndirect = (int) ((numDblIndirect - 1 + ptrsperblock - 1) / ptrsperblock); } } // initialize the data run if (tsk_fs_attr_set_run(fs_file, fs_attr_indir, NULL, NULL, TSK_FS_ATTR_TYPE_UNIX_INDIR, TSK_FS_ATTR_ID_DEFAULT, fs_bufsize0 * (numSingIndirect + numDblIndirect + numTripIndirect), fs_bufsize0 * (numSingIndirect + numDblIndirect + numTripIndirect), fs_bufsize0 * (numSingIndirect + numDblIndirect + numTripIndirect), 0, 0)) { free(buf); return 1; } for (level = 1; length > 0 && level < 4; level++) { TSK_DADDR_T *addr_ptr = (TSK_DADDR_T *) fs_meta->content_ptr; if ((buf[level] = (char *) tsk_malloc(fs_bufsize1)) == NULL) { int f; for (f = 0; f < level; f++) free(buf[f]); free(buf); return 1; } /* the indirect addresses are stored in addr_ptr after the 12 * direct addresses */ read_b = unix_make_data_run_indirect(fs, fs_attr, fs_attr_indir, buf, level, addr_ptr[12 + level - 1], length); if (read_b == -1) break; length -= read_b; } /* * Cleanup. */ for (level = 0; level < 4; level++) { if (buf[level]) free(buf[level]); } } if (read_b == -1) { fs_meta->attr_state = TSK_FS_META_ATTR_ERROR; if (fs_meta->flags & TSK_FS_META_FLAG_UNALLOC) tsk_error_set_errno(TSK_ERR_FS_RECOVER); return 1; } fs_meta->attr_state = TSK_FS_META_ATTR_STUDIED; return 0; } TSK_FS_ATTR_TYPE_ENUM tsk_fs_unix_get_default_attr_type(const TSK_FS_FILE * a_file) { return TSK_FS_ATTR_TYPE_DEFAULT; } int tsk_fs_unix_name_cmp(TSK_FS_INFO * a_fs_info, const char *s1, const char *s2) { return strcmp(s1, s2); } sleuthkit-4.2.0/tsk/fs/walk_cpp.cpp000644 000765 000024 00000005007 12576320700 020044 0ustar00carrierstaff000000 000000 #include "tsk_fs_i.h" #include "tsk/vs/tsk_vs_i.h" /** * \internal * call back function */ TSK_WALK_RET_ENUM tsk_fs_block_cpp_c_cb (const TSK_FS_BLOCK *a_block, void *a_ptr) { TSK_FS_BLOCK_WALK_CPP_DATA *data = (TSK_FS_BLOCK_WALK_CPP_DATA *)a_ptr; TskFsBlock block(a_block); return data->cppAction(&block, data->cPtr); } /** * \internal * call back function */ TSK_WALK_RET_ENUM tsk_fs_file_cpp_c_cb (TSK_FS_FILE *a_file, TSK_OFF_T a_off, TSK_DADDR_T a_addr, char *a_buf, size_t a_len, TSK_FS_BLOCK_FLAG_ENUM a_flags, void *a_ptr) { TSK_FS_FILE_WALK_CPP_DATA *data = (TSK_FS_FILE_WALK_CPP_DATA *)a_ptr; TskFsFile fsFile(a_file); return data->cppAction(&fsFile,a_off,a_addr,a_buf,a_len,a_flags,data->cPtr); } /** * \internal * call back function */ TSK_WALK_RET_ENUM tsk_fs_jblk_cpp_c_cb (TSK_FS_INFO *a_fsInfo, char *a_string, int a_num, void *a_ptr){ TSK_FS_JBLK_WALK_CPP_DATA *data = (TSK_FS_JBLK_WALK_CPP_DATA *)a_ptr; TskFsInfo fsInfo(a_fsInfo); return data->cppAction(&fsInfo, a_string, a_num, data->cPtr); } /** * \internal * call back function */ TSK_WALK_RET_ENUM tsk_fs_jentry_cpp_c_cb (TSK_FS_INFO *a_fsInfo, TSK_FS_JENTRY *a_jentry, int a_num, void *a_ptr){ TSK_FS_JENTRY_WALK_CPP_DATA *data = (TSK_FS_JENTRY_WALK_CPP_DATA *)a_ptr; TskFsInfo fsInfo(a_fsInfo); TskFsJEntry fsJEntry(a_jentry); return data->cppAction(&fsInfo, &fsJEntry, a_num, data->cPtr); } /** * \internal * call back function */ TSK_WALK_RET_ENUM tsk_fs_meta_walk_cpp_c_cb (TSK_FS_FILE *a_file, void *a_ptr){ TSK_FS_META_WALK_CPP_DATA *data = (TSK_FS_META_WALK_CPP_DATA *)a_ptr; TskFsFile fsFile(a_file); return data->cppAction(&fsFile, data->cPtr); } /** * \internal * call back function */ TSK_WALK_RET_ENUM tsk_fs_dir_walk_cpp_c_cb (TSK_FS_FILE *a_file, const char *a_path, void *a_ptr){ TSK_FS_DIR_WALK_CPP_DATA 
*data = (TSK_FS_DIR_WALK_CPP_DATA *)a_ptr; TskFsFile fsFile(a_file); return data->cppAction(&fsFile, a_path, data->cPtr); } /** * \internal * call back function */ TSK_WALK_RET_ENUM tsk_vs_part_walk_cpp_c_cb (TSK_VS_INFO *a_vs, const TSK_VS_PART_INFO * a_vs_part, void *a_ptr){ TSK_VS_PART_WALK_CPP_DATA *data = (TSK_VS_PART_WALK_CPP_DATA *)a_ptr; TskVsInfo vsInfo(a_vs); TskVsPartInfo vsPartInfo(const_cast(a_vs_part)); return data->cppAction(&vsInfo, &vsPartInfo, data->cPtr); } sleuthkit-4.2.0/tsk/fs/yaffs.cpp000644 000765 000024 00000323077 12576320700 017366 0ustar00carrierstaff000000 000000 /* ** The Sleuth Kit ** ** Brian Carrier [carrier sleuthkit [dot] org] ** Copyright (c) 2006-2011 Brian Carrier, Basis Technology. All Rights reserved ** Copyright (c) 2003-2005 Brian Carrier. All rights reserved ** ** TASK v** Copyright (c) 2002-2003 Brian Carrier, @stake Inc. All rights reserved ** ** Copyright (c) 1997,1998,1999, International Business Machines ** Corporation and others. All Rights Reserved. */ /** *\file yaffs.cpp * Contains the internal TSK YAFFS2 file system functions. */ /* TCT * LICENSE * This software is distributed under the IBM Public License. * AUTHOR(S) * Wietse Venema * IBM T.J. Watson Research * P.O. Box 704 * Yorktown Heights, NY 10598, USA --*/ #include #include #include #include #include #include #include "tsk_fs_i.h" #include "tsk_yaffs.h" #include "tsk_fs.h" /* * Implementation Notes: * - As inode, we use object id and a version number derived from the * number of unique sequence ids for the object still left in the * file system. * * - The version numbers start at 1 and increase as they get closer to * the the latest version. Version number 0 is a special version * that is equivalent to the latest version (without having to know * the latest version number.) * * - Since inodes are composed using the object id in the least * significant bits and the version up higher, requesting the * inode that matches the object id you are looking for will * retreive the latest version of this object. * * - Files always exist in the latest version of their parent directory * only. * * - Filenames are not unique even with attached version numbers, since * version numbers are namespaced by inode. * * - The cache stores a lot of info via the structure. As this is * used for investigations, we assume these decisions will be updated * to expose the most useful view of this log based file system. TSK * doesn't seem have a real way to expose a versioned view of a log * based file system like this. Shoehorning it into the framework * ends up dropping some information. I looked at using resource * streams as versions, but the abstraction breaks quickly. 
* */ static uint8_t yaffsfs_read_header(YAFFSFS_INFO *yfs, YaffsHeader ** header, TSK_OFF_T offset); /** * Generate an inode number based on the file's object and version numbers */ static TSK_RETVAL_ENUM yaffscache_obj_id_and_version_to_inode(uint32_t obj_id, uint32_t version_num, TSK_INUM_T *inode) { if ((obj_id & ~YAFFS_OBJECT_ID_MASK) != 0) { return TSK_ERR; } if ((version_num & ~YAFFS_VERSION_NUM_MASK) != 0) { return TSK_ERR; } *inode = obj_id | (version_num << YAFFS_VERSION_NUM_SHIFT); return TSK_OK; } /** * Given the TSK-generated inode address, extract the object id and version number from it */ static TSK_RETVAL_ENUM yaffscache_inode_to_obj_id_and_version(TSK_INUM_T inode, uint32_t *obj_id, uint32_t *version_num) { *obj_id = inode & YAFFS_OBJECT_ID_MASK; *version_num = (inode >> YAFFS_VERSION_NUM_SHIFT) & YAFFS_VERSION_NUM_MASK; return TSK_OK; } /* * Order it like yaffs2.git does -- sort by (seq_num, offset/block) */ static int yaffscache_chunk_compare(YaffsCacheChunk *curr, uint32_t addee_obj_id, TSK_OFF_T addee_offset, uint32_t addee_seq_number) { if (curr->ycc_obj_id == addee_obj_id) { if (curr->ycc_seq_number == addee_seq_number) { if (curr->ycc_offset == addee_offset) { return 0; } else if (curr->ycc_offset < addee_offset) { return -1; } else { return 1; } } else if (curr->ycc_seq_number < addee_seq_number) { return -1; } else { return 1; } } else if (curr->ycc_obj_id < addee_obj_id) { return -1; } else { return 1; } } static TSK_RETVAL_ENUM yaffscache_chunk_find_insertion_point(YAFFSFS_INFO *yfs, uint32_t obj_id, TSK_OFF_T offset, uint32_t seq_number, YaffsCacheChunk **chunk) { YaffsCacheChunk *curr, *prev; // Have we seen this obj_id? If not, add an entry for it if(yfs->chunkMap->find(obj_id) == yfs->chunkMap->end()){ fflush(stderr); YaffsCacheChunkGroup chunkGroup; chunkGroup.cache_chunks_head = NULL; chunkGroup.cache_chunks_tail = NULL; yfs->chunkMap->insert(std::make_pair(obj_id, chunkGroup)); } curr = yfs->chunkMap->operator[](obj_id).cache_chunks_head; prev = NULL; if (chunk == NULL) { return TSK_ERR; } while(curr != NULL) { // Compares obj id, then seq num, then offset. -1 => current < new int cmp = yaffscache_chunk_compare(curr, obj_id, offset, seq_number); if (cmp == 0) { *chunk = curr; return TSK_OK; } else if (cmp == 1) { *chunk = prev; return TSK_STOP; } prev = curr; curr = curr->ycc_next; } *chunk = prev; return TSK_STOP; } /** * Add a chunk to the cache. * @param yfs * @param offset Byte offset this chunk was found in (in the disk image) * @param seq_number Sequence number of this chunk * @param obj_id Object Id this chunk is associated with * @param parent_id Parent object ID that this chunk/object is associated with */ static TSK_RETVAL_ENUM yaffscache_chunk_add(YAFFSFS_INFO *yfs, TSK_OFF_T offset, uint32_t seq_number, uint32_t obj_id, uint32_t chunk_id, uint32_t parent_id) { TSK_RETVAL_ENUM result; YaffsCacheChunk *prev; YaffsCacheChunk *chunk; if ((chunk = (YaffsCacheChunk*)tsk_malloc(sizeof(YaffsCacheChunk))) == NULL) { return TSK_ERR; } chunk->ycc_offset = offset; chunk->ycc_seq_number = seq_number; chunk->ycc_obj_id = obj_id; chunk->ycc_chunk_id = chunk_id; chunk->ycc_parent_id = parent_id; // Bit of a hack here. In some images, the root directory (obj_id = 1) lists iself as its parent // directory, which can cause issues later when we get directory contents. To prevent this, // if a chunk comes in with obj_id = 1 and parent_id = 1, manually set the parent ID to zero. 
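/*
 * The insertion below keeps each object's chunk list sorted by
 * (seq_number, offset), matching yaffscache_chunk_compare(). A small
 * sketch of the invariant for one object, with made-up offsets:
 *
 *     add(seq=0x1001, offset=0x8000)  ->  [0x8000]
 *     add(seq=0x1000, offset=0x2000)  ->  [0x2000, 0x8000]
 *     add(seq=0x1000, offset=0x4000)  ->  [0x2000, 0x4000, 0x8000]
 *
 * yaffscache_chunk_find_insertion_point() hands back the last chunk that
 * sorts before the new one (or NULL if the new chunk goes first), and the
 * new chunk is then linked in after it through ycc_prev/ycc_next.
 */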
if((obj_id == 1) && (parent_id == 1)){ chunk->ycc_parent_id = 0; } // Find the chunk that should go right before the new chunk result = yaffscache_chunk_find_insertion_point(yfs, obj_id, offset, seq_number, &prev); if (result == TSK_ERR) { return TSK_ERR; } if (prev == NULL) { // No previous chunk - new chunk is the lowest we've seen and the new start of the list chunk->ycc_prev = NULL; chunk->ycc_next = yfs->chunkMap->operator[](obj_id).cache_chunks_head; } else { chunk->ycc_prev = prev; chunk->ycc_next = prev->ycc_next; } if (chunk->ycc_next != NULL) { // If we're not at the end, set the prev pointer on the next chunk to point to our new one chunk->ycc_next->ycc_prev = chunk; } else { yfs->chunkMap->operator[](obj_id).cache_chunks_tail = chunk; } if (chunk->ycc_prev != NULL) { // If we're not at the beginning, set the next pointer on the previous chunk to point at our new one chunk->ycc_prev->ycc_next = chunk; } else { yfs->chunkMap->operator[](obj_id).cache_chunks_head = chunk; } return TSK_OK; } /** * Get the file object from the cache. * @returns TSK_OK if it was found and TSK_STOP if we did not find it */ static TSK_RETVAL_ENUM yaffscache_object_find(YAFFSFS_INFO *yfs, uint32_t obj_id, YaffsCacheObject **obj) { YaffsCacheObject *curr, *prev; curr = yfs->cache_objects; prev = NULL; if (obj == NULL) { return TSK_ERR; } while(curr != NULL) { if (curr->yco_obj_id == obj_id) { *obj = curr; return TSK_OK; } else if (curr->yco_obj_id > obj_id) { *obj = prev; return TSK_STOP; } prev = curr; curr = curr->yco_next; } *obj = prev; return TSK_STOP; } /** * Add an object to the cache if it does not already exist in there. * @returns TSK_ERR on error, TSK_OK otherwise. */ static TSK_RETVAL_ENUM yaffscache_object_find_or_add(YAFFSFS_INFO *yfs, uint32_t obj_id, YaffsCacheObject **obj) { YaffsCacheObject *prev; TSK_RETVAL_ENUM result; if (obj == NULL) { return TSK_ERR; } // Look for this obj_id in yfs->cache_objects // If not found, add it in the correct spot // yaffscache_object_find returns the last object with obj_id less than the one // we were searching for, so use that to insert the new one in the list result = yaffscache_object_find(yfs, obj_id, &prev); if (result == TSK_OK) { *obj = prev; return TSK_OK; } else if (result == TSK_STOP) { *obj = (YaffsCacheObject *) tsk_malloc(sizeof(YaffsCacheObject)); (*obj)->yco_obj_id = obj_id; if (prev == NULL) { (*obj)->yco_next = yfs->cache_objects; yfs->cache_objects = *obj; } else { (*obj)->yco_next = prev->yco_next; prev->yco_next = (*obj); } return TSK_OK; } else { *obj = NULL; return TSK_ERR; } } static TSK_RETVAL_ENUM yaffscache_object_add_version(YaffsCacheObject *obj, YaffsCacheChunk *chunk) { uint32_t ver_number; YaffsCacheChunk *header_chunk = NULL; YaffsCacheVersion *version; // Going to try ignoring unlinked/deleted headers (objID 3 and 4) if ((chunk->ycc_chunk_id == 0) && (chunk->ycc_parent_id != YAFFS_OBJECT_UNLINKED) &&(chunk->ycc_parent_id != YAFFS_OBJECT_DELETED)) { header_chunk = chunk; } /* If this is the second version (since last header_chunk is not NULL) and no * header was added, get rid of this incomplete old version -- can't be * reasonably recovered. * * TODO: These chunks are still in the structure and can be walked, * but I'm not sure how to represent this set of data chunks * with no metadata under TSK. This is rare and we don't have * a testcase for it now. Punting right now. * * Edit: Shouldn't get to this point anymore. Changes to * yaffscache_versions_insert_chunk make a version continue until it * has a header block. 
*/ if (obj->yco_latest != NULL) { if (obj->yco_latest->ycv_header_chunk == NULL) { YaffsCacheVersion *incomplete = obj->yco_latest; if (tsk_verbose) tsk_fprintf(stderr, "yaffscache_object_add_version: " "removed an incomplete first version (no header)\n"); obj->yco_latest = obj->yco_latest->ycv_prior; free(incomplete); } } if (obj->yco_latest != NULL) { ver_number = obj->yco_latest->ycv_version + 1; /* Until a new header is given, use the last seen header. */ if (header_chunk == NULL) { header_chunk = obj->yco_latest->ycv_header_chunk; // If we haven't seen a good header yet and we have a deleted/unlinked one, use it if((header_chunk == NULL) && (chunk->ycc_chunk_id == 0)){ header_chunk = chunk; } } } else { ver_number = 1; } if ((version = (YaffsCacheVersion *) tsk_malloc(sizeof(YaffsCacheVersion))) == NULL) { return TSK_ERR; } version->ycv_prior = obj->yco_latest; version->ycv_version = ver_number; version->ycv_seq_number = chunk->ycc_seq_number; version->ycv_header_chunk = header_chunk; version->ycv_first_chunk = chunk; version->ycv_last_chunk = chunk; obj->yco_latest = version; return TSK_OK; } /** * Add a chunk to its corresponding object in the cache. */ static TSK_RETVAL_ENUM yaffscache_versions_insert_chunk(YAFFSFS_INFO *yfs, YaffsCacheChunk *chunk) { YaffsCacheObject *obj; TSK_RETVAL_ENUM result; YaffsCacheVersion *version; // Building a list in yfs->cache_objects, sorted by obj_id result = yaffscache_object_find_or_add(yfs, chunk->ycc_obj_id, &obj); if (result != TSK_OK) { return TSK_ERR; } version = obj->yco_latest; /* First chunk in this object? */ if (version == NULL) { yaffscache_object_add_version(obj, chunk); } else { /* Chunk in the same update? */ if (chunk->ycc_seq_number == version->ycv_seq_number) { version->ycv_last_chunk = chunk; if ((chunk->ycc_chunk_id == 0) && (chunk->ycc_parent_id != YAFFS_OBJECT_UNLINKED) &&(chunk->ycc_parent_id != YAFFS_OBJECT_DELETED)) { version->ycv_header_chunk = chunk; } else if((chunk->ycc_chunk_id == 0) && (version->ycv_header_chunk == NULL)){ version->ycv_header_chunk = chunk; } } // If there was no header for the last version, continue adding to it instead // of starting a new version. else if(version->ycv_header_chunk == NULL){ version->ycv_seq_number = chunk->ycc_seq_number; version->ycv_last_chunk = chunk; if ((chunk->ycc_chunk_id == 0) && (chunk->ycc_parent_id != YAFFS_OBJECT_UNLINKED) &&(chunk->ycc_parent_id != YAFFS_OBJECT_DELETED)) { version->ycv_header_chunk = chunk; } else if((chunk->ycc_chunk_id == 0) && (version->ycv_header_chunk == NULL)){ version->ycv_header_chunk = chunk; } } else if(chunk->ycc_chunk_id == 0){ // Directories only have a header block // If we're looking at a new version of a directory where the previous version had the same name, // leave everything in the same version. Multiple versions of the same directory aren't really giving us // any information. 
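/*
 * Sketch of the decision made just below (the object names are only
 * illustrative):
 *
 *     old header: type=YAFFS_TYPE_DIRECTORY, name="media"
 *     new header: type=YAFFS_TYPE_DIRECTORY, name="media"
 *         -> fold the new header chunk into the current version
 *            (ycv_seq_number, ycv_last_chunk, ycv_header_chunk updated)
 *
 *     old header: not a directory, or a different name
 *         -> start a new version via yaffscache_object_add_version()
 */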
YaffsHeader * newHeader; yaffsfs_read_header(yfs, &newHeader, chunk->ycc_offset); if((newHeader != NULL) && (newHeader->obj_type == YAFFS_TYPE_DIRECTORY)){ // Read in the old header YaffsHeader * oldHeader; yaffsfs_read_header(yfs, &oldHeader, version->ycv_header_chunk->ycc_offset); if((oldHeader != NULL) && (oldHeader->obj_type == YAFFS_TYPE_DIRECTORY) && (0 == strncmp(oldHeader->name, newHeader->name, YAFFS_HEADER_NAME_LENGTH))){ version->ycv_seq_number = chunk->ycc_seq_number; version->ycv_last_chunk = chunk; version->ycv_header_chunk = chunk; } else{ // The older header either isn't a directory or it doesn't have the same name, so leave it // as its own version yaffscache_object_add_version(obj, chunk); } } else{ // Not a directory yaffscache_object_add_version(obj, chunk); } } else{ // Otherwise, add this chunk as the start of a new version yaffscache_object_add_version(obj, chunk); } } return TSK_OK; } static TSK_RETVAL_ENUM yaffscache_versions_compute(YAFFSFS_INFO *yfs) { std::map::iterator iter; for( iter = yfs->chunkMap->begin(); iter != yfs->chunkMap->end(); ++iter ) { YaffsCacheChunk *chunk_curr = yfs->chunkMap->operator[](iter->first).cache_chunks_head; while(chunk_curr != NULL) { if (yaffscache_versions_insert_chunk(yfs, chunk_curr) != TSK_OK) { return TSK_ERR; } chunk_curr = chunk_curr->ycc_next; } } return TSK_OK; } /** * Callback for yaffscache_find_children() * @param obj Object that is a child * @param version Version of the object * @param args Pointer to what was passed into yaffscache_find_children */ typedef TSK_RETVAL_ENUM yc_find_children_cb(YaffsCacheObject *obj, YaffsCacheVersion *version, void *args); /** * Search the cache for objects that are children of the given address. * @param yfs * @param parent_inode Inode of folder/directory * @param cb Call back to call for each found child * @param args Pointer to structure that will be passed to cb * @returns TSK_ERR on error */ static TSK_RETVAL_ENUM yaffscache_find_children(YAFFSFS_INFO *yfs, TSK_INUM_T parent_inode, yc_find_children_cb cb, void *args) { YaffsCacheObject *obj; uint32_t parent_id, version_num; if (yaffscache_inode_to_obj_id_and_version(parent_inode, &parent_id, &version_num) != TSK_OK) { return TSK_ERR; } /* Iterate over all objects and all versions of the objects to see if one is the child * of the given parent. */ for (obj = yfs->cache_objects; obj != NULL; obj = obj->yco_next) { YaffsCacheVersion *version; for (version = obj->yco_latest; version != NULL; version = version->ycv_prior) { /* Is this an incomplete version? */ if (version->ycv_header_chunk == NULL) continue; if (version->ycv_header_chunk->ycc_parent_id == parent_id) { TSK_RETVAL_ENUM result = cb(obj, version, args); if (result != TSK_OK) return result; } } } return TSK_OK; } /** * Lookup an object based on its inode. * @param yfs * @param inode * @param version [out] Pointer to store version of the object that was found (if inode had a version of 0) * @param obj_ret [out] Pointer to store found object into * @returns TSK_ERR on error. 
*/ static TSK_RETVAL_ENUM yaffscache_version_find_by_inode(YAFFSFS_INFO *yfs, TSK_INUM_T inode, YaffsCacheVersion **version, YaffsCacheObject **obj_ret) { uint32_t obj_id, version_num; YaffsCacheObject *obj; YaffsCacheVersion *curr; if (version == NULL) { return TSK_ERR; } // convert inode to obj and version and find it in cache if (yaffscache_inode_to_obj_id_and_version(inode, &obj_id, &version_num) != TSK_OK) { *version = NULL; return TSK_ERR; } if (yaffscache_object_find(yfs, obj_id, &obj) != TSK_OK) { *version = NULL; return TSK_ERR; } if (version_num == 0) { if (obj_ret != NULL) { *obj_ret = obj; } *version = obj->yco_latest; return TSK_OK; } // Find the requested version in the list. for(curr = obj->yco_latest; curr != NULL; curr = curr->ycv_prior) { if (curr->ycv_version == version_num) { if (obj_ret != NULL) { *obj_ret = obj; } *version = curr; return TSK_OK; } } if (obj_ret != NULL) { *obj_ret = NULL; } *version = NULL; return TSK_ERR; } static void yaffscache_object_dump(FILE *fp, YaffsCacheObject *obj) { YaffsCacheVersion *next_version = obj->yco_latest; YaffsCacheChunk *chunk = next_version->ycv_last_chunk; fprintf(fp, "Object %d\n", obj->yco_obj_id); while(chunk != NULL && chunk->ycc_obj_id == obj->yco_obj_id) { if (next_version != NULL && chunk == next_version->ycv_last_chunk) { fprintf(fp, " @%d: %p %p %p\n", next_version->ycv_version, next_version->ycv_header_chunk, next_version->ycv_first_chunk, next_version->ycv_last_chunk); next_version = next_version->ycv_prior; } fprintf(fp, " + %p %08x %08x %08llx\n", chunk, chunk->ycc_chunk_id, chunk->ycc_seq_number, chunk->ycc_offset); chunk = chunk->ycc_prev; } } static void yaffscache_objects_dump(FILE *fp, YAFFSFS_INFO *yfs) { YaffsCacheObject *obj; for(obj = yfs->cache_objects; obj != NULL; obj = obj->yco_next) yaffscache_object_dump(fp, obj); } static void yaffscache_objects_stats(YAFFSFS_INFO *yfs, unsigned int *obj_count, uint32_t *obj_first, uint32_t *obj_last, uint32_t *version_count, uint32_t *version_first, uint32_t *version_last) { YaffsCacheObject *obj; YaffsCacheVersion *ver; /* deleted and unlinked special objects don't have headers */ *obj_count = 2; *obj_first = 0xffffffff; *obj_last = 0; *version_count = 0; *version_first = 0xffffffff; *version_last = 0; for(obj = yfs->cache_objects; obj != NULL; obj = obj->yco_next) { *obj_count += 1; if (obj->yco_obj_id < *obj_first) *obj_first = obj->yco_obj_id; if (obj->yco_obj_id > *obj_last) *obj_last = obj->yco_obj_id; for(ver = obj->yco_latest; ver != NULL; ver = ver->ycv_prior) { *version_count += 1; if (ver->ycv_seq_number < *version_first) *version_first = ver->ycv_seq_number; if (ver->ycv_seq_number > *version_last) *version_last = ver->ycv_seq_number; } } } static void yaffscache_objects_free(YAFFSFS_INFO *yfs) { if((yfs != NULL) && (yfs->cache_objects != NULL)){ YaffsCacheObject *obj = yfs->cache_objects; while(obj != NULL) { YaffsCacheObject *to_free = obj; YaffsCacheVersion *ver = obj->yco_latest; while(ver != NULL) { YaffsCacheVersion *v_to_free = ver; ver = ver->ycv_prior; free(v_to_free); } obj = obj->yco_next; free(to_free); } } } static void yaffscache_chunks_free(YAFFSFS_INFO *yfs) { if((yfs != NULL) && (yfs->chunkMap != NULL)){ // Free the YaffsCacheChunks in each ChunkGroup std::map::iterator iter; for( iter = yfs->chunkMap->begin(); iter != yfs->chunkMap->end(); ++iter ) { YaffsCacheChunk *chunk = yfs->chunkMap->operator[](iter->first).cache_chunks_head; while(chunk != NULL) { YaffsCacheChunk *to_free = chunk; chunk = chunk->ycc_next; free(to_free); } } // 
Free the map yfs->chunkMap->clear(); delete yfs->chunkMap; } } /* * Parsing and helper functions * * */ /* Function to parse config file * * @param img_info Image info for this image * @param map Stores values from config file indexed on parameter name * @returns YAFFS_CONFIG_STATUS One of YAFFS_CONFIG_OK, YAFFS_CONFIG_FILE_NOT_FOUND, or YAFFS_CONFIG_ERROR */ static YAFFS_CONFIG_STATUS yaffs_load_config_file(TSK_IMG_INFO * a_img_info, std::map & results){ const TSK_TCHAR ** image_names; int num_imgs; size_t config_file_name_len; TSK_TCHAR * config_file_name; FILE* config_file; char buf[1001]; // Get the image name(s) image_names = tsk_img_get_names(a_img_info, &num_imgs); if(num_imgs < 1){ return YAFFS_CONFIG_ERROR; } // Construct the name of the config file from the first image name config_file_name_len = TSTRLEN(image_names[0]); config_file_name_len += TSTRLEN(YAFFS_CONFIG_FILE_SUFFIX); config_file_name = (TSK_TCHAR *) tsk_malloc(sizeof(TSK_TCHAR) * (config_file_name_len + 1)); TSTRNCPY(config_file_name, image_names[0], TSTRLEN(image_names[0]) + 1); TSTRNCAT(config_file_name, YAFFS_CONFIG_FILE_SUFFIX, TSTRLEN(YAFFS_CONFIG_FILE_SUFFIX) + 1); #ifdef TSK_WIN32 HANDLE hWin; if ((hWin = CreateFile(config_file_name, GENERIC_READ, FILE_SHARE_READ, 0, OPEN_EXISTING, 0, 0)) == INVALID_HANDLE_VALUE) { // For the moment, assume that the file just doesn't exist, which isn't an error free(config_file_name); return YAFFS_CONFIG_FILE_NOT_FOUND; } config_file = _fdopen(_open_osfhandle((intptr_t) hWin, _O_RDONLY), "r"); if (config_file == NULL) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS); tsk_error_set_errstr( "yaffs_load_config: Error converting Windows handle to C handle"); free(config_file_name); CloseHandle(hWin); return YAFFS_CONFIG_ERROR; } #else if (NULL == (config_file = fopen(config_file_name, "r"))) { free(config_file_name); return YAFFS_CONFIG_FILE_NOT_FOUND; } #endif while(fgets(buf, 1000, config_file) != NULL){ // Is it a comment? if((buf[0] == '#') || (buf[0] == ';')){ continue; } // Is there a '=' ? if(strchr(buf, '=') == NULL){ continue; } // Copy to strings while removing whitespace and converting to lower case std::string paramName(""); std::string paramVal(""); const char * paramNamePtr = strtok(buf, "="); while(*paramNamePtr != '\0'){ if(! isspace((char)(*paramNamePtr))){ paramName += tolower((char)(*paramNamePtr)); } paramNamePtr++; } const char * paramValPtr = strtok(NULL, "="); while(*paramValPtr != '\0'){ if(! isspace(*paramValPtr)){ paramVal += tolower((char)(*paramValPtr)); } paramValPtr++; } // Make sure this parameter is not already in the map if(results.find(paramName) != results.end()){ // Duplicate parameter - return an error tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS); tsk_error_set_errstr( "yaffs_load_config: Duplicate parameter name in config file (\"%s\"). %s", paramName.c_str(), YAFFS_HELP_MESSAGE); fclose(config_file); free(config_file_name); return YAFFS_CONFIG_ERROR; } // Add this entry to the map results[paramName] = paramVal; } fclose(config_file); free(config_file_name); return YAFFS_CONFIG_OK; } /* * Helper function for yaffs_validate_config * Tests that a string consists only of digits and has at least one digit * (Can modify later if we want negative fields to be valid) * * @param numStr String to test * @returns 1 on error, 0 on success */ static int yaffs_validate_integer_field(std::string numStr){ unsigned int i; // Test if empty if(numStr.length() == 0){ return 1; } // Test each character for(i = 0;i < numStr.length();i++){ if(! 
isdigit(numStr[i])){ return 1; } } return 0; } /* * Function to validate the contents of the config file * Currently testing: * All YAFFS_CONFIG fields should be integers (if they exist) * Either need all three of YAFFS_CONFIG_SEQ_NUM_STR, YAFFS_CONFIG_OBJ_ID_STR, YAFFS_CONFIG_CHUNK_ID_STR * or none of them * * @param paramMap Holds mapping of parameter name to parameter value * @returns 1 on error (invalid parameters), 0 on success */ static int yaffs_validate_config_file(std::map & paramMap){ int offset_field_count; // Make a list of all fields to test std::set integerParams; integerParams.insert(YAFFS_CONFIG_SEQ_NUM_STR); integerParams.insert(YAFFS_CONFIG_OBJ_ID_STR); integerParams.insert(YAFFS_CONFIG_CHUNK_ID_STR); integerParams.insert(YAFFS_CONFIG_PAGE_SIZE_STR); integerParams.insert(YAFFS_CONFIG_SPARE_SIZE_STR); integerParams.insert(YAFFS_CONFIG_CHUNKS_PER_BLOCK_STR); // If the parameter is set, verify that the value is an int for(std::set::iterator it = integerParams.begin();it != integerParams.end();it++){ if((paramMap.find(*it) != paramMap.end()) && (0 != yaffs_validate_integer_field(paramMap[*it]))){ tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS); tsk_error_set_errstr( "yaffs_validate_config_file: Empty or non-integer value for Yaffs2 parameter \"%s\". %s", (*it).c_str(), YAFFS_HELP_MESSAGE); return 1; } } // Check that we have all three spare offset fields, or none of the three offset_field_count = 0; if(paramMap.find(YAFFS_CONFIG_SEQ_NUM_STR) != paramMap.end()){ offset_field_count++; } if(paramMap.find(YAFFS_CONFIG_OBJ_ID_STR) != paramMap.end()){ offset_field_count++; } if(paramMap.find(YAFFS_CONFIG_CHUNK_ID_STR) != paramMap.end()){ offset_field_count++; } if(! ((offset_field_count == 0) || (offset_field_count == 3))){ tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS); tsk_error_set_errstr( "yaffs_validate_config_file: Require either all three spare offset fields or none. %s", YAFFS_HELP_MESSAGE); return 1; } // Make sure there aren't any unexpected fields present for(std::map::iterator it = paramMap.begin(); it != paramMap.end();it++){ if(integerParams.find(it->first) == integerParams.end()){ tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS); tsk_error_set_errstr( "yaffs_validate_config_file: Found unexpected field in config file (\"%s\"). %s", it->first.c_str(), YAFFS_HELP_MESSAGE); return 1; } } return 0; } /* * Function to attempt to determine the layout of the yaffs spare area. * Results of the analysis (if the format could be determined) will be stored * in yfs variables. * * @param yfs File system being anlayzed * @param maxBlocksToTest Number of block groups to scan to detect spare area or 0 if there is no limit. * @returns TSK_ERR if format could not be detected and TSK_OK if it could be. 
*/ static TSK_RETVAL_ENUM yaffs_initialize_spare_format(YAFFSFS_INFO * yfs, TSK_OFF_T maxBlocksToTest){ // Testing parameters - can all be changed unsigned int blocksToTest = 10; // Number of blocks (64 chunks) to test unsigned int chunksToTest = 10; // Number of chunks to test in each block unsigned int minChunksRead = 10; // Minimum number of chunks we require to run the test (we might not get the full number we want to test for a very small file) unsigned int chunkSize = yfs->page_size + yfs->spare_size; unsigned int blockSize = yfs->chunks_per_block * chunkSize; TSK_FS_INFO *fs = &(yfs->fs_info); unsigned char *spareBuffer; unsigned int blockIndex; unsigned int chunkIndex; unsigned int currentOffset; unsigned char * allSpares; unsigned int allSparesLength; TSK_OFF_T maxBlocks; bool skipBlock; int goodOffset; unsigned int nGoodSpares; unsigned int nBlocksTested; int okOffsetFound = 0; // Used as a flag for if we've found an offset that sort of works but doesn't seem great int goodOffsetFound = 0; // Flag to mark that we've found an offset that also passed secondary testing int bestOffset = 0; bool allSameByte; // Used in test that the spare area fields not be one repeated byte unsigned int i; int thisChunkBase; int lastChunkBase; // The spare area needs to be at least 16 bytes to run the test if(yfs->spare_size < 16){ if(tsk_verbose && (! yfs->autoDetect)){ tsk_fprintf(stderr, "yaffs_initialize_spare_format failed - given spare size (%d) is not large enough to contain needed fields\n", yfs->spare_size); } return TSK_ERR; } if ((spareBuffer = (unsigned char*) tsk_malloc(yfs->spare_size)) == NULL) { return TSK_ERR; } allSparesLength = yfs->spare_size * blocksToTest * chunksToTest; if ((allSpares = (unsigned char*) tsk_malloc(allSparesLength)) == NULL) { free(spareBuffer); return TSK_ERR; } // Initialize the pointers to one of the configurations we've seen (thought these defaults should not get used) yfs->spare_seq_offset = 0; yfs->spare_obj_id_offset = 4; yfs->spare_chunk_id_offset = 8; yfs->spare_nbytes_offset = 12; // Assume the data we want is 16 consecutive bytes in the order: // seq num, obj id, chunk id, byte count // (not sure we're guaranteed this but we wouldn't be able to deal with the alternative anyway) // Seq num is the important one. This number is constant in each block (block = 64 chunks), meaning // all chunks in a block will share the same sequence number. The YAFFS2 descriptions would seem to // indicate it should be different for each block, but this doesn't seem to always be the case. // In particular we frequently see the 0x1000 seq number used over multiple blocks, but this isn't the only // observed exception. // Calculate the number of blocks in the image maxBlocks = yfs->fs_info.img_info->size / (yfs->chunks_per_block * chunkSize); // If maxBlocksToTest = 0 (unlimited), set it to the total number of blocks // Also reduce the number of blocks to test if it is larger than the total number of blocks if ((maxBlocksToTest == 0) || (maxBlocksToTest > maxBlocks)){ maxBlocksToTest = maxBlocks; } nGoodSpares = 0; nBlocksTested = 0; for (TSK_OFF_T blockIndex = 0;blockIndex < maxBlocksToTest;blockIndex++){ // Read the last spare area that we want to test first TSK_OFF_T offset = (TSK_OFF_T)blockIndex * blockSize + (chunksToTest - 1) * chunkSize + yfs->page_size; ssize_t cnt = tsk_img_read(fs->img_info, offset, (char *) spareBuffer, yfs->spare_size); if ((cnt < 0) || ((unsigned int)cnt < yfs->spare_size)) { break; } // Is the spare all 0xff / 0x00? 
// If not, we know we should have all allocated chunks since YAFFS2 writes sequentially in a block // - can't have an unallocated chunk followed by an allocated one // We occasionally see almost all null spare area with a few 0xff, which is not a valid spare. skipBlock = true; for (i = 0;i < yfs->spare_size;i++){ if((spareBuffer[i] != 0xff) && (spareBuffer[i] != 0x00)){ skipBlock = false; break; } } if (skipBlock){ continue; } // If this block is potentially valid (i.e., the spare contains something besides 0x00 and 0xff), copy all the spares into // the big array of extracted spare areas // Copy this spare area nGoodSpares++; for (i = 0;i < yfs->spare_size;i++){ allSpares[nBlocksTested * yfs->spare_size * chunksToTest + (chunksToTest - 1) * yfs->spare_size + i] = spareBuffer[i]; } // Copy all earlier spare areas in the block for (chunkIndex = 0;chunkIndex < chunksToTest - 1;chunkIndex++){ offset = blockIndex * blockSize + chunkIndex * chunkSize + yfs->page_size; cnt = tsk_img_read(fs->img_info, offset, (char *) spareBuffer, yfs->spare_size); if ((cnt < 0) || ((unsigned int)cnt < yfs->spare_size)) { // We really shouldn't run out of data here since we already read in the furthest entry break; // Break out of chunksToTest loop } nGoodSpares++; for(i = 0;i < yfs->spare_size;i++){ allSpares[nBlocksTested * yfs->spare_size * chunksToTest + chunkIndex * yfs->spare_size + i] = spareBuffer[i]; } } // Record that we've found a potentially valid block nBlocksTested++; // If we've found enough potentailly valid blocks, break if (nBlocksTested >= blocksToTest){ break; } } // Make sure we read enough data to reasonably perform the testing if (nGoodSpares < minChunksRead){ if (tsk_verbose && (! yfs->autoDetect)){ tsk_fprintf(stderr, "yaffs_initialize_spare_format failed - not enough potentially valid data could be read\n"); } free(spareBuffer); free(allSpares); return TSK_ERR; } if (tsk_verbose && (! yfs->autoDetect)){ tsk_fprintf(stderr, "yaffs_initialize_spare_format: Testing potential offsets for the sequence number in the spare area\n"); } // Print out the collected spare areas if we're in verbose mode if(tsk_verbose && (! yfs->autoDetect)){ for(blockIndex = 0;blockIndex < nBlocksTested;blockIndex++){ for(chunkIndex = 0;chunkIndex < chunksToTest;chunkIndex++){ for(i = 0;i < yfs->spare_size;i++){ fprintf(stderr, "%02x", allSpares[blockIndex * yfs->spare_size * chunksToTest + chunkIndex * yfs->spare_size + i]); } fprintf(stderr, "\n"); } } } // Test all indices into the spare area (that leave enough space for all 16 bytes) for(currentOffset = 0;currentOffset <= yfs->spare_size - 16;currentOffset++){ goodOffset = 1; for(blockIndex = 0;blockIndex < nBlocksTested;blockIndex++){ for(chunkIndex = 1;chunkIndex < chunksToTest;chunkIndex++){ lastChunkBase = blockIndex * yfs->spare_size * chunksToTest + (chunkIndex - 1) * yfs->spare_size; thisChunkBase = lastChunkBase + yfs->spare_size; // Seq num should not be all 0xff (we tested earlier that the chunk has been initialized) if((0xff == allSpares[thisChunkBase + currentOffset]) && (0xff == allSpares[thisChunkBase + currentOffset + 1]) && (0xff == allSpares[thisChunkBase + currentOffset + 2]) && (0xff == allSpares[thisChunkBase + currentOffset + 3])){ if(tsk_verbose && (! 
yfs->autoDetect)){ tsk_fprintf(stderr, "yaffs_initialize_spare_format: Elimimating offset %d - invalid sequence number 0xffffffff\n", currentOffset); } goodOffset = 0; break; } // Seq num should not be zero if((0 == allSpares[thisChunkBase + currentOffset]) && (0 == allSpares[thisChunkBase + currentOffset + 1]) && (0 == allSpares[thisChunkBase + currentOffset + 2]) && (0 == allSpares[thisChunkBase + currentOffset + 3])){ if(tsk_verbose && (! yfs->autoDetect)){ tsk_fprintf(stderr, "yaffs_initialize_spare_format: Elimimating offset %d - invalid sequence number 0\n", currentOffset); } goodOffset = 0; break; } // Seq num should match the previous one in the block if((allSpares[lastChunkBase + currentOffset] != allSpares[thisChunkBase + currentOffset]) || (allSpares[lastChunkBase + currentOffset + 1] != allSpares[thisChunkBase + currentOffset + 1]) || (allSpares[lastChunkBase + currentOffset + 2] != allSpares[thisChunkBase + currentOffset + 2]) || (allSpares[lastChunkBase + currentOffset + 3] != allSpares[thisChunkBase + currentOffset + 3])){ if(tsk_verbose && (! yfs->autoDetect)){ tsk_fprintf(stderr, "yaffs_initialize_spare_format: Elimimating offset %d - did not match previous chunk sequence number\n", currentOffset); } goodOffset = 0; break; } // Obj id should not be zero if((0 == allSpares[thisChunkBase + currentOffset + 4]) && (0 == allSpares[thisChunkBase + currentOffset + 5]) && (0 == allSpares[thisChunkBase + currentOffset + 6]) && (0 == allSpares[thisChunkBase + currentOffset + 7])){ if(tsk_verbose && (! yfs->autoDetect)){ tsk_fprintf(stderr, "yaffs_initialize_spare_format: Elimimating offset %d - invalid object id 0\n", currentOffset); } goodOffset = 0; break; } // All 16 bytes should not be the same // (It is theoretically possible that this could be valid, but incredibly unlikely) allSameByte = true; for(i = 1;i < 16;i++){ if(allSpares[thisChunkBase + currentOffset] != allSpares[thisChunkBase + currentOffset + i]){ allSameByte = false; break; } } if(allSameByte){ if(tsk_verbose && (! yfs->autoDetect)){ tsk_fprintf(stderr, "yaffs_initialize_spare_format: Elimimating offset %d - all repeated bytes\n", currentOffset); } goodOffset = 0; break; } } // End of loop over chunks if(!goodOffset){ // Break out of loop over blocks break; } } if(goodOffset){ // Note that we've found an offset that is at least promising if((! goodOffsetFound) && (! okOffsetFound)){ bestOffset = currentOffset; } okOffsetFound = 1; if(tsk_verbose && (! yfs->autoDetect)){ tsk_fprintf(stderr, "yaffs_initialize_spare_format: Found potential spare offsets: %d (sequence number), %d (object id), %d (chunk id), %d (n bytes)\n", currentOffset, currentOffset+4, currentOffset+8, currentOffset+12); } // Now do some more tests // Really need some more real-world test data to do this right. int possibleError = 0; // We probably don't want the first byte to always be 0xff int firstByteFF = 1; for(blockIndex = 0;blockIndex < nBlocksTested;blockIndex++){ for(chunkIndex = 1;chunkIndex < chunksToTest;chunkIndex++){ if(allSpares[blockIndex * yfs->spare_size * chunksToTest + chunkIndex * yfs->spare_size + currentOffset] != 0xff){ firstByteFF = 0; } } } if(firstByteFF){ if(tsk_verbose && (! yfs->autoDetect)){ tsk_fprintf(stderr, "yaffs_initialize_spare_format: Previous data starts with all 0xff bytes. Looking for better offsets.\n"); } possibleError = 1; } if(! possibleError){ // If we already have a good offset, print this one out but don't record it if(! 
goodOffsetFound){ goodOffsetFound = 1; bestOffset = currentOffset; // Offset passed additional testing and we haven't seen an earlier good one, so go ahead and use it if(tsk_verbose && (! yfs->autoDetect)){ tsk_fprintf(stderr, "yaffs_initialize_spare_format: Previous offsets appear good - will use as final offsets\n"); } } else{ // Keep using the old one if(tsk_verbose && (! yfs->autoDetect)){ tsk_fprintf(stderr, "yaffs_initialize_spare_format: Previous offsets appear good but staying with earlier valid ones\n"); } } } } } free(spareBuffer); free(allSpares); if(okOffsetFound || goodOffsetFound){ // Record everything yfs->spare_seq_offset = bestOffset; yfs->spare_obj_id_offset = bestOffset + 4; yfs->spare_chunk_id_offset = bestOffset + 8; yfs->spare_nbytes_offset = bestOffset + 12; if(tsk_verbose && (! yfs->autoDetect)){ tsk_fprintf(stderr, "yaffs_initialize_spare_format: Final offsets: %d (sequence number), %d (object id), %d (chunk id), %d (n bytes)\n", bestOffset, bestOffset+4, bestOffset+8, bestOffset+12); tsk_fprintf(stderr, "If these do not seem valid: %s\n", YAFFS_HELP_MESSAGE); } return TSK_OK; } else{ return TSK_ERR; } } /** * yaffsfs_read_header( ... ) * */ static uint8_t yaffsfs_read_header(YAFFSFS_INFO *yfs, YaffsHeader ** header, TSK_OFF_T offset) { unsigned char *hdr; ssize_t cnt; YaffsHeader *head; TSK_FS_INFO *fs = &(yfs->fs_info); if ((hdr = (unsigned char*) tsk_malloc(yfs->page_size)) == NULL) { return 1; } cnt = tsk_img_read(fs->img_info, offset, (char *) hdr, yfs->page_size); if ((cnt < 0) || ((unsigned int)cnt < yfs->page_size)) { free(hdr); return 1; } if ((head = (YaffsHeader*) tsk_malloc( sizeof(YaffsHeader))) == NULL) { free(hdr); return 1; } memcpy(&head->obj_type, hdr, 4); memcpy(&head->parent_id, &hdr[4], 4); memcpy(head->name, (char*) &hdr[0xA], YAFFS_HEADER_NAME_LENGTH); memcpy(&head->file_mode, &hdr[0x10C], 4); memcpy(&head->user_id, &hdr[0x110], 4); memcpy(&head->group_id, &hdr[0x114], 4); memcpy(&head->atime, &hdr[0x118], 4); memcpy(&head->mtime, &hdr[0x11C], 4); memcpy(&head->ctime, &hdr[0x120], 4); memcpy(&head->file_size, &hdr[0x124], 4); memcpy(&head->equivalent_id, &hdr[0x128], 4); memcpy(head->alias, (char*) &hdr[0x12C], YAFFS_HEADER_ALIAS_LENGTH); //memcpy(&head->rdev_mode, &hdr[0x1CC], 4); //memcpy(&head->win_ctime, &hdr[0x1D0], 8); //memcpy(&head->win_atime, &hdr[0x1D8], 8); //memcpy(&head->win_mtime, &hdr[0x1E0], 8); //memcpy(&head->inband_obj_id, &hdr[0x1E8], 4); //memcpy(&head->inband_is_shrink, &hdr[0x1EC], 4); // NOTE: This isn't in Android 3.3 kernel but is in YAFFS2 git //memcpy(&head->file_size_high, &hdr[0x1F0], 4); free(hdr); *header = head; return 0; } /** * Read and parse the YAFFS2 tags in the NAND spare bytes. * * @param info is a YAFFS fs handle * @param spare YaffsSpare object to be populated * @param offset, offset to read from * * @returns 0 on success and 1 on error */ static uint8_t yaffsfs_read_spare(YAFFSFS_INFO *yfs, YaffsSpare ** spare, TSK_OFF_T offset) { unsigned char *spr; ssize_t cnt; YaffsSpare *sp; TSK_FS_INFO *fs = &(yfs->fs_info); uint32_t seq_number; uint32_t object_id; uint32_t chunk_id; // Should have checked this by now, but just in case if((yfs->spare_seq_offset + 4 > yfs->spare_size) || (yfs->spare_obj_id_offset + 4 > yfs->spare_size) || (yfs->spare_chunk_id_offset + 4 > yfs->spare_size)){ return 1; } if ((spr = (unsigned char*) tsk_malloc(yfs->spare_size)) == NULL) { return 1; } if (yfs->spare_size < 46) { // Why is this 46? 
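/*
 * For reference, the only bytes used from the spare here are the three
 * 4-byte tag fields read further down, at the offsets chosen by
 * yaffs_initialize_spare_format() or the config file (defaults shown):
 *
 *     spr[spare_seq_offset      .. +3]   sequence number   (default 0.. 3)
 *     spr[spare_obj_id_offset   .. +3]   object id         (default 4.. 7)
 *     spr[spare_chunk_id_offset .. +3]   chunk id / flags  (default 8..11)
 *
 * so this 46-byte floor is comfortably above what the default layout needs.
 */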
tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("yaffsfs_read_spare: spare size is too small"); free(spr); return 1; } cnt = tsk_img_read(fs->img_info, offset, (char*) spr, yfs->spare_size); if ((cnt < 0) || ((unsigned int)cnt < yfs->spare_size)) { // couldn't read sufficient bytes... if (spare) { free(spr); *spare = NULL; } return 1; } if ((sp = (YaffsSpare*) tsk_malloc(sizeof(YaffsSpare))) == NULL) { return 1; } memset(sp, 0, sizeof(YaffsSpare)); /* * Complete read of the YAFFS2 spare */ // The format of the spare area should have been determined earlier memcpy(&seq_number, &spr[yfs->spare_seq_offset], 4); memcpy(&object_id, &spr[yfs->spare_obj_id_offset], 4); memcpy(&chunk_id, &spr[yfs->spare_chunk_id_offset], 4); if ((YAFFS_SPARE_FLAGS_IS_HEADER & chunk_id) != 0) { sp->seq_number = seq_number; sp->object_id = object_id & ~YAFFS_SPARE_OBJECT_TYPE_MASK; sp->chunk_id = 0; sp->has_extra_fields = 1; sp->extra_parent_id = chunk_id & YAFFS_SPARE_PARENT_ID_MASK; sp->extra_object_type = (object_id & YAFFS_SPARE_OBJECT_TYPE_MASK) >> YAFFS_SPARE_OBJECT_TYPE_SHIFT; } else { sp->seq_number = seq_number; sp->object_id = object_id; sp->chunk_id = chunk_id; sp->has_extra_fields = 0; } free(spr); *spare = sp; return 0; } static uint8_t yaffsfs_is_spare_valid(YAFFSFS_INFO *yfs, YaffsSpare *spare) { if (spare == NULL) { return 1; } if ((spare->object_id > YAFFS_MAX_OBJECT_ID) || (spare->seq_number < YAFFS_LOWEST_SEQUENCE_NUMBER) || (spare->seq_number > YAFFS_HIGHEST_SEQUENCE_NUMBER)) { return 1; } return 0; } static uint8_t yaffsfs_read_chunk(YAFFSFS_INFO *yfs, YaffsHeader **header, YaffsSpare **spare, TSK_OFF_T offset) { TSK_OFF_T header_offset = offset; TSK_OFF_T spare_offset = offset + yfs->page_size; if (header == NULL || spare == NULL) { return 1; } if (yaffsfs_read_header(yfs, header, header_offset) != 0) { return 1; } if (yaffsfs_read_spare(yfs, spare, spare_offset) != 0) { free(*header); *header = NULL; return 1; } return 0; } /** * Cycle through the entire image and populate the cache with objects as they are found. 
*/ static uint8_t yaffsfs_parse_image_load_cache(YAFFSFS_INFO * yfs) { uint8_t status = TSK_OK; uint32_t nentries = 0; YaffsSpare *spare = NULL; uint8_t tempBuf[8]; uint32_t parentID; if (yfs->cache_objects) return 0; for(TSK_OFF_T offset = 0;offset < yfs->fs_info.img_info->size;offset += yfs->page_size + yfs->spare_size){ status = yaffsfs_read_spare( yfs, &spare, offset + yfs->page_size); if (status != TSK_OK) { break; } if (yaffsfs_is_spare_valid(yfs, spare) == TSK_OK) { if((spare->has_extra_fields) || (spare->chunk_id != 0)){ yaffscache_chunk_add(yfs, offset, spare->seq_number, spare->object_id, spare->chunk_id, spare->extra_parent_id); } else{ // If we have a header block and didn't extract it already from the spare, get the parent ID from // the non-spare data if(8 == tsk_img_read(yfs->fs_info.img_info, offset, (char*) tempBuf, 8)){ memcpy(&parentID, &tempBuf[4], 4); yaffscache_chunk_add(yfs, offset, spare->seq_number, spare->object_id, spare->chunk_id, parentID); } else{ // Really shouldn't happen fprintf(stderr, "Error reading header to get parent id at offset %x\n", offset); yaffscache_chunk_add(yfs, offset, spare->seq_number, spare->object_id, spare->chunk_id, 0); } } } free(spare); spare = NULL; ++nentries; } if (tsk_verbose) fprintf(stderr, "yaffsfs_parse_image_load_cache: read %d entries\n", nentries); if (tsk_verbose) fprintf(stderr, "yaffsfs_parse_image_load_cache: started processing chunks for version cache...\n"); fflush(stderr); // At this point, we have a list of chunks sorted by obj id, seq number, and offset // This makes the list of objects in cache_objects, which link to different versions yaffscache_versions_compute(yfs); if (tsk_verbose) fprintf(stderr, "yaffsfs_parse_image_load_cache: done version cache!\n"); fflush(stderr); // Having multiple inodes point to the same object seems to cause trouble in TSK, especally in orphan file detection, // so set the version number of the final one to zero. // While we're at it, find the highest obj_id and the highest version (before resetting to zero) TSK_INUM_T orphanParentID = yfs->fs_info.last_inum; YaffsCacheObject * currObj = yfs->cache_objects; YaffsCacheVersion * currVer; while(currObj != NULL){ if(currObj->yco_obj_id > yfs->max_obj_id){ yfs->max_obj_id = currObj->yco_obj_id; } currVer = currObj->yco_latest; if(currVer->ycv_version > yfs->max_version){ yfs->max_version = currVer->ycv_version; } currVer->ycv_version = 0; currObj = currObj->yco_next; } // Use the max object id and version number to construct an upper bound on the inode TSK_INUM_T max_inum; yaffscache_obj_id_and_version_to_inode(yfs->max_obj_id, yfs->max_version, &max_inum); yfs->fs_info.last_inum = max_inum + 1; // One more for the orphan dir return TSK_OK; } // A version is allocated if: // 1. This version is pointed to by yco_latest // 2. This version didn't have a delete/unlinked header after the most recent copy of the normal header static uint8_t yaffs_is_version_allocated(YAFFSFS_INFO * yfs, TSK_INUM_T inode){ YaffsCacheObject * obj; YaffsCacheVersion * version; YaffsCacheChunk * curr; TSK_RETVAL_ENUM result = yaffscache_version_find_by_inode(yfs, inode, &version, &obj); if (result != TSK_OK) { if (tsk_verbose) tsk_fprintf(stderr, "yaffs_is_version_allocated: yaffscache_version_find_by_inode failed! (inode: %d)\n", inode); return 0; } if(obj->yco_latest == version){ curr = obj->yco_latest->ycv_header_chunk; while(curr != NULL){ // We're looking for a newer unlinked or deleted header. 
If one exists, then this object should be considered unallocated if((curr->ycc_parent_id == YAFFS_OBJECT_UNLINKED) || (curr->ycc_parent_id == YAFFS_OBJECT_DELETED)){ return 0; } curr = curr ->ycc_next; } return 1; } else{ return 0; } } /* * TSK integration * * */ static uint8_t yaffs_make_directory(YAFFSFS_INFO *yaffsfs, TSK_FS_FILE *a_fs_file, TSK_INUM_T inode, char *name) { TSK_FS_FILE *fs_file = a_fs_file; fs_file->meta->type = TSK_FS_META_TYPE_DIR; fs_file->meta->mode = (TSK_FS_META_MODE_ENUM)0; fs_file->meta->nlink = 1; if((inode == YAFFS_OBJECT_UNLINKED) || (inode == YAFFS_OBJECT_DELETED) || (inode == yaffsfs->fs_info.last_inum)){ fs_file->meta->flags = (TSK_FS_META_FLAG_ENUM)(TSK_FS_META_FLAG_USED | TSK_FS_META_FLAG_ALLOC); } else{ if(yaffs_is_version_allocated(yaffsfs, inode)){ fs_file->meta->flags = (TSK_FS_META_FLAG_ENUM)(TSK_FS_META_FLAG_USED | TSK_FS_META_FLAG_ALLOC); } else{ fs_file->meta->flags = (TSK_FS_META_FLAG_ENUM)(TSK_FS_META_FLAG_USED | TSK_FS_META_FLAG_UNALLOC); } } fs_file->meta->uid = fs_file->meta->gid = 0; fs_file->meta->mtime = fs_file->meta->atime = fs_file->meta->ctime = fs_file->meta->crtime = 0; fs_file->meta->mtime_nano = fs_file->meta->atime_nano = fs_file->meta->ctime_nano = fs_file->meta->crtime_nano = 0; if (fs_file->meta->name2 == NULL) { if ((fs_file->meta->name2 = (TSK_FS_META_NAME_LIST *) tsk_malloc(sizeof(TSK_FS_META_NAME_LIST))) == NULL) return 1; fs_file->meta->name2->next = NULL; } if (fs_file->meta->attr != NULL) { tsk_fs_attrlist_markunused(fs_file->meta->attr); } else { fs_file->meta->attr = tsk_fs_attrlist_alloc(); } strncpy(fs_file->meta->name2->name, name, TSK_FS_META_NAME_LIST_NSIZE); fs_file->meta->size = 0; fs_file->meta->attr_state = TSK_FS_META_ATTR_EMPTY; fs_file->meta->addr = inode; return 0; } static uint8_t yaffs_make_regularfile( YAFFSFS_INFO * yaffsfs, TSK_FS_FILE * a_fs_file, TSK_INUM_T inode, char * name ) { TSK_FS_FILE *fs_file = a_fs_file; fs_file->meta->type = TSK_FS_META_TYPE_REG; fs_file->meta->mode = (TSK_FS_META_MODE_ENUM)0; fs_file->meta->nlink =1; if(yaffs_is_version_allocated(yaffsfs, inode)){ fs_file->meta->flags = (TSK_FS_META_FLAG_ENUM)(TSK_FS_META_FLAG_USED | TSK_FS_META_FLAG_ALLOC); } else{ fs_file->meta->flags = (TSK_FS_META_FLAG_ENUM)(TSK_FS_META_FLAG_USED | TSK_FS_META_FLAG_UNALLOC); } fs_file->meta->uid = fs_file->meta->gid = 0; fs_file->meta->mtime = fs_file->meta->atime = fs_file->meta->ctime = fs_file->meta->crtime = 0; fs_file->meta->mtime_nano = fs_file->meta->atime_nano = fs_file->meta->ctime_nano = fs_file->meta->crtime_nano = 0; if (fs_file->meta->name2 == NULL) { if ((fs_file->meta->name2 = (TSK_FS_META_NAME_LIST *) tsk_malloc(sizeof(TSK_FS_META_NAME_LIST))) == NULL) return 1; fs_file->meta->name2->next = NULL; } if (fs_file->meta->attr != NULL) { tsk_fs_attrlist_markunused(fs_file->meta->attr); } else { fs_file->meta->attr = tsk_fs_attrlist_alloc(); } fs_file->meta->addr = inode; strncpy(fs_file->meta->name2->name, name, TSK_FS_META_NAME_LIST_NSIZE); fs_file->meta->size = 0; fs_file->meta->attr_state = TSK_FS_META_ATTR_EMPTY; return 0; } /** * \internal * Create YAFFS2 Deleted Object * * @ param yaffs file system * fs_file to copy file information to * return 1 on error, 0 on success */ static uint8_t yaffs_make_deleted( YAFFSFS_INFO * yaffsfs, TSK_FS_FILE * a_fs_file ) { TSK_FS_FILE *fs_file = a_fs_file; if (tsk_verbose) tsk_fprintf(stderr, "yaffs_make_deleted: Making virtual deleted node\n"); if (yaffs_make_directory(yaffsfs, fs_file, YAFFS_OBJECT_DELETED, YAFFS_OBJECT_DELETED_NAME)) return 1; 
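/*
 * A quick map of the virtual nodes built by this helper and the two that
 * follow (constants from tsk_yaffs.h; the orphan directory uses the file
 * system's last_inum):
 *
 *     YAFFS_OBJECT_UNLINKED   -> YAFFS_OBJECT_UNLINKED_NAME  dir, always ALLOC
 *     YAFFS_OBJECT_DELETED    -> YAFFS_OBJECT_DELETED_NAME   dir, always ALLOC
 *     fs_info.last_inum       -> TSK orphan directory name   dir, always ALLOC
 *
 * Ordinary inodes instead get ALLOC or UNALLOC from
 * yaffs_is_version_allocated() in yaffs_make_directory() and
 * yaffs_make_regularfile().
 */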
return 0; } /** * \internal * Create YAFFS2 Unlinked object * * @ param yaffs file system * fs_file to copy file information to * return 1 on error, 0 on success */ static uint8_t yaffs_make_unlinked( YAFFSFS_INFO * yaffsfs, TSK_FS_FILE * a_fs_file ) { TSK_FS_FILE * fs_file = a_fs_file; if (tsk_verbose) tsk_fprintf(stderr, "yaffs_make_unlinked: Making virtual unlinked node\n"); if (yaffs_make_directory(yaffsfs, fs_file, YAFFS_OBJECT_UNLINKED, YAFFS_OBJECT_UNLINKED_NAME)) return 1; return 0; } /** * \internal * Create YAFFS2 orphan object * * @ param yaffs file system * fs_file to copy file information to * return 1 on error, 0 on success */ static uint8_t yaffs_make_orphan_dir( YAFFSFS_INFO * yaffsfs, TSK_FS_FILE * a_fs_file ) { TSK_FS_FILE * fs_file = a_fs_file; TSK_FS_NAME *fs_name = tsk_fs_name_alloc(256, 0); if (fs_name == NULL) return TSK_ERR; if (tsk_verbose) tsk_fprintf(stderr, "yaffs_make_orphan_dir: Making orphan dir node\n"); if (tsk_fs_dir_make_orphan_dir_name(&(yaffsfs->fs_info), fs_name)) { tsk_fs_name_free(fs_name); return TSK_ERR; } if (yaffs_make_directory(yaffsfs, fs_file, yaffsfs->fs_info.last_inum, (char *)fs_name)){ tsk_fs_name_free(fs_name); return 1; } tsk_fs_name_free(fs_name); return 0; } /* yaffsfs_inode_lookup - lookup inode, external interface * * Returns 1 on error and 0 on success * */ static uint8_t yaffs_inode_lookup(TSK_FS_INFO *a_fs, TSK_FS_FILE * a_fs_file, TSK_INUM_T inum) { YAFFSFS_INFO *yfs = (YAFFSFS_INFO *)a_fs; YaffsCacheObject *obj; YaffsCacheVersion *version; YaffsHeader *header = NULL; YaffsSpare *spare = NULL; TSK_RETVAL_ENUM result; uint8_t type; char *real_name; if (a_fs_file == NULL) { tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("yaffsfs_inode_lookup: fs_file is NULL"); return 1; } if (a_fs_file->meta == NULL) { if ((a_fs_file->meta = tsk_fs_meta_alloc(YAFFS_FILE_CONTENT_LEN)) == NULL) return 1; } else { tsk_fs_meta_reset(a_fs_file->meta); } if (tsk_verbose) tsk_fprintf(stderr, "yaffs_inode_lookup: looking up %" PRIuINUM "\n",inum); switch(inum) { case YAFFS_OBJECT_UNLINKED: yaffs_make_unlinked(yfs, a_fs_file); return 0; case YAFFS_OBJECT_DELETED: yaffs_make_deleted(yfs, a_fs_file); return 0; } if(inum == yfs->fs_info.last_inum){ yaffs_make_orphan_dir(yfs, a_fs_file); return 0; } result = yaffscache_version_find_by_inode(yfs, inum, &version, &obj); if (result != TSK_OK) { if (tsk_verbose) tsk_fprintf(stderr, "yaffs_inode_lookup: yaffscache_version_find_by_inode failed! 
(inode = %d)\n", inum); return 1; } if(version->ycv_header_chunk == NULL){ return 1; } if (yaffsfs_read_chunk(yfs, &header, &spare, version->ycv_header_chunk->ycc_offset) != TSK_OK) { if (tsk_verbose) tsk_fprintf(stderr, "yaffs_inode_lookup: yaffsfs_read_chunk failed!\n"); return 1; } type = header->obj_type; switch(inum) { case YAFFS_OBJECT_LOSTNFOUND: real_name = YAFFS_OBJECT_LOSTNFOUND_NAME; break; case YAFFS_OBJECT_UNLINKED: real_name = YAFFS_OBJECT_UNLINKED_NAME; break; case YAFFS_OBJECT_DELETED: real_name = YAFFS_OBJECT_DELETED_NAME; break; default: real_name = header->name; break; } switch(type) { case YAFFS_TYPE_FILE: if (tsk_verbose) tsk_fprintf(stderr, "yaffs_inode_lookup: is a file\n"); yaffs_make_regularfile(yfs, a_fs_file, inum, real_name); break; case YAFFS_TYPE_DIRECTORY: if (tsk_verbose) tsk_fprintf(stderr, "yaffs_inode_lookup: is a directory\n"); yaffs_make_directory(yfs, a_fs_file, inum, real_name); break; case YAFFS_TYPE_SOFTLINK: if (tsk_verbose) tsk_fprintf(stderr, "yaffs_inode_lookup: is a symbolic link\n"); yaffs_make_regularfile(yfs, a_fs_file, inum, real_name); a_fs_file->meta->type = TSK_FS_META_TYPE_LNK; break; case YAFFS_TYPE_HARDLINK: case YAFFS_TYPE_UNKNOWN: default: if (tsk_verbose) tsk_fprintf(stderr, "yaffs_inode_lookup: is *** UNHANDLED *** (type %d, header at 0x%x)\n", type, version->ycv_header_chunk->ycc_offset); // We can still set a few things a_fs_file->meta->type = TSK_FS_META_TYPE_UNDEF; a_fs_file->meta->addr = inum; if(yaffs_is_version_allocated(yfs, inum)){ a_fs_file->meta->flags = (TSK_FS_META_FLAG_ENUM)(TSK_FS_META_FLAG_USED | TSK_FS_META_FLAG_ALLOC); } else{ a_fs_file->meta->flags = (TSK_FS_META_FLAG_ENUM)(TSK_FS_META_FLAG_USED | TSK_FS_META_FLAG_UNALLOC); } if (a_fs_file->meta->name2 == NULL) { if ((a_fs_file->meta->name2 = (TSK_FS_META_NAME_LIST *) tsk_malloc(sizeof(TSK_FS_META_NAME_LIST))) == NULL){ return 1; } a_fs_file->meta->name2->next = NULL; } strncpy(a_fs_file->meta->name2->name, real_name, TSK_FS_META_NAME_LIST_NSIZE); break; } /* Who owns this? I'm following the way FATFS does it by freeing + NULLing * this and mallocing if used. */ if (a_fs_file->meta->link != NULL) { free(a_fs_file->meta->link); a_fs_file->meta->link = NULL; } if (type != YAFFS_TYPE_HARDLINK) { a_fs_file->meta->mode = (TSK_FS_META_MODE_ENUM)header->file_mode; a_fs_file->meta->uid = header->user_id; a_fs_file->meta->gid = header->group_id; a_fs_file->meta->mtime = header->mtime; a_fs_file->meta->atime = header->atime; a_fs_file->meta->ctime = header->ctime; } if (type == YAFFS_TYPE_FILE) { a_fs_file->meta->size = header->file_size; // NOTE: This isn't in Android 3.3 kernel but is in YAFFS2 git //a_fs_file->meta->size |= ((TSK_OFF_T) header->file_size_high) << 32; } if (type == YAFFS_TYPE_HARDLINK) { // TODO: Store equivalent_id somewhere? 
*/ } if (type == YAFFS_TYPE_SOFTLINK) { a_fs_file->meta->link = (char*)tsk_malloc(YAFFS_HEADER_ALIAS_LENGTH); if (a_fs_file->meta->link == NULL) { free(header); free(spare); return 1; } memcpy(a_fs_file->meta->link, header->alias, YAFFS_HEADER_ALIAS_LENGTH); } free(header); free(spare); return 0; } /* yaffsfs_inode_walk - inode iterator * * flags used: TSK_FS_META_FLAG_USED, TSK_FS_META_FLAG_UNUSED, * TSK_FS_META_FLAG_ALLOC, TSK_FS_META_FLAG_UNALLOC, TSK_FS_META_FLAG_ORPHAN * * Return 1 on error and 0 on success */ static uint8_t yaffsfs_inode_walk(TSK_FS_INFO *fs, TSK_INUM_T start_inum, TSK_INUM_T end_inum, TSK_FS_META_FLAG_ENUM flags, TSK_FS_META_WALK_CB a_action, void *a_ptr) { YAFFSFS_INFO *yfs = (YAFFSFS_INFO *)fs; TSK_FS_FILE *fs_file; TSK_RETVAL_ENUM result; uint32_t start_obj_id; uint32_t start_ver_number; uint32_t end_obj_id; uint32_t end_ver_number; uint32_t obj_id; YaffsCacheObject *curr_obj; YaffsCacheVersion *curr_version; result = yaffscache_inode_to_obj_id_and_version(start_inum, &start_obj_id, &start_ver_number); result = yaffscache_inode_to_obj_id_and_version(end_inum, &end_obj_id, &end_ver_number); if (end_obj_id < start_obj_id) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); tsk_error_set_errstr("yaffsfs_inode_walk: end object id must be >= start object id: " "%" PRIx32 " must be >= %" PRIx32 "", end_obj_id, start_obj_id); return 1; } /* The ORPHAN flag is unsupported for YAFFS2 */ if (flags & TSK_FS_META_FLAG_ORPHAN) { if (tsk_verbose){ tsk_fprintf(stderr, "yaffsfs_inode_walk: ORPHAN flag unsupported by YAFFS2"); } } if (((flags & TSK_FS_META_FLAG_ALLOC) == 0) && ((flags & TSK_FS_META_FLAG_UNALLOC) == 0)) { flags = (TSK_FS_META_FLAG_ENUM)(flags | TSK_FS_META_FLAG_ALLOC | TSK_FS_META_FLAG_UNALLOC); } /* If neither of the USED or UNUSED flags are set, then set them * both */ if (((flags & TSK_FS_META_FLAG_USED) == 0) && ((flags & TSK_FS_META_FLAG_UNUSED) == 0)) { flags = (TSK_FS_META_FLAG_ENUM)(flags | TSK_FS_META_FLAG_USED | TSK_FS_META_FLAG_UNUSED); } if ((fs_file = tsk_fs_file_alloc(fs)) == NULL) return 1; if ((fs_file->meta = tsk_fs_meta_alloc(YAFFS_FILE_CONTENT_LEN)) == NULL) return 1; for (obj_id = start_obj_id; obj_id <= end_obj_id; obj_id++) { int retval; result = yaffscache_version_find_by_inode(yfs, obj_id, &curr_version, &curr_obj); if (result == TSK_OK) { TSK_INUM_T curr_inode; YaffsCacheVersion *version; // ALLOC, UNALLOC, or both are set at this point if (flags & TSK_FS_META_FLAG_ALLOC) { // Allocated only - just look at current version if (yaffscache_obj_id_and_version_to_inode(obj_id, curr_obj->yco_latest->ycv_version, &curr_inode) != TSK_OK) { tsk_fs_file_close(fs_file); return 1; } // It's possible for the current version to be unallocated if the last header was a deleted or unlinked header if(yaffs_is_version_allocated(yfs, curr_inode)){ if (yaffs_inode_lookup(fs, fs_file, curr_inode) != TSK_OK) { tsk_fs_file_close(fs_file); return 1; } retval = a_action(fs_file, a_ptr); if (retval == TSK_WALK_STOP) { tsk_fs_file_close(fs_file); return 0; } else if (retval == TSK_WALK_ERROR) { tsk_fs_file_close(fs_file); return 1; } } } if (flags & TSK_FS_META_FLAG_UNALLOC){ for (version = curr_obj->yco_latest; version != NULL; version = version->ycv_prior) { if (yaffscache_obj_id_and_version_to_inode(obj_id, version->ycv_version, &curr_inode) != TSK_OK) { tsk_fs_file_close(fs_file); return 1; } if(! 
yaffs_is_version_allocated(yfs, curr_inode)){ if (yaffs_inode_lookup(fs, fs_file, curr_inode) != TSK_OK) { tsk_fs_file_close(fs_file); return 1; } retval = a_action(fs_file, a_ptr); if (retval == TSK_WALK_STOP) { tsk_fs_file_close(fs_file); return 0; } else if (retval == TSK_WALK_ERROR) { tsk_fs_file_close(fs_file); return 1; } } } } curr_obj = curr_obj->yco_next; } } /* * Cleanup. */ tsk_fs_file_close(fs_file); return 0; } static TSK_FS_BLOCK_FLAG_ENUM yaffsfs_block_getflags(TSK_FS_INFO *fs, TSK_DADDR_T a_addr) { YAFFSFS_INFO *yfs = (YAFFSFS_INFO *)fs; TSK_FS_BLOCK_FLAG_ENUM flags = TSK_FS_BLOCK_FLAG_UNUSED; TSK_OFF_T offset = (a_addr * (fs->block_pre_size + fs->block_size + fs->block_post_size)) + yfs->page_size; YaffsSpare *spare = NULL; YaffsHeader *header = NULL; if (yaffsfs_read_spare(yfs, &spare, offset) != TSK_OK) { /* NOTE: Uh, how do we signal error? */ return flags; } if (yaffsfs_is_spare_valid(yfs, spare) == TSK_OK) { /* XXX: Do we count blocks of older versions unallocated? * If so, we need a smarter way to do this :/ * * Walk the object from this block and see if this * block is used in the latest version. Could pre- * calculate this at cache time as well. */ if (spare->chunk_id == 0) { flags = (TSK_FS_BLOCK_FLAG_ENUM)(flags | TSK_FS_BLOCK_FLAG_META); } else { flags = (TSK_FS_BLOCK_FLAG_ENUM)(flags | TSK_FS_BLOCK_FLAG_CONT); } // Have obj id and offset // 1. Is the current version of this object allocated? // 2. If this is a header, is it the header of the current version? // 3. Is the chunk id too big given the current header? // 4. Is there a more recent version of this chunk id? YaffsCacheObject * obj = NULL; yaffscache_object_find(yfs, spare->object_id, &obj); // The result really shouldn't be NULL since we loaded every chunk if(obj != NULL){ if(! yaffs_is_version_allocated(yfs, spare->object_id)){ // If the current version isn't allocated, then no chunks in it are flags = (TSK_FS_BLOCK_FLAG_ENUM)(flags | TSK_FS_BLOCK_FLAG_UNALLOC); } else if (obj->yco_latest == NULL || obj->yco_latest->ycv_header_chunk == NULL) { flags = (TSK_FS_BLOCK_FLAG_ENUM)(flags | TSK_FS_BLOCK_FLAG_UNALLOC); } else if(spare->chunk_id == 0){ if(obj->yco_latest->ycv_header_chunk->ycc_offset == offset - yfs->page_size){ // Have header chunk and it's the most recent header chunk flags = (TSK_FS_BLOCK_FLAG_ENUM)(flags | TSK_FS_BLOCK_FLAG_ALLOC); } else{ // Have header chunk but isn't the most recent flags = (TSK_FS_BLOCK_FLAG_ENUM)(flags | TSK_FS_BLOCK_FLAG_UNALLOC); } } else{ // Read in the full header yaffsfs_read_header(yfs, &header, obj->yco_latest->ycv_header_chunk->ycc_offset); // chunk_id is 1-based, so for example chunk id 2 would be too big for a file // 500 bytes long if(header->file_size <= ((spare->chunk_id - 1) * (fs->block_size))){ flags = (TSK_FS_BLOCK_FLAG_ENUM)(flags | TSK_FS_BLOCK_FLAG_UNALLOC); } else{ // Since at this point we know there should be a chunk with this chunk id in the file, if // this is the most recent version of the chunk assume it's part of the current version of the object. YaffsCacheChunk * curr = obj->yco_latest->ycv_last_chunk; while(curr != NULL){ // curr should really never make it to the beginning of the list // Did we find our chunk? if(curr->ycc_offset == offset - yfs->page_size){ flags = (TSK_FS_BLOCK_FLAG_ENUM)(flags | TSK_FS_BLOCK_FLAG_ALLOC); break; } // Did we find a different chunk with our chunk id? 
if(curr->ycc_chunk_id == spare->chunk_id){ flags = (TSK_FS_BLOCK_FLAG_ENUM)(flags | TSK_FS_BLOCK_FLAG_UNALLOC); break; } curr = curr->ycc_prev; } } } } } else { flags = (TSK_FS_BLOCK_FLAG_ENUM)(flags | TSK_FS_BLOCK_FLAG_UNUSED | TSK_FS_BLOCK_FLAG_UNALLOC); } free(spare); free(header); return flags; } /* yaffsfs_block_walk - block iterator * * flags: TSK_FS_BLOCK_FLAG_ALLOC, TSK_FS_BLOCK_FLAG_UNALLOC, TSK_FS_BLOCK_FLAG_CONT, * TSK_FS_BLOCK_FLAG_META * * Return 1 on error and 0 on success */ static uint8_t yaffsfs_block_walk(TSK_FS_INFO *a_fs, TSK_DADDR_T a_start_blk, TSK_DADDR_T a_end_blk, TSK_FS_BLOCK_WALK_FLAG_ENUM a_flags, TSK_FS_BLOCK_WALK_CB a_action, void *a_ptr) { TSK_FS_BLOCK *fs_block; TSK_DADDR_T addr; // clean up any error messages that are lying around tsk_error_reset(); /* * Sanity checks. */ if (a_start_blk < a_fs->first_block || a_start_blk > a_fs->last_block) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); tsk_error_set_errstr("yaffsfs_block_walk: start block: %" PRIuDADDR, a_start_blk); return 1; } if (a_end_blk < a_fs->first_block || a_end_blk > a_fs->last_block || a_end_blk < a_start_blk) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); tsk_error_set_errstr("yaffsfs_block_walk: end block: %" PRIuDADDR , a_end_blk); return 1; } /* Sanity check on a_flags -- make sure at least one ALLOC is set */ if (((a_flags & TSK_FS_BLOCK_WALK_FLAG_ALLOC) == 0) && ((a_flags & TSK_FS_BLOCK_WALK_FLAG_UNALLOC) == 0)) { a_flags = (TSK_FS_BLOCK_WALK_FLAG_ENUM) (a_flags | TSK_FS_BLOCK_WALK_FLAG_ALLOC | TSK_FS_BLOCK_WALK_FLAG_UNALLOC); } if (((a_flags & TSK_FS_BLOCK_WALK_FLAG_META) == 0) && ((a_flags & TSK_FS_BLOCK_WALK_FLAG_CONT) == 0)) { a_flags = (TSK_FS_BLOCK_WALK_FLAG_ENUM) (a_flags | TSK_FS_BLOCK_WALK_FLAG_CONT | TSK_FS_BLOCK_WALK_FLAG_META); } if ((fs_block = tsk_fs_block_alloc(a_fs)) == NULL) { return 1; } for (addr = a_start_blk; addr <= a_end_blk; addr++) { int retval; int myflags; myflags = yaffsfs_block_getflags(a_fs, addr); // test if we should call the callback with this one if ((myflags & TSK_FS_BLOCK_FLAG_META) && (!(a_flags & TSK_FS_BLOCK_WALK_FLAG_META))) continue; else if ((myflags & TSK_FS_BLOCK_FLAG_CONT) && (!(a_flags & TSK_FS_BLOCK_WALK_FLAG_CONT))) continue; else if ((myflags & TSK_FS_BLOCK_FLAG_ALLOC) && (!(a_flags & TSK_FS_BLOCK_WALK_FLAG_ALLOC))) continue; else if ((myflags & TSK_FS_BLOCK_FLAG_UNALLOC) && (!(a_flags & TSK_FS_BLOCK_WALK_FLAG_UNALLOC))) continue; if (tsk_fs_block_get(a_fs, fs_block, addr) == NULL) { tsk_error_set_errstr2("yaffsfs_block_walk: block %" PRIuDADDR, addr); tsk_fs_block_free(fs_block); return 1; } retval = a_action(fs_block, a_ptr); if (retval == TSK_WALK_STOP) { break; } else if (retval == TSK_WALK_ERROR) { tsk_fs_block_free(fs_block); return 1; } } /* * Cleanup. */ tsk_fs_block_free(fs_block); return 0; } static uint8_t yaffsfs_fscheck(TSK_FS_INFO * fs, FILE * hFile) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNSUPFUNC); tsk_error_set_errstr("fscheck not implemented yet for YAFFS"); return 1; } /** * Print details about the file system to a file handle. 
* * @param fs File system to print details on * @param hFile File handle to print text to * * @returns 1 on error and 0 on success */ static uint8_t yaffsfs_fsstat(TSK_FS_INFO * fs, FILE * hFile) { YAFFSFS_INFO *yfs = (YAFFSFS_INFO *) fs; unsigned int obj_count, version_count; uint32_t obj_first, obj_last, version_first, version_last; // clean up any error messages that are lying around tsk_error_reset(); tsk_fprintf(hFile, "FILE SYSTEM INFORMATION\n"); tsk_fprintf(hFile, "--------------------------------------------\n"); tsk_fprintf(hFile, "File System Type: YAFFS2\n"); tsk_fprintf(hFile, "Page Size: %u\n", yfs->page_size); tsk_fprintf(hFile, "Spare Size: %u\n", yfs->spare_size); tsk_fprintf(hFile, "Spare Offsets: Sequence number: %d, Object ID: %d, Chunk ID: %d, nBytes: %d\n", yfs->spare_seq_offset, yfs->spare_obj_id_offset, yfs->spare_chunk_id_offset, yfs->spare_nbytes_offset); tsk_fprintf(hFile, "\nMETADATA INFORMATION\n"); tsk_fprintf(hFile, "--------------------------------------------\n"); yaffscache_objects_stats(yfs, &obj_count, &obj_first, &obj_last, &version_count, &version_first, &version_last); tsk_fprintf(hFile, "Number of Allocated Objects: %u\n", obj_count); tsk_fprintf(hFile, "Object Id Range: %" PRIu32 " - %" PRIu32 "\n", obj_first, obj_last); tsk_fprintf(hFile, "Number of Total Object Versions: %u\n", version_count); tsk_fprintf(hFile, "Object Version Range: %" PRIu32 " - %" PRIu32 "\n", version_first, version_last); return 0; } /************************* istat *******************************/ typedef struct { FILE *hFile; int idx; } YAFFSFS_PRINT_ADDR; /* Callback for istat to print the block addresses */ static TSK_WALK_RET_ENUM print_addr_act(YAFFSFS_INFO * fs_file, TSK_OFF_T a_off, TSK_DADDR_T addr, char *buf, size_t size, TSK_FS_BLOCK_FLAG_ENUM flags, void *a_ptr) { YAFFSFS_PRINT_ADDR *print = (YAFFSFS_PRINT_ADDR *) a_ptr; if (flags & TSK_FS_BLOCK_FLAG_CONT) { tsk_fprintf(print->hFile, "%" PRIuDADDR " ", addr); if (++(print->idx) == 8) { tsk_fprintf(print->hFile, "\n"); print->idx = 0; } } return TSK_WALK_CONT; } /** * Print details on a specific file to a file handle. * * @param fs File system file is located in * @param hFile File handle to print text to * @param inum Address of file in file system * @param numblock The number of blocks in file to force print (can go beyond file size) * @param sec_skew Clock skew in seconds to also print times in * * @returns 1 on error and 0 on success */ static uint8_t yaffsfs_istat(TSK_FS_INFO *fs, FILE * hFile, TSK_INUM_T inum, TSK_DADDR_T numblock, int32_t sec_skew) { TSK_FS_META *fs_meta; TSK_FS_FILE *fs_file; YAFFSFS_INFO *yfs = (YAFFSFS_INFO *)fs; char ls[12]; YAFFSFS_PRINT_ADDR print; char timeBuf[32]; YaffsCacheObject * obj = NULL; YaffsCacheVersion * version = NULL; YaffsHeader * header = NULL; yaffscache_version_find_by_inode(yfs, inum, &version, &obj); if ((fs_file = tsk_fs_file_open_meta(fs, NULL, inum)) == NULL) { return 1; } fs_meta = fs_file->meta; tsk_fprintf(hFile, "inode: %" PRIuINUM "\n", inum); tsk_fprintf(hFile, "%sAllocated\n", (fs_meta->flags & TSK_FS_META_FLAG_ALLOC) ? 
"" : "Not "); if (fs_meta->link) tsk_fprintf(hFile, "symbolic link to: %s\n", fs_meta->link); tsk_fprintf(hFile, "uid / gid: %" PRIuUID " / %" PRIuGID "\n", fs_meta->uid, fs_meta->gid); tsk_fs_meta_make_ls(fs_meta, ls, sizeof(ls)); tsk_fprintf(hFile, "mode: %s\n", ls); tsk_fprintf(hFile, "size: %" PRIuOFF "\n", fs_meta->size); tsk_fprintf(hFile, "num of links: %d\n", fs_meta->nlink); if(version != NULL){ yaffsfs_read_header(yfs, &header, version->ycv_header_chunk->ycc_offset); if(header != NULL){ tsk_fprintf(hFile, "Name: %s\n", header->name); } } if (sec_skew != 0) { tsk_fprintf(hFile, "\nAdjusted Inode Times:\n"); fs_meta->mtime -= sec_skew; fs_meta->atime -= sec_skew; fs_meta->ctime -= sec_skew; tsk_fprintf(hFile, "Accessed:\t%s\n", tsk_fs_time_to_str(fs_meta->atime, timeBuf)); tsk_fprintf(hFile, "File Modified:\t%s\n", tsk_fs_time_to_str(fs_meta->mtime, timeBuf)); tsk_fprintf(hFile, "Inode Modified:\t%s\n", tsk_fs_time_to_str(fs_meta->ctime, timeBuf)); fs_meta->mtime += sec_skew; fs_meta->atime += sec_skew; fs_meta->ctime += sec_skew; tsk_fprintf(hFile, "\nOriginal Inode Times:\n"); } else { tsk_fprintf(hFile, "\nInode Times:\n"); } tsk_fprintf(hFile, "Accessed:\t%s\n", tsk_fs_time_to_str(fs_meta->atime, timeBuf)); tsk_fprintf(hFile, "File Modified:\t%s\n", tsk_fs_time_to_str(fs_meta->mtime, timeBuf)); tsk_fprintf(hFile, "Inode Modified:\t%s\n", tsk_fs_time_to_str(fs_meta->ctime, timeBuf)); if(version != NULL){ tsk_fprintf(hFile, "\nHeader Chunk:\n"); tsk_fprintf(hFile, "%" PRIuDADDR "\n", (version->ycv_header_chunk->ycc_offset / (yfs->page_size + yfs->spare_size))); } if (numblock > 0) { TSK_OFF_T lower_size = numblock * fs->block_size; fs_meta->size = (lower_size < fs_meta->size)?(lower_size):(fs_meta->size); } tsk_fprintf(hFile, "\nData Chunks:\n"); print.idx = 0; print.hFile = hFile; if (tsk_fs_file_walk(fs_file, TSK_FS_FILE_WALK_FLAG_AONLY, (TSK_FS_FILE_WALK_CB) print_addr_act, (void *) &print)) { tsk_fprintf(hFile, "\nError reading file: "); tsk_error_print(hFile); tsk_error_reset(); } else if (print.idx != 0) { tsk_fprintf(hFile, "\n"); } tsk_fs_file_close(fs_file); return 0; } /* yaffsfs_close - close an yaffsfs file system */ static void yaffsfs_close(TSK_FS_INFO *fs) { if(fs != NULL){ YAFFSFS_INFO *yfs = (YAFFSFS_INFO *)fs; fs->tag = 0; // Walk and free the cache structures yaffscache_objects_free(yfs); yaffscache_chunks_free(yfs); //tsk_deinit_lock(&yaffsfs->lock); tsk_fs_free(fs); } } typedef struct _dir_open_cb_args { YAFFSFS_INFO *yfs; TSK_FS_DIR *dir; TSK_INUM_T parent_addr; } dir_open_cb_args; static TSK_RETVAL_ENUM yaffs_dir_open_meta_cb(YaffsCacheObject *obj, YaffsCacheVersion *version, void *args) { dir_open_cb_args *cb_args = (dir_open_cb_args *) args; YaffsCacheChunk *chunk = version->ycv_header_chunk; TSK_INUM_T curr_inode = 0; uint32_t obj_id = chunk->ycc_obj_id; uint32_t chunk_id = chunk->ycc_chunk_id; uint32_t vnum = version->ycv_version; YaffsHeader *header = NULL; TSK_FS_NAME * fs_name; char *file_ext; char version_string[64]; // Allow a max of 64 bytes in the version string yaffscache_obj_id_and_version_to_inode(obj_id, vnum, &curr_inode); if (chunk_id != 0) { return TSK_ERR; } if (tsk_verbose) fprintf(stderr, "dir_open_find_children_cb: %08" PRIxINUM " -> %08" PRIx32 ":%d\n", cb_args->parent_addr, obj_id, vnum); if (yaffsfs_read_header(cb_args->yfs, &header, chunk->ycc_offset) != TSK_OK) { return TSK_ERR; } if ((fs_name = tsk_fs_name_alloc(YAFFSFS_MAXNAMLEN + 64, 0)) == NULL) { free(header); return TSK_ERR; } switch (obj_id) { case YAFFS_OBJECT_LOSTNFOUND: 
strncpy(fs_name->name, YAFFS_OBJECT_LOSTNFOUND_NAME, fs_name->name_size - 64); break; case YAFFS_OBJECT_UNLINKED: strncpy(fs_name->name, YAFFS_OBJECT_UNLINKED_NAME, fs_name->name_size - 64); break; case YAFFS_OBJECT_DELETED: strncpy(fs_name->name, YAFFS_OBJECT_DELETED_NAME, fs_name->name_size - 64); break; default: strncpy(fs_name->name, header->name, fs_name->name_size - 64); break; } fs_name->name[fs_name->name_size - 65] = 0; // Only put object/version string onto unallocated versions if(! yaffs_is_version_allocated(cb_args->yfs, curr_inode)){ // Also copy the extension so that it also shows up after the version string, which allows // easier searching by file extension. Max extension length is 5 characters after the dot, // and require at least one character before the dot file_ext = strrchr(fs_name->name, '.'); if((file_ext != NULL) && (file_ext != fs_name->name) && (strlen(file_ext) < 7)){ snprintf(version_string, 64, "#%d,%d%s", obj_id, vnum, file_ext); } else{ snprintf(version_string, 64, "#%d,%d", obj_id, vnum); } strncat(fs_name->name, version_string, 64); fs_name->flags = TSK_FS_NAME_FLAG_UNALLOC; } else{ fs_name->flags = TSK_FS_NAME_FLAG_ALLOC; } fs_name->meta_addr = curr_inode; switch (header->obj_type) { case YAFFS_TYPE_FILE: fs_name->type = TSK_FS_NAME_TYPE_REG; break; case YAFFS_TYPE_DIRECTORY: fs_name->type = TSK_FS_NAME_TYPE_DIR; break; case YAFFS_TYPE_SOFTLINK: case YAFFS_TYPE_HARDLINK: fs_name->type = TSK_FS_NAME_TYPE_LNK; break; case YAFFS_TYPE_SPECIAL: fs_name->type = TSK_FS_NAME_TYPE_UNDEF; // Could be a socket break; default: if (tsk_verbose) fprintf(stderr, "yaffs_dir_open_meta_cb: unhandled object type\n"); fs_name->type = TSK_FS_NAME_TYPE_UNDEF; break; } free(header); if (tsk_fs_dir_add(cb_args->dir, fs_name)) { tsk_fs_name_free(fs_name); return TSK_ERR; } /* A copy is made in tsk_fs_dir_add, so we can free this one */ tsk_fs_name_free(fs_name); return TSK_OK; } static TSK_RETVAL_ENUM yaffsfs_dir_open_meta(TSK_FS_INFO *a_fs, TSK_FS_DIR ** a_fs_dir, TSK_INUM_T a_addr) { TSK_FS_DIR *fs_dir; TSK_FS_NAME *fs_name; YAFFSFS_INFO *yfs = (YAFFSFS_INFO *)a_fs; int should_walk_children = 0; uint32_t obj_id; uint32_t ver_number; if (a_addr < a_fs->first_inum || a_addr > a_fs->last_inum) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_WALK_RNG); tsk_error_set_errstr("yaffs_dir_open_meta: Invalid inode value: %" PRIuINUM, a_addr); return TSK_ERR; } else if (a_fs_dir == NULL) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("yaffs_dir_open_meta: NULL fs_dir argument given"); return TSK_ERR; } fs_dir = *a_fs_dir; if (fs_dir) { tsk_fs_dir_reset(fs_dir); fs_dir->addr = a_addr; } else if ((*a_fs_dir = fs_dir = tsk_fs_dir_alloc(a_fs, a_addr, 128)) == NULL) { return TSK_ERR; } if (tsk_verbose) fprintf(stderr,"yaffs_dir_open_meta: called for directory %" PRIu32 "\n", (uint32_t) a_addr); // handle the orphan directory if its contents were requested if (a_addr == TSK_FS_ORPHANDIR_INUM(a_fs)) { return tsk_fs_dir_find_orphans(a_fs, fs_dir); } if ((fs_name = tsk_fs_name_alloc(YAFFSFS_MAXNAMLEN, 0)) == NULL) { return TSK_ERR; } if ((fs_dir->fs_file = tsk_fs_file_open_meta(a_fs, NULL, a_addr)) == NULL) { tsk_error_errstr2_concat(" - yaffs_dir_open_meta"); tsk_fs_name_free(fs_name); return TSK_ERR; } // extract obj_id and ver_number from inum yaffscache_inode_to_obj_id_and_version(a_addr, &obj_id, &ver_number); // Decide if we should walk the directory structure if (obj_id == YAFFS_OBJECT_DELETED || obj_id == YAFFS_OBJECT_UNLINKED) { should_walk_children = 
1; } else { YaffsCacheObject *obj; YaffsCacheVersion *versionFound; TSK_RETVAL_ENUM result = yaffscache_version_find_by_inode(yfs, a_addr, &versionFound, &obj); if (result != TSK_OK) { if (tsk_verbose) tsk_fprintf(stderr, "yaffsfs_dir_open_meta: yaffscache_version_find_by_inode failed! (inode: %d\n", a_addr); tsk_fs_name_free(fs_name); return TSK_ERR; } /* Only attach files onto the latest version of the directory */ should_walk_children = (obj->yco_latest == versionFound); } // Search the cache for the children of this object and add them to fs_dir if (should_walk_children) { dir_open_cb_args args; args.yfs = yfs; args.dir = fs_dir; args.parent_addr = a_addr; yaffscache_find_children(yfs, a_addr, yaffs_dir_open_meta_cb, &args); } // add special entries to root directory if (obj_id == YAFFS_OBJECT_ROOT) { strncpy(fs_name->name, YAFFS_OBJECT_UNLINKED_NAME, fs_name->name_size); fs_name->meta_addr = YAFFS_OBJECT_UNLINKED; fs_name->type = TSK_FS_NAME_TYPE_DIR; fs_name->flags = TSK_FS_NAME_FLAG_ALLOC; if (tsk_fs_dir_add(fs_dir, fs_name)) { tsk_fs_name_free(fs_name); return TSK_ERR; } strncpy(fs_name->name, YAFFS_OBJECT_DELETED_NAME, fs_name->name_size); fs_name->meta_addr = YAFFS_OBJECT_DELETED; fs_name->type = TSK_FS_NAME_TYPE_DIR; fs_name->flags = TSK_FS_NAME_FLAG_ALLOC; if (tsk_fs_dir_add(fs_dir, fs_name)) { tsk_fs_name_free(fs_name); return TSK_ERR; } // orphan directory if (tsk_fs_dir_make_orphan_dir_name(a_fs, fs_name)) { tsk_fs_name_free(fs_name); return TSK_ERR; } fs_name->meta_addr = yfs->fs_info.last_inum; fs_name->type = TSK_FS_NAME_TYPE_DIR; fs_name->flags = TSK_FS_NAME_FLAG_ALLOC; if (tsk_fs_dir_add(fs_dir, fs_name)) { tsk_fs_name_free(fs_name); return TSK_ERR; } } tsk_fs_name_free(fs_name); return TSK_OK; } static TSK_FS_ATTR_TYPE_ENUM yaffsfs_get_default_attr_type(const TSK_FS_FILE * a_file) { return TSK_FS_ATTR_TYPE_DEFAULT; } static uint8_t yaffsfs_load_attrs(TSK_FS_FILE *file) { TSK_FS_ATTR *attr; TSK_FS_META *meta; TSK_FS_INFO *fs; YAFFSFS_INFO *yfs; TSK_FS_ATTR_RUN *data_run; TSK_DADDR_T file_block_count; YaffsCacheObject *obj; YaffsCacheVersion *version; TSK_RETVAL_ENUM result; TSK_LIST *chunks_seen = NULL; YaffsCacheChunk *curr; TSK_FS_ATTR_RUN *data_run_new; if (file == NULL || file->meta == NULL || file->fs_info == NULL) { tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr ("yaffsfs_load_attrs: called with NULL pointers"); return 1; } meta = file->meta; yfs = (YAFFSFS_INFO *)file->fs_info; fs = &yfs->fs_info; // see if we have already loaded the runs if ((meta->attr != NULL) && (meta->attr_state == TSK_FS_META_ATTR_STUDIED)) { return 0; } else if (meta->attr_state == TSK_FS_META_ATTR_ERROR) { return 1; } // not sure why this would ever happen, but... else if (meta->attr != NULL) { tsk_fs_attrlist_markunused(meta->attr); } else if (meta->attr == NULL) { meta->attr = tsk_fs_attrlist_alloc(); } attr = tsk_fs_attrlist_getnew(meta->attr, TSK_FS_ATTR_NONRES); if (attr == NULL) { meta->attr_state = TSK_FS_META_ATTR_ERROR; return 1; } if (meta->size == 0) { data_run = NULL; } else { /* BC: I'm not entirely sure this is needed. My guess is that * this was done instead of maintaining the head of the list of * runs. In theory, the tsk_fs_attr_add_run() method should handle * the fillers. 
*/ data_run = tsk_fs_attr_run_alloc(); if (data_run == NULL) { tsk_fs_attr_run_free(data_run); meta->attr_state = TSK_FS_META_ATTR_ERROR; return 1; } data_run->offset = 0; data_run->addr = 0; data_run->len = (meta->size + fs->block_size - 1) / fs->block_size; data_run->flags = TSK_FS_ATTR_RUN_FLAG_FILLER; } // initialize the data run if (tsk_fs_attr_set_run(file, attr, data_run, NULL, TSK_FS_ATTR_TYPE_DEFAULT, TSK_FS_ATTR_ID_DEFAULT, meta->size, meta->size, roundup(meta->size, fs->block_size), (TSK_FS_ATTR_FLAG_ENUM)0, 0)) { meta->attr_state = TSK_FS_META_ATTR_ERROR; return 1; } // If the file has size zero, return now if(meta->size == 0){ meta->attr_state = TSK_FS_META_ATTR_STUDIED; return 0; } /* Get the version for the given object. */ result = yaffscache_version_find_by_inode(yfs, meta->addr, &version, &obj); if (result != TSK_OK || version == NULL) { if (tsk_verbose) tsk_fprintf(stderr, "yaffsfs_load_attrs: yaffscache_version_find_by_inode failed!\n"); meta->attr_state = TSK_FS_META_ATTR_ERROR; return 1; } if (tsk_verbose) yaffscache_object_dump(stderr, obj); file_block_count = data_run->len; /* Cycle through the chunks for this version of this object */ curr = version->ycv_last_chunk; while (curr != NULL && curr->ycc_obj_id == obj->yco_obj_id) { if (curr->ycc_chunk_id == 0) { if (tsk_verbose) tsk_fprintf(stderr, "yaffsfs_load_attrs: skipping header chunk\n"); } else if (tsk_list_find(chunks_seen, curr->ycc_chunk_id)) { if (tsk_verbose) tsk_fprintf(stderr, "yaffsfs_load_attrs: skipping duplicate chunk\n"); } else if (curr->ycc_chunk_id > file_block_count) { if (tsk_verbose) tsk_fprintf(stderr, "yaffsfs_load_attrs: skipping chunk past end\n"); } /* We like this chunk */ else { // add it to our internal list if (tsk_list_add(&chunks_seen, curr->ycc_chunk_id)) { meta->attr_state = TSK_FS_META_ATTR_ERROR; tsk_list_free(chunks_seen); chunks_seen = NULL; return 1; } data_run_new = tsk_fs_attr_run_alloc(); if (data_run_new == NULL) { tsk_fs_attr_run_free(data_run_new); meta->attr_state = TSK_FS_META_ATTR_ERROR; return 1; } data_run_new->offset = (curr->ycc_chunk_id - 1); data_run_new->addr = curr->ycc_offset / (fs->block_pre_size + fs->block_size + fs->block_post_size); data_run_new->len = 1; data_run_new->flags = TSK_FS_ATTR_RUN_FLAG_NONE; if (tsk_verbose) tsk_fprintf(stderr, "yaffsfs_load_attrs: @@@ Chunk %d : %08x is at offset 0x%016llx\n", curr->ycc_chunk_id, curr->ycc_seq_number, curr->ycc_offset); tsk_fs_attr_add_run(fs, attr, data_run_new); } curr = curr->ycc_prev; } tsk_list_free(chunks_seen); meta->attr_state = TSK_FS_META_ATTR_STUDIED; return 0; } static uint8_t yaffsfs_jentry_walk(TSK_FS_INFO *info, int entry, TSK_FS_JENTRY_WALK_CB cb, void *fn) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNSUPFUNC); tsk_error_set_errstr("Journal support for YAFFS is not implemented"); return 1; } static uint8_t yaffsfs_jblk_walk(TSK_FS_INFO *info, TSK_DADDR_T daddr, TSK_DADDR_T daddrt, int entry, TSK_FS_JBLK_WALK_CB cb, void *fn) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNSUPFUNC); tsk_error_set_errstr("Journal support for YAFFS is not implemented"); return 1; } static uint8_t yaffsfs_jopen(TSK_FS_INFO *info, TSK_INUM_T inum) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_UNSUPFUNC); tsk_error_set_errstr("Journal support for YAFFS is not implemented"); return 1; } /** * \internal * Open part of a disk image as a Yaffs/2 file system. 
* * @param img_info Disk image to analyze * @param offset Byte offset where file system starts * @param ftype Specific type of file system * @param test Going to use this - 1 if we're doing auto-detect, 0 if not (display more verbose messages if the user specified YAFFS2) * @returns NULL on error or if data is not an Yaffs/3 file system */ TSK_FS_INFO * yaffs2_open(TSK_IMG_INFO * img_info, TSK_OFF_T offset, TSK_FS_TYPE_ENUM ftype, uint8_t test) { YAFFSFS_INFO *yaffsfs = NULL; TSK_FS_INFO *fs = NULL; const unsigned int psize = img_info->page_size; const unsigned int ssize = img_info->spare_size; YaffsHeader * first_header = NULL; TSK_FS_DIR *test_dir; std::map configParams; YAFFS_CONFIG_STATUS config_file_status; // clean up any error messages that are lying around tsk_error_reset(); if (TSK_FS_TYPE_ISYAFFS2(ftype) == 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_ARG); tsk_error_set_errstr("Invalid FS Type in yaffsfs_open"); return NULL; } if ((yaffsfs = (YAFFSFS_INFO *) tsk_fs_malloc(sizeof(YAFFSFS_INFO))) == NULL) return NULL; yaffsfs->cache_objects = NULL; yaffsfs->chunkMap = NULL; // Read config file (if it exists) config_file_status = yaffs_load_config_file(img_info, configParams); if(config_file_status == YAFFS_CONFIG_ERROR){ // tsk_error was set by yaffs_load_config goto on_error; } else if(config_file_status == YAFFS_CONFIG_OK){ // Validate the input // If it fails validation, return (tsk_error will be set up already) if(1 == yaffs_validate_config_file(configParams)){ goto on_error; } } // If we read these fields from the config file, use those values. Otherwise use the defaults if(configParams.find(YAFFS_CONFIG_PAGE_SIZE_STR) != configParams.end()){ yaffsfs->page_size = atoi(configParams[YAFFS_CONFIG_PAGE_SIZE_STR].c_str()); } else{ yaffsfs->page_size = psize == 0 ? YAFFS_DEFAULT_PAGE_SIZE : psize; } if(configParams.find(YAFFS_CONFIG_SPARE_SIZE_STR) != configParams.end()){ yaffsfs->spare_size = atoi(configParams[YAFFS_CONFIG_SPARE_SIZE_STR].c_str()); } else{ yaffsfs->spare_size = ssize == 0 ? YAFFS_DEFAULT_SPARE_SIZE : ssize; } if(configParams.find(YAFFS_CONFIG_CHUNKS_PER_BLOCK_STR) != configParams.end()){ yaffsfs->chunks_per_block = atoi(configParams[YAFFS_CONFIG_CHUNKS_PER_BLOCK_STR].c_str()); } else{ yaffsfs->chunks_per_block = 64; } // TODO: Why are 2 different memory allocation methods used in the same code? // This makes things unnecessary complex. yaffsfs->max_obj_id = 1; yaffsfs->max_version = 0; // Keep track of whether we're doing auto-detection of the file system if(test){ yaffsfs->autoDetect = 1; } else{ yaffsfs->autoDetect = 0; } fs = &(yaffsfs->fs_info); fs->tag = TSK_FS_INFO_TAG; fs->ftype = ftype; fs->flags = (TSK_FS_INFO_FLAG_ENUM)0; fs->img_info = img_info; fs->offset = offset; fs->endian = TSK_LIT_ENDIAN; // Determine the layout of the spare area // If it was specified in the config file, use those values. 
Otherwise do the auto-detection if(configParams.find(YAFFS_CONFIG_SEQ_NUM_STR) != configParams.end()){ // In the validation step, we ensured that if one of the offsets was set, we have all of them yaffsfs->spare_seq_offset = atoi(configParams[YAFFS_CONFIG_SEQ_NUM_STR].c_str()); yaffsfs->spare_obj_id_offset = atoi(configParams[YAFFS_CONFIG_OBJ_ID_STR].c_str()); yaffsfs->spare_chunk_id_offset = atoi(configParams[YAFFS_CONFIG_CHUNK_ID_STR].c_str()); // Check that the offsets are valid for the given spare area size (fields are 4 bytes long) if((yaffsfs->spare_seq_offset + 4 > yaffsfs->spare_size) || (yaffsfs->spare_obj_id_offset + 4 > yaffsfs->spare_size) || (yaffsfs->spare_chunk_id_offset + 4 > yaffsfs->spare_size)){ tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS); tsk_error_set_errstr("yaffs2_open: Offset(s) in config file too large for spare area (size %d). %s", yaffsfs->spare_size, YAFFS_HELP_MESSAGE); goto on_error; } // nBytes isn't currently used, so just set to zero yaffsfs->spare_nbytes_offset = 0; } else{ // Decide how many blocks to test. If we're not doing auto-detection, set to zero (no limit) unsigned int maxBlocksToTest; if(yaffsfs->autoDetect){ maxBlocksToTest = YAFFS_DEFAULT_MAX_TEST_BLOCKS; } else{ maxBlocksToTest = 0; } if(yaffs_initialize_spare_format(yaffsfs, maxBlocksToTest) != TSK_OK){ tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_MAGIC); tsk_error_set_errstr("not a YAFFS file system (bad spare format). %s", YAFFS_HELP_MESSAGE); if (tsk_verbose) fprintf(stderr, "yaffsfs_open: could not find valid spare area format\n%s\n", YAFFS_HELP_MESSAGE); goto on_error; } } /* * Read the first record, make sure it's a valid header... * * Used for verification and autodetection of * the FS type. */ if (yaffsfs_read_header(yaffsfs, &first_header, 0)) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_MAGIC); tsk_error_set_errstr("not a YAFFS file system (first record). %s", YAFFS_HELP_MESSAGE); if (tsk_verbose) fprintf(stderr, "yaffsfs_open: invalid first record\n%s\n", YAFFS_HELP_MESSAGE); goto on_error; } free(first_header); first_header = NULL; fs->duname = "Chunk"; /* * Calculate the meta data info */ //fs->last_inum = 0xffffffff; // Will update this as we go fs->last_inum = 0; fs->root_inum = YAFFS_OBJECT_ROOT; fs->first_inum = YAFFS_OBJECT_FIRST; //fs->inum_count = fs->last_inum; // For now this will be the last_inum - 1 (after we calculate it) /* * Calculate the block info */ fs->dev_bsize = img_info->sector_size; fs->block_size = yaffsfs->page_size; fs->block_pre_size = 0; fs->block_post_size = yaffsfs->spare_size; fs->block_count = img_info->size / (fs->block_pre_size + fs->block_size + fs->block_post_size); fs->first_block = 0; fs->last_block_act = fs->last_block = fs->block_count ? 
fs->block_count - 1 : 0; /* Set the generic function pointers */ fs->inode_walk = yaffsfs_inode_walk; fs->block_walk = yaffsfs_block_walk; fs->block_getflags = yaffsfs_block_getflags; fs->get_default_attr_type = yaffsfs_get_default_attr_type; fs->load_attrs = yaffsfs_load_attrs; fs->file_add_meta = yaffs_inode_lookup; fs->dir_open_meta = yaffsfs_dir_open_meta; fs->fsstat = yaffsfs_fsstat; fs->fscheck = yaffsfs_fscheck; fs->istat = yaffsfs_istat; fs->name_cmp = tsk_fs_unix_name_cmp; fs->close = yaffsfs_close; /* Journal */ fs->jblk_walk = yaffsfs_jblk_walk; fs->jentry_walk = yaffsfs_jentry_walk; fs->jopen = yaffsfs_jopen; /* Initialize the caches */ if (tsk_verbose) fprintf(stderr, "yaffsfs_open: building cache...\n"); /* Build cache */ /* NOTE: The only modifications to the cache happen here, during at * the open. Should be fine with no lock, even if access to the * cache is shared among threads. */ //tsk_init_lock(&yaffsfs->lock); yaffsfs->chunkMap = new std::map; yaffsfs_parse_image_load_cache(yaffsfs); if (tsk_verbose) { fprintf(stderr, "yaffsfs_open: done building cache!\n"); //yaffscache_objects_dump(yaffsfs, stderr); } // Update the number of inums now that we've read in the file system fs->inum_count = fs->last_inum - 1; test_dir = tsk_fs_dir_open_meta(fs, fs->root_inum); if (test_dir == NULL) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_FS_MAGIC); tsk_error_set_errstr("not a YAFFS file system (no root directory). %s", YAFFS_HELP_MESSAGE); if (tsk_verbose) fprintf(stderr, "yaffsfs_open: invalid file system\n%s\n", YAFFS_HELP_MESSAGE); goto on_error; } tsk_fs_dir_close(test_dir); return fs; on_error: // yaffsfs_close frees all the cache objects yaffsfs_close(fs); return NULL; } sleuthkit-4.2.0/tsk/docs/auto.dox000644 000765 000024 00000013751 12576320700 017551 0ustar00carrierstaff000000 000000 /*! \page autopage File Extraction Automation This section describes the TskAuto C++ superclass that can be used to easily build automated applications that analyze a disk image and extract files from it. \section auto_over Overview The TSK API described in the previous sections of this User's Guide allows you to manually open a volume or file system and browse its contents to look at volumes and files. While this fine grained access is useful in some situations, there are others where you simply want to rip apart a disk image and see all of the files without worrying about how you get the files. The TskAuto class was designed to help in these situations. It is a C++ superclass that knows how to analyze a disk image. All you need to do is implement the TskAuto::processFile() method, which will get called for each file. Other methods in the class can also be implemented to get more details about the data being analyzed. \section auto_basics Basics To use TskAuto, you first need to create a C++ subclass of it and implement the TskAuto::processFile() method. The instructions in \ref basic_build will provide access to this class. To use your new TskAuto-based class, you must first open the disk images. This can be done with TskAuto::openImage(). Once you are done with the object, it should be closed with TskAuto::closeImage(). These methods have been defined as virtual so that you can override them if your class needs access to the list of image names (for a report, for example). To start the analysis after the image files have been opened, you must call either TskAuto::findFilesInImg(), TskAuto::findFilesInVs(), or TskAuto::findFilesInFs(). 
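Before looking at each of those methods in detail, the following is a rough sketch of what such a subclass can look like. It is only an illustration: the class name, the command-line handling, and the assumption of a Unix-style build (where TSK_TCHAR is a plain character type) are not part of the TSK API.

\code
#include <cstdio>
#include "tsk/libtsk.h"

// Illustrative subclass: prints the path of every file and directory found.
class SampleAuto : public TskAuto {
  public:
    virtual TSK_RETVAL_ENUM processFile(TSK_FS_FILE * fs_file, const char *path) {
        // Called once for each file and directory that is found
        if ((fs_file->name != NULL) && (fs_file->name->name != NULL))
            printf("%s%s\n", path, fs_file->name->name);
        return TSK_OK;
    }
};

int main(int argc, char **argv) {
    if (argc != 2)
        return 1;

    SampleAuto autoProc;
    const TSK_TCHAR *images[] = { (const TSK_TCHAR *) argv[1] };

    // Open the disk image, walk its volumes and file systems, then clean up
    if (autoProc.openImage(1, images, TSK_IMG_TYPE_DETECT, 0))
        return 1;
    autoProc.findFilesInImg();
    autoProc.closeImage();
    return 0;
}
\endcode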
TskAuto::findFilesInImg() starts at sector 0 of the disk image and begins to analyze it. It looks for a volume system and, if it finds one, will examine the contents of each volume. If it doesn't find a volume system, it will assume that the image is of a volume and will analyze sector 0 as a file system. TskAuto::findFilesInVs() allows you to specify the sector offset where the volume system is located. It does the same analysis as TskAuto::findFilesInImg(), except that it doesn't always start in sector 0. In fact, TskAuto::findFilesInImg() simply calls TskAuto::findFilesInVs() with a starting offset of 0. TskAuto::findFilesInFs() allows you to specify the offset of a file system to process. This method will not look for a volume system, only a file system. Internally, this method is called from TskAuto::findFilesInVs() after a volume has been found. There is also a version that allows you to specify the directory to start processing from. Regardless of the method that you use to start the processing, the TskAuto::processFile() method will be called for each file and directory that is found. You can specify if this method should be called for only allocated or unallocated files using the TskAuto::setFileFilterFlags() method. You can also specify what types of volumes to process by using the TskAuto::setVolFilterFlags() method. Inside your TskAuto::processFile() method, you can read the contents of the file using either tsk_fs_file_read() or tsk_fs_file_walk(). If you want to see each attribute in a file, then you should call processAttributes() and implement the processAttribute() method, which will be called for each attribute. Error handling will be described in more detail later, but it is worth pointing out in this section that the processFile() method should set error codes and call TskAuto::registerError() when it encounters errors so that consistent error reporting is performed. \section auto_filter Filtering Results With the methods defined above, TskAuto::processFile() will get called for all files and directories in an image. This may be more files than you want, though. There are three methods that will alert you when TskAuto is about to process a new volume or file system and will allow you to skip processing that volume or file system. You can implement the TskAuto::filterVs() method to learn about the volume system before it is processed. The return value of this method can cause TskAuto to skip the volume system or stop processing the disk image entirely. You can implement the TskAuto::filterVol() method to learn about each volume before it is processed. The return value of this method can cause TskAuto to skip the volume or stop processing the disk image entirely. You can implement the TskAuto::filterFs() method to learn about each file system before it is processed. The return value of this method can cause TskAuto to skip the file system or stop processing the disk image entirely. There are also a series of protected methods in TskAuto that will help you to skip files that may not be relevant to you. For example, TskAuto::isNtfsSystemFiles() will tell you if a file is one of the NTFS file system files (such as $MFT) that you may want to skip. You can use this method in TskAuto::processFile() to skip the big NTFS files. Refer to the protected methods in TskAuto for others that are defined for this purpose (they all begin with "is"). \section auto_errors Error Handling and Recursion The goal of TskAuto is to get through the disk image and process as many files as possible.
Therefore, it will continue even if an error is encountered. The public methods will return a value that indicates whether any error occurred. As each error occurs, it is registered with TskAuto and recorded in a list. To determine how many and which errors occurred, use the TskAuto::getErrorList() method. If you encounter error scenarios while implementing TskAuto::processFile() or TskAuto::processAttribute(), then you should also send the errors to TskAuto::registerError() so that they are all recorded in a central location. \section auto_examples Examples Refer to the source code in the tools/autotools directory of the distribution for examples of TskAuto-based classes. Files in this directory all use TskAuto to do things like recover deleted files or load data into a SQLite database. Back to \ref users_guide "Table of Contents" */ sleuthkit-4.2.0/tsk/docs/base.dox000644 000765 000024 00000012015 12576320700 017503 0ustar00carrierstaff000000 000000 /*! \page basepage Base Functions This section describes some base API functions and concepts that are general to all of the other layers, such as error and Unicode handling. In addition to this documentation, there are sample programs in the samples directory in TSK that show these functions being used while processing a disk image. \section basic_encodings Encodings \subsection basic_enc_over Overview TSK supports Unicode volume or file system data and can process disk image files that contain non-Latin characters. This section describes how that is done. TSK runs on both Unix and Windows systems, and these two systems differ in how they store Unicode. There are several storage methods for Unicode characters. The UTF-16 method stores characters using either 2 or 4 bytes (though most characters can be stored in 2 bytes). UTF-8 is a method where 1 to 4 bytes are used, depending on the character. UTF-8 is backward compatible with ASCII because the ASCII characters require only 1 byte. The FAT and NTFS file systems store file names in UTF-16, while UFS and Ext2/3 store file names in UTF-8. Unix internally uses UTF-8 to store Unicode text and Windows uses UTF-16 (if UNICODE is enabled). To support running on both Unix and Windows and to support file systems from both Unix and Windows, TSK must convert between UTF-8 and UTF-16. Internally, all TSK data structures store text in UTF-8. For example, the UTF-16 file names in NTFS are converted to UTF-8. However, the Windows functions that are used to open disk images require that the file name be in UTF-16. To account for this, special data types (as described in the next section) are used to handle disk image and local file names in the local storage method. \subsection basic_enc_t Special Data Types (TSK_TCHAR) At compile-time, TSK_TCHAR is defined to be a UTF-16 wide character on a Windows system and a UTF-8 character on a Unix system. This data type is used to store the disk image file name and other command line arguments that may need to be parsed. There are various definitions that can help to process the TSK_TCHAR strings, such as TSTRLEN, which maps to the appropriate function to find the length of the string. There are also functions that can be used to convert between UTF-8 and UTF-16 (see tsk_UTF16toUTF8() and tsk_UTF8toUTF16()). Note that in some environments, the TSK_TCHAR design is difficult to work with. For example, you may have a Windows environment where you are storing all strings in UTF-8.
Some of the functions that take a TSK_TCHAR as an argument have been expanded to have versions that take a specific encoding. These are being added as needed, so send e-mail to sleuthkit-developers < at > lists < dot > sourceforge < dot > net if you find more that need to be updated. \section basic_error Error Handling The public API functions all return a value to indicate when an error occurs. More details about the error can be learned by reading the global tsk_errno value. This will be one of several predefined error codes. A description of the error can be obtained using the tsk_error_get() function, or the error can be printed to a FILE handle using tsk_error_print(). The C++ wrapper is the TskError class. \section basic_misc Miscellaneous Utilities The TSK library includes some basic utility structures and functions that can be used to manage lists and stacks. These are used internally and are part of the public API for convenience. The TSK_LIST structure is used to keep track of values that have been seen while processing. Values are added to the list using tsk_list_add(). The list can be searched using tsk_list_find() and closed using tsk_list_free(). The TSK_STACK structure is used to prevent infinite loops when recursing into directories. The stack can be created using tsk_stack_create(), and data can be pushed and popped using tsk_stack_push() and tsk_stack_pop(). To search the stack, tsk_stack_find() is used, and tsk_stack_free() is used to free the stack. When recursing into directories, the metadata address of the directory is stored on the stack when that directory is analyzed and popped off when that directory is done. \section basic_hash Hash Algorithms The TSK library includes support to calculate MD5 and SHA-1 hashes. To calculate the MD5 hash, a context must first be initialized with TSK_MD5_Init(). Data is added to the context with TSK_MD5_Update() and the hash is calculated with a call to TSK_MD5_Final(). A similar process is used for SHA-1 hashes using TSK_SHA_Init(), TSK_SHA_Update(), and TSK_SHA_Final(). \section basic_version Version To get the version number of the TSK library as a string, you can call the tsk_version_get_str() function to get a string representation. Or, you can call tsk_version_print() to print the library name and version to a FILE handle. There are also \#defines for the version as a string (in TSK_VERSION_STR) and as a number (in TSK_VERSION_NUM). Next to \ref imgpage Back to \ref users_guide "Table of Contents" */ sleuthkit-4.2.0/tsk/docs/basics.dox000644 000765 000024 00000016511 12576320700 020042 0ustar00carrierstaff000000 000000 /*! \page basicpage Library Basics This page describes some of the basic concepts of the TSK library. It is assumed that you have built and installed the TSK library based on either the instructions in the INSTALL.txt file or via a package. Refer to the files in the samples directory for examples on the topics in this and later sections. \section basic_layers Layers TSK is organized into several layers (and sub-layers). These layers represent layers of abstraction that are used with data storage. The User's Guide and API Reference are organized based on these layers. The lowest layer that TSK has is the Base Layer, which contains common programming and data structure functions that can be applied to all layers. This is where error handling, types, and convenience functions are defined. The next layer up is the Disk Image Layer, which allows disk images in various formats to be opened and processed.
This layer hides the details associated with split, compressed, and encrypted image files from the other layers. All disk images must be first opened by the Disk Image Layer functions before they can be processed by other layers. The next layer up is the Volume System Layer. This layer focuses on processing data as a volume system, such as DOS partition tables or BSD disk label structures. If the disk image being analyzed has a volume system on it, then this set of functions will tell you starting and ending location of its partitions. The next layer up is the File System Layer. This layer focuses on processing data as a file system, such as FAT or NTFS. File systems can be located in a partition or can be the full disk image file. These set of functions allow you to read arbitrary data from the file system, list files, and open files. There are several sub-layers in the File System Layer and they are described in \ref fs_layers. There is an independent Hash Database Layer that handles hash databases, such as NSRL and md5sum outputs. This API allows you to create an index of hashes and perform fast lookups of them. These functions do not depend on the Disk Image, Volume System, or File System Layers. There is also an Automation Layer that integrates all of the previous layers in an automated fashion. It defines a C++ class named TskAuto that hides a lot of the details about analyzing file and volume systems. A basic diagram of the relationship between these layers is shown here. Note that if a disk image file does not have a volume system, then we can use the File System Layer functions directly on it.

 +==========================================================+
 |                                                          | 
 |                       +================+                 |
 |                       |      Base      |                 |
 |                       +================+                 |
 |                         /            \                   |
 |                        /              \                  |
 |              +==============+      +==================+  |
 |              |  Disk Image  |      |   Hash Database  |  |
 |              +==============+      +==================+  |
 |                 /       |                                |
 |                /        |                                |
 |   +===============+     |                                |
 |   | Volume System |     |                                |
 |   +===============+     |                                |
 |               \         |                                |
 |                \        |                                |
 |               +===============+                          |
 |               |  File System  |                          |
 |               +===============+                          |
 |                                                          |
 |                                                          |
 |                        Automation                        |
 +==========================================================+
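As a rough sketch of how these layers stack in the C API, the following example opens a disk image, looks for a volume system, and then opens a file system in each volume (or directly on the image if no volume system is found). It is only an illustration with abbreviated error handling; the command-line handling is hypothetical.

\code
#include <cstdio>
#include "tsk/libtsk.h"

int main(int argc, char **argv) {
    if (argc != 2)
        return 1;

    // Disk Image Layer: open the image file(s)
    const char *images[] = { argv[1] };
    TSK_IMG_INFO *img = tsk_img_open_utf8(1, images, TSK_IMG_TYPE_DETECT, 0);
    if (img == NULL)
        return 1;

    // Volume System Layer: look for a partition table
    TSK_VS_INFO *vs = tsk_vs_open(img, 0, TSK_VS_TYPE_DETECT);
    if (vs != NULL) {
        for (TSK_PNUM_T i = 0; i < vs->part_count; i++) {
            // File System Layer: try to open a file system in each volume
            const TSK_VS_PART_INFO *part = tsk_vs_part_get(vs, i);
            TSK_FS_INFO *fs = tsk_fs_open_vol(part, TSK_FS_TYPE_DETECT);
            if (fs != NULL) {
                printf("Volume %u contains a %s file system\n",
                    (unsigned int) i, tsk_fs_type_toname(fs->ftype));
                tsk_fs_close(fs);
            }
        }
        tsk_vs_close(vs);
    }
    else {
        // No volume system: use the File System Layer directly on the image
        TSK_FS_INFO *fs = tsk_fs_open_img(img, 0, TSK_FS_TYPE_DETECT);
        if (fs != NULL) {
            printf("Image contains a %s file system\n", tsk_fs_type_toname(fs->ftype));
            tsk_fs_close(fs);
        }
    }
    tsk_img_close(img);
    return 0;
}
\endcode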

\section basic_build Build Environment This section describes how to incorporate the TSK library into your application. In general, all you need to do is include a TSK header file and link in the library. \subsection ov_build_unix Unix This document assumes that you used 'make install' to install the library and header files and that you used the default location of /usr/local/. If you specified a different location, you will need to do the obvious path replacements in this document. To include the API definitions, you will need to include the tsk/libtsk.h file in your source code. Depending on system configuration, you may need to add /usr/local/include to the list of directories that your compiler looks in for header files (using -I for example). \#include To include the library in your application during the linking phase, you will need to add -ltsk to add the libtsk library. Depending on system configuration, you may need to add /usr/local/lib/ to the list of directories that your compiler looks in for libraries (using -L for example). Note that if you built TSK with support for AFFLIB and libewf then you will also need to include -lewf and/or -lafflib. You may also need to include other libraries that AFFLIB and libewf require (my current system requires -lcrypto -lssl -lz -lncurses -lreadlin). Refer to the libewf and AFFLIB documentation for details. \subsection ov_build_win Windows The Windows setup is similar to the Unix setup, except that you need to include more libraries and there is no automated install or build process. The Visual Studio solution file is located in win32/tsk-win.sln. There is a libtsk project for the five library layers (as described in \ref basic_layers). Build this to create a static library in the Debug, Release, or Debug_NoLibs folder (depending on which version you built). The Debug_NoLibs configuration does not have any dependencies on libewf or afflib. To include the header files, configure your build environment to search in the root TSK directory (i.e. sleuthkit-3.0.0) and include the tsk/libtsk.h file in your source code. The location of the TSK directory will depend on where you unpacked it. To link with the libraries, you must configure your environment to include the libtsk library. You will need to specify the directory where the library is located, which could be the Debug or Release subdirectories in the win32 directory. You can also move the library to a different location. Note that your Windows application must have UNICODE support enabled. \section basic_cpp C++ Wrapers Nearly all of the TSK code is written in C and the original API is a collection of C functions and structs. There are also C++ classes that are wrappers around the C code. The C++ class allocates the C structs and provides getter and setter methods to access the public data. The remainder of this doc primarily refers to the C functions, but will provide a link to the corresponding C++ class when one exists. Next to \ref basepage Back to \ref users_guide "Table of Contents" */ sleuthkit-4.2.0/tsk/docs/cpp.dox000644 000765 000024 00000004556 12576320700 017366 0ustar00carrierstaff000000 000000 /*! \page cpppage C++ Classes This section provides a high-level overview of the C++ classes that wrap around the C structs and functions. The C++ interface can be used when the rest of your program is in C++ and when you want to ensure that the proper locks and thread-safe mechanisms are used in a multi-threaded environment. 
TSK contains locks to make it thread safe, but the C interface allows a thread to modify data in a shared structure without obtaining the proper lock. Note that the C++ interfaces simply create and use C structs behind the scenes. Therefore, the methods are very similar to the C functions. References to the C++ classes were given in earlier sections of the User's Guide. It is assumed that the user has read the preceeding sections of the User's Guide to get an understanding of what TSK is capable of doing. This section provides references and links to the main C++ classes. \section cpp_basics Basics The first step is to open the image with the TskImgInfo class. This class allows you to read from the disk image. See \ref imgpage for details on the C structs and functions at this layer. After the image is open, you can determine the volume system of the image using the TskVsInfo class. It will detect the volume system and provide access to each volume (or partition). The TskVsPartInfo class provides references to the details of each partition. See \ref vspage for details on the C structs and functions at this layer. Now that you know the layout of the image, you can open each volume to see what file system it has inside. Use the TskFsInfo class for this. Once you have the file system open, there are many ways to analyze the file system contents. The TskFsBlock class provides access to each block in the file system. The TskFsDir class provides access to each directory and the TskFsFile class provides access to each file. From there, you can access all of the details of the file, including its name info (in TskFsFile) and metadata (in TskFsMeta). Access to all of the file's attributes are provided via the TskFsAttribute class. See \ref fspage for details on the C structs and functions at this layer. If you want to automate the entire process and not deal with manually detecting volumes and file systems, consider the TskAuto class. See \ref autopage for more automation details. Back to \ref users_guide "Table of Contents" */ sleuthkit-4.2.0/tsk/docs/Doxyfile000644 000765 000024 00000226357 12576320700 017603 0ustar00carrierstaff000000 000000 # Doxyfile 1.8.1.2 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project. # # All text after a hash (#) is considered a comment and will be ignored. # The format is: # TAG = value [value, ...] # For lists items can also be appended using: # TAG += value [value, ...] # Values that contain spaces should be placed between quotes (" "). #--------------------------------------------------------------------------- # Project related configuration options #--------------------------------------------------------------------------- # This tag specifies the encoding used for all characters in the config file # that follow. The default is UTF-8 which is also the encoding used for all # text before the first occurrence of this tag. Doxygen uses libiconv (or the # iconv built into libc) for the transcoding. See # http://www.gnu.org/software/libiconv for the list of possible encodings. DOXYFILE_ENCODING = UTF-8 # The PROJECT_NAME tag is a single word (or sequence of words) that should # identify the project. Note that if you do not use Doxywizard you need # to put quotes around the project name if it contains spaces. PROJECT_NAME = "The Sleuth Kit" # The PROJECT_NUMBER tag can be used to enter a project or revision number. 
Back to \ref users_guide "Table of Contents"

*/
sleuthkit-4.2.0/tsk/docs/Doxyfile000644 000765 000024 00000226357 12576320700 017603 0ustar00carrierstaff000000 000000 # Doxyfile 1.8.1.2 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project. # # All text after a hash (#) is considered a comment and will be ignored. # The format is: # TAG = value [value, ...] # For lists items can also be appended using: # TAG += value [value, ...] # Values that contain spaces should be placed between quotes (" "). #--------------------------------------------------------------------------- # Project related configuration options #--------------------------------------------------------------------------- # This tag specifies the encoding used for all characters in the config file # that follow. The default is UTF-8 which is also the encoding used for all # text before the first occurrence of this tag. Doxygen uses libiconv (or the # iconv built into libc) for the transcoding. See # http://www.gnu.org/software/libiconv for the list of possible encodings. DOXYFILE_ENCODING = UTF-8 # The PROJECT_NAME tag is a single word (or sequence of words) that should # identify the project. Note that if you do not use Doxywizard you need # to put quotes around the project name if it contains spaces. PROJECT_NAME = "The Sleuth Kit" # The PROJECT_NUMBER tag can be used to enter a project or revision number. # This could be handy for archiving the generated documentation or # if some version control system is used. PROJECT_NUMBER = 4.0 # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give the viewer # a quick idea about the purpose of the project. Keep the description short. PROJECT_BRIEF = # With the PROJECT_LOGO tag one can specify a logo or icon that is # included in the documentation. The maximum height of the logo should not # exceed 55 pixels and the maximum width should not exceed 200 pixels. # Doxygen will copy the logo to the output directory. PROJECT_LOGO = # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) # base path where the generated documentation will be put. # If a relative path is entered, it will be relative to the location # where doxygen was started. If left blank the current directory will be used. OUTPUT_DIRECTORY = ../www/docs # If the CREATE_SUBDIRS tag is set to YES, then doxygen will create # 4096 sub-directories (in 2 levels) under the output directory of each output # format and will distribute the generated files over these directories. # Enabling this option can be useful when feeding doxygen a huge amount of # source files, where putting all generated files in the same directory would # otherwise cause performance problems for the file system. CREATE_SUBDIRS = NO # The OUTPUT_LANGUAGE tag is used to specify the language in which all # documentation generated by doxygen is written. Doxygen will use this # information to generate all constant output in the proper language. # The default language is English, other supported languages are: # Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional, # Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German, # Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English # messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian, # Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak, # Slovene, Spanish, Swedish, Ukrainian, and Vietnamese. OUTPUT_LANGUAGE = English # If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will # include brief member descriptions after the members that are listed in # the file and class documentation (similar to JavaDoc). # Set to NO to disable this. BRIEF_MEMBER_DESC = YES # If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend # the brief description of a member or function before the detailed description. # Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the # brief descriptions will be completely suppressed. REPEAT_BRIEF = YES # This tag implements a quasi-intelligent brief description abbreviator # that is used to form the text in various listings. Each string # in this list, if found as the leading text of the brief description, will be # stripped from the text and the result after processing the whole list, is # used as the annotated text. Otherwise, the brief description is used as-is. # If left blank, the following values are used ("$name" is automatically # replaced with the name of the entity): "The $name class" "The $name widget" # "The $name file" "is" "provides" "specifies" "contains" # "represents" "a" "an" "the" ABBREVIATE_BRIEF = # If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then # Doxygen will generate a detailed section even if there is only a brief # description.
ALWAYS_DETAILED_SEC = NO # If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all # inherited members of a class in the documentation of that class as if those # members were ordinary class members. Constructors, destructors and assignment # operators of the base classes will not be shown. INLINE_INHERITED_MEMB = NO # If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full # path before files name in the file list and in the header files. If set # to NO the shortest path that makes the file name unique will be used. FULL_PATH_NAMES = YES # If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag # can be used to strip a user-defined part of the path. Stripping is # only done if one of the specified strings matches the left-hand part of # the path. The tag can be used to show relative paths in the file list. # If left blank the directory from which doxygen is run is used as the # path to strip. STRIP_FROM_PATH = # The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of # the path mentioned in the documentation of a class, which tells # the reader which header file to include in order to use a class. # If left blank only the name of the header file containing the class # definition is used. Otherwise one should specify the include paths that # are normally passed to the compiler using the -I flag. STRIP_FROM_INC_PATH = # If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter # (but less readable) file names. This can be useful if your file system # doesn't support long names like on DOS, Mac, or CD-ROM. SHORT_NAMES = NO # If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen # will interpret the first line (until the first dot) of a JavaDoc-style # comment as the brief description. If set to NO, the JavaDoc # comments will behave just like regular Qt-style comments # (thus requiring an explicit @brief command for a brief description.) JAVADOC_AUTOBRIEF = YES # If the QT_AUTOBRIEF tag is set to YES then Doxygen will # interpret the first line (until the first dot) of a Qt-style # comment as the brief description. If set to NO, the comments # will behave just like regular Qt-style comments (thus requiring # an explicit \brief command for a brief description.) QT_AUTOBRIEF = NO # The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen # treat a multi-line C++ special comment block (i.e. a block of //! or /// # comments) as a brief description. This used to be the default behaviour. # The new default is to treat a multi-line C++ comment block as a detailed # description. Set this tag to YES if you prefer the old behaviour instead. MULTILINE_CPP_IS_BRIEF = NO # If the INHERIT_DOCS tag is set to YES (the default) then an undocumented # member inherits the documentation from any documented member that it # re-implements. INHERIT_DOCS = YES # If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce # a new page for each member. If set to NO, the documentation of a member will # be part of the file/class/namespace that contains it. SEPARATE_MEMBER_PAGES = NO # The TAB_SIZE tag can be used to set the number of spaces in a tab. # Doxygen uses this value to replace tabs by spaces in code fragments. TAB_SIZE = 8 # This tag can be used to specify a number of aliases that acts # as commands in the documentation. An alias has the form "name=value". 
# For example adding "sideeffect=\par Side Effects:\n" will allow you to # put the command \sideeffect (or @sideeffect) in the documentation, which # will result in a user-defined paragraph with heading "Side Effects:". # You can put \n's in the value part of an alias to insert newlines. ALIASES = # This tag can be used to specify a number of word-keyword mappings (TCL only). # A mapping has the form "name=value". For example adding # "class=itcl::class" will allow you to use the command class in the # itcl::class meaning. TCL_SUBST = # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C # sources only. Doxygen will then generate output that is more tailored for C. # For instance, some of the names that are used will be different. The list # of all members will be omitted, etc. OPTIMIZE_OUTPUT_FOR_C = NO # Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java # sources only. Doxygen will then generate output that is more tailored for # Java. For instance, namespaces will be presented as packages, qualified # scopes will look different, etc. OPTIMIZE_OUTPUT_JAVA = NO # Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran # sources only. Doxygen will then generate output that is more tailored for # Fortran. OPTIMIZE_FOR_FORTRAN = NO # Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL # sources. Doxygen will then generate output that is tailored for # VHDL. OPTIMIZE_OUTPUT_VHDL = NO # Doxygen selects the parser to use depending on the extension of the files it # parses. With this tag you can assign which parser to use for a given extension. # Doxygen has a built-in mapping, but you can override or extend it using this # tag. The format is ext=language, where ext is a file extension, and language # is one of the parsers supported by doxygen: IDL, Java, Javascript, CSharp, C, # C++, D, PHP, Objective-C, Python, Fortran, VHDL, C, C++. For instance to make # doxygen treat .inc files as Fortran files (default is PHP), and .f files as C # (default is Fortran), use: inc=Fortran f=C. Note that for custom extensions # you also need to set FILE_PATTERNS otherwise the files are not read by doxygen. EXTENSION_MAPPING = # If MARKDOWN_SUPPORT is enabled (the default) then doxygen pre-processes all # comments according to the Markdown format, which allows for more readable # documentation. See http://daringfireball.net/projects/markdown/ for details. # The output of markdown processing is further processed by doxygen, so you # can mix doxygen, HTML, and XML commands with Markdown formatting. # Disable only in case of backward compatibilities issues. MARKDOWN_SUPPORT = YES # If you use STL classes (i.e. std::string, std::vector, etc.) but do not want # to include (a tag file for) the STL sources as input, then you should # set this tag to YES in order to let doxygen match functions declarations and # definitions whose arguments contain STL classes (e.g. func(std::string); v.s. # func(std::string) {}). This also makes the inheritance and collaboration # diagrams that involve STL classes more complete and accurate. BUILTIN_STL_SUPPORT = NO # If you use Microsoft's C++/CLI language, you should set this option to YES to # enable parsing support. CPP_CLI_SUPPORT = NO # Set the SIP_SUPPORT tag to YES if your project consists of sip sources only. # Doxygen will parse them like normal C++ but will assume all classes use public # instead of private inheritance when no explicit protection keyword is present. 
SIP_SUPPORT = NO # For Microsoft's IDL there are propget and propput attributes to indicate getter # and setter methods for a property. Setting this option to YES (the default) # will make doxygen replace the get and set methods by a property in the # documentation. This will only work if the methods are indeed getting or # setting a simple type. If this is not the case, or you want to show the # methods anyway, you should set this option to NO. IDL_PROPERTY_SUPPORT = YES # If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC # tag is set to YES, then doxygen will reuse the documentation of the first # member in the group (if any) for the other members of the group. By default # all members of a group must be documented explicitly. DISTRIBUTE_GROUP_DOC = NO # Set the SUBGROUPING tag to YES (the default) to allow class member groups of # the same type (for instance a group of public functions) to be put as a # subgroup of that type (e.g. under the Public Functions section). Set it to # NO to prevent subgrouping. Alternatively, this can be done per class using # the \nosubgrouping command. SUBGROUPING = YES # When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and # unions are shown inside the group in which they are included (e.g. using # @ingroup) instead of on a separate page (for HTML and Man pages) or # section (for LaTeX and RTF). INLINE_GROUPED_CLASSES = NO # When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and # unions with only public data fields will be shown inline in the documentation # of the scope in which they are defined (i.e. file, namespace, or group # documentation), provided this scope is documented. If set to NO (the default), # structs, classes, and unions are shown on a separate page (for HTML and Man # pages) or section (for LaTeX and RTF). INLINE_SIMPLE_STRUCTS = NO # When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum # is documented as struct, union, or enum with the name of the typedef. So # typedef struct TypeS {} TypeT, will appear in the documentation as a struct # with name TypeT. When disabled the typedef will appear as a member of a file, # namespace, or class. And the struct will be named TypeS. This can typically # be useful for C code in case the coding convention dictates that all compound # types are typedef'ed and only the typedef is referenced, never the tag name. TYPEDEF_HIDES_STRUCT = NO # The SYMBOL_CACHE_SIZE determines the size of the internal cache use to # determine which symbols to keep in memory and which to flush to disk. # When the cache is full, less often used symbols will be written to disk. # For small to medium size projects (<1000 input files) the default value is # probably good enough. For larger projects a too small cache size can cause # doxygen to be busy swapping symbols to and from disk most of the time # causing a significant performance penalty. # If the system has enough physical memory increasing the cache will improve the # performance by keeping more symbols in memory. Note that the value works on # a logarithmic scale so increasing the size by one will roughly double the # memory usage. The cache size is given by this formula: # 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0, # corresponding to a cache size of 2^16 = 65536 symbols. SYMBOL_CACHE_SIZE = 0 # Similar to the SYMBOL_CACHE_SIZE the size of the symbol lookup cache can be # set using LOOKUP_CACHE_SIZE. 
This cache is used to resolve symbols given # their name and scope. Since this can be an expensive process and often the # same symbol appear multiple times in the code, doxygen keeps a cache of # pre-resolved symbols. If the cache is too small doxygen will become slower. # If the cache is too large, memory is wasted. The cache size is given by this # formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range is 0..9, the default is 0, # corresponding to a cache size of 2^16 = 65536 symbols. LOOKUP_CACHE_SIZE = 0 #--------------------------------------------------------------------------- # Build related configuration options #--------------------------------------------------------------------------- # If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in # documentation are documented, even if no documentation was available. # Private class members and static file members will be hidden unless # the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES EXTRACT_ALL = NO # If the EXTRACT_PRIVATE tag is set to YES all private members of a class # will be included in the documentation. EXTRACT_PRIVATE = NO # If the EXTRACT_PACKAGE tag is set to YES all members with package or internal scope will be included in the documentation. EXTRACT_PACKAGE = NO # If the EXTRACT_STATIC tag is set to YES all static members of a file # will be included in the documentation. EXTRACT_STATIC = NO # If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) # defined locally in source files will be included in the documentation. # If set to NO only classes defined in header files are included. EXTRACT_LOCAL_CLASSES = NO # This flag is only useful for Objective-C code. When set to YES local # methods, which are defined in the implementation section but not in # the interface are included in the documentation. # If set to NO (the default) only methods in the interface are included. EXTRACT_LOCAL_METHODS = NO # If this flag is set to YES, the members of anonymous namespaces will be # extracted and appear in the documentation as a namespace called # 'anonymous_namespace{file}', where file will be replaced with the base # name of the file that contains the anonymous namespace. By default # anonymous namespaces are hidden. EXTRACT_ANON_NSPACES = NO # If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all # undocumented members of documented classes, files or namespaces. # If set to NO (the default) these members will be included in the # various overviews, but no documentation section is generated. # This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_MEMBERS = NO # If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all # undocumented classes that are normally visible in the class hierarchy. # If set to NO (the default) these classes will be included in the various # overviews. This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_CLASSES = NO # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all # friend (class|struct|union) declarations. # If set to NO (the default) these declarations will be included in the # documentation. HIDE_FRIEND_COMPOUNDS = NO # If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any # documentation blocks found inside the body of a function. # If set to NO (the default) these blocks will be appended to the # function's detailed documentation block. HIDE_IN_BODY_DOCS = NO # The INTERNAL_DOCS tag determines if documentation # that is typed after a \internal command is included. 
If the tag is set # to NO (the default) then the documentation will be excluded. # Set it to YES to include the internal documentation. INTERNAL_DOCS = NO # If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate # file names in lower-case letters. If set to YES upper-case letters are also # allowed. This is useful if you have classes or files whose names only differ # in case and if your file system supports case sensitive file names. Windows # and Mac users are advised to set this option to NO. CASE_SENSE_NAMES = YES # If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen # will show members with their full class and namespace scopes in the # documentation. If set to YES the scope will be hidden. HIDE_SCOPE_NAMES = NO # If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen # will put a list of the files that are included by a file in the documentation # of that file. SHOW_INCLUDE_FILES = YES # If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen # will list include files with double quotes in the documentation # rather than with sharp brackets. FORCE_LOCAL_INCLUDES = NO # If the INLINE_INFO tag is set to YES (the default) then a tag [inline] # is inserted in the documentation for inline members. INLINE_INFO = YES # If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen # will sort the (detailed) documentation of file and class members # alphabetically by member name. If set to NO the members will appear in # declaration order. SORT_MEMBER_DOCS = YES # If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the # brief documentation of file, namespace and class members alphabetically # by member name. If set to NO (the default) the members will appear in # declaration order. SORT_BRIEF_DOCS = YES # If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen # will sort the (brief and detailed) documentation of class members so that # constructors and destructors are listed first. If set to NO (the default) # the constructors will appear in the respective orders defined by # SORT_MEMBER_DOCS and SORT_BRIEF_DOCS. # This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO # and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO. SORT_MEMBERS_CTORS_1ST = NO # If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the # hierarchy of group names into alphabetical order. If set to NO (the default) # the group names will appear in their defined order. SORT_GROUP_NAMES = NO # If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be # sorted by fully-qualified names, including namespaces. If set to # NO (the default), the class list will be sorted only by class name, # not including the namespace part. # Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. # Note: This option applies only to the class list, not to the # alphabetical list. SORT_BY_SCOPE_NAME = NO # If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to # do proper type resolution of all parameters of a function it will reject a # match between the prototype and the implementation of a member function even # if there is only one candidate or it is obvious which candidate to choose # by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen # will still accept a match between prototype and implementation in such cases. STRICT_PROTO_MATCHING = NO # The GENERATE_TODOLIST tag can be used to enable (YES) or # disable (NO) the todo list. 
This list is created by putting \todo # commands in the documentation. GENERATE_TODOLIST = NO # The GENERATE_TESTLIST tag can be used to enable (YES) or # disable (NO) the test list. This list is created by putting \test # commands in the documentation. GENERATE_TESTLIST = NO # The GENERATE_BUGLIST tag can be used to enable (YES) or # disable (NO) the bug list. This list is created by putting \bug # commands in the documentation. GENERATE_BUGLIST = NO # The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or # disable (NO) the deprecated list. This list is created by putting # \deprecated commands in the documentation. GENERATE_DEPRECATEDLIST= YES # The ENABLED_SECTIONS tag can be used to enable conditional # documentation sections, marked by \if sectionname ... \endif. ENABLED_SECTIONS = # The MAX_INITIALIZER_LINES tag determines the maximum number of lines # the initial value of a variable or macro consists of for it to appear in # the documentation. If the initializer consists of more lines than specified # here it will be hidden. Use a value of 0 to hide initializers completely. # The appearance of the initializer of individual variables and macros in the # documentation can be controlled using \showinitializer or \hideinitializer # command in the documentation regardless of this setting. MAX_INITIALIZER_LINES = 30 # Set the SHOW_USED_FILES tag to NO to disable the list of files generated # at the bottom of the documentation of classes and structs. If set to YES the # list will mention the files that were used to generate the documentation. SHOW_USED_FILES = YES # Set the SHOW_FILES tag to NO to disable the generation of the Files page. # This will remove the Files entry from the Quick Index and from the # Folder Tree View (if specified). The default is YES. SHOW_FILES = YES # Set the SHOW_NAMESPACES tag to NO to disable the generation of the # Namespaces page. # This will remove the Namespaces entry from the Quick Index # and from the Folder Tree View (if specified). The default is YES. SHOW_NAMESPACES = YES # The FILE_VERSION_FILTER tag can be used to specify a program or script that # doxygen should invoke to get the current version for each file (typically from # the version control system). Doxygen will invoke the program by executing (via # popen()) the command , where is the value of # the FILE_VERSION_FILTER tag, and is the name of an input file # provided by doxygen. Whatever the program writes to standard output # is used as the file version. See the manual for examples. FILE_VERSION_FILTER = # The LAYOUT_FILE tag can be used to specify a layout file which will be parsed # by doxygen. The layout file controls the global structure of the generated # output files in an output format independent way. To create the layout file # that represents doxygen's defaults, run doxygen with the -l option. # You can optionally specify a file name after the option, if omitted # DoxygenLayout.xml will be used as the name of the layout file. LAYOUT_FILE = # The CITE_BIB_FILES tag can be used to specify one or more bib files # containing the references data. This must be a list of .bib files. The # .bib extension is automatically appended if omitted. Using this command # requires the bibtex tool to be installed. See also # http://en.wikipedia.org/wiki/BibTeX for more info. For LaTeX the style # of the bibliography can be controlled using LATEX_BIB_STYLE. To use this # feature you need bibtex and perl available in the search path. 
CITE_BIB_FILES = #--------------------------------------------------------------------------- # configuration options related to warning and progress messages #--------------------------------------------------------------------------- # The QUIET tag can be used to turn on/off the messages that are generated # by doxygen. Possible values are YES and NO. If left blank NO is used. QUIET = NO # The WARNINGS tag can be used to turn on/off the warning messages that are # generated by doxygen. Possible values are YES and NO. If left blank # NO is used. WARNINGS = YES # If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings # for undocumented members. If EXTRACT_ALL is set to YES then this flag will # automatically be disabled. WARN_IF_UNDOCUMENTED = NO # If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for # potential errors in the documentation, such as not documenting some # parameters in a documented function, or documenting parameters that # don't exist or using markup commands wrongly. WARN_IF_DOC_ERROR = YES # The WARN_NO_PARAMDOC option can be enabled to get warnings for # functions that are documented, but have no documentation for their parameters # or return value. If set to NO (the default) doxygen will only warn about # wrong or incomplete parameter documentation, but not about the absence of # documentation. WARN_NO_PARAMDOC = NO # The WARN_FORMAT tag determines the format of the warning messages that # doxygen can produce. The string should contain the $file, $line, and $text # tags, which will be replaced by the file and line number from which the # warning originated and the warning text. Optionally the format may contain # $version, which will be replaced by the version of the file (if it could # be obtained via FILE_VERSION_FILTER) WARN_FORMAT = "$file:$line: $text" # The WARN_LOGFILE tag can be used to specify a file to which warning # and error messages should be written. If left blank the output is written # to stderr. WARN_LOGFILE = #--------------------------------------------------------------------------- # configuration options related to the input files #--------------------------------------------------------------------------- # The INPUT tag can be used to specify the files and/or directories that contain # documented source files. You may enter file names like "myfile.cpp" or # directories like "/usr/src/myproject". Separate the files or directories # with spaces. INPUT = tsk/docs/main.dox \ tsk/docs/basics.dox \ tsk/docs/base.dox \ tsk/docs/img.dox \ tsk/docs/vs.dox \ tsk/docs/fs.dox \ tsk/docs/hashdb.dox \ tsk/docs/auto.dox \ tsk/docs/cpp.dox \ tsk/base \ tsk/fs \ tsk/hashdb \ tsk/img \ tsk/vs \ tsk/auto # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is # also the default input encoding. Doxygen uses libiconv (or the iconv built # into libc) for the transcoding. See http://www.gnu.org/software/libiconv for # the list of possible encodings. INPUT_ENCODING = UTF-8 # If the value of the INPUT tag contains directories, you can use the # FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. 
If left # blank the following patterns are tested: # *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh # *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py # *.f90 *.f *.for *.vhd *.vhdl FILE_PATTERNS = *.c \ *.cpp \ *.h # The RECURSIVE tag can be used to turn specify whether or not subdirectories # should be searched for input files as well. Possible values are YES and NO. # If left blank NO is used. RECURSIVE = NO # The EXCLUDE tag can be used to specify files and/or directories that should be # excluded from the INPUT source files. This way you can easily exclude a # subdirectory from a directory tree whose root is specified with the INPUT tag. # Note that relative paths are relative to the directory from which doxygen is # run. EXCLUDE = # The EXCLUDE_SYMLINKS tag can be used to select whether or not files or # directories that are symbolic links (a Unix file system feature) are excluded # from the input. EXCLUDE_SYMLINKS = NO # If the value of the INPUT tag contains directories, you can use the # EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude # certain files from those directories. Note that the wildcards are matched # against the file with absolute path, so to exclude all test directories # for example use the pattern */test/* EXCLUDE_PATTERNS = */auto/sqlite3* # The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names # (namespaces, classes, functions, etc.) that should be excluded from the # output. The symbol name can be a fully qualified name, a word, or if the # wildcard * is used, a substring. Examples: ANamespace, AClass, # AClass::ANamespace, ANamespace::*Test EXCLUDE_SYMBOLS = # The EXAMPLE_PATH tag can be used to specify one or more files or # directories that contain example code fragments that are included (see # the \include command). EXAMPLE_PATH = # If the value of the EXAMPLE_PATH tag contains directories, you can use the # EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank all files are included. EXAMPLE_PATTERNS = # If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be # searched for input files to be used with the \include or \dontinclude # commands irrespective of the value of the RECURSIVE tag. # Possible values are YES and NO. If left blank NO is used. EXAMPLE_RECURSIVE = NO # The IMAGE_PATH tag can be used to specify one or more files or # directories that contain image that are included in the documentation (see # the \image command). IMAGE_PATH = # The INPUT_FILTER tag can be used to specify a program that doxygen should # invoke to filter for each input file. Doxygen will invoke the filter program # by executing (via popen()) the command , where # is the value of the INPUT_FILTER tag, and is the name of an # input file. Doxygen will then use the output that the filter program writes # to standard output. # If FILTER_PATTERNS is specified, this tag will be # ignored. INPUT_FILTER = # The FILTER_PATTERNS tag can be used to specify filters on a per file pattern # basis. # Doxygen will compare the file name with each pattern and apply the # filter if there is a match. # The filters are a list of the form: # pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further # info on how filters are used. If FILTER_PATTERNS is empty or if # non of the patterns match the file name, INPUT_FILTER is applied. 
FILTER_PATTERNS = # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using # INPUT_FILTER) will be used to filter the input files when producing source # files to browse (i.e. when SOURCE_BROWSER is set to YES). FILTER_SOURCE_FILES = NO # The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file # pattern. A pattern will override the setting for FILTER_PATTERN (if any) # and it is also possible to disable source filtering for a specific pattern # using *.ext= (so without naming a filter). This option only has effect when # FILTER_SOURCE_FILES is enabled. FILTER_SOURCE_PATTERNS = #--------------------------------------------------------------------------- # configuration options related to source browsing #--------------------------------------------------------------------------- # If the SOURCE_BROWSER tag is set to YES then a list of source files will # be generated. Documented entities will be cross-referenced with these sources. # Note: To get rid of all source code in the generated output, make sure also # VERBATIM_HEADERS is set to NO. SOURCE_BROWSER = NO # Setting the INLINE_SOURCES tag to YES will include the body # of functions and classes directly in the documentation. INLINE_SOURCES = NO # Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct # doxygen to hide any special comment blocks from generated source code # fragments. Normal C, C++ and Fortran comments will always remain visible. STRIP_CODE_COMMENTS = YES # If the REFERENCED_BY_RELATION tag is set to YES # then for each documented function all documented # functions referencing it will be listed. REFERENCED_BY_RELATION = YES # If the REFERENCES_RELATION tag is set to YES # then for each documented function all documented entities # called/used by that function will be listed. REFERENCES_RELATION = YES # If the REFERENCES_LINK_SOURCE tag is set to YES (the default) # and SOURCE_BROWSER tag is set to YES, then the hyperlinks from # functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will # link to the source code. # Otherwise they will link to the documentation. REFERENCES_LINK_SOURCE = YES # If the USE_HTAGS tag is set to YES then the references to source code # will point to the HTML generated by the htags(1) tool instead of doxygen # built-in source browser. The htags tool is part of GNU's global source # tagging system (see http://www.gnu.org/software/global/global.html). You # will need version 4.8.6 or higher. USE_HTAGS = NO # If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen # will generate a verbatim copy of the header file for each class for # which an include is specified. Set to NO to disable this. VERBATIM_HEADERS = NO #--------------------------------------------------------------------------- # configuration options related to the alphabetical class index #--------------------------------------------------------------------------- # If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index # of all compounds will be generated. Enable this if the project # contains a lot of classes, structs, unions or interfaces. ALPHABETICAL_INDEX = NO # If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then # the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns # in which this list will be split (can be a number in the range [1..20]) COLS_IN_ALPHA_INDEX = 5 # In case all classes in a project start with a common prefix, all # classes will be put under the same header in the alphabetical index. 
# The IGNORE_PREFIX tag can be used to specify one or more prefixes that # should be ignored while generating the index headers. IGNORE_PREFIX = #--------------------------------------------------------------------------- # configuration options related to the HTML output #--------------------------------------------------------------------------- # If the GENERATE_HTML tag is set to YES (the default) Doxygen will # generate HTML output. GENERATE_HTML = YES # The HTML_OUTPUT tag is used to specify where the HTML docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `html' will be used as the default path. HTML_OUTPUT = api-docs # The HTML_FILE_EXTENSION tag can be used to specify the file extension for # each generated HTML page (for example: .htm,.php,.asp). If it is left blank # doxygen will generate files with .html extension. HTML_FILE_EXTENSION = .html # The HTML_HEADER tag can be used to specify a personal HTML header for # each generated HTML page. If it is left blank doxygen will generate a # standard header. Note that when using a custom header you are responsible # for the proper inclusion of any scripts and style sheets that doxygen # needs, which is dependent on the configuration options used. # It is advised to generate a default header using "doxygen -w html # header.html footer.html stylesheet.css YourConfigFile" and then modify # that header. Note that the header is subject to change so you typically # have to redo this when upgrading to a newer version of doxygen or when # changing the value of configuration settings such as GENERATE_TREEVIEW! HTML_HEADER = # The HTML_FOOTER tag can be used to specify a personal HTML footer for # each generated HTML page. If it is left blank doxygen will generate a # standard footer. HTML_FOOTER = tsk/docs/footer.html # The HTML_STYLESHEET tag can be used to specify a user-defined cascading # style sheet that is used by each HTML page. It can be used to # fine-tune the look of the HTML output. If the tag is left blank doxygen # will generate a default style sheet. Note that doxygen will try to copy # the style sheet file to the HTML output directory, so don't put your own # style sheet in the HTML output directory as well, or it will be erased! HTML_STYLESHEET = # The HTML_EXTRA_FILES tag can be used to specify one or more extra images or # other source files which should be copied to the HTML output directory. Note # that these files will be copied to the base HTML output directory. Use the # $relpath$ marker in the HTML_HEADER and/or HTML_FOOTER files to load these # files. In the HTML_STYLESHEET file, use the file name only. Also note that # the files will be copied as-is; there are no commands or markers available. HTML_EXTRA_FILES = # The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. # Doxygen will adjust the colors in the style sheet and background images # according to this color. Hue is specified as an angle on a colorwheel, # see http://en.wikipedia.org/wiki/Hue for more information. # For instance the value 0 represents red, 60 is yellow, 120 is green, # 180 is cyan, 240 is blue, 300 purple, and 360 is red again. # The allowed range is 0 to 359. HTML_COLORSTYLE_HUE = 220 # The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of # the colors in the HTML output. For a value of 0 the output will use # grayscales only. A value of 255 will produce the most vivid colors. 
HTML_COLORSTYLE_SAT = 100 # The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to # the luminance component of the colors in the HTML output. Values below # 100 gradually make the output lighter, whereas values above 100 make # the output darker. The value divided by 100 is the actual gamma applied, # so 80 represents a gamma of 0.8, The value 220 represents a gamma of 2.2, # and 100 does not change the gamma. HTML_COLORSTYLE_GAMMA = 80 # If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML # page will contain the date and time when the page was generated. Setting # this to NO can help when comparing the output of multiple runs. HTML_TIMESTAMP = YES # If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML # documentation will contain sections that can be hidden and shown after the # page has loaded. HTML_DYNAMIC_SECTIONS = NO # With HTML_INDEX_NUM_ENTRIES one can control the preferred number of # entries shown in the various tree structured indices initially; the user # can expand and collapse entries dynamically later on. Doxygen will expand # the tree to such a level that at most the specified number of entries are # visible (unless a fully collapsed tree already exceeds this amount). # So setting the number of entries 1 will produce a full collapsed tree by # default. 0 is a special value representing an infinite number of entries # and will result in a full expanded tree by default. HTML_INDEX_NUM_ENTRIES = 100 # If the GENERATE_DOCSET tag is set to YES, additional index files # will be generated that can be used as input for Apple's Xcode 3 # integrated development environment, introduced with OSX 10.5 (Leopard). # To create a documentation set, doxygen will generate a Makefile in the # HTML output directory. Running make will produce the docset in that # directory and running "make install" will install the docset in # ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find # it at startup. # See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html # for more information. GENERATE_DOCSET = NO # When GENERATE_DOCSET tag is set to YES, this tag determines the name of the # feed. A documentation feed provides an umbrella under which multiple # documentation sets from a single provider (such as a company or product suite) # can be grouped. DOCSET_FEEDNAME = "Doxygen generated docs" # When GENERATE_DOCSET tag is set to YES, this tag specifies a string that # should uniquely identify the documentation set bundle. This should be a # reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen # will append .docset to the name. DOCSET_BUNDLE_ID = org.doxygen.Project # When GENERATE_PUBLISHER_ID tag specifies a string that should uniquely identify # the documentation publisher. This should be a reverse domain-name style # string, e.g. com.mycompany.MyDocSet.documentation. DOCSET_PUBLISHER_ID = org.doxygen.Publisher # The GENERATE_PUBLISHER_NAME tag identifies the documentation publisher. DOCSET_PUBLISHER_NAME = Publisher # If the GENERATE_HTMLHELP tag is set to YES, additional index files # will be generated that can be used as input for tools like the # Microsoft HTML help workshop to generate a compiled HTML help file (.chm) # of the generated HTML documentation. GENERATE_HTMLHELP = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can # be used to specify the file name of the resulting .chm file. 
You # can add a path in front of the file if the result should not be # written to the html output directory. CHM_FILE = # If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can # be used to specify the location (absolute path including file name) of # the HTML help compiler (hhc.exe). If non-empty doxygen will try to run # the HTML help compiler on the generated index.hhp. HHC_LOCATION = # If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag # controls if a separate .chi index file is generated (YES) or that # it should be included in the master .chm file (NO). GENERATE_CHI = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING # is used to encode HtmlHelp index (hhk), content (hhc) and project file # content. CHM_INDEX_ENCODING = # If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag # controls whether a binary table of contents is generated (YES) or a # normal table of contents (NO) in the .chm file. BINARY_TOC = NO # The TOC_EXPAND flag can be set to YES to add extra items for group members # to the contents of the HTML help documentation and to the tree view. TOC_EXPAND = NO # If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and # QHP_VIRTUAL_FOLDER are set, an additional index file will be generated # that can be used as input for Qt's qhelpgenerator to generate a # Qt Compressed Help (.qch) of the generated HTML documentation. GENERATE_QHP = NO # If the QHG_LOCATION tag is specified, the QCH_FILE tag can # be used to specify the file name of the resulting .qch file. # The path specified is relative to the HTML output folder. QCH_FILE = # The QHP_NAMESPACE tag specifies the namespace to use when generating # Qt Help Project output. For more information please see # http://doc.trolltech.com/qthelpproject.html#namespace QHP_NAMESPACE = # The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating # Qt Help Project output. For more information please see # http://doc.trolltech.com/qthelpproject.html#virtual-folders QHP_VIRTUAL_FOLDER = doc # If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to # add. For more information please see # http://doc.trolltech.com/qthelpproject.html#custom-filters QHP_CUST_FILTER_NAME = # The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the # custom filter to add. For more information please see # # Qt Help Project / Custom Filters. QHP_CUST_FILTER_ATTRS = # The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this # project's # filter section matches. # # Qt Help Project / Filter Attributes. QHP_SECT_FILTER_ATTRS = # If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can # be used to specify the location of Qt's qhelpgenerator. # If non-empty doxygen will try to run qhelpgenerator on the generated # .qhp file. QHG_LOCATION = # If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files # will be generated, which together with the HTML files, form an Eclipse help # plugin. To install this plugin and make it available under the help contents # menu in Eclipse, the contents of the directory containing the HTML and XML # files needs to be copied into the plugins directory of eclipse. The name of # the directory within the plugins directory should be the same as # the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before # the help appears. GENERATE_ECLIPSEHELP = NO # A unique identifier for the eclipse help plugin. 
When installing the plugin # the directory name containing the HTML and XML files should also have # this name. ECLIPSE_DOC_ID = org.doxygen.Project # The DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) # at top of each HTML page. The value NO (the default) enables the index and # the value YES disables it. Since the tabs have the same information as the # navigation tree you can set this option to NO if you already set # GENERATE_TREEVIEW to YES. DISABLE_INDEX = NO # The GENERATE_TREEVIEW tag is used to specify whether a tree-like index # structure should be generated to display hierarchical information. # If the tag value is set to YES, a side panel will be generated # containing a tree-like index structure (just like the one that # is generated for HTML Help). For this to work a browser that supports # JavaScript, DHTML, CSS and frames is required (i.e. any modern browser). # Windows users are probably better off using the HTML help feature. # Since the tree basically has the same information as the tab index you # could consider to set DISABLE_INDEX to NO when enabling this option. GENERATE_TREEVIEW = NO # The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values # (range [0,1..20]) that doxygen will group on one line in the generated HTML # documentation. Note that a value of 0 will completely suppress the enum # values from appearing in the overview section. ENUM_VALUES_PER_LINE = 4 # If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be # used to set the initial width (in pixels) of the frame in which the tree # is shown. TREEVIEW_WIDTH = 250 # When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open # links to external symbols imported via tag files in a separate window. EXT_LINKS_IN_WINDOW = NO # Use this tag to change the font size of Latex formulas included # as images in the HTML documentation. The default is 10. Note that # when you change the font size after a successful doxygen run you need # to manually remove any form_*.png images from the HTML output directory # to force them to be regenerated. FORMULA_FONTSIZE = 10 # Use the FORMULA_TRANPARENT tag to determine whether or not the images # generated for formulas are transparent PNGs. Transparent PNGs are # not supported properly for IE 6.0, but are supported on all modern browsers. # Note that when changing this option you need to delete any form_*.png files # in the HTML output before the changes have effect. FORMULA_TRANSPARENT = YES # Enable the USE_MATHJAX option to render LaTeX formulas using MathJax # (see http://www.mathjax.org) which uses client side Javascript for the # rendering instead of using prerendered bitmaps. Use this if you do not # have LaTeX installed or if you want to formulas look prettier in the HTML # output. When enabled you may also need to install MathJax separately and # configure the path to it using the MATHJAX_RELPATH option. USE_MATHJAX = NO # When MathJax is enabled you need to specify the location relative to the # HTML output directory using the MATHJAX_RELPATH option. The destination # directory should contain the MathJax.js script. For instance, if the mathjax # directory is located at the same level as the HTML output directory, then # MATHJAX_RELPATH should be ../mathjax. The default value points to # the MathJax Content Delivery Network so you can quickly see the result without # installing MathJax. 
# However, it is strongly recommended to install a local # copy of MathJax from http://www.mathjax.org before deployment. MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest # The MATHJAX_EXTENSIONS tag can be used to specify one or MathJax extension # names that should be enabled during MathJax rendering. MATHJAX_EXTENSIONS = # When the SEARCHENGINE tag is enabled doxygen will generate a search box # for the HTML output. The underlying search engine uses javascript # and DHTML and should work on any modern browser. Note that when using # HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets # (GENERATE_DOCSET) there is already a search function so this one should # typically be disabled. For large projects the javascript based search engine # can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution. SEARCHENGINE = NO # When the SERVER_BASED_SEARCH tag is enabled the search engine will be # implemented using a PHP enabled web server instead of at the web client # using Javascript. Doxygen will generate the search PHP script and index # file to put on the web server. The advantage of the server # based approach is that it scales better to large projects and allows # full text search. The disadvantages are that it is more difficult to setup # and does not have live searching capabilities. SERVER_BASED_SEARCH = NO #--------------------------------------------------------------------------- # configuration options related to the LaTeX output #--------------------------------------------------------------------------- # If the GENERATE_LATEX tag is set to YES (the default) Doxygen will # generate Latex output. GENERATE_LATEX = NO # The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `latex' will be used as the default path. LATEX_OUTPUT = latex # The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be # invoked. If left blank `latex' will be used as the default command name. # Note that when enabling USE_PDFLATEX this option is only used for # generating bitmaps for formulas in the HTML output, but not in the # Makefile that is written to the output directory. LATEX_CMD_NAME = latex # The MAKEINDEX_CMD_NAME tag can be used to specify the command name to # generate index for LaTeX. If left blank `makeindex' will be used as the # default command name. MAKEINDEX_CMD_NAME = makeindex # If the COMPACT_LATEX tag is set to YES Doxygen generates more compact # LaTeX documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_LATEX = NO # The PAPER_TYPE tag can be used to set the paper type that is used # by the printer. Possible values are: a4, letter, legal and # executive. If left blank a4wide will be used. PAPER_TYPE = a4wide # The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX # packages that should be included in the LaTeX output. EXTRA_PACKAGES = # The LATEX_HEADER tag can be used to specify a personal LaTeX header for # the generated latex document. The header should contain everything until # the first chapter. If it is left blank doxygen will generate a # standard header. Notice: only use this tag if you know what you are doing! LATEX_HEADER = # The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for # the generated latex document. The footer should contain everything after # the last chapter. 
If it is left blank doxygen will generate a # standard footer. Notice: only use this tag if you know what you are doing! LATEX_FOOTER = # If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated # is prepared for conversion to pdf (using ps2pdf). The pdf file will # contain links (just like the HTML output) instead of page references # This makes the output suitable for online browsing using a pdf viewer. PDF_HYPERLINKS = NO # If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of # plain latex in the generated Makefile. Set this option to YES to get a # higher quality PDF documentation. USE_PDFLATEX = NO # If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. # command to the generated LaTeX files. This will instruct LaTeX to keep # running if errors occur, instead of asking the user for help. # This option is also used when generating formulas in HTML. LATEX_BATCHMODE = NO # If LATEX_HIDE_INDICES is set to YES then doxygen will not # include the index chapters (such as File Index, Compound Index, etc.) # in the output. LATEX_HIDE_INDICES = NO # If LATEX_SOURCE_CODE is set to YES then doxygen will include # source code with syntax highlighting in the LaTeX output. # Note that which sources are shown also depends on other settings # such as SOURCE_BROWSER. LATEX_SOURCE_CODE = NO # The LATEX_BIB_STYLE tag can be used to specify the style to use for the # bibliography, e.g. plainnat, or ieeetr. The default style is "plain". See # http://en.wikipedia.org/wiki/BibTeX for more info. LATEX_BIB_STYLE = plain #--------------------------------------------------------------------------- # configuration options related to the RTF output #--------------------------------------------------------------------------- # If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output # The RTF output is optimized for Word 97 and may not look very pretty with # other RTF readers or editors. GENERATE_RTF = NO # The RTF_OUTPUT tag is used to specify where the RTF docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `rtf' will be used as the default path. RTF_OUTPUT = rtf # If the COMPACT_RTF tag is set to YES Doxygen generates more compact # RTF documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_RTF = NO # If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated # will contain hyperlink fields. The RTF file will # contain links (just like the HTML output) instead of page references. # This makes the output suitable for online browsing using WORD or other # programs which support those fields. # Note: wordpad (write) and others do not support links. RTF_HYPERLINKS = NO # Load style sheet definitions from file. Syntax is similar to doxygen's # config file, i.e. a series of assignments. You only have to provide # replacements, missing definitions are set to their default value. RTF_STYLESHEET_FILE = # Set optional variables used in the generation of an rtf document. # Syntax is similar to doxygen's config file. RTF_EXTENSIONS_FILE = #--------------------------------------------------------------------------- # configuration options related to the man page output #--------------------------------------------------------------------------- # If the GENERATE_MAN tag is set to YES (the default) Doxygen will # generate man pages GENERATE_MAN = NO # The MAN_OUTPUT tag is used to specify where the man pages will be put. 
# If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `man' will be used as the default path. MAN_OUTPUT = man # The MAN_EXTENSION tag determines the extension that is added to # the generated man pages (default is the subroutine's section .3) MAN_EXTENSION = .3 # If the MAN_LINKS tag is set to YES and Doxygen generates man output, # then it will generate one additional man file for each entity # documented in the real man page(s). These additional files # only source the real man page, but without them the man command # would be unable to find the correct page. The default is NO. MAN_LINKS = NO #--------------------------------------------------------------------------- # configuration options related to the XML output #--------------------------------------------------------------------------- # If the GENERATE_XML tag is set to YES Doxygen will # generate an XML file that captures the structure of # the code including all documentation. GENERATE_XML = NO # The XML_OUTPUT tag is used to specify where the XML pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `xml' will be used as the default path. XML_OUTPUT = xml # The XML_SCHEMA tag can be used to specify an XML schema, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_SCHEMA = # The XML_DTD tag can be used to specify an XML DTD, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_DTD = # If the XML_PROGRAMLISTING tag is set to YES Doxygen will # dump the program listings (including syntax highlighting # and cross-referencing information) to the XML output. Note that # enabling this will significantly increase the size of the XML output. XML_PROGRAMLISTING = YES #--------------------------------------------------------------------------- # configuration options for the AutoGen Definitions output #--------------------------------------------------------------------------- # If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will # generate an AutoGen Definitions (see autogen.sf.net) file # that captures the structure of the code including all # documentation. Note that this feature is still experimental # and incomplete at the moment. GENERATE_AUTOGEN_DEF = NO #--------------------------------------------------------------------------- # configuration options related to the Perl module output #--------------------------------------------------------------------------- # If the GENERATE_PERLMOD tag is set to YES Doxygen will # generate a Perl module file that captures the structure of # the code including all documentation. Note that this # feature is still experimental and incomplete at the # moment. GENERATE_PERLMOD = NO # If the PERLMOD_LATEX tag is set to YES Doxygen will generate # the necessary Makefile rules, Perl scripts and LaTeX code to be able # to generate PDF and DVI output from the Perl module output. PERLMOD_LATEX = NO # If the PERLMOD_PRETTY tag is set to YES the Perl module output will be # nicely formatted so it can be parsed by a human reader. # This is useful # if you want to understand what is going on. # On the other hand, if this # tag is set to NO the size of the Perl module output will be much smaller # and Perl will parse it just the same. PERLMOD_PRETTY = YES # The names of the make variables in the generated doxyrules.make file # are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. 
# This is useful so different doxyrules.make files included by the same # Makefile don't overwrite each other's variables. PERLMOD_MAKEVAR_PREFIX = #--------------------------------------------------------------------------- # Configuration options related to the preprocessor #--------------------------------------------------------------------------- # If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will # evaluate all C-preprocessor directives found in the sources and include # files. ENABLE_PREPROCESSING = YES # If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro # names in the source code. If set to NO (the default) only conditional # compilation will be performed. Macro expansion can be done in a controlled # way by setting EXPAND_ONLY_PREDEF to YES. MACRO_EXPANSION = NO # If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES # then the macro expansion is limited to the macros specified with the # PREDEFINED and EXPAND_AS_DEFINED tags. EXPAND_ONLY_PREDEF = NO # If the SEARCH_INCLUDES tag is set to YES (the default) the includes files # pointed to by INCLUDE_PATH will be searched when a #include is found. SEARCH_INCLUDES = YES # The INCLUDE_PATH tag can be used to specify one or more directories that # contain include files that are not input files but should be processed by # the preprocessor. INCLUDE_PATH = # You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard # patterns (like *.h and *.hpp) to filter out the header-files in the # directories. If left blank, the patterns specified with FILE_PATTERNS will # be used. INCLUDE_FILE_PATTERNS = # The PREDEFINED tag can be used to specify one or more macro names that # are defined before the preprocessor is started (similar to the -D option of # gcc). The argument of the tag is a list of macros of the form: name # or name=definition (no spaces). If the definition and the = are # omitted =1 is assumed. To prevent a macro definition from being # undefined via #undef or recursively expanded use the := operator # instead of the = operator. PREDEFINED = __cplusplus # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then # this tag can be used to specify a list of macro names that should be expanded. # The macro definition that is found in the sources will be used. # Use the PREDEFINED tag if you want to use a different macro definition that # overrules the definition found in the source code. EXPAND_AS_DEFINED = # If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then # doxygen's preprocessor will remove all references to function-like macros # that are alone on a line, have an all uppercase name, and do not end with a # semicolon, because these will confuse the parser if not removed. SKIP_FUNCTION_MACROS = YES #--------------------------------------------------------------------------- # Configuration::additions related to external references #--------------------------------------------------------------------------- # The TAGFILES option can be used to specify one or more tagfiles. For each # tag file the location of the external documentation should be added. The # format of a tag file without this location is as follows: # # TAGFILES = file1 file2 ... # Adding location for the tag files is done as follows: # # TAGFILES = file1=loc1 "file2 = loc2" ... # where "loc1" and "loc2" can be relative or absolute paths # or URLs. Note that each tag file must have a unique name (where the name does # NOT include the path). 
If a tag file is not located in the directory in which # doxygen is run, you must also specify the path to the tagfile here. TAGFILES = # When a file name is specified after GENERATE_TAGFILE, doxygen will create # a tag file that is based on the input files it reads. GENERATE_TAGFILE = libtsk_doxygen.tag # If the ALLEXTERNALS tag is set to YES all external classes will be listed # in the class index. If set to NO only the inherited external classes # will be listed. ALLEXTERNALS = NO # If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed # in the modules index. If set to NO, only the current project's groups will # be listed. EXTERNAL_GROUPS = YES # The PERL_PATH should be the absolute path and name of the perl script # interpreter (i.e. the result of `which perl'). PERL_PATH = /usr/bin/perl #--------------------------------------------------------------------------- # Configuration options related to the dot tool #--------------------------------------------------------------------------- # If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will # generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base # or super classes. Setting the tag to NO turns the diagrams off. Note that # this option also works with HAVE_DOT disabled, but it is recommended to # install and use dot, since it yields more powerful graphs. CLASS_DIAGRAMS = YES # You can define message sequence charts within doxygen comments using the \msc # command. Doxygen will then run the mscgen tool (see # http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the # documentation. The MSCGEN_PATH tag allows you to specify the directory where # the mscgen tool resides. If left empty the tool is assumed to be found in the # default search path. MSCGEN_PATH = # If set to YES, the inheritance and collaboration graphs will hide # inheritance and usage relations if the target is undocumented # or is not a class. HIDE_UNDOC_RELATIONS = YES # If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is # available from the path. This tool is part of Graphviz, a graph visualization # toolkit from AT&T and Lucent Bell Labs. The other options in this section # have no effect if this option is set to NO (the default) HAVE_DOT = NO # The DOT_NUM_THREADS specifies the number of dot invocations doxygen is # allowed to run in parallel. When set to 0 (the default) doxygen will # base this on the number of processors available in the system. You can set it # explicitly to a value larger than 0 to get control over the balance # between CPU load and processing speed. DOT_NUM_THREADS = 0 # By default doxygen will use the Helvetica font for all dot files that # doxygen generates. When you want a differently looking font you can specify # the font name using DOT_FONTNAME. You need to make sure dot is able to find # the font, which can be done by putting it in a standard location or by setting # the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the # directory containing the font. DOT_FONTNAME = FreeSans # The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs. # The default size is 10pt. DOT_FONTSIZE = 10 # By default doxygen will tell dot to use the Helvetica font. # If you specify a different font using DOT_FONTNAME you can use DOT_FONTPATH to # set the path where dot can find it. 
DOT_FONTPATH = # If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect inheritance relations. Setting this tag to YES will force the # CLASS_DIAGRAMS tag to NO. CLASS_GRAPH = YES # If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect implementation dependencies (inheritance, containment, and # class references variables) of the class with other documented classes. COLLABORATION_GRAPH = YES # If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen # will generate a graph for groups, showing the direct groups dependencies GROUP_GRAPHS = YES # If the UML_LOOK tag is set to YES doxygen will generate inheritance and # collaboration diagrams in a style similar to the OMG's Unified Modeling # Language. UML_LOOK = NO # If the UML_LOOK tag is enabled, the fields and methods are shown inside # the class node. If there are many fields or methods and many nodes the # graph may become too big to be useful. The UML_LIMIT_NUM_FIELDS # threshold limits the number of items for each type to make the size more # managable. Set this to 0 for no limit. Note that the threshold may be # exceeded by 50% before the limit is enforced. UML_LIMIT_NUM_FIELDS = 10 # If set to YES, the inheritance and collaboration graphs will show the # relations between templates and their instances. TEMPLATE_RELATIONS = NO # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT # tags are set to YES then doxygen will generate a graph for each documented # file showing the direct and indirect include dependencies of the file with # other documented files. INCLUDE_GRAPH = YES # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and # HAVE_DOT tags are set to YES then doxygen will generate a graph for each # documented header file showing the documented files that directly or # indirectly include this file. INCLUDED_BY_GRAPH = YES # If the CALL_GRAPH and HAVE_DOT options are set to YES then # doxygen will generate a call dependency graph for every global function # or class method. Note that enabling this option will significantly increase # the time of a run. So in most cases it will be better to enable call graphs # for selected functions only using the \callgraph command. CALL_GRAPH = NO # If the CALLER_GRAPH and HAVE_DOT tags are set to YES then # doxygen will generate a caller dependency graph for every global function # or class method. Note that enabling this option will significantly increase # the time of a run. So in most cases it will be better to enable caller # graphs for selected functions only using the \callergraph command. CALLER_GRAPH = NO # If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen # will generate a graphical hierarchy of all classes instead of a textual one. GRAPHICAL_HIERARCHY = YES # If the DIRECTORY_GRAPH and HAVE_DOT tags are set to YES # then doxygen will show the dependencies a directory has on other directories # in a graphical way. The dependency relations are determined by the #include # relations between the files in the directories. DIRECTORY_GRAPH = YES # The DOT_IMAGE_FORMAT tag can be used to set the image format of the images # generated by dot. Possible values are svg, png, jpg, or gif. # If left blank png will be used. 
If you choose svg you need to set # HTML_FILE_EXTENSION to xhtml in order to make the SVG files # visible in IE 9+ (other browsers do not have this requirement). DOT_IMAGE_FORMAT = png # If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to # enable generation of interactive SVG images that allow zooming and panning. # Note that this requires a modern browser other than Internet Explorer. # Tested and working are Firefox, Chrome, Safari, and Opera. For IE 9+ you # need to set HTML_FILE_EXTENSION to xhtml in order to make the SVG files # visible. Older versions of IE do not have SVG support. INTERACTIVE_SVG = NO # The tag DOT_PATH can be used to specify the path where the dot tool can be # found. If left blank, it is assumed the dot tool can be found in the path. DOT_PATH = # The DOTFILE_DIRS tag can be used to specify one or more directories that # contain dot files that are included in the documentation (see the # \dotfile command). DOTFILE_DIRS = # The MSCFILE_DIRS tag can be used to specify one or more directories that # contain msc files that are included in the documentation (see the # \mscfile command). MSCFILE_DIRS = # The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of # nodes that will be shown in the graph. If the number of nodes in a graph # becomes larger than this value, doxygen will truncate the graph, which is # visualized by representing a node as a red box. Note that doxygen if the # number of direct children of the root node in a graph is already larger than # DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note # that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH. DOT_GRAPH_MAX_NODES = 50 # The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the # graphs generated by dot. A depth value of 3 means that only nodes reachable # from the root by following a path via at most 3 edges will be shown. Nodes # that lay further from the root node will be omitted. Note that setting this # option to 1 or 2 may greatly reduce the computation time needed for large # code bases. Also note that the size of a graph can be further restricted by # DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction. MAX_DOT_GRAPH_DEPTH = 0 # Set the DOT_TRANSPARENT tag to YES to generate images with a transparent # background. This is disabled by default, because dot on Windows does not # seem to support this out of the box. Warning: Depending on the platform used, # enabling this option may lead to badly anti-aliased labels on the edges of # a graph (i.e. they become hard to read). DOT_TRANSPARENT = NO # Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output # files in one run (i.e. multiple -o and -T options on the command line). This # makes dot run faster, but since only newer versions of dot (>1.8.10) # support this, this feature is disabled by default. DOT_MULTI_TARGETS = NO # If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will # generate a legend page explaining the meaning of the various boxes and # arrows in the dot generated graphs. GENERATE_LEGEND = YES # If the DOT_CLEANUP tag is set to YES (the default) Doxygen will # remove the intermediate dot files that are used to generate # the various graphs. DOT_CLEANUP = YES sleuthkit-4.2.0/tsk/docs/footer.html000644 000765 000024 00000000451 12576320700 020242 0ustar00carrierstaff000000 000000

Copyright © 2007-2015 Brian Carrier. (carrier -at- sleuthkit -dot- org)
This work is licensed under a Creative Commons Attribution-Share Alike 3.0 United States License.

sleuthkit-4.2.0/tsk/docs/fs.dox000644 000765 000024 00000047067 12576320700 017220 0ustar00carrierstaff000000 000000 /*! \page fspage File Systems This section describes the general file system analysis concepts and corresponding APIs in TSK. In addition to this documentation, there are sample programs in the samples directory in TSK that show these functions being used while processing a disk image. \section fs_gen General Concepts \subsection fs_layers File System Layers File systems are a collection of data structures that are stored in a disk or volume that allow you to save and open files. There are many different file systems and they all have unique data structures, but there are some general concepts that apply to all file systems. These general concepts are used in TSK to provide generic access to a variety of file systems. TSK organizes the data in file systems into five categories: File System, Data Units, Metadata, File Name, and Application. All data can be categorized into one of these:
  • File System Category: The data in this category describe the layout and general features of the file system. For example, how big each data unit is and how many data units there are.
  • Data Unit Category: This category contains the data units (i.e. blocks and clusters) in the file system that can store file content. Data units are a fixed size, and most file systems require the size to be a power of 2 (1024 or 4096 bytes, for example).
  • Metadata Category: This is where the descriptive data about files and directories are stored. This layer includes the inode structures in UNIX, MFT entries in NTFS, and directory entry structures in FAT. This layer contains information such as last access times, permissions, and pointers to the data units that were allocated by the file or directory. The data in this category completely describes a file, but it is typically given a numeric address that is difficult to remember.
  • File Name Category: This is where the actual name of the file or directory is saved. In general, this is a different structure than the metadata structure. The exception to this is the FAT file system. File names are typically stored in data structures in the parent directory. The data structures contain a pointer to the metadata structure, which contains the rest of the file information.
  • Application Category: This is where non-essential file system data exists. These are features that make life easier for the file system and operating system. Examples include journals that record file system updates and lists that record what files have recently been updated.
A basic ASCII diagram is shown here of the different categories. The file system category identifies the locations of the file name, metadata, data units, and application categories. The file names then point to the metadata, and the metadata points to the data units.
                     +-------------------------+
                     |       File System       |
                     +-------------------------+
                       /          |           \        +------------------+
                      /           |            \       |   Application    |
                     /            |             \      +------------------+
                    /             |              \
        +------------+      +-----------+      +-------------+
        | File Names | ---> | Metadata  | ---> | Data Units  |
        +------------+      +-----------+      +-------------+
The command line tools and the file system APIs are organized based on these layers. Data at each layer can be directly accessed and some functions combine layers. \subsection fs_del Deleted Files When a file is deleted, its data units, metadata structure, and file name structure are typically marked as being free and they can be reused when a new file is created. It is very easy for them to be reallocated at different times and therefore care must be taken when interpreting the data associated with deleted files. When you encounter an unallocated file name, check the allocation status of the metadata structure it points to. If the metadata structure is allocated, then either the metadata structure was reallocated to a new file or the unallocated file name was created when a file was moved and the unallocated name is the old file name. In general, there is no way to differentiate between these two scenarios (the exception is in NTFS, which includes sequence numbers that increment each time the metadata structure is reallocated). One test that can be applied in this situation is to compare the type (i.e. file or directory) as reported in the file name structure versus the type as reported in the metadata structure. If one is a directory and one is a file, then the metadata structure was reallocated. You could also compare the file name extension with the file type (i.e. HTML, doc, JPEG). When you encounter an unallocated metadata entry, there may no longer be a file name structure that points to it. These are called orphan files. You will still be able to access them via their metadata address, but their full path will be unknown. TSK makes a special directory to store the orphan files in so that they can be easily accessed. \section fs_open Opening the File System Typically, a file system exists in a \ref vspage "volume", but it can also exist on its own on a disk (such as a USB drive, CD-ROM, or floppy disk). You may also have an image file that is of only one partition (i.e. a logical image). Because of these different scenarios, TSK has two functions to open a file system. The tsk_fs_open_img() function allows you to open a file system using only a TSK_IMG_INFO structure and an offset. The offset indicates the location where the file system starts in the image. As a convenience, there is also the tsk_fs_open_vol() function that takes a TSK_VS_PART_INFO structure as an argument and determines the offset based on the volume information. Both of these functions return a TSK_FS_INFO structure that is used as a handle for more detailed file system analysis. Close the file system with the tsk_fs_close() function. The TSK_FS_INFO structure contains data regarding the number of data units, the number of metadata structures, etc. The C++ wrappers use the TskFsInfo class and it has open methods to open the file system. \subsection fs_types File System Types The functions that open the file system can detect the file system type, but sometimes you may need to specify it (if for example TSK detects multiple file system structures). Internally, TSK uses a numerical ID (TSK_FS_TYPE_ENUM) for each file system type, which is stored in TSK_FS_INFO::ftype. If you have a TSK_FS_INFO structure and want to know what file system type it is for, you can pass the TSK_FS_INFO::ftype value to one of the TSK_FS_TYPE_ISXXX macros, such as TSK_FS_TYPE_ISNTFS(). \code if (TSK_FS_TYPE_ISNTFS(fs_info->ftype)) { ....
} \endcode To map from the numerical ID to a short name (such as "ntfs"), the tsk_fs_type_toname() function can be used. You can also map from the short name to the ID using the tsk_fs_type_toid() function. To determine the IDs of the supported file systems, the tsk_fs_type_supported() function can be used. The names and descriptions of the supported types can be printed to an open FILE handle using the tsk_fs_type_print() function. \subsection fs_read Reading General File System Data As you will see, there are many ways to access file system data from the different categories. One of the most generic methods is using the tsk_fs_read() function. It simply takes a byte offset relative to the start of the file system and a buffer to read the data into. It does not care about block addresses or files. In the following sections, there are smarter versions of this function that will take block addresses as an argument, instead of a byte offset. The TskFsInfo::read() method allows data to be read using the C++ classes. \section fs_block_open Opening and Reading File System Blocks TSK allows you to read the contents of any block in the file system. The size of each data unit is defined in the TSK_FS_INFO::block_size field and the number of data units (as defined by the file system) is defined in the TSK_FS_INFO::block_count field. The address of the first block is defined in TSK_FS_INFO::first_block and the last block address (as defined by the file system structures) is defined in TSK_FS_INFO::last_block. In some cases, the image may not be complete and the last block that can be read is less than TSK_FS_INFO::last_block. In that case, TSK_FS_INFO::last_block_act contains the actual last block in the image. When last_block_act is less than last_block, it means that the image is not complete. You can obtain the contents of a specific block by calling the tsk_fs_block_get() function. It returns a TSK_FS_BLOCK structure with the contents of the data unit and flags about its allocation status. You must free the TSK_FS_BLOCK structure by calling tsk_fs_block_free(). You can also walk the data units by calling tsk_fs_block_walk(). This function will call a callback function on data units that meet certain criteria. Walking is useful if, for example, you want to focus on only allocated or unallocated data units. You can also read the contents of a data unit using the tsk_fs_read_block() function, which reads a block of data (given its data unit address) into a buffer. tsk_fs_read_block() does not provide the data unit's allocation status and is therefore more efficient than tsk_fs_block_get() if you want only the content. Similar methods exist in the TskFsInfo C++ class. The C++ wrapper to TSK_FS_BLOCK is the TskFsBlock class. One FAT file system specific thing should be mentioned here. FAT stores its file content in clusters, but the first cluster does not start at the beginning of the file system. There are typically many megabytes of data (FAT tables) before it. If TSK were to use clusters as the standard data unit, it would have no way of addressing the sectors prior to the first cluster and a confusing addressing scheme would need to exist so that all data could be accessed. For simplicity, TSK uses sector addresses everywhere with FAT file systems. All data unit addresses used by the TSK API are in 512-byte sectors (and TSK_FS_INFO::block_size is 512 and not the original cluster size).
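To tie these functions together, the following sketch (it is not part of the TSK distribution, and the image name "disk.dd" and the offset of 0 are only example values) opens a disk image, opens the file system that starts at the beginning of the image, and reads the first data unit into a buffer:
\code
TSK_IMG_INFO *img;
TSK_FS_INFO *fs;
char *buf;

// Open a single (non-split) image file and auto-detect its format.
img = tsk_img_open_sing(_TSK_T("disk.dd"), TSK_IMG_TYPE_DETECT, 0);
if (img == NULL) {
    tsk_error_print(stderr);
    exit(1);
}

// Open the file system that starts at byte offset 0 of the image.
fs = tsk_fs_open_img(img, 0, TSK_FS_TYPE_DETECT);
if (fs == NULL) {
    tsk_error_print(stderr);
    tsk_img_close(img);
    exit(1);
}

// Read the first data unit; the buffer size comes from TSK_FS_INFO::block_size.
buf = (char *) malloc(fs->block_size);
if ((buf != NULL) &&
    (tsk_fs_read_block(fs, fs->first_block, buf, fs->block_size) == -1)) {
    tsk_error_print(stderr);
}

free(buf);
tsk_fs_close(fs);
tsk_img_close(img);
\endcode
The same steps work with tsk_fs_block_get() in place of tsk_fs_read_block() if the allocation status of the block is also needed (in which case the returned TSK_FS_BLOCK must be freed with tsk_fs_block_free()).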
\section fs_file Opening and Reading Files \subsection fs_file_open Opening a File With TSK, you can open a file from either the metadata or file name layer. To open at the metadata layer, you need the metadata address of the file. To open at the file name layer, you need the name of the file. Regardless of the method used to open a file, a TSK_FS_FILE structure will be returned. This structure can point to TSK_FS_NAME and TSK_FS_META structures, which store the file and metadata category data. The tsk_fs_file_open_meta() function takes a metadata address as an argument and returns a TSK_FS_FILE structure. The TSK_FS_FILE::name pointer will be NULL because the file name was not used to open the file and, for efficiency, TSK does not search the directory tree to locate the file name that points to the metadata address. The first and last metadata addresses in the file system are defined in TSK_FS_INFO::first_inum and TSK_FS_INFO::last_inum. The tsk_fs_file_open() function takes a file name as an argument and first identifies the metadata address that the file name points to. It then calls tsk_fs_file_open_meta(). Note that if you know the metadata address of a file, then using tsk_fs_file_open_meta() is more efficient than tsk_fs_file_open() because you can skip the process of mapping the file name to the metadata address. If you use tsk_fs_file_open() then the TSK_FS_FILE::name structure will be populated with the name details. The TSK_FS_FILE structure can be used to read file content and its fields can be used for processing. The structure must be closed with tsk_fs_file_close(). The C++ wrappers use the TskFsFile class. It has open methods that allow a file to be opened and read from. \subsection fs_file_read Reading File Content There are two basic approaches to reading file content after you have a TSK_FS_FILE structure. You can read data into a buffer from a specific offset in a file using tsk_fs_file_read(). You can also use the tsk_fs_file_walk() function, which takes a callback function as an argument and will call the callback with the contents of each data unit in the file. Note that there are several ways of storing file content. For example, NTFS can store the content in a compressed form and can store small amounts of data in buffers inside of the metadata structure (instead of allocating a full data unit). The callback for tsk_fs_file_walk() will be given the address where the data came from, but it will be 0 and not be relevant if the TSK_FS_BLOCK_FLAG_ENUM flag is for a sparse, compressed, or non-resident file. \subsection fs_file_attrs Attributes TSK allows each file to have multiple attributes. An attribute is simply a data container. Files in most file systems will have only one attribute, which stores the file content. NTFS files will have multiple attributes because NTFS stores the file name, dates, and other information in different attributes. TSK stores UFS and ExtX indirect blocks in a separate attribute. TSK allows you to read from all of the attributes. Each attribute has a type and an ID. The types are defined in the TSK_FS_ATTR_TYPE_ENUM structure and the ID is an integer that is unique to the file. A file can have multiple attributes with the same type, but it can have only one attribute with a given ID. The TSK APIs that have been previously presented will use the default attribute. Many of the APIs have a variation that will allow you to specify a specific attribute type and ID.
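Before looking at those variations, here is a minimal sketch of the default-attribute case (it is not from the TSK distribution; fs_info is assumed to be an open TSK_FS_INFO handle and the path "/etc/passwd" is only an example). It opens a file by name and reads the beginning of its content:
\code
TSK_FS_FILE *fs_file;
char buf[1024];
ssize_t cnt;

// Open the file by name; both TSK_FS_FILE::name and TSK_FS_FILE::meta will be set.
fs_file = tsk_fs_file_open(fs_info, NULL, "/etc/passwd");
if (fs_file == NULL) {
    tsk_error_print(stderr);
}
else {
    // Read up to 1024 bytes of the default attribute, starting at offset 0.
    cnt = tsk_fs_file_read(fs_file, 0, buf, sizeof(buf), TSK_FS_FILE_READ_FLAG_NONE);
    if (cnt == -1)
        tsk_error_print(stderr);
    tsk_fs_file_close(fs_file);
}
\endcode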
For example, tsk_fs_file_read_type() has the same basic operation as tsk_fs_file_read() except it allows the caller to specify the type and ID. There is also the tsk_fs_file_walk_type() function that is the more specific version of the tsk_fs_file_walk() function. \subsection fs_file_attr_read Accessing Attributes The previous section outlined that some API functions allow you to access a specific attribute. Sometimes, though, you may want access to the attribute, which is stored in a TSK_FS_ATTR structure. To access the default attribute, use tsk_fs_file_attr_get(). If you know the type that you want to access, you can use the tsk_fs_file_attr_get_type() function. The C++ class has similar methods, TskFsFile::getAttr() for example. If you want to figure out what types exist or want to cycle through all of the attributes, you can use the tsk_fs_file_attr_getsize() function to get the number of attributes and the tsk_fs_file_attr_get_idx() function to get an attribute based on a 0 to n-1 based index. For example: \code int i, cnt; cnt = tsk_fs_file_attr_getsize(fs_file); for (i = 0; i < cnt; i++) { const TSK_FS_ATTR *fs_attr; fs_attr = tsk_fs_file_attr_get_idx(fs_file, i); if (!fs_attr) continue; ... } \endcode Once you have a TSK_FS_ATTR structure, you can read from it using the tsk_fs_attr_read() and tsk_fs_attr_walk() functions. These operate just like the tsk_fs_file_read() and tsk_fs_file_walk() functions and in fact the file-based functions simply load the relevant attribute and call the corresponding attribute-based function. Similar methods exist in the TskFsAttr class. \subsection fs_file_runs Data Runs This section provides some details on how the file content is stored in TSK. If you use the APIs previously described, you will not need to read this section. It is more of an FYI. TSK stores locations where file content is stored in run lists. A run is a set of consecutive blocks that a file has allocated. The run is stored based on the starting block and the length of the run. The run lists for the file attributes are stored in TSK_FS_META::attr, but note that the data may not be filled in until it is needed by TSK. For efficiency, TSK only loads this data as needed (for some file systems). The TSK_FS_META::attr_state field identifies if it has been loaded yet or not. \section fs_dir_open Opening and Reading a Directory The previous section outlined how to open a file when you know its name or address. In many cases, you will want to browse the files in a directory and see what files can be opened. There are two methods for browsing the file names. The first is that a directory can be opened using tsk_fs_dir_open() or tsk_fs_dir_open_meta(). The difference between these two functions is that tsk_fs_dir_open() uses the directory name and tsk_fs_dir_open_meta() uses the directory metadata address. Like opening a file, tsk_fs_dir_open_meta() is more efficient if you already know the metadata address because tsk_fs_dir_open() will first search the directory structure for the metadata address and then call tsk_fs_dir_open_meta(). These return a TSK_FS_DIR structure that allows the caller to then access individual file names in the directory. The number of entries in the directory can be obtained using the tsk_fs_dir_getsize() function and individual entries can be returned with the tsk_fs_dir_get() function. You can close the open directory using tsk_fs_dir_close(). If you are recursing into directories, you could get into an infinite loop.
You can use the TSK_STACK structure to prevent this; see \ref basic_misc. You can also walk the directory tree using tsk_fs_dir_walk(). This will call the callback for every file or subdirectory in a directory and can recurse into directories if the proper flag is given. To walk the entire directory structure, start the walk at the root directory (TSK_FS_INFO::root_inum) and set the recurse flag. These approaches all return a TSK_FS_FILE structure and these will all have the TSK_FS_FILE::name structure defined. However, some of the files may not have the TSK_FS_FILE::meta structure defined if the file is deleted and the link to the metadata has been lost. Another way to browse the files is using the tsk_fs_meta_walk() function, which will process a range of metadata structures and call a callback function on each one. The callback gets the corresponding TSK_FS_FILE structure with the file's metadata in TSK_FS_FILE::meta and TSK_FS_FILE::name set to NULL. This functionality also exists in the TskFsDir C++ class. \subsection fs_dir_spec Virtual Files When browsing the file system, using the directory structure is most convenient and therefore special files and directories were added to make finding all relevant data easier. Orphan files, which were discussed in \ref fs_del, can be accessed from the /$OrphanFiles directory. This is a virtual directory, but TSK allows you to treat it as a normal directory (its flags in TSK_FS_META::flags will show that it is virtual though). TSK also provides special files so that you can access the boot sector and FATs in a FAT file system. The $MBR, $FAT1, and $FAT2 files are virtual files that point to the sectors for the boot sector, primary FAT, and backup FAT. You can use these virtual files to read the contents of those structures. \section fs_map Mapping Data In some cases, you may want to identify which file has allocated a given block or which name points to a metadata structure. This can typically be done by using the tsk_fs_meta_walk() or tsk_fs_dir_walk() functions, respectively. But, there are some convenience functions to make this easier. The tsk_fs_path2inum() function takes a UTF-8 path as an argument and will identify the metadata address that it points to. This exists in the C++ class as TskFsInfo::path2Inum(). Next to \ref hashdbpage Back to \ref users_guide "Table of Contents" */ sleuthkit-4.2.0/tsk/docs/hashdb.dox000644 000765 000024 00000004716 12576320700 020033 0ustar00carrierstaff000000 000000 /*! \page hashdbpage Hash Databases This section describes some of the API functions and concepts associated with the Hash Database library. \section hash_over Overview Hash databases are frequently used to identify known good and known bad files. Text files of MD5 and SHA-1 hashes can be easily created and shared, but they are frequently not the most efficient to use when searching for a hash because they are in an unsorted order. The hash database functions in TSK create an index into text file hash databases and allow you to more quickly perform lookups. TSK uses the index to perform binary searches for the hashes (see Informer #6). \section hash_open Opening a Hash Database Before a database can be indexed or searched, it must first be opened. Use the tsk_hdb_open() function to open the database. There is a flag that can be specified to open only the index. This allows you to save storage space by keeping only the index around, but it means that you will not have access to other metadata that is stored in the database.
The tsk_hdb_open() function will return a TSK_HDB_INFO structure that will be used as a handle to index and search the database. An open hash database can be closed with tsk_hdb_close(). This functionality also exists in the TskHdbInfo C++ class. \section hash_index Indexing a Hash Database A single database can have more than one index if the database contains multiple hash types. For example, both MD5 and SHA-1 indexes can be made for the NSRL database. Because multiple indexes can exist, you must specify the type when making or testing for an index. You can test if an open hash database has an index by using the tsk_hdb_hasindex() function. If you need to create one, use the tsk_hdb_makeindex() function. This process may take several minutes (or longer). \section hash_search Searching a Hash Database An indexed database can be searched using either tsk_hdb_lookup_raw() or tsk_hdb_lookup_str(). The only difference is that tsk_hdb_lookup_raw() takes the hash value as a byte array and tsk_hdb_lookup_str() takes the hash value as a string. Both functions can call a callback with details of entries that are found, or the QUICK flag can be given in which case the callback is not called and instead the return value of the function identifies if the hash is in the database or not. Next to \ref autopage Back to \ref users_guide "Table of Contents" */ sleuthkit-4.2.0/tsk/docs/img.dox000644 000765 000024 00000005764 12576320700 017362 0ustar00carrierstaff000000 000000 /*! \page imgpage Disk Images This section describes the general disk image analysis concepts and corresponding APIs in TSK. In addition to this documentation, there are sample programs in the samples directory in TSK that show these functions being used while processing a disk image. \section img_open Opening the Disk Image Before the volume and file system structures can be analyzed, the disk image must be opened. The tsk_img_open() function is used to open one or more disk image files. The file format can be specified or auto-detection methods will be used. If the specific type cannot be determined, then the raw type will be assumed. Note that by default, TSK supports only single and split raw images. Other file formats, such as AFF and EWF, are supported only if the corresponding libraries existed and were configured when TSK was built and installed. If you are opening only a single disk image file (i.e. it is not split), then you can use the tsk_img_open_sing() function. It takes fewer arguments. The tsk_img_open() function returns a TSK_IMG_INFO structure. This structure has fields that contain the type and size (uncompressed, if applicable) of the disk image. When you are done analyzing the disk image, it can be closed with tsk_img_close(). Note that the tsk_img_open() and tsk_img_open_sing() functions use the TSK_TCHAR type to store the disk image paths. This type is system dependent and is a wchar_t on Windows and char on other systems. See \ref basic_enc_t for more details. If you are in an environment where you will have UTF-8 text even in Windows, then you can use the tsk_img_open_utf8() and tsk_img_open_utf8_sing() functions. To use the C++ wrappers, create a TskImgInfo object and call one of the TskImgInfo::open() methods. \section img_types File Format Types There are several functions that can be used to map between names and IDs of file format types. Internally, the TSK functions use a numerical ID for each type.
The tsk_img_type_toname() function maps the ID to a single word name (such as "raw") and the tsk_img_type_todesc() function maps an ID to a longer description (such as "Single raw file"). The short name is used in the TSK command line tools when the user specifies a type and the tsk_img_type_toid() function maps the short name to an ID. There are also functions that identify which file formats are supported by the installation. The tsk_img_type_supported() function returns the IDs of the supported types. The tsk_img_type_print() prints the supported type names and descriptions to a handle. This function is used by the command line tools to print the supported types to the console. \section img_read Reading Data To read data from the disk image, the tsk_img_read() function is used. This function can read an arbitrary amount of data from an arbitrary byte offset. The C++ class has a public read method, TskImgInfo::read(). Next to \ref vspage Back to \ref users_guide "Table of Contents" */ sleuthkit-4.2.0/tsk/docs/main.dox000644 000765 000024 00000003257 12576320700 017525 0ustar00carrierstaff000000 000000 /*! \mainpage The Sleuth Kit (TSK) Library User's Guide and API Reference \author Brian Carrier

Overview

This document was designed to help integrate the Sleuth Kit (TSK) library into an application that needs to analyze data from a disk image. Note that this document does not contain information about using the command line tools in TSK. The command line tools use the functionality of this library, but this document is for programmers who want to use the library. There are two main sections to this document. One is the User's Guide that describes the organization of TSK, the APIs, and how to use them together. The other is a list of the API functions and a description of their arguments and return values.

User's Guide

\anchor users_guide The User's Guide describes the various components of TSK and how to use them. It refers to specific functions and data structures with links to the details in the API Reference. - \subpage basicpage - \subpage basepage - \subpage imgpage - \subpage vspage - \subpage fspage - \subpage hashdbpage - \subpage autopage - \subpage cpppage

API Reference

The API Reference lists the public C and C++ API functions with their arguments and return values. The User's Guide should be read first so that the interaction and use of the functions are understood. These pages can also be found in the Modules section. - \ref baselib and \ref baselib_cpp - \ref imglib and \ref imglib_cpp - \ref vslib and \ref vslib_cpp - \ref fslib and \ref fslib_cpp - \ref hashdblib and \ref hashdblib_cpp - \ref autolib */ sleuthkit-4.2.0/tsk/docs/vs.dox000644 000765 000024 00000012042 12576320700 017221 0ustar00carrierstaff000000 000000 /*! \page vspage Volume Systems This section describes the general volume system analysis concepts and corresponding APIs in TSK. In addition to this documentation, there are sample programs in the samples directory in TSK that show these functions being used while processing a disk image. \section vs_open Opening the Volume System Once the disk image has been opened, you can start to analyze it. If you have a physical disk image then you will need to check if a volume system exists. Volume systems organize contiguous sectors on a disk into volumes. Use TSK to determine the type of volume system that a disk is using and to determine the volume layout. Note that some USB storage devices and floppy disks do not have volume systems. The first step is to open the volume system using the tsk_vs_open() function. This function takes an offset as an argument. TSK uses this offset to start looking for volume systems. In general, an offset of 0 should be used. This function returns a TSK_VS_INFO structure that has values for the volume system type and number of volumes (also called partitions). The volume system is closed with the tsk_vs_close() function. The C++ wrapper uses the TskVsInfo class, which has TskVsInfo::open() and TskVsInfo::close() methods that are similar to those previously described. \section vs_types Volume System Types tsk_vs_open() can detect the type of the volume system, but sometimes you may need to specify it (if for example TSK detects multiple volume system structures) or you may want to print the name of the volume system type. Internally, TSK uses a numerical ID for each volume system type. To map from the numerical ID to a short name (such as "dos"), the tsk_vs_type_toname() function can be used. To map the ID to a longer description (such as "DOS Partition Table"), use the tsk_vs_type_todesc() function. You can also map from the short name to the ID using the tsk_vs_type_toid() function. To determine the IDs of the supported volume systems, the tsk_vs_type_supported() function can be used. The names and descriptions of the supported types can be printed to an open FILE handle using the tsk_vs_type_print() function. \section vs_open2 Accessing Individual Volumes The TSK_VS_INFO structure that tsk_vs_open() returns is for all volumes in the volume system. Each volume will be assigned an address, starting with 0. There are two general ways for accessing the individual volumes. The tsk_vs_part_get() function returns information about a specific volume while the tsk_vs_part_walk() function goes through a specified range of volumes and calls a callback function with data about each volume. Regardless of the method used, a TSK_VS_PART_INFO structure is used to describe each volume. This structure has fields for the volume starting offset (relative to the start of the disk image), length, and type. The C++ wrapper has a TskVsPartInfo class that wraps the TSK_VS_PART_INFO structure. TSK defines several volume types.
\b Allocated volumes are those that are defined by the volume system. An allocated volume has an entry in a partition table in the volume system. \b Unallocated volumes contain the sectors that are not allocated to a volume in the volume system. For example, if one volume has sectors 0 to 9 and another has sectors 50 to 99 then TSK would create an unallocated volume for sectors 10 to 49. Note that these volumes are virtually created by TSK and they are not defined in the volume system. Allocated and unallocated volumes will cover the entire disk image. The third type of volume is a metadata volume. \b Metadata volumes contain sectors that store volume system data, such as a partition table. With some volume systems, the metadata volumes will overlap allocated volumes (because those volume systems "allocate" space for the metadata structures). In other volume systems, the metadata volumes will overlap unallocated volumes (because those volume systems store metadata in unused space). In general, you will want to analyze the data in allocated volumes as a file system and analyze the data in unallocated volumes using file carving or other data recovery techniques. In many cases, you can ignore the metadata volumes. If you are using the tsk_vs_part_walk() function then you can specify what types of partitions that you would like returned via the callback. \section vs_read Reading Data There are three ways to read data from a volume system. The tsk_vs_read_block() function reads sectors using a sector address relative to the start of the volume system. However, to read sectors using an address that is relative to the start of a specific volume, use the tsk_vs_part_read_block() function. To read data in a volume in non-sector sized chunks, use the tsk_vs_part_read() function. The C++ TskVsInfo and TskVsPartInfo classes have read methods that allow the data to be read from the start of the volume system or inside of a volume. Next to \ref fspage Back to \ref users_guide "Table of Contents" */ sleuthkit-4.2.0/tsk/base/.indent.pro000644 000765 000024 00000000036 12576320700 020120 0ustar00carrierstaff000000 000000 -kr -psl -nce -ip2 -nlp -nut sleuthkit-4.2.0/tsk/base/crc.c000644 000765 000024 00000013622 12576320700 016757 0ustar00carrierstaff000000 000000 /* Code taken and modified from: A PAINLESS GUIDE TO CRC ERROR DETECTION ALGORITHMS Where the following copyright appears: Status : Copyright (C) Ross Williams, 1993. However, permission is granted to make and distribute verbatim copies of this document provided that this information block and copyright notice is included. Also, the C code modules included in this document are fully public domain. */ /******************************************************************************/ /* Start of crcmodel.c */ /******************************************************************************/ /* */ /* Author : Ross Williams (ross@guest.adelaide.edu.au.). */ /* Date : 3 June 1993. */ /* Status : Public domain. */ /* */ /* Description : This is the implementation (.c) file for the reference */ /* implementation of the Rocksoft^tm Model CRC Algorithm. For more */ /* information on the Rocksoft^tm Model CRC Algorithm, see the document */ /* titled "A Painless Guide to CRC Error Detection Algorithms" by Ross */ /* Williams (ross@guest.adelaide.edu.au.). This document is likely to be in */ /* "ftp.adelaide.edu.au/pub/rocksoft". */ /* */ /* Note: Rocksoft is a trademark of Rocksoft Pty Ltd, Adelaide, Australia. 
*/ /* */ /******************************************************************************/ /* */ /* Implementation Notes */ /* -------------------- */ /* To avoid inconsistencies, the specification of each function is not echoed */ /* here. See the header file for a description of these functions. */ /* This package is light on checking because I want to keep it short and */ /* simple and portable (i.e. it would be too messy to distribute my entire */ /* C culture (e.g. assertions package) with this package. */ /* */ /******************************************************************************/ #include "crc.h" #include #ifndef _MSC_VER #include #endif /******************************************************************************/ /* The following definitions make the code more readable. */ #define BITMASK(X) (1L << (X)) #define MASK32 0xFFFFFFFFL #define LOCAL static /******************************************************************************/ LOCAL ulong reflect P_((ulong v,int b)); LOCAL ulong reflect (v,b) /* Returns the value v with the bottom b [0,32] bits reflected. */ /* Example: reflect(0x3e23L,3) == 0x3e26 */ ulong v; int b; { int i; ulong t = v; for (i=0; i>=1; } return v; } /******************************************************************************/ LOCAL ulong widmask P_((p_cm_t)); LOCAL ulong widmask (p_cm) /* Returns a longword whose value is (2^p_cm->cm_width)-1. */ /* The trick is to do this portably (e.g. without doing <<32). */ p_cm_t p_cm; { return (((1L<<(p_cm->cm_width-1))-1L)<<1)|1L; } /******************************************************************************/ void cm_ini (p_cm) p_cm_t p_cm; { p_cm->cm_reg = p_cm->cm_init; } /******************************************************************************/ void cm_nxt (p_cm,ch) p_cm_t p_cm; int ch; { int i; ulong uch = (ulong) ch; ulong topbit = BITMASK(p_cm->cm_width-1); if (p_cm->cm_refin) uch = reflect(uch,8); p_cm->cm_reg ^= (uch << (p_cm->cm_width-8)); for (i=0; i<8; i++) { if (p_cm->cm_reg & topbit) p_cm->cm_reg = (p_cm->cm_reg << 1) ^ p_cm->cm_poly; else p_cm->cm_reg <<= 1; p_cm->cm_reg &= widmask(p_cm); } } /******************************************************************************/ void cm_blk (p_cm,blk_adr,blk_len) p_cm_t p_cm; p_ubyte_ blk_adr; ulong blk_len; { while (blk_len--) cm_nxt(p_cm,*blk_adr++); } /******************************************************************************/ ulong cm_crc (p_cm) p_cm_t p_cm; { if (p_cm->cm_refot) return p_cm->cm_xorot ^ reflect(p_cm->cm_reg,p_cm->cm_width); else return p_cm->cm_xorot ^ p_cm->cm_reg; } /******************************************************************************/ /* End of crcmodel.c */ /******************************************************************************/ void crc16(p_cm_t crc_context, unsigned char const *buff, unsigned int size) { while(size > 0) { cm_nxt(crc_context, *buff++); size--; } } #ifdef TEST_CRC main() { cm_t TestCRC; TestCRC.cm_width = 16; TestCRC.cm_poly = 0x8005L; TestCRC.cm_init = 0x0000; // TestCRC.cm_init = 0xFFFF; TestCRC.cm_refin = TRUE; TestCRC.cm_refot = TRUE; TestCRC.cm_xorot = 0x0000; // TestCRC.cm_xorot = 0xFFFF; cm_ini(&TestCRC); char TestString[]="123456789"; char *finger = NULL; int i = 0; for(i = 0; i cm_width = 16; */ /* p_cm->cm_poly = 0x8005L; */ /* p_cm->cm_init = 0L; */ /* p_cm->cm_refin = TRUE; */ /* p_cm->cm_refot = TRUE; */ /* p_cm->cm_xorot = 0L; */ /* Note: Poly is specified without its top bit (18005 becomes 8005). */ /* Note: Width is one bit less than the raw poly width. 
*/ /* */ /* Step 3: Initialize the instance with a call cm_ini(p_cm); */ /* */ /* Step 4: Process zero or more message bytes by placing zero or more */ /* successive calls to cm_nxt. Example: cm_nxt(p_cm,ch); */ /* */ /* Step 5: Extract the CRC value at any time by calling crc = cm_crc(p_cm); */ /* If the CRC is a 16-bit value, it will be in the bottom 16 bits. */ /* */ /******************************************************************************/ /* */ /* Design Notes */ /* ------------ */ /* PORTABILITY: This package has been coded very conservatively so that */ /* it will run on as many machines as possible. For example, all external */ /* identifiers have been restricted to 6 characters and all internal ones to */ /* 8 characters. The prefix cm (for Crc Model) is used as an attempt to avoid */ /* namespace collisions. This package is endian independent. */ /* */ /* EFFICIENCY: This package (and its interface) is not designed for */ /* speed. The purpose of this package is to act as a well-defined reference */ /* model for the specification of CRC algorithms. If you want speed, cook up */ /* a specific table-driven implementation as described in the document cited */ /* above. This package is designed for validation only; if you have found or */ /* implemented a CRC algorithm and wish to describe it as a set of parameters */ /* to the Rocksoft^tm Model CRC Algorithm, your CRC algorithm implementation */ /* should behave identically to this package under those parameters. */ /* */ /******************************************************************************/ /* The following #ifndef encloses this entire */ /* header file, rendering it indempotent. */ #ifndef CM_DONE #define CM_DONE /******************************************************************************/ /* The following definitions are extracted from my style header file which */ /* would be cumbersome to distribute with this package. The DONE_STYLE is the */ /* idempotence symbol used in my style header file. */ #ifndef DONE_STYLE typedef unsigned long ulong; typedef unsigned bool; typedef unsigned char * p_ubyte_; #ifndef TRUE #define FALSE 0 #define TRUE 1 #endif /* Change to the second definition if you don't have prototypes. */ #define P_(A) A /* #define P_(A) () */ /* Uncomment this definition if you don't have void. */ /* typedef int void; */ #endif /******************************************************************************/ /* CRC Model Abstract Type */ /* ----------------------- */ /* The following type stores the context of an executing instance of the */ /* model algorithm. Most of the fields are model parameters which must be */ /* set before the first initializing call to cm_ini. */ typedef struct { int cm_width; /* Parameter: Width in bits [8,32]. */ ulong cm_poly; /* Parameter: The algorithm's polynomial. */ ulong cm_init; /* Parameter: Initial register value. */ bool cm_refin; /* Parameter: Reflect input bytes? */ bool cm_refot; /* Parameter: Reflect output CRC? */ ulong cm_xorot; /* Parameter: XOR this to output CRC. */ ulong cm_reg; /* Context: Context during execution. */ } cm_t; typedef cm_t *p_cm_t; /******************************************************************************/ /* Functions That Implement The Model */ /* ---------------------------------- */ /* The following functions animate the cm_t abstraction. */ void cm_ini P_((p_cm_t p_cm)); /* Initializes the argument CRC model instance. */ /* All parameter fields must be set before calling this. 
*/ void cm_nxt P_((p_cm_t p_cm,int ch)); /* Processes a single message byte [0,255]. */ void cm_blk P_((p_cm_t p_cm,p_ubyte_ blk_adr,ulong blk_len)); /* Processes a block of message bytes. */ ulong cm_crc P_((p_cm_t p_cm)); /* Returns the CRC value for the message bytes processed so far. */ /******************************************************************************/ /* Functions For Table Calculation */ /* ------------------------------- */ /* The following function can be used to calculate a CRC lookup table. */ /* It can also be used at run-time to create or check static tables. */ ulong cm_tab P_((p_cm_t p_cm,int index)); /* Returns the i'th entry for the lookup table for the specified algorithm. */ /* The function examines the fields cm_width, cm_poly, cm_refin, and the */ /* argument table index in the range [0,255] and returns the table entry in */ /* the bottom cm_width bytes of the return value. */ /******************************************************************************/ /* End of the header file idempotence #ifndef */ void crc16(p_cm_t crc_context, unsigned char const *buff, unsigned int size); #endif /******************************************************************************/ /* End of crcmodel.h */ /******************************************************************************/ sleuthkit-4.2.0/tsk/base/Makefile.am000644 000765 000024 00000000606 12576320700 020076 0ustar00carrierstaff000000 000000 AM_CPPFLAGS = -I../.. -Wall noinst_LTLIBRARIES = libtskbase.la libtskbase_la_SOURCES = md5c.c mymalloc.c sha1c.c \ crc.c crc.h \ tsk_endian.c tsk_error.c tsk_list.c tsk_parse.c tsk_printf.c \ tsk_unicode.c tsk_version.c tsk_stack.c XGetopt.c tsk_base_i.h \ tsk_lock.c tsk_error_win32.cpp EXTRA_DIST = .indent.pro indent: indent *.c *.h clean-local: -rm -f *.c~ *.h~ sleuthkit-4.2.0/tsk/base/Makefile.in000644 000765 000024 00000052556 12576326300 020124 0ustar00carrierstaff000000 000000 # Makefile.in generated by automake 1.15 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2014 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) 
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) pkgdatadir = $(datadir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ pkglibexecdir = $(libexecdir)/@PACKAGE@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = tsk/base ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ax_pthread.m4 \ $(top_srcdir)/m4/cppunit.m4 \ $(top_srcdir)/m4/ax_jni_include_dir.m4 \ $(top_srcdir)/m4/ac_prog_javac_works.m4 \ $(top_srcdir)/m4/ac_prog_javac.m4 \ $(top_srcdir)/m4/ac_prog_java_works.m4 \ $(top_srcdir)/m4/ac_prog_java.m4 $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/tsk/tsk_config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) libtskbase_la_LIBADD = am_libtskbase_la_OBJECTS = md5c.lo mymalloc.lo sha1c.lo crc.lo \ tsk_endian.lo tsk_error.lo tsk_list.lo tsk_parse.lo \ tsk_printf.lo tsk_unicode.lo tsk_version.lo tsk_stack.lo \ XGetopt.lo tsk_lock.lo tsk_error_win32.lo libtskbase_la_OBJECTS = $(am_libtskbase_la_OBJECTS) AM_V_lt = $(am__v_lt_@AM_V@) am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) am__v_lt_0 = --silent am__v_lt_1 = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir)/tsk depcomp = $(SHELL) $(top_srcdir)/config/depcomp am__depfiles_maybe = depfiles am__mv = mv -f COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CFLAGS) $(CFLAGS) AM_V_CC = $(am__v_CC_@AM_V@) am__v_CC_ = 
$(am__v_CC_@AM_DEFAULT_V@) am__v_CC_0 = @echo " CC " $@; am__v_CC_1 = CCLD = $(CC) LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CCLD = $(am__v_CCLD_@AM_V@) am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@) am__v_CCLD_0 = @echo " CCLD " $@; am__v_CCLD_1 = CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CXXFLAGS) $(CXXFLAGS) AM_V_CXX = $(am__v_CXX_@AM_V@) am__v_CXX_ = $(am__v_CXX_@AM_DEFAULT_V@) am__v_CXX_0 = @echo " CXX " $@; am__v_CXX_1 = CXXLD = $(CXX) CXXLINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CXXLD = $(am__v_CXXLD_@AM_V@) am__v_CXXLD_ = $(am__v_CXXLD_@AM_DEFAULT_V@) am__v_CXXLD_0 = @echo " CXXLD " $@; am__v_CXXLD_1 = SOURCES = $(libtskbase_la_SOURCES) DIST_SOURCES = $(libtskbase_la_SOURCES) am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. 
am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/config/depcomp DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) ACLOCAL = @ACLOCAL@ ALLOCA = @ALLOCA@ AMTAR = @AMTAR@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ ANT_FOUND = @ANT_FOUND@ AR = @AR@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ JAVA = @JAVA@ JAVAC = @JAVAC@ JNI_CPPFLAGS = @JNI_CPPFLAGS@ LD = @LD@ LDFLAGS = @LDFLAGS@ LIBOBJS = @LIBOBJS@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBTSK_LDFLAGS = @LIBTSK_LDFLAGS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAINT = @MAINT@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ NM = @NM@ NMEDIT = @NMEDIT@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PERL = @PERL@ PTHREAD_CC = @PTHREAD_CC@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ PTHREAD_LIBS = @PTHREAD_LIBS@ RANLIB = @RANLIB@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ STRIP = @STRIP@ VERSION = @VERSION@ Z_PATH = @Z_PATH@ _ACJNI_JAVAC = @_ACJNI_JAVAC@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ ax_pthread_config = @ax_pthread_config@ bindir = @bindir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ datadir = @datadir@ datarootdir = @datarootdir@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ prefix = @prefix@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = 
@top_builddir@ top_srcdir = @top_srcdir@ uudecode = @uudecode@ AM_CPPFLAGS = -I../.. -Wall noinst_LTLIBRARIES = libtskbase.la libtskbase_la_SOURCES = md5c.c mymalloc.c sha1c.c \ crc.c crc.h \ tsk_endian.c tsk_error.c tsk_list.c tsk_parse.c tsk_printf.c \ tsk_unicode.c tsk_version.c tsk_stack.c XGetopt.c tsk_base_i.h \ tsk_lock.c tsk_error_win32.cpp EXTRA_DIST = .indent.pro all: all-am .SUFFIXES: .SUFFIXES: .c .cpp .lo .o .obj $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign tsk/base/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign tsk/base/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; \ locs=`for p in $$list; do echo $$p; done | \ sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \ sort -u`; \ test -z "$$locs" || { \ echo rm -f $${locs}; \ rm -f $${locs}; \ } libtskbase.la: $(libtskbase_la_OBJECTS) $(libtskbase_la_DEPENDENCIES) $(EXTRA_libtskbase_la_DEPENDENCIES) $(AM_V_CXXLD)$(CXXLINK) $(libtskbase_la_OBJECTS) $(libtskbase_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/XGetopt.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/crc.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/md5c.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mymalloc.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sha1c.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsk_endian.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsk_error.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsk_error_win32.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsk_list.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsk_lock.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsk_parse.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsk_printf.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsk_stack.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsk_unicode.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsk_version.Plo@am__quote@ .c.o: @am__fastdepCC_TRUE@ $(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no 
@AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ $< .c.obj: @am__fastdepCC_TRUE@ $(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .c.lo: @am__fastdepCC_TRUE@ $(AM_V_CC)$(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LTCOMPILE) -c -o $@ $< .cpp.o: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LTCXXCOMPILE) -c -o $@ $< mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-am TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-am CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-am cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test 
-f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool clean-local \ clean-noinstLTLIBRARIES mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: CTAGS GTAGS TAGS all all-am check check-am clean clean-generic \ clean-libtool clean-local clean-noinstLTLIBRARIES \ cscopelist-am ctags ctags-am distclean distclean-compile \ distclean-generic distclean-libtool distclean-tags distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-pdf \ install-pdf-am install-ps install-ps-am install-strip \ installcheck installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags tags-am uninstall uninstall-am .PRECIOUS: Makefile indent: indent *.c *.h clean-local: -rm -f *.c~ *.h~ # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: sleuthkit-4.2.0/tsk/base/md5c.c000644 000765 000024 00000025466 12576320700 017051 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * */ /* MD5C.C - RSA Data Security, Inc., MD5 message-digest algorithm */ /* Copyright (C) 1991-2, RSA Data Security, Inc. Created 1991. All rights reserved. License to copy and use this software is granted provided that it is identified as the "RSA Data Security, Inc. MD5 Message-Digest Algorithm" in all material mentioning or referencing this software or this function. License is also granted to make and use derivative works provided that such works are identified as "derived from the RSA Data Security, Inc. MD5 Message-Digest Algorithm" in all material mentioning or referencing the derived work. RSA Data Security, Inc. makes no representations concerning either the merchantability of this software or the suitability of this software for any particular purpose. It is provided "as is" without express or implied warranty of any kind. These notices must be retained in any copies of any part of this documentation and/or software. */ /** \file md5c.c * Local copy of RSA Data Security, Inc. MD5 Message-Digest Algorithm. */ #include "tsk_base_i.h" /* Constants for MD5Transform routine. 
*/ #define S11 7 #define S12 12 #define S13 17 #define S14 22 #define S21 5 #define S22 9 #define S23 14 #define S24 20 #define S31 4 #define S32 11 #define S33 16 #define S34 23 #define S41 6 #define S42 10 #define S43 15 #define S44 21 static void MD5Transform(UINT4[4], unsigned char[64]); static void Encode(unsigned char *, UINT4 *, unsigned int); static void Decode(UINT4 *, unsigned char *, unsigned int); static void MD5_memcpy(POINTER, POINTER, unsigned int); static void MD5_memset(POINTER, int, unsigned int); static unsigned char PADDING[64] = { 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; /* F, G, H and I are basic MD5 functions. */ #define F(x, y, z) (((x) & (y)) | ((~x) & (z))) #define G(x, y, z) (((x) & (z)) | ((y) & (~z))) #define H(x, y, z) ((x) ^ (y) ^ (z)) #define I(x, y, z) ((y) ^ ((x) | (~z))) /* ROTATE_LEFT rotates x left n bits. */ #define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32-(n)))) /* FF, GG, HH, and II transformations for rounds 1, 2, 3, and 4. Rotation is separate from addition to prevent recomputation. */ #define FF(a, b, c, d, x, s, ac) { \ (a) += F ((b), (c), (d)) + (x) + (UINT4)(ac); \ (a) = ROTATE_LEFT ((a), (s)); \ (a) += (b); \ } #define GG(a, b, c, d, x, s, ac) { \ (a) += G ((b), (c), (d)) + (x) + (UINT4)(ac); \ (a) = ROTATE_LEFT ((a), (s)); \ (a) += (b); \ } #define HH(a, b, c, d, x, s, ac) { \ (a) += H ((b), (c), (d)) + (x) + (UINT4)(ac); \ (a) = ROTATE_LEFT ((a), (s)); \ (a) += (b); \ } #define II(a, b, c, d, x, s, ac) { \ (a) += I ((b), (c), (d)) + (x) + (UINT4)(ac); \ (a) = ROTATE_LEFT ((a), (s)); \ (a) += (b); \ } /** * \ingroup baselib * Initialize a MD5 context structure so that data can be added to it. * @param context Pointer to context to initialize */ void TSK_MD5_Init(TSK_MD5_CTX * context) { context->count[0] = context->count[1] = 0; /* Load magic initialization constants. */ context->state[0] = 0x67452301; context->state[1] = 0xefcdab89; context->state[2] = 0x98badcfe; context->state[3] = 0x10325476; } /** * \ingroup baselib * Add data to an initialized MD5 operation. * @param context Initialized context to add data to * @param input Buffer of data to process * @param inputLen Number of bytes in input */ void TSK_MD5_Update(TSK_MD5_CTX * context, unsigned char *input, unsigned int inputLen) { unsigned int i, index, partLen; /* Compute number of bytes mod 64 */ index = (unsigned int) ((context->count[0] >> 3) & 0x3F); /* Update number of bits */ if ((context->count[0] += ((UINT4) inputLen << 3)) < ((UINT4) inputLen << 3)) context->count[1]++; context->count[1] += ((UINT4) inputLen >> 29); partLen = 64 - index; /* Transform as many times as possible. */ if (inputLen >= partLen) { MD5_memcpy ((POINTER) & context->buffer[index], (POINTER) input, partLen); MD5Transform(context->state, context->buffer); for (i = partLen; i + 63 < inputLen; i += 64) MD5Transform(context->state, &input[i]); index = 0; } else i = 0; /* Buffer remaining input */ MD5_memcpy ((POINTER) & context->buffer[index], (POINTER) & input[i], inputLen - i); } /** * \ingroup baselib * Cacluate the MD5 hash of the data added to this context. Context * will be zeroed after this call. * @param digest Buffer to store MD5 value in. * @param context Context that has data added to it. 
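 *
 * A minimal usage sketch of the three calls together (illustrative only, not
 * part of the original source; "buf" and "len" stand for whatever data the
 * caller wants to hash):
 *
 *     TSK_MD5_CTX ctx;
 *     unsigned char hash[16];
 *
 *     TSK_MD5_Init(&ctx);
 *     TSK_MD5_Update(&ctx, (unsigned char *) buf, (unsigned int) len);
 *     TSK_MD5_Final(hash, &ctx);   // hash now holds the 16-byte digest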
*/ void TSK_MD5_Final(unsigned char digest[16], TSK_MD5_CTX * context) { unsigned char bits[8]; unsigned int index, padLen; /* Save number of bits */ Encode(bits, context->count, 8); /* Pad out to 56 mod 64. */ index = (unsigned int) ((context->count[0] >> 3) & 0x3f); padLen = (index < 56) ? (56 - index) : (120 - index); TSK_MD5_Update(context, PADDING, padLen); /* Append length (before padding) */ TSK_MD5_Update(context, bits, 8); /* Store state in digest */ Encode(digest, context->state, 16); /* Zeroize sensitive information. */ MD5_memset((POINTER) context, 0, sizeof(*context)); } /* MD5 basic transformation. Transforms state based on block. */ static void MD5Transform(state, block) UINT4 state[4]; unsigned char block[64]; { UINT4 a = state[0], b = state[1], c = state[2], d = state[3], x[16]; Decode(x, block, 64); /* Round 1 */ FF(a, b, c, d, x[0], S11, 0xd76aa478); /* 1 */ FF(d, a, b, c, x[1], S12, 0xe8c7b756); /* 2 */ FF(c, d, a, b, x[2], S13, 0x242070db); /* 3 */ FF(b, c, d, a, x[3], S14, 0xc1bdceee); /* 4 */ FF(a, b, c, d, x[4], S11, 0xf57c0faf); /* 5 */ FF(d, a, b, c, x[5], S12, 0x4787c62a); /* 6 */ FF(c, d, a, b, x[6], S13, 0xa8304613); /* 7 */ FF(b, c, d, a, x[7], S14, 0xfd469501); /* 8 */ FF(a, b, c, d, x[8], S11, 0x698098d8); /* 9 */ FF(d, a, b, c, x[9], S12, 0x8b44f7af); /* 10 */ FF(c, d, a, b, x[10], S13, 0xffff5bb1); /* 11 */ FF(b, c, d, a, x[11], S14, 0x895cd7be); /* 12 */ FF(a, b, c, d, x[12], S11, 0x6b901122); /* 13 */ FF(d, a, b, c, x[13], S12, 0xfd987193); /* 14 */ FF(c, d, a, b, x[14], S13, 0xa679438e); /* 15 */ FF(b, c, d, a, x[15], S14, 0x49b40821); /* 16 */ /* Round 2 */ GG(a, b, c, d, x[1], S21, 0xf61e2562); /* 17 */ GG(d, a, b, c, x[6], S22, 0xc040b340); /* 18 */ GG(c, d, a, b, x[11], S23, 0x265e5a51); /* 19 */ GG(b, c, d, a, x[0], S24, 0xe9b6c7aa); /* 20 */ GG(a, b, c, d, x[5], S21, 0xd62f105d); /* 21 */ GG(d, a, b, c, x[10], S22, 0x2441453); /* 22 */ GG(c, d, a, b, x[15], S23, 0xd8a1e681); /* 23 */ GG(b, c, d, a, x[4], S24, 0xe7d3fbc8); /* 24 */ GG(a, b, c, d, x[9], S21, 0x21e1cde6); /* 25 */ GG(d, a, b, c, x[14], S22, 0xc33707d6); /* 26 */ GG(c, d, a, b, x[3], S23, 0xf4d50d87); /* 27 */ GG(b, c, d, a, x[8], S24, 0x455a14ed); /* 28 */ GG(a, b, c, d, x[13], S21, 0xa9e3e905); /* 29 */ GG(d, a, b, c, x[2], S22, 0xfcefa3f8); /* 30 */ GG(c, d, a, b, x[7], S23, 0x676f02d9); /* 31 */ GG(b, c, d, a, x[12], S24, 0x8d2a4c8a); /* 32 */ /* Round 3 */ HH(a, b, c, d, x[5], S31, 0xfffa3942); /* 33 */ HH(d, a, b, c, x[8], S32, 0x8771f681); /* 34 */ HH(c, d, a, b, x[11], S33, 0x6d9d6122); /* 35 */ HH(b, c, d, a, x[14], S34, 0xfde5380c); /* 36 */ HH(a, b, c, d, x[1], S31, 0xa4beea44); /* 37 */ HH(d, a, b, c, x[4], S32, 0x4bdecfa9); /* 38 */ HH(c, d, a, b, x[7], S33, 0xf6bb4b60); /* 39 */ HH(b, c, d, a, x[10], S34, 0xbebfbc70); /* 40 */ HH(a, b, c, d, x[13], S31, 0x289b7ec6); /* 41 */ HH(d, a, b, c, x[0], S32, 0xeaa127fa); /* 42 */ HH(c, d, a, b, x[3], S33, 0xd4ef3085); /* 43 */ HH(b, c, d, a, x[6], S34, 0x4881d05); /* 44 */ HH(a, b, c, d, x[9], S31, 0xd9d4d039); /* 45 */ HH(d, a, b, c, x[12], S32, 0xe6db99e5); /* 46 */ HH(c, d, a, b, x[15], S33, 0x1fa27cf8); /* 47 */ HH(b, c, d, a, x[2], S34, 0xc4ac5665); /* 48 */ /* Round 4 */ II(a, b, c, d, x[0], S41, 0xf4292244); /* 49 */ II(d, a, b, c, x[7], S42, 0x432aff97); /* 50 */ II(c, d, a, b, x[14], S43, 0xab9423a7); /* 51 */ II(b, c, d, a, x[5], S44, 0xfc93a039); /* 52 */ II(a, b, c, d, x[12], S41, 0x655b59c3); /* 53 */ II(d, a, b, c, x[3], S42, 0x8f0ccc92); /* 54 */ II(c, d, a, b, x[10], S43, 0xffeff47d); /* 55 */ II(b, c, d, a, 
x[1], S44, 0x85845dd1); /* 56 */ II(a, b, c, d, x[8], S41, 0x6fa87e4f); /* 57 */ II(d, a, b, c, x[15], S42, 0xfe2ce6e0); /* 58 */ II(c, d, a, b, x[6], S43, 0xa3014314); /* 59 */ II(b, c, d, a, x[13], S44, 0x4e0811a1); /* 60 */ II(a, b, c, d, x[4], S41, 0xf7537e82); /* 61 */ II(d, a, b, c, x[11], S42, 0xbd3af235); /* 62 */ II(c, d, a, b, x[2], S43, 0x2ad7d2bb); /* 63 */ II(b, c, d, a, x[9], S44, 0xeb86d391); /* 64 */ state[0] += a; state[1] += b; state[2] += c; state[3] += d; /* Zeroize sensitive information. */ MD5_memset((POINTER) x, 0, sizeof(x)); } /* Encodes input (UINT4) into output (unsigned char). Assumes len is a multiple of 4. */ static void Encode(output, input, len) unsigned char *output; UINT4 *input; unsigned int len; { unsigned int i, j; for (i = 0, j = 0; j < len; i++, j += 4) { output[j] = (unsigned char) (input[i] & 0xff); output[j + 1] = (unsigned char) ((input[i] >> 8) & 0xff); output[j + 2] = (unsigned char) ((input[i] >> 16) & 0xff); output[j + 3] = (unsigned char) ((input[i] >> 24) & 0xff); } } /* Decodes input (unsigned char) into output (UINT4). Assumes len is a multiple of 4. */ static void Decode(output, input, len) UINT4 *output; unsigned char *input; unsigned int len; { unsigned int i, j; for (i = 0, j = 0; j < len; i++, j += 4) output[i] = ((UINT4) input[j]) | (((UINT4) input[j + 1]) << 8) | (((UINT4) input[j + 2]) << 16) | (((UINT4) input[j + 3]) << 24); } /* Note: Replace "for loop" with standard memcpy if possible. */ static void MD5_memcpy(output, input, len) POINTER output; POINTER input; unsigned int len; { unsigned int i; for (i = 0; i < len; i++) output[i] = input[i]; } /* Note: Replace "for loop" with standard memset if possible. */ static void MD5_memset(output, value, len) POINTER output; int value; unsigned int len; { unsigned int i; for (i = 0; i < len; i++) ((char *) output)[i] = (char) value; } sleuthkit-4.2.0/tsk/base/mymalloc.c000644 000765 000024 00000002376 12576320700 020031 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * * * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2006-2011 Brian Carrier, Basis Technology. All rights reserved. */ /** \file mymalloc.c * These functions allocate and reallocate memory and set the error values * when an error occurs. */ /* The IBM Public Licence must be distributed with this software. * AUTHOR(S) * Wietse Venema * IBM T.J. Watson Research * P.O.
Box 704 * Yorktown Heights, NY 10598, USA *--*/ #include "tsk_base_i.h" #include /* tsk_malloc - allocate and zero memory and set error values on error */ void * tsk_malloc(size_t len) { void *ptr; if ((ptr = malloc(len)) == 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_AUX_MALLOC); tsk_error_set_errstr("tsk_malloc: %s (%"PRIuSIZE" requested)", strerror(errno), len); } else { memset(ptr, 0, len); } return (ptr); } /* tsk_realloc - reallocate memory and set error values if needed */ void * tsk_realloc(void *ptr, size_t len) { if ((ptr = realloc(ptr, len)) == 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_AUX_MALLOC); tsk_error_set_errstr("tsk_realloc: %s (%"PRIuSIZE" requested)", strerror(errno), len); } return (ptr); } sleuthkit-4.2.0/tsk/base/sha1c.c000644 000765 000024 00000033716 12576320700 017215 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * */ /* sha1c.c : Implementation of the Secure Hash Algorithm */ /* SHA: NIST's Secure Hash Algorithm */ /* This version written November 2000 by David Ireland of DI Management Services Pty Limited Adapted from code in the Python Cryptography Toolkit, version 1.0.0 by A.M. Kuchling 1995. */ /* AM Kuchling's posting:- Based on SHA code originally posted to sci.crypt by Peter Gutmann in message <30ajo5$oe8@ccu2.auckland.ac.nz>. Modified to test for endianness on creation of SHA objects by AMK. Also, the original specification of SHA was found to have a weakness by NSA/NIST. This code implements the fixed version of SHA. */ /* Here's the first paragraph of Peter Gutmann's posting: The following is my SHA (FIPS 180) code updated to allow use of the "fixed" SHA, thanks to Jim Gillogly and an anonymous contributor for the information on what's changed in the new version. The fix is a simple change which involves adding a single rotate in the initial expansion function. It is unknown whether this is an optimal solution to the problem which was discovered in the SHA or whether it's simply a bandaid which fixes the problem with a minimum of effort (for example the reengineering of a great many Capstone chips). */ /** \file sha1c.c * Local copy of the public domain SHA-1 library code by David Ireland. */ #include "tsk_base_i.h" /* The SHS block size and message digest sizes, in bytes */ #define SHS_DATASIZE 64 #define SHS_DIGESTSIZE 20 /* The SHS f()-functions. The f1 and f3 functions can be optimized to save one boolean operation each - thanks to Rich Schroeppel, rcs@cs.arizona.edu for discovering this */ /*#define f1(x,y,z) ( ( x & y ) | ( ~x & z ) ) // Rounds 0-19 */ #define f1(x,y,z) ( z ^ ( x & ( y ^ z ) ) ) /* Rounds 0-19 */ #define f2(x,y,z) ( x ^ y ^ z ) /* Rounds 20-39 */ /*#define f3(x,y,z) ( ( x & y ) | ( x & z ) | ( y & z ) ) // Rounds 40-59 */ #define f3(x,y,z) ( ( x & y ) | ( z & ( x | y ) ) ) /* Rounds 40-59 */ #define f4(x,y,z) ( x ^ y ^ z ) /* Rounds 60-79 */ /* The SHS Mysterious Constants */ #define K1 0x5A827999UL /* Rounds 0-19 */ #define K2 0x6ED9EBA1UL /* Rounds 20-39 */ #define K3 0x8F1BBCDCUL /* Rounds 40-59 */ #define K4 0xCA62C1D6UL /* Rounds 60-79 */ /* SHS initial values */ #define h0init 0x67452301UL #define h1init 0xEFCDAB89UL #define h2init 0x98BADCFEUL #define h3init 0x10325476UL #define h4init 0xC3D2E1F0UL /* Note that it may be necessary to add parentheses to these macros if they are to be called with expressions as arguments */ /* 32-bit rotate left - kludged with shifts */ #define ROTL(n,X) ( ( ( X ) << n ) | ( ( X ) >> ( 32 - n ) ) ) /* The initial expanding function. 
The hash function is defined over an 80-UINT2 expanded input array W, where the first 16 are copies of the input data, and the remaining 64 are defined by W[ i ] = W[ i - 16 ] ^ W[ i - 14 ] ^ W[ i - 8 ] ^ W[ i - 3 ] This implementation generates these values on the fly in a circular buffer - thanks to Colin Plumb, colin@nyx10.cs.du.edu for this optimization. The updated SHS changes the expanding function by adding a rotate of 1 bit. Thanks to Jim Gillogly, jim@rand.org, and an anonymous contributor for this information */ #define expand(W,i) ( W[ i & 15 ] = ROTL( 1, ( W[ i & 15 ] ^ W[ (i - 14) & 15 ] ^ \ W[ (i - 8) & 15 ] ^ W[ (i - 3) & 15 ] ) ) ) /* The prototype SHS sub-round. The fundamental sub-round is: a' = e + ROTL( 5, a ) + f( b, c, d ) + k + data; b' = a; c' = ROTL( 30, b ); d' = c; e' = d; but this is implemented by unrolling the loop 5 times and renaming the variables ( e, a, b, c, d ) = ( a', b', c', d', e' ) each iteration. This code is then replicated 20 times for each of the 4 functions, using the next 20 values from the W[] array each time */ #define subRound(a, b, c, d, e, f, k, data) \ ( e += ROTL( 5, a ) + f( b, c, d ) + k + data, b = ROTL( 30, b ) ) /* endian.c */ static void endianTest(int *endian_ness) { if ((*(unsigned short *) ("#S") >> 8) == '#') { /* printf("Big endian = no change\n"); */ *endian_ness = !(0); } else { /* printf("Little endian = swap\n"); */ *endian_ness = 0; } } /** * \ingroup baselib * Initialize a SHA-1 context so that data can be added to it. * @param shsInfo Pointer to context structure to initialize */ void TSK_SHA_Init(TSK_SHA_CTX * shsInfo) { endianTest(&shsInfo->Endianness); /* Set the h-vars to their initial values */ shsInfo->digest[0] = h0init; shsInfo->digest[1] = h1init; shsInfo->digest[2] = h2init; shsInfo->digest[3] = h3init; shsInfo->digest[4] = h4init; /* Initialise bit count */ shsInfo->countLo = shsInfo->countHi = 0; } /* Perform the SHS transformation. Note that this code, like MD5, seems to break some optimizing compilers due to the complexity of the expressions and the size of the basic block. It may be necessary to split it into sections, e.g. based on the four subrounds Note that this corrupts the shsInfo->data area */ static void SHSTransform(digest, data) UINT4 *digest, *data; { UINT4 A, B, C, D, E; /* Local vars */ UINT4 eData[16]; /* Expanded data */ /* Set up first buffer and local data buffer */ A = digest[0]; B = digest[1]; C = digest[2]; D = digest[3]; E = digest[4]; memcpy((POINTER) eData, (POINTER) data, SHS_DATASIZE); /* Heavy mangling, in 4 sub-rounds of 20 interations each. 
*/ subRound(A, B, C, D, E, f1, K1, eData[0]); subRound(E, A, B, C, D, f1, K1, eData[1]); subRound(D, E, A, B, C, f1, K1, eData[2]); subRound(C, D, E, A, B, f1, K1, eData[3]); subRound(B, C, D, E, A, f1, K1, eData[4]); subRound(A, B, C, D, E, f1, K1, eData[5]); subRound(E, A, B, C, D, f1, K1, eData[6]); subRound(D, E, A, B, C, f1, K1, eData[7]); subRound(C, D, E, A, B, f1, K1, eData[8]); subRound(B, C, D, E, A, f1, K1, eData[9]); subRound(A, B, C, D, E, f1, K1, eData[10]); subRound(E, A, B, C, D, f1, K1, eData[11]); subRound(D, E, A, B, C, f1, K1, eData[12]); subRound(C, D, E, A, B, f1, K1, eData[13]); subRound(B, C, D, E, A, f1, K1, eData[14]); subRound(A, B, C, D, E, f1, K1, eData[15]); subRound(E, A, B, C, D, f1, K1, expand(eData, 16)); subRound(D, E, A, B, C, f1, K1, expand(eData, 17)); subRound(C, D, E, A, B, f1, K1, expand(eData, 18)); subRound(B, C, D, E, A, f1, K1, expand(eData, 19)); subRound(A, B, C, D, E, f2, K2, expand(eData, 20)); subRound(E, A, B, C, D, f2, K2, expand(eData, 21)); subRound(D, E, A, B, C, f2, K2, expand(eData, 22)); subRound(C, D, E, A, B, f2, K2, expand(eData, 23)); subRound(B, C, D, E, A, f2, K2, expand(eData, 24)); subRound(A, B, C, D, E, f2, K2, expand(eData, 25)); subRound(E, A, B, C, D, f2, K2, expand(eData, 26)); subRound(D, E, A, B, C, f2, K2, expand(eData, 27)); subRound(C, D, E, A, B, f2, K2, expand(eData, 28)); subRound(B, C, D, E, A, f2, K2, expand(eData, 29)); subRound(A, B, C, D, E, f2, K2, expand(eData, 30)); subRound(E, A, B, C, D, f2, K2, expand(eData, 31)); subRound(D, E, A, B, C, f2, K2, expand(eData, 32)); subRound(C, D, E, A, B, f2, K2, expand(eData, 33)); subRound(B, C, D, E, A, f2, K2, expand(eData, 34)); subRound(A, B, C, D, E, f2, K2, expand(eData, 35)); subRound(E, A, B, C, D, f2, K2, expand(eData, 36)); subRound(D, E, A, B, C, f2, K2, expand(eData, 37)); subRound(C, D, E, A, B, f2, K2, expand(eData, 38)); subRound(B, C, D, E, A, f2, K2, expand(eData, 39)); subRound(A, B, C, D, E, f3, K3, expand(eData, 40)); subRound(E, A, B, C, D, f3, K3, expand(eData, 41)); subRound(D, E, A, B, C, f3, K3, expand(eData, 42)); subRound(C, D, E, A, B, f3, K3, expand(eData, 43)); subRound(B, C, D, E, A, f3, K3, expand(eData, 44)); subRound(A, B, C, D, E, f3, K3, expand(eData, 45)); subRound(E, A, B, C, D, f3, K3, expand(eData, 46)); subRound(D, E, A, B, C, f3, K3, expand(eData, 47)); subRound(C, D, E, A, B, f3, K3, expand(eData, 48)); subRound(B, C, D, E, A, f3, K3, expand(eData, 49)); subRound(A, B, C, D, E, f3, K3, expand(eData, 50)); subRound(E, A, B, C, D, f3, K3, expand(eData, 51)); subRound(D, E, A, B, C, f3, K3, expand(eData, 52)); subRound(C, D, E, A, B, f3, K3, expand(eData, 53)); subRound(B, C, D, E, A, f3, K3, expand(eData, 54)); subRound(A, B, C, D, E, f3, K3, expand(eData, 55)); subRound(E, A, B, C, D, f3, K3, expand(eData, 56)); subRound(D, E, A, B, C, f3, K3, expand(eData, 57)); subRound(C, D, E, A, B, f3, K3, expand(eData, 58)); subRound(B, C, D, E, A, f3, K3, expand(eData, 59)); subRound(A, B, C, D, E, f4, K4, expand(eData, 60)); subRound(E, A, B, C, D, f4, K4, expand(eData, 61)); subRound(D, E, A, B, C, f4, K4, expand(eData, 62)); subRound(C, D, E, A, B, f4, K4, expand(eData, 63)); subRound(B, C, D, E, A, f4, K4, expand(eData, 64)); subRound(A, B, C, D, E, f4, K4, expand(eData, 65)); subRound(E, A, B, C, D, f4, K4, expand(eData, 66)); subRound(D, E, A, B, C, f4, K4, expand(eData, 67)); subRound(C, D, E, A, B, f4, K4, expand(eData, 68)); subRound(B, C, D, E, A, f4, K4, expand(eData, 69)); subRound(A, B, C, D, E, f4, K4, expand(eData, 
70)); subRound(E, A, B, C, D, f4, K4, expand(eData, 71)); subRound(D, E, A, B, C, f4, K4, expand(eData, 72)); subRound(C, D, E, A, B, f4, K4, expand(eData, 73)); subRound(B, C, D, E, A, f4, K4, expand(eData, 74)); subRound(A, B, C, D, E, f4, K4, expand(eData, 75)); subRound(E, A, B, C, D, f4, K4, expand(eData, 76)); subRound(D, E, A, B, C, f4, K4, expand(eData, 77)); subRound(C, D, E, A, B, f4, K4, expand(eData, 78)); subRound(B, C, D, E, A, f4, K4, expand(eData, 79)); /* Build message digest */ digest[0] += A; digest[1] += B; digest[2] += C; digest[3] += D; digest[4] += E; } /* When run on a little-endian CPU we need to perform byte reversal on an array of long words. */ static void longReverse(UINT4 * buffer, int byteCount, int Endianness) { UINT4 value; if (Endianness == TRUE) return; byteCount /= sizeof(UINT4); while (byteCount--) { value = *buffer; value = ((value & 0xFF00FF00UL) >> 8) | ((value & 0x00FF00FFUL) << 8); *buffer++ = (value << 16) | (value >> 16); } } /** * \ingroup baselib * Add data to an initialized SHA-1 context. * @param shsInfo Context to add data to * @param buffer Data to process * @param count Number of bytes in buffer */ void TSK_SHA_Update(TSK_SHA_CTX * shsInfo, BYTE * buffer, int count) { UINT4 tmp; int dataCount; /* Update bitcount */ tmp = shsInfo->countLo; if ((shsInfo->countLo = tmp + ((UINT4) count << 3)) < tmp) shsInfo->countHi++; /* Carry from low to high */ shsInfo->countHi += count >> 29; /* Get count of bytes already in data */ dataCount = (int) (tmp >> 3) & 0x3F; /* Handle any leading odd-sized chunks */ if (dataCount) { BYTE *p = (BYTE *) shsInfo->data + dataCount; dataCount = SHS_DATASIZE - dataCount; if (count < dataCount) { memcpy(p, buffer, count); return; } memcpy(p, buffer, dataCount); longReverse(shsInfo->data, SHS_DATASIZE, shsInfo->Endianness); SHSTransform(shsInfo->digest, shsInfo->data); buffer += dataCount; count -= dataCount; } /* Process data in SHS_DATASIZE chunks */ while (count >= SHS_DATASIZE) { memcpy((POINTER) shsInfo->data, (POINTER) buffer, SHS_DATASIZE); longReverse(shsInfo->data, SHS_DATASIZE, shsInfo->Endianness); SHSTransform(shsInfo->digest, shsInfo->data); buffer += SHS_DATASIZE; count -= SHS_DATASIZE; } /* Handle any remaining bytes of data. */ memcpy((POINTER) shsInfo->data, (POINTER) buffer, count); } static void SHAtoByte(BYTE output[SHS_DIGESTSIZE], UINT4 * input) { /* Output SHA digest in byte array */ unsigned int i, j; for (i = 0, j = 0; j < SHS_DIGESTSIZE; i++, j += 4) { output[j + 3] = (BYTE) (input[i] & 0xff); output[j + 2] = (BYTE) ((input[i] >> 8) & 0xff); output[j + 1] = (BYTE) ((input[i] >> 16) & 0xff); output[j] = (BYTE) ((input[i] >> 24) & 0xff); } } /** * \ingroup baselib * Calculate the hash of the data added to the context. * @param output Buffer to store hash value * @param shsInfo Context that has data added to it. */ void TSK_SHA_Final(BYTE output[SHS_DIGESTSIZE], TSK_SHA_CTX * shsInfo) { int count; BYTE *dataPtr; /* Compute number of bytes mod 64 */ count = (int) shsInfo->countLo; count = (count >> 3) & 0x3F; /* Set the first char of padding to 0x80. 
This is safe since there is always at least one byte free */ dataPtr = (BYTE *) shsInfo->data + count; *dataPtr++ = 0x80; /* Bytes of padding needed to make 64 bytes */ count = SHS_DATASIZE - 1 - count; /* Pad out to 56 mod 64 */ if (count < 8) { /* Two lots of padding: Pad the first block to 64 bytes */ memset(dataPtr, 0, count); longReverse(shsInfo->data, SHS_DATASIZE, shsInfo->Endianness); SHSTransform(shsInfo->digest, shsInfo->data); /* Now fill the next block with 56 bytes */ memset((POINTER) shsInfo->data, 0, SHS_DATASIZE - 8); } else /* Pad block to 56 bytes */ memset(dataPtr, 0, count - 8); /* Append length in bits and transform */ shsInfo->data[14] = shsInfo->countHi; shsInfo->data[15] = shsInfo->countLo; longReverse(shsInfo->data, SHS_DATASIZE - 8, shsInfo->Endianness); SHSTransform(shsInfo->digest, shsInfo->data); /* Output to an array of bytes */ SHAtoByte(output, shsInfo->digest); /* Zeroise sensitive stuff */ memset((POINTER) shsInfo, 0, sizeof(shsInfo)); } sleuthkit-4.2.0/tsk/base/tsk_base.h000644 000765 000024 00000040713 12576320700 020011 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2007-2011 Brian Carrier. All Rights reserved * * This software is distributed under the Common Public License 1.0 */ /** \file tsk_base.h * Contains the type and function definitions that are needed * by external programs to use the TSK library. * Note that this file is not meant to be directly included. * It is included by both libtsk.h and tsk_base_i.h. */ /** * \defgroup baselib C Base TSK Library Functions * \defgroup baselib_cpp C++ Base TSK Library Classes */ #ifndef _TSK_BASE_H #define _TSK_BASE_H // standard C header files #include #include #include /** Version of code in number form. * Upper byte is A, next is B, and next byte is C in version A.B.C. * Lowest byte is 0xff, except in beta releases, in which case it * increments from 1. Nightly snapshots will have upper byte as * 0xff and next bytes with year, month, and date, respectively. * Note that you will not be able to differentiate between snapshots * from the trunk or branches with this method... * For example, 3.1.2 would be stored as 0x030102FF. * 3.1.2b1 would be 0x03010201. Snapshot from Jan 2, 2003 would be * 0xFF030102. * See TSK_VERSION_STR for string form. */ #define TSK_VERSION_NUM 0x040200ff /** Version of code in string form. See TSK_VERSION_NUM for * integer form. */ #define TSK_VERSION_STR "4.2.0" /* include the TSK-specific header file that we created in autoconf * On Win32 (Visual Studio) though, we will not have this file... */ #if !defined(_MSC_VER) #include "tsk/tsk_incs.h" #endif // get some other TSK / OS settings #include "tsk_os.h" #ifdef __cplusplus extern "C" { #endif #define TSK_ERROR_STRING_MAX_LENGTH 1024 typedef struct { uint32_t t_errno; char errstr[TSK_ERROR_STRING_MAX_LENGTH + 1]; char errstr2[TSK_ERROR_STRING_MAX_LENGTH + 1]; char errstr_print[TSK_ERROR_STRING_MAX_LENGTH + 1]; } TSK_ERROR_INFO; /* The core function here is to retrieve the per-thread error structure. Other functions to follow * are for convenience of performing common operations. */ extern TSK_ERROR_INFO *tsk_error_get_info(); extern uint32_t tsk_error_get_errno(); extern void tsk_error_set_errno(uint32_t t_errno); #ifdef __GNUC__ #define TSK_ERROR_FORMAT_ATTRIBUTE(n,m) __attribute__((format (printf, n, m))) #else #define TSK_ERROR_FORMAT_ATTRIBUTE(n,m) #endif extern char *tsk_error_get_errstr(); extern void tsk_error_set_errstr(const char *format, ...) 
TSK_ERROR_FORMAT_ATTRIBUTE(1, 2); extern void tsk_error_vset_errstr(const char *format, va_list args); extern char *tsk_error_get_errstr2(); extern void tsk_error_set_errstr2(const char *format, ...) TSK_ERROR_FORMAT_ATTRIBUTE(1, 2); extern void tsk_error_vset_errstr2(const char *format, va_list args); extern void tsk_error_errstr2_concat(const char *format, ...) TSK_ERROR_FORMAT_ATTRIBUTE(1, 2); /** Return a human-readable form of tsk_error_get_errno **/ extern const char *tsk_error_get(); extern void tsk_error_print(FILE *); extern void tsk_error_reset(); #ifdef TSK_MULTITHREAD_LIB #ifdef TSK_WIN32 void *tsk_error_win32_get_per_thread_(unsigned struct_size); typedef struct { CRITICAL_SECTION critical_section; } tsk_lock_t; // non-windows #else /* Note that there is an assumption that TSK_MULTITHREAD_LIB was * set only if we have pthreads. If we add a check for HAVE_PTHREAD * here, it causes problems when you try to include the library in * a tool because they do not have tsk_config.h included. */ #include <pthread.h> typedef struct { pthread_mutex_t mutex; } tsk_lock_t; #endif // single threaded lib #else typedef struct { void *dummy; } tsk_lock_t; #endif /** * Return values for some TSK functions that need to differentiate between errors and corrupt data. */ typedef enum { TSK_OK, ///< Ok -- success TSK_ERR, ///< System error -- should abort TSK_COR, ///< Data is corrupt, can still process another set of data TSK_STOP ///< Stop further processing, not an error though. } TSK_RETVAL_ENUM; typedef struct TSK_LIST TSK_LIST; /** * Linked list structure that holds a 'key' and optional 'length'. * Note that the data is stored in reverse sort order so that inserts * are faster. Also note that the length is a negative number. A key of * '6' and a len of '2' means that the run contains 6 and 5. */ struct TSK_LIST { TSK_LIST *next; ///< Pointer to next entry in list uint64_t key; ///< Largest value in this run uint64_t len; ///< Length of run (negative number, stored as positive) }; extern uint8_t tsk_list_find(TSK_LIST * list, uint64_t key); extern uint8_t tsk_list_add(TSK_LIST ** list, uint64_t key); extern void tsk_list_free(TSK_LIST * list); // note that the stack code is in this file and not internal for convenience to users /** * Basic stack structure to push and pop (used for finding loops in recursion).
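 *
 * A minimal usage sketch (illustrative only, not part of the original header;
 * "addr" stands for whatever 64-bit key the caller is tracking, such as a
 * metadata address during directory recursion):
 *
 *     TSK_STACK *stack = tsk_stack_create();
 *     if (stack != NULL) {
 *         if (tsk_stack_find(stack, addr) == 0)
 *             tsk_stack_push(stack, addr);   // returns 1 on error
 *         // ... recurse, then unwind ...
 *         tsk_stack_pop(stack);
 *         tsk_stack_free(stack);
 *     }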
*/ typedef struct { uint64_t *vals; ///< Array that contains the values in the stack size_t top; ///< Index to the top stack entry size_t len; ///< Number of entries in the stack } TSK_STACK; extern uint8_t tsk_stack_push(TSK_STACK * stack, uint64_t key); extern void tsk_stack_pop(TSK_STACK * stack); extern uint8_t tsk_stack_find(TSK_STACK * stack, uint64_t key); extern void tsk_stack_free(TSK_STACK * stack); extern TSK_STACK *tsk_stack_create(); // print internal UTF-8 strings to local platform Unicode format extern void tsk_fprintf(FILE * fd, const char *msg, ...); extern void tsk_printf(const char *msg, ...); /** \name printf macros if system does not define them */ //@{ #ifndef PRIx64 #define PRIx64 "llx" #endif #ifndef PRIX64 #define PRIX64 "llX" #endif #ifndef PRIu64 #define PRIu64 "llu" #endif #ifndef PRId64 #define PRId64 "lld" #endif #ifndef PRIo64 #define PRIo64 "llo" #endif #ifndef PRIx32 #define PRIx32 "x" #endif #ifndef PRIX32 #define PRIX32 "X" #endif #ifndef PRIu32 #define PRIu32 "u" #endif #ifndef PRId32 #define PRId32 "d" #endif #ifndef PRIx16 #define PRIx16 "hx" #endif #ifndef PRIX16 #define PRIX16 "hX" #endif #ifndef PRIu16 #define PRIu16 "hu" #endif #ifndef PRIu8 #define PRIu8 "hhu" #endif #ifndef PRIx8 #define PRIx8 "hhx" #endif //@} /** @name Internal integer types and printf macros*/ //@{ typedef uint64_t TSK_INUM_T; ///< Data type used to internally store metadata / inode addresses #define PRIuINUM PRIu64 #define PRIxINUM PRIx64 #define PRIdINUM PRId64 typedef uint32_t TSK_UID_T; ///< Data type used to internally store User IDs #define PRIuUID PRIu32 #define PRIxUID PRIx32 #define PRIdUID PRId32 typedef uint32_t TSK_GID_T; ///< Data type used to internally store Group IDs #define PRIuGID PRIu32 #define PRIxGID PRIx32 #define PRIdGID PRId32 typedef uint64_t TSK_DADDR_T; ///< Data type used to internally store sector and block addresses #define PRIuDADDR PRIu64 #define PRIxDADDR PRIx64 #define PRIdDADDR PRId64 typedef int64_t TSK_OFF_T; ///< Data type used to internally store volume, file, etc. sizes and offsets #define PRIuOFF PRIu64 #define PRIxOFF PRIx64 #define PRIdOFF PRId64 typedef uint32_t TSK_PNUM_T; ///< Data type used to internally store partition addresses #define PRIuPNUM PRIu32 #define PRIxPNUM PRIx32 #define PRIdPNUM PRId32 //@} extern void tsk_version_print(FILE *); extern const char *tsk_version_get_str(); /*********** RETURN VALUES ************/ /** * Values that callback functions can return to calling walk function. */ typedef enum { TSK_WALK_CONT = 0x0, ///< Walk function should continue to next object TSK_WALK_STOP = 0x1, ///< Walk function should stop processing units and return OK TSK_WALK_ERROR = 0x2, ///< Walk function should stop processing units and return error } TSK_WALK_RET_ENUM; /************ ERROR HANDLING *************/ //TODO: make this per-thread? 
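/* A sketch of how the error functions declared above and the TSK_ERR_* codes
 * defined below are typically used (illustrative only, not part of the
 * original header; mymalloc.c in this directory follows the same pattern):
 *
 *     Inside the library, when something goes wrong:
 *         tsk_error_reset();
 *         tsk_error_set_errno(TSK_ERR_AUX_MALLOC);
 *         tsk_error_set_errstr("tsk_malloc: out of memory");
 *
 *     In an application that received an error return value:
 *         tsk_error_print(stderr);
 *         (or fetch the message text with tsk_error_get())
 */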
extern int tsk_verbose; ///< Set to 1 to have verbose debug messages printed to stderr #define TSK_ERR_AUX 0x01000000 #define TSK_ERR_IMG 0x02000000 #define TSK_ERR_VS 0x04000000 #define TSK_ERR_FS 0x08000000 #define TSK_ERR_HDB 0x10000000 #define TSK_ERR_AUTO 0x20000000 #define TSK_ERR_MASK 0x00ffffff #define TSK_ERR_AUX_MALLOC (TSK_ERR_AUX | 0) #define TSK_ERR_AUX_GENERIC (TSK_ERR_AUX | 2) #define TSK_ERR_AUX_MAX 2 #define TSK_ERR_IMG_NOFILE (TSK_ERR_IMG | 0) #define TSK_ERR_IMG_OFFSET (TSK_ERR_IMG | 1) #define TSK_ERR_IMG_UNKTYPE (TSK_ERR_IMG | 2) #define TSK_ERR_IMG_UNSUPTYPE (TSK_ERR_IMG | 3) #define TSK_ERR_IMG_OPEN (TSK_ERR_IMG | 4) #define TSK_ERR_IMG_STAT (TSK_ERR_IMG | 5) #define TSK_ERR_IMG_SEEK (TSK_ERR_IMG | 6) #define TSK_ERR_IMG_READ (TSK_ERR_IMG | 7) #define TSK_ERR_IMG_READ_OFF (TSK_ERR_IMG | 8) #define TSK_ERR_IMG_ARG (TSK_ERR_IMG | 9) #define TSK_ERR_IMG_MAGIC (TSK_ERR_IMG | 10) #define TSK_ERR_IMG_WRITE (TSK_ERR_IMG | 11) #define TSK_ERR_IMG_CONVERT (TSK_ERR_IMG | 12) #define TSK_ERR_IMG_PASSWD (TSK_ERR_IMG | 13) #define TSK_ERR_IMG_MAX 14 #define TSK_ERR_VS_UNKTYPE (TSK_ERR_VS | 0) #define TSK_ERR_VS_UNSUPTYPE (TSK_ERR_VS | 1) #define TSK_ERR_VS_READ (TSK_ERR_VS | 2) #define TSK_ERR_VS_MAGIC (TSK_ERR_VS | 3) #define TSK_ERR_VS_WALK_RNG (TSK_ERR_VS | 4) #define TSK_ERR_VS_BUF (TSK_ERR_VS | 5) #define TSK_ERR_VS_BLK_NUM (TSK_ERR_VS | 6) #define TSK_ERR_VS_ARG (TSK_ERR_VS | 7) #define TSK_ERR_VS_MAX 8 #define TSK_ERR_FS_UNKTYPE (TSK_ERR_FS | 0) #define TSK_ERR_FS_UNSUPTYPE (TSK_ERR_FS | 1) #define TSK_ERR_FS_UNSUPFUNC (TSK_ERR_FS | 2) #define TSK_ERR_FS_WALK_RNG (TSK_ERR_FS | 3) #define TSK_ERR_FS_READ (TSK_ERR_FS | 4) #define TSK_ERR_FS_READ_OFF (TSK_ERR_FS | 5) #define TSK_ERR_FS_ARG (TSK_ERR_FS | 6) #define TSK_ERR_FS_BLK_NUM (TSK_ERR_FS | 7) #define TSK_ERR_FS_INODE_NUM (TSK_ERR_FS | 8) #define TSK_ERR_FS_INODE_COR (TSK_ERR_FS | 9) #define TSK_ERR_FS_MAGIC (TSK_ERR_FS | 10) #define TSK_ERR_FS_FWALK (TSK_ERR_FS | 11) #define TSK_ERR_FS_WRITE (TSK_ERR_FS | 12) #define TSK_ERR_FS_UNICODE (TSK_ERR_FS | 13) #define TSK_ERR_FS_RECOVER (TSK_ERR_FS | 14) #define TSK_ERR_FS_GENFS (TSK_ERR_FS | 15) #define TSK_ERR_FS_CORRUPT (TSK_ERR_FS | 16) #define TSK_ERR_FS_ATTR_NOTFOUND (TSK_ERR_FS | 17) #define TSK_ERR_FS_MAX 18 #define TSK_ERR_HDB_UNKTYPE (TSK_ERR_HDB | 0) #define TSK_ERR_HDB_UNSUPTYPE (TSK_ERR_HDB | 1) #define TSK_ERR_HDB_READDB (TSK_ERR_HDB | 2) #define TSK_ERR_HDB_READIDX (TSK_ERR_HDB | 3) #define TSK_ERR_HDB_ARG (TSK_ERR_HDB | 4) #define TSK_ERR_HDB_WRITE (TSK_ERR_HDB | 5) #define TSK_ERR_HDB_CREATE (TSK_ERR_HDB | 6) #define TSK_ERR_HDB_DELETE (TSK_ERR_HDB | 7) #define TSK_ERR_HDB_MISSING (TSK_ERR_HDB | 8) #define TSK_ERR_HDB_PROC (TSK_ERR_HDB | 9) #define TSK_ERR_HDB_OPEN (TSK_ERR_HDB | 10) #define TSK_ERR_HDB_CORRUPT (TSK_ERR_HDB | 11) #define TSK_ERR_HDB_UNSUPFUNC (TSK_ERR_HDB | 11) #define TSK_ERR_HDB_MAX 13 #define TSK_ERR_AUTO_DB (TSK_ERR_AUTO | 0) #define TSK_ERR_AUTO_CORRUPT (TSK_ERR_AUTO | 1) #define TSK_ERR_AUTO_UNICODE (TSK_ERR_AUTO | 2) #define TSK_ERR_AUTO_NOTOPEN (TSK_ERR_AUTO | 3) #define TSK_ERR_AUTO_MAX 4 //@} /** \name Endian Ordering Functions */ //@{ /** * Flag that identifies the endian ordering of the data being read. 
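 *
 * For example (an illustrative sketch, not from the original header): once a
 * file system reader has determined that its on-disk data is little-endian,
 * it passes TSK_LIT_ENDIAN to the tsk_getuXX() byte-reading macros defined in
 * tsk_base_i.h, e.g.
 *
 *     uint32_t block_size = tsk_getu32(TSK_LIT_ENDIAN, sb->block_size_bytes);
 *
 * where "sb" is a hypothetical pointer to a raw superblock structure.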
*/ typedef enum { TSK_UNKNOWN_ENDIAN = 0x00, ///< Endianness is unknown TSK_LIT_ENDIAN = 0x01, ///< Data is in little endian TSK_BIG_ENDIAN = 0x02 ///< Data is in big endian } TSK_ENDIAN_ENUM; //@} extern TSK_OFF_T tsk_parse_offset(const TSK_TCHAR *); extern int tsk_parse_pnum(const TSK_TCHAR * a_pnum_str, TSK_PNUM_T * a_pnum); /** \name MD5 and SHA-1 hashing */ //@{ /* Copyright (C) 1991-2, RSA Data Security, Inc. Created 1991. All rights reserved. License to copy and use this software is granted provided that it is identified as the "RSA Data Security, Inc. MD5 Message-Digest Algorithm" in all material mentioning or referencing this software or this function. License is also granted to make and use derivative works provided that such works are identified as "derived from the RSA Data Security, Inc. MD5 Message-Digest Algorithm" in all material mentioning or referencing the derived work. RSA Data Security, Inc. makes no representations concerning either the merchantability of this software or the suitability of this software for any particular purpose. It is provided "as is" without express or implied warranty of any kind. These notices must be retained in any copies of any part of this documentation and/or software. */ /* POINTER defines a generic pointer type */ typedef unsigned char *POINTER; /* UINT2 defines a two byte word */ //typedef unsigned short int UINT2; typedef uint16_t UINT2; /* UINT4 defines a four byte word */ typedef uint32_t UINT4; /* Added for sha1 */ /* BYTE defines a unsigned character */ typedef uint8_t BYTE; #ifndef TRUE #define FALSE 0 #define TRUE ( !FALSE ) #endif /* TRUE */ /* MD5 context. */ #define TSK_MD5_DIGEST_LENGTH 16 typedef struct { UINT4 state[4]; /* state (ABCD) */ UINT4 count[2]; /* number of bits, modulo 2^64 (lsb first) */ unsigned char buffer[64]; /* input buffer */ } TSK_MD5_CTX; void TSK_MD5_Init(TSK_MD5_CTX *); void TSK_MD5_Update(TSK_MD5_CTX *, unsigned char *, unsigned int); void TSK_MD5_Final(unsigned char[16], TSK_MD5_CTX *); /* sha.h */ /* The structure for storing SHS info */ #define TSK_SHA_DIGEST_LENGTH 32 typedef struct { UINT4 digest[5]; /* Message digest */ UINT4 countLo, countHi; /* 64-bit bit count */ UINT4 data[16]; /* SHS data buffer */ int Endianness; } TSK_SHA_CTX; /* Message digest functions */ void TSK_SHA_Init(TSK_SHA_CTX *); void TSK_SHA_Update(TSK_SHA_CTX *, BYTE * buffer, int count); void TSK_SHA_Final(BYTE * output, TSK_SHA_CTX *); /* Flags for which type of hash(es) to run */ typedef enum{ TSK_BASE_HASH_INVALID_ID = 0, TSK_BASE_HASH_MD5 = 0x01, TSK_BASE_HASH_SHA1 = 0x02 //TSK_BASE_HASH_SHA256 = 0x04, } TSK_BASE_HASH_ENUM; //@} #ifdef __cplusplus } #endif #ifdef __cplusplus #if 0 class TskStack { private: TSK_STACK * m_stack; public: /** * Create a TSK_STACK structure. See tsk_stack_create() for details. * @returns Pointer to structure or NULL on error */ TskStack() { m_stack = tsk_stack_create(); }; /** * Free an allocated TSK_STACK structure. See tsk_stack_free() for details. */ ~TskStack() { tsk_stack_free(m_stack); }; /** * Pop a value from the top of the stack. See tsk_stack_pop() for details. */ void pop() { tsk_stack_pop(m_stack); }; /** * Push a value to the top of TSK_STACK. See tsk_stack_push() for details. * @param a_val Value to push on * @returns 1 on error */ uint8_t push(uint64_t a_val) { return tsk_stack_push(m_stack, a_val); }; /** * Search a TSK_STACK for a given value. See tsk_stack_find() for details. 
* @param a_val Value to search for * @returns 1 if found and 0 if not */ uint8_t find(uint64_t a_val) { return tsk_stack_find(m_stack, a_val); }; /** * Return Number of entries in the stack * @returns number of entries in the stack */ size_t length() { if (m_stack != NULL) return m_stack->len; else return 0; }; }; #endif /** * \ingroup baselib_cpp * Allows access to most recent error message and code in the thread. */ class TskError { public: /** * Return the string with the current error message. The string does not end with a * newline. See tsk_error_get() for details. * * @returns String with error message or NULL if there is no error */ static const char *get() { return tsk_error_get(); }; /** * Print the current error message to a file. See tsk_error_print() for details. * * @param a_hFile File to print message to */ static void print(FILE * a_hFile) { tsk_error_print(a_hFile); }; /** * Clear the error number and error message. See tsk_error_reset() for details. */ static void reset() { tsk_error_reset(); }; }; #endif #endif sleuthkit-4.2.0/tsk/base/tsk_base_i.h000644 000765 000024 00000030001 12576320700 020306 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * */ /** \file tsk_base_i.h * Contains the general internal TSK type and function definitions. * This is needed by the library as it is built. */ #ifndef _TSK_BASE_I_H #define _TSK_BASE_I_H // include the autoconf header file #if HAVE_CONFIG_H #include "tsk/tsk_config.h" #endif /* Some Linux systems need LARGEFILE64_SOURCE and autoconf does * not define it, so we hack it here */ #ifdef _LARGEFILE_SOURCE #ifndef _LARGEFILE64_SOURCE #define _LARGEFILE64_SOURCE 1 #endif #endif #include "tsk_base.h" // most of the local files need this, so we include it here #include #ifdef __cplusplus extern "C" { #endif extern void tsk_init_lock(tsk_lock_t *); extern void tsk_deinit_lock(tsk_lock_t *); extern void tsk_take_lock(tsk_lock_t *); extern void tsk_release_lock(tsk_lock_t *); #ifndef rounddown #define rounddown(x, y) \ ((((x) % (y)) == 0) ? (x) : \ (roundup((x),(y)) - (y))) #endif extern void *tsk_malloc(size_t); extern void *tsk_realloc(void *, size_t); // getopt for windows #ifdef TSK_WIN32 extern int tsk_optind; extern TSK_TCHAR *tsk_optarg; extern int tsk_getopt(int argc, TSK_TCHAR * const argv[], const TSK_TCHAR * optstring); #endif /* Endian Ordering */ /* macros to read in multi-byte fields * file system is an array of 8-bit values, not 32-bit values */ extern uint8_t tsk_guess_end_u16(TSK_ENDIAN_ENUM *, uint8_t *, uint16_t); extern uint8_t tsk_guess_end_u32(TSK_ENDIAN_ENUM *, uint8_t *, uint32_t); /** \internal * Read a 16-bit unsigned value. * @param endian Flag that identifies local ordering. * @param x Byte array to read from * @returns 16-bit unsigned value */ #define tsk_getu16(endian, x) \ (uint16_t)(((endian) == TSK_LIT_ENDIAN) ? \ (((uint8_t *)(x))[0] + (((uint8_t *)(x))[1] << 8)) : \ (((uint8_t *)(x))[1] + (((uint8_t *)(x))[0] << 8)) ) /** \internal * Read a 16-bit signed value. * @param endian Flag that identifies local ordering. * @param x Byte array to read from * @returns 16-bit signed value */ #define tsk_gets16(endian, x) \ ((int16_t)tsk_getu16(endian, x)) /** \internal * Read a 24-bit unsigned value into a uint32_t variable. * @param endian Flag that identifies local ordering. * @param x Byte array to read from * @returns 16-bit unsigned value */ #define tsk_getu24(endian, x) \ (uint32_t)(((endian) == TSK_LIT_ENDIAN) ? 
\ (((uint8_t *)(x))[0] + (((uint8_t *)(x))[1] << 8) + (((uint8_t *)(x))[2] << 16)) : \ (((uint8_t *)(x))[2] + (((uint8_t *)(x))[1] << 8) + (((uint8_t *)(x))[0] << 16)) ) /** \internal * Read a 32-bit unsigned value. * @param endian Flag that identifies local ordering. * @param x Byte array to read from * @returns 32-bit unsigned value */ #define tsk_getu32(endian, x) \ (uint32_t)( ((endian) == TSK_LIT_ENDIAN) ? \ ((((uint8_t *)(x))[0] << 0) + \ (((uint8_t *)(x))[1] << 8) + \ (((uint8_t *)(x))[2] << 16) + \ (((uint8_t *)(x))[3] << 24) ) \ : \ ((((uint8_t *)(x))[3] << 0) + \ (((uint8_t *)(x))[2] << 8) + \ (((uint8_t *)(x))[1] << 16) + \ (((uint8_t *)(x))[0] << 24) ) ) /** \internal * Read a 32-bit signed value. * @param endian Flag that identifies local ordering. * @param x Byte array to read from * @returns 32-bit signed value */ #define tsk_gets32(endian, x) \ ((int32_t)tsk_getu32(endian, x)) /** \internal * Read a 48-bit unsigned value. * @param endian Flag that identifies local ordering. * @param x Byte array to read from * @returns 48-bit unsigned value */ #define tsk_getu48(endian, x) \ (uint64_t)( ((endian) == TSK_LIT_ENDIAN) ? \ ((uint64_t) \ ((uint64_t)((uint8_t *)(x))[0] << 0)+ \ ((uint64_t)((uint8_t *)(x))[1] << 8) + \ ((uint64_t)((uint8_t *)(x))[2] << 16) + \ ((uint64_t)((uint8_t *)(x))[3] << 24) + \ ((uint64_t)((uint8_t *)(x))[4] << 32) + \ ((uint64_t)((uint8_t *)(x))[5] << 40)) \ : \ ((uint64_t) \ ((uint64_t)((uint8_t *)(x))[5] << 0)+ \ ((uint64_t)((uint8_t *)(x))[4] << 8) + \ ((uint64_t)((uint8_t *)(x))[3] << 16) + \ ((uint64_t)((uint8_t *)(x))[2] << 24) + \ ((uint64_t)((uint8_t *)(x))[1] << 32) + \ ((uint64_t)((uint8_t *)(x))[0] << 40)) ) /** \internal * Read a 64-bit unsigned value. * @param endian Flag that identifies local ordering. * @param x Byte array to read from * @returns 64-bit unsigned value */ #define tsk_getu64(endian, x) \ (uint64_t)( ((endian) == TSK_LIT_ENDIAN) ? \ ((uint64_t) \ ((uint64_t)((uint8_t *)(x))[0] << 0) + \ ((uint64_t)((uint8_t *)(x))[1] << 8) + \ ((uint64_t)((uint8_t *)(x))[2] << 16) + \ ((uint64_t)((uint8_t *)(x))[3] << 24) + \ ((uint64_t)((uint8_t *)(x))[4] << 32) + \ ((uint64_t)((uint8_t *)(x))[5] << 40) + \ ((uint64_t)((uint8_t *)(x))[6] << 48) + \ ((uint64_t)((uint8_t *)(x))[7] << 56)) \ : \ ((uint64_t) \ ((uint64_t)((uint8_t *)(x))[7] << 0) + \ ((uint64_t)((uint8_t *)(x))[6] << 8) + \ ((uint64_t)((uint8_t *)(x))[5] << 16) + \ ((uint64_t)((uint8_t *)(x))[4] << 24) + \ ((uint64_t)((uint8_t *)(x))[3] << 32) + \ ((uint64_t)((uint8_t *)(x))[2] << 40) + \ ((uint64_t)((uint8_t *)(x))[1] << 48) + \ ((uint64_t)((uint8_t *)(x))[0] << 56)) ) /** \internal * Read a 64-bit signed value. * @param endian Flag that identifies local ordering. * @param x Byte array to read from * @returns 64-bit signed value */ #define tsk_gets64(endian, x) \ ((int64_t)tsk_getu64(endian, x)) #define TSK_IS_CNTRL(x) \ (((x) < 0x20) && ((x) >= 0x00)) /** \name Unicode */ //@{ // basic check to see if a Unicode file has been included // in an app that is using this as a library #ifndef TSK_UNI_REPLACEMENT_CHAR /**************** UNICODE *******************/ /* * Copyright 2001-2004 Unicode, Inc. * * Disclaimer * * This source code is provided as is by Unicode, Inc. No claims are * made as to fitness for any particular purpose. No warranties of any * kind are expressed or implied. The recipient agrees to determine * applicability of information provided. 
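// Worked example (editor sketch): how the byte-order macros above assemble
// values from a raw on-disk buffer.  The buffer contents are invented.
#include <assert.h>
#include <stdint.h>

static void endian_macro_example(void)
{
    uint8_t raw[4] = { 0x34, 0x12, 0xAA, 0x00 };

    // Little-endian read: raw[0] is the least significant byte.
    assert(tsk_getu16(TSK_LIT_ENDIAN, raw) == 0x1234);

    // Big-endian read of the same two bytes: raw[0] is the most significant byte.
    assert(tsk_getu16(TSK_BIG_ENDIAN, raw) == 0x3412);

    // 32-bit little-endian read uses all four bytes.
    assert(tsk_getu32(TSK_LIT_ENDIAN, raw) == 0x00AA1234);
}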
If this file has been
 * purchased on magnetic or optical media from Unicode, Inc., the
 * sole remedy for any claim will be exchange of defective media
 * within 90 days of receipt.
 *
 * Limitations on Rights to Redistribute This Code
 *
 * Unicode, Inc. hereby grants the right to freely use the information
 * supplied in this file in the creation of products supporting the
 * Unicode Standard, and to make copies of this file in any form
 * for internal or external distribution as long as this notice
 * remains attached.
 */

/* ---------------------------------------------------------------------

    Conversions between UTF-32, UTF-16, and UTF-8.  Header file.

    Several functions are included here, forming a complete set of
    conversions between the three formats.  UTF-7 is not included here,
    but is handled in a separate source file.

    Each of these routines takes pointers to input buffers and output
    buffers.  The input buffers are const.

    Each routine converts the text between *sourceStart and sourceEnd,
    putting the result into the buffer between *targetStart and
    targetEnd.  Note: the end pointers are *after* the last item: e.g.
    *(sourceEnd - 1) is the last item.

    The return result indicates whether the conversion was successful,
    and if not, whether the problem was in the source or target buffers.
    (Only the first encountered problem is indicated.)

    After the conversion, *sourceStart and *targetStart are both
    updated to point to the end of the last text successfully converted
    in the respective buffers.

    Input parameters:
        sourceStart - pointer to a pointer to the source buffer.
                The contents of this are modified on return so that
                it points at the next thing to be converted.
        targetStart - similarly, pointer to pointer to the target buffer.
        sourceEnd, targetEnd - respectively pointers to the ends of the
                two buffers, for overflow checking only.

    These conversion functions take a TSKConversionFlags argument.  When
    this flag is set to strict, both irregular sequences and isolated
    surrogates will cause an error.  When the flag is set to lenient,
    both irregular sequences and isolated surrogates are converted.

    Whether the flag is strict or lenient, all illegal sequences will
    cause an error return.  This includes sequences such as
    <F4 90 80 80>, <C0 80>, or <A0> in UTF-8, and values above 0x10FFFF
    in UTF-32.  Conformant code must check for illegal sequences.

    When the flag is set to lenient, characters over 0x10FFFF are
    converted to the replacement character; otherwise (when the flag is
    set to strict) they constitute an error.

    Output parameters:
        The value "TSKsourceIllegal" is returned from some routines if
        the input sequence is malformed.  When "TSKsourceIllegal" is
        returned, the source value will point to the illegal value that
        caused the problem.  E.g., in UTF-8 when a sequence is
        malformed, it points to the start of the malformed sequence.

    Author: Mark E. Davis, 1994.
    Rev History: Rick McGowan, fixes & updates May 2001.
        Fixes & updates, Sept 2001.

------------------------------------------------------------------------ */

/* ---------------------------------------------------------------------
    The following 4 definitions are compiler-specific.
    The C standard does not guarantee that wchar_t has at least
    16 bits, so wchar_t is no less portable than unsigned short!
    All should be unsigned values to avoid sign extension during
    bit mask & shift operations.
------------------------------------------------------------------------ */ typedef unsigned short UTF16; /* at least 16 bits */ typedef unsigned char UTF8; /* typically 8 bits */ typedef unsigned char Boolean; /* 0 or 1 */ typedef enum { TSKconversionOK, ///< conversion successful TSKsourceExhausted, ///< partial character in source, but hit end TSKtargetExhausted, ///< insuff. room in target for conversion TSKsourceIllegal ///< source sequence is illegal/malformed } TSKConversionResult; typedef enum { TSKstrictConversion = 0, ///< Error if invalid surrogate pairs are found TSKlenientConversion ///< Ignore invalid surrogate pairs } TSKConversionFlags; extern TSKConversionResult tsk_UTF8toUTF16(const UTF8 ** sourceStart, const UTF8 * sourceEnd, UTF16 ** targetStart, UTF16 * targetEnd, TSKConversionFlags flags); extern TSKConversionResult tsk_UTF8toUTF16W(const UTF8 ** sourceStart, const UTF8 * sourceEnd, wchar_t ** targetStart, wchar_t * targetEnd, TSKConversionFlags flags); extern TSKConversionResult tsk_UTF16toUTF8(TSK_ENDIAN_ENUM, const UTF16 ** sourceStart, const UTF16 * sourceEnd, UTF8 ** targetStart, UTF8 * targetEnd, TSKConversionFlags flags); extern TSKConversionResult tsk_UTF16toUTF8_lclorder(const UTF16 ** sourceStart, const UTF16 * sourceEnd, UTF8 ** targetStart, UTF8 * targetEnd, TSKConversionFlags flags); extern TSKConversionResult tsk_UTF16WtoUTF8_lclorder(const wchar_t ** sourceStart, const wchar_t * sourceEnd, UTF8 ** targetStart, UTF8 * targetEnd, TSKConversionFlags flags); extern Boolean tsk_isLegalUTF8Sequence(const UTF8 * source, const UTF8 * sourceEnd); extern void tsk_cleanupUTF8(char *source, const char replacement); #endif //@} #ifdef __cplusplus } #endif #endif sleuthkit-4.2.0/tsk/base/tsk_endian.c000644 000765 000024 00000003565 12576320700 020334 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2003-2011 Brian Carrier. All rights reserved * * Copyright (c) 2002 Brian Carrier, @stake Inc. All rights reserved * * This software is distributed under the Common Public License 1.0 * */ #include "tsk_base_i.h" /** \file tsk_endian.c * Contains the routines to read data in different endian orderings. */ /* A temporary data structure with an endian field */ typedef struct { uint8_t endian; } tmp_ds; /** \internal * Analyze an array of bytes and compare it to a target value to * determine which byte order the array is stored in. * * @param flag Pointer to location where proper endian flag should be stored. * @param x Pointer to array of bytes to analyze. * @param val Target value to compare to * @returns 1 if match cannot be made, 0 if it can. */ uint8_t tsk_guess_end_u16(TSK_ENDIAN_ENUM * flag, uint8_t * x, uint16_t val) { /* try little */ if (tsk_getu16(TSK_LIT_ENDIAN, x) == val) { *flag = TSK_LIT_ENDIAN; return 0; } /* ok, big now */ if (tsk_getu16(TSK_BIG_ENDIAN, x) == val) { *flag = TSK_BIG_ENDIAN; return 0; } /* didn't find it */ return 1; } /** \internal * same idea as tsk_guess_end_u16 except that val is a 32-bit value * * @param flag Pointer to location where proper endian flag should be stored. * @param x Pointer to array of bytes to analyze. * @param val Target value to compare to * @returns 1 if match cannot be made, 0 if it can. 
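// Usage sketch (editor illustration): the pointer-to-pointer calling
// convention used by the converters declared above.  Both cursors are
// advanced past whatever was successfully converted, so a caller can detect
// truncation or resume after enlarging the target buffer.  The sample
// UTF-16 text and helper name are invented.
#include <stdio.h>

static void convert_name_example(void)
{
    UTF16 name16[] = { 'A', 'B', 0x00E9, 0 };   // "ABé" in local byte order
    UTF8 name8[16];

    const UTF16 *src = name16;
    UTF8 *dst = name8;

    TSKConversionResult r = tsk_UTF16toUTF8_lclorder(&src,
        &name16[3],                     // sourceEnd: one past the last UTF-16 unit
        &dst, &name8[sizeof(name8)],    // targetEnd: one past the end of the output buffer
        TSKlenientConversion);

    if (r == TSKconversionOK) {
        *dst = '\0';                    // the converters do not NUL-terminate
        printf("%s\n", (char *) name8);
    }
}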
*/ uint8_t tsk_guess_end_u32(TSK_ENDIAN_ENUM * flag, uint8_t * x, uint32_t val) { /* try little */ if (tsk_getu32(TSK_LIT_ENDIAN, x) == val) { *flag = TSK_LIT_ENDIAN; return 0; } /* ok, big now */ if (tsk_getu32(TSK_BIG_ENDIAN, x) == val) { *flag = TSK_BIG_ENDIAN; return 0; } return 1; } sleuthkit-4.2.0/tsk/base/tsk_error.c000644 000765 000024 00000026463 12576320700 020231 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2006-2011 Brian Carrier. All Rights reserved * * This software is distributed under the Common Public License 1.0 */ #include "tsk_base_i.h" #include "tsk_base.h" /** * \file tsk_error.c * Contains the error handling code and variables. */ /* Global variables that fit here as well as anywhere */ char *progname = "unknown"; int tsk_verbose = 0; /* Error messages */ static const char *tsk_err_aux_str[TSK_ERR_IMG_MAX] = { "Insufficient memory", "TSK Error" }; /* imagetools specific error strings */ static const char *tsk_err_img_str[TSK_ERR_IMG_MAX] = { "Missing image file names", // 0 "Invalid image offset", "Cannot determine image type", "Unsupported image type", "Error opening image file", "Error stat(ing) image file", // 5 "Error seeking in image file", "Error reading image file", "Read offset too large for image file", "Invalid API argument", "Invalid magic value", // 10 "Error writing data", "Error converting file name", "Incorrect or missing password" }; static const char *tsk_err_mm_str[TSK_ERR_VS_MAX] = { "Cannot determine partition type", // 0 "Unsupported partition type", "Error reading image file", "Invalid magic value", "Invalid walk range", "Invalid buffer size", // 5 "Invalid sector address", "Invalid API argument", }; static const char *tsk_err_fs_str[TSK_ERR_FS_MAX] = { "Cannot determine file system type", // 0 "Unsupported file system type", "Function/Feature not supported", "Invalid walk range", "Error reading image file", "Invalid file offset", // 5 "Invalid API argument", "Invalid block address", "Invalid metadata address", "Error in metadata structure", "Invalid magic value", // 10 "Error extracting file from image", "Error writing data", "Error converting Unicode", "Error recovering deleted file", "General file system error", // 15 "File system is corrupt", "Attribute not found in file", }; static const char *tsk_err_hdb_str[TSK_ERR_HDB_MAX] = { "Cannot determine hash database type", // 0 "Unsupported hash database type", "Error reading hash database file", "Error reading hash database index", "Invalid argument", "Error writing data", // 5 "Error creating file", "Error deleting file", "Missing file", "Error creating process", "Error opening file", // 10 "Corrupt hash database" }; static const char *tsk_err_auto_str[TSK_ERR_AUTO_MAX] = { "Database Error", "Corrupt file data", "Error converting Unicode", "Image not opened yet" }; #ifdef TSK_MULTITHREAD_LIB #ifdef TSK_WIN32 TSK_ERROR_INFO * tsk_error_get_info() { return (TSK_ERROR_INFO *) tsk_error_win32_get_per_thread_(sizeof(TSK_ERROR_INFO)); } // non-windows #else static pthread_key_t pt_tls_key; static pthread_once_t pt_tls_key_once = PTHREAD_ONCE_INIT; static void free_error_info(void *per_thread_error_info) { if (per_thread_error_info != 0) { free(per_thread_error_info); pthread_setspecific(pt_tls_key, 0); } } static void make_pt_tls_key() { (void) pthread_key_create(&pt_tls_key, free_error_info); } TSK_ERROR_INFO * tsk_error_get_info() { TSK_ERROR_INFO *ptr = NULL; (void) pthread_once(&pt_tls_key_once, make_pt_tls_key); if ((ptr = 
(TSK_ERROR_INFO *) pthread_getspecific(pt_tls_key)) == 0) {
        // Under high memory pressure malloc will return NULL.
        ptr = (TSK_ERROR_INFO *) malloc(sizeof(TSK_ERROR_INFO));
        if (ptr != NULL) {
            ptr->t_errno = 0;
            ptr->errstr[0] = 0;
            ptr->errstr2[0] = 0;
        }
        (void) pthread_setspecific(pt_tls_key, ptr);
    }
    return ptr;
}
#endif

// single-threaded
#else

static TSK_ERROR_INFO error_info = { 0, {0}, {0} };

TSK_ERROR_INFO *
tsk_error_get_info()
{
    return &error_info;
}
#endif

/**
 * \ingroup baselib
 * Return the string with the current error message.  The string does not end with a
 * newline.
 *
 * @returns String with error message or NULL if there is no error
 */
const char *
tsk_error_get()
{
    size_t pidx = 0;
    TSK_ERROR_INFO *error_info = tsk_error_get_info();
    int t_errno = error_info->t_errno;
    char *errstr_print = error_info->errstr_print;

    if (t_errno == 0) {
        return NULL;
    }

    memset(errstr_print, 0, TSK_ERROR_STRING_MAX_LENGTH);
    if (t_errno & TSK_ERR_AUX) {
        if ((TSK_ERR_MASK & t_errno) < TSK_ERR_AUX_MAX)
            snprintf(&errstr_print[pidx],
                TSK_ERROR_STRING_MAX_LENGTH - pidx, "%s",
                tsk_err_aux_str[t_errno & TSK_ERR_MASK]);
        else
            snprintf(&errstr_print[pidx],
                TSK_ERROR_STRING_MAX_LENGTH - pidx,
                "auxtools error: %" PRIu32, TSK_ERR_MASK & t_errno);
    }
    else if (t_errno & TSK_ERR_IMG) {
        if ((TSK_ERR_MASK & t_errno) < TSK_ERR_IMG_MAX)
            snprintf(&errstr_print[pidx],
                TSK_ERROR_STRING_MAX_LENGTH - pidx, "%s",
                tsk_err_img_str[t_errno & TSK_ERR_MASK]);
        else
            snprintf(&errstr_print[pidx],
                TSK_ERROR_STRING_MAX_LENGTH - pidx,
                "imgtools error: %" PRIu32, TSK_ERR_MASK & t_errno);
    }
    else if (t_errno & TSK_ERR_VS) {
        if ((TSK_ERR_MASK & t_errno) < TSK_ERR_VS_MAX)
            snprintf(&errstr_print[pidx],
                TSK_ERROR_STRING_MAX_LENGTH - pidx, "%s",
                tsk_err_mm_str[t_errno & TSK_ERR_MASK]);
        else
            snprintf(&errstr_print[pidx],
                TSK_ERROR_STRING_MAX_LENGTH - pidx,
                "mmtools error: %" PRIu32, TSK_ERR_MASK & t_errno);
    }
    else if (t_errno & TSK_ERR_FS) {
        if ((TSK_ERR_MASK & t_errno) < TSK_ERR_FS_MAX)
            snprintf(&errstr_print[pidx],
                TSK_ERROR_STRING_MAX_LENGTH - pidx, "%s",
                tsk_err_fs_str[t_errno & TSK_ERR_MASK]);
        else
            snprintf(&errstr_print[pidx],
                TSK_ERROR_STRING_MAX_LENGTH - pidx,
                "fstools error: %" PRIu32, TSK_ERR_MASK & t_errno);
    }
    else if (t_errno & TSK_ERR_HDB) {
        if ((TSK_ERR_MASK & t_errno) < TSK_ERR_HDB_MAX)
            snprintf(&errstr_print[pidx],
                TSK_ERROR_STRING_MAX_LENGTH - pidx, "%s",
                tsk_err_hdb_str[t_errno & TSK_ERR_MASK]);
        else
            snprintf(&errstr_print[pidx],
                TSK_ERROR_STRING_MAX_LENGTH - pidx,
                "hashtools error: %" PRIu32, TSK_ERR_MASK & t_errno);
    }
    else if (t_errno & TSK_ERR_AUTO) {
        if ((TSK_ERR_MASK & t_errno) < TSK_ERR_AUTO_MAX)
            snprintf(&errstr_print[pidx],
                TSK_ERROR_STRING_MAX_LENGTH - pidx, "%s",
                tsk_err_auto_str[t_errno & TSK_ERR_MASK]);
        else
            snprintf(&errstr_print[pidx],
                TSK_ERROR_STRING_MAX_LENGTH - pidx,
                "auto error: %" PRIu32, TSK_ERR_MASK & t_errno);
    }
    else {
        snprintf(&errstr_print[pidx], TSK_ERROR_STRING_MAX_LENGTH - pidx,
            "Unknown Error: %" PRIu32, t_errno);
    }
    pidx = strlen(errstr_print);

    /* Print the unique string, if it exists */
    if (error_info->errstr[0] != '\0') {
        snprintf(&errstr_print[pidx], TSK_ERROR_STRING_MAX_LENGTH - pidx,
            " (%s)", error_info->errstr);
        pidx = strlen(errstr_print);
    }

    if (error_info->errstr2[0] != '\0') {
        snprintf(&errstr_print[pidx], TSK_ERROR_STRING_MAX_LENGTH - pidx,
            " (%s)", error_info->errstr2);
        pidx = strlen(errstr_print);
    }
    return (char *) error_info->errstr_print;
}

/**
 * \ingroup baselib
 * Return the current error number.
 * @returns the current error number.
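// Usage sketch (editor illustration): the pattern callers use with the
// per-thread error state managed above -- reset, set a module error code and
// a printf-style message, and let the top-level caller retrieve one
// formatted string.  The function names and message text are invented.
#include <stdio.h>

static int example_failing_operation(void)
{
    tsk_error_reset();
    tsk_error_set_errno(TSK_ERR_IMG_OPEN);
    tsk_error_set_errstr("example.img: %s", "permission denied");
    return 1;                       // TSK convention: non-zero means an error is set
}

static void example_caller(void)
{
    if (example_failing_operation()) {
        // tsk_error_get()/tsk_error_print() combine the table string for
        // TSK_ERR_IMG_OPEN with the specific message set above.
        tsk_error_print(stderr);
    }
}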
*/ uint32_t tsk_error_get_errno() { return tsk_error_get_info()->t_errno; } /** * \ingroup baselib * Set the current TSK error number. * @param t_errno the error number. */ void tsk_error_set_errno(uint32_t t_errno) { tsk_error_get_info()->t_errno = t_errno; } /** * \ingroup baselib * Retrieve the current, basic error string. * Additional information is in errstr2. * Use tsk_error_get() to get a fully formatted string. * @returns the string. This is only valid until the next call to a tsk function. */ char * tsk_error_get_errstr() { return tsk_error_get_info()->errstr; } /** * \ingroup baselib * Set the error string #1. This should contain the basic message. * @param format the printf-style format string */ void tsk_error_set_errstr(const char *format, ...) { va_list args; va_start(args, format); vsnprintf(tsk_error_get_info()->errstr, TSK_ERROR_STRING_MAX_LENGTH, format, args); va_end(args); } /** * \ingroup baselib * Set the error string * @param format the printf-style format string * @param args the printf-style args */ void tsk_error_vset_errstr(const char *format, va_list args) { vsnprintf(tsk_error_get_info()->errstr, TSK_ERROR_STRING_MAX_LENGTH, format, args); } /** * \ingroup baselib * Retrieve the current error string #2. * This has additional information than string #1. * @returns the string. This is only valid until the next call to a tsk function. */ char * tsk_error_get_errstr2() { return tsk_error_get_info()->errstr2; } /** * \ingroup baselib * Set the error string #2. This is called by methods who encounter the error, * but did not set errno. * @param format the printf-style format string */ void tsk_error_set_errstr2(const char *format, ...) { va_list args; va_start(args, format); vsnprintf(tsk_error_get_info()->errstr2, TSK_ERROR_STRING_MAX_LENGTH, format, args); va_end(args); } /** * \ingroup baselib * Set the error string * @param format the printf-style format string * @param args the printf-style format args */ void tsk_error_vset_errstr2(const char *format, va_list args) { vsnprintf(tsk_error_get_info()->errstr2, TSK_ERROR_STRING_MAX_LENGTH, format, args); } /** * \ingroup baselib * Concatenate a message onto the end of the errstr2. * @param format */ void tsk_error_errstr2_concat(const char *format, ...) { va_list args; char *errstr2 = tsk_error_get_info()->errstr2; int current_length = (int) (strlen(errstr2) + 1); // +1 for a space if (current_length > 0) { int remaining = TSK_ERROR_STRING_MAX_LENGTH - current_length; errstr2[current_length - 1] = ' '; va_start(args, format); vsnprintf(&errstr2[current_length], remaining, format, args); va_end(args); } } /** * \ingroup baselib * Print the current fully formed error message to a file. * * @param hFile File to print message to */ void tsk_error_print(FILE * hFile) { const char *str; if (tsk_error_get_errno() == 0) return; str = tsk_error_get(); if (str != NULL) { tsk_fprintf(hFile, "%s\n", str); } else { tsk_fprintf(hFile, "Error creating Sleuth Kit error string (Errno: %d)\n", tsk_error_get_errno()); } } /** * \ingroup baselib * Clear the error number and error message. */ void tsk_error_reset() { TSK_ERROR_INFO *info = tsk_error_get_info(); if( info != NULL ) { info->t_errno = 0; info->errstr[0] = 0; info->errstr2[0] = 0; info->errstr_print[0] = 0; } } sleuthkit-4.2.0/tsk/base/tsk_error_win32.cpp000644 000765 000024 00000002333 12576320700 021601 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2010-2011 Brian Carrier. 
All Rights reserved * * This software is distributed under the Common Public License 1.0 */ #include "tsk_base_i.h" #ifdef TSK_WIN32 #include namespace tsk { class GetTlsIndex { public: GetTlsIndex() { tlsIndex = TlsAlloc(); } ~GetTlsIndex() { TlsFree(tlsIndex); } DWORD GetIndex() { return tlsIndex; } private: static DWORD tlsIndex; }; DWORD GetTlsIndex::tlsIndex; static GetTlsIndex getTlsIndex; } /* * There's no destructor model in Win32 as with pthreads. * A DLLMain could do the job, but we're not a DLL. */ extern "C" void *tsk_error_win32_get_per_thread_(unsigned struct_size) { DWORD index = tsk::getTlsIndex.GetIndex(); void *ptr = TlsGetValue( index ); if (ptr == 0) { ptr = malloc(struct_size); memset(ptr, 0, struct_size); TlsSetValue(index, ptr); } return ptr; } /* * Threads must call this on exit to avoid a leak. */ extern "C" void tsk_error_win32_thread_cleanup() { DWORD index = tsk::getTlsIndex.GetIndex(); void *ptr = TlsGetValue(index); if (ptr != 0) { free(ptr); TlsSetValue(index, 0); } } #endif /*TSK_WIN32*/ sleuthkit-4.2.0/tsk/base/tsk_list.c000644 000765 000024 00000012654 12576320700 020050 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2007-2011 Brian Carrier. All Rights reserved * * This software is distributed under the Common Public License 1.0 */ #include "tsk_base_i.h" /** \file tsk_list.c * tsk_lists are a linked list of buckets that store a key in REVERSE sorted order. * They are used to keep track of data as we walk along and prevent loops * from data corruption. Note that the len value is actually negative. An entry * with a key of 6 and a length of 2 covers the range of 5 to 6. */ /* * Create a list with teh first entry defined * @param a_key Key for initial entry * @returns newly created entry */ static TSK_LIST * tsk_list_create(uint64_t a_key) { TSK_LIST *ent; if ((ent = (TSK_LIST *) tsk_malloc(sizeof(TSK_LIST))) == NULL) { return NULL; } ent->key = a_key; ent->next = NULL; ent->len = 1; return ent; } /** * \ingroup baselib * Add an entry to a TSK_LIST (and create one if one does not exist) * @param a_tsk_list_head Pointer to pointer for head of list (can point to NULL if no list exists). 
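// Usage sketch (editor illustration): TSK_LIST tracks which addresses have
// already been seen so walks can detect loops.  Runs of consecutive keys are
// collapsed into one bucket (key plus len), which the values below exercise.
// The addresses are invented.
#include <stddef.h>

static void list_example(void)
{
    TSK_LIST *seen = NULL;          // tsk_list_add() creates the list on first use

    tsk_list_add(&seen, 41);
    tsk_list_add(&seen, 42);        // extends the existing 41 bucket instead of allocating
    tsk_list_add(&seen, 100);       // not contiguous, so a new bucket is created

    if (tsk_list_find(seen, 42)) {
        // already processed: a real walker would stop here to avoid a loop
    }

    tsk_list_free(seen);
}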
* @param a_key Value to add to list * @returns 1 on error */ uint8_t tsk_list_add(TSK_LIST ** a_tsk_list_head, uint64_t a_key) { TSK_LIST *tmp; /* If the head is NULL, then create an entry */ if (*a_tsk_list_head == NULL) { TSK_LIST *ent; /* if (tsk_verbose) fprintf(stderr, "entry %" PRIu64 " is first on list\n", a_key); */ if ((ent = tsk_list_create(a_key)) == NULL) return 1; *a_tsk_list_head = ent; return 0; } /* If the new key is larger than the head, make it the head */ if (a_key > (*a_tsk_list_head)->key) { /* if (tsk_verbose) fprintf(stderr, "entry %" PRIu64 " added to head before %" PRIu64 "\n", a_key, (*a_tsk_list_head)->key); */ // If we can, update the length of the existing list entry if (a_key == (*a_tsk_list_head)->key + 1) { (*a_tsk_list_head)->key++; (*a_tsk_list_head)->len++; } else { TSK_LIST *ent; if ((ent = tsk_list_create(a_key)) == NULL) return 1; ent->next = *a_tsk_list_head; *a_tsk_list_head = ent; } return 0; } // get rid of duplicates else if (a_key == (*a_tsk_list_head)->key) { return 0; } /* At the start of this loop each time, we know that the key to add * is smaller than the entry being considered (tmp) */ tmp = *a_tsk_list_head; while (tmp != NULL) { /* First check if this is a duplicate and contained in tmp */ if (a_key > (tmp->key - tmp->len)) { return 0; } /* Can we append it to the end of tmp? */ else if (a_key == (tmp->key - tmp->len)) { // do a sanity check on the next entry if ((tmp->next) && (tmp->next->key == a_key)) { // @@@ We could fix this situation and remove the next entry... return 0; } tmp->len++; return 0; } /* The key is less than the current bucket and can't be added to it. * check if we are at the end of the list yet */ else if (tmp->next == NULL) { TSK_LIST *ent; /* if (tsk_verbose) fprintf(stderr, "entry %" PRIu64 " added to tail\n", a_key); */ if ((ent = tsk_list_create(a_key)) == NULL) return 1; tmp->next = ent; return 0; } // can we prepend it to the next bucket? else if (a_key == tmp->next->key + 1) { tmp->next->key++; tmp->next->len++; return 0; } // do we need a new bucket in between? else if (a_key > tmp->next->key) { TSK_LIST *ent; /* if (tsk_verbose) fprintf(stderr, "entry %" PRIu64 " added before %" PRIu64 "\n", a_key, tmp->next->key); */ if ((ent = tsk_list_create(a_key)) == NULL) return 1; ent->next = tmp->next; tmp->next = ent; return 0; } else if (a_key == tmp->next->key) { return 0; } tmp = tmp->next; } return 0; } /** * \ingroup baselib * Search a TSK_LIST for the existence of a value. * @param a_tsk_list_head Head of list to search * @param a_key Value to search for * @returns 1 if value is found and 0 if not */ uint8_t tsk_list_find(TSK_LIST * a_tsk_list_head, uint64_t a_key) { TSK_LIST *tmp; tmp = a_tsk_list_head; while (tmp != NULL) { // check this bucket // use the key+1 and then subtract for unsigned cases when key-len == -1 if ((a_key <= tmp->key) && (a_key >= tmp->key + 1 - tmp->len)) return 1; // Have we passed any potential buckets? else if (a_key > tmp->key) return 0; tmp = tmp->next; } return 0; } /** * \ingroup baselib * Free a TSK_LIST. * @param a_tsk_list_head Head of list to free */ void tsk_list_free(TSK_LIST * a_tsk_list_head) { TSK_LIST *tmp; while (a_tsk_list_head) { tmp = a_tsk_list_head->next; free(a_tsk_list_head); a_tsk_list_head = tmp; } } sleuthkit-4.2.0/tsk/base/tsk_lock.c000644 000765 000024 00000005314 12576320700 020020 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2011 Brian Carrier. 
All Rights reserved * * This software is distributed under the Common Public License 1.0 */ #include "tsk_base_i.h" #ifdef TSK_MULTITHREAD_LIB #ifdef TSK_WIN32 void tsk_init_lock(tsk_lock_t * lock) { InitializeCriticalSection(&lock->critical_section); } void tsk_deinit_lock(tsk_lock_t * lock) { DeleteCriticalSection(&lock->critical_section); } void tsk_take_lock(tsk_lock_t * lock) { EnterCriticalSection(&lock->critical_section); } void tsk_release_lock(tsk_lock_t * lock) { LeaveCriticalSection(&lock->critical_section); } #else #include void tsk_init_lock(tsk_lock_t * lock) { pthread_mutexattr_t attr; pthread_mutexattr_init(&attr); // Locks on Linux are not recursive (unlike on Windows), so things // will hang if the current thread tries to take the lock again. // While debugging, it's sometimes useful to call // // pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK_NP); // // which will provoke an immediate error rather than a hang. // // PTHREAD_MUTEX_ERRORCHECK is not defined by default on Linux, // but it is portable if you have the right_XOPEN_SOURCE settings. // However, that macro affects the availability of other features // like BSD-style u_int types used in afflib. // PTHREAD_MUTEX_ERRORCHECK -is- available by default on Mac. // // PTHREAD_MUTEX_ERRORCHECK_NP is the Linux/gcc non-portable // equivalent which does not require _XOPEN_SOURCE. // // Other interesting attributes are PTHREAD_MUTEX_RECURSIVE (and // PTHREAD_MUTEX_RECURSIVE_NP). We avoided those out of portability // concerns with the _XOPEN_SOURCE settings. int e = pthread_mutex_init(&lock->mutex, &attr); pthread_mutexattr_destroy(&attr); if (e != 0) { fprintf(stderr, "tsk_init_lock: thread_mutex_init failed %d\n", e); assert(0); } } void tsk_deinit_lock(tsk_lock_t * lock) { pthread_mutex_destroy(&lock->mutex); } void tsk_take_lock(tsk_lock_t * lock) { int e = pthread_mutex_lock(&lock->mutex); if (e != 0) { fprintf(stderr, "tsk_take_lock: thread_mutex_lock failed %d\n", e); assert(0); } } void tsk_release_lock(tsk_lock_t * lock) { int e = pthread_mutex_unlock(&lock->mutex); if (e != 0) { fprintf(stderr, "tsk_release_lock: thread_mutex_unlock failed %d\n", e); assert(0); } } #endif // single-threaded #else void tsk_init_lock(tsk_lock_t * lock) { } void tsk_deinit_lock(tsk_lock_t * lock) { } void tsk_take_lock(tsk_lock_t * lock) { } void tsk_release_lock(tsk_lock_t * lock) { } #endif sleuthkit-4.2.0/tsk/base/tsk_os.h000644 000765 000024 00000011747 12576320700 017525 0ustar00carrierstaff000000 000000 /* ** The Sleuth Kit ** ** Brian Carrier [carrier sleuthkit [dot] org] ** Copyright (c) 2004-2011 Brian Carrier. All rights reserved */ /** \file tsk_os.h * Contains some OS-specific type settings. */ #ifndef _TSK_OS_H #define _TSK_OS_H /* * Solaris 2.x. Build for large files when dealing with filesystems > 2GB. * With the 32-bit file model, needs pread() to access filesystems > 2GB. 
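// Usage sketch (editor illustration): how the lock wrappers above are meant
// to guard shared state.  They compile to no-ops when the library is built
// single-threaded.  The counter structure is invented for the example.
#include <stdint.h>

typedef struct {
    tsk_lock_t lock;
    uint64_t hits;
} example_counter_t;

static void example_counter_init(example_counter_t *c)
{
    tsk_init_lock(&c->lock);
    c->hits = 0;
}

static void example_counter_bump(example_counter_t *c)
{
    tsk_take_lock(&c->lock);        // blocks; the lock is not recursive on POSIX builds
    c->hits++;
    tsk_release_lock(&c->lock);
}

static void example_counter_done(example_counter_t *c)
{
    tsk_deinit_lock(&c->lock);
}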
*/ #if defined(sun) #include #endif #if defined(__CYGWIN__) #ifndef roundup #define roundup(x, y) \ ( ( ((x)+((y) - 1)) / (y)) * (y) ) #endif #endif #if defined(__INTERNIX) #define roundup(x, y) \ ( ( ((x)+((y) - 1)) / (y)) * (y) ) #endif // mingw Windows cross compile #ifdef __MINGW32__ #ifndef UNICODE #define UNICODE #endif #ifndef _UNICODE #define _UNICODE #endif #include #include #define TSK_WIN32 #define roundup(x, y) \ ( ( ((x)+((y) - 1)) / (y)) * (y) ) #define fseeko fseek #define daddr_t int #endif // Visual Studio / Windows #ifdef _MSC_VER #define TSK_WIN32 #define TSK_MULTITHREAD_LIB #ifndef UNICODE #define UNICODE #endif #ifndef _UNICODE #define _UNICODE #endif #define WIN32_LEAN_AND_MEAN /* somewhat limit Win32 pollution */ #define _CRT_SECURE_NO_DEPRECATE 1 #include #include #include #include #include "intrin.h" // define the sized int types #if _MSC_VER >= 1600 #include #else typedef unsigned __int8 uint8_t; typedef __int8 int8_t; typedef unsigned __int16 uint16_t; typedef __int16 int16_t; typedef unsigned __int32 uint32_t; typedef __int32 int32_t; typedef unsigned __int64 uint64_t; typedef __int64 int64_t; #endif // define the typical unix types typedef int mode_t; // ifdef added from Joachim because it can cause conflicts // if python.h is included #if !defined( HAVE_SSIZE_T ) #define HAVE_SSIZE_T #if _WIN64 typedef int64_t ssize_t; #else typedef int32_t ssize_t; #endif #endif // remap some of the POSIX functions #define snprintf _snprintf #define strcasecmp(string1, string2) _stricmp(string1, string2) #define putenv _putenv #define roundup(x, y) \ ( ( ((x)+((y) - 1)) / (y)) * (y) ) #define fseeko _fseeki64 #endif /* When TSK deals with the outside world (printing / input), the data will * be in either UTF-16 or UTF-8 (Windows or Unix). TSK_TCHAR is defined * as the data type needed and the following function map to the required * function. 
*/ #ifdef TSK_WIN32 /* TSK_TCHAR is a wide 2-byte character */ typedef WCHAR TSK_TCHAR; ///< Character data type that is UTF-16 (wchar_t) in Windows and UTF-8 (char) in Unix #define _TSK_T(x) L ## x #define TSTRTOK wcstok #define TSTRLEN wcslen #define TSTRCMP wcscmp #define TSTRNCMP wcsncmp #define TSTRICMP _wcsicmp #define TSTRNCPY wcsncpy #define TSTRNCAT wcsncat #define TSTRCHR wcschr #define TSTRRCHR wcsrchr #define TSTRTOUL wcstoul #define TATOI _wtoi #define TFPRINTF fwprintf #define TSNPRINTF _snwprintf #define TPUTENV _wputenv #define TZSET _tzset #if defined(_MSC_VER) #define TSTRTOULL _wcstoui64 #define STAT_STR __stat64 #define TSTAT _wstat64 #elif defined(__MINGW32__) #define TSTRTOULL wcstoull #define STAT_STR _stat #define TSTAT _wstat #endif #define PRIcTSK _TSK_T("S") ///< sprintf macro to print a UTF-8 char string to TSK_TCHAR buffer #define PRIwTSK _TSK_T("s") ///< sprintf macro to print a UTF-16 wchar_t string to TSK_TCHAR buffer #define PRIttocTSK "S" ///< printf macro to print a TSK_TCHAR string to stderr or other char device #define PRIuSIZE "Iu" ///< printf macro to print a size_t value in Windows printf codes #define unlink _unlink #define GETOPT tsk_getopt // points to local wchar version #define OPTIND tsk_optind #define OPTARG tsk_optarg #define strtok_r(a,b,c) strtok(a,b) // Non-Win32 #else /* TSK_TCHAR is a 1-byte character */ typedef char TSK_TCHAR; ///< Character data type that is UTF-16 (wchar_t) in Windows and UTF-8 (char) in Unix #define _TSK_T(x) x #define TSTAT stat #define STAT_STR stat #define TSTRTOK strtok #define TSTRLEN strlen #define TSTRCMP strcmp #define TSTRNCMP strncmp #define TSTRICMP strcasecmp #define TSTRNCPY strncpy #define TSTRNCAT strncat #define TSTRCHR strchr #define TSTRRCHR strrchr #define TSTRTOUL strtoul #define TSTRTOULL strtoull #define TATOI atoi #define TFPRINTF fprintf #define TSNPRINTF snprintf #define TPUTENV putenv #define TZSET tzset #define PRIcTSK _TSK_T("s") ///< sprintf macro to print a UTF-8 char string to TSK_TCHAR buffer #define PRIwTSK _TSK_T("S") ///< sprintf macro to print a UTF-16 wchar_t string to TSK_TCHAR buffer #define PRIttocTSK "s" ///< printf macro to print a TSK_TCHAR string to stderr or other char device #define PRIuSIZE "zu" ///< printf macro to print a size_t value in non-Windows printf codes #define GETOPT getopt // points to system char * version #define OPTIND optind // points to system char * variable #define OPTARG optarg // points to system char * variable #endif #endif sleuthkit-4.2.0/tsk/base/tsk_parse.c000644 000765 000024 00000005462 12576320700 020206 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2005-2011 Brian Carrier. All Rights reserved * * This software is distributed under the Common Public License 1.0 */ #include "tsk_base_i.h" /** * \file tsk_parse.c * Contains code to parse specific types of data from * the command line */ /** * \ingroup baselib * Parse a TSK_TCHAR block address string. * Note that the cnt\@size format is no longer supported. * Set the device sector size in img_open to set the block size. 
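// Usage sketch (editor illustration): writing one code path that works with
// both the UTF-16 (Windows) and UTF-8 (other platforms) flavours of
// TSK_TCHAR by sticking to the _TSK_T() literal wrapper and the TSTR*/PRI*
// macros above.  The argument handling is invented for the example.
#include <stdio.h>

static int example_main(int argc, TSK_TCHAR **argv)
{
    if (argc < 2) {
        TFPRINTF(stderr, _TSK_T("usage: %s image\n"), argv[0]);
        return 1;
    }

    // PRIttocTSK prints a TSK_TCHAR string through a plain char format string.
    tsk_fprintf(stderr, "opening %" PRIttocTSK "\n", argv[1]);

    return TSTRLEN(argv[1]) == 0;
}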
* * @param [in] a_offset_str The string version of the offset * @return -1 on error or block offset on success */ TSK_OFF_T tsk_parse_offset(const TSK_TCHAR * a_offset_str) { TSK_TCHAR offset_lcl[64], *offset_lcl_p; TSK_DADDR_T num_blk; TSK_TCHAR *cp; if (a_offset_str == NULL) { return 0; } if (TSTRLEN(a_offset_str) > 63) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_IMG_OFFSET); tsk_error_set_errstr("tsk_parse: offset string is too long: %" PRIttocTSK, a_offset_str); return -1; } /* Make a local copy */ TSTRNCPY(offset_lcl, a_offset_str, 64); offset_lcl_p = offset_lcl; /* Check for the old x@y setup */ if (TSTRCHR(offset_lcl_p, '@') != NULL) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_IMG_OFFSET); tsk_error_set_errstr ("tsk_parse: offset string format no longer supported. Use -b to specify sector size: %" PRIttocTSK, a_offset_str); return -1; } offset_lcl_p = offset_lcl; /* remove leading 0s */ while ((offset_lcl_p[0] != '\0') && (offset_lcl_p[0] == '0')) offset_lcl_p++; num_blk = 0; if (offset_lcl_p[0] != '\0') { num_blk = TSTRTOULL(offset_lcl_p, &cp, 0); if (*cp || *cp == *offset_lcl_p) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_IMG_OFFSET); tsk_error_set_errstr("tsk_parse: invalid image offset: %" PRIttocTSK, offset_lcl_p); return -1; } } return num_blk; } /** * \ingroup baselib * Parse a TSK_TCHAR string of a partition byte offset and the * integer version of it. * * @param [in] a_pnum_str The string version of the address * @param [out] a_pnum The parsed integer version of the address * @return 1 on error and 0 on success */ int tsk_parse_pnum(const TSK_TCHAR * a_pnum_str, TSK_PNUM_T * a_pnum) { TSK_TCHAR *cp; if (a_pnum_str == NULL) { return 0; } *a_pnum = TSTRTOUL(a_pnum_str, &cp, 0); if (*cp || *cp == *a_pnum_str) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_IMG_OFFSET); tsk_error_set_errstr("tsk_parse: invalid partition address: %" PRIttocTSK, a_pnum_str); return 1; } return 0; } sleuthkit-4.2.0/tsk/base/tsk_printf.c000644 000765 000024 00000005735 12576320700 020401 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2007-2011 Brian Carrier. All Rights reserved * * This software is distributed under the Common Public License 1.0 */ /** \file tsk_printf.c * These are printf wrappers that are needed so that we can * easily print in both Unix and Windows. For Unix, the * internal UTF-8 representation is kept and a normal printf * is performed. For Windows, the UTF-8 representation is first * converted to UTF-16 and then printed */ #include "tsk_base_i.h" #include /** \internal * Convert the UTF-8 printf arguments to UTF-16 and fill in the * printf types (%s, %d, etc.) 
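// Usage sketch (editor illustration): parsing the kind of command-line
// values these helpers are meant for, e.g. an "-o" image offset in sectors
// and a partition number.  The literal strings are invented.
#include <stdio.h>

static void parse_example(void)
{
    TSK_OFF_T off;
    TSK_PNUM_T pnum;

    off = tsk_parse_offset(_TSK_T("63"));
    if (off == -1) {
        tsk_error_print(stderr);    // the offset string was malformed
        return;
    }

    if (tsk_parse_pnum(_TSK_T("2"), &pnum)) {
        tsk_error_print(stderr);    // returns 1 on error, 0 on success
        return;
    }
}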
* * @param wbuf wide char string to write result to * @param wlen number of characters in wbuf * @param msg printf message string * @param args Arguments to use when filling in message string * @returns 1 on error and 0 on success */ #ifdef TSK_WIN32 static int tsk_printf_conv(WCHAR * wbuf, int wlen, const char *msg, va_list * args) { char *cbuf; UTF8 *ptr8; UTF16 *ptr16; int retVal; size_t len, clen; wbuf[0] = '\0'; /* Allocate a UTF-8 buffer and process the printf args */ clen = wlen * 3; if (NULL == (cbuf = (char *) tsk_malloc(clen))) { return 1; } #ifdef _MSC_VER vsnprintf_s(cbuf, clen - 1, _TRUNCATE, msg, *args); #else vsnprintf(cbuf, clen - 1, msg, *args); #endif len = strlen(cbuf); //Convert to UTF-16 ptr8 = (UTF8 *) cbuf; ptr16 = (UTF16 *) wbuf; retVal = tsk_UTF8toUTF16((const UTF8 **) &ptr8, &ptr8[len + 1], &ptr16, &ptr16[wlen], TSKlenientConversion); if (retVal != TSKconversionOK) { *ptr16 = '\0'; if (tsk_verbose) tsk_fprintf(stderr, "tsk_printf_conv: error converting string to UTF-16\n"); } free(cbuf); return 0; } #endif /** * \ingroup baselib * fprintf wrapper function that takes UTF-8 strings as input * (on all platforms) and does what is necessary to output * strings in the correct encoding (UTF-8 on Unix and * UTF-16 on Windows). * * @param fd File to print to * @param msg printf message */ void tsk_fprintf(FILE * fd, const char *msg, ...) { va_list args; va_start(args, msg); #ifdef TSK_WIN32 { WCHAR wbuf[2048]; tsk_printf_conv(wbuf, 2048, msg, &args); fwprintf(fd, _TSK_T("%s"), wbuf); } #else vfprintf(fd, msg, args); #endif va_end(args); } /** * \ingroup baselib * printf wrapper function that takes UTF-8 strings as input * (on all platforms) and does what is necessary to output * strings in the correct encoding (UTF-8 on Unix and * UTF-16 on Windows). * * @param msg printf message */ void tsk_printf(const char *msg, ...) { va_list args; va_start(args, msg); #ifdef TSK_WIN32 { WCHAR wbuf[2048]; tsk_printf_conv(wbuf, 2048, msg, &args); wprintf(_TSK_T("%s"), wbuf); } #else vprintf(msg, args); #endif va_end(args); } sleuthkit-4.2.0/tsk/base/tsk_stack.c000644 000765 000024 00000004452 12576320700 020177 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2007-2011 Brian Carrier. All Rights reserved * * This software is distributed under the Common Public License 1.0 */ #include "tsk_base_i.h" /** \file tsk_stack.c * Contains the functions to create and maintain a stack, which supports basic * popping, pushing, and searching. These are used for finding loops when * recursing structures */ /** * \ingroup baselib * Create a TSK_STACK structure * @returns Pointer to structure or NULL on error */ TSK_STACK * tsk_stack_create() { TSK_STACK *tsk_stack; if ((tsk_stack = (TSK_STACK *) tsk_malloc(sizeof(TSK_STACK))) == NULL) { return NULL; } tsk_stack->len = 64; tsk_stack->top = 0; if ((tsk_stack->vals = (uint64_t *) tsk_malloc(tsk_stack->len * sizeof(uint64_t))) == NULL) { free(tsk_stack); return NULL; } return tsk_stack; } /** * \ingroup baselib * Push a value to the top of TSK_STACK. 
* @param a_tsk_stack Pointer to stack to push onto * @param a_val Value to push on * @returns 1 on error */ uint8_t tsk_stack_push(TSK_STACK * a_tsk_stack, uint64_t a_val) { if (a_tsk_stack->top == a_tsk_stack->len) { a_tsk_stack->len += 64; if ((a_tsk_stack->vals = (uint64_t *) tsk_realloc((char *) a_tsk_stack->vals, a_tsk_stack->len * sizeof(uint64_t))) == NULL) { return 1; } } a_tsk_stack->vals[a_tsk_stack->top++] = a_val; return 0; } /** * \ingroup baselib * Pop a value from the top of the stack. * @param a_tsk_stack Stack to pop from */ void tsk_stack_pop(TSK_STACK * a_tsk_stack) { a_tsk_stack->top--; } /** * \ingroup baselib * Search a TSK_STACK for a given value * @param a_tsk_stack Stack to search * @param a_val Value to search for * @returns 1 if found and 0 if not */ uint8_t tsk_stack_find(TSK_STACK * a_tsk_stack, uint64_t a_val) { size_t i; for (i = 0; i < a_tsk_stack->top; i++) { if (a_tsk_stack->vals[i] == a_val) return 1; } return 0; } /** * \ingroup baselib * Free an allocated TSK_STACK structure * @param a_tsk_stack Stack to free */ void tsk_stack_free(TSK_STACK * a_tsk_stack) { free(a_tsk_stack->vals); free(a_tsk_stack); } sleuthkit-4.2.0/tsk/base/tsk_unicode.c000644 000765 000024 00000064364 12576320700 020530 0ustar00carrierstaff000000 000000 /* * Copyright 2001-2004 Unicode, Inc. * * Disclaimer * * This source code is provided as is by Unicode, Inc. No claims are * made as to fitness for any particular purpose. No warranties of any * kind are expressed or implied. The recipient agrees to determine * applicability of information provided. If this file has been * purchased on magnetic or optical media from Unicode, Inc., the * sole remedy for any claim will be exchange of defective media * within 90 days of receipt. * * Limitations on Rights to Redistribute This Code * * Unicode, Inc. hereby grants the right to freely use the information * supplied in this file in the creation of products supporting the * Unicode Standard, and to make copies of this file in any form * for internal or external distribution as long as this notice * remains attached. */ /* --------------------------------------------------------------------- Conversions between UTF32, UTF-16, and UTF-8. Source code file. Author: Mark E. Davis, 1994. Rev History: Rick McGowan, fixes & updates May 2001. Sept 2001: fixed const & error conditions per mods suggested by S. Parent & A. Lillich. June 2002: Tim Dodd added detection and handling of incomplete source sequences, enhanced error detection, added casts to eliminate compiler warnings. July 2003: slight mods to back out aggressive FFFE detection. Jan 2004: updated switches in from-UTF8 conversions. Oct 2004: updated to use TSK_UNI_MAX_LEGAL_UTF32 in UTF-32 conversions. See the header file "ConvertUTF.h" for complete documentation. ------------------------------------------------------------------------ */ /** \file tsk_unicode.c * A local copy of the Unicode conversion routines from unicode.org. 
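// Usage sketch (editor illustration): the loop-detection pattern the stack
// supports while recursing a structure.  Push an address before descending,
// use find() to refuse to enter something already on the current path, and
// pop on the way back out.  The address value is invented.
#include <stddef.h>
#include <stdint.h>

static void walk_example(void)
{
    TSK_STACK *path = tsk_stack_create();
    if (path == NULL)
        return;

    uint64_t dir_addr = 5;          // metadata address about to be entered

    if (tsk_stack_find(path, dir_addr) == 0) {   // not already on the path: no loop
        tsk_stack_push(path, dir_addr);
        // ... recurse into the directory here ...
        tsk_stack_pop(path);
    }

    tsk_stack_free(path);
}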
*/ #include "tsk_base_i.h" /* Some fundamental constants */ typedef unsigned long UTF32; /* at least 32 bits */ #define TSK_UNI_REPLACEMENT_CHAR (UTF32)0x0000FFFD #define TSK_UNI_MAX_BMP (UTF32)0x0000FFFF #define TSK_UNI_MAX_UTF16 (UTF32)0x0010FFFF #define TSK_UNI_MAX_UTF32 (UTF32)0x7FFFFFFF #define TSK_UNI_MAX_LEGAL_UTF32 (UTF32)0x0010FFFF static const int halfShift = 10; /* used for shifting by 10 bits */ static const UTF32 halfBase = 0x0010000UL; static const UTF32 halfMask = 0x3FFUL; #define UNI_SUR_HIGH_START (UTF32)0xD800 #define UNI_SUR_HIGH_END (UTF32)0xDBFF #define UNI_SUR_LOW_START (UTF32)0xDC00 #define UNI_SUR_LOW_END (UTF32)0xDFFF #define false 0 #define true 1 /* --------------------------------------------------------------------- */ /* --------------------------------------------------------------------- */ /* * Index into the table below with the first byte of a UTF-8 sequence to * get the number of trailing bytes that are supposed to follow it. * Note that *legal* UTF-8 values can't have 4 or 5-bytes. The table is * left as-is for anyone who may want to do such conversion, which was * allowed in earlier algorithms. */ static const char trailingBytesForUTF8[256] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5 }; /* * Magic values subtracted from a buffer value during UTF8 conversion. * This table contains as many values as there might be trailing bytes * in a UTF-8 sequence. */ static const UTF32 offsetsFromUTF8[6] = { 0x00000000UL, 0x00003080UL, 0x000E2080UL, 0x03C82080UL, 0xFA082080UL, 0x82082080UL }; /* * Once the bits are split out into bytes of UTF-8, this is a mask OR-ed * into the first byte, depending on how many bytes follow. There are * as many entries in this table as there are UTF-8 sequence types. * (I.e., one byte sequence, two byte... etc.). Remember that sequencs * for *legal* UTF-8 will be 4 or fewer bytes total. */ static const UTF8 firstByteMark[7] = { 0x00, 0x00, 0xC0, 0xE0, 0xF0, 0xF8, 0xFC }; /* --------------------------------------------------------------------- */ /* The interface converts a whole buffer to avoid function-call overhead. * Constants have been gathered. Loops & conditionals have been removed as * much as possible for efficiency, in favor of drop-through switches. * (See "Note A" at the bottom of the file for equivalent code.) * If your compiler supports it, the "isLegalUTF8" call can be turned * into an inline function. */ /* --------------------------------------------------------------------- */ /** * \ingroup baselib * Convert a UTF-16 string to UTF-8. * @param endian Endian ordering flag of UTF-16 text * @param sourceStart Pointer to pointer to start of UTF-16 string. Will be updated to last char proccessed. 
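// Worked example (editor sketch): the surrogate-pair arithmetic used by the
// converters below, written out for one supplementary code point.  U+1F5C0
// is an arbitrary example value.
#include <assert.h>

static void surrogate_math_example(void)
{
    UTF32 cp = 0x1F5C0;             // above TSK_UNI_MAX_BMP, so it needs two UTF-16 units

    UTF32 v = cp - halfBase;                                        // 0xF5C0
    UTF16 hi = (UTF16) ((v >> halfShift) + UNI_SUR_HIGH_START);     // 0xD83D
    UTF16 lo = (UTF16) ((v & halfMask) + UNI_SUR_LOW_START);        // 0xDDC0

    // Decoding reverses the same steps (this is what the UTF-16 readers do).
    UTF32 back = ((hi - UNI_SUR_HIGH_START) << halfShift)
        + (lo - UNI_SUR_LOW_START) + halfBase;

    assert(back == cp);
}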
* @param sourceEnd Pointer to one entry past end of UTF-16 string * @param targetStart Pointer to pointer to place where UTF-8 string should be written. Will be updated to next place to write to. * @param targetEnd Pointer to end of UTF-8 buffer * @param flags Flags used during conversion * @returns error code */ TSKConversionResult tsk_UTF16toUTF8(TSK_ENDIAN_ENUM endian, const UTF16 ** sourceStart, const UTF16 * sourceEnd, UTF8 ** targetStart, UTF8 * targetEnd, TSKConversionFlags flags) { TSKConversionResult result = TSKconversionOK; const UTF16 *source = *sourceStart; UTF8 *target = *targetStart; while (source < sourceEnd) { UTF32 ch; unsigned short bytesToWrite = 0; const UTF32 byteMask = 0xBF; const UTF32 byteMark = 0x80; const UTF16 *oldSource = source; /* In case we have to back up because of target overflow. */ ch = tsk_getu16(endian, (uint8_t *) source); source++; /* If we have a surrogate pair, convert to UTF32 first. */ if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_HIGH_END) { /* If the 16 bits following the high surrogate are in the source buffer... */ if (source < sourceEnd) { UTF32 ch2 = tsk_getu16(endian, (uint8_t *) source); ++source; /* If it's a low surrogate, convert to UTF32. */ if (ch2 >= UNI_SUR_LOW_START && ch2 <= UNI_SUR_LOW_END) { ch = ((ch - UNI_SUR_HIGH_START) << halfShift) + (ch2 - UNI_SUR_LOW_START) + halfBase; } else if (flags == TSKstrictConversion) { /* it's an unpaired high surrogate */ result = TSKsourceIllegal; break; } // replace with another character else { ch = '^'; } } else { /* We don't have the 16 bits following the high surrogate. */ --source; /* return to the high surrogate */ result = TSKsourceExhausted; break; } } /* UTF-16 surrogate values are illegal in UTF-32 */ else if (ch >= UNI_SUR_LOW_START && ch <= UNI_SUR_LOW_END) { if (flags == TSKstrictConversion) { --source; /* return to the illegal value itself */ result = TSKsourceIllegal; break; } // replace with another character else { ch = '^'; } } /* Figure out how many bytes the result will require */ if (ch < (UTF32) 0x80) { bytesToWrite = 1; } else if (ch < (UTF32) 0x800) { bytesToWrite = 2; } else if (ch < (UTF32) 0x10000) { bytesToWrite = 3; } else if (ch < (UTF32) 0x110000) { bytesToWrite = 4; } else { bytesToWrite = 3; ch = TSK_UNI_REPLACEMENT_CHAR; } target += bytesToWrite; if (target > targetEnd) { source = oldSource; /* Back up source pointer! */ target -= bytesToWrite; result = TSKtargetExhausted; break; } switch (bytesToWrite) { /* note: everything falls through. */ case 4: *--target = (UTF8) ((ch | byteMark) & byteMask); ch >>= 6; case 3: *--target = (UTF8) ((ch | byteMark) & byteMask); ch >>= 6; case 2: *--target = (UTF8) ((ch | byteMark) & byteMask); ch >>= 6; case 1: *--target = (UTF8) (ch | firstByteMark[bytesToWrite]); } target += bytesToWrite; } *sourceStart = source; *targetStart = target; return result; } /** * \ingroup baselib * Convert a UTF-16 string in local endian ordering to UTF-8. * @param sourceStart Pointer to pointer to start of UTF-16 string. Will be updated to last char proccessed. * @param sourceEnd Pointer to one entry past end of UTF-16 string * @param targetStart Pointer to pointer to place where UTF-8 string should be written. Will be updated to next place to write to. 
* @param targetEnd Pointer to end of UTF-8 buffer * @param flags Flags used during conversion * @returns error code */ TSKConversionResult tsk_UTF16toUTF8_lclorder(const UTF16 ** sourceStart, const UTF16 * sourceEnd, UTF8 ** targetStart, UTF8 * targetEnd, TSKConversionFlags flags) { TSKConversionResult result = TSKconversionOK; const UTF16 *source = *sourceStart; UTF8 *target = *targetStart; while (source < sourceEnd) { UTF32 ch; unsigned short bytesToWrite = 0; const UTF32 byteMask = 0xBF; const UTF32 byteMark = 0x80; const UTF16 *oldSource = source; /* In case we have to back up because of target overflow. */ ch = *source++; /* If we have a surrogate pair, convert to UTF32 first. */ if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_HIGH_END) { /* If the 16 bits following the high surrogate are in the source buffer... */ if (source < sourceEnd) { UTF32 ch2 = *source; source++; /* If it's a low surrogate, convert to UTF32. */ if (ch2 >= UNI_SUR_LOW_START && ch2 <= UNI_SUR_LOW_END) { ch = ((ch - UNI_SUR_HIGH_START) << halfShift) + (ch2 - UNI_SUR_LOW_START) + halfBase; } else if (flags == TSKstrictConversion) { /* it's an unpaired high surrogate */ result = TSKsourceIllegal; break; } // replace with another character else { ch = '^'; } } else { /* We don't have the 16 bits following the high surrogate. */ --source; /* return to the high surrogate */ result = TSKsourceExhausted; break; } } /* UTF-16 surrogate values are illegal in UTF-32 */ else if (ch >= UNI_SUR_LOW_START && ch <= UNI_SUR_LOW_END) { if (flags == TSKstrictConversion) { --source; /* return to the illegal value itself */ result = TSKsourceIllegal; break; } // replace with another character else { ch = '^'; } } /* Figure out how many bytes the result will require */ if (ch < (UTF32) 0x80) { bytesToWrite = 1; } else if (ch < (UTF32) 0x800) { bytesToWrite = 2; } else if (ch < (UTF32) 0x10000) { bytesToWrite = 3; } else if (ch < (UTF32) 0x110000) { bytesToWrite = 4; } else { bytesToWrite = 3; ch = TSK_UNI_REPLACEMENT_CHAR; } target += bytesToWrite; if (target > targetEnd) { source = oldSource; /* Back up source pointer! */ target -= bytesToWrite; result = TSKtargetExhausted; break; } switch (bytesToWrite) { /* note: everything falls through. */ case 4: *--target = (UTF8) ((ch | byteMark) & byteMask); ch >>= 6; case 3: *--target = (UTF8) ((ch | byteMark) & byteMask); ch >>= 6; case 2: *--target = (UTF8) ((ch | byteMark) & byteMask); ch >>= 6; case 1: *--target = (UTF8) (ch | firstByteMark[bytesToWrite]); } target += bytesToWrite; } *sourceStart = source; *targetStart = target; return result; } TSKConversionResult tsk_UTF16WtoUTF8_lclorder(const wchar_t ** sourceStart, const wchar_t * sourceEnd, UTF8 ** targetStart, UTF8 * targetEnd, TSKConversionFlags flags) { TSKConversionResult result = TSKconversionOK; const wchar_t *source = *sourceStart; UTF8 *target = *targetStart; while (source < sourceEnd) { UTF32 ch; unsigned short bytesToWrite = 0; const UTF32 byteMask = 0xBF; const UTF32 byteMark = 0x80; const wchar_t *oldSource = source; /* In case we have to back up because of target overflow. */ ch = *source++; /* If we have a surrogate pair, convert to UTF32 first. */ if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_HIGH_END) { /* If the 16 bits following the high surrogate are in the source buffer... */ if (source < sourceEnd) { UTF32 ch2 = *source; source++; /* If it's a low surrogate, convert to UTF32. 
*/ if (ch2 >= UNI_SUR_LOW_START && ch2 <= UNI_SUR_LOW_END) { ch = ((ch - UNI_SUR_HIGH_START) << halfShift) + (ch2 - UNI_SUR_LOW_START) + halfBase; } else if (flags == TSKstrictConversion) { /* it's an unpaired high surrogate */ result = TSKsourceIllegal; break; } // replace with another character else { ch = '^'; } } else { /* We don't have the 16 bits following the high surrogate. */ --source; /* return to the high surrogate */ result = TSKsourceExhausted; break; } } /* UTF-16 surrogate values are illegal in UTF-32 */ else if (ch >= UNI_SUR_LOW_START && ch <= UNI_SUR_LOW_END) { if (flags == TSKstrictConversion) { --source; /* return to the illegal value itself */ result = TSKsourceIllegal; break; } // replace with another character else { ch = '^'; } } /* Figure out how many bytes the result will require */ if (ch < (UTF32) 0x80) { bytesToWrite = 1; } else if (ch < (UTF32) 0x800) { bytesToWrite = 2; } else if (ch < (UTF32) 0x10000) { bytesToWrite = 3; } else if (ch < (UTF32) 0x110000) { bytesToWrite = 4; } else { bytesToWrite = 3; ch = TSK_UNI_REPLACEMENT_CHAR; } target += bytesToWrite; if (target > targetEnd) { source = oldSource; /* Back up source pointer! */ target -= bytesToWrite; result = TSKtargetExhausted; break; } switch (bytesToWrite) { /* note: everything falls through. */ case 4: *--target = (UTF8) ((ch | byteMark) & byteMask); ch >>= 6; case 3: *--target = (UTF8) ((ch | byteMark) & byteMask); ch >>= 6; case 2: *--target = (UTF8) ((ch | byteMark) & byteMask); ch >>= 6; case 1: *--target = (UTF8) (ch | firstByteMark[bytesToWrite]); } target += bytesToWrite; } *sourceStart = source; *targetStart = target; return result; } /* --------------------------------------------------------------------- */ /* * Utility routine to tell whether a sequence of bytes is legal UTF-8. * This must be called with the length pre-determined by the first byte. * If not calling this from ConvertUTF8to*, then the length can be set by: * length = trailingBytesForUTF8[*source]+1; * and the sequence is illegal right away if there aren't that many bytes * available. * If presented with a length > 4, this returns false. The Unicode * definition of UTF-8 goes up to 4-byte sequences. */ static Boolean isLegalUTF8(const UTF8 * source, int length) { UTF8 a; const UTF8 *srcptr = source + length; switch (length) { default: return false; /* Everything else falls through when "true"... */ case 4: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return false; case 3: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return false; case 2: if ((a = (*--srcptr)) > 0xBF) return false; switch (*source) { /* no fall-through in this inner switch */ case 0xE0: if (a < 0xA0) return false; break; case 0xED: if (a > 0x9F) return false; break; case 0xF0: if (a < 0x90) return false; break; case 0xF4: if (a > 0x8F) return false; break; default: if (a < 0x80) return false; } case 1: if (*source >= 0x80 && *source < 0xC2) return false; } if (*source > 0xF4) return false; return true; } /* --------------------------------------------------------------------- */ /* * Exported function to return whether a UTF-8 sequence is legal or not. * This is not used here; it's just exported. */ Boolean tsk_isLegalUTF8Sequence(const UTF8 * source, const UTF8 * sourceEnd) { int length = trailingBytesForUTF8[*source] + 1; if (source + length > sourceEnd) { return false; } return isLegalUTF8(source, length); } /** * Cleans up the passed in string to replace invalid * UTF-8 values with the passed in character. 
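// Usage sketch (editor illustration): sanitizing a possibly corrupt UTF-8
// name recovered from disk before handing it to display code.  The byte
// values are invented; 0xC0 0x80 is an overlong, illegal sequence.
static void cleanup_example(void)
{
    char name[] = { 'f', 'i', 'l', 'e', (char) 0xC0, (char) 0x80, '!', '\0' };

    if (!tsk_isLegalUTF8Sequence((UTF8 *) &name[4], (UTF8 *) &name[7])) {
        // In-place repair: every byte of an illegal sequence becomes '^'.
        tsk_cleanupUTF8(name, '^');
    }
    // name is now "file^^!"
}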
* @param source String to be cleaned up * @param replacement Character to insert into source as needed. */ void tsk_cleanupUTF8(char *source, const char replacement) { size_t total_len = strlen(source); size_t cur_idx = 0; while (cur_idx < total_len) { int length = trailingBytesForUTF8[(UTF8) source[cur_idx]] + 1; if (cur_idx + length > total_len) { while (cur_idx < total_len) { source[cur_idx] = replacement; cur_idx++; } break; } if (isLegalUTF8((UTF8 *) & source[cur_idx], length) == false) { int i; for (i = 0; i < length; i++) { source[cur_idx + i] = replacement; } } cur_idx += length; } } /* --------------------------------------------------------------------- */ /** * \ingroup baselib * Convert a UTF-8 string to UTF-16 (in local endian ordering). * @param sourceStart Pointer to pointer to start of UTF-8 string. Will be updated to last char proccessed. * @param sourceEnd Pointer to one entry past end of UTF-8 string * @param targetStart Pointer to pointer to place where UTF-16 string should be written. Will be updated to next place to write to. * @param targetEnd Pointer to end of UTF-16 buffer * @param flags Flags used during conversion * @returns error code */ TSKConversionResult tsk_UTF8toUTF16(const UTF8 ** sourceStart, const UTF8 * sourceEnd, UTF16 ** targetStart, UTF16 * targetEnd, TSKConversionFlags flags) { TSKConversionResult result = TSKconversionOK; const UTF8 *source = *sourceStart; UTF16 *target = *targetStart; while (source < sourceEnd) { UTF32 ch = 0; unsigned short extraBytesToRead = trailingBytesForUTF8[*source]; if (source + extraBytesToRead >= sourceEnd) { result = TSKsourceExhausted; break; } /* Do this check whether lenient or strict */ if (!isLegalUTF8(source, extraBytesToRead + 1)) { result = TSKsourceIllegal; break; } /* * The cases all fall through. See "Note A" below. */ switch (extraBytesToRead) { case 5: ch += *source++; ch <<= 6; /* remember, illegal UTF-8 */ case 4: ch += *source++; ch <<= 6; /* remember, illegal UTF-8 */ case 3: ch += *source++; ch <<= 6; case 2: ch += *source++; ch <<= 6; case 1: ch += *source++; ch <<= 6; case 0: ch += *source++; } ch -= offsetsFromUTF8[extraBytesToRead]; if (target >= targetEnd) { source -= (extraBytesToRead + 1); /* Back up source pointer! */ result = TSKtargetExhausted; break; } if (ch <= TSK_UNI_MAX_BMP) { /* Target is a character <= 0xFFFF */ /* UTF-16 surrogate values are illegal in UTF-32 */ if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_LOW_END) { if (flags == TSKstrictConversion) { source -= (extraBytesToRead + 1); /* return to the illegal value itself */ result = TSKsourceIllegal; break; } else { *target++ = TSK_UNI_REPLACEMENT_CHAR; } } else { *target++ = (UTF16) ch; /* normal case */ } } else if (ch > TSK_UNI_MAX_UTF16) { if (flags == TSKstrictConversion) { result = TSKsourceIllegal; source -= (extraBytesToRead + 1); /* return to the start */ break; /* Bail out; shouldn't continue */ } else { *target++ = TSK_UNI_REPLACEMENT_CHAR; } } else { /* target is a character in range 0xFFFF - 0x10FFFF. */ if (target + 1 >= targetEnd) { source -= (extraBytesToRead + 1); /* Back up source pointer! 
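                 * On TSKtargetExhausted the caller can enlarge the target
                 * buffer and call the function again; conversion resumes at
                 * this character because *sourceStart is updated before
                 * returning.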
*/ result = TSKtargetExhausted; break; } ch -= halfBase; *target++ = (UTF16) ((ch >> halfShift) + UNI_SUR_HIGH_START); *target++ = (UTF16) ((ch & halfMask) + UNI_SUR_LOW_START); } } *sourceStart = source; *targetStart = target; return result; } TSKConversionResult tsk_UTF8toUTF16W(const UTF8 ** sourceStart, const UTF8 * sourceEnd, wchar_t ** targetStart, wchar_t * targetEnd, TSKConversionFlags flags) { TSKConversionResult result = TSKconversionOK; const UTF8 *source = *sourceStart; wchar_t *target = *targetStart; while (source < sourceEnd) { UTF32 ch = 0; unsigned short extraBytesToRead = trailingBytesForUTF8[*source]; if (source + extraBytesToRead >= sourceEnd) { result = TSKsourceExhausted; break; } /* Do this check whether lenient or strict */ if (!isLegalUTF8(source, extraBytesToRead + 1)) { result = TSKsourceIllegal; break; } /* * The cases all fall through. See "Note A" below. */ switch (extraBytesToRead) { case 5: ch += *source++; ch <<= 6; /* remember, illegal UTF-8 */ case 4: ch += *source++; ch <<= 6; /* remember, illegal UTF-8 */ case 3: ch += *source++; ch <<= 6; case 2: ch += *source++; ch <<= 6; case 1: ch += *source++; ch <<= 6; case 0: ch += *source++; } ch -= offsetsFromUTF8[extraBytesToRead]; if (target >= targetEnd) { source -= (extraBytesToRead + 1); /* Back up source pointer! */ result = TSKtargetExhausted; break; } if (ch <= TSK_UNI_MAX_BMP) { /* Target is a character <= 0xFFFF */ /* UTF-16 surrogate values are illegal in UTF-32 */ if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_LOW_END) { if (flags == TSKstrictConversion) { source -= (extraBytesToRead + 1); /* return to the illegal value itself */ result = TSKsourceIllegal; break; } else { *target++ = TSK_UNI_REPLACEMENT_CHAR; } } else { *target++ = (wchar_t) ch; /* normal case */ } } else if (ch > TSK_UNI_MAX_UTF16) { if (flags == TSKstrictConversion) { result = TSKsourceIllegal; source -= (extraBytesToRead + 1); /* return to the start */ break; /* Bail out; shouldn't continue */ } else { *target++ = TSK_UNI_REPLACEMENT_CHAR; } } else { /* target is a character in range 0xFFFF - 0x10FFFF. */ if (target + 1 >= targetEnd) { source -= (extraBytesToRead + 1); /* Back up source pointer! */ result = TSKtargetExhausted; break; } ch -= halfBase; *target++ = (wchar_t) ((ch >> halfShift) + UNI_SUR_HIGH_START); *target++ = (wchar_t) ((ch & halfMask) + UNI_SUR_LOW_START); } } *sourceStart = source; *targetStart = target; return result; } sleuthkit-4.2.0/tsk/base/tsk_version.c000644 000765 000024 00000001441 12576320700 020552 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit * * Brian Carrier [carrier sleuthkit [dot] org] * Copyright (c) 2007-2011 Brian Carrier. All rights reserved * * This software is distributed under the Common Public License 1.0 */ /** \file tsk_version.c * Contains functions to print and obtain the library version. */ #include "tsk_base_i.h" /** * \ingroup baselib * Print the library name and version to a handle (such as "The Sleuth Kit ver 1.00"). * @param hFile Handle to print to */ void tsk_version_print(FILE * hFile) { tsk_fprintf(hFile, "The Sleuth Kit ver %s\n", TSK_VERSION_STR); return; } /** * \ingroup baselib * Return the library version as a string. 
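 *
 * For example (a sketch):
 *   printf("Using The Sleuth Kit %s\n", tsk_version_get_str());
 *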
* @returns String version of version (1.00 for example) */ const char * tsk_version_get_str() { return TSK_VERSION_STR; } sleuthkit-4.2.0/tsk/base/XGetopt.c000644 000765 000024 00000016223 12576320700 017602 0ustar00carrierstaff000000 000000 /* * The Sleuth Kit */ // XGetopt.cpp Version 1.2 // // Author: Hans Dietrich // hdietrich2@hotmail.com // // Description: // XGetopt.cpp implements getopt(), a function to parse command lines. // // History // Version 1.2 - 2003 May 17 // - Added Unicode support // // Version 1.1 - 2002 March 10 // - Added example to XGetopt.cpp module header // // This software is released into the public domain. // You are free to use it in any way you like. // // This software is provided "as is" with no expressed // or implied warranty. I accept no liability for any // damage or loss of business that this software may cause. // /////////////////////////////////////////////////////////////////////////////// /** \file XGetopt.c * Parses arguments for win32 programs -- written by Hans Dietrich. */ #include "tsk_base_i.h" #ifdef TSK_WIN32 /////////////////////////////////////////////////////////////////////////////// // if you are using precompiled headers then include this line: //#include "stdafx.h" /////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////// // if you are not using precompiled headers then include these lines: #include /////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////// // // X G e t o p t . c p p // // // NAME // getopt -- parse command line options // // SYNOPSIS // int getopt(int argc, TSK_TCHAR *argv[], TSK_TCHAR *optstring) // // extern TSK_TCHAR *optarg; // extern int optind; // // DESCRIPTION // The getopt() function parses the command line arguments. Its // arguments argc and argv are the argument count and array as // passed into the application on program invocation. In the case // of Visual C++ programs, argc and argv are available via the // variables __argc and __argv (double underscores), respectively. // getopt returns the next option letter in argv that matches a // letter in optstring. (Note: Unicode programs should use // __targv instead of __argv. Also, all character and string // literals should be enclosed in _TSK_T( ) ). // // optstring is a string of recognized option letters; if a letter // is followed by a colon, the option is expected to have an argument // that may or may not be separated from it by white space. optarg // is set to point to the start of the option argument on return from // getopt. // // Option letters may be combined, e.g., "-ab" is equivalent to // "-a -b". Option letters are case sensitive. // // getopt places in the external variable optind the argv index // of the next argument to be processed. optind is initialized // to 0 before the first call to getopt. // // When all options have been processed (i.e., up to the first // non-option argument), getopt returns EOF, optarg will point // to the argument, and optind will be set to the argv index of // the argument. If there are no non-option arguments, optarg // will be set to NULL. // // The special option "--" may be used to delimit the end of the // options; EOF will be returned, and "--" (and everything after it) // will be skipped. 
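//
// NOTE
//    The Sleuth Kit exports this implementation as tsk_getopt(); the
//    parser state lives in tsk_optarg and tsk_optind, and resetting
//    tsk_optind to 0 restarts parsing of a new argument vector.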
// // RETURN VALUE // For option letters contained in the string optstring, getopt // will return the option letter. getopt returns a question mark (?) // when it encounters an option letter not included in optstring. // EOF is returned when processing is finished. // // BUGS // 1) Long options are not supported. // 2) The GNU double-colon extension is not supported. // 3) The environment variable POSIXLY_CORRECT is not supported. // 4) The + syntax is not supported. // 5) The automatic permutation of arguments is not supported. // 6) This implementation of getopt() returns EOF if an error is // encountered, instead of -1 as the latest standard requires. // // EXAMPLE // BOOL CMyApp::ProcessCommandLine(int argc, TSK_TCHAR *argv[]) // { // int c; // // while ((c = getopt(argc, argv, _TSK_T("aBn:"))) != EOF) // { // switch (c) // { // case _TSK_T('a'): // TRACE(_TSK_T("option a\n")); // // // // set some flag here // // // break; // // case _TSK_T('B'): // TRACE( _TSK_T("option B\n")); // // // // set some other flag here // // // break; // // case _TSK_T('n'): // TRACE(_TSK_T("option n: value=%d\n"), atoi(optarg)); // // // // do something with value here // // // break; // // case _TSK_T('?'): // TRACE(_TSK_T("ERROR: illegal option %s\n"), argv[optind-1]); // return FALSE; // break; // // default: // TRACE(_TSK_T("WARNING: no handler for option %c\n"), c); // return FALSE; // break; // } // } // // // // check for non-option args here // // // return TRUE; // } // /////////////////////////////////////////////////////////////////////////////// TSK_TCHAR *tsk_optarg; // global argument pointer int tsk_optind = 0; // global argv index int tsk_getopt(int argc, TSK_TCHAR * const argv[], const TSK_TCHAR * optstring) { static TSK_TCHAR *next = NULL; TSK_TCHAR c, *cp; if (tsk_optind == 0) next = NULL; tsk_optarg = NULL; if (next == NULL || *next == _TSK_T('\0')) { if (tsk_optind == 0) tsk_optind++; if (tsk_optind >= argc || argv[tsk_optind][0] != _TSK_T('-') || argv[tsk_optind][1] == _TSK_T('\0')) { tsk_optarg = NULL; if (tsk_optind < argc) tsk_optarg = argv[tsk_optind]; return EOF; } if (TSTRCMP(argv[tsk_optind], _TSK_T("--")) == 0) { tsk_optind++; tsk_optarg = NULL; if (tsk_optind < argc) tsk_optarg = argv[tsk_optind]; return EOF; } next = argv[tsk_optind]; next++; // skip past - tsk_optind++; } c = *next++; cp = (TSK_TCHAR *) TSTRCHR(optstring, c); if (cp == NULL || c == _TSK_T(':')) return _TSK_T('?'); cp++; if (*cp == _TSK_T(':')) { if (*next != _TSK_T('\0')) { tsk_optarg = next; next = NULL; } else if (tsk_optind < argc) { tsk_optarg = argv[tsk_optind]; tsk_optind++; } else { return _TSK_T('?'); } } return c; } #endif sleuthkit-4.2.0/tsk/auto/.indent.pro000644 000765 000024 00000000036 12576320700 020156 0ustar00carrierstaff000000 000000 -kr -psl -nce -ip2 -nlp -nut sleuthkit-4.2.0/tsk/auto/auto.cpp000644 000765 000024 00000053401 12576320700 017555 0ustar00carrierstaff000000 000000 /* ** The Sleuth Kit ** ** Brian Carrier [carrier sleuthkit [dot] org] ** Copyright (c) 2010-2013 Brian Carrier. All Rights reserved ** ** This software is distributed under the Common Public License 1.0 ** */ /** * \file auto.cpp * Contains C++ code that creates the base file extraction automation class. 
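 *
 * A minimal usage sketch (MyExtractor and imgPath are hypothetical):
 *
 *   class MyExtractor : public TskAuto {
 *       virtual TSK_RETVAL_ENUM processFile(TSK_FS_FILE * fs_file,
 *           const char *path) {
 *           // examine each file and directory entry here
 *           return TSK_OK;
 *       }
 *   };
 *
 *   MyExtractor ex;
 *   if (ex.openImage(1, &imgPath, TSK_IMG_TYPE_DETECT, 0) == 0) {
 *       ex.findFilesInImg();
 *       ex.closeImage();
 *   }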
*/ #include "tsk_auto_i.h" #include "tsk/fs/tsk_fatxxfs.h" // @@@ Follow through some error paths for sanity check and update docs somewhere to relfect the new scheme TskAuto::TskAuto() { m_img_info = NULL; m_tag = TSK_AUTO_TAG; m_volFilterFlags = (TSK_VS_PART_FLAG_ENUM)(TSK_VS_PART_FLAG_ALLOC | TSK_VS_PART_FLAG_UNALLOC); m_fileFilterFlags = TSK_FS_DIR_WALK_FLAG_RECURSE; m_stopAllProcessing = false; m_internalOpen = false; m_curVsPartValid = false; m_curVsPartDescr = ""; } TskAuto::~TskAuto() { closeImage(); m_tag = 0; } void TskAuto::setCurVsPart(const TSK_VS_PART_INFO *partInfo) { if (partInfo->desc) m_curVsPartDescr = partInfo->desc; else m_curVsPartDescr = ""; m_curVsPartFlag = partInfo->flags; m_curVsPartValid = true; } std::string TskAuto::getCurVsPartDescr() const { return m_curVsPartDescr; } TSK_VS_PART_FLAG_ENUM TskAuto::getCurVsPartFlag() const { return m_curVsPartFlag; } bool TskAuto::isCurVsValid() const { return m_curVsPartValid; } /** * Opens the disk image to be analyzed. This must be called before any * of the findFilesInXXX() methods. * * @param a_numImg The number of images to open (will be > 1 for split images). * @param a_images The path to the image files (the number of files must * be equal to num_img and they must be in a sorted order) * @param a_imgType The disk image type (can be autodetection) * @param a_sSize Size of device sector in bytes (or 0 for default) * @returns 1 on error (messages were NOT registered), 0 on success */ uint8_t TskAuto::openImage(int a_numImg, const TSK_TCHAR * const a_images[], TSK_IMG_TYPE_ENUM a_imgType, unsigned int a_sSize) { resetErrorList(); if (m_img_info) closeImage(); m_internalOpen = true; m_img_info = tsk_img_open(a_numImg, a_images, a_imgType, a_sSize); if (m_img_info) return 0; else return 1; } /** * Opens the disk image to be analyzed. This must be called before any * of the findFilesInXXX() methods. Always uses the utf8 tsk_img_open * even in windows. * * @param a_numImg The number of images to open (will be > 1 for split images). * @param a_images The path to the image files (the number of files must * be equal to num_img and they must be in a sorted order) * @param a_imgType The disk image type (can be autodetection) * @param a_sSize Size of device sector in bytes (or 0 for default) * @returns 1 on error (messages were NOT registered), 0 on success */ uint8_t TskAuto::openImageUtf8(int a_numImg, const char *const a_images[], TSK_IMG_TYPE_ENUM a_imgType, unsigned int a_sSize) { resetErrorList(); if (m_img_info) closeImage(); m_internalOpen = true; m_img_info = tsk_img_open_utf8(a_numImg, a_images, a_imgType, a_sSize); if (m_img_info) return 0; else return 1; } /** * Uses the already opened image for future analysis. This must be called before any * of the findFilesInXXX() methods. Note that the TSK_IMG_INFO will not * be freed when the TskAuto class is closed. * @param a_img_info Handle to an already opened disk image. * @returns 1 on error (messages were NOT registered) and 0 on success */ uint8_t TskAuto::openImageHandle(TSK_IMG_INFO * a_img_info) { resetErrorList(); if (m_img_info) closeImage(); m_internalOpen = false; m_img_info = a_img_info; return 0; } /** * Closes the handles to the open disk image. Should be called after * you have completed analysis of the image. */ void TskAuto::closeImage() { if ((m_img_info) && (m_internalOpen)) { tsk_img_close(m_img_info); } m_img_info = NULL; } /** * Set the attributes for the volumes that should be processed. * The default settings are for Allocated Non-Meta volumes only. 
* This must be called before the findFilesInXX() method. * @param vs_flags Flags to use for filtering */ void TskAuto::setVolFilterFlags(TSK_VS_PART_FLAG_ENUM vs_flags) { m_volFilterFlags = vs_flags; } /** * Set the attributes for the files that should be processed. * The default settings are for all files (allocated and deleted). * This must be called before the findFilesInXX() method. * @param file_flags Flags to use for filtering */ void TskAuto::setFileFilterFlags(TSK_FS_DIR_WALK_FLAG_ENUM file_flags) { m_fileFilterFlags = file_flags; } /** * @return The size of the image in bytes or -1 if the * image is not open. */ TSK_OFF_T TskAuto::getImageSize() const { if (m_img_info == NULL) return -1; return m_img_info->size; } TSK_FILTER_ENUM TskAuto::filterVs(const TSK_VS_INFO * vs_info) { return TSK_FILTER_CONT; } TSK_FILTER_ENUM TskAuto::filterVol(const TSK_VS_PART_INFO * vs_part) { return TSK_FILTER_CONT; } TSK_FILTER_ENUM TskAuto::filterFs(TSK_FS_INFO * fs_info) { return TSK_FILTER_CONT; }; /** * Starts in sector 0 of the opened disk images and looks for a * volume or file system. Will call processFile() on each file * that is found. * @return 1 if an error occured (message will have been registered) and 0 on success */ uint8_t TskAuto::findFilesInImg() { if (!m_img_info) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_AUTO_NOTOPEN); tsk_error_set_errstr("findFilesInImg -- img_info"); registerError(); return 1; } return findFilesInVs(0); } /** \internal * Volume system walk callback function that will analyze * each volume to find a file system. * Does not return ERROR because all errors have been registered * and returning an error would indicate to TSK that errno and such are set. */ TSK_WALK_RET_ENUM TskAuto::vsWalkCb(TSK_VS_INFO * a_vs_info, const TSK_VS_PART_INFO * a_vs_part, void *a_ptr) { TskAuto *tsk = (TskAuto *) a_ptr; if (tsk->m_tag != TSK_AUTO_TAG) { // we have no way to register an error... return TSK_WALK_STOP; } //save the current volume description tsk->setCurVsPart(a_vs_part); // see if the super class wants to continue with this. TSK_FILTER_ENUM retval1 = tsk->filterVol(a_vs_part); if (retval1 == TSK_FILTER_SKIP) return TSK_WALK_CONT; else if ((retval1 == TSK_FILTER_STOP) || (tsk->getStopProcessing())) return TSK_WALK_STOP; // process it TSK_RETVAL_ENUM retval2 = tsk->findFilesInFsRet(a_vs_part->start * a_vs_part->vs->block_size, TSK_FS_TYPE_DETECT); if ((retval2 == TSK_STOP) || (tsk->getStopProcessing())) { return TSK_WALK_STOP; } //all errors would have been registered return TSK_WALK_CONT; } /** * Starts in a specified byte offset of the opened disk images and looks for a * volume system or file system. Will call processFile() on each file * that is found. * @param a_start Byte offset to start analyzing from. * @param a_vtype Volume system type to analyze * @return 1 if an error occured (messages will have been registered) and 0 on success */ uint8_t TskAuto::findFilesInVs(TSK_OFF_T a_start, TSK_VS_TYPE_ENUM a_vtype) { if (!m_img_info) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_AUTO_NOTOPEN); tsk_error_set_errstr("findFilesInVs -- img_info"); registerError(); return 1; } TSK_VS_INFO *vs_info; // USE mm_walk to get the volumes if ((vs_info = tsk_vs_open(m_img_info, a_start, a_vtype)) == NULL) { /* we're going to ignore this error to avoid confusion if the * fs_open passes. 
*/ tsk_error_reset(); if(tsk_verbose) fprintf(stderr, "findFilesInVs: Error opening volume system, trying as a file system\n"); /* There was no volume system, but there could be a file system * Errors will have been registered */ findFilesInFs(a_start); } // process the volume system else { TSK_FILTER_ENUM retval = filterVs(vs_info); if ((retval == TSK_FILTER_STOP) || (retval == TSK_FILTER_SKIP)|| (m_stopAllProcessing)) return m_errors.empty() ? 0 : 1; /* Walk the allocated volumes (skip metadata and unallocated volumes) */ if (tsk_vs_part_walk(vs_info, 0, vs_info->part_count - 1, m_volFilterFlags, vsWalkCb, this)) { registerError(); tsk_vs_close(vs_info); return 1; } tsk_vs_close(vs_info); } return m_errors.empty() ? 0 : 1; } /** * Starts in a specified byte offset of the opened disk images and looks for a * volume system or file system. Will call processFile() on each file * that is found. * @param a_start Byte offset to start analyzing from. * @return 1 if an error occured (message will have been registered), 0 on success */ uint8_t TskAuto::findFilesInVs(TSK_OFF_T a_start) { return findFilesInVs(a_start, TSK_VS_TYPE_DETECT); } /** * Starts in a specified byte offset of the opened disk images and looks for a * file system. Will call processFile() on each file * that is found. Same as findFilesInFs, but gives more detailed return values. * @param a_start Byte offset to start analyzing from. * @param a_ftype File system type. * @returns Error (messages will have been registered), OK, or STOP. */ TSK_RETVAL_ENUM TskAuto::findFilesInFsRet(TSK_OFF_T a_start, TSK_FS_TYPE_ENUM a_ftype) { if (!m_img_info) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_AUTO_NOTOPEN); tsk_error_set_errstr("findFilesInFsRet -- img_info"); registerError(); return TSK_ERR; } TSK_FS_INFO *fs_info; if ((fs_info = tsk_fs_open_img(m_img_info, a_start, a_ftype)) == NULL) { if (isCurVsValid() == false) { tsk_error_set_errstr2 ("Sector offset: %" PRIuOFF, a_start/512); registerError(); return TSK_ERR; } else if (getCurVsPartFlag() & TSK_VS_PART_FLAG_ALLOC) { tsk_error_set_errstr2 ("Sector offset: %" PRIuOFF ", Partition Type: %s", a_start/512, getCurVsPartDescr().c_str() ); registerError(); return TSK_ERR; } else { tsk_error_reset(); return TSK_OK; } } TSK_RETVAL_ENUM retval = findFilesInFsInt(fs_info, fs_info->root_inum); tsk_fs_close(fs_info); if (m_errors.empty() == false) return TSK_ERR; else return retval; } /** * Starts in a specified byte offset of the opened disk images and looks for a * file system. Will call processFile() on each file * that is found. * * @param a_start Byte offset of file system starting location. * * @returns 1 if an error occured (messages will have been registered) and 0 on success */ uint8_t TskAuto::findFilesInFs(TSK_OFF_T a_start) { return findFilesInFs(a_start, TSK_FS_TYPE_DETECT); } /** * Starts in a specified byte offset of the opened disk images and looks for a * file system. Will call processFile() on each file * that is found. * * @param a_start Byte offset of file system starting location. * @param a_ftype Type of file system that is located at the offset. * * @returns 1 if an error occured (messages will have been registered) and 0 on success */ uint8_t TskAuto::findFilesInFs(TSK_OFF_T a_start, TSK_FS_TYPE_ENUM a_ftype) { findFilesInFsRet(a_start, a_ftype); return m_errors.empty() ? 0 : 1; } /** * Starts in a specified byte offset of the opened disk images and looks for a * file system. Will start processing the file system at a specified * file system. 
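 * For example (a sketch; the offset and inum values are hypothetical):
 *   findFilesInFs(63 * 512, TSK_FS_TYPE_DETECT, 64);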
Will call processFile() on each file * that is found in that directory. * * @param a_start Byte offset of file system starting location. * @param a_ftype Type of file system that will be analyzed. * @param a_inum inum to start walking files system at. * * @returns 1 if an error occured (messages will have been registered) and 0 on success */ uint8_t TskAuto::findFilesInFs(TSK_OFF_T a_start, TSK_FS_TYPE_ENUM a_ftype, TSK_INUM_T a_inum) { if (!m_img_info) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_AUTO_NOTOPEN); tsk_error_set_errstr("findFilesInFs -- img_info "); registerError(); return 1; } TSK_FS_INFO *fs_info; if ((fs_info = tsk_fs_open_img(m_img_info, a_start, a_ftype)) == NULL) { if (isCurVsValid() == false) { tsk_error_set_errstr2 ("Sector offset: %" PRIuOFF, a_start/512); registerError(); return TSK_ERR; } else if (getCurVsPartFlag() & TSK_VS_PART_FLAG_ALLOC) { tsk_error_set_errstr2( "Sector offset: %" PRIuOFF ", Partition Type: %s", a_start / 512, getCurVsPartDescr().c_str()); registerError(); return 1; } else { tsk_error_reset(); return 0; } } findFilesInFsInt(fs_info, a_inum); tsk_fs_close(fs_info); return m_errors.empty() ? 0 : 1; } /** * Starts in a specified byte offset of the opened disk images and looks for a * file system. Will start processing the file system at a specified * file system. Will call processFile() on each file * that is found in that directory. * * @param a_start Byte offset of file system starting location. * @param a_inum inum to start walking files system at. * * @returns 1 if an error occured (messages will have been registered) and 0 on success */ uint8_t TskAuto::findFilesInFs(TSK_OFF_T a_start, TSK_INUM_T a_inum) { return TskAuto::findFilesInFs(a_start, TSK_FS_TYPE_DETECT, a_inum); } /** * Processes the file system represented by the given TSK_FS_INFO * pointer. Will Call processFile() on each file that is found. * * @param a_fs_info Pointer to a previously opened file system. * * @returns 1 if an error occured (messages will have been registered) and 0 on success */ uint8_t TskAuto::findFilesInFs(TSK_FS_INFO * a_fs_info) { if (a_fs_info == NULL) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_AUTO_NOTOPEN); tsk_error_set_errstr("findFilesInFs - fs_info"); registerError(); return 1; } findFilesInFsInt(a_fs_info, a_fs_info->root_inum); return m_errors.empty() ? 0 : 1; } /** \internal * file name walk callback. Walk the contents of each file * that is found. * * Does not return ERROR because all errors have been registered * and returning an error would indicate to TSK that errno and such are set. */ TSK_WALK_RET_ENUM TskAuto::dirWalkCb(TSK_FS_FILE * a_fs_file, const char *a_path, void *a_ptr) { TskAuto *tsk = (TskAuto *) a_ptr; if (tsk->m_tag != TSK_AUTO_TAG) { // we have no way to register an error... return TSK_WALK_STOP; } TSK_RETVAL_ENUM retval = tsk->processFile(a_fs_file, a_path); if ((retval == TSK_STOP) || (tsk->getStopProcessing())) return TSK_WALK_STOP; else return TSK_WALK_CONT; } /** \internal * Internal method that the other findFilesInFs can call after they * have opened FS_INFO. 
* @returns OK, STOP, or ERR (error message will already have been registered) */ TSK_RETVAL_ENUM TskAuto::findFilesInFsInt(TSK_FS_INFO * a_fs_info, TSK_INUM_T a_inum) { // see if the super class wants us to proceed TSK_FILTER_ENUM retval = filterFs(a_fs_info); if ((retval == TSK_FILTER_STOP) || (m_stopAllProcessing)) return TSK_STOP; else if (retval == TSK_FILTER_SKIP) return TSK_OK; /* Walk the files, starting at the given inum */ if (tsk_fs_dir_walk(a_fs_info, a_inum, (TSK_FS_DIR_WALK_FLAG_ENUM) (TSK_FS_DIR_WALK_FLAG_RECURSE | m_fileFilterFlags), dirWalkCb, this)) { tsk_error_set_errstr2( "Error walking directory in file system at offset %" PRIuOFF, a_fs_info->offset); registerError(); return TSK_ERR; } if (m_stopAllProcessing) return TSK_STOP; /* We could do some analysis of unallocated blocks at this point... */ return TSK_OK; } /** * Method that can be used from within processFile() to look at each * attribute that a file may have. This will call the processAttribute() * method (which you must implement) on each of the attributes in the file. * @param fs_file file details * @param path full path of parent directory * @returns STOP if the file system processing should stop and not process more files or OK. */ TSK_RETVAL_ENUM TskAuto::processAttributes(TSK_FS_FILE * fs_file, const char *path) { int count = tsk_fs_file_attr_getsize(fs_file), i; for (i = 0; i < count; i++) { TSK_RETVAL_ENUM retval = processAttribute(fs_file, tsk_fs_file_attr_get_idx(fs_file, i), path); if ((retval == TSK_STOP) || (m_stopAllProcessing)) return TSK_STOP; } return TSK_OK; } TSK_RETVAL_ENUM TskAuto::processAttribute(TSK_FS_FILE * fs_file, const TSK_FS_ATTR * fs_attr, const char *path) { return TSK_OK; }; void TskAuto::setStopProcessing() { m_stopAllProcessing = true; } bool TskAuto::getStopProcessing() const { return m_stopAllProcessing; } uint8_t TskAuto::registerError() { // add to our list of errors error_record er; er.code = tsk_error_get_errno(); er.msg1 = tsk_error_get_errstr(); er.msg2 = tsk_error_get_errstr2(); m_errors.push_back(er); // call super class implementation uint8_t retval = handleError(); tsk_error_reset(); return retval; } const std::vector TskAuto::getErrorList() { return m_errors; } void TskAuto::resetErrorList() { m_errors.clear(); } std::string TskAuto::errorRecordToString(error_record &rec) { tsk_error_reset(); tsk_error_set_errno(rec.code); tsk_error_set_errstr("%s", rec.msg1.c_str()); tsk_error_set_errstr2("%s", rec.msg2.c_str()); const char *msg = tsk_error_get(); std::string ret; if (msg != NULL) ret = msg; tsk_error_reset(); return ret; } uint8_t TskAuto::handleError() { return 0; }; /** * Utility method to help determine if a file is an NTFS file system file (such as $MFT). * * @returns 1 if the file is an NTFS System file, 0 if not. */ uint8_t TskAuto::isNtfsSystemFiles(TSK_FS_FILE * a_fs_file, const char *a_path) { if ((a_fs_file) && (a_fs_file->fs_info) && (TSK_FS_TYPE_ISNTFS(a_fs_file->fs_info->ftype)) && (a_fs_file->name) && (a_fs_file->name->name[0] == '$') && (a_fs_file->name->meta_addr < 20)) return 1; else return 0; } /** * Utility method to help determine if a file is a FAT file system file (such as $MBR). * * @returns 1 if the file is an FAT System file, 0 if not. 
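 * (i.e., one of the virtual $MBR, $FAT1, or $FAT2 entries that TSK adds
 * to FAT file systems).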
*/ uint8_t TskAuto::isFATSystemFiles(TSK_FS_FILE *a_fs_file) { if (a_fs_file && a_fs_file->fs_info && a_fs_file->name) { FATFS_INFO *fatfs = (FATFS_INFO*)a_fs_file->fs_info; TSK_INUM_T addr = a_fs_file->name->meta_addr; if ((addr == fatfs->mbr_virt_inum) || (addr == fatfs->fat1_virt_inum) || (addr == fatfs->fat2_virt_inum && fatfs->numfat == 2)) { return 1; } } return 0; } /** * Utility method to help determine if a file is a . or .. directory. * @param a_fs_file File to evaluate * * @returns 1 if the file is a dot directory, 0 if not. */ uint8_t TskAuto::isDotDir(TSK_FS_FILE * a_fs_file) { if ((!a_fs_file) || (!a_fs_file->name) || (a_fs_file->name->type != TSK_FS_NAME_TYPE_DIR)) return 0; if ((a_fs_file->name->name_size >= 2) && (a_fs_file->name->name[0] == '.') && ((a_fs_file->name->name[1] == '\0') || ((a_fs_file->name->name_size > 2) && (a_fs_file->name->name[1] == '.') && (a_fs_file->name->name[2] == '\0')))) return 1; else return 0; } /** * Utility method to help determine if a file is a directory. * * @returns 1 if the file is a directory, 0 if not. */ uint8_t TskAuto::isDir(TSK_FS_FILE * a_fs_file) { if ((a_fs_file) && (a_fs_file->name)) { if (a_fs_file->name->type == TSK_FS_NAME_TYPE_DIR) { return 1; } else if (a_fs_file->name->type == TSK_FS_NAME_TYPE_UNDEF) { if ((a_fs_file->meta) && (a_fs_file->meta->type == TSK_FS_META_TYPE_DIR)) { return 1; } } } return 0; } /** * Utility method to help determine if a file is a file (and not a directory). * * @returns 1 if the file is a file, 0 if not. */ uint8_t TskAuto::isFile(TSK_FS_FILE * a_fs_file) { if ((a_fs_file) && (a_fs_file->name)) { if (a_fs_file->name->type == TSK_FS_NAME_TYPE_REG) { return 1; } else if (a_fs_file->name->type == TSK_FS_NAME_TYPE_UNDEF) { if ((a_fs_file->meta) && (a_fs_file->meta->type == TSK_FS_META_TYPE_REG)) { return 1; } } } return 0; } /** * Utility method to help determine if an attribute is the default type for the file/dir. * * @returns 1 if the attribute is a default type, 0 if not. */ uint8_t TskAuto::isDefaultType(TSK_FS_FILE * a_fs_file, const TSK_FS_ATTR * a_fs_attr) { if ((a_fs_file) && (a_fs_file->fs_info) && (a_fs_file->fs_info->get_default_attr_type(a_fs_file) == a_fs_attr->type)) return 1; else return 0; } /** * Utility method to help determine if an attribute is non-resident (meaning it uses blocks to store data) * * @returns 1 if the attribute is non-resident, 0 if not. */ uint8_t TskAuto::isNonResident(const TSK_FS_ATTR * a_fs_attr) { if ((a_fs_attr) && (a_fs_attr->flags & TSK_FS_ATTR_NONRES)) return 1; else return 0; } sleuthkit-4.2.0/tsk/auto/auto_db.cpp000644 000765 000024 00000077511 12576320700 020232 0ustar00carrierstaff000000 000000 /* ** The Sleuth Kit ** ** Brian Carrier [carrier sleuthkit [dot] org] ** Copyright (c) 2010-2013 Brian Carrier. All Rights reserved ** ** This software is distributed under the Common Public License 1.0 ** */ /** * \file auto_db.cpp * Contains code to populate SQLite database with volume and file system information from a specific image. 
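 *
 * A minimal usage sketch (dbPath and imgPath are hypothetical; error
 * handling omitted):
 *
 *   TskCaseDb *caseDb = TskCaseDb::newDb(dbPath);
 *   TskAutoDb *autoDb = caseDb->initAddImage();
 *   if (autoDb->startAddImage(1, &imgPath, TSK_IMG_TYPE_DETECT, 0) == 0)
 *       autoDb->commitAddImage();
 *   else
 *       autoDb->revertAddImage();
 *   delete autoDb;
 *   delete caseDb;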
*/ #include "tsk_case_db.h" #if HAVE_LIBEWF #include "tsk/img/ewf.h" #endif #include #include #include using std::stringstream; using std::for_each; /** * @param a_db Database to add an image to * @param a_NSRLDb Database of "known" files (can be NULL) * @param a_knownBadDb Database of "known bad" files (can be NULL) */ TskAutoDb::TskAutoDb(TskDbSqlite * a_db, TSK_HDB_INFO * a_NSRLDb, TSK_HDB_INFO * a_knownBadDb) { m_db = a_db; m_curImgId = 0; m_curVsId = 0; m_curVolId = 0; m_curFsId = 0; m_curFileId = 0; m_curUnallocDirId = 0; m_curDirId = 0; m_curDirPath = ""; m_blkMapFlag = false; m_vsFound = false; m_volFound = false; m_stopped = false; m_foundStructure = false; m_imgTransactionOpen = false; m_NSRLDb = a_NSRLDb; m_knownBadDb = a_knownBadDb; if ((m_NSRLDb) || (m_knownBadDb)) m_fileHashFlag = true; else m_fileHashFlag = false; m_noFatFsOrphans = false; m_addUnallocSpace = false; m_chunkSize = -1; tsk_init_lock(&m_curDirPathLock); } TskAutoDb::~TskAutoDb() { // if they didn't commit / revert, then revert if (m_imgTransactionOpen) revertAddImage(); closeImage(); tsk_deinit_lock(&m_curDirPathLock); } void TskAutoDb::closeImage() { TskAuto::closeImage(); m_NSRLDb = NULL; m_knownBadDb = NULL; } void TskAutoDb::createBlockMap(bool flag) { m_blkMapFlag = flag; } void TskAutoDb::hashFiles(bool flag) { m_fileHashFlag = flag; } void TskAutoDb::setNoFatFsOrphans(bool noFatFsOrphans) { m_noFatFsOrphans = noFatFsOrphans; } void TskAutoDb::setAddUnallocSpace(bool addUnallocSpace) { setAddUnallocSpace(addUnallocSpace, -1); } void TskAutoDb::setAddUnallocSpace(bool addUnallocSpace, int64_t chunkSize) { m_addUnallocSpace = addUnallocSpace; m_chunkSize = chunkSize; } uint8_t TskAutoDb::openImageUtf8(int a_num, const char *const a_images[], TSK_IMG_TYPE_ENUM a_type, unsigned int a_ssize) { uint8_t retval = TskAuto::openImageUtf8(a_num, a_images, a_type, a_ssize); if (retval != 0) { return retval; } if (addImageDetails(a_images, a_num)) { return 1; } return 0; } uint8_t TskAutoDb::openImage(int a_num, const TSK_TCHAR * const a_images[], TSK_IMG_TYPE_ENUM a_type, unsigned int a_ssize) { // make name of database #ifdef TSK_WIN32 uint8_t retval = TskAuto::openImage(a_num, a_images, a_type, a_ssize); if (retval != 0) { return retval; } // convert image paths to UTF-8 char **img_ptrs = (char **) tsk_malloc(a_num * sizeof(char *)); if (img_ptrs == NULL) { return 1; } for (int i = 0; i < a_num; i++) { char * img2 = (char*) tsk_malloc(1024 * sizeof(char)); UTF8 *ptr8; UTF16 *ptr16; ptr8 = (UTF8 *) img2; ptr16 = (UTF16 *) a_images[i]; retval = tsk_UTF16toUTF8_lclorder((const UTF16 **) &ptr16, (UTF16 *) & ptr16[TSTRLEN(a_images[i]) + 1], &ptr8, (UTF8 *) ((uintptr_t) ptr8 + 1024), TSKlenientConversion); if (retval != TSKconversionOK) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_AUTO_UNICODE); tsk_error_set_errstr("Error converting image to UTF-8\n"); return 1; } img_ptrs[i] = img2; } if (addImageDetails(img_ptrs, a_num)) { //cleanup for (int i = 0; i < a_num; ++i) { free(img_ptrs[i]); } free(img_ptrs); return 1; } //cleanup for (int i = 0; i < a_num; ++i) { free(img_ptrs[i]); } free(img_ptrs); return 0; #else return openImageUtf8(a_num, a_images, a_type, a_ssize); #endif } /** * Adds image details to the existing database tables. 
* @param img_ptrs The paths to the image splits * @return Returns 1 on error */ uint8_t TskAutoDb::addImageDetails(const char *const img_ptrs[], int a_num) { string md5 = ""; #if HAVE_LIBEWF if (m_img_info->itype == TSK_IMG_TYPE_EWF_EWF) { // @@@ This shoudl really probably be inside of a tsk_img_ method IMG_EWF_INFO *ewf_info = (IMG_EWF_INFO *)m_img_info; if (ewf_info->md5hash_isset) { md5 = ewf_info->md5hash; } } #endif if (m_db->addImageInfo(m_img_info->itype, m_img_info->sector_size, m_curImgId, m_curImgTZone, m_img_info->size, md5)) { registerError(); return 1; } // Add the image names for (int i = 0; i < a_num; i++) { const char *img_ptr = NULL; img_ptr = img_ptrs[i]; if (m_db->addImageName(m_curImgId, img_ptr, i)) { registerError(); return 1; } } return 0; } TSK_FILTER_ENUM TskAutoDb::filterVs(const TSK_VS_INFO * vs_info) { m_vsFound = true; if (m_db->addVsInfo(vs_info, m_curImgId, m_curVsId)) { registerError(); return TSK_FILTER_STOP; } return TSK_FILTER_CONT; } TSK_FILTER_ENUM TskAutoDb::filterVol(const TSK_VS_PART_INFO * vs_part) { m_volFound = true; m_foundStructure = true; if (m_db->addVolumeInfo(vs_part, m_curVsId, m_curVolId)) { registerError(); return TSK_FILTER_STOP; } return TSK_FILTER_CONT; } TSK_FILTER_ENUM TskAutoDb::filterFs(TSK_FS_INFO * fs_info) { TSK_FS_FILE *file_root; m_foundStructure = true; if (m_volFound && m_vsFound) { // there's a volume system and volume if (m_db->addFsInfo(fs_info, m_curVolId, m_curFsId)) { registerError(); return TSK_FILTER_STOP; } } else { // file system doesn't live in a volume, use image as parent if (m_db->addFsInfo(fs_info, m_curImgId, m_curFsId)) { registerError(); return TSK_FILTER_STOP; } } // We won't hit the root directory on the walk, so open it now if ((file_root = tsk_fs_file_open(fs_info, NULL, "/")) != NULL) { processFile(file_root, ""); tsk_fs_file_close(file_root); file_root = NULL; } // make sure that flags are set to get all files -- we need this to // find parent directory TSK_FS_DIR_WALK_FLAG_ENUM filterFlags = (TSK_FS_DIR_WALK_FLAG_ENUM) (TSK_FS_DIR_WALK_FLAG_ALLOC | TSK_FS_DIR_WALK_FLAG_UNALLOC); //check if to skip processing of FAT orphans if (m_noFatFsOrphans && TSK_FS_TYPE_ISFAT(fs_info->ftype) ) { filterFlags = (TSK_FS_DIR_WALK_FLAG_ENUM) (filterFlags | TSK_FS_DIR_WALK_FLAG_NOORPHAN); } setFileFilterFlags(filterFlags); return TSK_FILTER_CONT; } /* Insert the file data into the file table. * @param md5 Binary MD5 value (i.e. 16 bytes) or NULL * Returns TSK_ERR on error. */ TSK_RETVAL_ENUM TskAutoDb::insertFileData(TSK_FS_FILE * fs_file, const TSK_FS_ATTR * fs_attr, const char *path, const unsigned char *const md5, const TSK_DB_FILES_KNOWN_ENUM known) { if (m_db->addFsFile(fs_file, fs_attr, path, md5, known, m_curFsId, m_curFileId)) { registerError(); return TSK_ERR; } return TSK_OK; } /** * Analyzes the open image and adds image info to a database. * Does not deal with transactions and such. Refer to startAddImage() * for more control. * @returns 1 if a critical error occured (DB doesn't exist, no file system, etc.), 2 if errors occured at some point adding files to the DB (corrupt file, etc.), and 0 otherwise. Errors will have been registered. */ uint8_t TskAutoDb::addFilesInImgToDb() { if (m_db == NULL || !m_db->dbExist()) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_AUTO_DB); tsk_error_set_errstr("addFilesInImgToDb: m_db not open"); registerError(); return 1; } // @@@ This seems bad because we are overriding what the user may // have set. 
We should remove the public API if we are going to // override it -- presumabably this was added so that we always have // unallocated volume space... setVolFilterFlags((TSK_VS_PART_FLAG_ENUM) (TSK_VS_PART_FLAG_ALLOC | TSK_VS_PART_FLAG_UNALLOC)); uint8_t retVal = 0; if (findFilesInImg()) { // map the boolean return value from findFiles to the three-state return value we use // @@@ findFiles should probably return this three-state enum too if (m_foundStructure == false) { retVal = 1; } else { retVal = 2; } } TSK_RETVAL_ENUM addUnallocRetval = TSK_OK; if (m_addUnallocSpace) addUnallocRetval = addUnallocSpaceToDb(); // findFiles return value trumps unalloc since it can return either 2 or 1. if (retVal) { return retVal; } else if (addUnallocRetval == TSK_ERR) { return 2; } else { return 0; } } /** * Start the process to add image/file metadata to database inside of a transaction. * Same functionality as addFilesInImgToDb(). Reverts * all changes on error. User must call either commitAddImage() to commit the changes, * or revertAddImage() to revert them. * @returns 1 if critical system error occcured (data does not exist in DB), 2 if error occured while adding files to DB (but it finished), and 0 otherwise. All errors will have been registered. */ uint8_t TskAutoDb::startAddImage(int numImg, const TSK_TCHAR * const imagePaths[], TSK_IMG_TYPE_ENUM imgType, unsigned int sSize) { if (tsk_verbose) tsk_fprintf(stderr, "TskAutoDb::startAddImage: Starting add image process\n"); if (m_db->releaseSavepoint(TSK_ADD_IMAGE_SAVEPOINT) == 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_AUTO_DB); tsk_error_set_errstr("TskAutoDb::startAddImage(): An add-image savepoint already exists"); registerError(); return 1; } // @@@ This check is a bit paranoid, and may need to be removed in the future if (m_db->inTransaction()) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_AUTO_DB); tsk_error_set_errstr("TskAutoDb::startAddImage(): Already in a transaction, image might not be commited"); registerError(); return 1; } if (m_db->createSavepoint(TSK_ADD_IMAGE_SAVEPOINT)) { registerError(); return 1; } m_imgTransactionOpen = true; if (openImage(numImg, imagePaths, imgType, sSize)) { tsk_error_set_errstr2("TskAutoDb::startAddImage"); registerError(); if (revertAddImage()) registerError(); return 1; } return addFilesInImgToDb(); } #ifdef WIN32 uint8_t TskAutoDb::startAddImage(int numImg, const char *const imagePaths[], TSK_IMG_TYPE_ENUM imgType, unsigned int sSize) { if (tsk_verbose) tsk_fprintf(stderr, "TskAutoDb::startAddImage_utf8: Starting add image process\n"); if (m_db->releaseSavepoint(TSK_ADD_IMAGE_SAVEPOINT) == 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_AUTO_DB); tsk_error_set_errstr("TskAutoDb::startAddImage(): An add-image savepoint already exists"); registerError(); return 1; } // @@@ This check is a bit paranoid, and may need to be removed in the future if (m_db->inTransaction()) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_AUTO_DB); tsk_error_set_errstr("TskAutoDb::startAddImage(): Already in a transaction, image might not be commited"); registerError(); return 1; } if (m_db->createSavepoint(TSK_ADD_IMAGE_SAVEPOINT)) { registerError(); return 1; } m_imgTransactionOpen = true; if (openImageUtf8(numImg, imagePaths, imgType, sSize)) { tsk_error_set_errstr2("TskAutoDb::startAddImage"); registerError(); if (revertAddImage()) registerError(); return 1; } return addFilesInImgToDb(); } #endif /** * Cancel the running process. Will not be handled immediately. 
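 * The request takes effect the next time processFile() checks the stop
 * flag, so an in-progress add-image call returns shortly afterwards.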
*/ void TskAutoDb::stopAddImage() { if (tsk_verbose) tsk_fprintf(stderr, "TskAutoDb::stopAddImage: Stop request received\n"); m_stopped = true; setStopProcessing(); // flag is checked every time processFile() is called } /** * Revert all changes after the startAddImage() process has run sucessfully. * @returns 1 on error (error was NOT registered in list), 0 on success */ int TskAutoDb::revertAddImage() { if (tsk_verbose) tsk_fprintf(stderr, "TskAutoDb::revertAddImage: Reverting add image process\n"); if (m_imgTransactionOpen == false) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_AUTO_DB); tsk_error_set_errstr("revertAddImage(): transaction is already closed"); return 1; } int retval = m_db->revertSavepoint(TSK_ADD_IMAGE_SAVEPOINT); if (retval == 0) { if (m_db->inTransaction()) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_AUTO_DB); tsk_error_set_errstr("TskAutoDb::revertAddImage(): Image reverted, but still in a transaction."); retval = 1; } } m_imgTransactionOpen = false; return retval; } /** * Finish the transaction after the startAddImage is finished. * @returns Id of the image that was added or -1 on error (error was NOT registered in list) */ int64_t TskAutoDb::commitAddImage() { if (tsk_verbose) tsk_fprintf(stderr, "TskAutoDb::commitAddImage: Commiting add image process\n"); if (m_imgTransactionOpen == false) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_AUTO_DB); tsk_error_set_errstr("commitAddImage(): transaction is already closed"); return -1; } int retval = m_db->releaseSavepoint(TSK_ADD_IMAGE_SAVEPOINT); m_imgTransactionOpen = false; if (retval == 1) { return -1; } else { if (m_db->inTransaction()) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_AUTO_DB); tsk_error_set_errstr("TskAutoDb::revertAddImage(): Image savepoint released, but still in a transaction."); return -1; } } return m_curImgId; } /** * Set the current image's timezone */ void TskAutoDb::setTz(string tzone) { m_curImgTZone = tzone; } TSK_RETVAL_ENUM TskAutoDb::processFile(TSK_FS_FILE * fs_file, const char *path) { // Check if the process has been canceled if (m_stopped) { if (tsk_verbose) tsk_fprintf(stderr, "TskAutoDb::processFile: Stop request detected\n"); return TSK_STOP; } /* If no longe processing the same directroy as the last file, * then update the class-level setting. */ int64_t cur = fs_file->name->par_addr; if (m_curDirId != cur) { m_curDirId = cur; tsk_take_lock(&m_curDirPathLock); m_curDirPath = path; tsk_release_lock(&m_curDirPathLock); } /* process the attributes. The case of having 0 attributes can occur * with virtual / sparse files and HFS directories. * At some point, this can probably be cleaned * up if TSK is more consistent about if there should always be an * attribute or not. Sometimes, none of the attributes are added * because of their type and we always want to add a reference to * every file. */ TSK_RETVAL_ENUM retval = TSK_OK; m_attributeAdded = false; if (tsk_fs_file_attr_getsize(fs_file) > 0) { retval = processAttributes(fs_file, path); } // insert a general row if we didn't add a specific attribute one if ((retval == TSK_OK) && (m_attributeAdded == false)) { retval = insertFileData(fs_file, NULL, path, NULL, TSK_DB_FILES_KNOWN_UNKNOWN); } // reset the file id m_curFileId = 0; if (retval == TSK_STOP) return TSK_STOP; else return TSK_OK; } // we return only OK or STOP -- errors are registered only and OK is returned. 
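// For the file's default attribute this inserts the file row (with an
// optional MD5 hash and NSRL / known-bad lookup) and, when block-map
// recording is enabled, the attribute's non-resident run list.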
TSK_RETVAL_ENUM TskAutoDb::processAttribute(TSK_FS_FILE * fs_file, const TSK_FS_ATTR * fs_attr, const char *path) { // add the file metadata for the default attribute type if (isDefaultType(fs_file, fs_attr)) { // calculate the MD5 hash if the attribute is a file unsigned char hash[16]; unsigned char *md5 = NULL; memset(hash, 0, 16); TSK_DB_FILES_KNOWN_ENUM file_known = TSK_DB_FILES_KNOWN_UNKNOWN; if (m_fileHashFlag && isFile(fs_file)) { if (md5HashAttr(hash, fs_attr)) { // error was registered return TSK_OK; } md5 = hash; if (m_NSRLDb != NULL) { int8_t retval = tsk_hdb_lookup_raw(m_NSRLDb, hash, 16, TSK_HDB_FLAG_QUICK, NULL, NULL); if (retval == -1) { registerError(); return TSK_OK; } else if (retval) { file_known = TSK_DB_FILES_KNOWN_KNOWN; } } if (m_knownBadDb != NULL) { int8_t retval = tsk_hdb_lookup_raw(m_knownBadDb, hash, 16, TSK_HDB_FLAG_QUICK, NULL, NULL); if (retval == -1) { registerError(); return TSK_OK; } else if (retval) { file_known = TSK_DB_FILES_KNOWN_KNOWN_BAD; } } } if (insertFileData(fs_attr->fs_file, fs_attr, path, md5, file_known) == TSK_ERR) { registerError(); return TSK_OK; } else { m_attributeAdded = true; } // add the block map, if requested and the file is non-resident if ((m_blkMapFlag) && (isNonResident(fs_attr)) && (isDotDir(fs_file) == 0)) { TSK_FS_ATTR_RUN *run; int sequence = 0; for (run = fs_attr->nrd.run; run != NULL; run = run->next) { unsigned int block_size = fs_file->fs_info->block_size; // ignore sparse blocks if (run->flags & TSK_FS_ATTR_RUN_FLAG_SPARSE) continue; // @@@ We probaly want to keep on going here if (m_db->addFileLayoutRange(m_curFileId, run->addr * block_size, run->len * block_size, sequence++)) { registerError(); return TSK_OK; } } } } return TSK_OK; } /** * Helper for md5HashAttr */ TSK_WALK_RET_ENUM TskAutoDb::md5HashCallback(TSK_FS_FILE * file, TSK_OFF_T offset, TSK_DADDR_T addr, char *buf, size_t size, TSK_FS_BLOCK_FLAG_ENUM a_flags, void *ptr) { TSK_MD5_CTX *md = (TSK_MD5_CTX *) ptr; if (md == NULL) return TSK_WALK_CONT; TSK_MD5_Update(md, (unsigned char *) buf, (unsigned int) size); return TSK_WALK_CONT; } /** * MD5 hash an attribute and put the result in the given array * @param md5Hash array to write the hash to * @param fs_attr attribute to hash the data of * @return Returns 1 on error (message has been registered) */ int TskAutoDb::md5HashAttr(unsigned char md5Hash[16], const TSK_FS_ATTR * fs_attr) { TSK_MD5_CTX md; TSK_MD5_Init(&md); if (tsk_fs_attr_walk(fs_attr, TSK_FS_FILE_WALK_FLAG_NONE, md5HashCallback, (void *) &md)) { registerError(); return 1; } TSK_MD5_Final(md5Hash, &md); return 0; } /** * Callback invoked per every unallocated block in the filesystem * Creates file ranges and file entries * A single file entry per consecutive range of blocks * @param a_block block being walked * @param a_ptr point to TskAutoDb class * @returns TSK_WALK_CONT if continue, otherwise TSK_WALK_STOP if stop processing requested */ TSK_WALK_RET_ENUM TskAutoDb::fsWalkUnallocBlocksCb(const TSK_FS_BLOCK *a_block, void *a_ptr) { UNALLOC_BLOCK_WLK_TRACK * unallocBlockWlkTrack = (UNALLOC_BLOCK_WLK_TRACK *) a_ptr; if (unallocBlockWlkTrack->tskAutoDb.m_stopAllProcessing) return TSK_WALK_STOP; // initialize if this is the first block if (unallocBlockWlkTrack->isStart) { unallocBlockWlkTrack->isStart = false; unallocBlockWlkTrack->curRangeStart = a_block->addr; unallocBlockWlkTrack->prevBlock = a_block->addr; unallocBlockWlkTrack->size = 0; unallocBlockWlkTrack->nextSequenceNo = 0; return TSK_WALK_CONT; } // if this block is consecutive with the 
previous one, update prevBlock and return if (a_block->addr == unallocBlockWlkTrack->prevBlock + 1) { unallocBlockWlkTrack->prevBlock = a_block->addr; return TSK_WALK_CONT; } // this block is not contiguous with the previous one; create and add a range object const uint64_t rangeStartOffset = unallocBlockWlkTrack->curRangeStart * unallocBlockWlkTrack->fsInfo.block_size + unallocBlockWlkTrack->fsInfo.offset; const uint64_t rangeSizeBytes = (1 + unallocBlockWlkTrack->prevBlock - unallocBlockWlkTrack->curRangeStart) * unallocBlockWlkTrack->fsInfo.block_size; unallocBlockWlkTrack->ranges.push_back(TSK_DB_FILE_LAYOUT_RANGE(rangeStartOffset, rangeSizeBytes, unallocBlockWlkTrack->nextSequenceNo++)); // bookkeeping for the next range object unallocBlockWlkTrack->size += rangeSizeBytes; unallocBlockWlkTrack->curRangeStart = a_block->addr; unallocBlockWlkTrack->prevBlock = a_block->addr; // Here we just return if we are a) collecting all unallocated data // for the given volumen (chunkSize == 0) or b) collecting all unallocated // data whose total size is at least chunkSize (chunkSize > 0) if (unallocBlockWlkTrack->chunkSize == 0 || unallocBlockWlkTrack->chunkSize > 0 && unallocBlockWlkTrack->size < unallocBlockWlkTrack->chunkSize) { return TSK_WALK_CONT; } // at this point we are either chunking and have reached the chunk limit // or we're not chunking. Either way we now add what we've got to the DB int64_t fileObjId = 0; if (unallocBlockWlkTrack->tskAutoDb.m_db->addUnallocBlockFile(unallocBlockWlkTrack->tskAutoDb.m_curUnallocDirId, unallocBlockWlkTrack->fsObjId, unallocBlockWlkTrack->size, unallocBlockWlkTrack->ranges, fileObjId) == TSK_ERR) { // @@@ Handle error -> Don't have access to registerError() though... } // reset unallocBlockWlkTrack->curRangeStart = a_block->addr; unallocBlockWlkTrack->size = 0; unallocBlockWlkTrack->ranges.clear(); unallocBlockWlkTrack->nextSequenceNo = 0; //we don't know what the last unalloc block is in advance //and will handle the last range in addFsInfoUnalloc() return TSK_WALK_CONT; } /** * Add unallocated space for the given file system to the database. * Create files for consecutive unalloc block ranges. 
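 * Grouping depends on m_chunkSize: 0 collects every range into a single
 * unallocated-blocks file, a positive value flushes a file roughly every
 * m_chunkSize bytes, and a negative value (the single-argument
 * setAddUnallocSpace() default) creates one file per contiguous run.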
* @param dbFsInfo fs to process * @returns TSK_OK on success, TSK_ERR on error */ TSK_RETVAL_ENUM TskAutoDb::addFsInfoUnalloc(const TSK_DB_FS_INFO & dbFsInfo) { //open the fs we have from database TSK_FS_INFO * fsInfo = tsk_fs_open_img(m_img_info, dbFsInfo.imgOffset, dbFsInfo.fType); if (fsInfo == NULL) { tsk_error_set_errstr2("TskAutoDb::addFsInfoUnalloc: error opening fs at offset %"PRIuOFF, dbFsInfo.imgOffset); registerError(); return TSK_ERR; } //create a "fake" dir to hold the unalloc files for the fs if (m_db->addUnallocFsBlockFilesParent(dbFsInfo.objId, m_curUnallocDirId) == TSK_ERR) { tsk_error_set_errstr2("addFsInfoUnalloc: error creating dir for unallocated space"); registerError(); return TSK_ERR; } //walk unalloc blocks on the fs and process them //initialize the unalloc block walk tracking UNALLOC_BLOCK_WLK_TRACK unallocBlockWlkTrack(*this, *fsInfo, dbFsInfo.objId, m_chunkSize); uint8_t block_walk_ret = tsk_fs_block_walk(fsInfo, fsInfo->first_block, fsInfo->last_block, (TSK_FS_BLOCK_WALK_FLAG_ENUM)(TSK_FS_BLOCK_WALK_FLAG_UNALLOC | TSK_FS_BLOCK_WALK_FLAG_AONLY), fsWalkUnallocBlocksCb, &unallocBlockWlkTrack); if (block_walk_ret == 1) { stringstream errss; tsk_fs_close(fsInfo); errss << "TskAutoDb::addFsInfoUnalloc: error walking fs unalloc blocks, fs id: "; errss << unallocBlockWlkTrack.fsObjId; tsk_error_set_errstr2("%s", errss.str().c_str()); registerError(); return TSK_ERR; } if(m_stopAllProcessing) { tsk_fs_close(fsInfo); return TSK_OK; } // handle creation of the last range // make range inclusive from curBlockStart to prevBlock const uint64_t byteStart = unallocBlockWlkTrack.curRangeStart * fsInfo->block_size + fsInfo->offset; const uint64_t byteLen = (1 + unallocBlockWlkTrack.prevBlock - unallocBlockWlkTrack.curRangeStart) * fsInfo->block_size; unallocBlockWlkTrack.ranges.push_back(TSK_DB_FILE_LAYOUT_RANGE(byteStart, byteLen, unallocBlockWlkTrack.nextSequenceNo++)); unallocBlockWlkTrack.size += byteLen; int64_t fileObjId = 0; if (m_db->addUnallocBlockFile(m_curUnallocDirId, dbFsInfo.objId, unallocBlockWlkTrack.size, unallocBlockWlkTrack.ranges, fileObjId) == TSK_ERR) { registerError(); tsk_fs_close(fsInfo); return TSK_ERR; } //cleanup tsk_fs_close(fsInfo); return TSK_OK; } /** * Process all unallocated space for this disk image and create "virtual" files with layouts * @returns TSK_OK on success, TSK_ERR on error */ TSK_RETVAL_ENUM TskAutoDb::addUnallocSpaceToDb() { if (m_stopAllProcessing) { return TSK_OK; } size_t numVsP = 0; size_t numFs = 0; TSK_RETVAL_ENUM retFsSpace = addUnallocFsSpaceToDb(numFs); TSK_RETVAL_ENUM retVsSpace = addUnallocVsSpaceToDb(numVsP); //handle case when no fs and no vs partitions TSK_RETVAL_ENUM retImgFile = TSK_OK; if (numVsP == 0 && numFs == 0) { retImgFile = addUnallocImageSpaceToDb(); } if (retFsSpace == TSK_ERR || retVsSpace == TSK_ERR || retImgFile == TSK_ERR) return TSK_ERR; else return TSK_OK; } /** * Process each file system in the database and add its unallocated sectors to virtual files. 
* @param numFs (out) number of filesystems found * @returns TSK_OK on success, TSK_ERR on error (if some or all fs could not be processed) */ TSK_RETVAL_ENUM TskAutoDb::addUnallocFsSpaceToDb(size_t & numFs) { vector fsInfos; if(m_stopAllProcessing) { return TSK_OK; } uint16_t ret = m_db->getFsInfos(m_curImgId, fsInfos); if (ret) { tsk_error_set_errstr2("addUnallocFsSpaceToDb: error getting fs infos from db"); registerError(); return TSK_ERR; } numFs = fsInfos.size(); TSK_RETVAL_ENUM allFsProcessRet = TSK_OK; for (vector::iterator it = fsInfos.begin(); it!= fsInfos.end(); ++it) { if (m_stopAllProcessing) { break; } if (addFsInfoUnalloc(*it) == TSK_ERR) allFsProcessRet = TSK_ERR; } //TODO set parent_path for newly created virt dir/file hierarchy for consistency return allFsProcessRet; } /** * Process each volume in the database and add its unallocated sectors to virtual files. * @param numVsP (out) number of vs partitions found * @returns TSK_OK on success, TSK_ERR on error */ TSK_RETVAL_ENUM TskAutoDb::addUnallocVsSpaceToDb(size_t & numVsP) { vector vsPartInfos; TSK_RETVAL_ENUM retVsPartInfos = m_db->getVsPartInfos(m_curImgId, vsPartInfos); if (retVsPartInfos == TSK_ERR) { tsk_error_set_errstr2("addUnallocVsSpaceToDb: error getting vs part infos from db"); registerError(); return TSK_ERR; } numVsP = vsPartInfos.size(); //get fs infos to see if this vspart has fs vector fsInfos; uint16_t retFsInfos = m_db->getFsInfos(m_curImgId, fsInfos); if (retFsInfos) { tsk_error_set_errstr2("addUnallocVsSpaceToDb: error getting fs infos from db"); registerError(); return TSK_ERR; } for (vector::const_iterator it = vsPartInfos.begin(); it != vsPartInfos.end(); ++it) { if (m_stopAllProcessing) { break; } const TSK_DB_VS_PART_INFO &vsPart = *it; //interested in unalloc, meta, or alloc and no fs if ( (vsPart.flags & (TSK_VS_PART_FLAG_UNALLOC | TSK_VS_PART_FLAG_META)) == 0 ) { //check if vspart has no fs bool hasFs = false; for (vector::const_iterator itFs = fsInfos.begin(); itFs != fsInfos.end(); ++itFs) { const TSK_DB_FS_INFO & fsInfo = *itFs; TSK_DB_OBJECT fsObjInfo; if (m_db->getObjectInfo(fsInfo.objId, fsObjInfo) == TSK_ERR ) { stringstream errss; errss << "addUnallocVsSpaceToDb: error getting object info for fs from db, objId: " << fsInfo.objId; tsk_error_set_errstr2("%s", errss.str().c_str()); registerError(); return TSK_ERR; } if (fsObjInfo.parObjId == vsPart.objId) { hasFs = true; break; } } if (hasFs == true) { //skip processing this vspart continue; } } //end checking vspart flags //get sector size and image offset from parent vs info //get parent id of this vs part TSK_DB_OBJECT vsPartObj; if (m_db->getObjectInfo(vsPart.objId, vsPartObj) == TSK_ERR) { stringstream errss; errss << "addUnallocVsSpaceToDb: error getting object info for vs part from db, objId: " << vsPart.objId; tsk_error_set_errstr2("%s", errss.str().c_str()); registerError(); return TSK_ERR; } TSK_DB_VS_INFO vsInfo; if (m_db->getVsInfo(vsPartObj.parObjId, vsInfo) ) { stringstream errss; errss << "addUnallocVsSpaceToDb: error getting volume system info from db, objId: " << vsPartObj.parObjId; tsk_error_set_errstr2("%s", errss.str().c_str()); registerError(); return TSK_ERR; } //create an unalloc file with unalloc part, with vs part as parent vector ranges; const uint64_t byteStart = vsInfo.offset + vsInfo.block_size * vsPart.start; const uint64_t byteLen = vsInfo.block_size * vsPart.len; TSK_DB_FILE_LAYOUT_RANGE tempRange(byteStart, byteLen, 0); ranges.push_back(tempRange); int64_t fileObjId = 0; if 
(m_db->addUnallocBlockFile(vsPart.objId, 0, tempRange.byteLen, ranges, fileObjId) == TSK_ERR) { registerError(); return TSK_ERR; } } return TSK_OK; } /** * Adds unalloc space for the image if there is no volumes and no file systems. * * @returns TSK_OK on success, TSK_ERR on error */ TSK_RETVAL_ENUM TskAutoDb::addUnallocImageSpaceToDb() { TSK_RETVAL_ENUM retImgFile = TSK_OK; const TSK_OFF_T imgSize = getImageSize(); if (imgSize == -1) { tsk_error_set_errstr("addUnallocImageSpaceToDb: error getting current image size, can't create unalloc block file for the image."); registerError(); retImgFile = TSK_ERR; } else { TSK_DB_FILE_LAYOUT_RANGE tempRange(0, imgSize, 0); //add unalloc block file for the entire image vector ranges; ranges.push_back(tempRange); int64_t fileObjId = 0; retImgFile = m_db->addUnallocBlockFile(m_curImgId, 0, imgSize, ranges, fileObjId); } return retImgFile; } /** * Returns the directory currently being analyzed by processFile(). * Safe to use from another thread than processFile(). * * @returns curDirPath string representing currently analyzed directory */ const std::string TskAutoDb::getCurDir() { string curDirPath; tsk_take_lock(&m_curDirPathLock); curDirPath = m_curDirPath; tsk_release_lock(&m_curDirPathLock); return curDirPath; } sleuthkit-4.2.0/tsk/auto/case_db.cpp000644 000765 000024 00000011126 12576320700 020163 0ustar00carrierstaff000000 000000 /* ** The Sleuth Kit ** ** Brian Carrier [carrier sleuthkit [dot] org] ** Copyright (c) 2010-2013 Brian Carrier. All Rights reserved ** ** This software is distributed under the Common Public License 1.0 ** */ /** * \file case_db.cpp * Contains class definition for TskCaseDb class to handle creating/opening a case * database and adding images to it. */ #include "tsk_case_db.h" #include "tsk_auto_i.h" TskCaseDb::TskCaseDb(TskDbSqlite * a_db) { m_tag = TSK_CASE_DB_TAG; m_db = a_db; m_NSRLDb = NULL; m_knownBadDb = NULL; } TskCaseDb::~TskCaseDb() { if (m_db != NULL) { delete m_db; m_db = NULL; } if (m_NSRLDb != NULL) { tsk_hdb_close(m_NSRLDb); m_NSRLDb = NULL; } if (m_knownBadDb != NULL) { tsk_hdb_close(m_knownBadDb); m_knownBadDb = NULL; } m_tag = 0; } /** * Creates a new case with a new database and initializes its tables. * Fails if there's already a file at the given path. Returns a pointer * to a new TskCaseDb if successful, else NULL. * * @param path Full path to create new database at. */ TskCaseDb * TskCaseDb::newDb(const TSK_TCHAR * const path) { // Check if the database already exsists struct STAT_STR stat_buf; if (TSTAT(path, &stat_buf) == 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_AUTO_DB); tsk_error_set_errstr("Database %" PRIttocTSK " already exists. Must be deleted first.", path); return NULL; } TskDbSqlite *db = new TskDbSqlite(path, true); // Open the database. if (db->open(true)) { delete(db); return NULL; } return new TskCaseDb(db); } /** * Opens a case from an existing database. * * @param path Full path to open database from. */ TskCaseDb * TskCaseDb::openDb(const TSK_TCHAR * path) { // Confirm that database already exsists struct STAT_STR stat_buf; if (TSTAT(path, &stat_buf) != 0) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_AUTO_DB); tsk_error_set_errstr("Database %" PRIttocTSK " does not exist. Must be created first.", path); return NULL; } TskDbSqlite *db = new TskDbSqlite(path, true); // Open the database. if (db->open(false)) { delete(db); return NULL; } return new TskCaseDb(db); } /** * Prepares the process to add an image to the database. 
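* A typical calling sequence, sketched here for illustration only (error handling and
* cleanup omitted; not part of the original source):
*
*   TskCaseDb *caseDb = TskCaseDb::newDb(dbPath);        // or TskCaseDb::openDb(dbPath)
*   TskAutoDb *autoDb = caseDb->initAddImage();
*   // ... set any ingest options on autoDb here ...
*   if (autoDb->startAddImage(numImg, imagePaths, imgType, sSize))
*       autoDb->revertAddImage();                         // roll back on error
*   else
*       autoDb->commitAddImage();                         // keep the new rows
*
* The convenience wrapper TskCaseDb::addImage() below runs the same start / revert /
* commit cycle with the default settings.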
This method * allows the caller to specify options to be used during the ingest. * @returns TskAutDb object that can be used to add the image. */ TskAutoDb * TskCaseDb::initAddImage() { return new TskAutoDb(m_db, m_NSRLDb, m_knownBadDb); } /** * Add an image to the database. This method does not allow you * to customize any of the settings for ingest (such as hash calculation, * and block map population). Use TskCaseDb::initAddImage() to set * these values. * * @param numImg Number of images to add * @param imagePaths Paths to the image splits to open. * @param imgType TYpe of image format * @param sSize Sector size of image * @returns 1 on error and 0 on success */ uint8_t TskCaseDb::addImage(int numImg, const TSK_TCHAR * const imagePaths[], TSK_IMG_TYPE_ENUM imgType, unsigned int sSize) { TskAutoDb autoDb(m_db, m_NSRLDb, m_knownBadDb); if (autoDb.startAddImage(numImg, imagePaths, imgType, sSize)) { autoDb.revertAddImage(); return 1; } if (autoDb.commitAddImage()) { return 1; } return 0; } /* * Specify the NSRL index used for determining "known" files. * @param images Path to index. * @returns 1 on error and 0 on success */ uint8_t TskCaseDb::setNSRLHashDb(TSK_TCHAR * const indexFile ) { if (m_NSRLDb != NULL) { tsk_hdb_close(m_NSRLDb); m_NSRLDb = NULL; } TSK_HDB_OPEN_ENUM flags = TSK_HDB_OPEN_IDXONLY; m_NSRLDb = tsk_hdb_open(indexFile, flags); return m_NSRLDb != NULL; } /* * Specify an index for determining "known bad" files. * @param images Path to index. * @returns 1 on error and 0 on success */ uint8_t TskCaseDb::setKnownBadHashDb(TSK_TCHAR * const indexFile) { if (m_knownBadDb != NULL) { tsk_hdb_close(m_knownBadDb); m_knownBadDb = NULL; } TSK_HDB_OPEN_ENUM flags = TSK_HDB_OPEN_IDXONLY; m_knownBadDb = tsk_hdb_open(indexFile, flags); return m_knownBadDb != NULL; } /* * Clear set lookup databases. * @param images Path to index. */ void TskCaseDb::clearLookupDatabases() { if (m_NSRLDb != NULL) { tsk_hdb_close(m_NSRLDb); m_NSRLDb = NULL; } if (m_knownBadDb != NULL) { tsk_hdb_close(m_knownBadDb); m_knownBadDb = NULL; } } sleuthkit-4.2.0/tsk/auto/db_sqlite.cpp000755 000765 000024 00000156516 12576320700 020571 0ustar00carrierstaff000000 000000 /* ** The Sleuth Kit ** ** Brian Carrier [carrier sleuthkit [dot] org] ** Copyright (c) 2010-2013 Brian Carrier. All Rights reserved ** ** This software is distributed under the Common Public License 1.0 ** */ /** * \file db_sqlite.cpp * Contains code to perform operations against SQLite database. */ #include "tsk_db_sqlite.h" #include "sqlite3.h" #include #include #include using std::stringstream; using std::sort; using std::for_each; #define TSK_SCHEMA_VER 3 /** * Set the locations and logging object. Must call * open() before the object can be used. */ TskDbSqlite::TskDbSqlite(const char *a_dbFilePathUtf8, bool a_blkMapFlag) { strncpy(m_dbFilePathUtf8, a_dbFilePathUtf8, 1024); m_utf8 = true; m_blkMapFlag = a_blkMapFlag; m_db = NULL; m_selectFilePreparedStmt = NULL; m_insertObjectPreparedStmt = NULL; } #ifdef TSK_WIN32 //@@@@ TskDbSqlite::TskDbSqlite(const TSK_TCHAR * a_dbFilePath, bool a_blkMapFlag) { wcsncpy(m_dbFilePath, a_dbFilePath, 1024); m_utf8 = false; m_blkMapFlag = a_blkMapFlag; m_db = NULL; m_selectFilePreparedStmt = NULL; m_insertObjectPreparedStmt = NULL; } #endif TskDbSqlite::~TskDbSqlite() { (void) close(); } /* * Close the Sqlite database. 
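* Note that close() first finalizes the prepared statements (via
* cleanupFilePreparedStmt()) and only then calls sqlite3_close(); sqlite3_close() can
* return SQLITE_BUSY if unfinalized statements are still outstanding. Illustrative
* ordering (not part of the original source):
*
*   sqlite3_finalize(stmt);   // finalize every prepared statement first
*   sqlite3_close(db);        // then the connection can be closed cleanly
*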
* Return 0 on success, 1 on failure */ int TskDbSqlite::close() { if (m_db) { cleanupFilePreparedStmt(); sqlite3_close(m_db); m_db = NULL; } return 0; } int TskDbSqlite::attempt(int resultCode, int expectedResultCode, const char *errfmt) { if (resultCode != expectedResultCode) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_AUTO_DB); tsk_error_set_errstr(errfmt, sqlite3_errmsg(m_db), resultCode); return 1; } return 0; } int TskDbSqlite::attempt(int resultCode, const char *errfmt) { return attempt(resultCode, SQLITE_OK, errfmt); } /** * Execute a statement and sets TSK error values on error * @returns 1 on error, 0 on success */ int TskDbSqlite::attempt_exec(const char *sql, int (*callback) (void *, int, char **, char **), void *callback_arg, const char *errfmt) { char * errmsg; if (!m_db) return 1; if (sqlite3_exec(m_db, sql, callback, callback_arg, &errmsg) != SQLITE_OK) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_AUTO_DB); tsk_error_set_errstr(errfmt, errmsg); sqlite3_free(errmsg); return 1; } return 0; } /** * Execute a statement. * @returns 1 on error, 0 on success */ int TskDbSqlite::attempt_exec(const char *sql, const char *errfmt) { return attempt_exec(sql, NULL, NULL, errfmt); } /** * @returns 1 on error, 0 on success */ int TskDbSqlite::prepare_stmt(const char *sql, sqlite3_stmt ** ppStmt) { if (sqlite3_prepare_v2(m_db, sql, -1, ppStmt, NULL) != SQLITE_OK) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_AUTO_DB); tsk_error_set_errstr("Error preparing SQL statement: %s\n", sql); tsk_error_print(stderr); return 1; } return 0; } /** * @returns 1 on error, 0 on success */ uint8_t TskDbSqlite::addObject(TSK_DB_OBJECT_TYPE_ENUM type, int64_t parObjId, int64_t & objId) { if (attempt(sqlite3_bind_int64(m_insertObjectPreparedStmt, 1, parObjId), "TskDbSqlite::addObj: Error binding parent to statment: %s (result code %d)\n") || attempt(sqlite3_bind_int(m_insertObjectPreparedStmt, 2, type), "TskDbSqlite::addObj: Error binding type to statment: %s (result code %d)\n") || attempt(sqlite3_step(m_insertObjectPreparedStmt), SQLITE_DONE, "TskDbSqlite::addObj: Error adding object to row: %s (result code %d)\n")) { // Statement may be used again, even after error sqlite3_reset(m_insertObjectPreparedStmt); return 1; } objId = sqlite3_last_insert_rowid(m_db); if (attempt(sqlite3_reset(m_insertObjectPreparedStmt), "TskDbSqlite::addObj: Error resetting 'insert object' statement: %s\n")) { return 1; } return 0; } /** * Initialize the open DB: set PRAGMAs, create tables and indexes * @returns 1 on error */ int TskDbSqlite::initialize() { char foo[1024]; // disable synchronous for loading the DB since we have no crash recovery anyway... 
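// (Annotation, not part of the original source: with synchronous = OFF, SQLite skips
// syncing to disk, so a crash or power loss mid-ingest can leave the file corrupt;
// that is accepted here because, as noted above, there is no crash recovery for a
// partially populated case database anyway. The effective settings can be checked
// from the sqlite3 shell, e.g.:
//
//   sqlite> PRAGMA synchronous;   -- expect 0 (OFF)
//   sqlite> PRAGMA page_size;     -- expect 4096
// )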
if (attempt_exec("PRAGMA synchronous = OFF;", "Error setting PRAGMA synchronous: %s\n")) { return 1; } // allow to read while in transaction if (attempt_exec("PRAGMA read_uncommitted = True;", "Error setting PRAGMA read_uncommitted: %s\n")) { return 1; } if (attempt_exec("PRAGMA encoding = \"UTF-8\";", "Error setting PRAGMA encoding UTF-8: %s\n")) { return 1; } if (attempt_exec("PRAGMA page_size = 4096;", "Error setting PRAGMA page_size: %s\n")) { return 1; } if (attempt_exec("PRAGMA foreign_keys = ON;", "Error setting PRAGMA foreign_keys: %s\n")) { return 1; } // increase the DB by 1MB at a time -- supposed to help performance when populating int chunkSize = 1024 * 1024; if (sqlite3_file_control(m_db, NULL, SQLITE_FCNTL_CHUNK_SIZE, &chunkSize) != SQLITE_OK) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_AUTO_DB); tsk_error_set_errstr("TskDbSqlite::initialze: error setting chunk size %s", sqlite3_errmsg(m_db)); return 1; } if (attempt_exec ("CREATE TABLE tsk_db_info (schema_ver INTEGER, tsk_ver INTEGER);", "Error creating tsk_db_info table: %s\n")) { return 1; } snprintf(foo, 1024, "INSERT INTO tsk_db_info (schema_ver, tsk_ver) VALUES (%d, %d);", TSK_SCHEMA_VER, TSK_VERSION_NUM); if (attempt_exec(foo, "Error adding data to tsk_db_info table: %s\n")) { return 1; } if (attempt_exec ("CREATE TABLE tsk_objects (obj_id INTEGER PRIMARY KEY, par_obj_id INTEGER, type INTEGER NOT NULL);", "Error creating tsk_objects table: %s\n") || attempt_exec ("CREATE TABLE tsk_image_info (obj_id INTEGER PRIMARY KEY, type INTEGER, ssize INTEGER, tzone TEXT, size INTEGER, md5 TEXT, display_name TEXT, FOREIGN KEY(obj_id) REFERENCES tsk_objects(obj_id));", "Error creating tsk_image_info table: %s\n") || attempt_exec ("CREATE TABLE tsk_image_names (obj_id INTEGER NOT NULL, name TEXT NOT NULL, sequence INTEGER NOT NULL, FOREIGN KEY(obj_id) REFERENCES tsk_objects(obj_id));", "Error creating tsk_image_names table: %s\n") || attempt_exec ("CREATE TABLE tsk_vs_info (obj_id INTEGER PRIMARY KEY, vs_type INTEGER NOT NULL, img_offset INTEGER NOT NULL, block_size INTEGER NOT NULL, FOREIGN KEY(obj_id) REFERENCES tsk_objects(obj_id));", "Error creating tsk_vs_info table: %s\n") || attempt_exec ("CREATE TABLE tsk_vs_parts (obj_id INTEGER PRIMARY KEY, addr INTEGER NOT NULL, start INTEGER NOT NULL, length INTEGER NOT NULL, desc TEXT, flags INTEGER NOT NULL, FOREIGN KEY(obj_id) REFERENCES tsk_objects(obj_id));", "Error creating tsk_vol_info table: %s\n") || attempt_exec ("CREATE TABLE tsk_fs_info (obj_id INTEGER PRIMARY KEY, img_offset INTEGER NOT NULL, fs_type INTEGER NOT NULL, block_size INTEGER NOT NULL, block_count INTEGER NOT NULL, root_inum INTEGER NOT NULL, first_inum INTEGER NOT NULL, last_inum INTEGER NOT NULL, display_name TEXT, FOREIGN KEY(obj_id) REFERENCES tsk_objects(obj_id));", "Error creating tsk_fs_info table: %s\n") || attempt_exec ("CREATE TABLE tsk_files (obj_id INTEGER PRIMARY KEY, fs_obj_id INTEGER, attr_type INTEGER, attr_id INTEGER, name TEXT NOT NULL, meta_addr INTEGER, meta_seq INTEGER, type INTEGER, has_layout INTEGER, has_path INTEGER, dir_type INTEGER, meta_type INTEGER, dir_flags INTEGER, meta_flags INTEGER, size INTEGER, ctime INTEGER, crtime INTEGER, atime INTEGER, mtime INTEGER, mode INTEGER, uid INTEGER, gid INTEGER, md5 TEXT, known INTEGER, parent_path TEXT, " "FOREIGN KEY(obj_id) REFERENCES tsk_objects(obj_id), FOREIGN KEY(fs_obj_id) REFERENCES tsk_fs_info(obj_id));", "Error creating tsk_files table: %s\n") || attempt_exec ("CREATE TABLE tsk_files_path (obj_id INTEGER PRIMARY KEY, path TEXT NOT 
NULL, FOREIGN KEY(obj_id) REFERENCES tsk_objects(obj_id))", "Error creating tsk_files_path table: %s\n") || attempt_exec ("CREATE TABLE tsk_files_derived (obj_id INTEGER PRIMARY KEY, derived_id INTEGER NOT NULL, rederive TEXT, FOREIGN KEY(obj_id) REFERENCES tsk_objects(obj_id))", "Error creating tsk_files_derived table: %s\n") || attempt_exec ("CREATE TABLE tsk_files_derived_method (derived_id INTEGER PRIMARY KEY, tool_name TEXT NOT NULL, tool_version TEXT NOT NULL, other TEXT)", "Error creating tsk_files_derived_method table: %s\n") || attempt_exec ("CREATE TABLE tag_names (tag_name_id INTEGER PRIMARY KEY, display_name TEXT UNIQUE, description TEXT NOT NULL, color TEXT NOT NULL)", "Error creating tag_names table: %s\n") || attempt_exec ("CREATE TABLE content_tags (tag_id INTEGER PRIMARY KEY, obj_id INTEGER NOT NULL, tag_name_id INTEGER NOT NULL, comment TEXT NOT NULL, begin_byte_offset INTEGER NOT NULL, end_byte_offset INTEGER NOT NULL, " "FOREIGN KEY(obj_id) REFERENCES tsk_objects(obj_id), FOREIGN KEY(tag_name_id) REFERENCES tag_names(tag_name_id))", "Error creating content_tags table: %s\n") || attempt_exec ("CREATE TABLE blackboard_artifact_tags (tag_id INTEGER PRIMARY KEY, artifact_id INTEGER NOT NULL, tag_name_id INTEGER NOT NULL, comment TEXT NOT NULL, " "FOREIGN KEY(artifact_id) REFERENCES blackboard_artifacts(artifact_id), FOREIGN KEY(tag_name_id) REFERENCES tag_names(tag_name_id))", "Error creating blackboard_artifact_tags table: %s\n") || attempt_exec ("CREATE TABLE blackboard_artifacts (artifact_id INTEGER PRIMARY KEY, obj_id INTEGER NOT NULL, artifact_type_id INTEGER NOT NULL, " "FOREIGN KEY(obj_id) REFERENCES tsk_objects(obj_id), FOREIGN KEY(artifact_type_id) REFERENCES blackboard_artifact_types(artifact_type_id))", "Error creating blackboard_artifact table: %s\n") || attempt_exec ("CREATE TABLE blackboard_attributes (artifact_id INTEGER NOT NULL, artifact_type_id INTEGER NOT NULL, source TEXT, context TEXT, attribute_type_id INTEGER NOT NULL, value_type INTEGER NOT NULL, " "value_byte BLOB, value_text TEXT, value_int32 INTEGER, value_int64 INTEGER, value_double NUMERIC(20, 10), " "FOREIGN KEY(artifact_id) REFERENCES blackboard_artifacts(artifact_id), FOREIGN KEY(artifact_type_id) REFERENCES blackboard_artifact_types(artifact_type_id), FOREIGN KEY(attribute_type_id) REFERENCES blackboard_attribute_types(attribute_type_id))", "Error creating blackboard_attribute table: %s\n") || attempt_exec ("CREATE TABLE blackboard_artifact_types (artifact_type_id INTEGER PRIMARY KEY, type_name TEXT NOT NULL, display_name TEXT)", "Error creating blackboard_artifact_types table: %s\n") || attempt_exec ("CREATE TABLE blackboard_attribute_types (attribute_type_id INTEGER PRIMARY KEY, type_name TEXT NOT NULL, display_name TEXT)", "Error creating blackboard_attribute_types table: %s\n") || attempt_exec ("CREATE TABLE reports (report_id INTEGER PRIMARY KEY, path TEXT NOT NULL, crtime INTEGER NOT NULL, src_module_name TEXT NOT NULL, report_name TEXT NOT NULL)", "Error creating reports table: %s\n")) { return 1; } if (m_blkMapFlag) { if (attempt_exec ("CREATE TABLE tsk_file_layout (obj_id INTEGER NOT NULL, byte_start INTEGER NOT NULL, byte_len INTEGER NOT NULL, sequence INTEGER NOT NULL, FOREIGN KEY(obj_id) REFERENCES tsk_objects(obj_id));", "Error creating tsk_fs_blocks table: %s\n")) { return 1; } } if (createIndexes()) return 1; return 0; } /** * Create indexes for the columns that are not primary keys and that we query on. 
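* For example (illustrative only, not part of the original source), the parObjId index
* created below is what keeps child-object lookups such as
*
*   SELECT obj_id FROM tsk_objects WHERE par_obj_id = ?;
*
* cheap, and attrsArtifactID does the same for attribute lookups like
*
*   SELECT * FROM blackboard_attributes WHERE artifact_id = ?;
*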
* @returns 1 on error, 0 on success */ int TskDbSqlite::createIndexes() { return // tsk_objects index attempt_exec("CREATE INDEX parObjId ON tsk_objects(par_obj_id);", "Error creating tsk_objects index on par_obj_id: %s\n") || // file layout index attempt_exec("CREATE INDEX layout_objID ON tsk_file_layout(obj_id);", "Error creating layout_objID index on tsk_file_layout: %s\n") || // blackboard indexes attempt_exec("CREATE INDEX artifact_objID ON blackboard_artifacts(obj_id);", "Error creating artifact_objID index on blackboard_artifacts: %s\n") || attempt_exec("CREATE INDEX artifact_typeID ON blackboard_artifacts(artifact_type_id);", "Error creating artifact_objID index on blackboard_artifacts: %s\n") || attempt_exec("CREATE INDEX attrsArtifactID ON blackboard_attributes(artifact_id);", "Error creating artifact_id index on blackboard_attributes: %s\n") ; /*attempt_exec("CREATE INDEX attribute_artifactTypeId ON blackboard_attributes(artifact_type_id);", "Error creating artifact_type_id index on blackboard_attributes: %s\n"); */ } /* * Open the database (will create file if it does not exist). * @param a_toInit Set to true if this is a new database that needs to have the tables created * @ returns 1 on error and 0 on success */ int TskDbSqlite::open(bool a_toInit) { if (m_utf8) { if (attempt(sqlite3_open(m_dbFilePathUtf8, &m_db), "Can't open database: %s\n")) { sqlite3_close(m_db); return 1; } } else { if (attempt(sqlite3_open16(m_dbFilePath, &m_db), "Can't open database: %s\n")) { sqlite3_close(m_db); return 1; } } // enable finer result codes sqlite3_extended_result_codes(m_db, true); // create the tables if we need to if (a_toInit) { if (initialize()) return 1; } if (setupFilePreparedStmt()) { return 1; } return 0; } /** * Must be called on an intialized database, before adding any content to it. */ int TskDbSqlite::setupFilePreparedStmt() { if (prepare_stmt ("SELECT obj_id FROM tsk_files WHERE meta_addr IS ? AND fs_obj_id IS ?", &m_selectFilePreparedStmt)) { return 1; } if (prepare_stmt ("INSERT INTO tsk_objects (obj_id, par_obj_id, type) VALUES (NULL, ?, ?)", &m_insertObjectPreparedStmt)) { return 1; } return 0; } /** * Must be called after adding content to the database. 
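* The two statements prepared in setupFilePreparedStmt() follow the usual SQLite
* prepared-statement lifecycle; roughly (illustrative sketch, not part of the original
* source):
*
*   sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);  // once, in setupFilePreparedStmt()
*   sqlite3_bind_int64(stmt, 1, value);            // per use
*   sqlite3_step(stmt);                            // per use
*   sqlite3_reset(stmt);                           // per use, so it can be re-run
*   sqlite3_finalize(stmt);                        // once, here
*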
*/ void TskDbSqlite::cleanupFilePreparedStmt() { if (m_selectFilePreparedStmt != NULL) { sqlite3_finalize(m_selectFilePreparedStmt); m_selectFilePreparedStmt = NULL; } if (m_insertObjectPreparedStmt != NULL) { sqlite3_finalize(m_insertObjectPreparedStmt); m_insertObjectPreparedStmt = NULL; } } /** * deprecated */ int TskDbSqlite::addImageInfo(int type, int size, int64_t & objId, const string & timezone) { return addImageInfo(type, size, objId, timezone, 0, ""); } /** * @returns 1 on error, 0 on success */ int TskDbSqlite::addImageInfo(int type, int ssize, int64_t & objId, const string & timezone, TSK_OFF_T size, const string &md5) { char stmt[1024]; char *zSQL; int ret; // We dont' use addObject because we're passing in NULL as the parent snprintf(stmt, 1024, "INSERT INTO tsk_objects (obj_id, par_obj_id, type) VALUES (NULL, NULL, %d);", TSK_DB_OBJECT_TYPE_IMG); if (attempt_exec(stmt, "Error adding data to tsk_objects table: %s\n")) return 1; objId = sqlite3_last_insert_rowid(m_db); zSQL = sqlite3_mprintf("INSERT INTO tsk_image_info (obj_id, type, ssize, tzone, size, md5) VALUES (%lld, %d, %d, '%q', %"PRIuOFF", '%q');", objId, type, ssize, timezone.c_str(), size, md5.c_str()); ret = attempt_exec(zSQL, "Error adding data to tsk_image_info table: %s\n"); sqlite3_free(zSQL); return ret; } /** * @returns 1 on error, 0 on success */ int TskDbSqlite::addImageName(int64_t objId, char const *imgName, int sequence) { char *zSQL; int ret; zSQL = sqlite3_mprintf("INSERT INTO tsk_image_names (obj_id, name, sequence) VALUES (%lld, '%q', %d)", objId, imgName, sequence); ret = attempt_exec(zSQL, "Error adding data to tsk_image_names table: %s\n"); sqlite3_free(zSQL); return ret; } /** * @returns 1 on error, 0 on success */ int TskDbSqlite::addVsInfo(const TSK_VS_INFO * vs_info, int64_t parObjId, int64_t & objId) { char stmt[1024]; if (addObject(TSK_DB_OBJECT_TYPE_VS, parObjId, objId)) return 1; snprintf(stmt, 1024, "INSERT INTO tsk_vs_info (obj_id, vs_type, img_offset, block_size) VALUES (%lld, %d,%" PRIuOFF ",%d)", objId, vs_info->vstype, vs_info->offset, vs_info->block_size); return attempt_exec(stmt, "Error adding data to tsk_vs_info table: %s\n"); } /** * Adds the sector addresses of the volumes into the db. * @returns 1 on error, 0 on success */ int TskDbSqlite::addVolumeInfo(const TSK_VS_PART_INFO * vs_part, int64_t parObjId, int64_t & objId) { char *zSQL; int ret; if (addObject(TSK_DB_OBJECT_TYPE_VOL, parObjId, objId)) return 1; zSQL = sqlite3_mprintf( "INSERT INTO tsk_vs_parts (obj_id, addr, start, length, desc, flags)" "VALUES (%lld, %" PRIuPNUM ",%" PRIuOFF ",%" PRIuOFF ",'%q',%d)", objId, (int) vs_part->addr, vs_part->start, vs_part->len, vs_part->desc, vs_part->flags); ret = attempt_exec(zSQL, "Error adding data to tsk_vs_parts table: %s\n"); sqlite3_free(zSQL); return ret; } /** * @returns 1 on error, 0 on success */ int TskDbSqlite::addFsInfo(const TSK_FS_INFO * fs_info, int64_t parObjId, int64_t & objId) { char stmt[1024]; if (addObject(TSK_DB_OBJECT_TYPE_FS, parObjId, objId)) return 1; snprintf(stmt, 1024, "INSERT INTO tsk_fs_info (obj_id, img_offset, fs_type, block_size, block_count, " "root_inum, first_inum, last_inum) " "VALUES (" "%lld,%" PRIuOFF ",%d,%u,%" PRIuDADDR "," "%" PRIuINUM ",%" PRIuINUM ",%" PRIuINUM ")", objId, fs_info->offset, (int) fs_info->ftype, fs_info->block_size, fs_info->block_count, fs_info->root_inum, fs_info->first_inum, fs_info->last_inum); return attempt_exec(stmt, "Error adding data to tsk_fs_info table: %s\n"); } // ????? 
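// (Annotation, not part of the original source: the INSERT statements above are built
// with sqlite3_mprintf() rather than snprintf() whenever they contain strings, because
// the %q and %Q conversions escape embedded single quotes, e.g.:
//
//   char *zSQL = sqlite3_mprintf("'%q'", "it's.dd");   // yields: 'it''s.dd'
//   sqlite3_free(zSQL);
// )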
//int TskDbSqlite::addCarvedFile(TSK_FS_FILE * fs_file, // const TSK_FS_ATTR * fs_attr, const char *path, int64_t fsObjId, int64_t parObjId, int64_t & objId) //{ // // return addFile(fs_file, fs_attr, path, fsObjId, parObjId, objId); //} /** * Add a file system file to the database * @param fs_file File structure to add * @param fs_attr Specific attribute to add * @param path Path of the file * @param md5 Binary value of MD5 (i.e. 16 bytes) or NULL * @param known Status regarding if it was found in hash databse or not * @param fsObjId File system object of its file system * @param objId ID that was assigned to it from the objects table * @returns 1 on error and 0 on success */ int TskDbSqlite::addFsFile(TSK_FS_FILE * fs_file, const TSK_FS_ATTR * fs_attr, const char *path, const unsigned char *const md5, const TSK_DB_FILES_KNOWN_ENUM known, int64_t fsObjId, int64_t & objId) { int64_t parObjId = 0; if (fs_file->name == NULL) return 0; /* we want the root directory to have its parent be the file system * object. We need to have special care though because the ".." entries * in sub-folders of the root directory have a meta_addr of the root dir. */ if ((fs_file->fs_info->root_inum == fs_file->name->meta_addr) && ((fs_file->name->name == NULL) || (0 == TSK_FS_ISDOT(fs_file->name->name)))) { // this entry is for root directory parObjId = fsObjId; } else { parObjId = findParObjId(fs_file, path, fsObjId); if (parObjId == -1) { //error return 1; } } return addFile(fs_file, fs_attr, path, md5, known, fsObjId, parObjId, objId); } /** * return a hash of the passed in string. We use this * for full paths. * From: http://www.cse.yorku.ca/~oz/hash.html */ uint32_t TskDbSqlite::hash(const unsigned char *str) { uint32_t hash = 5381; int c; while ((c = *str++)) { // skip slashes -> normalizes leading/ending/double slashes if (c == '/') continue; hash = ((hash << 5) + hash) + c; /* hash * 33 + c */ } return hash; } /** * Store meta_addr to object id mapping of the directory in a local cache map * @param fsObjId fs id of this directory * @param fs_file File for the directory to store * @param path Full path (parent and this file) of this directory * @param objId object id of this directory from the objects table */ void TskDbSqlite::storeObjId(const int64_t & fsObjId, const TSK_FS_FILE *fs_file, const char *path, const int64_t & objId) { // skip the . and .. entries if ((fs_file->name) && (fs_file->name->name) && (TSK_FS_ISDOT(fs_file->name->name))) { return; } uint32_t seq; /* NTFS uses sequence, otherwise we hash the path. We do this to map to the * correct parent folder if there are two from the root dir that eventually point to * the same folder (one deleted and one allocated) or two hard links. */ if (TSK_FS_TYPE_ISNTFS(fs_file->fs_info->ftype)) { /* Use the sequence stored in meta (which could be one larger than the name value * if the directory is deleted. We do this because the par_seq gets added to the * name structure when it is added to the directory based on teh value stored in * meta. */ seq = fs_file->meta->seq; } else { seq = hash((const unsigned char *)path); } map > &fsMap = m_parentDirIdCache[fsObjId]; if (fsMap.count(fs_file->name->meta_addr) == 0) { fsMap[fs_file->name->meta_addr][seq] = objId; } else { map &fileMap = fsMap[fs_file->name->meta_addr]; if (fileMap.count(seq) == 0) { fileMap[seq] = objId; } } } /** * Find parent object id of TSK_FS_FILE. 
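* The cache filled in by storeObjId() above is, conceptually, a nested map keyed by
* file system, parent meta address and sequence (illustrative sketch, not part of the
* original source):
*
*   // m_parentDirIdCache : fsObjId -> (parent meta_addr -> (seq -> objId))
*   int64_t cached = m_parentDirIdCache[fsObjId][fs_file->name->par_addr][seq];
*
* where seq is the parent sequence number when the file system is NTFS and a hash of
* the parent path (see hash() above) otherwise.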
Use local cache map, if not found, fall back to SQL * @param fs_file file to find parent obj id for * @param path Path of parent folder that we want to match * @param fsObjId fs id of this file * @returns parent obj id ( > 0), -1 on error */ int64_t TskDbSqlite::findParObjId(const TSK_FS_FILE * fs_file, const char *path, const int64_t & fsObjId) { uint32_t seq; /* NTFS uses sequence, otherwise we hash the path. We do this to map to the * correct parent folder if there are two from the root dir that eventually point to * the same folder (one deleted and one allocated) or two hard links. */ if (TSK_FS_TYPE_ISNTFS(fs_file->fs_info->ftype)) { seq = fs_file->name->par_seq; } else { seq = hash((const unsigned char *)path); } //get from cache by parent meta addr, if available map > &fsMap = m_parentDirIdCache[fsObjId]; if (fsMap.count(fs_file->name->par_addr) > 0) { map &fileMap = fsMap[fs_file->name->par_addr]; if (fileMap.count(seq) > 0) { return fileMap[seq]; } else { printf("Miss: %d\n", fileMap.count(seq)); } } fprintf(stderr, "Miss: %s (%"PRIu64")\n", fs_file->name->name, fs_file->name->meta_addr); // Find the parent file id in the database using the parent metadata address // @@@ This should use sequence number when the new database supports it if (attempt(sqlite3_bind_int64(m_selectFilePreparedStmt, 1, fs_file->name->par_addr), "TskDbSqlite::findParObjId: Error binding meta_addr to statment: %s (result code %d)\n") || attempt(sqlite3_bind_int64(m_selectFilePreparedStmt, 2, fsObjId), "TskDbSqlite::findParObjId: Error binding fs_obj_id to statment: %s (result code %d)\n") || attempt(sqlite3_step(m_selectFilePreparedStmt), SQLITE_ROW, "TskDbSqlite::findParObjId: Error selecting file id by meta_addr: %s (result code %d)\n")) { // Statement may be used again, even after error sqlite3_reset(m_selectFilePreparedStmt); return -1; } int64_t parObjId = sqlite3_column_int64(m_selectFilePreparedStmt, 0); if (attempt(sqlite3_reset(m_selectFilePreparedStmt), "TskDbSqlite::findParObjId: Error resetting 'select file id by meta_addr' statement: %s\n")) { return -1; } return parObjId; } /** * Add file data to the file table * @param md5 binary value of MD5 (i.e. 16 bytes) or NULL * Return 0 on success, 1 on error. 
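* When an MD5 is supplied, the 16 binary bytes are stored as lower-case hex text; the
* conversion in the body below is essentially (illustrative sketch, not part of the
* original source):
*
*   char md5Text[33];
*   for (int i = 0; i < 16; i++)
*       sprintf(&md5Text[i * 2], "%x%x", (md5[i] >> 4) & 0xf, md5[i] & 0xf);
*   // the final sprintf() leaves the terminating NUL at md5Text[32]
*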
*/ int TskDbSqlite::addFile(TSK_FS_FILE * fs_file, const TSK_FS_ATTR * fs_attr, const char *path, const unsigned char *const md5, const TSK_DB_FILES_KNOWN_ENUM known, int64_t fsObjId, int64_t parObjId, int64_t & objId) { time_t mtime = 0; time_t crtime = 0; time_t ctime = 0; time_t atime = 0; TSK_OFF_T size = 0; int meta_type = 0; int meta_flags = 0; int meta_mode = 0; int gid = 0; int uid = 0; int type = TSK_FS_ATTR_TYPE_NOT_FOUND; int idx = 0; char *zSQL; if (fs_file->name == NULL) return 0; if (fs_file->meta) { mtime = fs_file->meta->mtime; atime = fs_file->meta->atime; ctime = fs_file->meta->ctime; crtime = fs_file->meta->crtime; meta_type = fs_file->meta->type; meta_flags = fs_file->meta->flags; meta_mode = fs_file->meta->mode; gid = fs_file->meta->gid; uid = fs_file->meta->uid; } size_t attr_nlen = 0; if (fs_attr) { type = fs_attr->type; idx = fs_attr->id; size = fs_attr->size; if (fs_attr->name) { if ((fs_attr->type != TSK_FS_ATTR_TYPE_NTFS_IDXROOT) || (strcmp(fs_attr->name, "$I30") != 0)) { attr_nlen = strlen(fs_attr->name); } } } // combine name and attribute name size_t len = strlen(fs_file->name->name); char * name; size_t nlen = len + attr_nlen + 5; if ((name = (char *) tsk_malloc(nlen)) == NULL) { return 1; } strncpy(name, fs_file->name->name, nlen); // Add the attribute name if (attr_nlen > 0) { strncat(name, ":", nlen-strlen(name)); strncat(name, fs_attr->name, nlen-strlen(name)); } // clean up path // +2 = space for leading slash and terminating null size_t path_len = strlen(path) + 2; char *escaped_path; if ((escaped_path = (char *) tsk_malloc(path_len)) == NULL) { free(name); return 1; } strncpy(escaped_path, "/", path_len); strncat(escaped_path, path, path_len - strlen(escaped_path)); char *md5TextPtr = NULL; char md5Text[48]; // if md5 hashes are being used if (md5 != NULL) { // copy the hash as hexidecimal into the buffer for (int i = 0; i < 16; i++) { sprintf(&(md5Text[i*2]), "%x%x", (md5[i] >> 4) & 0xf, md5[i] & 0xf); } md5TextPtr = md5Text; } if (addObject(TSK_DB_OBJECT_TYPE_FILE, parObjId, objId)) { free(name); free(escaped_path); return 1; } zSQL = sqlite3_mprintf( "INSERT INTO tsk_files (fs_obj_id, obj_id, type, attr_type, attr_id, name, meta_addr, meta_seq, dir_type, meta_type, dir_flags, meta_flags, size, crtime, ctime, atime, mtime, mode, gid, uid, md5, known, parent_path) " "VALUES (" "%" PRId64 ",%" PRId64 "," "%d," "%d,%d,'%q'," "%" PRIuINUM ",%d," "%d,%d,%d,%d," "%" PRIuOFF "," "%llu,%llu,%llu,%llu," "%d,%d,%d,%Q,%d," "'%q')", fsObjId, objId, TSK_DB_FILES_TYPE_FS, type, idx, name, fs_file->name->meta_addr, fs_file->name->meta_seq, fs_file->name->type, meta_type, fs_file->name->flags, meta_flags, size, (unsigned long long)crtime, (unsigned long long)ctime,(unsigned long long) atime,(unsigned long long) mtime, meta_mode, gid, uid, md5TextPtr, known, escaped_path); if (attempt_exec(zSQL, "TskDbSqlite::addFile: Error adding data to tsk_files table: %s\n")) { free(name); free(escaped_path); sqlite3_free(zSQL); return 1; } sqlite3_free(zSQL); //if dir, update parent id cache if (meta_type == TSK_FS_META_TYPE_DIR) { std::string fullPath = std::string(path) + fs_file->name->name; storeObjId(fsObjId, fs_file, fullPath.c_str(), objId); } free(name); free(escaped_path); return 0; } /** * Create a savepoint. Call revertSavepoint() or releaseSavepoint() * to revert or commit. 
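* The three savepoint methods map directly onto SQLite savepoint SQL; a guarded block
* therefore looks roughly like this (illustrative, the savepoint name is arbitrary and
* not part of the original source):
*
*   SAVEPOINT ingest;                                        -- createSavepoint("ingest")
*   -- ... INSERTs ...
*   ROLLBACK TO SAVEPOINT ingest; RELEASE SAVEPOINT ingest;  -- revertSavepoint("ingest")
*   -- or, on success:
*   RELEASE SAVEPOINT ingest;                                -- releaseSavepoint("ingest")
*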
* @param name Name to call savepoint * @returns 1 on error, 0 on success */ int TskDbSqlite::createSavepoint(const char *name) { char buff[1024]; snprintf(buff, 1024, "SAVEPOINT %s", name); return attempt_exec(buff, "Error setting savepoint: %s\n"); } /** * Rollback to specified savepoint and release * @param name Name of savepoint * @returns 1 on error, 0 on success */ int TskDbSqlite::revertSavepoint(const char *name) { char buff[1024]; snprintf(buff, 1024, "ROLLBACK TO SAVEPOINT %s", name); if (attempt_exec(buff, "Error rolling back savepoint: %s\n")) return 1; return releaseSavepoint(name); } /** * Release a savepoint. Commits if savepoint was not rollbacked. * @param name Name of savepoint * @returns 1 on error, 0 on success */ int TskDbSqlite::releaseSavepoint(const char *name) { char buff[1024]; snprintf(buff, 1024, "RELEASE SAVEPOINT %s", name); return attempt_exec(buff, "Error releasing savepoint: %s\n"); } /** * Add file layout info to the database. This table stores the run information for each file so that we * can map which parts of an image are used by what files. * @param a_fileObjId ID of the file * @param a_byteStart Byte address relative to the start of the image file * @param a_byteLen Length of the run in bytes * @param a_sequence Sequence of this run in the file * @returns 1 on error */ int TskDbSqlite::addFileLayoutRange(int64_t a_fileObjId, uint64_t a_byteStart, uint64_t a_byteLen, int a_sequence) { char foo[1024]; snprintf(foo, 1024, "INSERT INTO tsk_file_layout(obj_id, byte_start, byte_len, sequence) VALUES (%lld, %llu, %llu, %d)", a_fileObjId, a_byteStart, a_byteLen, a_sequence); return attempt_exec(foo, "Error adding data to tsk_file_layout table: %s\n"); } /** * Add file layout info to the database. This table stores the run information for each file so that we * can map which parts of an image are used by what files. * @param fileLayoutRange TSK_DB_FILE_LAYOUT_RANGE object storing a single file layout range entry * @returns 1 on error */ int TskDbSqlite::addFileLayoutRange(const TSK_DB_FILE_LAYOUT_RANGE & fileLayoutRange) { return addFileLayoutRange(fileLayoutRange.fileObjId, fileLayoutRange.byteStart, fileLayoutRange.byteLen, fileLayoutRange.sequence); } /** * Adds entry for to tsk_files for a layout file into the database. * @param parObjId parent obj id in the database * @param fsObjId fs obj id in the database, or 0 if parent it not fs (NULL) * @param dbFileType type (unallocated, carved, unused) * @param fileName file name for the layout file * @param size Number of bytes in file * @param objId layout file Id (output) * @returns TSK_OK on success or TSK_ERR on error. 
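* Callers normally reach this through addFileWithLayoutRange() below, which generates
* the file name from the parent object id and the covered byte range, roughly
* (illustrative sketch, not part of the original source):
*
*   fileNameSs << "Unalloc" << "_" << parentObjId << "_" << ranges[0].byteStart
*              << "_" << (ranges.back().byteStart + ranges.back().byteLen);
*
* and then records one tsk_file_layout row per range for the newly created object.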
*/ TSK_RETVAL_ENUM TskDbSqlite::addLayoutFileInfo(const int64_t parObjId, const int64_t fsObjId, const TSK_DB_FILES_TYPE_ENUM dbFileType, const char *fileName, const uint64_t size, int64_t & objId) { char *zSQL; if (addObject(TSK_DB_OBJECT_TYPE_FILE, parObjId, objId)) return TSK_ERR; //fsObjId can be NULL char *fsObjIdStrPtr = NULL; char fsObjIdStr[32]; if (fsObjId != 0) { snprintf(fsObjIdStr, 32, "%"PRIu64, fsObjId); fsObjIdStrPtr = fsObjIdStr; } zSQL = sqlite3_mprintf( "INSERT INTO tsk_files (has_layout, fs_obj_id, obj_id, type, attr_type, attr_id, name, meta_addr, meta_seq, dir_type, meta_type, dir_flags, meta_flags, size, crtime, ctime, atime, mtime, mode, gid, uid) " "VALUES (" "1, %Q, %lld," "%d," "NULL,NULL,'%q'," "NULL,NULL," "%d,%d,%d,%d," "%" PRIuOFF "," "NULL,NULL,NULL,NULL,NULL,NULL,NULL)", fsObjIdStrPtr, objId, dbFileType, fileName, TSK_FS_NAME_TYPE_REG, TSK_FS_META_TYPE_REG, TSK_FS_NAME_FLAG_UNALLOC, TSK_FS_META_FLAG_UNALLOC, size); if (attempt_exec(zSQL, "TskDbSqlite::addLayoutFileInfo: Error adding data to tsk_files table: %s\n")) { sqlite3_free(zSQL); return TSK_ERR; } sqlite3_free(zSQL); return TSK_OK; } /** * Returns true if database is opened. */ bool TskDbSqlite::dbExist() const { if (m_db) return true; else return false; } bool TskDbSqlite::inTransaction() { return (sqlite3_get_autocommit(m_db) == 0); } /** * Adds information about a unallocated file with layout ranges into the database. * Adds a single entry to tsk_files table with an auto-generated file name, tsk_objects table, and one or more entries to tsk_file_layout table * @param parentObjId Id of the parent object in the database (fs, volume, or image) * @param fsObjId parent fs, or NULL if the file is not associated with fs * @param size Number of bytes in file * @param ranges vector containing one or more TSK_DB_FILE_LAYOUT_RANGE layout ranges (in) * @param objId object id of the file object created (output) * @returns TSK_OK on success or TSK_ERR on error. */ TSK_RETVAL_ENUM TskDbSqlite::addUnallocBlockFile(const int64_t parentObjId, const int64_t fsObjId, const uint64_t size, vector & ranges, int64_t & objId) { return addFileWithLayoutRange(TSK_DB_FILES_TYPE_UNALLOC_BLOCKS, parentObjId, fsObjId, size, ranges, objId); } /** * Adds information about a unused file with layout ranges into the database. * Adds a single entry to tsk_files table with an auto-generated file name, tsk_objects table, and one or more entries to tsk_file_layout table * @param parentObjId Id of the parent object in the database (fs, volume, or image) * @param fsObjId parent fs, or NULL if the file is not associated with fs * @param size Number of bytes in file * @param ranges vector containing one or more TSK_DB_FILE_LAYOUT_RANGE layout ranges (in) * @param objId object id of the file object created (output) * @returns TSK_OK on success or TSK_ERR on error. */ TSK_RETVAL_ENUM TskDbSqlite::addUnusedBlockFile(const int64_t parentObjId, const int64_t fsObjId, const uint64_t size, vector & ranges, int64_t & objId) { return addFileWithLayoutRange(TSK_DB_FILES_TYPE_UNUSED_BLOCKS, parentObjId, fsObjId, size, ranges, objId); } /** * Adds information about a carved file with layout ranges into the database. 
* Adds a single entry to tsk_files table with an auto-generated file name, tsk_objects table, and one or more entries to tsk_file_layout table * @param parentObjId Id of the parent object in the database (fs, volume, or image) * @param fsObjId fs id associated with the file, or NULL * @param size Number of bytes in file * @param ranges vector containing one or more TSK_DB_FILE_LAYOUT_RANGE layout ranges (in) * @param objId object id of the file object created (output) * @returns TSK_OK on success or TSK_ERR on error. */ TSK_RETVAL_ENUM TskDbSqlite::addCarvedFile(const int64_t parentObjId, const int64_t fsObjId, const uint64_t size, vector & ranges, int64_t & objId) { return addFileWithLayoutRange(TSK_DB_FILES_TYPE_CARVED, parentObjId, fsObjId, size, ranges, objId); } //internal function object to check for range overlap typedef struct _checkFileLayoutRangeOverlap{ const vector & ranges; bool hasOverlap; _checkFileLayoutRangeOverlap(const vector & ranges) : ranges(ranges),hasOverlap(false) {} bool getHasOverlap() const { return hasOverlap; } void operator() (const TSK_DB_FILE_LAYOUT_RANGE & range) { if (hasOverlap) return; //no need to check other uint64_t start = range.byteStart; uint64_t end = start + range.byteLen; vector::const_iterator it; for (it = ranges.begin(); it != ranges.end(); ++it) { const TSK_DB_FILE_LAYOUT_RANGE * otherRange = &(*it); if (&range == otherRange) continue; //skip, it's the same range uint64_t otherStart = otherRange->byteStart; uint64_t otherEnd = otherStart + otherRange->byteLen; if (start <= otherEnd && end >= otherStart) { hasOverlap = true; break; } } } } checkFileLayoutRangeOverlap; /** * Add virtual dir of type TSK_DB_FILES_TYPE_VIRTUAL_DIR * that can be a parent of other non-fs virtual files or directories, to organize them * @param fsObjId (in) file system object id to associate with the virtual directory. * @param parentDirId (in) parent dir object id of the new directory: either another virtual directory or root fs directory * @param name name (int) of the new virtual directory * @param objId (out) object id of the created virtual directory object * @returns TSK_ERR on error or TSK_OK on success */ TSK_RETVAL_ENUM TskDbSqlite::addVirtualDir(const int64_t fsObjId, const int64_t parentDirId, const char * const name, int64_t & objId) { char *zSQL; if (addObject(TSK_DB_OBJECT_TYPE_FILE, parentDirId, objId)) return TSK_ERR; zSQL = sqlite3_mprintf( "INSERT INTO tsk_files (attr_type, attr_id, has_layout, fs_obj_id, obj_id, type, attr_type, " "attr_id, name, meta_addr, meta_seq, dir_type, meta_type, dir_flags, meta_flags, size, " "crtime, ctime, atime, mtime, mode, gid, uid, known, parent_path) " "VALUES (" "NULL, NULL," "NULL," "%lld," "%lld," "%d," "NULL,NULL,'%q'," "NULL,NULL," "%d,%d,%d,%d," "0," "NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,'/')", fsObjId, objId, TSK_DB_FILES_TYPE_VIRTUAL_DIR, name, TSK_FS_NAME_TYPE_DIR, TSK_FS_META_TYPE_DIR, TSK_FS_NAME_FLAG_ALLOC, (TSK_FS_META_FLAG_ALLOC | TSK_FS_META_FLAG_USED)); if (attempt_exec(zSQL, "Error adding data to tsk_files table: %s\n")) { sqlite3_free(zSQL); return TSK_ERR; } sqlite3_free(zSQL); return TSK_OK; } /** * Internal helper method to add a virtual root dir, a parent dir of files representing unalloc space within fs. * The dir has is associated with its root dir parent for the fs. 
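* In other words, each file system gets a virtual "$Unalloc" directory hanging off its
* root directory; conceptually (illustrative sketch, not part of the original source):
*
*   TSK_DB_OBJECT rootDir;
*   getFsRootDirObjectInfo(fsObjId, rootDir);                  // locate the fs root dir
*   addVirtualDir(fsObjId, rootDir.objId, "$Unalloc", objId);  // parent the new dir there
*
* The unallocated-space layout files created later are then added as children of objId.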
* @param fsObjId (in) fs id to find root dir for and create $Unalloc dir for * @param objId (out) object id of the $Unalloc dir created * @returns TSK_ERR on error or TSK_OK on success */ TSK_RETVAL_ENUM TskDbSqlite::addUnallocFsBlockFilesParent(const int64_t fsObjId, int64_t & objId) { const char * const unallocDirName = "$Unalloc"; //get root dir TSK_DB_OBJECT rootDirObjInfo; if (getFsRootDirObjectInfo(fsObjId, rootDirObjInfo) == TSK_ERR) { return TSK_ERR; } return addVirtualDir(fsObjId, rootDirObjInfo.objId, unallocDirName, objId); } /** * Internal helper method to add unalloc, unused and carved files with layout ranges to db * Generates file_name and populates tsk_files, tsk_objects and tsk_file_layout tables * @returns TSK_ERR on error or TSK_OK on success */ TSK_RETVAL_ENUM TskDbSqlite::addFileWithLayoutRange(const TSK_DB_FILES_TYPE_ENUM dbFileType, const int64_t parentObjId, const int64_t fsObjId, const uint64_t size, vector & ranges, int64_t & objId) { const size_t numRanges = ranges.size(); if (numRanges < 1) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_AUTO_DB); tsk_error_set_errstr("Error addFileWithLayoutRange() - no ranges present"); return TSK_ERR; } stringstream fileNameSs; switch (dbFileType) { case TSK_DB_FILES_TYPE_UNALLOC_BLOCKS: fileNameSs << "Unalloc"; break; case TSK_DB_FILES_TYPE_UNUSED_BLOCKS: fileNameSs << "Unused"; break; case TSK_DB_FILES_TYPE_CARVED: fileNameSs << "Carved"; break; default: stringstream sserr; tsk_error_reset(); tsk_error_set_errno(TSK_ERR_AUTO_DB); sserr << "Error addFileWithLayoutRange() - unsupported file type for file layout range: "; sserr << (int) dbFileType; tsk_error_set_errstr("%s", sserr.str().c_str()); return TSK_ERR; } //ensure layout ranges are sorted (to generate file name and to be inserted in sequence order) sort(ranges.begin(), ranges.end()); //dome some checking //ensure there is no overlap and each range has unique byte range const checkFileLayoutRangeOverlap & overlapRes = for_each(ranges.begin(), ranges.end(), checkFileLayoutRangeOverlap(ranges)); if (overlapRes.getHasOverlap() ) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_AUTO_DB); tsk_error_set_errstr("Error addFileWithLayoutRange() - overlap detected between ranges"); return TSK_ERR; } //construct filename with parent obj id, start byte of first range, end byte of last range fileNameSs << "_" << parentObjId << "_" << ranges[0].byteStart; fileNameSs << "_" << (ranges[numRanges-1].byteStart + ranges[numRanges-1].byteLen); //insert into tsk files and tsk objects if (addLayoutFileInfo(parentObjId, fsObjId, dbFileType, fileNameSs.str().c_str(), size, objId) ) { return TSK_ERR; } //fill in fileObjId and insert ranges for (vector::iterator it = ranges.begin(); it != ranges.end(); ++it) { TSK_DB_FILE_LAYOUT_RANGE & range = *it; range.fileObjId = objId; if (this->addFileLayoutRange(range) ) { return TSK_ERR; } } return TSK_OK; } /** * Query tsk_file_layout and return rows for every entry in tsk_file_layout table * @param fileLayouts (out) TSK_DB_FILE_LAYOUT_RANGE row representations to return * @returns TSK_ERR on error, TSK_OK on success */ TSK_RETVAL_ENUM TskDbSqlite::getFileLayouts(vector & fileLayouts) { sqlite3_stmt * fileLayoutsStatement = NULL; if (prepare_stmt("SELECT obj_id, byte_start, byte_len, sequence FROM tsk_file_layout", &fileLayoutsStatement) ) { return TSK_ERR; } //get rows TSK_DB_FILE_LAYOUT_RANGE rowData; while (sqlite3_step(fileLayoutsStatement) == SQLITE_ROW) { rowData.fileObjId = sqlite3_column_int64(fileLayoutsStatement, 0); rowData.byteStart = 
sqlite3_column_int64(fileLayoutsStatement, 1); rowData.byteLen = sqlite3_column_int64(fileLayoutsStatement, 2); rowData.sequence = sqlite3_column_int(fileLayoutsStatement, 3); //insert a copy of the rowData fileLayouts.push_back(rowData); } //cleanup if (fileLayoutsStatement != NULL) { sqlite3_finalize(fileLayoutsStatement); fileLayoutsStatement = NULL; } return TSK_OK; } ostream& operator <<(ostream &os,const TSK_DB_FILE_LAYOUT_RANGE &layoutRange) { os << layoutRange.fileObjId << "," << layoutRange.byteStart << "," << layoutRange.byteLen << "," << layoutRange.sequence; os << std::endl; return os; } ostream& operator <<(ostream &os,const TSK_DB_FS_INFO &fsInfo) { os << fsInfo.objId << "," << fsInfo.imgOffset << "," << (int)fsInfo.fType << "," << fsInfo.block_size << "," << fsInfo.block_count << "," << fsInfo.root_inum << "," << fsInfo.first_inum << "," << fsInfo.last_inum; os << std::endl; return os; } ostream& operator <<(ostream &os,const TSK_DB_VS_INFO &vsInfo) { os << vsInfo.objId << "," << (int)vsInfo.vstype << "," << vsInfo.offset << "," << vsInfo.block_size; os << std::endl; return os; } ostream& operator <<(ostream &os,const TSK_DB_VS_PART_INFO &vsPartInfo) { os << vsPartInfo.objId << "," << vsPartInfo.addr << "," << vsPartInfo.start << "," << vsPartInfo.len << "," << vsPartInfo.desc << "," << (int)vsPartInfo.flags; os << std::endl; return os; } ostream& operator <<(ostream &os,const TSK_DB_OBJECT &dbObject) { os << dbObject.objId << "," << dbObject.parObjId << "," << (int)dbObject.type; os << std::endl; return os; } /** * Query tsk_fs_info and return rows for every entry in tsk_fs_info table * @param imgId the object id of the image to get filesystems for * @param fsInfos (out) TSK_DB_FS_INFO row representations to return * @returns TSK_ERR on error, TSK_OK on success */ TSK_RETVAL_ENUM TskDbSqlite::getFsInfos(int64_t imgId, vector & fsInfos) { sqlite3_stmt * fsInfosStatement = NULL; if (prepare_stmt("SELECT obj_id, img_offset, fs_type, block_size, block_count, root_inum, first_inum, last_inum FROM tsk_fs_info", &fsInfosStatement) ) { return TSK_ERR; } //get rows TSK_DB_FS_INFO rowData; while (sqlite3_step(fsInfosStatement) == SQLITE_ROW) { int64_t fsObjId = sqlite3_column_int64(fsInfosStatement, 0); //ensure fs is (sub)child of the image requested, if not, skip it int64_t curImgId = 0; if (getParentImageId(fsObjId, curImgId) == TSK_ERR) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_AUTO_DB); tsk_error_set_errstr("Error finding parent for: %"PRIu64, fsObjId); return TSK_ERR; } if (imgId != curImgId) { continue; } rowData.objId = fsObjId; rowData.imgOffset = sqlite3_column_int64(fsInfosStatement, 1); rowData.fType = (TSK_FS_TYPE_ENUM)sqlite3_column_int(fsInfosStatement, 2); rowData.block_size = sqlite3_column_int(fsInfosStatement, 3); rowData.block_count = sqlite3_column_int64(fsInfosStatement, 4); rowData.root_inum = sqlite3_column_int64(fsInfosStatement, 5); rowData.first_inum = sqlite3_column_int64(fsInfosStatement, 6); rowData.last_inum = sqlite3_column_int64(fsInfosStatement, 7); //insert a copy of the rowData fsInfos.push_back(rowData); } //cleanup if (fsInfosStatement != NULL) { sqlite3_finalize(fsInfosStatement); fsInfosStatement = NULL; } return TSK_OK; } /** * Query tsk_vs_info and return rows for every entry in tsk_vs_info table * @param imgId the object id of the image to get volumesystems for * @param vsInfos (out) TSK_DB_VS_INFO row representations to return * @returns TSK_ERR on error, TSK_OK on success */ TSK_RETVAL_ENUM TskDbSqlite::getVsInfos(int64_t imgId, 
vector & vsInfos) { sqlite3_stmt * vsInfosStatement = NULL; if (prepare_stmt("SELECT obj_id, vs_type, img_offset, block_size FROM tsk_vs_info", &vsInfosStatement) ) { return TSK_ERR; } //get rows TSK_DB_VS_INFO rowData; while (sqlite3_step(vsInfosStatement) == SQLITE_ROW) { int64_t vsObjId = sqlite3_column_int64(vsInfosStatement, 0); int64_t curImgId = 0; if (getParentImageId(vsObjId, curImgId) == TSK_ERR) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_AUTO_DB); tsk_error_set_errstr("Error finding parent for: %"PRIu64, vsObjId); return TSK_ERR; } if (imgId != curImgId ) { //ensure vs is (sub)child of the image requested, if not, skip it continue; } rowData.objId = vsObjId; rowData.vstype = (TSK_VS_TYPE_ENUM)sqlite3_column_int(vsInfosStatement, 1); rowData.offset = sqlite3_column_int64(vsInfosStatement, 2); rowData.block_size = sqlite3_column_int(vsInfosStatement, 3); //insert a copy of the rowData vsInfos.push_back(rowData); } //cleanup if (vsInfosStatement != NULL) { sqlite3_finalize(vsInfosStatement); vsInfosStatement = NULL; } return TSK_OK; } /** * Query tsk_vs_part and return rows for every entry in tsk_vs_part table * @param imgId the object id of the image to get vs parts for * @param vsPartInfos (out) TSK_DB_VS_PART_INFO row representations to return * @returns TSK_ERR on error, TSK_OK on success */ TSK_RETVAL_ENUM TskDbSqlite::getVsPartInfos(int64_t imgId, vector & vsPartInfos) { sqlite3_stmt * vsPartInfosStatement = NULL; if (prepare_stmt("SELECT obj_id, addr, start, length, desc, flags FROM tsk_vs_parts", &vsPartInfosStatement) ) { return TSK_ERR; } //get rows TSK_DB_VS_PART_INFO rowData; while (sqlite3_step(vsPartInfosStatement) == SQLITE_ROW) { int64_t vsPartObjId = sqlite3_column_int64(vsPartInfosStatement, 0); int64_t curImgId = 0; if (getParentImageId(vsPartObjId, curImgId) == TSK_ERR) { tsk_error_reset(); tsk_error_set_errno(TSK_ERR_AUTO_DB); tsk_error_set_errstr("Error finding parent for: %"PRIu64, vsPartObjId); return TSK_ERR; } if (imgId != curImgId) { //ensure vs is (sub)child of the image requested, if not, skip it continue; } rowData.objId = vsPartObjId; rowData.addr = sqlite3_column_int(vsPartInfosStatement, 1); rowData.start = sqlite3_column_int64(vsPartInfosStatement, 2); rowData.len = sqlite3_column_int64(vsPartInfosStatement, 3); const unsigned char * text = sqlite3_column_text(vsPartInfosStatement, 4); size_t textLen = sqlite3_column_bytes(vsPartInfosStatement, 4); const size_t copyChars = textLen < TSK_MAX_DB_VS_PART_INFO_DESC_LEN-1?textLen:TSK_MAX_DB_VS_PART_INFO_DESC_LEN-1; strncpy (rowData.desc,(char*)text,copyChars); rowData.desc[copyChars] = '\0'; rowData.flags = (TSK_VS_PART_FLAG_ENUM)sqlite3_column_int(vsPartInfosStatement, 5); //insert a copy of the rowData vsPartInfos.push_back(rowData); } //cleanup if (vsPartInfosStatement != NULL) { sqlite3_finalize(vsPartInfosStatement); vsPartInfosStatement = NULL; } return TSK_OK; } /** * Query tsk_objects with given id and returns object info entry * @param objId object id to query * @param objectInfo (out) TSK_DB_OBJECT entry representation to return * @returns TSK_ERR on error (or if not found), TSK_OK on success */ TSK_RETVAL_ENUM TskDbSqlite::getObjectInfo(int64_t objId, TSK_DB_OBJECT & objectInfo) { sqlite3_stmt * objectsStatement = NULL; if (prepare_stmt("SELECT obj_id, par_obj_id, type FROM tsk_objects WHERE obj_id IS ?", &objectsStatement) ) { return TSK_ERR; } if (attempt(sqlite3_bind_int64(objectsStatement, 1, objId), "TskDbSqlite::getObjectInfo: Error binding objId to statment: %s (result code 
%d)\n") || attempt(sqlite3_step(objectsStatement), SQLITE_ROW, "TskDbSqlite::getObjectInfo: Error selecting object by objid: %s (result code %d)\n")) { sqlite3_finalize(objectsStatement); return TSK_ERR; } objectInfo.objId = sqlite3_column_int64(objectsStatement, 0); objectInfo.parObjId = sqlite3_column_int64(objectsStatement, 1); objectInfo.type = (TSK_DB_OBJECT_TYPE_ENUM) sqlite3_column_int(objectsStatement, 2); //cleanup if (objectsStatement != NULL) { sqlite3_finalize(objectsStatement); objectsStatement = NULL; } return TSK_OK; } /** * Query tsk_vs_info with given id and returns TSK_DB_VS_INFO info entry * @param objId vs id to query * @param vsInfo (out) TSK_DB_VS_INFO entry representation to return * @returns TSK_ERR on error (or if not found), TSK_OK on success */ TSK_RETVAL_ENUM TskDbSqlite::getVsInfo(int64_t objId, TSK_DB_VS_INFO & vsInfo) { sqlite3_stmt * vsInfoStatement = NULL; if (prepare_stmt("SELECT obj_id, vs_type, img_offset, block_size FROM tsk_vs_info WHERE obj_id IS ?", &vsInfoStatement) ) { return TSK_ERR; } if (attempt(sqlite3_bind_int64(vsInfoStatement, 1, objId), "TskDbSqlite::getVsInfo: Error binding objId to statment: %s (result code %d)\n") || attempt(sqlite3_step(vsInfoStatement), SQLITE_ROW, "TskDbSqlite::getVsInfo: Error selecting object by objid: %s (result code %d)\n")) { sqlite3_finalize(vsInfoStatement); return TSK_ERR; } vsInfo.objId = sqlite3_column_int64(vsInfoStatement, 0); vsInfo.vstype = (TSK_VS_TYPE_ENUM)sqlite3_column_int(vsInfoStatement, 1); vsInfo.offset = sqlite3_column_int64(vsInfoStatement, 2); vsInfo.block_size = sqlite3_column_int(vsInfoStatement, 3); //cleanup if (vsInfoStatement != NULL) { sqlite3_finalize(vsInfoStatement); vsInfoStatement = NULL; } return TSK_OK; } /** * Query tsk_objects to find the root image id for the object * @param objId (in) object id to query * @param imageId (out) root parent image id returned * @returns TSK_ERR on error (or if not found), TSK_OK on success */ TSK_RETVAL_ENUM TskDbSqlite::getParentImageId (const int64_t objId, int64_t & imageId) { TSK_DB_OBJECT objectInfo; TSK_RETVAL_ENUM ret = TSK_ERR; int64_t queryObjectId = objId; while (getObjectInfo(queryObjectId, objectInfo) == TSK_OK) { if (objectInfo.parObjId == 0) { //found root image imageId = objectInfo.objId; ret = TSK_OK; break; } else { //advance queryObjectId = objectInfo.parObjId; } } return ret; } /** * Query tsk_objects and tsk_files given file system id and return the root directory object * @param fsObjId (int) file system id to query root dir object for * @param rootDirObjInfo (out) TSK_DB_OBJECT root dir entry representation to return * @returns TSK_ERR on error (or if not found), TSK_OK on success */ TSK_RETVAL_ENUM TskDbSqlite::getFsRootDirObjectInfo(const int64_t fsObjId, TSK_DB_OBJECT & rootDirObjInfo) { sqlite3_stmt * rootDirInfoStatement = NULL; if (prepare_stmt("SELECT tsk_objects.obj_id,tsk_objects.par_obj_id,tsk_objects.type " "FROM tsk_objects,tsk_files WHERE tsk_objects.par_obj_id IS ? 
" "AND tsk_files.obj_id = tsk_objects.obj_id AND tsk_files.name = ''", &rootDirInfoStatement) ) { return TSK_ERR; } if (attempt(sqlite3_bind_int64(rootDirInfoStatement, 1, fsObjId), "TskDbSqlite::getFsRootDirObjectInfo: Error binding objId to statment: %s (result code %d)\n") || attempt(sqlite3_step(rootDirInfoStatement), SQLITE_ROW, "TskDbSqlite::getFsRootDirObjectInfo: Error selecting object by objid: %s (result code %d)\n")) { sqlite3_finalize(rootDirInfoStatement); return TSK_ERR; } rootDirObjInfo.objId = sqlite3_column_int64(rootDirInfoStatement, 0); rootDirObjInfo.parObjId = sqlite3_column_int64(rootDirInfoStatement, 1); rootDirObjInfo.type = (TSK_DB_OBJECT_TYPE_ENUM)sqlite3_column_int(rootDirInfoStatement, 2); //cleanup if (rootDirInfoStatement != NULL) { sqlite3_finalize(rootDirInfoStatement); rootDirInfoStatement = NULL; } return TSK_OK; } sleuthkit-4.2.0/tsk/auto/Makefile.am000644 000765 000024 00000000653 12576320700 020136 0ustar00carrierstaff000000 000000 AM_CPPFLAGS = -I../.. -I$(srcdir)/../.. -Wall -Wmultichar -Wstrict-null-sentinel -Woverloaded-virtual -Wsign-promo EXTRA_DIST = .indent.pro noinst_LTLIBRARIES = libtskauto.la # Note that the .h files are in the top-level Makefile libtskauto_la_SOURCES = auto.cpp tsk_auto_i.h auto_db.cpp sqlite3.c sqlite3.h db_sqlite.cpp tsk_db_sqlite.h case_db.cpp tsk_case_db.h indent: indent *.cpp *.h clean-local: -rm -f *.cpp~ *.h~ sleuthkit-4.2.0/tsk/auto/Makefile.in000644 000765 000024 00000051041 12576326300 020146 0ustar00carrierstaff000000 000000 # Makefile.in generated by automake 1.15 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2014 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = { \ if test -z '$(MAKELEVEL)'; then \ false; \ elif test -n '$(MAKE_HOST)'; then \ true; \ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ true; \ else \ false; \ fi; \ } am__make_running_with_option = \ case $${target_option-} in \ ?) 
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) pkgdatadir = $(datadir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ pkglibexecdir = $(libexecdir)/@PACKAGE@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = tsk/auto ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ax_pthread.m4 \ $(top_srcdir)/m4/cppunit.m4 \ $(top_srcdir)/m4/ax_jni_include_dir.m4 \ $(top_srcdir)/m4/ac_prog_javac_works.m4 \ $(top_srcdir)/m4/ac_prog_javac.m4 \ $(top_srcdir)/m4/ac_prog_java_works.m4 \ $(top_srcdir)/m4/ac_prog_java.m4 $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/tsk/tsk_config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) libtskauto_la_LIBADD = am_libtskauto_la_OBJECTS = auto.lo auto_db.lo sqlite3.lo db_sqlite.lo \ case_db.lo libtskauto_la_OBJECTS = $(am_libtskauto_la_OBJECTS) AM_V_lt = $(am__v_lt_@AM_V@) am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) am__v_lt_0 = --silent am__v_lt_1 = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir)/tsk depcomp = $(SHELL) $(top_srcdir)/config/depcomp am__depfiles_maybe = depfiles am__mv = mv -f COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CFLAGS) $(CFLAGS) AM_V_CC = $(am__v_CC_@AM_V@) am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@) am__v_CC_0 = @echo " CC " $@; am__v_CC_1 = CCLD = $(CC) LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC 
$(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CCLD = $(am__v_CCLD_@AM_V@) am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@) am__v_CCLD_0 = @echo " CCLD " $@; am__v_CCLD_1 = CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CXXFLAGS) $(CXXFLAGS) AM_V_CXX = $(am__v_CXX_@AM_V@) am__v_CXX_ = $(am__v_CXX_@AM_DEFAULT_V@) am__v_CXX_0 = @echo " CXX " $@; am__v_CXX_1 = CXXLD = $(CXX) CXXLINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CXXLD = $(am__v_CXXLD_@AM_V@) am__v_CXXLD_ = $(am__v_CXXLD_@AM_DEFAULT_V@) am__v_CXXLD_0 = @echo " CXXLD " $@; am__v_CXXLD_1 = SOURCES = $(libtskauto_la_SOURCES) DIST_SOURCES = $(libtskauto_la_SOURCES) am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/config/depcomp DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) ACLOCAL = @ACLOCAL@ ALLOCA = @ALLOCA@ AMTAR = @AMTAR@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ ANT_FOUND = @ANT_FOUND@ AR = @AR@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CPPUNIT_CFLAGS = @CPPUNIT_CFLAGS@ CPPUNIT_CONFIG = @CPPUNIT_CONFIG@ CPPUNIT_LIBS = @CPPUNIT_LIBS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ JAVA = @JAVA@ JAVAC = @JAVAC@ JNI_CPPFLAGS = @JNI_CPPFLAGS@ LD = @LD@ LDFLAGS = @LDFLAGS@ LIBOBJS = @LIBOBJS@ LIBS = @LIBS@ LIBTOOL = @LIBTOOL@ LIBTSK_LDFLAGS = @LIBTSK_LDFLAGS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBOBJS = @LTLIBOBJS@ LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ MAINT = @MAINT@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ NM = @NM@ NMEDIT = @NMEDIT@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = 
@PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PERL = @PERL@ PTHREAD_CC = @PTHREAD_CC@ PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ PTHREAD_LIBS = @PTHREAD_LIBS@ RANLIB = @RANLIB@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ STRIP = @STRIP@ VERSION = @VERSION@ Z_PATH = @Z_PATH@ _ACJNI_JAVAC = @_ACJNI_JAVAC@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ ax_pthread_config = @ax_pthread_config@ bindir = @bindir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ datadir = @datadir@ datarootdir = @datarootdir@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ prefix = @prefix@ program_transform_name = @program_transform_name@ psdir = @psdir@ sbindir = @sbindir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ uudecode = @uudecode@ AM_CPPFLAGS = -I../.. -I$(srcdir)/../.. -Wall -Wmultichar -Wstrict-null-sentinel -Woverloaded-virtual -Wsign-promo EXTRA_DIST = .indent.pro noinst_LTLIBRARIES = libtskauto.la # Note that the .h files are in the top-level Makefile libtskauto_la_SOURCES = auto.cpp tsk_auto_i.h auto_db.cpp sqlite3.c sqlite3.h db_sqlite.cpp tsk_db_sqlite.h case_db.cpp tsk_case_db.h all: all-am .SUFFIXES: .SUFFIXES: .c .cpp .lo .o .obj $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign tsk/auto/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign tsk/auto/Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstLTLIBRARIES: -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) @list='$(noinst_LTLIBRARIES)'; \ locs=`for p in $$list; do echo $$p; done | \ sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \ sort -u`; \ test -z "$$locs" || { \ echo rm -f $${locs}; \ rm -f $${locs}; \ } libtskauto.la: $(libtskauto_la_OBJECTS) $(libtskauto_la_DEPENDENCIES) $(EXTRA_libtskauto_la_DEPENDENCIES) $(AM_V_CXXLD)$(CXXLINK) $(libtskauto_la_OBJECTS) $(libtskauto_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/auto.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/auto_db.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/case_db.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/db_sqlite.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sqlite3.Plo@am__quote@ .c.o: @am__fastdepCC_TRUE@ $(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ $< .c.obj: @am__fastdepCC_TRUE@ $(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .c.lo: @am__fastdepCC_TRUE@ $(AM_V_CC)$(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LTCOMPILE) -c -o $@ $< .cpp.o: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo 
$(DEPDIR)/$*.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(AM_V_CXX)$(LTCXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @am__fastdepCXX_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LTCXXCOMPILE) -c -o $@ $< mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-am TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-am CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-am cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(LTLIBRARIES) installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool clean-local \ clean-noinstLTLIBRARIES mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: CTAGS GTAGS TAGS all all-am check check-am clean clean-generic \ clean-libtool clean-local clean-noinstLTLIBRARIES \ cscopelist-am ctags ctags-am distclean distclean-compile \ distclean-generic distclean-libtool distclean-tags distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-pdf \ install-pdf-am install-ps install-ps-am install-strip \ installcheck installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags tags-am uninstall uninstall-am .PRECIOUS: Makefile indent: indent *.cpp *.h clean-local: -rm -f *.cpp~ *.h~ # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: sleuthkit-4.2.0/tsk/auto/sqlite3.c000644 000765 000024 00025733752 12576320700 017656 0ustar00carrierstaff000000 000000 /****************************************************************************** ** This file is an amalgamation of many separate C source files from SQLite ** version 3.8.11.1. 
By combining all the individual C code files into this ** single large file, the entire code can be compiled as a single translation ** unit. This allows many compilers to do optimizations that would not be ** possible if the files were compiled separately. Performance improvements ** of 5% or more are commonly seen when SQLite is compiled as a single ** translation unit. ** ** This file is all you need to compile SQLite. To use SQLite in other ** programs, you need this file and the "sqlite3.h" header file that defines ** the programming interface to the SQLite library. (If you do not have ** the "sqlite3.h" header file at hand, you will find a copy embedded within ** the text of this file. Search for "Begin file sqlite3.h" to find the start ** of the embedded sqlite3.h header file.) Additional code files may be needed ** if you want a wrapper to interface SQLite with your choice of programming ** language. The code for the "sqlite3" command-line shell is also in a ** separate file. This file contains only code for the core SQLite library. */ #define SQLITE_CORE 1 #define SQLITE_AMALGAMATION 1 #ifndef SQLITE_PRIVATE # define SQLITE_PRIVATE static #endif /************** Begin file sqliteInt.h ***************************************/ /* ** 2001 September 15 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** Internal interface definitions for SQLite. ** */ #ifndef _SQLITEINT_H_ #define _SQLITEINT_H_ /* ** Include the header file used to customize the compiler options for MSVC. ** This should be done first so that it can successfully prevent spurious ** compiler warnings due to subsequent content in this file and other files ** that are included by this file. */ /************** Include msvc.h in the middle of sqliteInt.h ******************/ /************** Begin file msvc.h ********************************************/ /* ** 2015 January 12 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ****************************************************************************** ** ** This file contains code that is specific to MSVC. 
*/ #ifndef _MSVC_H_ #define _MSVC_H_ #if defined(_MSC_VER) #pragma warning(disable : 4054) #pragma warning(disable : 4055) #pragma warning(disable : 4100) #pragma warning(disable : 4127) #pragma warning(disable : 4130) #pragma warning(disable : 4152) #pragma warning(disable : 4189) #pragma warning(disable : 4206) #pragma warning(disable : 4210) #pragma warning(disable : 4232) #pragma warning(disable : 4244) #pragma warning(disable : 4305) #pragma warning(disable : 4306) #pragma warning(disable : 4702) #pragma warning(disable : 4706) #endif /* defined(_MSC_VER) */ #endif /* _MSVC_H_ */ /************** End of msvc.h ************************************************/ /************** Continuing where we left off in sqliteInt.h ******************/ /* ** Special setup for VxWorks */ /************** Include vxworks.h in the middle of sqliteInt.h ***************/ /************** Begin file vxworks.h *****************************************/ /* ** 2015-03-02 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ****************************************************************************** ** ** This file contains code that is specific to Wind River's VxWorks */ #if defined(__RTP__) || defined(_WRS_KERNEL) /* This is VxWorks. Set up things specially for that OS */ #include #include /* amalgamator: dontcache */ #define OS_VXWORKS 1 #define SQLITE_OS_OTHER 0 #define SQLITE_HOMEGROWN_RECURSIVE_MUTEX 1 #define SQLITE_OMIT_LOAD_EXTENSION 1 #define SQLITE_ENABLE_LOCKING_STYLE 0 #define HAVE_UTIME 1 #else /* This is not VxWorks. */ #define OS_VXWORKS 0 #endif /* defined(_WRS_KERNEL) */ /************** End of vxworks.h *********************************************/ /************** Continuing where we left off in sqliteInt.h ******************/ /* ** These #defines should enable >2GB file support on POSIX if the ** underlying operating system supports it. If the OS lacks ** large file support, or if the OS is windows, these should be no-ops. ** ** Ticket #2739: The _LARGEFILE_SOURCE macro must appear before any ** system #includes. Hence, this block of code must be the very first ** code in all source files. ** ** Large file support can be disabled using the -DSQLITE_DISABLE_LFS switch ** on the compiler command line. This is necessary if you are compiling ** on a recent machine (ex: Red Hat 7.2) but you want your code to work ** on an older machine (ex: Red Hat 6.0). If you compile on Red Hat 7.2 ** without this option, LFS is enable. But LFS does not exist in the kernel ** in Red Hat 6.0, so the code won't work. Hence, for maximum binary ** portability you should omit LFS. ** ** The previous paragraph was written in 2005. (This paragraph is written ** on 2008-11-28.) These days, all Linux kernels support large files, so ** you should probably leave LFS enabled. But some embedded platforms might ** lack LFS in which case the SQLITE_DISABLE_LFS macro might still be useful. ** ** Similar is true for Mac OS X. LFS is only supported on Mac OS X 9 and later. */ #ifndef SQLITE_DISABLE_LFS # define _LARGE_FILE 1 # ifndef _FILE_OFFSET_BITS # define _FILE_OFFSET_BITS 64 # endif # define _LARGEFILE_SOURCE 1 #endif /* What version of GCC is being used. 
0 means GCC is not being used */ #ifdef __GNUC__ # define GCC_VERSION (__GNUC__*1000000+__GNUC_MINOR__*1000+__GNUC_PATCHLEVEL__) #else # define GCC_VERSION 0 #endif /* Needed for various definitions... */ #if defined(__GNUC__) && !defined(_GNU_SOURCE) # define _GNU_SOURCE #endif #if defined(__OpenBSD__) && !defined(_BSD_SOURCE) # define _BSD_SOURCE #endif /* ** For MinGW, check to see if we can include the header file containing its ** version information, among other things. Normally, this internal MinGW ** header file would [only] be included automatically by other MinGW header ** files; however, the contained version information is now required by this ** header file to work around binary compatibility issues (see below) and ** this is the only known way to reliably obtain it. This entire #if block ** would be completely unnecessary if there was any other way of detecting ** MinGW via their preprocessor (e.g. if they customized their GCC to define ** some MinGW-specific macros). When compiling for MinGW, either the ** _HAVE_MINGW_H or _HAVE__MINGW_H (note the extra underscore) macro must be ** defined; otherwise, detection of conditions specific to MinGW will be ** disabled. */ #if defined(_HAVE_MINGW_H) # include "mingw.h" #elif defined(_HAVE__MINGW_H) # include "_mingw.h" #endif /* ** For MinGW version 4.x (and higher), check to see if the _USE_32BIT_TIME_T ** define is required to maintain binary compatibility with the MSVC runtime ** library in use (e.g. for Windows XP). */ #if !defined(_USE_32BIT_TIME_T) && !defined(_USE_64BIT_TIME_T) && \ defined(_WIN32) && !defined(_WIN64) && \ defined(__MINGW_MAJOR_VERSION) && __MINGW_MAJOR_VERSION >= 4 && \ defined(__MSVCRT__) # define _USE_32BIT_TIME_T #endif /* The public SQLite interface. The _FILE_OFFSET_BITS macro must appear ** first in QNX. Also, the _USE_32BIT_TIME_T macro must appear first for ** MinGW. */ /************** Include sqlite3.h in the middle of sqliteInt.h ***************/ /************** Begin file sqlite3.h *****************************************/ /* ** 2001 September 15 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** This header file defines the interface that the SQLite library ** presents to client programs. If a C-function, structure, datatype, ** or constant definition does not appear in this file, then it is ** not a published API of SQLite, is subject to change without ** notice, and should not be referenced by programs that use SQLite. ** ** Some of the definitions that are in this file are marked as ** "experimental". Experimental interfaces are normally new ** features recently added to SQLite. We do not anticipate changes ** to experimental interfaces but reserve the right to make minor changes ** if experience from use "in the wild" suggest such changes are prudent. ** ** The official C-language API documentation for SQLite is derived ** from comments in this file. This file is the authoritative source ** on how SQLite interfaces are supposed to operate. ** ** The name of this file under configuration management is "sqlite.h.in". ** The makefile makes some minor changes to this file (such as inserting ** the version number) and changes its name to "sqlite3.h" as ** part of the build process. 
*/ #ifndef _SQLITE3_H_ #define _SQLITE3_H_ #include /* Needed for the definition of va_list */ /* ** Make sure we can call this stuff from C++. */ #if 0 extern "C" { #endif /* ** Provide the ability to override linkage features of the interface. */ #ifndef SQLITE_EXTERN # define SQLITE_EXTERN extern #endif #ifndef SQLITE_API # define SQLITE_API #endif #ifndef SQLITE_CDECL # define SQLITE_CDECL #endif #ifndef SQLITE_STDCALL # define SQLITE_STDCALL #endif /* ** These no-op macros are used in front of interfaces to mark those ** interfaces as either deprecated or experimental. New applications ** should not use deprecated interfaces - they are supported for backwards ** compatibility only. Application writers should be aware that ** experimental interfaces are subject to change in point releases. ** ** These macros used to resolve to various kinds of compiler magic that ** would generate warning messages when they were used. But that ** compiler magic ended up generating such a flurry of bug reports ** that we have taken it all out and gone back to using simple ** noop macros. */ #define SQLITE_DEPRECATED #define SQLITE_EXPERIMENTAL /* ** Ensure these symbols were not defined by some previous header file. */ #ifdef SQLITE_VERSION # undef SQLITE_VERSION #endif #ifdef SQLITE_VERSION_NUMBER # undef SQLITE_VERSION_NUMBER #endif /* ** CAPI3REF: Compile-Time Library Version Numbers ** ** ^(The [SQLITE_VERSION] C preprocessor macro in the sqlite3.h header ** evaluates to a string literal that is the SQLite version in the ** format "X.Y.Z" where X is the major version number (always 3 for ** SQLite3) and Y is the minor version number and Z is the release number.)^ ** ^(The [SQLITE_VERSION_NUMBER] C preprocessor macro resolves to an integer ** with the value (X*1000000 + Y*1000 + Z) where X, Y, and Z are the same ** numbers used in [SQLITE_VERSION].)^ ** The SQLITE_VERSION_NUMBER for any given release of SQLite will also ** be larger than the release from which it is derived. Either Y will ** be held constant and Z will be incremented or else Y will be incremented ** and Z will be reset to zero. ** ** Since version 3.6.18, SQLite source code has been stored in the ** Fossil configuration management ** system. ^The SQLITE_SOURCE_ID macro evaluates to ** a string which identifies a particular check-in of SQLite ** within its configuration management system. ^The SQLITE_SOURCE_ID ** string contains the date and time of the check-in (UTC) and an SHA1 ** hash of the entire source tree. ** ** See also: [sqlite3_libversion()], ** [sqlite3_libversion_number()], [sqlite3_sourceid()], ** [sqlite_version()] and [sqlite_source_id()]. */ #define SQLITE_VERSION "3.8.11.1" #define SQLITE_VERSION_NUMBER 3008011 #define SQLITE_SOURCE_ID "2015-07-29 20:00:57 cf538e2783e468bbc25e7cb2a9ee64d3e0e80b2f" /* ** CAPI3REF: Run-Time Library Version Numbers ** KEYWORDS: sqlite3_version, sqlite3_sourceid ** ** These interfaces provide the same information as the [SQLITE_VERSION], ** [SQLITE_VERSION_NUMBER], and [SQLITE_SOURCE_ID] C preprocessor macros ** but are associated with the library instead of the header file. ^(Cautious ** programmers might include assert() statements in their application to ** verify that values returned by these interfaces match the macros in ** the header, and thus insure that the application is ** compiled with matching library and header files. ** **
** assert( sqlite3_libversion_number()==SQLITE_VERSION_NUMBER );
** assert( strcmp(sqlite3_sourceid(),SQLITE_SOURCE_ID)==0 );
** assert( strcmp(sqlite3_libversion(),SQLITE_VERSION)==0 );
** 
)^ ** ** ^The sqlite3_version[] string constant contains the text of [SQLITE_VERSION] ** macro. ^The sqlite3_libversion() function returns a pointer to the ** to the sqlite3_version[] string constant. The sqlite3_libversion() ** function is provided for use in DLLs since DLL users usually do not have ** direct access to string constants within the DLL. ^The ** sqlite3_libversion_number() function returns an integer equal to ** [SQLITE_VERSION_NUMBER]. ^The sqlite3_sourceid() function returns ** a pointer to a string constant whose value is the same as the ** [SQLITE_SOURCE_ID] C preprocessor macro. ** ** See also: [sqlite_version()] and [sqlite_source_id()]. */ SQLITE_API const char sqlite3_version[] = SQLITE_VERSION; SQLITE_API const char *SQLITE_STDCALL sqlite3_libversion(void); SQLITE_API const char *SQLITE_STDCALL sqlite3_sourceid(void); SQLITE_API int SQLITE_STDCALL sqlite3_libversion_number(void); /* ** CAPI3REF: Run-Time Library Compilation Options Diagnostics ** ** ^The sqlite3_compileoption_used() function returns 0 or 1 ** indicating whether the specified option was defined at ** compile time. ^The SQLITE_ prefix may be omitted from the ** option name passed to sqlite3_compileoption_used(). ** ** ^The sqlite3_compileoption_get() function allows iterating ** over the list of options that were defined at compile time by ** returning the N-th compile time option string. ^If N is out of range, ** sqlite3_compileoption_get() returns a NULL pointer. ^The SQLITE_ ** prefix is omitted from any strings returned by ** sqlite3_compileoption_get(). ** ** ^Support for the diagnostic functions sqlite3_compileoption_used() ** and sqlite3_compileoption_get() may be omitted by specifying the ** [SQLITE_OMIT_COMPILEOPTION_DIAGS] option at compile time. ** ** See also: SQL functions [sqlite_compileoption_used()] and ** [sqlite_compileoption_get()] and the [compile_options pragma]. */ #ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS SQLITE_API int SQLITE_STDCALL sqlite3_compileoption_used(const char *zOptName); SQLITE_API const char *SQLITE_STDCALL sqlite3_compileoption_get(int N); #endif /* ** CAPI3REF: Test To See If The Library Is Threadsafe ** ** ^The sqlite3_threadsafe() function returns zero if and only if ** SQLite was compiled with mutexing code omitted due to the ** [SQLITE_THREADSAFE] compile-time option being set to 0. ** ** SQLite can be compiled with or without mutexes. When ** the [SQLITE_THREADSAFE] C preprocessor macro is 1 or 2, mutexes ** are enabled and SQLite is threadsafe. When the ** [SQLITE_THREADSAFE] macro is 0, ** the mutexes are omitted. Without the mutexes, it is not safe ** to use SQLite concurrently from more than one thread. ** ** Enabling mutexes incurs a measurable performance penalty. ** So if speed is of utmost importance, it makes sense to disable ** the mutexes. But for maximum safety, mutexes should be enabled. ** ^The default behavior is for mutexes to be enabled. ** ** This interface can be used by an application to make sure that the ** version of SQLite that it is linking against was compiled with ** the desired setting of the [SQLITE_THREADSAFE] macro. ** ** This interface only reports on the compile-time mutex setting ** of the [SQLITE_THREADSAFE] flag. If SQLite is compiled with ** SQLITE_THREADSAFE=1 or =2 then mutexes are enabled by default but ** can be fully or partially disabled using a call to [sqlite3_config()] ** with the verbs [SQLITE_CONFIG_SINGLETHREAD], [SQLITE_CONFIG_MULTITHREAD], ** or [SQLITE_CONFIG_SERIALIZED]. 
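**
** A minimal illustrative sketch, assuming <stdio.h> and only the routines
** documented above: both diagnostics can be queried at run time before the
** library is configured.
**
**   int i;
**   const char *zOpt;
**   for(i=0; (zOpt = sqlite3_compileoption_get(i))!=0; i++){
**     printf("compile option: %s\n", zOpt);    /* e.g. "THREADSAFE=1" */
**   }
**   if( !sqlite3_threadsafe() ){
**     /* mutexes were omitted at compile time; keep all use in one thread */
**   }
**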
^(The return value of the ** sqlite3_threadsafe() function shows only the compile-time setting of ** thread safety, not any run-time changes to that setting made by ** sqlite3_config(). In other words, the return value from sqlite3_threadsafe() ** is unchanged by calls to sqlite3_config().)^ ** ** See the [threading mode] documentation for additional information. */ SQLITE_API int SQLITE_STDCALL sqlite3_threadsafe(void); /* ** CAPI3REF: Database Connection Handle ** KEYWORDS: {database connection} {database connections} ** ** Each open SQLite database is represented by a pointer to an instance of ** the opaque structure named "sqlite3". It is useful to think of an sqlite3 ** pointer as an object. The [sqlite3_open()], [sqlite3_open16()], and ** [sqlite3_open_v2()] interfaces are its constructors, and [sqlite3_close()] ** and [sqlite3_close_v2()] are its destructors. There are many other ** interfaces (such as ** [sqlite3_prepare_v2()], [sqlite3_create_function()], and ** [sqlite3_busy_timeout()] to name but three) that are methods on an ** sqlite3 object. */ typedef struct sqlite3 sqlite3; /* ** CAPI3REF: 64-Bit Integer Types ** KEYWORDS: sqlite_int64 sqlite_uint64 ** ** Because there is no cross-platform way to specify 64-bit integer types ** SQLite includes typedefs for 64-bit signed and unsigned integers. ** ** The sqlite3_int64 and sqlite3_uint64 are the preferred type definitions. ** The sqlite_int64 and sqlite_uint64 types are supported for backwards ** compatibility only. ** ** ^The sqlite3_int64 and sqlite_int64 types can store integer values ** between -9223372036854775808 and +9223372036854775807 inclusive. ^The ** sqlite3_uint64 and sqlite_uint64 types can store integer values ** between 0 and +18446744073709551615 inclusive. */ #ifdef SQLITE_INT64_TYPE typedef SQLITE_INT64_TYPE sqlite_int64; typedef unsigned SQLITE_INT64_TYPE sqlite_uint64; #elif defined(_MSC_VER) || defined(__BORLANDC__) typedef __int64 sqlite_int64; typedef unsigned __int64 sqlite_uint64; #else typedef long long int sqlite_int64; typedef unsigned long long int sqlite_uint64; #endif typedef sqlite_int64 sqlite3_int64; typedef sqlite_uint64 sqlite3_uint64; /* ** If compiling for a processor that lacks floating point support, ** substitute integer for floating-point. */ #ifdef SQLITE_OMIT_FLOATING_POINT # define double sqlite3_int64 #endif /* ** CAPI3REF: Closing A Database Connection ** DESTRUCTOR: sqlite3 ** ** ^The sqlite3_close() and sqlite3_close_v2() routines are destructors ** for the [sqlite3] object. ** ^Calls to sqlite3_close() and sqlite3_close_v2() return [SQLITE_OK] if ** the [sqlite3] object is successfully destroyed and all associated ** resources are deallocated. ** ** ^If the database connection is associated with unfinalized prepared ** statements or unfinished sqlite3_backup objects then sqlite3_close() ** will leave the database connection open and return [SQLITE_BUSY]. ** ^If sqlite3_close_v2() is called with unfinalized prepared statements ** and/or unfinished sqlite3_backups, then the database connection becomes ** an unusable "zombie" which will automatically be deallocated when the ** last prepared statement is finalized or the last sqlite3_backup is ** finished. The sqlite3_close_v2() interface is intended for use with ** host languages that are garbage collected, and where the order in which ** destructors are called is arbitrary. 
** ** Applications should [sqlite3_finalize | finalize] all [prepared statements], ** [sqlite3_blob_close | close] all [BLOB handles], and ** [sqlite3_backup_finish | finish] all [sqlite3_backup] objects associated ** with the [sqlite3] object prior to attempting to close the object. ^If ** sqlite3_close_v2() is called on a [database connection] that still has ** outstanding [prepared statements], [BLOB handles], and/or ** [sqlite3_backup] objects then it returns [SQLITE_OK] and the deallocation ** of resources is deferred until all [prepared statements], [BLOB handles], ** and [sqlite3_backup] objects are also destroyed. ** ** ^If an [sqlite3] object is destroyed while a transaction is open, ** the transaction is automatically rolled back. ** ** The C parameter to [sqlite3_close(C)] and [sqlite3_close_v2(C)] ** must be either a NULL ** pointer or an [sqlite3] object pointer obtained ** from [sqlite3_open()], [sqlite3_open16()], or ** [sqlite3_open_v2()], and not previously closed. ** ^Calling sqlite3_close() or sqlite3_close_v2() with a NULL pointer ** argument is a harmless no-op. */ SQLITE_API int SQLITE_STDCALL sqlite3_close(sqlite3*); SQLITE_API int SQLITE_STDCALL sqlite3_close_v2(sqlite3*); /* ** The type for a callback function. ** This is legacy and deprecated. It is included for historical ** compatibility and is not documented. */ typedef int (*sqlite3_callback)(void*,int,char**, char**); /* ** CAPI3REF: One-Step Query Execution Interface ** METHOD: sqlite3 ** ** The sqlite3_exec() interface is a convenience wrapper around ** [sqlite3_prepare_v2()], [sqlite3_step()], and [sqlite3_finalize()], ** that allows an application to run multiple statements of SQL ** without having to use a lot of C code. ** ** ^The sqlite3_exec() interface runs zero or more UTF-8 encoded, ** semicolon-separate SQL statements passed into its 2nd argument, ** in the context of the [database connection] passed in as its 1st ** argument. ^If the callback function of the 3rd argument to ** sqlite3_exec() is not NULL, then it is invoked for each result row ** coming out of the evaluated SQL statements. ^The 4th argument to ** sqlite3_exec() is relayed through to the 1st argument of each ** callback invocation. ^If the callback pointer to sqlite3_exec() ** is NULL, then no callback is ever invoked and result rows are ** ignored. ** ** ^If an error occurs while evaluating the SQL statements passed into ** sqlite3_exec(), then execution of the current statement stops and ** subsequent statements are skipped. ^If the 5th parameter to sqlite3_exec() ** is not NULL then any error message is written into memory obtained ** from [sqlite3_malloc()] and passed back through the 5th parameter. ** To avoid memory leaks, the application should invoke [sqlite3_free()] ** on error message strings returned through the 5th parameter of ** of sqlite3_exec() after the error message string is no longer needed. ** ^If the 5th parameter to sqlite3_exec() is not NULL and no errors ** occur, then sqlite3_exec() sets the pointer in its 5th parameter to ** NULL before returning. ** ** ^If an sqlite3_exec() callback returns non-zero, the sqlite3_exec() ** routine returns SQLITE_ABORT without invoking the callback again and ** without running any subsequent SQL statements. ** ** ^The 2nd argument to the sqlite3_exec() callback function is the ** number of columns in the result. ^The 3rd argument to the sqlite3_exec() ** callback is an array of pointers to strings obtained as if from ** [sqlite3_column_text()], one for each column. 
^If an element of a ** result row is NULL then the corresponding string pointer for the ** sqlite3_exec() callback is a NULL pointer. ^The 4th argument to the ** sqlite3_exec() callback is an array of pointers to strings where each ** entry represents the name of corresponding result column as obtained ** from [sqlite3_column_name()]. ** ** ^If the 2nd parameter to sqlite3_exec() is a NULL pointer, a pointer ** to an empty string, or a pointer that contains only whitespace and/or ** SQL comments, then no SQL statements are evaluated and the database ** is not changed. ** ** Restrictions: ** **
    **
  • The application must ensure that the 1st parameter to sqlite3_exec() ** is a valid and open [database connection]. **
  • The application must not close the [database connection] specified by ** the 1st parameter to sqlite3_exec() while sqlite3_exec() is running. **
  • The application must not modify the SQL statement text passed into ** the 2nd parameter of sqlite3_exec() while sqlite3_exec() is running. **
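**
** A minimal illustrative sketch of the pattern described above (assuming
** <stdio.h>, an already open [database connection] "db", and arbitrary
** callback and table names):
**
**   static int print_row(void *pArg, int nCol, char **azVal, char **azCol){
**     int i;
**     for(i=0; i<nCol; i++){
**       /* azVal[i] is a NULL pointer for NULL column values */
**       printf("%s = %s\n", azCol[i], azVal[i] ? azVal[i] : "NULL");
**     }
**     return 0;   /* a non-zero return would make sqlite3_exec() abort */
**   }
**
**   char *zErr = 0;
**   if( sqlite3_exec(db, "SELECT * FROM t1;", print_row, 0, &zErr)!=SQLITE_OK ){
**     fprintf(stderr, "query failed: %s\n", zErr);
**     sqlite3_free(zErr);   /* error strings come from sqlite3_malloc() */
**   }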
*/ SQLITE_API int SQLITE_STDCALL sqlite3_exec( sqlite3*, /* An open database */ const char *sql, /* SQL to be evaluated */ int (*callback)(void*,int,char**,char**), /* Callback function */ void *, /* 1st argument to callback */ char **errmsg /* Error msg written here */ ); /* ** CAPI3REF: Result Codes ** KEYWORDS: {result code definitions} ** ** Many SQLite functions return an integer result code from the set shown ** here in order to indicate success or failure. ** ** New error codes may be added in future versions of SQLite. ** ** See also: [extended result code definitions] */ #define SQLITE_OK 0 /* Successful result */ /* beginning-of-error-codes */ #define SQLITE_ERROR 1 /* SQL error or missing database */ #define SQLITE_INTERNAL 2 /* Internal logic error in SQLite */ #define SQLITE_PERM 3 /* Access permission denied */ #define SQLITE_ABORT 4 /* Callback routine requested an abort */ #define SQLITE_BUSY 5 /* The database file is locked */ #define SQLITE_LOCKED 6 /* A table in the database is locked */ #define SQLITE_NOMEM 7 /* A malloc() failed */ #define SQLITE_READONLY 8 /* Attempt to write a readonly database */ #define SQLITE_INTERRUPT 9 /* Operation terminated by sqlite3_interrupt()*/ #define SQLITE_IOERR 10 /* Some kind of disk I/O error occurred */ #define SQLITE_CORRUPT 11 /* The database disk image is malformed */ #define SQLITE_NOTFOUND 12 /* Unknown opcode in sqlite3_file_control() */ #define SQLITE_FULL 13 /* Insertion failed because database is full */ #define SQLITE_CANTOPEN 14 /* Unable to open the database file */ #define SQLITE_PROTOCOL 15 /* Database lock protocol error */ #define SQLITE_EMPTY 16 /* Database is empty */ #define SQLITE_SCHEMA 17 /* The database schema changed */ #define SQLITE_TOOBIG 18 /* String or BLOB exceeds size limit */ #define SQLITE_CONSTRAINT 19 /* Abort due to constraint violation */ #define SQLITE_MISMATCH 20 /* Data type mismatch */ #define SQLITE_MISUSE 21 /* Library used incorrectly */ #define SQLITE_NOLFS 22 /* Uses OS features not supported on host */ #define SQLITE_AUTH 23 /* Authorization denied */ #define SQLITE_FORMAT 24 /* Auxiliary database format error */ #define SQLITE_RANGE 25 /* 2nd parameter to sqlite3_bind out of range */ #define SQLITE_NOTADB 26 /* File opened that is not a database file */ #define SQLITE_NOTICE 27 /* Notifications from sqlite3_log() */ #define SQLITE_WARNING 28 /* Warnings from sqlite3_log() */ #define SQLITE_ROW 100 /* sqlite3_step() has another row ready */ #define SQLITE_DONE 101 /* sqlite3_step() has finished executing */ /* end-of-error-codes */ /* ** CAPI3REF: Extended Result Codes ** KEYWORDS: {extended result code definitions} ** ** In its default configuration, SQLite API routines return one of 30 integer ** [result codes]. However, experience has shown that many of ** these result codes are too coarse-grained. They do not provide as ** much information about problems as programmers might like. In an effort to ** address this, newer versions of SQLite (version 3.3.8 and later) include ** support for additional result codes that provide more detailed information ** about errors. These [extended result codes] are enabled or disabled ** on a per database connection basis using the ** [sqlite3_extended_result_codes()] API. Or, the extended code for ** the most recent error can be obtained using ** [sqlite3_extended_errcode()]. 
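**
** A brief illustrative sketch, assuming an open connection "db" and an
** arbitrary SQL string "zSql": the connection opts in to extended codes and
** inspects the most recent one after a failure.
**
**   sqlite3_extended_result_codes(db, 1);      /* enable extended codes */
**   if( sqlite3_exec(db, zSql, 0, 0, 0)!=SQLITE_OK ){
**     int rc = sqlite3_extended_errcode(db);   /* e.g. SQLITE_IOERR_FSYNC */
**     fprintf(stderr, "error %d: %s\n", rc, sqlite3_errmsg(db));
**   }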
*/ #define SQLITE_IOERR_READ (SQLITE_IOERR | (1<<8)) #define SQLITE_IOERR_SHORT_READ (SQLITE_IOERR | (2<<8)) #define SQLITE_IOERR_WRITE (SQLITE_IOERR | (3<<8)) #define SQLITE_IOERR_FSYNC (SQLITE_IOERR | (4<<8)) #define SQLITE_IOERR_DIR_FSYNC (SQLITE_IOERR | (5<<8)) #define SQLITE_IOERR_TRUNCATE (SQLITE_IOERR | (6<<8)) #define SQLITE_IOERR_FSTAT (SQLITE_IOERR | (7<<8)) #define SQLITE_IOERR_UNLOCK (SQLITE_IOERR | (8<<8)) #define SQLITE_IOERR_RDLOCK (SQLITE_IOERR | (9<<8)) #define SQLITE_IOERR_DELETE (SQLITE_IOERR | (10<<8)) #define SQLITE_IOERR_BLOCKED (SQLITE_IOERR | (11<<8)) #define SQLITE_IOERR_NOMEM (SQLITE_IOERR | (12<<8)) #define SQLITE_IOERR_ACCESS (SQLITE_IOERR | (13<<8)) #define SQLITE_IOERR_CHECKRESERVEDLOCK (SQLITE_IOERR | (14<<8)) #define SQLITE_IOERR_LOCK (SQLITE_IOERR | (15<<8)) #define SQLITE_IOERR_CLOSE (SQLITE_IOERR | (16<<8)) #define SQLITE_IOERR_DIR_CLOSE (SQLITE_IOERR | (17<<8)) #define SQLITE_IOERR_SHMOPEN (SQLITE_IOERR | (18<<8)) #define SQLITE_IOERR_SHMSIZE (SQLITE_IOERR | (19<<8)) #define SQLITE_IOERR_SHMLOCK (SQLITE_IOERR | (20<<8)) #define SQLITE_IOERR_SHMMAP (SQLITE_IOERR | (21<<8)) #define SQLITE_IOERR_SEEK (SQLITE_IOERR | (22<<8)) #define SQLITE_IOERR_DELETE_NOENT (SQLITE_IOERR | (23<<8)) #define SQLITE_IOERR_MMAP (SQLITE_IOERR | (24<<8)) #define SQLITE_IOERR_GETTEMPPATH (SQLITE_IOERR | (25<<8)) #define SQLITE_IOERR_CONVPATH (SQLITE_IOERR | (26<<8)) #define SQLITE_LOCKED_SHAREDCACHE (SQLITE_LOCKED | (1<<8)) #define SQLITE_BUSY_RECOVERY (SQLITE_BUSY | (1<<8)) #define SQLITE_BUSY_SNAPSHOT (SQLITE_BUSY | (2<<8)) #define SQLITE_CANTOPEN_NOTEMPDIR (SQLITE_CANTOPEN | (1<<8)) #define SQLITE_CANTOPEN_ISDIR (SQLITE_CANTOPEN | (2<<8)) #define SQLITE_CANTOPEN_FULLPATH (SQLITE_CANTOPEN | (3<<8)) #define SQLITE_CANTOPEN_CONVPATH (SQLITE_CANTOPEN | (4<<8)) #define SQLITE_CORRUPT_VTAB (SQLITE_CORRUPT | (1<<8)) #define SQLITE_READONLY_RECOVERY (SQLITE_READONLY | (1<<8)) #define SQLITE_READONLY_CANTLOCK (SQLITE_READONLY | (2<<8)) #define SQLITE_READONLY_ROLLBACK (SQLITE_READONLY | (3<<8)) #define SQLITE_READONLY_DBMOVED (SQLITE_READONLY | (4<<8)) #define SQLITE_ABORT_ROLLBACK (SQLITE_ABORT | (2<<8)) #define SQLITE_CONSTRAINT_CHECK (SQLITE_CONSTRAINT | (1<<8)) #define SQLITE_CONSTRAINT_COMMITHOOK (SQLITE_CONSTRAINT | (2<<8)) #define SQLITE_CONSTRAINT_FOREIGNKEY (SQLITE_CONSTRAINT | (3<<8)) #define SQLITE_CONSTRAINT_FUNCTION (SQLITE_CONSTRAINT | (4<<8)) #define SQLITE_CONSTRAINT_NOTNULL (SQLITE_CONSTRAINT | (5<<8)) #define SQLITE_CONSTRAINT_PRIMARYKEY (SQLITE_CONSTRAINT | (6<<8)) #define SQLITE_CONSTRAINT_TRIGGER (SQLITE_CONSTRAINT | (7<<8)) #define SQLITE_CONSTRAINT_UNIQUE (SQLITE_CONSTRAINT | (8<<8)) #define SQLITE_CONSTRAINT_VTAB (SQLITE_CONSTRAINT | (9<<8)) #define SQLITE_CONSTRAINT_ROWID (SQLITE_CONSTRAINT |(10<<8)) #define SQLITE_NOTICE_RECOVER_WAL (SQLITE_NOTICE | (1<<8)) #define SQLITE_NOTICE_RECOVER_ROLLBACK (SQLITE_NOTICE | (2<<8)) #define SQLITE_WARNING_AUTOINDEX (SQLITE_WARNING | (1<<8)) #define SQLITE_AUTH_USER (SQLITE_AUTH | (1<<8)) /* ** CAPI3REF: Flags For File Open Operations ** ** These bit values are intended for use in the ** 3rd parameter to the [sqlite3_open_v2()] interface and ** in the 4th parameter to the [sqlite3_vfs.xOpen] method. 
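**
** A brief illustrative sketch, assuming an arbitrary file name: applications
** normally combine the read/write and create flags below, and leave the
** flags marked "VFS only" to SQLite's own calls to [sqlite3_vfs.xOpen].
**
**   sqlite3 *db = 0;
**   int rc = sqlite3_open_v2("test.db", &db,
**                            SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, 0);
**   if( rc!=SQLITE_OK ){
**     fprintf(stderr, "cannot open: %s\n", sqlite3_errmsg(db));
**     sqlite3_close(db);   /* a handle is usually returned even on failure */
**   }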
*/ #define SQLITE_OPEN_READONLY 0x00000001 /* Ok for sqlite3_open_v2() */ #define SQLITE_OPEN_READWRITE 0x00000002 /* Ok for sqlite3_open_v2() */ #define SQLITE_OPEN_CREATE 0x00000004 /* Ok for sqlite3_open_v2() */ #define SQLITE_OPEN_DELETEONCLOSE 0x00000008 /* VFS only */ #define SQLITE_OPEN_EXCLUSIVE 0x00000010 /* VFS only */ #define SQLITE_OPEN_AUTOPROXY 0x00000020 /* VFS only */ #define SQLITE_OPEN_URI 0x00000040 /* Ok for sqlite3_open_v2() */ #define SQLITE_OPEN_MEMORY 0x00000080 /* Ok for sqlite3_open_v2() */ #define SQLITE_OPEN_MAIN_DB 0x00000100 /* VFS only */ #define SQLITE_OPEN_TEMP_DB 0x00000200 /* VFS only */ #define SQLITE_OPEN_TRANSIENT_DB 0x00000400 /* VFS only */ #define SQLITE_OPEN_MAIN_JOURNAL 0x00000800 /* VFS only */ #define SQLITE_OPEN_TEMP_JOURNAL 0x00001000 /* VFS only */ #define SQLITE_OPEN_SUBJOURNAL 0x00002000 /* VFS only */ #define SQLITE_OPEN_MASTER_JOURNAL 0x00004000 /* VFS only */ #define SQLITE_OPEN_NOMUTEX 0x00008000 /* Ok for sqlite3_open_v2() */ #define SQLITE_OPEN_FULLMUTEX 0x00010000 /* Ok for sqlite3_open_v2() */ #define SQLITE_OPEN_SHAREDCACHE 0x00020000 /* Ok for sqlite3_open_v2() */ #define SQLITE_OPEN_PRIVATECACHE 0x00040000 /* Ok for sqlite3_open_v2() */ #define SQLITE_OPEN_WAL 0x00080000 /* VFS only */ /* Reserved: 0x00F00000 */ /* ** CAPI3REF: Device Characteristics ** ** The xDeviceCharacteristics method of the [sqlite3_io_methods] ** object returns an integer which is a vector of these ** bit values expressing I/O characteristics of the mass storage ** device that holds the file that the [sqlite3_io_methods] ** refers to. ** ** The SQLITE_IOCAP_ATOMIC property means that all writes of ** any size are atomic. The SQLITE_IOCAP_ATOMICnnn values ** mean that writes of blocks that are nnn bytes in size and ** are aligned to an address which is an integer multiple of ** nnn are atomic. The SQLITE_IOCAP_SAFE_APPEND value means ** that when data is appended to a file, the data is appended ** first then the size of the file is extended, never the other ** way around. The SQLITE_IOCAP_SEQUENTIAL property means that ** information is written to disk in the same order as calls ** to xWrite(). The SQLITE_IOCAP_POWERSAFE_OVERWRITE property means that ** after reboot following a crash or power loss, the only bytes in a ** file that were written at the application level might have changed ** and that adjacent bytes, even bytes within the same sector are ** guaranteed to be unchanged. The SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN ** flag indicate that a file cannot be deleted when open. The ** SQLITE_IOCAP_IMMUTABLE flag indicates that the file is on ** read-only media and cannot be changed even by processes with ** elevated privileges. */ #define SQLITE_IOCAP_ATOMIC 0x00000001 #define SQLITE_IOCAP_ATOMIC512 0x00000002 #define SQLITE_IOCAP_ATOMIC1K 0x00000004 #define SQLITE_IOCAP_ATOMIC2K 0x00000008 #define SQLITE_IOCAP_ATOMIC4K 0x00000010 #define SQLITE_IOCAP_ATOMIC8K 0x00000020 #define SQLITE_IOCAP_ATOMIC16K 0x00000040 #define SQLITE_IOCAP_ATOMIC32K 0x00000080 #define SQLITE_IOCAP_ATOMIC64K 0x00000100 #define SQLITE_IOCAP_SAFE_APPEND 0x00000200 #define SQLITE_IOCAP_SEQUENTIAL 0x00000400 #define SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN 0x00000800 #define SQLITE_IOCAP_POWERSAFE_OVERWRITE 0x00001000 #define SQLITE_IOCAP_IMMUTABLE 0x00002000 /* ** CAPI3REF: File Locking Levels ** ** SQLite uses one of these integer values as the second ** argument to calls it makes to the xLock() and xUnlock() methods ** of an [sqlite3_io_methods] object. 
*/ #define SQLITE_LOCK_NONE 0 #define SQLITE_LOCK_SHARED 1 #define SQLITE_LOCK_RESERVED 2 #define SQLITE_LOCK_PENDING 3 #define SQLITE_LOCK_EXCLUSIVE 4 /* ** CAPI3REF: Synchronization Type Flags ** ** When SQLite invokes the xSync() method of an ** [sqlite3_io_methods] object it uses a combination of ** these integer values as the second argument. ** ** When the SQLITE_SYNC_DATAONLY flag is used, it means that the ** sync operation only needs to flush data to mass storage. Inode ** information need not be flushed. If the lower four bits of the flag ** equal SQLITE_SYNC_NORMAL, that means to use normal fsync() semantics. ** If the lower four bits equal SQLITE_SYNC_FULL, that means ** to use Mac OS X style fullsync instead of fsync(). ** ** Do not confuse the SQLITE_SYNC_NORMAL and SQLITE_SYNC_FULL flags ** with the [PRAGMA synchronous]=NORMAL and [PRAGMA synchronous]=FULL ** settings. The [synchronous pragma] determines when calls to the ** xSync VFS method occur and applies uniformly across all platforms. ** The SQLITE_SYNC_NORMAL and SQLITE_SYNC_FULL flags determine how ** energetic or rigorous or forceful the sync operations are and ** only make a difference on Mac OSX for the default SQLite code. ** (Third-party VFS implementations might also make the distinction ** between SQLITE_SYNC_NORMAL and SQLITE_SYNC_FULL, but among the ** operating systems natively supported by SQLite, only Mac OSX ** cares about the difference.) */ #define SQLITE_SYNC_NORMAL 0x00002 #define SQLITE_SYNC_FULL 0x00003 #define SQLITE_SYNC_DATAONLY 0x00010 /* ** CAPI3REF: OS Interface Open File Handle ** ** An [sqlite3_file] object represents an open file in the ** [sqlite3_vfs | OS interface layer]. Individual OS interface ** implementations will ** want to subclass this object by appending additional fields ** for their own use. The pMethods entry is a pointer to an ** [sqlite3_io_methods] object that defines methods for performing ** I/O operations on the open file. */ typedef struct sqlite3_file sqlite3_file; struct sqlite3_file { const struct sqlite3_io_methods *pMethods; /* Methods for an open file */ }; /* ** CAPI3REF: OS Interface File Virtual Methods Object ** ** Every file opened by the [sqlite3_vfs.xOpen] method populates an ** [sqlite3_file] object (or, more commonly, a subclass of the ** [sqlite3_file] object) with a pointer to an instance of this object. ** This object defines the methods used to perform various operations ** against the open file represented by the [sqlite3_file] object. ** ** If the [sqlite3_vfs.xOpen] method sets the sqlite3_file.pMethods element ** to a non-NULL pointer, then the sqlite3_io_methods.xClose method ** may be invoked even if the [sqlite3_vfs.xOpen] reported that it failed. The ** only way to prevent a call to xClose following a failed [sqlite3_vfs.xOpen] ** is for the [sqlite3_vfs.xOpen] to set the sqlite3_file.pMethods element ** to NULL. ** ** The flags argument to xSync may be one of [SQLITE_SYNC_NORMAL] or ** [SQLITE_SYNC_FULL]. The first choice is the normal fsync(). ** The second choice is a Mac OS X style fullsync. The [SQLITE_SYNC_DATAONLY] ** flag may be ORed in to indicate that only the data of the file ** and not its inode needs to be synced. ** ** The integer values to xLock() and xUnlock() are one of **
    **
  • [SQLITE_LOCK_NONE], **
  • [SQLITE_LOCK_SHARED], **
  • [SQLITE_LOCK_RESERVED], **
  • [SQLITE_LOCK_PENDING], or **
  • [SQLITE_LOCK_EXCLUSIVE]. **
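**
** (A hypothetical pass-through shim, sketched only to illustrate the call:
** "ShimFile" is an assumed sqlite3_file subclass that keeps a pointer to the
** real underlying file.)
**
**   static int shimLock(sqlite3_file *pFile, int eLock){
**     ShimFile *p = (ShimFile*)pFile;
**     /* eLock is one of the locking levels listed above */
**     return p->pReal->pMethods->xLock(p->pReal, eLock);
**   }
**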
** xLock() increases the lock. xUnlock() decreases the lock. ** The xCheckReservedLock() method checks whether any database connection, ** either in this process or in some other process, is holding a RESERVED, ** PENDING, or EXCLUSIVE lock on the file. It returns true ** if such a lock exists and false otherwise. ** ** The xFileControl() method is a generic interface that allows custom ** VFS implementations to directly control an open file using the ** [sqlite3_file_control()] interface. The second "op" argument is an ** integer opcode. The third argument is a generic pointer intended to ** point to a structure that may contain arguments or space in which to ** write return values. Potential uses for xFileControl() might be ** functions to enable blocking locks with timeouts, to change the ** locking strategy (for example to use dot-file locks), to inquire ** about the status of a lock, or to break stale locks. The SQLite ** core reserves all opcodes less than 100 for its own use. ** A [file control opcodes | list of opcodes] less than 100 is available. ** Applications that define a custom xFileControl method should use opcodes ** greater than 100 to avoid conflicts. VFS implementations should ** return [SQLITE_NOTFOUND] for file control opcodes that they do not ** recognize. ** ** The xSectorSize() method returns the sector size of the ** device that underlies the file. The sector size is the ** minimum write that can be performed without disturbing ** other bytes in the file. The xDeviceCharacteristics() ** method returns a bit vector describing behaviors of the ** underlying device: ** **
    **
  • [SQLITE_IOCAP_ATOMIC] **
  • [SQLITE_IOCAP_ATOMIC512] **
  • [SQLITE_IOCAP_ATOMIC1K] **
  • [SQLITE_IOCAP_ATOMIC2K] **
  • [SQLITE_IOCAP_ATOMIC4K] **
  • [SQLITE_IOCAP_ATOMIC8K] **
  • [SQLITE_IOCAP_ATOMIC16K] **
  • [SQLITE_IOCAP_ATOMIC32K] **
  • [SQLITE_IOCAP_ATOMIC64K] **
  • [SQLITE_IOCAP_SAFE_APPEND] **
  • [SQLITE_IOCAP_SEQUENTIAL] **
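**
** (A brief illustrative sketch: code holding an open sqlite3_file pointer
** "pFile" can test the returned vector against the SQLITE_IOCAP_* masks.)
**
**   int dc = pFile->pMethods->xDeviceCharacteristics(pFile);
**   if( dc & SQLITE_IOCAP_SAFE_APPEND ){
**     /* data is appended before the file size is extended */
**   }
**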
** ** The SQLITE_IOCAP_ATOMIC property means that all writes of ** any size are atomic. The SQLITE_IOCAP_ATOMICnnn values ** mean that writes of blocks that are nnn bytes in size and ** are aligned to an address which is an integer multiple of ** nnn are atomic. The SQLITE_IOCAP_SAFE_APPEND value means ** that when data is appended to a file, the data is appended ** first then the size of the file is extended, never the other ** way around. The SQLITE_IOCAP_SEQUENTIAL property means that ** information is written to disk in the same order as calls ** to xWrite(). ** ** If xRead() returns SQLITE_IOERR_SHORT_READ it must also fill ** in the unread portions of the buffer with zeros. A VFS that ** fails to zero-fill short reads might seem to work. However, ** failure to zero-fill short reads will eventually lead to ** database corruption. */ typedef struct sqlite3_io_methods sqlite3_io_methods; struct sqlite3_io_methods { int iVersion; int (*xClose)(sqlite3_file*); int (*xRead)(sqlite3_file*, void*, int iAmt, sqlite3_int64 iOfst); int (*xWrite)(sqlite3_file*, const void*, int iAmt, sqlite3_int64 iOfst); int (*xTruncate)(sqlite3_file*, sqlite3_int64 size); int (*xSync)(sqlite3_file*, int flags); int (*xFileSize)(sqlite3_file*, sqlite3_int64 *pSize); int (*xLock)(sqlite3_file*, int); int (*xUnlock)(sqlite3_file*, int); int (*xCheckReservedLock)(sqlite3_file*, int *pResOut); int (*xFileControl)(sqlite3_file*, int op, void *pArg); int (*xSectorSize)(sqlite3_file*); int (*xDeviceCharacteristics)(sqlite3_file*); /* Methods above are valid for version 1 */ int (*xShmMap)(sqlite3_file*, int iPg, int pgsz, int, void volatile**); int (*xShmLock)(sqlite3_file*, int offset, int n, int flags); void (*xShmBarrier)(sqlite3_file*); int (*xShmUnmap)(sqlite3_file*, int deleteFlag); /* Methods above are valid for version 2 */ int (*xFetch)(sqlite3_file*, sqlite3_int64 iOfst, int iAmt, void **pp); int (*xUnfetch)(sqlite3_file*, sqlite3_int64 iOfst, void *p); /* Methods above are valid for version 3 */ /* Additional methods may be added in future releases */ }; /* ** CAPI3REF: Standard File Control Opcodes ** KEYWORDS: {file control opcodes} {file control opcode} ** ** These integer constants are opcodes for the xFileControl method ** of the [sqlite3_io_methods] object and for the [sqlite3_file_control()] ** interface. ** **
    **
  • [[SQLITE_FCNTL_LOCKSTATE]] ** The [SQLITE_FCNTL_LOCKSTATE] opcode is used for debugging. This ** opcode causes the xFileControl method to write the current state of ** the lock (one of [SQLITE_LOCK_NONE], [SQLITE_LOCK_SHARED], ** [SQLITE_LOCK_RESERVED], [SQLITE_LOCK_PENDING], or [SQLITE_LOCK_EXCLUSIVE]) ** into an integer that the pArg argument points to. This capability ** is used during testing and is only available when the SQLITE_TEST ** compile-time option is used. ** **
  • [[SQLITE_FCNTL_SIZE_HINT]] ** The [SQLITE_FCNTL_SIZE_HINT] opcode is used by SQLite to give the VFS ** layer a hint of how large the database file will grow to be during the ** current transaction. This hint is not guaranteed to be accurate but it ** is often close. The underlying VFS might choose to preallocate database ** file space based on this hint in order to help writes to the database ** file run faster. ** **
• [[SQLITE_FCNTL_CHUNK_SIZE]] ** The [SQLITE_FCNTL_CHUNK_SIZE] opcode is used to request that the VFS ** extend and truncate the database file in chunks of a size specified ** by the user. The fourth argument to [sqlite3_file_control()] should ** point to an integer (type int) containing the new chunk-size to use ** for the nominated database. Allocating database file space in large ** chunks (say 1MB at a time) may reduce file-system fragmentation and ** improve performance on some systems. ** **
  • [[SQLITE_FCNTL_FILE_POINTER]] ** The [SQLITE_FCNTL_FILE_POINTER] opcode is used to obtain a pointer ** to the [sqlite3_file] object associated with a particular database ** connection. See the [sqlite3_file_control()] documentation for ** additional information. ** **
  • [[SQLITE_FCNTL_SYNC_OMITTED]] ** No longer in use. ** **
• [[SQLITE_FCNTL_SYNC]] ** The [SQLITE_FCNTL_SYNC] opcode is generated internally by SQLite and ** sent to the VFS immediately before the xSync method is invoked on a ** database file descriptor. Or, if the xSync method is not invoked ** because the user has configured SQLite with ** [PRAGMA synchronous | PRAGMA synchronous=OFF] it is invoked in place ** of the xSync method. In most cases, the pointer argument passed with ** this file-control is NULL. However, if the database file is being synced ** as part of a multi-database commit, the argument points to a nul-terminated ** string containing the transaction's master-journal file name. VFSes that ** do not need this signal should silently ignore this opcode. Applications ** should not call [sqlite3_file_control()] with this opcode as doing so may ** disrupt the operation of the specialized VFSes that do require it. ** **
• [[SQLITE_FCNTL_COMMIT_PHASETWO]] ** The [SQLITE_FCNTL_COMMIT_PHASETWO] opcode is generated internally by SQLite ** and sent to the VFS immediately after a transaction has been committed, ** but before the database is unlocked. VFSes that do not need this signal ** should silently ignore this opcode. Applications should not call ** [sqlite3_file_control()] with this opcode as doing so may disrupt the ** operation of the specialized VFSes that do require it. ** **
• [[SQLITE_FCNTL_WIN32_AV_RETRY]] ** ^The [SQLITE_FCNTL_WIN32_AV_RETRY] opcode is used to configure automatic ** retry counts and intervals for certain disk I/O operations for the ** windows [VFS] in order to provide robustness in the presence of ** anti-virus programs. By default, the windows VFS will retry file read, ** file write, and file delete operations up to 10 times, with a delay ** of 25 milliseconds before the first retry and with the delay increasing ** by an additional 25 milliseconds with each subsequent retry. This ** opcode allows these two values (10 retries and 25 milliseconds of delay) ** to be adjusted. The values are changed for all database connections ** within the same process. The argument is a pointer to an array of two ** integers where the first integer is the new retry count and the second ** integer is the delay. If either integer is negative, then the setting ** is not changed but instead the prior value of that setting is written ** into the array entry, allowing the current retry settings to be ** interrogated. The zDbName parameter is ignored. ** **
  • [[SQLITE_FCNTL_PERSIST_WAL]] ** ^The [SQLITE_FCNTL_PERSIST_WAL] opcode is used to set or query the ** persistent [WAL | Write Ahead Log] setting. By default, the auxiliary ** write ahead log and shared memory files used for transaction control ** are automatically deleted when the latest connection to the database ** closes. Setting persistent WAL mode causes those files to persist after ** close. Persisting the files is useful when other processes that do not ** have write permission on the directory containing the database file want ** to read the database file, as the WAL and shared memory files must exist ** in order for the database to be readable. The fourth parameter to ** [sqlite3_file_control()] for this opcode should be a pointer to an integer. ** That integer is 0 to disable persistent WAL mode or 1 to enable persistent ** WAL mode. If the integer is -1, then it is overwritten with the current ** WAL persistence setting. ** **
  • [[SQLITE_FCNTL_POWERSAFE_OVERWRITE]] ** ^The [SQLITE_FCNTL_POWERSAFE_OVERWRITE] opcode is used to set or query the ** persistent "powersafe-overwrite" or "PSOW" setting. The PSOW setting ** determines the [SQLITE_IOCAP_POWERSAFE_OVERWRITE] bit of the ** xDeviceCharacteristics methods. The fourth parameter to ** [sqlite3_file_control()] for this opcode should be a pointer to an integer. ** That integer is 0 to disable zero-damage mode or 1 to enable zero-damage ** mode. If the integer is -1, then it is overwritten with the current ** zero-damage mode setting. ** **
  • [[SQLITE_FCNTL_OVERWRITE]] ** ^The [SQLITE_FCNTL_OVERWRITE] opcode is invoked by SQLite after opening ** a write transaction to indicate that, unless it is rolled back for some ** reason, the entire database file will be overwritten by the current ** transaction. This is used by VACUUM operations. ** **
• [[SQLITE_FCNTL_VFSNAME]] ** ^The [SQLITE_FCNTL_VFSNAME] opcode can be used to obtain the names of ** all [VFSes] in the VFS stack. The names of all VFS shims and the ** final bottom-level VFS are written into memory obtained from ** [sqlite3_malloc()] and the result is stored in the char* variable ** that the fourth parameter of [sqlite3_file_control()] points to. ** The caller is responsible for freeing the memory when done. As with ** all file-control actions, there is no guarantee that this will actually ** do anything. Callers should initialize the char* variable to a NULL ** pointer in case this file-control is not implemented. This file-control ** is intended for diagnostic use only. ** **
  • [[SQLITE_FCNTL_PRAGMA]] ** ^Whenever a [PRAGMA] statement is parsed, an [SQLITE_FCNTL_PRAGMA] ** file control is sent to the open [sqlite3_file] object corresponding ** to the database file to which the pragma statement refers. ^The argument ** to the [SQLITE_FCNTL_PRAGMA] file control is an array of ** pointers to strings (char**) in which the second element of the array ** is the name of the pragma and the third element is the argument to the ** pragma or NULL if the pragma has no argument. ^The handler for an ** [SQLITE_FCNTL_PRAGMA] file control can optionally make the first element ** of the char** argument point to a string obtained from [sqlite3_mprintf()] ** or the equivalent and that string will become the result of the pragma or ** the error message if the pragma fails. ^If the ** [SQLITE_FCNTL_PRAGMA] file control returns [SQLITE_NOTFOUND], then normal ** [PRAGMA] processing continues. ^If the [SQLITE_FCNTL_PRAGMA] ** file control returns [SQLITE_OK], then the parser assumes that the ** VFS has handled the PRAGMA itself and the parser generates a no-op ** prepared statement if result string is NULL, or that returns a copy ** of the result string if the string is non-NULL. ** ^If the [SQLITE_FCNTL_PRAGMA] file control returns ** any result code other than [SQLITE_OK] or [SQLITE_NOTFOUND], that means ** that the VFS encountered an error while handling the [PRAGMA] and the ** compilation of the PRAGMA fails with an error. ^The [SQLITE_FCNTL_PRAGMA] ** file control occurs at the beginning of pragma statement analysis and so ** it is able to override built-in [PRAGMA] statements. ** **
• [[SQLITE_FCNTL_BUSYHANDLER]] ** ^The [SQLITE_FCNTL_BUSYHANDLER] ** file-control may be invoked by SQLite on the database file handle ** shortly after it is opened in order to provide a custom VFS with access ** to the connection's busy-handler callback. The argument is of type (void **) ** - an array of two (void *) values. The first (void *) actually points ** to a function of type (int (*)(void *)). In order to invoke the connection's ** busy-handler, this function should be invoked with the second (void *) in ** the array as the only argument. If it returns non-zero, then the operation ** should be retried. If it returns zero, the custom VFS should abandon the ** current operation. ** **
• [[SQLITE_FCNTL_TEMPFILENAME]] ** ^Applications can invoke the [SQLITE_FCNTL_TEMPFILENAME] file-control ** to have SQLite generate a ** temporary filename using the same algorithm that is followed to generate ** temporary filenames for TEMP tables and other internal uses. The ** argument should be a char** which will be filled with the filename ** written into memory obtained from [sqlite3_malloc()]. The caller should ** invoke [sqlite3_free()] on the result to avoid a memory leak. ** **
  • [[SQLITE_FCNTL_MMAP_SIZE]] ** The [SQLITE_FCNTL_MMAP_SIZE] file control is used to query or set the ** maximum number of bytes that will be used for memory-mapped I/O. ** The argument is a pointer to a value of type sqlite3_int64 that ** is an advisory maximum number of bytes in the file to memory map. The ** pointer is overwritten with the old value. The limit is not changed if ** the value originally pointed to is negative, and so the current limit ** can be queried by passing in a pointer to a negative number. This ** file-control is used internally to implement [PRAGMA mmap_size]. ** **
  • [[SQLITE_FCNTL_TRACE]] ** The [SQLITE_FCNTL_TRACE] file control provides advisory information ** to the VFS about what the higher layers of the SQLite stack are doing. ** This file control is used by some VFS activity tracing [shims]. ** The argument is a zero-terminated string. Higher layers in the ** SQLite stack may generate instances of this file control if ** the [SQLITE_USE_FCNTL_TRACE] compile-time option is enabled. ** **
  • [[SQLITE_FCNTL_HAS_MOVED]] ** The [SQLITE_FCNTL_HAS_MOVED] file control interprets its argument as a ** pointer to an integer and it writes a boolean into that integer depending ** on whether or not the file has been renamed, moved, or deleted since it ** was first opened. ** **
  • [[SQLITE_FCNTL_WIN32_SET_HANDLE]] ** The [SQLITE_FCNTL_WIN32_SET_HANDLE] opcode is used for debugging. This ** opcode causes the xFileControl method to swap the file handle with the one ** pointed to by the pArg argument. This capability is used during testing ** and only needs to be supported when SQLITE_TEST is defined. ** **
  • [[SQLITE_FCNTL_WAL_BLOCK]] ** The [SQLITE_FCNTL_WAL_BLOCK] is a signal to the VFS layer that it might ** be advantageous to block on the next WAL lock if the lock is not immediately ** available. The WAL subsystem issues this signal during rare ** circumstances in order to fix a problem with priority inversion. ** Applications should not use this file-control. ** **
  • [[SQLITE_FCNTL_ZIPVFS]] ** The [SQLITE_FCNTL_ZIPVFS] opcode is implemented by zipvfs only. All other ** VFS should return SQLITE_NOTFOUND for this opcode. ** **
  • [[SQLITE_FCNTL_RBU]] ** The [SQLITE_FCNTL_RBU] opcode is implemented by the special VFS used by ** the RBU extension only. All other VFS should return SQLITE_NOTFOUND for ** this opcode. **
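**
** As an illustrative sketch (added by the editor, not part of the official
** interface documentation), an application might use one of the opcodes
** above through [sqlite3_file_control()] like this, where "db" is assumed
** to be an open [database connection] and error handling is omitted:
**
**      int bPersist = -1;   // -1 means: report the setting without changing it
**      sqlite3_file_control(db, "main", SQLITE_FCNTL_PERSIST_WAL, &bPersist);
**      // bPersist now holds 0 or 1, the current persistent-WAL setting
**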
*/ #define SQLITE_FCNTL_LOCKSTATE 1 #define SQLITE_FCNTL_GET_LOCKPROXYFILE 2 #define SQLITE_FCNTL_SET_LOCKPROXYFILE 3 #define SQLITE_FCNTL_LAST_ERRNO 4 #define SQLITE_FCNTL_SIZE_HINT 5 #define SQLITE_FCNTL_CHUNK_SIZE 6 #define SQLITE_FCNTL_FILE_POINTER 7 #define SQLITE_FCNTL_SYNC_OMITTED 8 #define SQLITE_FCNTL_WIN32_AV_RETRY 9 #define SQLITE_FCNTL_PERSIST_WAL 10 #define SQLITE_FCNTL_OVERWRITE 11 #define SQLITE_FCNTL_VFSNAME 12 #define SQLITE_FCNTL_POWERSAFE_OVERWRITE 13 #define SQLITE_FCNTL_PRAGMA 14 #define SQLITE_FCNTL_BUSYHANDLER 15 #define SQLITE_FCNTL_TEMPFILENAME 16 #define SQLITE_FCNTL_MMAP_SIZE 18 #define SQLITE_FCNTL_TRACE 19 #define SQLITE_FCNTL_HAS_MOVED 20 #define SQLITE_FCNTL_SYNC 21 #define SQLITE_FCNTL_COMMIT_PHASETWO 22 #define SQLITE_FCNTL_WIN32_SET_HANDLE 23 #define SQLITE_FCNTL_WAL_BLOCK 24 #define SQLITE_FCNTL_ZIPVFS 25 #define SQLITE_FCNTL_RBU 26 /* deprecated names */ #define SQLITE_GET_LOCKPROXYFILE SQLITE_FCNTL_GET_LOCKPROXYFILE #define SQLITE_SET_LOCKPROXYFILE SQLITE_FCNTL_SET_LOCKPROXYFILE #define SQLITE_LAST_ERRNO SQLITE_FCNTL_LAST_ERRNO /* ** CAPI3REF: Mutex Handle ** ** The mutex module within SQLite defines [sqlite3_mutex] to be an ** abstract type for a mutex object. The SQLite core never looks ** at the internal representation of an [sqlite3_mutex]. It only ** deals with pointers to the [sqlite3_mutex] object. ** ** Mutexes are created using [sqlite3_mutex_alloc()]. */ typedef struct sqlite3_mutex sqlite3_mutex; /* ** CAPI3REF: OS Interface Object ** ** An instance of the sqlite3_vfs object defines the interface between ** the SQLite core and the underlying operating system. The "vfs" ** in the name of the object stands for "virtual file system". See ** the [VFS | VFS documentation] for further information. ** ** The value of the iVersion field is initially 1 but may be larger in ** future versions of SQLite. Additional fields may be appended to this ** object when the iVersion value is increased. Note that the structure ** of the sqlite3_vfs object changes in the transaction between ** SQLite version 3.5.9 and 3.6.0 and yet the iVersion field was not ** modified. ** ** The szOsFile field is the size of the subclassed [sqlite3_file] ** structure used by this VFS. mxPathname is the maximum length of ** a pathname in this VFS. ** ** Registered sqlite3_vfs objects are kept on a linked list formed by ** the pNext pointer. The [sqlite3_vfs_register()] ** and [sqlite3_vfs_unregister()] interfaces manage this list ** in a thread-safe way. The [sqlite3_vfs_find()] interface ** searches the list. Neither the application code nor the VFS ** implementation should use the pNext pointer. ** ** The pNext field is the only field in the sqlite3_vfs ** structure that SQLite will ever modify. SQLite will only access ** or modify this field while holding a particular static mutex. ** The application should never modify anything within the sqlite3_vfs ** object once the object has been registered. ** ** The zName field holds the name of the VFS module. The name must ** be unique across all VFS modules. ** ** [[sqlite3_vfs.xOpen]] ** ^SQLite guarantees that the zFilename parameter to xOpen ** is either a NULL pointer or string obtained ** from xFullPathname() with an optional suffix added. ** ^If a suffix is added to the zFilename parameter, it will ** consist of a single "-" character followed by no more than ** 11 alphanumeric and/or "-" characters. ** ^SQLite further guarantees that ** the string will be valid and unchanged until xClose() is ** called. 
Because of the previous sentence, ** the [sqlite3_file] can safely store a pointer to the ** filename if it needs to remember the filename for some reason. ** If the zFilename parameter to xOpen is a NULL pointer then xOpen ** must invent its own temporary name for the file. ^Whenever the ** xFilename parameter is NULL it will also be the case that the ** flags parameter will include [SQLITE_OPEN_DELETEONCLOSE]. ** ** The flags argument to xOpen() includes all bits set in ** the flags argument to [sqlite3_open_v2()]. Or if [sqlite3_open()] ** or [sqlite3_open16()] is used, then flags includes at least ** [SQLITE_OPEN_READWRITE] | [SQLITE_OPEN_CREATE]. ** If xOpen() opens a file read-only then it sets *pOutFlags to ** include [SQLITE_OPEN_READONLY]. Other bits in *pOutFlags may be set. ** ** ^(SQLite will also add one of the following flags to the xOpen() ** call, depending on the object being opened: ** **
    **
  • [SQLITE_OPEN_MAIN_DB] **
  • [SQLITE_OPEN_MAIN_JOURNAL] **
  • [SQLITE_OPEN_TEMP_DB] **
  • [SQLITE_OPEN_TEMP_JOURNAL] **
  • [SQLITE_OPEN_TRANSIENT_DB] **
  • [SQLITE_OPEN_SUBJOURNAL] **
  • [SQLITE_OPEN_MASTER_JOURNAL] **
  • [SQLITE_OPEN_WAL] **
)^ ** ** The file I/O implementation can use the object type flags to ** change the way it deals with files. For example, an application ** that does not care about crash recovery or rollback might make ** the open of a journal file a no-op. Writes to this journal would ** also be no-ops, and any attempt to read the journal would return ** SQLITE_IOERR. Or the implementation might recognize that a database ** file will be doing page-aligned sector reads and writes in a random ** order and set up its I/O subsystem accordingly. ** ** SQLite might also add one of the following flags to the xOpen method: ** **
    **
  • [SQLITE_OPEN_DELETEONCLOSE] **
  • [SQLITE_OPEN_EXCLUSIVE] **
** ** The [SQLITE_OPEN_DELETEONCLOSE] flag means the file should be ** deleted when it is closed. ^The [SQLITE_OPEN_DELETEONCLOSE] ** will be set for TEMP databases and their journals, transient ** databases, and subjournals. ** ** ^The [SQLITE_OPEN_EXCLUSIVE] flag is always used in conjunction ** with the [SQLITE_OPEN_CREATE] flag, which are both directly ** analogous to the O_EXCL and O_CREAT flags of the POSIX open() ** API. The SQLITE_OPEN_EXCLUSIVE flag, when paired with the ** SQLITE_OPEN_CREATE, is used to indicate that file should always ** be created, and that it is an error if it already exists. ** It is not used to indicate the file should be opened ** for exclusive access. ** ** ^At least szOsFile bytes of memory are allocated by SQLite ** to hold the [sqlite3_file] structure passed as the third ** argument to xOpen. The xOpen method does not have to ** allocate the structure; it should just fill it in. Note that ** the xOpen method must set the sqlite3_file.pMethods to either ** a valid [sqlite3_io_methods] object or to NULL. xOpen must do ** this even if the open fails. SQLite expects that the sqlite3_file.pMethods ** element will be valid after xOpen returns regardless of the success ** or failure of the xOpen call. ** ** [[sqlite3_vfs.xAccess]] ** ^The flags argument to xAccess() may be [SQLITE_ACCESS_EXISTS] ** to test for the existence of a file, or [SQLITE_ACCESS_READWRITE] to ** test whether a file is readable and writable, or [SQLITE_ACCESS_READ] ** to test whether a file is at least readable. The file can be a ** directory. ** ** ^SQLite will always allocate at least mxPathname+1 bytes for the ** output buffer xFullPathname. The exact size of the output buffer ** is also passed as a parameter to both methods. If the output buffer ** is not large enough, [SQLITE_CANTOPEN] should be returned. Since this is ** handled as a fatal error by SQLite, vfs implementations should endeavor ** to prevent this by setting mxPathname to a sufficiently large value. ** ** The xRandomness(), xSleep(), xCurrentTime(), and xCurrentTimeInt64() ** interfaces are not strictly a part of the filesystem, but they are ** included in the VFS structure for completeness. ** The xRandomness() function attempts to return nBytes bytes ** of good-quality randomness into zOut. The return value is ** the actual number of bytes of randomness obtained. ** The xSleep() method causes the calling thread to sleep for at ** least the number of microseconds given. ^The xCurrentTime() ** method returns a Julian Day Number for the current date and time as ** a floating point value. ** ^The xCurrentTimeInt64() method returns, as an integer, the Julian ** Day Number multiplied by 86400000 (the number of milliseconds in ** a 24-hour day). ** ^SQLite will use the xCurrentTimeInt64() method to get the current ** date and time if that method is available (if iVersion is 2 or ** greater and the function pointer is not NULL) and will fall back ** to xCurrentTime() if xCurrentTimeInt64() is unavailable. ** ** ^The xSetSystemCall(), xGetSystemCall(), and xNestSystemCall() interfaces ** are not used by the SQLite core. These optional interfaces are provided ** by some VFSes to facilitate testing of the VFS code. By overriding ** system calls with functions under its control, a test program can ** simulate faults and error conditions that would otherwise be difficult ** or impossible to induce. 
The set of system calls that can be overridden ** varies from one VFS to another, and from one version of the same VFS to the ** next. Applications that use these interfaces must be prepared for any ** or all of these interfaces to be NULL or for their behavior to change ** from one release to the next. Applications must not attempt to access ** any of these methods if the iVersion of the VFS is less than 3. */ typedef struct sqlite3_vfs sqlite3_vfs; typedef void (*sqlite3_syscall_ptr)(void); struct sqlite3_vfs { int iVersion; /* Structure version number (currently 3) */ int szOsFile; /* Size of subclassed sqlite3_file */ int mxPathname; /* Maximum file pathname length */ sqlite3_vfs *pNext; /* Next registered VFS */ const char *zName; /* Name of this virtual file system */ void *pAppData; /* Pointer to application-specific data */ int (*xOpen)(sqlite3_vfs*, const char *zName, sqlite3_file*, int flags, int *pOutFlags); int (*xDelete)(sqlite3_vfs*, const char *zName, int syncDir); int (*xAccess)(sqlite3_vfs*, const char *zName, int flags, int *pResOut); int (*xFullPathname)(sqlite3_vfs*, const char *zName, int nOut, char *zOut); void *(*xDlOpen)(sqlite3_vfs*, const char *zFilename); void (*xDlError)(sqlite3_vfs*, int nByte, char *zErrMsg); void (*(*xDlSym)(sqlite3_vfs*,void*, const char *zSymbol))(void); void (*xDlClose)(sqlite3_vfs*, void*); int (*xRandomness)(sqlite3_vfs*, int nByte, char *zOut); int (*xSleep)(sqlite3_vfs*, int microseconds); int (*xCurrentTime)(sqlite3_vfs*, double*); int (*xGetLastError)(sqlite3_vfs*, int, char *); /* ** The methods above are in version 1 of the sqlite_vfs object ** definition. Those that follow are added in version 2 or later */ int (*xCurrentTimeInt64)(sqlite3_vfs*, sqlite3_int64*); /* ** The methods above are in versions 1 and 2 of the sqlite_vfs object. ** Those below are for version 3 and greater. */ int (*xSetSystemCall)(sqlite3_vfs*, const char *zName, sqlite3_syscall_ptr); sqlite3_syscall_ptr (*xGetSystemCall)(sqlite3_vfs*, const char *zName); const char *(*xNextSystemCall)(sqlite3_vfs*, const char *zName); /* ** The methods above are in versions 1 through 3 of the sqlite_vfs object. ** New fields may be appended in figure versions. The iVersion ** value will increment whenever this happens. */ }; /* ** CAPI3REF: Flags for the xAccess VFS method ** ** These integer constants can be used as the third parameter to ** the xAccess method of an [sqlite3_vfs] object. They determine ** what kind of permissions the xAccess method is looking for. ** With SQLITE_ACCESS_EXISTS, the xAccess method ** simply checks whether the file exists. ** With SQLITE_ACCESS_READWRITE, the xAccess method ** checks whether the named directory is both readable and writable ** (in other words, if files can be added, removed, and renamed within ** the directory). ** The SQLITE_ACCESS_READWRITE constant is currently used only by the ** [temp_store_directory pragma], though this could change in a future ** release of SQLite. ** With SQLITE_ACCESS_READ, the xAccess method ** checks whether the file is readable. The SQLITE_ACCESS_READ constant is ** currently unused, though it might be used in a future release of ** SQLite. */ #define SQLITE_ACCESS_EXISTS 0 #define SQLITE_ACCESS_READWRITE 1 /* Used by PRAGMA temp_store_directory */ #define SQLITE_ACCESS_READ 2 /* Unused */ /* ** CAPI3REF: Flags for the xShmLock VFS method ** ** These integer constants define the various locking operations ** allowed by the xShmLock method of [sqlite3_io_methods]. 
The ** following are the only legal combinations of flags to the ** xShmLock method: ** **
    **
  • SQLITE_SHM_LOCK | SQLITE_SHM_SHARED **
  • SQLITE_SHM_LOCK | SQLITE_SHM_EXCLUSIVE **
  • SQLITE_SHM_UNLOCK | SQLITE_SHM_SHARED **
  • SQLITE_SHM_UNLOCK | SQLITE_SHM_EXCLUSIVE **
** ** When unlocking, the same SHARED or EXCLUSIVE flag must be supplied as ** was given on the corresponding lock. ** ** The xShmLock method can transition between unlocked and SHARED or ** between unlocked and EXCLUSIVE. It cannot transition between SHARED ** and EXCLUSIVE. */ #define SQLITE_SHM_UNLOCK 1 #define SQLITE_SHM_LOCK 2 #define SQLITE_SHM_SHARED 4 #define SQLITE_SHM_EXCLUSIVE 8 /* ** CAPI3REF: Maximum xShmLock index ** ** The xShmLock method on [sqlite3_io_methods] may use values ** between 0 and this upper bound as its "offset" argument. ** The SQLite core will never attempt to acquire or release a ** lock outside of this range */ #define SQLITE_SHM_NLOCK 8 /* ** CAPI3REF: Initialize The SQLite Library ** ** ^The sqlite3_initialize() routine initializes the ** SQLite library. ^The sqlite3_shutdown() routine ** deallocates any resources that were allocated by sqlite3_initialize(). ** These routines are designed to aid in process initialization and ** shutdown on embedded systems. Workstation applications using ** SQLite normally do not need to invoke either of these routines. ** ** A call to sqlite3_initialize() is an "effective" call if it is ** the first time sqlite3_initialize() is invoked during the lifetime of ** the process, or if it is the first time sqlite3_initialize() is invoked ** following a call to sqlite3_shutdown(). ^(Only an effective call ** of sqlite3_initialize() does any initialization. All other calls ** are harmless no-ops.)^ ** ** A call to sqlite3_shutdown() is an "effective" call if it is the first ** call to sqlite3_shutdown() since the last sqlite3_initialize(). ^(Only ** an effective call to sqlite3_shutdown() does any deinitialization. ** All other valid calls to sqlite3_shutdown() are harmless no-ops.)^ ** ** The sqlite3_initialize() interface is threadsafe, but sqlite3_shutdown() ** is not. The sqlite3_shutdown() interface must only be called from a ** single thread. All open [database connections] must be closed and all ** other SQLite resources must be deallocated prior to invoking ** sqlite3_shutdown(). ** ** Among other things, ^sqlite3_initialize() will invoke ** sqlite3_os_init(). Similarly, ^sqlite3_shutdown() ** will invoke sqlite3_os_end(). ** ** ^The sqlite3_initialize() routine returns [SQLITE_OK] on success. ** ^If for some reason, sqlite3_initialize() is unable to initialize ** the library (perhaps it is unable to allocate a needed resource such ** as a mutex) it returns an [error code] other than [SQLITE_OK]. ** ** ^The sqlite3_initialize() routine is called internally by many other ** SQLite interfaces so that an application usually does not need to ** invoke sqlite3_initialize() directly. For example, [sqlite3_open()] ** calls sqlite3_initialize() so the SQLite library will be automatically ** initialized when [sqlite3_open()] is called if it has not be initialized ** already. ^However, if SQLite is compiled with the [SQLITE_OMIT_AUTOINIT] ** compile-time option, then the automatic calls to sqlite3_initialize() ** are omitted and the application must call sqlite3_initialize() directly ** prior to using any other SQLite interface. For maximum portability, ** it is recommended that applications always invoke sqlite3_initialize() ** directly prior to using any other SQLite interface. Future releases ** of SQLite may require this. In other words, the behavior exhibited ** when SQLite is compiled with [SQLITE_OMIT_AUTOINIT] might become the ** default behavior in some future release of SQLite. 
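**
** A minimal sketch of the explicit initialization pattern recommended above
** (illustrative only, not part of the official documentation):
**
**      if( sqlite3_initialize()!=SQLITE_OK ) return 1;  // start the library
**      // ... open connections, run statements, close all connections ...
**      sqlite3_shutdown();                              // release global resources
**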
** ** The sqlite3_os_init() routine does operating-system specific ** initialization of the SQLite library. The sqlite3_os_end() ** routine undoes the effect of sqlite3_os_init(). Typical tasks ** performed by these routines include allocation or deallocation ** of static resources, initialization of global variables, ** setting up a default [sqlite3_vfs] module, or setting up ** a default configuration using [sqlite3_config()]. ** ** The application should never invoke either sqlite3_os_init() ** or sqlite3_os_end() directly. The application should only invoke ** sqlite3_initialize() and sqlite3_shutdown(). The sqlite3_os_init() ** interface is called automatically by sqlite3_initialize() and ** sqlite3_os_end() is called by sqlite3_shutdown(). Appropriate ** implementations for sqlite3_os_init() and sqlite3_os_end() ** are built into SQLite when it is compiled for Unix, Windows, or OS/2. ** When [custom builds | built for other platforms] ** (using the [SQLITE_OS_OTHER=1] compile-time ** option) the application must supply a suitable implementation for ** sqlite3_os_init() and sqlite3_os_end(). An application-supplied ** implementation of sqlite3_os_init() or sqlite3_os_end() ** must return [SQLITE_OK] on success and some other [error code] upon ** failure. */ SQLITE_API int SQLITE_STDCALL sqlite3_initialize(void); SQLITE_API int SQLITE_STDCALL sqlite3_shutdown(void); SQLITE_API int SQLITE_STDCALL sqlite3_os_init(void); SQLITE_API int SQLITE_STDCALL sqlite3_os_end(void); /* ** CAPI3REF: Configuring The SQLite Library ** ** The sqlite3_config() interface is used to make global configuration ** changes to SQLite in order to tune SQLite to the specific needs of ** the application. The default configuration is recommended for most ** applications and so this routine is usually not necessary. It is ** provided to support rare applications with unusual needs. ** ** The sqlite3_config() interface is not threadsafe. The application ** must insure that no other SQLite interfaces are invoked by other ** threads while sqlite3_config() is running. Furthermore, sqlite3_config() ** may only be invoked prior to library initialization using ** [sqlite3_initialize()] or after shutdown by [sqlite3_shutdown()]. ** ^If sqlite3_config() is called after [sqlite3_initialize()] and before ** [sqlite3_shutdown()] then it will return SQLITE_MISUSE. ** Note, however, that ^sqlite3_config() can be called as part of the ** implementation of an application-defined [sqlite3_os_init()]. ** ** The first argument to sqlite3_config() is an integer ** [configuration option] that determines ** what property of SQLite is to be configured. Subsequent arguments ** vary depending on the [configuration option] ** in the first argument. ** ** ^When a configuration option is set, sqlite3_config() returns [SQLITE_OK]. ** ^If the option is unknown or SQLite is unable to set the option ** then this routine returns a non-zero [error code]. */ SQLITE_API int SQLITE_CDECL sqlite3_config(int, ...); /* ** CAPI3REF: Configure database connections ** METHOD: sqlite3 ** ** The sqlite3_db_config() interface is used to make configuration ** changes to a [database connection]. The interface is similar to ** [sqlite3_config()] except that the changes apply to a single ** [database connection] (specified in the first argument). ** ** The second argument to sqlite3_db_config(D,V,...) is the ** [SQLITE_DBCONFIG_LOOKASIDE | configuration verb] - an integer code ** that indicates what aspect of the [database connection] is being configured. 
** Subsequent arguments vary depending on the configuration verb. ** ** ^Calls to sqlite3_db_config() return SQLITE_OK if and only if ** the call is considered successful. */ SQLITE_API int SQLITE_CDECL sqlite3_db_config(sqlite3*, int op, ...); /* ** CAPI3REF: Memory Allocation Routines ** ** An instance of this object defines the interface between SQLite ** and low-level memory allocation routines. ** ** This object is used in only one place in the SQLite interface. ** A pointer to an instance of this object is the argument to ** [sqlite3_config()] when the configuration option is ** [SQLITE_CONFIG_MALLOC] or [SQLITE_CONFIG_GETMALLOC]. ** By creating an instance of this object ** and passing it to [sqlite3_config]([SQLITE_CONFIG_MALLOC]) ** during configuration, an application can specify an alternative ** memory allocation subsystem for SQLite to use for all of its ** dynamic memory needs. ** ** Note that SQLite comes with several [built-in memory allocators] ** that are perfectly adequate for the overwhelming majority of applications ** and that this object is only useful to a tiny minority of applications ** with specialized memory allocation requirements. This object is ** also used during testing of SQLite in order to specify an alternative ** memory allocator that simulates memory out-of-memory conditions in ** order to verify that SQLite recovers gracefully from such ** conditions. ** ** The xMalloc, xRealloc, and xFree methods must work like the ** malloc(), realloc() and free() functions from the standard C library. ** ^SQLite guarantees that the second argument to ** xRealloc is always a value returned by a prior call to xRoundup. ** ** xSize should return the allocated size of a memory allocation ** previously obtained from xMalloc or xRealloc. The allocated size ** is always at least as big as the requested size but may be larger. ** ** The xRoundup method returns what would be the allocated size of ** a memory allocation given a particular requested size. Most memory ** allocators round up memory allocations at least to the next multiple ** of 8. Some allocators round up to a larger multiple or to a power of 2. ** Every memory allocation request coming in through [sqlite3_malloc()] ** or [sqlite3_realloc()] first calls xRoundup. If xRoundup returns 0, ** that causes the corresponding memory allocation to fail. ** ** The xInit method initializes the memory allocator. For example, ** it might allocate any require mutexes or initialize internal data ** structures. The xShutdown method is invoked (indirectly) by ** [sqlite3_shutdown()] and should deallocate any resources acquired ** by xInit. The pAppData pointer is used as the only parameter to ** xInit and xShutdown. ** ** SQLite holds the [SQLITE_MUTEX_STATIC_MASTER] mutex when it invokes ** the xInit method, so the xInit method need not be threadsafe. The ** xShutdown method is only called from [sqlite3_shutdown()] so it does ** not need to be threadsafe either. For all other methods, SQLite ** holds the [SQLITE_MUTEX_STATIC_MEM] mutex as long as the ** [SQLITE_CONFIG_MEMSTATUS] configuration option is turned on (which ** it is by default) and so the methods are automatically serialized. ** However, if [SQLITE_CONFIG_MEMSTATUS] is disabled, then the other ** methods must be threadsafe or else make their own arrangements for ** serialization. ** ** SQLite will never invoke xInit() more than once without an intervening ** call to xShutdown(). 
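**
** The following sketch (illustrative only, not part of the official
** documentation) wraps the default allocator so that calls to xMalloc are
** counted. It must run before [sqlite3_initialize()]; the names
** "defaultMem", "countingMalloc", and "installCountingAllocator" are
** hypothetical:
**
**      static sqlite3_mem_methods defaultMem;  // saved default allocator
**      static int nAlloc = 0;                  // number of xMalloc calls
**      static void *countingMalloc(int n){ nAlloc++; return defaultMem.xMalloc(n); }
**
**      static int installCountingAllocator(void){
**        sqlite3_mem_methods wrapped;
**        int rc = sqlite3_config(SQLITE_CONFIG_GETMALLOC, &defaultMem);
**        if( rc!=SQLITE_OK ) return rc;
**        wrapped = defaultMem;                 // keep all other methods as-is
**        wrapped.xMalloc = countingMalloc;
**        return sqlite3_config(SQLITE_CONFIG_MALLOC, &wrapped);
**      }
**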
*/ typedef struct sqlite3_mem_methods sqlite3_mem_methods; struct sqlite3_mem_methods { void *(*xMalloc)(int); /* Memory allocation function */ void (*xFree)(void*); /* Free a prior allocation */ void *(*xRealloc)(void*,int); /* Resize an allocation */ int (*xSize)(void*); /* Return the size of an allocation */ int (*xRoundup)(int); /* Round up request size to allocation size */ int (*xInit)(void*); /* Initialize the memory allocator */ void (*xShutdown)(void*); /* Deinitialize the memory allocator */ void *pAppData; /* Argument to xInit() and xShutdown() */ }; /* ** CAPI3REF: Configuration Options ** KEYWORDS: {configuration option} ** ** These constants are the available integer configuration options that ** can be passed as the first argument to the [sqlite3_config()] interface. ** ** New configuration options may be added in future releases of SQLite. ** Existing configuration options might be discontinued. Applications ** should check the return code from [sqlite3_config()] to make sure that ** the call worked. The [sqlite3_config()] interface will return a ** non-zero [error code] if a discontinued or unsupported configuration option ** is invoked. ** **
** [[SQLITE_CONFIG_SINGLETHREAD]]
SQLITE_CONFIG_SINGLETHREAD
**
There are no arguments to this option. ^This option sets the ** [threading mode] to Single-thread. In other words, it disables ** all mutexing and puts SQLite into a mode where it can only be used ** by a single thread. ^If SQLite is compiled with ** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then ** it is not possible to change the [threading mode] from its default ** value of Single-thread and so [sqlite3_config()] will return ** [SQLITE_ERROR] if called with the SQLITE_CONFIG_SINGLETHREAD ** configuration option.
** ** [[SQLITE_CONFIG_MULTITHREAD]]
SQLITE_CONFIG_MULTITHREAD
**
There are no arguments to this option. ^This option sets the ** [threading mode] to Multi-thread. In other words, it disables ** mutexing on [database connection] and [prepared statement] objects. ** The application is responsible for serializing access to ** [database connections] and [prepared statements]. But other mutexes ** are enabled so that SQLite will be safe to use in a multi-threaded ** environment as long as no two threads attempt to use the same ** [database connection] at the same time. ^If SQLite is compiled with ** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then ** it is not possible to set the Multi-thread [threading mode] and ** [sqlite3_config()] will return [SQLITE_ERROR] if called with the ** SQLITE_CONFIG_MULTITHREAD configuration option.
** ** [[SQLITE_CONFIG_SERIALIZED]]
SQLITE_CONFIG_SERIALIZED
**
There are no arguments to this option. ^This option sets the ** [threading mode] to Serialized. In other words, this option enables ** all mutexes including the recursive ** mutexes on [database connection] and [prepared statement] objects. ** In this mode (which is the default when SQLite is compiled with ** [SQLITE_THREADSAFE=1]) the SQLite library will itself serialize access ** to [database connections] and [prepared statements] so that the ** application is free to use the same [database connection] or the ** same [prepared statement] in different threads at the same time. ** ^If SQLite is compiled with ** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then ** it is not possible to set the Serialized [threading mode] and ** [sqlite3_config()] will return [SQLITE_ERROR] if called with the ** SQLITE_CONFIG_SERIALIZED configuration option.
** ** [[SQLITE_CONFIG_MALLOC]]
SQLITE_CONFIG_MALLOC
**
^(The SQLITE_CONFIG_MALLOC option takes a single argument which is ** a pointer to an instance of the [sqlite3_mem_methods] structure. ** The argument specifies ** alternative low-level memory allocation routines to be used in place of ** the memory allocation routines built into SQLite.)^ ^SQLite makes ** its own private copy of the content of the [sqlite3_mem_methods] structure ** before the [sqlite3_config()] call returns.
** ** [[SQLITE_CONFIG_GETMALLOC]]
SQLITE_CONFIG_GETMALLOC
**
^(The SQLITE_CONFIG_GETMALLOC option takes a single argument which ** is a pointer to an instance of the [sqlite3_mem_methods] structure. ** The [sqlite3_mem_methods] ** structure is filled with the currently defined memory allocation routines.)^ ** This option can be used to overload the default memory allocation ** routines with a wrapper that simulates memory allocation failure or ** tracks memory usage, for example.
** ** [[SQLITE_CONFIG_MEMSTATUS]]
SQLITE_CONFIG_MEMSTATUS
**
^The SQLITE_CONFIG_MEMSTATUS option takes a single argument of type int, ** interpreted as a boolean, which enables or disables the collection of ** memory allocation statistics. ^(When memory allocation statistics are ** disabled, the following SQLite interfaces become non-operational: **
    **
  • [sqlite3_memory_used()] **
  • [sqlite3_memory_highwater()] **
  • [sqlite3_soft_heap_limit64()] **
  • [sqlite3_status64()] **
)^ ** ^Memory allocation statistics are enabled by default unless SQLite is ** compiled with [SQLITE_DEFAULT_MEMSTATUS]=0 in which case memory ** allocation statistics are disabled by default. **
** ** [[SQLITE_CONFIG_SCRATCH]]
SQLITE_CONFIG_SCRATCH
**
^The SQLITE_CONFIG_SCRATCH option specifies a static memory buffer ** that SQLite can use for scratch memory. ^(There are three arguments ** to SQLITE_CONFIG_SCRATCH: A pointer to an 8-byte ** aligned memory buffer from which the scratch allocations will be ** drawn, the size of each scratch allocation (sz), ** and the maximum number of scratch allocations (N).)^ ** The first argument must be a pointer to an 8-byte aligned buffer ** of at least sz*N bytes of memory. ** ^SQLite will not use more than one scratch buffer per thread. ** ^SQLite will never request a scratch buffer that is more than 6 ** times the database page size. ** ^If SQLite needs additional ** scratch memory beyond what is provided by this configuration option, then ** [sqlite3_malloc()] will be used to obtain the memory needed.

** ^When the application provides any amount of scratch memory using ** SQLITE_CONFIG_SCRATCH, SQLite avoids unnecessary large ** [sqlite3_malloc|heap allocations]. ** This can help [Robson proof|prevent memory allocation failures] due to heap ** fragmentation in low-memory embedded systems. **

** ** [[SQLITE_CONFIG_PAGECACHE]]
SQLITE_CONFIG_PAGECACHE
**
^The SQLITE_CONFIG_PAGECACHE option specifies a static memory buffer ** that SQLite can use for the database page cache with the default page ** cache implementation. ** This configuration should not be used if an application-defined page ** cache implementation is loaded using the [SQLITE_CONFIG_PCACHE2] ** configuration option. ** ^There are three arguments to SQLITE_CONFIG_PAGECACHE: A pointer to ** 8-byte aligned ** memory, the size of each page buffer (sz), and the number of pages (N). ** The sz argument should be the size of the largest database page ** (a power of two between 512 and 65536) plus some extra bytes for each ** page header. ^The number of extra bytes needed by the page header ** can be determined using the [SQLITE_CONFIG_PCACHE_HDRSZ] option ** to [sqlite3_config()]. ** ^It is harmless, apart from the wasted memory, ** for the sz parameter to be larger than necessary. The first ** argument should point to an 8-byte aligned block of memory that ** is at least sz*N bytes of memory, otherwise subsequent behavior is ** undefined. ** ^SQLite will use the memory provided by the first argument to satisfy its ** memory needs for the first N pages that it adds to cache. ^If additional ** page cache memory is needed beyond what is provided by this option, then ** SQLite goes to [sqlite3_malloc()] for the additional storage space.
** ** [[SQLITE_CONFIG_HEAP]]
SQLITE_CONFIG_HEAP
**
^The SQLITE_CONFIG_HEAP option specifies a static memory buffer ** that SQLite will use for all of its dynamic memory allocation needs ** beyond those provided for by [SQLITE_CONFIG_SCRATCH] and ** [SQLITE_CONFIG_PAGECACHE]. ** ^The SQLITE_CONFIG_HEAP option is only available if SQLite is compiled ** with either [SQLITE_ENABLE_MEMSYS3] or [SQLITE_ENABLE_MEMSYS5] and returns ** [SQLITE_ERROR] if invoked otherwise. ** ^There are three arguments to SQLITE_CONFIG_HEAP: ** An 8-byte aligned pointer to the memory, ** the number of bytes in the memory buffer, and the minimum allocation size. ** ^If the first pointer (the memory pointer) is NULL, then SQLite reverts ** to using its default memory allocator (the system malloc() implementation), ** undoing any prior invocation of [SQLITE_CONFIG_MALLOC]. ^If the ** memory pointer is not NULL then the alternative memory ** allocator is engaged to handle all of SQLite's memory allocation needs. ** The first pointer (the memory pointer) must be aligned to an 8-byte ** boundary or subsequent behavior of SQLite will be undefined. ** The minimum allocation size is capped at 2**12. Reasonable values ** for the minimum allocation size are 2**5 through 2**8.
** ** [[SQLITE_CONFIG_MUTEX]]
SQLITE_CONFIG_MUTEX
**
^(The SQLITE_CONFIG_MUTEX option takes a single argument which is a ** pointer to an instance of the [sqlite3_mutex_methods] structure. ** The argument specifies alternative low-level mutex routines to be used ** in place of the mutex routines built into SQLite.)^ ^SQLite makes a copy of ** the content of the [sqlite3_mutex_methods] structure before the call to ** [sqlite3_config()] returns. ^If SQLite is compiled with ** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then ** the entire mutexing subsystem is omitted from the build and hence calls to ** [sqlite3_config()] with the SQLITE_CONFIG_MUTEX configuration option will ** return [SQLITE_ERROR].
** ** [[SQLITE_CONFIG_GETMUTEX]]
SQLITE_CONFIG_GETMUTEX
**
^(The SQLITE_CONFIG_GETMUTEX option takes a single argument which ** is a pointer to an instance of the [sqlite3_mutex_methods] structure. The ** [sqlite3_mutex_methods] ** structure is filled with the currently defined mutex routines.)^ ** This option can be used to overload the default mutex allocation ** routines with a wrapper used to track mutex usage for performance ** profiling or testing, for example. ^If SQLite is compiled with ** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then ** the entire mutexing subsystem is omitted from the build and hence calls to ** [sqlite3_config()] with the SQLITE_CONFIG_GETMUTEX configuration option will ** return [SQLITE_ERROR].
** ** [[SQLITE_CONFIG_LOOKASIDE]]
SQLITE_CONFIG_LOOKASIDE
**
^(The SQLITE_CONFIG_LOOKASIDE option takes two arguments that determine ** the default size of lookaside memory on each [database connection]. ** The first argument is the ** size of each lookaside buffer slot and the second is the number of ** slots allocated to each database connection.)^ ^(SQLITE_CONFIG_LOOKASIDE ** sets the default lookaside size. The [SQLITE_DBCONFIG_LOOKASIDE] ** option to [sqlite3_db_config()] can be used to change the lookaside ** configuration on individual connections.)^
** ** [[SQLITE_CONFIG_PCACHE2]]
SQLITE_CONFIG_PCACHE2
**
^(The SQLITE_CONFIG_PCACHE2 option takes a single argument which is ** a pointer to an [sqlite3_pcache_methods2] object. This object specifies ** the interface to a custom page cache implementation.)^ ** ^SQLite makes a copy of the [sqlite3_pcache_methods2] object.
** ** [[SQLITE_CONFIG_GETPCACHE2]]
SQLITE_CONFIG_GETPCACHE2
**
^(The SQLITE_CONFIG_GETPCACHE2 option takes a single argument which ** is a pointer to an [sqlite3_pcache_methods2] object. SQLite copies ** the current page cache implementation into that object.)^
** ** [[SQLITE_CONFIG_LOG]]
SQLITE_CONFIG_LOG
**
The SQLITE_CONFIG_LOG option is used to configure the SQLite ** global [error log]. ** (^The SQLITE_CONFIG_LOG option takes two arguments: a pointer to a ** function with a call signature of void(*)(void*,int,const char*), ** and a pointer to void. ^If the function pointer is not NULL, it is ** invoked by [sqlite3_log()] to process each logging event. ^If the ** function pointer is NULL, the [sqlite3_log()] interface becomes a no-op. ** ^The void pointer that is the second argument to SQLITE_CONFIG_LOG is ** passed through as the first parameter to the application-defined logger ** function whenever that function is invoked. ^The second parameter to ** the logger function is a copy of the first parameter to the corresponding ** [sqlite3_log()] call and is intended to be a [result code] or an ** [extended result code]. ^The third parameter passed to the logger is ** log message after formatting via [sqlite3_snprintf()]. ** The SQLite logging interface is not reentrant; the logger function ** supplied by the application must not invoke any SQLite interface. ** In a multi-threaded application, the application-defined logger ** function must be threadsafe.
** ** [[SQLITE_CONFIG_URI]]
SQLITE_CONFIG_URI **
^(The SQLITE_CONFIG_URI option takes a single argument of type int. ** If non-zero, then URI handling is globally enabled. If the parameter is zero, ** then URI handling is globally disabled.)^ ^If URI handling is globally ** enabled, all filenames passed to [sqlite3_open()], [sqlite3_open_v2()], ** [sqlite3_open16()] or ** specified as part of [ATTACH] commands are interpreted as URIs, regardless ** of whether or not the [SQLITE_OPEN_URI] flag is set when the database ** connection is opened. ^If it is globally disabled, filenames are ** only interpreted as URIs if the SQLITE_OPEN_URI flag is set when the ** database connection is opened. ^(By default, URI handling is globally ** disabled. The default value may be changed by compiling with the ** [SQLITE_USE_URI] symbol defined.)^ ** ** [[SQLITE_CONFIG_COVERING_INDEX_SCAN]]
SQLITE_CONFIG_COVERING_INDEX_SCAN **
^The SQLITE_CONFIG_COVERING_INDEX_SCAN option takes a single integer ** argument which is interpreted as a boolean in order to enable or disable ** the use of covering indices for full table scans in the query optimizer. ** ^The default setting is determined ** by the [SQLITE_ALLOW_COVERING_INDEX_SCAN] compile-time option, or is "on" ** if that compile-time option is omitted. ** The ability to disable the use of covering indices for full table scans ** is provided because some incorrectly coded legacy applications might malfunction ** when the optimization is enabled. Providing the ability to ** disable the optimization allows the older, buggy application code to work ** without change even with newer versions of SQLite. ** ** [[SQLITE_CONFIG_PCACHE]] [[SQLITE_CONFIG_GETPCACHE]] **
SQLITE_CONFIG_PCACHE and SQLITE_CONFIG_GETPCACHE **
These options are obsolete and should not be used by new code. ** They are retained for backwards compatibility but are now no-ops. **
** ** [[SQLITE_CONFIG_SQLLOG]] **
SQLITE_CONFIG_SQLLOG **
This option is only available if SQLite is compiled with the ** [SQLITE_ENABLE_SQLLOG] pre-processor macro defined. The first argument should ** be a pointer to a function of type void(*)(void*,sqlite3*,const char*, int). ** The second should be of type (void*). The callback is invoked by the library ** in three separate circumstances, identified by the value passed as the ** fourth parameter. If the fourth parameter is 0, then the database connection ** passed as the second argument has just been opened. The third argument ** points to a buffer containing the name of the main database file. If the ** fourth parameter is 1, then the SQL statement that the third parameter ** points to has just been executed. Or, if the fourth parameter is 2, then ** the connection being passed as the second parameter is being closed. The ** third parameter is passed NULL in this case. An example of using this ** configuration option can be seen in the "test_sqllog.c" source file in ** the canonical SQLite source tree.
** ** [[SQLITE_CONFIG_MMAP_SIZE]] **
SQLITE_CONFIG_MMAP_SIZE **
^SQLITE_CONFIG_MMAP_SIZE takes two 64-bit integer (sqlite3_int64) values ** that are the default mmap size limit (the default setting for ** [PRAGMA mmap_size]) and the maximum allowed mmap size limit. ** ^The default setting can be overridden by each database connection using ** either the [PRAGMA mmap_size] command, or by using the ** [SQLITE_FCNTL_MMAP_SIZE] file control. ^(The maximum allowed mmap size ** will be silently truncated if necessary so that it does not exceed the ** compile-time maximum mmap size set by the ** [SQLITE_MAX_MMAP_SIZE] compile-time option.)^ ** ^If either argument to this option is negative, then that argument is ** changed to its compile-time default. ** ** [[SQLITE_CONFIG_WIN32_HEAPSIZE]] **
SQLITE_CONFIG_WIN32_HEAPSIZE **
^The SQLITE_CONFIG_WIN32_HEAPSIZE option is only available if SQLite is ** compiled for Windows with the [SQLITE_WIN32_MALLOC] pre-processor macro ** defined. ^SQLITE_CONFIG_WIN32_HEAPSIZE takes a 32-bit unsigned integer value ** that specifies the maximum size of the created heap. ** ** [[SQLITE_CONFIG_PCACHE_HDRSZ]] **
SQLITE_CONFIG_PCACHE_HDRSZ **
^The SQLITE_CONFIG_PCACHE_HDRSZ option takes a single parameter which ** is a pointer to an integer and writes into that integer the number of extra ** bytes per page required for each page in [SQLITE_CONFIG_PAGECACHE]. ** The amount of extra space required can change depending on the compiler, ** target platform, and SQLite version. ** ** [[SQLITE_CONFIG_PMASZ]] **
SQLITE_CONFIG_PMASZ **
^The SQLITE_CONFIG_PMASZ option takes a single parameter which ** is an unsigned integer and sets the "Minimum PMA Size" for the multithreaded ** sorter to that integer. The default minimum PMA Size is set by the ** [SQLITE_SORTER_PMASZ] compile-time option. New threads are launched ** to help with sort operations when multithreaded sorting ** is enabled (using the [PRAGMA threads] command) and the amount of content ** to be sorted exceeds the page size times the minimum of the ** [PRAGMA cache_size] setting and this value. **
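**
** As an illustrative sketch (added by the editor, not part of the official
** documentation), a typical start-up sequence might pick a [threading mode]
** and install an [error log] callback before the library is initialized;
** "logCallback" is a hypothetical name and <stdio.h> is assumed:
**
**      static void logCallback(void *pArg, int iErrCode, const char *zMsg){
**        fprintf(stderr, "sqlite3 (%d): %s\n", iErrCode, zMsg);
**      }
**
**      // Both calls must happen before sqlite3_initialize() or any sqlite3_open()
**      sqlite3_config(SQLITE_CONFIG_MULTITHREAD);
**      sqlite3_config(SQLITE_CONFIG_LOG, logCallback, (void*)0);
**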
*/ #define SQLITE_CONFIG_SINGLETHREAD 1 /* nil */ #define SQLITE_CONFIG_MULTITHREAD 2 /* nil */ #define SQLITE_CONFIG_SERIALIZED 3 /* nil */ #define SQLITE_CONFIG_MALLOC 4 /* sqlite3_mem_methods* */ #define SQLITE_CONFIG_GETMALLOC 5 /* sqlite3_mem_methods* */ #define SQLITE_CONFIG_SCRATCH 6 /* void*, int sz, int N */ #define SQLITE_CONFIG_PAGECACHE 7 /* void*, int sz, int N */ #define SQLITE_CONFIG_HEAP 8 /* void*, int nByte, int min */ #define SQLITE_CONFIG_MEMSTATUS 9 /* boolean */ #define SQLITE_CONFIG_MUTEX 10 /* sqlite3_mutex_methods* */ #define SQLITE_CONFIG_GETMUTEX 11 /* sqlite3_mutex_methods* */ /* previously SQLITE_CONFIG_CHUNKALLOC 12 which is now unused. */ #define SQLITE_CONFIG_LOOKASIDE 13 /* int int */ #define SQLITE_CONFIG_PCACHE 14 /* no-op */ #define SQLITE_CONFIG_GETPCACHE 15 /* no-op */ #define SQLITE_CONFIG_LOG 16 /* xFunc, void* */ #define SQLITE_CONFIG_URI 17 /* int */ #define SQLITE_CONFIG_PCACHE2 18 /* sqlite3_pcache_methods2* */ #define SQLITE_CONFIG_GETPCACHE2 19 /* sqlite3_pcache_methods2* */ #define SQLITE_CONFIG_COVERING_INDEX_SCAN 20 /* int */ #define SQLITE_CONFIG_SQLLOG 21 /* xSqllog, void* */ #define SQLITE_CONFIG_MMAP_SIZE 22 /* sqlite3_int64, sqlite3_int64 */ #define SQLITE_CONFIG_WIN32_HEAPSIZE 23 /* int nByte */ #define SQLITE_CONFIG_PCACHE_HDRSZ 24 /* int *psz */ #define SQLITE_CONFIG_PMASZ 25 /* unsigned int szPma */ /* ** CAPI3REF: Database Connection Configuration Options ** ** These constants are the available integer configuration options that ** can be passed as the second argument to the [sqlite3_db_config()] interface. ** ** New configuration options may be added in future releases of SQLite. ** Existing configuration options might be discontinued. Applications ** should check the return code from [sqlite3_db_config()] to make sure that ** the call worked. ^The [sqlite3_db_config()] interface will return a ** non-zero [error code] if a discontinued or unsupported configuration option ** is invoked. ** **
**
SQLITE_DBCONFIG_LOOKASIDE
**
^This option takes three additional arguments that determine the ** [lookaside memory allocator] configuration for the [database connection]. ** ^The first argument (the third parameter to [sqlite3_db_config()]) is a ** pointer to a memory buffer to use for lookaside memory. ** ^The first argument after the SQLITE_DBCONFIG_LOOKASIDE verb ** may be NULL in which case SQLite will allocate the ** lookaside buffer itself using [sqlite3_malloc()]. ^The second argument is the ** size of each lookaside buffer slot. ^The third argument is the number of ** slots. The size of the buffer in the first argument must be greater than ** or equal to the product of the second and third arguments. The buffer ** must be aligned to an 8-byte boundary. ^If the second argument to ** SQLITE_DBCONFIG_LOOKASIDE is not a multiple of 8, it is internally ** rounded down to the next smaller multiple of 8. ^(The lookaside memory ** configuration for a database connection can only be changed when that ** connection is not currently using lookaside memory, or in other words ** when the "current value" returned by ** [sqlite3_db_status](D,[SQLITE_CONFIG_LOOKASIDE],...) is zero. ** Any attempt to change the lookaside memory configuration when lookaside ** memory is in use leaves the configuration unchanged and returns ** [SQLITE_BUSY].)^
** **
SQLITE_DBCONFIG_ENABLE_FKEY
**
^This option is used to enable or disable the enforcement of ** [foreign key constraints]. There should be two additional arguments. ** The first argument is an integer which is 0 to disable FK enforcement, ** positive to enable FK enforcement or negative to leave FK enforcement ** unchanged. The second parameter is a pointer to an integer into which ** is written 0 or 1 to indicate whether FK enforcement is off or on ** following this call. The second parameter may be a NULL pointer, in ** which case the FK enforcement setting is not reported back.
** **
SQLITE_DBCONFIG_ENABLE_TRIGGER
**
^This option is used to enable or disable [CREATE TRIGGER | triggers]. ** There should be two additional arguments. ** The first argument is an integer which is 0 to disable triggers, ** positive to enable triggers or negative to leave the setting unchanged. ** The second parameter is a pointer to an integer into which ** is written 0 or 1 to indicate whether triggers are disabled or enabled ** following this call. The second parameter may be a NULL pointer, in ** which case the trigger setting is not reported back.
** **
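**
** For illustration only (a sketch, not part of the interface definition), foreign-key
** enforcement can be switched on and the resulting state read back like this; the
** variable names are arbitrary:
**
**   int fkState = -1;
**   int rc = sqlite3_db_config(db, SQLITE_DBCONFIG_ENABLE_FKEY, 1, &fkState);
**
** After a successful call, fkState reports whether enforcement is now on (1) or off (0).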
*/ #define SQLITE_DBCONFIG_LOOKASIDE 1001 /* void* int int */ #define SQLITE_DBCONFIG_ENABLE_FKEY 1002 /* int int* */ #define SQLITE_DBCONFIG_ENABLE_TRIGGER 1003 /* int int* */ /* ** CAPI3REF: Enable Or Disable Extended Result Codes ** METHOD: sqlite3 ** ** ^The sqlite3_extended_result_codes() routine enables or disables the ** [extended result codes] feature of SQLite. ^The extended result ** codes are disabled by default for historical compatibility. */ SQLITE_API int SQLITE_STDCALL sqlite3_extended_result_codes(sqlite3*, int onoff); /* ** CAPI3REF: Last Insert Rowid ** METHOD: sqlite3 ** ** ^Each entry in most SQLite tables (except for [WITHOUT ROWID] tables) ** has a unique 64-bit signed ** integer key called the [ROWID | "rowid"]. ^The rowid is always available ** as an undeclared column named ROWID, OID, or _ROWID_ as long as those ** names are not also used by explicitly declared columns. ^If ** the table has a column of type [INTEGER PRIMARY KEY] then that column ** is another alias for the rowid. ** ** ^The sqlite3_last_insert_rowid(D) interface returns the [rowid] of the ** most recent successful [INSERT] into a rowid table or [virtual table] ** on database connection D. ** ^Inserts into [WITHOUT ROWID] tables are not recorded. ** ^If no successful [INSERT]s into rowid tables ** have ever occurred on the database connection D, ** then sqlite3_last_insert_rowid(D) returns zero. ** ** ^(If an [INSERT] occurs within a trigger or within a [virtual table] ** method, then this routine will return the [rowid] of the inserted ** row as long as the trigger or virtual table method is running. ** But once the trigger or virtual table method ends, the value returned ** by this routine reverts to what it was before the trigger or virtual ** table method began.)^ ** ** ^An [INSERT] that fails due to a constraint violation is not a ** successful [INSERT] and does not change the value returned by this ** routine. ^Thus INSERT OR FAIL, INSERT OR IGNORE, INSERT OR ROLLBACK, ** and INSERT OR ABORT make no changes to the return value of this ** routine when their insertion fails. ^(When INSERT OR REPLACE ** encounters a constraint violation, it does not fail. The ** INSERT continues to completion after deleting rows that caused ** the constraint problem so INSERT OR REPLACE will always change ** the return value of this interface.)^ ** ** ^For the purposes of this routine, an [INSERT] is considered to ** be successful even if it is subsequently rolled back. ** ** This function is accessible to SQL statements via the ** [last_insert_rowid() SQL function]. ** ** If a separate thread performs a new [INSERT] on the same ** database connection while the [sqlite3_last_insert_rowid()] ** function is running and thus changes the last insert [rowid], ** then the value returned by [sqlite3_last_insert_rowid()] is ** unpredictable and might not equal either the old or the new ** last insert [rowid]. */ SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_last_insert_rowid(sqlite3*); /* ** CAPI3REF: Count The Number Of Rows Modified ** METHOD: sqlite3 ** ** ^This function returns the number of rows modified, inserted or ** deleted by the most recently completed INSERT, UPDATE or DELETE ** statement on the database connection specified by the only parameter. ** ^Executing any other type of SQL statement does not modify the value ** returned by this function. 
** ** ^Only changes made directly by the INSERT, UPDATE or DELETE statement are ** considered - auxiliary changes caused by [CREATE TRIGGER | triggers], ** [foreign key actions] or [REPLACE] constraint resolution are not counted. ** ** Changes to a view that are intercepted by ** [INSTEAD OF trigger | INSTEAD OF triggers] are not counted. ^The value ** returned by sqlite3_changes() immediately after an INSERT, UPDATE or ** DELETE statement run on a view is always zero. Only changes made to real ** tables are counted. ** ** Things are more complicated if the sqlite3_changes() function is ** executed while a trigger program is running. This may happen if the ** program uses the [changes() SQL function], or if some other callback ** function invokes sqlite3_changes() directly. Essentially: ** **
**
**   • ^(Before entering a trigger program the value returned by ** the sqlite3_changes() function is saved. After the trigger program ** has finished, the original value is restored.)^
**
**   • ^(Within a trigger program each INSERT, UPDATE and DELETE ** statement sets the value returned by sqlite3_changes() ** upon completion as normal. Of course, this value will not include ** any changes performed by sub-triggers, as the sqlite3_changes() ** value will be saved and restored after each sub-trigger has run.)^
**
** ** ^This means that if the changes() SQL function (or similar) is used ** by the first INSERT, UPDATE or DELETE statement within a trigger, it ** returns the value as set when the calling statement began executing. ** ^If it is used by the second or subsequent such statement within a trigger ** program, the value returned reflects the number of rows modified by the ** previous INSERT, UPDATE or DELETE statement within the same trigger. ** ** See also the [sqlite3_total_changes()] interface, the ** [count_changes pragma], and the [changes() SQL function]. ** ** If a separate thread makes changes on the same database connection ** while [sqlite3_changes()] is running then the value returned ** is unpredictable and not meaningful. */ SQLITE_API int SQLITE_STDCALL sqlite3_changes(sqlite3*); /* ** CAPI3REF: Total Number Of Rows Modified ** METHOD: sqlite3 ** ** ^This function returns the total number of rows inserted, modified or ** deleted by all [INSERT], [UPDATE] or [DELETE] statements completed ** since the database connection was opened, including those executed as ** part of trigger programs. ^Executing any other type of SQL statement ** does not affect the value returned by sqlite3_total_changes(). ** ** ^Changes made as part of [foreign key actions] are included in the ** count, but those made as part of REPLACE constraint resolution are ** not. ^Changes to a view that are intercepted by INSTEAD OF triggers ** are not counted. ** ** See also the [sqlite3_changes()] interface, the ** [count_changes pragma], and the [total_changes() SQL function]. ** ** If a separate thread makes changes on the same database connection ** while [sqlite3_total_changes()] is running then the value ** returned is unpredictable and not meaningful. */ SQLITE_API int SQLITE_STDCALL sqlite3_total_changes(sqlite3*); /* ** CAPI3REF: Interrupt A Long-Running Query ** METHOD: sqlite3 ** ** ^This function causes any pending database operation to abort and ** return at its earliest opportunity. This routine is typically ** called in response to a user action such as pressing "Cancel" ** or Ctrl-C where the user wants a long query operation to halt ** immediately. ** ** ^It is safe to call this routine from a thread different from the ** thread that is currently running the database operation. But it ** is not safe to call this routine with a [database connection] that ** is closed or might close before sqlite3_interrupt() returns. ** ** ^If an SQL operation is very nearly finished at the time when ** sqlite3_interrupt() is called, then it might not have an opportunity ** to be interrupted and might continue to completion. ** ** ^An SQL operation that is interrupted will return [SQLITE_INTERRUPT]. ** ^If the interrupted SQL operation is an INSERT, UPDATE, or DELETE ** that is inside an explicit transaction, then the entire transaction ** will be rolled back automatically. ** ** ^The sqlite3_interrupt(D) call is in effect until all currently running ** SQL statements on [database connection] D complete. ^Any new SQL statements ** that are started after the sqlite3_interrupt() call and before the ** running statements reaches zero are interrupted as if they had been ** running prior to the sqlite3_interrupt() call. ^New SQL statements ** that are started after the running statement count reaches zero are ** not effected by the sqlite3_interrupt(). 
** ^A call to sqlite3_interrupt(D) that occurs when there are no running ** SQL statements is a no-op and has no effect on SQL statements ** that are started after the sqlite3_interrupt() call returns. ** ** If the database connection closes while [sqlite3_interrupt()] ** is running then bad things will likely happen. */ SQLITE_API void SQLITE_STDCALL sqlite3_interrupt(sqlite3*); /* ** CAPI3REF: Determine If An SQL Statement Is Complete ** ** These routines are useful during command-line input to determine if the ** currently entered text seems to form a complete SQL statement or ** if additional input is needed before sending the text into ** SQLite for parsing. ^These routines return 1 if the input string ** appears to be a complete SQL statement. ^A statement is judged to be ** complete if it ends with a semicolon token and is not a prefix of a ** well-formed CREATE TRIGGER statement. ^Semicolons that are embedded within ** string literals or quoted identifier names or comments are not ** independent tokens (they are part of the token in which they are ** embedded) and thus do not count as a statement terminator. ^Whitespace ** and comments that follow the final semicolon are ignored. ** ** ^These routines return 0 if the statement is incomplete. ^If a ** memory allocation fails, then SQLITE_NOMEM is returned. ** ** ^These routines do not parse the SQL statements thus ** will not detect syntactically incorrect SQL. ** ** ^(If SQLite has not been initialized using [sqlite3_initialize()] prior ** to invoking sqlite3_complete16() then sqlite3_initialize() is invoked ** automatically by sqlite3_complete16(). If that initialization fails, ** then the return value from sqlite3_complete16() will be non-zero ** regardless of whether or not the input SQL is complete.)^ ** ** The input to [sqlite3_complete()] must be a zero-terminated ** UTF-8 string. ** ** The input to [sqlite3_complete16()] must be a zero-terminated ** UTF-16 string in native byte order. */ SQLITE_API int SQLITE_STDCALL sqlite3_complete(const char *sql); SQLITE_API int SQLITE_STDCALL sqlite3_complete16(const void *sql); /* ** CAPI3REF: Register A Callback To Handle SQLITE_BUSY Errors ** KEYWORDS: {busy-handler callback} {busy handler} ** METHOD: sqlite3 ** ** ^The sqlite3_busy_handler(D,X,P) routine sets a callback function X ** that might be invoked with argument P whenever ** an attempt is made to access a database table associated with ** [database connection] D when another thread ** or process has the table locked. ** The sqlite3_busy_handler() interface is used to implement ** [sqlite3_busy_timeout()] and [PRAGMA busy_timeout]. ** ** ^If the busy callback is NULL, then [SQLITE_BUSY] ** is returned immediately upon encountering the lock. ^If the busy callback ** is not NULL, then the callback might be invoked with two arguments. ** ** ^The first argument to the busy handler is a copy of the void* pointer which ** is the third argument to sqlite3_busy_handler(). ^The second argument to ** the busy handler callback is the number of times that the busy handler has ** been invoked previously for the same locking event. ^If the ** busy callback returns 0, then no additional attempts are made to ** access the database and [SQLITE_BUSY] is returned ** to the application. ** ^If the callback returns non-zero, then another attempt ** is made to access the database and the cycle repeats. ** ** The presence of a busy handler does not guarantee that it will be invoked ** when there is lock contention. 
^If SQLite determines that invoking the busy ** handler could result in a deadlock, it will go ahead and return [SQLITE_BUSY] ** to the application instead of invoking the ** busy handler. ** Consider a scenario where one process is holding a read lock that ** it is trying to promote to a reserved lock and ** a second process is holding a reserved lock that it is trying ** to promote to an exclusive lock. The first process cannot proceed ** because it is blocked by the second and the second process cannot ** proceed because it is blocked by the first. If both processes ** invoke the busy handlers, neither will make any progress. Therefore, ** SQLite returns [SQLITE_BUSY] for the first process, hoping that this ** will induce the first process to release its read lock and allow ** the second process to proceed. ** ** ^The default busy callback is NULL. ** ** ^(There can only be a single busy handler defined for each ** [database connection]. Setting a new busy handler clears any ** previously set handler.)^ ^Note that calling [sqlite3_busy_timeout()] ** or evaluating [PRAGMA busy_timeout=N] will change the ** busy handler and thus clear any previously set busy handler. ** ** The busy callback should not take any actions which modify the ** database connection that invoked the busy handler. In other words, ** the busy handler is not reentrant. Any such actions ** result in undefined behavior. ** ** A busy handler must not close the database connection ** or [prepared statement] that invoked the busy handler. */ SQLITE_API int SQLITE_STDCALL sqlite3_busy_handler(sqlite3*, int(*)(void*,int), void*); /* ** CAPI3REF: Set A Busy Timeout ** METHOD: sqlite3 ** ** ^This routine sets a [sqlite3_busy_handler | busy handler] that sleeps ** for a specified amount of time when a table is locked. ^The handler ** will sleep multiple times until at least "ms" milliseconds of sleeping ** have accumulated. ^After at least "ms" milliseconds of sleeping, ** the handler returns 0 which causes [sqlite3_step()] to return ** [SQLITE_BUSY]. ** ** ^Calling this routine with an argument less than or equal to zero ** turns off all busy handlers. ** ** ^(There can only be a single busy handler for a particular ** [database connection] at any given moment. If another busy handler ** was defined (using [sqlite3_busy_handler()]) prior to calling ** this routine, that other busy handler is cleared.)^ ** ** See also: [PRAGMA busy_timeout] */ SQLITE_API int SQLITE_STDCALL sqlite3_busy_timeout(sqlite3*, int ms); /* ** CAPI3REF: Convenience Routines For Running Queries ** METHOD: sqlite3 ** ** This is a legacy interface that is preserved for backwards compatibility. ** Use of this interface is not recommended. ** ** Definition: A result table is memory data structure created by the ** [sqlite3_get_table()] interface. A result table records the ** complete query results from one or more queries. ** ** The table conceptually has a number of rows and columns. But ** these numbers are not part of the result table itself. These ** numbers are obtained separately. Let N be the number of rows ** and M be the number of columns. ** ** A result table is an array of pointers to zero-terminated UTF-8 strings. ** There are (N+1)*M elements in the array. The first M pointers point ** to zero-terminated strings that contain the names of the columns. ** The remaining entries all point to query results. NULL values result ** in NULL pointers. 
All other values are in their UTF-8 zero-terminated ** string representation as returned by [sqlite3_column_text()]. ** ** A result table might consist of one or more memory allocations. ** It is not safe to pass a result table directly to [sqlite3_free()]. ** A result table should be deallocated using [sqlite3_free_table()]. ** ** ^(As an example of the result table format, suppose a query result ** is as follows: ** **
**        Name        | Age
**        -----------------------
**        Alice       | 43
**        Bob         | 28
**        Cindy       | 21
** 
** ** There are two columns (M==2) and three rows (N==3). Thus the ** result table has 8 entries. Suppose the result table is stored ** in an array named azResult. Then azResult holds this content: ** **
**        azResult[0] = "Name";
**        azResult[1] = "Age";
**        azResult[2] = "Alice";
**        azResult[3] = "43";
**        azResult[4] = "Bob";
**        azResult[5] = "28";
**        azResult[6] = "Cindy";
**        azResult[7] = "21";
** 
)^ ** ** ^The sqlite3_get_table() function evaluates one or more ** semicolon-separated SQL statements in the zero-terminated UTF-8 ** string of its 2nd parameter and returns a result table to the ** pointer given in its 3rd parameter. ** ** After the application has finished with the result from sqlite3_get_table(), ** it must pass the result table pointer to sqlite3_free_table() in order to ** release the memory that was malloced. Because of the way the ** [sqlite3_malloc()] happens within sqlite3_get_table(), the calling ** function must not try to call [sqlite3_free()] directly. Only ** [sqlite3_free_table()] is able to release the memory properly and safely. ** ** The sqlite3_get_table() interface is implemented as a wrapper around ** [sqlite3_exec()]. The sqlite3_get_table() routine does not have access ** to any internal data structures of SQLite. It uses only the public ** interface defined here. As a consequence, errors that occur in the ** wrapper layer outside of the internal [sqlite3_exec()] call are not ** reflected in subsequent calls to [sqlite3_errcode()] or ** [sqlite3_errmsg()]. */ SQLITE_API int SQLITE_STDCALL sqlite3_get_table( sqlite3 *db, /* An open database */ const char *zSql, /* SQL to be evaluated */ char ***pazResult, /* Results of the query */ int *pnRow, /* Number of result rows written here */ int *pnColumn, /* Number of result columns written here */ char **pzErrmsg /* Error msg written here */ ); SQLITE_API void SQLITE_STDCALL sqlite3_free_table(char **result); /* ** CAPI3REF: Formatted String Printing Functions ** ** These routines are work-alikes of the "printf()" family of functions ** from the standard C library. ** These routines understand most of the common K&R formatting options, ** plus some additional non-standard formats, detailed below. ** Note that some of the more obscure formatting options from recent ** C-library standards are omitted from this implementation. ** ** ^The sqlite3_mprintf() and sqlite3_vmprintf() routines write their ** results into memory obtained from [sqlite3_malloc()]. ** The strings returned by these two routines should be ** released by [sqlite3_free()]. ^Both routines return a ** NULL pointer if [sqlite3_malloc()] is unable to allocate enough ** memory to hold the resulting string. ** ** ^(The sqlite3_snprintf() routine is similar to "snprintf()" from ** the standard C library. The result is written into the ** buffer supplied as the second parameter whose size is given by ** the first parameter. Note that the order of the ** first two parameters is reversed from snprintf().)^ This is an ** historical accident that cannot be fixed without breaking ** backwards compatibility. ^(Note also that sqlite3_snprintf() ** returns a pointer to its buffer instead of the number of ** characters actually written into the buffer.)^ We admit that ** the number of characters written would be a more useful return ** value but we cannot change the implementation of sqlite3_snprintf() ** now without breaking compatibility. ** ** ^As long as the buffer size is greater than zero, sqlite3_snprintf() ** guarantees that the buffer is always zero-terminated. ^The first ** parameter "n" is the total size of the buffer, including space for ** the zero terminator. So the longest string that can be completely ** written will be n-1 characters. ** ** ^The sqlite3_vsnprintf() routine is a varargs version of sqlite3_snprintf(). ** ** These routines all implement some additional formatting ** options that are useful for constructing SQL statements. 
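**
** As a brief aside, a minimal sketch of the reversed sqlite3_snprintf() argument
** order described above (the buffer name and values are arbitrary):
**
**   char zBuf[50];
**   sqlite3_snprintf(sizeof(zBuf), zBuf, "x has the value %d", 42);
**
** The buffer size comes first, the buffer second, and the result is always
** zero-terminated as long as the size is greater than zero.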
** ** All of the usual printf() formatting options apply. In addition, there ** are "%q", "%Q", "%w" and "%z" options. ** ** ^(The %q option works like %s in that it substitutes a nul-terminated ** string from the argument list. But %q also doubles every '\'' character. ** %q is designed for use inside a string literal.)^ By doubling each '\'' ** character it escapes that character and allows it to be inserted into ** the string. ** ** For example, assume the string variable zText contains text as follows: ** **
**  char *zText = "It's a happy day!";
** 
** ** One can use this text in an SQL statement as follows: ** **
**  char *zSQL = sqlite3_mprintf("INSERT INTO table1 VALUES('%q')", zText);
**  sqlite3_exec(db, zSQL, 0, 0, 0);
**  sqlite3_free(zSQL);
** 
** ** Because the %q format string is used, the '\'' character in zText ** is escaped and the SQL generated is as follows: ** **
**  INSERT INTO table1 VALUES('It''s a happy day!')
** 
** ** This is correct. Had we used %s instead of %q, the generated SQL ** would have looked like this: ** **
**  INSERT INTO table1 VALUES('It's a happy day!');
** 
** ** This second example is an SQL syntax error. As a general rule you should ** always use %q instead of %s when inserting text into a string literal. ** ** ^(The %Q option works like %q except it also adds single quotes around ** the outside of the total string. Additionally, if the parameter in the ** argument list is a NULL pointer, %Q substitutes the text "NULL" (without ** single quotes).)^ So, for example, one could say: ** **
**  char *zSQL = sqlite3_mprintf("INSERT INTO table1 VALUES(%Q)", zText);
**  sqlite3_exec(db, zSQL, 0, 0, 0);
**  sqlite3_free(zSQL);
** 
** ** The code above will render a correct SQL statement in the zSQL ** variable even if the zText variable is a NULL pointer. ** ** ^(The "%w" formatting option is like "%q" except that it expects to ** be contained within double-quotes instead of single quotes, and it ** escapes the double-quote character instead of the single-quote ** character.)^ The "%w" formatting option is intended for safely inserting ** table and column names into a constructed SQL statement. ** ** ^(The "%z" formatting option works like "%s" but with the ** addition that after the string has been read and copied into ** the result, [sqlite3_free()] is called on the input string.)^ */ SQLITE_API char *SQLITE_CDECL sqlite3_mprintf(const char*,...); SQLITE_API char *SQLITE_STDCALL sqlite3_vmprintf(const char*, va_list); SQLITE_API char *SQLITE_CDECL sqlite3_snprintf(int,char*,const char*, ...); SQLITE_API char *SQLITE_STDCALL sqlite3_vsnprintf(int,char*,const char*, va_list); /* ** CAPI3REF: Memory Allocation Subsystem ** ** The SQLite core uses these three routines for all of its own ** internal memory allocation needs. "Core" in the previous sentence ** does not include operating-system specific VFS implementation. The ** Windows VFS uses native malloc() and free() for some operations. ** ** ^The sqlite3_malloc() routine returns a pointer to a block ** of memory at least N bytes in length, where N is the parameter. ** ^If sqlite3_malloc() is unable to obtain sufficient free ** memory, it returns a NULL pointer. ^If the parameter N to ** sqlite3_malloc() is zero or negative then sqlite3_malloc() returns ** a NULL pointer. ** ** ^The sqlite3_malloc64(N) routine works just like ** sqlite3_malloc(N) except that N is an unsigned 64-bit integer instead ** of a signed 32-bit integer. ** ** ^Calling sqlite3_free() with a pointer previously returned ** by sqlite3_malloc() or sqlite3_realloc() releases that memory so ** that it might be reused. ^The sqlite3_free() routine is ** a no-op if is called with a NULL pointer. Passing a NULL pointer ** to sqlite3_free() is harmless. After being freed, memory ** should neither be read nor written. Even reading previously freed ** memory might result in a segmentation fault or other severe error. ** Memory corruption, a segmentation fault, or other severe error ** might result if sqlite3_free() is called with a non-NULL pointer that ** was not obtained from sqlite3_malloc() or sqlite3_realloc(). ** ** ^The sqlite3_realloc(X,N) interface attempts to resize a ** prior memory allocation X to be at least N bytes. ** ^If the X parameter to sqlite3_realloc(X,N) ** is a NULL pointer then its behavior is identical to calling ** sqlite3_malloc(N). ** ^If the N parameter to sqlite3_realloc(X,N) is zero or ** negative then the behavior is exactly the same as calling ** sqlite3_free(X). ** ^sqlite3_realloc(X,N) returns a pointer to a memory allocation ** of at least N bytes in size or NULL if insufficient memory is available. ** ^If M is the size of the prior allocation, then min(N,M) bytes ** of the prior allocation are copied into the beginning of buffer returned ** by sqlite3_realloc(X,N) and the prior allocation is freed. ** ^If sqlite3_realloc(X,N) returns NULL and N is positive, then the ** prior allocation is not freed. ** ** ^The sqlite3_realloc64(X,N) interfaces works the same as ** sqlite3_realloc(X,N) except that N is a 64-bit unsigned integer instead ** of a 32-bit signed integer. 
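**
** A minimal allocate/grow/release sketch of the pattern described above (the sizes
** are arbitrary and error handling is abbreviated):
**
**   char *p = sqlite3_malloc(100);
**   if( p ){
**     char *pNew = sqlite3_realloc(p, 200);
**     if( pNew ) p = pNew;
**     sqlite3_free(p);
**   }
**
** If the realloc fails, the original allocation is not freed, so p remains valid
** and must still be released.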
** ** ^If X is a memory allocation previously obtained from sqlite3_malloc(), ** sqlite3_malloc64(), sqlite3_realloc(), or sqlite3_realloc64(), then ** sqlite3_msize(X) returns the size of that memory allocation in bytes. ** ^The value returned by sqlite3_msize(X) might be larger than the number ** of bytes requested when X was allocated. ^If X is a NULL pointer then ** sqlite3_msize(X) returns zero. If X points to something that is not ** the beginning of memory allocation, or if it points to a formerly ** valid memory allocation that has now been freed, then the behavior ** of sqlite3_msize(X) is undefined and possibly harmful. ** ** ^The memory returned by sqlite3_malloc(), sqlite3_realloc(), ** sqlite3_malloc64(), and sqlite3_realloc64() ** is always aligned to at least an 8 byte boundary, or to a ** 4 byte boundary if the [SQLITE_4_BYTE_ALIGNED_MALLOC] compile-time ** option is used. ** ** In SQLite version 3.5.0 and 3.5.1, it was possible to define ** the SQLITE_OMIT_MEMORY_ALLOCATION which would cause the built-in ** implementation of these routines to be omitted. That capability ** is no longer provided. Only built-in memory allocators can be used. ** ** Prior to SQLite version 3.7.10, the Windows OS interface layer called ** the system malloc() and free() directly when converting ** filenames between the UTF-8 encoding used by SQLite ** and whatever filename encoding is used by the particular Windows ** installation. Memory allocation errors were detected, but ** they were reported back as [SQLITE_CANTOPEN] or ** [SQLITE_IOERR] rather than [SQLITE_NOMEM]. ** ** The pointer arguments to [sqlite3_free()] and [sqlite3_realloc()] ** must be either NULL or else pointers obtained from a prior ** invocation of [sqlite3_malloc()] or [sqlite3_realloc()] that have ** not yet been released. ** ** The application must not read or write any part of ** a block of memory after it has been released using ** [sqlite3_free()] or [sqlite3_realloc()]. */ SQLITE_API void *SQLITE_STDCALL sqlite3_malloc(int); SQLITE_API void *SQLITE_STDCALL sqlite3_malloc64(sqlite3_uint64); SQLITE_API void *SQLITE_STDCALL sqlite3_realloc(void*, int); SQLITE_API void *SQLITE_STDCALL sqlite3_realloc64(void*, sqlite3_uint64); SQLITE_API void SQLITE_STDCALL sqlite3_free(void*); SQLITE_API sqlite3_uint64 SQLITE_STDCALL sqlite3_msize(void*); /* ** CAPI3REF: Memory Allocator Statistics ** ** SQLite provides these two interfaces for reporting on the status ** of the [sqlite3_malloc()], [sqlite3_free()], and [sqlite3_realloc()] ** routines, which form the built-in memory allocation subsystem. ** ** ^The [sqlite3_memory_used()] routine returns the number of bytes ** of memory currently outstanding (malloced but not freed). ** ^The [sqlite3_memory_highwater()] routine returns the maximum ** value of [sqlite3_memory_used()] since the high-water mark ** was last reset. ^The values returned by [sqlite3_memory_used()] and ** [sqlite3_memory_highwater()] include any overhead ** added by SQLite in its implementation of [sqlite3_malloc()], ** but not overhead added by the any underlying system library ** routines that [sqlite3_malloc()] may call. ** ** ^The memory high-water mark is reset to the current value of ** [sqlite3_memory_used()] if and only if the parameter to ** [sqlite3_memory_highwater()] is true. ^The value returned ** by [sqlite3_memory_highwater(1)] is the high-water mark ** prior to the reset. 
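**
** For example (illustrative only), current and peak heap usage can be sampled,
** and the high-water mark reset, as follows:
**
**   sqlite3_int64 nowUsed = sqlite3_memory_used();
**   sqlite3_int64 peak = sqlite3_memory_highwater(1);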
*/ SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_memory_used(void); SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_memory_highwater(int resetFlag); /* ** CAPI3REF: Pseudo-Random Number Generator ** ** SQLite contains a high-quality pseudo-random number generator (PRNG) used to ** select random [ROWID | ROWIDs] when inserting new records into a table that ** already uses the largest possible [ROWID]. The PRNG is also used for ** the build-in random() and randomblob() SQL functions. This interface allows ** applications to access the same PRNG for other purposes. ** ** ^A call to this routine stores N bytes of randomness into buffer P. ** ^The P parameter can be a NULL pointer. ** ** ^If this routine has not been previously called or if the previous ** call had N less than one or a NULL pointer for P, then the PRNG is ** seeded using randomness obtained from the xRandomness method of ** the default [sqlite3_vfs] object. ** ^If the previous call to this routine had an N of 1 or more and a ** non-NULL P then the pseudo-randomness is generated ** internally and without recourse to the [sqlite3_vfs] xRandomness ** method. */ SQLITE_API void SQLITE_STDCALL sqlite3_randomness(int N, void *P); /* ** CAPI3REF: Compile-Time Authorization Callbacks ** METHOD: sqlite3 ** ** ^This routine registers an authorizer callback with a particular ** [database connection], supplied in the first argument. ** ^The authorizer callback is invoked as SQL statements are being compiled ** by [sqlite3_prepare()] or its variants [sqlite3_prepare_v2()], ** [sqlite3_prepare16()] and [sqlite3_prepare16_v2()]. ^At various ** points during the compilation process, as logic is being created ** to perform various actions, the authorizer callback is invoked to ** see if those actions are allowed. ^The authorizer callback should ** return [SQLITE_OK] to allow the action, [SQLITE_IGNORE] to disallow the ** specific action but allow the SQL statement to continue to be ** compiled, or [SQLITE_DENY] to cause the entire SQL statement to be ** rejected with an error. ^If the authorizer callback returns ** any value other than [SQLITE_IGNORE], [SQLITE_OK], or [SQLITE_DENY] ** then the [sqlite3_prepare_v2()] or equivalent call that triggered ** the authorizer will fail with an error message. ** ** When the callback returns [SQLITE_OK], that means the operation ** requested is ok. ^When the callback returns [SQLITE_DENY], the ** [sqlite3_prepare_v2()] or equivalent call that triggered the ** authorizer will fail with an error message explaining that ** access is denied. ** ** ^The first parameter to the authorizer callback is a copy of the third ** parameter to the sqlite3_set_authorizer() interface. ^The second parameter ** to the callback is an integer [SQLITE_COPY | action code] that specifies ** the particular action to be authorized. ^The third through sixth parameters ** to the callback are zero-terminated strings that contain additional ** details about the action to be authorized. ** ** ^If the action code is [SQLITE_READ] ** and the callback returns [SQLITE_IGNORE] then the ** [prepared statement] statement is constructed to substitute ** a NULL value in place of the table column that would have ** been read if [SQLITE_OK] had been returned. The [SQLITE_IGNORE] ** return can be used to deny an untrusted user access to individual ** columns of a table. 
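**
** As a sketch of the callback shape only (not a complete policy), an authorizer
** that permits nothing except reading data via [SELECT] might look like this;
** the action codes used are defined later in this header:
**
**   int xAuth(void *pArg, int code, const char *z3, const char *z4,
**             const char *zDb, const char *zTrigger){
**     return (code==SQLITE_SELECT || code==SQLITE_READ) ? SQLITE_OK : SQLITE_DENY;
**   }
**
**   sqlite3_set_authorizer(db, xAuth, 0);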
** ^If the action code is [SQLITE_DELETE] and the callback returns ** [SQLITE_IGNORE] then the [DELETE] operation proceeds but the ** [truncate optimization] is disabled and all rows are deleted individually. ** ** An authorizer is used when [sqlite3_prepare | preparing] ** SQL statements from an untrusted source, to ensure that the SQL statements ** do not try to access data they are not allowed to see, or that they do not ** try to execute malicious statements that damage the database. For ** example, an application may allow a user to enter arbitrary ** SQL queries for evaluation by a database. But the application does ** not want the user to be able to make arbitrary changes to the ** database. An authorizer could then be put in place while the ** user-entered SQL is being [sqlite3_prepare | prepared] that ** disallows everything except [SELECT] statements. ** ** Applications that need to process SQL from untrusted sources ** might also consider lowering resource limits using [sqlite3_limit()] ** and limiting database size using the [max_page_count] [PRAGMA] ** in addition to using an authorizer. ** ** ^(Only a single authorizer can be in place on a database connection ** at a time. Each call to sqlite3_set_authorizer overrides the ** previous call.)^ ^Disable the authorizer by installing a NULL callback. ** The authorizer is disabled by default. ** ** The authorizer callback must not do anything that will modify ** the database connection that invoked the authorizer callback. ** Note that [sqlite3_prepare_v2()] and [sqlite3_step()] both modify their ** database connections for the meaning of "modify" in this paragraph. ** ** ^When [sqlite3_prepare_v2()] is used to prepare a statement, the ** statement might be re-prepared during [sqlite3_step()] due to a ** schema change. Hence, the application should ensure that the ** correct authorizer callback remains in place during the [sqlite3_step()]. ** ** ^Note that the authorizer callback is invoked only during ** [sqlite3_prepare()] or its variants. Authorization is not ** performed during statement evaluation in [sqlite3_step()], unless ** as stated in the previous paragraph, sqlite3_step() invokes ** sqlite3_prepare_v2() to reprepare a statement after a schema change. */ SQLITE_API int SQLITE_STDCALL sqlite3_set_authorizer( sqlite3*, int (*xAuth)(void*,int,const char*,const char*,const char*,const char*), void *pUserData ); /* ** CAPI3REF: Authorizer Return Codes ** ** The [sqlite3_set_authorizer | authorizer callback function] must ** return either [SQLITE_OK] or one of these two constants in order ** to signal SQLite whether or not the action is permitted. See the ** [sqlite3_set_authorizer | authorizer documentation] for additional ** information. ** ** Note that SQLITE_IGNORE is also used as a [conflict resolution mode] ** returned from the [sqlite3_vtab_on_conflict()] interface. */ #define SQLITE_DENY 1 /* Abort the SQL statement with an error */ #define SQLITE_IGNORE 2 /* Don't allow access, but don't generate an error */ /* ** CAPI3REF: Authorizer Action Codes ** ** The [sqlite3_set_authorizer()] interface registers a callback function ** that is invoked to authorize certain SQL statement actions. The ** second parameter to the callback is an integer code that specifies ** what action is being authorized. These are the integer action codes that ** the authorizer callback may be passed. ** ** These action code values signify what kind of operation is to be ** authorized. 
The 3rd and 4th parameters to the authorization ** callback function will be parameters or NULL depending on which of these ** codes is used as the second parameter. ^(The 5th parameter to the ** authorizer callback is the name of the database ("main", "temp", ** etc.) if applicable.)^ ^The 6th parameter to the authorizer callback ** is the name of the inner-most trigger or view that is responsible for ** the access attempt or NULL if this access attempt is directly from ** top-level SQL code. */ /******************************************* 3rd ************ 4th ***********/ #define SQLITE_CREATE_INDEX 1 /* Index Name Table Name */ #define SQLITE_CREATE_TABLE 2 /* Table Name NULL */ #define SQLITE_CREATE_TEMP_INDEX 3 /* Index Name Table Name */ #define SQLITE_CREATE_TEMP_TABLE 4 /* Table Name NULL */ #define SQLITE_CREATE_TEMP_TRIGGER 5 /* Trigger Name Table Name */ #define SQLITE_CREATE_TEMP_VIEW 6 /* View Name NULL */ #define SQLITE_CREATE_TRIGGER 7 /* Trigger Name Table Name */ #define SQLITE_CREATE_VIEW 8 /* View Name NULL */ #define SQLITE_DELETE 9 /* Table Name NULL */ #define SQLITE_DROP_INDEX 10 /* Index Name Table Name */ #define SQLITE_DROP_TABLE 11 /* Table Name NULL */ #define SQLITE_DROP_TEMP_INDEX 12 /* Index Name Table Name */ #define SQLITE_DROP_TEMP_TABLE 13 /* Table Name NULL */ #define SQLITE_DROP_TEMP_TRIGGER 14 /* Trigger Name Table Name */ #define SQLITE_DROP_TEMP_VIEW 15 /* View Name NULL */ #define SQLITE_DROP_TRIGGER 16 /* Trigger Name Table Name */ #define SQLITE_DROP_VIEW 17 /* View Name NULL */ #define SQLITE_INSERT 18 /* Table Name NULL */ #define SQLITE_PRAGMA 19 /* Pragma Name 1st arg or NULL */ #define SQLITE_READ 20 /* Table Name Column Name */ #define SQLITE_SELECT 21 /* NULL NULL */ #define SQLITE_TRANSACTION 22 /* Operation NULL */ #define SQLITE_UPDATE 23 /* Table Name Column Name */ #define SQLITE_ATTACH 24 /* Filename NULL */ #define SQLITE_DETACH 25 /* Database Name NULL */ #define SQLITE_ALTER_TABLE 26 /* Database Name Table Name */ #define SQLITE_REINDEX 27 /* Index Name NULL */ #define SQLITE_ANALYZE 28 /* Table Name NULL */ #define SQLITE_CREATE_VTABLE 29 /* Table Name Module Name */ #define SQLITE_DROP_VTABLE 30 /* Table Name Module Name */ #define SQLITE_FUNCTION 31 /* NULL Function Name */ #define SQLITE_SAVEPOINT 32 /* Operation Savepoint Name */ #define SQLITE_COPY 0 /* No longer used */ #define SQLITE_RECURSIVE 33 /* NULL NULL */ /* ** CAPI3REF: Tracing And Profiling Functions ** METHOD: sqlite3 ** ** These routines register callback functions that can be used for ** tracing and profiling the execution of SQL statements. ** ** ^The callback function registered by sqlite3_trace() is invoked at ** various times when an SQL statement is being run by [sqlite3_step()]. ** ^The sqlite3_trace() callback is invoked with a UTF-8 rendering of the ** SQL statement text as the statement first begins executing. ** ^(Additional sqlite3_trace() callbacks might occur ** as each triggered subprogram is entered. The callbacks for triggers ** contain a UTF-8 SQL comment that identifies the trigger.)^ ** ** The [SQLITE_TRACE_SIZE_LIMIT] compile-time option can be used to limit ** the length of [bound parameter] expansion in the output of sqlite3_trace(). ** ** ^The callback function registered by sqlite3_profile() is invoked ** as each SQL statement finishes. ^The profile callback contains ** the original statement text and an estimate of wall-clock time ** of how long that statement took to run. 
^The profile callback ** time is in units of nanoseconds, however the current implementation ** is only capable of millisecond resolution so the six least significant ** digits in the time are meaningless. Future versions of SQLite ** might provide greater resolution on the profiler callback. The ** sqlite3_profile() function is considered experimental and is ** subject to change in future versions of SQLite. */ SQLITE_API void *SQLITE_STDCALL sqlite3_trace(sqlite3*, void(*xTrace)(void*,const char*), void*); SQLITE_API SQLITE_EXPERIMENTAL void *SQLITE_STDCALL sqlite3_profile(sqlite3*, void(*xProfile)(void*,const char*,sqlite3_uint64), void*); /* ** CAPI3REF: Query Progress Callbacks ** METHOD: sqlite3 ** ** ^The sqlite3_progress_handler(D,N,X,P) interface causes the callback ** function X to be invoked periodically during long running calls to ** [sqlite3_exec()], [sqlite3_step()] and [sqlite3_get_table()] for ** database connection D. An example use for this ** interface is to keep a GUI updated during a large query. ** ** ^The parameter P is passed through as the only parameter to the ** callback function X. ^The parameter N is the approximate number of ** [virtual machine instructions] that are evaluated between successive ** invocations of the callback X. ^If N is less than one then the progress ** handler is disabled. ** ** ^Only a single progress handler may be defined at one time per ** [database connection]; setting a new progress handler cancels the ** old one. ^Setting parameter X to NULL disables the progress handler. ** ^The progress handler is also disabled by setting N to a value less ** than 1. ** ** ^If the progress callback returns non-zero, the operation is ** interrupted. This feature can be used to implement a ** "Cancel" button on a GUI progress dialog box. ** ** The progress handler callback must not do anything that will modify ** the database connection that invoked the progress handler. ** Note that [sqlite3_prepare_v2()] and [sqlite3_step()] both modify their ** database connections for the meaning of "modify" in this paragraph. ** */ SQLITE_API void SQLITE_STDCALL sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*); /* ** CAPI3REF: Opening A New Database Connection ** CONSTRUCTOR: sqlite3 ** ** ^These routines open an SQLite database file as specified by the ** filename argument. ^The filename argument is interpreted as UTF-8 for ** sqlite3_open() and sqlite3_open_v2() and as UTF-16 in the native byte ** order for sqlite3_open16(). ^(A [database connection] handle is usually ** returned in *ppDb, even if an error occurs. The only exception is that ** if SQLite is unable to allocate memory to hold the [sqlite3] object, ** a NULL will be written into *ppDb instead of a pointer to the [sqlite3] ** object.)^ ^(If the database is opened (and/or created) successfully, then ** [SQLITE_OK] is returned. Otherwise an [error code] is returned.)^ ^The ** [sqlite3_errmsg()] or [sqlite3_errmsg16()] routines can be used to obtain ** an English language description of the error following a failure of any ** of the sqlite3_open() routines. ** ** ^The default encoding will be UTF-8 for databases created using ** sqlite3_open() or sqlite3_open_v2(). ^The default encoding for databases ** created using sqlite3_open16() will be UTF-16 in the native byte order. ** ** Whether or not an error occurs when it is opened, resources ** associated with the [database connection] handle should be released by ** passing it to [sqlite3_close()] when it is no longer required. 
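**
** A minimal open-check-close sketch of the pattern described above (the filename
** is arbitrary and error handling is abbreviated):
**
**   sqlite3 *db = 0;
**   int rc = sqlite3_open("example.db", &db);
**   if( rc!=SQLITE_OK ){
**     const char *zErr = sqlite3_errmsg(db);
**   }
**   sqlite3_close(db);
**
** Note that a handle is usually returned even on failure, which is why
** sqlite3_errmsg() can be consulted and sqlite3_close() must still be called.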
** ** The sqlite3_open_v2() interface works like sqlite3_open() ** except that it accepts two additional parameters for additional control ** over the new database connection. ^(The flags parameter to ** sqlite3_open_v2() can take one of ** the following three values, optionally combined with the ** [SQLITE_OPEN_NOMUTEX], [SQLITE_OPEN_FULLMUTEX], [SQLITE_OPEN_SHAREDCACHE], ** [SQLITE_OPEN_PRIVATECACHE], and/or [SQLITE_OPEN_URI] flags:)^ ** **
**
** ^([SQLITE_OPEN_READONLY]
**   The database is opened in read-only mode. If the database does not ** already exist, an error is returned.)^
**
** ^([SQLITE_OPEN_READWRITE]
**   The database is opened for reading and writing if possible, or reading ** only if the file is write protected by the operating system. In either ** case the database must already exist, otherwise an error is returned.)^
**
** ^([SQLITE_OPEN_READWRITE] | [SQLITE_OPEN_CREATE]
**   The database is opened for reading and writing, and is created if ** it does not already exist. This is the behavior that is always used for ** sqlite3_open() and sqlite3_open16().)^
**
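**
** For instance (a sketch only; the filename is arbitrary), a read/write connection
** that is created if necessary and uses the default VFS:
**
**   sqlite3 *db = 0;
**   int rc = sqlite3_open_v2("example.db", &db,
**                            SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, NULL);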
** ** If the 3rd parameter to sqlite3_open_v2() is not one of the ** combinations shown above optionally combined with other ** [SQLITE_OPEN_READONLY | SQLITE_OPEN_* bits] ** then the behavior is undefined. ** ** ^If the [SQLITE_OPEN_NOMUTEX] flag is set, then the database connection ** opens in the multi-thread [threading mode] as long as the single-thread ** mode has not been set at compile-time or start-time. ^If the ** [SQLITE_OPEN_FULLMUTEX] flag is set then the database connection opens ** in the serialized [threading mode] unless single-thread was ** previously selected at compile-time or start-time. ** ^The [SQLITE_OPEN_SHAREDCACHE] flag causes the database connection to be ** eligible to use [shared cache mode], regardless of whether or not shared ** cache is enabled using [sqlite3_enable_shared_cache()]. ^The ** [SQLITE_OPEN_PRIVATECACHE] flag causes the database connection to not ** participate in [shared cache mode] even if it is enabled. ** ** ^The fourth parameter to sqlite3_open_v2() is the name of the ** [sqlite3_vfs] object that defines the operating system interface that ** the new database connection should use. ^If the fourth parameter is ** a NULL pointer then the default [sqlite3_vfs] object is used. ** ** ^If the filename is ":memory:", then a private, temporary in-memory database ** is created for the connection. ^This in-memory database will vanish when ** the database connection is closed. Future versions of SQLite might ** make use of additional special filenames that begin with the ":" character. ** It is recommended that when a database filename actually does begin with ** a ":" character you should prefix the filename with a pathname such as ** "./" to avoid ambiguity. ** ** ^If the filename is an empty string, then a private, temporary ** on-disk database will be created. ^This private database will be ** automatically deleted as soon as the database connection is closed. ** ** [[URI filenames in sqlite3_open()]]

** URI Filenames

** ** ^If [URI filename] interpretation is enabled, and the filename argument ** begins with "file:", then the filename is interpreted as a URI. ^URI ** filename interpretation is enabled if the [SQLITE_OPEN_URI] flag is ** set in the fourth argument to sqlite3_open_v2(), or if it has ** been enabled globally using the [SQLITE_CONFIG_URI] option with the ** [sqlite3_config()] method or by the [SQLITE_USE_URI] compile-time option. ** As of SQLite version 3.7.7, URI filename interpretation is turned off ** by default, but future releases of SQLite might enable URI filename ** interpretation by default. See "[URI filenames]" for additional ** information. ** ** URI filenames are parsed according to RFC 3986. ^If the URI contains an ** authority, then it must be either an empty string or the string ** "localhost". ^If the authority is not an empty string or "localhost", an ** error is returned to the caller. ^The fragment component of a URI, if ** present, is ignored. ** ** ^SQLite uses the path component of the URI as the name of the disk file ** which contains the database. ^If the path begins with a '/' character, ** then it is interpreted as an absolute path. ^If the path does not begin ** with a '/' (meaning that the authority section is omitted from the URI) ** then the path is interpreted as a relative path. ** ^(On windows, the first component of an absolute path ** is a drive specification (e.g. "C:").)^ ** ** [[core URI query parameters]] ** The query component of a URI may contain parameters that are interpreted ** either by SQLite itself, or by a [VFS | custom VFS implementation]. ** SQLite and its built-in [VFSes] interpret the ** following query parameters: ** **
**
**   • vfs: ^The "vfs" parameter may be used to specify the name of ** a VFS object that provides the operating system interface that should ** be used to access the database file on disk. ^If this option is set to ** an empty string the default VFS object is used. ^Specifying an unknown ** VFS is an error. ^If sqlite3_open_v2() is used and the vfs option is ** present, then the VFS specified by the option takes precedence over ** the value passed as the fourth parameter to sqlite3_open_v2().
**
**   • mode: ^(The mode parameter may be set to either "ro", "rw", ** "rwc", or "memory". Attempting to set it to any other value is ** an error)^. ** ^If "ro" is specified, then the database is opened for read-only ** access, just as if the [SQLITE_OPEN_READONLY] flag had been set in the ** third argument to sqlite3_open_v2(). ^If the mode option is set to ** "rw", then the database is opened for read-write (but not create) ** access, as if SQLITE_OPEN_READWRITE (but not SQLITE_OPEN_CREATE) had ** been set. ^Value "rwc" is equivalent to setting both ** SQLITE_OPEN_READWRITE and SQLITE_OPEN_CREATE. ^If the mode option is ** set to "memory" then a pure [in-memory database] that never reads ** or writes from disk is used. ^It is an error to specify a value for ** the mode parameter that is less restrictive than that specified by ** the flags passed in the third parameter to sqlite3_open_v2().
**
**   • cache: ^The cache parameter may be set to either "shared" or ** "private". ^Setting it to "shared" is equivalent to setting the ** SQLITE_OPEN_SHAREDCACHE bit in the flags argument passed to ** sqlite3_open_v2(). ^Setting the cache parameter to "private" is ** equivalent to setting the SQLITE_OPEN_PRIVATECACHE bit. ** ^If sqlite3_open_v2() is used and the "cache" parameter is present in ** a URI filename, its value overrides any behavior requested by setting ** SQLITE_OPEN_PRIVATECACHE or SQLITE_OPEN_SHAREDCACHE flag.
**
**   • psow: ^The psow parameter indicates whether or not the ** [powersafe overwrite] property does or does not apply to the ** storage media on which the database file resides.
**
**   • nolock: ^The nolock parameter is a boolean query parameter ** which if set disables file locking in rollback journal modes. This ** is useful for accessing a database on a filesystem that does not ** support locking. Caution: Database corruption might result if two ** or more processes write to the same database and any one of those ** processes uses nolock=1.
**
**   • immutable: ^The immutable parameter is a boolean query ** parameter that indicates that the database file is stored on ** read-only media. ^When immutable is set, SQLite assumes that the ** database file cannot be changed, even by a process with higher ** privilege, and so the database is opened read-only and all locking ** and change detection is disabled. Caution: Setting the immutable ** property on a database file that does in fact change can result ** in incorrect query results and/or [SQLITE_CORRUPT] errors. ** See also: [SQLITE_IOCAP_IMMUTABLE].
** ** ^Specifying an unknown parameter in the query component of a URI is not an ** error. Future versions of SQLite might understand additional query ** parameters. See "[query parameters with special meaning to SQLite]" for ** additional information. ** ** [[URI filename examples]]

** URI filename examples

**
**   URI filename                                     Result
**
**   file:data.db
**     Open the file "data.db" in the current directory.
**
**   file:/home/fred/data.db
**   file:///home/fred/data.db
**   file://localhost/home/fred/data.db
**     Open the database file "/home/fred/data.db".
**
**   file://darkstar/home/fred/data.db
**     An error. "darkstar" is not a recognized authority.
**
**   file:///C:/Documents%20and%20Settings/fred/Desktop/data.db
**     Windows only: Open the file "data.db" on fred's desktop on drive
**     C:. Note that the %20 escaping in this example is not strictly
**     necessary - space characters can be used literally in URI filenames.
**
**   file:data.db?mode=ro&cache=private
**     Open file "data.db" in the current directory for read-only access.
**     Regardless of whether or not shared-cache mode is enabled by
**     default, use a private cache.
**
**   file:/home/fred/data.db?vfs=unix-dotfile
**     Open file "/home/fred/data.db". Use the special VFS "unix-dotfile"
**     that uses dot-files in place of posix advisory locking.
**
**   file:data.db?mode=readonly
**     An error. "readonly" is not a valid option for the "mode" parameter.
**
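**
** To pass one of the URI forms above programmatically (illustrative only), include
** the [SQLITE_OPEN_URI] flag in the sqlite3_open_v2() call:
**
**   sqlite3 *db = 0;
**   int rc = sqlite3_open_v2("file:data.db?mode=ro&cache=private", &db,
**                            SQLITE_OPEN_READONLY | SQLITE_OPEN_URI, NULL);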
** ** ^URI hexadecimal escape sequences (%HH) are supported within the path and ** query components of a URI. A hexadecimal escape sequence consists of a ** percent sign - "%" - followed by exactly two hexadecimal digits ** specifying an octet value. ^Before the path or query components of a ** URI filename are interpreted, they are encoded using UTF-8 and all ** hexadecimal escape sequences replaced by a single byte containing the ** corresponding octet. If this process generates an invalid UTF-8 encoding, ** the results are undefined. ** ** Note to Windows users: The encoding used for the filename argument ** of sqlite3_open() and sqlite3_open_v2() must be UTF-8, not whatever ** codepage is currently defined. Filenames containing international ** characters must be converted to UTF-8 prior to passing them into ** sqlite3_open() or sqlite3_open_v2(). ** ** Note to Windows Runtime users: The temporary directory must be set ** prior to calling sqlite3_open() or sqlite3_open_v2(). Otherwise, various ** features that require the use of temporary files may fail. ** ** See also: [sqlite3_temp_directory] */ SQLITE_API int SQLITE_STDCALL sqlite3_open( const char *filename, /* Database filename (UTF-8) */ sqlite3 **ppDb /* OUT: SQLite db handle */ ); SQLITE_API int SQLITE_STDCALL sqlite3_open16( const void *filename, /* Database filename (UTF-16) */ sqlite3 **ppDb /* OUT: SQLite db handle */ ); SQLITE_API int SQLITE_STDCALL sqlite3_open_v2( const char *filename, /* Database filename (UTF-8) */ sqlite3 **ppDb, /* OUT: SQLite db handle */ int flags, /* Flags */ const char *zVfs /* Name of VFS module to use */ ); /* ** CAPI3REF: Obtain Values For URI Parameters ** ** These are utility routines, useful to VFS implementations, that check ** to see if a database file was a URI that contained a specific query ** parameter, and if so obtains the value of that query parameter. ** ** If F is the database filename pointer passed into the xOpen() method of ** a VFS implementation when the flags parameter to xOpen() has one or ** more of the [SQLITE_OPEN_URI] or [SQLITE_OPEN_MAIN_DB] bits set and ** P is the name of the query parameter, then ** sqlite3_uri_parameter(F,P) returns the value of the P ** parameter if it exists or a NULL pointer if P does not appear as a ** query parameter on F. If P is a query parameter of F ** has no explicit value, then sqlite3_uri_parameter(F,P) returns ** a pointer to an empty string. ** ** The sqlite3_uri_boolean(F,P,B) routine assumes that P is a boolean ** parameter and returns true (1) or false (0) according to the value ** of P. The sqlite3_uri_boolean(F,P,B) routine returns true (1) if the ** value of query parameter P is one of "yes", "true", or "on" in any ** case or if the value begins with a non-zero number. The ** sqlite3_uri_boolean(F,P,B) routines returns false (0) if the value of ** query parameter P is one of "no", "false", or "off" in any case or ** if the value begins with a numeric zero. If P is not a query ** parameter on F or if the value of P is does not match any of the ** above, then sqlite3_uri_boolean(F,P,B) returns (B!=0). ** ** The sqlite3_uri_int64(F,P,D) routine converts the value of P into a ** 64-bit signed integer and returns that integer, or D if P does not ** exist. If the value of P is something other than an integer, then ** zero is returned. ** ** If F is a NULL pointer, then sqlite3_uri_parameter(F,P) returns NULL and ** sqlite3_uri_boolean(F,P,B) returns B. 
If F is not a NULL pointer and ** is not a database file pathname pointer that SQLite passed into the xOpen ** VFS method, then the behavior of this routine is undefined and probably ** undesirable. */ SQLITE_API const char *SQLITE_STDCALL sqlite3_uri_parameter(const char *zFilename, const char *zParam); SQLITE_API int SQLITE_STDCALL sqlite3_uri_boolean(const char *zFile, const char *zParam, int bDefault); SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_uri_int64(const char*, const char*, sqlite3_int64); /* ** CAPI3REF: Error Codes And Messages ** METHOD: sqlite3 ** ** ^If the most recent sqlite3_* API call associated with ** [database connection] D failed, then the sqlite3_errcode(D) interface ** returns the numeric [result code] or [extended result code] for that ** API call. ** If the most recent API call was successful, ** then the return value from sqlite3_errcode() is undefined. ** ^The sqlite3_extended_errcode() ** interface is the same except that it always returns the ** [extended result code] even when extended result codes are ** disabled. ** ** ^The sqlite3_errmsg() and sqlite3_errmsg16() return English-language ** text that describes the error, as either UTF-8 or UTF-16 respectively. ** ^(Memory to hold the error message string is managed internally. ** The application does not need to worry about freeing the result. ** However, the error string might be overwritten or deallocated by ** subsequent calls to other SQLite interface functions.)^ ** ** ^The sqlite3_errstr() interface returns the English-language text ** that describes the [result code], as UTF-8. ** ^(Memory to hold the error message string is managed internally ** and must not be freed by the application)^. ** ** When the serialized [threading mode] is in use, it might be the ** case that a second error occurs on a separate thread in between ** the time of the first error and the call to these interfaces. ** When that happens, the second error will be reported since these ** interfaces always report the most recent result. To avoid ** this, each thread can obtain exclusive use of the [database connection] D ** by invoking [sqlite3_mutex_enter]([sqlite3_db_mutex](D)) before beginning ** to use D and invoking [sqlite3_mutex_leave]([sqlite3_db_mutex](D)) after ** all calls to the interfaces listed here are completed. ** ** If an interface fails with SQLITE_MISUSE, that means the interface ** was invoked incorrectly by the application. In that case, the ** error code and message may or may not be set. */ SQLITE_API int SQLITE_STDCALL sqlite3_errcode(sqlite3 *db); SQLITE_API int SQLITE_STDCALL sqlite3_extended_errcode(sqlite3 *db); SQLITE_API const char *SQLITE_STDCALL sqlite3_errmsg(sqlite3*); SQLITE_API const void *SQLITE_STDCALL sqlite3_errmsg16(sqlite3*); SQLITE_API const char *SQLITE_STDCALL sqlite3_errstr(int); /* ** CAPI3REF: Prepared Statement Object ** KEYWORDS: {prepared statement} {prepared statements} ** ** An instance of this object represents a single SQL statement that ** has been compiled into binary form and is ready to be evaluated. ** ** Think of each SQL statement as a separate computer program. The ** original SQL text is source code. A prepared statement object ** is the compiled object code. All SQL must be converted into a ** prepared statement before it can be run. ** ** The life-cycle of a prepared statement object usually goes like this: ** **
    **
  1. Create the prepared statement object using [sqlite3_prepare_v2()]. **
  2. Bind values to [parameters] using the sqlite3_bind_*() ** interfaces. **
  3. Run the SQL by calling [sqlite3_step()] one or more times. **
  4. Reset the prepared statement using [sqlite3_reset()] then go back ** to step 2. Do this zero or more times. **
  5. Destroy the object using [sqlite3_finalize()]. **
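**
** For example, a minimal sketch of the five steps above, assuming an open
** [database connection] db and a table t1(x) (both names are arbitrary),
** with error checking abbreviated:
**
**     sqlite3_stmt *pStmt = 0;
**     sqlite3_prepare_v2(db, "SELECT x FROM t1 WHERE x>?1", -1, &pStmt, 0);
**     sqlite3_bind_int(pStmt, 1, 10);             // step 2: bind values
**     while( sqlite3_step(pStmt)==SQLITE_ROW ){   // step 3: run the SQL
**       int x = sqlite3_column_int(pStmt, 0);     // read the current row
**     }
**     sqlite3_reset(pStmt);                       // step 4: reset for re-use
**     sqlite3_finalize(pStmt);                    // step 5: destroy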
*/ typedef struct sqlite3_stmt sqlite3_stmt; /* ** CAPI3REF: Run-time Limits ** METHOD: sqlite3 ** ** ^(This interface allows the size of various constructs to be limited ** on a connection by connection basis. The first parameter is the ** [database connection] whose limit is to be set or queried. The ** second parameter is one of the [limit categories] that define a ** class of constructs to be size limited. The third parameter is the ** new limit for that construct.)^ ** ** ^If the new limit is a negative number, the limit is unchanged. ** ^(For each limit category SQLITE_LIMIT_NAME there is a ** [limits | hard upper bound] ** set at compile-time by a C preprocessor macro called ** [limits | SQLITE_MAX_NAME]. ** (The "_LIMIT_" in the name is changed to "_MAX_".))^ ** ^Attempts to increase a limit above its hard upper bound are ** silently truncated to the hard upper bound. ** ** ^Regardless of whether or not the limit was changed, the ** [sqlite3_limit()] interface returns the prior value of the limit. ** ^Hence, to find the current value of a limit without changing it, ** simply invoke this interface with the third parameter set to -1. ** ** Run-time limits are intended for use in applications that manage ** both their own internal database and also databases that are controlled ** by untrusted external sources. An example application might be a ** web browser that has its own databases for storing history and ** separate databases controlled by JavaScript applications downloaded ** off the Internet. The internal databases can be given the ** large, default limits. Databases managed by external sources can ** be given much smaller limits designed to prevent a denial of service ** attack. Developers might also want to use the [sqlite3_set_authorizer()] ** interface to further control untrusted SQL. The size of the database ** created by an untrusted script can be contained using the ** [max_page_count] [PRAGMA]. ** ** New run-time limit categories may be added in future releases. */ SQLITE_API int SQLITE_STDCALL sqlite3_limit(sqlite3*, int id, int newVal); /* ** CAPI3REF: Run-Time Limit Categories ** KEYWORDS: {limit category} {*limit categories} ** ** These constants define various performance limits ** that can be lowered at run-time using [sqlite3_limit()]. ** The synopsis of the meanings of the various limits is shown below. ** Additional information is available at [limits | Limits in SQLite]. ** **
** [[SQLITE_LIMIT_LENGTH]] ^(
SQLITE_LIMIT_LENGTH
**
The maximum size of any string or BLOB or table row, in bytes.
)^ ** ** [[SQLITE_LIMIT_SQL_LENGTH]] ^(
SQLITE_LIMIT_SQL_LENGTH
**
The maximum length of an SQL statement, in bytes.
)^ ** ** [[SQLITE_LIMIT_COLUMN]] ^(
SQLITE_LIMIT_COLUMN
**
The maximum number of columns in a table definition or in the ** result set of a [SELECT] or the maximum number of columns in an index ** or in an ORDER BY or GROUP BY clause.
)^ ** ** [[SQLITE_LIMIT_EXPR_DEPTH]] ^(
SQLITE_LIMIT_EXPR_DEPTH
**
The maximum depth of the parse tree of any expression.
)^ ** ** [[SQLITE_LIMIT_COMPOUND_SELECT]] ^(
SQLITE_LIMIT_COMPOUND_SELECT
**
The maximum number of terms in a compound SELECT statement.
)^ ** ** [[SQLITE_LIMIT_VDBE_OP]] ^(
SQLITE_LIMIT_VDBE_OP
**
The maximum number of instructions in a virtual machine program ** used to implement an SQL statement. This limit is not currently ** enforced, though that might be added in some future release of ** SQLite.
)^ ** ** [[SQLITE_LIMIT_FUNCTION_ARG]] ^(
SQLITE_LIMIT_FUNCTION_ARG
**
The maximum number of arguments to a function.
)^ ** ** [[SQLITE_LIMIT_ATTACHED]] ^(
SQLITE_LIMIT_ATTACHED
**
The maximum number of [ATTACH | attached databases].)^
** ** [[SQLITE_LIMIT_LIKE_PATTERN_LENGTH]] ** ^(
SQLITE_LIMIT_LIKE_PATTERN_LENGTH
**
The maximum length of the pattern argument to the [LIKE] or ** [GLOB] operators.
)^ ** ** [[SQLITE_LIMIT_VARIABLE_NUMBER]] ** ^(
SQLITE_LIMIT_VARIABLE_NUMBER
**
The maximum index number of any [parameter] in an SQL statement.)^ ** ** [[SQLITE_LIMIT_TRIGGER_DEPTH]] ^(
SQLITE_LIMIT_TRIGGER_DEPTH
**
The maximum depth of recursion for triggers.
)^ ** ** [[SQLITE_LIMIT_WORKER_THREADS]] ^(
SQLITE_LIMIT_WORKER_THREADS
**
The maximum number of auxiliary worker threads that a single ** [prepared statement] may start.
)^ **
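**
** For example, a minimal sketch of querying and then lowering one of these
** limits on a [database connection] db; the category and new value shown
** are arbitrary:
**
**     int prior = sqlite3_limit(db, SQLITE_LIMIT_LENGTH, -1);  // -1 queries only
**     sqlite3_limit(db, SQLITE_LIMIT_LENGTH, 1000000);         // lower the limit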
*/ #define SQLITE_LIMIT_LENGTH 0 #define SQLITE_LIMIT_SQL_LENGTH 1 #define SQLITE_LIMIT_COLUMN 2 #define SQLITE_LIMIT_EXPR_DEPTH 3 #define SQLITE_LIMIT_COMPOUND_SELECT 4 #define SQLITE_LIMIT_VDBE_OP 5 #define SQLITE_LIMIT_FUNCTION_ARG 6 #define SQLITE_LIMIT_ATTACHED 7 #define SQLITE_LIMIT_LIKE_PATTERN_LENGTH 8 #define SQLITE_LIMIT_VARIABLE_NUMBER 9 #define SQLITE_LIMIT_TRIGGER_DEPTH 10 #define SQLITE_LIMIT_WORKER_THREADS 11 /* ** CAPI3REF: Compiling An SQL Statement ** KEYWORDS: {SQL statement compiler} ** METHOD: sqlite3 ** CONSTRUCTOR: sqlite3_stmt ** ** To execute an SQL query, it must first be compiled into a byte-code ** program using one of these routines. ** ** The first argument, "db", is a [database connection] obtained from a ** prior successful call to [sqlite3_open()], [sqlite3_open_v2()] or ** [sqlite3_open16()]. The database connection must not have been closed. ** ** The second argument, "zSql", is the statement to be compiled, encoded ** as either UTF-8 or UTF-16. The sqlite3_prepare() and sqlite3_prepare_v2() ** interfaces use UTF-8, and sqlite3_prepare16() and sqlite3_prepare16_v2() ** use UTF-16. ** ** ^If the nByte argument is negative, then zSql is read up to the ** first zero terminator. ^If nByte is positive, then it is the ** number of bytes read from zSql. ^If nByte is zero, then no prepared ** statement is generated. ** If the caller knows that the supplied string is nul-terminated, then ** there is a small performance advantage to passing an nByte parameter that ** is the number of bytes in the input string including ** the nul-terminator. ** ** ^If pzTail is not NULL then *pzTail is made to point to the first byte ** past the end of the first SQL statement in zSql. These routines only ** compile the first statement in zSql, so *pzTail is left pointing to ** what remains uncompiled. ** ** ^*ppStmt is left pointing to a compiled [prepared statement] that can be ** executed using [sqlite3_step()]. ^If there is an error, *ppStmt is set ** to NULL. ^If the input text contains no SQL (if the input is an empty ** string or a comment) then *ppStmt is set to NULL. ** The calling procedure is responsible for deleting the compiled ** SQL statement using [sqlite3_finalize()] after it has finished with it. ** ppStmt may not be NULL. ** ** ^On success, the sqlite3_prepare() family of routines return [SQLITE_OK]; ** otherwise an [error code] is returned. ** ** The sqlite3_prepare_v2() and sqlite3_prepare16_v2() interfaces are ** recommended for all new programs. The two older interfaces are retained ** for backwards compatibility, but their use is discouraged. ** ^In the "v2" interfaces, the prepared statement ** that is returned (the [sqlite3_stmt] object) contains a copy of the ** original SQL text. This causes the [sqlite3_step()] interface to ** behave differently in three ways: ** **
    **
  1. ** ^If the database schema changes, instead of returning [SQLITE_SCHEMA] as it ** always used to do, [sqlite3_step()] will automatically recompile the SQL ** statement and try to run it again. As many as [SQLITE_MAX_SCHEMA_RETRY] ** retries will occur before sqlite3_step() gives up and returns an error. **
  2. ** ^When an error occurs, [sqlite3_step()] will return one of the detailed ** [error codes] or [extended error codes]. ^The legacy behavior was that ** [sqlite3_step()] would only return a generic [SQLITE_ERROR] result code ** and the application would have to make a second call to [sqlite3_reset()] ** in order to find the underlying cause of the problem. With the "v2" prepare ** interfaces, the underlying reason for the error is returned immediately. **
  3. ** ^If the specific value bound to [parameter | host parameter] in the ** WHERE clause might influence the choice of query plan for a statement, ** then the statement will be automatically recompiled, as if there had been ** a schema change, on the first [sqlite3_step()] call following any change ** to the [sqlite3_bind_text | bindings] of that [parameter]. ** ^The specific value of WHERE-clause [parameter] might influence the ** choice of query plan if the parameter is the left-hand side of a [LIKE] ** or [GLOB] operator or if the parameter is compared to an indexed column ** and the [SQLITE_ENABLE_STAT3] compile-time option is enabled. **
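**
** For example, a minimal sketch of compiling a single statement with the
** "v2" interface and reporting a detailed error through [sqlite3_errmsg()];
** the zSql text is assumed to be supplied by the caller:
**
**     sqlite3_stmt *pStmt = 0;
**     const char *zTail = 0;           // will point past the first statement
**     int rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, &zTail);
**     if( rc!=SQLITE_OK ){
**       fprintf(stderr, "cannot prepare: %s\n", sqlite3_errmsg(db));  // needs <stdio.h>
**     }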
*/ SQLITE_API int SQLITE_STDCALL sqlite3_prepare( sqlite3 *db, /* Database handle */ const char *zSql, /* SQL statement, UTF-8 encoded */ int nByte, /* Maximum length of zSql in bytes. */ sqlite3_stmt **ppStmt, /* OUT: Statement handle */ const char **pzTail /* OUT: Pointer to unused portion of zSql */ ); SQLITE_API int SQLITE_STDCALL sqlite3_prepare_v2( sqlite3 *db, /* Database handle */ const char *zSql, /* SQL statement, UTF-8 encoded */ int nByte, /* Maximum length of zSql in bytes. */ sqlite3_stmt **ppStmt, /* OUT: Statement handle */ const char **pzTail /* OUT: Pointer to unused portion of zSql */ ); SQLITE_API int SQLITE_STDCALL sqlite3_prepare16( sqlite3 *db, /* Database handle */ const void *zSql, /* SQL statement, UTF-16 encoded */ int nByte, /* Maximum length of zSql in bytes. */ sqlite3_stmt **ppStmt, /* OUT: Statement handle */ const void **pzTail /* OUT: Pointer to unused portion of zSql */ ); SQLITE_API int SQLITE_STDCALL sqlite3_prepare16_v2( sqlite3 *db, /* Database handle */ const void *zSql, /* SQL statement, UTF-16 encoded */ int nByte, /* Maximum length of zSql in bytes. */ sqlite3_stmt **ppStmt, /* OUT: Statement handle */ const void **pzTail /* OUT: Pointer to unused portion of zSql */ ); /* ** CAPI3REF: Retrieving Statement SQL ** METHOD: sqlite3_stmt ** ** ^This interface can be used to retrieve a saved copy of the original ** SQL text used to create a [prepared statement] if that statement was ** compiled using either [sqlite3_prepare_v2()] or [sqlite3_prepare16_v2()]. */ SQLITE_API const char *SQLITE_STDCALL sqlite3_sql(sqlite3_stmt *pStmt); /* ** CAPI3REF: Determine If An SQL Statement Writes The Database ** METHOD: sqlite3_stmt ** ** ^The sqlite3_stmt_readonly(X) interface returns true (non-zero) if ** and only if the [prepared statement] X makes no direct changes to ** the content of the database file. ** ** Note that [application-defined SQL functions] or ** [virtual tables] might change the database indirectly as a side effect. ** ^(For example, if an application defines a function "eval()" that ** calls [sqlite3_exec()], then the following SQL statement would ** change the database file through side-effects: ** **
**    SELECT eval('DELETE FROM t1') FROM t2;
** 
** ** But because the [SELECT] statement does not change the database file ** directly, sqlite3_stmt_readonly() would still return true.)^ ** ** ^Transaction control statements such as [BEGIN], [COMMIT], [ROLLBACK], ** [SAVEPOINT], and [RELEASE] cause sqlite3_stmt_readonly() to return true, ** since the statements themselves do not actually modify the database but ** rather they control the timing of when other statements modify the ** database. ^The [ATTACH] and [DETACH] statements also cause ** sqlite3_stmt_readonly() to return true since, while those statements ** change the configuration of a database connection, they do not make ** changes to the content of the database files on disk. */ SQLITE_API int SQLITE_STDCALL sqlite3_stmt_readonly(sqlite3_stmt *pStmt); /* ** CAPI3REF: Determine If A Prepared Statement Has Been Reset ** METHOD: sqlite3_stmt ** ** ^The sqlite3_stmt_busy(S) interface returns true (non-zero) if the ** [prepared statement] S has been stepped at least once using ** [sqlite3_step(S)] but has not run to completion and/or has not ** been reset using [sqlite3_reset(S)]. ^The sqlite3_stmt_busy(S) ** interface returns false if S is a NULL pointer. If S is not a ** NULL pointer and is not a pointer to a valid [prepared statement] ** object, then the behavior is undefined and probably undesirable. ** ** This interface can be used in combination [sqlite3_next_stmt()] ** to locate all prepared statements associated with a database ** connection that are in need of being reset. This can be used, ** for example, in diagnostic routines to search for prepared ** statements that are holding a transaction open. */ SQLITE_API int SQLITE_STDCALL sqlite3_stmt_busy(sqlite3_stmt*); /* ** CAPI3REF: Dynamically Typed Value Object ** KEYWORDS: {protected sqlite3_value} {unprotected sqlite3_value} ** ** SQLite uses the sqlite3_value object to represent all values ** that can be stored in a database table. SQLite uses dynamic typing ** for the values it stores. ^Values stored in sqlite3_value objects ** can be integers, floating point values, strings, BLOBs, or NULL. ** ** An sqlite3_value object may be either "protected" or "unprotected". ** Some interfaces require a protected sqlite3_value. Other interfaces ** will accept either a protected or an unprotected sqlite3_value. ** Every interface that accepts sqlite3_value arguments specifies ** whether or not it requires a protected sqlite3_value. The ** [sqlite3_value_dup()] interface can be used to construct a new ** protected sqlite3_value from an unprotected sqlite3_value. ** ** The terms "protected" and "unprotected" refer to whether or not ** a mutex is held. An internal mutex is held for a protected ** sqlite3_value object but no mutex is held for an unprotected ** sqlite3_value object. If SQLite is compiled to be single-threaded ** (with [SQLITE_THREADSAFE=0] and with [sqlite3_threadsafe()] returning 0) ** or if SQLite is run in one of reduced mutex modes ** [SQLITE_CONFIG_SINGLETHREAD] or [SQLITE_CONFIG_MULTITHREAD] ** then there is no distinction between protected and unprotected ** sqlite3_value objects and they can be used interchangeably. However, ** for maximum code portability it is recommended that applications ** still make the distinction between protected and unprotected ** sqlite3_value objects even when not strictly required. ** ** ^The sqlite3_value objects that are passed as parameters into the ** implementation of [application-defined SQL functions] are protected. 
** ^The sqlite3_value object returned by ** [sqlite3_column_value()] is unprotected. ** Unprotected sqlite3_value objects may only be used with ** [sqlite3_result_value()] and [sqlite3_bind_value()]. ** The [sqlite3_value_blob | sqlite3_value_type()] family of ** interfaces require protected sqlite3_value objects. */ typedef struct Mem sqlite3_value; /* ** CAPI3REF: SQL Function Context Object ** ** The context in which an SQL function executes is stored in an ** sqlite3_context object. ^A pointer to an sqlite3_context object ** is always first parameter to [application-defined SQL functions]. ** The application-defined SQL function implementation will pass this ** pointer through into calls to [sqlite3_result_int | sqlite3_result()], ** [sqlite3_aggregate_context()], [sqlite3_user_data()], ** [sqlite3_context_db_handle()], [sqlite3_get_auxdata()], ** and/or [sqlite3_set_auxdata()]. */ typedef struct sqlite3_context sqlite3_context; /* ** CAPI3REF: Binding Values To Prepared Statements ** KEYWORDS: {host parameter} {host parameters} {host parameter name} ** KEYWORDS: {SQL parameter} {SQL parameters} {parameter binding} ** METHOD: sqlite3_stmt ** ** ^(In the SQL statement text input to [sqlite3_prepare_v2()] and its variants, ** literals may be replaced by a [parameter] that matches one of following ** templates: ** **
    **
  • ? **
  • ?NNN **
  • :VVV **
  • @VVV **
  • $VVV **
** ** In the templates above, NNN represents an integer literal, ** and VVV represents an alphanumeric identifier.)^ ^The values of these ** parameters (also called "host parameter names" or "SQL parameters") ** can be set using the sqlite3_bind_*() routines defined here. ** ** ^The first argument to the sqlite3_bind_*() routines is always ** a pointer to the [sqlite3_stmt] object returned from ** [sqlite3_prepare_v2()] or its variants. ** ** ^The second argument is the index of the SQL parameter to be set. ** ^The leftmost SQL parameter has an index of 1. ^When the same named ** SQL parameter is used more than once, second and subsequent ** occurrences have the same index as the first occurrence. ** ^The index for named parameters can be looked up using the ** [sqlite3_bind_parameter_index()] API if desired. ^The index ** for "?NNN" parameters is the value of NNN. ** ^The NNN value must be between 1 and the [sqlite3_limit()] ** parameter [SQLITE_LIMIT_VARIABLE_NUMBER] (default value: 999). ** ** ^The third argument is the value to bind to the parameter. ** ^If the third parameter to sqlite3_bind_text() or sqlite3_bind_text16() ** or sqlite3_bind_blob() is a NULL pointer then the fourth parameter ** is ignored and the end result is the same as sqlite3_bind_null(). ** ** ^(In those routines that have a fourth argument, its value is the ** number of bytes in the parameter. To be clear: the value is the ** number of bytes in the value, not the number of characters.)^ ** ^If the fourth parameter to sqlite3_bind_text() or sqlite3_bind_text16() ** is negative, then the length of the string is ** the number of bytes up to the first zero terminator. ** If the fourth parameter to sqlite3_bind_blob() is negative, then ** the behavior is undefined. ** If a non-negative fourth parameter is provided to sqlite3_bind_text() ** or sqlite3_bind_text16() or sqlite3_bind_text64() then ** that parameter must be the byte offset ** where the NUL terminator would occur assuming the string were NUL ** terminated. If any NUL characters occur at byte offsets less than ** the value of the fourth parameter then the resulting string value will ** contain embedded NULs. The result of expressions involving strings ** with embedded NULs is undefined. ** ** ^The fifth argument to the BLOB and string binding interfaces ** is a destructor used to dispose of the BLOB or ** string after SQLite has finished with it. ^The destructor is called ** to dispose of the BLOB or string even if the call to bind API fails. ** ^If the fifth argument is ** the special value [SQLITE_STATIC], then SQLite assumes that the ** information is in static, unmanaged space and does not need to be freed. ** ^If the fifth argument has the value [SQLITE_TRANSIENT], then ** SQLite makes its own private copy of the data immediately, before ** the sqlite3_bind_*() routine returns. ** ** ^The sixth argument to sqlite3_bind_text64() must be one of ** [SQLITE_UTF8], [SQLITE_UTF16], [SQLITE_UTF16BE], or [SQLITE_UTF16LE] ** to specify the encoding of the text in the third parameter. If ** the sixth argument to sqlite3_bind_text64() is not one of the ** allowed values shown above, or if the text encoding is different ** from the encoding specified by the sixth parameter, then the behavior ** is undefined. ** ** ^The sqlite3_bind_zeroblob() routine binds a BLOB of length N that ** is filled with zeroes. ^A zeroblob uses a fixed amount of memory ** (just an integer to hold its size) while it is being processed. 
** Zeroblobs are intended to serve as placeholders for BLOBs whose ** content is later written using ** [sqlite3_blob_open | incremental BLOB I/O] routines. ** ^A negative value for the zeroblob results in a zero-length BLOB. ** ** ^If any of the sqlite3_bind_*() routines are called with a NULL pointer ** for the [prepared statement] or with a prepared statement for which ** [sqlite3_step()] has been called more recently than [sqlite3_reset()], ** then the call will return [SQLITE_MISUSE]. If any sqlite3_bind_() ** routine is passed a [prepared statement] that has been finalized, the ** result is undefined and probably harmful. ** ** ^Bindings are not cleared by the [sqlite3_reset()] routine. ** ^Unbound parameters are interpreted as NULL. ** ** ^The sqlite3_bind_* routines return [SQLITE_OK] on success or an ** [error code] if anything goes wrong. ** ^[SQLITE_TOOBIG] might be returned if the size of a string or BLOB ** exceeds limits imposed by [sqlite3_limit]([SQLITE_LIMIT_LENGTH]) or ** [SQLITE_MAX_LENGTH]. ** ^[SQLITE_RANGE] is returned if the parameter ** index is out of range. ^[SQLITE_NOMEM] is returned if malloc() fails. ** ** See also: [sqlite3_bind_parameter_count()], ** [sqlite3_bind_parameter_name()], and [sqlite3_bind_parameter_index()]. */ SQLITE_API int SQLITE_STDCALL sqlite3_bind_blob(sqlite3_stmt*, int, const void*, int n, void(*)(void*)); SQLITE_API int SQLITE_STDCALL sqlite3_bind_blob64(sqlite3_stmt*, int, const void*, sqlite3_uint64, void(*)(void*)); SQLITE_API int SQLITE_STDCALL sqlite3_bind_double(sqlite3_stmt*, int, double); SQLITE_API int SQLITE_STDCALL sqlite3_bind_int(sqlite3_stmt*, int, int); SQLITE_API int SQLITE_STDCALL sqlite3_bind_int64(sqlite3_stmt*, int, sqlite3_int64); SQLITE_API int SQLITE_STDCALL sqlite3_bind_null(sqlite3_stmt*, int); SQLITE_API int SQLITE_STDCALL sqlite3_bind_text(sqlite3_stmt*,int,const char*,int,void(*)(void*)); SQLITE_API int SQLITE_STDCALL sqlite3_bind_text16(sqlite3_stmt*, int, const void*, int, void(*)(void*)); SQLITE_API int SQLITE_STDCALL sqlite3_bind_text64(sqlite3_stmt*, int, const char*, sqlite3_uint64, void(*)(void*), unsigned char encoding); SQLITE_API int SQLITE_STDCALL sqlite3_bind_value(sqlite3_stmt*, int, const sqlite3_value*); SQLITE_API int SQLITE_STDCALL sqlite3_bind_zeroblob(sqlite3_stmt*, int, int n); SQLITE_API int SQLITE_STDCALL sqlite3_bind_zeroblob64(sqlite3_stmt*, int, sqlite3_uint64); /* ** CAPI3REF: Number Of SQL Parameters ** METHOD: sqlite3_stmt ** ** ^This routine can be used to find the number of [SQL parameters] ** in a [prepared statement]. SQL parameters are tokens of the ** form "?", "?NNN", ":AAA", "$AAA", or "@AAA" that serve as ** placeholders for values that are [sqlite3_bind_blob | bound] ** to the parameters at a later time. ** ** ^(This routine actually returns the index of the largest (rightmost) ** parameter. For all forms except ?NNN, this will correspond to the ** number of unique parameters. If parameters of the ?NNN form are used, ** there may be gaps in the list.)^ ** ** See also: [sqlite3_bind_blob|sqlite3_bind()], ** [sqlite3_bind_parameter_name()], and ** [sqlite3_bind_parameter_index()]. */ SQLITE_API int SQLITE_STDCALL sqlite3_bind_parameter_count(sqlite3_stmt*); /* ** CAPI3REF: Name Of A Host Parameter ** METHOD: sqlite3_stmt ** ** ^The sqlite3_bind_parameter_name(P,N) interface returns ** the name of the N-th [SQL parameter] in the [prepared statement] P. 
** ^(SQL parameters of the form "?NNN" or ":AAA" or "@AAA" or "$AAA" ** have a name which is the string "?NNN" or ":AAA" or "@AAA" or "$AAA" ** respectively. ** In other words, the initial ":" or "$" or "@" or "?" ** is included as part of the name.)^ ** ^Parameters of the form "?" without a following integer have no name ** and are referred to as "nameless" or "anonymous parameters". ** ** ^The first host parameter has an index of 1, not 0. ** ** ^If the value N is out of range or if the N-th parameter is ** nameless, then NULL is returned. ^The returned string is ** always in UTF-8 encoding even if the named parameter was ** originally specified as UTF-16 in [sqlite3_prepare16()] or ** [sqlite3_prepare16_v2()]. ** ** See also: [sqlite3_bind_blob|sqlite3_bind()], ** [sqlite3_bind_parameter_count()], and ** [sqlite3_bind_parameter_index()]. */ SQLITE_API const char *SQLITE_STDCALL sqlite3_bind_parameter_name(sqlite3_stmt*, int); /* ** CAPI3REF: Index Of A Parameter With A Given Name ** METHOD: sqlite3_stmt ** ** ^Return the index of an SQL parameter given its name. ^The ** index value returned is suitable for use as the second ** parameter to [sqlite3_bind_blob|sqlite3_bind()]. ^A zero ** is returned if no matching parameter is found. ^The parameter ** name must be given in UTF-8 even if the original statement ** was prepared from UTF-16 text using [sqlite3_prepare16_v2()]. ** ** See also: [sqlite3_bind_blob|sqlite3_bind()], ** [sqlite3_bind_parameter_count()], and ** [sqlite3_bind_parameter_index()]. */ SQLITE_API int SQLITE_STDCALL sqlite3_bind_parameter_index(sqlite3_stmt*, const char *zName); /* ** CAPI3REF: Reset All Bindings On A Prepared Statement ** METHOD: sqlite3_stmt ** ** ^Contrary to the intuition of many, [sqlite3_reset()] does not reset ** the [sqlite3_bind_blob | bindings] on a [prepared statement]. ** ^Use this routine to reset all host parameters to NULL. */ SQLITE_API int SQLITE_STDCALL sqlite3_clear_bindings(sqlite3_stmt*); /* ** CAPI3REF: Number Of Columns In A Result Set ** METHOD: sqlite3_stmt ** ** ^Return the number of columns in the result set returned by the ** [prepared statement]. ^This routine returns 0 if pStmt is an SQL ** statement that does not return data (for example an [UPDATE]). ** ** See also: [sqlite3_data_count()] */ SQLITE_API int SQLITE_STDCALL sqlite3_column_count(sqlite3_stmt *pStmt); /* ** CAPI3REF: Column Names In A Result Set ** METHOD: sqlite3_stmt ** ** ^These routines return the name assigned to a particular column ** in the result set of a [SELECT] statement. ^The sqlite3_column_name() ** interface returns a pointer to a zero-terminated UTF-8 string ** and sqlite3_column_name16() returns a pointer to a zero-terminated ** UTF-16 string. ^The first parameter is the [prepared statement] ** that implements the [SELECT] statement. ^The second parameter is the ** column number. ^The leftmost column is number 0. ** ** ^The returned string pointer is valid until either the [prepared statement] ** is destroyed by [sqlite3_finalize()] or until the statement is automatically ** reprepared by the first call to [sqlite3_step()] for a particular run ** or until the next call to ** sqlite3_column_name() or sqlite3_column_name16() on the same column. ** ** ^If sqlite3_malloc() fails during the processing of either routine ** (for example during a conversion from UTF-8 to UTF-16) then a ** NULL pointer is returned. ** ** ^The name of a result column is the value of the "AS" clause for ** that column, if there is an AS clause. 
If there is no AS clause ** then the name of the column is unspecified and may change from ** one release of SQLite to the next. */ SQLITE_API const char *SQLITE_STDCALL sqlite3_column_name(sqlite3_stmt*, int N); SQLITE_API const void *SQLITE_STDCALL sqlite3_column_name16(sqlite3_stmt*, int N); /* ** CAPI3REF: Source Of Data In A Query Result ** METHOD: sqlite3_stmt ** ** ^These routines provide a means to determine the database, table, and ** table column that is the origin of a particular result column in ** [SELECT] statement. ** ^The name of the database or table or column can be returned as ** either a UTF-8 or UTF-16 string. ^The _database_ routines return ** the database name, the _table_ routines return the table name, and ** the origin_ routines return the column name. ** ^The returned string is valid until the [prepared statement] is destroyed ** using [sqlite3_finalize()] or until the statement is automatically ** reprepared by the first call to [sqlite3_step()] for a particular run ** or until the same information is requested ** again in a different encoding. ** ** ^The names returned are the original un-aliased names of the ** database, table, and column. ** ** ^The first argument to these interfaces is a [prepared statement]. ** ^These functions return information about the Nth result column returned by ** the statement, where N is the second function argument. ** ^The left-most column is column 0 for these routines. ** ** ^If the Nth column returned by the statement is an expression or ** subquery and is not a column value, then all of these functions return ** NULL. ^These routine might also return NULL if a memory allocation error ** occurs. ^Otherwise, they return the name of the attached database, table, ** or column that query result column was extracted from. ** ** ^As with all other SQLite APIs, those whose names end with "16" return ** UTF-16 encoded strings and the other functions return UTF-8. ** ** ^These APIs are only available if the library was compiled with the ** [SQLITE_ENABLE_COLUMN_METADATA] C-preprocessor symbol. ** ** If two or more threads call one or more of these routines against the same ** prepared statement and column at the same time then the results are ** undefined. ** ** If two or more threads call one or more ** [sqlite3_column_database_name | column metadata interfaces] ** for the same [prepared statement] and result column ** at the same time then the results are undefined. */ SQLITE_API const char *SQLITE_STDCALL sqlite3_column_database_name(sqlite3_stmt*,int); SQLITE_API const void *SQLITE_STDCALL sqlite3_column_database_name16(sqlite3_stmt*,int); SQLITE_API const char *SQLITE_STDCALL sqlite3_column_table_name(sqlite3_stmt*,int); SQLITE_API const void *SQLITE_STDCALL sqlite3_column_table_name16(sqlite3_stmt*,int); SQLITE_API const char *SQLITE_STDCALL sqlite3_column_origin_name(sqlite3_stmt*,int); SQLITE_API const void *SQLITE_STDCALL sqlite3_column_origin_name16(sqlite3_stmt*,int); /* ** CAPI3REF: Declared Datatype Of A Query Result ** METHOD: sqlite3_stmt ** ** ^(The first parameter is a [prepared statement]. ** If this statement is a [SELECT] statement and the Nth column of the ** returned result set of that [SELECT] is a table column (not an ** expression or subquery) then the declared type of the table ** column is returned.)^ ^If the Nth column of the result set is an ** expression or subquery, then a NULL pointer is returned. ** ^The returned string is always UTF-8 encoded. 
** ** ^(For example, given the database schema: ** ** CREATE TABLE t1(c1 VARIANT); ** ** and the following statement to be compiled: ** ** SELECT c1 + 1, c1 FROM t1; ** ** this routine would return the string "VARIANT" for the second result ** column (i==1), and a NULL pointer for the first result column (i==0).)^ ** ** ^SQLite uses dynamic run-time typing. ^So just because a column ** is declared to contain a particular type does not mean that the ** data stored in that column is of the declared type. SQLite is ** strongly typed, but the typing is dynamic not static. ^Type ** is associated with individual values, not with the containers ** used to hold those values. */ SQLITE_API const char *SQLITE_STDCALL sqlite3_column_decltype(sqlite3_stmt*,int); SQLITE_API const void *SQLITE_STDCALL sqlite3_column_decltype16(sqlite3_stmt*,int); /* ** CAPI3REF: Evaluate An SQL Statement ** METHOD: sqlite3_stmt ** ** After a [prepared statement] has been prepared using either ** [sqlite3_prepare_v2()] or [sqlite3_prepare16_v2()] or one of the legacy ** interfaces [sqlite3_prepare()] or [sqlite3_prepare16()], this function ** must be called one or more times to evaluate the statement. ** ** The details of the behavior of the sqlite3_step() interface depend ** on whether the statement was prepared using the newer "v2" interface ** [sqlite3_prepare_v2()] and [sqlite3_prepare16_v2()] or the older legacy ** interface [sqlite3_prepare()] and [sqlite3_prepare16()]. The use of the ** new "v2" interface is recommended for new applications but the legacy ** interface will continue to be supported. ** ** ^In the legacy interface, the return value will be either [SQLITE_BUSY], ** [SQLITE_DONE], [SQLITE_ROW], [SQLITE_ERROR], or [SQLITE_MISUSE]. ** ^With the "v2" interface, any of the other [result codes] or ** [extended result codes] might be returned as well. ** ** ^[SQLITE_BUSY] means that the database engine was unable to acquire the ** database locks it needs to do its job. ^If the statement is a [COMMIT] ** or occurs outside of an explicit transaction, then you can retry the ** statement. If the statement is not a [COMMIT] and occurs within an ** explicit transaction then you should rollback the transaction before ** continuing. ** ** ^[SQLITE_DONE] means that the statement has finished executing ** successfully. sqlite3_step() should not be called again on this virtual ** machine without first calling [sqlite3_reset()] to reset the virtual ** machine back to its initial state. ** ** ^If the SQL statement being executed returns any data, then [SQLITE_ROW] ** is returned each time a new row of data is ready for processing by the ** caller. The values may be accessed using the [column access functions]. ** sqlite3_step() is called again to retrieve the next row of data. ** ** ^[SQLITE_ERROR] means that a run-time error (such as a constraint ** violation) has occurred. sqlite3_step() should not be called again on ** the VM. More information may be found by calling [sqlite3_errmsg()]. ** ^With the legacy interface, a more specific error code (for example, ** [SQLITE_INTERRUPT], [SQLITE_SCHEMA], [SQLITE_CORRUPT], and so forth) ** can be obtained by calling [sqlite3_reset()] on the ** [prepared statement]. ^In the "v2" interface, ** the more specific error code is returned directly by sqlite3_step(). ** ** [SQLITE_MISUSE] means that the this routine was called inappropriately. 
** Perhaps it was called on a [prepared statement] that has ** already been [sqlite3_finalize | finalized] or on one that had ** previously returned [SQLITE_ERROR] or [SQLITE_DONE]. Or it could ** be the case that the same database connection is being used by two or ** more threads at the same moment in time. ** ** For all versions of SQLite up to and including 3.6.23.1, a call to ** [sqlite3_reset()] was required after sqlite3_step() returned anything ** other than [SQLITE_ROW] before any subsequent invocation of ** sqlite3_step(). Failure to reset the prepared statement using ** [sqlite3_reset()] would result in an [SQLITE_MISUSE] return from ** sqlite3_step(). But after version 3.6.23.1, sqlite3_step() began ** calling [sqlite3_reset()] automatically in this circumstance rather ** than returning [SQLITE_MISUSE]. This is not considered a compatibility ** break because any application that ever receives an SQLITE_MISUSE error ** is broken by definition. The [SQLITE_OMIT_AUTORESET] compile-time option ** can be used to restore the legacy behavior. ** ** Goofy Interface Alert: In the legacy interface, the sqlite3_step() ** API always returns a generic error code, [SQLITE_ERROR], following any ** error other than [SQLITE_BUSY] and [SQLITE_MISUSE]. You must call ** [sqlite3_reset()] or [sqlite3_finalize()] in order to find one of the ** specific [error codes] that better describes the error. ** We admit that this is a goofy design. The problem has been fixed ** with the "v2" interface. If you prepare all of your SQL statements ** using either [sqlite3_prepare_v2()] or [sqlite3_prepare16_v2()] instead ** of the legacy [sqlite3_prepare()] and [sqlite3_prepare16()] interfaces, ** then the more specific [error codes] are returned directly ** by sqlite3_step(). The use of the "v2" interface is recommended. */ SQLITE_API int SQLITE_STDCALL sqlite3_step(sqlite3_stmt*); /* ** CAPI3REF: Number of columns in a result set ** METHOD: sqlite3_stmt ** ** ^The sqlite3_data_count(P) interface returns the number of columns in the ** current row of the result set of [prepared statement] P. ** ^If prepared statement P does not have results ready to return ** (via calls to the [sqlite3_column_int | sqlite3_column_*()] of ** interfaces) then sqlite3_data_count(P) returns 0. ** ^The sqlite3_data_count(P) routine also returns 0 if P is a NULL pointer. ** ^The sqlite3_data_count(P) routine returns 0 if the previous call to ** [sqlite3_step](P) returned [SQLITE_DONE]. ^The sqlite3_data_count(P) ** will return non-zero if previous call to [sqlite3_step](P) returned ** [SQLITE_ROW], except in the case of the [PRAGMA incremental_vacuum] ** where it always returns zero since each step of that multi-step ** pragma returns 0 columns of data. ** ** See also: [sqlite3_column_count()] */ SQLITE_API int SQLITE_STDCALL sqlite3_data_count(sqlite3_stmt *pStmt); /* ** CAPI3REF: Fundamental Datatypes ** KEYWORDS: SQLITE_TEXT ** ** ^(Every value in SQLite has one of five fundamental datatypes: ** **
    **
  • 64-bit signed integer **
  • 64-bit IEEE floating point number **
  • string **
  • BLOB **
  • NULL **
)^ ** ** These constants are codes for each of those types. ** ** Note that the SQLITE_TEXT constant was also used in SQLite version 2 ** for a completely different meaning. Software that links against both ** SQLite version 2 and SQLite version 3 should use SQLITE3_TEXT, not ** SQLITE_TEXT. */ #define SQLITE_INTEGER 1 #define SQLITE_FLOAT 2 #define SQLITE_BLOB 4 #define SQLITE_NULL 5 #ifdef SQLITE_TEXT # undef SQLITE_TEXT #else # define SQLITE_TEXT 3 #endif #define SQLITE3_TEXT 3 /* ** CAPI3REF: Result Values From A Query ** KEYWORDS: {column access functions} ** METHOD: sqlite3_stmt ** ** ^These routines return information about a single column of the current ** result row of a query. ^In every case the first argument is a pointer ** to the [prepared statement] that is being evaluated (the [sqlite3_stmt*] ** that was returned from [sqlite3_prepare_v2()] or one of its variants) ** and the second argument is the index of the column for which information ** should be returned. ^The leftmost column of the result set has the index 0. ** ^The number of columns in the result can be determined using ** [sqlite3_column_count()]. ** ** If the SQL statement does not currently point to a valid row, or if the ** column index is out of range, the result is undefined. ** These routines may only be called when the most recent call to ** [sqlite3_step()] has returned [SQLITE_ROW] and neither ** [sqlite3_reset()] nor [sqlite3_finalize()] have been called subsequently. ** If any of these routines are called after [sqlite3_reset()] or ** [sqlite3_finalize()] or after [sqlite3_step()] has returned ** something other than [SQLITE_ROW], the results are undefined. ** If [sqlite3_step()] or [sqlite3_reset()] or [sqlite3_finalize()] ** are called from a different thread while any of these routines ** are pending, then the results are undefined. ** ** ^The sqlite3_column_type() routine returns the ** [SQLITE_INTEGER | datatype code] for the initial data type ** of the result column. ^The returned value is one of [SQLITE_INTEGER], ** [SQLITE_FLOAT], [SQLITE_TEXT], [SQLITE_BLOB], or [SQLITE_NULL]. The value ** returned by sqlite3_column_type() is only meaningful if no type ** conversions have occurred as described below. After a type conversion, ** the value returned by sqlite3_column_type() is undefined. Future ** versions of SQLite may change the behavior of sqlite3_column_type() ** following a type conversion. ** ** ^If the result is a BLOB or UTF-8 string then the sqlite3_column_bytes() ** routine returns the number of bytes in that BLOB or string. ** ^If the result is a UTF-16 string, then sqlite3_column_bytes() converts ** the string to UTF-8 and then returns the number of bytes. ** ^If the result is a numeric value then sqlite3_column_bytes() uses ** [sqlite3_snprintf()] to convert that value to a UTF-8 string and returns ** the number of bytes in that string. ** ^If the result is NULL, then sqlite3_column_bytes() returns zero. ** ** ^If the result is a BLOB or UTF-16 string then the sqlite3_column_bytes16() ** routine returns the number of bytes in that BLOB or string. ** ^If the result is a UTF-8 string, then sqlite3_column_bytes16() converts ** the string to UTF-16 and then returns the number of bytes. ** ^If the result is a numeric value then sqlite3_column_bytes16() uses ** [sqlite3_snprintf()] to convert that value to a UTF-16 string and returns ** the number of bytes in that string. ** ^If the result is NULL, then sqlite3_column_bytes16() returns zero. 
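**
** For example, a minimal sketch of checking the initial datatype of column 0
** before reading it, assuming the most recent [sqlite3_step()] on pStmt
** returned SQLITE_ROW:
**
**     if( sqlite3_column_type(pStmt, 0)!=SQLITE_NULL ){
**       sqlite3_int64 v = sqlite3_column_int64(pStmt, 0);
**     }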
** ** ^The values returned by [sqlite3_column_bytes()] and ** [sqlite3_column_bytes16()] do not include the zero terminators at the end ** of the string. ^For clarity: the values returned by ** [sqlite3_column_bytes()] and [sqlite3_column_bytes16()] are the number of ** bytes in the string, not the number of characters. ** ** ^Strings returned by sqlite3_column_text() and sqlite3_column_text16(), ** even empty strings, are always zero-terminated. ^The return ** value from sqlite3_column_blob() for a zero-length BLOB is a NULL pointer. ** ** Warning: ^The object returned by [sqlite3_column_value()] is an ** [unprotected sqlite3_value] object. In a multithreaded environment, ** an unprotected sqlite3_value object may only be used safely with ** [sqlite3_bind_value()] and [sqlite3_result_value()]. ** If the [unprotected sqlite3_value] object returned by ** [sqlite3_column_value()] is used in any other way, including calls ** to routines like [sqlite3_value_int()], [sqlite3_value_text()], ** or [sqlite3_value_bytes()], the behavior is not threadsafe. ** ** These routines attempt to convert the value where appropriate. ^For ** example, if the internal representation is FLOAT and a text result ** is requested, [sqlite3_snprintf()] is used internally to perform the ** conversion automatically. ^(The following table details the conversions ** that are applied: ** **
** **
Internal Type    Requested Type    Conversion ** **
NULL INTEGER Result is 0 **
NULL FLOAT Result is 0.0 **
NULL TEXT Result is a NULL pointer **
NULL BLOB Result is a NULL pointer **
INTEGER FLOAT Convert from integer to float **
INTEGER TEXT ASCII rendering of the integer **
INTEGER BLOB Same as INTEGER->TEXT **
FLOAT INTEGER [CAST] to INTEGER **
FLOAT TEXT ASCII rendering of the float **
FLOAT BLOB [CAST] to BLOB **
TEXT INTEGER [CAST] to INTEGER **
TEXT FLOAT [CAST] to REAL **
TEXT BLOB No change **
BLOB INTEGER [CAST] to INTEGER **
BLOB FLOAT [CAST] to REAL **
BLOB TEXT Add a zero terminator if needed **
**
)^ ** ** Note that when type conversions occur, pointers returned by prior ** calls to sqlite3_column_blob(), sqlite3_column_text(), and/or ** sqlite3_column_text16() may be invalidated. ** Type conversions and pointer invalidations might occur ** in the following cases: ** **
    **
  • The initial content is a BLOB and sqlite3_column_text() or ** sqlite3_column_text16() is called. A zero-terminator might ** need to be added to the string. **
  • The initial content is UTF-8 text and sqlite3_column_bytes16() or ** sqlite3_column_text16() is called. The content must be converted ** to UTF-16. **
  • The initial content is UTF-16 text and sqlite3_column_bytes() or ** sqlite3_column_text() is called. The content must be converted ** to UTF-8. **
** ** ^Conversions between UTF-16be and UTF-16le are always done in place and do ** not invalidate a prior pointer, though of course the content of the buffer ** that the prior pointer references will have been modified. Other kinds ** of conversion are done in place when it is possible, but sometimes they ** are not possible and in those cases prior pointers are invalidated. ** ** The safest policy is to invoke these routines ** in one of the following ways: ** **
    **
  • sqlite3_column_text() followed by sqlite3_column_bytes()
  • sqlite3_column_blob() followed by sqlite3_column_bytes()
  • sqlite3_column_text16() followed by sqlite3_column_bytes16()
  **
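**
** For example, a minimal sketch of the first pattern above, assuming pStmt
** currently has a result row available:
**
**     const unsigned char *zTxt = sqlite3_column_text(pStmt, 0);
**     int nTxt = sqlite3_column_bytes(pStmt, 0);   // byte count of the UTF-8 text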
** ** In other words, you should call sqlite3_column_text(), ** sqlite3_column_blob(), or sqlite3_column_text16() first to force the result ** into the desired format, then invoke sqlite3_column_bytes() or ** sqlite3_column_bytes16() to find the size of the result. Do not mix calls ** to sqlite3_column_text() or sqlite3_column_blob() with calls to ** sqlite3_column_bytes16(), and do not mix calls to sqlite3_column_text16() ** with calls to sqlite3_column_bytes(). ** ** ^The pointers returned are valid until a type conversion occurs as ** described above, or until [sqlite3_step()] or [sqlite3_reset()] or ** [sqlite3_finalize()] is called. ^The memory space used to hold strings ** and BLOBs is freed automatically. Do not pass the pointers returned ** from [sqlite3_column_blob()], [sqlite3_column_text()], etc. into ** [sqlite3_free()]. ** ** ^(If a memory allocation error occurs during the evaluation of any ** of these routines, a default value is returned. The default value ** is either the integer 0, the floating point number 0.0, or a NULL ** pointer. Subsequent calls to [sqlite3_errcode()] will return ** [SQLITE_NOMEM].)^ */ SQLITE_API const void *SQLITE_STDCALL sqlite3_column_blob(sqlite3_stmt*, int iCol); SQLITE_API int SQLITE_STDCALL sqlite3_column_bytes(sqlite3_stmt*, int iCol); SQLITE_API int SQLITE_STDCALL sqlite3_column_bytes16(sqlite3_stmt*, int iCol); SQLITE_API double SQLITE_STDCALL sqlite3_column_double(sqlite3_stmt*, int iCol); SQLITE_API int SQLITE_STDCALL sqlite3_column_int(sqlite3_stmt*, int iCol); SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_column_int64(sqlite3_stmt*, int iCol); SQLITE_API const unsigned char *SQLITE_STDCALL sqlite3_column_text(sqlite3_stmt*, int iCol); SQLITE_API const void *SQLITE_STDCALL sqlite3_column_text16(sqlite3_stmt*, int iCol); SQLITE_API int SQLITE_STDCALL sqlite3_column_type(sqlite3_stmt*, int iCol); SQLITE_API sqlite3_value *SQLITE_STDCALL sqlite3_column_value(sqlite3_stmt*, int iCol); /* ** CAPI3REF: Destroy A Prepared Statement Object ** DESTRUCTOR: sqlite3_stmt ** ** ^The sqlite3_finalize() function is called to delete a [prepared statement]. ** ^If the most recent evaluation of the statement encountered no errors ** or if the statement is never been evaluated, then sqlite3_finalize() returns ** SQLITE_OK. ^If the most recent evaluation of statement S failed, then ** sqlite3_finalize(S) returns the appropriate [error code] or ** [extended error code]. ** ** ^The sqlite3_finalize(S) routine can be called at any point during ** the life cycle of [prepared statement] S: ** before statement S is ever evaluated, after ** one or more calls to [sqlite3_reset()], or after any call ** to [sqlite3_step()] regardless of whether or not the statement has ** completed execution. ** ** ^Invoking sqlite3_finalize() on a NULL pointer is a harmless no-op. ** ** The application must finalize every [prepared statement] in order to avoid ** resource leaks. It is a grievous error for the application to try to use ** a prepared statement after it has been finalized. Any use of a prepared ** statement after it has been finalized can result in undefined and ** undesirable behavior such as segfaults and heap corruption. */ SQLITE_API int SQLITE_STDCALL sqlite3_finalize(sqlite3_stmt *pStmt); /* ** CAPI3REF: Reset A Prepared Statement Object ** METHOD: sqlite3_stmt ** ** The sqlite3_reset() function is called to reset a [prepared statement] ** object back to its initial state, ready to be re-executed. 
** ^Any SQL statement variables that had values bound to them using ** the [sqlite3_bind_blob | sqlite3_bind_*() API] retain their values. ** Use [sqlite3_clear_bindings()] to reset the bindings. ** ** ^The [sqlite3_reset(S)] interface resets the [prepared statement] S ** back to the beginning of its program. ** ** ^If the most recent call to [sqlite3_step(S)] for the ** [prepared statement] S returned [SQLITE_ROW] or [SQLITE_DONE], ** or if [sqlite3_step(S)] has never before been called on S, ** then [sqlite3_reset(S)] returns [SQLITE_OK]. ** ** ^If the most recent call to [sqlite3_step(S)] for the ** [prepared statement] S indicated an error, then ** [sqlite3_reset(S)] returns an appropriate [error code]. ** ** ^The [sqlite3_reset(S)] interface does not change the values ** of any [sqlite3_bind_blob|bindings] on the [prepared statement] S. */ SQLITE_API int SQLITE_STDCALL sqlite3_reset(sqlite3_stmt *pStmt); /* ** CAPI3REF: Create Or Redefine SQL Functions ** KEYWORDS: {function creation routines} ** KEYWORDS: {application-defined SQL function} ** KEYWORDS: {application-defined SQL functions} ** METHOD: sqlite3 ** ** ^These functions (collectively known as "function creation routines") ** are used to add SQL functions or aggregates or to redefine the behavior ** of existing SQL functions or aggregates. The only differences between ** these routines are the text encoding expected for ** the second parameter (the name of the function being created) ** and the presence or absence of a destructor callback for ** the application data pointer. ** ** ^The first parameter is the [database connection] to which the SQL ** function is to be added. ^If an application uses more than one database ** connection then application-defined SQL functions must be added ** to each database connection separately. ** ** ^The second parameter is the name of the SQL function to be created or ** redefined. ^The length of the name is limited to 255 bytes in a UTF-8 ** representation, exclusive of the zero-terminator. ^Note that the name ** length limit is in UTF-8 bytes, not characters nor UTF-16 bytes. ** ^Any attempt to create a function with a longer name ** will result in [SQLITE_MISUSE] being returned. ** ** ^The third parameter (nArg) ** is the number of arguments that the SQL function or ** aggregate takes. ^If this parameter is -1, then the SQL function or ** aggregate may take any number of arguments between 0 and the limit ** set by [sqlite3_limit]([SQLITE_LIMIT_FUNCTION_ARG]). If the third ** parameter is less than -1 or greater than 127 then the behavior is ** undefined. ** ** ^The fourth parameter, eTextRep, specifies what ** [SQLITE_UTF8 | text encoding] this SQL function prefers for ** its parameters. The application should set this parameter to ** [SQLITE_UTF16LE] if the function implementation invokes ** [sqlite3_value_text16le()] on an input, or [SQLITE_UTF16BE] if the ** implementation invokes [sqlite3_value_text16be()] on an input, or ** [SQLITE_UTF16] if [sqlite3_value_text16()] is used, or [SQLITE_UTF8] ** otherwise. ^The same SQL function may be registered multiple times using ** different preferred text encodings, with different implementations for ** each encoding. ** ^When multiple implementations of the same function are available, SQLite ** will pick the one that involves the least amount of data conversion. 
** ** ^The fourth parameter may optionally be ORed with [SQLITE_DETERMINISTIC] ** to signal that the function will always return the same result given ** the same inputs within a single SQL statement. Most SQL functions are ** deterministic. The built-in [random()] SQL function is an example of a ** function that is not deterministic. The SQLite query planner is able to ** perform additional optimizations on deterministic functions, so use ** of the [SQLITE_DETERMINISTIC] flag is recommended where possible. ** ** ^(The fifth parameter is an arbitrary pointer. The implementation of the ** function can gain access to this pointer using [sqlite3_user_data()].)^ ** ** ^The sixth, seventh and eighth parameters, xFunc, xStep and xFinal, are ** pointers to C-language functions that implement the SQL function or ** aggregate. ^A scalar SQL function requires an implementation of the xFunc ** callback only; NULL pointers must be passed as the xStep and xFinal ** parameters. ^An aggregate SQL function requires an implementation of xStep ** and xFinal and NULL pointer must be passed for xFunc. ^To delete an existing ** SQL function or aggregate, pass NULL pointers for all three function ** callbacks. ** ** ^(If the ninth parameter to sqlite3_create_function_v2() is not NULL, ** then it is destructor for the application data pointer. ** The destructor is invoked when the function is deleted, either by being ** overloaded or when the database connection closes.)^ ** ^The destructor is also invoked if the call to ** sqlite3_create_function_v2() fails. ** ^When the destructor callback of the tenth parameter is invoked, it ** is passed a single argument which is a copy of the application data ** pointer which was the fifth parameter to sqlite3_create_function_v2(). ** ** ^It is permitted to register multiple implementations of the same ** functions with the same name but with either differing numbers of ** arguments or differing preferred text encodings. ^SQLite will use ** the implementation that most closely matches the way in which the ** SQL function is used. ^A function implementation with a non-negative ** nArg parameter is a better match than a function implementation with ** a negative nArg. ^A function where the preferred text encoding ** matches the database encoding is a better ** match than a function where the encoding is different. ** ^A function where the encoding difference is between UTF16le and UTF16be ** is a closer match than a function where the encoding difference is ** between UTF8 and UTF16. ** ** ^Built-in functions may be overloaded by new application-defined functions. ** ** ^An application-defined function is permitted to call other ** SQLite interfaces. However, such calls must not ** close the database connection nor finalize or reset the prepared ** statement in which the function is running. 
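**
** For example, a minimal sketch of registering a deterministic scalar
** function half(X); the function name and implementation are arbitrary:
**
**     static void halfFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
**       sqlite3_result_double(ctx, 0.5*sqlite3_value_double(argv[0]));
**     }
**
**     // ...then, after the database connection db has been opened:
**     sqlite3_create_function_v2(db, "half", 1,
**                                SQLITE_UTF8|SQLITE_DETERMINISTIC,
**                                0, halfFunc, 0, 0, 0);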
*/ SQLITE_API int SQLITE_STDCALL sqlite3_create_function( sqlite3 *db, const char *zFunctionName, int nArg, int eTextRep, void *pApp, void (*xFunc)(sqlite3_context*,int,sqlite3_value**), void (*xStep)(sqlite3_context*,int,sqlite3_value**), void (*xFinal)(sqlite3_context*) ); SQLITE_API int SQLITE_STDCALL sqlite3_create_function16( sqlite3 *db, const void *zFunctionName, int nArg, int eTextRep, void *pApp, void (*xFunc)(sqlite3_context*,int,sqlite3_value**), void (*xStep)(sqlite3_context*,int,sqlite3_value**), void (*xFinal)(sqlite3_context*) ); SQLITE_API int SQLITE_STDCALL sqlite3_create_function_v2( sqlite3 *db, const char *zFunctionName, int nArg, int eTextRep, void *pApp, void (*xFunc)(sqlite3_context*,int,sqlite3_value**), void (*xStep)(sqlite3_context*,int,sqlite3_value**), void (*xFinal)(sqlite3_context*), void(*xDestroy)(void*) ); /* ** CAPI3REF: Text Encodings ** ** These constant define integer codes that represent the various ** text encodings supported by SQLite. */ #define SQLITE_UTF8 1 /* IMP: R-37514-35566 */ #define SQLITE_UTF16LE 2 /* IMP: R-03371-37637 */ #define SQLITE_UTF16BE 3 /* IMP: R-51971-34154 */ #define SQLITE_UTF16 4 /* Use native byte order */ #define SQLITE_ANY 5 /* Deprecated */ #define SQLITE_UTF16_ALIGNED 8 /* sqlite3_create_collation only */ /* ** CAPI3REF: Function Flags ** ** These constants may be ORed together with the ** [SQLITE_UTF8 | preferred text encoding] as the fourth argument ** to [sqlite3_create_function()], [sqlite3_create_function16()], or ** [sqlite3_create_function_v2()]. */ #define SQLITE_DETERMINISTIC 0x800 /* ** CAPI3REF: Deprecated Functions ** DEPRECATED ** ** These functions are [deprecated]. In order to maintain ** backwards compatibility with older code, these functions continue ** to be supported. However, new applications should avoid ** the use of these functions. To encourage programmers to avoid ** these functions, we will not explain what they do. */ #ifndef SQLITE_OMIT_DEPRECATED SQLITE_API SQLITE_DEPRECATED int SQLITE_STDCALL sqlite3_aggregate_count(sqlite3_context*); SQLITE_API SQLITE_DEPRECATED int SQLITE_STDCALL sqlite3_expired(sqlite3_stmt*); SQLITE_API SQLITE_DEPRECATED int SQLITE_STDCALL sqlite3_transfer_bindings(sqlite3_stmt*, sqlite3_stmt*); SQLITE_API SQLITE_DEPRECATED int SQLITE_STDCALL sqlite3_global_recover(void); SQLITE_API SQLITE_DEPRECATED void SQLITE_STDCALL sqlite3_thread_cleanup(void); SQLITE_API SQLITE_DEPRECATED int SQLITE_STDCALL sqlite3_memory_alarm(void(*)(void*,sqlite3_int64,int), void*,sqlite3_int64); #endif /* ** CAPI3REF: Obtaining SQL Values ** METHOD: sqlite3_value ** ** The C-language implementation of SQL functions and aggregates uses ** this set of interface routines to access the parameter values on ** the function or aggregate. ** ** The xFunc (for scalar functions) or xStep (for aggregates) parameters ** to [sqlite3_create_function()] and [sqlite3_create_function16()] ** define callbacks that implement the SQL functions and aggregates. ** The 3rd parameter to these callbacks is an array of pointers to ** [protected sqlite3_value] objects. There is one [sqlite3_value] object for ** each parameter to the SQL function. These routines are used to ** extract values from the [sqlite3_value] objects. ** ** These routines work only with [protected sqlite3_value] objects. ** Any attempt to use these routines on an [unprotected sqlite3_value] ** object results in undefined behavior. 
** ** ^These routines work just like the corresponding [column access functions] ** except that these routines take a single [protected sqlite3_value] object ** pointer instead of a [sqlite3_stmt*] pointer and an integer column number. ** ** ^The sqlite3_value_text16() interface extracts a UTF-16 string ** in the native byte-order of the host machine. ^The ** sqlite3_value_text16be() and sqlite3_value_text16le() interfaces ** extract UTF-16 strings as big-endian and little-endian respectively. ** ** ^(The sqlite3_value_numeric_type() interface attempts to apply ** numeric affinity to the value. This means that an attempt is ** made to convert the value to an integer or floating point. If ** such a conversion is possible without loss of information (in other ** words, if the value is a string that looks like a number) ** then the conversion is performed. Otherwise no conversion occurs. ** The [SQLITE_INTEGER | datatype] after conversion is returned.)^ ** ** Please pay particular attention to the fact that the pointer returned ** from [sqlite3_value_blob()], [sqlite3_value_text()], or ** [sqlite3_value_text16()] can be invalidated by a subsequent call to ** [sqlite3_value_bytes()], [sqlite3_value_bytes16()], [sqlite3_value_text()], ** or [sqlite3_value_text16()]. ** ** These routines must be called from the same thread as ** the SQL function that supplied the [sqlite3_value*] parameters. */ SQLITE_API const void *SQLITE_STDCALL sqlite3_value_blob(sqlite3_value*); SQLITE_API int SQLITE_STDCALL sqlite3_value_bytes(sqlite3_value*); SQLITE_API int SQLITE_STDCALL sqlite3_value_bytes16(sqlite3_value*); SQLITE_API double SQLITE_STDCALL sqlite3_value_double(sqlite3_value*); SQLITE_API int SQLITE_STDCALL sqlite3_value_int(sqlite3_value*); SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_value_int64(sqlite3_value*); SQLITE_API const unsigned char *SQLITE_STDCALL sqlite3_value_text(sqlite3_value*); SQLITE_API const void *SQLITE_STDCALL sqlite3_value_text16(sqlite3_value*); SQLITE_API const void *SQLITE_STDCALL sqlite3_value_text16le(sqlite3_value*); SQLITE_API const void *SQLITE_STDCALL sqlite3_value_text16be(sqlite3_value*); SQLITE_API int SQLITE_STDCALL sqlite3_value_type(sqlite3_value*); SQLITE_API int SQLITE_STDCALL sqlite3_value_numeric_type(sqlite3_value*); /* ** CAPI3REF: Copy And Free SQL Values ** METHOD: sqlite3_value ** ** ^The sqlite3_value_dup(V) interface makes a copy of the [sqlite3_value] ** object D and returns a pointer to that copy. ^The [sqlite3_value] returned ** is a [protected sqlite3_value] object even if the input is not. ** ^The sqlite3_value_dup(V) interface returns NULL if V is NULL or if a ** memory allocation fails. ** ** ^The sqlite3_value_free(V) interface frees an [sqlite3_value] object ** previously obtained from [sqlite3_value_dup()]. ^If V is a NULL pointer ** then sqlite3_value_free(V) is a harmless no-op. */ SQLITE_API SQLITE_EXPERIMENTAL sqlite3_value *SQLITE_STDCALL sqlite3_value_dup(const sqlite3_value*); SQLITE_API SQLITE_EXPERIMENTAL void SQLITE_STDCALL sqlite3_value_free(sqlite3_value*); /* ** CAPI3REF: Obtain Aggregate Function Context ** METHOD: sqlite3_context ** ** Implementations of aggregate SQL functions use this ** routine to allocate memory for storing their state. ** ** ^The first time the sqlite3_aggregate_context(C,N) routine is called ** for a particular aggregate function, SQLite ** allocates N of memory, zeroes out that memory, and returns a pointer ** to the new memory. 
^On second and subsequent calls to ** sqlite3_aggregate_context() for the same aggregate function instance, ** the same buffer is returned. Sqlite3_aggregate_context() is normally ** called once for each invocation of the xStep callback and then one ** last time when the xFinal callback is invoked. ^(When no rows match ** an aggregate query, the xStep() callback of the aggregate function ** implementation is never called and xFinal() is called exactly once. ** In those cases, sqlite3_aggregate_context() might be called for the ** first time from within xFinal().)^ ** ** ^The sqlite3_aggregate_context(C,N) routine returns a NULL pointer ** when first called if N is less than or equal to zero or if a memory ** allocate error occurs. ** ** ^(The amount of space allocated by sqlite3_aggregate_context(C,N) is ** determined by the N parameter on first successful call. Changing the ** value of N in subsequent call to sqlite3_aggregate_context() within ** the same aggregate function instance will not resize the memory ** allocation.)^ Within the xFinal callback, it is customary to set ** N=0 in calls to sqlite3_aggregate_context(C,N) so that no ** pointless memory allocations occur. ** ** ^SQLite automatically frees the memory allocated by ** sqlite3_aggregate_context() when the aggregate query concludes. ** ** The first parameter must be a copy of the ** [sqlite3_context | SQL function context] that is the first parameter ** to the xStep or xFinal callback routine that implements the aggregate ** function. ** ** This routine must be called from the same thread in which ** the aggregate SQL function is running. */ SQLITE_API void *SQLITE_STDCALL sqlite3_aggregate_context(sqlite3_context*, int nBytes); /* ** CAPI3REF: User Data For Functions ** METHOD: sqlite3_context ** ** ^The sqlite3_user_data() interface returns a copy of ** the pointer that was the pUserData parameter (the 5th parameter) ** of the [sqlite3_create_function()] ** and [sqlite3_create_function16()] routines that originally ** registered the application defined function. ** ** This routine must be called from the same thread in which ** the application-defined function is running. */ SQLITE_API void *SQLITE_STDCALL sqlite3_user_data(sqlite3_context*); /* ** CAPI3REF: Database Connection For Functions ** METHOD: sqlite3_context ** ** ^The sqlite3_context_db_handle() interface returns a copy of ** the pointer to the [database connection] (the 1st parameter) ** of the [sqlite3_create_function()] ** and [sqlite3_create_function16()] routines that originally ** registered the application defined function. */ SQLITE_API sqlite3 *SQLITE_STDCALL sqlite3_context_db_handle(sqlite3_context*); /* ** CAPI3REF: Function Auxiliary Data ** METHOD: sqlite3_context ** ** These functions may be used by (non-aggregate) SQL functions to ** associate metadata with argument values. If the same value is passed to ** multiple invocations of the same SQL function during query execution, under ** some circumstances the associated metadata may be preserved. An example ** of where this might be useful is in a regular-expression matching ** function. The compiled version of the regular expression can be stored as ** metadata associated with the pattern string. ** Then as long as the pattern string remains the same, ** the compiled regular expression can be reused on multiple ** invocations of the same function. 
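**
** As a rough sketch of that pattern (purely illustrative; re_t, re_compile,
** re_match, and re_free are hypothetical helpers, and re_free is assumed to
** match the void(*)(void*) destructor signature), the compiled pattern can
** be cached with the auxiliary-data routines described below:
**
**     static void regexpFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
**       re_t *pRe = (re_t*)sqlite3_get_auxdata(ctx, 0);
**       if( pRe==0 ){
**         pRe = re_compile((const char*)sqlite3_value_text(argv[0]));
**         if( pRe==0 ){ sqlite3_result_error_nomem(ctx); return; }
**         sqlite3_set_auxdata(ctx, 0, pRe, re_free);
**       }
**       sqlite3_result_int(ctx, re_match(pRe, (const char*)sqlite3_value_text(argv[1])));
**     }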
** ** ^The sqlite3_get_auxdata() interface returns a pointer to the metadata ** associated by the sqlite3_set_auxdata() function with the Nth argument ** value to the application-defined function. ^If there is no metadata ** associated with the function argument, this sqlite3_get_auxdata() interface ** returns a NULL pointer. ** ** ^The sqlite3_set_auxdata(C,N,P,X) interface saves P as metadata for the N-th ** argument of the application-defined function. ^Subsequent ** calls to sqlite3_get_auxdata(C,N) return P from the most recent ** sqlite3_set_auxdata(C,N,P,X) call if the metadata is still valid or ** NULL if the metadata has been discarded. ** ^After each call to sqlite3_set_auxdata(C,N,P,X) where X is not NULL, ** SQLite will invoke the destructor function X with parameter P exactly ** once, when the metadata is discarded. ** SQLite is free to discard the metadata at any time, including:
**
**   • when the corresponding function parameter changes, or
**   • when [sqlite3_reset()] or [sqlite3_finalize()] is called for the
**     SQL statement, or
**   • when sqlite3_set_auxdata() is invoked again on the same parameter, or
**   • during the original sqlite3_set_auxdata() call when a memory
**     allocation error occurs.
)^ ** ** Note the last bullet in particular. The destructor X in ** sqlite3_set_auxdata(C,N,P,X) might be called immediately, before the ** sqlite3_set_auxdata() interface even returns. Hence sqlite3_set_auxdata() ** should be called near the end of the function implementation and the ** function implementation should not make any use of P after ** sqlite3_set_auxdata() has been called. ** ** ^(In practice, metadata is preserved between function calls for ** function parameters that are compile-time constants, including literal ** values and [parameters] and expressions composed from the same.)^ ** ** These routines must be called from the same thread in which ** the SQL function is running. */ SQLITE_API void *SQLITE_STDCALL sqlite3_get_auxdata(sqlite3_context*, int N); SQLITE_API void SQLITE_STDCALL sqlite3_set_auxdata(sqlite3_context*, int N, void*, void (*)(void*)); /* ** CAPI3REF: Constants Defining Special Destructor Behavior ** ** These are special values for the destructor that is passed in as the ** final argument to routines like [sqlite3_result_blob()]. ^If the destructor ** argument is SQLITE_STATIC, it means that the content pointer is constant ** and will never change. It does not need to be destroyed. ^The ** SQLITE_TRANSIENT value means that the content will likely change in ** the near future and that SQLite should make its own private copy of ** the content before returning. ** ** The typedef is necessary to work around problems in certain ** C++ compilers. */ typedef void (*sqlite3_destructor_type)(void*); #define SQLITE_STATIC ((sqlite3_destructor_type)0) #define SQLITE_TRANSIENT ((sqlite3_destructor_type)-1) /* ** CAPI3REF: Setting The Result Of An SQL Function ** METHOD: sqlite3_context ** ** These routines are used by the xFunc or xFinal callbacks that ** implement SQL functions and aggregates. See ** [sqlite3_create_function()] and [sqlite3_create_function16()] ** for additional information. ** ** These functions work very much like the [parameter binding] family of ** functions used to bind values to host parameters in prepared statements. ** Refer to the [SQL parameter] documentation for additional information. ** ** ^The sqlite3_result_blob() interface sets the result from ** an application-defined function to be the BLOB whose content is pointed ** to by the second parameter and which is N bytes long where N is the ** third parameter. ** ** ^The sqlite3_result_zeroblob(C,N) and sqlite3_result_zeroblob64(C,N) ** interfaces set the result of the application-defined function to be ** a BLOB containing all zero bytes and N bytes in size. ** ** ^The sqlite3_result_double() interface sets the result from ** an application-defined function to be a floating point value specified ** by its 2nd argument. ** ** ^The sqlite3_result_error() and sqlite3_result_error16() functions ** cause the implemented SQL function to throw an exception. ** ^SQLite uses the string pointed to by the ** 2nd parameter of sqlite3_result_error() or sqlite3_result_error16() ** as the text of an error message. ^SQLite interprets the error ** message string from sqlite3_result_error() as UTF-8. ^SQLite ** interprets the string from sqlite3_result_error16() as UTF-16 in native ** byte order. ^If the third parameter to sqlite3_result_error() ** or sqlite3_result_error16() is negative then SQLite takes as the error ** message all text up through the first zero character. 
** ^If the third parameter to sqlite3_result_error() or ** sqlite3_result_error16() is non-negative then SQLite takes that many ** bytes (not characters) from the 2nd parameter as the error message. ** ^The sqlite3_result_error() and sqlite3_result_error16() ** routines make a private copy of the error message text before ** they return. Hence, the calling function can deallocate or ** modify the text after they return without harm. ** ^The sqlite3_result_error_code() function changes the error code ** returned by SQLite as a result of an error in a function. ^By default, ** the error code is SQLITE_ERROR. ^A subsequent call to sqlite3_result_error() ** or sqlite3_result_error16() resets the error code to SQLITE_ERROR. ** ** ^The sqlite3_result_error_toobig() interface causes SQLite to throw an ** error indicating that a string or BLOB is too long to represent. ** ** ^The sqlite3_result_error_nomem() interface causes SQLite to throw an ** error indicating that a memory allocation failed. ** ** ^The sqlite3_result_int() interface sets the return value ** of the application-defined function to be the 32-bit signed integer ** value given in the 2nd argument. ** ^The sqlite3_result_int64() interface sets the return value ** of the application-defined function to be the 64-bit signed integer ** value given in the 2nd argument. ** ** ^The sqlite3_result_null() interface sets the return value ** of the application-defined function to be NULL. ** ** ^The sqlite3_result_text(), sqlite3_result_text16(), ** sqlite3_result_text16le(), and sqlite3_result_text16be() interfaces ** set the return value of the application-defined function to be ** a text string which is represented as UTF-8, UTF-16 native byte order, ** UTF-16 little endian, or UTF-16 big endian, respectively. ** ^The sqlite3_result_text64() interface sets the return value of an ** application-defined function to be a text string in an encoding ** specified by the fifth (and last) parameter, which must be one ** of [SQLITE_UTF8], [SQLITE_UTF16], [SQLITE_UTF16BE], or [SQLITE_UTF16LE]. ** ^SQLite takes the text result from the application from ** the 2nd parameter of the sqlite3_result_text* interfaces. ** ^If the 3rd parameter to the sqlite3_result_text* interfaces ** is negative, then SQLite takes result text from the 2nd parameter ** through the first zero character. ** ^If the 3rd parameter to the sqlite3_result_text* interfaces ** is non-negative, then as many bytes (not characters) of the text ** pointed to by the 2nd parameter are taken as the application-defined ** function result. If the 3rd parameter is non-negative, then it ** must be the byte offset into the string where the NUL terminator would ** appear if the string where NUL terminated. If any NUL characters occur ** in the string at a byte offset that is less than the value of the 3rd ** parameter, then the resulting string will contain embedded NULs and the ** result of expressions operating on strings with embedded NULs is undefined. ** ^If the 4th parameter to the sqlite3_result_text* interfaces ** or sqlite3_result_blob is a non-NULL pointer, then SQLite calls that ** function as the destructor on the text or BLOB result when it has ** finished using that result. 
** ^If the 4th parameter to the sqlite3_result_text* interfaces or to ** sqlite3_result_blob is the special constant SQLITE_STATIC, then SQLite ** assumes that the text or BLOB result is in constant space and does not ** copy the content of the parameter nor call a destructor on the content ** when it has finished using that result. ** ^If the 4th parameter to the sqlite3_result_text* interfaces ** or sqlite3_result_blob is the special constant SQLITE_TRANSIENT ** then SQLite makes a copy of the result into space obtained from ** from [sqlite3_malloc()] before it returns. ** ** ^The sqlite3_result_value() interface sets the result of ** the application-defined function to be a copy of the ** [unprotected sqlite3_value] object specified by the 2nd parameter. ^The ** sqlite3_result_value() interface makes a copy of the [sqlite3_value] ** so that the [sqlite3_value] specified in the parameter may change or ** be deallocated after sqlite3_result_value() returns without harm. ** ^A [protected sqlite3_value] object may always be used where an ** [unprotected sqlite3_value] object is required, so either ** kind of [sqlite3_value] object can be used with this interface. ** ** If these routines are called from within the different thread ** than the one containing the application-defined function that received ** the [sqlite3_context] pointer, the results are undefined. */ SQLITE_API void SQLITE_STDCALL sqlite3_result_blob(sqlite3_context*, const void*, int, void(*)(void*)); SQLITE_API void SQLITE_STDCALL sqlite3_result_blob64(sqlite3_context*,const void*, sqlite3_uint64,void(*)(void*)); SQLITE_API void SQLITE_STDCALL sqlite3_result_double(sqlite3_context*, double); SQLITE_API void SQLITE_STDCALL sqlite3_result_error(sqlite3_context*, const char*, int); SQLITE_API void SQLITE_STDCALL sqlite3_result_error16(sqlite3_context*, const void*, int); SQLITE_API void SQLITE_STDCALL sqlite3_result_error_toobig(sqlite3_context*); SQLITE_API void SQLITE_STDCALL sqlite3_result_error_nomem(sqlite3_context*); SQLITE_API void SQLITE_STDCALL sqlite3_result_error_code(sqlite3_context*, int); SQLITE_API void SQLITE_STDCALL sqlite3_result_int(sqlite3_context*, int); SQLITE_API void SQLITE_STDCALL sqlite3_result_int64(sqlite3_context*, sqlite3_int64); SQLITE_API void SQLITE_STDCALL sqlite3_result_null(sqlite3_context*); SQLITE_API void SQLITE_STDCALL sqlite3_result_text(sqlite3_context*, const char*, int, void(*)(void*)); SQLITE_API void SQLITE_STDCALL sqlite3_result_text64(sqlite3_context*, const char*,sqlite3_uint64, void(*)(void*), unsigned char encoding); SQLITE_API void SQLITE_STDCALL sqlite3_result_text16(sqlite3_context*, const void*, int, void(*)(void*)); SQLITE_API void SQLITE_STDCALL sqlite3_result_text16le(sqlite3_context*, const void*, int,void(*)(void*)); SQLITE_API void SQLITE_STDCALL sqlite3_result_text16be(sqlite3_context*, const void*, int,void(*)(void*)); SQLITE_API void SQLITE_STDCALL sqlite3_result_value(sqlite3_context*, sqlite3_value*); SQLITE_API void SQLITE_STDCALL sqlite3_result_zeroblob(sqlite3_context*, int n); SQLITE_API int SQLITE_STDCALL sqlite3_result_zeroblob64(sqlite3_context*, sqlite3_uint64 n); /* ** CAPI3REF: Define New Collating Sequences ** METHOD: sqlite3 ** ** ^These functions add, remove, or modify a [collation] associated ** with the [database connection] specified as the first argument. 
** ** ^The name of the collation is a UTF-8 string ** for sqlite3_create_collation() and sqlite3_create_collation_v2() ** and a UTF-16 string in native byte order for sqlite3_create_collation16(). ** ^Collation names that compare equal according to [sqlite3_strnicmp()] are ** considered to be the same name. ** ** ^(The third argument (eTextRep) must be one of the constants: **
**
**   • [SQLITE_UTF8],
**   • [SQLITE_UTF16LE],
**   • [SQLITE_UTF16BE],
**   • [SQLITE_UTF16], or
**   • [SQLITE_UTF16_ALIGNED].
)^ ** ^The eTextRep argument determines the encoding of strings passed ** to the collating function callback, xCallback. ** ^The [SQLITE_UTF16] and [SQLITE_UTF16_ALIGNED] values for eTextRep ** force strings to be UTF16 with native byte order. ** ^The [SQLITE_UTF16_ALIGNED] value for eTextRep forces strings to begin ** on an even byte address. ** ** ^The fourth argument, pArg, is an application data pointer that is passed ** through as the first argument to the collating function callback. ** ** ^The fifth argument, xCallback, is a pointer to the collating function. ** ^Multiple collating functions can be registered using the same name but ** with different eTextRep parameters and SQLite will use whichever ** function requires the least amount of data transformation. ** ^If the xCallback argument is NULL then the collating function is ** deleted. ^When all collating functions having the same name are deleted, ** that collation is no longer usable. ** ** ^The collating function callback is invoked with a copy of the pArg ** application data pointer and with two strings in the encoding specified ** by the eTextRep argument. The collating function must return an ** integer that is negative, zero, or positive ** if the first string is less than, equal to, or greater than the second, ** respectively. A collating function must always return the same answer ** given the same inputs. If two or more collating functions are registered ** to the same collation name (using different eTextRep values) then all ** must give an equivalent answer when invoked with equivalent strings. ** The collating function must obey the following properties for all ** strings A, B, and C: ** **
**
**   1. If A==B then B==A.
**   2. If A==B and B==C then A==C.
**   3. If A<B then B>A.
**   4. If A<B and B<C then A<C.
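**
** For illustration only, the following hypothetical comparator (a plain
** byte-wise ordering, assuming <string.h> for memcmp) satisfies all four
** properties and could be registered under an arbitrary name such as
** "bytecmp":
**
**     static int byteCollation(void *pNotUsed, int n1, const void *p1,
**                                              int n2, const void *p2){
**       int n = n1<n2 ? n1 : n2;
**       int c = memcmp(p1, p2, n);
**       return c!=0 ? c : n1-n2;
**     }
**
**     sqlite3_create_collation(db, "bytecmp", SQLITE_UTF8, 0, byteCollation);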
** ** If a collating function fails any of the above constraints and that ** collating function is registered and used, then the behavior of SQLite ** is undefined. ** ** ^The sqlite3_create_collation_v2() works like sqlite3_create_collation() ** with the addition that the xDestroy callback is invoked on pArg when ** the collating function is deleted. ** ^Collating functions are deleted when they are overridden by later ** calls to the collation creation functions or when the ** [database connection] is closed using [sqlite3_close()]. ** ** ^The xDestroy callback is not called if the ** sqlite3_create_collation_v2() function fails. Applications that invoke ** sqlite3_create_collation_v2() with a non-NULL xDestroy argument should ** check the return code and dispose of the application data pointer ** themselves rather than expecting SQLite to deal with it for them. ** This is different from every other SQLite interface. The inconsistency ** is unfortunate but cannot be changed without breaking backwards ** compatibility. ** ** See also: [sqlite3_collation_needed()] and [sqlite3_collation_needed16()]. */ SQLITE_API int SQLITE_STDCALL sqlite3_create_collation( sqlite3*, const char *zName, int eTextRep, void *pArg, int(*xCompare)(void*,int,const void*,int,const void*) ); SQLITE_API int SQLITE_STDCALL sqlite3_create_collation_v2( sqlite3*, const char *zName, int eTextRep, void *pArg, int(*xCompare)(void*,int,const void*,int,const void*), void(*xDestroy)(void*) ); SQLITE_API int SQLITE_STDCALL sqlite3_create_collation16( sqlite3*, const void *zName, int eTextRep, void *pArg, int(*xCompare)(void*,int,const void*,int,const void*) ); /* ** CAPI3REF: Collation Needed Callbacks ** METHOD: sqlite3 ** ** ^To avoid having to register all collation sequences before a database ** can be used, a single callback function may be registered with the ** [database connection] to be invoked whenever an undefined collation ** sequence is required. ** ** ^If the function is registered using the sqlite3_collation_needed() API, ** then it is passed the names of undefined collation sequences as strings ** encoded in UTF-8. ^If sqlite3_collation_needed16() is used, ** the names are passed as UTF-16 in machine native byte order. ** ^A call to either function replaces the existing collation-needed callback. ** ** ^(When the callback is invoked, the first argument passed is a copy ** of the second argument to sqlite3_collation_needed() or ** sqlite3_collation_needed16(). The second argument is the database ** connection. The third argument is one of [SQLITE_UTF8], [SQLITE_UTF16BE], ** or [SQLITE_UTF16LE], indicating the most desirable form of the collation ** sequence function required. The fourth parameter is the name of the ** required collation sequence.)^ ** ** The callback function should register the desired collation using ** [sqlite3_create_collation()], [sqlite3_create_collation16()], or ** [sqlite3_create_collation_v2()]. */ SQLITE_API int SQLITE_STDCALL sqlite3_collation_needed( sqlite3*, void*, void(*)(void*,sqlite3*,int eTextRep,const char*) ); SQLITE_API int SQLITE_STDCALL sqlite3_collation_needed16( sqlite3*, void*, void(*)(void*,sqlite3*,int eTextRep,const void*) ); #ifdef SQLITE_HAS_CODEC /* ** Specify the key for an encrypted database. This routine should be ** called right after sqlite3_open(). ** ** The code to implement this API is not available in the public release ** of SQLite. 
*/ SQLITE_API int SQLITE_STDCALL sqlite3_key( sqlite3 *db, /* Database to be rekeyed */ const void *pKey, int nKey /* The key */ ); SQLITE_API int SQLITE_STDCALL sqlite3_key_v2( sqlite3 *db, /* Database to be rekeyed */ const char *zDbName, /* Name of the database */ const void *pKey, int nKey /* The key */ ); /* ** Change the key on an open database. If the current database is not ** encrypted, this routine will encrypt it. If pNew==0 or nNew==0, the ** database is decrypted. ** ** The code to implement this API is not available in the public release ** of SQLite. */ SQLITE_API int SQLITE_STDCALL sqlite3_rekey( sqlite3 *db, /* Database to be rekeyed */ const void *pKey, int nKey /* The new key */ ); SQLITE_API int SQLITE_STDCALL sqlite3_rekey_v2( sqlite3 *db, /* Database to be rekeyed */ const char *zDbName, /* Name of the database */ const void *pKey, int nKey /* The new key */ ); /* ** Specify the activation key for a SEE database. Unless ** activated, none of the SEE routines will work. */ SQLITE_API void SQLITE_STDCALL sqlite3_activate_see( const char *zPassPhrase /* Activation phrase */ ); #endif #ifdef SQLITE_ENABLE_CEROD /* ** Specify the activation key for a CEROD database. Unless ** activated, none of the CEROD routines will work. */ SQLITE_API void SQLITE_STDCALL sqlite3_activate_cerod( const char *zPassPhrase /* Activation phrase */ ); #endif /* ** CAPI3REF: Suspend Execution For A Short Time ** ** The sqlite3_sleep() function causes the current thread to suspend execution ** for at least a number of milliseconds specified in its parameter. ** ** If the operating system does not support sleep requests with ** millisecond time resolution, then the time will be rounded up to ** the nearest second. The number of milliseconds of sleep actually ** requested from the operating system is returned. ** ** ^SQLite implements this interface by calling the xSleep() ** method of the default [sqlite3_vfs] object. If the xSleep() method ** of the default VFS is not implemented correctly, or not implemented at ** all, then the behavior of sqlite3_sleep() may deviate from the description ** in the previous paragraphs. */ SQLITE_API int SQLITE_STDCALL sqlite3_sleep(int); /* ** CAPI3REF: Name Of The Folder Holding Temporary Files ** ** ^(If this global variable is made to point to a string which is ** the name of a folder (a.k.a. directory), then all temporary files ** created by SQLite when using a built-in [sqlite3_vfs | VFS] ** will be placed in that directory.)^ ^If this variable ** is a NULL pointer, then SQLite performs a search for an appropriate ** temporary file directory. ** ** Applications are strongly discouraged from using this global variable. ** It is required to set a temporary folder on Windows Runtime (WinRT). ** But for all other platforms, it is highly recommended that applications ** neither read nor write this variable. This global variable is a relic ** that exists for backwards compatibility of legacy applications and should ** be avoided in new projects. ** ** It is not safe to read or modify this variable in more than one ** thread at a time. It is not safe to read or modify this variable ** if a [database connection] is being used at the same time in a separate ** thread. ** It is intended that this variable be set once ** as part of process initialization and before any SQLite interface ** routines have been called and that this variable remain unchanged ** thereafter. 
** ** ^The [temp_store_directory pragma] may modify this variable and cause ** it to point to memory obtained from [sqlite3_malloc]. ^Furthermore, ** the [temp_store_directory pragma] always assumes that any string ** that this variable points to is held in memory obtained from ** [sqlite3_malloc] and the pragma may attempt to free that memory ** using [sqlite3_free]. ** Hence, if this variable is modified directly, either it should be ** made NULL or made to point to memory obtained from [sqlite3_malloc] ** or else the use of the [temp_store_directory pragma] should be avoided. ** Except when requested by the [temp_store_directory pragma], SQLite ** does not free the memory that sqlite3_temp_directory points to. If ** the application wants that memory to be freed, it must do ** so itself, taking care to only do so after all [database connection] ** objects have been destroyed. ** ** Note to Windows Runtime users: The temporary directory must be set ** prior to calling [sqlite3_open] or [sqlite3_open_v2]. Otherwise, various ** features that require the use of temporary files may fail. Here is an ** example of how to do this using C++ with the Windows Runtime: ** **
** LPCWSTR zPath = Windows::Storage::ApplicationData::Current->
**       TemporaryFolder->Path->Data();
** char zPathBuf[MAX_PATH + 1];
** memset(zPathBuf, 0, sizeof(zPathBuf));
** WideCharToMultiByte(CP_UTF8, 0, zPath, -1, zPathBuf, sizeof(zPathBuf),
**       NULL, NULL);
** sqlite3_temp_directory = sqlite3_mprintf("%s", zPathBuf);
** 
*/ SQLITE_API char *sqlite3_temp_directory; /* ** CAPI3REF: Name Of The Folder Holding Database Files ** ** ^(If this global variable is made to point to a string which is ** the name of a folder (a.k.a. directory), then all database files ** specified with a relative pathname and created or accessed by ** SQLite when using a built-in windows [sqlite3_vfs | VFS] will be assumed ** to be relative to that directory.)^ ^If this variable is a NULL ** pointer, then SQLite assumes that all database files specified ** with a relative pathname are relative to the current directory ** for the process. Only the windows VFS makes use of this global ** variable; it is ignored by the unix VFS. ** ** Changing the value of this variable while a database connection is ** open can result in a corrupt database. ** ** It is not safe to read or modify this variable in more than one ** thread at a time. It is not safe to read or modify this variable ** if a [database connection] is being used at the same time in a separate ** thread. ** It is intended that this variable be set once ** as part of process initialization and before any SQLite interface ** routines have been called and that this variable remain unchanged ** thereafter. ** ** ^The [data_store_directory pragma] may modify this variable and cause ** it to point to memory obtained from [sqlite3_malloc]. ^Furthermore, ** the [data_store_directory pragma] always assumes that any string ** that this variable points to is held in memory obtained from ** [sqlite3_malloc] and the pragma may attempt to free that memory ** using [sqlite3_free]. ** Hence, if this variable is modified directly, either it should be ** made NULL or made to point to memory obtained from [sqlite3_malloc] ** or else the use of the [data_store_directory pragma] should be avoided. */ SQLITE_API char *sqlite3_data_directory; /* ** CAPI3REF: Test For Auto-Commit Mode ** KEYWORDS: {autocommit mode} ** METHOD: sqlite3 ** ** ^The sqlite3_get_autocommit() interface returns non-zero or ** zero if the given database connection is or is not in autocommit mode, ** respectively. ^Autocommit mode is on by default. ** ^Autocommit mode is disabled by a [BEGIN] statement. ** ^Autocommit mode is re-enabled by a [COMMIT] or [ROLLBACK]. ** ** If certain kinds of errors occur on a statement within a multi-statement ** transaction (errors including [SQLITE_FULL], [SQLITE_IOERR], ** [SQLITE_NOMEM], [SQLITE_BUSY], and [SQLITE_INTERRUPT]) then the ** transaction might be rolled back automatically. The only way to ** find out whether SQLite automatically rolled back the transaction after ** an error is to use this function. ** ** If another thread changes the autocommit status of the database ** connection while this routine is running, then the return value ** is undefined. */ SQLITE_API int SQLITE_STDCALL sqlite3_get_autocommit(sqlite3*); /* ** CAPI3REF: Find The Database Handle Of A Prepared Statement ** METHOD: sqlite3_stmt ** ** ^The sqlite3_db_handle interface returns the [database connection] handle ** to which a [prepared statement] belongs. ^The [database connection] ** returned by sqlite3_db_handle is the same [database connection] ** that was the first argument ** to the [sqlite3_prepare_v2()] call (or its variants) that was used to ** create the statement in the first place. 
*/ SQLITE_API sqlite3 *SQLITE_STDCALL sqlite3_db_handle(sqlite3_stmt*); /* ** CAPI3REF: Return The Filename For A Database Connection ** METHOD: sqlite3 ** ** ^The sqlite3_db_filename(D,N) interface returns a pointer to a filename ** associated with database N of connection D. ^The main database file ** has the name "main". If there is no attached database N on the database ** connection D, or if database N is a temporary or in-memory database, then ** a NULL pointer is returned. ** ** ^The filename returned by this function is the output of the ** xFullPathname method of the [VFS]. ^In other words, the filename ** will be an absolute pathname, even if the filename used ** to open the database originally was a URI or relative pathname. */ SQLITE_API const char *SQLITE_STDCALL sqlite3_db_filename(sqlite3 *db, const char *zDbName); /* ** CAPI3REF: Determine if a database is read-only ** METHOD: sqlite3 ** ** ^The sqlite3_db_readonly(D,N) interface returns 1 if the database N ** of connection D is read-only, 0 if it is read/write, or -1 if N is not ** the name of a database on connection D. */ SQLITE_API int SQLITE_STDCALL sqlite3_db_readonly(sqlite3 *db, const char *zDbName); /* ** CAPI3REF: Find the next prepared statement ** METHOD: sqlite3 ** ** ^This interface returns a pointer to the next [prepared statement] after ** pStmt associated with the [database connection] pDb. ^If pStmt is NULL ** then this interface returns a pointer to the first prepared statement ** associated with the database connection pDb. ^If no prepared statement ** satisfies the conditions of this routine, it returns NULL. ** ** The [database connection] pointer D in a call to ** [sqlite3_next_stmt(D,S)] must refer to an open database ** connection and in particular must not be a NULL pointer. */ SQLITE_API sqlite3_stmt *SQLITE_STDCALL sqlite3_next_stmt(sqlite3 *pDb, sqlite3_stmt *pStmt); /* ** CAPI3REF: Commit And Rollback Notification Callbacks ** METHOD: sqlite3 ** ** ^The sqlite3_commit_hook() interface registers a callback ** function to be invoked whenever a transaction is [COMMIT | committed]. ** ^Any callback set by a previous call to sqlite3_commit_hook() ** for the same database connection is overridden. ** ^The sqlite3_rollback_hook() interface registers a callback ** function to be invoked whenever a transaction is [ROLLBACK | rolled back]. ** ^Any callback set by a previous call to sqlite3_rollback_hook() ** for the same database connection is overridden. ** ^The pArg argument is passed through to the callback. ** ^If the callback on a commit hook function returns non-zero, ** then the commit is converted into a rollback. ** ** ^The sqlite3_commit_hook(D,C,P) and sqlite3_rollback_hook(D,C,P) functions ** return the P argument from the previous call of the same function ** on the same [database connection] D, or NULL for ** the first call for each function on D. ** ** The commit and rollback hook callbacks are not reentrant. ** The callback implementation must not do anything that will modify ** the database connection that invoked the callback. Any actions ** to modify the database connection must be deferred until after the ** completion of the [sqlite3_step()] call that triggered the commit ** or rollback hook in the first place. ** Note that running any other SQL statements, including SELECT statements, ** or merely calling [sqlite3_prepare_v2()] and [sqlite3_step()] will modify ** the database connections for the meaning of "modify" in this paragraph. 
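**
** As a small hypothetical sketch, a hook that vetoes commits while an
** application-owned flag denyCommits is nonzero could be registered as
** follows; returning nonzero from the callback converts the COMMIT into a
** ROLLBACK, as described below:
**
**     static int commitHook(void *pArg){
**       return *(int*)pArg;
**     }
**
**     sqlite3_commit_hook(db, commitHook, &denyCommits);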
** ** ^Registering a NULL function disables the callback. ** ** ^When the commit hook callback routine returns zero, the [COMMIT] ** operation is allowed to continue normally. ^If the commit hook ** returns non-zero, then the [COMMIT] is converted into a [ROLLBACK]. ** ^The rollback hook is invoked on a rollback that results from a commit ** hook returning non-zero, just as it would be with any other rollback. ** ** ^For the purposes of this API, a transaction is said to have been ** rolled back if an explicit "ROLLBACK" statement is executed, or ** an error or constraint causes an implicit rollback to occur. ** ^The rollback callback is not invoked if a transaction is ** automatically rolled back because the database connection is closed. ** ** See also the [sqlite3_update_hook()] interface. */ SQLITE_API void *SQLITE_STDCALL sqlite3_commit_hook(sqlite3*, int(*)(void*), void*); SQLITE_API void *SQLITE_STDCALL sqlite3_rollback_hook(sqlite3*, void(*)(void *), void*); /* ** CAPI3REF: Data Change Notification Callbacks ** METHOD: sqlite3 ** ** ^The sqlite3_update_hook() interface registers a callback function ** with the [database connection] identified by the first argument ** to be invoked whenever a row is updated, inserted or deleted in ** a rowid table. ** ^Any callback set by a previous call to this function ** for the same database connection is overridden. ** ** ^The second argument is a pointer to the function to invoke when a ** row is updated, inserted or deleted in a rowid table. ** ^The first argument to the callback is a copy of the third argument ** to sqlite3_update_hook(). ** ^The second callback argument is one of [SQLITE_INSERT], [SQLITE_DELETE], ** or [SQLITE_UPDATE], depending on the operation that caused the callback ** to be invoked. ** ^The third and fourth arguments to the callback contain pointers to the ** database and table name containing the affected row. ** ^The final callback parameter is the [rowid] of the row. ** ^In the case of an update, this is the [rowid] after the update takes place. ** ** ^(The update hook is not invoked when internal system tables are ** modified (i.e. sqlite_master and sqlite_sequence).)^ ** ^The update hook is not invoked when [WITHOUT ROWID] tables are modified. ** ** ^In the current implementation, the update hook ** is not invoked when duplication rows are deleted because of an ** [ON CONFLICT | ON CONFLICT REPLACE] clause. ^Nor is the update hook ** invoked when rows are deleted using the [truncate optimization]. ** The exceptions defined in this paragraph might change in a future ** release of SQLite. ** ** The update hook implementation must not do anything that will modify ** the database connection that invoked the update hook. Any actions ** to modify the database connection must be deferred until after the ** completion of the [sqlite3_step()] call that triggered the update hook. ** Note that [sqlite3_prepare_v2()] and [sqlite3_step()] both modify their ** database connections for the meaning of "modify" in this paragraph. ** ** ^The sqlite3_update_hook(D,C,P) function ** returns the P argument from the previous call ** on the same [database connection] D, or NULL for ** the first call on D. ** ** See also the [sqlite3_commit_hook()] and [sqlite3_rollback_hook()] ** interfaces. 
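**
** An illustrative (hypothetical) callback that merely logs each change,
** assuming <stdio.h>, might be registered like this:
**
**     static void updateHook(void *pArg, int op, const char *zDb,
**                            const char *zTbl, sqlite3_int64 rowid){
**       printf("op=%d db=%s table=%s rowid=%lld\n",
**              op, zDb, zTbl, (long long)rowid);
**     }
**
**     sqlite3_update_hook(db, updateHook, 0);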
*/ SQLITE_API void *SQLITE_STDCALL sqlite3_update_hook( sqlite3*, void(*)(void *,int ,char const *,char const *,sqlite3_int64), void* ); /* ** CAPI3REF: Enable Or Disable Shared Pager Cache ** ** ^(This routine enables or disables the sharing of the database cache ** and schema data structures between [database connection | connections] ** to the same database. Sharing is enabled if the argument is true ** and disabled if the argument is false.)^ ** ** ^Cache sharing is enabled and disabled for an entire process. ** This is a change as of SQLite version 3.5.0. In prior versions of SQLite, ** sharing was enabled or disabled for each thread separately. ** ** ^(The cache sharing mode set by this interface effects all subsequent ** calls to [sqlite3_open()], [sqlite3_open_v2()], and [sqlite3_open16()]. ** Existing database connections continue use the sharing mode ** that was in effect at the time they were opened.)^ ** ** ^(This routine returns [SQLITE_OK] if shared cache was enabled or disabled ** successfully. An [error code] is returned otherwise.)^ ** ** ^Shared cache is disabled by default. But this might change in ** future releases of SQLite. Applications that care about shared ** cache setting should set it explicitly. ** ** Note: This method is disabled on MacOS X 10.7 and iOS version 5.0 ** and will always return SQLITE_MISUSE. On those systems, ** shared cache mode should be enabled per-database connection via ** [sqlite3_open_v2()] with [SQLITE_OPEN_SHAREDCACHE]. ** ** This interface is threadsafe on processors where writing a ** 32-bit integer is atomic. ** ** See Also: [SQLite Shared-Cache Mode] */ SQLITE_API int SQLITE_STDCALL sqlite3_enable_shared_cache(int); /* ** CAPI3REF: Attempt To Free Heap Memory ** ** ^The sqlite3_release_memory() interface attempts to free N bytes ** of heap memory by deallocating non-essential memory allocations ** held by the database library. Memory used to cache database ** pages to improve performance is an example of non-essential memory. ** ^sqlite3_release_memory() returns the number of bytes actually freed, ** which might be more or less than the amount requested. ** ^The sqlite3_release_memory() routine is a no-op returning zero ** if SQLite is not compiled with [SQLITE_ENABLE_MEMORY_MANAGEMENT]. ** ** See also: [sqlite3_db_release_memory()] */ SQLITE_API int SQLITE_STDCALL sqlite3_release_memory(int); /* ** CAPI3REF: Free Memory Used By A Database Connection ** METHOD: sqlite3 ** ** ^The sqlite3_db_release_memory(D) interface attempts to free as much heap ** memory as possible from database connection D. Unlike the ** [sqlite3_release_memory()] interface, this interface is in effect even ** when the [SQLITE_ENABLE_MEMORY_MANAGEMENT] compile-time option is ** omitted. ** ** See also: [sqlite3_release_memory()] */ SQLITE_API int SQLITE_STDCALL sqlite3_db_release_memory(sqlite3*); /* ** CAPI3REF: Impose A Limit On Heap Size ** ** ^The sqlite3_soft_heap_limit64() interface sets and/or queries the ** soft limit on the amount of heap memory that may be allocated by SQLite. ** ^SQLite strives to keep heap memory utilization below the soft heap ** limit by reducing the number of pages held in the page cache ** as heap memory usages approaches the limit. ** ^The soft heap limit is "soft" because even though SQLite strives to stay ** below the limit, it will exceed the limit rather than generate ** an [SQLITE_NOMEM] error. In other words, the soft heap limit ** is advisory only. 
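**
** For illustration only (the 8 MiB figure is arbitrary): as the next
** paragraph explains, a negative argument merely queries the current limit,
** so the prior limit can be captured before a new one is installed:
**
**     sqlite3_int64 prior = sqlite3_soft_heap_limit64(-1);
**     sqlite3_soft_heap_limit64(8*1024*1024);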
** ** ^The return value from sqlite3_soft_heap_limit64() is the size of ** the soft heap limit prior to the call, or negative in the case of an ** error. ^If the argument N is negative ** then no change is made to the soft heap limit. Hence, the current ** size of the soft heap limit can be determined by invoking ** sqlite3_soft_heap_limit64() with a negative argument. ** ** ^If the argument N is zero then the soft heap limit is disabled. ** ** ^(The soft heap limit is not enforced in the current implementation ** if one or more of following conditions are true: ** **
**
**   • The soft heap limit is set to zero.
**   • Memory accounting is disabled using a combination of the
**     [sqlite3_config]([SQLITE_CONFIG_MEMSTATUS],...) start-time option and
**     the [SQLITE_DEFAULT_MEMSTATUS] compile-time option.
**   • An alternative page cache implementation is specified using
**     [sqlite3_config]([SQLITE_CONFIG_PCACHE2],...).
**   • The page cache allocates from its own memory pool supplied
**     by [sqlite3_config]([SQLITE_CONFIG_PAGECACHE],...) rather than
**     from the heap.
)^ ** ** Beginning with SQLite version 3.7.3, the soft heap limit is enforced ** regardless of whether or not the [SQLITE_ENABLE_MEMORY_MANAGEMENT] ** compile-time option is invoked. With [SQLITE_ENABLE_MEMORY_MANAGEMENT], ** the soft heap limit is enforced on every memory allocation. Without ** [SQLITE_ENABLE_MEMORY_MANAGEMENT], the soft heap limit is only enforced ** when memory is allocated by the page cache. Testing suggests that because ** the page cache is the predominate memory user in SQLite, most ** applications will achieve adequate soft heap limit enforcement without ** the use of [SQLITE_ENABLE_MEMORY_MANAGEMENT]. ** ** The circumstances under which SQLite will enforce the soft heap limit may ** changes in future releases of SQLite. */ SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_soft_heap_limit64(sqlite3_int64 N); /* ** CAPI3REF: Deprecated Soft Heap Limit Interface ** DEPRECATED ** ** This is a deprecated version of the [sqlite3_soft_heap_limit64()] ** interface. This routine is provided for historical compatibility ** only. All new applications should use the ** [sqlite3_soft_heap_limit64()] interface rather than this one. */ SQLITE_API SQLITE_DEPRECATED void SQLITE_STDCALL sqlite3_soft_heap_limit(int N); /* ** CAPI3REF: Extract Metadata About A Column Of A Table ** METHOD: sqlite3 ** ** ^(The sqlite3_table_column_metadata(X,D,T,C,....) routine returns ** information about column C of table T in database D ** on [database connection] X.)^ ^The sqlite3_table_column_metadata() ** interface returns SQLITE_OK and fills in the non-NULL pointers in ** the final five arguments with appropriate values if the specified ** column exists. ^The sqlite3_table_column_metadata() interface returns ** SQLITE_ERROR and if the specified column does not exist. ** ^If the column-name parameter to sqlite3_table_column_metadata() is a ** NULL pointer, then this routine simply checks for the existance of the ** table and returns SQLITE_OK if the table exists and SQLITE_ERROR if it ** does not. ** ** ^The column is identified by the second, third and fourth parameters to ** this function. ^(The second parameter is either the name of the database ** (i.e. "main", "temp", or an attached database) containing the specified ** table or NULL.)^ ^If it is NULL, then all attached databases are searched ** for the table using the same algorithm used by the database engine to ** resolve unqualified table references. ** ** ^The third and fourth parameters to this function are the table and column ** name of the desired column, respectively. ** ** ^Metadata is returned by writing to the memory locations passed as the 5th ** and subsequent parameters to this function. ^Any of these arguments may be ** NULL, in which case the corresponding element of metadata is omitted. ** ** ^(
**
**   Parameter   Output Type    Description
**   ---------   -----------    -----------------------------------------
**   5th         const char*    Data type
**   6th         const char*    Name of default collation sequence
**   7th         int            True if column has a NOT NULL constraint
**   8th         int            True if column is part of the PRIMARY KEY
**   9th         int            True if column is [AUTOINCREMENT]
**
)^ ** ** ^The memory pointed to by the character pointers returned for the ** declaration type and collation sequence is valid until the next ** call to any SQLite API function. ** ** ^If the specified table is actually a view, an [error code] is returned. ** ** ^If the specified column is "rowid", "oid" or "_rowid_" and the table ** is not a [WITHOUT ROWID] table and an ** [INTEGER PRIMARY KEY] column has been explicitly declared, then the output ** parameters are set for the explicitly declared column. ^(If there is no ** [INTEGER PRIMARY KEY] column, then the outputs ** for the [rowid] are set as follows: ** **
**     data type: "INTEGER"
**     collation sequence: "BINARY"
**     not null: 0
**     primary key: 1
**     auto increment: 0
** 
)^ ** ** ^This function causes all database schemas to be read from disk and ** parsed, if that has not already been done, and returns an error if ** any errors are encountered while loading the schema. */ SQLITE_API int SQLITE_STDCALL sqlite3_table_column_metadata( sqlite3 *db, /* Connection handle */ const char *zDbName, /* Database name or NULL */ const char *zTableName, /* Table name */ const char *zColumnName, /* Column name */ char const **pzDataType, /* OUTPUT: Declared data type */ char const **pzCollSeq, /* OUTPUT: Collation sequence name */ int *pNotNull, /* OUTPUT: True if NOT NULL constraint exists */ int *pPrimaryKey, /* OUTPUT: True if column part of PK */ int *pAutoinc /* OUTPUT: True if column is auto-increment */ ); /* ** CAPI3REF: Load An Extension ** METHOD: sqlite3 ** ** ^This interface loads an SQLite extension library from the named file. ** ** ^The sqlite3_load_extension() interface attempts to load an ** [SQLite extension] library contained in the file zFile. If ** the file cannot be loaded directly, attempts are made to load ** with various operating-system specific extensions added. ** So for example, if "samplelib" cannot be loaded, then names like ** "samplelib.so" or "samplelib.dylib" or "samplelib.dll" might ** be tried also. ** ** ^The entry point is zProc. ** ^(zProc may be 0, in which case SQLite will try to come up with an ** entry point name on its own. It first tries "sqlite3_extension_init". ** If that does not work, it constructs a name "sqlite3_X_init" where the ** X is consists of the lower-case equivalent of all ASCII alphabetic ** characters in the filename from the last "/" to the first following ** "." and omitting any initial "lib".)^ ** ^The sqlite3_load_extension() interface returns ** [SQLITE_OK] on success and [SQLITE_ERROR] if something goes wrong. ** ^If an error occurs and pzErrMsg is not 0, then the ** [sqlite3_load_extension()] interface shall attempt to ** fill *pzErrMsg with error message text stored in memory ** obtained from [sqlite3_malloc()]. The calling function ** should free this memory by calling [sqlite3_free()]. ** ** ^Extension loading must be enabled using ** [sqlite3_enable_load_extension()] prior to calling this API, ** otherwise an error will be returned. ** ** See also the [load_extension() SQL function]. */ SQLITE_API int SQLITE_STDCALL sqlite3_load_extension( sqlite3 *db, /* Load the extension into this database connection */ const char *zFile, /* Name of the shared library containing extension */ const char *zProc, /* Entry point. Derived from zFile if 0 */ char **pzErrMsg /* Put error message here if not 0 */ ); /* ** CAPI3REF: Enable Or Disable Extension Loading ** METHOD: sqlite3 ** ** ^So as not to open security holes in older applications that are ** unprepared to deal with [extension loading], and as a means of disabling ** [extension loading] while evaluating user-entered SQL, the following API ** is provided to turn the [sqlite3_load_extension()] mechanism on and off. ** ** ^Extension loading is off by default. ** ^Call the sqlite3_enable_load_extension() routine with onoff==1 ** to turn extension loading on and call it with onoff==0 to turn ** it back off again. */ SQLITE_API int SQLITE_STDCALL sqlite3_enable_load_extension(sqlite3 *db, int onoff); /* ** CAPI3REF: Automatically Load Statically Linked Extensions ** ** ^This interface causes the xEntryPoint() function to be invoked for ** each new [database connection] that is created. 
The idea here is that ** xEntryPoint() is the entry point for a statically linked [SQLite extension] ** that is to be automatically loaded into all new database connections. ** ** ^(Even though the function prototype shows that xEntryPoint() takes ** no arguments and returns void, SQLite invokes xEntryPoint() with three ** arguments and expects and integer result as if the signature of the ** entry point where as follows: ** **
**    int xEntryPoint(
**      sqlite3 *db,
**      const char **pzErrMsg,
**      const struct sqlite3_api_routines *pThunk
**    );
** 
)^ ** ** If the xEntryPoint routine encounters an error, it should make *pzErrMsg ** point to an appropriate error message (obtained from [sqlite3_mprintf()]) ** and return an appropriate [error code]. ^SQLite ensures that *pzErrMsg ** is NULL before calling the xEntryPoint(). ^SQLite will invoke ** [sqlite3_free()] on *pzErrMsg after xEntryPoint() returns. ^If any ** xEntryPoint() returns an error, the [sqlite3_open()], [sqlite3_open16()], ** or [sqlite3_open_v2()] call that provoked the xEntryPoint() will fail. ** ** ^Calling sqlite3_auto_extension(X) with an entry point X that is already ** on the list of automatic extensions is a harmless no-op. ^No entry point ** will be called more than once for each database connection that is opened. ** ** See also: [sqlite3_reset_auto_extension()] ** and [sqlite3_cancel_auto_extension()] */ SQLITE_API int SQLITE_STDCALL sqlite3_auto_extension(void (*xEntryPoint)(void)); /* ** CAPI3REF: Cancel Automatic Extension Loading ** ** ^The [sqlite3_cancel_auto_extension(X)] interface unregisters the ** initialization routine X that was registered using a prior call to ** [sqlite3_auto_extension(X)]. ^The [sqlite3_cancel_auto_extension(X)] ** routine returns 1 if initialization routine X was successfully ** unregistered and it returns 0 if X was not on the list of initialization ** routines. */ SQLITE_API int SQLITE_STDCALL sqlite3_cancel_auto_extension(void (*xEntryPoint)(void)); /* ** CAPI3REF: Reset Automatic Extension Loading ** ** ^This interface disables all automatic extensions previously ** registered using [sqlite3_auto_extension()]. */ SQLITE_API void SQLITE_STDCALL sqlite3_reset_auto_extension(void); /* ** The interface to the virtual-table mechanism is currently considered ** to be experimental. The interface might change in incompatible ways. ** If this is a problem for you, do not use the interface at this time. ** ** When the virtual-table mechanism stabilizes, we will declare the ** interface fixed, support it indefinitely, and remove this comment. */ /* ** Structures used by the virtual table interface */ typedef struct sqlite3_vtab sqlite3_vtab; typedef struct sqlite3_index_info sqlite3_index_info; typedef struct sqlite3_vtab_cursor sqlite3_vtab_cursor; typedef struct sqlite3_module sqlite3_module; /* ** CAPI3REF: Virtual Table Object ** KEYWORDS: sqlite3_module {virtual table module} ** ** This structure, sometimes called a "virtual table module", ** defines the implementation of a [virtual tables]. ** This structure consists mostly of methods for the module. ** ** ^A virtual table module is created by filling in a persistent ** instance of this structure and passing a pointer to that instance ** to [sqlite3_create_module()] or [sqlite3_create_module_v2()]. ** ^The registration remains valid until it is replaced by a different ** module or until the [database connection] closes. The content ** of this structure must not change while it is registered with ** any database connection. 
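**
** For orientation only: assuming an application has filled in a static
** sqlite3_module object named myModule (hypothetical, as is pClientData),
** registration on a connection typically looks like:
**
**     int rc = sqlite3_create_module_v2(db, "mymodule", &myModule,
**                                       pClientData, 0);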
*/ struct sqlite3_module { int iVersion; int (*xCreate)(sqlite3*, void *pAux, int argc, const char *const*argv, sqlite3_vtab **ppVTab, char**); int (*xConnect)(sqlite3*, void *pAux, int argc, const char *const*argv, sqlite3_vtab **ppVTab, char**); int (*xBestIndex)(sqlite3_vtab *pVTab, sqlite3_index_info*); int (*xDisconnect)(sqlite3_vtab *pVTab); int (*xDestroy)(sqlite3_vtab *pVTab); int (*xOpen)(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor); int (*xClose)(sqlite3_vtab_cursor*); int (*xFilter)(sqlite3_vtab_cursor*, int idxNum, const char *idxStr, int argc, sqlite3_value **argv); int (*xNext)(sqlite3_vtab_cursor*); int (*xEof)(sqlite3_vtab_cursor*); int (*xColumn)(sqlite3_vtab_cursor*, sqlite3_context*, int); int (*xRowid)(sqlite3_vtab_cursor*, sqlite3_int64 *pRowid); int (*xUpdate)(sqlite3_vtab *, int, sqlite3_value **, sqlite3_int64 *); int (*xBegin)(sqlite3_vtab *pVTab); int (*xSync)(sqlite3_vtab *pVTab); int (*xCommit)(sqlite3_vtab *pVTab); int (*xRollback)(sqlite3_vtab *pVTab); int (*xFindFunction)(sqlite3_vtab *pVtab, int nArg, const char *zName, void (**pxFunc)(sqlite3_context*,int,sqlite3_value**), void **ppArg); int (*xRename)(sqlite3_vtab *pVtab, const char *zNew); /* The methods above are in version 1 of the sqlite_module object. Those ** below are for version 2 and greater. */ int (*xSavepoint)(sqlite3_vtab *pVTab, int); int (*xRelease)(sqlite3_vtab *pVTab, int); int (*xRollbackTo)(sqlite3_vtab *pVTab, int); }; /* ** CAPI3REF: Virtual Table Indexing Information ** KEYWORDS: sqlite3_index_info ** ** The sqlite3_index_info structure and its substructures is used as part ** of the [virtual table] interface to ** pass information into and receive the reply from the [xBestIndex] ** method of a [virtual table module]. The fields under **Inputs** are the ** inputs to xBestIndex and are read-only. xBestIndex inserts its ** results into the **Outputs** fields. ** ** ^(The aConstraint[] array records WHERE clause constraints of the form: ** **
column OP expr
** ** where OP is =, <, <=, >, or >=.)^ ^(The particular operator is ** stored in aConstraint[].op using one of the ** [SQLITE_INDEX_CONSTRAINT_EQ | SQLITE_INDEX_CONSTRAINT_ values].)^ ** ^(The index of the column is stored in ** aConstraint[].iColumn.)^ ^(aConstraint[].usable is TRUE if the ** expr on the right-hand side can be evaluated (and thus the constraint ** is usable) and false if it cannot.)^ ** ** ^The optimizer automatically inverts terms of the form "expr OP column" ** and makes other simplifications to the WHERE clause in an attempt to ** get as many WHERE clause terms into the form shown above as possible. ** ^The aConstraint[] array only reports WHERE clause terms that are ** relevant to the particular virtual table being queried. ** ** ^Information about the ORDER BY clause is stored in aOrderBy[]. ** ^Each term of aOrderBy records a column of the ORDER BY clause. ** ** The [xBestIndex] method must fill aConstraintUsage[] with information ** about what parameters to pass to xFilter. ^If argvIndex>0 then ** the right-hand side of the corresponding aConstraint[] is evaluated ** and becomes the argvIndex-th entry in argv. ^(If aConstraintUsage[].omit ** is true, then the constraint is assumed to be fully handled by the ** virtual table and is not checked again by SQLite.)^ ** ** ^The idxNum and idxPtr values are recorded and passed into the ** [xFilter] method. ** ^[sqlite3_free()] is used to free idxPtr if and only if ** needToFreeIdxPtr is true. ** ** ^The orderByConsumed means that output from [xFilter]/[xNext] will occur in ** the correct order to satisfy the ORDER BY clause so that no separate ** sorting step is required. ** ** ^The estimatedCost value is an estimate of the cost of a particular ** strategy. A cost of N indicates that the cost of the strategy is similar ** to a linear scan of an SQLite table with N rows. A cost of log(N) ** indicates that the expense of the operation is similar to that of a ** binary search on a unique indexed field of an SQLite table with N rows. ** ** ^The estimatedRows value is an estimate of the number of rows that ** will be returned by the strategy. ** ** IMPORTANT: The estimatedRows field was added to the sqlite3_index_info ** structure for SQLite version 3.8.2. If a virtual table extension is ** used with an SQLite version earlier than 3.8.2, the results of attempting ** to read or write the estimatedRows field are undefined (but are likely ** to included crashing the application). The estimatedRows field should ** therefore only be used if [sqlite3_libversion_number()] returns a ** value greater than or equal to 3008002. */ struct sqlite3_index_info { /* Inputs */ int nConstraint; /* Number of entries in aConstraint */ struct sqlite3_index_constraint { int iColumn; /* Column on left-hand side of constraint */ unsigned char op; /* Constraint operator */ unsigned char usable; /* True if this constraint is usable */ int iTermOffset; /* Used internally - xBestIndex should ignore */ } *aConstraint; /* Table of WHERE clause constraints */ int nOrderBy; /* Number of terms in the ORDER BY clause */ struct sqlite3_index_orderby { int iColumn; /* Column number */ unsigned char desc; /* True for DESC. False for ASC. 
*/ } *aOrderBy; /* The ORDER BY clause */ /* Outputs */ struct sqlite3_index_constraint_usage { int argvIndex; /* if >0, constraint is part of argv to xFilter */ unsigned char omit; /* Do not code a test for this constraint */ } *aConstraintUsage; int idxNum; /* Number used to identify the index */ char *idxStr; /* String, possibly obtained from sqlite3_malloc */ int needToFreeIdxStr; /* Free idxStr using sqlite3_free() if true */ int orderByConsumed; /* True if output is already ordered */ double estimatedCost; /* Estimated cost of using this index */ /* Fields below are only available in SQLite 3.8.2 and later */ sqlite3_int64 estimatedRows; /* Estimated number of rows returned */ }; /* ** CAPI3REF: Virtual Table Constraint Operator Codes ** ** These macros defined the allowed values for the ** [sqlite3_index_info].aConstraint[].op field. Each value represents ** an operator that is part of a constraint term in the wHERE clause of ** a query that uses a [virtual table]. */ #define SQLITE_INDEX_CONSTRAINT_EQ 2 #define SQLITE_INDEX_CONSTRAINT_GT 4 #define SQLITE_INDEX_CONSTRAINT_LE 8 #define SQLITE_INDEX_CONSTRAINT_LT 16 #define SQLITE_INDEX_CONSTRAINT_GE 32 #define SQLITE_INDEX_CONSTRAINT_MATCH 64 /* ** CAPI3REF: Register A Virtual Table Implementation ** METHOD: sqlite3 ** ** ^These routines are used to register a new [virtual table module] name. ** ^Module names must be registered before ** creating a new [virtual table] using the module and before using a ** preexisting [virtual table] for the module. ** ** ^The module name is registered on the [database connection] specified ** by the first parameter. ^The name of the module is given by the ** second parameter. ^The third parameter is a pointer to ** the implementation of the [virtual table module]. ^The fourth ** parameter is an arbitrary client data pointer that is passed through ** into the [xCreate] and [xConnect] methods of the virtual table module ** when a new virtual table is be being created or reinitialized. ** ** ^The sqlite3_create_module_v2() interface has a fifth parameter which ** is a pointer to a destructor for the pClientData. ^SQLite will ** invoke the destructor function (if it is not NULL) when SQLite ** no longer needs the pClientData pointer. ^The destructor will also ** be invoked if the call to sqlite3_create_module_v2() fails. ** ^The sqlite3_create_module() ** interface is equivalent to sqlite3_create_module_v2() with a NULL ** destructor. */ SQLITE_API int SQLITE_STDCALL sqlite3_create_module( sqlite3 *db, /* SQLite connection to register module with */ const char *zName, /* Name of the module */ const sqlite3_module *p, /* Methods for the module */ void *pClientData /* Client data for xCreate/xConnect */ ); SQLITE_API int SQLITE_STDCALL sqlite3_create_module_v2( sqlite3 *db, /* SQLite connection to register module with */ const char *zName, /* Name of the module */ const sqlite3_module *p, /* Methods for the module */ void *pClientData, /* Client data for xCreate/xConnect */ void(*xDestroy)(void*) /* Module destructor function */ ); /* ** CAPI3REF: Virtual Table Instance Object ** KEYWORDS: sqlite3_vtab ** ** Every [virtual table module] implementation uses a subclass ** of this object to describe a particular instance ** of the [virtual table]. Each subclass will ** be tailored to the specific needs of the module implementation. ** The purpose of this superclass is to define certain fields that are ** common to all module implementations. 
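**
** A typical (hypothetical) subclass places the base object first, so the
** same pointer can be returned to SQLite as an sqlite3_vtab* and cast back
** by the module; my_vtab and zState below merely stand in for
** module-specific fields:
**
**     typedef struct my_vtab my_vtab;
**     struct my_vtab {
**       sqlite3_vtab base;
**       char *zState;
**     };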
** ** ^Virtual tables methods can set an error message by assigning a ** string obtained from [sqlite3_mprintf()] to zErrMsg. The method should ** take care that any prior string is freed by a call to [sqlite3_free()] ** prior to assigning a new string to zErrMsg. ^After the error message ** is delivered up to the client application, the string will be automatically ** freed by sqlite3_free() and the zErrMsg field will be zeroed. */ struct sqlite3_vtab { const sqlite3_module *pModule; /* The module for this virtual table */ int nRef; /* Number of open cursors */ char *zErrMsg; /* Error message from sqlite3_mprintf() */ /* Virtual table implementations will typically add additional fields */ }; /* ** CAPI3REF: Virtual Table Cursor Object ** KEYWORDS: sqlite3_vtab_cursor {virtual table cursor} ** ** Every [virtual table module] implementation uses a subclass of the ** following structure to describe cursors that point into the ** [virtual table] and are used ** to loop through the virtual table. Cursors are created using the ** [sqlite3_module.xOpen | xOpen] method of the module and are destroyed ** by the [sqlite3_module.xClose | xClose] method. Cursors are used ** by the [xFilter], [xNext], [xEof], [xColumn], and [xRowid] methods ** of the module. Each module implementation will define ** the content of a cursor structure to suit its own needs. ** ** This superclass exists in order to define fields of the cursor that ** are common to all implementations. */ struct sqlite3_vtab_cursor { sqlite3_vtab *pVtab; /* Virtual table of this cursor */ /* Virtual table implementations will typically add additional fields */ }; /* ** CAPI3REF: Declare The Schema Of A Virtual Table ** ** ^The [xCreate] and [xConnect] methods of a ** [virtual table module] call this interface ** to declare the format (the names and datatypes of the columns) of ** the virtual tables they implement. */ SQLITE_API int SQLITE_STDCALL sqlite3_declare_vtab(sqlite3*, const char *zSQL); /* ** CAPI3REF: Overload A Function For A Virtual Table ** METHOD: sqlite3 ** ** ^(Virtual tables can provide alternative implementations of functions ** using the [xFindFunction] method of the [virtual table module]. ** But global versions of those functions ** must exist in order to be overloaded.)^ ** ** ^(This API makes sure a global version of a function with a particular ** name and number of parameters exists. If no such function exists ** before this API is called, a new function is created.)^ ^The implementation ** of the new function always causes an exception to be thrown. So ** the new function is not good for anything by itself. Its only ** purpose is to be a placeholder function that can be overloaded ** by a [virtual table]. */ SQLITE_API int SQLITE_STDCALL sqlite3_overload_function(sqlite3*, const char *zFuncName, int nArg); /* ** The interface to the virtual-table mechanism defined above (back up ** to a comment remarkably similar to this one) is currently considered ** to be experimental. The interface might change in incompatible ways. ** If this is a problem for you, do not use the interface at this time. ** ** When the virtual-table mechanism stabilizes, we will declare the ** interface fixed, support it indefinitely, and remove this comment. */ /* ** CAPI3REF: A Handle To An Open BLOB ** KEYWORDS: {BLOB handle} {BLOB handles} ** ** An instance of this object represents an open BLOB on which ** [sqlite3_blob_open | incremental BLOB I/O] can be performed. 
** ^Objects of this type are created by [sqlite3_blob_open()] ** and destroyed by [sqlite3_blob_close()]. ** ^The [sqlite3_blob_read()] and [sqlite3_blob_write()] interfaces ** can be used to read or write small subsections of the BLOB. ** ^The [sqlite3_blob_bytes()] interface returns the size of the BLOB in bytes. */ typedef struct sqlite3_blob sqlite3_blob; /* ** CAPI3REF: Open A BLOB For Incremental I/O ** METHOD: sqlite3 ** CONSTRUCTOR: sqlite3_blob ** ** ^(This interfaces opens a [BLOB handle | handle] to the BLOB located ** in row iRow, column zColumn, table zTable in database zDb; ** in other words, the same BLOB that would be selected by: ** **
**     SELECT zColumn FROM zDb.zTable WHERE [rowid] = iRow;
** 
)^ ** ** ^(Parameter zDb is not the filename that contains the database, but ** rather the symbolic name of the database. For attached databases, this is ** the name that appears after the AS keyword in the [ATTACH] statement. ** For the main database file, the database name is "main". For TEMP ** tables, the database name is "temp".)^ ** ** ^If the flags parameter is non-zero, then the BLOB is opened for read ** and write access. ^If the flags parameter is zero, the BLOB is opened for ** read-only access. ** ** ^(On success, [SQLITE_OK] is returned and the new [BLOB handle] is stored ** in *ppBlob. Otherwise an [error code] is returned and, unless the error ** code is SQLITE_MISUSE, *ppBlob is set to NULL.)^ ^This means that, provided ** the API is not misused, it is always safe to call [sqlite3_blob_close()] ** on *ppBlob after this function it returns. ** ** This function fails with SQLITE_ERROR if any of the following are true: **
**
**   • ^(Database zDb does not exist)^,
**   • ^(Table zTable does not exist within database zDb)^,
**   • ^(Table zTable is a WITHOUT ROWID table)^,
**   • ^(Column zColumn does not exist)^,
**   • ^(Row iRow is not present in the table)^,
**   • ^(The specified column of row iRow contains a value that is not
**     a TEXT or BLOB value)^,
**   • ^(Column zColumn is part of an index, PRIMARY KEY or UNIQUE
**     constraint and the blob is being opened for read/write access)^,
**   • ^([foreign key constraints | Foreign key constraints] are enabled,
**     column zColumn is part of a [child key] definition and the blob is
**     being opened for read/write access)^.
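**
** When none of these conditions apply, a [BLOB handle] is returned and can
** be used as in the following sketch, which reads an entire BLOB from a
** hypothetical table "images", column "data", row 42 (db is an open
** [database connection]; the table, column and rowid are illustrative only):
**
**     sqlite3_blob *pBlob = 0;
**     int rc = sqlite3_blob_open(db, "main", "images", "data", 42, 0, &pBlob);
**     if( rc==SQLITE_OK ){
**       int nByte = sqlite3_blob_bytes(pBlob);
**       unsigned char *aData = sqlite3_malloc(nByte);
**       if( aData ){
**         rc = sqlite3_blob_read(pBlob, aData, nByte, 0);
**         // ... use the nByte bytes of content in aData ...
**         sqlite3_free(aData);
**       }
**     }
**     sqlite3_blob_close(pBlob);       // harmless no-op if the open failed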
** ** ^Unless it returns SQLITE_MISUSE, this function sets the ** [database connection] error code and message accessible via ** [sqlite3_errcode()] and [sqlite3_errmsg()] and related functions. ** ** ** ^(If the row that a BLOB handle points to is modified by an ** [UPDATE], [DELETE], or by [ON CONFLICT] side-effects ** then the BLOB handle is marked as "expired". ** This is true if any column of the row is changed, even a column ** other than the one the BLOB handle is open on.)^ ** ^Calls to [sqlite3_blob_read()] and [sqlite3_blob_write()] for ** an expired BLOB handle fail with a return code of [SQLITE_ABORT]. ** ^(Changes written into a BLOB prior to the BLOB expiring are not ** rolled back by the expiration of the BLOB. Such changes will eventually ** commit if the transaction continues to completion.)^ ** ** ^Use the [sqlite3_blob_bytes()] interface to determine the size of ** the opened blob. ^The size of a blob may not be changed by this ** interface. Use the [UPDATE] SQL command to change the size of a ** blob. ** ** ^The [sqlite3_bind_zeroblob()] and [sqlite3_result_zeroblob()] interfaces ** and the built-in [zeroblob] SQL function may be used to create a ** zero-filled blob to read or write using the incremental-blob interface. ** ** To avoid a resource leak, every open [BLOB handle] should eventually ** be released by a call to [sqlite3_blob_close()]. */ SQLITE_API int SQLITE_STDCALL sqlite3_blob_open( sqlite3*, const char *zDb, const char *zTable, const char *zColumn, sqlite3_int64 iRow, int flags, sqlite3_blob **ppBlob ); /* ** CAPI3REF: Move a BLOB Handle to a New Row ** METHOD: sqlite3_blob ** ** ^This function is used to move an existing blob handle so that it points ** to a different row of the same database table. ^The new row is identified ** by the rowid value passed as the second argument. Only the row can be ** changed. ^The database, table and column on which the blob handle is open ** remain the same. Moving an existing blob handle to a new row can be ** faster than closing the existing handle and opening a new one. ** ** ^(The new row must meet the same criteria as for [sqlite3_blob_open()] - ** it must exist and there must be either a blob or text value stored in ** the nominated column.)^ ^If the new row is not present in the table, or if ** it does not contain a blob or text value, or if another error occurs, an ** SQLite error code is returned and the blob handle is considered aborted. ** ^All subsequent calls to [sqlite3_blob_read()], [sqlite3_blob_write()] or ** [sqlite3_blob_reopen()] on an aborted blob handle immediately return ** SQLITE_ABORT. ^Calling [sqlite3_blob_bytes()] on an aborted blob handle ** always returns zero. ** ** ^This function sets the database handle error code and message. */ SQLITE_API int SQLITE_STDCALL sqlite3_blob_reopen(sqlite3_blob *, sqlite3_int64); /* ** CAPI3REF: Close A BLOB Handle ** DESTRUCTOR: sqlite3_blob ** ** ^This function closes an open [BLOB handle]. ^(The BLOB handle is closed ** unconditionally. Even if this routine returns an error code, the ** handle is still closed.)^ ** ** ^If the blob handle being closed was opened for read-write access, and if ** the database is in auto-commit mode and there are no other open read-write ** blob handles or active write statements, the current transaction is ** committed. ^If an error occurs while committing the transaction, an error ** code is returned and the transaction rolled back. 
** ** Calling this function with an argument that is not a NULL pointer or an ** open blob handle results in undefined behaviour. ^Calling this routine ** with a null pointer (such as would be returned by a failed call to ** [sqlite3_blob_open()]) is a harmless no-op. ^Otherwise, if this function ** is passed a valid open blob handle, the values returned by the ** sqlite3_errcode() and sqlite3_errmsg() functions are set before returning. */ SQLITE_API int SQLITE_STDCALL sqlite3_blob_close(sqlite3_blob *); /* ** CAPI3REF: Return The Size Of An Open BLOB ** METHOD: sqlite3_blob ** ** ^Returns the size in bytes of the BLOB accessible via the ** successfully opened [BLOB handle] in its only argument. ^The ** incremental blob I/O routines can only read or overwriting existing ** blob content; they cannot change the size of a blob. ** ** This routine only works on a [BLOB handle] which has been created ** by a prior successful call to [sqlite3_blob_open()] and which has not ** been closed by [sqlite3_blob_close()]. Passing any other pointer in ** to this routine results in undefined and probably undesirable behavior. */ SQLITE_API int SQLITE_STDCALL sqlite3_blob_bytes(sqlite3_blob *); /* ** CAPI3REF: Read Data From A BLOB Incrementally ** METHOD: sqlite3_blob ** ** ^(This function is used to read data from an open [BLOB handle] into a ** caller-supplied buffer. N bytes of data are copied into buffer Z ** from the open BLOB, starting at offset iOffset.)^ ** ** ^If offset iOffset is less than N bytes from the end of the BLOB, ** [SQLITE_ERROR] is returned and no data is read. ^If N or iOffset is ** less than zero, [SQLITE_ERROR] is returned and no data is read. ** ^The size of the blob (and hence the maximum value of N+iOffset) ** can be determined using the [sqlite3_blob_bytes()] interface. ** ** ^An attempt to read from an expired [BLOB handle] fails with an ** error code of [SQLITE_ABORT]. ** ** ^(On success, sqlite3_blob_read() returns SQLITE_OK. ** Otherwise, an [error code] or an [extended error code] is returned.)^ ** ** This routine only works on a [BLOB handle] which has been created ** by a prior successful call to [sqlite3_blob_open()] and which has not ** been closed by [sqlite3_blob_close()]. Passing any other pointer in ** to this routine results in undefined and probably undesirable behavior. ** ** See also: [sqlite3_blob_write()]. */ SQLITE_API int SQLITE_STDCALL sqlite3_blob_read(sqlite3_blob *, void *Z, int N, int iOffset); /* ** CAPI3REF: Write Data Into A BLOB Incrementally ** METHOD: sqlite3_blob ** ** ^(This function is used to write data into an open [BLOB handle] from a ** caller-supplied buffer. N bytes of data are copied from the buffer Z ** into the open BLOB, starting at offset iOffset.)^ ** ** ^(On success, sqlite3_blob_write() returns SQLITE_OK. ** Otherwise, an [error code] or an [extended error code] is returned.)^ ** ^Unless SQLITE_MISUSE is returned, this function sets the ** [database connection] error code and message accessible via ** [sqlite3_errcode()] and [sqlite3_errmsg()] and related functions. ** ** ^If the [BLOB handle] passed as the first argument was not opened for ** writing (the flags parameter to [sqlite3_blob_open()] was zero), ** this function returns [SQLITE_READONLY]. ** ** This function may only modify the contents of the BLOB; it is ** not possible to increase the size of a BLOB using this API. ** ^If offset iOffset is less than N bytes from the end of the BLOB, ** [SQLITE_ERROR] is returned and no data is written. 
The size of the ** BLOB (and hence the maximum value of N+iOffset) can be determined ** using the [sqlite3_blob_bytes()] interface. ^If N or iOffset are less ** than zero [SQLITE_ERROR] is returned and no data is written. ** ** ^An attempt to write to an expired [BLOB handle] fails with an ** error code of [SQLITE_ABORT]. ^Writes to the BLOB that occurred ** before the [BLOB handle] expired are not rolled back by the ** expiration of the handle, though of course those changes might ** have been overwritten by the statement that expired the BLOB handle ** or by other independent statements. ** ** This routine only works on a [BLOB handle] which has been created ** by a prior successful call to [sqlite3_blob_open()] and which has not ** been closed by [sqlite3_blob_close()]. Passing any other pointer in ** to this routine results in undefined and probably undesirable behavior. ** ** See also: [sqlite3_blob_read()]. */ SQLITE_API int SQLITE_STDCALL sqlite3_blob_write(sqlite3_blob *, const void *z, int n, int iOffset); /* ** CAPI3REF: Virtual File System Objects ** ** A virtual filesystem (VFS) is an [sqlite3_vfs] object ** that SQLite uses to interact ** with the underlying operating system. Most SQLite builds come with a ** single default VFS that is appropriate for the host computer. ** New VFSes can be registered and existing VFSes can be unregistered. ** The following interfaces are provided. ** ** ^The sqlite3_vfs_find() interface returns a pointer to a VFS given its name. ** ^Names are case sensitive. ** ^Names are zero-terminated UTF-8 strings. ** ^If there is no match, a NULL pointer is returned. ** ^If zVfsName is NULL then the default VFS is returned. ** ** ^New VFSes are registered with sqlite3_vfs_register(). ** ^Each new VFS becomes the default VFS if the makeDflt flag is set. ** ^The same VFS can be registered multiple times without injury. ** ^To make an existing VFS into the default VFS, register it again ** with the makeDflt flag set. If two different VFSes with the ** same name are registered, the behavior is undefined. If a ** VFS is registered with a name that is NULL or an empty string, ** then the behavior is undefined. ** ** ^Unregister a VFS with the sqlite3_vfs_unregister() interface. ** ^(If the default VFS is unregistered, another VFS is chosen as ** the default. The choice for the new VFS is arbitrary.)^ */ SQLITE_API sqlite3_vfs *SQLITE_STDCALL sqlite3_vfs_find(const char *zVfsName); SQLITE_API int SQLITE_STDCALL sqlite3_vfs_register(sqlite3_vfs*, int makeDflt); SQLITE_API int SQLITE_STDCALL sqlite3_vfs_unregister(sqlite3_vfs*); /* ** CAPI3REF: Mutexes ** ** The SQLite core uses these routines for thread ** synchronization. Though they are intended for internal ** use by SQLite, code that links against SQLite is ** permitted to use any of these routines. ** ** The SQLite source code contains multiple implementations ** of these mutex routines. An appropriate implementation ** is selected automatically at compile-time. The following ** implementations are available in the SQLite core: ** **
**
**   • SQLITE_MUTEX_PTHREADS
**   • SQLITE_MUTEX_W32
**   • SQLITE_MUTEX_NOOP
** ** The SQLITE_MUTEX_NOOP implementation is a set of routines ** that does no real locking and is appropriate for use in ** a single-threaded application. The SQLITE_MUTEX_PTHREADS and ** SQLITE_MUTEX_W32 implementations are appropriate for use on Unix ** and Windows. ** ** If SQLite is compiled with the SQLITE_MUTEX_APPDEF preprocessor ** macro defined (with "-DSQLITE_MUTEX_APPDEF=1"), then no mutex ** implementation is included with the library. In this case the ** application must supply a custom mutex implementation using the ** [SQLITE_CONFIG_MUTEX] option of the sqlite3_config() function ** before calling sqlite3_initialize() or any other public sqlite3_ ** function that calls sqlite3_initialize(). ** ** ^The sqlite3_mutex_alloc() routine allocates a new ** mutex and returns a pointer to it. ^The sqlite3_mutex_alloc() ** routine returns NULL if it is unable to allocate the requested ** mutex. The argument to sqlite3_mutex_alloc() must one of these ** integer constants: ** **
**
**   • SQLITE_MUTEX_FAST
**   • SQLITE_MUTEX_RECURSIVE
**   • SQLITE_MUTEX_STATIC_MASTER
**   • SQLITE_MUTEX_STATIC_MEM
**   • SQLITE_MUTEX_STATIC_OPEN
**   • SQLITE_MUTEX_STATIC_PRNG
**   • SQLITE_MUTEX_STATIC_LRU
**   • SQLITE_MUTEX_STATIC_PMEM
**   • SQLITE_MUTEX_STATIC_APP1
**   • SQLITE_MUTEX_STATIC_APP2
**   • SQLITE_MUTEX_STATIC_APP3
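**
** As an illustration of the dynamic mutex types, an application might
** protect one of its own shared data structures as follows (a sketch;
** the variable name is illustrative only):
**
**     sqlite3_mutex *pMutex = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST);
**     if( pMutex ){
**       sqlite3_mutex_enter(pMutex);
**       // ... access the shared resource ...
**       sqlite3_mutex_leave(pMutex);
**       sqlite3_mutex_free(pMutex);
**     }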
** ** ^The first two constants (SQLITE_MUTEX_FAST and SQLITE_MUTEX_RECURSIVE) ** cause sqlite3_mutex_alloc() to create ** a new mutex. ^The new mutex is recursive when SQLITE_MUTEX_RECURSIVE ** is used but not necessarily so when SQLITE_MUTEX_FAST is used. ** The mutex implementation does not need to make a distinction ** between SQLITE_MUTEX_RECURSIVE and SQLITE_MUTEX_FAST if it does ** not want to. SQLite will only request a recursive mutex in ** cases where it really needs one. If a faster non-recursive mutex ** implementation is available on the host platform, the mutex subsystem ** might return such a mutex in response to SQLITE_MUTEX_FAST. ** ** ^The other allowed parameters to sqlite3_mutex_alloc() (anything other ** than SQLITE_MUTEX_FAST and SQLITE_MUTEX_RECURSIVE) each return ** a pointer to a static preexisting mutex. ^Nine static mutexes are ** used by the current version of SQLite. Future versions of SQLite ** may add additional static mutexes. Static mutexes are for internal ** use by SQLite only. Applications that use SQLite mutexes should ** use only the dynamic mutexes returned by SQLITE_MUTEX_FAST or ** SQLITE_MUTEX_RECURSIVE. ** ** ^Note that if one of the dynamic mutex parameters (SQLITE_MUTEX_FAST ** or SQLITE_MUTEX_RECURSIVE) is used then sqlite3_mutex_alloc() ** returns a different mutex on every call. ^For the static ** mutex types, the same mutex is returned on every call that has ** the same type number. ** ** ^The sqlite3_mutex_free() routine deallocates a previously ** allocated dynamic mutex. Attempting to deallocate a static ** mutex results in undefined behavior. ** ** ^The sqlite3_mutex_enter() and sqlite3_mutex_try() routines attempt ** to enter a mutex. ^If another thread is already within the mutex, ** sqlite3_mutex_enter() will block and sqlite3_mutex_try() will return ** SQLITE_BUSY. ^The sqlite3_mutex_try() interface returns [SQLITE_OK] ** upon successful entry. ^(Mutexes created using ** SQLITE_MUTEX_RECURSIVE can be entered multiple times by the same thread. ** In such cases, the ** mutex must be exited an equal number of times before another thread ** can enter.)^ If the same thread tries to enter any mutex other ** than an SQLITE_MUTEX_RECURSIVE more than once, the behavior is undefined. ** ** ^(Some systems (for example, Windows 95) do not support the operation ** implemented by sqlite3_mutex_try(). On those systems, sqlite3_mutex_try() ** will always return SQLITE_BUSY. The SQLite core only ever uses ** sqlite3_mutex_try() as an optimization so this is acceptable ** behavior.)^ ** ** ^The sqlite3_mutex_leave() routine exits a mutex that was ** previously entered by the same thread. The behavior ** is undefined if the mutex is not currently entered by the ** calling thread or is not currently allocated. ** ** ^If the argument to sqlite3_mutex_enter(), sqlite3_mutex_try(), or ** sqlite3_mutex_leave() is a NULL pointer, then all three routines ** behave as no-ops. ** ** See also: [sqlite3_mutex_held()] and [sqlite3_mutex_notheld()]. */ SQLITE_API sqlite3_mutex *SQLITE_STDCALL sqlite3_mutex_alloc(int); SQLITE_API void SQLITE_STDCALL sqlite3_mutex_free(sqlite3_mutex*); SQLITE_API void SQLITE_STDCALL sqlite3_mutex_enter(sqlite3_mutex*); SQLITE_API int SQLITE_STDCALL sqlite3_mutex_try(sqlite3_mutex*); SQLITE_API void SQLITE_STDCALL sqlite3_mutex_leave(sqlite3_mutex*); /* ** CAPI3REF: Mutex Methods Object ** ** An instance of this structure defines the low-level routines ** used to allocate and use mutexes. 
** ** Usually, the default mutex implementations provided by SQLite are ** sufficient, however the application has the option of substituting a custom ** implementation for specialized deployments or systems for which SQLite ** does not provide a suitable implementation. In this case, the application ** creates and populates an instance of this structure to pass ** to sqlite3_config() along with the [SQLITE_CONFIG_MUTEX] option. ** Additionally, an instance of this structure can be used as an ** output variable when querying the system for the current mutex ** implementation, using the [SQLITE_CONFIG_GETMUTEX] option. ** ** ^The xMutexInit method defined by this structure is invoked as ** part of system initialization by the sqlite3_initialize() function. ** ^The xMutexInit routine is called by SQLite exactly once for each ** effective call to [sqlite3_initialize()]. ** ** ^The xMutexEnd method defined by this structure is invoked as ** part of system shutdown by the sqlite3_shutdown() function. The ** implementation of this method is expected to release all outstanding ** resources obtained by the mutex methods implementation, especially ** those obtained by the xMutexInit method. ^The xMutexEnd() ** interface is invoked exactly once for each call to [sqlite3_shutdown()]. ** ** ^(The remaining seven methods defined by this structure (xMutexAlloc, ** xMutexFree, xMutexEnter, xMutexTry, xMutexLeave, xMutexHeld and ** xMutexNotheld) implement the following interfaces (respectively): ** **
**
**   • [sqlite3_mutex_alloc()]
**   • [sqlite3_mutex_free()]
**   • [sqlite3_mutex_enter()]
**   • [sqlite3_mutex_try()]
**   • [sqlite3_mutex_leave()]
**   • [sqlite3_mutex_held()]
**   • [sqlite3_mutex_notheld()]
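**
** As an illustration, an application-defined implementation of these
** methods might be installed before [sqlite3_initialize()] as follows
** (a sketch; the myMutex* functions are hypothetical application code):
**
**     static sqlite3_mutex_methods myMutexMethods = {
**       myMutexInit,    myMutexEnd,
**       myMutexAlloc,   myMutexFree,
**       myMutexEnter,   myMutexTry,    myMutexLeave,
**       myMutexHeld,    myMutexNotheld
**     };
**     int rc = sqlite3_config(SQLITE_CONFIG_MUTEX, &myMutexMethods);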
)^ ** ** The only difference is that the public sqlite3_XXX functions enumerated ** above silently ignore any invocations that pass a NULL pointer instead ** of a valid mutex handle. The implementations of the methods defined ** by this structure are not required to handle this case, the results ** of passing a NULL pointer instead of a valid mutex handle are undefined ** (i.e. it is acceptable to provide an implementation that segfaults if ** it is passed a NULL pointer). ** ** The xMutexInit() method must be threadsafe. It must be harmless to ** invoke xMutexInit() multiple times within the same process and without ** intervening calls to xMutexEnd(). Second and subsequent calls to ** xMutexInit() must be no-ops. ** ** xMutexInit() must not use SQLite memory allocation ([sqlite3_malloc()] ** and its associates). Similarly, xMutexAlloc() must not use SQLite memory ** allocation for a static mutex. ^However xMutexAlloc() may use SQLite ** memory allocation for a fast or recursive mutex. ** ** ^SQLite will invoke the xMutexEnd() method when [sqlite3_shutdown()] is ** called, but only if the prior call to xMutexInit returned SQLITE_OK. ** If xMutexInit fails in any way, it is expected to clean up after itself ** prior to returning. */ typedef struct sqlite3_mutex_methods sqlite3_mutex_methods; struct sqlite3_mutex_methods { int (*xMutexInit)(void); int (*xMutexEnd)(void); sqlite3_mutex *(*xMutexAlloc)(int); void (*xMutexFree)(sqlite3_mutex *); void (*xMutexEnter)(sqlite3_mutex *); int (*xMutexTry)(sqlite3_mutex *); void (*xMutexLeave)(sqlite3_mutex *); int (*xMutexHeld)(sqlite3_mutex *); int (*xMutexNotheld)(sqlite3_mutex *); }; /* ** CAPI3REF: Mutex Verification Routines ** ** The sqlite3_mutex_held() and sqlite3_mutex_notheld() routines ** are intended for use inside assert() statements. The SQLite core ** never uses these routines except inside an assert() and applications ** are advised to follow the lead of the core. The SQLite core only ** provides implementations for these routines when it is compiled ** with the SQLITE_DEBUG flag. External mutex implementations ** are only required to provide these routines if SQLITE_DEBUG is ** defined and if NDEBUG is not defined. ** ** These routines should return true if the mutex in their argument ** is held or not held, respectively, by the calling thread. ** ** The implementation is not required to provide versions of these ** routines that actually work. If the implementation does not provide working ** versions of these routines, it should at least provide stubs that always ** return true so that one does not get spurious assertion failures. ** ** If the argument to sqlite3_mutex_held() is a NULL pointer then ** the routine should return 1. This seems counter-intuitive since ** clearly the mutex cannot be held if it does not exist. But ** the reason the mutex does not exist is because the build is not ** using mutexes. And we do not want the assert() containing the ** call to sqlite3_mutex_held() to fail, so a non-zero return is ** the appropriate thing to do. The sqlite3_mutex_notheld() ** interface should also return 1 when given a NULL pointer. */ #ifndef NDEBUG SQLITE_API int SQLITE_STDCALL sqlite3_mutex_held(sqlite3_mutex*); SQLITE_API int SQLITE_STDCALL sqlite3_mutex_notheld(sqlite3_mutex*); #endif /* ** CAPI3REF: Mutex Types ** ** The [sqlite3_mutex_alloc()] interface takes a single argument ** which is one of these integer constants. ** ** The set of static mutexes may change from one SQLite release to the ** next. 
Applications that override the built-in mutex logic must be ** prepared to accommodate additional static mutexes. */ #define SQLITE_MUTEX_FAST 0 #define SQLITE_MUTEX_RECURSIVE 1 #define SQLITE_MUTEX_STATIC_MASTER 2 #define SQLITE_MUTEX_STATIC_MEM 3 /* sqlite3_malloc() */ #define SQLITE_MUTEX_STATIC_MEM2 4 /* NOT USED */ #define SQLITE_MUTEX_STATIC_OPEN 4 /* sqlite3BtreeOpen() */ #define SQLITE_MUTEX_STATIC_PRNG 5 /* sqlite3_random() */ #define SQLITE_MUTEX_STATIC_LRU 6 /* lru page list */ #define SQLITE_MUTEX_STATIC_LRU2 7 /* NOT USED */ #define SQLITE_MUTEX_STATIC_PMEM 7 /* sqlite3PageMalloc() */ #define SQLITE_MUTEX_STATIC_APP1 8 /* For use by application */ #define SQLITE_MUTEX_STATIC_APP2 9 /* For use by application */ #define SQLITE_MUTEX_STATIC_APP3 10 /* For use by application */ #define SQLITE_MUTEX_STATIC_VFS1 11 /* For use by built-in VFS */ #define SQLITE_MUTEX_STATIC_VFS2 12 /* For use by extension VFS */ #define SQLITE_MUTEX_STATIC_VFS3 13 /* For use by application VFS */ /* ** CAPI3REF: Retrieve the mutex for a database connection ** METHOD: sqlite3 ** ** ^This interface returns a pointer the [sqlite3_mutex] object that ** serializes access to the [database connection] given in the argument ** when the [threading mode] is Serialized. ** ^If the [threading mode] is Single-thread or Multi-thread then this ** routine returns a NULL pointer. */ SQLITE_API sqlite3_mutex *SQLITE_STDCALL sqlite3_db_mutex(sqlite3*); /* ** CAPI3REF: Low-Level Control Of Database Files ** METHOD: sqlite3 ** ** ^The [sqlite3_file_control()] interface makes a direct call to the ** xFileControl method for the [sqlite3_io_methods] object associated ** with a particular database identified by the second argument. ^The ** name of the database is "main" for the main database or "temp" for the ** TEMP database, or the name that appears after the AS keyword for ** databases that are added using the [ATTACH] SQL command. ** ^A NULL pointer can be used in place of "main" to refer to the ** main database file. ** ^The third and fourth parameters to this routine ** are passed directly through to the second and third parameters of ** the xFileControl method. ^The return value of the xFileControl ** method becomes the return value of this routine. ** ** ^The SQLITE_FCNTL_FILE_POINTER value for the op parameter causes ** a pointer to the underlying [sqlite3_file] object to be written into ** the space pointed to by the 4th parameter. ^The SQLITE_FCNTL_FILE_POINTER ** case is a short-circuit path which does not actually invoke the ** underlying sqlite3_io_methods.xFileControl method. ** ** ^If the second parameter (zDbName) does not match the name of any ** open database file, then SQLITE_ERROR is returned. ^This error ** code is not remembered and will not be recalled by [sqlite3_errcode()] ** or [sqlite3_errmsg()]. The underlying xFileControl method might ** also return SQLITE_ERROR. There is no way to distinguish between ** an incorrect zDbName and an SQLITE_ERROR return from the underlying ** xFileControl method. ** ** See also: [SQLITE_FCNTL_LOCKSTATE] */ SQLITE_API int SQLITE_STDCALL sqlite3_file_control(sqlite3*, const char *zDbName, int op, void*); /* ** CAPI3REF: Testing Interface ** ** ^The sqlite3_test_control() interface is used to read out internal ** state of SQLite and to inject faults into SQLite for testing ** purposes. ^The first parameter is an operation code that determines ** the number, meaning, and operation of all subsequent parameters. ** ** This interface is not for use by applications. 
It exists solely ** for verifying the correct operation of the SQLite library. Depending ** on how the SQLite library is compiled, this interface might not exist. ** ** The details of the operation codes, their meanings, the parameters ** they take, and what they do are all subject to change without notice. ** Unlike most of the SQLite API, this function is not guaranteed to ** operate consistently from one release to the next. */ SQLITE_API int SQLITE_CDECL sqlite3_test_control(int op, ...); /* ** CAPI3REF: Testing Interface Operation Codes ** ** These constants are the valid operation code parameters used ** as the first argument to [sqlite3_test_control()]. ** ** These parameters and their meanings are subject to change ** without notice. These values are for testing purposes only. ** Applications should not use any of these parameters or the ** [sqlite3_test_control()] interface. */ #define SQLITE_TESTCTRL_FIRST 5 #define SQLITE_TESTCTRL_PRNG_SAVE 5 #define SQLITE_TESTCTRL_PRNG_RESTORE 6 #define SQLITE_TESTCTRL_PRNG_RESET 7 #define SQLITE_TESTCTRL_BITVEC_TEST 8 #define SQLITE_TESTCTRL_FAULT_INSTALL 9 #define SQLITE_TESTCTRL_BENIGN_MALLOC_HOOKS 10 #define SQLITE_TESTCTRL_PENDING_BYTE 11 #define SQLITE_TESTCTRL_ASSERT 12 #define SQLITE_TESTCTRL_ALWAYS 13 #define SQLITE_TESTCTRL_RESERVE 14 #define SQLITE_TESTCTRL_OPTIMIZATIONS 15 #define SQLITE_TESTCTRL_ISKEYWORD 16 #define SQLITE_TESTCTRL_SCRATCHMALLOC 17 #define SQLITE_TESTCTRL_LOCALTIME_FAULT 18 #define SQLITE_TESTCTRL_EXPLAIN_STMT 19 /* NOT USED */ #define SQLITE_TESTCTRL_NEVER_CORRUPT 20 #define SQLITE_TESTCTRL_VDBE_COVERAGE 21 #define SQLITE_TESTCTRL_BYTEORDER 22 #define SQLITE_TESTCTRL_ISINIT 23 #define SQLITE_TESTCTRL_SORTER_MMAP 24 #define SQLITE_TESTCTRL_IMPOSTER 25 #define SQLITE_TESTCTRL_LAST 25 /* ** CAPI3REF: SQLite Runtime Status ** ** ^These interfaces are used to retrieve runtime status information ** about the performance of SQLite, and optionally to reset various ** highwater marks. ^The first argument is an integer code for ** the specific parameter to measure. ^(Recognized integer codes ** are of the form [status parameters | SQLITE_STATUS_...].)^ ** ^The current value of the parameter is returned into *pCurrent. ** ^The highest recorded value is returned in *pHighwater. ^If the ** resetFlag is true, then the highest record value is reset after ** *pHighwater is written. ^(Some parameters do not record the highest ** value. For those parameters ** nothing is written into *pHighwater and the resetFlag is ignored.)^ ** ^(Other parameters record only the highwater mark and not the current ** value. For these latter parameters nothing is written into *pCurrent.)^ ** ** ^The sqlite3_status() and sqlite3_status64() routines return ** SQLITE_OK on success and a non-zero [error code] on failure. ** ** If either the current value or the highwater mark is too large to ** be represented by a 32-bit integer, then the values returned by ** sqlite3_status() are undefined. ** ** See also: [sqlite3_db_status()] */ SQLITE_API int SQLITE_STDCALL sqlite3_status(int op, int *pCurrent, int *pHighwater, int resetFlag); SQLITE_API int SQLITE_STDCALL sqlite3_status64( int op, sqlite3_int64 *pCurrent, sqlite3_int64 *pHighwater, int resetFlag ); /* ** CAPI3REF: Status Parameters ** KEYWORDS: {status parameters} ** ** These integer constants designate various run-time status parameters ** that can be returned by [sqlite3_status()]. ** **
** [[SQLITE_STATUS_MEMORY_USED]] ^(SQLITE_STATUS_MEMORY_USED
**   This parameter is the current amount of memory checked out
**   using [sqlite3_malloc()], either directly or indirectly.  The
**   figure includes calls made to [sqlite3_malloc()] by the application
**   and internal memory usage by the SQLite library.  Scratch memory
**   controlled by [SQLITE_CONFIG_SCRATCH] and auxiliary page-cache
**   memory controlled by [SQLITE_CONFIG_PAGECACHE] is not included in
**   this parameter.  The amount returned is the sum of the allocation
**   sizes as reported by the xSize method in [sqlite3_mem_methods].)^
**
** [[SQLITE_STATUS_MALLOC_SIZE]] ^(SQLITE_STATUS_MALLOC_SIZE
**   This parameter records the largest memory allocation request
**   handed to [sqlite3_malloc()] or [sqlite3_realloc()] (or their
**   internal equivalents).  Only the value returned in the
**   *pHighwater parameter to [sqlite3_status()] is of interest.
**   The value written into the *pCurrent parameter is undefined.)^
**
** [[SQLITE_STATUS_MALLOC_COUNT]] ^(SQLITE_STATUS_MALLOC_COUNT
**   This parameter records the number of separate memory allocations
**   currently checked out.)^
**
** [[SQLITE_STATUS_PAGECACHE_USED]] ^(SQLITE_STATUS_PAGECACHE_USED
**   This parameter returns the number of pages used out of the
**   [pagecache memory allocator] that was configured using
**   [SQLITE_CONFIG_PAGECACHE].  The value returned is in pages, not
**   in bytes.)^
**
** [[SQLITE_STATUS_PAGECACHE_OVERFLOW]] ^(SQLITE_STATUS_PAGECACHE_OVERFLOW
**   This parameter returns the number of bytes of page cache
**   allocation which could not be satisfied by the [SQLITE_CONFIG_PAGECACHE]
**   buffer and were forced to overflow to [sqlite3_malloc()].  The
**   returned value includes allocations that overflowed because they
**   were too large (they were larger than the "sz" parameter to
**   [SQLITE_CONFIG_PAGECACHE]) and allocations that overflowed because
**   no space was left in the page cache.)^
**
** [[SQLITE_STATUS_PAGECACHE_SIZE]] ^(SQLITE_STATUS_PAGECACHE_SIZE
**   This parameter records the largest memory allocation request
**   handed to the [pagecache memory allocator].  Only the value returned
**   in the *pHighwater parameter to [sqlite3_status()] is of interest.
**   The value written into the *pCurrent parameter is undefined.)^
**
** [[SQLITE_STATUS_SCRATCH_USED]] ^(SQLITE_STATUS_SCRATCH_USED
**   This parameter returns the number of allocations used out of the
**   [scratch memory allocator] configured using
**   [SQLITE_CONFIG_SCRATCH].  The value returned is in allocations, not
**   in bytes.  Since a single thread may only have one scratch allocation
**   outstanding at a time, this parameter also reports the number of
**   threads using scratch memory at the same time.)^
**
** [[SQLITE_STATUS_SCRATCH_OVERFLOW]] ^(SQLITE_STATUS_SCRATCH_OVERFLOW
**   This parameter returns the number of bytes of scratch memory
**   allocation which could not be satisfied by the [SQLITE_CONFIG_SCRATCH]
**   buffer and were forced to overflow to [sqlite3_malloc()].  The values
**   returned include overflows because the requested allocation was too
**   large (that is, because the requested allocation was larger than the
**   "sz" parameter to [SQLITE_CONFIG_SCRATCH]) and because no scratch
**   buffer slots were available.)^
**
** [[SQLITE_STATUS_SCRATCH_SIZE]] ^(SQLITE_STATUS_SCRATCH_SIZE
**   This parameter records the largest memory allocation request
**   handed to the [scratch memory allocator].  Only the value returned
**   in the *pHighwater parameter to [sqlite3_status()] is of interest.
**   The value written into the *pCurrent parameter is undefined.)^
**
** [[SQLITE_STATUS_PARSER_STACK]] ^(SQLITE_STATUS_PARSER_STACK
**   This parameter records the deepest parser stack.  It is only
**   meaningful if SQLite is compiled with [YYTRACKMAXSTACKDEPTH].)^
**
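** For example, the current and peak amounts of heap memory checked out by
** SQLite can be read as follows (a sketch; the variable names are
** illustrative only):
**
**     int curMem = 0, hiMem = 0;
**     int rc = sqlite3_status(SQLITE_STATUS_MEMORY_USED, &curMem, &hiMem, 0);
**     // on SQLITE_OK, curMem holds the current allocation total in bytes
**     // and hiMem holds the high-water mark; passing a true resetFlag
**     // would reset the high-water mark down to curMem
**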
** ** New status parameters may be added from time to time. */ #define SQLITE_STATUS_MEMORY_USED 0 #define SQLITE_STATUS_PAGECACHE_USED 1 #define SQLITE_STATUS_PAGECACHE_OVERFLOW 2 #define SQLITE_STATUS_SCRATCH_USED 3 #define SQLITE_STATUS_SCRATCH_OVERFLOW 4 #define SQLITE_STATUS_MALLOC_SIZE 5 #define SQLITE_STATUS_PARSER_STACK 6 #define SQLITE_STATUS_PAGECACHE_SIZE 7 #define SQLITE_STATUS_SCRATCH_SIZE 8 #define SQLITE_STATUS_MALLOC_COUNT 9 /* ** CAPI3REF: Database Connection Status ** METHOD: sqlite3 ** ** ^This interface is used to retrieve runtime status information ** about a single [database connection]. ^The first argument is the ** database connection object to be interrogated. ^The second argument ** is an integer constant, taken from the set of ** [SQLITE_DBSTATUS options], that ** determines the parameter to interrogate. The set of ** [SQLITE_DBSTATUS options] is likely ** to grow in future releases of SQLite. ** ** ^The current value of the requested parameter is written into *pCur ** and the highest instantaneous value is written into *pHiwtr. ^If ** the resetFlg is true, then the highest instantaneous value is ** reset back down to the current value. ** ** ^The sqlite3_db_status() routine returns SQLITE_OK on success and a ** non-zero [error code] on failure. ** ** See also: [sqlite3_status()] and [sqlite3_stmt_status()]. */ SQLITE_API int SQLITE_STDCALL sqlite3_db_status(sqlite3*, int op, int *pCur, int *pHiwtr, int resetFlg); /* ** CAPI3REF: Status Parameters for database connections ** KEYWORDS: {SQLITE_DBSTATUS options} ** ** These constants are the available integer "verbs" that can be passed as ** the second argument to the [sqlite3_db_status()] interface. ** ** New verbs may be added in future releases of SQLite. Existing verbs ** might be discontinued. Applications should check the return code from ** [sqlite3_db_status()] to make sure that the call worked. ** The [sqlite3_db_status()] interface will return a non-zero error code ** if a discontinued or unsupported verb is invoked. ** **
** [[SQLITE_DBSTATUS_LOOKASIDE_USED]] ^(SQLITE_DBSTATUS_LOOKASIDE_USED
**   This parameter returns the number of lookaside memory slots currently
**   checked out.)^
**
** [[SQLITE_DBSTATUS_LOOKASIDE_HIT]] ^(SQLITE_DBSTATUS_LOOKASIDE_HIT
**   This parameter returns the number of malloc attempts that were
**   satisfied using lookaside memory.  Only the high-water value is
**   meaningful; the current value is always zero.)^
**
** [[SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE]] ^(SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE
**   This parameter returns the number of malloc attempts that might have
**   been satisfied using lookaside memory but failed due to the amount of
**   memory requested being larger than the lookaside slot size.
**   Only the high-water value is meaningful; the current value is
**   always zero.)^
**
** [[SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL]] ^(SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL
**   This parameter returns the number of malloc attempts that might have
**   been satisfied using lookaside memory but failed due to all lookaside
**   memory already being in use.  Only the high-water value is meaningful;
**   the current value is always zero.)^
**
** [[SQLITE_DBSTATUS_CACHE_USED]] ^(SQLITE_DBSTATUS_CACHE_USED
**   This parameter returns the approximate number of bytes of heap
**   memory used by all pager caches associated with the database
**   connection.)^ ^The highwater mark associated with
**   SQLITE_DBSTATUS_CACHE_USED is always 0.
**
** [[SQLITE_DBSTATUS_SCHEMA_USED]] ^(SQLITE_DBSTATUS_SCHEMA_USED
**   This parameter returns the approximate number of bytes of heap
**   memory used to store the schema for all databases associated
**   with the connection - main, temp, and any [ATTACH]-ed databases.)^
**   ^The full amount of memory used by the schemas is reported, even if
**   the schema memory is shared with other database connections due to
**   [shared cache mode] being enabled.
**   ^The highwater mark associated with SQLITE_DBSTATUS_SCHEMA_USED is
**   always 0.
**
** [[SQLITE_DBSTATUS_STMT_USED]] ^(SQLITE_DBSTATUS_STMT_USED
**   This parameter returns the approximate number of bytes of heap
**   and lookaside memory used by all prepared statements associated with
**   the database connection.)^
**   ^The highwater mark associated with SQLITE_DBSTATUS_STMT_USED is
**   always 0.
**
** [[SQLITE_DBSTATUS_CACHE_HIT]] ^(SQLITE_DBSTATUS_CACHE_HIT
**   This parameter returns the number of pager cache hits that have
**   occurred.)^ ^The highwater mark associated with
**   SQLITE_DBSTATUS_CACHE_HIT is always 0.
**
** [[SQLITE_DBSTATUS_CACHE_MISS]] ^(SQLITE_DBSTATUS_CACHE_MISS
**   This parameter returns the number of pager cache misses that have
**   occurred.)^ ^The highwater mark associated with
**   SQLITE_DBSTATUS_CACHE_MISS is always 0.
**
** [[SQLITE_DBSTATUS_CACHE_WRITE]] ^(SQLITE_DBSTATUS_CACHE_WRITE
**   This parameter returns the number of dirty cache entries that have
**   been written to disk.  Specifically, the number of pages written to
**   the wal file in wal mode databases, or the number of pages written
**   to the database file in rollback mode databases.  Any pages written
**   as part of transaction rollback or database recovery operations are
**   not included.  If an IO or other error occurs while writing a page
**   to disk, the effect on subsequent SQLITE_DBSTATUS_CACHE_WRITE
**   requests is undefined.)^ ^The highwater mark associated with
**   SQLITE_DBSTATUS_CACHE_WRITE is always 0.
**
** [[SQLITE_DBSTATUS_DEFERRED_FKS]] ^(SQLITE_DBSTATUS_DEFERRED_FKS
**   This parameter returns zero for the current value if and only if
**   all foreign key constraints (deferred or immediate) have been
**   resolved.)^ ^The highwater mark is always 0.
**
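** For example, the approximate amount of pager-cache memory used by a
** single connection can be queried as follows (a sketch; db is an open
** [database connection] and the other names are illustrative only):
**
**     int cur = 0, hi = 0;
**     int rc = sqlite3_db_status(db, SQLITE_DBSTATUS_CACHE_USED, &cur, &hi, 0);
**     // on SQLITE_OK, cur holds the approximate number of bytes of heap
**     // memory used by the pager caches; the high-water mark for this
**     // parameter is always 0
**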
*/ #define SQLITE_DBSTATUS_LOOKASIDE_USED 0 #define SQLITE_DBSTATUS_CACHE_USED 1 #define SQLITE_DBSTATUS_SCHEMA_USED 2 #define SQLITE_DBSTATUS_STMT_USED 3 #define SQLITE_DBSTATUS_LOOKASIDE_HIT 4 #define SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE 5 #define SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL 6 #define SQLITE_DBSTATUS_CACHE_HIT 7 #define SQLITE_DBSTATUS_CACHE_MISS 8 #define SQLITE_DBSTATUS_CACHE_WRITE 9 #define SQLITE_DBSTATUS_DEFERRED_FKS 10 #define SQLITE_DBSTATUS_MAX 10 /* Largest defined DBSTATUS */ /* ** CAPI3REF: Prepared Statement Status ** METHOD: sqlite3_stmt ** ** ^(Each prepared statement maintains various ** [SQLITE_STMTSTATUS counters] that measure the number ** of times it has performed specific operations.)^ These counters can ** be used to monitor the performance characteristics of the prepared ** statements. For example, if the number of table steps greatly exceeds ** the number of table searches or result rows, that would tend to indicate ** that the prepared statement is using a full table scan rather than ** an index. ** ** ^(This interface is used to retrieve and reset counter values from ** a [prepared statement]. The first argument is the prepared statement ** object to be interrogated. The second argument ** is an integer code for a specific [SQLITE_STMTSTATUS counter] ** to be interrogated.)^ ** ^The current value of the requested counter is returned. ** ^If the resetFlg is true, then the counter is reset to zero after this ** interface call returns. ** ** See also: [sqlite3_status()] and [sqlite3_db_status()]. */ SQLITE_API int SQLITE_STDCALL sqlite3_stmt_status(sqlite3_stmt*, int op,int resetFlg); /* ** CAPI3REF: Status Parameters for prepared statements ** KEYWORDS: {SQLITE_STMTSTATUS counter} {SQLITE_STMTSTATUS counters} ** ** These preprocessor macros define integer codes that name counter ** values associated with the [sqlite3_stmt_status()] interface. ** The meanings of the various counters are as follows: ** **
** [[SQLITE_STMTSTATUS_FULLSCAN_STEP]] SQLITE_STMTSTATUS_FULLSCAN_STEP
**   ^This is the number of times that SQLite has stepped forward in
**   a table as part of a full table scan.  Large numbers for this counter
**   may indicate opportunities for performance improvement through
**   careful use of indices.
**
** [[SQLITE_STMTSTATUS_SORT]] SQLITE_STMTSTATUS_SORT
**   ^This is the number of sort operations that have occurred.
**   A non-zero value in this counter may indicate an opportunity to
**   improve performance through careful use of indices.
**
** [[SQLITE_STMTSTATUS_AUTOINDEX]] SQLITE_STMTSTATUS_AUTOINDEX
**   ^This is the number of rows inserted into transient indices that
**   were created automatically in order to help joins run faster.
**   A non-zero value in this counter may indicate an opportunity to
**   improve performance by adding permanent indices that do not
**   need to be reinitialized each time the statement is run.
**
** [[SQLITE_STMTSTATUS_VM_STEP]] SQLITE_STMTSTATUS_VM_STEP
**   ^This is the number of virtual machine operations executed
**   by the prepared statement if that number is less than or equal
**   to 2147483647.  The number of virtual machine operations can be
**   used as a proxy for the total work done by the prepared statement.
**   If the number of virtual machine operations exceeds 2147483647
**   then the value returned by this statement status code is undefined.
**
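** For example, after running a query an application might check whether a
** full table scan was performed (a sketch; pStmt is a [prepared statement]
** owned by the application):
**
**     while( sqlite3_step(pStmt)==SQLITE_ROW ){
**       // ... process each result row ...
**     }
**     if( sqlite3_stmt_status(pStmt, SQLITE_STMTSTATUS_FULLSCAN_STEP, 0)>0 ){
**       // the statement stepped through a full table scan; an index on
**       // the searched columns might improve performance
**     }
**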
*/ #define SQLITE_STMTSTATUS_FULLSCAN_STEP 1 #define SQLITE_STMTSTATUS_SORT 2 #define SQLITE_STMTSTATUS_AUTOINDEX 3 #define SQLITE_STMTSTATUS_VM_STEP 4 /* ** CAPI3REF: Custom Page Cache Object ** ** The sqlite3_pcache type is opaque. It is implemented by ** the pluggable module. The SQLite core has no knowledge of ** its size or internal structure and never deals with the ** sqlite3_pcache object except by holding and passing pointers ** to the object. ** ** See [sqlite3_pcache_methods2] for additional information. */ typedef struct sqlite3_pcache sqlite3_pcache; /* ** CAPI3REF: Custom Page Cache Object ** ** The sqlite3_pcache_page object represents a single page in the ** page cache. The page cache will allocate instances of this ** object. Various methods of the page cache use pointers to instances ** of this object as parameters or as their return value. ** ** See [sqlite3_pcache_methods2] for additional information. */ typedef struct sqlite3_pcache_page sqlite3_pcache_page; struct sqlite3_pcache_page { void *pBuf; /* The content of the page */ void *pExtra; /* Extra information associated with the page */ }; /* ** CAPI3REF: Application Defined Page Cache. ** KEYWORDS: {page cache} ** ** ^(The [sqlite3_config]([SQLITE_CONFIG_PCACHE2], ...) interface can ** register an alternative page cache implementation by passing in an ** instance of the sqlite3_pcache_methods2 structure.)^ ** In many applications, most of the heap memory allocated by ** SQLite is used for the page cache. ** By implementing a ** custom page cache using this API, an application can better control ** the amount of memory consumed by SQLite, the way in which ** that memory is allocated and released, and the policies used to ** determine exactly which parts of a database file are cached and for ** how long. ** ** The alternative page cache mechanism is an ** extreme measure that is only needed by the most demanding applications. ** The built-in page cache is recommended for most uses. ** ** ^(The contents of the sqlite3_pcache_methods2 structure are copied to an ** internal buffer by SQLite within the call to [sqlite3_config]. Hence ** the application may discard the parameter after the call to ** [sqlite3_config()] returns.)^ ** ** [[the xInit() page cache method]] ** ^(The xInit() method is called once for each effective ** call to [sqlite3_initialize()])^ ** (usually only once during the lifetime of the process). ^(The xInit() ** method is passed a copy of the sqlite3_pcache_methods2.pArg value.)^ ** The intent of the xInit() method is to set up global data structures ** required by the custom page cache implementation. ** ^(If the xInit() method is NULL, then the ** built-in default page cache is used instead of the application defined ** page cache.)^ ** ** [[the xShutdown() page cache method]] ** ^The xShutdown() method is called by [sqlite3_shutdown()]. ** It can be used to clean up ** any outstanding resources before process shutdown, if required. ** ^The xShutdown() method may be NULL. ** ** ^SQLite automatically serializes calls to the xInit method, ** so the xInit method need not be threadsafe. ^The ** xShutdown method is only called from [sqlite3_shutdown()] so it does ** not need to be threadsafe either. All other methods must be threadsafe ** in multithreaded applications. ** ** ^SQLite will never invoke xInit() more than once without an intervening ** call to xShutdown(). ** ** [[the xCreate() page cache methods]] ** ^SQLite invokes the xCreate() method to construct a new cache instance. 
** SQLite will typically create one cache instance for each open database file, ** though this is not guaranteed. ^The ** first parameter, szPage, is the size in bytes of the pages that must ** be allocated by the cache. ^szPage will always a power of two. ^The ** second parameter szExtra is a number of bytes of extra storage ** associated with each page cache entry. ^The szExtra parameter will ** a number less than 250. SQLite will use the ** extra szExtra bytes on each page to store metadata about the underlying ** database page on disk. The value passed into szExtra depends ** on the SQLite version, the target platform, and how SQLite was compiled. ** ^The third argument to xCreate(), bPurgeable, is true if the cache being ** created will be used to cache database pages of a file stored on disk, or ** false if it is used for an in-memory database. The cache implementation ** does not have to do anything special based with the value of bPurgeable; ** it is purely advisory. ^On a cache where bPurgeable is false, SQLite will ** never invoke xUnpin() except to deliberately delete a page. ** ^In other words, calls to xUnpin() on a cache with bPurgeable set to ** false will always have the "discard" flag set to true. ** ^Hence, a cache created with bPurgeable false will ** never contain any unpinned pages. ** ** [[the xCachesize() page cache method]] ** ^(The xCachesize() method may be called at any time by SQLite to set the ** suggested maximum cache-size (number of pages stored by) the cache ** instance passed as the first argument. This is the value configured using ** the SQLite "[PRAGMA cache_size]" command.)^ As with the bPurgeable ** parameter, the implementation is not required to do anything with this ** value; it is advisory only. ** ** [[the xPagecount() page cache methods]] ** The xPagecount() method must return the number of pages currently ** stored in the cache, both pinned and unpinned. ** ** [[the xFetch() page cache methods]] ** The xFetch() method locates a page in the cache and returns a pointer to ** an sqlite3_pcache_page object associated with that page, or a NULL pointer. ** The pBuf element of the returned sqlite3_pcache_page object will be a ** pointer to a buffer of szPage bytes used to store the content of a ** single database page. The pExtra element of sqlite3_pcache_page will be ** a pointer to the szExtra bytes of extra storage that SQLite has requested ** for each entry in the page cache. ** ** The page to be fetched is determined by the key. ^The minimum key value ** is 1. After it has been retrieved using xFetch, the page is considered ** to be "pinned". ** ** If the requested page is already in the page cache, then the page cache ** implementation must return a pointer to the page buffer with its content ** intact. If the requested page is not already in the cache, then the ** cache implementation should use the value of the createFlag ** parameter to help it determined what action to take: ** ** **
**   createFlag    Behavior when page is not already in cache
**   ----------    ---------------------------------------------------------
**        0        Do not allocate a new page.  Return NULL.
**        1        Allocate a new page if it is easy and convenient to do so.
**                 Otherwise return NULL.
**        2        Make every effort to allocate a new page.  Only return
**                 NULL if allocating a new page is effectively impossible.
** ** ^(SQLite will normally invoke xFetch() with a createFlag of 0 or 1. SQLite ** will only use a createFlag of 2 after a prior call with a createFlag of 1 ** failed.)^ In between the to xFetch() calls, SQLite may ** attempt to unpin one or more cache pages by spilling the content of ** pinned pages to disk and synching the operating system disk cache. ** ** [[the xUnpin() page cache method]] ** ^xUnpin() is called by SQLite with a pointer to a currently pinned page ** as its second argument. If the third parameter, discard, is non-zero, ** then the page must be evicted from the cache. ** ^If the discard parameter is ** zero, then the page may be discarded or retained at the discretion of ** page cache implementation. ^The page cache implementation ** may choose to evict unpinned pages at any time. ** ** The cache must not perform any reference counting. A single ** call to xUnpin() unpins the page regardless of the number of prior calls ** to xFetch(). ** ** [[the xRekey() page cache methods]] ** The xRekey() method is used to change the key value associated with the ** page passed as the second argument. If the cache ** previously contains an entry associated with newKey, it must be ** discarded. ^Any prior cache entry associated with newKey is guaranteed not ** to be pinned. ** ** When SQLite calls the xTruncate() method, the cache must discard all ** existing cache entries with page numbers (keys) greater than or equal ** to the value of the iLimit parameter passed to xTruncate(). If any ** of these pages are pinned, they are implicitly unpinned, meaning that ** they can be safely discarded. ** ** [[the xDestroy() page cache method]] ** ^The xDestroy() method is used to delete a cache allocated by xCreate(). ** All resources associated with the specified cache should be freed. ^After ** calling the xDestroy() method, SQLite considers the [sqlite3_pcache*] ** handle invalid, and will not use it with any other sqlite3_pcache_methods2 ** functions. ** ** [[the xShrink() page cache method]] ** ^SQLite invokes the xShrink() method when it wants the page cache to ** free up as much of heap memory as possible. The page cache implementation ** is not obligated to free any memory, but well-behaved implementations should ** do their best. */ typedef struct sqlite3_pcache_methods2 sqlite3_pcache_methods2; struct sqlite3_pcache_methods2 { int iVersion; void *pArg; int (*xInit)(void*); void (*xShutdown)(void*); sqlite3_pcache *(*xCreate)(int szPage, int szExtra, int bPurgeable); void (*xCachesize)(sqlite3_pcache*, int nCachesize); int (*xPagecount)(sqlite3_pcache*); sqlite3_pcache_page *(*xFetch)(sqlite3_pcache*, unsigned key, int createFlag); void (*xUnpin)(sqlite3_pcache*, sqlite3_pcache_page*, int discard); void (*xRekey)(sqlite3_pcache*, sqlite3_pcache_page*, unsigned oldKey, unsigned newKey); void (*xTruncate)(sqlite3_pcache*, unsigned iLimit); void (*xDestroy)(sqlite3_pcache*); void (*xShrink)(sqlite3_pcache*); }; /* ** This is the obsolete pcache_methods object that has now been replaced ** by sqlite3_pcache_methods2. This object is not used by SQLite. It is ** retained in the header file for backwards compatibility only. 
*/ typedef struct sqlite3_pcache_methods sqlite3_pcache_methods; struct sqlite3_pcache_methods { void *pArg; int (*xInit)(void*); void (*xShutdown)(void*); sqlite3_pcache *(*xCreate)(int szPage, int bPurgeable); void (*xCachesize)(sqlite3_pcache*, int nCachesize); int (*xPagecount)(sqlite3_pcache*); void *(*xFetch)(sqlite3_pcache*, unsigned key, int createFlag); void (*xUnpin)(sqlite3_pcache*, void*, int discard); void (*xRekey)(sqlite3_pcache*, void*, unsigned oldKey, unsigned newKey); void (*xTruncate)(sqlite3_pcache*, unsigned iLimit); void (*xDestroy)(sqlite3_pcache*); }; /* ** CAPI3REF: Online Backup Object ** ** The sqlite3_backup object records state information about an ongoing ** online backup operation. ^The sqlite3_backup object is created by ** a call to [sqlite3_backup_init()] and is destroyed by a call to ** [sqlite3_backup_finish()]. ** ** See Also: [Using the SQLite Online Backup API] */ typedef struct sqlite3_backup sqlite3_backup; /* ** CAPI3REF: Online Backup API. ** ** The backup API copies the content of one database into another. ** It is useful either for creating backups of databases or ** for copying in-memory databases to or from persistent files. ** ** See Also: [Using the SQLite Online Backup API] ** ** ^SQLite holds a write transaction open on the destination database file ** for the duration of the backup operation. ** ^The source database is read-locked only while it is being read; ** it is not locked continuously for the entire backup operation. ** ^Thus, the backup may be performed on a live source database without ** preventing other database connections from ** reading or writing to the source database while the backup is underway. ** ** ^(To perform a backup operation: **
    **
  1. sqlite3_backup_init() is called once to initialize the ** backup, **
  2. sqlite3_backup_step() is called one or more times to transfer ** the data between the two databases, and finally **
  3. sqlite3_backup_finish() is called to release all resources ** associated with the backup operation. **
)^ ** There should be exactly one call to sqlite3_backup_finish() for each ** successful call to sqlite3_backup_init(). ** ** [[sqlite3_backup_init()]] sqlite3_backup_init() ** ** ^The D and N arguments to sqlite3_backup_init(D,N,S,M) are the ** [database connection] associated with the destination database ** and the database name, respectively. ** ^The database name is "main" for the main database, "temp" for the ** temporary database, or the name specified after the AS keyword in ** an [ATTACH] statement for an attached database. ** ^The S and M arguments passed to ** sqlite3_backup_init(D,N,S,M) identify the [database connection] ** and database name of the source database, respectively. ** ^The source and destination [database connections] (parameters S and D) ** must be different or else sqlite3_backup_init(D,N,S,M) will fail with ** an error. ** ** ^A call to sqlite3_backup_init() will fail, returning SQLITE_ERROR, if ** there is already a read or read-write transaction open on the ** destination database. ** ** ^If an error occurs within sqlite3_backup_init(D,N,S,M), then NULL is ** returned and an error code and error message are stored in the ** destination [database connection] D. ** ^The error code and message for the failed call to sqlite3_backup_init() ** can be retrieved using the [sqlite3_errcode()], [sqlite3_errmsg()], and/or ** [sqlite3_errmsg16()] functions. ** ^A successful call to sqlite3_backup_init() returns a pointer to an ** [sqlite3_backup] object. ** ^The [sqlite3_backup] object may be used with the sqlite3_backup_step() and ** sqlite3_backup_finish() functions to perform the specified backup ** operation. ** ** [[sqlite3_backup_step()]] sqlite3_backup_step() ** ** ^Function sqlite3_backup_step(B,N) will copy up to N pages between ** the source and destination databases specified by [sqlite3_backup] object B. ** ^If N is negative, all remaining source pages are copied. ** ^If sqlite3_backup_step(B,N) successfully copies N pages and there ** are still more pages to be copied, then the function returns [SQLITE_OK]. ** ^If sqlite3_backup_step(B,N) successfully finishes copying all pages ** from source to destination, then it returns [SQLITE_DONE]. ** ^If an error occurs while running sqlite3_backup_step(B,N), ** then an [error code] is returned. ^As well as [SQLITE_OK] and ** [SQLITE_DONE], a call to sqlite3_backup_step() may return [SQLITE_READONLY], ** [SQLITE_NOMEM], [SQLITE_BUSY], [SQLITE_LOCKED], or an ** [SQLITE_IOERR_ACCESS | SQLITE_IOERR_XXX] extended error code. ** ** ^(The sqlite3_backup_step() might return [SQLITE_READONLY] if **
    **
  1. the destination database was opened read-only, or **
  2. the destination database is using write-ahead-log journaling ** and the destination and source page sizes differ, or **
  3. the destination database is an in-memory database and the ** destination and source page sizes differ. **
)^ ** ** ^If sqlite3_backup_step() cannot obtain a required file-system lock, then ** the [sqlite3_busy_handler | busy-handler function] ** is invoked (if one is specified). ^If the ** busy-handler returns non-zero before the lock is available, then ** [SQLITE_BUSY] is returned to the caller. ^In this case the call to ** sqlite3_backup_step() can be retried later. ^If the source ** [database connection] ** is being used to write to the source database when sqlite3_backup_step() ** is called, then [SQLITE_LOCKED] is returned immediately. ^Again, in this ** case the call to sqlite3_backup_step() can be retried later on. ^(If ** [SQLITE_IOERR_ACCESS | SQLITE_IOERR_XXX], [SQLITE_NOMEM], or ** [SQLITE_READONLY] is returned, then ** there is no point in retrying the call to sqlite3_backup_step(). These ** errors are considered fatal.)^ The application must accept ** that the backup operation has failed and pass the backup operation handle ** to the sqlite3_backup_finish() to release associated resources. ** ** ^The first call to sqlite3_backup_step() obtains an exclusive lock ** on the destination file. ^The exclusive lock is not released until either ** sqlite3_backup_finish() is called or the backup operation is complete ** and sqlite3_backup_step() returns [SQLITE_DONE]. ^Every call to ** sqlite3_backup_step() obtains a [shared lock] on the source database that ** lasts for the duration of the sqlite3_backup_step() call. ** ^Because the source database is not locked between calls to ** sqlite3_backup_step(), the source database may be modified mid-way ** through the backup process. ^If the source database is modified by an ** external process or via a database connection other than the one being ** used by the backup operation, then the backup will be automatically ** restarted by the next call to sqlite3_backup_step(). ^If the source ** database is modified by the using the same database connection as is used ** by the backup operation, then the backup database is automatically ** updated at the same time. ** ** [[sqlite3_backup_finish()]] sqlite3_backup_finish() ** ** When sqlite3_backup_step() has returned [SQLITE_DONE], or when the ** application wishes to abandon the backup operation, the application ** should destroy the [sqlite3_backup] by passing it to sqlite3_backup_finish(). ** ^The sqlite3_backup_finish() interfaces releases all ** resources associated with the [sqlite3_backup] object. ** ^If sqlite3_backup_step() has not yet returned [SQLITE_DONE], then any ** active write-transaction on the destination database is rolled back. ** The [sqlite3_backup] object is invalid ** and may not be used following a call to sqlite3_backup_finish(). ** ** ^The value returned by sqlite3_backup_finish is [SQLITE_OK] if no ** sqlite3_backup_step() errors occurred, regardless or whether or not ** sqlite3_backup_step() completed. ** ^If an out-of-memory condition or IO error occurred during any prior ** sqlite3_backup_step() call on the same [sqlite3_backup] object, then ** sqlite3_backup_finish() returns the corresponding [error code]. ** ** ^A return of [SQLITE_BUSY] or [SQLITE_LOCKED] from sqlite3_backup_step() ** is not a permanent error and does not affect the return value of ** sqlite3_backup_finish(). ** ** [[sqlite3_backup_remaining()]] [[sqlite3_backup_pagecount()]] ** sqlite3_backup_remaining() and sqlite3_backup_pagecount() ** ** ^The sqlite3_backup_remaining() routine returns the number of pages still ** to be backed up at the conclusion of the most recent sqlite3_backup_step(). 
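**
** A minimal, hedged sketch of the init/step/finish cycle described above
** (backup_whole_db is an illustrative name, not part of this API, and
** error handling is reduced to the essentials):
**
**   static int backup_whole_db(sqlite3 *pSrc, sqlite3 *pDest){
**     int rc;
**     sqlite3_backup *p = sqlite3_backup_init(pDest, "main", pSrc, "main");
**     if( p==0 ) return sqlite3_errcode(pDest);
**     do{
**       rc = sqlite3_backup_step(p, 100);
**       if( rc==SQLITE_BUSY || rc==SQLITE_LOCKED ) sqlite3_sleep(250);
**     }while( rc==SQLITE_OK || rc==SQLITE_BUSY || rc==SQLITE_LOCKED );
**     (void)sqlite3_backup_finish(p);
**     return sqlite3_errcode(pDest);
**   }
**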
** ^The sqlite3_backup_pagecount() routine returns the total number of pages ** in the source database at the conclusion of the most recent ** sqlite3_backup_step(). ** ^(The values returned by these functions are only updated by ** sqlite3_backup_step(). If the source database is modified in a way that ** changes the size of the source database or the number of pages remaining, ** those changes are not reflected in the output of sqlite3_backup_pagecount() ** and sqlite3_backup_remaining() until after the next ** sqlite3_backup_step().)^ ** ** Concurrent Usage of Database Handles ** ** ^The source [database connection] may be used by the application for other ** purposes while a backup operation is underway or being initialized. ** ^If SQLite is compiled and configured to support threadsafe database ** connections, then the source database connection may be used concurrently ** from within other threads. ** ** However, the application must guarantee that the destination ** [database connection] is not passed to any other API (by any thread) after ** sqlite3_backup_init() is called and before the corresponding call to ** sqlite3_backup_finish(). SQLite does not currently check to see ** if the application incorrectly accesses the destination [database connection] ** and so no error code is reported, but the operations may malfunction ** nevertheless. Use of the destination database connection while a ** backup is in progress might also also cause a mutex deadlock. ** ** If running in [shared cache mode], the application must ** guarantee that the shared cache used by the destination database ** is not accessed while the backup is running. In practice this means ** that the application must guarantee that the disk file being ** backed up to is not accessed by any connection within the process, ** not just the specific connection that was passed to sqlite3_backup_init(). ** ** The [sqlite3_backup] object itself is partially threadsafe. Multiple ** threads may safely make multiple concurrent calls to sqlite3_backup_step(). ** However, the sqlite3_backup_remaining() and sqlite3_backup_pagecount() ** APIs are not strictly speaking threadsafe. If they are invoked at the ** same time as another thread is invoking sqlite3_backup_step() it is ** possible that they return invalid values. */ SQLITE_API sqlite3_backup *SQLITE_STDCALL sqlite3_backup_init( sqlite3 *pDest, /* Destination database handle */ const char *zDestName, /* Destination database name */ sqlite3 *pSource, /* Source database handle */ const char *zSourceName /* Source database name */ ); SQLITE_API int SQLITE_STDCALL sqlite3_backup_step(sqlite3_backup *p, int nPage); SQLITE_API int SQLITE_STDCALL sqlite3_backup_finish(sqlite3_backup *p); SQLITE_API int SQLITE_STDCALL sqlite3_backup_remaining(sqlite3_backup *p); SQLITE_API int SQLITE_STDCALL sqlite3_backup_pagecount(sqlite3_backup *p); /* ** CAPI3REF: Unlock Notification ** METHOD: sqlite3 ** ** ^When running in shared-cache mode, a database operation may fail with ** an [SQLITE_LOCKED] error if the required locks on the shared-cache or ** individual tables within the shared-cache cannot be obtained. See ** [SQLite Shared-Cache Mode] for a description of shared-cache locking. ** ^This API may be used to register a callback that SQLite will invoke ** when the connection currently holding the required lock relinquishes it. ** ^This API is only available if the library was compiled with the ** [SQLITE_ENABLE_UNLOCK_NOTIFY] C-preprocessor symbol defined. 
** ** See Also: [Using the SQLite Unlock Notification Feature]. ** ** ^Shared-cache locks are released when a database connection concludes ** its current transaction, either by committing it or rolling it back. ** ** ^When a connection (known as the blocked connection) fails to obtain a ** shared-cache lock and SQLITE_LOCKED is returned to the caller, the ** identity of the database connection (the blocking connection) that ** has locked the required resource is stored internally. ^After an ** application receives an SQLITE_LOCKED error, it may call the ** sqlite3_unlock_notify() method with the blocked connection handle as ** the first argument to register for a callback that will be invoked ** when the blocking connections current transaction is concluded. ^The ** callback is invoked from within the [sqlite3_step] or [sqlite3_close] ** call that concludes the blocking connections transaction. ** ** ^(If sqlite3_unlock_notify() is called in a multi-threaded application, ** there is a chance that the blocking connection will have already ** concluded its transaction by the time sqlite3_unlock_notify() is invoked. ** If this happens, then the specified callback is invoked immediately, ** from within the call to sqlite3_unlock_notify().)^ ** ** ^If the blocked connection is attempting to obtain a write-lock on a ** shared-cache table, and more than one other connection currently holds ** a read-lock on the same table, then SQLite arbitrarily selects one of ** the other connections to use as the blocking connection. ** ** ^(There may be at most one unlock-notify callback registered by a ** blocked connection. If sqlite3_unlock_notify() is called when the ** blocked connection already has a registered unlock-notify callback, ** then the new callback replaces the old.)^ ^If sqlite3_unlock_notify() is ** called with a NULL pointer as its second argument, then any existing ** unlock-notify callback is canceled. ^The blocked connections ** unlock-notify callback may also be canceled by closing the blocked ** connection using [sqlite3_close()]. ** ** The unlock-notify callback is not reentrant. If an application invokes ** any sqlite3_xxx API functions from within an unlock-notify callback, a ** crash or deadlock may be the result. ** ** ^Unless deadlock is detected (see below), sqlite3_unlock_notify() always ** returns SQLITE_OK. ** ** Callback Invocation Details ** ** When an unlock-notify callback is registered, the application provides a ** single void* pointer that is passed to the callback when it is invoked. ** However, the signature of the callback function allows SQLite to pass ** it an array of void* context pointers. The first argument passed to ** an unlock-notify callback is a pointer to an array of void* pointers, ** and the second is the number of entries in the array. ** ** When a blocking connections transaction is concluded, there may be ** more than one blocked connection that has registered for an unlock-notify ** callback. ^If two or more such blocked connections have specified the ** same callback function, then instead of invoking the callback function ** multiple times, it is invoked once with the set of void* context pointers ** specified by the blocked connections bundled together into an array. ** This gives the application an opportunity to prioritize any actions ** related to the set of unblocked database connections. 
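**
** A hedged sketch of the usual blocking pattern built on this callback,
** assuming a POSIX-threads build (pthread.h); the UnlockNotification
** structure and the function names are illustrative only:
**
**   typedef struct UnlockNotification UnlockNotification;
**   struct UnlockNotification {
**     int fired;
**     pthread_cond_t cond;
**     pthread_mutex_t mutex;
**   };
**
**   static void unlock_notify_cb(void **apArg, int nArg){
**     int i;
**     for(i=0; i<nArg; i++){
**       UnlockNotification *p = (UnlockNotification *)apArg[i];
**       pthread_mutex_lock(&p->mutex);
**       p->fired = 1;
**       pthread_cond_signal(&p->cond);
**       pthread_mutex_unlock(&p->mutex);
**     }
**   }
**
**   static int wait_for_unlock_notify(sqlite3 *db){
**     int rc;
**     UnlockNotification un;
**     un.fired = 0;
**     pthread_mutex_init(&un.mutex, 0);
**     pthread_cond_init(&un.cond, 0);
**     rc = sqlite3_unlock_notify(db, unlock_notify_cb, (void *)&un);
**     if( rc==SQLITE_OK ){
**       pthread_mutex_lock(&un.mutex);
**       while( !un.fired ) pthread_cond_wait(&un.cond, &un.mutex);
**       pthread_mutex_unlock(&un.mutex);
**     }
**     pthread_cond_destroy(&un.cond);
**     pthread_mutex_destroy(&un.mutex);
**     return rc;
**   }
**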
** ** Deadlock Detection ** ** Assuming that after registering for an unlock-notify callback a ** database waits for the callback to be issued before taking any further ** action (a reasonable assumption), then using this API may cause the ** application to deadlock. For example, if connection X is waiting for ** connection Y's transaction to be concluded, and similarly connection ** Y is waiting on connection X's transaction, then neither connection ** will proceed and the system may remain deadlocked indefinitely. ** ** To avoid this scenario, the sqlite3_unlock_notify() performs deadlock ** detection. ^If a given call to sqlite3_unlock_notify() would put the ** system in a deadlocked state, then SQLITE_LOCKED is returned and no ** unlock-notify callback is registered. The system is said to be in ** a deadlocked state if connection A has registered for an unlock-notify ** callback on the conclusion of connection B's transaction, and connection ** B has itself registered for an unlock-notify callback when connection ** A's transaction is concluded. ^Indirect deadlock is also detected, so ** the system is also considered to be deadlocked if connection B has ** registered for an unlock-notify callback on the conclusion of connection ** C's transaction, where connection C is waiting on connection A. ^Any ** number of levels of indirection are allowed. ** ** The "DROP TABLE" Exception ** ** When a call to [sqlite3_step()] returns SQLITE_LOCKED, it is almost ** always appropriate to call sqlite3_unlock_notify(). There is however, ** one exception. When executing a "DROP TABLE" or "DROP INDEX" statement, ** SQLite checks if there are any currently executing SELECT statements ** that belong to the same connection. If there are, SQLITE_LOCKED is ** returned. In this case there is no "blocking connection", so invoking ** sqlite3_unlock_notify() results in the unlock-notify callback being ** invoked immediately. If the application then re-attempts the "DROP TABLE" ** or "DROP INDEX" query, an infinite loop might be the result. ** ** One way around this problem is to check the extended error code returned ** by an sqlite3_step() call. ^(If there is a blocking connection, then the ** extended error code is set to SQLITE_LOCKED_SHAREDCACHE. Otherwise, in ** the special "DROP TABLE/INDEX" case, the extended error code is just ** SQLITE_LOCKED.)^ */ SQLITE_API int SQLITE_STDCALL sqlite3_unlock_notify( sqlite3 *pBlocked, /* Waiting connection */ void (*xNotify)(void **apArg, int nArg), /* Callback function to invoke */ void *pNotifyArg /* Argument to pass to xNotify */ ); /* ** CAPI3REF: String Comparison ** ** ^The [sqlite3_stricmp()] and [sqlite3_strnicmp()] APIs allow applications ** and extensions to compare the contents of two buffers containing UTF-8 ** strings in a case-independent fashion, using the same definition of "case ** independence" that SQLite uses internally when comparing identifiers. */ SQLITE_API int SQLITE_STDCALL sqlite3_stricmp(const char *, const char *); SQLITE_API int SQLITE_STDCALL sqlite3_strnicmp(const char *, const char *, int); /* ** CAPI3REF: String Globbing * ** ^The [sqlite3_strglob(P,X)] interface returns zero if string X matches ** the glob pattern P, and it returns non-zero if string X does not match ** the glob pattern P. ^The definition of glob pattern matching used in ** [sqlite3_strglob(P,X)] is the same as for the "X GLOB P" operator in the ** SQL dialect used by SQLite. ^The sqlite3_strglob(P,X) function is case ** sensitive. 
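**
** A small, hedged illustration (is_db_file is an arbitrary helper name,
** not part of this interface):
**
**   static int is_db_file(const char *zFilename){
**     return sqlite3_strglob("*.db", zFilename)==0;
**   }
**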
** ** Note that this routine returns zero on a match and non-zero if the strings ** do not match, the same as [sqlite3_stricmp()] and [sqlite3_strnicmp()]. */ SQLITE_API int SQLITE_STDCALL sqlite3_strglob(const char *zGlob, const char *zStr); /* ** CAPI3REF: Error Logging Interface ** ** ^The [sqlite3_log()] interface writes a message into the [error log] ** established by the [SQLITE_CONFIG_LOG] option to [sqlite3_config()]. ** ^If logging is enabled, the zFormat string and subsequent arguments are ** used with [sqlite3_snprintf()] to generate the final output string. ** ** The sqlite3_log() interface is intended for use by extensions such as ** virtual tables, collating functions, and SQL functions. While there is ** nothing to prevent an application from calling sqlite3_log(), doing so ** is considered bad form. ** ** The zFormat string must not be NULL. ** ** To avoid deadlocks and other threading problems, the sqlite3_log() routine ** will not use dynamically allocated memory. The log message is stored in ** a fixed-length buffer on the stack. If the log message is longer than ** a few hundred characters, it will be truncated to the length of the ** buffer. */ SQLITE_API void SQLITE_CDECL sqlite3_log(int iErrCode, const char *zFormat, ...); /* ** CAPI3REF: Write-Ahead Log Commit Hook ** METHOD: sqlite3 ** ** ^The [sqlite3_wal_hook()] function is used to register a callback that ** is invoked each time data is committed to a database in wal mode. ** ** ^(The callback is invoked by SQLite after the commit has taken place and ** the associated write-lock on the database released)^, so the implementation ** may read, write or [checkpoint] the database as required. ** ** ^The first parameter passed to the callback function when it is invoked ** is a copy of the third parameter passed to sqlite3_wal_hook() when ** registering the callback. ^The second is a copy of the database handle. ** ^The third parameter is the name of the database that was written to - ** either "main" or the name of an [ATTACH]-ed database. ^The fourth parameter ** is the number of pages currently in the write-ahead log file, ** including those that were just committed. ** ** The callback function should normally return [SQLITE_OK]. ^If an error ** code is returned, that error will propagate back up through the ** SQLite code base to cause the statement that provoked the callback ** to report an error, though the commit will have still occurred. If the ** callback returns [SQLITE_ROW] or [SQLITE_DONE], or if it returns a value ** that does not correspond to any valid SQLite error code, the results ** are undefined. ** ** A single database handle may have at most a single write-ahead log callback ** registered at one time. ^Calling [sqlite3_wal_hook()] replaces any ** previously registered write-ahead log callback. ^Note that the ** [sqlite3_wal_autocheckpoint()] interface and the ** [wal_autocheckpoint pragma] both invoke [sqlite3_wal_hook()] and will ** those overwrite any prior [sqlite3_wal_hook()] settings. */ SQLITE_API void *SQLITE_STDCALL sqlite3_wal_hook( sqlite3*, int(*)(void *,sqlite3*,const char*,int), void* ); /* ** CAPI3REF: Configure an auto-checkpoint ** METHOD: sqlite3 ** ** ^The [sqlite3_wal_autocheckpoint(D,N)] is a wrapper around ** [sqlite3_wal_hook()] that causes any database on [database connection] D ** to automatically [checkpoint] ** after committing a transaction if there are N or ** more frames in the [write-ahead log] file. 
^Passing zero or ** a negative value as the nFrame parameter disables automatic ** checkpoints entirely. ** ** ^The callback registered by this function replaces any existing callback ** registered using [sqlite3_wal_hook()]. ^Likewise, registering a callback ** using [sqlite3_wal_hook()] disables the automatic checkpoint mechanism ** configured by this function. ** ** ^The [wal_autocheckpoint pragma] can be used to invoke this interface ** from SQL. ** ** ^Checkpoints initiated by this mechanism are ** [sqlite3_wal_checkpoint_v2|PASSIVE]. ** ** ^Every new [database connection] defaults to having the auto-checkpoint ** enabled with a threshold of 1000 or [SQLITE_DEFAULT_WAL_AUTOCHECKPOINT] ** pages. The use of this interface ** is only necessary if the default setting is found to be suboptimal ** for a particular application. */ SQLITE_API int SQLITE_STDCALL sqlite3_wal_autocheckpoint(sqlite3 *db, int N); /* ** CAPI3REF: Checkpoint a database ** METHOD: sqlite3 ** ** ^(The sqlite3_wal_checkpoint(D,X) is equivalent to ** [sqlite3_wal_checkpoint_v2](D,X,[SQLITE_CHECKPOINT_PASSIVE],0,0).)^ ** ** In brief, sqlite3_wal_checkpoint(D,X) causes the content in the ** [write-ahead log] for database X on [database connection] D to be ** transferred into the database file and for the write-ahead log to ** be reset. See the [checkpointing] documentation for addition ** information. ** ** This interface used to be the only way to cause a checkpoint to ** occur. But then the newer and more powerful [sqlite3_wal_checkpoint_v2()] ** interface was added. This interface is retained for backwards ** compatibility and as a convenience for applications that need to manually ** start a callback but which do not need the full power (and corresponding ** complication) of [sqlite3_wal_checkpoint_v2()]. */ SQLITE_API int SQLITE_STDCALL sqlite3_wal_checkpoint(sqlite3 *db, const char *zDb); /* ** CAPI3REF: Checkpoint a database ** METHOD: sqlite3 ** ** ^(The sqlite3_wal_checkpoint_v2(D,X,M,L,C) interface runs a checkpoint ** operation on database X of [database connection] D in mode M. Status ** information is written back into integers pointed to by L and C.)^ ** ^(The M parameter must be a valid [checkpoint mode]:)^ ** **
**
SQLITE_CHECKPOINT_PASSIVE
** ^Checkpoint as many frames as possible without waiting for any database ** readers or writers to finish, then sync the database file if all frames ** in the log were checkpointed. ^The [busy-handler callback] ** is never invoked in the SQLITE_CHECKPOINT_PASSIVE mode. ** ^On the other hand, passive mode might leave the checkpoint unfinished ** if there are concurrent readers or writers. ** **
SQLITE_CHECKPOINT_FULL
** ^This mode blocks (it invokes the ** [sqlite3_busy_handler|busy-handler callback]) until there is no ** database writer and all readers are reading from the most recent database ** snapshot. ^It then checkpoints all frames in the log file and syncs the ** database file. ^This mode blocks new database writers while it is pending, ** but new database readers are allowed to continue unimpeded. ** **
SQLITE_CHECKPOINT_RESTART
** ^This mode works the same way as SQLITE_CHECKPOINT_FULL with the addition ** that after checkpointing the log file it blocks (calls the ** [busy-handler callback]) ** until all readers are reading from the database file only. ^This ensures ** that the next writer will restart the log file from the beginning. ** ^Like SQLITE_CHECKPOINT_FULL, this mode blocks new ** database writer attempts while it is pending, but does not impede readers. ** **
SQLITE_CHECKPOINT_TRUNCATE
** ^This mode works the same way as SQLITE_CHECKPOINT_RESTART with the ** addition that it also truncates the log file to zero bytes just prior ** to a successful return. **
** ** ^If pnLog is not NULL, then *pnLog is set to the total number of frames in ** the log file or to -1 if the checkpoint could not run because ** of an error or because the database is not in [WAL mode]. ^If pnCkpt is not ** NULL,then *pnCkpt is set to the total number of checkpointed frames in the ** log file (including any that were already checkpointed before the function ** was called) or to -1 if the checkpoint could not run due to an error or ** because the database is not in WAL mode. ^Note that upon successful ** completion of an SQLITE_CHECKPOINT_TRUNCATE, the log file will have been ** truncated to zero bytes and so both *pnLog and *pnCkpt will be set to zero. ** ** ^All calls obtain an exclusive "checkpoint" lock on the database file. ^If ** any other process is running a checkpoint operation at the same time, the ** lock cannot be obtained and SQLITE_BUSY is returned. ^Even if there is a ** busy-handler configured, it will not be invoked in this case. ** ** ^The SQLITE_CHECKPOINT_FULL, RESTART and TRUNCATE modes also obtain the ** exclusive "writer" lock on the database file. ^If the writer lock cannot be ** obtained immediately, and a busy-handler is configured, it is invoked and ** the writer lock retried until either the busy-handler returns 0 or the lock ** is successfully obtained. ^The busy-handler is also invoked while waiting for ** database readers as described above. ^If the busy-handler returns 0 before ** the writer lock is obtained or while waiting for database readers, the ** checkpoint operation proceeds from that point in the same way as ** SQLITE_CHECKPOINT_PASSIVE - checkpointing as many frames as possible ** without blocking any further. ^SQLITE_BUSY is returned in this case. ** ** ^If parameter zDb is NULL or points to a zero length string, then the ** specified operation is attempted on all WAL databases [attached] to ** [database connection] db. In this case the ** values written to output parameters *pnLog and *pnCkpt are undefined. ^If ** an SQLITE_BUSY error is encountered when processing one or more of the ** attached WAL databases, the operation is still attempted on any remaining ** attached databases and SQLITE_BUSY is returned at the end. ^If any other ** error occurs while processing an attached database, processing is abandoned ** and the error code is returned to the caller immediately. ^If no error ** (SQLITE_BUSY or otherwise) is encountered while processing the attached ** databases, SQLITE_OK is returned. ** ** ^If database zDb is the name of an attached database that is not in WAL ** mode, SQLITE_OK is returned and both *pnLog and *pnCkpt set to -1. ^If ** zDb is not NULL (or a zero length string) and is not the name of any ** attached database, SQLITE_ERROR is returned to the caller. ** ** ^Unless it returns SQLITE_MISUSE, ** the sqlite3_wal_checkpoint_v2() interface ** sets the error information that is queried by ** [sqlite3_errcode()] and [sqlite3_errmsg()]. ** ** ^The [PRAGMA wal_checkpoint] command can be used to invoke this interface ** from SQL. 
*/ SQLITE_API int SQLITE_STDCALL sqlite3_wal_checkpoint_v2( sqlite3 *db, /* Database handle */ const char *zDb, /* Name of attached database (or NULL) */ int eMode, /* SQLITE_CHECKPOINT_* value */ int *pnLog, /* OUT: Size of WAL log in frames */ int *pnCkpt /* OUT: Total number of frames checkpointed */ ); /* ** CAPI3REF: Checkpoint Mode Values ** KEYWORDS: {checkpoint mode} ** ** These constants define all valid values for the "checkpoint mode" passed ** as the third parameter to the [sqlite3_wal_checkpoint_v2()] interface. ** See the [sqlite3_wal_checkpoint_v2()] documentation for details on the ** meaning of each of these checkpoint modes. */ #define SQLITE_CHECKPOINT_PASSIVE 0 /* Do as much as possible w/o blocking */ #define SQLITE_CHECKPOINT_FULL 1 /* Wait for writers, then checkpoint */ #define SQLITE_CHECKPOINT_RESTART 2 /* Like FULL but wait for for readers */ #define SQLITE_CHECKPOINT_TRUNCATE 3 /* Like RESTART but also truncate WAL */ /* ** CAPI3REF: Virtual Table Interface Configuration ** ** This function may be called by either the [xConnect] or [xCreate] method ** of a [virtual table] implementation to configure ** various facets of the virtual table interface. ** ** If this interface is invoked outside the context of an xConnect or ** xCreate virtual table method then the behavior is undefined. ** ** At present, there is only one option that may be configured using ** this function. (See [SQLITE_VTAB_CONSTRAINT_SUPPORT].) Further options ** may be added in the future. */ SQLITE_API int SQLITE_CDECL sqlite3_vtab_config(sqlite3*, int op, ...); /* ** CAPI3REF: Virtual Table Configuration Options ** ** These macros define the various options to the ** [sqlite3_vtab_config()] interface that [virtual table] implementations ** can use to customize and optimize their behavior. ** **
**
SQLITE_VTAB_CONSTRAINT_SUPPORT **
Calls of the form ** [sqlite3_vtab_config](db,SQLITE_VTAB_CONSTRAINT_SUPPORT,X) are supported, ** where X is an integer. If X is zero, then the [virtual table] whose ** [xCreate] or [xConnect] method invoked [sqlite3_vtab_config()] does not ** support constraints. In this configuration (which is the default) if ** a call to the [xUpdate] method returns [SQLITE_CONSTRAINT], then the entire ** statement is rolled back as if [ON CONFLICT | OR ABORT] had been ** specified as part of the users SQL statement, regardless of the actual ** ON CONFLICT mode specified. ** ** If X is non-zero, then the virtual table implementation guarantees ** that if [xUpdate] returns [SQLITE_CONSTRAINT], it will do so before ** any modifications to internal or persistent data structures have been made. ** If the [ON CONFLICT] mode is ABORT, FAIL, IGNORE or ROLLBACK, SQLite ** is able to roll back a statement or database transaction, and abandon ** or continue processing the current SQL statement as appropriate. ** If the ON CONFLICT mode is REPLACE and the [xUpdate] method returns ** [SQLITE_CONSTRAINT], SQLite handles this as if the ON CONFLICT mode ** had been ABORT. ** ** Virtual table implementations that are required to handle OR REPLACE ** must do so within the [xUpdate] method. If a call to the ** [sqlite3_vtab_on_conflict()] function indicates that the current ON ** CONFLICT policy is REPLACE, the virtual table implementation should ** silently replace the appropriate rows within the xUpdate callback and ** return SQLITE_OK. Or, if this is not possible, it may return ** SQLITE_CONSTRAINT, in which case SQLite falls back to OR ABORT ** constraint handling. **
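**
** A hedged sketch of declaring constraint support.  The call below is
** only valid from within an xCreate or xConnect method;
** declare_constraint_support is an illustrative wrapper name:
**
**   static int declare_constraint_support(sqlite3 *db){
**     return sqlite3_vtab_config(db, SQLITE_VTAB_CONSTRAINT_SUPPORT, 1);
**   }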
*/ #define SQLITE_VTAB_CONSTRAINT_SUPPORT 1 /* ** CAPI3REF: Determine The Virtual Table Conflict Policy ** ** This function may only be called from within a call to the [xUpdate] method ** of a [virtual table] implementation for an INSERT or UPDATE operation. ^The ** value returned is one of [SQLITE_ROLLBACK], [SQLITE_IGNORE], [SQLITE_FAIL], ** [SQLITE_ABORT], or [SQLITE_REPLACE], according to the [ON CONFLICT] mode ** of the SQL statement that triggered the call to the [xUpdate] method of the ** [virtual table]. */ SQLITE_API int SQLITE_STDCALL sqlite3_vtab_on_conflict(sqlite3 *); /* ** CAPI3REF: Conflict resolution modes ** KEYWORDS: {conflict resolution mode} ** ** These constants are returned by [sqlite3_vtab_on_conflict()] to ** inform a [virtual table] implementation what the [ON CONFLICT] mode ** is for the SQL statement being evaluated. ** ** Note that the [SQLITE_IGNORE] constant is also used as a potential ** return value from the [sqlite3_set_authorizer()] callback and that ** [SQLITE_ABORT] is also a [result code]. */ #define SQLITE_ROLLBACK 1 /* #define SQLITE_IGNORE 2 // Also used by sqlite3_authorizer() callback */ #define SQLITE_FAIL 3 /* #define SQLITE_ABORT 4 // Also an error code */ #define SQLITE_REPLACE 5 /* ** CAPI3REF: Prepared Statement Scan Status Opcodes ** KEYWORDS: {scanstatus options} ** ** The following constants can be used for the T parameter to the ** [sqlite3_stmt_scanstatus(S,X,T,V)] interface. Each constant designates a ** different metric for sqlite3_stmt_scanstatus() to return. ** ** When the value returned to V is a string, space to hold that string is ** managed by the prepared statement S and will be automatically freed when ** S is finalized. ** **
** [[SQLITE_SCANSTAT_NLOOP]]
SQLITE_SCANSTAT_NLOOP
**
^The [sqlite3_int64] variable pointed to by the V parameter will be ** set to the total number of times that the X-th loop has run.
** ** [[SQLITE_SCANSTAT_NVISIT]]
SQLITE_SCANSTAT_NVISIT
**
^The [sqlite3_int64] variable pointed to by the V parameter will be set ** to the total number of rows examined by all iterations of the X-th loop.
** ** [[SQLITE_SCANSTAT_EST]]
SQLITE_SCANSTAT_EST
**
^The "double" variable pointed to by the T parameter will be set to the ** query planner's estimate for the average number of rows output from each ** iteration of the X-th loop. If the query planner's estimates was accurate, ** then this value will approximate the quotient NVISIT/NLOOP and the ** product of this value for all prior loops with the same SELECTID will ** be the NLOOP value for the current loop. ** ** [[SQLITE_SCANSTAT_NAME]]
SQLITE_SCANSTAT_NAME
**
^The "const char *" variable pointed to by the T parameter will be set ** to a zero-terminated UTF-8 string containing the name of the index or table ** used for the X-th loop. ** ** [[SQLITE_SCANSTAT_EXPLAIN]]
SQLITE_SCANSTAT_EXPLAIN
**
^The "const char *" variable pointed to by the T parameter will be set ** to a zero-terminated UTF-8 string containing the [EXPLAIN QUERY PLAN] ** description for the X-th loop. ** ** [[SQLITE_SCANSTAT_SELECTID]]
SQLITE_SCANSTAT_SELECTID
**
^The "int" variable pointed to by the T parameter will be set to the ** "select-id" for the X-th loop. The select-id identifies which query or ** subquery the loop is part of. The main query has a select-id of zero. ** The select-id is the same value as is output in the first column ** of an [EXPLAIN QUERY PLAN] query. **
*/ #define SQLITE_SCANSTAT_NLOOP 0 #define SQLITE_SCANSTAT_NVISIT 1 #define SQLITE_SCANSTAT_EST 2 #define SQLITE_SCANSTAT_NAME 3 #define SQLITE_SCANSTAT_EXPLAIN 4 #define SQLITE_SCANSTAT_SELECTID 5 /* ** CAPI3REF: Prepared Statement Scan Status ** METHOD: sqlite3_stmt ** ** This interface returns information about the predicted and measured ** performance for pStmt. Advanced applications can use this ** interface to compare the predicted and the measured performance and ** issue warnings and/or rerun [ANALYZE] if discrepancies are found. ** ** Since this interface is expected to be rarely used, it is only ** available if SQLite is compiled using the [SQLITE_ENABLE_STMT_SCANSTATUS] ** compile-time option. ** ** The "iScanStatusOp" parameter determines which status information to return. ** The "iScanStatusOp" must be one of the [scanstatus options] or the behavior ** of this interface is undefined. ** ^The requested measurement is written into a variable pointed to by ** the "pOut" parameter. ** Parameter "idx" identifies the specific loop to retrieve statistics for. ** Loops are numbered starting from zero. ^If idx is out of range - less than ** zero or greater than or equal to the total number of loops used to implement ** the statement - a non-zero value is returned and the variable that pOut ** points to is unchanged. ** ** ^Statistics might not be available for all loops in all statements. ^In cases ** where there exist loops with no available statistics, this function behaves ** as if the loop did not exist - it returns non-zero and leave the variable ** that pOut points to unchanged. ** ** See also: [sqlite3_stmt_scanstatus_reset()] */ SQLITE_API int SQLITE_STDCALL sqlite3_stmt_scanstatus( sqlite3_stmt *pStmt, /* Prepared statement for which info desired */ int idx, /* Index of loop to report on */ int iScanStatusOp, /* Information desired. SQLITE_SCANSTAT_* */ void *pOut /* Result written here */ ); /* ** CAPI3REF: Zero Scan-Status Counters ** METHOD: sqlite3_stmt ** ** ^Zero all [sqlite3_stmt_scanstatus()] related event counters. ** ** This API is only available if the library is built with pre-processor ** symbol [SQLITE_ENABLE_STMT_SCANSTATUS] defined. */ SQLITE_API void SQLITE_STDCALL sqlite3_stmt_scanstatus_reset(sqlite3_stmt*); /* ** Undo the hack that converts floating point types to integer for ** builds on processors without floating point support. */ #ifdef SQLITE_OMIT_FLOATING_POINT # undef double #endif #if 0 } /* End of the 'extern "C"' block */ #endif #endif /* _SQLITE3_H_ */ /* ** 2010 August 30 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* */ #ifndef _SQLITE3RTREE_H_ #define _SQLITE3RTREE_H_ #if 0 extern "C" { #endif typedef struct sqlite3_rtree_geometry sqlite3_rtree_geometry; typedef struct sqlite3_rtree_query_info sqlite3_rtree_query_info; /* The double-precision datatype used by RTree depends on the ** SQLITE_RTREE_INT_ONLY compile-time option. */ #ifdef SQLITE_RTREE_INT_ONLY typedef sqlite3_int64 sqlite3_rtree_dbl; #else typedef double sqlite3_rtree_dbl; #endif /* ** Register a geometry callback named zGeom that can be used as part of an ** R-Tree geometry query as follows: ** ** SELECT ... FROM WHERE MATCH $zGeom(... params ...) 
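**
** A hedged sketch of one such callback and its registration
** (overlapsGeom and register_overlaps are illustrative names).  For a
** two-dimensional r-tree, nCoord is 4 and aCoord holds the candidate
** bounding box as {xmin, xmax, ymin, ymax}; the callback below treats
** its four SQL parameters as a query rectangle in the same order and
** reports a match when the candidate box overlaps that rectangle:
**
**   static int overlapsGeom(
**     sqlite3_rtree_geometry *p,
**     int nCoord,
**     sqlite3_rtree_dbl *aCoord,
**     int *pRes
**   ){
**     if( p->nParam!=4 || nCoord!=4 ) return SQLITE_ERROR;
**     *pRes = aCoord[0]<=p->aParam[1] && aCoord[1]>=p->aParam[0]
**          && aCoord[2]<=p->aParam[3] && aCoord[3]>=p->aParam[2];
**     return SQLITE_OK;
**   }
**
**   static int register_overlaps(sqlite3 *db){
**     return sqlite3_rtree_geometry_callback(db, "overlaps",
**                                            overlapsGeom, 0);
**   }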
*/ SQLITE_API int SQLITE_STDCALL sqlite3_rtree_geometry_callback( sqlite3 *db, const char *zGeom, int (*xGeom)(sqlite3_rtree_geometry*, int, sqlite3_rtree_dbl*,int*), void *pContext ); /* ** A pointer to a structure of the following type is passed as the first ** argument to callbacks registered using rtree_geometry_callback(). */ struct sqlite3_rtree_geometry { void *pContext; /* Copy of pContext passed to s_r_g_c() */ int nParam; /* Size of array aParam[] */ sqlite3_rtree_dbl *aParam; /* Parameters passed to SQL geom function */ void *pUser; /* Callback implementation user data */ void (*xDelUser)(void *); /* Called by SQLite to clean up pUser */ }; /* ** Register a 2nd-generation geometry callback named zScore that can be ** used as part of an R-Tree geometry query as follows: ** ** SELECT ... FROM WHERE MATCH $zQueryFunc(... params ...) */ SQLITE_API int SQLITE_STDCALL sqlite3_rtree_query_callback( sqlite3 *db, const char *zQueryFunc, int (*xQueryFunc)(sqlite3_rtree_query_info*), void *pContext, void (*xDestructor)(void*) ); /* ** A pointer to a structure of the following type is passed as the ** argument to scored geometry callback registered using ** sqlite3_rtree_query_callback(). ** ** Note that the first 5 fields of this structure are identical to ** sqlite3_rtree_geometry. This structure is a subclass of ** sqlite3_rtree_geometry. */ struct sqlite3_rtree_query_info { void *pContext; /* pContext from when function registered */ int nParam; /* Number of function parameters */ sqlite3_rtree_dbl *aParam; /* value of function parameters */ void *pUser; /* callback can use this, if desired */ void (*xDelUser)(void*); /* function to free pUser */ sqlite3_rtree_dbl *aCoord; /* Coordinates of node or entry to check */ unsigned int *anQueue; /* Number of pending entries in the queue */ int nCoord; /* Number of coordinates */ int iLevel; /* Level of current node or entry */ int mxLevel; /* The largest iLevel value in the tree */ sqlite3_int64 iRowid; /* Rowid for current entry */ sqlite3_rtree_dbl rParentScore; /* Score of parent node */ int eParentWithin; /* Visibility of parent node */ int eWithin; /* OUT: Visiblity */ sqlite3_rtree_dbl rScore; /* OUT: Write the score here */ /* The following fields are only available in 3.8.11 and later */ sqlite3_value **apSqlParam; /* Original SQL values of parameters */ }; /* ** Allowed values for sqlite3_rtree_query.eWithin and .eParentWithin. */ #define NOT_WITHIN 0 /* Object completely outside of query region */ #define PARTLY_WITHIN 1 /* Object partially overlaps query region */ #define FULLY_WITHIN 2 /* Object fully contained within query region */ #if 0 } /* end of the 'extern "C"' block */ #endif #endif /* ifndef _SQLITE3RTREE_H_ */ /************** End of sqlite3.h *********************************************/ /************** Continuing where we left off in sqliteInt.h ******************/ /* ** Include the configuration header output by 'configure' if we're using the ** autoconf-based build */ #ifdef _HAVE_SQLITE_CONFIG_H #include "config.h" #endif /************** Include sqliteLimit.h in the middle of sqliteInt.h ***********/ /************** Begin file sqliteLimit.h *************************************/ /* ** 2007 May 7 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. 
** ************************************************************************* ** ** This file defines various limits of what SQLite can process. */ /* ** The maximum length of a TEXT or BLOB in bytes. This also ** limits the size of a row in a table or index. ** ** The hard limit is the ability of a 32-bit signed integer ** to count the size: 2^31-1 or 2147483647. */ #ifndef SQLITE_MAX_LENGTH # define SQLITE_MAX_LENGTH 1000000000 #endif /* ** This is the maximum number of ** ** * Columns in a table ** * Columns in an index ** * Columns in a view ** * Terms in the SET clause of an UPDATE statement ** * Terms in the result set of a SELECT statement ** * Terms in the GROUP BY or ORDER BY clauses of a SELECT statement. ** * Terms in the VALUES clause of an INSERT statement ** ** The hard upper limit here is 32676. Most database people will ** tell you that in a well-normalized database, you usually should ** not have more than a dozen or so columns in any table. And if ** that is the case, there is no point in having more than a few ** dozen values in any of the other situations described above. */ #ifndef SQLITE_MAX_COLUMN # define SQLITE_MAX_COLUMN 2000 #endif /* ** The maximum length of a single SQL statement in bytes. ** ** It used to be the case that setting this value to zero would ** turn the limit off. That is no longer true. It is not possible ** to turn this limit off. */ #ifndef SQLITE_MAX_SQL_LENGTH # define SQLITE_MAX_SQL_LENGTH 1000000000 #endif /* ** The maximum depth of an expression tree. This is limited to ** some extent by SQLITE_MAX_SQL_LENGTH. But sometime you might ** want to place more severe limits on the complexity of an ** expression. ** ** A value of 0 used to mean that the limit was not enforced. ** But that is no longer true. The limit is now strictly enforced ** at all times. */ #ifndef SQLITE_MAX_EXPR_DEPTH # define SQLITE_MAX_EXPR_DEPTH 1000 #endif /* ** The maximum number of terms in a compound SELECT statement. ** The code generator for compound SELECT statements does one ** level of recursion for each term. A stack overflow can result ** if the number of terms is too large. In practice, most SQL ** never has more than 3 or 4 terms. Use a value of 0 to disable ** any limit on the number of terms in a compount SELECT. */ #ifndef SQLITE_MAX_COMPOUND_SELECT # define SQLITE_MAX_COMPOUND_SELECT 500 #endif /* ** The maximum number of opcodes in a VDBE program. ** Not currently enforced. */ #ifndef SQLITE_MAX_VDBE_OP # define SQLITE_MAX_VDBE_OP 25000 #endif /* ** The maximum number of arguments to an SQL function. */ #ifndef SQLITE_MAX_FUNCTION_ARG # define SQLITE_MAX_FUNCTION_ARG 127 #endif /* ** The suggested maximum number of in-memory pages to use for ** the main database table and for temporary tables. ** ** IMPLEMENTATION-OF: R-31093-59126 The default suggested cache size ** is 2000 pages. ** IMPLEMENTATION-OF: R-48205-43578 The default suggested cache size can be ** altered using the SQLITE_DEFAULT_CACHE_SIZE compile-time options. */ #ifndef SQLITE_DEFAULT_CACHE_SIZE # define SQLITE_DEFAULT_CACHE_SIZE 2000 #endif /* ** The default number of frames to accumulate in the log file before ** checkpointing the database in WAL mode. */ #ifndef SQLITE_DEFAULT_WAL_AUTOCHECKPOINT # define SQLITE_DEFAULT_WAL_AUTOCHECKPOINT 1000 #endif /* ** The maximum number of attached databases. This must be between 0 ** and 62. The upper bound on 62 is because a 64-bit integer bitmap ** is used internally to track attached databases. 
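**
** The default below can be raised at build time (for example by adding
** -DSQLITE_MAX_ATTACHED=25 to the compiler flags), and the effective
** limit can be lowered per connection at run time.  A hedged sketch
** using the public sqlite3_limit() interface (cap_attached is an
** illustrative name and 8 is an arbitrary value):
**
**   static int cap_attached(sqlite3 *db){
**     return sqlite3_limit(db, SQLITE_LIMIT_ATTACHED, 8);
**   }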
*/ #ifndef SQLITE_MAX_ATTACHED # define SQLITE_MAX_ATTACHED 10 #endif /* ** The maximum value of a ?nnn wildcard that the parser will accept. */ #ifndef SQLITE_MAX_VARIABLE_NUMBER # define SQLITE_MAX_VARIABLE_NUMBER 999 #endif /* Maximum page size. The upper bound on this value is 65536. This a limit ** imposed by the use of 16-bit offsets within each page. ** ** Earlier versions of SQLite allowed the user to change this value at ** compile time. This is no longer permitted, on the grounds that it creates ** a library that is technically incompatible with an SQLite library ** compiled with a different limit. If a process operating on a database ** with a page-size of 65536 bytes crashes, then an instance of SQLite ** compiled with the default page-size limit will not be able to rollback ** the aborted transaction. This could lead to database corruption. */ #ifdef SQLITE_MAX_PAGE_SIZE # undef SQLITE_MAX_PAGE_SIZE #endif #define SQLITE_MAX_PAGE_SIZE 65536 /* ** The default size of a database page. */ #ifndef SQLITE_DEFAULT_PAGE_SIZE # define SQLITE_DEFAULT_PAGE_SIZE 1024 #endif #if SQLITE_DEFAULT_PAGE_SIZE>SQLITE_MAX_PAGE_SIZE # undef SQLITE_DEFAULT_PAGE_SIZE # define SQLITE_DEFAULT_PAGE_SIZE SQLITE_MAX_PAGE_SIZE #endif /* ** Ordinarily, if no value is explicitly provided, SQLite creates databases ** with page size SQLITE_DEFAULT_PAGE_SIZE. However, based on certain ** device characteristics (sector-size and atomic write() support), ** SQLite may choose a larger value. This constant is the maximum value ** SQLite will choose on its own. */ #ifndef SQLITE_MAX_DEFAULT_PAGE_SIZE # define SQLITE_MAX_DEFAULT_PAGE_SIZE 8192 #endif #if SQLITE_MAX_DEFAULT_PAGE_SIZE>SQLITE_MAX_PAGE_SIZE # undef SQLITE_MAX_DEFAULT_PAGE_SIZE # define SQLITE_MAX_DEFAULT_PAGE_SIZE SQLITE_MAX_PAGE_SIZE #endif /* ** Maximum number of pages in one database file. ** ** This is really just the default value for the max_page_count pragma. ** This value can be lowered (or raised) at run-time using that the ** max_page_count macro. */ #ifndef SQLITE_MAX_PAGE_COUNT # define SQLITE_MAX_PAGE_COUNT 1073741823 #endif /* ** Maximum length (in bytes) of the pattern in a LIKE or GLOB ** operator. */ #ifndef SQLITE_MAX_LIKE_PATTERN_LENGTH # define SQLITE_MAX_LIKE_PATTERN_LENGTH 50000 #endif /* ** Maximum depth of recursion for triggers. ** ** A value of 1 means that a trigger program will not be able to itself ** fire any triggers. A value of 0 means that no trigger programs at all ** may be executed. */ #ifndef SQLITE_MAX_TRIGGER_DEPTH # define SQLITE_MAX_TRIGGER_DEPTH 1000 #endif /************** End of sqliteLimit.h *****************************************/ /************** Continuing where we left off in sqliteInt.h ******************/ /* Disable nuisance warnings on Borland compilers */ #if defined(__BORLANDC__) #pragma warn -rch /* unreachable code */ #pragma warn -ccc /* Condition is always true or false */ #pragma warn -aus /* Assigned value is never used */ #pragma warn -csu /* Comparing signed and unsigned */ #pragma warn -spa /* Suspicious pointer arithmetic */ #endif /* ** Include standard header files as necessary */ #ifdef HAVE_STDINT_H #include #endif #ifdef HAVE_INTTYPES_H #include #endif /* ** The following macros are used to cast pointers to integers and ** integers to pointers. The way you do this varies from one compiler ** to the next, so we have developed the following set of #if statements ** to generate appropriate macros for a wide range of compilers. 
** ** The correct "ANSI" way to do this is to use the intptr_t type. ** Unfortunately, that typedef is not available on all compilers, or ** if it is available, it requires an #include of specific headers ** that vary from one machine to the next. ** ** Ticket #3860: The llvm-gcc-4.2 compiler from Apple chokes on ** the ((void*)&((char*)0)[X]) construct. But MSVC chokes on ((void*)(X)). ** So we have to define the macros in different ways depending on the ** compiler. */ #if defined(__PTRDIFF_TYPE__) /* This case should work for GCC */ # define SQLITE_INT_TO_PTR(X) ((void*)(__PTRDIFF_TYPE__)(X)) # define SQLITE_PTR_TO_INT(X) ((int)(__PTRDIFF_TYPE__)(X)) #elif !defined(__GNUC__) /* Works for compilers other than LLVM */ # define SQLITE_INT_TO_PTR(X) ((void*)&((char*)0)[X]) # define SQLITE_PTR_TO_INT(X) ((int)(((char*)X)-(char*)0)) #elif defined(HAVE_STDINT_H) /* Use this case if we have ANSI headers */ # define SQLITE_INT_TO_PTR(X) ((void*)(intptr_t)(X)) # define SQLITE_PTR_TO_INT(X) ((int)(intptr_t)(X)) #else /* Generates a warning - but it always works */ # define SQLITE_INT_TO_PTR(X) ((void*)(X)) # define SQLITE_PTR_TO_INT(X) ((int)(X)) #endif /* ** A macro to hint to the compiler that a function should not be ** inlined. */ #if defined(__GNUC__) # define SQLITE_NOINLINE __attribute__((noinline)) #elif defined(_MSC_VER) && _MSC_VER>=1310 # define SQLITE_NOINLINE __declspec(noinline) #else # define SQLITE_NOINLINE #endif /* ** Make sure that the compiler intrinsics we desire are enabled when ** compiling with an appropriate version of MSVC. */ #if defined(_MSC_VER) && _MSC_VER>=1300 # if !defined(_WIN32_WCE) # include # pragma intrinsic(_byteswap_ushort) # pragma intrinsic(_byteswap_ulong) # else # include # endif #endif /* ** The SQLITE_THREADSAFE macro must be defined as 0, 1, or 2. ** 0 means mutexes are permanently disable and the library is never ** threadsafe. 1 means the library is serialized which is the highest ** level of threadsafety. 2 means the library is multithreaded - multiple ** threads can use SQLite as long as no two threads try to use the same ** database connection at the same time. ** ** Older versions of SQLite used an optional THREADSAFE macro. ** We support that for legacy. */ #if !defined(SQLITE_THREADSAFE) # if defined(THREADSAFE) # define SQLITE_THREADSAFE THREADSAFE # else # define SQLITE_THREADSAFE 1 /* IMP: R-07272-22309 */ # endif #endif /* ** Powersafe overwrite is on by default. But can be turned off using ** the -DSQLITE_POWERSAFE_OVERWRITE=0 command-line option. */ #ifndef SQLITE_POWERSAFE_OVERWRITE # define SQLITE_POWERSAFE_OVERWRITE 1 #endif /* ** EVIDENCE-OF: R-25715-37072 Memory allocation statistics are enabled by ** default unless SQLite is compiled with SQLITE_DEFAULT_MEMSTATUS=0 in ** which case memory allocation statistics are disabled by default. */ #if !defined(SQLITE_DEFAULT_MEMSTATUS) # define SQLITE_DEFAULT_MEMSTATUS 1 #endif /* ** Exactly one of the following macros must be defined in order to ** specify which memory allocation subsystem to use. ** ** SQLITE_SYSTEM_MALLOC // Use normal system malloc() ** SQLITE_WIN32_MALLOC // Use Win32 native heap API ** SQLITE_ZERO_MALLOC // Use a stub allocator that always fails ** SQLITE_MEMDEBUG // Debugging version of system malloc() ** ** On Windows, if the SQLITE_WIN32_MALLOC_VALIDATE macro is defined and the ** assert() macro is enabled, each call into the Win32 native heap subsystem ** will cause HeapValidate to be called. If heap validation should fail, an ** assertion will be triggered. 
** ** If none of the above are defined, then set SQLITE_SYSTEM_MALLOC as ** the default. */ #if defined(SQLITE_SYSTEM_MALLOC) \ + defined(SQLITE_WIN32_MALLOC) \ + defined(SQLITE_ZERO_MALLOC) \ + defined(SQLITE_MEMDEBUG)>1 # error "Two or more of the following compile-time configuration options\ are defined but at most one is allowed:\ SQLITE_SYSTEM_MALLOC, SQLITE_WIN32_MALLOC, SQLITE_MEMDEBUG,\ SQLITE_ZERO_MALLOC" #endif #if defined(SQLITE_SYSTEM_MALLOC) \ + defined(SQLITE_WIN32_MALLOC) \ + defined(SQLITE_ZERO_MALLOC) \ + defined(SQLITE_MEMDEBUG)==0 # define SQLITE_SYSTEM_MALLOC 1 #endif /* ** If SQLITE_MALLOC_SOFT_LIMIT is not zero, then try to keep the ** sizes of memory allocations below this value where possible. */ #if !defined(SQLITE_MALLOC_SOFT_LIMIT) # define SQLITE_MALLOC_SOFT_LIMIT 1024 #endif /* ** We need to define _XOPEN_SOURCE as follows in order to enable ** recursive mutexes on most Unix systems and fchmod() on OpenBSD. ** But _XOPEN_SOURCE define causes problems for Mac OS X, so omit ** it. */ #if !defined(_XOPEN_SOURCE) && !defined(__DARWIN__) && !defined(__APPLE__) # define _XOPEN_SOURCE 600 #endif /* ** NDEBUG and SQLITE_DEBUG are opposites. It should always be true that ** defined(NDEBUG)==!defined(SQLITE_DEBUG). If this is not currently true, ** make it true by defining or undefining NDEBUG. ** ** Setting NDEBUG makes the code smaller and faster by disabling the ** assert() statements in the code. So we want the default action ** to be for NDEBUG to be set and NDEBUG to be undefined only if SQLITE_DEBUG ** is set. Thus NDEBUG becomes an opt-in rather than an opt-out ** feature. */ #if !defined(NDEBUG) && !defined(SQLITE_DEBUG) # define NDEBUG 1 #endif #if defined(NDEBUG) && defined(SQLITE_DEBUG) # undef NDEBUG #endif /* ** Enable SQLITE_ENABLE_EXPLAIN_COMMENTS if SQLITE_DEBUG is turned on. */ #if !defined(SQLITE_ENABLE_EXPLAIN_COMMENTS) && defined(SQLITE_DEBUG) # define SQLITE_ENABLE_EXPLAIN_COMMENTS 1 #endif /* ** The testcase() macro is used to aid in coverage testing. When ** doing coverage testing, the condition inside the argument to ** testcase() must be evaluated both true and false in order to ** get full branch coverage. The testcase() macro is inserted ** to help ensure adequate test coverage in places where simple ** condition/decision coverage is inadequate. For example, testcase() ** can be used to make sure boundary values are tested. For ** bitmask tests, testcase() can be used to make sure each bit ** is significant and used at least once. On switch statements ** where multiple cases go to the same block of code, testcase() ** can insure that all cases are evaluated. ** */ #ifdef SQLITE_COVERAGE_TEST SQLITE_PRIVATE void sqlite3Coverage(int); # define testcase(X) if( X ){ sqlite3Coverage(__LINE__); } #else # define testcase(X) #endif /* ** The TESTONLY macro is used to enclose variable declarations or ** other bits of code that are needed to support the arguments ** within testcase() and assert() macros. */ #if !defined(NDEBUG) || defined(SQLITE_COVERAGE_TEST) # define TESTONLY(X) X #else # define TESTONLY(X) #endif /* ** Sometimes we need a small amount of code such as a variable initialization ** to setup for a later assert() statement. We do not want this code to ** appear when assert() is disabled. The following macro is therefore ** used to contain that setup code. The "VVA" acronym stands for ** "Verification, Validation, and Accreditation". In other words, the ** code within VVA_ONLY() will only run during verification processes. 
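**
** A hedged sketch of the idiom (Stack, Item, stack_depth() and
** stack_push() are hypothetical; only the macro usage pattern matters):
**
**   static void push_all(Stack *pStk, Item *aItem, int nItem){
**     int i;
**     VVA_ONLY( int nBefore = stack_depth(pStk); )
**     for(i=0; i<nItem; i++) stack_push(pStk, &aItem[i]);
**     assert( stack_depth(pStk)==nBefore+nItem );
**   }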
*/ #ifndef NDEBUG # define VVA_ONLY(X) X #else # define VVA_ONLY(X) #endif /* ** The ALWAYS and NEVER macros surround boolean expressions which ** are intended to always be true or false, respectively. Such ** expressions could be omitted from the code completely. But they ** are included in a few cases in order to enhance the resilience ** of SQLite to unexpected behavior - to make the code "self-healing" ** or "ductile" rather than being "brittle" and crashing at the first ** hint of unplanned behavior. ** ** In other words, ALWAYS and NEVER are added for defensive code. ** ** When doing coverage testing ALWAYS and NEVER are hard-coded to ** be true and false so that the unreachable code they specify will ** not be counted as untested code. */ #if defined(SQLITE_COVERAGE_TEST) # define ALWAYS(X) (1) # define NEVER(X) (0) #elif !defined(NDEBUG) # define ALWAYS(X) ((X)?1:(assert(0),0)) # define NEVER(X) ((X)?(assert(0),1):0) #else # define ALWAYS(X) (X) # define NEVER(X) (X) #endif /* ** Declarations used for tracing the operating system interfaces. */ #if defined(SQLITE_FORCE_OS_TRACE) || defined(SQLITE_TEST) || \ (defined(SQLITE_DEBUG) && SQLITE_OS_WIN) extern int sqlite3OSTrace; # define OSTRACE(X) if( sqlite3OSTrace ) sqlite3DebugPrintf X # define SQLITE_HAVE_OS_TRACE #else # define OSTRACE(X) # undef SQLITE_HAVE_OS_TRACE #endif /* ** Is the sqlite3ErrName() function needed in the build? Currently, ** it is needed by "mutex_w32.c" (when debugging), "os_win.c" (when ** OSTRACE is enabled), and by several "test*.c" files (which are ** compiled using SQLITE_TEST). */ #if defined(SQLITE_HAVE_OS_TRACE) || defined(SQLITE_TEST) || \ (defined(SQLITE_DEBUG) && SQLITE_OS_WIN) # define SQLITE_NEED_ERR_NAME #else # undef SQLITE_NEED_ERR_NAME #endif /* ** Return true (non-zero) if the input is an integer that is too large ** to fit in 32-bits. This macro is used inside of various testcase() ** macros to verify that we have tested SQLite for large-file support. */ #define IS_BIG_INT(X) (((X)&~(i64)0xffffffff)!=0) /* ** The macro unlikely() is a hint that surrounds a boolean ** expression that is usually false. Macro likely() surrounds ** a boolean expression that is usually true. These hints could, ** in theory, be used by the compiler to generate better code, but ** currently they are just comments for human readers. */ #define likely(X) (X) #define unlikely(X) (X) /************** Include hash.h in the middle of sqliteInt.h ******************/ /************** Begin file hash.h ********************************************/ /* ** 2001 September 22 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** This is the header file for the generic hash-table implementation ** used in SQLite. */ #ifndef _SQLITE_HASH_H_ #define _SQLITE_HASH_H_ /* Forward declarations of structures. */ typedef struct Hash Hash; typedef struct HashElem HashElem; /* A complete hash table is an instance of the following structure. ** The internals of this structure are intended to be opaque -- client ** code should not attempt to access or modify the fields of this structure ** directly. Change this structure only by using the routines below. 
** However, some of the "procedures" and "functions" for modifying and ** accessing this structure are really macros, so we can't really make ** this structure opaque. ** ** All elements of the hash table are on a single doubly-linked list. ** Hash.first points to the head of this list. ** ** There are Hash.htsize buckets. Each bucket points to a spot in ** the global doubly-linked list. The contents of the bucket are the ** element pointed to plus the next _ht.count-1 elements in the list. ** ** Hash.htsize and Hash.ht may be zero. In that case lookup is done ** by a linear search of the global list. For small tables, the ** Hash.ht table is never allocated because if there are few elements ** in the table, it is faster to do a linear search than to manage ** the hash table. */ struct Hash { unsigned int htsize; /* Number of buckets in the hash table */ unsigned int count; /* Number of entries in this table */ HashElem *first; /* The first element of the array */ struct _ht { /* the hash table */ int count; /* Number of entries with this hash */ HashElem *chain; /* Pointer to first entry with this hash */ } *ht; }; /* Each element in the hash table is an instance of the following ** structure. All elements are stored on a single doubly-linked list. ** ** Again, this structure is intended to be opaque, but it can't really ** be opaque because it is used by macros. */ struct HashElem { HashElem *next, *prev; /* Next and previous elements in the table */ void *data; /* Data associated with this element */ const char *pKey; /* Key associated with this element */ }; /* ** Access routines. To delete, insert a NULL pointer. */ SQLITE_PRIVATE void sqlite3HashInit(Hash*); SQLITE_PRIVATE void *sqlite3HashInsert(Hash*, const char *pKey, void *pData); SQLITE_PRIVATE void *sqlite3HashFind(const Hash*, const char *pKey); SQLITE_PRIVATE void sqlite3HashClear(Hash*); /* ** Macros for looping over all elements of a hash table. The idiom is ** like this: ** ** Hash h; ** HashElem *p; ** ... 
** for(p=sqliteHashFirst(&h); p; p=sqliteHashNext(p)){ ** SomeStructure *pData = sqliteHashData(p); ** // do something with pData ** } */ #define sqliteHashFirst(H) ((H)->first) #define sqliteHashNext(E) ((E)->next) #define sqliteHashData(E) ((E)->data) /* #define sqliteHashKey(E) ((E)->pKey) // NOT USED */ /* #define sqliteHashKeysize(E) ((E)->nKey) // NOT USED */ /* ** Number of entries in a hash table */ /* #define sqliteHashCount(H) ((H)->count) // NOT USED */ #endif /* _SQLITE_HASH_H_ */ /************** End of hash.h ************************************************/ /************** Continuing where we left off in sqliteInt.h ******************/ /************** Include parse.h in the middle of sqliteInt.h *****************/ /************** Begin file parse.h *******************************************/ #define TK_SEMI 1 #define TK_EXPLAIN 2 #define TK_QUERY 3 #define TK_PLAN 4 #define TK_BEGIN 5 #define TK_TRANSACTION 6 #define TK_DEFERRED 7 #define TK_IMMEDIATE 8 #define TK_EXCLUSIVE 9 #define TK_COMMIT 10 #define TK_END 11 #define TK_ROLLBACK 12 #define TK_SAVEPOINT 13 #define TK_RELEASE 14 #define TK_TO 15 #define TK_TABLE 16 #define TK_CREATE 17 #define TK_IF 18 #define TK_NOT 19 #define TK_EXISTS 20 #define TK_TEMP 21 #define TK_LP 22 #define TK_RP 23 #define TK_AS 24 #define TK_WITHOUT 25 #define TK_COMMA 26 #define TK_ID 27 #define TK_INDEXED 28 #define TK_ABORT 29 #define TK_ACTION 30 #define TK_AFTER 31 #define TK_ANALYZE 32 #define TK_ASC 33 #define TK_ATTACH 34 #define TK_BEFORE 35 #define TK_BY 36 #define TK_CASCADE 37 #define TK_CAST 38 #define TK_COLUMNKW 39 #define TK_CONFLICT 40 #define TK_DATABASE 41 #define TK_DESC 42 #define TK_DETACH 43 #define TK_EACH 44 #define TK_FAIL 45 #define TK_FOR 46 #define TK_IGNORE 47 #define TK_INITIALLY 48 #define TK_INSTEAD 49 #define TK_LIKE_KW 50 #define TK_MATCH 51 #define TK_NO 52 #define TK_KEY 53 #define TK_OF 54 #define TK_OFFSET 55 #define TK_PRAGMA 56 #define TK_RAISE 57 #define TK_RECURSIVE 58 #define TK_REPLACE 59 #define TK_RESTRICT 60 #define TK_ROW 61 #define TK_TRIGGER 62 #define TK_VACUUM 63 #define TK_VIEW 64 #define TK_VIRTUAL 65 #define TK_WITH 66 #define TK_REINDEX 67 #define TK_RENAME 68 #define TK_CTIME_KW 69 #define TK_ANY 70 #define TK_OR 71 #define TK_AND 72 #define TK_IS 73 #define TK_BETWEEN 74 #define TK_IN 75 #define TK_ISNULL 76 #define TK_NOTNULL 77 #define TK_NE 78 #define TK_EQ 79 #define TK_GT 80 #define TK_LE 81 #define TK_LT 82 #define TK_GE 83 #define TK_ESCAPE 84 #define TK_BITAND 85 #define TK_BITOR 86 #define TK_LSHIFT 87 #define TK_RSHIFT 88 #define TK_PLUS 89 #define TK_MINUS 90 #define TK_STAR 91 #define TK_SLASH 92 #define TK_REM 93 #define TK_CONCAT 94 #define TK_COLLATE 95 #define TK_BITNOT 96 #define TK_STRING 97 #define TK_JOIN_KW 98 #define TK_CONSTRAINT 99 #define TK_DEFAULT 100 #define TK_NULL 101 #define TK_PRIMARY 102 #define TK_UNIQUE 103 #define TK_CHECK 104 #define TK_REFERENCES 105 #define TK_AUTOINCR 106 #define TK_ON 107 #define TK_INSERT 108 #define TK_DELETE 109 #define TK_UPDATE 110 #define TK_SET 111 #define TK_DEFERRABLE 112 #define TK_FOREIGN 113 #define TK_DROP 114 #define TK_UNION 115 #define TK_ALL 116 #define TK_EXCEPT 117 #define TK_INTERSECT 118 #define TK_SELECT 119 #define TK_VALUES 120 #define TK_DISTINCT 121 #define TK_DOT 122 #define TK_FROM 123 #define TK_JOIN 124 #define TK_USING 125 #define TK_ORDER 126 #define TK_GROUP 127 #define TK_HAVING 128 #define TK_LIMIT 129 #define TK_WHERE 130 #define TK_INTO 131 #define TK_INTEGER 132 #define TK_FLOAT 133 #define 
TK_BLOB 134 #define TK_VARIABLE 135 #define TK_CASE 136 #define TK_WHEN 137 #define TK_THEN 138 #define TK_ELSE 139 #define TK_INDEX 140 #define TK_ALTER 141 #define TK_ADD 142 #define TK_TO_TEXT 143 #define TK_TO_BLOB 144 #define TK_TO_NUMERIC 145 #define TK_TO_INT 146 #define TK_TO_REAL 147 #define TK_ISNOT 148 #define TK_END_OF_FILE 149 #define TK_ILLEGAL 150 #define TK_SPACE 151 #define TK_UNCLOSED_STRING 152 #define TK_FUNCTION 153 #define TK_COLUMN 154 #define TK_AGG_FUNCTION 155 #define TK_AGG_COLUMN 156 #define TK_UMINUS 157 #define TK_UPLUS 158 #define TK_REGISTER 159 /************** End of parse.h ***********************************************/ /************** Continuing where we left off in sqliteInt.h ******************/ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include <stddef.h> /* ** If compiling for a processor that lacks floating point support, ** substitute integer for floating-point */ #ifdef SQLITE_OMIT_FLOATING_POINT # define double sqlite_int64 # define float sqlite_int64 # define LONGDOUBLE_TYPE sqlite_int64 # ifndef SQLITE_BIG_DBL # define SQLITE_BIG_DBL (((sqlite3_int64)1)<<50) # endif # define SQLITE_OMIT_DATETIME_FUNCS 1 # define SQLITE_OMIT_TRACE 1 # undef SQLITE_MIXED_ENDIAN_64BIT_FLOAT # undef SQLITE_HAVE_ISNAN #endif #ifndef SQLITE_BIG_DBL # define SQLITE_BIG_DBL (1e99) #endif /* ** OMIT_TEMPDB is set to 1 if SQLITE_OMIT_TEMPDB is defined, or 0 ** afterward. Having this macro allows us to cause the C compiler ** to omit code used by TEMP tables without messy #ifndef statements. */ #ifdef SQLITE_OMIT_TEMPDB #define OMIT_TEMPDB 1 #else #define OMIT_TEMPDB 0 #endif /* ** The "file format" number is an integer that is incremented whenever ** the VDBE-level file format changes. The following macros define the ** default file format for new databases and the maximum file format ** that the library can read. */ #define SQLITE_MAX_FILE_FORMAT 4 #ifndef SQLITE_DEFAULT_FILE_FORMAT # define SQLITE_DEFAULT_FILE_FORMAT 4 #endif /* ** Determine whether triggers are recursive by default. This can be ** changed at run-time using a pragma. */ #ifndef SQLITE_DEFAULT_RECURSIVE_TRIGGERS # define SQLITE_DEFAULT_RECURSIVE_TRIGGERS 0 #endif /* ** Provide a default value for SQLITE_TEMP_STORE in case it is not specified ** on the command-line */ #ifndef SQLITE_TEMP_STORE # define SQLITE_TEMP_STORE 1 # define SQLITE_TEMP_STORE_xc 1 /* Exclude from ctime.c */ #endif /* ** If no value has been provided for SQLITE_MAX_WORKER_THREADS, or if ** SQLITE_TEMP_STORE is set to 3 (never use temporary files), set it ** to zero. */ #if SQLITE_TEMP_STORE==3 || SQLITE_THREADSAFE==0 # undef SQLITE_MAX_WORKER_THREADS # define SQLITE_MAX_WORKER_THREADS 0 #endif #ifndef SQLITE_MAX_WORKER_THREADS # define SQLITE_MAX_WORKER_THREADS 8 #endif #ifndef SQLITE_DEFAULT_WORKER_THREADS # define SQLITE_DEFAULT_WORKER_THREADS 0 #endif #if SQLITE_DEFAULT_WORKER_THREADS>SQLITE_MAX_WORKER_THREADS # undef SQLITE_MAX_WORKER_THREADS # define SQLITE_MAX_WORKER_THREADS SQLITE_DEFAULT_WORKER_THREADS #endif /* ** The default initial allocation for the pagecache when using separate ** pagecaches for each database connection. A positive number is the ** number of pages. A negative number N means that a buffer ** of -1024*N bytes is allocated and used for as many pages as it will hold. */ #ifndef SQLITE_DEFAULT_PCACHE_INITSZ # define SQLITE_DEFAULT_PCACHE_INITSZ 100 #endif /* ** GCC does not define the offsetof() macro so we'll have to do it ** ourselves.
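**
** For example (illustrative only), offsetof(HashElem, data) evaluates to the
** byte offset of the data field within the HashElem structure defined in
** hash.h above; the fallback definition below computes the same value by
** casting a null pointer.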
*/ #ifndef offsetof #define offsetof(STRUCTURE,FIELD) ((int)((char*)&((STRUCTURE*)0)->FIELD)) #endif /* ** Macros to compute minimum and maximum of two numbers. */ #define MIN(A,B) ((A)<(B)?(A):(B)) #define MAX(A,B) ((A)>(B)?(A):(B)) /* ** Swap two objects of type TYPE. */ #define SWAP(TYPE,A,B) {TYPE t=A; A=B; B=t;} /* ** Check to see if this machine uses EBCDIC. (Yes, believe it or ** not, there are still machines out there that use EBCDIC.) */ #if 'A' == '\301' # define SQLITE_EBCDIC 1 #else # define SQLITE_ASCII 1 #endif /* ** Integers of known sizes. These typedefs might change for architectures ** where the sizes very. Preprocessor macros are available so that the ** types can be conveniently redefined at compile-type. Like this: ** ** cc '-DUINTPTR_TYPE=long long int' ... */ #ifndef UINT32_TYPE # ifdef HAVE_UINT32_T # define UINT32_TYPE uint32_t # else # define UINT32_TYPE unsigned int # endif #endif #ifndef UINT16_TYPE # ifdef HAVE_UINT16_T # define UINT16_TYPE uint16_t # else # define UINT16_TYPE unsigned short int # endif #endif #ifndef INT16_TYPE # ifdef HAVE_INT16_T # define INT16_TYPE int16_t # else # define INT16_TYPE short int # endif #endif #ifndef UINT8_TYPE # ifdef HAVE_UINT8_T # define UINT8_TYPE uint8_t # else # define UINT8_TYPE unsigned char # endif #endif #ifndef INT8_TYPE # ifdef HAVE_INT8_T # define INT8_TYPE int8_t # else # define INT8_TYPE signed char # endif #endif #ifndef LONGDOUBLE_TYPE # define LONGDOUBLE_TYPE long double #endif typedef sqlite_int64 i64; /* 8-byte signed integer */ typedef sqlite_uint64 u64; /* 8-byte unsigned integer */ typedef UINT32_TYPE u32; /* 4-byte unsigned integer */ typedef UINT16_TYPE u16; /* 2-byte unsigned integer */ typedef INT16_TYPE i16; /* 2-byte signed integer */ typedef UINT8_TYPE u8; /* 1-byte unsigned integer */ typedef INT8_TYPE i8; /* 1-byte signed integer */ /* ** SQLITE_MAX_U32 is a u64 constant that is the maximum u64 value ** that can be stored in a u32 without loss of data. The value ** is 0x00000000ffffffff. But because of quirks of some compilers, we ** have to specify the value in the less intuitive manner shown: */ #define SQLITE_MAX_U32 ((((u64)1)<<32)-1) /* ** The datatype used to store estimates of the number of rows in a ** table or index. This is an unsigned integer type. For 99.9% of ** the world, a 32-bit integer is sufficient. But a 64-bit integer ** can be used at compile-time if desired. */ #ifdef SQLITE_64BIT_STATS typedef u64 tRowcnt; /* 64-bit only if requested at compile-time */ #else typedef u32 tRowcnt; /* 32-bit is the default */ #endif /* ** Estimated quantities used for query planning are stored as 16-bit ** logarithms. For quantity X, the value stored is 10*log2(X). This ** gives a possible range of values of approximately 1.0e986 to 1e-986. ** But the allowed values are "grainy". Not every value is representable. ** For example, quantities 16 and 17 are both represented by a LogEst ** of 40. However, since LogEst quantities are suppose to be estimates, ** not exact values, this imprecision is not a problem. ** ** "LogEst" is short for "Logarithmic Estimate". ** ** Examples: ** 1 -> 0 20 -> 43 10000 -> 132 ** 2 -> 10 25 -> 46 25000 -> 146 ** 3 -> 16 100 -> 66 1000000 -> 199 ** 4 -> 20 1000 -> 99 1048576 -> 200 ** 10 -> 33 1024 -> 100 4294967296 -> 320 ** ** The LogEst can be negative to indicate fractional values. 
** Examples: ** ** 0.5 -> -10 0.1 -> -33 0.0625 -> -40 */ typedef INT16_TYPE LogEst; /* ** Set the SQLITE_PTRSIZE macro to the number of bytes in a pointer */ #ifndef SQLITE_PTRSIZE # if defined(__SIZEOF_POINTER__) # define SQLITE_PTRSIZE __SIZEOF_POINTER__ # elif defined(i386) || defined(__i386__) || defined(_M_IX86) || \ defined(_M_ARM) || defined(__arm__) || defined(__x86) # define SQLITE_PTRSIZE 4 # else # define SQLITE_PTRSIZE 8 # endif #endif /* ** Macros to determine whether the machine is big or little endian, ** and whether or not that determination is run-time or compile-time. ** ** For best performance, an attempt is made to guess at the byte-order ** using C-preprocessor macros. If that is unsuccessful, or if ** -DSQLITE_RUNTIME_BYTEORDER=1 is set, then byte-order is determined ** at run-time. */ #ifdef SQLITE_AMALGAMATION SQLITE_PRIVATE const int sqlite3one = 1; #else SQLITE_PRIVATE const int sqlite3one; #endif #if (defined(i386) || defined(__i386__) || defined(_M_IX86) || \ defined(__x86_64) || defined(__x86_64__) || defined(_M_X64) || \ defined(_M_AMD64) || defined(_M_ARM) || defined(__x86) || \ defined(__arm__)) && !defined(SQLITE_RUNTIME_BYTEORDER) # define SQLITE_BYTEORDER 1234 # define SQLITE_BIGENDIAN 0 # define SQLITE_LITTLEENDIAN 1 # define SQLITE_UTF16NATIVE SQLITE_UTF16LE #endif #if (defined(sparc) || defined(__ppc__)) \ && !defined(SQLITE_RUNTIME_BYTEORDER) # define SQLITE_BYTEORDER 4321 # define SQLITE_BIGENDIAN 1 # define SQLITE_LITTLEENDIAN 0 # define SQLITE_UTF16NATIVE SQLITE_UTF16BE #endif #if !defined(SQLITE_BYTEORDER) # define SQLITE_BYTEORDER 0 /* 0 means "unknown at compile-time" */ # define SQLITE_BIGENDIAN (*(char *)(&sqlite3one)==0) # define SQLITE_LITTLEENDIAN (*(char *)(&sqlite3one)==1) # define SQLITE_UTF16NATIVE (SQLITE_BIGENDIAN?SQLITE_UTF16BE:SQLITE_UTF16LE) #endif /* ** Constants for the largest and smallest possible 64-bit signed integers. ** These macros are designed to work correctly on both 32-bit and 64-bit ** compilers. */ #define LARGEST_INT64 (0xffffffff|(((i64)0x7fffffff)<<32)) #define SMALLEST_INT64 (((i64)-1) - LARGEST_INT64) /* ** Round up a number to the next larger multiple of 8. This is used ** to force 8-byte alignment on 64-bit architectures. */ #define ROUND8(x) (((x)+7)&~7) /* ** Round down to the nearest multiple of 8 */ #define ROUNDDOWN8(x) ((x)&~7) /* ** Assert that the pointer X is aligned to an 8-byte boundary. This ** macro is used only within assert() to verify that the code gets ** all alignment restrictions correct. ** ** Except, if SQLITE_4_BYTE_ALIGNED_MALLOC is defined, then the ** underlying malloc() implementation might return us 4-byte aligned ** pointers. In that case, only verify 4-byte alignment. 
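**
** For example, ROUND8(13)==16 and ROUNDDOWN8(13)==8.  An illustrative use of
** the alignment check (not code from this file; pBuf stands for any buffer
** returned by the allocator):
**
**   assert( EIGHT_BYTE_ALIGNMENT(pBuf) );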
*/ #ifdef SQLITE_4_BYTE_ALIGNED_MALLOC # define EIGHT_BYTE_ALIGNMENT(X) ((((char*)(X) - (char*)0)&3)==0) #else # define EIGHT_BYTE_ALIGNMENT(X) ((((char*)(X) - (char*)0)&7)==0) #endif /* ** Disable MMAP on platforms where it is known to not work */ #if defined(__OpenBSD__) || defined(__QNXNTO__) # undef SQLITE_MAX_MMAP_SIZE # define SQLITE_MAX_MMAP_SIZE 0 #endif /* ** Default maximum size of memory used by memory-mapped I/O in the VFS */ #ifdef __APPLE__ # include # if TARGET_OS_IPHONE # undef SQLITE_MAX_MMAP_SIZE # define SQLITE_MAX_MMAP_SIZE 0 # endif #endif #ifndef SQLITE_MAX_MMAP_SIZE # if defined(__linux__) \ || defined(_WIN32) \ || (defined(__APPLE__) && defined(__MACH__)) \ || defined(__sun) \ || defined(__FreeBSD__) \ || defined(__DragonFly__) # define SQLITE_MAX_MMAP_SIZE 0x7fff0000 /* 2147418112 */ # else # define SQLITE_MAX_MMAP_SIZE 0 # endif # define SQLITE_MAX_MMAP_SIZE_xc 1 /* exclude from ctime.c */ #endif /* ** The default MMAP_SIZE is zero on all platforms. Or, even if a larger ** default MMAP_SIZE is specified at compile-time, make sure that it does ** not exceed the maximum mmap size. */ #ifndef SQLITE_DEFAULT_MMAP_SIZE # define SQLITE_DEFAULT_MMAP_SIZE 0 # define SQLITE_DEFAULT_MMAP_SIZE_xc 1 /* Exclude from ctime.c */ #endif #if SQLITE_DEFAULT_MMAP_SIZE>SQLITE_MAX_MMAP_SIZE # undef SQLITE_DEFAULT_MMAP_SIZE # define SQLITE_DEFAULT_MMAP_SIZE SQLITE_MAX_MMAP_SIZE #endif /* ** Only one of SQLITE_ENABLE_STAT3 or SQLITE_ENABLE_STAT4 can be defined. ** Priority is given to SQLITE_ENABLE_STAT4. If either are defined, also ** define SQLITE_ENABLE_STAT3_OR_STAT4 */ #ifdef SQLITE_ENABLE_STAT4 # undef SQLITE_ENABLE_STAT3 # define SQLITE_ENABLE_STAT3_OR_STAT4 1 #elif SQLITE_ENABLE_STAT3 # define SQLITE_ENABLE_STAT3_OR_STAT4 1 #elif SQLITE_ENABLE_STAT3_OR_STAT4 # undef SQLITE_ENABLE_STAT3_OR_STAT4 #endif /* ** SELECTTRACE_ENABLED will be either 1 or 0 depending on whether or not ** the Select query generator tracing logic is turned on. */ #if defined(SQLITE_DEBUG) || defined(SQLITE_ENABLE_SELECTTRACE) # define SELECTTRACE_ENABLED 1 #else # define SELECTTRACE_ENABLED 0 #endif /* ** An instance of the following structure is used to store the busy-handler ** callback for a given sqlite handle. ** ** The sqlite.busyHandler member of the sqlite struct contains the busy ** callback for the database handle. Each pager opened via the sqlite ** handle is passed a pointer to sqlite.busyHandler. The busy-handler ** callback is currently invoked only from within pager.c. */ typedef struct BusyHandler BusyHandler; struct BusyHandler { int (*xFunc)(void *,int); /* The busy callback */ void *pArg; /* First arg to busy callback */ int nBusy; /* Incremented with each busy call */ }; /* ** Name of the master database table. The master database table ** is a special table that holds the names and attributes of all ** user tables and indices. */ #define MASTER_NAME "sqlite_master" #define TEMP_MASTER_NAME "sqlite_temp_master" /* ** The root-page of the master database table. */ #define MASTER_ROOT 1 /* ** The name of the schema table. */ #define SCHEMA_TABLE(x) ((!OMIT_TEMPDB)&&(x==1)?TEMP_MASTER_NAME:MASTER_NAME) /* ** A convenience macro that returns the number of elements in ** an array. */ #define ArraySize(X) ((int)(sizeof(X)/sizeof(X[0]))) /* ** Determine if the argument is a power of two */ #define IsPowerOfTwo(X) (((X)&((X)-1))==0) /* ** The following value as a destructor means to use sqlite3DbFree(). 
** The sqlite3DbFree() routine requires two parameters instead of the ** one parameter that destructors normally want. So we have to introduce ** this magic value that the code knows to handle differently. Any ** pointer will work here as long as it is distinct from SQLITE_STATIC ** and SQLITE_TRANSIENT. */ #define SQLITE_DYNAMIC ((sqlite3_destructor_type)sqlite3MallocSize) /* ** When SQLITE_OMIT_WSD is defined, it means that the target platform does ** not support Writable Static Data (WSD) such as global and static variables. ** All variables must either be on the stack or dynamically allocated from ** the heap. When WSD is unsupported, the variable declarations scattered ** throughout the SQLite code must become constants instead. The SQLITE_WSD ** macro is used for this purpose. And instead of referencing the variable ** directly, we use its constant as a key to lookup the run-time allocated ** buffer that holds real variable. The constant is also the initializer ** for the run-time allocated buffer. ** ** In the usual case where WSD is supported, the SQLITE_WSD and GLOBAL ** macros become no-ops and have zero performance impact. */ #ifdef SQLITE_OMIT_WSD #define SQLITE_WSD const #define GLOBAL(t,v) (*(t*)sqlite3_wsd_find((void*)&(v), sizeof(v))) #define sqlite3GlobalConfig GLOBAL(struct Sqlite3Config, sqlite3Config) SQLITE_API int SQLITE_STDCALL sqlite3_wsd_init(int N, int J); SQLITE_API void *SQLITE_STDCALL sqlite3_wsd_find(void *K, int L); #else #define SQLITE_WSD #define GLOBAL(t,v) v #define sqlite3GlobalConfig sqlite3Config #endif /* ** The following macros are used to suppress compiler warnings and to ** make it clear to human readers when a function parameter is deliberately ** left unused within the body of a function. This usually happens when ** a function is called via a function pointer. For example the ** implementation of an SQL aggregate step callback may not use the ** parameter indicating the number of arguments passed to the aggregate, ** if it knows that this is enforced elsewhere. ** ** When a function parameter is not used at all within the body of a function, ** it is generally named "NotUsed" or "NotUsed2" to make things even clearer. ** However, these macros may also be used to suppress warnings related to ** parameters that may or may not be used depending on compilation options. ** For example those parameters only used in assert() statements. In these ** cases the parameters are named as per the usual conventions. 
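**
** An illustrative sketch (not code from this file) of a callback matching the
** BusyHandler function pointer declared above, where the first argument is
** required by the signature but deliberately unused:
**
**   static int xBusyCallback(void *pNotUsed, int nBusy){
**     UNUSED_PARAMETER(pNotUsed);
**     return nBusy<10;           // retry up to ten times
**   }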
*/ #define UNUSED_PARAMETER(x) (void)(x) #define UNUSED_PARAMETER2(x,y) UNUSED_PARAMETER(x),UNUSED_PARAMETER(y) /* ** Forward references to structures */ typedef struct AggInfo AggInfo; typedef struct AuthContext AuthContext; typedef struct AutoincInfo AutoincInfo; typedef struct Bitvec Bitvec; typedef struct CollSeq CollSeq; typedef struct Column Column; typedef struct Db Db; typedef struct Schema Schema; typedef struct Expr Expr; typedef struct ExprList ExprList; typedef struct ExprSpan ExprSpan; typedef struct FKey FKey; typedef struct FuncDestructor FuncDestructor; typedef struct FuncDef FuncDef; typedef struct FuncDefHash FuncDefHash; typedef struct IdList IdList; typedef struct Index Index; typedef struct IndexSample IndexSample; typedef struct KeyClass KeyClass; typedef struct KeyInfo KeyInfo; typedef struct Lookaside Lookaside; typedef struct LookasideSlot LookasideSlot; typedef struct Module Module; typedef struct NameContext NameContext; typedef struct Parse Parse; typedef struct PrintfArguments PrintfArguments; typedef struct RowSet RowSet; typedef struct Savepoint Savepoint; typedef struct Select Select; typedef struct SQLiteThread SQLiteThread; typedef struct SelectDest SelectDest; typedef struct SrcList SrcList; typedef struct StrAccum StrAccum; typedef struct Table Table; typedef struct TableLock TableLock; typedef struct Token Token; typedef struct TreeView TreeView; typedef struct Trigger Trigger; typedef struct TriggerPrg TriggerPrg; typedef struct TriggerStep TriggerStep; typedef struct UnpackedRecord UnpackedRecord; typedef struct VTable VTable; typedef struct VtabCtx VtabCtx; typedef struct Walker Walker; typedef struct WhereInfo WhereInfo; typedef struct With With; /* ** Defer sourcing vdbe.h and btree.h until after the "u8" and ** "BusyHandler" typedefs. vdbe.h also requires a few of the opaque ** pointer types (i.e. FuncDef) defined above. */ /************** Include btree.h in the middle of sqliteInt.h *****************/ /************** Begin file btree.h *******************************************/ /* ** 2001 September 15 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** This header file defines the interface that the sqlite B-Tree file ** subsystem. See comments in the source code for a detailed description ** of what each interface routine does. */ #ifndef _BTREE_H_ #define _BTREE_H_ /* TODO: This definition is just included so other modules compile. It ** needs to be revisited. */ #define SQLITE_N_BTREE_META 16 /* ** If defined as non-zero, auto-vacuum is enabled by default. Otherwise ** it must be turned on for each database using "PRAGMA auto_vacuum = 1". 
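**
** For example, building with -DSQLITE_DEFAULT_AUTOVACUUM=1 (an illustrative
** compile-time setting) makes full auto-vacuum the default for newly created
** databases; the pragma above can still override the setting per database.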
*/ #ifndef SQLITE_DEFAULT_AUTOVACUUM #define SQLITE_DEFAULT_AUTOVACUUM 0 #endif #define BTREE_AUTOVACUUM_NONE 0 /* Do not do auto-vacuum */ #define BTREE_AUTOVACUUM_FULL 1 /* Do full auto-vacuum */ #define BTREE_AUTOVACUUM_INCR 2 /* Incremental vacuum */ /* ** Forward declarations of structure */ typedef struct Btree Btree; typedef struct BtCursor BtCursor; typedef struct BtShared BtShared; SQLITE_PRIVATE int sqlite3BtreeOpen( sqlite3_vfs *pVfs, /* VFS to use with this b-tree */ const char *zFilename, /* Name of database file to open */ sqlite3 *db, /* Associated database connection */ Btree **ppBtree, /* Return open Btree* here */ int flags, /* Flags */ int vfsFlags /* Flags passed through to VFS open */ ); /* The flags parameter to sqlite3BtreeOpen can be the bitwise or of the ** following values. ** ** NOTE: These values must match the corresponding PAGER_ values in ** pager.h. */ #define BTREE_OMIT_JOURNAL 1 /* Do not create or use a rollback journal */ #define BTREE_MEMORY 2 /* This is an in-memory DB */ #define BTREE_SINGLE 4 /* The file contains at most 1 b-tree */ #define BTREE_UNORDERED 8 /* Use of a hash implementation is OK */ SQLITE_PRIVATE int sqlite3BtreeClose(Btree*); SQLITE_PRIVATE int sqlite3BtreeSetCacheSize(Btree*,int); #if SQLITE_MAX_MMAP_SIZE>0 SQLITE_PRIVATE int sqlite3BtreeSetMmapLimit(Btree*,sqlite3_int64); #endif SQLITE_PRIVATE int sqlite3BtreeSetPagerFlags(Btree*,unsigned); SQLITE_PRIVATE int sqlite3BtreeSyncDisabled(Btree*); SQLITE_PRIVATE int sqlite3BtreeSetPageSize(Btree *p, int nPagesize, int nReserve, int eFix); SQLITE_PRIVATE int sqlite3BtreeGetPageSize(Btree*); SQLITE_PRIVATE int sqlite3BtreeMaxPageCount(Btree*,int); SQLITE_PRIVATE u32 sqlite3BtreeLastPage(Btree*); SQLITE_PRIVATE int sqlite3BtreeSecureDelete(Btree*,int); SQLITE_PRIVATE int sqlite3BtreeGetOptimalReserve(Btree*); SQLITE_PRIVATE int sqlite3BtreeGetReserveNoMutex(Btree *p); SQLITE_PRIVATE int sqlite3BtreeSetAutoVacuum(Btree *, int); SQLITE_PRIVATE int sqlite3BtreeGetAutoVacuum(Btree *); SQLITE_PRIVATE int sqlite3BtreeBeginTrans(Btree*,int); SQLITE_PRIVATE int sqlite3BtreeCommitPhaseOne(Btree*, const char *zMaster); SQLITE_PRIVATE int sqlite3BtreeCommitPhaseTwo(Btree*, int); SQLITE_PRIVATE int sqlite3BtreeCommit(Btree*); SQLITE_PRIVATE int sqlite3BtreeRollback(Btree*,int,int); SQLITE_PRIVATE int sqlite3BtreeBeginStmt(Btree*,int); SQLITE_PRIVATE int sqlite3BtreeCreateTable(Btree*, int*, int flags); SQLITE_PRIVATE int sqlite3BtreeIsInTrans(Btree*); SQLITE_PRIVATE int sqlite3BtreeIsInReadTrans(Btree*); SQLITE_PRIVATE int sqlite3BtreeIsInBackup(Btree*); SQLITE_PRIVATE void *sqlite3BtreeSchema(Btree *, int, void(*)(void *)); SQLITE_PRIVATE int sqlite3BtreeSchemaLocked(Btree *pBtree); SQLITE_PRIVATE int sqlite3BtreeLockTable(Btree *pBtree, int iTab, u8 isWriteLock); SQLITE_PRIVATE int sqlite3BtreeSavepoint(Btree *, int, int); SQLITE_PRIVATE const char *sqlite3BtreeGetFilename(Btree *); SQLITE_PRIVATE const char *sqlite3BtreeGetJournalname(Btree *); SQLITE_PRIVATE int sqlite3BtreeCopyFile(Btree *, Btree *); SQLITE_PRIVATE int sqlite3BtreeIncrVacuum(Btree *); /* The flags parameter to sqlite3BtreeCreateTable can be the bitwise OR ** of the flags shown below. ** ** Every SQLite table must have either BTREE_INTKEY or BTREE_BLOBKEY set. ** With BTREE_INTKEY, the table key is a 64-bit integer and arbitrary data ** is stored in the leaves. (BTREE_INTKEY is used for SQL tables.) With ** BTREE_BLOBKEY, the key is an arbitrary BLOB and no content is stored ** anywhere - the key is the content. 
(BTREE_BLOBKEY is used for SQL ** indices.) */ #define BTREE_INTKEY 1 /* Table has only 64-bit signed integer keys */ #define BTREE_BLOBKEY 2 /* Table has keys only - no data */ SQLITE_PRIVATE int sqlite3BtreeDropTable(Btree*, int, int*); SQLITE_PRIVATE int sqlite3BtreeClearTable(Btree*, int, int*); SQLITE_PRIVATE int sqlite3BtreeClearTableOfCursor(BtCursor*); SQLITE_PRIVATE int sqlite3BtreeTripAllCursors(Btree*, int, int); SQLITE_PRIVATE void sqlite3BtreeGetMeta(Btree *pBtree, int idx, u32 *pValue); SQLITE_PRIVATE int sqlite3BtreeUpdateMeta(Btree*, int idx, u32 value); SQLITE_PRIVATE int sqlite3BtreeNewDb(Btree *p); /* ** The second parameter to sqlite3BtreeGetMeta or sqlite3BtreeUpdateMeta ** should be one of the following values. The integer values are assigned ** to constants so that the offset of the corresponding field in an ** SQLite database header may be found using the following formula: ** ** offset = 36 + (idx * 4) ** ** For example, the free-page-count field is located at byte offset 36 of ** the database file header. The incr-vacuum-flag field is located at ** byte offset 64 (== 36+4*7). ** ** The BTREE_DATA_VERSION value is not really a value stored in the header. ** It is a read-only number computed by the pager. But we merge it with ** the header value access routines since its access pattern is the same. ** Call it a "virtual meta value". */ #define BTREE_FREE_PAGE_COUNT 0 #define BTREE_SCHEMA_VERSION 1 #define BTREE_FILE_FORMAT 2 #define BTREE_DEFAULT_CACHE_SIZE 3 #define BTREE_LARGEST_ROOT_PAGE 4 #define BTREE_TEXT_ENCODING 5 #define BTREE_USER_VERSION 6 #define BTREE_INCR_VACUUM 7 #define BTREE_APPLICATION_ID 8 #define BTREE_DATA_VERSION 15 /* A virtual meta-value */ /* ** Values that may be OR'd together to form the second argument of an ** sqlite3BtreeCursorHints() call. ** ** The BTREE_BULKLOAD flag is set on index cursors when the index is going ** to be filled with content that is already in sorted order. ** ** The BTREE_SEEK_EQ flag is set on cursors that will get OP_SeekGE or ** OP_SeekLE opcodes for a range search, but where the range of entries ** selected will all have the same key. In other words, the cursor will ** be used only for equality key searches. ** */ #define BTREE_BULKLOAD 0x00000001 /* Used to full index in sorted order */ #define BTREE_SEEK_EQ 0x00000002 /* EQ seeks only - no range seeks */ SQLITE_PRIVATE int sqlite3BtreeCursor( Btree*, /* BTree containing table to open */ int iTable, /* Index of root page */ int wrFlag, /* 1 for writing. 
0 for read-only */ struct KeyInfo*, /* First argument to compare function */ BtCursor *pCursor /* Space to write cursor structure */ ); SQLITE_PRIVATE int sqlite3BtreeCursorSize(void); SQLITE_PRIVATE void sqlite3BtreeCursorZero(BtCursor*); SQLITE_PRIVATE int sqlite3BtreeCloseCursor(BtCursor*); SQLITE_PRIVATE int sqlite3BtreeMovetoUnpacked( BtCursor*, UnpackedRecord *pUnKey, i64 intKey, int bias, int *pRes ); SQLITE_PRIVATE int sqlite3BtreeCursorHasMoved(BtCursor*); SQLITE_PRIVATE int sqlite3BtreeCursorRestore(BtCursor*, int*); SQLITE_PRIVATE int sqlite3BtreeDelete(BtCursor*); SQLITE_PRIVATE int sqlite3BtreeInsert(BtCursor*, const void *pKey, i64 nKey, const void *pData, int nData, int nZero, int bias, int seekResult); SQLITE_PRIVATE int sqlite3BtreeFirst(BtCursor*, int *pRes); SQLITE_PRIVATE int sqlite3BtreeLast(BtCursor*, int *pRes); SQLITE_PRIVATE int sqlite3BtreeNext(BtCursor*, int *pRes); SQLITE_PRIVATE int sqlite3BtreeEof(BtCursor*); SQLITE_PRIVATE int sqlite3BtreePrevious(BtCursor*, int *pRes); SQLITE_PRIVATE int sqlite3BtreeKeySize(BtCursor*, i64 *pSize); SQLITE_PRIVATE int sqlite3BtreeKey(BtCursor*, u32 offset, u32 amt, void*); SQLITE_PRIVATE const void *sqlite3BtreeKeyFetch(BtCursor*, u32 *pAmt); SQLITE_PRIVATE const void *sqlite3BtreeDataFetch(BtCursor*, u32 *pAmt); SQLITE_PRIVATE int sqlite3BtreeDataSize(BtCursor*, u32 *pSize); SQLITE_PRIVATE int sqlite3BtreeData(BtCursor*, u32 offset, u32 amt, void*); SQLITE_PRIVATE char *sqlite3BtreeIntegrityCheck(Btree*, int *aRoot, int nRoot, int, int*); SQLITE_PRIVATE struct Pager *sqlite3BtreePager(Btree*); SQLITE_PRIVATE int sqlite3BtreePutData(BtCursor*, u32 offset, u32 amt, void*); SQLITE_PRIVATE void sqlite3BtreeIncrblobCursor(BtCursor *); SQLITE_PRIVATE void sqlite3BtreeClearCursor(BtCursor *); SQLITE_PRIVATE int sqlite3BtreeSetVersion(Btree *pBt, int iVersion); SQLITE_PRIVATE void sqlite3BtreeCursorHints(BtCursor *, unsigned int mask); #ifdef SQLITE_DEBUG SQLITE_PRIVATE int sqlite3BtreeCursorHasHint(BtCursor*, unsigned int mask); #endif SQLITE_PRIVATE int sqlite3BtreeIsReadonly(Btree *pBt); SQLITE_PRIVATE int sqlite3HeaderSizeBtree(void); #ifndef NDEBUG SQLITE_PRIVATE int sqlite3BtreeCursorIsValid(BtCursor*); #endif #ifndef SQLITE_OMIT_BTREECOUNT SQLITE_PRIVATE int sqlite3BtreeCount(BtCursor *, i64 *); #endif #ifdef SQLITE_TEST SQLITE_PRIVATE int sqlite3BtreeCursorInfo(BtCursor*, int*, int); SQLITE_PRIVATE void sqlite3BtreeCursorList(Btree*); #endif #ifndef SQLITE_OMIT_WAL SQLITE_PRIVATE int sqlite3BtreeCheckpoint(Btree*, int, int *, int *); #endif /* ** If we are not using shared cache, then there is no need to ** use mutexes to access the BtShared structures. So make the ** Enter and Leave procedures no-ops. */ #ifndef SQLITE_OMIT_SHARED_CACHE SQLITE_PRIVATE void sqlite3BtreeEnter(Btree*); SQLITE_PRIVATE void sqlite3BtreeEnterAll(sqlite3*); #else # define sqlite3BtreeEnter(X) # define sqlite3BtreeEnterAll(X) #endif #if !defined(SQLITE_OMIT_SHARED_CACHE) && SQLITE_THREADSAFE SQLITE_PRIVATE int sqlite3BtreeSharable(Btree*); SQLITE_PRIVATE void sqlite3BtreeLeave(Btree*); SQLITE_PRIVATE void sqlite3BtreeEnterCursor(BtCursor*); SQLITE_PRIVATE void sqlite3BtreeLeaveCursor(BtCursor*); SQLITE_PRIVATE void sqlite3BtreeLeaveAll(sqlite3*); #ifndef NDEBUG /* These routines are used inside assert() statements only. 
*/ SQLITE_PRIVATE int sqlite3BtreeHoldsMutex(Btree*); SQLITE_PRIVATE int sqlite3BtreeHoldsAllMutexes(sqlite3*); SQLITE_PRIVATE int sqlite3SchemaMutexHeld(sqlite3*,int,Schema*); #endif #else # define sqlite3BtreeSharable(X) 0 # define sqlite3BtreeLeave(X) # define sqlite3BtreeEnterCursor(X) # define sqlite3BtreeLeaveCursor(X) # define sqlite3BtreeLeaveAll(X) # define sqlite3BtreeHoldsMutex(X) 1 # define sqlite3BtreeHoldsAllMutexes(X) 1 # define sqlite3SchemaMutexHeld(X,Y,Z) 1 #endif #endif /* _BTREE_H_ */ /************** End of btree.h ***********************************************/ /************** Continuing where we left off in sqliteInt.h ******************/ /************** Include vdbe.h in the middle of sqliteInt.h ******************/ /************** Begin file vdbe.h ********************************************/ /* ** 2001 September 15 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** Header file for the Virtual DataBase Engine (VDBE) ** ** This header defines the interface to the virtual database engine ** or VDBE. The VDBE implements an abstract machine that runs a ** simple program to access and modify the underlying database. */ #ifndef _SQLITE_VDBE_H_ #define _SQLITE_VDBE_H_ /* #include */ /* ** A single VDBE is an opaque structure named "Vdbe". Only routines ** in the source file sqliteVdbe.c are allowed to see the insides ** of this structure. */ typedef struct Vdbe Vdbe; /* ** The names of the following types declared in vdbeInt.h are required ** for the VdbeOp definition. */ typedef struct Mem Mem; typedef struct SubProgram SubProgram; /* ** A single instruction of the virtual machine has an opcode ** and as many as three operands. 
The instruction is recorded ** as an instance of the following structure: */ struct VdbeOp { u8 opcode; /* What operation to perform */ signed char p4type; /* One of the P4_xxx constants for p4 */ u8 opflags; /* Mask of the OPFLG_* flags in opcodes.h */ u8 p5; /* Fifth parameter is an unsigned character */ int p1; /* First operand */ int p2; /* Second parameter (often the jump destination) */ int p3; /* The third parameter */ union p4union { /* fourth parameter */ int i; /* Integer value if p4type==P4_INT32 */ void *p; /* Generic pointer */ char *z; /* Pointer to data for string (char array) types */ i64 *pI64; /* Used when p4type is P4_INT64 */ double *pReal; /* Used when p4type is P4_REAL */ FuncDef *pFunc; /* Used when p4type is P4_FUNCDEF */ sqlite3_context *pCtx; /* Used when p4type is P4_FUNCCTX */ CollSeq *pColl; /* Used when p4type is P4_COLLSEQ */ Mem *pMem; /* Used when p4type is P4_MEM */ VTable *pVtab; /* Used when p4type is P4_VTAB */ KeyInfo *pKeyInfo; /* Used when p4type is P4_KEYINFO */ int *ai; /* Used when p4type is P4_INTARRAY */ SubProgram *pProgram; /* Used when p4type is P4_SUBPROGRAM */ int (*xAdvance)(BtCursor *, int *); } p4; #ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS char *zComment; /* Comment to improve readability */ #endif #ifdef VDBE_PROFILE u32 cnt; /* Number of times this instruction was executed */ u64 cycles; /* Total time spent executing this instruction */ #endif #ifdef SQLITE_VDBE_COVERAGE int iSrcLine; /* Source-code line that generated this opcode */ #endif }; typedef struct VdbeOp VdbeOp; /* ** A sub-routine used to implement a trigger program. */ struct SubProgram { VdbeOp *aOp; /* Array of opcodes for sub-program */ int nOp; /* Elements in aOp[] */ int nMem; /* Number of memory cells required */ int nCsr; /* Number of cursors required */ int nOnce; /* Number of OP_Once instructions */ void *token; /* id that may be used to recursive triggers */ SubProgram *pNext; /* Next sub-program already visited */ }; /* ** A smaller version of VdbeOp used for the VdbeAddOpList() function because ** it takes up less space. 
*/ struct VdbeOpList { u8 opcode; /* What operation to perform */ signed char p1; /* First operand */ signed char p2; /* Second parameter (often the jump destination) */ signed char p3; /* Third parameter */ }; typedef struct VdbeOpList VdbeOpList; /* ** Allowed values of VdbeOp.p4type */ #define P4_NOTUSED 0 /* The P4 parameter is not used */ #define P4_DYNAMIC (-1) /* Pointer to a string obtained from sqliteMalloc() */ #define P4_STATIC (-2) /* Pointer to a static string */ #define P4_COLLSEQ (-4) /* P4 is a pointer to a CollSeq structure */ #define P4_FUNCDEF (-5) /* P4 is a pointer to a FuncDef structure */ #define P4_KEYINFO (-6) /* P4 is a pointer to a KeyInfo structure */ #define P4_MEM (-8) /* P4 is a pointer to a Mem* structure */ #define P4_TRANSIENT 0 /* P4 is a pointer to a transient string */ #define P4_VTAB (-10) /* P4 is a pointer to an sqlite3_vtab structure */ #define P4_MPRINTF (-11) /* P4 is a string obtained from sqlite3_mprintf() */ #define P4_REAL (-12) /* P4 is a 64-bit floating point value */ #define P4_INT64 (-13) /* P4 is a 64-bit signed integer */ #define P4_INT32 (-14) /* P4 is a 32-bit signed integer */ #define P4_INTARRAY (-15) /* P4 is a vector of 32-bit integers */ #define P4_SUBPROGRAM (-18) /* P4 is a pointer to a SubProgram structure */ #define P4_ADVANCE (-19) /* P4 is a pointer to BtreeNext() or BtreePrev() */ #define P4_FUNCCTX (-20) /* P4 is a pointer to an sqlite3_context object */ /* Error message codes for OP_Halt */ #define P5_ConstraintNotNull 1 #define P5_ConstraintUnique 2 #define P5_ConstraintCheck 3 #define P5_ConstraintFK 4 /* ** The Vdbe.aColName array contains 5n Mem structures, where n is the ** number of columns of data returned by the statement. */ #define COLNAME_NAME 0 #define COLNAME_DECLTYPE 1 #define COLNAME_DATABASE 2 #define COLNAME_TABLE 3 #define COLNAME_COLUMN 4 #ifdef SQLITE_ENABLE_COLUMN_METADATA # define COLNAME_N 5 /* Number of COLNAME_xxx symbols */ #else # ifdef SQLITE_OMIT_DECLTYPE # define COLNAME_N 1 /* Store only the name */ # else # define COLNAME_N 2 /* Store the name and decltype */ # endif #endif /* ** The following macro converts a relative address in the p2 field ** of a VdbeOp structure into a negative number so that ** sqlite3VdbeAddOpList() knows that the address is relative. Calling ** the macro again restores the address. */ #define ADDR(X) (-1-(X)) /* ** The makefile scans the vdbe.c source file and creates the "opcodes.h" ** header file that defines a number for each opcode used by the VDBE. */ /************** Include opcodes.h in the middle of vdbe.h ********************/ /************** Begin file opcodes.h *****************************************/ /* Automatically generated. 
Do not edit */ /* See the mkopcodeh.awk script for details */ #define OP_Savepoint 1 #define OP_AutoCommit 2 #define OP_Transaction 3 #define OP_SorterNext 4 #define OP_PrevIfOpen 5 #define OP_NextIfOpen 6 #define OP_Prev 7 #define OP_Next 8 #define OP_Checkpoint 9 #define OP_JournalMode 10 #define OP_Vacuum 11 #define OP_VFilter 12 /* synopsis: iplan=r[P3] zplan='P4' */ #define OP_VUpdate 13 /* synopsis: data=r[P3@P2] */ #define OP_Goto 14 #define OP_Gosub 15 #define OP_Return 16 #define OP_InitCoroutine 17 #define OP_EndCoroutine 18 #define OP_Not 19 /* same as TK_NOT, synopsis: r[P2]= !r[P1] */ #define OP_Yield 20 #define OP_HaltIfNull 21 /* synopsis: if r[P3]=null halt */ #define OP_Halt 22 #define OP_Integer 23 /* synopsis: r[P2]=P1 */ #define OP_Int64 24 /* synopsis: r[P2]=P4 */ #define OP_String 25 /* synopsis: r[P2]='P4' (len=P1) */ #define OP_Null 26 /* synopsis: r[P2..P3]=NULL */ #define OP_SoftNull 27 /* synopsis: r[P1]=NULL */ #define OP_Blob 28 /* synopsis: r[P2]=P4 (len=P1) */ #define OP_Variable 29 /* synopsis: r[P2]=parameter(P1,P4) */ #define OP_Move 30 /* synopsis: r[P2@P3]=r[P1@P3] */ #define OP_Copy 31 /* synopsis: r[P2@P3+1]=r[P1@P3+1] */ #define OP_SCopy 32 /* synopsis: r[P2]=r[P1] */ #define OP_ResultRow 33 /* synopsis: output=r[P1@P2] */ #define OP_CollSeq 34 #define OP_Function0 35 /* synopsis: r[P3]=func(r[P2@P5]) */ #define OP_Function 36 /* synopsis: r[P3]=func(r[P2@P5]) */ #define OP_AddImm 37 /* synopsis: r[P1]=r[P1]+P2 */ #define OP_MustBeInt 38 #define OP_RealAffinity 39 #define OP_Cast 40 /* synopsis: affinity(r[P1]) */ #define OP_Permutation 41 #define OP_Compare 42 /* synopsis: r[P1@P3] <-> r[P2@P3] */ #define OP_Jump 43 #define OP_Once 44 #define OP_If 45 #define OP_IfNot 46 #define OP_Column 47 /* synopsis: r[P3]=PX */ #define OP_Affinity 48 /* synopsis: affinity(r[P1@P2]) */ #define OP_MakeRecord 49 /* synopsis: r[P3]=mkrec(r[P1@P2]) */ #define OP_Count 50 /* synopsis: r[P2]=count() */ #define OP_ReadCookie 51 #define OP_SetCookie 52 #define OP_ReopenIdx 53 /* synopsis: root=P2 iDb=P3 */ #define OP_OpenRead 54 /* synopsis: root=P2 iDb=P3 */ #define OP_OpenWrite 55 /* synopsis: root=P2 iDb=P3 */ #define OP_OpenAutoindex 56 /* synopsis: nColumn=P2 */ #define OP_OpenEphemeral 57 /* synopsis: nColumn=P2 */ #define OP_SorterOpen 58 #define OP_SequenceTest 59 /* synopsis: if( cursor[P1].ctr++ ) pc = P2 */ #define OP_OpenPseudo 60 /* synopsis: P3 columns in r[P2] */ #define OP_Close 61 #define OP_ColumnsUsed 62 #define OP_SeekLT 63 /* synopsis: key=r[P3@P4] */ #define OP_SeekLE 64 /* synopsis: key=r[P3@P4] */ #define OP_SeekGE 65 /* synopsis: key=r[P3@P4] */ #define OP_SeekGT 66 /* synopsis: key=r[P3@P4] */ #define OP_Seek 67 /* synopsis: intkey=r[P2] */ #define OP_NoConflict 68 /* synopsis: key=r[P3@P4] */ #define OP_NotFound 69 /* synopsis: key=r[P3@P4] */ #define OP_Found 70 /* synopsis: key=r[P3@P4] */ #define OP_Or 71 /* same as TK_OR, synopsis: r[P3]=(r[P1] || r[P2]) */ #define OP_And 72 /* same as TK_AND, synopsis: r[P3]=(r[P1] && r[P2]) */ #define OP_NotExists 73 /* synopsis: intkey=r[P3] */ #define OP_Sequence 74 /* synopsis: r[P2]=cursor[P1].ctr++ */ #define OP_NewRowid 75 /* synopsis: r[P2]=rowid */ #define OP_IsNull 76 /* same as TK_ISNULL, synopsis: if r[P1]==NULL goto P2 */ #define OP_NotNull 77 /* same as TK_NOTNULL, synopsis: if r[P1]!=NULL goto P2 */ #define OP_Ne 78 /* same as TK_NE, synopsis: if r[P1]!=r[P3] goto P2 */ #define OP_Eq 79 /* same as TK_EQ, synopsis: if r[P1]==r[P3] goto P2 */ #define OP_Gt 80 /* same as TK_GT, synopsis: if 
r[P1]>r[P3] goto P2 */ #define OP_Le 81 /* same as TK_LE, synopsis: if r[P1]<=r[P3] goto P2 */ #define OP_Lt 82 /* same as TK_LT, synopsis: if r[P1]<r[P3] goto P2 */ #define OP_Ge 83 /* same as TK_GE, synopsis: if r[P1]>=r[P3] goto P2 */ #define OP_Insert 84 /* synopsis: intkey=r[P3] data=r[P2] */ #define OP_BitAnd 85 /* same as TK_BITAND, synopsis: r[P3]=r[P1]&r[P2] */ #define OP_BitOr 86 /* same as TK_BITOR, synopsis: r[P3]=r[P1]|r[P2] */ #define OP_ShiftLeft 87 /* same as TK_LSHIFT, synopsis: r[P3]=r[P2]<<r[P1] */ #define OP_ShiftRight 88 /* same as TK_RSHIFT, synopsis: r[P3]=r[P2]>>r[P1] */ #define OP_Add 89 /* same as TK_PLUS, synopsis: r[P3]=r[P1]+r[P2] */ #define OP_Subtract 90 /* same as TK_MINUS, synopsis: r[P3]=r[P2]-r[P1] */ #define OP_Multiply 91 /* same as TK_STAR, synopsis: r[P3]=r[P1]*r[P2] */ #define OP_Divide 92 /* same as TK_SLASH, synopsis: r[P3]=r[P2]/r[P1] */ #define OP_Remainder 93 /* same as TK_REM, synopsis: r[P3]=r[P2]%r[P1] */ #define OP_Concat 94 /* same as TK_CONCAT, synopsis: r[P3]=r[P2]+r[P1] */ #define OP_InsertInt 95 /* synopsis: intkey=P3 data=r[P2] */ #define OP_BitNot 96 /* same as TK_BITNOT, synopsis: r[P1]= ~r[P1] */ #define OP_String8 97 /* same as TK_STRING, synopsis: r[P2]='P4' */ #define OP_Delete 98 #define OP_ResetCount 99 #define OP_SorterCompare 100 /* synopsis: if key(P1)!=trim(r[P3],P4) goto P2 */ #define OP_SorterData 101 /* synopsis: r[P2]=data */ #define OP_RowKey 102 /* synopsis: r[P2]=key */ #define OP_RowData 103 /* synopsis: r[P2]=data */ #define OP_Rowid 104 /* synopsis: r[P2]=rowid */ #define OP_NullRow 105 #define OP_Last 106 #define OP_SorterSort 107 #define OP_Sort 108 #define OP_Rewind 109 #define OP_SorterInsert 110 #define OP_IdxInsert 111 /* synopsis: key=r[P2] */ #define OP_IdxDelete 112 /* synopsis: key=r[P2@P3] */ #define OP_IdxRowid 113 /* synopsis: r[P2]=rowid */ #define OP_IdxLE 114 /* synopsis: key=r[P3@P4] */ #define OP_IdxGT 115 /* synopsis: key=r[P3@P4] */ #define OP_IdxLT 116 /* synopsis: key=r[P3@P4] */ #define OP_IdxGE 117 /* synopsis: key=r[P3@P4] */ #define OP_Destroy 118 #define OP_Clear 119 #define OP_ResetSorter 120 #define OP_CreateIndex 121 /* synopsis: r[P2]=root iDb=P1 */ #define OP_CreateTable 122 /* synopsis: r[P2]=root iDb=P1 */ #define OP_ParseSchema 123 #define OP_LoadAnalysis 124 #define OP_DropTable 125 #define OP_DropIndex 126 #define OP_DropTrigger 127 #define OP_IntegrityCk 128 #define OP_RowSetAdd 129 /* synopsis: rowset(P1)=r[P2] */ #define OP_RowSetRead 130 /* synopsis: r[P3]=rowset(P1) */ #define OP_RowSetTest 131 /* synopsis: if r[P3] in rowset(P1) goto P2 */ #define OP_Program 132 #define OP_Real 133 /* same as TK_FLOAT, synopsis: r[P2]=P4 */ #define OP_Param 134 #define OP_FkCounter 135 /* synopsis: fkctr[P1]+=P2 */ #define OP_FkIfZero 136 /* synopsis: if fkctr[P1]==0 goto P2 */ #define OP_MemMax 137 /* synopsis: r[P1]=max(r[P1],r[P2]) */ #define OP_IfPos 138 /* synopsis: if r[P1]>0 goto P2 */ #define OP_IfNeg 139 /* synopsis: r[P1]+=P3, if r[P1]<0 goto P2 */ #define OP_IfNotZero 140 /* synopsis: if r[P1]!=0 then r[P1]+=P3, goto P2 */ #define OP_DecrJumpZero 141 /* synopsis: if (--r[P1])==0 goto P2 */ #define OP_JumpZeroIncr 142 /* synopsis: if (r[P1]++)==0 ) goto P2 */ #define OP_AggStep0 143 /* synopsis: accum=r[P3] step(r[P2@P5]) */ #define OP_AggStep 144 /* synopsis: accum=r[P3] step(r[P2@P5]) */ #define OP_AggFinal 145 /* synopsis: accum=r[P1] N=P2 */ #define OP_IncrVacuum 146 #define OP_Expire 147 #define OP_TableLock 148 /* synopsis: iDb=P1 root=P2 write=P3 */ #define OP_VBegin 149 #define OP_VCreate 150 #define OP_VDestroy 151 #define OP_VOpen 152 #define OP_VColumn 153 /* synopsis: r[P3]=vcolumn(P2) */ #define OP_VNext 154
#define OP_VRename 155 #define OP_Pagecount 156 #define OP_MaxPgcnt 157 #define OP_Init 158 /* synopsis: Start at P2 */ #define OP_Noop 159 #define OP_Explain 160 /* Properties such as "out2" or "jump" that are specified in ** comments following the "case" for each opcode in the vdbe.c ** are encoded into bitvectors as follows: */ #define OPFLG_JUMP 0x0001 /* jump: P2 holds jmp target */ #define OPFLG_IN1 0x0002 /* in1: P1 is an input */ #define OPFLG_IN2 0x0004 /* in2: P2 is an input */ #define OPFLG_IN3 0x0008 /* in3: P3 is an input */ #define OPFLG_OUT2 0x0010 /* out2: P2 is an output */ #define OPFLG_OUT3 0x0020 /* out3: P3 is an output */ #define OPFLG_INITIALIZER {\ /* 0 */ 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01,\ /* 8 */ 0x01, 0x00, 0x10, 0x00, 0x01, 0x00, 0x01, 0x01,\ /* 16 */ 0x02, 0x01, 0x02, 0x12, 0x03, 0x08, 0x00, 0x10,\ /* 24 */ 0x10, 0x10, 0x10, 0x00, 0x10, 0x10, 0x00, 0x00,\ /* 32 */ 0x10, 0x00, 0x00, 0x00, 0x00, 0x02, 0x03, 0x02,\ /* 40 */ 0x02, 0x00, 0x00, 0x01, 0x01, 0x03, 0x03, 0x00,\ /* 48 */ 0x00, 0x00, 0x10, 0x10, 0x08, 0x00, 0x00, 0x00,\ /* 56 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09,\ /* 64 */ 0x09, 0x09, 0x09, 0x04, 0x09, 0x09, 0x09, 0x26,\ /* 72 */ 0x26, 0x09, 0x10, 0x10, 0x03, 0x03, 0x0b, 0x0b,\ /* 80 */ 0x0b, 0x0b, 0x0b, 0x0b, 0x00, 0x26, 0x26, 0x26,\ /* 88 */ 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x00,\ /* 96 */ 0x12, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\ /* 104 */ 0x10, 0x00, 0x01, 0x01, 0x01, 0x01, 0x04, 0x04,\ /* 112 */ 0x00, 0x10, 0x01, 0x01, 0x01, 0x01, 0x10, 0x00,\ /* 120 */ 0x00, 0x10, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00,\ /* 128 */ 0x00, 0x06, 0x23, 0x0b, 0x01, 0x10, 0x10, 0x00,\ /* 136 */ 0x01, 0x04, 0x03, 0x03, 0x03, 0x03, 0x03, 0x00,\ /* 144 */ 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,\ /* 152 */ 0x00, 0x00, 0x01, 0x00, 0x10, 0x10, 0x01, 0x00,\ /* 160 */ 0x00,} /************** End of opcodes.h *********************************************/ /************** Continuing where we left off in vdbe.h ***********************/ /* ** Prototypes for the VDBE interface. See comments on the implementation ** for a description of what each of these routines does. 
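**
** An illustrative sketch of how the code generator drives this interface
** (not code from this file; pParse stands for the active Parse context):
**
**   Vdbe *v = sqlite3VdbeCreate(pParse);
**   sqlite3VdbeAddOp2(v, OP_Integer, 42, 1);    // r[1] = 42
**   sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 1);   // return r[1] as a result row
**   sqlite3VdbeAddOp0(v, OP_Halt);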
*/ SQLITE_PRIVATE Vdbe *sqlite3VdbeCreate(Parse*); SQLITE_PRIVATE int sqlite3VdbeAddOp0(Vdbe*,int); SQLITE_PRIVATE int sqlite3VdbeAddOp1(Vdbe*,int,int); SQLITE_PRIVATE int sqlite3VdbeAddOp2(Vdbe*,int,int,int); SQLITE_PRIVATE int sqlite3VdbeAddOp3(Vdbe*,int,int,int,int); SQLITE_PRIVATE int sqlite3VdbeAddOp4(Vdbe*,int,int,int,int,const char *zP4,int); SQLITE_PRIVATE int sqlite3VdbeAddOp4Dup8(Vdbe*,int,int,int,int,const u8*,int); SQLITE_PRIVATE int sqlite3VdbeAddOp4Int(Vdbe*,int,int,int,int,int); SQLITE_PRIVATE int sqlite3VdbeAddOpList(Vdbe*, int nOp, VdbeOpList const *aOp, int iLineno); SQLITE_PRIVATE void sqlite3VdbeAddParseSchemaOp(Vdbe*,int,char*); SQLITE_PRIVATE void sqlite3VdbeChangeP1(Vdbe*, u32 addr, int P1); SQLITE_PRIVATE void sqlite3VdbeChangeP2(Vdbe*, u32 addr, int P2); SQLITE_PRIVATE void sqlite3VdbeChangeP3(Vdbe*, u32 addr, int P3); SQLITE_PRIVATE void sqlite3VdbeChangeP5(Vdbe*, u8 P5); SQLITE_PRIVATE void sqlite3VdbeJumpHere(Vdbe*, int addr); SQLITE_PRIVATE void sqlite3VdbeChangeToNoop(Vdbe*, int addr); SQLITE_PRIVATE int sqlite3VdbeDeletePriorOpcode(Vdbe*, u8 op); SQLITE_PRIVATE void sqlite3VdbeChangeP4(Vdbe*, int addr, const char *zP4, int N); SQLITE_PRIVATE void sqlite3VdbeSetP4KeyInfo(Parse*, Index*); SQLITE_PRIVATE void sqlite3VdbeUsesBtree(Vdbe*, int); SQLITE_PRIVATE VdbeOp *sqlite3VdbeGetOp(Vdbe*, int); SQLITE_PRIVATE int sqlite3VdbeMakeLabel(Vdbe*); SQLITE_PRIVATE void sqlite3VdbeRunOnlyOnce(Vdbe*); SQLITE_PRIVATE void sqlite3VdbeDelete(Vdbe*); SQLITE_PRIVATE void sqlite3VdbeClearObject(sqlite3*,Vdbe*); SQLITE_PRIVATE void sqlite3VdbeMakeReady(Vdbe*,Parse*); SQLITE_PRIVATE int sqlite3VdbeFinalize(Vdbe*); SQLITE_PRIVATE void sqlite3VdbeResolveLabel(Vdbe*, int); SQLITE_PRIVATE int sqlite3VdbeCurrentAddr(Vdbe*); #ifdef SQLITE_DEBUG SQLITE_PRIVATE int sqlite3VdbeAssertMayAbort(Vdbe *, int); #endif SQLITE_PRIVATE void sqlite3VdbeResetStepResult(Vdbe*); SQLITE_PRIVATE void sqlite3VdbeRewind(Vdbe*); SQLITE_PRIVATE int sqlite3VdbeReset(Vdbe*); SQLITE_PRIVATE void sqlite3VdbeSetNumCols(Vdbe*,int); SQLITE_PRIVATE int sqlite3VdbeSetColName(Vdbe*, int, int, const char *, void(*)(void*)); SQLITE_PRIVATE void sqlite3VdbeCountChanges(Vdbe*); SQLITE_PRIVATE sqlite3 *sqlite3VdbeDb(Vdbe*); SQLITE_PRIVATE void sqlite3VdbeSetSql(Vdbe*, const char *z, int n, int); SQLITE_PRIVATE void sqlite3VdbeSwap(Vdbe*,Vdbe*); SQLITE_PRIVATE VdbeOp *sqlite3VdbeTakeOpArray(Vdbe*, int*, int*); SQLITE_PRIVATE sqlite3_value *sqlite3VdbeGetBoundValue(Vdbe*, int, u8); SQLITE_PRIVATE void sqlite3VdbeSetVarmask(Vdbe*, int); #ifndef SQLITE_OMIT_TRACE SQLITE_PRIVATE char *sqlite3VdbeExpandSql(Vdbe*, const char*); #endif SQLITE_PRIVATE int sqlite3MemCompare(const Mem*, const Mem*, const CollSeq*); SQLITE_PRIVATE void sqlite3VdbeRecordUnpack(KeyInfo*,int,const void*,UnpackedRecord*); SQLITE_PRIVATE int sqlite3VdbeRecordCompare(int,const void*,UnpackedRecord*); SQLITE_PRIVATE int sqlite3VdbeRecordCompareWithSkip(int, const void *, UnpackedRecord *, int); SQLITE_PRIVATE UnpackedRecord *sqlite3VdbeAllocUnpackedRecord(KeyInfo *, char *, int, char **); typedef int (*RecordCompare)(int,const void*,UnpackedRecord*); SQLITE_PRIVATE RecordCompare sqlite3VdbeFindCompare(UnpackedRecord*); #ifndef SQLITE_OMIT_TRIGGER SQLITE_PRIVATE void sqlite3VdbeLinkSubProgram(Vdbe *, SubProgram *); #endif /* Use SQLITE_ENABLE_COMMENTS to enable generation of extra comments on ** each VDBE opcode. 
** ** Use the SQLITE_ENABLE_MODULE_COMMENTS macro to see some extra no-op ** comments in VDBE programs that show key decision points in the code ** generator. */ #ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS SQLITE_PRIVATE void sqlite3VdbeComment(Vdbe*, const char*, ...); # define VdbeComment(X) sqlite3VdbeComment X SQLITE_PRIVATE void sqlite3VdbeNoopComment(Vdbe*, const char*, ...); # define VdbeNoopComment(X) sqlite3VdbeNoopComment X # ifdef SQLITE_ENABLE_MODULE_COMMENTS # define VdbeModuleComment(X) sqlite3VdbeNoopComment X # else # define VdbeModuleComment(X) # endif #else # define VdbeComment(X) # define VdbeNoopComment(X) # define VdbeModuleComment(X) #endif /* ** The VdbeCoverage macros are used to set a coverage testing point ** for VDBE branch instructions. The coverage testing points are line ** numbers in the sqlite3.c source file. VDBE branch coverage testing ** only works with an amalagmation build. That's ok since a VDBE branch ** coverage build designed for testing the test suite only. No application ** should ever ship with VDBE branch coverage measuring turned on. ** ** VdbeCoverage(v) // Mark the previously coded instruction ** // as a branch ** ** VdbeCoverageIf(v, conditional) // Mark previous if conditional true ** ** VdbeCoverageAlwaysTaken(v) // Previous branch is always taken ** ** VdbeCoverageNeverTaken(v) // Previous branch is never taken ** ** Every VDBE branch operation must be tagged with one of the macros above. ** If not, then when "make test" is run with -DSQLITE_VDBE_COVERAGE and ** -DSQLITE_DEBUG then an ALWAYS() will fail in the vdbeTakeBranch() ** routine in vdbe.c, alerting the developer to the missed tag. */ #ifdef SQLITE_VDBE_COVERAGE SQLITE_PRIVATE void sqlite3VdbeSetLineNumber(Vdbe*,int); # define VdbeCoverage(v) sqlite3VdbeSetLineNumber(v,__LINE__) # define VdbeCoverageIf(v,x) if(x)sqlite3VdbeSetLineNumber(v,__LINE__) # define VdbeCoverageAlwaysTaken(v) sqlite3VdbeSetLineNumber(v,2); # define VdbeCoverageNeverTaken(v) sqlite3VdbeSetLineNumber(v,1); # define VDBE_OFFSET_LINENO(x) (__LINE__+x) #else # define VdbeCoverage(v) # define VdbeCoverageIf(v,x) # define VdbeCoverageAlwaysTaken(v) # define VdbeCoverageNeverTaken(v) # define VDBE_OFFSET_LINENO(x) 0 #endif #ifdef SQLITE_ENABLE_STMT_SCANSTATUS SQLITE_PRIVATE void sqlite3VdbeScanStatus(Vdbe*, int, int, int, LogEst, const char*); #else # define sqlite3VdbeScanStatus(a,b,c,d,e) #endif #endif /************** End of vdbe.h ************************************************/ /************** Continuing where we left off in sqliteInt.h ******************/ /************** Include pager.h in the middle of sqliteInt.h *****************/ /************** Begin file pager.h *******************************************/ /* ** 2001 September 15 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** This header file defines the interface that the sqlite page cache ** subsystem. The page cache subsystem reads and writes a file a page ** at a time and provides a journal for rollback. */ #ifndef _PAGER_H_ #define _PAGER_H_ /* ** Default maximum size for persistent journal files. A negative ** value means no limit. This value may be overridden using the ** sqlite3PagerJournalSizeLimit() API. See also "PRAGMA journal_size_limit". 
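**
** An illustrative call (not code from this file; pPager stands for any open
** Pager object):
**
**   sqlite3PagerJournalSizeLimit(pPager, 1024*1024);   // cap journal at 1 MiB
**   sqlite3PagerJournalSizeLimit(pPager, -1);          // restore "no limit"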
*/ #ifndef SQLITE_DEFAULT_JOURNAL_SIZE_LIMIT #define SQLITE_DEFAULT_JOURNAL_SIZE_LIMIT -1 #endif /* ** The type used to represent a page number. The first page in a file ** is called page 1. 0 is used to represent "not a page". */ typedef u32 Pgno; /* ** Each open file is managed by a separate instance of the "Pager" structure. */ typedef struct Pager Pager; /* ** Handle type for pages. */ typedef struct PgHdr DbPage; /* ** Page number PAGER_MJ_PGNO is never used in an SQLite database (it is ** reserved for working around a windows/posix incompatibility). It is ** used in the journal to signify that the remainder of the journal file ** is devoted to storing a master journal name - there are no more pages to ** roll back. See comments for function writeMasterJournal() in pager.c ** for details. */ #define PAGER_MJ_PGNO(x) ((Pgno)((PENDING_BYTE/((x)->pageSize))+1)) /* ** Allowed values for the flags parameter to sqlite3PagerOpen(). ** ** NOTE: These values must match the corresponding BTREE_ values in btree.h. */ #define PAGER_OMIT_JOURNAL 0x0001 /* Do not use a rollback journal */ #define PAGER_MEMORY 0x0002 /* In-memory database */ /* ** Valid values for the second argument to sqlite3PagerLockingMode(). */ #define PAGER_LOCKINGMODE_QUERY -1 #define PAGER_LOCKINGMODE_NORMAL 0 #define PAGER_LOCKINGMODE_EXCLUSIVE 1 /* ** Numeric constants that encode the journalmode. */ #define PAGER_JOURNALMODE_QUERY (-1) /* Query the value of journalmode */ #define PAGER_JOURNALMODE_DELETE 0 /* Commit by deleting journal file */ #define PAGER_JOURNALMODE_PERSIST 1 /* Commit by zeroing journal header */ #define PAGER_JOURNALMODE_OFF 2 /* Journal omitted. */ #define PAGER_JOURNALMODE_TRUNCATE 3 /* Commit by truncating journal */ #define PAGER_JOURNALMODE_MEMORY 4 /* In-memory journal file */ #define PAGER_JOURNALMODE_WAL 5 /* Use write-ahead logging */ /* ** Flags that make up the mask passed to sqlite3PagerAcquire(). */ #define PAGER_GET_NOCONTENT 0x01 /* Do not load data from disk */ #define PAGER_GET_READONLY 0x02 /* Read-only page is acceptable */ /* ** Flags for sqlite3PagerSetFlags() */ #define PAGER_SYNCHRONOUS_OFF 0x01 /* PRAGMA synchronous=OFF */ #define PAGER_SYNCHRONOUS_NORMAL 0x02 /* PRAGMA synchronous=NORMAL */ #define PAGER_SYNCHRONOUS_FULL 0x03 /* PRAGMA synchronous=FULL */ #define PAGER_SYNCHRONOUS_MASK 0x03 /* Mask for three values above */ #define PAGER_FULLFSYNC 0x04 /* PRAGMA fullfsync=ON */ #define PAGER_CKPT_FULLFSYNC 0x08 /* PRAGMA checkpoint_fullfsync=ON */ #define PAGER_CACHESPILL 0x10 /* PRAGMA cache_spill=ON */ #define PAGER_FLAGS_MASK 0x1c /* All above except SYNCHRONOUS */ /* ** The remainder of this file contains the declarations of the functions ** that make up the Pager sub-system API. See source code comments for ** a detailed description of each routine. */ /* Open and close a Pager connection. */ SQLITE_PRIVATE int sqlite3PagerOpen( sqlite3_vfs*, Pager **ppPager, const char*, int, int, int, void(*)(DbPage*) ); SQLITE_PRIVATE int sqlite3PagerClose(Pager *pPager); SQLITE_PRIVATE int sqlite3PagerReadFileheader(Pager*, int, unsigned char*); /* Functions used to configure a Pager object. 
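**
** An illustrative configuration sequence (not code from this file; pPager
** stands for a Pager created with sqlite3PagerOpen() above):
**
**   sqlite3PagerSetCachesize(pPager, 2000);
**   sqlite3PagerSetFlags(pPager, PAGER_SYNCHRONOUS_FULL|PAGER_CACHESPILL);
**   sqlite3PagerSetJournalMode(pPager, PAGER_JOURNALMODE_WAL);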
*/ SQLITE_PRIVATE void sqlite3PagerSetBusyhandler(Pager*, int(*)(void *), void *); SQLITE_PRIVATE int sqlite3PagerSetPagesize(Pager*, u32*, int); SQLITE_PRIVATE int sqlite3PagerMaxPageCount(Pager*, int); SQLITE_PRIVATE void sqlite3PagerSetCachesize(Pager*, int); SQLITE_PRIVATE void sqlite3PagerSetMmapLimit(Pager *, sqlite3_int64); SQLITE_PRIVATE void sqlite3PagerShrink(Pager*); SQLITE_PRIVATE void sqlite3PagerSetFlags(Pager*,unsigned); SQLITE_PRIVATE int sqlite3PagerLockingMode(Pager *, int); SQLITE_PRIVATE int sqlite3PagerSetJournalMode(Pager *, int); SQLITE_PRIVATE int sqlite3PagerGetJournalMode(Pager*); SQLITE_PRIVATE int sqlite3PagerOkToChangeJournalMode(Pager*); SQLITE_PRIVATE i64 sqlite3PagerJournalSizeLimit(Pager *, i64); SQLITE_PRIVATE sqlite3_backup **sqlite3PagerBackupPtr(Pager*); /* Functions used to obtain and release page references. */ SQLITE_PRIVATE int sqlite3PagerAcquire(Pager *pPager, Pgno pgno, DbPage **ppPage, int clrFlag); #define sqlite3PagerGet(A,B,C) sqlite3PagerAcquire(A,B,C,0) SQLITE_PRIVATE DbPage *sqlite3PagerLookup(Pager *pPager, Pgno pgno); SQLITE_PRIVATE void sqlite3PagerRef(DbPage*); SQLITE_PRIVATE void sqlite3PagerUnref(DbPage*); SQLITE_PRIVATE void sqlite3PagerUnrefNotNull(DbPage*); /* Operations on page references. */ SQLITE_PRIVATE int sqlite3PagerWrite(DbPage*); SQLITE_PRIVATE void sqlite3PagerDontWrite(DbPage*); SQLITE_PRIVATE int sqlite3PagerMovepage(Pager*,DbPage*,Pgno,int); SQLITE_PRIVATE int sqlite3PagerPageRefcount(DbPage*); SQLITE_PRIVATE void *sqlite3PagerGetData(DbPage *); SQLITE_PRIVATE void *sqlite3PagerGetExtra(DbPage *); /* Functions used to manage pager transactions and savepoints. */ SQLITE_PRIVATE void sqlite3PagerPagecount(Pager*, int*); SQLITE_PRIVATE int sqlite3PagerBegin(Pager*, int exFlag, int); SQLITE_PRIVATE int sqlite3PagerCommitPhaseOne(Pager*,const char *zMaster, int); SQLITE_PRIVATE int sqlite3PagerExclusiveLock(Pager*); SQLITE_PRIVATE int sqlite3PagerSync(Pager *pPager, const char *zMaster); SQLITE_PRIVATE int sqlite3PagerCommitPhaseTwo(Pager*); SQLITE_PRIVATE int sqlite3PagerRollback(Pager*); SQLITE_PRIVATE int sqlite3PagerOpenSavepoint(Pager *pPager, int n); SQLITE_PRIVATE int sqlite3PagerSavepoint(Pager *pPager, int op, int iSavepoint); SQLITE_PRIVATE int sqlite3PagerSharedLock(Pager *pPager); #ifndef SQLITE_OMIT_WAL SQLITE_PRIVATE int sqlite3PagerCheckpoint(Pager *pPager, int, int*, int*); SQLITE_PRIVATE int sqlite3PagerWalSupported(Pager *pPager); SQLITE_PRIVATE int sqlite3PagerWalCallback(Pager *pPager); SQLITE_PRIVATE int sqlite3PagerOpenWal(Pager *pPager, int *pisOpen); SQLITE_PRIVATE int sqlite3PagerCloseWal(Pager *pPager); #endif #ifdef SQLITE_ENABLE_ZIPVFS SQLITE_PRIVATE int sqlite3PagerWalFramesize(Pager *pPager); #endif /* Functions used to query pager state and configuration. 
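**
** An illustrative sketch of the usual write-transaction sequence built from
** the transaction routines declared above (error handling omitted; the page
** number and the flag values are arbitrary examples):
**
**     DbPage *pPg = 0;
**     sqlite3PagerBegin(pPager, 1, 0);            // open a write transaction
**     sqlite3PagerGet(pPager, 2, &pPg);           // obtain a reference to page 2
**     sqlite3PagerWrite(pPg);                     // journal the page, make it writeable
**     ... modify the buffer returned by sqlite3PagerGetData(pPg) ...
**     sqlite3PagerUnref(pPg);                     // drop the page reference
**     sqlite3PagerCommitPhaseOne(pPager, 0, 0);   // sync the journal and database
**     sqlite3PagerCommitPhaseTwo(pPager);         // finalize the journal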
*/ SQLITE_PRIVATE u8 sqlite3PagerIsreadonly(Pager*); SQLITE_PRIVATE u32 sqlite3PagerDataVersion(Pager*); #ifdef SQLITE_DEBUG SQLITE_PRIVATE int sqlite3PagerRefcount(Pager*); #endif SQLITE_PRIVATE int sqlite3PagerMemUsed(Pager*); SQLITE_PRIVATE const char *sqlite3PagerFilename(Pager*, int); SQLITE_PRIVATE const sqlite3_vfs *sqlite3PagerVfs(Pager*); SQLITE_PRIVATE sqlite3_file *sqlite3PagerFile(Pager*); SQLITE_PRIVATE const char *sqlite3PagerJournalname(Pager*); SQLITE_PRIVATE int sqlite3PagerNosync(Pager*); SQLITE_PRIVATE void *sqlite3PagerTempSpace(Pager*); SQLITE_PRIVATE int sqlite3PagerIsMemdb(Pager*); SQLITE_PRIVATE void sqlite3PagerCacheStat(Pager *, int, int, int *); SQLITE_PRIVATE void sqlite3PagerClearCache(Pager *); SQLITE_PRIVATE int sqlite3SectorSize(sqlite3_file *); /* Functions used to truncate the database file. */ SQLITE_PRIVATE void sqlite3PagerTruncateImage(Pager*,Pgno); SQLITE_PRIVATE void sqlite3PagerRekey(DbPage*, Pgno, u16); #if defined(SQLITE_HAS_CODEC) && !defined(SQLITE_OMIT_WAL) SQLITE_PRIVATE void *sqlite3PagerCodec(DbPage *); #endif /* Functions to support testing and debugging. */ #if !defined(NDEBUG) || defined(SQLITE_TEST) SQLITE_PRIVATE Pgno sqlite3PagerPagenumber(DbPage*); SQLITE_PRIVATE int sqlite3PagerIswriteable(DbPage*); #endif #ifdef SQLITE_TEST SQLITE_PRIVATE int *sqlite3PagerStats(Pager*); SQLITE_PRIVATE void sqlite3PagerRefdump(Pager*); void disable_simulated_io_errors(void); void enable_simulated_io_errors(void); #else # define disable_simulated_io_errors() # define enable_simulated_io_errors() #endif #endif /* _PAGER_H_ */ /************** End of pager.h ***********************************************/ /************** Continuing where we left off in sqliteInt.h ******************/ /************** Include pcache.h in the middle of sqliteInt.h ****************/ /************** Begin file pcache.h ******************************************/ /* ** 2008 August 05 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** This header file defines the interface to the sqlite page cache ** subsystem. */ #ifndef _PCACHE_H_ #define _PCACHE_H_ typedef struct PgHdr PgHdr; typedef struct PCache PCache; /* ** Every page in the cache is controlled by an instance of the following ** structure. */ struct PgHdr { sqlite3_pcache_page *pPage; /* Pcache object page handle */ void *pData; /* Page data */ void *pExtra; /* Extra content */ PgHdr *pDirty; /* Transient list of dirty pages */ Pager *pPager; /* The pager this page is part of */ Pgno pgno; /* Page number for this page */ #ifdef SQLITE_CHECK_PAGES u32 pageHash; /* Hash of page content */ #endif u16 flags; /* PGHDR flags defined below */ /********************************************************************** ** Elements above are public. All that follows is private to pcache.c ** and should not be accessed by other modules.
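**
** For example (an illustrative sketch only; the local variable names are
** arbitrary), code in pager.c typically consults the public fields of a
** page header like this:
**
**     Pgno  pgno  = pPg->pgno;                     // page number
**     void *aData = pPg->pData;                    // page content buffer
**     int isDirty = (pPg->flags & PGHDR_DIRTY)!=0; // see the PGHDR_ flags below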
*/ i16 nRef; /* Number of users of this page */ PCache *pCache; /* Cache that owns this page */ PgHdr *pDirtyNext; /* Next element in list of dirty pages */ PgHdr *pDirtyPrev; /* Previous element in list of dirty pages */ }; /* Bit values for PgHdr.flags */ #define PGHDR_CLEAN 0x001 /* Page not on the PCache.pDirty list */ #define PGHDR_DIRTY 0x002 /* Page is on the PCache.pDirty list */ #define PGHDR_WRITEABLE 0x004 /* Journaled and ready to modify */ #define PGHDR_NEED_SYNC 0x008 /* Fsync the rollback journal before ** writing this page to the database */ #define PGHDR_NEED_READ 0x010 /* Content is unread */ #define PGHDR_DONT_WRITE 0x020 /* Do not write content to disk */ #define PGHDR_MMAP 0x040 /* This is an mmap page object */ /* Initialize and shutdown the page cache subsystem */ SQLITE_PRIVATE int sqlite3PcacheInitialize(void); SQLITE_PRIVATE void sqlite3PcacheShutdown(void); /* Page cache buffer management: ** These routines implement SQLITE_CONFIG_PAGECACHE. */ SQLITE_PRIVATE void sqlite3PCacheBufferSetup(void *, int sz, int n); /* Create a new pager cache. ** Under memory stress, invoke xStress to try to make pages clean. ** Only clean and unpinned pages can be reclaimed. */ SQLITE_PRIVATE int sqlite3PcacheOpen( int szPage, /* Size of every page */ int szExtra, /* Extra space associated with each page */ int bPurgeable, /* True if pages are on backing store */ int (*xStress)(void*, PgHdr*), /* Call to try to make pages clean */ void *pStress, /* Argument to xStress */ PCache *pToInit /* Preallocated space for the PCache */ ); /* Modify the page-size after the cache has been created. */ SQLITE_PRIVATE int sqlite3PcacheSetPageSize(PCache *, int); /* Return the size in bytes of a PCache object. Used to preallocate ** storage space. */ SQLITE_PRIVATE int sqlite3PcacheSize(void); /* One release per successful fetch. Page is pinned until released. ** Reference counted. */ SQLITE_PRIVATE sqlite3_pcache_page *sqlite3PcacheFetch(PCache*, Pgno, int createFlag); SQLITE_PRIVATE int sqlite3PcacheFetchStress(PCache*, Pgno, sqlite3_pcache_page**); SQLITE_PRIVATE PgHdr *sqlite3PcacheFetchFinish(PCache*, Pgno, sqlite3_pcache_page *pPage); SQLITE_PRIVATE void sqlite3PcacheRelease(PgHdr*); SQLITE_PRIVATE void sqlite3PcacheDrop(PgHdr*); /* Remove page from cache */ SQLITE_PRIVATE void sqlite3PcacheMakeDirty(PgHdr*); /* Make sure page is marked dirty */ SQLITE_PRIVATE void sqlite3PcacheMakeClean(PgHdr*); /* Mark a single page as clean */ SQLITE_PRIVATE void sqlite3PcacheCleanAll(PCache*); /* Mark all dirty list pages as clean */ /* Change a page number. Used by incr-vacuum. */ SQLITE_PRIVATE void sqlite3PcacheMove(PgHdr*, Pgno); /* Remove all pages with pgno>x. 
Reset the cache if x==0 */ SQLITE_PRIVATE void sqlite3PcacheTruncate(PCache*, Pgno x); /* Get a list of all dirty pages in the cache, sorted by page number */ SQLITE_PRIVATE PgHdr *sqlite3PcacheDirtyList(PCache*); /* Reset and close the cache object */ SQLITE_PRIVATE void sqlite3PcacheClose(PCache*); /* Clear flags from pages of the page cache */ SQLITE_PRIVATE void sqlite3PcacheClearSyncFlags(PCache *); /* Discard the contents of the cache */ SQLITE_PRIVATE void sqlite3PcacheClear(PCache*); /* Return the total number of outstanding page references */ SQLITE_PRIVATE int sqlite3PcacheRefCount(PCache*); /* Increment the reference count of an existing page */ SQLITE_PRIVATE void sqlite3PcacheRef(PgHdr*); SQLITE_PRIVATE int sqlite3PcachePageRefcount(PgHdr*); /* Return the total number of pages stored in the cache */ SQLITE_PRIVATE int sqlite3PcachePagecount(PCache*); #if defined(SQLITE_CHECK_PAGES) || defined(SQLITE_DEBUG) /* Iterate through all dirty pages currently stored in the cache. This ** interface is only available if SQLITE_CHECK_PAGES is defined when the ** library is built. */ SQLITE_PRIVATE void sqlite3PcacheIterateDirty(PCache *pCache, void (*xIter)(PgHdr *)); #endif /* Set and get the suggested cache-size for the specified pager-cache. ** ** If no global maximum is configured, then the system attempts to limit ** the total number of pages cached by purgeable pager-caches to the sum ** of the suggested cache-sizes. */ SQLITE_PRIVATE void sqlite3PcacheSetCachesize(PCache *, int); #ifdef SQLITE_TEST SQLITE_PRIVATE int sqlite3PcacheGetCachesize(PCache *); #endif /* Free up as much memory as possible from the page cache */ SQLITE_PRIVATE void sqlite3PcacheShrink(PCache*); #ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT /* Try to return memory used by the pcache module to the main memory heap */ SQLITE_PRIVATE int sqlite3PcacheReleaseMemory(int); #endif #ifdef SQLITE_TEST SQLITE_PRIVATE void sqlite3PcacheStats(int*,int*,int*,int*); #endif SQLITE_PRIVATE void sqlite3PCacheSetDefault(void); /* Return the header size */ SQLITE_PRIVATE int sqlite3HeaderSizePcache(void); SQLITE_PRIVATE int sqlite3HeaderSizePcache1(void); #endif /* _PCACHE_H_ */ /************** End of pcache.h **********************************************/ /************** Continuing where we left off in sqliteInt.h ******************/ /************** Include os.h in the middle of sqliteInt.h ********************/ /************** Begin file os.h **********************************************/ /* ** 2001 September 16 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ****************************************************************************** ** ** This header file (together with its companion C source-code file ** "os.c") attempts to abstract the underlying operating system so that ** the SQLite library will work on both POSIX and windows systems. ** ** This header file is #include-ed by sqliteInt.h and thus ends up ** being included by every source file. */ #ifndef _SQLITE_OS_H_ #define _SQLITE_OS_H_ /* ** Attempt to automatically detect the operating system and setup the ** necessary pre-processor macros for it.
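**
** For example (illustrative only), a build on a typical Linux or macOS host
** ends up with
**
**     SQLITE_OS_UNIX==1   SQLITE_OS_WIN==0   SQLITE_OS_OTHER==0
**
** after os_setup.h below has run, while an embedded platform that supplies
** its own VFS port can switch off all of the built-in ports by adding
** -DSQLITE_OS_OTHER=1 to the compiler command line.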
*/ /************** Include os_setup.h in the middle of os.h *********************/ /************** Begin file os_setup.h ****************************************/ /* ** 2013 November 25 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ****************************************************************************** ** ** This file contains pre-processor directives related to operating system ** detection and/or setup. */ #ifndef _OS_SETUP_H_ #define _OS_SETUP_H_ /* ** Figure out if we are dealing with Unix, Windows, or some other operating ** system. ** ** After the following block of preprocessor macros, all of SQLITE_OS_UNIX, ** SQLITE_OS_WIN, and SQLITE_OS_OTHER will be defined to either 1 or 0. One of ** the three will be 1. The other two will be 0. */ #if defined(SQLITE_OS_OTHER) # if SQLITE_OS_OTHER==1 # undef SQLITE_OS_UNIX # define SQLITE_OS_UNIX 0 # undef SQLITE_OS_WIN # define SQLITE_OS_WIN 0 # else # undef SQLITE_OS_OTHER # endif #endif #if !defined(SQLITE_OS_UNIX) && !defined(SQLITE_OS_OTHER) # define SQLITE_OS_OTHER 0 # ifndef SQLITE_OS_WIN # if defined(_WIN32) || defined(WIN32) || defined(__CYGWIN__) || \ defined(__MINGW32__) || defined(__BORLANDC__) # define SQLITE_OS_WIN 1 # define SQLITE_OS_UNIX 0 # else # define SQLITE_OS_WIN 0 # define SQLITE_OS_UNIX 1 # endif # else # define SQLITE_OS_UNIX 0 # endif #else # ifndef SQLITE_OS_WIN # define SQLITE_OS_WIN 0 # endif #endif #endif /* _OS_SETUP_H_ */ /************** End of os_setup.h ********************************************/ /************** Continuing where we left off in os.h *************************/ /* If the SET_FULLSYNC macro is not defined above, then make it ** a no-op */ #ifndef SET_FULLSYNC # define SET_FULLSYNC(x,y) #endif /* ** The default size of a disk sector */ #ifndef SQLITE_DEFAULT_SECTOR_SIZE # define SQLITE_DEFAULT_SECTOR_SIZE 4096 #endif /* ** Temporary files are named starting with this prefix followed by 16 random ** alphanumeric characters, and no file extension. They are stored in the ** OS's standard temporary file directory, and are deleted prior to exit. ** If sqlite is being embedded in another program, you may wish to change the ** prefix to reflect your program's name, so that if your program exits ** prematurely, old temporary files can be easily identified. This can be done ** using -DSQLITE_TEMP_FILE_PREFIX=myprefix_ on the compiler command line. ** ** 2006-10-31: The default prefix used to be "sqlite_". But then ** Mcafee started using SQLite in their anti-virus product and it ** started putting files with the "sqlite" name in the c:/temp folder. ** This annoyed many windows users. Those users would then do a ** Google search for "sqlite", find the telephone numbers of the ** developers and call to wake them up at night and complain. ** For this reason, the default name prefix is changed to be "sqlite" ** spelled backwards. So the temp files are still identified, but ** anybody smart enough to figure out the code is also likely smart ** enough to know that calling the developer will not help get rid ** of the file. */ #ifndef SQLITE_TEMP_FILE_PREFIX # define SQLITE_TEMP_FILE_PREFIX "etilqs_" #endif /* ** The following values may be passed as the second argument to ** sqlite3OsLock().
The various locks exhibit the following semantics: ** ** SHARED: Any number of processes may hold a SHARED lock simultaneously. ** RESERVED: A single process may hold a RESERVED lock on a file at ** any time. Other processes may hold and obtain new SHARED locks. ** PENDING: A single process may hold a PENDING lock on a file at ** any one time. Existing SHARED locks may persist, but no new ** SHARED locks may be obtained by other processes. ** EXCLUSIVE: An EXCLUSIVE lock precludes all other locks. ** ** PENDING_LOCK may not be passed directly to sqlite3OsLock(). Instead, a ** process that requests an EXCLUSIVE lock may actually obtain a PENDING ** lock. This can be upgraded to an EXCLUSIVE lock by a subsequent call to ** sqlite3OsLock(). */ #define NO_LOCK 0 #define SHARED_LOCK 1 #define RESERVED_LOCK 2 #define PENDING_LOCK 3 #define EXCLUSIVE_LOCK 4 /* ** File Locking Notes: (Mostly about windows but also some info for Unix) ** ** We cannot use LockFileEx() or UnlockFileEx() on Win95/98/ME because ** those functions are not available. So we use only LockFile() and ** UnlockFile(). ** ** LockFile() prevents not just writing but also reading by other processes. ** A SHARED_LOCK is obtained by locking a single randomly-chosen ** byte out of a specific range of bytes. The lock byte is obtained at ** random so two separate readers can probably access the file at the ** same time, unless they are unlucky and choose the same lock byte. ** An EXCLUSIVE_LOCK is obtained by locking all bytes in the range. ** There can only be one writer. A RESERVED_LOCK is obtained by locking ** a single byte of the file that is designated as the reserved lock byte. ** A PENDING_LOCK is obtained by locking a designated byte different from ** the RESERVED_LOCK byte. ** ** On WinNT/2K/XP systems, LockFileEx() and UnlockFileEx() are available, ** which means we can use reader/writer locks. When reader/writer locks ** are used, the lock is placed on the same range of bytes that is used ** for probabilistic locking in Win95/98/ME. Hence, the locking scheme ** will support two or more Win95 readers or two or more WinNT readers. ** But a single Win95 reader will lock out all WinNT readers and a single ** WinNT reader will lock out all other Win95 readers. ** ** The following #defines specify the range of bytes used for locking. ** SHARED_SIZE is the number of bytes available in the pool from which ** a random byte is selected for a shared lock. The pool of bytes for ** shared locks begins at SHARED_FIRST. ** ** The same locking strategy and ** byte ranges are used for Unix. This leaves open the possibility of having ** clients on win95, winNT, and unix all talking to the same shared file ** and all locking correctly. To do so would require that samba (or whatever ** tool is being used for file sharing) implements locks correctly between ** windows and unix. I'm guessing that isn't likely to happen, but by ** using the same locking range we are at least open to the possibility. ** ** Locking in windows is mandatory. For this reason, we cannot store ** actual data in the bytes used for locking. The pager therefore never ** allocates the pages involved in locking. SHARED_SIZE is selected so ** that all locks will fit on a single page even at the minimum page size. ** PENDING_BYTE defines the beginning of the locks. By default PENDING_BYTE ** is set high so that we don't have to allocate an unused page except ** for very large databases.
But one should test the page skipping logic ** by setting PENDING_BYTE low and running the entire regression suite. ** ** Changing the value of PENDING_BYTE results in a subtly incompatible ** file format. Depending on how it is changed, you might not notice ** the incompatibility right away, even running a full regression test. ** The default location of PENDING_BYTE is the first byte past the ** 1GB boundary. ** */ #ifdef SQLITE_OMIT_WSD # define PENDING_BYTE (0x40000000) #else # define PENDING_BYTE sqlite3PendingByte #endif #define RESERVED_BYTE (PENDING_BYTE+1) #define SHARED_FIRST (PENDING_BYTE+2) #define SHARED_SIZE 510 /* ** Wrapper around OS specific sqlite3_os_init() function. */ SQLITE_PRIVATE int sqlite3OsInit(void); /* ** Functions for accessing sqlite3_file methods */ SQLITE_PRIVATE int sqlite3OsClose(sqlite3_file*); SQLITE_PRIVATE int sqlite3OsRead(sqlite3_file*, void*, int amt, i64 offset); SQLITE_PRIVATE int sqlite3OsWrite(sqlite3_file*, const void*, int amt, i64 offset); SQLITE_PRIVATE int sqlite3OsTruncate(sqlite3_file*, i64 size); SQLITE_PRIVATE int sqlite3OsSync(sqlite3_file*, int); SQLITE_PRIVATE int sqlite3OsFileSize(sqlite3_file*, i64 *pSize); SQLITE_PRIVATE int sqlite3OsLock(sqlite3_file*, int); SQLITE_PRIVATE int sqlite3OsUnlock(sqlite3_file*, int); SQLITE_PRIVATE int sqlite3OsCheckReservedLock(sqlite3_file *id, int *pResOut); SQLITE_PRIVATE int sqlite3OsFileControl(sqlite3_file*,int,void*); SQLITE_PRIVATE void sqlite3OsFileControlHint(sqlite3_file*,int,void*); #define SQLITE_FCNTL_DB_UNCHANGED 0xca093fa0 SQLITE_PRIVATE int sqlite3OsSectorSize(sqlite3_file *id); SQLITE_PRIVATE int sqlite3OsDeviceCharacteristics(sqlite3_file *id); SQLITE_PRIVATE int sqlite3OsShmMap(sqlite3_file *,int,int,int,void volatile **); SQLITE_PRIVATE int sqlite3OsShmLock(sqlite3_file *id, int, int, int); SQLITE_PRIVATE void sqlite3OsShmBarrier(sqlite3_file *id); SQLITE_PRIVATE int sqlite3OsShmUnmap(sqlite3_file *id, int); SQLITE_PRIVATE int sqlite3OsFetch(sqlite3_file *id, i64, int, void **); SQLITE_PRIVATE int sqlite3OsUnfetch(sqlite3_file *, i64, void *); /* ** Functions for accessing sqlite3_vfs methods */ SQLITE_PRIVATE int sqlite3OsOpen(sqlite3_vfs *, const char *, sqlite3_file*, int, int *); SQLITE_PRIVATE int sqlite3OsDelete(sqlite3_vfs *, const char *, int); SQLITE_PRIVATE int sqlite3OsAccess(sqlite3_vfs *, const char *, int, int *pResOut); SQLITE_PRIVATE int sqlite3OsFullPathname(sqlite3_vfs *, const char *, int, char *); #ifndef SQLITE_OMIT_LOAD_EXTENSION SQLITE_PRIVATE void *sqlite3OsDlOpen(sqlite3_vfs *, const char *); SQLITE_PRIVATE void sqlite3OsDlError(sqlite3_vfs *, int, char *); SQLITE_PRIVATE void (*sqlite3OsDlSym(sqlite3_vfs *, void *, const char *))(void); SQLITE_PRIVATE void sqlite3OsDlClose(sqlite3_vfs *, void *); #endif /* SQLITE_OMIT_LOAD_EXTENSION */ SQLITE_PRIVATE int sqlite3OsRandomness(sqlite3_vfs *, int, char *); SQLITE_PRIVATE int sqlite3OsSleep(sqlite3_vfs *, int); SQLITE_PRIVATE int sqlite3OsCurrentTimeInt64(sqlite3_vfs *, sqlite3_int64*); /* ** Convenience functions for opening and closing files using ** sqlite3_malloc() to obtain space for the file-handle structure. 
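**
** An illustrative sketch of how higher layers use these wrappers (error
** handling omitted; the filename and the open flags shown are arbitrary
** examples, not values taken from the surrounding code):
**
**     sqlite3_file *fd = 0;
**     int rc, outFlags;
**     char zHdr[100];
**     rc = sqlite3OsOpenMalloc(pVfs, "example.db", &fd,
**                SQLITE_OPEN_READONLY|SQLITE_OPEN_MAIN_DB, &outFlags);
**     if( rc==SQLITE_OK ) rc = sqlite3OsRead(fd, zHdr, 100, 0);  // 100 bytes at offset 0
**     if( fd ) sqlite3OsCloseFree(fd);                           // close and free the handle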
*/ SQLITE_PRIVATE int sqlite3OsOpenMalloc(sqlite3_vfs *, const char *, sqlite3_file **, int,int*); SQLITE_PRIVATE int sqlite3OsCloseFree(sqlite3_file *); #endif /* _SQLITE_OS_H_ */ /************** End of os.h **************************************************/ /************** Continuing where we left off in sqliteInt.h ******************/ /************** Include mutex.h in the middle of sqliteInt.h *****************/ /************** Begin file mutex.h *******************************************/ /* ** 2007 August 28 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** ** This file contains the common header for all mutex implementations. ** The sqliteInt.h header #includes this file so that it is available ** to all source files. We break it out in an effort to keep the code ** better organized. ** ** NOTE: source files should *not* #include this header file directly. ** Source files should #include the sqliteInt.h file and let that file ** include this one indirectly. */ /* ** Figure out what version of the code to use. The choices are ** ** SQLITE_MUTEX_OMIT No mutex logic. Not even stubs. The ** mutexes implementation cannot be overridden ** at start-time. ** ** SQLITE_MUTEX_NOOP For single-threaded applications. No ** mutual exclusion is provided. But this ** implementation can be overridden at ** start-time. ** ** SQLITE_MUTEX_PTHREADS For multi-threaded applications on Unix. ** ** SQLITE_MUTEX_W32 For multi-threaded applications on Win32. */ #if !SQLITE_THREADSAFE # define SQLITE_MUTEX_OMIT #endif #if SQLITE_THREADSAFE && !defined(SQLITE_MUTEX_NOOP) # if SQLITE_OS_UNIX # define SQLITE_MUTEX_PTHREADS # elif SQLITE_OS_WIN # define SQLITE_MUTEX_W32 # else # define SQLITE_MUTEX_NOOP # endif #endif #ifdef SQLITE_MUTEX_OMIT /* ** If this is a no-op implementation, implement everything as macros. */ #define sqlite3_mutex_alloc(X) ((sqlite3_mutex*)8) #define sqlite3_mutex_free(X) #define sqlite3_mutex_enter(X) #define sqlite3_mutex_try(X) SQLITE_OK #define sqlite3_mutex_leave(X) #define sqlite3_mutex_held(X) ((void)(X),1) #define sqlite3_mutex_notheld(X) ((void)(X),1) #define sqlite3MutexAlloc(X) ((sqlite3_mutex*)8) #define sqlite3MutexInit() SQLITE_OK #define sqlite3MutexEnd() #define MUTEX_LOGIC(X) #else #define MUTEX_LOGIC(X) X #endif /* defined(SQLITE_MUTEX_OMIT) */ /************** End of mutex.h ***********************************************/ /************** Continuing where we left off in sqliteInt.h ******************/ /* ** Each database file to be accessed by the system is an instance ** of the following structure. There are normally two of these structures ** in the sqlite.aDb[] array. aDb[0] is the main database file and ** aDb[1] is the database file used to hold temporary tables. Additional ** databases may be attached. */ struct Db { char *zName; /* Name of this database */ Btree *pBt; /* The B*Tree structure for this database file */ u8 safety_level; /* How aggressive at syncing data to disk */ Schema *pSchema; /* Pointer to database schema (possibly shared) */ }; /* ** An instance of the following structure stores a database schema. ** ** Most Schema objects are associated with a Btree. The exception is ** the Schema for the TEMP database (sqlite3.aDb[1]) which is free-standing.
** In shared cache mode, a single Schema object can be shared by multiple ** Btrees that refer to the same underlying BtShared object. ** ** Schema objects are automatically deallocated when the last Btree that ** references them is destroyed. The TEMP Schema is manually freed by ** sqlite3_close(). ** ** A thread must be holding a mutex on the corresponding Btree in order ** to access Schema content. This implies that the thread must also be ** holding a mutex on the sqlite3 connection pointer that owns the Btree. ** For a TEMP Schema, only the connection mutex is required. */ struct Schema { int schema_cookie; /* Database schema version number for this file */ int iGeneration; /* Generation counter. Incremented with each change */ Hash tblHash; /* All tables indexed by name */ Hash idxHash; /* All (named) indices indexed by name */ Hash trigHash; /* All triggers indexed by name */ Hash fkeyHash; /* All foreign keys by referenced table name */ Table *pSeqTab; /* The sqlite_sequence table used by AUTOINCREMENT */ u8 file_format; /* Schema format version for this file */ u8 enc; /* Text encoding used by this database */ u16 schemaFlags; /* Flags associated with this schema */ int cache_size; /* Number of pages to use in the cache */ }; /* ** These macros can be used to test, set, or clear bits in the ** Db.pSchema->flags field. */ #define DbHasProperty(D,I,P) (((D)->aDb[I].pSchema->schemaFlags&(P))==(P)) #define DbHasAnyProperty(D,I,P) (((D)->aDb[I].pSchema->schemaFlags&(P))!=0) #define DbSetProperty(D,I,P) (D)->aDb[I].pSchema->schemaFlags|=(P) #define DbClearProperty(D,I,P) (D)->aDb[I].pSchema->schemaFlags&=~(P) /* ** Allowed values for the DB.pSchema->flags field. ** ** The DB_SchemaLoaded flag is set after the database schema has been ** read into internal hash tables. ** ** DB_UnresetViews means that one or more views have column names that ** have been filled out. If the schema changes, these column names might ** change and so the view will need to be reset. */ #define DB_SchemaLoaded 0x0001 /* The schema has been loaded */ #define DB_UnresetViews 0x0002 /* Some views have defined column names */ #define DB_Empty 0x0004 /* The file is empty (length 0 bytes) */ /* ** The number of different kinds of things that can be limited ** using the sqlite3_limit() interface. */ #define SQLITE_N_LIMIT (SQLITE_LIMIT_WORKER_THREADS+1) /* ** Lookaside malloc is a set of fixed-size buffers that can be used ** to satisfy small transient memory allocation requests for objects ** associated with a particular database connection. The use of ** lookaside malloc provides a significant performance enhancement ** (approx 10%) by avoiding numerous malloc/free requests while parsing ** SQL statements. ** ** The Lookaside structure holds configuration information about the ** lookaside malloc subsystem. Each available memory allocation in ** the lookaside subsystem is stored on a linked list of LookasideSlot ** objects. ** ** Lookaside allocations are only allowed for objects that are associated ** with a particular database connection. Hence, schema information cannot ** be stored in lookaside because in shared cache mode the schema information ** is shared by multiple database connections. Therefore, while parsing ** schema information, the Lookaside.bEnabled flag is cleared so that ** lookaside allocations are not used to construct the schema objects.
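**
** For example (a sketch using the public API; the slot size and count are
** arbitrary illustration values), an application can resize the lookaside
** pool for one connection with:
**
**     sqlite3_db_config(db, SQLITE_DBCONFIG_LOOKASIDE, 0, 512, 128);
**
** which requests 128 slots of 512 bytes each, allocated by SQLite itself
** because the buffer pointer passed in is NULL.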
*/ struct Lookaside { u16 sz; /* Size of each buffer in bytes */ u8 bEnabled; /* False to disable new lookaside allocations */ u8 bMalloced; /* True if pStart obtained from sqlite3_malloc() */ int nOut; /* Number of buffers currently checked out */ int mxOut; /* Highwater mark for nOut */ int anStat[3]; /* 0: hits. 1: size misses. 2: full misses */ LookasideSlot *pFree; /* List of available buffers */ void *pStart; /* First byte of available memory space */ void *pEnd; /* First byte past end of available space */ }; struct LookasideSlot { LookasideSlot *pNext; /* Next buffer in the list of free buffers */ }; /* ** A hash table for function definitions. ** ** Hash each FuncDef structure into one of the FuncDefHash.a[] slots. ** Collisions are on the FuncDef.pHash chain. */ struct FuncDefHash { FuncDef *a[23]; /* Hash table for functions */ }; #ifdef SQLITE_USER_AUTHENTICATION /* ** Information held in the "sqlite3" database connection object and used ** to manage user authentication. */ typedef struct sqlite3_userauth sqlite3_userauth; struct sqlite3_userauth { u8 authLevel; /* Current authentication level */ int nAuthPW; /* Size of the zAuthPW in bytes */ char *zAuthPW; /* Password used to authenticate */ char *zAuthUser; /* User name used to authenticate */ }; /* Allowed values for sqlite3_userauth.authLevel */ #define UAUTH_Unknown 0 /* Authentication not yet checked */ #define UAUTH_Fail 1 /* User authentication failed */ #define UAUTH_User 2 /* Authenticated as a normal user */ #define UAUTH_Admin 3 /* Authenticated as an administrator */ /* Functions used only by user authorization logic */ SQLITE_PRIVATE int sqlite3UserAuthTable(const char*); SQLITE_PRIVATE int sqlite3UserAuthCheckLogin(sqlite3*,const char*,u8*); SQLITE_PRIVATE void sqlite3UserAuthInit(sqlite3*); SQLITE_PRIVATE void sqlite3CryptFunc(sqlite3_context*,int,sqlite3_value**); #endif /* SQLITE_USER_AUTHENTICATION */ /* ** typedef for the authorization callback function. */ #ifdef SQLITE_USER_AUTHENTICATION typedef int (*sqlite3_xauth)(void*,int,const char*,const char*,const char*, const char*, const char*); #else typedef int (*sqlite3_xauth)(void*,int,const char*,const char*,const char*, const char*); #endif /* ** Each database connection is an instance of the following structure. */ struct sqlite3 { sqlite3_vfs *pVfs; /* OS Interface */ struct Vdbe *pVdbe; /* List of active virtual machines */ CollSeq *pDfltColl; /* The default collating sequence (BINARY) */ sqlite3_mutex *mutex; /* Connection mutex */ Db *aDb; /* All backends */ int nDb; /* Number of backends currently in use */ int flags; /* Miscellaneous flags. See below */ i64 lastRowid; /* ROWID of most recent insert (see above) */ i64 szMmap; /* Default mmap_size setting */ unsigned int openFlags; /* Flags passed to sqlite3_vfs.xOpen() */ int errCode; /* Most recent error code (SQLITE_*) */ int errMask; /* & result codes with this before returning */ u16 dbOptFlags; /* Flags to enable/disable optimizations */ u8 enc; /* Text encoding */ u8 autoCommit; /* The auto-commit flag. 
*/ u8 temp_store; /* 1: file 2: memory 0: default */ u8 mallocFailed; /* True if we have seen a malloc failure */ u8 dfltLockMode; /* Default locking-mode for attached dbs */ signed char nextAutovac; /* Autovac setting after VACUUM if >=0 */ u8 suppressErr; /* Do not issue error messages if true */ u8 vtabOnConflict; /* Value to return for s3_vtab_on_conflict() */ u8 isTransactionSavepoint; /* True if the outermost savepoint is a TS */ int nextPagesize; /* Pagesize after VACUUM if >0 */ u32 magic; /* Magic number to detect library misuse */ int nChange; /* Value returned by sqlite3_changes() */ int nTotalChange; /* Value returned by sqlite3_total_changes() */ int aLimit[SQLITE_N_LIMIT]; /* Limits */ int nMaxSorterMmap; /* Maximum size of regions mapped by sorter */ struct sqlite3InitInfo { /* Information used during initialization */ int newTnum; /* Rootpage of table being initialized */ u8 iDb; /* Which db file is being initialized */ u8 busy; /* TRUE if currently initializing */ u8 orphanTrigger; /* Last statement is orphaned TEMP trigger */ u8 imposterTable; /* Building an imposter table */ } init; int nVdbeActive; /* Number of VDBEs currently running */ int nVdbeRead; /* Number of active VDBEs that read or write */ int nVdbeWrite; /* Number of active VDBEs that read and write */ int nVdbeExec; /* Number of nested calls to VdbeExec() */ int nVDestroy; /* Number of active OP_VDestroy operations */ int nExtension; /* Number of loaded extensions */ void **aExtension; /* Array of shared library handles */ void (*xTrace)(void*,const char*); /* Trace function */ void *pTraceArg; /* Argument to the trace function */ void (*xProfile)(void*,const char*,u64); /* Profiling function */ void *pProfileArg; /* Argument to profile function */ void *pCommitArg; /* Argument to xCommitCallback() */ int (*xCommitCallback)(void*); /* Invoked at every commit. */ void *pRollbackArg; /* Argument to xRollbackCallback() */ void (*xRollbackCallback)(void*); /* Invoked at every rollback.
*/ void *pUpdateArg; void (*xUpdateCallback)(void*,int, const char*,const char*,sqlite_int64); #ifndef SQLITE_OMIT_WAL int (*xWalCallback)(void *, sqlite3 *, const char *, int); void *pWalArg; #endif void(*xCollNeeded)(void*,sqlite3*,int eTextRep,const char*); void(*xCollNeeded16)(void*,sqlite3*,int eTextRep,const void*); void *pCollNeededArg; sqlite3_value *pErr; /* Most recent error message */ union { volatile int isInterrupted; /* True if sqlite3_interrupt has been called */ double notUsed1; /* Spacer */ } u1; Lookaside lookaside; /* Lookaside malloc configuration */ #ifndef SQLITE_OMIT_AUTHORIZATION sqlite3_xauth xAuth; /* Access authorization function */ void *pAuthArg; /* 1st argument to the access auth function */ #endif #ifndef SQLITE_OMIT_PROGRESS_CALLBACK int (*xProgress)(void *); /* The progress callback */ void *pProgressArg; /* Argument to the progress callback */ unsigned nProgressOps; /* Number of opcodes for progress callback */ #endif #ifndef SQLITE_OMIT_VIRTUALTABLE int nVTrans; /* Allocated size of aVTrans */ Hash aModule; /* populated by sqlite3_create_module() */ VtabCtx *pVtabCtx; /* Context for active vtab connect/create */ VTable **aVTrans; /* Virtual tables with open transactions */ VTable *pDisconnect; /* Disconnect these in next sqlite3_prepare() */ #endif FuncDefHash aFunc; /* Hash table of connection functions */ Hash aCollSeq; /* All collating sequences */ BusyHandler busyHandler; /* Busy callback */ Db aDbStatic[2]; /* Static space for the 2 default backends */ Savepoint *pSavepoint; /* List of active savepoints */ int busyTimeout; /* Busy handler timeout, in msec */ int nSavepoint; /* Number of non-transaction savepoints */ int nStatement; /* Number of nested statement-transactions */ i64 nDeferredCons; /* Net deferred constraints this transaction. */ i64 nDeferredImmCons; /* Net deferred immediate constraints */ int *pnBytesFreed; /* If not NULL, increment this in DbFree() */ #ifdef SQLITE_ENABLE_UNLOCK_NOTIFY /* The following variables are all protected by the STATIC_MASTER ** mutex, not by sqlite3.mutex. They are used by code in notify.c. ** ** When X.pUnlockConnection==Y, that means that X is waiting for Y to ** unlock so that it can proceed. ** ** When X.pBlockingConnection==Y, that means that something that X tried ** to do recently failed with an SQLITE_LOCKED error due to locks ** held by Y. */ sqlite3 *pBlockingConnection; /* Connection that caused SQLITE_LOCKED */ sqlite3 *pUnlockConnection; /* Connection to watch for unlock */ void *pUnlockArg; /* Argument to xUnlockNotify */ void (*xUnlockNotify)(void **, int); /* Unlock notify callback */ sqlite3 *pNextBlocked; /* Next in list of all blocked connections */ #endif #ifdef SQLITE_USER_AUTHENTICATION sqlite3_userauth auth; /* User authentication information */ #endif }; /* ** A macro to discover the encoding of a database. */ #define SCHEMA_ENC(db) ((db)->aDb[0].pSchema->enc) #define ENC(db) ((db)->enc) /* ** Possible values for the sqlite3.flags.
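**
** Most of these bits are toggled at run time by PRAGMA statements.  A few
** illustrative examples (this is a sketch, not an exhaustive mapping):
**
**     PRAGMA foreign_keys=ON;                -- sets SQLITE_ForeignKeys
**     PRAGMA reverse_unordered_selects=ON;   -- sets SQLITE_ReverseOrder
**     PRAGMA query_only=ON;                  -- sets SQLITE_QueryOnly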
*/ #define SQLITE_VdbeTrace 0x00000001 /* True to trace VDBE execution */ #define SQLITE_InternChanges 0x00000002 /* Uncommitted Hash table changes */ #define SQLITE_FullFSync 0x00000004 /* Use full fsync on the backend */ #define SQLITE_CkptFullFSync 0x00000008 /* Use full fsync for checkpoint */ #define SQLITE_CacheSpill 0x00000010 /* OK to spill pager cache */ #define SQLITE_FullColNames 0x00000020 /* Show full column names on SELECT */ #define SQLITE_ShortColNames 0x00000040 /* Show short columns names */ #define SQLITE_CountRows 0x00000080 /* Count rows changed by INSERT, */ /* DELETE, or UPDATE and return */ /* the count using a callback. */ #define SQLITE_NullCallback 0x00000100 /* Invoke the callback once if the */ /* result set is empty */ #define SQLITE_SqlTrace 0x00000200 /* Debug print SQL as it executes */ #define SQLITE_VdbeListing 0x00000400 /* Debug listings of VDBE programs */ #define SQLITE_WriteSchema 0x00000800 /* OK to update SQLITE_MASTER */ #define SQLITE_VdbeAddopTrace 0x00001000 /* Trace sqlite3VdbeAddOp() calls */ #define SQLITE_IgnoreChecks 0x00002000 /* Do not enforce check constraints */ #define SQLITE_ReadUncommitted 0x0004000 /* For shared-cache mode */ #define SQLITE_LegacyFileFmt 0x00008000 /* Create new databases in format 1 */ #define SQLITE_RecoveryMode 0x00010000 /* Ignore schema errors */ #define SQLITE_ReverseOrder 0x00020000 /* Reverse unordered SELECTs */ #define SQLITE_RecTriggers 0x00040000 /* Enable recursive triggers */ #define SQLITE_ForeignKeys 0x00080000 /* Enforce foreign key constraints */ #define SQLITE_AutoIndex 0x00100000 /* Enable automatic indexes */ #define SQLITE_PreferBuiltin 0x00200000 /* Preference to built-in funcs */ #define SQLITE_LoadExtension 0x00400000 /* Enable load_extension */ #define SQLITE_EnableTrigger 0x00800000 /* True to enable triggers */ #define SQLITE_DeferFKs 0x01000000 /* Defer all FK constraints */ #define SQLITE_QueryOnly 0x02000000 /* Disable database changes */ #define SQLITE_VdbeEQP 0x04000000 /* Debug EXPLAIN QUERY PLAN */ #define SQLITE_Vacuum 0x08000000 /* Currently in a VACUUM */ #define SQLITE_CellSizeCk 0x10000000 /* Check btree cell sizes on load */ /* ** Bits of the sqlite3.dbOptFlags field that are used by the ** sqlite3_test_control(SQLITE_TESTCTRL_OPTIMIZATIONS,...) interface to ** selectively disable various optimizations. */ #define SQLITE_QueryFlattener 0x0001 /* Query flattening */ #define SQLITE_ColumnCache 0x0002 /* Column cache */ #define SQLITE_GroupByOrder 0x0004 /* GROUPBY cover of ORDERBY */ #define SQLITE_FactorOutConst 0x0008 /* Constant factoring */ /* not used 0x0010 // Was: SQLITE_IdxRealAsInt */ #define SQLITE_DistinctOpt 0x0020 /* DISTINCT using indexes */ #define SQLITE_CoverIdxScan 0x0040 /* Covering index scans */ #define SQLITE_OrderByIdxJoin 0x0080 /* ORDER BY of joins via index */ #define SQLITE_SubqCoroutine 0x0100 /* Evaluate subqueries as coroutines */ #define SQLITE_Transitive 0x0200 /* Transitive constraints */ #define SQLITE_OmitNoopJoin 0x0400 /* Omit unused tables in joins */ #define SQLITE_Stat34 0x0800 /* Use STAT3 or STAT4 data */ #define SQLITE_AllOpts 0xffff /* All optimizations */ /* ** Macros for testing whether or not optimizations are enabled or disabled. 
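**
** For example (illustrative; the particular mask is arbitrary), the test
** suite can disable the query flattener and the transitive-constraint
** optimization on a single connection with
**
**     sqlite3_test_control(SQLITE_TESTCTRL_OPTIMIZATIONS, db,
**                          SQLITE_QueryFlattener|SQLITE_Transitive);
**
** after which OptimizationDisabled(db, SQLITE_QueryFlattener) evaluates
** to true.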
*/ #ifndef SQLITE_OMIT_BUILTIN_TEST #define OptimizationDisabled(db, mask) (((db)->dbOptFlags&(mask))!=0) #define OptimizationEnabled(db, mask) (((db)->dbOptFlags&(mask))==0) #else #define OptimizationDisabled(db, mask) 0 #define OptimizationEnabled(db, mask) 1 #endif /* ** Return true if it is OK to factor constant expressions into the initialization ** code. The argument is a Parse object for the code generator. */ #define ConstFactorOk(P) ((P)->okConstFactor) /* ** Possible values for the sqlite.magic field. ** The numbers are obtained at random and have no special meaning, other ** than being distinct from one another. */ #define SQLITE_MAGIC_OPEN 0xa029a697 /* Database is open */ #define SQLITE_MAGIC_CLOSED 0x9f3c2d33 /* Database is closed */ #define SQLITE_MAGIC_SICK 0x4b771290 /* Error and awaiting close */ #define SQLITE_MAGIC_BUSY 0xf03b7906 /* Database currently in use */ #define SQLITE_MAGIC_ERROR 0xb5357930 /* An SQLITE_MISUSE error occurred */ #define SQLITE_MAGIC_ZOMBIE 0x64cffc7f /* Close with last statement close */ /* ** Each SQL function is defined by an instance of the following ** structure. A pointer to this structure is stored in the sqlite.aFunc ** hash table. When multiple functions have the same name, the hash table ** points to a linked list of these structures. */ struct FuncDef { i16 nArg; /* Number of arguments. -1 means unlimited */ u16 funcFlags; /* Some combination of SQLITE_FUNC_* */ void *pUserData; /* User data parameter */ FuncDef *pNext; /* Next function with same name */ void (*xFunc)(sqlite3_context*,int,sqlite3_value**); /* Regular function */ void (*xStep)(sqlite3_context*,int,sqlite3_value**); /* Aggregate step */ void (*xFinalize)(sqlite3_context*); /* Aggregate finalizer */ char *zName; /* SQL name of the function. */ FuncDef *pHash; /* Next with a different name but the same hash */ FuncDestructor *pDestructor; /* Reference counted destructor function */ }; /* ** This structure encapsulates a user-function destructor callback (as ** configured using create_function_v2()) and a reference counter. When ** create_function_v2() is called to create a function with a destructor, ** a single object of this type is allocated. FuncDestructor.nRef is set to ** the number of FuncDef objects created (either 1 or 3, depending on whether ** or not the specified encoding is SQLITE_ANY). The FuncDef.pDestructor ** member of each of the new FuncDef objects is set to point to the allocated ** FuncDestructor. ** ** Thereafter, when one of the FuncDef objects is deleted, the reference ** count on this object is decremented. When it reaches 0, the destructor ** is invoked and the FuncDestructor structure freed. */ struct FuncDestructor { int nRef; void (*xDestroy)(void *); void *pUserData; }; /* ** Possible values for FuncDef.flags. Note that the _LENGTH and _TYPEOF ** values must correspond to OPFLAG_LENGTHARG and OPFLAG_TYPEOFARG. There ** are assert() statements in the code to verify this. */ #define SQLITE_FUNC_ENCMASK 0x003 /* SQLITE_UTF8, SQLITE_UTF16BE or UTF16LE */ #define SQLITE_FUNC_LIKE 0x004 /* Candidate for the LIKE optimization */ #define SQLITE_FUNC_CASE 0x008 /* Case-sensitive LIKE-type function */ #define SQLITE_FUNC_EPHEM 0x010 /* Ephemeral.
Delete with VDBE */ #define SQLITE_FUNC_NEEDCOLL 0x020 /* sqlite3GetFuncCollSeq() might be called */ #define SQLITE_FUNC_LENGTH 0x040 /* Built-in length() function */ #define SQLITE_FUNC_TYPEOF 0x080 /* Built-in typeof() function */ #define SQLITE_FUNC_COUNT 0x100 /* Built-in count(*) aggregate */ #define SQLITE_FUNC_COALESCE 0x200 /* Built-in coalesce() or ifnull() */ #define SQLITE_FUNC_UNLIKELY 0x400 /* Built-in unlikely() function */ #define SQLITE_FUNC_CONSTANT 0x800 /* Constant inputs give a constant output */ #define SQLITE_FUNC_MINMAX 0x1000 /* True for min() and max() aggregates */ /* ** The following three macros, FUNCTION(), LIKEFUNC() and AGGREGATE() are ** used to create the initializers for the FuncDef structures. ** ** FUNCTION(zName, nArg, iArg, bNC, xFunc) ** Used to create a scalar function definition of a function zName ** implemented by C function xFunc that accepts nArg arguments. The ** value passed as iArg is cast to a (void*) and made available ** as the user-data (sqlite3_user_data()) for the function. If ** argument bNC is true, then the SQLITE_FUNC_NEEDCOLL flag is set. ** ** VFUNCTION(zName, nArg, iArg, bNC, xFunc) ** Like FUNCTION except it omits the SQLITE_FUNC_CONSTANT flag. ** ** AGGREGATE(zName, nArg, iArg, bNC, xStep, xFinal) ** Used to create an aggregate function definition implemented by ** the C functions xStep and xFinal. The first four parameters ** are interpreted in the same way as the first 4 parameters to ** FUNCTION(). ** ** LIKEFUNC(zName, nArg, pArg, flags) ** Used to create a scalar function definition of a function zName ** that accepts nArg arguments and is implemented by a call to C ** function likeFunc. Argument pArg is cast to a (void *) and made ** available as the function user-data (sqlite3_user_data()). The ** FuncDef.flags variable is set to the value passed as the flags ** parameter. */ #define FUNCTION(zName, nArg, iArg, bNC, xFunc) \ {nArg, SQLITE_FUNC_CONSTANT|SQLITE_UTF8|(bNC*SQLITE_FUNC_NEEDCOLL), \ SQLITE_INT_TO_PTR(iArg), 0, xFunc, 0, 0, #zName, 0, 0} #define VFUNCTION(zName, nArg, iArg, bNC, xFunc) \ {nArg, SQLITE_UTF8|(bNC*SQLITE_FUNC_NEEDCOLL), \ SQLITE_INT_TO_PTR(iArg), 0, xFunc, 0, 0, #zName, 0, 0} #define FUNCTION2(zName, nArg, iArg, bNC, xFunc, extraFlags) \ {nArg,SQLITE_FUNC_CONSTANT|SQLITE_UTF8|(bNC*SQLITE_FUNC_NEEDCOLL)|extraFlags,\ SQLITE_INT_TO_PTR(iArg), 0, xFunc, 0, 0, #zName, 0, 0} #define STR_FUNCTION(zName, nArg, pArg, bNC, xFunc) \ {nArg, SQLITE_FUNC_CONSTANT|SQLITE_UTF8|(bNC*SQLITE_FUNC_NEEDCOLL), \ pArg, 0, xFunc, 0, 0, #zName, 0, 0} #define LIKEFUNC(zName, nArg, arg, flags) \ {nArg, SQLITE_FUNC_CONSTANT|SQLITE_UTF8|flags, \ (void *)arg, 0, likeFunc, 0, 0, #zName, 0, 0} #define AGGREGATE(zName, nArg, arg, nc, xStep, xFinal) \ {nArg, SQLITE_UTF8|(nc*SQLITE_FUNC_NEEDCOLL), \ SQLITE_INT_TO_PTR(arg), 0, 0, xStep,xFinal,#zName,0,0} #define AGGREGATE2(zName, nArg, arg, nc, xStep, xFinal, extraFlags) \ {nArg, SQLITE_UTF8|(nc*SQLITE_FUNC_NEEDCOLL)|extraFlags, \ SQLITE_INT_TO_PTR(arg), 0, 0, xStep,xFinal,#zName,0,0} /* ** All current savepoints are stored in a linked list starting at ** sqlite3.pSavepoint. The first element in the list is the most recently ** opened savepoint. Savepoints are added to the list by the vdbe ** OP_Savepoint instruction. */ struct Savepoint { char *zName; /* Savepoint name (nul-terminated) */ i64 nDeferredCons; /* Number of deferred fk violations */ i64 nDeferredImmCons; /* Number of deferred imm fk. 
*/ Savepoint *pNext; /* Parent savepoint (if any) */ }; /* ** The following are used as the second parameter to sqlite3Savepoint(), ** and as the P1 argument to the OP_Savepoint instruction. */ #define SAVEPOINT_BEGIN 0 #define SAVEPOINT_RELEASE 1 #define SAVEPOINT_ROLLBACK 2 /* ** Each SQLite module (virtual table definition) is defined by an ** instance of the following structure, stored in the sqlite3.aModule ** hash table. */ struct Module { const sqlite3_module *pModule; /* Callback pointers */ const char *zName; /* Name passed to create_module() */ void *pAux; /* pAux passed to create_module() */ void (*xDestroy)(void *); /* Module destructor function */ }; /* ** Information about each column of an SQL table is held in an instance ** of this structure. */ struct Column { char *zName; /* Name of this column */ Expr *pDflt; /* Default value of this column */ char *zDflt; /* Original text of the default value */ char *zType; /* Data type for this column */ char *zColl; /* Collating sequence. If NULL, use the default */ u8 notNull; /* An OE_ code for handling a NOT NULL constraint */ char affinity; /* One of the SQLITE_AFF_... values */ u8 szEst; /* Estimated size of this column. INT==1 */ u8 colFlags; /* Boolean properties. See COLFLAG_ defines below */ }; /* Allowed values for Column.colFlags: */ #define COLFLAG_PRIMKEY 0x0001 /* Column is part of the primary key */ #define COLFLAG_HIDDEN 0x0002 /* A hidden column in a virtual table */ /* ** A "Collating Sequence" is defined by an instance of the following ** structure. Conceptually, a collating sequence consists of a name and ** a comparison routine that defines the order of that sequence. ** ** If CollSeq.xCmp is NULL, it means that the ** collating sequence is undefined. Indices built on an undefined ** collating sequence may not be read or written. */ struct CollSeq { char *zName; /* Name of the collating sequence, UTF-8 encoded */ u8 enc; /* Text encoding handled by xCmp() */ void *pUser; /* First argument to xCmp() */ int (*xCmp)(void*,int, const void*, int, const void*); void (*xDel)(void*); /* Destructor for pUser */ }; /* ** A sort order can be either ASC or DESC. */ #define SQLITE_SO_ASC 0 /* Sort in ascending order */ #define SQLITE_SO_DESC 1 /* Sort in descending order */ /* ** Column affinity types. ** ** These used to have mnemonic names like 'i' for SQLITE_AFF_INTEGER and ** 't' for SQLITE_AFF_TEXT. But we can save a little space and improve ** the speed a little by numbering the values consecutively. ** ** But rather than start with 0 or 1, we begin with 'A'. That way, ** when multiple affinity types are concatenated into a string and ** used as the P4 operand, they will be more readable. ** ** Note also that the numeric types are grouped together so that testing ** for a numeric type is a single comparison. And the BLOB type is first. */ #define SQLITE_AFF_BLOB 'A' #define SQLITE_AFF_TEXT 'B' #define SQLITE_AFF_NUMERIC 'C' #define SQLITE_AFF_INTEGER 'D' #define SQLITE_AFF_REAL 'E' #define sqlite3IsNumericAffinity(X) ((X)>=SQLITE_AFF_NUMERIC) /* ** The SQLITE_AFF_MASK value masks off the significant bits of an ** affinity value. */ #define SQLITE_AFF_MASK 0x47 /* ** Additional bit values that can be ORed with an affinity without ** changing the affinity. ** ** The SQLITE_NOTNULL flag is a combination of NULLEQ and JUMPIFNULL. ** It causes an assert() to fire if either operand to a comparison ** operator is NULL. It is added to certain comparison operators to ** prove that the operands are always NOT NULL.
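**
** For example (an illustrative sketch of how the code generator might use
** these bits), a comparison opcode whose P5 operand is set to
**
**     SQLITE_STOREP2 | SQLITE_NULLEQ
**
** writes the comparison result into register P2 instead of jumping, and
** treats two NULL operands as equal.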
*/ #define SQLITE_JUMPIFNULL 0x10 /* jumps if either operand is NULL */ #define SQLITE_STOREP2 0x20 /* Store result in reg[P2] rather than jump */ #define SQLITE_NULLEQ 0x80 /* NULL=NULL */ #define SQLITE_NOTNULL 0x90 /* Assert that operands are never NULL */ /* ** An object of this type is created for each virtual table present in ** the database schema. ** ** If the database schema is shared, then there is one instance of this ** structure for each database connection (sqlite3*) that uses the shared ** schema. This is because each database connection requires its own unique ** instance of the sqlite3_vtab* handle used to access the virtual table ** implementation. sqlite3_vtab* handles can not be shared between ** database connections, even when the rest of the in-memory database ** schema is shared, as the implementation often stores the database ** connection handle passed to it via the xConnect() or xCreate() method ** during initialization internally. This database connection handle may ** then be used by the virtual table implementation to access real tables ** within the database. So that they appear as part of the callers ** transaction, these accesses need to be made via the same database ** connection as that used to execute SQL operations on the virtual table. ** ** All VTable objects that correspond to a single table in a shared ** database schema are initially stored in a linked-list pointed to by ** the Table.pVTable member variable of the corresponding Table object. ** When an sqlite3_prepare() operation is required to access the virtual ** table, it searches the list for the VTable that corresponds to the ** database connection doing the preparing so as to use the correct ** sqlite3_vtab* handle in the compiled query. ** ** When an in-memory Table object is deleted (for example when the ** schema is being reloaded for some reason), the VTable objects are not ** deleted and the sqlite3_vtab* handles are not xDisconnect()ed ** immediately. Instead, they are moved from the Table.pVTable list to ** another linked list headed by the sqlite3.pDisconnect member of the ** corresponding sqlite3 structure. They are then deleted/xDisconnected ** next time a statement is prepared using said sqlite3*. This is done ** to avoid deadlock issues involving multiple sqlite3.mutex mutexes. ** Refer to comments above function sqlite3VtabUnlockList() for an ** explanation as to why it is safe to add an entry to an sqlite3.pDisconnect ** list without holding the corresponding sqlite3.mutex mutex. ** ** The memory for objects of this type is always allocated by ** sqlite3DbMalloc(), using the connection handle stored in VTable.db as ** the first argument. */ struct VTable { sqlite3 *db; /* Database connection associated with this table */ Module *pMod; /* Pointer to module implementation */ sqlite3_vtab *pVtab; /* Pointer to vtab instance */ int nRef; /* Number of pointers to this structure */ u8 bConstraint; /* True if constraints are supported */ int iSavepoint; /* Depth of the SAVEPOINT stack */ VTable *pNext; /* Next in linked list (see above) */ }; /* ** The schema for each SQL table and view is represented in memory ** by an instance of the following structure. */ struct Table { char *zName; /* Name of the table or view */ Column *aCol; /* Information about each column */ Index *pIndex; /* List of SQL indexes on this table. */ Select *pSelect; /* NULL for tables. Points to definition if a view. 
*/ FKey *pFKey; /* Linked list of all foreign keys in this table */ char *zColAff; /* String defining the affinity of each column */ #ifndef SQLITE_OMIT_CHECK ExprList *pCheck; /* All CHECK constraints */ #endif int tnum; /* Root BTree page for this table */ i16 iPKey; /* If not negative, use aCol[iPKey] as the rowid */ i16 nCol; /* Number of columns in this table */ u16 nRef; /* Number of pointers to this Table */ LogEst nRowLogEst; /* Estimated rows in table - from sqlite_stat1 table */ LogEst szTabRow; /* Estimated size of each table row in bytes */ #ifdef SQLITE_ENABLE_COSTMULT LogEst costMult; /* Cost multiplier for using this table */ #endif u8 tabFlags; /* Mask of TF_* values */ u8 keyConf; /* What to do in case of uniqueness conflict on iPKey */ #ifndef SQLITE_OMIT_ALTERTABLE int addColOffset; /* Offset in CREATE TABLE stmt to add a new column */ #endif #ifndef SQLITE_OMIT_VIRTUALTABLE int nModuleArg; /* Number of arguments to the module */ char **azModuleArg; /* Text of all module args. [0] is module name */ VTable *pVTable; /* List of VTable objects. */ #endif Trigger *pTrigger; /* List of triggers stored in pSchema */ Schema *pSchema; /* Schema that contains this table */ Table *pNextZombie; /* Next on the Parse.pZombieTab list */ }; /* ** Allowed values for Table.tabFlags. ** ** TF_OOOHidden applies to virtual tables that have hidden columns that are ** followed by non-hidden columns. Example: "CREATE VIRTUAL TABLE x USING ** vtab1(a HIDDEN, b);". Since "b" is a non-hidden column but "a" is hidden, ** the TF_OOOHidden attribute would apply in this case. Such tables require ** special handling during INSERT processing. */ #define TF_Readonly 0x01 /* Read-only system table */ #define TF_Ephemeral 0x02 /* An ephemeral table */ #define TF_HasPrimaryKey 0x04 /* Table has a primary key */ #define TF_Autoincrement 0x08 /* Integer primary key is autoincrement */ #define TF_Virtual 0x10 /* Is a virtual table */ #define TF_WithoutRowid 0x20 /* No rowid. PRIMARY KEY is the key */ #define TF_NoVisibleRowid 0x40 /* No user-visible "rowid" column */ #define TF_OOOHidden 0x80 /* Out-of-Order hidden columns */ /* ** Test to see whether or not a table is a virtual table. This is ** done as a macro so that it will be optimized out when virtual ** table support is omitted from the build. */ #ifndef SQLITE_OMIT_VIRTUALTABLE # define IsVirtual(X) (((X)->tabFlags & TF_Virtual)!=0) # define IsHiddenColumn(X) (((X)->colFlags & COLFLAG_HIDDEN)!=0) #else # define IsVirtual(X) 0 # define IsHiddenColumn(X) 0 #endif /* Does the table have a rowid */ #define HasRowid(X) (((X)->tabFlags & TF_WithoutRowid)==0) #define VisibleRowid(X) (((X)->tabFlags & TF_NoVisibleRowid)==0) /* ** Each foreign key constraint is an instance of the following structure. ** ** A foreign key is associated with two tables. The "from" table is ** the table that contains the REFERENCES clause that creates the foreign ** key. The "to" table is the table that is named in the REFERENCES clause. ** Consider this example: ** ** CREATE TABLE ex1( ** a INTEGER PRIMARY KEY, ** b INTEGER CONSTRAINT fk1 REFERENCES ex2(x) ** ); ** ** For foreign key "fk1", the from-table is "ex1" and the to-table is "ex2". ** Equivalent names: ** ** from-table == child-table ** to-table == parent-table ** ** Each REFERENCES clause generates an instance of the following structure ** which is attached to the from-table. The to-table need not exist when ** the from-table is created. The existence of the to-table is not checked. 
** ** The list of all parents for child Table X is held at X.pFKey. ** ** A list of all children for a table named Z (which might not even exist) ** is held in Schema.fkeyHash with a hash key of Z. */ struct FKey { Table *pFrom; /* Table containing the REFERENCES clause (aka: Child) */ FKey *pNextFrom; /* Next FKey with the same in pFrom. Next parent of pFrom */ char *zTo; /* Name of table that the key points to (aka: Parent) */ FKey *pNextTo; /* Next with the same zTo. Next child of zTo. */ FKey *pPrevTo; /* Previous with the same zTo */ int nCol; /* Number of columns in this key */ /* EV: R-30323-21917 */ u8 isDeferred; /* True if constraint checking is deferred till COMMIT */ u8 aAction[2]; /* ON DELETE and ON UPDATE actions, respectively */ Trigger *apTrigger[2];/* Triggers for aAction[] actions */ struct sColMap { /* Mapping of columns in pFrom to columns in zTo */ int iFrom; /* Index of column in pFrom */ char *zCol; /* Name of column in zTo. If NULL use PRIMARY KEY */ } aCol[1]; /* One entry for each of nCol columns */ }; /* ** SQLite supports many different ways to resolve a constraint ** error. ROLLBACK processing means that a constraint violation ** causes the operation in process to fail and for the current transaction ** to be rolled back. ABORT processing means the operation in process ** fails and any prior changes from that one operation are backed out, ** but the transaction is not rolled back. FAIL processing means that ** the operation in progress stops and returns an error code. But prior ** changes due to the same operation are not backed out and no rollback ** occurs. IGNORE means that the particular row that caused the constraint ** error is not inserted or updated. Processing continues and no error ** is returned. REPLACE means that preexisting database rows that caused ** a UNIQUE constraint violation are removed so that the new insert or ** update can proceed. Processing continues and no error is reported. ** ** RESTRICT, SETNULL, and CASCADE actions apply only to foreign keys. ** RESTRICT is the same as ABORT for IMMEDIATE foreign keys and the ** same as ROLLBACK for DEFERRED keys. SETNULL means that the foreign ** key is set to NULL. CASCADE means that a DELETE or UPDATE of the ** referenced table row is propagated into the row that holds the ** foreign key. ** ** The following symbolic values are used to record which type ** of action to take. */ #define OE_None 0 /* There is no constraint to check */ #define OE_Rollback 1 /* Fail the operation and rollback the transaction */ #define OE_Abort 2 /* Back out changes but do no rollback transaction */ #define OE_Fail 3 /* Stop the operation but leave all prior changes */ #define OE_Ignore 4 /* Ignore the error. Do not do the INSERT or UPDATE */ #define OE_Replace 5 /* Delete existing record, then do INSERT or UPDATE */ #define OE_Restrict 6 /* OE_Abort for IMMEDIATE, OE_Rollback for DEFERRED */ #define OE_SetNull 7 /* Set the foreign key value to NULL */ #define OE_SetDflt 8 /* Set the foreign key value to its default */ #define OE_Cascade 9 /* Cascade the changes */ #define OE_Default 10 /* Do whatever the default action is */ /* ** An instance of the following structure is passed as the first ** argument to sqlite3VdbeKeyCompare and is used to control the ** comparison of the two index keys. ** ** Note that aSortOrder[] and aColl[] have nField+1 slots. There ** are nField slots for the columns of an index then one extra slot ** for the rowid at the end. 
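**
** A minimal sketch (illustrative only, not from the original comment),
** assuming pKeyInfo is a fully initialized KeyInfo object: aColl[i] gives
** the collating sequence and aSortOrder[i] the sort direction for the
** i-th key column, so the parallel arrays line up as follows:
**
**       int i;
**       for(i=0; i<pKeyInfo->nField; i++){
**         CollSeq *pColl = pKeyInfo->aColl[i];
**         u8 bDesc = pKeyInfo->aSortOrder[i];
**       }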
*/ struct KeyInfo { u32 nRef; /* Number of references to this KeyInfo object */ u8 enc; /* Text encoding - one of the SQLITE_UTF* values */ u16 nField; /* Number of key columns in the index */ u16 nXField; /* Number of columns beyond the key columns */ sqlite3 *db; /* The database connection */ u8 *aSortOrder; /* Sort order for each column. */ CollSeq *aColl[1]; /* Collating sequence for each term of the key */ }; /* ** An instance of the following structure holds information about a ** single index record that has already been parsed out into individual ** values. ** ** A record is an object that contains one or more fields of data. ** Records are used to store the content of a table row and to store ** the key of an index. A blob encoding of a record is created by ** the OP_MakeRecord opcode of the VDBE and is disassembled by the ** OP_Column opcode. ** ** This structure holds a record that has already been disassembled ** into its constituent fields. ** ** The r1 and r2 member variables are only used by the optimized comparison ** functions vdbeRecordCompareInt() and vdbeRecordCompareString(). */ struct UnpackedRecord { KeyInfo *pKeyInfo; /* Collation and sort-order information */ u16 nField; /* Number of entries in apMem[] */ i8 default_rc; /* Comparison result if keys are equal */ u8 errCode; /* Error detected by xRecordCompare (CORRUPT or NOMEM) */ Mem *aMem; /* Values */ int r1; /* Value to return if (lhs > rhs) */ int r2; /* Value to return if (rhs < lhs) */ }; /* ** Each SQL index is represented in memory by an ** instance of the following structure. ** ** The columns of the table that are to be indexed are described ** by the aiColumn[] field of this structure. For example, suppose ** we have the following table and index: ** ** CREATE TABLE Ex1(c1 int, c2 int, c3 text); ** CREATE INDEX Ex2 ON Ex1(c3,c1); ** ** In the Table structure describing Ex1, nCol==3 because there are ** three columns in the table. In the Index structure describing ** Ex2, nColumn==2 since 2 of the 3 columns of Ex1 are indexed. ** The value of aiColumn is {2, 0}. aiColumn[0]==2 because the ** first column to be indexed (c3) has an index of 2 in Ex1.aCol[]. ** The second column to be indexed (c1) has an index of 0 in ** Ex1.aCol[], hence Ex2.aiColumn[1]==0. ** ** The Index.onError field determines whether or not the indexed columns ** must be unique and what to do if they are not. When Index.onError=OE_None, ** it means this is not a unique index. Otherwise it is a unique index ** and the value of Index.onError indicate the which conflict resolution ** algorithm to employ whenever an attempt is made to insert a non-unique ** element. ** ** While parsing a CREATE TABLE or CREATE INDEX statement in order to ** generate VDBE code (as opposed to parsing one read from an sqlite_master ** table as part of parsing an existing database schema), transient instances ** of this structure may be created. In this case the Index.tnum variable is ** used to store the address of a VDBE instruction, not a database page ** number (it cannot - the database page is not allocated until the VDBE ** program is executed). See convertToWithoutRowidTable() for details. */ struct Index { char *zName; /* Name of this index */ i16 *aiColumn; /* Which columns are used by this index. 1st is 0 */ LogEst *aiRowLogEst; /* From ANALYZE: Est. 
rows selected by each column */ Table *pTable; /* The SQL table being indexed */ char *zColAff; /* String defining the affinity of each column */ Index *pNext; /* The next index associated with the same table */ Schema *pSchema; /* Schema containing this index */ u8 *aSortOrder; /* for each column: True==DESC, False==ASC */ char **azColl; /* Array of collation sequence names for index */ Expr *pPartIdxWhere; /* WHERE clause for partial indices */ int tnum; /* DB Page containing root of this index */ LogEst szIdxRow; /* Estimated average row size in bytes */ u16 nKeyCol; /* Number of columns forming the key */ u16 nColumn; /* Number of columns stored in the index */ u8 onError; /* OE_Abort, OE_Ignore, OE_Replace, or OE_None */ unsigned idxType:2; /* 1==UNIQUE, 2==PRIMARY KEY, 0==CREATE INDEX */ unsigned bUnordered:1; /* Use this index for == or IN queries only */ unsigned uniqNotNull:1; /* True if UNIQUE and NOT NULL for all columns */ unsigned isResized:1; /* True if resizeIndexObject() has been called */ unsigned isCovering:1; /* True if this is a covering index */ unsigned noSkipScan:1; /* Do not try to use skip-scan if true */ #ifdef SQLITE_ENABLE_STAT3_OR_STAT4 int nSample; /* Number of elements in aSample[] */ int nSampleCol; /* Size of IndexSample.anEq[] and so on */ tRowcnt *aAvgEq; /* Average nEq values for keys not in aSample */ IndexSample *aSample; /* Samples of the left-most key */ tRowcnt *aiRowEst; /* Non-logarithmic stat1 data for this index */ tRowcnt nRowEst0; /* Non-logarithmic number of rows in the index */ #endif }; /* ** Allowed values for Index.idxType */ #define SQLITE_IDXTYPE_APPDEF 0 /* Created using CREATE INDEX */ #define SQLITE_IDXTYPE_UNIQUE 1 /* Implements a UNIQUE constraint */ #define SQLITE_IDXTYPE_PRIMARYKEY 2 /* Is the PRIMARY KEY for the table */ /* Return true if index X is a PRIMARY KEY index */ #define IsPrimaryKeyIndex(X) ((X)->idxType==SQLITE_IDXTYPE_PRIMARYKEY) /* Return true if index X is a UNIQUE index */ #define IsUniqueIndex(X) ((X)->onError!=OE_None) /* ** Each sample stored in the sqlite_stat3 table is represented in memory ** using a structure of this type. See documentation at the top of the ** analyze.c source file for additional information. */ struct IndexSample { void *p; /* Pointer to sampled record */ int n; /* Size of record in bytes */ tRowcnt *anEq; /* Est. number of rows where the key equals this sample */ tRowcnt *anLt; /* Est. number of rows where key is less than this sample */ tRowcnt *anDLt; /* Est. number of distinct keys less than this sample */ }; /* ** Each token coming out of the lexer is an instance of ** this structure. Tokens are also used as part of an expression. ** ** Note if Token.z==0 then Token.dyn and Token.n are undefined and ** may contain random values. Do not make any assumptions about Token.dyn ** and Token.n when Token.z==0. */ struct Token { const char *z; /* Text of the token. Not NULL-terminated! */ unsigned int n; /* Number of characters in this token */ }; /* ** An instance of this structure contains information needed to generate ** code for a SELECT that contains aggregate functions. ** ** If Expr.op==TK_AGG_COLUMN or TK_AGG_FUNCTION then Expr.pAggInfo is a ** pointer to this structure. The Expr.iColumn field is the index in ** AggInfo.aCol[] or AggInfo.aFunc[] of information needed to generate ** code for that node. ** ** AggInfo.pGroupBy and AggInfo.aFunc.pExpr point to fields within the ** original Select structure that describes the SELECT statement. 
These ** fields do not need to be freed when deallocating the AggInfo structure. */ struct AggInfo { u8 directMode; /* Direct rendering mode means take data directly ** from source tables rather than from accumulators */ u8 useSortingIdx; /* In direct mode, reference the sorting index rather ** than the source table */ int sortingIdx; /* Cursor number of the sorting index */ int sortingIdxPTab; /* Cursor number of pseudo-table */ int nSortingColumn; /* Number of columns in the sorting index */ int mnReg, mxReg; /* Range of registers allocated for aCol and aFunc */ ExprList *pGroupBy; /* The group by clause */ struct AggInfo_col { /* For each column used in source tables */ Table *pTab; /* Source table */ int iTable; /* Cursor number of the source table */ int iColumn; /* Column number within the source table */ int iSorterColumn; /* Column number in the sorting index */ int iMem; /* Memory location that acts as accumulator */ Expr *pExpr; /* The original expression */ } *aCol; int nColumn; /* Number of used entries in aCol[] */ int nAccumulator; /* Number of columns that show through to the output. ** Additional columns are used only as parameters to ** aggregate functions */ struct AggInfo_func { /* For each aggregate function */ Expr *pExpr; /* Expression encoding the function */ FuncDef *pFunc; /* The aggregate function implementation */ int iMem; /* Memory location that acts as accumulator */ int iDistinct; /* Ephemeral table used to enforce DISTINCT */ } *aFunc; int nFunc; /* Number of entries in aFunc[] */ }; /* ** The datatype ynVar is a signed integer, either 16-bit or 32-bit. ** Usually it is 16-bits. But if SQLITE_MAX_VARIABLE_NUMBER is greater ** than 32767 we have to make it 32-bit. 16-bit is preferred because ** it uses less memory in the Expr object, which is a big memory user ** in systems with lots of prepared statements. And few applications ** need more than about 10 or 20 variables. But some extreme users want ** to have prepared statements with over 32767 variables, and for them ** the option is available (at compile-time). */ #if SQLITE_MAX_VARIABLE_NUMBER<=32767 typedef i16 ynVar; #else typedef int ynVar; #endif /* ** Each node of an expression in the parse tree is an instance ** of this structure. ** ** Expr.op is the opcode. The integer parser token codes are reused ** as opcodes here. For example, the parser defines TK_GE to be an integer ** code representing the ">=" operator. This same integer code is reused ** to represent the greater-than-or-equal-to operator in the expression ** tree. ** ** If the expression is an SQL literal (TK_INTEGER, TK_FLOAT, TK_BLOB, ** or TK_STRING), then Expr.token contains the text of the SQL literal. If ** the expression is a variable (TK_VARIABLE), then Expr.token contains the ** variable name. Finally, if the expression is an SQL function (TK_FUNCTION), ** then Expr.token contains the name of the function. ** ** Expr.pRight and Expr.pLeft are the left and right subexpressions of a ** binary operator. Either or both may be NULL. ** ** Expr.x.pList is a list of arguments if the expression is an SQL function, ** a CASE expression or an IN expression of the form " IN (, ...)". ** Expr.x.pSelect is used if the expression is a sub-select or an expression of ** the form " IN (SELECT ...)". If the EP_xIsSelect bit is set in the ** Expr.flags mask, then Expr.x.pSelect is valid. Otherwise, Expr.x.pList is ** valid. ** ** An expression of the form ID or ID.ID refers to a column in a table. 
** For such expressions, Expr.op is set to TK_COLUMN and Expr.iTable is ** the integer cursor number of a VDBE cursor pointing to that table and ** Expr.iColumn is the column number for the specific column. If the ** expression is used as a result in an aggregate SELECT, then the ** value is also stored in the Expr.iAgg column in the aggregate so that ** it can be accessed after all aggregates are computed. ** ** If the expression is an unbound variable marker (a question mark ** character '?' in the original SQL) then the Expr.iTable holds the index ** number for that variable. ** ** If the expression is a subquery then Expr.iColumn holds an integer ** register number containing the result of the subquery. If the ** subquery gives a constant result, then iTable is -1. If the subquery ** gives a different answer at different times during statement processing ** then iTable is the address of a subroutine that computes the subquery. ** ** If the Expr is of type OP_Column, and the table it is selecting from ** is a disk table or the "old.*" pseudo-table, then pTab points to the ** corresponding table definition. ** ** ALLOCATION NOTES: ** ** Expr objects can use a lot of memory space in database schema. To ** help reduce memory requirements, sometimes an Expr object will be ** truncated. And to reduce the number of memory allocations, sometimes ** two or more Expr objects will be stored in a single memory allocation, ** together with Expr.zToken strings. ** ** If the EP_Reduced and EP_TokenOnly flags are set when ** an Expr object is truncated. When EP_Reduced is set, then all ** the child Expr objects in the Expr.pLeft and Expr.pRight subtrees ** are contained within the same memory allocation. Note, however, that ** the subtrees in Expr.x.pList or Expr.x.pSelect are always separately ** allocated, regardless of whether or not EP_Reduced is set. */ struct Expr { u8 op; /* Operation performed by this node */ char affinity; /* The affinity of the column or 0 if not a column */ u32 flags; /* Various flags. EP_* See below */ union { char *zToken; /* Token value. Zero terminated and dequoted */ int iValue; /* Non-negative integer value if EP_IntValue */ } u; /* If the EP_TokenOnly flag is set in the Expr.flags mask, then no ** space is allocated for the fields below this point. An attempt to ** access them will result in a segfault or malfunction. *********************************************************************/ Expr *pLeft; /* Left subnode */ Expr *pRight; /* Right subnode */ union { ExprList *pList; /* op = IN, EXISTS, SELECT, CASE, FUNCTION, BETWEEN */ Select *pSelect; /* EP_xIsSelect and op = IN, EXISTS, SELECT */ } x; /* If the EP_Reduced flag is set in the Expr.flags mask, then no ** space is allocated for the fields below this point. An attempt to ** access them will result in a segfault or malfunction. *********************************************************************/ #if SQLITE_MAX_EXPR_DEPTH>0 int nHeight; /* Height of the tree headed by this node */ #endif int iTable; /* TK_COLUMN: cursor number of table holding column ** TK_REGISTER: register number ** TK_TRIGGER: 1 -> new, 0 -> old ** EP_Unlikely: 134217728 times likelihood */ ynVar iColumn; /* TK_COLUMN: column index. -1 for rowid. ** TK_VARIABLE: variable number (always >= 1). 
*/ i16 iAgg; /* Which entry in pAggInfo->aCol[] or ->aFunc[] */ i16 iRightJoinTable; /* If EP_FromJoin, the right table of the join */ u8 op2; /* TK_REGISTER: original value of Expr.op ** TK_COLUMN: the value of p5 for OP_Column ** TK_AGG_FUNCTION: nesting depth */ AggInfo *pAggInfo; /* Used by TK_AGG_COLUMN and TK_AGG_FUNCTION */ Table *pTab; /* Table for TK_COLUMN expressions. */ }; /* ** The following are the meanings of bits in the Expr.flags field. */ #define EP_FromJoin 0x000001 /* Originates in ON/USING clause of outer join */ #define EP_Agg 0x000002 /* Contains one or more aggregate functions */ #define EP_Resolved 0x000004 /* IDs have been resolved to COLUMNs */ #define EP_Error 0x000008 /* Expression contains one or more errors */ #define EP_Distinct 0x000010 /* Aggregate function with DISTINCT keyword */ #define EP_VarSelect 0x000020 /* pSelect is correlated, not constant */ #define EP_DblQuoted 0x000040 /* token.z was originally in "..." */ #define EP_InfixFunc 0x000080 /* True for an infix function: LIKE, GLOB, etc */ #define EP_Collate 0x000100 /* Tree contains a TK_COLLATE operator */ #define EP_Generic 0x000200 /* Ignore COLLATE or affinity on this tree */ #define EP_IntValue 0x000400 /* Integer value contained in u.iValue */ #define EP_xIsSelect 0x000800 /* x.pSelect is valid (otherwise x.pList is) */ #define EP_Skip 0x001000 /* COLLATE, AS, or UNLIKELY */ #define EP_Reduced 0x002000 /* Expr struct EXPR_REDUCEDSIZE bytes only */ #define EP_TokenOnly 0x004000 /* Expr struct EXPR_TOKENONLYSIZE bytes only */ #define EP_Static 0x008000 /* Held in memory not obtained from malloc() */ #define EP_MemToken 0x010000 /* Need to sqlite3DbFree() Expr.zToken */ #define EP_NoReduce 0x020000 /* Cannot EXPRDUP_REDUCE this Expr */ #define EP_Unlikely 0x040000 /* unlikely() or likelihood() function */ #define EP_ConstFunc 0x080000 /* Node is a SQLITE_FUNC_CONSTANT function */ #define EP_CanBeNull 0x100000 /* Can be null despite NOT NULL constraint */ #define EP_Subquery 0x200000 /* Tree contains a TK_SELECT operator */ /* ** Combinations of two or more EP_* flags */ #define EP_Propagate (EP_Collate|EP_Subquery) /* Propagate these bits up tree */ /* ** These macros can be used to test, set, or clear bits in the ** Expr.flags field. */ #define ExprHasProperty(E,P) (((E)->flags&(P))!=0) #define ExprHasAllProperty(E,P) (((E)->flags&(P))==(P)) #define ExprSetProperty(E,P) (E)->flags|=(P) #define ExprClearProperty(E,P) (E)->flags&=~(P) /* The ExprSetVVAProperty() macro is used for Verification, Validation, ** and Accreditation only. It works like ExprSetProperty() during VVA ** processes but is a no-op for delivery. */ #ifdef SQLITE_DEBUG # define ExprSetVVAProperty(E,P) (E)->flags|=(P) #else # define ExprSetVVAProperty(E,P) #endif /* ** Macros to determine the number of bytes required by a normal Expr ** struct, an Expr struct with the EP_Reduced flag set in Expr.flags ** and an Expr struct with the EP_TokenOnly flag set. */ #define EXPR_FULLSIZE sizeof(Expr) /* Full size */ #define EXPR_REDUCEDSIZE offsetof(Expr,iTable) /* Common features */ #define EXPR_TOKENONLYSIZE offsetof(Expr,pLeft) /* Fewer features */ /* ** Flags passed to the sqlite3ExprDup() function. See the header comment ** above sqlite3ExprDup() for details. */ #define EXPRDUP_REDUCE 0x0001 /* Used reduced-size Expr nodes */ /* ** A list of expressions. Each expression may optionally have a ** name. 
An expr/name combination can be used in several ways, such ** as the list of "expr AS ID" fields following a "SELECT" or in the ** list of "ID = expr" items in an UPDATE. A list of expressions can ** also be used as the argument to a function, in which case the a.zName ** field is not used. ** ** By default the Expr.zSpan field holds a human-readable description of ** the expression that is used in the generation of error messages and ** column labels. In this case, Expr.zSpan is typically the text of a ** column expression as it exists in a SELECT statement. However, if ** the bSpanIsTab flag is set, then zSpan is overloaded to mean the name ** of the result column in the form: DATABASE.TABLE.COLUMN. This later ** form is used for name resolution with nested FROM clauses. */ struct ExprList { int nExpr; /* Number of expressions on the list */ struct ExprList_item { /* For each expression in the list */ Expr *pExpr; /* The list of expressions */ char *zName; /* Token associated with this expression */ char *zSpan; /* Original text of the expression */ u8 sortOrder; /* 1 for DESC or 0 for ASC */ unsigned done :1; /* A flag to indicate when processing is finished */ unsigned bSpanIsTab :1; /* zSpan holds DB.TABLE.COLUMN */ unsigned reusable :1; /* Constant expression is reusable */ union { struct { u16 iOrderByCol; /* For ORDER BY, column number in result set */ u16 iAlias; /* Index into Parse.aAlias[] for zName */ } x; int iConstExprReg; /* Register in which Expr value is cached */ } u; } *a; /* Alloc a power of two greater or equal to nExpr */ }; /* ** An instance of this structure is used by the parser to record both ** the parse tree for an expression and the span of input text for an ** expression. */ struct ExprSpan { Expr *pExpr; /* The expression parse tree */ const char *zStart; /* First character of input text */ const char *zEnd; /* One character past the end of input text */ }; /* ** An instance of this structure can hold a simple list of identifiers, ** such as the list "a,b,c" in the following statements: ** ** INSERT INTO t(a,b,c) VALUES ...; ** CREATE INDEX idx ON t(a,b,c); ** CREATE TRIGGER trig BEFORE UPDATE ON t(a,b,c) ...; ** ** The IdList.a.idx field is used when the IdList represents the list of ** column names after a table name in an INSERT statement. In the statement ** ** INSERT INTO t(a,b,c) ... ** ** If "a" is the k-th column of table "t", then IdList.a[0].idx==k. */ struct IdList { struct IdList_item { char *zName; /* Name of the identifier */ int idx; /* Index in some Table.aCol[] of a column named zName */ } *a; int nId; /* Number of identifiers on the list */ }; /* ** The bitmask datatype defined below is used for various optimizations. ** ** Changing this from a 64-bit to a 32-bit type limits the number of ** tables in a join to 32 instead of 64. But it also reduces the size ** of the library by 738 bytes on ix86. */ typedef u64 Bitmask; /* ** The number of bits in a Bitmask. "BMS" means "BitMask Size". */ #define BMS ((int)(sizeof(Bitmask)*8)) /* ** A bit in a Bitmask */ #define MASKBIT(n) (((Bitmask)1)<<(n)) #define MASKBIT32(n) (((unsigned int)1)<<(n)) /* ** The following structure describes the FROM clause of a SELECT statement. ** Each table or subquery in the FROM clause is a separate element of ** the SrcList.a[] array. ** ** With the addition of multiple database support, the following structure ** can also be used to describe a particular table such as the table that ** is modified by an INSERT, DELETE, or UPDATE statement. 
In standard SQL,
** such a table must be a simple name: ID.  But in SQLite, the table can
** now be identified by a database name, a dot, then the table name: ID.ID.
**
** The jointype starts out showing the join type between the current table
** and the next table on the list.  The parser builds the list this way.
** But sqlite3SrcListShiftJoinType() later shifts the jointypes so that each
** jointype expresses the join between the table and the previous table.
**
** In the colUsed field, the high-order bit (bit 63) is set if the table
** contains more than 63 columns and the 64-th or later column is used.
*/
struct SrcList {
  int nSrc;             /* Number of tables or subqueries in the FROM clause */
  u32 nAlloc;           /* Number of entries allocated in a[] below */
  struct SrcList_item {
    Schema *pSchema;    /* Schema to which this item is fixed */
    char *zDatabase;    /* Name of database holding this table */
    char *zName;        /* Name of the table */
    char *zAlias;       /* The "B" part of a "A AS B" phrase.  zName is the "A" */
    Table *pTab;        /* An SQL table corresponding to zName */
    Select *pSelect;    /* A SELECT statement used in place of a table name */
    int addrFillSub;    /* Address of subroutine to manifest a subquery */
    int regReturn;      /* Register holding return address of addrFillSub */
    int regResult;      /* Registers holding results of a co-routine */
    u8 jointype;        /* Type of join between this table and the previous */
    unsigned notIndexed :1;    /* True if there is a NOT INDEXED clause */
    unsigned isCorrelated :1;  /* True if sub-query is correlated */
    unsigned viaCoroutine :1;  /* Implemented as a co-routine */
    unsigned isRecursive :1;   /* True for recursive reference in WITH */
#ifndef SQLITE_OMIT_EXPLAIN
    u8 iSelectId;       /* If pSelect!=0, the id of the sub-select in EQP */
#endif
    int iCursor;        /* The VDBE cursor number used to access this table */
    Expr *pOn;          /* The ON clause of a join */
    IdList *pUsing;     /* The USING clause of a join */
    Bitmask colUsed;    /* Bit N (1<<N) set if column N of pTab is used */
    char *zIndex;       /* Identifier from "INDEXED BY <zIndex>" clause */
    Index *pIndex;      /* Index structure corresponding to zIndex, if any */
  } a[1];               /* One entry for each identifier on the list */
};

/*
** Permitted values of the SrcList.a.jointype field
*/
#define JT_INNER     0x0001    /* Any kind of inner or cross join */
#define JT_CROSS     0x0002    /* Explicit use of the CROSS keyword */
#define JT_NATURAL   0x0004    /* True for a "natural" join */
#define JT_LEFT      0x0008    /* Left outer join */
#define JT_RIGHT     0x0010    /* Right outer join */
#define JT_OUTER     0x0020    /* The "OUTER" keyword is present */
#define JT_ERROR     0x0040    /* unknown or unsupported join type */

/*
** Flags appropriate for the wctrlFlags parameter of sqlite3WhereBegin()
** and the WhereInfo.wctrlFlags member.
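**
** For example (an illustrative sketch, not part of the original comment),
** a caller wanting min()-style ORDER BY handling restricted to the first
** table of pTabList might pass a combination such as:
**
**       u16 wctrlFlags = WHERE_ORDERBY_MIN | WHERE_ONETABLE_ONLY;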
*/ #define WHERE_ORDERBY_NORMAL 0x0000 /* No-op */ #define WHERE_ORDERBY_MIN 0x0001 /* ORDER BY processing for min() func */ #define WHERE_ORDERBY_MAX 0x0002 /* ORDER BY processing for max() func */ #define WHERE_ONEPASS_DESIRED 0x0004 /* Want to do one-pass UPDATE/DELETE */ #define WHERE_DUPLICATES_OK 0x0008 /* Ok to return a row more than once */ #define WHERE_OMIT_OPEN_CLOSE 0x0010 /* Table cursors are already open */ #define WHERE_FORCE_TABLE 0x0020 /* Do not use an index-only search */ #define WHERE_ONETABLE_ONLY 0x0040 /* Only code the 1st table in pTabList */ #define WHERE_NO_AUTOINDEX 0x0080 /* Disallow automatic indexes */ #define WHERE_GROUPBY 0x0100 /* pOrderBy is really a GROUP BY */ #define WHERE_DISTINCTBY 0x0200 /* pOrderby is really a DISTINCT clause */ #define WHERE_WANT_DISTINCT 0x0400 /* All output needs to be distinct */ #define WHERE_SORTBYGROUP 0x0800 /* Support sqlite3WhereIsSorted() */ #define WHERE_REOPEN_IDX 0x1000 /* Try to use OP_ReopenIdx */ /* Allowed return values from sqlite3WhereIsDistinct() */ #define WHERE_DISTINCT_NOOP 0 /* DISTINCT keyword not used */ #define WHERE_DISTINCT_UNIQUE 1 /* No duplicates */ #define WHERE_DISTINCT_ORDERED 2 /* All duplicates are adjacent */ #define WHERE_DISTINCT_UNORDERED 3 /* Duplicates are scattered */ /* ** A NameContext defines a context in which to resolve table and column ** names. The context consists of a list of tables (the pSrcList) field and ** a list of named expression (pEList). The named expression list may ** be NULL. The pSrc corresponds to the FROM clause of a SELECT or ** to the table being operated on by INSERT, UPDATE, or DELETE. The ** pEList corresponds to the result set of a SELECT and is NULL for ** other statements. ** ** NameContexts can be nested. When resolving names, the inner-most ** context is searched first. If no match is found, the next outer ** context is checked. If there is still no match, the next context ** is checked. This process continues until either a match is found ** or all contexts are check. When a match is found, the nRef member of ** the context containing the match is incremented. ** ** Each subquery gets a new NameContext. The pNext field points to the ** NameContext in the parent query. Thus the process of scanning the ** NameContext list corresponds to searching through successively outer ** subqueries looking for a match. */ struct NameContext { Parse *pParse; /* The parser */ SrcList *pSrcList; /* One or more tables used to resolve names */ ExprList *pEList; /* Optional list of result-set columns */ AggInfo *pAggInfo; /* Information about aggregates at this level */ NameContext *pNext; /* Next outer name context. NULL for outermost */ int nRef; /* Number of names resolved by this context */ int nErr; /* Number of errors encountered while resolving names */ u16 ncFlags; /* Zero or more NC_* flags defined below */ }; /* ** Allowed values for the NameContext, ncFlags field. ** ** Note: NC_MinMaxAgg must have the same value as SF_MinMaxAgg and ** SQLITE_FUNC_MINMAX. ** */ #define NC_AllowAgg 0x0001 /* Aggregate functions are allowed here */ #define NC_HasAgg 0x0002 /* One or more aggregate functions seen */ #define NC_IsCheck 0x0004 /* True if resolving names in a CHECK constraint */ #define NC_InAggFunc 0x0008 /* True if analyzing arguments to an agg func */ #define NC_PartIdx 0x0010 /* True if resolving a partial index WHERE */ #define NC_MinMaxAgg 0x1000 /* min/max aggregates seen. 
See note above */ /* ** An instance of the following structure contains all information ** needed to generate code for a single SELECT statement. ** ** nLimit is set to -1 if there is no LIMIT clause. nOffset is set to 0. ** If there is a LIMIT clause, the parser sets nLimit to the value of the ** limit and nOffset to the value of the offset (or 0 if there is not ** offset). But later on, nLimit and nOffset become the memory locations ** in the VDBE that record the limit and offset counters. ** ** addrOpenEphm[] entries contain the address of OP_OpenEphemeral opcodes. ** These addresses must be stored so that we can go back and fill in ** the P4_KEYINFO and P2 parameters later. Neither the KeyInfo nor ** the number of columns in P2 can be computed at the same time ** as the OP_OpenEphm instruction is coded because not ** enough information about the compound query is known at that point. ** The KeyInfo for addrOpenTran[0] and [1] contains collating sequences ** for the result set. The KeyInfo for addrOpenEphm[2] contains collating ** sequences for the ORDER BY clause. */ struct Select { ExprList *pEList; /* The fields of the result */ u8 op; /* One of: TK_UNION TK_ALL TK_INTERSECT TK_EXCEPT */ u16 selFlags; /* Various SF_* values */ int iLimit, iOffset; /* Memory registers holding LIMIT & OFFSET counters */ #if SELECTTRACE_ENABLED char zSelName[12]; /* Symbolic name of this SELECT use for debugging */ #endif int addrOpenEphm[2]; /* OP_OpenEphem opcodes related to this select */ u64 nSelectRow; /* Estimated number of result rows */ SrcList *pSrc; /* The FROM clause */ Expr *pWhere; /* The WHERE clause */ ExprList *pGroupBy; /* The GROUP BY clause */ Expr *pHaving; /* The HAVING clause */ ExprList *pOrderBy; /* The ORDER BY clause */ Select *pPrior; /* Prior select in a compound select statement */ Select *pNext; /* Next select to the left in a compound */ Expr *pLimit; /* LIMIT expression. NULL means not used. */ Expr *pOffset; /* OFFSET expression. NULL means not used. */ With *pWith; /* WITH clause attached to this select. Or NULL. */ }; /* ** Allowed values for Select.selFlags. The "SF" prefix stands for ** "Select Flag". */ #define SF_Distinct 0x0001 /* Output should be DISTINCT */ #define SF_All 0x0002 /* Includes the ALL keyword */ #define SF_Resolved 0x0004 /* Identifiers have been resolved */ #define SF_Aggregate 0x0008 /* Contains aggregate functions */ #define SF_UsesEphemeral 0x0010 /* Uses the OpenEphemeral opcode */ #define SF_Expanded 0x0020 /* sqlite3SelectExpand() called on this */ #define SF_HasTypeInfo 0x0040 /* FROM subqueries have Table metadata */ #define SF_Compound 0x0080 /* Part of a compound query */ #define SF_Values 0x0100 /* Synthesized from VALUES clause */ #define SF_MultiValue 0x0200 /* Single VALUES term with multiple rows */ #define SF_NestedFrom 0x0400 /* Part of a parenthesized FROM clause */ #define SF_MaybeConvert 0x0800 /* Need convertCompoundSelectToSubquery() */ #define SF_MinMaxAgg 0x1000 /* Aggregate containing min() or max() */ #define SF_Recursive 0x2000 /* The recursive part of a recursive CTE */ #define SF_Converted 0x4000 /* By convertCompoundSelectToSubquery() */ /* ** The results of a SELECT can be distributed in several ways, as defined ** by one of the following macros. The "SRT" prefix means "SELECT Result ** Type". ** ** SRT_Union Store results as a key in a temporary index ** identified by pDest->iSDParm. ** ** SRT_Except Remove results from the temporary index pDest->iSDParm. 
** ** SRT_Exists Store a 1 in memory cell pDest->iSDParm if the result ** set is not empty. ** ** SRT_Discard Throw the results away. This is used by SELECT ** statements within triggers whose only purpose is ** the side-effects of functions. ** ** All of the above are free to ignore their ORDER BY clause. Those that ** follow must honor the ORDER BY clause. ** ** SRT_Output Generate a row of output (using the OP_ResultRow ** opcode) for each row in the result set. ** ** SRT_Mem Only valid if the result is a single column. ** Store the first column of the first result row ** in register pDest->iSDParm then abandon the rest ** of the query. This destination implies "LIMIT 1". ** ** SRT_Set The result must be a single column. Store each ** row of result as the key in table pDest->iSDParm. ** Apply the affinity pDest->affSdst before storing ** results. Used to implement "IN (SELECT ...)". ** ** SRT_EphemTab Create an temporary table pDest->iSDParm and store ** the result there. The cursor is left open after ** returning. This is like SRT_Table except that ** this destination uses OP_OpenEphemeral to create ** the table first. ** ** SRT_Coroutine Generate a co-routine that returns a new row of ** results each time it is invoked. The entry point ** of the co-routine is stored in register pDest->iSDParm ** and the result row is stored in pDest->nDest registers ** starting with pDest->iSdst. ** ** SRT_Table Store results in temporary table pDest->iSDParm. ** SRT_Fifo This is like SRT_EphemTab except that the table ** is assumed to already be open. SRT_Fifo has ** the additional property of being able to ignore ** the ORDER BY clause. ** ** SRT_DistFifo Store results in a temporary table pDest->iSDParm. ** But also use temporary table pDest->iSDParm+1 as ** a record of all prior results and ignore any duplicate ** rows. Name means: "Distinct Fifo". ** ** SRT_Queue Store results in priority queue pDest->iSDParm (really ** an index). Append a sequence number so that all entries ** are distinct. ** ** SRT_DistQueue Store results in priority queue pDest->iSDParm only if ** the same record has never been stored before. The ** index at pDest->iSDParm+1 hold all prior stores. */ #define SRT_Union 1 /* Store result as keys in an index */ #define SRT_Except 2 /* Remove result from a UNION index */ #define SRT_Exists 3 /* Store 1 if the result is not empty */ #define SRT_Discard 4 /* Do not save the results anywhere */ #define SRT_Fifo 5 /* Store result as data with an automatic rowid */ #define SRT_DistFifo 6 /* Like SRT_Fifo, but unique results only */ #define SRT_Queue 7 /* Store result in an queue */ #define SRT_DistQueue 8 /* Like SRT_Queue, but unique results only */ /* The ORDER BY clause is ignored for all of the above */ #define IgnorableOrderby(X) ((X->eDest)<=SRT_DistQueue) #define SRT_Output 9 /* Output each row of result */ #define SRT_Mem 10 /* Store result in a memory cell */ #define SRT_Set 11 /* Store results as keys in an index */ #define SRT_EphemTab 12 /* Create transient tab and store like SRT_Table */ #define SRT_Coroutine 13 /* Generate a single row of result */ #define SRT_Table 14 /* Store result as data with an automatic rowid */ /* ** An instance of this object describes where to put of the results of ** a SELECT statement. */ struct SelectDest { u8 eDest; /* How to dispose of the results. On of SRT_* above. 
*/ char affSdst; /* Affinity used when eDest==SRT_Set */ int iSDParm; /* A parameter used by the eDest disposal method */ int iSdst; /* Base register where results are written */ int nSdst; /* Number of registers allocated */ ExprList *pOrderBy; /* Key columns for SRT_Queue and SRT_DistQueue */ }; /* ** During code generation of statements that do inserts into AUTOINCREMENT ** tables, the following information is attached to the Table.u.autoInc.p ** pointer of each autoincrement table to record some side information that ** the code generator needs. We have to keep per-table autoincrement ** information in case inserts are down within triggers. Triggers do not ** normally coordinate their activities, but we do need to coordinate the ** loading and saving of autoincrement information. */ struct AutoincInfo { AutoincInfo *pNext; /* Next info block in a list of them all */ Table *pTab; /* Table this info block refers to */ int iDb; /* Index in sqlite3.aDb[] of database holding pTab */ int regCtr; /* Memory register holding the rowid counter */ }; /* ** Size of the column cache */ #ifndef SQLITE_N_COLCACHE # define SQLITE_N_COLCACHE 10 #endif /* ** At least one instance of the following structure is created for each ** trigger that may be fired while parsing an INSERT, UPDATE or DELETE ** statement. All such objects are stored in the linked list headed at ** Parse.pTriggerPrg and deleted once statement compilation has been ** completed. ** ** A Vdbe sub-program that implements the body and WHEN clause of trigger ** TriggerPrg.pTrigger, assuming a default ON CONFLICT clause of ** TriggerPrg.orconf, is stored in the TriggerPrg.pProgram variable. ** The Parse.pTriggerPrg list never contains two entries with the same ** values for both pTrigger and orconf. ** ** The TriggerPrg.aColmask[0] variable is set to a mask of old.* columns ** accessed (or set to 0 for triggers fired as a result of INSERT ** statements). Similarly, the TriggerPrg.aColmask[1] variable is set to ** a mask of new.* columns used by the program. */ struct TriggerPrg { Trigger *pTrigger; /* Trigger this program was coded from */ TriggerPrg *pNext; /* Next entry in Parse.pTriggerPrg list */ SubProgram *pProgram; /* Program implementing pTrigger/orconf */ int orconf; /* Default ON CONFLICT policy */ u32 aColmask[2]; /* Masks of old.*, new.* columns accessed */ }; /* ** The yDbMask datatype for the bitmask of all attached databases. */ #if SQLITE_MAX_ATTACHED>30 typedef unsigned char yDbMask[(SQLITE_MAX_ATTACHED+9)/8]; # define DbMaskTest(M,I) (((M)[(I)/8]&(1<<((I)&7)))!=0) # define DbMaskZero(M) memset((M),0,sizeof(M)) # define DbMaskSet(M,I) (M)[(I)/8]|=(1<<((I)&7)) # define DbMaskAllZero(M) sqlite3DbMaskAllZero(M) # define DbMaskNonZero(M) (sqlite3DbMaskAllZero(M)==0) #else typedef unsigned int yDbMask; # define DbMaskTest(M,I) (((M)&(((yDbMask)1)<<(I)))!=0) # define DbMaskZero(M) (M)=0 # define DbMaskSet(M,I) (M)|=(((yDbMask)1)<<(I)) # define DbMaskAllZero(M) (M)==0 # define DbMaskNonZero(M) (M)!=0 #endif /* ** An SQL parser context. A copy of this structure is passed through ** the parser and down into all the parser action routine in order to ** carry around information that is global to the entire parse. ** ** The structure is divided into two parts. When the parser and code ** generate call themselves recursively, the first part of the structure ** is constant but the second part is reset at the beginning and end of ** each recursion. 
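**
** A minimal sketch (hypothetical usage, not part of the original comment):
** the yDbMask macros defined above are typically applied to the writeMask
** and cookieMask members below, for example to record that database iDb
** will be written by the statement being compiled:
**
**       DbMaskSet(pParse->writeMask, iDb);
**       if( !DbMaskTest(pParse->cookieMask, iDb) ){ ... }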
** ** The nTableLock and aTableLock variables are only used if the shared-cache ** feature is enabled (if sqlite3Tsd()->useSharedData is true). They are ** used to store the set of table-locks required by the statement being ** compiled. Function sqlite3TableLock() is used to add entries to the ** list. */ struct Parse { sqlite3 *db; /* The main database structure */ char *zErrMsg; /* An error message */ Vdbe *pVdbe; /* An engine for executing database bytecode */ int rc; /* Return code from execution */ u8 colNamesSet; /* TRUE after OP_ColumnName has been issued to pVdbe */ u8 checkSchema; /* Causes schema cookie check after an error */ u8 nested; /* Number of nested calls to the parser/code generator */ u8 nTempReg; /* Number of temporary registers in aTempReg[] */ u8 isMultiWrite; /* True if statement may modify/insert multiple rows */ u8 mayAbort; /* True if statement may throw an ABORT exception */ u8 hasCompound; /* Need to invoke convertCompoundSelectToSubquery() */ u8 okConstFactor; /* OK to factor out constants */ int aTempReg[8]; /* Holding area for temporary registers */ int nRangeReg; /* Size of the temporary register block */ int iRangeReg; /* First register in temporary register block */ int nErr; /* Number of errors seen */ int nTab; /* Number of previously allocated VDBE cursors */ int nMem; /* Number of memory cells used so far */ int nSet; /* Number of sets used so far */ int nOnce; /* Number of OP_Once instructions so far */ int nOpAlloc; /* Number of slots allocated for Vdbe.aOp[] */ int iFixedOp; /* Never back out opcodes iFixedOp-1 or earlier */ int ckBase; /* Base register of data during check constraints */ int iPartIdxTab; /* Table corresponding to a partial index */ int iCacheLevel; /* ColCache valid when aColCache[].iLevel<=iCacheLevel */ int iCacheCnt; /* Counter used to generate aColCache[].lru values */ int nLabel; /* Number of labels used */ int *aLabel; /* Space to hold the labels */ struct yColCache { int iTable; /* Table cursor number */ i16 iColumn; /* Table column number */ u8 tempReg; /* iReg is a temp register that needs to be freed */ int iLevel; /* Nesting level */ int iReg; /* Reg with value of this column. 0 means none. */ int lru; /* Least recently used entry has the smallest value */ } aColCache[SQLITE_N_COLCACHE]; /* One for each column cache entry */ ExprList *pConstExpr;/* Constant expressions */ Token constraintName;/* Name of the constraint currently being parsed */ yDbMask writeMask; /* Start a write transaction on these databases */ yDbMask cookieMask; /* Bitmask of schema verified databases */ int cookieValue[SQLITE_MAX_ATTACHED+2]; /* Values of cookies to verify */ int regRowid; /* Register holding rowid of CREATE TABLE entry */ int regRoot; /* Register holding root page number for new objects */ int nMaxArg; /* Max args passed to user function by sub-program */ #if SELECTTRACE_ENABLED int nSelect; /* Number of SELECT statements seen */ int nSelectIndent; /* How far to indent SELECTTRACE() output */ #endif #ifndef SQLITE_OMIT_SHARED_CACHE int nTableLock; /* Number of locks in aTableLock */ TableLock *aTableLock; /* Required table locks for shared-cache mode */ #endif AutoincInfo *pAinc; /* Information about AUTOINCREMENT counters */ /* Information used while coding trigger programs. 
*/ Parse *pToplevel; /* Parse structure for main program (or NULL) */ Table *pTriggerTab; /* Table triggers are being coded for */ int addrCrTab; /* Address of OP_CreateTable opcode on CREATE TABLE */ u32 nQueryLoop; /* Est number of iterations of a query (10*log2(N)) */ u32 oldmask; /* Mask of old.* columns referenced */ u32 newmask; /* Mask of new.* columns referenced */ u8 eTriggerOp; /* TK_UPDATE, TK_INSERT or TK_DELETE */ u8 eOrconf; /* Default ON CONFLICT policy for trigger steps */ u8 disableTriggers; /* True to disable triggers */ /************************************************************************ ** Above is constant between recursions. Below is reset before and after ** each recursion. The boundary between these two regions is determined ** using offsetof(Parse,nVar) so the nVar field must be the first field ** in the recursive region. ************************************************************************/ int nVar; /* Number of '?' variables seen in the SQL so far */ int nzVar; /* Number of available slots in azVar[] */ u8 iPkSortOrder; /* ASC or DESC for INTEGER PRIMARY KEY */ u8 bFreeWith; /* True if pWith should be freed with parser */ u8 explain; /* True if the EXPLAIN flag is found on the query */ #ifndef SQLITE_OMIT_VIRTUALTABLE u8 declareVtab; /* True if inside sqlite3_declare_vtab() */ int nVtabLock; /* Number of virtual tables to lock */ #endif int nAlias; /* Number of aliased result set columns */ int nHeight; /* Expression tree height of current sub-select */ #ifndef SQLITE_OMIT_EXPLAIN int iSelectId; /* ID of current select for EXPLAIN output */ int iNextSelectId; /* Next available select ID for EXPLAIN output */ #endif char **azVar; /* Pointers to names of parameters */ Vdbe *pReprepare; /* VM being reprepared (sqlite3Reprepare()) */ const char *zTail; /* All SQL text past the last semicolon parsed */ Table *pNewTable; /* A table being constructed by CREATE TABLE */ Trigger *pNewTrigger; /* Trigger under construct by a CREATE TRIGGER */ const char *zAuthContext; /* The 6th parameter to db->xAuth callbacks */ Token sNameToken; /* Token with unqualified schema object name */ Token sLastToken; /* The last token parsed */ #ifndef SQLITE_OMIT_VIRTUALTABLE Token sArg; /* Complete text of a module argument */ Table **apVtabLock; /* Pointer to virtual tables needing locking */ #endif Table *pZombieTab; /* List of Table objects to delete after code gen */ TriggerPrg *pTriggerPrg; /* Linked list of coded triggers */ With *pWith; /* Current WITH clause, or NULL */ }; /* ** Return true if currently inside an sqlite3_declare_vtab() call. */ #ifdef SQLITE_OMIT_VIRTUALTABLE #define IN_DECLARE_VTAB 0 #else #define IN_DECLARE_VTAB (pParse->declareVtab) #endif /* ** An instance of the following structure can be declared on a stack and used ** to save the Parse.zAuthContext value so that it can be restored later. */ struct AuthContext { const char *zAuthContext; /* Put saved Parse.zAuthContext here */ Parse *pParse; /* The Parse structure */ }; /* ** Bitfield flags for P5 value in various opcodes. 
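**
** An illustrative sketch only (not part of the original comment): such a
** flag is normally attached to the most recently coded opcode through the
** sqlite3VdbeChangeP5() helper, e.g.:
**
**       sqlite3VdbeChangeP5(v, OPFLAG_APPEND);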
*/ #define OPFLAG_NCHANGE 0x01 /* Set to update db->nChange */ #define OPFLAG_EPHEM 0x01 /* OP_Column: Ephemeral output is ok */ #define OPFLAG_LASTROWID 0x02 /* Set to update db->lastRowid */ #define OPFLAG_ISUPDATE 0x04 /* This OP_Insert is an sql UPDATE */ #define OPFLAG_APPEND 0x08 /* This is likely to be an append */ #define OPFLAG_USESEEKRESULT 0x10 /* Try to avoid a seek in BtreeInsert() */ #define OPFLAG_LENGTHARG 0x40 /* OP_Column only used for length() */ #define OPFLAG_TYPEOFARG 0x80 /* OP_Column only used for typeof() */ #define OPFLAG_BULKCSR 0x01 /* OP_Open** used to open bulk cursor */ #define OPFLAG_SEEKEQ 0x02 /* OP_Open** cursor uses EQ seek only */ #define OPFLAG_P2ISREG 0x04 /* P2 to OP_Open** is a register number */ #define OPFLAG_PERMUTE 0x01 /* OP_Compare: use the permutation */ /* * Each trigger present in the database schema is stored as an instance of * struct Trigger. * * Pointers to instances of struct Trigger are stored in two ways. * 1. In the "trigHash" hash table (part of the sqlite3* that represents the * database). This allows Trigger structures to be retrieved by name. * 2. All triggers associated with a single table form a linked list, using the * pNext member of struct Trigger. A pointer to the first element of the * linked list is stored as the "pTrigger" member of the associated * struct Table. * * The "step_list" member points to the first element of a linked list * containing the SQL statements specified as the trigger program. */ struct Trigger { char *zName; /* The name of the trigger */ char *table; /* The table or view to which the trigger applies */ u8 op; /* One of TK_DELETE, TK_UPDATE, TK_INSERT */ u8 tr_tm; /* One of TRIGGER_BEFORE, TRIGGER_AFTER */ Expr *pWhen; /* The WHEN clause of the expression (may be NULL) */ IdList *pColumns; /* If this is an UPDATE OF trigger, the is stored here */ Schema *pSchema; /* Schema containing the trigger */ Schema *pTabSchema; /* Schema containing the table */ TriggerStep *step_list; /* Link list of trigger program steps */ Trigger *pNext; /* Next trigger associated with the table */ }; /* ** A trigger is either a BEFORE or an AFTER trigger. The following constants ** determine which. ** ** If there are multiple triggers, you might of some BEFORE and some AFTER. ** In that cases, the constants below can be ORed together. */ #define TRIGGER_BEFORE 1 #define TRIGGER_AFTER 2 /* * An instance of struct TriggerStep is used to store a single SQL statement * that is a part of a trigger-program. * * Instances of struct TriggerStep are stored in a singly linked list (linked * using the "pNext" member) referenced by the "step_list" member of the * associated struct Trigger instance. The first element of the linked list is * the first step of the trigger-program. * * The "op" member indicates whether this is a "DELETE", "INSERT", "UPDATE" or * "SELECT" statement. The meanings of the other members is determined by the * value of "op" as follows: * * (op == TK_INSERT) * orconf -> stores the ON CONFLICT algorithm * pSelect -> If this is an INSERT INTO ... SELECT ... statement, then * this stores a pointer to the SELECT statement. Otherwise NULL. * zTarget -> Dequoted name of the table to insert into. * pExprList -> If this is an INSERT INTO ... VALUES ... statement, then * this stores values to be inserted. Otherwise NULL. * pIdList -> If this is an INSERT INTO ... () VALUES ... * statement, then this stores the column-names to be * inserted into. 
* * (op == TK_DELETE) * zTarget -> Dequoted name of the table to delete from. * pWhere -> The WHERE clause of the DELETE statement if one is specified. * Otherwise NULL. * * (op == TK_UPDATE) * zTarget -> Dequoted name of the table to update. * pWhere -> The WHERE clause of the UPDATE statement if one is specified. * Otherwise NULL. * pExprList -> A list of the columns to update and the expressions to update * them to. See sqlite3Update() documentation of "pChanges" * argument. * */ struct TriggerStep { u8 op; /* One of TK_DELETE, TK_UPDATE, TK_INSERT, TK_SELECT */ u8 orconf; /* OE_Rollback etc. */ Trigger *pTrig; /* The trigger that this step is a part of */ Select *pSelect; /* SELECT statement or RHS of INSERT INTO SELECT ... */ char *zTarget; /* Target table for DELETE, UPDATE, INSERT */ Expr *pWhere; /* The WHERE clause for DELETE or UPDATE steps */ ExprList *pExprList; /* SET clause for UPDATE. */ IdList *pIdList; /* Column names for INSERT */ TriggerStep *pNext; /* Next in the link-list */ TriggerStep *pLast; /* Last element in link-list. Valid for 1st elem only */ }; /* ** The following structure contains information used by the sqliteFix... ** routines as they walk the parse tree to make database references ** explicit. */ typedef struct DbFixer DbFixer; struct DbFixer { Parse *pParse; /* The parsing context. Error messages written here */ Schema *pSchema; /* Fix items to this schema */ int bVarOnly; /* Check for variable references only */ const char *zDb; /* Make sure all objects are contained in this database */ const char *zType; /* Type of the container - used for error messages */ const Token *pName; /* Name of the container - used for error messages */ }; /* ** An objected used to accumulate the text of a string where we ** do not necessarily know how big the string will be in the end. */ struct StrAccum { sqlite3 *db; /* Optional database for lookaside. Can be NULL */ char *zBase; /* A base allocation. Not from malloc. */ char *zText; /* The string collected so far */ int nChar; /* Length of the string so far */ int nAlloc; /* Amount of space allocated in zText */ int mxAlloc; /* Maximum allowed allocation. 0 for no malloc usage */ u8 accError; /* STRACCUM_NOMEM or STRACCUM_TOOBIG */ }; #define STRACCUM_NOMEM 1 #define STRACCUM_TOOBIG 2 /* ** A pointer to this structure is used to communicate information ** from sqlite3Init and OP_ParseSchema into the sqlite3InitCallback. */ typedef struct { sqlite3 *db; /* The database being initialized */ char **pzErrMsg; /* Error message stored here */ int iDb; /* 0 for main database. 1 for TEMP, 2.. for ATTACHed */ int rc; /* Result code stored here */ } InitData; /* ** Structure containing global configuration data for the SQLite library. ** ** This structure also contains some state information. 
*/ struct Sqlite3Config { int bMemstat; /* True to enable memory status */ int bCoreMutex; /* True to enable core mutexing */ int bFullMutex; /* True to enable full mutexing */ int bOpenUri; /* True to interpret filenames as URIs */ int bUseCis; /* Use covering indices for full-scans */ int mxStrlen; /* Maximum string length */ int neverCorrupt; /* Database is always well-formed */ int szLookaside; /* Default lookaside buffer size */ int nLookaside; /* Default lookaside buffer count */ sqlite3_mem_methods m; /* Low-level memory allocation interface */ sqlite3_mutex_methods mutex; /* Low-level mutex interface */ sqlite3_pcache_methods2 pcache2; /* Low-level page-cache interface */ void *pHeap; /* Heap storage space */ int nHeap; /* Size of pHeap[] */ int mnReq, mxReq; /* Min and max heap requests sizes */ sqlite3_int64 szMmap; /* mmap() space per open file */ sqlite3_int64 mxMmap; /* Maximum value for szMmap */ void *pScratch; /* Scratch memory */ int szScratch; /* Size of each scratch buffer */ int nScratch; /* Number of scratch buffers */ void *pPage; /* Page cache memory */ int szPage; /* Size of each page in pPage[] */ int nPage; /* Number of pages in pPage[] */ int mxParserStack; /* maximum depth of the parser stack */ int sharedCacheEnabled; /* true if shared-cache mode enabled */ u32 szPma; /* Maximum Sorter PMA size */ /* The above might be initialized to non-zero. The following need to always ** initially be zero, however. */ int isInit; /* True after initialization has finished */ int inProgress; /* True while initialization in progress */ int isMutexInit; /* True after mutexes are initialized */ int isMallocInit; /* True after malloc is initialized */ int isPCacheInit; /* True after malloc is initialized */ int nRefInitMutex; /* Number of users of pInitMutex */ sqlite3_mutex *pInitMutex; /* Mutex used by sqlite3_initialize() */ void (*xLog)(void*,int,const char*); /* Function for logging */ void *pLogArg; /* First argument to xLog() */ #ifdef SQLITE_ENABLE_SQLLOG void(*xSqllog)(void*,sqlite3*,const char*, int); void *pSqllogArg; #endif #ifdef SQLITE_VDBE_COVERAGE /* The following callback (if not NULL) is invoked on every VDBE branch ** operation. Set the callback using SQLITE_TESTCTRL_VDBE_COVERAGE. */ void (*xVdbeBranch)(void*,int iSrcLine,u8 eThis,u8 eMx); /* Callback */ void *pVdbeBranchArg; /* 1st argument */ #endif #ifndef SQLITE_OMIT_BUILTIN_TEST int (*xTestCallback)(int); /* Invoked by sqlite3FaultSim() */ #endif int bLocaltimeFault; /* True to fail localtime() calls */ }; /* ** This macro is used inside of assert() statements to indicate that ** the assert is only valid on a well-formed database. Instead of: ** ** assert( X ); ** ** One writes: ** ** assert( X || CORRUPT_DB ); ** ** CORRUPT_DB is true during normal operation. CORRUPT_DB does not indicate ** that the database is definitely corrupt, only that it might be corrupt. ** For most test cases, CORRUPT_DB is set to false using a special ** sqlite3_test_control(). This enables assert() statements to prove ** things that are always true for well-formed databases. */ #define CORRUPT_DB (sqlite3Config.neverCorrupt==0) /* ** Context pointer passed down through the tree-walk. */ struct Walker { int (*xExprCallback)(Walker*, Expr*); /* Callback for expressions */ int (*xSelectCallback)(Walker*,Select*); /* Callback for SELECTs */ void (*xSelectCallback2)(Walker*,Select*);/* Second callback for SELECTs */ Parse *pParse; /* Parser context. 
*/
  int walkerDepth;                          /* Number of subqueries */
  u8 eCode;                                 /* A small processing code */
  union {                                   /* Extra data for callback */
    NameContext *pNC;                          /* Naming context */
    int n;                                     /* A counter */
    int iCur;                                  /* A cursor number */
    SrcList *pSrcList;                         /* FROM clause */
    struct SrcCount *pSrcCount;                /* Counting column references */
  } u;
};

/* Forward declarations */
SQLITE_PRIVATE int sqlite3WalkExpr(Walker*, Expr*);
SQLITE_PRIVATE int sqlite3WalkExprList(Walker*, ExprList*);
SQLITE_PRIVATE int sqlite3WalkSelect(Walker*, Select*);
SQLITE_PRIVATE int sqlite3WalkSelectExpr(Walker*, Select*);
SQLITE_PRIVATE int sqlite3WalkSelectFrom(Walker*, Select*);

/*
** Return code from the parse-tree walking primitives and their
** callbacks.
*/
#define WRC_Continue    0   /* Continue down into children */
#define WRC_Prune       1   /* Omit children but continue walking siblings */
#define WRC_Abort       2   /* Abandon the tree walk */

/*
** An instance of this structure represents a set of one or more CTEs
** (common table expressions) created by a single WITH clause.
*/
struct With {
  int nCte;                       /* Number of CTEs in the WITH clause */
  With *pOuter;                   /* Containing WITH clause, or NULL */
  struct Cte {                    /* For each CTE in the WITH clause.... */
    char *zName;                    /* Name of this CTE */
    ExprList *pCols;                /* List of explicit column names, or NULL */
    Select *pSelect;                /* The definition of this CTE */
    const char *zErr;               /* Error message for circular references */
  } a[1];
};

#ifdef SQLITE_DEBUG
/*
** An instance of the TreeView object is used for printing the content of
** data structures on sqlite3DebugPrintf() using a tree-like view.
*/
struct TreeView {
  int iLevel;           /* Which level of the tree we are on */
  u8 bLine[100];        /* Draw vertical in column i if bLine[i] is true */
};
#endif /* SQLITE_DEBUG */

/*
** Assuming zIn points to the first byte of a UTF-8 character,
** advance zIn to point to the first byte of the next UTF-8 character.
*/
#define SQLITE_SKIP_UTF8(zIn) {                        \
  if( (*(zIn++))>=0xc0 ){                              \
    while( (*zIn & 0xc0)==0x80 ){ zIn++; }             \
  }                                                    \
}

/*
** The SQLITE_*_BKPT macros are substitutes for the error codes with
** the same name but without the _BKPT suffix.  These macros invoke
** routines that report the line-number on which the error originated
** using sqlite3_log().  The routines also provide a convenient place
** to set a debugger breakpoint.
*/
SQLITE_PRIVATE int sqlite3CorruptError(int);
SQLITE_PRIVATE int sqlite3MisuseError(int);
SQLITE_PRIVATE int sqlite3CantopenError(int);
#define SQLITE_CORRUPT_BKPT sqlite3CorruptError(__LINE__)
#define SQLITE_MISUSE_BKPT sqlite3MisuseError(__LINE__)
#define SQLITE_CANTOPEN_BKPT sqlite3CantopenError(__LINE__)

/*
** FTS4 is really an extension for FTS3.  It is enabled using the
** SQLITE_ENABLE_FTS3 macro.  But to avoid confusion we also call
** the SQLITE_ENABLE_FTS4 macro to serve as an alias for SQLITE_ENABLE_FTS3.
*/
#if defined(SQLITE_ENABLE_FTS4) && !defined(SQLITE_ENABLE_FTS3)
# define SQLITE_ENABLE_FTS3 1
#endif

/*
** The ctype.h header is needed for non-ASCII systems.  It is also
** needed by FTS3 when FTS3 is included in the amalgamation.
*/
#if !defined(SQLITE_ASCII) || \
    (defined(SQLITE_ENABLE_FTS3) && defined(SQLITE_AMALGAMATION))
# include <ctype.h>
#endif

/*
** The following macros mimic the standard library functions toupper(),
** isspace(), isalnum(), isdigit() and isxdigit(), respectively.  The
** sqlite versions only work for ASCII characters, regardless of locale.
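**
** A minimal usage sketch (illustrative only, not part of the original
** comment), assuming z points at a nul-terminated string: leading ASCII
** whitespace and a run of ASCII digits can be consumed like this,
** independent of the current locale:
**
**       while( sqlite3Isspace(z[0]) ){ z++; }
**       while( sqlite3Isdigit(z[0]) ){ z++; }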
*/ #ifdef SQLITE_ASCII # define sqlite3Toupper(x) ((x)&~(sqlite3CtypeMap[(unsigned char)(x)]&0x20)) # define sqlite3Isspace(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x01) # define sqlite3Isalnum(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x06) # define sqlite3Isalpha(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x02) # define sqlite3Isdigit(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x04) # define sqlite3Isxdigit(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x08) # define sqlite3Tolower(x) (sqlite3UpperToLower[(unsigned char)(x)]) #else # define sqlite3Toupper(x) toupper((unsigned char)(x)) # define sqlite3Isspace(x) isspace((unsigned char)(x)) # define sqlite3Isalnum(x) isalnum((unsigned char)(x)) # define sqlite3Isalpha(x) isalpha((unsigned char)(x)) # define sqlite3Isdigit(x) isdigit((unsigned char)(x)) # define sqlite3Isxdigit(x) isxdigit((unsigned char)(x)) # define sqlite3Tolower(x) tolower((unsigned char)(x)) #endif #ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS SQLITE_PRIVATE int sqlite3IsIdChar(u8); #endif /* ** Internal function prototypes */ #define sqlite3StrICmp sqlite3_stricmp SQLITE_PRIVATE int sqlite3Strlen30(const char*); #define sqlite3StrNICmp sqlite3_strnicmp SQLITE_PRIVATE int sqlite3MallocInit(void); SQLITE_PRIVATE void sqlite3MallocEnd(void); SQLITE_PRIVATE void *sqlite3Malloc(u64); SQLITE_PRIVATE void *sqlite3MallocZero(u64); SQLITE_PRIVATE void *sqlite3DbMallocZero(sqlite3*, u64); SQLITE_PRIVATE void *sqlite3DbMallocRaw(sqlite3*, u64); SQLITE_PRIVATE char *sqlite3DbStrDup(sqlite3*,const char*); SQLITE_PRIVATE char *sqlite3DbStrNDup(sqlite3*,const char*, u64); SQLITE_PRIVATE void *sqlite3Realloc(void*, u64); SQLITE_PRIVATE void *sqlite3DbReallocOrFree(sqlite3 *, void *, u64); SQLITE_PRIVATE void *sqlite3DbRealloc(sqlite3 *, void *, u64); SQLITE_PRIVATE void sqlite3DbFree(sqlite3*, void*); SQLITE_PRIVATE int sqlite3MallocSize(void*); SQLITE_PRIVATE int sqlite3DbMallocSize(sqlite3*, void*); SQLITE_PRIVATE void *sqlite3ScratchMalloc(int); SQLITE_PRIVATE void sqlite3ScratchFree(void*); SQLITE_PRIVATE void *sqlite3PageMalloc(int); SQLITE_PRIVATE void sqlite3PageFree(void*); SQLITE_PRIVATE void sqlite3MemSetDefault(void); #ifndef SQLITE_OMIT_BUILTIN_TEST SQLITE_PRIVATE void sqlite3BenignMallocHooks(void (*)(void), void (*)(void)); #endif SQLITE_PRIVATE int sqlite3HeapNearlyFull(void); /* ** On systems with ample stack space and that support alloca(), make ** use of alloca() to obtain space for large automatic objects. By default, ** obtain space from malloc(). ** ** The alloca() routine never returns NULL. This will cause code paths ** that deal with sqlite3StackAlloc() failures to be unreachable. 
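**
** An illustrative usage pattern (not a specific routine in this file),
** valid under either definition of the macros below:
**
**     char *zBuf = sqlite3StackAllocZero(db, nByte);
**     if( zBuf==0 ) return SQLITE_NOMEM;
**     .. use zBuf ..
**     sqlite3StackFree(db, zBuf);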
*/ #ifdef SQLITE_USE_ALLOCA # define sqlite3StackAllocRaw(D,N) alloca(N) # define sqlite3StackAllocZero(D,N) memset(alloca(N), 0, N) # define sqlite3StackFree(D,P) #else # define sqlite3StackAllocRaw(D,N) sqlite3DbMallocRaw(D,N) # define sqlite3StackAllocZero(D,N) sqlite3DbMallocZero(D,N) # define sqlite3StackFree(D,P) sqlite3DbFree(D,P) #endif #ifdef SQLITE_ENABLE_MEMSYS3 SQLITE_PRIVATE const sqlite3_mem_methods *sqlite3MemGetMemsys3(void); #endif #ifdef SQLITE_ENABLE_MEMSYS5 SQLITE_PRIVATE const sqlite3_mem_methods *sqlite3MemGetMemsys5(void); #endif #ifndef SQLITE_MUTEX_OMIT SQLITE_PRIVATE sqlite3_mutex_methods const *sqlite3DefaultMutex(void); SQLITE_PRIVATE sqlite3_mutex_methods const *sqlite3NoopMutex(void); SQLITE_PRIVATE sqlite3_mutex *sqlite3MutexAlloc(int); SQLITE_PRIVATE int sqlite3MutexInit(void); SQLITE_PRIVATE int sqlite3MutexEnd(void); #endif SQLITE_PRIVATE sqlite3_int64 sqlite3StatusValue(int); SQLITE_PRIVATE void sqlite3StatusUp(int, int); SQLITE_PRIVATE void sqlite3StatusDown(int, int); SQLITE_PRIVATE void sqlite3StatusSet(int, int); /* Access to mutexes used by sqlite3_status() */ SQLITE_PRIVATE sqlite3_mutex *sqlite3Pcache1Mutex(void); SQLITE_PRIVATE sqlite3_mutex *sqlite3MallocMutex(void); #ifndef SQLITE_OMIT_FLOATING_POINT SQLITE_PRIVATE int sqlite3IsNaN(double); #else # define sqlite3IsNaN(X) 0 #endif /* ** An instance of the following structure holds information about SQL ** functions arguments that are the parameters to the printf() function. */ struct PrintfArguments { int nArg; /* Total number of arguments */ int nUsed; /* Number of arguments used so far */ sqlite3_value **apArg; /* The argument values */ }; #define SQLITE_PRINTF_INTERNAL 0x01 #define SQLITE_PRINTF_SQLFUNC 0x02 SQLITE_PRIVATE void sqlite3VXPrintf(StrAccum*, u32, const char*, va_list); SQLITE_PRIVATE void sqlite3XPrintf(StrAccum*, u32, const char*, ...); SQLITE_PRIVATE char *sqlite3MPrintf(sqlite3*,const char*, ...); SQLITE_PRIVATE char *sqlite3VMPrintf(sqlite3*,const char*, va_list); #if defined(SQLITE_DEBUG) || defined(SQLITE_HAVE_OS_TRACE) SQLITE_PRIVATE void sqlite3DebugPrintf(const char*, ...); #endif #if defined(SQLITE_TEST) SQLITE_PRIVATE void *sqlite3TestTextToPtr(const char*); #endif #if defined(SQLITE_DEBUG) SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView*, const Expr*, u8); SQLITE_PRIVATE void sqlite3TreeViewExprList(TreeView*, const ExprList*, u8, const char*); SQLITE_PRIVATE void sqlite3TreeViewSelect(TreeView*, const Select*, u8); #endif SQLITE_PRIVATE void sqlite3SetString(char **, sqlite3*, const char*); SQLITE_PRIVATE void sqlite3ErrorMsg(Parse*, const char*, ...); SQLITE_PRIVATE int sqlite3Dequote(char*); SQLITE_PRIVATE int sqlite3KeywordCode(const unsigned char*, int); SQLITE_PRIVATE int sqlite3RunParser(Parse*, const char*, char **); SQLITE_PRIVATE void sqlite3FinishCoding(Parse*); SQLITE_PRIVATE int sqlite3GetTempReg(Parse*); SQLITE_PRIVATE void sqlite3ReleaseTempReg(Parse*,int); SQLITE_PRIVATE int sqlite3GetTempRange(Parse*,int); SQLITE_PRIVATE void sqlite3ReleaseTempRange(Parse*,int,int); SQLITE_PRIVATE void sqlite3ClearTempRegCache(Parse*); SQLITE_PRIVATE Expr *sqlite3ExprAlloc(sqlite3*,int,const Token*,int); SQLITE_PRIVATE Expr *sqlite3Expr(sqlite3*,int,const char*); SQLITE_PRIVATE void sqlite3ExprAttachSubtrees(sqlite3*,Expr*,Expr*,Expr*); SQLITE_PRIVATE Expr *sqlite3PExpr(Parse*, int, Expr*, Expr*, const Token*); SQLITE_PRIVATE Expr *sqlite3ExprAnd(sqlite3*,Expr*, Expr*); SQLITE_PRIVATE Expr *sqlite3ExprFunction(Parse*,ExprList*, Token*); SQLITE_PRIVATE void 
sqlite3ExprAssignVarNumber(Parse*, Expr*); SQLITE_PRIVATE void sqlite3ExprDelete(sqlite3*, Expr*); SQLITE_PRIVATE ExprList *sqlite3ExprListAppend(Parse*,ExprList*,Expr*); SQLITE_PRIVATE void sqlite3ExprListSetName(Parse*,ExprList*,Token*,int); SQLITE_PRIVATE void sqlite3ExprListSetSpan(Parse*,ExprList*,ExprSpan*); SQLITE_PRIVATE void sqlite3ExprListDelete(sqlite3*, ExprList*); SQLITE_PRIVATE u32 sqlite3ExprListFlags(const ExprList*); SQLITE_PRIVATE int sqlite3Init(sqlite3*, char**); SQLITE_PRIVATE int sqlite3InitCallback(void*, int, char**, char**); SQLITE_PRIVATE void sqlite3Pragma(Parse*,Token*,Token*,Token*,int); SQLITE_PRIVATE void sqlite3ResetAllSchemasOfConnection(sqlite3*); SQLITE_PRIVATE void sqlite3ResetOneSchema(sqlite3*,int); SQLITE_PRIVATE void sqlite3CollapseDatabaseArray(sqlite3*); SQLITE_PRIVATE void sqlite3BeginParse(Parse*,int); SQLITE_PRIVATE void sqlite3CommitInternalChanges(sqlite3*); SQLITE_PRIVATE Table *sqlite3ResultSetOfSelect(Parse*,Select*); SQLITE_PRIVATE void sqlite3OpenMasterTable(Parse *, int); SQLITE_PRIVATE Index *sqlite3PrimaryKeyIndex(Table*); SQLITE_PRIVATE i16 sqlite3ColumnOfIndex(Index*, i16); SQLITE_PRIVATE void sqlite3StartTable(Parse*,Token*,Token*,int,int,int,int); SQLITE_PRIVATE void sqlite3AddColumn(Parse*,Token*); SQLITE_PRIVATE void sqlite3AddNotNull(Parse*, int); SQLITE_PRIVATE void sqlite3AddPrimaryKey(Parse*, ExprList*, int, int, int); SQLITE_PRIVATE void sqlite3AddCheckConstraint(Parse*, Expr*); SQLITE_PRIVATE void sqlite3AddColumnType(Parse*,Token*); SQLITE_PRIVATE void sqlite3AddDefaultValue(Parse*,ExprSpan*); SQLITE_PRIVATE void sqlite3AddCollateType(Parse*, Token*); SQLITE_PRIVATE void sqlite3EndTable(Parse*,Token*,Token*,u8,Select*); SQLITE_PRIVATE int sqlite3ParseUri(const char*,const char*,unsigned int*, sqlite3_vfs**,char**,char **); SQLITE_PRIVATE Btree *sqlite3DbNameToBtree(sqlite3*,const char*); SQLITE_PRIVATE int sqlite3CodeOnce(Parse *); #ifdef SQLITE_OMIT_BUILTIN_TEST # define sqlite3FaultSim(X) SQLITE_OK #else SQLITE_PRIVATE int sqlite3FaultSim(int); #endif SQLITE_PRIVATE Bitvec *sqlite3BitvecCreate(u32); SQLITE_PRIVATE int sqlite3BitvecTest(Bitvec*, u32); SQLITE_PRIVATE int sqlite3BitvecTestNotNull(Bitvec*, u32); SQLITE_PRIVATE int sqlite3BitvecSet(Bitvec*, u32); SQLITE_PRIVATE void sqlite3BitvecClear(Bitvec*, u32, void*); SQLITE_PRIVATE void sqlite3BitvecDestroy(Bitvec*); SQLITE_PRIVATE u32 sqlite3BitvecSize(Bitvec*); #ifndef SQLITE_OMIT_BUILTIN_TEST SQLITE_PRIVATE int sqlite3BitvecBuiltinTest(int,int*); #endif SQLITE_PRIVATE RowSet *sqlite3RowSetInit(sqlite3*, void*, unsigned int); SQLITE_PRIVATE void sqlite3RowSetClear(RowSet*); SQLITE_PRIVATE void sqlite3RowSetInsert(RowSet*, i64); SQLITE_PRIVATE int sqlite3RowSetTest(RowSet*, int iBatch, i64); SQLITE_PRIVATE int sqlite3RowSetNext(RowSet*, i64*); SQLITE_PRIVATE void sqlite3CreateView(Parse*,Token*,Token*,Token*,Select*,int,int); #if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_VIRTUALTABLE) SQLITE_PRIVATE int sqlite3ViewGetColumnNames(Parse*,Table*); #else # define sqlite3ViewGetColumnNames(A,B) 0 #endif #if SQLITE_MAX_ATTACHED>30 SQLITE_PRIVATE int sqlite3DbMaskAllZero(yDbMask); #endif SQLITE_PRIVATE void sqlite3DropTable(Parse*, SrcList*, int, int); SQLITE_PRIVATE void sqlite3CodeDropTable(Parse*, Table*, int, int); SQLITE_PRIVATE void sqlite3DeleteTable(sqlite3*, Table*); #ifndef SQLITE_OMIT_AUTOINCREMENT SQLITE_PRIVATE void sqlite3AutoincrementBegin(Parse *pParse); SQLITE_PRIVATE void sqlite3AutoincrementEnd(Parse *pParse); #else # define 
sqlite3AutoincrementBegin(X) # define sqlite3AutoincrementEnd(X) #endif SQLITE_PRIVATE void sqlite3Insert(Parse*, SrcList*, Select*, IdList*, int); SQLITE_PRIVATE void *sqlite3ArrayAllocate(sqlite3*,void*,int,int*,int*); SQLITE_PRIVATE IdList *sqlite3IdListAppend(sqlite3*, IdList*, Token*); SQLITE_PRIVATE int sqlite3IdListIndex(IdList*,const char*); SQLITE_PRIVATE SrcList *sqlite3SrcListEnlarge(sqlite3*, SrcList*, int, int); SQLITE_PRIVATE SrcList *sqlite3SrcListAppend(sqlite3*, SrcList*, Token*, Token*); SQLITE_PRIVATE SrcList *sqlite3SrcListAppendFromTerm(Parse*, SrcList*, Token*, Token*, Token*, Select*, Expr*, IdList*); SQLITE_PRIVATE void sqlite3SrcListIndexedBy(Parse *, SrcList *, Token *); SQLITE_PRIVATE int sqlite3IndexedByLookup(Parse *, struct SrcList_item *); SQLITE_PRIVATE void sqlite3SrcListShiftJoinType(SrcList*); SQLITE_PRIVATE void sqlite3SrcListAssignCursors(Parse*, SrcList*); SQLITE_PRIVATE void sqlite3IdListDelete(sqlite3*, IdList*); SQLITE_PRIVATE void sqlite3SrcListDelete(sqlite3*, SrcList*); SQLITE_PRIVATE Index *sqlite3AllocateIndexObject(sqlite3*,i16,int,char**); SQLITE_PRIVATE Index *sqlite3CreateIndex(Parse*,Token*,Token*,SrcList*,ExprList*,int,Token*, Expr*, int, int); SQLITE_PRIVATE void sqlite3DropIndex(Parse*, SrcList*, int); SQLITE_PRIVATE int sqlite3Select(Parse*, Select*, SelectDest*); SQLITE_PRIVATE Select *sqlite3SelectNew(Parse*,ExprList*,SrcList*,Expr*,ExprList*, Expr*,ExprList*,u16,Expr*,Expr*); SQLITE_PRIVATE void sqlite3SelectDelete(sqlite3*, Select*); SQLITE_PRIVATE Table *sqlite3SrcListLookup(Parse*, SrcList*); SQLITE_PRIVATE int sqlite3IsReadOnly(Parse*, Table*, int); SQLITE_PRIVATE void sqlite3OpenTable(Parse*, int iCur, int iDb, Table*, int); #if defined(SQLITE_ENABLE_UPDATE_DELETE_LIMIT) && !defined(SQLITE_OMIT_SUBQUERY) SQLITE_PRIVATE Expr *sqlite3LimitWhere(Parse*,SrcList*,Expr*,ExprList*,Expr*,Expr*,char*); #endif SQLITE_PRIVATE void sqlite3DeleteFrom(Parse*, SrcList*, Expr*); SQLITE_PRIVATE void sqlite3Update(Parse*, SrcList*, ExprList*, Expr*, int); SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(Parse*,SrcList*,Expr*,ExprList*,ExprList*,u16,int); SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo*); SQLITE_PRIVATE u64 sqlite3WhereOutputRowCount(WhereInfo*); SQLITE_PRIVATE int sqlite3WhereIsDistinct(WhereInfo*); SQLITE_PRIVATE int sqlite3WhereIsOrdered(WhereInfo*); SQLITE_PRIVATE int sqlite3WhereIsSorted(WhereInfo*); SQLITE_PRIVATE int sqlite3WhereContinueLabel(WhereInfo*); SQLITE_PRIVATE int sqlite3WhereBreakLabel(WhereInfo*); SQLITE_PRIVATE int sqlite3WhereOkOnePass(WhereInfo*, int*); SQLITE_PRIVATE int sqlite3ExprCodeGetColumn(Parse*, Table*, int, int, int, u8); SQLITE_PRIVATE void sqlite3ExprCodeGetColumnOfTable(Vdbe*, Table*, int, int, int); SQLITE_PRIVATE void sqlite3ExprCodeMove(Parse*, int, int, int); SQLITE_PRIVATE void sqlite3ExprCacheStore(Parse*, int, int, int); SQLITE_PRIVATE void sqlite3ExprCachePush(Parse*); SQLITE_PRIVATE void sqlite3ExprCachePop(Parse*); SQLITE_PRIVATE void sqlite3ExprCacheRemove(Parse*, int, int); SQLITE_PRIVATE void sqlite3ExprCacheClear(Parse*); SQLITE_PRIVATE void sqlite3ExprCacheAffinityChange(Parse*, int, int); SQLITE_PRIVATE void sqlite3ExprCode(Parse*, Expr*, int); SQLITE_PRIVATE void sqlite3ExprCodeFactorable(Parse*, Expr*, int); SQLITE_PRIVATE void sqlite3ExprCodeAtInit(Parse*, Expr*, int, u8); SQLITE_PRIVATE int sqlite3ExprCodeTemp(Parse*, Expr*, int*); SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse*, Expr*, int); SQLITE_PRIVATE void sqlite3ExprCodeAndCache(Parse*, Expr*, int); SQLITE_PRIVATE int 
sqlite3ExprCodeExprList(Parse*, ExprList*, int, u8); #define SQLITE_ECEL_DUP 0x01 /* Deep, not shallow copies */ #define SQLITE_ECEL_FACTOR 0x02 /* Factor out constant terms */ SQLITE_PRIVATE void sqlite3ExprIfTrue(Parse*, Expr*, int, int); SQLITE_PRIVATE void sqlite3ExprIfFalse(Parse*, Expr*, int, int); SQLITE_PRIVATE void sqlite3ExprIfFalseDup(Parse*, Expr*, int, int); SQLITE_PRIVATE Table *sqlite3FindTable(sqlite3*,const char*, const char*); SQLITE_PRIVATE Table *sqlite3LocateTable(Parse*,int isView,const char*, const char*); SQLITE_PRIVATE Table *sqlite3LocateTableItem(Parse*,int isView,struct SrcList_item *); SQLITE_PRIVATE Index *sqlite3FindIndex(sqlite3*,const char*, const char*); SQLITE_PRIVATE void sqlite3UnlinkAndDeleteTable(sqlite3*,int,const char*); SQLITE_PRIVATE void sqlite3UnlinkAndDeleteIndex(sqlite3*,int,const char*); SQLITE_PRIVATE void sqlite3Vacuum(Parse*); SQLITE_PRIVATE int sqlite3RunVacuum(char**, sqlite3*); SQLITE_PRIVATE char *sqlite3NameFromToken(sqlite3*, Token*); SQLITE_PRIVATE int sqlite3ExprCompare(Expr*, Expr*, int); SQLITE_PRIVATE int sqlite3ExprListCompare(ExprList*, ExprList*, int); SQLITE_PRIVATE int sqlite3ExprImpliesExpr(Expr*, Expr*, int); SQLITE_PRIVATE void sqlite3ExprAnalyzeAggregates(NameContext*, Expr*); SQLITE_PRIVATE void sqlite3ExprAnalyzeAggList(NameContext*,ExprList*); SQLITE_PRIVATE int sqlite3FunctionUsesThisSrc(Expr*, SrcList*); SQLITE_PRIVATE Vdbe *sqlite3GetVdbe(Parse*); #ifndef SQLITE_OMIT_BUILTIN_TEST SQLITE_PRIVATE void sqlite3PrngSaveState(void); SQLITE_PRIVATE void sqlite3PrngRestoreState(void); #endif SQLITE_PRIVATE void sqlite3RollbackAll(sqlite3*,int); SQLITE_PRIVATE void sqlite3CodeVerifySchema(Parse*, int); SQLITE_PRIVATE void sqlite3CodeVerifyNamedSchema(Parse*, const char *zDb); SQLITE_PRIVATE void sqlite3BeginTransaction(Parse*, int); SQLITE_PRIVATE void sqlite3CommitTransaction(Parse*); SQLITE_PRIVATE void sqlite3RollbackTransaction(Parse*); SQLITE_PRIVATE void sqlite3Savepoint(Parse*, int, Token*); SQLITE_PRIVATE void sqlite3CloseSavepoints(sqlite3 *); SQLITE_PRIVATE void sqlite3LeaveMutexAndCloseZombie(sqlite3*); SQLITE_PRIVATE int sqlite3ExprIsConstant(Expr*); SQLITE_PRIVATE int sqlite3ExprIsConstantNotJoin(Expr*); SQLITE_PRIVATE int sqlite3ExprIsConstantOrFunction(Expr*, u8); SQLITE_PRIVATE int sqlite3ExprIsTableConstant(Expr*,int); SQLITE_PRIVATE int sqlite3ExprIsInteger(Expr*, int*); SQLITE_PRIVATE int sqlite3ExprCanBeNull(const Expr*); SQLITE_PRIVATE int sqlite3ExprNeedsNoAffinityChange(const Expr*, char); SQLITE_PRIVATE int sqlite3IsRowid(const char*); SQLITE_PRIVATE void sqlite3GenerateRowDelete(Parse*,Table*,Trigger*,int,int,int,i16,u8,u8,u8); SQLITE_PRIVATE void sqlite3GenerateRowIndexDelete(Parse*, Table*, int, int, int*); SQLITE_PRIVATE int sqlite3GenerateIndexKey(Parse*, Index*, int, int, int, int*,Index*,int); SQLITE_PRIVATE void sqlite3ResolvePartIdxLabel(Parse*,int); SQLITE_PRIVATE void sqlite3GenerateConstraintChecks(Parse*,Table*,int*,int,int,int,int, u8,u8,int,int*); SQLITE_PRIVATE void sqlite3CompleteInsertion(Parse*,Table*,int,int,int,int*,int,int,int); SQLITE_PRIVATE int sqlite3OpenTableAndIndices(Parse*, Table*, int, int, u8*, int*, int*); SQLITE_PRIVATE void sqlite3BeginWriteOperation(Parse*, int, int); SQLITE_PRIVATE void sqlite3MultiWrite(Parse*); SQLITE_PRIVATE void sqlite3MayAbort(Parse*); SQLITE_PRIVATE void sqlite3HaltConstraint(Parse*, int, int, char*, i8, u8); SQLITE_PRIVATE void sqlite3UniqueConstraint(Parse*, int, Index*); SQLITE_PRIVATE void sqlite3RowidConstraint(Parse*, int, Table*); 
SQLITE_PRIVATE Expr *sqlite3ExprDup(sqlite3*,Expr*,int); SQLITE_PRIVATE ExprList *sqlite3ExprListDup(sqlite3*,ExprList*,int); SQLITE_PRIVATE SrcList *sqlite3SrcListDup(sqlite3*,SrcList*,int); SQLITE_PRIVATE IdList *sqlite3IdListDup(sqlite3*,IdList*); SQLITE_PRIVATE Select *sqlite3SelectDup(sqlite3*,Select*,int); #if SELECTTRACE_ENABLED SQLITE_PRIVATE void sqlite3SelectSetName(Select*,const char*); #else # define sqlite3SelectSetName(A,B) #endif SQLITE_PRIVATE void sqlite3FuncDefInsert(FuncDefHash*, FuncDef*); SQLITE_PRIVATE FuncDef *sqlite3FindFunction(sqlite3*,const char*,int,int,u8,u8); SQLITE_PRIVATE void sqlite3RegisterBuiltinFunctions(sqlite3*); SQLITE_PRIVATE void sqlite3RegisterDateTimeFunctions(void); SQLITE_PRIVATE void sqlite3RegisterGlobalFunctions(void); SQLITE_PRIVATE int sqlite3SafetyCheckOk(sqlite3*); SQLITE_PRIVATE int sqlite3SafetyCheckSickOrOk(sqlite3*); SQLITE_PRIVATE void sqlite3ChangeCookie(Parse*, int); #if !defined(SQLITE_OMIT_VIEW) && !defined(SQLITE_OMIT_TRIGGER) SQLITE_PRIVATE void sqlite3MaterializeView(Parse*, Table*, Expr*, int); #endif #ifndef SQLITE_OMIT_TRIGGER SQLITE_PRIVATE void sqlite3BeginTrigger(Parse*, Token*,Token*,int,int,IdList*,SrcList*, Expr*,int, int); SQLITE_PRIVATE void sqlite3FinishTrigger(Parse*, TriggerStep*, Token*); SQLITE_PRIVATE void sqlite3DropTrigger(Parse*, SrcList*, int); SQLITE_PRIVATE void sqlite3DropTriggerPtr(Parse*, Trigger*); SQLITE_PRIVATE Trigger *sqlite3TriggersExist(Parse *, Table*, int, ExprList*, int *pMask); SQLITE_PRIVATE Trigger *sqlite3TriggerList(Parse *, Table *); SQLITE_PRIVATE void sqlite3CodeRowTrigger(Parse*, Trigger *, int, ExprList*, int, Table *, int, int, int); SQLITE_PRIVATE void sqlite3CodeRowTriggerDirect(Parse *, Trigger *, Table *, int, int, int); void sqliteViewTriggers(Parse*, Table*, Expr*, int, ExprList*); SQLITE_PRIVATE void sqlite3DeleteTriggerStep(sqlite3*, TriggerStep*); SQLITE_PRIVATE TriggerStep *sqlite3TriggerSelectStep(sqlite3*,Select*); SQLITE_PRIVATE TriggerStep *sqlite3TriggerInsertStep(sqlite3*,Token*, IdList*, Select*,u8); SQLITE_PRIVATE TriggerStep *sqlite3TriggerUpdateStep(sqlite3*,Token*,ExprList*, Expr*, u8); SQLITE_PRIVATE TriggerStep *sqlite3TriggerDeleteStep(sqlite3*,Token*, Expr*); SQLITE_PRIVATE void sqlite3DeleteTrigger(sqlite3*, Trigger*); SQLITE_PRIVATE void sqlite3UnlinkAndDeleteTrigger(sqlite3*,int,const char*); SQLITE_PRIVATE u32 sqlite3TriggerColmask(Parse*,Trigger*,ExprList*,int,int,Table*,int); # define sqlite3ParseToplevel(p) ((p)->pToplevel ? 
(p)->pToplevel : (p)) #else # define sqlite3TriggersExist(B,C,D,E,F) 0 # define sqlite3DeleteTrigger(A,B) # define sqlite3DropTriggerPtr(A,B) # define sqlite3UnlinkAndDeleteTrigger(A,B,C) # define sqlite3CodeRowTrigger(A,B,C,D,E,F,G,H,I) # define sqlite3CodeRowTriggerDirect(A,B,C,D,E,F) # define sqlite3TriggerList(X, Y) 0 # define sqlite3ParseToplevel(p) p # define sqlite3TriggerColmask(A,B,C,D,E,F,G) 0 #endif SQLITE_PRIVATE int sqlite3JoinType(Parse*, Token*, Token*, Token*); SQLITE_PRIVATE void sqlite3CreateForeignKey(Parse*, ExprList*, Token*, ExprList*, int); SQLITE_PRIVATE void sqlite3DeferForeignKey(Parse*, int); #ifndef SQLITE_OMIT_AUTHORIZATION SQLITE_PRIVATE void sqlite3AuthRead(Parse*,Expr*,Schema*,SrcList*); SQLITE_PRIVATE int sqlite3AuthCheck(Parse*,int, const char*, const char*, const char*); SQLITE_PRIVATE void sqlite3AuthContextPush(Parse*, AuthContext*, const char*); SQLITE_PRIVATE void sqlite3AuthContextPop(AuthContext*); SQLITE_PRIVATE int sqlite3AuthReadCol(Parse*, const char *, const char *, int); #else # define sqlite3AuthRead(a,b,c,d) # define sqlite3AuthCheck(a,b,c,d,e) SQLITE_OK # define sqlite3AuthContextPush(a,b,c) # define sqlite3AuthContextPop(a) ((void)(a)) #endif SQLITE_PRIVATE void sqlite3Attach(Parse*, Expr*, Expr*, Expr*); SQLITE_PRIVATE void sqlite3Detach(Parse*, Expr*); SQLITE_PRIVATE void sqlite3FixInit(DbFixer*, Parse*, int, const char*, const Token*); SQLITE_PRIVATE int sqlite3FixSrcList(DbFixer*, SrcList*); SQLITE_PRIVATE int sqlite3FixSelect(DbFixer*, Select*); SQLITE_PRIVATE int sqlite3FixExpr(DbFixer*, Expr*); SQLITE_PRIVATE int sqlite3FixExprList(DbFixer*, ExprList*); SQLITE_PRIVATE int sqlite3FixTriggerStep(DbFixer*, TriggerStep*); SQLITE_PRIVATE int sqlite3AtoF(const char *z, double*, int, u8); SQLITE_PRIVATE int sqlite3GetInt32(const char *, int*); SQLITE_PRIVATE int sqlite3Atoi(const char*); SQLITE_PRIVATE int sqlite3Utf16ByteLen(const void *pData, int nChar); SQLITE_PRIVATE int sqlite3Utf8CharLen(const char *pData, int nByte); SQLITE_PRIVATE u32 sqlite3Utf8Read(const u8**); SQLITE_PRIVATE LogEst sqlite3LogEst(u64); SQLITE_PRIVATE LogEst sqlite3LogEstAdd(LogEst,LogEst); #ifndef SQLITE_OMIT_VIRTUALTABLE SQLITE_PRIVATE LogEst sqlite3LogEstFromDouble(double); #endif SQLITE_PRIVATE u64 sqlite3LogEstToInt(LogEst); /* ** Routines to read and write variable-length integers. These used to ** be defined locally, but now we use the varint routines in the util.c ** file. */ SQLITE_PRIVATE int sqlite3PutVarint(unsigned char*, u64); SQLITE_PRIVATE u8 sqlite3GetVarint(const unsigned char *, u64 *); SQLITE_PRIVATE u8 sqlite3GetVarint32(const unsigned char *, u32 *); SQLITE_PRIVATE int sqlite3VarintLen(u64 v); /* ** The common case is for a varint to be a single byte. They following ** macros handle the common case without a procedure call, but then call ** the procedure for larger varints. 
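**
** As an illustration of the encoding these routines implement: a varint
** stores its value big-endian, seven bits per byte (eight in the ninth
** byte of the longest form), with the high bit set on every byte except
** the last. Values 0 through 127 therefore occupy a single byte, which is
** the common case that the macros below handle without a function call.
** For example, the two-byte sequence 0x81 0x23 decodes to
** (0x01<<7) | 0x23 == 163.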
*/ #define getVarint32(A,B) \ (u8)((*(A)<(u8)0x80)?((B)=(u32)*(A)),1:sqlite3GetVarint32((A),(u32 *)&(B))) #define putVarint32(A,B) \ (u8)(((u32)(B)<(u32)0x80)?(*(A)=(unsigned char)(B)),1:\ sqlite3PutVarint((A),(B))) #define getVarint sqlite3GetVarint #define putVarint sqlite3PutVarint SQLITE_PRIVATE const char *sqlite3IndexAffinityStr(Vdbe *, Index *); SQLITE_PRIVATE void sqlite3TableAffinity(Vdbe*, Table*, int); SQLITE_PRIVATE char sqlite3CompareAffinity(Expr *pExpr, char aff2); SQLITE_PRIVATE int sqlite3IndexAffinityOk(Expr *pExpr, char idx_affinity); SQLITE_PRIVATE char sqlite3ExprAffinity(Expr *pExpr); SQLITE_PRIVATE int sqlite3Atoi64(const char*, i64*, int, u8); SQLITE_PRIVATE int sqlite3DecOrHexToI64(const char*, i64*); SQLITE_PRIVATE void sqlite3ErrorWithMsg(sqlite3*, int, const char*,...); SQLITE_PRIVATE void sqlite3Error(sqlite3*,int); SQLITE_PRIVATE void *sqlite3HexToBlob(sqlite3*, const char *z, int n); SQLITE_PRIVATE u8 sqlite3HexToInt(int h); SQLITE_PRIVATE int sqlite3TwoPartName(Parse *, Token *, Token *, Token **); #if defined(SQLITE_NEED_ERR_NAME) SQLITE_PRIVATE const char *sqlite3ErrName(int); #endif SQLITE_PRIVATE const char *sqlite3ErrStr(int); SQLITE_PRIVATE int sqlite3ReadSchema(Parse *pParse); SQLITE_PRIVATE CollSeq *sqlite3FindCollSeq(sqlite3*,u8 enc, const char*,int); SQLITE_PRIVATE CollSeq *sqlite3LocateCollSeq(Parse *pParse, const char*zName); SQLITE_PRIVATE CollSeq *sqlite3ExprCollSeq(Parse *pParse, Expr *pExpr); SQLITE_PRIVATE Expr *sqlite3ExprAddCollateToken(Parse *pParse, Expr*, const Token*, int); SQLITE_PRIVATE Expr *sqlite3ExprAddCollateString(Parse*,Expr*,const char*); SQLITE_PRIVATE Expr *sqlite3ExprSkipCollate(Expr*); SQLITE_PRIVATE int sqlite3CheckCollSeq(Parse *, CollSeq *); SQLITE_PRIVATE int sqlite3CheckObjectName(Parse *, const char *); SQLITE_PRIVATE void sqlite3VdbeSetChanges(sqlite3 *, int); SQLITE_PRIVATE int sqlite3AddInt64(i64*,i64); SQLITE_PRIVATE int sqlite3SubInt64(i64*,i64); SQLITE_PRIVATE int sqlite3MulInt64(i64*,i64); SQLITE_PRIVATE int sqlite3AbsInt32(int); #ifdef SQLITE_ENABLE_8_3_NAMES SQLITE_PRIVATE void sqlite3FileSuffix3(const char*, char*); #else # define sqlite3FileSuffix3(X,Y) #endif SQLITE_PRIVATE u8 sqlite3GetBoolean(const char *z,u8); SQLITE_PRIVATE const void *sqlite3ValueText(sqlite3_value*, u8); SQLITE_PRIVATE int sqlite3ValueBytes(sqlite3_value*, u8); SQLITE_PRIVATE void sqlite3ValueSetStr(sqlite3_value*, int, const void *,u8, void(*)(void*)); SQLITE_PRIVATE void sqlite3ValueSetNull(sqlite3_value*); SQLITE_PRIVATE void sqlite3ValueFree(sqlite3_value*); SQLITE_PRIVATE sqlite3_value *sqlite3ValueNew(sqlite3 *); SQLITE_PRIVATE char *sqlite3Utf16to8(sqlite3 *, const void*, int, u8); SQLITE_PRIVATE int sqlite3ValueFromExpr(sqlite3 *, Expr *, u8, u8, sqlite3_value **); SQLITE_PRIVATE void sqlite3ValueApplyAffinity(sqlite3_value *, u8, u8); #ifndef SQLITE_AMALGAMATION SQLITE_PRIVATE const unsigned char sqlite3OpcodeProperty[]; SQLITE_PRIVATE const unsigned char sqlite3UpperToLower[]; SQLITE_PRIVATE const unsigned char sqlite3CtypeMap[]; SQLITE_PRIVATE const Token sqlite3IntTokens[]; SQLITE_PRIVATE SQLITE_WSD struct Sqlite3Config sqlite3Config; SQLITE_PRIVATE SQLITE_WSD FuncDefHash sqlite3GlobalFunctions; #ifndef SQLITE_OMIT_WSD SQLITE_PRIVATE int sqlite3PendingByte; #endif #endif SQLITE_PRIVATE void sqlite3RootPageMoved(sqlite3*, int, int, int); SQLITE_PRIVATE void sqlite3Reindex(Parse*, Token*, Token*); SQLITE_PRIVATE void sqlite3AlterFunctions(void); SQLITE_PRIVATE void sqlite3AlterRenameTable(Parse*, SrcList*, Token*); 
SQLITE_PRIVATE int sqlite3GetToken(const unsigned char *, int *); SQLITE_PRIVATE void sqlite3NestedParse(Parse*, const char*, ...); SQLITE_PRIVATE void sqlite3ExpirePreparedStatements(sqlite3*); SQLITE_PRIVATE int sqlite3CodeSubselect(Parse *, Expr *, int, int); SQLITE_PRIVATE void sqlite3SelectPrep(Parse*, Select*, NameContext*); SQLITE_PRIVATE void sqlite3SelectWrongNumTermsError(Parse *pParse, Select *p); SQLITE_PRIVATE int sqlite3MatchSpanName(const char*, const char*, const char*, const char*); SQLITE_PRIVATE int sqlite3ResolveExprNames(NameContext*, Expr*); SQLITE_PRIVATE void sqlite3ResolveSelectNames(Parse*, Select*, NameContext*); SQLITE_PRIVATE void sqlite3ResolveSelfReference(Parse*,Table*,int,Expr*,ExprList*); SQLITE_PRIVATE int sqlite3ResolveOrderGroupBy(Parse*, Select*, ExprList*, const char*); SQLITE_PRIVATE void sqlite3ColumnDefault(Vdbe *, Table *, int, int); SQLITE_PRIVATE void sqlite3AlterFinishAddColumn(Parse *, Token *); SQLITE_PRIVATE void sqlite3AlterBeginAddColumn(Parse *, SrcList *); SQLITE_PRIVATE CollSeq *sqlite3GetCollSeq(Parse*, u8, CollSeq *, const char*); SQLITE_PRIVATE char sqlite3AffinityType(const char*, u8*); SQLITE_PRIVATE void sqlite3Analyze(Parse*, Token*, Token*); SQLITE_PRIVATE int sqlite3InvokeBusyHandler(BusyHandler*); SQLITE_PRIVATE int sqlite3FindDb(sqlite3*, Token*); SQLITE_PRIVATE int sqlite3FindDbName(sqlite3 *, const char *); SQLITE_PRIVATE int sqlite3AnalysisLoad(sqlite3*,int iDB); SQLITE_PRIVATE void sqlite3DeleteIndexSamples(sqlite3*,Index*); SQLITE_PRIVATE void sqlite3DefaultRowEst(Index*); SQLITE_PRIVATE void sqlite3RegisterLikeFunctions(sqlite3*, int); SQLITE_PRIVATE int sqlite3IsLikeFunction(sqlite3*,Expr*,int*,char*); SQLITE_PRIVATE void sqlite3MinimumFileFormat(Parse*, int, int); SQLITE_PRIVATE void sqlite3SchemaClear(void *); SQLITE_PRIVATE Schema *sqlite3SchemaGet(sqlite3 *, Btree *); SQLITE_PRIVATE int sqlite3SchemaToIndex(sqlite3 *db, Schema *); SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoAlloc(sqlite3*,int,int); SQLITE_PRIVATE void sqlite3KeyInfoUnref(KeyInfo*); SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoRef(KeyInfo*); SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoOfIndex(Parse*, Index*); #ifdef SQLITE_DEBUG SQLITE_PRIVATE int sqlite3KeyInfoIsWriteable(KeyInfo*); #endif SQLITE_PRIVATE int sqlite3CreateFunc(sqlite3 *, const char *, int, int, void *, void (*)(sqlite3_context*,int,sqlite3_value **), void (*)(sqlite3_context*,int,sqlite3_value **), void (*)(sqlite3_context*), FuncDestructor *pDestructor ); SQLITE_PRIVATE int sqlite3ApiExit(sqlite3 *db, int); SQLITE_PRIVATE int sqlite3OpenTempDatabase(Parse *); SQLITE_PRIVATE void sqlite3StrAccumInit(StrAccum*, sqlite3*, char*, int, int); SQLITE_PRIVATE void sqlite3StrAccumAppend(StrAccum*,const char*,int); SQLITE_PRIVATE void sqlite3StrAccumAppendAll(StrAccum*,const char*); SQLITE_PRIVATE void sqlite3AppendChar(StrAccum*,int,char); SQLITE_PRIVATE char *sqlite3StrAccumFinish(StrAccum*); SQLITE_PRIVATE void sqlite3StrAccumReset(StrAccum*); SQLITE_PRIVATE void sqlite3SelectDestInit(SelectDest*,int,int); SQLITE_PRIVATE Expr *sqlite3CreateColumnExpr(sqlite3 *, SrcList *, int, int); SQLITE_PRIVATE void sqlite3BackupRestart(sqlite3_backup *); SQLITE_PRIVATE void sqlite3BackupUpdate(sqlite3_backup *, Pgno, const u8 *); #ifdef SQLITE_ENABLE_STAT3_OR_STAT4 SQLITE_PRIVATE void sqlite3AnalyzeFunctions(void); SQLITE_PRIVATE int sqlite3Stat4ProbeSetValue(Parse*,Index*,UnpackedRecord**,Expr*,u8,int,int*); SQLITE_PRIVATE int sqlite3Stat4ValueFromExpr(Parse*, Expr*, u8, sqlite3_value**); SQLITE_PRIVATE void 
sqlite3Stat4ProbeFree(UnpackedRecord*); SQLITE_PRIVATE int sqlite3Stat4Column(sqlite3*, const void*, int, int, sqlite3_value**); #endif /* ** The interface to the LEMON-generated parser */ SQLITE_PRIVATE void *sqlite3ParserAlloc(void*(*)(u64)); SQLITE_PRIVATE void sqlite3ParserFree(void*, void(*)(void*)); SQLITE_PRIVATE void sqlite3Parser(void*, int, Token, Parse*); #ifdef YYTRACKMAXSTACKDEPTH SQLITE_PRIVATE int sqlite3ParserStackPeak(void*); #endif SQLITE_PRIVATE void sqlite3AutoLoadExtensions(sqlite3*); #ifndef SQLITE_OMIT_LOAD_EXTENSION SQLITE_PRIVATE void sqlite3CloseExtensions(sqlite3*); #else # define sqlite3CloseExtensions(X) #endif #ifndef SQLITE_OMIT_SHARED_CACHE SQLITE_PRIVATE void sqlite3TableLock(Parse *, int, int, u8, const char *); #else #define sqlite3TableLock(v,w,x,y,z) #endif #ifdef SQLITE_TEST SQLITE_PRIVATE int sqlite3Utf8To8(unsigned char*); #endif #ifdef SQLITE_OMIT_VIRTUALTABLE # define sqlite3VtabClear(Y) # define sqlite3VtabSync(X,Y) SQLITE_OK # define sqlite3VtabRollback(X) # define sqlite3VtabCommit(X) # define sqlite3VtabInSync(db) 0 # define sqlite3VtabLock(X) # define sqlite3VtabUnlock(X) # define sqlite3VtabUnlockList(X) # define sqlite3VtabSavepoint(X, Y, Z) SQLITE_OK # define sqlite3GetVTable(X,Y) ((VTable*)0) #else SQLITE_PRIVATE void sqlite3VtabClear(sqlite3 *db, Table*); SQLITE_PRIVATE void sqlite3VtabDisconnect(sqlite3 *db, Table *p); SQLITE_PRIVATE int sqlite3VtabSync(sqlite3 *db, Vdbe*); SQLITE_PRIVATE int sqlite3VtabRollback(sqlite3 *db); SQLITE_PRIVATE int sqlite3VtabCommit(sqlite3 *db); SQLITE_PRIVATE void sqlite3VtabLock(VTable *); SQLITE_PRIVATE void sqlite3VtabUnlock(VTable *); SQLITE_PRIVATE void sqlite3VtabUnlockList(sqlite3*); SQLITE_PRIVATE int sqlite3VtabSavepoint(sqlite3 *, int, int); SQLITE_PRIVATE void sqlite3VtabImportErrmsg(Vdbe*, sqlite3_vtab*); SQLITE_PRIVATE VTable *sqlite3GetVTable(sqlite3*, Table*); # define sqlite3VtabInSync(db) ((db)->nVTrans>0 && (db)->aVTrans==0) #endif SQLITE_PRIVATE void sqlite3VtabMakeWritable(Parse*,Table*); SQLITE_PRIVATE void sqlite3VtabBeginParse(Parse*, Token*, Token*, Token*, int); SQLITE_PRIVATE void sqlite3VtabFinishParse(Parse*, Token*); SQLITE_PRIVATE void sqlite3VtabArgInit(Parse*); SQLITE_PRIVATE void sqlite3VtabArgExtend(Parse*, Token*); SQLITE_PRIVATE int sqlite3VtabCallCreate(sqlite3*, int, const char *, char **); SQLITE_PRIVATE int sqlite3VtabCallConnect(Parse*, Table*); SQLITE_PRIVATE int sqlite3VtabCallDestroy(sqlite3*, int, const char *); SQLITE_PRIVATE int sqlite3VtabBegin(sqlite3 *, VTable *); SQLITE_PRIVATE FuncDef *sqlite3VtabOverloadFunction(sqlite3 *,FuncDef*, int nArg, Expr*); SQLITE_PRIVATE void sqlite3InvalidFunction(sqlite3_context*,int,sqlite3_value**); SQLITE_PRIVATE sqlite3_int64 sqlite3StmtCurrentTime(sqlite3_context*); SQLITE_PRIVATE int sqlite3VdbeParameterIndex(Vdbe*, const char*, int); SQLITE_PRIVATE int sqlite3TransferBindings(sqlite3_stmt *, sqlite3_stmt *); SQLITE_PRIVATE void sqlite3ParserReset(Parse*); SQLITE_PRIVATE int sqlite3Reprepare(Vdbe*); SQLITE_PRIVATE void sqlite3ExprListCheckLength(Parse*, ExprList*, const char*); SQLITE_PRIVATE CollSeq *sqlite3BinaryCompareCollSeq(Parse *, Expr *, Expr *); SQLITE_PRIVATE int sqlite3TempInMemory(const sqlite3*); SQLITE_PRIVATE const char *sqlite3JournalModename(int); #ifndef SQLITE_OMIT_WAL SQLITE_PRIVATE int sqlite3Checkpoint(sqlite3*, int, int, int*, int*); SQLITE_PRIVATE int sqlite3WalDefaultHook(void*,sqlite3*,const char*,int); #endif #ifndef SQLITE_OMIT_CTE SQLITE_PRIVATE With 
*sqlite3WithAdd(Parse*,With*,Token*,ExprList*,Select*); SQLITE_PRIVATE void sqlite3WithDelete(sqlite3*,With*); SQLITE_PRIVATE void sqlite3WithPush(Parse*, With*, u8); #else #define sqlite3WithPush(x,y,z) #define sqlite3WithDelete(x,y) #endif /* Declarations for functions in fkey.c. All of these are replaced by ** no-op macros if OMIT_FOREIGN_KEY is defined. In this case no foreign ** key functionality is available. If OMIT_TRIGGER is defined but ** OMIT_FOREIGN_KEY is not, only some of the functions are no-oped. In ** this case foreign keys are parsed, but no other functionality is ** provided (enforcement of FK constraints requires the triggers sub-system). */ #if !defined(SQLITE_OMIT_FOREIGN_KEY) && !defined(SQLITE_OMIT_TRIGGER) SQLITE_PRIVATE void sqlite3FkCheck(Parse*, Table*, int, int, int*, int); SQLITE_PRIVATE void sqlite3FkDropTable(Parse*, SrcList *, Table*); SQLITE_PRIVATE void sqlite3FkActions(Parse*, Table*, ExprList*, int, int*, int); SQLITE_PRIVATE int sqlite3FkRequired(Parse*, Table*, int*, int); SQLITE_PRIVATE u32 sqlite3FkOldmask(Parse*, Table*); SQLITE_PRIVATE FKey *sqlite3FkReferences(Table *); #else #define sqlite3FkActions(a,b,c,d,e,f) #define sqlite3FkCheck(a,b,c,d,e,f) #define sqlite3FkDropTable(a,b,c) #define sqlite3FkOldmask(a,b) 0 #define sqlite3FkRequired(a,b,c,d) 0 #endif #ifndef SQLITE_OMIT_FOREIGN_KEY SQLITE_PRIVATE void sqlite3FkDelete(sqlite3 *, Table*); SQLITE_PRIVATE int sqlite3FkLocateIndex(Parse*,Table*,FKey*,Index**,int**); #else #define sqlite3FkDelete(a,b) #define sqlite3FkLocateIndex(a,b,c,d,e) #endif /* ** Available fault injectors. Should be numbered beginning with 0. */ #define SQLITE_FAULTINJECTOR_MALLOC 0 #define SQLITE_FAULTINJECTOR_COUNT 1 /* ** The interface to the code in fault.c used for identifying "benign" ** malloc failures. This is only present if SQLITE_OMIT_BUILTIN_TEST ** is not defined. */ #ifndef SQLITE_OMIT_BUILTIN_TEST SQLITE_PRIVATE void sqlite3BeginBenignMalloc(void); SQLITE_PRIVATE void sqlite3EndBenignMalloc(void); #else #define sqlite3BeginBenignMalloc() #define sqlite3EndBenignMalloc() #endif /* ** Allowed return values from sqlite3FindInIndex() */ #define IN_INDEX_ROWID 1 /* Search the rowid of the table */ #define IN_INDEX_EPH 2 /* Search an ephemeral b-tree */ #define IN_INDEX_INDEX_ASC 3 /* Existing index ASCENDING */ #define IN_INDEX_INDEX_DESC 4 /* Existing index DESCENDING */ #define IN_INDEX_NOOP 5 /* No table available. Use comparisons */ /* ** Allowed flags for the 3rd parameter to sqlite3FindInIndex(). 
*/ #define IN_INDEX_NOOP_OK 0x0001 /* OK to return IN_INDEX_NOOP */ #define IN_INDEX_MEMBERSHIP 0x0002 /* IN operator used for membership test */ #define IN_INDEX_LOOP 0x0004 /* IN operator used as a loop */ SQLITE_PRIVATE int sqlite3FindInIndex(Parse *, Expr *, u32, int*); #ifdef SQLITE_ENABLE_ATOMIC_WRITE SQLITE_PRIVATE int sqlite3JournalOpen(sqlite3_vfs *, const char *, sqlite3_file *, int, int); SQLITE_PRIVATE int sqlite3JournalSize(sqlite3_vfs *); SQLITE_PRIVATE int sqlite3JournalCreate(sqlite3_file *); SQLITE_PRIVATE int sqlite3JournalExists(sqlite3_file *p); #else #define sqlite3JournalSize(pVfs) ((pVfs)->szOsFile) #define sqlite3JournalExists(p) 1 #endif SQLITE_PRIVATE void sqlite3MemJournalOpen(sqlite3_file *); SQLITE_PRIVATE int sqlite3MemJournalSize(void); SQLITE_PRIVATE int sqlite3IsMemJournal(sqlite3_file *); SQLITE_PRIVATE void sqlite3ExprSetHeightAndFlags(Parse *pParse, Expr *p); #if SQLITE_MAX_EXPR_DEPTH>0 SQLITE_PRIVATE int sqlite3SelectExprHeight(Select *); SQLITE_PRIVATE int sqlite3ExprCheckHeight(Parse*, int); #else #define sqlite3SelectExprHeight(x) 0 #define sqlite3ExprCheckHeight(x,y) #endif SQLITE_PRIVATE u32 sqlite3Get4byte(const u8*); SQLITE_PRIVATE void sqlite3Put4byte(u8*, u32); #ifdef SQLITE_ENABLE_UNLOCK_NOTIFY SQLITE_PRIVATE void sqlite3ConnectionBlocked(sqlite3 *, sqlite3 *); SQLITE_PRIVATE void sqlite3ConnectionUnlocked(sqlite3 *db); SQLITE_PRIVATE void sqlite3ConnectionClosed(sqlite3 *db); #else #define sqlite3ConnectionBlocked(x,y) #define sqlite3ConnectionUnlocked(x) #define sqlite3ConnectionClosed(x) #endif #ifdef SQLITE_DEBUG SQLITE_PRIVATE void sqlite3ParserTrace(FILE*, char *); #endif /* ** If the SQLITE_ENABLE IOTRACE exists then the global variable ** sqlite3IoTrace is a pointer to a printf-like routine used to ** print I/O tracing messages. */ #ifdef SQLITE_ENABLE_IOTRACE # define IOTRACE(A) if( sqlite3IoTrace ){ sqlite3IoTrace A; } SQLITE_PRIVATE void sqlite3VdbeIOTraceSql(Vdbe*); SQLITE_API SQLITE_EXTERN void (SQLITE_CDECL *sqlite3IoTrace)(const char*,...); #else # define IOTRACE(A) # define sqlite3VdbeIOTraceSql(X) #endif /* ** These routines are available for the mem2.c debugging memory allocator ** only. They are used to verify that different "types" of memory ** allocations are properly tracked by the system. ** ** sqlite3MemdebugSetType() sets the "type" of an allocation to one of ** the MEMTYPE_* macros defined below. The type must be a bitmask with ** a single bit set. ** ** sqlite3MemdebugHasType() returns true if any of the bits in its second ** argument match the type set by the previous sqlite3MemdebugSetType(). ** sqlite3MemdebugHasType() is intended for use inside assert() statements. ** ** sqlite3MemdebugNoType() returns true if none of the bits in its second ** argument match the type set by the previous sqlite3MemdebugSetType(). ** ** Perhaps the most important point is the difference between MEMTYPE_HEAP ** and MEMTYPE_LOOKASIDE. If an allocation is MEMTYPE_LOOKASIDE, that means ** it might have been allocated by lookaside, except the allocation was ** too large or lookaside was already full. It is important to verify ** that allocations that might have been satisfied by lookaside are not ** passed back to non-lookaside free() routines. Asserts such as the ** example above are placed on the non-lookaside free() routines to verify ** this constraint. ** ** All of this is no-op for a production build. It only comes into ** play when the SQLITE_MEMDEBUG compile-time option is used. 
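**
** A concrete form of such an assert (illustrative) is:
**
**     assert( sqlite3MemdebugNoType(p, MEMTYPE_LOOKASIDE) );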
*/ #ifdef SQLITE_MEMDEBUG SQLITE_PRIVATE void sqlite3MemdebugSetType(void*,u8); SQLITE_PRIVATE int sqlite3MemdebugHasType(void*,u8); SQLITE_PRIVATE int sqlite3MemdebugNoType(void*,u8); #else # define sqlite3MemdebugSetType(X,Y) /* no-op */ # define sqlite3MemdebugHasType(X,Y) 1 # define sqlite3MemdebugNoType(X,Y) 1 #endif #define MEMTYPE_HEAP 0x01 /* General heap allocations */ #define MEMTYPE_LOOKASIDE 0x02 /* Heap that might have been lookaside */ #define MEMTYPE_SCRATCH 0x04 /* Scratch allocations */ #define MEMTYPE_PCACHE 0x08 /* Page cache allocations */ /* ** Threading interface */ #if SQLITE_MAX_WORKER_THREADS>0 SQLITE_PRIVATE int sqlite3ThreadCreate(SQLiteThread**,void*(*)(void*),void*); SQLITE_PRIVATE int sqlite3ThreadJoin(SQLiteThread*, void**); #endif #if defined(SQLITE_ENABLE_DBSTAT_VTAB) || defined(SQLITE_TEST) SQLITE_PRIVATE int sqlite3DbstatRegister(sqlite3*); #endif #endif /* _SQLITEINT_H_ */ /************** End of sqliteInt.h *******************************************/ /************** Begin file global.c ******************************************/ /* ** 2008 June 13 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** ** This file contains definitions of global variables and constants. */ /* #include "sqliteInt.h" */ /* An array to map all upper-case characters into their corresponding ** lower-case character. ** ** SQLite only considers US-ASCII (or EBCDIC) characters. We do not ** handle case conversions for the UTF character set since the tables ** involved are nearly as big or bigger than SQLite itself. 
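**
** For example, in an SQLITE_ASCII build the entry for 'B' (0x42) is
** 'b' (0x62), and every code point that is not an upper-case ASCII
** letter maps to itself.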
*/ SQLITE_PRIVATE const unsigned char sqlite3UpperToLower[] = { #ifdef SQLITE_ASCII 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 97, 98, 99,100,101,102,103, 104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121, 122, 91, 92, 93, 94, 95, 96, 97, 98, 99,100,101,102,103,104,105,106,107, 108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125, 126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143, 144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161, 162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179, 180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197, 198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215, 216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233, 234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251, 252,253,254,255 #endif #ifdef SQLITE_EBCDIC 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, /* 0x */ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, /* 1x */ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, /* 2x */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, /* 3x */ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, /* 4x */ 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, /* 5x */ 96, 97, 98, 99,100,101,102,103,104,105,106,107,108,109,110,111, /* 6x */ 112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, /* 7x */ 128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143, /* 8x */ 144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159, /* 9x */ 160,161,162,163,164,165,166,167,168,169,170,171,140,141,142,175, /* Ax */ 176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191, /* Bx */ 192,129,130,131,132,133,134,135,136,137,202,203,204,205,206,207, /* Cx */ 208,145,146,147,148,149,150,151,152,153,218,219,220,221,222,223, /* Dx */ 224,225,162,163,164,165,166,167,168,169,234,235,236,237,238,239, /* Ex */ 240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255, /* Fx */ #endif }; /* ** The following 256 byte lookup table is used to support SQLites built-in ** equivalents to the following standard library functions: ** ** isspace() 0x01 ** isalpha() 0x02 ** isdigit() 0x04 ** isalnum() 0x06 ** isxdigit() 0x08 ** toupper() 0x20 ** SQLite identifier character 0x40 ** ** Bit 0x20 is set if the mapped character requires translation to upper ** case. i.e. if the character is a lower-case ASCII character. ** If x is a lower-case ASCII character, then its upper-case equivalent ** is (x - 0x20). Therefore toupper() can be implemented as: ** ** (x & ~(map[x]&0x20)) ** ** Standard function tolower() is implemented using the sqlite3UpperToLower[] ** array. tolower() is used more often than toupper() by SQLite. ** ** Bit 0x40 is set if the character non-alphanumeric and can be used in an ** SQLite identifier. Identifiers are alphanumerics, "_", "$", and any ** non-ASCII UTF character. Hence the test for whether or not a character is ** part of an identifier is 0x46. ** ** SQLite's versions are identical to the standard versions assuming a ** locale of "C". They are implemented as macros in sqliteInt.h. 
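**
** As a concrete example of the toupper() formula above: 'a' is 0x61 and
** its map entry has bit 0x20 set, so (0x61 & ~0x20)==0x41=='A'; for '1'
** (0x31) bit 0x20 is clear, so the character is returned unchanged.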
*/ #ifdef SQLITE_ASCII SQLITE_PRIVATE const unsigned char sqlite3CtypeMap[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 00..07 ........ */ 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, /* 08..0f ........ */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 10..17 ........ */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 18..1f ........ */ 0x01, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, /* 20..27 !"#$%&' */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 28..2f ()*+,-./ */ 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, /* 30..37 01234567 */ 0x0c, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 38..3f 89:;<=>? */ 0x00, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x02, /* 40..47 @ABCDEFG */ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, /* 48..4f HIJKLMNO */ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, /* 50..57 PQRSTUVW */ 0x02, 0x02, 0x02, 0x00, 0x00, 0x00, 0x00, 0x40, /* 58..5f XYZ[\]^_ */ 0x00, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x22, /* 60..67 `abcdefg */ 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, /* 68..6f hijklmno */ 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, /* 70..77 pqrstuvw */ 0x22, 0x22, 0x22, 0x00, 0x00, 0x00, 0x00, 0x00, /* 78..7f xyz{|}~. */ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* 80..87 ........ */ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* 88..8f ........ */ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* 90..97 ........ */ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* 98..9f ........ */ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* a0..a7 ........ */ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* a8..af ........ */ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* b0..b7 ........ */ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* b8..bf ........ */ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* c0..c7 ........ */ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* c8..cf ........ */ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* d0..d7 ........ */ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* d8..df ........ */ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* e0..e7 ........ */ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* e8..ef ........ */ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* f0..f7 ........ */ 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40 /* f8..ff ........ */ }; #endif /* EVIDENCE-OF: R-02982-34736 In order to maintain full backwards ** compatibility for legacy applications, the URI filename capability is ** disabled by default. ** ** EVIDENCE-OF: R-38799-08373 URI filenames can be enabled or disabled ** using the SQLITE_USE_URI=1 or SQLITE_USE_URI=0 compile-time options. ** ** EVIDENCE-OF: R-43642-56306 By default, URI handling is globally ** disabled. The default value may be changed by compiling with the ** SQLITE_USE_URI symbol defined. */ #ifndef SQLITE_USE_URI # define SQLITE_USE_URI 0 #endif /* EVIDENCE-OF: R-38720-18127 The default setting is determined by the ** SQLITE_ALLOW_COVERING_INDEX_SCAN compile-time option, or is "on" if ** that compile-time option is omitted. */ #ifndef SQLITE_ALLOW_COVERING_INDEX_SCAN # define SQLITE_ALLOW_COVERING_INDEX_SCAN 1 #endif /* The minimum PMA size is set to this value multiplied by the database ** page size in bytes. */ #ifndef SQLITE_SORTER_PMASZ # define SQLITE_SORTER_PMASZ 250 #endif /* ** The following singleton contains the global configuration for ** the SQLite library. 
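**
** Most of these defaults can be changed before sqlite3_initialize() runs;
** for example sqlite3_config(SQLITE_CONFIG_URI, 1) sets bOpenUri and
** sqlite3_config(SQLITE_CONFIG_MEMSTATUS, 0) clears bMemstat.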
*/ SQLITE_PRIVATE SQLITE_WSD struct Sqlite3Config sqlite3Config = { SQLITE_DEFAULT_MEMSTATUS, /* bMemstat */ 1, /* bCoreMutex */ SQLITE_THREADSAFE==1, /* bFullMutex */ SQLITE_USE_URI, /* bOpenUri */ SQLITE_ALLOW_COVERING_INDEX_SCAN, /* bUseCis */ 0x7ffffffe, /* mxStrlen */ 0, /* neverCorrupt */ 128, /* szLookaside */ 500, /* nLookaside */ {0,0,0,0,0,0,0,0}, /* m */ {0,0,0,0,0,0,0,0,0}, /* mutex */ {0,0,0,0,0,0,0,0,0,0,0,0,0},/* pcache2 */ (void*)0, /* pHeap */ 0, /* nHeap */ 0, 0, /* mnHeap, mxHeap */ SQLITE_DEFAULT_MMAP_SIZE, /* szMmap */ SQLITE_MAX_MMAP_SIZE, /* mxMmap */ (void*)0, /* pScratch */ 0, /* szScratch */ 0, /* nScratch */ (void*)0, /* pPage */ 0, /* szPage */ SQLITE_DEFAULT_PCACHE_INITSZ, /* nPage */ 0, /* mxParserStack */ 0, /* sharedCacheEnabled */ SQLITE_SORTER_PMASZ, /* szPma */ /* All the rest should always be initialized to zero */ 0, /* isInit */ 0, /* inProgress */ 0, /* isMutexInit */ 0, /* isMallocInit */ 0, /* isPCacheInit */ 0, /* nRefInitMutex */ 0, /* pInitMutex */ 0, /* xLog */ 0, /* pLogArg */ #ifdef SQLITE_ENABLE_SQLLOG 0, /* xSqllog */ 0, /* pSqllogArg */ #endif #ifdef SQLITE_VDBE_COVERAGE 0, /* xVdbeBranch */ 0, /* pVbeBranchArg */ #endif #ifndef SQLITE_OMIT_BUILTIN_TEST 0, /* xTestCallback */ #endif 0 /* bLocaltimeFault */ }; /* ** Hash table for global functions - functions common to all ** database connections. After initialization, this table is ** read-only. */ SQLITE_PRIVATE SQLITE_WSD FuncDefHash sqlite3GlobalFunctions; /* ** Constant tokens for values 0 and 1. */ SQLITE_PRIVATE const Token sqlite3IntTokens[] = { { "0", 1 }, { "1", 1 } }; /* ** The value of the "pending" byte must be 0x40000000 (1 byte past the ** 1-gibabyte boundary) in a compatible database. SQLite never uses ** the database page that contains the pending byte. It never attempts ** to read or write that page. The pending byte page is set assign ** for use by the VFS layers as space for managing file locks. ** ** During testing, it is often desirable to move the pending byte to ** a different position in the file. This allows code that has to ** deal with the pending byte to run on files that are much smaller ** than 1 GiB. The sqlite3_test_control() interface can be used to ** move the pending byte. ** ** IMPORTANT: Changing the pending byte to any value other than ** 0x40000000 results in an incompatible database file format! ** Changing the pending byte during operation will result in undefined ** and incorrect behavior. */ #ifndef SQLITE_OMIT_WSD SQLITE_PRIVATE int sqlite3PendingByte = 0x40000000; #endif /* #include "opcodes.h" */ /* ** Properties of opcodes. The OPFLG_INITIALIZER macro is ** created by mkopcodeh.awk during compilation. Data is obtained ** from the comments following the "case OP_xxxx:" statements in ** the vdbe.c file. */ SQLITE_PRIVATE const unsigned char sqlite3OpcodeProperty[] = OPFLG_INITIALIZER; /************** End of global.c **********************************************/ /************** Begin file ctime.c *******************************************/ /* ** 2010 February 23 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** ** This file implements routines used to report what compile-time options ** SQLite was built with. 
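**
** For example, an application or test harness can check whether FTS3 was
** compiled in with a call such as:
**
**     if( sqlite3_compileoption_used("SQLITE_ENABLE_FTS3") ){ ... }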
*/ #ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS /* #include "sqliteInt.h" */ /* ** An array of names of all compile-time options. This array should ** be sorted A-Z. ** ** This array looks large, but in a typical installation actually uses ** only a handful of compile-time options, so most times this array is usually ** rather short and uses little memory space. */ static const char * const azCompileOpt[] = { /* These macros are provided to "stringify" the value of the define ** for those options in which the value is meaningful. */ #define CTIMEOPT_VAL_(opt) #opt #define CTIMEOPT_VAL(opt) CTIMEOPT_VAL_(opt) #if SQLITE_32BIT_ROWID "32BIT_ROWID", #endif #if SQLITE_4_BYTE_ALIGNED_MALLOC "4_BYTE_ALIGNED_MALLOC", #endif #if SQLITE_CASE_SENSITIVE_LIKE "CASE_SENSITIVE_LIKE", #endif #if SQLITE_CHECK_PAGES "CHECK_PAGES", #endif #if SQLITE_COVERAGE_TEST "COVERAGE_TEST", #endif #if SQLITE_DEBUG "DEBUG", #endif #if SQLITE_DEFAULT_LOCKING_MODE "DEFAULT_LOCKING_MODE=" CTIMEOPT_VAL(SQLITE_DEFAULT_LOCKING_MODE), #endif #if defined(SQLITE_DEFAULT_MMAP_SIZE) && !defined(SQLITE_DEFAULT_MMAP_SIZE_xc) "DEFAULT_MMAP_SIZE=" CTIMEOPT_VAL(SQLITE_DEFAULT_MMAP_SIZE), #endif #if SQLITE_DISABLE_DIRSYNC "DISABLE_DIRSYNC", #endif #if SQLITE_DISABLE_LFS "DISABLE_LFS", #endif #if SQLITE_ENABLE_API_ARMOR "ENABLE_API_ARMOR", #endif #if SQLITE_ENABLE_ATOMIC_WRITE "ENABLE_ATOMIC_WRITE", #endif #if SQLITE_ENABLE_CEROD "ENABLE_CEROD", #endif #if SQLITE_ENABLE_COLUMN_METADATA "ENABLE_COLUMN_METADATA", #endif #if SQLITE_ENABLE_DBSTAT_VTAB "ENABLE_DBSTAT_VTAB", #endif #if SQLITE_ENABLE_EXPENSIVE_ASSERT "ENABLE_EXPENSIVE_ASSERT", #endif #if SQLITE_ENABLE_FTS1 "ENABLE_FTS1", #endif #if SQLITE_ENABLE_FTS2 "ENABLE_FTS2", #endif #if SQLITE_ENABLE_FTS3 "ENABLE_FTS3", #endif #if SQLITE_ENABLE_FTS3_PARENTHESIS "ENABLE_FTS3_PARENTHESIS", #endif #if SQLITE_ENABLE_FTS4 "ENABLE_FTS4", #endif #if SQLITE_ENABLE_ICU "ENABLE_ICU", #endif #if SQLITE_ENABLE_IOTRACE "ENABLE_IOTRACE", #endif #if SQLITE_ENABLE_LOAD_EXTENSION "ENABLE_LOAD_EXTENSION", #endif #if SQLITE_ENABLE_LOCKING_STYLE "ENABLE_LOCKING_STYLE=" CTIMEOPT_VAL(SQLITE_ENABLE_LOCKING_STYLE), #endif #if SQLITE_ENABLE_MEMORY_MANAGEMENT "ENABLE_MEMORY_MANAGEMENT", #endif #if SQLITE_ENABLE_MEMSYS3 "ENABLE_MEMSYS3", #endif #if SQLITE_ENABLE_MEMSYS5 "ENABLE_MEMSYS5", #endif #if SQLITE_ENABLE_OVERSIZE_CELL_CHECK "ENABLE_OVERSIZE_CELL_CHECK", #endif #if SQLITE_ENABLE_RTREE "ENABLE_RTREE", #endif #if defined(SQLITE_ENABLE_STAT4) "ENABLE_STAT4", #elif defined(SQLITE_ENABLE_STAT3) "ENABLE_STAT3", #endif #if SQLITE_ENABLE_UNLOCK_NOTIFY "ENABLE_UNLOCK_NOTIFY", #endif #if SQLITE_ENABLE_UPDATE_DELETE_LIMIT "ENABLE_UPDATE_DELETE_LIMIT", #endif #if SQLITE_HAS_CODEC "HAS_CODEC", #endif #if HAVE_ISNAN || SQLITE_HAVE_ISNAN "HAVE_ISNAN", #endif #if SQLITE_HOMEGROWN_RECURSIVE_MUTEX "HOMEGROWN_RECURSIVE_MUTEX", #endif #if SQLITE_IGNORE_AFP_LOCK_ERRORS "IGNORE_AFP_LOCK_ERRORS", #endif #if SQLITE_IGNORE_FLOCK_LOCK_ERRORS "IGNORE_FLOCK_LOCK_ERRORS", #endif #ifdef SQLITE_INT64_TYPE "INT64_TYPE", #endif #if SQLITE_LOCK_TRACE "LOCK_TRACE", #endif #if defined(SQLITE_MAX_MMAP_SIZE) && !defined(SQLITE_MAX_MMAP_SIZE_xc) "MAX_MMAP_SIZE=" CTIMEOPT_VAL(SQLITE_MAX_MMAP_SIZE), #endif #ifdef SQLITE_MAX_SCHEMA_RETRY "MAX_SCHEMA_RETRY=" CTIMEOPT_VAL(SQLITE_MAX_SCHEMA_RETRY), #endif #if SQLITE_MEMDEBUG "MEMDEBUG", #endif #if SQLITE_MIXED_ENDIAN_64BIT_FLOAT "MIXED_ENDIAN_64BIT_FLOAT", #endif #if SQLITE_NO_SYNC "NO_SYNC", #endif #if SQLITE_OMIT_ALTERTABLE "OMIT_ALTERTABLE", #endif #if SQLITE_OMIT_ANALYZE "OMIT_ANALYZE", #endif #if 
SQLITE_OMIT_ATTACH "OMIT_ATTACH", #endif #if SQLITE_OMIT_AUTHORIZATION "OMIT_AUTHORIZATION", #endif #if SQLITE_OMIT_AUTOINCREMENT "OMIT_AUTOINCREMENT", #endif #if SQLITE_OMIT_AUTOINIT "OMIT_AUTOINIT", #endif #if SQLITE_OMIT_AUTOMATIC_INDEX "OMIT_AUTOMATIC_INDEX", #endif #if SQLITE_OMIT_AUTORESET "OMIT_AUTORESET", #endif #if SQLITE_OMIT_AUTOVACUUM "OMIT_AUTOVACUUM", #endif #if SQLITE_OMIT_BETWEEN_OPTIMIZATION "OMIT_BETWEEN_OPTIMIZATION", #endif #if SQLITE_OMIT_BLOB_LITERAL "OMIT_BLOB_LITERAL", #endif #if SQLITE_OMIT_BTREECOUNT "OMIT_BTREECOUNT", #endif #if SQLITE_OMIT_BUILTIN_TEST "OMIT_BUILTIN_TEST", #endif #if SQLITE_OMIT_CAST "OMIT_CAST", #endif #if SQLITE_OMIT_CHECK "OMIT_CHECK", #endif #if SQLITE_OMIT_COMPLETE "OMIT_COMPLETE", #endif #if SQLITE_OMIT_COMPOUND_SELECT "OMIT_COMPOUND_SELECT", #endif #if SQLITE_OMIT_CTE "OMIT_CTE", #endif #if SQLITE_OMIT_DATETIME_FUNCS "OMIT_DATETIME_FUNCS", #endif #if SQLITE_OMIT_DECLTYPE "OMIT_DECLTYPE", #endif #if SQLITE_OMIT_DEPRECATED "OMIT_DEPRECATED", #endif #if SQLITE_OMIT_DISKIO "OMIT_DISKIO", #endif #if SQLITE_OMIT_EXPLAIN "OMIT_EXPLAIN", #endif #if SQLITE_OMIT_FLAG_PRAGMAS "OMIT_FLAG_PRAGMAS", #endif #if SQLITE_OMIT_FLOATING_POINT "OMIT_FLOATING_POINT", #endif #if SQLITE_OMIT_FOREIGN_KEY "OMIT_FOREIGN_KEY", #endif #if SQLITE_OMIT_GET_TABLE "OMIT_GET_TABLE", #endif #if SQLITE_OMIT_INCRBLOB "OMIT_INCRBLOB", #endif #if SQLITE_OMIT_INTEGRITY_CHECK "OMIT_INTEGRITY_CHECK", #endif #if SQLITE_OMIT_LIKE_OPTIMIZATION "OMIT_LIKE_OPTIMIZATION", #endif #if SQLITE_OMIT_LOAD_EXTENSION "OMIT_LOAD_EXTENSION", #endif #if SQLITE_OMIT_LOCALTIME "OMIT_LOCALTIME", #endif #if SQLITE_OMIT_LOOKASIDE "OMIT_LOOKASIDE", #endif #if SQLITE_OMIT_MEMORYDB "OMIT_MEMORYDB", #endif #if SQLITE_OMIT_OR_OPTIMIZATION "OMIT_OR_OPTIMIZATION", #endif #if SQLITE_OMIT_PAGER_PRAGMAS "OMIT_PAGER_PRAGMAS", #endif #if SQLITE_OMIT_PRAGMA "OMIT_PRAGMA", #endif #if SQLITE_OMIT_PROGRESS_CALLBACK "OMIT_PROGRESS_CALLBACK", #endif #if SQLITE_OMIT_QUICKBALANCE "OMIT_QUICKBALANCE", #endif #if SQLITE_OMIT_REINDEX "OMIT_REINDEX", #endif #if SQLITE_OMIT_SCHEMA_PRAGMAS "OMIT_SCHEMA_PRAGMAS", #endif #if SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS "OMIT_SCHEMA_VERSION_PRAGMAS", #endif #if SQLITE_OMIT_SHARED_CACHE "OMIT_SHARED_CACHE", #endif #if SQLITE_OMIT_SUBQUERY "OMIT_SUBQUERY", #endif #if SQLITE_OMIT_TCL_VARIABLE "OMIT_TCL_VARIABLE", #endif #if SQLITE_OMIT_TEMPDB "OMIT_TEMPDB", #endif #if SQLITE_OMIT_TRACE "OMIT_TRACE", #endif #if SQLITE_OMIT_TRIGGER "OMIT_TRIGGER", #endif #if SQLITE_OMIT_TRUNCATE_OPTIMIZATION "OMIT_TRUNCATE_OPTIMIZATION", #endif #if SQLITE_OMIT_UTF16 "OMIT_UTF16", #endif #if SQLITE_OMIT_VACUUM "OMIT_VACUUM", #endif #if SQLITE_OMIT_VIEW "OMIT_VIEW", #endif #if SQLITE_OMIT_VIRTUALTABLE "OMIT_VIRTUALTABLE", #endif #if SQLITE_OMIT_WAL "OMIT_WAL", #endif #if SQLITE_OMIT_WSD "OMIT_WSD", #endif #if SQLITE_OMIT_XFER_OPT "OMIT_XFER_OPT", #endif #if SQLITE_PERFORMANCE_TRACE "PERFORMANCE_TRACE", #endif #if SQLITE_PROXY_DEBUG "PROXY_DEBUG", #endif #if SQLITE_RTREE_INT_ONLY "RTREE_INT_ONLY", #endif #if SQLITE_SECURE_DELETE "SECURE_DELETE", #endif #if SQLITE_SMALL_STACK "SMALL_STACK", #endif #if SQLITE_SOUNDEX "SOUNDEX", #endif #if SQLITE_SYSTEM_MALLOC "SYSTEM_MALLOC", #endif #if SQLITE_TCL "TCL", #endif #if defined(SQLITE_TEMP_STORE) && !defined(SQLITE_TEMP_STORE_xc) "TEMP_STORE=" CTIMEOPT_VAL(SQLITE_TEMP_STORE), #endif #if SQLITE_TEST "TEST", #endif #if defined(SQLITE_THREADSAFE) "THREADSAFE=" CTIMEOPT_VAL(SQLITE_THREADSAFE), #endif #if SQLITE_USE_ALLOCA "USE_ALLOCA", #endif #if SQLITE_USER_AUTHENTICATION 
"USER_AUTHENTICATION", #endif #if SQLITE_WIN32_MALLOC "WIN32_MALLOC", #endif #if SQLITE_ZERO_MALLOC "ZERO_MALLOC" #endif }; /* ** Given the name of a compile-time option, return true if that option ** was used and false if not. ** ** The name can optionally begin with "SQLITE_" but the "SQLITE_" prefix ** is not required for a match. */ SQLITE_API int SQLITE_STDCALL sqlite3_compileoption_used(const char *zOptName){ int i, n; #if SQLITE_ENABLE_API_ARMOR if( zOptName==0 ){ (void)SQLITE_MISUSE_BKPT; return 0; } #endif if( sqlite3StrNICmp(zOptName, "SQLITE_", 7)==0 ) zOptName += 7; n = sqlite3Strlen30(zOptName); /* Since ArraySize(azCompileOpt) is normally in single digits, a ** linear search is adequate. No need for a binary search. */ for(i=0; i=0 && NaDb[] (or -1) */ u8 nullRow; /* True if pointing to a row with no data */ u8 deferredMoveto; /* A call to sqlite3BtreeMoveto() is needed */ Bool isEphemeral:1; /* True for an ephemeral table */ Bool useRandomRowid:1;/* Generate new record numbers semi-randomly */ Bool isTable:1; /* True if a table requiring integer keys */ Bool isOrdered:1; /* True if the underlying table is BTREE_UNORDERED */ Pgno pgnoRoot; /* Root page of the open btree cursor */ sqlite3_vtab_cursor *pVtabCursor; /* The cursor for a virtual table */ i64 seqCount; /* Sequence counter */ i64 movetoTarget; /* Argument to the deferred sqlite3BtreeMoveto() */ VdbeSorter *pSorter; /* Sorter object for OP_SorterOpen cursors */ #ifdef SQLITE_ENABLE_COLUMN_USED_MASK u64 maskUsed; /* Mask of columns used by this cursor */ #endif /* Cached information about the header for the data record that the ** cursor is currently pointing to. Only valid if cacheStatus matches ** Vdbe.cacheCtr. Vdbe.cacheCtr will never take on the value of ** CACHE_STALE and so setting cacheStatus=CACHE_STALE guarantees that ** the cache is out of date. ** ** aRow might point to (ephemeral) data for the current row, or it might ** be NULL. */ u32 cacheStatus; /* Cache is valid if this matches Vdbe.cacheCtr */ u32 payloadSize; /* Total number of bytes in the record */ u32 szRow; /* Byte available in aRow */ u32 iHdrOffset; /* Offset to next unparsed byte of the header */ const u8 *aRow; /* Data for the current row, if all on one page */ u32 *aOffset; /* Pointer to aType[nField] */ u32 aType[1]; /* Type values for all entries in the record */ /* 2*nField extra array elements allocated for aType[], beyond the one ** static element declared in the structure. nField total array slots for ** aType[] and nField+1 array slots for aOffset[] */ }; typedef struct VdbeCursor VdbeCursor; /* ** When a sub-program is executed (OP_Program), a structure of this type ** is allocated to store the current value of the program counter, as ** well as the current memory cell array and various other frame specific ** values stored in the Vdbe struct. When the sub-program is finished, ** these values are copied back to the Vdbe from the VdbeFrame structure, ** restoring the state of the VM to as it was before the sub-program ** began executing. ** ** The memory for a VdbeFrame object is allocated and managed by a memory ** cell in the parent (calling) frame. When the memory cell is deleted or ** overwritten, the VdbeFrame object is not freed immediately. Instead, it ** is linked into the Vdbe.pDelFrame list. The contents of the Vdbe.pDelFrame ** list is deleted when the VM is reset in VdbeHalt(). 
The reason for doing ** this instead of deleting the VdbeFrame immediately is to avoid recursive ** calls to sqlite3VdbeMemRelease() when the memory cells belonging to the ** child frame are released. ** ** The currently executing frame is stored in Vdbe.pFrame. Vdbe.pFrame is ** set to NULL if the currently executing frame is the main program. */ typedef struct VdbeFrame VdbeFrame; struct VdbeFrame { Vdbe *v; /* VM this frame belongs to */ VdbeFrame *pParent; /* Parent of this frame, or NULL if parent is main */ Op *aOp; /* Program instructions for parent frame */ i64 *anExec; /* Event counters from parent frame */ Mem *aMem; /* Array of memory cells for parent frame */ u8 *aOnceFlag; /* Array of OP_Once flags for parent frame */ VdbeCursor **apCsr; /* Array of Vdbe cursors for parent frame */ void *token; /* Copy of SubProgram.token */ i64 lastRowid; /* Last insert rowid (sqlite3.lastRowid) */ int nCursor; /* Number of entries in apCsr */ int pc; /* Program Counter in parent (calling) frame */ int nOp; /* Size of aOp array */ int nMem; /* Number of entries in aMem */ int nOnceFlag; /* Number of entries in aOnceFlag */ int nChildMem; /* Number of memory cells for child frame */ int nChildCsr; /* Number of cursors for child frame */ int nChange; /* Statement changes (Vdbe.nChange) */ int nDbChange; /* Value of db->nChange */ }; #define VdbeFrameMem(p) ((Mem *)&((u8 *)p)[ROUND8(sizeof(VdbeFrame))]) /* ** A value for VdbeCursor.cacheValid that means the cache is always invalid. */ #define CACHE_STALE 0 /* ** Internally, the vdbe manipulates nearly all SQL values as Mem ** structures. Each Mem struct may cache multiple representations (string, ** integer etc.) of the same value. */ struct Mem { union MemValue { double r; /* Real value used when MEM_Real is set in flags */ i64 i; /* Integer value used when MEM_Int is set in flags */ int nZero; /* Used when bit MEM_Zero is set in flags */ FuncDef *pDef; /* Used only when flags==MEM_Agg */ RowSet *pRowSet; /* Used only when flags==MEM_RowSet */ VdbeFrame *pFrame; /* Used when flags==MEM_Frame */ } u; u16 flags; /* Some combination of MEM_Null, MEM_Str, MEM_Dyn, etc. */ u8 enc; /* SQLITE_UTF8, SQLITE_UTF16BE, SQLITE_UTF16LE */ int n; /* Number of characters in string value, excluding '\0' */ char *z; /* String or BLOB value */ /* ShallowCopy only needs to copy the information above */ char *zMalloc; /* Space to hold MEM_Str or MEM_Blob if szMalloc>0 */ int szMalloc; /* Size of the zMalloc allocation */ u32 uTemp; /* Transient storage for serial_type in OP_MakeRecord */ sqlite3 *db; /* The associated database connection */ void (*xDel)(void*);/* Destructor for Mem.z - only valid if MEM_Dyn */ #ifdef SQLITE_DEBUG Mem *pScopyFrom; /* This Mem is a shallow copy of pScopyFrom */ void *pFiller; /* So that sizeof(Mem) is a multiple of 8 */ #endif }; /* ** Size of struct Mem not including the Mem.zMalloc member or anything that ** follows. */ #define MEMCELLSIZE offsetof(Mem,zMalloc) /* One or more of the following flags are set to indicate the validOK ** representations of the value stored in the Mem struct. ** ** If the MEM_Null flag is set, then the value is an SQL NULL value. ** No other flags may be set in this case. ** ** If the MEM_Str flag is set then Mem.z points at a string representation. ** Usually this is encoded in the same unicode encoding as the main ** database (see below for exceptions). If the MEM_Term flag is also ** set, then the string is nul terminated. The MEM_Int and MEM_Real ** flags may coexist with the MEM_Str flag. 
*/ #define MEM_Null 0x0001 /* Value is NULL */ #define MEM_Str 0x0002 /* Value is a string */ #define MEM_Int 0x0004 /* Value is an integer */ #define MEM_Real 0x0008 /* Value is a real number */ #define MEM_Blob 0x0010 /* Value is a BLOB */ #define MEM_AffMask 0x001f /* Mask of affinity bits */ #define MEM_RowSet 0x0020 /* Value is a RowSet object */ #define MEM_Frame 0x0040 /* Value is a VdbeFrame object */ #define MEM_Undefined 0x0080 /* Value is undefined */ #define MEM_Cleared 0x0100 /* NULL set by OP_Null, not from data */ #define MEM_TypeMask 0x01ff /* Mask of type bits */ /* Whenever Mem contains a valid string or blob representation, one of ** the following flags must be set to determine the memory management ** policy for Mem.z. The MEM_Term flag tells us whether or not the ** string is \000 or \u0000 terminated */ #define MEM_Term 0x0200 /* String rep is nul terminated */ #define MEM_Dyn 0x0400 /* Need to call Mem.xDel() on Mem.z */ #define MEM_Static 0x0800 /* Mem.z points to a static string */ #define MEM_Ephem 0x1000 /* Mem.z points to an ephemeral string */ #define MEM_Agg 0x2000 /* Mem.z points to an agg function context */ #define MEM_Zero 0x4000 /* Mem.i contains count of 0s appended to blob */ #ifdef SQLITE_OMIT_INCRBLOB #undef MEM_Zero #define MEM_Zero 0x0000 #endif /* ** Clear any existing type flags from a Mem and replace them with f */ #define MemSetTypeFlag(p, f) \ ((p)->flags = ((p)->flags&~(MEM_TypeMask|MEM_Zero))|f) /* ** Return true if a memory cell is not marked as invalid. This macro ** is for use inside assert() statements only. */ #ifdef SQLITE_DEBUG #define memIsValid(M) ((M)->flags & MEM_Undefined)==0 #endif /* ** Each auxiliary data pointer stored by a user defined function ** implementation calling sqlite3_set_auxdata() is stored in an instance ** of this structure. All such structures associated with a single VM ** are stored in a linked list headed at Vdbe.pAuxData. All are destroyed ** when the VM is halted (if not before). */ struct AuxData { int iOp; /* Instruction number of OP_Function opcode */ int iArg; /* Index of function argument. */ void *pAux; /* Aux data pointer */ void (*xDelete)(void *); /* Destructor for the aux data */ AuxData *pNext; /* Next element in list */ }; /* ** The "context" argument for an installable function. A pointer to an ** instance of this structure is the first argument to the routines used ** implement the SQL functions. ** ** There is a typedef for this structure in sqlite.h. So all routines, ** even the public interface to SQLite, can use a pointer to this structure. ** But this file is the only place where the internal details of this ** structure are known. ** ** This structure is defined inside of vdbeInt.h because it uses substructures ** (Mem) which are only defined there. */ struct sqlite3_context { Mem *pOut; /* The return value is stored here */ FuncDef *pFunc; /* Pointer to function information */ Mem *pMem; /* Memory cell used to store aggregate context */ Vdbe *pVdbe; /* The VM that owns this context */ int iOp; /* Instruction number of OP_Function */ int isError; /* Error code returned by the function. */ u8 skipFlag; /* Skip accumulator loading if true */ u8 fErrorOrAux; /* isError!=0 or pVdbe->pAuxData modified */ u8 argc; /* Number of arguments */ sqlite3_value *argv[1]; /* Argument set */ }; /* ** An Explain object accumulates indented output which is helpful ** in describing recursive data structures. 
*/ struct Explain { Vdbe *pVdbe; /* Attach the explanation to this Vdbe */ StrAccum str; /* The string being accumulated */ int nIndent; /* Number of elements in aIndent */ u16 aIndent[100]; /* Levels of indentation */ char zBase[100]; /* Initial space */ }; /* A bitfield type for use inside of structures. Always follow with :N where ** N is the number of bits. */ typedef unsigned bft; /* Bit Field Type */ typedef struct ScanStatus ScanStatus; struct ScanStatus { int addrExplain; /* OP_Explain for loop */ int addrLoop; /* Address of "loops" counter */ int addrVisit; /* Address of "rows visited" counter */ int iSelectID; /* The "Select-ID" for this loop */ LogEst nEst; /* Estimated output rows per loop */ char *zName; /* Name of table or index */ }; /* ** An instance of the virtual machine. This structure contains the complete ** state of the virtual machine. ** ** The "sqlite3_stmt" structure pointer that is returned by sqlite3_prepare() ** is really a pointer to an instance of this structure. */ struct Vdbe { sqlite3 *db; /* The database connection that owns this statement */ Op *aOp; /* Space to hold the virtual machine's program */ Mem *aMem; /* The memory locations */ Mem **apArg; /* Arguments to currently executing user function */ Mem *aColName; /* Column names to return */ Mem *pResultSet; /* Pointer to an array of results */ Parse *pParse; /* Parsing context used to create this Vdbe */ int nMem; /* Number of memory locations currently allocated */ int nOp; /* Number of instructions in the program */ int nCursor; /* Number of slots in apCsr[] */ u32 magic; /* Magic number for sanity checking */ char *zErrMsg; /* Error message written here */ Vdbe *pPrev,*pNext; /* Linked list of VDBEs with the same Vdbe.db */ VdbeCursor **apCsr; /* One element of this array for each open cursor */ Mem *aVar; /* Values for the OP_Variable opcode. */ char **azVar; /* Name of variables */ ynVar nVar; /* Number of entries in aVar[] */ ynVar nzVar; /* Number of entries in azVar[] */ u32 cacheCtr; /* VdbeCursor row cache generation counter */ int pc; /* The program counter */ int rc; /* Value to return */ #ifdef SQLITE_DEBUG int rcApp; /* errcode set by sqlite3_result_error_code() */ #endif u16 nResColumn; /* Number of columns in one row of the result set */ u8 errorAction; /* Recovery action to do in case of an error */ u8 minWriteFileFormat; /* Minimum file format for writable database files */ bft explain:2; /* True if EXPLAIN present on SQL command */ bft changeCntOn:1; /* True to update the change-counter */ bft expired:1; /* True if the VM needs to be recompiled */ bft runOnlyOnce:1; /* Automatically expire on reset */ bft usesStmtJournal:1; /* True if uses a statement journal */ bft readOnly:1; /* True for statements that do not write */ bft bIsReader:1; /* True for statements that read */ bft isPrepareV2:1; /* True if prepared with prepare_v2() */ bft doingRerun:1; /* True if rerunning after an auto-reprepare */ int nChange; /* Number of db changes made since last reset */ yDbMask btreeMask; /* Bitmask of db->aDb[] entries referenced */ yDbMask lockMask; /* Subset of btreeMask that requires a lock */ int iStatement; /* Statement number (or 0 if has not opened stmt) */ u32 aCounter[5]; /* Counters used by sqlite3_stmt_status() */ #ifndef SQLITE_OMIT_TRACE i64 startTime; /* Time when query started - used for profiling */ #endif i64 iCurrentTime; /* Value of julianday('now') for this statement */ i64 nFkConstraint; /* Number of imm. FK constraints this VM */ i64 nStmtDefCons; /* Number of def. 
constraints when stmt started */ i64 nStmtDefImmCons; /* Number of def. imm constraints when stmt started */ char *zSql; /* Text of the SQL statement that generated this */ void *pFree; /* Free this when deleting the vdbe */ VdbeFrame *pFrame; /* Parent frame */ VdbeFrame *pDelFrame; /* List of frame objects to free on VM reset */ int nFrame; /* Number of frames in pFrame list */ u32 expmask; /* Binding to these vars invalidates VM */ SubProgram *pProgram; /* Linked list of all sub-programs used by VM */ int nOnceFlag; /* Size of array aOnceFlag[] */ u8 *aOnceFlag; /* Flags for OP_Once */ AuxData *pAuxData; /* Linked list of auxdata allocations */ #ifdef SQLITE_ENABLE_STMT_SCANSTATUS i64 *anExec; /* Number of times each op has been executed */ int nScan; /* Entries in aScan[] */ ScanStatus *aScan; /* Scan definitions for sqlite3_stmt_scanstatus() */ #endif }; /* ** The following are allowed values for Vdbe.magic */ #define VDBE_MAGIC_INIT 0x26bceaa5 /* Building a VDBE program */ #define VDBE_MAGIC_RUN 0xbdf20da3 /* VDBE is ready to execute */ #define VDBE_MAGIC_HALT 0x519c2973 /* VDBE has completed execution */ #define VDBE_MAGIC_DEAD 0xb606c3c8 /* The VDBE has been deallocated */ /* ** Function prototypes */ SQLITE_PRIVATE void sqlite3VdbeError(Vdbe*, const char *, ...); SQLITE_PRIVATE void sqlite3VdbeFreeCursor(Vdbe *, VdbeCursor*); void sqliteVdbePopStack(Vdbe*,int); SQLITE_PRIVATE int sqlite3VdbeCursorMoveto(VdbeCursor*); SQLITE_PRIVATE int sqlite3VdbeCursorRestore(VdbeCursor*); #if defined(SQLITE_DEBUG) || defined(VDBE_PROFILE) SQLITE_PRIVATE void sqlite3VdbePrintOp(FILE*, int, Op*); #endif SQLITE_PRIVATE u32 sqlite3VdbeSerialTypeLen(u32); SQLITE_PRIVATE u32 sqlite3VdbeSerialType(Mem*, int); SQLITE_PRIVATE u32 sqlite3VdbeSerialPut(unsigned char*, Mem*, u32); SQLITE_PRIVATE u32 sqlite3VdbeSerialGet(const unsigned char*, u32, Mem*); SQLITE_PRIVATE void sqlite3VdbeDeleteAuxData(Vdbe*, int, int); int sqlite2BtreeKeyCompare(BtCursor *, const void *, int, int, int *); SQLITE_PRIVATE int sqlite3VdbeIdxKeyCompare(sqlite3*,VdbeCursor*,UnpackedRecord*,int*); SQLITE_PRIVATE int sqlite3VdbeIdxRowid(sqlite3*, BtCursor*, i64*); SQLITE_PRIVATE int sqlite3VdbeExec(Vdbe*); SQLITE_PRIVATE int sqlite3VdbeList(Vdbe*); SQLITE_PRIVATE int sqlite3VdbeHalt(Vdbe*); SQLITE_PRIVATE int sqlite3VdbeChangeEncoding(Mem *, int); SQLITE_PRIVATE int sqlite3VdbeMemTooBig(Mem*); SQLITE_PRIVATE int sqlite3VdbeMemCopy(Mem*, const Mem*); SQLITE_PRIVATE void sqlite3VdbeMemShallowCopy(Mem*, const Mem*, int); SQLITE_PRIVATE void sqlite3VdbeMemMove(Mem*, Mem*); SQLITE_PRIVATE int sqlite3VdbeMemNulTerminate(Mem*); SQLITE_PRIVATE int sqlite3VdbeMemSetStr(Mem*, const char*, int, u8, void(*)(void*)); SQLITE_PRIVATE void sqlite3VdbeMemSetInt64(Mem*, i64); #ifdef SQLITE_OMIT_FLOATING_POINT # define sqlite3VdbeMemSetDouble sqlite3VdbeMemSetInt64 #else SQLITE_PRIVATE void sqlite3VdbeMemSetDouble(Mem*, double); #endif SQLITE_PRIVATE void sqlite3VdbeMemInit(Mem*,sqlite3*,u16); SQLITE_PRIVATE void sqlite3VdbeMemSetNull(Mem*); SQLITE_PRIVATE void sqlite3VdbeMemSetZeroBlob(Mem*,int); SQLITE_PRIVATE void sqlite3VdbeMemSetRowSet(Mem*); SQLITE_PRIVATE int sqlite3VdbeMemMakeWriteable(Mem*); SQLITE_PRIVATE int sqlite3VdbeMemStringify(Mem*, u8, u8); SQLITE_PRIVATE i64 sqlite3VdbeIntValue(Mem*); SQLITE_PRIVATE int sqlite3VdbeMemIntegerify(Mem*); SQLITE_PRIVATE double sqlite3VdbeRealValue(Mem*); SQLITE_PRIVATE void sqlite3VdbeIntegerAffinity(Mem*); SQLITE_PRIVATE int sqlite3VdbeMemRealify(Mem*); SQLITE_PRIVATE int sqlite3VdbeMemNumerify(Mem*); 
SQLITE_PRIVATE void sqlite3VdbeMemCast(Mem*,u8,u8); SQLITE_PRIVATE int sqlite3VdbeMemFromBtree(BtCursor*,u32,u32,int,Mem*); SQLITE_PRIVATE void sqlite3VdbeMemRelease(Mem *p); #define VdbeMemDynamic(X) \ (((X)->flags&(MEM_Agg|MEM_Dyn|MEM_RowSet|MEM_Frame))!=0) SQLITE_PRIVATE int sqlite3VdbeMemFinalize(Mem*, FuncDef*); SQLITE_PRIVATE const char *sqlite3OpcodeName(int); SQLITE_PRIVATE int sqlite3VdbeMemGrow(Mem *pMem, int n, int preserve); SQLITE_PRIVATE int sqlite3VdbeMemClearAndResize(Mem *pMem, int n); SQLITE_PRIVATE int sqlite3VdbeCloseStatement(Vdbe *, int); SQLITE_PRIVATE void sqlite3VdbeFrameDelete(VdbeFrame*); SQLITE_PRIVATE int sqlite3VdbeFrameRestore(VdbeFrame *); SQLITE_PRIVATE int sqlite3VdbeTransferError(Vdbe *p); SQLITE_PRIVATE int sqlite3VdbeSorterInit(sqlite3 *, int, VdbeCursor *); SQLITE_PRIVATE void sqlite3VdbeSorterReset(sqlite3 *, VdbeSorter *); SQLITE_PRIVATE void sqlite3VdbeSorterClose(sqlite3 *, VdbeCursor *); SQLITE_PRIVATE int sqlite3VdbeSorterRowkey(const VdbeCursor *, Mem *); SQLITE_PRIVATE int sqlite3VdbeSorterNext(sqlite3 *, const VdbeCursor *, int *); SQLITE_PRIVATE int sqlite3VdbeSorterRewind(const VdbeCursor *, int *); SQLITE_PRIVATE int sqlite3VdbeSorterWrite(const VdbeCursor *, Mem *); SQLITE_PRIVATE int sqlite3VdbeSorterCompare(const VdbeCursor *, Mem *, int, int *); #if !defined(SQLITE_OMIT_SHARED_CACHE) && SQLITE_THREADSAFE>0 SQLITE_PRIVATE void sqlite3VdbeEnter(Vdbe*); SQLITE_PRIVATE void sqlite3VdbeLeave(Vdbe*); #else # define sqlite3VdbeEnter(X) # define sqlite3VdbeLeave(X) #endif #ifdef SQLITE_DEBUG SQLITE_PRIVATE void sqlite3VdbeMemAboutToChange(Vdbe*,Mem*); SQLITE_PRIVATE int sqlite3VdbeCheckMemInvariants(Mem*); #endif #ifndef SQLITE_OMIT_FOREIGN_KEY SQLITE_PRIVATE int sqlite3VdbeCheckFk(Vdbe *, int); #else # define sqlite3VdbeCheckFk(p,i) 0 #endif SQLITE_PRIVATE int sqlite3VdbeMemTranslate(Mem*, u8); #ifdef SQLITE_DEBUG SQLITE_PRIVATE void sqlite3VdbePrintSql(Vdbe*); SQLITE_PRIVATE void sqlite3VdbeMemPrettyPrint(Mem *pMem, char *zBuf); #endif SQLITE_PRIVATE int sqlite3VdbeMemHandleBom(Mem *pMem); #ifndef SQLITE_OMIT_INCRBLOB SQLITE_PRIVATE int sqlite3VdbeMemExpandBlob(Mem *); #define ExpandBlob(P) (((P)->flags&MEM_Zero)?sqlite3VdbeMemExpandBlob(P):0) #else #define sqlite3VdbeMemExpandBlob(x) SQLITE_OK #define ExpandBlob(P) SQLITE_OK #endif #endif /* !defined(_VDBEINT_H_) */ /************** End of vdbeInt.h *********************************************/ /************** Continuing where we left off in status.c *********************/ /* ** Variables in which to record status information. */ typedef struct sqlite3StatType sqlite3StatType; static SQLITE_WSD struct sqlite3StatType { #if SQLITE_PTRSIZE>4 sqlite3_int64 nowValue[10]; /* Current value */ sqlite3_int64 mxValue[10]; /* Maximum value */ #else u32 nowValue[10]; /* Current value */ u32 mxValue[10]; /* Maximum value */ #endif } sqlite3Stat = { {0,}, {0,} }; /* ** Elements of sqlite3Stat[] are protected by either the memory allocator ** mutex, or by the pcache1 mutex. The following array determines which. */ static const char statMutex[] = { 0, /* SQLITE_STATUS_MEMORY_USED */ 1, /* SQLITE_STATUS_PAGECACHE_USED */ 1, /* SQLITE_STATUS_PAGECACHE_OVERFLOW */ 0, /* SQLITE_STATUS_SCRATCH_USED */ 0, /* SQLITE_STATUS_SCRATCH_OVERFLOW */ 0, /* SQLITE_STATUS_MALLOC_SIZE */ 0, /* SQLITE_STATUS_PARSER_STACK */ 1, /* SQLITE_STATUS_PAGECACHE_SIZE */ 0, /* SQLITE_STATUS_SCRATCH_SIZE */ 0, /* SQLITE_STATUS_MALLOC_COUNT */ }; /* The "wsdStat" macro will resolve to the status information ** state vector. 
If writable static data is unsupported on the target, ** we have to locate the state vector at run-time. In the more common ** case where writable static data is supported, wsdStat can refer directly ** to the "sqlite3Stat" state vector declared above. */ #ifdef SQLITE_OMIT_WSD # define wsdStatInit sqlite3StatType *x = &GLOBAL(sqlite3StatType,sqlite3Stat) # define wsdStat x[0] #else # define wsdStatInit # define wsdStat sqlite3Stat #endif /* ** Return the current value of a status parameter. The caller must ** be holding the appropriate mutex. */ SQLITE_PRIVATE sqlite3_int64 sqlite3StatusValue(int op){ wsdStatInit; assert( op>=0 && op=0 && op=0 && op=0 && opwsdStat.mxValue[op] ){ wsdStat.mxValue[op] = wsdStat.nowValue[op]; } } SQLITE_PRIVATE void sqlite3StatusDown(int op, int N){ wsdStatInit; assert( N>=0 ); assert( op>=0 && op=0 && op=0 && op=0 && opwsdStat.mxValue[op] ){ wsdStat.mxValue[op] = wsdStat.nowValue[op]; } } /* ** Query status information. */ SQLITE_API int SQLITE_STDCALL sqlite3_status64( int op, sqlite3_int64 *pCurrent, sqlite3_int64 *pHighwater, int resetFlag ){ sqlite3_mutex *pMutex; wsdStatInit; if( op<0 || op>=ArraySize(wsdStat.nowValue) ){ return SQLITE_MISUSE_BKPT; } #ifdef SQLITE_ENABLE_API_ARMOR if( pCurrent==0 || pHighwater==0 ) return SQLITE_MISUSE_BKPT; #endif pMutex = statMutex[op] ? sqlite3Pcache1Mutex() : sqlite3MallocMutex(); sqlite3_mutex_enter(pMutex); *pCurrent = wsdStat.nowValue[op]; *pHighwater = wsdStat.mxValue[op]; if( resetFlag ){ wsdStat.mxValue[op] = wsdStat.nowValue[op]; } sqlite3_mutex_leave(pMutex); (void)pMutex; /* Prevent warning when SQLITE_THREADSAFE=0 */ return SQLITE_OK; } SQLITE_API int SQLITE_STDCALL sqlite3_status(int op, int *pCurrent, int *pHighwater, int resetFlag){ sqlite3_int64 iCur, iHwtr; int rc; #ifdef SQLITE_ENABLE_API_ARMOR if( pCurrent==0 || pHighwater==0 ) return SQLITE_MISUSE_BKPT; #endif rc = sqlite3_status64(op, &iCur, &iHwtr, resetFlag); if( rc==0 ){ *pCurrent = (int)iCur; *pHighwater = (int)iHwtr; } return rc; } /* ** Query status information for a single database connection */ SQLITE_API int SQLITE_STDCALL sqlite3_db_status( sqlite3 *db, /* The database connection whose status is desired */ int op, /* Status verb */ int *pCurrent, /* Write current value here */ int *pHighwater, /* Write high-water mark here */ int resetFlag /* Reset high-water mark if true */ ){ int rc = SQLITE_OK; /* Return code */ #ifdef SQLITE_ENABLE_API_ARMOR if( !sqlite3SafetyCheckOk(db) || pCurrent==0|| pHighwater==0 ){ return SQLITE_MISUSE_BKPT; } #endif sqlite3_mutex_enter(db->mutex); switch( op ){ case SQLITE_DBSTATUS_LOOKASIDE_USED: { *pCurrent = db->lookaside.nOut; *pHighwater = db->lookaside.mxOut; if( resetFlag ){ db->lookaside.mxOut = db->lookaside.nOut; } break; } case SQLITE_DBSTATUS_LOOKASIDE_HIT: case SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE: case SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL: { testcase( op==SQLITE_DBSTATUS_LOOKASIDE_HIT ); testcase( op==SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE ); testcase( op==SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL ); assert( (op-SQLITE_DBSTATUS_LOOKASIDE_HIT)>=0 ); assert( (op-SQLITE_DBSTATUS_LOOKASIDE_HIT)<3 ); *pCurrent = 0; *pHighwater = db->lookaside.anStat[op - SQLITE_DBSTATUS_LOOKASIDE_HIT]; if( resetFlag ){ db->lookaside.anStat[op - SQLITE_DBSTATUS_LOOKASIDE_HIT] = 0; } break; } /* ** Return an approximation for the amount of memory currently used ** by all pagers associated with the given database connection. The ** highwater mark is meaningless and is returned as zero. 
*/ case SQLITE_DBSTATUS_CACHE_USED: { int totalUsed = 0; int i; sqlite3BtreeEnterAll(db); for(i=0; inDb; i++){ Btree *pBt = db->aDb[i].pBt; if( pBt ){ Pager *pPager = sqlite3BtreePager(pBt); totalUsed += sqlite3PagerMemUsed(pPager); } } sqlite3BtreeLeaveAll(db); *pCurrent = totalUsed; *pHighwater = 0; break; } /* ** *pCurrent gets an accurate estimate of the amount of memory used ** to store the schema for all databases (main, temp, and any ATTACHed ** databases. *pHighwater is set to zero. */ case SQLITE_DBSTATUS_SCHEMA_USED: { int i; /* Used to iterate through schemas */ int nByte = 0; /* Used to accumulate return value */ sqlite3BtreeEnterAll(db); db->pnBytesFreed = &nByte; for(i=0; inDb; i++){ Schema *pSchema = db->aDb[i].pSchema; if( ALWAYS(pSchema!=0) ){ HashElem *p; nByte += sqlite3GlobalConfig.m.xRoundup(sizeof(HashElem)) * ( pSchema->tblHash.count + pSchema->trigHash.count + pSchema->idxHash.count + pSchema->fkeyHash.count ); nByte += sqlite3MallocSize(pSchema->tblHash.ht); nByte += sqlite3MallocSize(pSchema->trigHash.ht); nByte += sqlite3MallocSize(pSchema->idxHash.ht); nByte += sqlite3MallocSize(pSchema->fkeyHash.ht); for(p=sqliteHashFirst(&pSchema->trigHash); p; p=sqliteHashNext(p)){ sqlite3DeleteTrigger(db, (Trigger*)sqliteHashData(p)); } for(p=sqliteHashFirst(&pSchema->tblHash); p; p=sqliteHashNext(p)){ sqlite3DeleteTable(db, (Table *)sqliteHashData(p)); } } } db->pnBytesFreed = 0; sqlite3BtreeLeaveAll(db); *pHighwater = 0; *pCurrent = nByte; break; } /* ** *pCurrent gets an accurate estimate of the amount of memory used ** to store all prepared statements. ** *pHighwater is set to zero. */ case SQLITE_DBSTATUS_STMT_USED: { struct Vdbe *pVdbe; /* Used to iterate through VMs */ int nByte = 0; /* Used to accumulate return value */ db->pnBytesFreed = &nByte; for(pVdbe=db->pVdbe; pVdbe; pVdbe=pVdbe->pNext){ sqlite3VdbeClearObject(db, pVdbe); sqlite3DbFree(db, pVdbe); } db->pnBytesFreed = 0; *pHighwater = 0; /* IMP: R-64479-57858 */ *pCurrent = nByte; break; } /* ** Set *pCurrent to the total cache hits or misses encountered by all ** pagers the database handle is connected to. *pHighwater is always set ** to zero. */ case SQLITE_DBSTATUS_CACHE_HIT: case SQLITE_DBSTATUS_CACHE_MISS: case SQLITE_DBSTATUS_CACHE_WRITE:{ int i; int nRet = 0; assert( SQLITE_DBSTATUS_CACHE_MISS==SQLITE_DBSTATUS_CACHE_HIT+1 ); assert( SQLITE_DBSTATUS_CACHE_WRITE==SQLITE_DBSTATUS_CACHE_HIT+2 ); for(i=0; inDb; i++){ if( db->aDb[i].pBt ){ Pager *pPager = sqlite3BtreePager(db->aDb[i].pBt); sqlite3PagerCacheStat(pPager, op, resetFlag, &nRet); } } *pHighwater = 0; /* IMP: R-42420-56072 */ /* IMP: R-54100-20147 */ /* IMP: R-29431-39229 */ *pCurrent = nRet; break; } /* Set *pCurrent to non-zero if there are unresolved deferred foreign ** key constraints. Set *pCurrent to zero if all foreign key constraints ** have been satisfied. The *pHighwater is always set to zero. */ case SQLITE_DBSTATUS_DEFERRED_FKS: { *pHighwater = 0; /* IMP: R-11967-56545 */ *pCurrent = db->nDeferredImmCons>0 || db->nDeferredCons>0; break; } default: { rc = SQLITE_ERROR; } } sqlite3_mutex_leave(db->mutex); return rc; } /************** End of status.c **********************************************/ /************** Begin file date.c ********************************************/ /* ** 2003 October 31 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. 
** May you share freely, never taking more than you give. ** ************************************************************************* ** This file contains the C functions that implement date and time ** functions for SQLite. ** ** There is only one exported symbol in this file - the function ** sqlite3RegisterDateTimeFunctions() found at the bottom of the file. ** All other code has file scope. ** ** SQLite processes all times and dates as julian day numbers. The ** dates and times are stored as the number of days since noon ** in Greenwich on November 24, 4714 B.C. according to the Gregorian ** calendar system. ** ** 1970-01-01 00:00:00 is JD 2440587.5 ** 2000-01-01 00:00:00 is JD 2451544.5 ** ** This implementation requires years to be expressed as a 4-digit number ** which means that only dates between 0000-01-01 and 9999-12-31 can ** be represented, even though julian day numbers allow a much wider ** range of dates. ** ** The Gregorian calendar system is used for all dates and times, ** even those that predate the Gregorian calendar. Historians usually ** use the julian calendar for dates prior to 1582-10-15 and for some ** dates afterwards, depending on locale. Beware of this difference. ** ** The conversion algorithms are implemented based on descriptions ** in the following text: ** ** Jean Meeus ** Astronomical Algorithms, 2nd Edition, 1998 ** ISBM 0-943396-61-1 ** Willmann-Bell, Inc ** Richmond, Virginia (USA) */ /* #include "sqliteInt.h" */ /* #include */ /* #include */ #include #ifndef SQLITE_OMIT_DATETIME_FUNCS /* ** A structure for holding a single date and time. */ typedef struct DateTime DateTime; struct DateTime { sqlite3_int64 iJD; /* The julian day number times 86400000 */ int Y, M, D; /* Year, month, and day */ int h, m; /* Hour and minutes */ int tz; /* Timezone offset in minutes */ double s; /* Seconds */ char validYMD; /* True (1) if Y,M,D are valid */ char validHMS; /* True (1) if h,m,s are valid */ char validJD; /* True (1) if iJD is valid */ char validTZ; /* True (1) if tz is valid */ }; /* ** Convert zDate into one or more integers. Additional arguments ** come in groups of 5 as follows: ** ** N number of digits in the integer ** min minimum allowed value of the integer ** max maximum allowed value of the integer ** nextC first character after the integer ** pVal where to write the integers value. ** ** Conversions continue until one with nextC==0 is encountered. ** The function returns the number of successful conversions. */ static int getDigits(const char *zDate, ...){ va_list ap; int val; int N; int min; int max; int nextC; int *pVal; int cnt = 0; va_start(ap, zDate); do{ N = va_arg(ap, int); min = va_arg(ap, int); max = va_arg(ap, int); nextC = va_arg(ap, int); pVal = va_arg(ap, int*); val = 0; while( N-- ){ if( !sqlite3Isdigit(*zDate) ){ goto end_getDigits; } val = val*10 + *zDate - '0'; zDate++; } if( valmax || (nextC!=0 && nextC!=*zDate) ){ goto end_getDigits; } *pVal = val; zDate++; cnt++; }while( nextC ); end_getDigits: va_end(ap); return cnt; } /* ** Parse a timezone extension on the end of a date-time. ** The extension is of the form: ** ** (+/-)HH:MM ** ** Or the "zulu" notation: ** ** Z ** ** If the parse is successful, write the number of minutes ** of change in p->tz and return 0. If a parser error occurs, ** return non-zero. ** ** A missing specifier is not considered an error. 
*/ static int parseTimezone(const char *zDate, DateTime *p){ int sgn = 0; int nHr, nMn; int c; while( sqlite3Isspace(*zDate) ){ zDate++; } p->tz = 0; c = *zDate; if( c=='-' ){ sgn = -1; }else if( c=='+' ){ sgn = +1; }else if( c=='Z' || c=='z' ){ zDate++; goto zulu_time; }else{ return c!=0; } zDate++; if( getDigits(zDate, 2, 0, 14, ':', &nHr, 2, 0, 59, 0, &nMn)!=2 ){ return 1; } zDate += 5; p->tz = sgn*(nMn + nHr*60); zulu_time: while( sqlite3Isspace(*zDate) ){ zDate++; } return *zDate!=0; } /* ** Parse times of the form HH:MM or HH:MM:SS or HH:MM:SS.FFFF. ** The HH, MM, and SS must each be exactly 2 digits. The ** fractional seconds FFFF can be one or more digits. ** ** Return 1 if there is a parsing error and 0 on success. */ static int parseHhMmSs(const char *zDate, DateTime *p){ int h, m, s; double ms = 0.0; if( getDigits(zDate, 2, 0, 24, ':', &h, 2, 0, 59, 0, &m)!=2 ){ return 1; } zDate += 5; if( *zDate==':' ){ zDate++; if( getDigits(zDate, 2, 0, 59, 0, &s)!=1 ){ return 1; } zDate += 2; if( *zDate=='.' && sqlite3Isdigit(zDate[1]) ){ double rScale = 1.0; zDate++; while( sqlite3Isdigit(*zDate) ){ ms = ms*10.0 + *zDate - '0'; rScale *= 10.0; zDate++; } ms /= rScale; } }else{ s = 0; } p->validJD = 0; p->validHMS = 1; p->h = h; p->m = m; p->s = s + ms; if( parseTimezone(zDate, p) ) return 1; p->validTZ = (p->tz!=0)?1:0; return 0; } /* ** Convert from YYYY-MM-DD HH:MM:SS to julian day. We always assume ** that the YYYY-MM-DD is according to the Gregorian calendar. ** ** Reference: Meeus page 61 */ static void computeJD(DateTime *p){ int Y, M, D, A, B, X1, X2; if( p->validJD ) return; if( p->validYMD ){ Y = p->Y; M = p->M; D = p->D; }else{ Y = 2000; /* If no YMD specified, assume 2000-Jan-01 */ M = 1; D = 1; } if( M<=2 ){ Y--; M += 12; } A = Y/100; B = 2 - A + (A/4); X1 = 36525*(Y+4716)/100; X2 = 306001*(M+1)/10000; p->iJD = (sqlite3_int64)((X1 + X2 + D + B - 1524.5 ) * 86400000); p->validJD = 1; if( p->validHMS ){ p->iJD += p->h*3600000 + p->m*60000 + (sqlite3_int64)(p->s*1000); if( p->validTZ ){ p->iJD -= p->tz*60000; p->validYMD = 0; p->validHMS = 0; p->validTZ = 0; } } } /* ** Parse dates of the form ** ** YYYY-MM-DD HH:MM:SS.FFF ** YYYY-MM-DD HH:MM:SS ** YYYY-MM-DD HH:MM ** YYYY-MM-DD ** ** Write the result into the DateTime structure and return 0 ** on success and 1 if the input string is not a well-formed ** date. */ static int parseYyyyMmDd(const char *zDate, DateTime *p){ int Y, M, D, neg; if( zDate[0]=='-' ){ zDate++; neg = 1; }else{ neg = 0; } if( getDigits(zDate,4,0,9999,'-',&Y,2,1,12,'-',&M,2,1,31,0,&D)!=3 ){ return 1; } zDate += 10; while( sqlite3Isspace(*zDate) || 'T'==*(u8*)zDate ){ zDate++; } if( parseHhMmSs(zDate, p)==0 ){ /* We got the time */ }else if( *zDate==0 ){ p->validHMS = 0; }else{ return 1; } p->validJD = 0; p->validYMD = 1; p->Y = neg ? -Y : Y; p->M = M; p->D = D; if( p->validTZ ){ computeJD(p); } return 0; } /* ** Set the time to the current time reported by the VFS. ** ** Return the number of errors. */ static int setDateTimeToCurrent(sqlite3_context *context, DateTime *p){ p->iJD = sqlite3StmtCurrentTime(context); if( p->iJD>0 ){ p->validJD = 1; return 0; }else{ return 1; } } /* ** Attempt to parse the given string into a julian day number. Return ** the number of errors. ** ** The following are acceptable forms for the input string: ** ** YYYY-MM-DD HH:MM:SS.FFF +/-HH:MM ** DDDD.DD ** now ** ** In the first form, the +/-HH:MM is always optional. The fractional ** seconds extension (the ".FFF") is optional. The seconds portion ** (":SS.FFF") is option. 
The year and date can be omitted as long ** as there is a time string. The time string can be omitted as long ** as there is a year and date. */ static int parseDateOrTime( sqlite3_context *context, const char *zDate, DateTime *p ){ double r; if( parseYyyyMmDd(zDate,p)==0 ){ return 0; }else if( parseHhMmSs(zDate, p)==0 ){ return 0; }else if( sqlite3StrICmp(zDate,"now")==0){ return setDateTimeToCurrent(context, p); }else if( sqlite3AtoF(zDate, &r, sqlite3Strlen30(zDate), SQLITE_UTF8) ){ p->iJD = (sqlite3_int64)(r*86400000.0 + 0.5); p->validJD = 1; return 0; } return 1; } /* ** Compute the Year, Month, and Day from the julian day number. */ static void computeYMD(DateTime *p){ int Z, A, B, C, D, E, X1; if( p->validYMD ) return; if( !p->validJD ){ p->Y = 2000; p->M = 1; p->D = 1; }else{ Z = (int)((p->iJD + 43200000)/86400000); A = (int)((Z - 1867216.25)/36524.25); A = Z + 1 + A - (A/4); B = A + 1524; C = (int)((B - 122.1)/365.25); D = (36525*(C&32767))/100; E = (int)((B-D)/30.6001); X1 = (int)(30.6001*E); p->D = B - D - X1; p->M = E<14 ? E-1 : E-13; p->Y = p->M>2 ? C - 4716 : C - 4715; } p->validYMD = 1; } /* ** Compute the Hour, Minute, and Seconds from the julian day number. */ static void computeHMS(DateTime *p){ int s; if( p->validHMS ) return; computeJD(p); s = (int)((p->iJD + 43200000) % 86400000); p->s = s/1000.0; s = (int)p->s; p->s -= s; p->h = s/3600; s -= p->h*3600; p->m = s/60; p->s += s - p->m*60; p->validHMS = 1; } /* ** Compute both YMD and HMS */ static void computeYMD_HMS(DateTime *p){ computeYMD(p); computeHMS(p); } /* ** Clear the YMD and HMS and the TZ */ static void clearYMD_HMS_TZ(DateTime *p){ p->validYMD = 0; p->validHMS = 0; p->validTZ = 0; } /* ** On recent Windows platforms, the localtime_s() function is available ** as part of the "Secure CRT". It is essentially equivalent to ** localtime_r() available under most POSIX platforms, except that the ** order of the parameters is reversed. ** ** See http://msdn.microsoft.com/en-us/library/a442x3ye(VS.80).aspx. ** ** If the user has not indicated to use localtime_r() or localtime_s() ** already, check for an MSVC build environment that provides ** localtime_s(). */ #if !HAVE_LOCALTIME_R && !HAVE_LOCALTIME_S \ && defined(_MSC_VER) && defined(_CRT_INSECURE_DEPRECATE) #undef HAVE_LOCALTIME_S #define HAVE_LOCALTIME_S 1 #endif #ifndef SQLITE_OMIT_LOCALTIME /* ** The following routine implements the rough equivalent of localtime_r() ** using whatever operating-system specific localtime facility that ** is available. This routine returns 0 on success and ** non-zero on any kind of error. ** ** If the sqlite3GlobalConfig.bLocaltimeFault variable is true then this ** routine will always fail. ** ** EVIDENCE-OF: R-62172-00036 In this implementation, the standard C ** library function localtime_r() is used to assist in the calculation of ** local time. 
*/ static int osLocaltime(time_t *t, struct tm *pTm){ int rc; #if !HAVE_LOCALTIME_R && !HAVE_LOCALTIME_S struct tm *pX; #if SQLITE_THREADSAFE>0 sqlite3_mutex *mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER); #endif sqlite3_mutex_enter(mutex); pX = localtime(t); #ifndef SQLITE_OMIT_BUILTIN_TEST if( sqlite3GlobalConfig.bLocaltimeFault ) pX = 0; #endif if( pX ) *pTm = *pX; sqlite3_mutex_leave(mutex); rc = pX==0; #else #ifndef SQLITE_OMIT_BUILTIN_TEST if( sqlite3GlobalConfig.bLocaltimeFault ) return 1; #endif #if HAVE_LOCALTIME_R rc = localtime_r(t, pTm)==0; #else rc = localtime_s(pTm, t); #endif /* HAVE_LOCALTIME_R */ #endif /* HAVE_LOCALTIME_R || HAVE_LOCALTIME_S */ return rc; } #endif /* SQLITE_OMIT_LOCALTIME */ #ifndef SQLITE_OMIT_LOCALTIME /* ** Compute the difference (in milliseconds) between localtime and UTC ** (a.k.a. GMT) for the time value p where p is in UTC. If no error occurs, ** return this value and set *pRc to SQLITE_OK. ** ** Or, if an error does occur, set *pRc to SQLITE_ERROR. The returned value ** is undefined in this case. */ static sqlite3_int64 localtimeOffset( DateTime *p, /* Date at which to calculate offset */ sqlite3_context *pCtx, /* Write error here if one occurs */ int *pRc /* OUT: Error code. SQLITE_OK or ERROR */ ){ DateTime x, y; time_t t; struct tm sLocal; /* Initialize the contents of sLocal to avoid a compiler warning. */ memset(&sLocal, 0, sizeof(sLocal)); x = *p; computeYMD_HMS(&x); if( x.Y<1971 || x.Y>=2038 ){ /* EVIDENCE-OF: R-55269-29598 The localtime_r() C function normally only ** works for years between 1970 and 2037. For dates outside this range, ** SQLite attempts to map the year into an equivalent year within this ** range, do the calculation, then map the year back. */ x.Y = 2000; x.M = 1; x.D = 1; x.h = 0; x.m = 0; x.s = 0.0; } else { int s = (int)(x.s + 0.5); x.s = s; } x.tz = 0; x.validJD = 0; computeJD(&x); t = (time_t)(x.iJD/1000 - 21086676*(i64)10000); if( osLocaltime(&t, &sLocal) ){ sqlite3_result_error(pCtx, "local time unavailable", -1); *pRc = SQLITE_ERROR; return 0; } y.Y = sLocal.tm_year + 1900; y.M = sLocal.tm_mon + 1; y.D = sLocal.tm_mday; y.h = sLocal.tm_hour; y.m = sLocal.tm_min; y.s = sLocal.tm_sec; y.validYMD = 1; y.validHMS = 1; y.validJD = 0; y.validTZ = 0; computeJD(&y); *pRc = SQLITE_OK; return y.iJD - x.iJD; } #endif /* SQLITE_OMIT_LOCALTIME */ /* ** Process a modifier to a date-time stamp. The modifiers are ** as follows: ** ** NNN days ** NNN hours ** NNN minutes ** NNN.NNNN seconds ** NNN months ** NNN years ** start of month ** start of year ** start of week ** start of day ** weekday N ** unixepoch ** localtime ** utc ** ** Return 0 on success and 1 if there is any kind of error. If the error ** is in a system call (i.e. localtime()), then an error message is written ** to context pCtx. If the error is an unrecognized modifier, no error is ** written to pCtx. */ static int parseModifier(sqlite3_context *pCtx, const char *zMod, DateTime *p){ int rc = 1; int n; double r; char *z, zBuf[30]; z = zBuf; for(n=0; niJD += localtimeOffset(p, pCtx, &rc); clearYMD_HMS_TZ(p); } break; } #endif case 'u': { /* ** unixepoch ** ** Treat the current value of p->iJD as the number of ** seconds since 1970. Convert to a real julian day number. 
*/ if( strcmp(z, "unixepoch")==0 && p->validJD ){ p->iJD = (p->iJD + 43200)/86400 + 21086676*(i64)10000000; clearYMD_HMS_TZ(p); rc = 0; } #ifndef SQLITE_OMIT_LOCALTIME else if( strcmp(z, "utc")==0 ){ sqlite3_int64 c1; computeJD(p); c1 = localtimeOffset(p, pCtx, &rc); if( rc==SQLITE_OK ){ p->iJD -= c1; clearYMD_HMS_TZ(p); p->iJD += c1 - localtimeOffset(p, pCtx, &rc); } } #endif break; } case 'w': { /* ** weekday N ** ** Move the date to the same time on the next occurrence of ** weekday N where 0==Sunday, 1==Monday, and so forth. If the ** date is already on the appropriate weekday, this is a no-op. */ if( strncmp(z, "weekday ", 8)==0 && sqlite3AtoF(&z[8], &r, sqlite3Strlen30(&z[8]), SQLITE_UTF8) && (n=(int)r)==r && n>=0 && r<7 ){ sqlite3_int64 Z; computeYMD_HMS(p); p->validTZ = 0; p->validJD = 0; computeJD(p); Z = ((p->iJD + 129600000)/86400000) % 7; if( Z>n ) Z -= 7; p->iJD += (n - Z)*86400000; clearYMD_HMS_TZ(p); rc = 0; } break; } case 's': { /* ** start of TTTTT ** ** Move the date backwards to the beginning of the current day, ** or month or year. */ if( strncmp(z, "start of ", 9)!=0 ) break; z += 9; computeYMD(p); p->validHMS = 1; p->h = p->m = 0; p->s = 0.0; p->validTZ = 0; p->validJD = 0; if( strcmp(z,"month")==0 ){ p->D = 1; rc = 0; }else if( strcmp(z,"year")==0 ){ computeYMD(p); p->M = 1; p->D = 1; rc = 0; }else if( strcmp(z,"day")==0 ){ rc = 0; } break; } case '+': case '-': case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': { double rRounder; for(n=1; z[n] && z[n]!=':' && !sqlite3Isspace(z[n]); n++){} if( !sqlite3AtoF(z, &r, n, SQLITE_UTF8) ){ rc = 1; break; } if( z[n]==':' ){ /* A modifier of the form (+|-)HH:MM:SS.FFF adds (or subtracts) the ** specified number of hours, minutes, seconds, and fractional seconds ** to the time. The ".FFF" may be omitted. The ":SS.FFF" may be ** omitted. */ const char *z2 = z; DateTime tx; sqlite3_int64 day; if( !sqlite3Isdigit(*z2) ) z2++; memset(&tx, 0, sizeof(tx)); if( parseHhMmSs(z2, &tx) ) break; computeJD(&tx); tx.iJD -= 43200000; day = tx.iJD/86400000; tx.iJD -= day*86400000; if( z[0]=='-' ) tx.iJD = -tx.iJD; computeJD(p); clearYMD_HMS_TZ(p); p->iJD += tx.iJD; rc = 0; break; } z += n; while( sqlite3Isspace(*z) ) z++; n = sqlite3Strlen30(z); if( n>10 || n<3 ) break; if( z[n-1]=='s' ){ z[n-1] = 0; n--; } computeJD(p); rc = 0; rRounder = r<0 ? -0.5 : +0.5; if( n==3 && strcmp(z,"day")==0 ){ p->iJD += (sqlite3_int64)(r*86400000.0 + rRounder); }else if( n==4 && strcmp(z,"hour")==0 ){ p->iJD += (sqlite3_int64)(r*(86400000.0/24.0) + rRounder); }else if( n==6 && strcmp(z,"minute")==0 ){ p->iJD += (sqlite3_int64)(r*(86400000.0/(24.0*60.0)) + rRounder); }else if( n==6 && strcmp(z,"second")==0 ){ p->iJD += (sqlite3_int64)(r*(86400000.0/(24.0*60.0*60.0)) + rRounder); }else if( n==5 && strcmp(z,"month")==0 ){ int x, y; computeYMD_HMS(p); p->M += (int)r; x = p->M>0 ? (p->M-1)/12 : (p->M-12)/12; p->Y += x; p->M -= x*12; p->validJD = 0; computeJD(p); y = (int)r; if( y!=r ){ p->iJD += (sqlite3_int64)((r - y)*30.0*86400000.0 + rRounder); } }else if( n==4 && strcmp(z,"year")==0 ){ int y = (int)r; computeYMD_HMS(p); p->Y += y; p->validJD = 0; computeJD(p); if( y!=r ){ p->iJD += (sqlite3_int64)((r - y)*365.0*86400000.0 + rRounder); } }else{ rc = 1; } clearYMD_HMS_TZ(p); break; } default: { break; } } return rc; } /* ** Process time function arguments. argv[0] is a date-time stamp. ** argv[1] and following are modifiers. Parse them all and write ** the resulting time into the DateTime structure p. 
Return 0 ** on success and 1 if there are any errors. ** ** If there are zero parameters (if even argv[0] is undefined) ** then assume a default value of "now" for argv[0]. */ static int isDate( sqlite3_context *context, int argc, sqlite3_value **argv, DateTime *p ){ int i; const unsigned char *z; int eType; memset(p, 0, sizeof(*p)); if( argc==0 ){ return setDateTimeToCurrent(context, p); } if( (eType = sqlite3_value_type(argv[0]))==SQLITE_FLOAT || eType==SQLITE_INTEGER ){ p->iJD = (sqlite3_int64)(sqlite3_value_double(argv[0])*86400000.0 + 0.5); p->validJD = 1; }else{ z = sqlite3_value_text(argv[0]); if( !z || parseDateOrTime(context, (char*)z, p) ){ return 1; } } for(i=1; iaLimit[SQLITE_LIMIT_LENGTH]+1 ); testcase( n==(u64)db->aLimit[SQLITE_LIMIT_LENGTH] ); if( n(u64)db->aLimit[SQLITE_LIMIT_LENGTH] ){ sqlite3_result_error_toobig(context); return; }else{ z = sqlite3DbMallocRaw(db, (int)n); if( z==0 ){ sqlite3_result_error_nomem(context); return; } } computeJD(&x); computeYMD_HMS(&x); for(i=j=0; zFmt[i]; i++){ if( zFmt[i]!='%' ){ z[j++] = zFmt[i]; }else{ i++; switch( zFmt[i] ){ case 'd': sqlite3_snprintf(3, &z[j],"%02d",x.D); j+=2; break; case 'f': { double s = x.s; if( s>59.999 ) s = 59.999; sqlite3_snprintf(7, &z[j],"%06.3f", s); j += sqlite3Strlen30(&z[j]); break; } case 'H': sqlite3_snprintf(3, &z[j],"%02d",x.h); j+=2; break; case 'W': /* Fall thru */ case 'j': { int nDay; /* Number of days since 1st day of year */ DateTime y = x; y.validJD = 0; y.M = 1; y.D = 1; computeJD(&y); nDay = (int)((x.iJD-y.iJD+43200000)/86400000); if( zFmt[i]=='W' ){ int wd; /* 0=Monday, 1=Tuesday, ... 6=Sunday */ wd = (int)(((x.iJD+43200000)/86400000)%7); sqlite3_snprintf(3, &z[j],"%02d",(nDay+7-wd)/7); j += 2; }else{ sqlite3_snprintf(4, &z[j],"%03d",nDay+1); j += 3; } break; } case 'J': { sqlite3_snprintf(20, &z[j],"%.16g",x.iJD/86400000.0); j+=sqlite3Strlen30(&z[j]); break; } case 'm': sqlite3_snprintf(3, &z[j],"%02d",x.M); j+=2; break; case 'M': sqlite3_snprintf(3, &z[j],"%02d",x.m); j+=2; break; case 's': { sqlite3_snprintf(30,&z[j],"%lld", (i64)(x.iJD/1000 - 21086676*(i64)10000)); j += sqlite3Strlen30(&z[j]); break; } case 'S': sqlite3_snprintf(3,&z[j],"%02d",(int)x.s); j+=2; break; case 'w': { z[j++] = (char)(((x.iJD+129600000)/86400000) % 7) + '0'; break; } case 'Y': { sqlite3_snprintf(5,&z[j],"%04d",x.Y); j+=sqlite3Strlen30(&z[j]); break; } default: z[j++] = '%'; break; } } } z[j] = 0; sqlite3_result_text(context, z, -1, z==zBuf ? SQLITE_TRANSIENT : SQLITE_DYNAMIC); } /* ** current_time() ** ** This function returns the same value as time('now'). */ static void ctimeFunc( sqlite3_context *context, int NotUsed, sqlite3_value **NotUsed2 ){ UNUSED_PARAMETER2(NotUsed, NotUsed2); timeFunc(context, 0, 0); } /* ** current_date() ** ** This function returns the same value as date('now'). */ static void cdateFunc( sqlite3_context *context, int NotUsed, sqlite3_value **NotUsed2 ){ UNUSED_PARAMETER2(NotUsed, NotUsed2); dateFunc(context, 0, 0); } /* ** current_timestamp() ** ** This function returns the same value as datetime('now'). 
*/ static void ctimestampFunc( sqlite3_context *context, int NotUsed, sqlite3_value **NotUsed2 ){ UNUSED_PARAMETER2(NotUsed, NotUsed2); datetimeFunc(context, 0, 0); } #endif /* !defined(SQLITE_OMIT_DATETIME_FUNCS) */ #ifdef SQLITE_OMIT_DATETIME_FUNCS /* ** If the library is compiled to omit the full-scale date and time ** handling (to get a smaller binary), the following minimal version ** of the functions current_time(), current_date() and current_timestamp() ** are included instead. This is to support column declarations that ** include "DEFAULT CURRENT_TIME" etc. ** ** This function uses the C-library functions time(), gmtime() ** and strftime(). The format string to pass to strftime() is supplied ** as the user-data for the function. */ static void currentTimeFunc( sqlite3_context *context, int argc, sqlite3_value **argv ){ time_t t; char *zFormat = (char *)sqlite3_user_data(context); sqlite3 *db; sqlite3_int64 iT; struct tm *pTm; struct tm sNow; char zBuf[20]; UNUSED_PARAMETER(argc); UNUSED_PARAMETER(argv); iT = sqlite3StmtCurrentTime(context); if( iT<=0 ) return; t = iT/1000 - 10000*(sqlite3_int64)21086676; #if HAVE_GMTIME_R pTm = gmtime_r(&t, &sNow); #else sqlite3_mutex_enter(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER)); pTm = gmtime(&t); if( pTm ) memcpy(&sNow, pTm, sizeof(sNow)); sqlite3_mutex_leave(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER)); #endif if( pTm ){ strftime(zBuf, 20, zFormat, &sNow); sqlite3_result_text(context, zBuf, -1, SQLITE_TRANSIENT); } } #endif /* ** This function registered all of the above C functions as SQL ** functions. This should be the only routine in this file with ** external linkage. */ SQLITE_PRIVATE void sqlite3RegisterDateTimeFunctions(void){ static SQLITE_WSD FuncDef aDateTimeFuncs[] = { #ifndef SQLITE_OMIT_DATETIME_FUNCS FUNCTION(julianday, -1, 0, 0, juliandayFunc ), FUNCTION(date, -1, 0, 0, dateFunc ), FUNCTION(time, -1, 0, 0, timeFunc ), FUNCTION(datetime, -1, 0, 0, datetimeFunc ), FUNCTION(strftime, -1, 0, 0, strftimeFunc ), FUNCTION(current_time, 0, 0, 0, ctimeFunc ), FUNCTION(current_timestamp, 0, 0, 0, ctimestampFunc), FUNCTION(current_date, 0, 0, 0, cdateFunc ), #else STR_FUNCTION(current_time, 0, "%H:%M:%S", 0, currentTimeFunc), STR_FUNCTION(current_date, 0, "%Y-%m-%d", 0, currentTimeFunc), STR_FUNCTION(current_timestamp, 0, "%Y-%m-%d %H:%M:%S", 0, currentTimeFunc), #endif }; int i; FuncDefHash *pHash = &GLOBAL(FuncDefHash, sqlite3GlobalFunctions); FuncDef *aFunc = (FuncDef*)&GLOBAL(FuncDef, aDateTimeFuncs); for(i=0; ipMethods ){ rc = pId->pMethods->xClose(pId); pId->pMethods = 0; } return rc; } SQLITE_PRIVATE int sqlite3OsRead(sqlite3_file *id, void *pBuf, int amt, i64 offset){ DO_OS_MALLOC_TEST(id); return id->pMethods->xRead(id, pBuf, amt, offset); } SQLITE_PRIVATE int sqlite3OsWrite(sqlite3_file *id, const void *pBuf, int amt, i64 offset){ DO_OS_MALLOC_TEST(id); return id->pMethods->xWrite(id, pBuf, amt, offset); } SQLITE_PRIVATE int sqlite3OsTruncate(sqlite3_file *id, i64 size){ return id->pMethods->xTruncate(id, size); } SQLITE_PRIVATE int sqlite3OsSync(sqlite3_file *id, int flags){ DO_OS_MALLOC_TEST(id); return id->pMethods->xSync(id, flags); } SQLITE_PRIVATE int sqlite3OsFileSize(sqlite3_file *id, i64 *pSize){ DO_OS_MALLOC_TEST(id); return id->pMethods->xFileSize(id, pSize); } SQLITE_PRIVATE int sqlite3OsLock(sqlite3_file *id, int lockType){ DO_OS_MALLOC_TEST(id); return id->pMethods->xLock(id, lockType); } SQLITE_PRIVATE int sqlite3OsUnlock(sqlite3_file *id, int lockType){ return id->pMethods->xUnlock(id, lockType); } 
SQLITE_PRIVATE int sqlite3OsCheckReservedLock(sqlite3_file *id, int *pResOut){ DO_OS_MALLOC_TEST(id); return id->pMethods->xCheckReservedLock(id, pResOut); } /* ** Use sqlite3OsFileControl() when we are doing something that might fail ** and we need to know about the failures. Use sqlite3OsFileControlHint() ** when simply tossing information over the wall to the VFS and we do not ** really care if the VFS receives and understands the information since it ** is only a hint and can be safely ignored. The sqlite3OsFileControlHint() ** routine has no return value since the return value would be meaningless. */ SQLITE_PRIVATE int sqlite3OsFileControl(sqlite3_file *id, int op, void *pArg){ #ifdef SQLITE_TEST if( op!=SQLITE_FCNTL_COMMIT_PHASETWO ){ /* Faults are not injected into COMMIT_PHASETWO because, assuming SQLite ** is using a regular VFS, it is called after the corresponding ** transaction has been committed. Injecting a fault at this point ** confuses the test scripts - the COMMIT comand returns SQLITE_NOMEM ** but the transaction is committed anyway. ** ** The core must call OsFileControl() though, not OsFileControlHint(), ** as if a custom VFS (e.g. zipvfs) returns an error here, it probably ** means the commit really has failed and an error should be returned ** to the user. */ DO_OS_MALLOC_TEST(id); } #endif return id->pMethods->xFileControl(id, op, pArg); } SQLITE_PRIVATE void sqlite3OsFileControlHint(sqlite3_file *id, int op, void *pArg){ (void)id->pMethods->xFileControl(id, op, pArg); } SQLITE_PRIVATE int sqlite3OsSectorSize(sqlite3_file *id){ int (*xSectorSize)(sqlite3_file*) = id->pMethods->xSectorSize; return (xSectorSize ? xSectorSize(id) : SQLITE_DEFAULT_SECTOR_SIZE); } SQLITE_PRIVATE int sqlite3OsDeviceCharacteristics(sqlite3_file *id){ return id->pMethods->xDeviceCharacteristics(id); } SQLITE_PRIVATE int sqlite3OsShmLock(sqlite3_file *id, int offset, int n, int flags){ return id->pMethods->xShmLock(id, offset, n, flags); } SQLITE_PRIVATE void sqlite3OsShmBarrier(sqlite3_file *id){ id->pMethods->xShmBarrier(id); } SQLITE_PRIVATE int sqlite3OsShmUnmap(sqlite3_file *id, int deleteFlag){ return id->pMethods->xShmUnmap(id, deleteFlag); } SQLITE_PRIVATE int sqlite3OsShmMap( sqlite3_file *id, /* Database file handle */ int iPage, int pgsz, int bExtend, /* True to extend file if necessary */ void volatile **pp /* OUT: Pointer to mapping */ ){ DO_OS_MALLOC_TEST(id); return id->pMethods->xShmMap(id, iPage, pgsz, bExtend, pp); } #if SQLITE_MAX_MMAP_SIZE>0 /* The real implementation of xFetch and xUnfetch */ SQLITE_PRIVATE int sqlite3OsFetch(sqlite3_file *id, i64 iOff, int iAmt, void **pp){ DO_OS_MALLOC_TEST(id); return id->pMethods->xFetch(id, iOff, iAmt, pp); } SQLITE_PRIVATE int sqlite3OsUnfetch(sqlite3_file *id, i64 iOff, void *p){ return id->pMethods->xUnfetch(id, iOff, p); } #else /* No-op stubs to use when memory-mapped I/O is disabled */ SQLITE_PRIVATE int sqlite3OsFetch(sqlite3_file *id, i64 iOff, int iAmt, void **pp){ *pp = 0; return SQLITE_OK; } SQLITE_PRIVATE int sqlite3OsUnfetch(sqlite3_file *id, i64 iOff, void *p){ return SQLITE_OK; } #endif /* ** The next group of routines are convenience wrappers around the ** VFS methods. */ SQLITE_PRIVATE int sqlite3OsOpen( sqlite3_vfs *pVfs, const char *zPath, sqlite3_file *pFile, int flags, int *pFlagsOut ){ int rc; DO_OS_MALLOC_TEST(0); /* 0x87f7f is a mask of SQLITE_OPEN_ flags that are valid to be passed ** down into the VFS layer. 
Some SQLITE_OPEN_ flags (for example, ** SQLITE_OPEN_FULLMUTEX or SQLITE_OPEN_SHAREDCACHE) are blocked before ** reaching the VFS. */ rc = pVfs->xOpen(pVfs, zPath, pFile, flags & 0x87f7f, pFlagsOut); assert( rc==SQLITE_OK || pFile->pMethods==0 ); return rc; } SQLITE_PRIVATE int sqlite3OsDelete(sqlite3_vfs *pVfs, const char *zPath, int dirSync){ DO_OS_MALLOC_TEST(0); assert( dirSync==0 || dirSync==1 ); return pVfs->xDelete(pVfs, zPath, dirSync); } SQLITE_PRIVATE int sqlite3OsAccess( sqlite3_vfs *pVfs, const char *zPath, int flags, int *pResOut ){ DO_OS_MALLOC_TEST(0); return pVfs->xAccess(pVfs, zPath, flags, pResOut); } SQLITE_PRIVATE int sqlite3OsFullPathname( sqlite3_vfs *pVfs, const char *zPath, int nPathOut, char *zPathOut ){ DO_OS_MALLOC_TEST(0); zPathOut[0] = 0; return pVfs->xFullPathname(pVfs, zPath, nPathOut, zPathOut); } #ifndef SQLITE_OMIT_LOAD_EXTENSION SQLITE_PRIVATE void *sqlite3OsDlOpen(sqlite3_vfs *pVfs, const char *zPath){ return pVfs->xDlOpen(pVfs, zPath); } SQLITE_PRIVATE void sqlite3OsDlError(sqlite3_vfs *pVfs, int nByte, char *zBufOut){ pVfs->xDlError(pVfs, nByte, zBufOut); } SQLITE_PRIVATE void (*sqlite3OsDlSym(sqlite3_vfs *pVfs, void *pHdle, const char *zSym))(void){ return pVfs->xDlSym(pVfs, pHdle, zSym); } SQLITE_PRIVATE void sqlite3OsDlClose(sqlite3_vfs *pVfs, void *pHandle){ pVfs->xDlClose(pVfs, pHandle); } #endif /* SQLITE_OMIT_LOAD_EXTENSION */ SQLITE_PRIVATE int sqlite3OsRandomness(sqlite3_vfs *pVfs, int nByte, char *zBufOut){ return pVfs->xRandomness(pVfs, nByte, zBufOut); } SQLITE_PRIVATE int sqlite3OsSleep(sqlite3_vfs *pVfs, int nMicro){ return pVfs->xSleep(pVfs, nMicro); } SQLITE_PRIVATE int sqlite3OsCurrentTimeInt64(sqlite3_vfs *pVfs, sqlite3_int64 *pTimeOut){ int rc; /* IMPLEMENTATION-OF: R-49045-42493 SQLite will use the xCurrentTimeInt64() ** method to get the current date and time if that method is available ** (if iVersion is 2 or greater and the function pointer is not NULL) and ** will fall back to xCurrentTime() if xCurrentTimeInt64() is ** unavailable. */ if( pVfs->iVersion>=2 && pVfs->xCurrentTimeInt64 ){ rc = pVfs->xCurrentTimeInt64(pVfs, pTimeOut); }else{ double r; rc = pVfs->xCurrentTime(pVfs, &r); *pTimeOut = (sqlite3_int64)(r*86400000.0); } return rc; } SQLITE_PRIVATE int sqlite3OsOpenMalloc( sqlite3_vfs *pVfs, const char *zFile, sqlite3_file **ppFile, int flags, int *pOutFlags ){ int rc = SQLITE_NOMEM; sqlite3_file *pFile; pFile = (sqlite3_file *)sqlite3MallocZero(pVfs->szOsFile); if( pFile ){ rc = sqlite3OsOpen(pVfs, zFile, pFile, flags, pOutFlags); if( rc!=SQLITE_OK ){ sqlite3_free(pFile); }else{ *ppFile = pFile; } } return rc; } SQLITE_PRIVATE int sqlite3OsCloseFree(sqlite3_file *pFile){ int rc = SQLITE_OK; assert( pFile ); rc = sqlite3OsClose(pFile); sqlite3_free(pFile); return rc; } /* ** This function is a wrapper around the OS specific implementation of ** sqlite3_os_init(). The purpose of the wrapper is to provide the ** ability to simulate a malloc failure, so that the handling of an ** error in sqlite3_os_init() by the upper layers can be tested. */ SQLITE_PRIVATE int sqlite3OsInit(void){ void *p = sqlite3_malloc(10); if( p==0 ) return SQLITE_NOMEM; sqlite3_free(p); return sqlite3_os_init(); } /* ** The list of all registered VFS implementations. */ static sqlite3_vfs * SQLITE_WSD vfsList = 0; #define vfsList GLOBAL(sqlite3_vfs *, vfsList) /* ** Locate a VFS by name. If no name is given, simply return the ** first VFS on the list. 
*/ SQLITE_API sqlite3_vfs *SQLITE_STDCALL sqlite3_vfs_find(const char *zVfs){ sqlite3_vfs *pVfs = 0; #if SQLITE_THREADSAFE sqlite3_mutex *mutex; #endif #ifndef SQLITE_OMIT_AUTOINIT int rc = sqlite3_initialize(); if( rc ) return 0; #endif #if SQLITE_THREADSAFE mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER); #endif sqlite3_mutex_enter(mutex); for(pVfs = vfsList; pVfs; pVfs=pVfs->pNext){ if( zVfs==0 ) break; if( strcmp(zVfs, pVfs->zName)==0 ) break; } sqlite3_mutex_leave(mutex); return pVfs; } /* ** Unlink a VFS from the linked list */ static void vfsUnlink(sqlite3_vfs *pVfs){ assert( sqlite3_mutex_held(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER)) ); if( pVfs==0 ){ /* No-op */ }else if( vfsList==pVfs ){ vfsList = pVfs->pNext; }else if( vfsList ){ sqlite3_vfs *p = vfsList; while( p->pNext && p->pNext!=pVfs ){ p = p->pNext; } if( p->pNext==pVfs ){ p->pNext = pVfs->pNext; } } } /* ** Register a VFS with the system. It is harmless to register the same ** VFS multiple times. The new VFS becomes the default if makeDflt is ** true. */ SQLITE_API int SQLITE_STDCALL sqlite3_vfs_register(sqlite3_vfs *pVfs, int makeDflt){ MUTEX_LOGIC(sqlite3_mutex *mutex;) #ifndef SQLITE_OMIT_AUTOINIT int rc = sqlite3_initialize(); if( rc ) return rc; #endif #ifdef SQLITE_ENABLE_API_ARMOR if( pVfs==0 ) return SQLITE_MISUSE_BKPT; #endif MUTEX_LOGIC( mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER); ) sqlite3_mutex_enter(mutex); vfsUnlink(pVfs); if( makeDflt || vfsList==0 ){ pVfs->pNext = vfsList; vfsList = pVfs; }else{ pVfs->pNext = vfsList->pNext; vfsList->pNext = pVfs; } assert(vfsList); sqlite3_mutex_leave(mutex); return SQLITE_OK; } /* ** Unregister a VFS so that it is no longer accessible. */ SQLITE_API int SQLITE_STDCALL sqlite3_vfs_unregister(sqlite3_vfs *pVfs){ #if SQLITE_THREADSAFE sqlite3_mutex *mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER); #endif sqlite3_mutex_enter(mutex); vfsUnlink(pVfs); sqlite3_mutex_leave(mutex); return SQLITE_OK; } /************** End of os.c **************************************************/ /************** Begin file fault.c *******************************************/ /* ** 2008 Jan 22 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** ** This file contains code to support the concept of "benign" ** malloc failures (when the xMalloc() or xRealloc() method of the ** sqlite3_mem_methods structure fails to allocate a block of memory ** and returns 0). ** ** Most malloc failures are non-benign. After they occur, SQLite ** abandons the current operation and returns an error code (usually ** SQLITE_NOMEM) to the user. However, sometimes a fault is not necessarily ** fatal. For example, if a malloc fails while resizing a hash table, this ** is completely recoverable simply by not carrying out the resize. The ** hash table will continue to function normally. So a malloc failure ** during a hash table resize is a benign fault. */ /* #include "sqliteInt.h" */ #ifndef SQLITE_OMIT_BUILTIN_TEST /* ** Global variables. */ typedef struct BenignMallocHooks BenignMallocHooks; static SQLITE_WSD struct BenignMallocHooks { void (*xBenignBegin)(void); void (*xBenignEnd)(void); } sqlite3Hooks = { 0, 0 }; /* The "wsdHooks" macro will resolve to the appropriate BenignMallocHooks ** structure. 
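** Before the details of the macro, a usage sketch for the hooks themselves
** (an illustrative aside, not part of the original comment): a test harness
** might count how deeply "benign" regions nest with
**
**    static int benignDepth = 0;
**    static void hookBegin(void){ benignDepth++; }
**    static void hookEnd(void){ benignDepth--; }
**    ...
**    sqlite3BenignMallocHooks(hookBegin, hookEnd);
**
** where benignDepth, hookBegin() and hookEnd() are hypothetical names.
** As for the macro itself: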
If writable static data is unsupported on the target, ** we have to locate the state vector at run-time. In the more common ** case where writable static data is supported, wsdHooks can refer directly ** to the "sqlite3Hooks" state vector declared above. */ #ifdef SQLITE_OMIT_WSD # define wsdHooksInit \ BenignMallocHooks *x = &GLOBAL(BenignMallocHooks,sqlite3Hooks) # define wsdHooks x[0] #else # define wsdHooksInit # define wsdHooks sqlite3Hooks #endif /* ** Register hooks to call when sqlite3BeginBenignMalloc() and ** sqlite3EndBenignMalloc() are called, respectively. */ SQLITE_PRIVATE void sqlite3BenignMallocHooks( void (*xBenignBegin)(void), void (*xBenignEnd)(void) ){ wsdHooksInit; wsdHooks.xBenignBegin = xBenignBegin; wsdHooks.xBenignEnd = xBenignEnd; } /* ** This (sqlite3EndBenignMalloc()) is called by SQLite code to indicate that ** subsequent malloc failures are benign. A call to sqlite3EndBenignMalloc() ** indicates that subsequent malloc failures are non-benign. */ SQLITE_PRIVATE void sqlite3BeginBenignMalloc(void){ wsdHooksInit; if( wsdHooks.xBenignBegin ){ wsdHooks.xBenignBegin(); } } SQLITE_PRIVATE void sqlite3EndBenignMalloc(void){ wsdHooksInit; if( wsdHooks.xBenignEnd ){ wsdHooks.xBenignEnd(); } } #endif /* #ifndef SQLITE_OMIT_BUILTIN_TEST */ /************** End of fault.c ***********************************************/ /************** Begin file mem0.c ********************************************/ /* ** 2008 October 28 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** ** This file contains a no-op memory allocation drivers for use when ** SQLITE_ZERO_MALLOC is defined. The allocation drivers implemented ** here always fail. SQLite will not operate with these drivers. These ** are merely placeholders. Real drivers must be substituted using ** sqlite3_config() before SQLite will operate. */ /* #include "sqliteInt.h" */ /* ** This version of the memory allocator is the default. It is ** used when no other memory allocator is specified using compile-time ** macros. */ #ifdef SQLITE_ZERO_MALLOC /* ** No-op versions of all memory allocation routines */ static void *sqlite3MemMalloc(int nByte){ return 0; } static void sqlite3MemFree(void *pPrior){ return; } static void *sqlite3MemRealloc(void *pPrior, int nByte){ return 0; } static int sqlite3MemSize(void *pPrior){ return 0; } static int sqlite3MemRoundup(int n){ return n; } static int sqlite3MemInit(void *NotUsed){ return SQLITE_OK; } static void sqlite3MemShutdown(void *NotUsed){ return; } /* ** This routine is the only routine in this file with external linkage. ** ** Populate the low-level memory allocation function pointers in ** sqlite3GlobalConfig.m with pointers to the routines in this file. 
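**
** Because every routine in this file fails unconditionally, a real
** allocator has to be substituted before SQLite can be used.  As an
** illustrative sketch (not part of the original comment), an application
** built with SQLITE_ZERO_MALLOC would typically do something like
**
**    static const sqlite3_mem_methods appMethods = {
**      appMalloc, appFree, appRealloc, appSize, appRoundup,
**      appInit, appShutdown, 0
**    };
**    sqlite3_config(SQLITE_CONFIG_MALLOC, &appMethods);
**
** before sqlite3_initialize(), where the app* routines are hypothetical
** application-supplied implementations.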
*/ SQLITE_PRIVATE void sqlite3MemSetDefault(void){ static const sqlite3_mem_methods defaultMethods = { sqlite3MemMalloc, sqlite3MemFree, sqlite3MemRealloc, sqlite3MemSize, sqlite3MemRoundup, sqlite3MemInit, sqlite3MemShutdown, 0 }; sqlite3_config(SQLITE_CONFIG_MALLOC, &defaultMethods); } #endif /* SQLITE_ZERO_MALLOC */ /************** End of mem0.c ************************************************/ /************** Begin file mem1.c ********************************************/ /* ** 2007 August 14 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** ** This file contains low-level memory allocation drivers for when ** SQLite will use the standard C-library malloc/realloc/free interface ** to obtain the memory it needs. ** ** This file contains implementations of the low-level memory allocation ** routines specified in the sqlite3_mem_methods object. The content of ** this file is only used if SQLITE_SYSTEM_MALLOC is defined. The ** SQLITE_SYSTEM_MALLOC macro is defined automatically if neither the ** SQLITE_MEMDEBUG nor the SQLITE_WIN32_MALLOC macros are defined. The ** default configuration is to use memory allocation routines in this ** file. ** ** C-preprocessor macro summary: ** ** HAVE_MALLOC_USABLE_SIZE The configure script sets this symbol if ** the malloc_usable_size() interface exists ** on the target platform. Or, this symbol ** can be set manually, if desired. ** If an equivalent interface exists by ** a different name, using a separate -D ** option to rename it. ** ** SQLITE_WITHOUT_ZONEMALLOC Some older macs lack support for the zone ** memory allocator. Set this symbol to enable ** building on older macs. ** ** SQLITE_WITHOUT_MSIZE Set this symbol to disable the use of ** _msize() on windows systems. This might ** be necessary when compiling for Delphi, ** for example. */ /* #include "sqliteInt.h" */ /* ** This version of the memory allocator is the default. It is ** used when no other memory allocator is specified using compile-time ** macros. */ #ifdef SQLITE_SYSTEM_MALLOC #if defined(__APPLE__) && !defined(SQLITE_WITHOUT_ZONEMALLOC) /* ** Use the zone allocator available on apple products unless the ** SQLITE_WITHOUT_ZONEMALLOC symbol is defined. */ #include #include #include static malloc_zone_t* _sqliteZone_; #define SQLITE_MALLOC(x) malloc_zone_malloc(_sqliteZone_, (x)) #define SQLITE_FREE(x) malloc_zone_free(_sqliteZone_, (x)); #define SQLITE_REALLOC(x,y) malloc_zone_realloc(_sqliteZone_, (x), (y)) #define SQLITE_MALLOCSIZE(x) \ (_sqliteZone_ ? _sqliteZone_->size(_sqliteZone_,x) : malloc_size(x)) #else /* if not __APPLE__ */ /* ** Use standard C library malloc and free on non-Apple systems. ** Also used by Apple systems if SQLITE_WITHOUT_ZONEMALLOC is defined. */ #define SQLITE_MALLOC(x) malloc(x) #define SQLITE_FREE(x) free(x) #define SQLITE_REALLOC(x,y) realloc((x),(y)) /* ** The malloc.h header file is needed for malloc_usable_size() function ** on some systems (e.g. Linux). */ #if HAVE_MALLOC_H && HAVE_MALLOC_USABLE_SIZE # define SQLITE_USE_MALLOC_H 1 # define SQLITE_USE_MALLOC_USABLE_SIZE 1 /* ** The MSVCRT has malloc_usable_size(), but it is called _msize(). The ** use of _msize() is automatic, but can be disabled by compiling with ** -DSQLITE_WITHOUT_MSIZE. 
Using the _msize() function also requires ** the malloc.h header file. */ #elif defined(_MSC_VER) && !defined(SQLITE_WITHOUT_MSIZE) # define SQLITE_USE_MALLOC_H # define SQLITE_USE_MSIZE #endif /* ** Include the malloc.h header file, if necessary. Also set define macro ** SQLITE_MALLOCSIZE to the appropriate function name, which is _msize() ** for MSVC and malloc_usable_size() for most other systems (e.g. Linux). ** The memory size function can always be overridden manually by defining ** the macro SQLITE_MALLOCSIZE to the desired function name. */ #if defined(SQLITE_USE_MALLOC_H) # include # if defined(SQLITE_USE_MALLOC_USABLE_SIZE) # if !defined(SQLITE_MALLOCSIZE) # define SQLITE_MALLOCSIZE(x) malloc_usable_size(x) # endif # elif defined(SQLITE_USE_MSIZE) # if !defined(SQLITE_MALLOCSIZE) # define SQLITE_MALLOCSIZE _msize # endif # endif #endif /* defined(SQLITE_USE_MALLOC_H) */ #endif /* __APPLE__ or not __APPLE__ */ /* ** Like malloc(), but remember the size of the allocation ** so that we can find it later using sqlite3MemSize(). ** ** For this low-level routine, we are guaranteed that nByte>0 because ** cases of nByte<=0 will be intercepted and dealt with by higher level ** routines. */ static void *sqlite3MemMalloc(int nByte){ #ifdef SQLITE_MALLOCSIZE void *p = SQLITE_MALLOC( nByte ); if( p==0 ){ testcase( sqlite3GlobalConfig.xLog!=0 ); sqlite3_log(SQLITE_NOMEM, "failed to allocate %u bytes of memory", nByte); } return p; #else sqlite3_int64 *p; assert( nByte>0 ); nByte = ROUND8(nByte); p = SQLITE_MALLOC( nByte+8 ); if( p ){ p[0] = nByte; p++; }else{ testcase( sqlite3GlobalConfig.xLog!=0 ); sqlite3_log(SQLITE_NOMEM, "failed to allocate %u bytes of memory", nByte); } return (void *)p; #endif } /* ** Like free() but works for allocations obtained from sqlite3MemMalloc() ** or sqlite3MemRealloc(). ** ** For this low-level routine, we already know that pPrior!=0 since ** cases where pPrior==0 will have been intecepted and dealt with ** by higher-level routines. */ static void sqlite3MemFree(void *pPrior){ #ifdef SQLITE_MALLOCSIZE SQLITE_FREE(pPrior); #else sqlite3_int64 *p = (sqlite3_int64*)pPrior; assert( pPrior!=0 ); p--; SQLITE_FREE(p); #endif } /* ** Report the allocated size of a prior return from xMalloc() ** or xRealloc(). */ static int sqlite3MemSize(void *pPrior){ #ifdef SQLITE_MALLOCSIZE return pPrior ? (int)SQLITE_MALLOCSIZE(pPrior) : 0; #else sqlite3_int64 *p; if( pPrior==0 ) return 0; p = (sqlite3_int64*)pPrior; p--; return (int)p[0]; #endif } /* ** Like realloc(). Resize an allocation previously obtained from ** sqlite3MemMalloc(). ** ** For this low-level interface, we know that pPrior!=0. Cases where ** pPrior==0 while have been intercepted by higher-level routine and ** redirected to xMalloc. Similarly, we know that nByte>0 because ** cases where nByte<=0 will have been intercepted by higher-level ** routines and redirected to xFree. 
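**
** As a reminder of the layout manipulated here when SQLITE_MALLOCSIZE is
** not available (an illustrative aside, not part of the original comment),
** every allocation carries an 8-byte size prefix:
**
**       size prefix (i64)     memory visible to the caller
**      +------------------+----------------------------------+
**      |      nByte       |  pPrior[0] ....... pPrior[nByte]  |
**      +------------------+----------------------------------+
**
** so the code below steps the pointer back by one sqlite3_int64 before
** handing it to SQLITE_REALLOC(), then stores the new size and steps the
** pointer forward again on success.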
*/ static void *sqlite3MemRealloc(void *pPrior, int nByte){ #ifdef SQLITE_MALLOCSIZE void *p = SQLITE_REALLOC(pPrior, nByte); if( p==0 ){ testcase( sqlite3GlobalConfig.xLog!=0 ); sqlite3_log(SQLITE_NOMEM, "failed memory resize %u to %u bytes", SQLITE_MALLOCSIZE(pPrior), nByte); } return p; #else sqlite3_int64 *p = (sqlite3_int64*)pPrior; assert( pPrior!=0 && nByte>0 ); assert( nByte==ROUND8(nByte) ); /* EV: R-46199-30249 */ p--; p = SQLITE_REALLOC(p, nByte+8 ); if( p ){ p[0] = nByte; p++; }else{ testcase( sqlite3GlobalConfig.xLog!=0 ); sqlite3_log(SQLITE_NOMEM, "failed memory resize %u to %u bytes", sqlite3MemSize(pPrior), nByte); } return (void*)p; #endif } /* ** Round up a request size to the next valid allocation size. */ static int sqlite3MemRoundup(int n){ return ROUND8(n); } /* ** Initialize this module. */ static int sqlite3MemInit(void *NotUsed){ #if defined(__APPLE__) && !defined(SQLITE_WITHOUT_ZONEMALLOC) int cpuCount; size_t len; if( _sqliteZone_ ){ return SQLITE_OK; } len = sizeof(cpuCount); /* One usually wants to use hw.acctivecpu for MT decisions, but not here */ sysctlbyname("hw.ncpu", &cpuCount, &len, NULL, 0); if( cpuCount>1 ){ /* defer MT decisions to system malloc */ _sqliteZone_ = malloc_default_zone(); }else{ /* only 1 core, use our own zone to contention over global locks, ** e.g. we have our own dedicated locks */ bool success; malloc_zone_t* newzone = malloc_create_zone(4096, 0); malloc_set_zone_name(newzone, "Sqlite_Heap"); do{ success = OSAtomicCompareAndSwapPtrBarrier(NULL, newzone, (void * volatile *)&_sqliteZone_); }while(!_sqliteZone_); if( !success ){ /* somebody registered a zone first */ malloc_destroy_zone(newzone); } } #endif UNUSED_PARAMETER(NotUsed); return SQLITE_OK; } /* ** Deinitialize this module. */ static void sqlite3MemShutdown(void *NotUsed){ UNUSED_PARAMETER(NotUsed); return; } /* ** This routine is the only routine in this file with external linkage. ** ** Populate the low-level memory allocation function pointers in ** sqlite3GlobalConfig.m with pointers to the routines in this file. */ SQLITE_PRIVATE void sqlite3MemSetDefault(void){ static const sqlite3_mem_methods defaultMethods = { sqlite3MemMalloc, sqlite3MemFree, sqlite3MemRealloc, sqlite3MemSize, sqlite3MemRoundup, sqlite3MemInit, sqlite3MemShutdown, 0 }; sqlite3_config(SQLITE_CONFIG_MALLOC, &defaultMethods); } #endif /* SQLITE_SYSTEM_MALLOC */ /************** End of mem1.c ************************************************/ /************** Begin file mem2.c ********************************************/ /* ** 2007 August 15 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** ** This file contains low-level memory allocation drivers for when ** SQLite will use the standard C-library malloc/realloc/free interface ** to obtain the memory it needs while adding lots of additional debugging ** information to each allocation in order to help detect and fix memory ** leaks and memory usage errors. ** ** This file contains implementations of the low-level memory allocation ** routines specified in the sqlite3_mem_methods object. 
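**
** (Illustrative build note, not part of the original comment: this
** debugging allocator is normally selected by compiling the amalgamation
** with -DSQLITE_MEMDEBUG=1, in which case it takes the place of the
** system allocator in mem1.c; the two are mutually exclusive.)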
*/ /* #include "sqliteInt.h" */ /* ** This version of the memory allocator is used only if the ** SQLITE_MEMDEBUG macro is defined */ #ifdef SQLITE_MEMDEBUG /* ** The backtrace functionality is only available with GLIBC */ #ifdef __GLIBC__ extern int backtrace(void**,int); extern void backtrace_symbols_fd(void*const*,int,int); #else # define backtrace(A,B) 1 # define backtrace_symbols_fd(A,B,C) #endif /* #include */ /* ** Each memory allocation looks like this: ** ** ------------------------------------------------------------------------ ** | Title | backtrace pointers | MemBlockHdr | allocation | EndGuard | ** ------------------------------------------------------------------------ ** ** The application code sees only a pointer to the allocation. We have ** to back up from the allocation pointer to find the MemBlockHdr. The ** MemBlockHdr tells us the size of the allocation and the number of ** backtrace pointers. There is also a guard word at the end of the ** MemBlockHdr. */ struct MemBlockHdr { i64 iSize; /* Size of this allocation */ struct MemBlockHdr *pNext, *pPrev; /* Linked list of all unfreed memory */ char nBacktrace; /* Number of backtraces on this alloc */ char nBacktraceSlots; /* Available backtrace slots */ u8 nTitle; /* Bytes of title; includes '\0' */ u8 eType; /* Allocation type code */ int iForeGuard; /* Guard word for sanity */ }; /* ** Guard words */ #define FOREGUARD 0x80F5E153 #define REARGUARD 0xE4676B53 /* ** Number of malloc size increments to track. */ #define NCSIZE 1000 /* ** All of the static variables used by this module are collected ** into a single structure named "mem". This is to keep the ** static variables organized and to reduce namespace pollution ** when this module is combined with other in the amalgamation. */ static struct { /* ** Mutex to control access to the memory allocation subsystem. */ sqlite3_mutex *mutex; /* ** Head and tail of a linked list of all outstanding allocations */ struct MemBlockHdr *pFirst; struct MemBlockHdr *pLast; /* ** The number of levels of backtrace to save in new allocations. */ int nBacktrace; void (*xBacktrace)(int, int, void **); /* ** Title text to insert in front of each block */ int nTitle; /* Bytes of zTitle to save. Includes '\0' and padding */ char zTitle[100]; /* The title text */ /* ** sqlite3MallocDisallow() increments the following counter. ** sqlite3MallocAllow() decrements it. */ int disallow; /* Do not allow memory allocation */ /* ** Gather statistics on the sizes of memory allocations. ** nAlloc[i] is the number of allocation attempts of i*8 ** bytes. i==NCSIZE is the number of allocation attempts for ** sizes more than NCSIZE*8 bytes. */ int nAlloc[NCSIZE]; /* Total number of allocations */ int nCurrent[NCSIZE]; /* Current number of allocations */ int mxCurrent[NCSIZE]; /* Highwater mark for nCurrent */ } mem; /* ** Adjust memory usage statistics */ static void adjustStats(int iSize, int increment){ int i = ROUND8(iSize)/8; if( i>NCSIZE-1 ){ i = NCSIZE - 1; } if( increment>0 ){ mem.nAlloc[i]++; mem.nCurrent[i]++; if( mem.nCurrent[i]>mem.mxCurrent[i] ){ mem.mxCurrent[i] = mem.nCurrent[i]; } }else{ mem.nCurrent[i]--; assert( mem.nCurrent[i]>=0 ); } } /* ** Given an allocation, find the MemBlockHdr for that allocation. ** ** This routine checks the guards at either end of the allocation and ** if they are incorrect it asserts. 
*/ static struct MemBlockHdr *sqlite3MemsysGetHeader(void *pAllocation){ struct MemBlockHdr *p; int *pInt; u8 *pU8; int nReserve; p = (struct MemBlockHdr*)pAllocation; p--; assert( p->iForeGuard==(int)FOREGUARD ); nReserve = ROUND8(p->iSize); pInt = (int*)pAllocation; pU8 = (u8*)pAllocation; assert( pInt[nReserve/sizeof(int)]==(int)REARGUARD ); /* This checks any of the "extra" bytes allocated due ** to rounding up to an 8 byte boundary to ensure ** they haven't been overwritten. */ while( nReserve-- > p->iSize ) assert( pU8[nReserve]==0x65 ); return p; } /* ** Return the number of bytes currently allocated at address p. */ static int sqlite3MemSize(void *p){ struct MemBlockHdr *pHdr; if( !p ){ return 0; } pHdr = sqlite3MemsysGetHeader(p); return (int)pHdr->iSize; } /* ** Initialize the memory allocation subsystem. */ static int sqlite3MemInit(void *NotUsed){ UNUSED_PARAMETER(NotUsed); assert( (sizeof(struct MemBlockHdr)&7) == 0 ); if( !sqlite3GlobalConfig.bMemstat ){ /* If memory status is enabled, then the malloc.c wrapper will already ** hold the STATIC_MEM mutex when the routines here are invoked. */ mem.mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MEM); } return SQLITE_OK; } /* ** Deinitialize the memory allocation subsystem. */ static void sqlite3MemShutdown(void *NotUsed){ UNUSED_PARAMETER(NotUsed); mem.mutex = 0; } /* ** Round up a request size to the next valid allocation size. */ static int sqlite3MemRoundup(int n){ return ROUND8(n); } /* ** Fill a buffer with pseudo-random bytes. This is used to preset ** the content of a new memory allocation to unpredictable values and ** to clear the content of a freed allocation to unpredictable values. */ static void randomFill(char *pBuf, int nByte){ unsigned int x, y, r; x = SQLITE_PTR_TO_INT(pBuf); y = nByte | 1; while( nByte >= 4 ){ x = (x>>1) ^ (-(int)(x&1) & 0xd0000001); y = y*1103515245 + 12345; r = x ^ y; *(int*)pBuf = r; pBuf += 4; nByte -= 4; } while( nByte-- > 0 ){ x = (x>>1) ^ (-(int)(x&1) & 0xd0000001); y = y*1103515245 + 12345; r = x ^ y; *(pBuf++) = r & 0xff; } } /* ** Allocate nByte bytes of memory. */ static void *sqlite3MemMalloc(int nByte){ struct MemBlockHdr *pHdr; void **pBt; char *z; int *pInt; void *p = 0; int totalSize; int nReserve; sqlite3_mutex_enter(mem.mutex); assert( mem.disallow==0 ); nReserve = ROUND8(nByte); totalSize = nReserve + sizeof(*pHdr) + sizeof(int) + mem.nBacktrace*sizeof(void*) + mem.nTitle; p = malloc(totalSize); if( p ){ z = p; pBt = (void**)&z[mem.nTitle]; pHdr = (struct MemBlockHdr*)&pBt[mem.nBacktrace]; pHdr->pNext = 0; pHdr->pPrev = mem.pLast; if( mem.pLast ){ mem.pLast->pNext = pHdr; }else{ mem.pFirst = pHdr; } mem.pLast = pHdr; pHdr->iForeGuard = FOREGUARD; pHdr->eType = MEMTYPE_HEAP; pHdr->nBacktraceSlots = mem.nBacktrace; pHdr->nTitle = mem.nTitle; if( mem.nBacktrace ){ void *aAddr[40]; pHdr->nBacktrace = backtrace(aAddr, mem.nBacktrace+1)-1; memcpy(pBt, &aAddr[1], pHdr->nBacktrace*sizeof(void*)); assert(pBt[0]); if( mem.xBacktrace ){ mem.xBacktrace(nByte, pHdr->nBacktrace-1, &aAddr[1]); } }else{ pHdr->nBacktrace = 0; } if( mem.nTitle ){ memcpy(z, mem.zTitle, mem.nTitle); } pHdr->iSize = nByte; adjustStats(nByte, +1); pInt = (int*)&pHdr[1]; pInt[nReserve/sizeof(int)] = REARGUARD; randomFill((char*)pInt, nByte); memset(((char*)pInt)+nByte, 0x65, nReserve-nByte); p = (void*)pInt; } sqlite3_mutex_leave(mem.mutex); return p; } /* ** Free memory. 
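**
** (Illustrative aside, not part of the original comment: the pointer handed
** to free() is recovered by stepping back over the MemBlockHdr, then over
** the nBacktraceSlots saved return addresses, then over the nTitle title
** bytes, the mirror image of the layout assembled by sqlite3MemMalloc()
** above.  The whole region is overwritten with randomFill() before release
** so that any stale pointer into it is more likely to misbehave visibly.)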
*/ static void sqlite3MemFree(void *pPrior){ struct MemBlockHdr *pHdr; void **pBt; char *z; assert( sqlite3GlobalConfig.bMemstat || sqlite3GlobalConfig.bCoreMutex==0 || mem.mutex!=0 ); pHdr = sqlite3MemsysGetHeader(pPrior); pBt = (void**)pHdr; pBt -= pHdr->nBacktraceSlots; sqlite3_mutex_enter(mem.mutex); if( pHdr->pPrev ){ assert( pHdr->pPrev->pNext==pHdr ); pHdr->pPrev->pNext = pHdr->pNext; }else{ assert( mem.pFirst==pHdr ); mem.pFirst = pHdr->pNext; } if( pHdr->pNext ){ assert( pHdr->pNext->pPrev==pHdr ); pHdr->pNext->pPrev = pHdr->pPrev; }else{ assert( mem.pLast==pHdr ); mem.pLast = pHdr->pPrev; } z = (char*)pBt; z -= pHdr->nTitle; adjustStats((int)pHdr->iSize, -1); randomFill(z, sizeof(void*)*pHdr->nBacktraceSlots + sizeof(*pHdr) + (int)pHdr->iSize + sizeof(int) + pHdr->nTitle); free(z); sqlite3_mutex_leave(mem.mutex); } /* ** Change the size of an existing memory allocation. ** ** For this debugging implementation, we *always* make a copy of the ** allocation into a new place in memory. In this way, if the ** higher level code is using pointer to the old allocation, it is ** much more likely to break and we are much more liking to find ** the error. */ static void *sqlite3MemRealloc(void *pPrior, int nByte){ struct MemBlockHdr *pOldHdr; void *pNew; assert( mem.disallow==0 ); assert( (nByte & 7)==0 ); /* EV: R-46199-30249 */ pOldHdr = sqlite3MemsysGetHeader(pPrior); pNew = sqlite3MemMalloc(nByte); if( pNew ){ memcpy(pNew, pPrior, (int)(nByteiSize ? nByte : pOldHdr->iSize)); if( nByte>pOldHdr->iSize ){ randomFill(&((char*)pNew)[pOldHdr->iSize], nByte - (int)pOldHdr->iSize); } sqlite3MemFree(pPrior); } return pNew; } /* ** Populate the low-level memory allocation function pointers in ** sqlite3GlobalConfig.m with pointers to the routines in this file. */ SQLITE_PRIVATE void sqlite3MemSetDefault(void){ static const sqlite3_mem_methods defaultMethods = { sqlite3MemMalloc, sqlite3MemFree, sqlite3MemRealloc, sqlite3MemSize, sqlite3MemRoundup, sqlite3MemInit, sqlite3MemShutdown, 0 }; sqlite3_config(SQLITE_CONFIG_MALLOC, &defaultMethods); } /* ** Set the "type" of an allocation. */ SQLITE_PRIVATE void sqlite3MemdebugSetType(void *p, u8 eType){ if( p && sqlite3GlobalConfig.m.xMalloc==sqlite3MemMalloc ){ struct MemBlockHdr *pHdr; pHdr = sqlite3MemsysGetHeader(p); assert( pHdr->iForeGuard==FOREGUARD ); pHdr->eType = eType; } } /* ** Return TRUE if the mask of type in eType matches the type of the ** allocation p. Also return true if p==NULL. ** ** This routine is designed for use within an assert() statement, to ** verify the type of an allocation. For example: ** ** assert( sqlite3MemdebugHasType(p, MEMTYPE_HEAP) ); */ SQLITE_PRIVATE int sqlite3MemdebugHasType(void *p, u8 eType){ int rc = 1; if( p && sqlite3GlobalConfig.m.xMalloc==sqlite3MemMalloc ){ struct MemBlockHdr *pHdr; pHdr = sqlite3MemsysGetHeader(p); assert( pHdr->iForeGuard==FOREGUARD ); /* Allocation is valid */ if( (pHdr->eType&eType)==0 ){ rc = 0; } } return rc; } /* ** Return TRUE if the mask of type in eType matches no bits of the type of the ** allocation p. Also return true if p==NULL. ** ** This routine is designed for use within an assert() statement, to ** verify the type of an allocation. 
For example: ** ** assert( sqlite3MemdebugNoType(p, MEMTYPE_LOOKASIDE) ); */ SQLITE_PRIVATE int sqlite3MemdebugNoType(void *p, u8 eType){ int rc = 1; if( p && sqlite3GlobalConfig.m.xMalloc==sqlite3MemMalloc ){ struct MemBlockHdr *pHdr; pHdr = sqlite3MemsysGetHeader(p); assert( pHdr->iForeGuard==FOREGUARD ); /* Allocation is valid */ if( (pHdr->eType&eType)!=0 ){ rc = 0; } } return rc; } /* ** Set the number of backtrace levels kept for each allocation. ** A value of zero turns off backtracing. The number is always rounded ** up to a multiple of 2. */ SQLITE_PRIVATE void sqlite3MemdebugBacktrace(int depth){ if( depth<0 ){ depth = 0; } if( depth>20 ){ depth = 20; } depth = (depth+1)&0xfe; mem.nBacktrace = depth; } SQLITE_PRIVATE void sqlite3MemdebugBacktraceCallback(void (*xBacktrace)(int, int, void **)){ mem.xBacktrace = xBacktrace; } /* ** Set the title string for subsequent allocations. */ SQLITE_PRIVATE void sqlite3MemdebugSettitle(const char *zTitle){ unsigned int n = sqlite3Strlen30(zTitle) + 1; sqlite3_mutex_enter(mem.mutex); if( n>=sizeof(mem.zTitle) ) n = sizeof(mem.zTitle)-1; memcpy(mem.zTitle, zTitle, n); mem.zTitle[n] = 0; mem.nTitle = ROUND8(n); sqlite3_mutex_leave(mem.mutex); } SQLITE_PRIVATE void sqlite3MemdebugSync(){ struct MemBlockHdr *pHdr; for(pHdr=mem.pFirst; pHdr; pHdr=pHdr->pNext){ void **pBt = (void**)pHdr; pBt -= pHdr->nBacktraceSlots; mem.xBacktrace((int)pHdr->iSize, pHdr->nBacktrace-1, &pBt[1]); } } /* ** Open the file indicated and write a log of all unfreed memory ** allocations into that log. */ SQLITE_PRIVATE void sqlite3MemdebugDump(const char *zFilename){ FILE *out; struct MemBlockHdr *pHdr; void **pBt; int i; out = fopen(zFilename, "w"); if( out==0 ){ fprintf(stderr, "** Unable to output memory debug output log: %s **\n", zFilename); return; } for(pHdr=mem.pFirst; pHdr; pHdr=pHdr->pNext){ char *z = (char*)pHdr; z -= pHdr->nBacktraceSlots*sizeof(void*) + pHdr->nTitle; fprintf(out, "**** %lld bytes at %p from %s ****\n", pHdr->iSize, &pHdr[1], pHdr->nTitle ? z : "???"); if( pHdr->nBacktrace ){ fflush(out); pBt = (void**)pHdr; pBt -= pHdr->nBacktraceSlots; backtrace_symbols_fd(pBt, pHdr->nBacktrace, fileno(out)); fprintf(out, "\n"); } } fprintf(out, "COUNTS:\n"); for(i=0; i=1 ); size = mem3.aPool[i-1].u.hdr.size4x/4; assert( size==mem3.aPool[i+size-1].u.hdr.prevSize ); assert( size>=2 ); if( size <= MX_SMALL ){ memsys3UnlinkFromList(i, &mem3.aiSmall[size-2]); }else{ hash = size % N_HASH; memsys3UnlinkFromList(i, &mem3.aiHash[hash]); } } /* ** Link the chunk at mem3.aPool[i] so that is on the list rooted ** at *pRoot. */ static void memsys3LinkIntoList(u32 i, u32 *pRoot){ assert( sqlite3_mutex_held(mem3.mutex) ); mem3.aPool[i].u.list.next = *pRoot; mem3.aPool[i].u.list.prev = 0; if( *pRoot ){ mem3.aPool[*pRoot].u.list.prev = i; } *pRoot = i; } /* ** Link the chunk at index i into either the appropriate ** small chunk list, or into the large chunk hash table. */ static void memsys3Link(u32 i){ u32 size, hash; assert( sqlite3_mutex_held(mem3.mutex) ); assert( i>=1 ); assert( (mem3.aPool[i-1].u.hdr.size4x & 1)==0 ); size = mem3.aPool[i-1].u.hdr.size4x/4; assert( size==mem3.aPool[i+size-1].u.hdr.prevSize ); assert( size>=2 ); if( size <= MX_SMALL ){ memsys3LinkIntoList(i, &mem3.aiSmall[size-2]); }else{ hash = size % N_HASH; memsys3LinkIntoList(i, &mem3.aiHash[hash]); } } /* ** If the STATIC_MEM mutex is not already held, obtain it now. The mutex ** will already be held (obtained by code in malloc.c) if ** sqlite3GlobalConfig.bMemStat is true. 
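**
** (Illustrative aside, not part of the original comment: in that case the
** test below leaves mem3.mutex set to zero, and sqlite3_mutex_enter(0) is
** defined to be a harmless no-op, so memsys3Enter() costs essentially
** nothing when the malloc.c wrapper is already serializing access.)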
*/ static void memsys3Enter(void){ if( sqlite3GlobalConfig.bMemstat==0 && mem3.mutex==0 ){ mem3.mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MEM); } sqlite3_mutex_enter(mem3.mutex); } static void memsys3Leave(void){ sqlite3_mutex_leave(mem3.mutex); } /* ** Called when we are unable to satisfy an allocation of nBytes. */ static void memsys3OutOfMemory(int nByte){ if( !mem3.alarmBusy ){ mem3.alarmBusy = 1; assert( sqlite3_mutex_held(mem3.mutex) ); sqlite3_mutex_leave(mem3.mutex); sqlite3_release_memory(nByte); sqlite3_mutex_enter(mem3.mutex); mem3.alarmBusy = 0; } } /* ** Chunk i is a free chunk that has been unlinked. Adjust its ** size parameters for check-out and return a pointer to the ** user portion of the chunk. */ static void *memsys3Checkout(u32 i, u32 nBlock){ u32 x; assert( sqlite3_mutex_held(mem3.mutex) ); assert( i>=1 ); assert( mem3.aPool[i-1].u.hdr.size4x/4==nBlock ); assert( mem3.aPool[i+nBlock-1].u.hdr.prevSize==nBlock ); x = mem3.aPool[i-1].u.hdr.size4x; mem3.aPool[i-1].u.hdr.size4x = nBlock*4 | 1 | (x&2); mem3.aPool[i+nBlock-1].u.hdr.prevSize = nBlock; mem3.aPool[i+nBlock-1].u.hdr.size4x |= 2; return &mem3.aPool[i]; } /* ** Carve a piece off of the end of the mem3.iMaster free chunk. ** Return a pointer to the new allocation. Or, if the master chunk ** is not large enough, return 0. */ static void *memsys3FromMaster(u32 nBlock){ assert( sqlite3_mutex_held(mem3.mutex) ); assert( mem3.szMaster>=nBlock ); if( nBlock>=mem3.szMaster-1 ){ /* Use the entire master */ void *p = memsys3Checkout(mem3.iMaster, mem3.szMaster); mem3.iMaster = 0; mem3.szMaster = 0; mem3.mnMaster = 0; return p; }else{ /* Split the master block. Return the tail. */ u32 newi, x; newi = mem3.iMaster + mem3.szMaster - nBlock; assert( newi > mem3.iMaster+1 ); mem3.aPool[mem3.iMaster+mem3.szMaster-1].u.hdr.prevSize = nBlock; mem3.aPool[mem3.iMaster+mem3.szMaster-1].u.hdr.size4x |= 2; mem3.aPool[newi-1].u.hdr.size4x = nBlock*4 + 1; mem3.szMaster -= nBlock; mem3.aPool[newi-1].u.hdr.prevSize = mem3.szMaster; x = mem3.aPool[mem3.iMaster-1].u.hdr.size4x & 2; mem3.aPool[mem3.iMaster-1].u.hdr.size4x = mem3.szMaster*4 | x; if( mem3.szMaster < mem3.mnMaster ){ mem3.mnMaster = mem3.szMaster; } return (void*)&mem3.aPool[newi]; } } /* ** *pRoot is the head of a list of free chunks of the same size ** or same size hash. In other words, *pRoot is an entry in either ** mem3.aiSmall[] or mem3.aiHash[]. ** ** This routine examines all entries on the given list and tries ** to coalesce each entries with adjacent free chunks. ** ** If it sees a chunk that is larger than mem3.iMaster, it replaces ** the current mem3.iMaster with the new larger chunk. In order for ** this mem3.iMaster replacement to work, the master chunk must be ** linked into the hash tables. That is not the normal state of ** affairs, of course. The calling routine must link the master ** chunk before invoking this routine, then must unlink the (possibly ** changed) master chunk once this routine has finished. 
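**
** Calling-pattern sketch (an illustrative aside, not part of the original
** comment; this is essentially what step 3 of memsys3MallocUnsafe() does):
**
**    if( mem3.iMaster ){
**      memsys3Link(mem3.iMaster);      put the master onto the free lists
**      mem3.iMaster = 0;
**      mem3.szMaster = 0;
**    }
**    ... invoke memsys3Merge() on every aiSmall[] and aiHash[] root ...
**    if( mem3.iMaster ){
**      memsys3Unlink(mem3.iMaster);    take the (possibly new) master back
**    }                                 off the lists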
*/ static void memsys3Merge(u32 *pRoot){ u32 iNext, prev, size, i, x; assert( sqlite3_mutex_held(mem3.mutex) ); for(i=*pRoot; i>0; i=iNext){ iNext = mem3.aPool[i].u.list.next; size = mem3.aPool[i-1].u.hdr.size4x; assert( (size&1)==0 ); if( (size&2)==0 ){ memsys3UnlinkFromList(i, pRoot); assert( i > mem3.aPool[i-1].u.hdr.prevSize ); prev = i - mem3.aPool[i-1].u.hdr.prevSize; if( prev==iNext ){ iNext = mem3.aPool[prev].u.list.next; } memsys3Unlink(prev); size = i + size/4 - prev; x = mem3.aPool[prev-1].u.hdr.size4x & 2; mem3.aPool[prev-1].u.hdr.size4x = size*4 | x; mem3.aPool[prev+size-1].u.hdr.prevSize = size; memsys3Link(prev); i = prev; }else{ size /= 4; } if( size>mem3.szMaster ){ mem3.iMaster = i; mem3.szMaster = size; } } } /* ** Return a block of memory of at least nBytes in size. ** Return NULL if unable. ** ** This function assumes that the necessary mutexes, if any, are ** already held by the caller. Hence "Unsafe". */ static void *memsys3MallocUnsafe(int nByte){ u32 i; u32 nBlock; u32 toFree; assert( sqlite3_mutex_held(mem3.mutex) ); assert( sizeof(Mem3Block)==8 ); if( nByte<=12 ){ nBlock = 2; }else{ nBlock = (nByte + 11)/8; } assert( nBlock>=2 ); /* STEP 1: ** Look for an entry of the correct size in either the small ** chunk table or in the large chunk hash table. This is ** successful most of the time (about 9 times out of 10). */ if( nBlock <= MX_SMALL ){ i = mem3.aiSmall[nBlock-2]; if( i>0 ){ memsys3UnlinkFromList(i, &mem3.aiSmall[nBlock-2]); return memsys3Checkout(i, nBlock); } }else{ int hash = nBlock % N_HASH; for(i=mem3.aiHash[hash]; i>0; i=mem3.aPool[i].u.list.next){ if( mem3.aPool[i-1].u.hdr.size4x/4==nBlock ){ memsys3UnlinkFromList(i, &mem3.aiHash[hash]); return memsys3Checkout(i, nBlock); } } } /* STEP 2: ** Try to satisfy the allocation by carving a piece off of the end ** of the master chunk. This step usually works if step 1 fails. */ if( mem3.szMaster>=nBlock ){ return memsys3FromMaster(nBlock); } /* STEP 3: ** Loop through the entire memory pool. Coalesce adjacent free ** chunks. Recompute the master chunk as the largest free chunk. ** Then try again to satisfy the allocation by carving a piece off ** of the end of the master chunk. This step happens very ** rarely (we hope!) */ for(toFree=nBlock*16; toFree<(mem3.nPool*16); toFree *= 2){ memsys3OutOfMemory(toFree); if( mem3.iMaster ){ memsys3Link(mem3.iMaster); mem3.iMaster = 0; mem3.szMaster = 0; } for(i=0; i=nBlock ){ return memsys3FromMaster(nBlock); } } } /* If none of the above worked, then we fail. */ return 0; } /* ** Free an outstanding memory allocation. ** ** This function assumes that the necessary mutexes, if any, are ** already held by the caller. Hence "Unsafe". 
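**
** (Usage sketch, an illustrative aside rather than original text: external
** callers go through the thread-safe wrapper, i.e.
**
**    memsys3Enter();
**    memsys3FreeUnsafe(pOld);
**    memsys3Leave();
**
** which is exactly what memsys3Free() further down does.)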
*/ static void memsys3FreeUnsafe(void *pOld){ Mem3Block *p = (Mem3Block*)pOld; int i; u32 size, x; assert( sqlite3_mutex_held(mem3.mutex) ); assert( p>mem3.aPool && p<&mem3.aPool[mem3.nPool] ); i = p - mem3.aPool; assert( (mem3.aPool[i-1].u.hdr.size4x&1)==1 ); size = mem3.aPool[i-1].u.hdr.size4x/4; assert( i+size<=mem3.nPool+1 ); mem3.aPool[i-1].u.hdr.size4x &= ~1; mem3.aPool[i+size-1].u.hdr.prevSize = size; mem3.aPool[i+size-1].u.hdr.size4x &= ~2; memsys3Link(i); /* Try to expand the master using the newly freed chunk */ if( mem3.iMaster ){ while( (mem3.aPool[mem3.iMaster-1].u.hdr.size4x&2)==0 ){ size = mem3.aPool[mem3.iMaster-1].u.hdr.prevSize; mem3.iMaster -= size; mem3.szMaster += size; memsys3Unlink(mem3.iMaster); x = mem3.aPool[mem3.iMaster-1].u.hdr.size4x & 2; mem3.aPool[mem3.iMaster-1].u.hdr.size4x = mem3.szMaster*4 | x; mem3.aPool[mem3.iMaster+mem3.szMaster-1].u.hdr.prevSize = mem3.szMaster; } x = mem3.aPool[mem3.iMaster-1].u.hdr.size4x & 2; while( (mem3.aPool[mem3.iMaster+mem3.szMaster-1].u.hdr.size4x&1)==0 ){ memsys3Unlink(mem3.iMaster+mem3.szMaster); mem3.szMaster += mem3.aPool[mem3.iMaster+mem3.szMaster-1].u.hdr.size4x/4; mem3.aPool[mem3.iMaster-1].u.hdr.size4x = mem3.szMaster*4 | x; mem3.aPool[mem3.iMaster+mem3.szMaster-1].u.hdr.prevSize = mem3.szMaster; } } } /* ** Return the size of an outstanding allocation, in bytes. The ** size returned omits the 8-byte header overhead. This only ** works for chunks that are currently checked out. */ static int memsys3Size(void *p){ Mem3Block *pBlock; if( p==0 ) return 0; pBlock = (Mem3Block*)p; assert( (pBlock[-1].u.hdr.size4x&1)!=0 ); return (pBlock[-1].u.hdr.size4x&~3)*2 - 4; } /* ** Round up a request size to the next valid allocation size. */ static int memsys3Roundup(int n){ if( n<=12 ){ return 12; }else{ return ((n+11)&~7) - 4; } } /* ** Allocate nBytes of memory. */ static void *memsys3Malloc(int nBytes){ sqlite3_int64 *p; assert( nBytes>0 ); /* malloc.c filters out 0 byte requests */ memsys3Enter(); p = memsys3MallocUnsafe(nBytes); memsys3Leave(); return (void*)p; } /* ** Free memory. */ static void memsys3Free(void *pPrior){ assert( pPrior ); memsys3Enter(); memsys3FreeUnsafe(pPrior); memsys3Leave(); } /* ** Change the size of an existing memory allocation */ static void *memsys3Realloc(void *pPrior, int nBytes){ int nOld; void *p; if( pPrior==0 ){ return sqlite3_malloc(nBytes); } if( nBytes<=0 ){ sqlite3_free(pPrior); return 0; } nOld = memsys3Size(pPrior); if( nBytes<=nOld && nBytes>=nOld-128 ){ return pPrior; } memsys3Enter(); p = memsys3MallocUnsafe(nBytes); if( p ){ if( nOld>1)!=(size&1) ){ fprintf(out, "%p tail checkout bit is incorrect\n", &mem3.aPool[i]); assert( 0 ); break; } if( size&1 ){ fprintf(out, "%p %6d bytes checked out\n", &mem3.aPool[i], (size/4)*8-8); }else{ fprintf(out, "%p %6d bytes free%s\n", &mem3.aPool[i], (size/4)*8-8, i==mem3.iMaster ? 
" **master**" : ""); } } for(i=0; i0; j=mem3.aPool[j].u.list.next){ fprintf(out, " %p(%d)", &mem3.aPool[j], (mem3.aPool[j-1].u.hdr.size4x/4)*8-8); } fprintf(out, "\n"); } for(i=0; i0; j=mem3.aPool[j].u.list.next){ fprintf(out, " %p(%d)", &mem3.aPool[j], (mem3.aPool[j-1].u.hdr.size4x/4)*8-8); } fprintf(out, "\n"); } fprintf(out, "master=%d\n", mem3.iMaster); fprintf(out, "nowUsed=%d\n", mem3.nPool*8 - mem3.szMaster*8); fprintf(out, "mxUsed=%d\n", mem3.nPool*8 - mem3.mnMaster*8); sqlite3_mutex_leave(mem3.mutex); if( out==stdout ){ fflush(stdout); }else{ fclose(out); } #else UNUSED_PARAMETER(zFilename); #endif } /* ** This routine is the only routine in this file with external ** linkage. ** ** Populate the low-level memory allocation function pointers in ** sqlite3GlobalConfig.m with pointers to the routines in this file. The ** arguments specify the block of memory to manage. ** ** This routine is only called by sqlite3_config(), and therefore ** is not required to be threadsafe (it is not). */ SQLITE_PRIVATE const sqlite3_mem_methods *sqlite3MemGetMemsys3(void){ static const sqlite3_mem_methods mempoolMethods = { memsys3Malloc, memsys3Free, memsys3Realloc, memsys3Size, memsys3Roundup, memsys3Init, memsys3Shutdown, 0 }; return &mempoolMethods; } #endif /* SQLITE_ENABLE_MEMSYS3 */ /************** End of mem3.c ************************************************/ /************** Begin file mem5.c ********************************************/ /* ** 2007 October 14 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** This file contains the C functions that implement a memory ** allocation subsystem for use by SQLite. ** ** This version of the memory allocation subsystem omits all ** use of malloc(). The application gives SQLite a block of memory ** before calling sqlite3_initialize() from which allocations ** are made and returned by the xMalloc() and xRealloc() ** implementations. Once sqlite3_initialize() has been called, ** the amount of memory available to SQLite is fixed and cannot ** be changed. ** ** This version of the memory allocation subsystem is included ** in the build only if SQLITE_ENABLE_MEMSYS5 is defined. ** ** This memory allocator uses the following algorithm: ** ** 1. All memory allocations sizes are rounded up to a power of 2. ** ** 2. If two adjacent free blocks are the halves of a larger block, ** then the two blocks are coalesced into the single larger block. ** ** 3. New memory is allocated from the first available free block. ** ** This algorithm is described in: J. M. Robson. "Bounds for Some Functions ** Concerning Dynamic Storage Allocation". Journal of the Association for ** Computing Machinery, Volume 21, Number 8, July 1974, pages 491-499. ** ** Let n be the size of the largest allocation divided by the minimum ** allocation size (after rounding all sizes up to a power of 2.) Let M ** be the maximum amount of memory ever outstanding at one time. Let ** N be the total amount of memory available for allocation. 
Robson ** proved that this memory allocator will never breakdown due to ** fragmentation as long as the following constraint holds: ** ** N >= M*(1 + log2(n)/2) - n + 1 ** ** The sqlite3_status() logic tracks the maximum values of n and M so ** that an application can, at any time, verify this constraint. */ /* #include "sqliteInt.h" */ /* ** This version of the memory allocator is used only when ** SQLITE_ENABLE_MEMSYS5 is defined. */ #ifdef SQLITE_ENABLE_MEMSYS5 /* ** A minimum allocation is an instance of the following structure. ** Larger allocations are an array of these structures where the ** size of the array is a power of 2. ** ** The size of this object must be a power of two. That fact is ** verified in memsys5Init(). */ typedef struct Mem5Link Mem5Link; struct Mem5Link { int next; /* Index of next free chunk */ int prev; /* Index of previous free chunk */ }; /* ** Maximum size of any allocation is ((1<=0 && i=0 && iLogsize<=LOGMAX ); assert( (mem5.aCtrl[i] & CTRL_LOGSIZE)==iLogsize ); next = MEM5LINK(i)->next; prev = MEM5LINK(i)->prev; if( prev<0 ){ mem5.aiFreelist[iLogsize] = next; }else{ MEM5LINK(prev)->next = next; } if( next>=0 ){ MEM5LINK(next)->prev = prev; } } /* ** Link the chunk at mem5.aPool[i] so that is on the iLogsize ** free list. */ static void memsys5Link(int i, int iLogsize){ int x; assert( sqlite3_mutex_held(mem5.mutex) ); assert( i>=0 && i=0 && iLogsize<=LOGMAX ); assert( (mem5.aCtrl[i] & CTRL_LOGSIZE)==iLogsize ); x = MEM5LINK(i)->next = mem5.aiFreelist[iLogsize]; MEM5LINK(i)->prev = -1; if( x>=0 ){ assert( xprev = i; } mem5.aiFreelist[iLogsize] = i; } /* ** If the STATIC_MEM mutex is not already held, obtain it now. The mutex ** will already be held (obtained by code in malloc.c) if ** sqlite3GlobalConfig.bMemStat is true. */ static void memsys5Enter(void){ sqlite3_mutex_enter(mem5.mutex); } static void memsys5Leave(void){ sqlite3_mutex_leave(mem5.mutex); } /* ** Return the size of an outstanding allocation, in bytes. The ** size returned omits the 8-byte header overhead. This only ** works for chunks that are currently checked out. */ static int memsys5Size(void *p){ int iSize = 0; if( p ){ int i = (int)(((u8 *)p-mem5.zPool)/mem5.szAtom); assert( i>=0 && i0 ); /* Keep track of the maximum allocation request. Even unfulfilled ** requests are counted */ if( (u32)nByte>mem5.maxRequest ){ mem5.maxRequest = nByte; } /* Abort if the requested allocation size is larger than the largest ** power of two that we can represent using 32-bit signed integers. */ if( nByte > 0x40000000 ){ return 0; } /* Round nByte up to the next valid power of two */ for(iFullSz=mem5.szAtom, iLogsize=0; iFullSzLOGMAX ){ testcase( sqlite3GlobalConfig.xLog!=0 ); sqlite3_log(SQLITE_NOMEM, "failed to allocate %u bytes", nByte); return 0; } i = mem5.aiFreelist[iBin]; memsys5Unlink(i, iBin); while( iBin>iLogsize ){ int newSize; iBin--; newSize = 1 << iBin; mem5.aCtrl[i+newSize] = CTRL_FREE | iBin; memsys5Link(i+newSize, iBin); } mem5.aCtrl[i] = iLogsize; /* Update allocator performance statistics. 
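** (Illustrative aside, not part of the original comment: if a request that
** rounds to a single atom arrives while the smallest available block sits
** in bin 3, the loop above peels that 8-atom block apart, handing one atom
** to the caller and putting free buddies of 4, 2 and 1 atoms back onto
** their respective free lists.  The counters below then record the
** allocation.)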
*/ mem5.nAlloc++; mem5.totalAlloc += iFullSz; mem5.totalExcess += iFullSz - nByte; mem5.currentCount++; mem5.currentOut += iFullSz; if( mem5.maxCount=0 && iBlock0 ); assert( mem5.currentOut>=(size*mem5.szAtom) ); mem5.currentCount--; mem5.currentOut -= size*mem5.szAtom; assert( mem5.currentOut>0 || mem5.currentCount==0 ); assert( mem5.currentCount>0 || mem5.currentOut==0 ); mem5.aCtrl[iBlock] = CTRL_FREE | iLogsize; while( ALWAYS(iLogsize>iLogsize) & 1 ){ iBuddy = iBlock - size; }else{ iBuddy = iBlock + size; } assert( iBuddy>=0 ); if( (iBuddy+(1<mem5.nBlock ) break; if( mem5.aCtrl[iBuddy]!=(CTRL_FREE | iLogsize) ) break; memsys5Unlink(iBuddy, iLogsize); iLogsize++; if( iBuddy0 ){ memsys5Enter(); p = memsys5MallocUnsafe(nBytes); memsys5Leave(); } return (void*)p; } /* ** Free memory. ** ** The outer layer memory allocator prevents this routine from ** being called with pPrior==0. */ static void memsys5Free(void *pPrior){ assert( pPrior!=0 ); memsys5Enter(); memsys5FreeUnsafe(pPrior); memsys5Leave(); } /* ** Change the size of an existing memory allocation. ** ** The outer layer memory allocator prevents this routine from ** being called with pPrior==0. ** ** nBytes is always a value obtained from a prior call to ** memsys5Round(). Hence nBytes is always a non-negative power ** of two. If nBytes==0 that means that an oversize allocation ** (an allocation larger than 0x40000000) was requested and this ** routine should return 0 without freeing pPrior. */ static void *memsys5Realloc(void *pPrior, int nBytes){ int nOld; void *p; assert( pPrior!=0 ); assert( (nBytes&(nBytes-1))==0 ); /* EV: R-46199-30249 */ assert( nBytes>=0 ); if( nBytes==0 ){ return 0; } nOld = memsys5Size(pPrior); if( nBytes<=nOld ){ return pPrior; } memsys5Enter(); p = memsys5MallocUnsafe(nBytes); if( p ){ memcpy(p, pPrior, nOld); memsys5FreeUnsafe(pPrior); } memsys5Leave(); return p; } /* ** Round up a request size to the next valid allocation size. If ** the allocation is too large to be handled by this allocation system, ** return 0. ** ** All allocations must be a power of two and must be expressed by a ** 32-bit signed integer. Hence the largest allocation is 0x40000000 ** or 1073741824 bytes. */ static int memsys5Roundup(int n){ int iFullSz; if( n > 0x40000000 ) return 0; for(iFullSz=mem5.szAtom; iFullSz 0 ** memsys5Log(2) -> 1 ** memsys5Log(4) -> 2 ** memsys5Log(5) -> 3 ** memsys5Log(8) -> 3 ** memsys5Log(9) -> 4 */ static int memsys5Log(int iValue){ int iLog; for(iLog=0; (iLog<(int)((sizeof(int)*8)-1)) && (1<mem5.szAtom ){ mem5.szAtom = mem5.szAtom << 1; } mem5.nBlock = (nByte / (mem5.szAtom+sizeof(u8))); mem5.zPool = zByte; mem5.aCtrl = (u8 *)&mem5.zPool[mem5.nBlock*mem5.szAtom]; for(ii=0; ii<=LOGMAX; ii++){ mem5.aiFreelist[ii] = -1; } iOffset = 0; for(ii=LOGMAX; ii>=0; ii--){ int nAlloc = (1<mem5.nBlock); } /* If a mutex is required for normal operation, allocate one */ if( sqlite3GlobalConfig.bMemstat==0 ){ mem5.mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MEM); } return SQLITE_OK; } /* ** Deinitialize this module. */ static void memsys5Shutdown(void *NotUsed){ UNUSED_PARAMETER(NotUsed); mem5.mutex = 0; return; } #ifdef SQLITE_TEST /* ** Open the file indicated and write a log of all unfreed memory ** allocations into that log. 
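**
** (Usage sketch, an illustrative aside rather than original text:
**
**    sqlite3Memsys5Dump(0);            report written to stdout
**    sqlite3Memsys5Dump("mem5.txt");   report written to the named file
**
** A NULL or empty filename selects standard output, as the code below
** shows.)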
*/ SQLITE_PRIVATE void sqlite3Memsys5Dump(const char *zFilename){ FILE *out; int i, j, n; int nMinLog; if( zFilename==0 || zFilename[0]==0 ){ out = stdout; }else{ out = fopen(zFilename, "w"); if( out==0 ){ fprintf(stderr, "** Unable to output memory debug output log: %s **\n", zFilename); return; } } memsys5Enter(); nMinLog = memsys5Log(mem5.szAtom); for(i=0; i<=LOGMAX && i+nMinLog<32; i++){ for(n=0, j=mem5.aiFreelist[i]; j>=0; j = MEM5LINK(j)->next, n++){} fprintf(out, "freelist items of size %d: %d\n", mem5.szAtom << i, n); } fprintf(out, "mem5.nAlloc = %llu\n", mem5.nAlloc); fprintf(out, "mem5.totalAlloc = %llu\n", mem5.totalAlloc); fprintf(out, "mem5.totalExcess = %llu\n", mem5.totalExcess); fprintf(out, "mem5.currentOut = %u\n", mem5.currentOut); fprintf(out, "mem5.currentCount = %u\n", mem5.currentCount); fprintf(out, "mem5.maxOut = %u\n", mem5.maxOut); fprintf(out, "mem5.maxCount = %u\n", mem5.maxCount); fprintf(out, "mem5.maxRequest = %u\n", mem5.maxRequest); memsys5Leave(); if( out==stdout ){ fflush(stdout); }else{ fclose(out); } } #endif /* ** This routine is the only routine in this file with external ** linkage. It returns a pointer to a static sqlite3_mem_methods ** struct populated with the memsys5 methods. */ SQLITE_PRIVATE const sqlite3_mem_methods *sqlite3MemGetMemsys5(void){ static const sqlite3_mem_methods memsys5Methods = { memsys5Malloc, memsys5Free, memsys5Realloc, memsys5Size, memsys5Roundup, memsys5Init, memsys5Shutdown, 0 }; return &memsys5Methods; } #endif /* SQLITE_ENABLE_MEMSYS5 */ /************** End of mem5.c ************************************************/ /************** Begin file mutex.c *******************************************/ /* ** 2007 August 14 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** This file contains the C functions that implement mutexes. ** ** This file contains code that is common across all mutex implementations. */ /* #include "sqliteInt.h" */ #if defined(SQLITE_DEBUG) && !defined(SQLITE_MUTEX_OMIT) /* ** For debugging purposes, record when the mutex subsystem is initialized ** and uninitialized so that we can assert() if there is an attempt to ** allocate a mutex while the system is uninitialized. */ static SQLITE_WSD int mutexIsInit = 0; #endif /* SQLITE_DEBUG */ #ifndef SQLITE_MUTEX_OMIT /* ** Initialize the mutex system. */ SQLITE_PRIVATE int sqlite3MutexInit(void){ int rc = SQLITE_OK; if( !sqlite3GlobalConfig.mutex.xMutexAlloc ){ /* If the xMutexAlloc method has not been set, then the user did not ** install a mutex implementation via sqlite3_config() prior to ** sqlite3_initialize() being called. This block copies pointers to ** the default implementation into the sqlite3GlobalConfig structure. 
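**
** (Illustrative aside, not part of the original comment: an application
** that wants to supply its own mutexes bypasses this copy by calling
**
**    sqlite3_config(SQLITE_CONFIG_MUTEX, &appMutexMethods);
**
** before sqlite3_initialize(), where appMutexMethods is a hypothetical
** sqlite3_mutex_methods object filled in by the application.)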
*/ sqlite3_mutex_methods const *pFrom; sqlite3_mutex_methods *pTo = &sqlite3GlobalConfig.mutex; if( sqlite3GlobalConfig.bCoreMutex ){ pFrom = sqlite3DefaultMutex(); }else{ pFrom = sqlite3NoopMutex(); } pTo->xMutexInit = pFrom->xMutexInit; pTo->xMutexEnd = pFrom->xMutexEnd; pTo->xMutexFree = pFrom->xMutexFree; pTo->xMutexEnter = pFrom->xMutexEnter; pTo->xMutexTry = pFrom->xMutexTry; pTo->xMutexLeave = pFrom->xMutexLeave; pTo->xMutexHeld = pFrom->xMutexHeld; pTo->xMutexNotheld = pFrom->xMutexNotheld; pTo->xMutexAlloc = pFrom->xMutexAlloc; } rc = sqlite3GlobalConfig.mutex.xMutexInit(); #ifdef SQLITE_DEBUG GLOBAL(int, mutexIsInit) = 1; #endif return rc; } /* ** Shutdown the mutex system. This call frees resources allocated by ** sqlite3MutexInit(). */ SQLITE_PRIVATE int sqlite3MutexEnd(void){ int rc = SQLITE_OK; if( sqlite3GlobalConfig.mutex.xMutexEnd ){ rc = sqlite3GlobalConfig.mutex.xMutexEnd(); } #ifdef SQLITE_DEBUG GLOBAL(int, mutexIsInit) = 0; #endif return rc; } /* ** Retrieve a pointer to a static mutex or allocate a new dynamic one. */ SQLITE_API sqlite3_mutex *SQLITE_STDCALL sqlite3_mutex_alloc(int id){ #ifndef SQLITE_OMIT_AUTOINIT if( id<=SQLITE_MUTEX_RECURSIVE && sqlite3_initialize() ) return 0; if( id>SQLITE_MUTEX_RECURSIVE && sqlite3MutexInit() ) return 0; #endif return sqlite3GlobalConfig.mutex.xMutexAlloc(id); } SQLITE_PRIVATE sqlite3_mutex *sqlite3MutexAlloc(int id){ if( !sqlite3GlobalConfig.bCoreMutex ){ return 0; } assert( GLOBAL(int, mutexIsInit) ); return sqlite3GlobalConfig.mutex.xMutexAlloc(id); } /* ** Free a dynamic mutex. */ SQLITE_API void SQLITE_STDCALL sqlite3_mutex_free(sqlite3_mutex *p){ if( p ){ sqlite3GlobalConfig.mutex.xMutexFree(p); } } /* ** Obtain the mutex p. If some other thread already has the mutex, block ** until it can be obtained. */ SQLITE_API void SQLITE_STDCALL sqlite3_mutex_enter(sqlite3_mutex *p){ if( p ){ sqlite3GlobalConfig.mutex.xMutexEnter(p); } } /* ** Obtain the mutex p. If successful, return SQLITE_OK. Otherwise, if another ** thread holds the mutex and it cannot be obtained, return SQLITE_BUSY. */ SQLITE_API int SQLITE_STDCALL sqlite3_mutex_try(sqlite3_mutex *p){ int rc = SQLITE_OK; if( p ){ return sqlite3GlobalConfig.mutex.xMutexTry(p); } return rc; } /* ** The sqlite3_mutex_leave() routine exits a mutex that was previously ** entered by the same thread. The behavior is undefined if the mutex ** is not currently entered. If a NULL pointer is passed as an argument ** this function is a no-op. */ SQLITE_API void SQLITE_STDCALL sqlite3_mutex_leave(sqlite3_mutex *p){ if( p ){ sqlite3GlobalConfig.mutex.xMutexLeave(p); } } #ifndef NDEBUG /* ** The sqlite3_mutex_held() and sqlite3_mutex_notheld() routine are ** intended for use inside assert() statements. */ SQLITE_API int SQLITE_STDCALL sqlite3_mutex_held(sqlite3_mutex *p){ return p==0 || sqlite3GlobalConfig.mutex.xMutexHeld(p); } SQLITE_API int SQLITE_STDCALL sqlite3_mutex_notheld(sqlite3_mutex *p){ return p==0 || sqlite3GlobalConfig.mutex.xMutexNotheld(p); } #endif #endif /* !defined(SQLITE_MUTEX_OMIT) */ /************** End of mutex.c ***********************************************/ /************** Begin file mutex_noop.c **************************************/ /* ** 2008 October 07 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. 
** ************************************************************************* ** This file contains the C functions that implement mutexes. ** ** This implementation in this file does not provide any mutual ** exclusion and is thus suitable for use only in applications ** that use SQLite in a single thread. The routines defined ** here are place-holders. Applications can substitute working ** mutex routines at start-time using the ** ** sqlite3_config(SQLITE_CONFIG_MUTEX,...) ** ** interface. ** ** If compiled with SQLITE_DEBUG, then additional logic is inserted ** that does error checking on mutexes to make sure they are being ** called correctly. */ /* #include "sqliteInt.h" */ #ifndef SQLITE_MUTEX_OMIT #ifndef SQLITE_DEBUG /* ** Stub routines for all mutex methods. ** ** This routines provide no mutual exclusion or error checking. */ static int noopMutexInit(void){ return SQLITE_OK; } static int noopMutexEnd(void){ return SQLITE_OK; } static sqlite3_mutex *noopMutexAlloc(int id){ UNUSED_PARAMETER(id); return (sqlite3_mutex*)8; } static void noopMutexFree(sqlite3_mutex *p){ UNUSED_PARAMETER(p); return; } static void noopMutexEnter(sqlite3_mutex *p){ UNUSED_PARAMETER(p); return; } static int noopMutexTry(sqlite3_mutex *p){ UNUSED_PARAMETER(p); return SQLITE_OK; } static void noopMutexLeave(sqlite3_mutex *p){ UNUSED_PARAMETER(p); return; } SQLITE_PRIVATE sqlite3_mutex_methods const *sqlite3NoopMutex(void){ static const sqlite3_mutex_methods sMutex = { noopMutexInit, noopMutexEnd, noopMutexAlloc, noopMutexFree, noopMutexEnter, noopMutexTry, noopMutexLeave, 0, 0, }; return &sMutex; } #endif /* !SQLITE_DEBUG */ #ifdef SQLITE_DEBUG /* ** In this implementation, error checking is provided for testing ** and debugging purposes. The mutexes still do not provide any ** mutual exclusion. */ /* ** The mutex object */ typedef struct sqlite3_debug_mutex { int id; /* The mutex type */ int cnt; /* Number of entries without a matching leave */ } sqlite3_debug_mutex; /* ** The sqlite3_mutex_held() and sqlite3_mutex_notheld() routine are ** intended for use inside assert() statements. */ static int debugMutexHeld(sqlite3_mutex *pX){ sqlite3_debug_mutex *p = (sqlite3_debug_mutex*)pX; return p==0 || p->cnt>0; } static int debugMutexNotheld(sqlite3_mutex *pX){ sqlite3_debug_mutex *p = (sqlite3_debug_mutex*)pX; return p==0 || p->cnt==0; } /* ** Initialize and deinitialize the mutex subsystem. */ static int debugMutexInit(void){ return SQLITE_OK; } static int debugMutexEnd(void){ return SQLITE_OK; } /* ** The sqlite3_mutex_alloc() routine allocates a new ** mutex and returns a pointer to it. If it returns NULL ** that means that a mutex could not be allocated. */ static sqlite3_mutex *debugMutexAlloc(int id){ static sqlite3_debug_mutex aStatic[SQLITE_MUTEX_STATIC_VFS3 - 1]; sqlite3_debug_mutex *pNew = 0; switch( id ){ case SQLITE_MUTEX_FAST: case SQLITE_MUTEX_RECURSIVE: { pNew = sqlite3Malloc(sizeof(*pNew)); if( pNew ){ pNew->id = id; pNew->cnt = 0; } break; } default: { #ifdef SQLITE_ENABLE_API_ARMOR if( id-2<0 || id-2>=ArraySize(aStatic) ){ (void)SQLITE_MISUSE_BKPT; return 0; } #endif pNew = &aStatic[id-2]; pNew->id = id; break; } } return (sqlite3_mutex*)pNew; } /* ** This routine deallocates a previously allocated mutex. 
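**
** (Usage sketch, an illustrative aside rather than original text: the
** normal life cycle of a dynamic mutex, regardless of which implementation
** is active, is
**
**    sqlite3_mutex *p = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST);
**    sqlite3_mutex_enter(p);
**    ... protected work ...
**    sqlite3_mutex_leave(p);
**    sqlite3_mutex_free(p);
**
** and it is the final sqlite3_mutex_free() call that lands here when this
** debugging implementation is in use.)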
*/ static void debugMutexFree(sqlite3_mutex *pX){ sqlite3_debug_mutex *p = (sqlite3_debug_mutex*)pX; assert( p->cnt==0 ); if( p->id==SQLITE_MUTEX_RECURSIVE || p->id==SQLITE_MUTEX_FAST ){ sqlite3_free(p); }else{ #ifdef SQLITE_ENABLE_API_ARMOR (void)SQLITE_MISUSE_BKPT; #endif } } /* ** The sqlite3_mutex_enter() and sqlite3_mutex_try() routines attempt ** to enter a mutex. If another thread is already within the mutex, ** sqlite3_mutex_enter() will block and sqlite3_mutex_try() will return ** SQLITE_BUSY. The sqlite3_mutex_try() interface returns SQLITE_OK ** upon successful entry. Mutexes created using SQLITE_MUTEX_RECURSIVE can ** be entered multiple times by the same thread. In such cases the, ** mutex must be exited an equal number of times before another thread ** can enter. If the same thread tries to enter any other kind of mutex ** more than once, the behavior is undefined. */ static void debugMutexEnter(sqlite3_mutex *pX){ sqlite3_debug_mutex *p = (sqlite3_debug_mutex*)pX; assert( p->id==SQLITE_MUTEX_RECURSIVE || debugMutexNotheld(pX) ); p->cnt++; } static int debugMutexTry(sqlite3_mutex *pX){ sqlite3_debug_mutex *p = (sqlite3_debug_mutex*)pX; assert( p->id==SQLITE_MUTEX_RECURSIVE || debugMutexNotheld(pX) ); p->cnt++; return SQLITE_OK; } /* ** The sqlite3_mutex_leave() routine exits a mutex that was ** previously entered by the same thread. The behavior ** is undefined if the mutex is not currently entered or ** is not currently allocated. SQLite will never do either. */ static void debugMutexLeave(sqlite3_mutex *pX){ sqlite3_debug_mutex *p = (sqlite3_debug_mutex*)pX; assert( debugMutexHeld(pX) ); p->cnt--; assert( p->id==SQLITE_MUTEX_RECURSIVE || debugMutexNotheld(pX) ); } SQLITE_PRIVATE sqlite3_mutex_methods const *sqlite3NoopMutex(void){ static const sqlite3_mutex_methods sMutex = { debugMutexInit, debugMutexEnd, debugMutexAlloc, debugMutexFree, debugMutexEnter, debugMutexTry, debugMutexLeave, debugMutexHeld, debugMutexNotheld }; return &sMutex; } #endif /* SQLITE_DEBUG */ /* ** If compiled with SQLITE_MUTEX_NOOP, then the no-op mutex implementation ** is used regardless of the run-time threadsafety setting. */ #ifdef SQLITE_MUTEX_NOOP SQLITE_PRIVATE sqlite3_mutex_methods const *sqlite3DefaultMutex(void){ return sqlite3NoopMutex(); } #endif /* defined(SQLITE_MUTEX_NOOP) */ #endif /* !defined(SQLITE_MUTEX_OMIT) */ /************** End of mutex_noop.c ******************************************/ /************** Begin file mutex_unix.c **************************************/ /* ** 2007 August 28 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** This file contains the C functions that implement mutexes for pthreads */ /* #include "sqliteInt.h" */ /* ** The code in this file is only used if we are compiling threadsafe ** under unix with pthreads. ** ** Note that this implementation requires a version of pthreads that ** supports recursive mutexes. */ #ifdef SQLITE_MUTEX_PTHREADS #include /* ** The sqlite3_mutex.id, sqlite3_mutex.nRef, and sqlite3_mutex.owner fields ** are necessary under two condidtions: (1) Debug builds and (2) using ** home-grown mutexes. Encapsulate these conditions into a single #define. 
*/ #if defined(SQLITE_DEBUG) || defined(SQLITE_HOMEGROWN_RECURSIVE_MUTEX) # define SQLITE_MUTEX_NREF 1 #else # define SQLITE_MUTEX_NREF 0 #endif /* ** Each recursive mutex is an instance of the following structure. */ struct sqlite3_mutex { pthread_mutex_t mutex; /* Mutex controlling the lock */ #if SQLITE_MUTEX_NREF || defined(SQLITE_ENABLE_API_ARMOR) int id; /* Mutex type */ #endif #if SQLITE_MUTEX_NREF volatile int nRef; /* Number of entrances */ volatile pthread_t owner; /* Thread that is within this mutex */ int trace; /* True to trace changes */ #endif }; #if SQLITE_MUTEX_NREF #define SQLITE3_MUTEX_INITIALIZER { PTHREAD_MUTEX_INITIALIZER, 0, 0, (pthread_t)0, 0 } #else #define SQLITE3_MUTEX_INITIALIZER { PTHREAD_MUTEX_INITIALIZER } #endif /* ** The sqlite3_mutex_held() and sqlite3_mutex_notheld() routine are ** intended for use only inside assert() statements. On some platforms, ** there might be race conditions that can cause these routines to ** deliver incorrect results. In particular, if pthread_equal() is ** not an atomic operation, then these routines might delivery ** incorrect results. On most platforms, pthread_equal() is a ** comparison of two integers and is therefore atomic. But we are ** told that HPUX is not such a platform. If so, then these routines ** will not always work correctly on HPUX. ** ** On those platforms where pthread_equal() is not atomic, SQLite ** should be compiled without -DSQLITE_DEBUG and with -DNDEBUG to ** make sure no assert() statements are evaluated and hence these ** routines are never called. */ #if !defined(NDEBUG) || defined(SQLITE_DEBUG) static int pthreadMutexHeld(sqlite3_mutex *p){ return (p->nRef!=0 && pthread_equal(p->owner, pthread_self())); } static int pthreadMutexNotheld(sqlite3_mutex *p){ return p->nRef==0 || pthread_equal(p->owner, pthread_self())==0; } #endif /* ** Initialize and deinitialize the mutex subsystem. */ static int pthreadMutexInit(void){ return SQLITE_OK; } static int pthreadMutexEnd(void){ return SQLITE_OK; } /* ** The sqlite3_mutex_alloc() routine allocates a new ** mutex and returns a pointer to it. If it returns NULL ** that means that a mutex could not be allocated. SQLite ** will unwind its stack and return an error. The argument ** to sqlite3_mutex_alloc() is one of these integer constants: ** **
    **
**   SQLITE_MUTEX_FAST
**   SQLITE_MUTEX_RECURSIVE
**   SQLITE_MUTEX_STATIC_MASTER
**   SQLITE_MUTEX_STATIC_MEM
**   SQLITE_MUTEX_STATIC_OPEN
**   SQLITE_MUTEX_STATIC_PRNG
**   SQLITE_MUTEX_STATIC_LRU
**   SQLITE_MUTEX_STATIC_PMEM
**   SQLITE_MUTEX_STATIC_APP1
**   SQLITE_MUTEX_STATIC_APP2
**   SQLITE_MUTEX_STATIC_APP3
**   SQLITE_MUTEX_STATIC_VFS1
**   SQLITE_MUTEX_STATIC_VFS2
**   SQLITE_MUTEX_STATIC_VFS3
** ** The first two constants cause sqlite3_mutex_alloc() to create ** a new mutex. The new mutex is recursive when SQLITE_MUTEX_RECURSIVE ** is used but not necessarily so when SQLITE_MUTEX_FAST is used. ** The mutex implementation does not need to make a distinction ** between SQLITE_MUTEX_RECURSIVE and SQLITE_MUTEX_FAST if it does ** not want to. But SQLite will only request a recursive mutex in ** cases where it really needs one. If a faster non-recursive mutex ** implementation is available on the host platform, the mutex subsystem ** might return such a mutex in response to SQLITE_MUTEX_FAST. ** ** The other allowed parameters to sqlite3_mutex_alloc() each return ** a pointer to a static preexisting mutex. Six static mutexes are ** used by the current version of SQLite. Future versions of SQLite ** may add additional static mutexes. Static mutexes are for internal ** use by SQLite only. Applications that use SQLite mutexes should ** use only the dynamic mutexes returned by SQLITE_MUTEX_FAST or ** SQLITE_MUTEX_RECURSIVE. ** ** Note that if one of the dynamic mutex parameters (SQLITE_MUTEX_FAST ** or SQLITE_MUTEX_RECURSIVE) is used then sqlite3_mutex_alloc() ** returns a different mutex on every call. But for the static ** mutex types, the same mutex is returned on every call that has ** the same type number. */ static sqlite3_mutex *pthreadMutexAlloc(int iType){ static sqlite3_mutex staticMutexes[] = { SQLITE3_MUTEX_INITIALIZER, SQLITE3_MUTEX_INITIALIZER, SQLITE3_MUTEX_INITIALIZER, SQLITE3_MUTEX_INITIALIZER, SQLITE3_MUTEX_INITIALIZER, SQLITE3_MUTEX_INITIALIZER, SQLITE3_MUTEX_INITIALIZER, SQLITE3_MUTEX_INITIALIZER, SQLITE3_MUTEX_INITIALIZER, SQLITE3_MUTEX_INITIALIZER, SQLITE3_MUTEX_INITIALIZER, SQLITE3_MUTEX_INITIALIZER }; sqlite3_mutex *p; switch( iType ){ case SQLITE_MUTEX_RECURSIVE: { p = sqlite3MallocZero( sizeof(*p) ); if( p ){ #ifdef SQLITE_HOMEGROWN_RECURSIVE_MUTEX /* If recursive mutexes are not available, we will have to ** build our own. See below. */ pthread_mutex_init(&p->mutex, 0); #else /* Use a recursive mutex if it is available */ pthread_mutexattr_t recursiveAttr; pthread_mutexattr_init(&recursiveAttr); pthread_mutexattr_settype(&recursiveAttr, PTHREAD_MUTEX_RECURSIVE); pthread_mutex_init(&p->mutex, &recursiveAttr); pthread_mutexattr_destroy(&recursiveAttr); #endif } break; } case SQLITE_MUTEX_FAST: { p = sqlite3MallocZero( sizeof(*p) ); if( p ){ pthread_mutex_init(&p->mutex, 0); } break; } default: { #ifdef SQLITE_ENABLE_API_ARMOR if( iType-2<0 || iType-2>=ArraySize(staticMutexes) ){ (void)SQLITE_MISUSE_BKPT; return 0; } #endif p = &staticMutexes[iType-2]; break; } } #if SQLITE_MUTEX_NREF || defined(SQLITE_ENABLE_API_ARMOR) if( p ) p->id = iType; #endif return p; } /* ** This routine deallocates a previously ** allocated mutex. SQLite is careful to deallocate every ** mutex that it allocates. */ static void pthreadMutexFree(sqlite3_mutex *p){ assert( p->nRef==0 ); #if SQLITE_ENABLE_API_ARMOR if( p->id==SQLITE_MUTEX_FAST || p->id==SQLITE_MUTEX_RECURSIVE ) #endif { pthread_mutex_destroy(&p->mutex); sqlite3_free(p); } #ifdef SQLITE_ENABLE_API_ARMOR else{ (void)SQLITE_MISUSE_BKPT; } #endif } /* ** The sqlite3_mutex_enter() and sqlite3_mutex_try() routines attempt ** to enter a mutex. If another thread is already within the mutex, ** sqlite3_mutex_enter() will block and sqlite3_mutex_try() will return ** SQLITE_BUSY. The sqlite3_mutex_try() interface returns SQLITE_OK ** upon successful entry. 
Mutexes created using SQLITE_MUTEX_RECURSIVE can ** be entered multiple times by the same thread. In such cases the, ** mutex must be exited an equal number of times before another thread ** can enter. If the same thread tries to enter any other kind of mutex ** more than once, the behavior is undefined. */ static void pthreadMutexEnter(sqlite3_mutex *p){ assert( p->id==SQLITE_MUTEX_RECURSIVE || pthreadMutexNotheld(p) ); #ifdef SQLITE_HOMEGROWN_RECURSIVE_MUTEX /* If recursive mutexes are not available, then we have to grow ** our own. This implementation assumes that pthread_equal() ** is atomic - that it cannot be deceived into thinking self ** and p->owner are equal if p->owner changes between two values ** that are not equal to self while the comparison is taking place. ** This implementation also assumes a coherent cache - that ** separate processes cannot read different values from the same ** address at the same time. If either of these two conditions ** are not met, then the mutexes will fail and problems will result. */ { pthread_t self = pthread_self(); if( p->nRef>0 && pthread_equal(p->owner, self) ){ p->nRef++; }else{ pthread_mutex_lock(&p->mutex); assert( p->nRef==0 ); p->owner = self; p->nRef = 1; } } #else /* Use the built-in recursive mutexes if they are available. */ pthread_mutex_lock(&p->mutex); #if SQLITE_MUTEX_NREF assert( p->nRef>0 || p->owner==0 ); p->owner = pthread_self(); p->nRef++; #endif #endif #ifdef SQLITE_DEBUG if( p->trace ){ printf("enter mutex %p (%d) with nRef=%d\n", p, p->trace, p->nRef); } #endif } static int pthreadMutexTry(sqlite3_mutex *p){ int rc; assert( p->id==SQLITE_MUTEX_RECURSIVE || pthreadMutexNotheld(p) ); #ifdef SQLITE_HOMEGROWN_RECURSIVE_MUTEX /* If recursive mutexes are not available, then we have to grow ** our own. This implementation assumes that pthread_equal() ** is atomic - that it cannot be deceived into thinking self ** and p->owner are equal if p->owner changes between two values ** that are not equal to self while the comparison is taking place. ** This implementation also assumes a coherent cache - that ** separate processes cannot read different values from the same ** address at the same time. If either of these two conditions ** are not met, then the mutexes will fail and problems will result. */ { pthread_t self = pthread_self(); if( p->nRef>0 && pthread_equal(p->owner, self) ){ p->nRef++; rc = SQLITE_OK; }else if( pthread_mutex_trylock(&p->mutex)==0 ){ assert( p->nRef==0 ); p->owner = self; p->nRef = 1; rc = SQLITE_OK; }else{ rc = SQLITE_BUSY; } } #else /* Use the built-in recursive mutexes if they are available. */ if( pthread_mutex_trylock(&p->mutex)==0 ){ #if SQLITE_MUTEX_NREF p->owner = pthread_self(); p->nRef++; #endif rc = SQLITE_OK; }else{ rc = SQLITE_BUSY; } #endif #ifdef SQLITE_DEBUG if( rc==SQLITE_OK && p->trace ){ printf("enter mutex %p (%d) with nRef=%d\n", p, p->trace, p->nRef); } #endif return rc; } /* ** The sqlite3_mutex_leave() routine exits a mutex that was ** previously entered by the same thread. The behavior ** is undefined if the mutex is not currently entered or ** is not currently allocated. SQLite will never do either. 
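**
** A minimal sketch of the required balance (illustrative only): a
** SQLITE_MUTEX_RECURSIVE mutex entered N times by one thread must be left
** N times by that same thread before any other thread can enter it:
**
**     sqlite3_mutex *pM = sqlite3_mutex_alloc(SQLITE_MUTEX_RECURSIVE);
**     sqlite3_mutex_enter(pM);    // first entry
**     sqlite3_mutex_enter(pM);    // same thread may re-enter
**     sqlite3_mutex_leave(pM);    // still held after this leave
**     sqlite3_mutex_leave(pM);    // now other threads may enter
**     sqlite3_mutex_free(pM);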
*/ static void pthreadMutexLeave(sqlite3_mutex *p){ assert( pthreadMutexHeld(p) ); #if SQLITE_MUTEX_NREF p->nRef--; if( p->nRef==0 ) p->owner = 0; #endif assert( p->nRef==0 || p->id==SQLITE_MUTEX_RECURSIVE ); #ifdef SQLITE_HOMEGROWN_RECURSIVE_MUTEX if( p->nRef==0 ){ pthread_mutex_unlock(&p->mutex); } #else pthread_mutex_unlock(&p->mutex); #endif #ifdef SQLITE_DEBUG if( p->trace ){ printf("leave mutex %p (%d) with nRef=%d\n", p, p->trace, p->nRef); } #endif } SQLITE_PRIVATE sqlite3_mutex_methods const *sqlite3DefaultMutex(void){ static const sqlite3_mutex_methods sMutex = { pthreadMutexInit, pthreadMutexEnd, pthreadMutexAlloc, pthreadMutexFree, pthreadMutexEnter, pthreadMutexTry, pthreadMutexLeave, #ifdef SQLITE_DEBUG pthreadMutexHeld, pthreadMutexNotheld #else 0, 0 #endif }; return &sMutex; } #endif /* SQLITE_MUTEX_PTHREADS */ /************** End of mutex_unix.c ******************************************/ /************** Begin file mutex_w32.c ***************************************/ /* ** 2007 August 14 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** This file contains the C functions that implement mutexes for Win32. */ /* #include "sqliteInt.h" */ #if SQLITE_OS_WIN /* ** Include code that is common to all os_*.c files */ /************** Include os_common.h in the middle of mutex_w32.c *************/ /************** Begin file os_common.h ***************************************/ /* ** 2004 May 22 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ****************************************************************************** ** ** This file contains macros and a little bit of code that is common to ** all of the platform-specific files (os_*.c) and is #included into those ** files. ** ** This file should be #included by the os_*.c files only. It is not a ** general purpose header file. */ #ifndef _OS_COMMON_H_ #define _OS_COMMON_H_ /* ** At least two bugs have slipped in because we changed the MEMORY_DEBUG ** macro to SQLITE_DEBUG and some older makefiles have not yet made the ** switch. The following code should catch this problem at compile-time. */ #ifdef MEMORY_DEBUG # error "The MEMORY_DEBUG macro is obsolete. Use SQLITE_DEBUG instead." #endif /* ** Macros for performance tracing. Normally turned off. Only works ** on i486 hardware. */ #ifdef SQLITE_PERFORMANCE_TRACE /* ** hwtime.h contains inline assembler code for implementing ** high-performance timing routines. */ /************** Include hwtime.h in the middle of os_common.h ****************/ /************** Begin file hwtime.h ******************************************/ /* ** 2008 May 27 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. 
** ****************************************************************************** ** ** This file contains inline asm code for retrieving "high-performance" ** counters for x86 class CPUs. */ #ifndef _HWTIME_H_ #define _HWTIME_H_ /* ** The following routine only works on pentium-class (or newer) processors. ** It uses the RDTSC opcode to read the cycle count value out of the ** processor and returns that value. This can be used for high-res ** profiling. */ #if (defined(__GNUC__) || defined(_MSC_VER)) && \ (defined(i386) || defined(__i386__) || defined(_M_IX86)) #if defined(__GNUC__) __inline__ sqlite_uint64 sqlite3Hwtime(void){ unsigned int lo, hi; __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi)); return (sqlite_uint64)hi << 32 | lo; } #elif defined(_MSC_VER) __declspec(naked) __inline sqlite_uint64 __cdecl sqlite3Hwtime(void){ __asm { rdtsc ret ; return value at EDX:EAX } } #endif #elif (defined(__GNUC__) && defined(__x86_64__)) __inline__ sqlite_uint64 sqlite3Hwtime(void){ unsigned long val; __asm__ __volatile__ ("rdtsc" : "=A" (val)); return val; } #elif (defined(__GNUC__) && defined(__ppc__)) __inline__ sqlite_uint64 sqlite3Hwtime(void){ unsigned long long retval; unsigned long junk; __asm__ __volatile__ ("\n\ 1: mftbu %1\n\ mftb %L0\n\ mftbu %0\n\ cmpw %0,%1\n\ bne 1b" : "=r" (retval), "=r" (junk)); return retval; } #else #error Need implementation of sqlite3Hwtime() for your platform. /* ** To compile without implementing sqlite3Hwtime() for your platform, ** you can remove the above #error and use the following ** stub function. You will lose timing support for many ** of the debugging and testing utilities, but it should at ** least compile and run. */ SQLITE_PRIVATE sqlite_uint64 sqlite3Hwtime(void){ return ((sqlite_uint64)0); } #endif #endif /* !defined(_HWTIME_H_) */ /************** End of hwtime.h **********************************************/ /************** Continuing where we left off in os_common.h ******************/ static sqlite_uint64 g_start; static sqlite_uint64 g_elapsed; #define TIMER_START g_start=sqlite3Hwtime() #define TIMER_END g_elapsed=sqlite3Hwtime()-g_start #define TIMER_ELAPSED g_elapsed #else #define TIMER_START #define TIMER_END #define TIMER_ELAPSED ((sqlite_uint64)0) #endif /* ** If we compile with the SQLITE_TEST macro set, then the following block ** of code will give us the ability to simulate a disk I/O error. This ** is used for testing the I/O recovery logic. 
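**
** Illustrative sketch of how these macros are meant to be used (the
** function name below is a hypothetical placeholder, not a routine from
** this file): the CODE argument runs only when the simulated failure
** triggers, either because the countdown in sqlite3_io_error_pending
** expires or because a persistent error is already active:
**
**     static int exampleRead(void){
**       SimulateIOError( return SQLITE_IOERR_READ );
**       return SQLITE_OK;   // the real read would happen here
**     }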
*/ #ifdef SQLITE_TEST SQLITE_API int sqlite3_io_error_hit = 0; /* Total number of I/O Errors */ SQLITE_API int sqlite3_io_error_hardhit = 0; /* Number of non-benign errors */ SQLITE_API int sqlite3_io_error_pending = 0; /* Count down to first I/O error */ SQLITE_API int sqlite3_io_error_persist = 0; /* True if I/O errors persist */ SQLITE_API int sqlite3_io_error_benign = 0; /* True if errors are benign */ SQLITE_API int sqlite3_diskfull_pending = 0; SQLITE_API int sqlite3_diskfull = 0; #define SimulateIOErrorBenign(X) sqlite3_io_error_benign=(X) #define SimulateIOError(CODE) \ if( (sqlite3_io_error_persist && sqlite3_io_error_hit) \ || sqlite3_io_error_pending-- == 1 ) \ { local_ioerr(); CODE; } static void local_ioerr(){ IOTRACE(("IOERR\n")); sqlite3_io_error_hit++; if( !sqlite3_io_error_benign ) sqlite3_io_error_hardhit++; } #define SimulateDiskfullError(CODE) \ if( sqlite3_diskfull_pending ){ \ if( sqlite3_diskfull_pending == 1 ){ \ local_ioerr(); \ sqlite3_diskfull = 1; \ sqlite3_io_error_hit = 1; \ CODE; \ }else{ \ sqlite3_diskfull_pending--; \ } \ } #else #define SimulateIOErrorBenign(X) #define SimulateIOError(A) #define SimulateDiskfullError(A) #endif /* ** When testing, keep a count of the number of open files. */ #ifdef SQLITE_TEST SQLITE_API int sqlite3_open_file_count = 0; #define OpenCounter(X) sqlite3_open_file_count+=(X) #else #define OpenCounter(X) #endif #endif /* !defined(_OS_COMMON_H_) */ /************** End of os_common.h *******************************************/ /************** Continuing where we left off in mutex_w32.c ******************/ /* ** Include the header file for the Windows VFS. */ /************** Include os_win.h in the middle of mutex_w32.c ****************/ /************** Begin file os_win.h ******************************************/ /* ** 2013 November 25 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ****************************************************************************** ** ** This file contains code that is specific to Windows. */ #ifndef _OS_WIN_H_ #define _OS_WIN_H_ /* ** Include the primary Windows SDK header file. */ #include "windows.h" #ifdef __CYGWIN__ # include # include /* amalgamator: dontcache */ #endif /* ** Determine if we are dealing with Windows NT. ** ** We ought to be able to determine if we are compiling for Windows 9x or ** Windows NT using the _WIN32_WINNT macro as follows: ** ** #if defined(_WIN32_WINNT) ** # define SQLITE_OS_WINNT 1 ** #else ** # define SQLITE_OS_WINNT 0 ** #endif ** ** However, Visual Studio 2005 does not set _WIN32_WINNT by default, as ** it ought to, so the above test does not work. We'll just assume that ** everything is Windows NT unless the programmer explicitly says otherwise ** by setting SQLITE_OS_WINNT to 0. */ #if SQLITE_OS_WIN && !defined(SQLITE_OS_WINNT) # define SQLITE_OS_WINNT 1 #endif /* ** Determine if we are dealing with Windows CE - which has a much reduced ** API. */ #if defined(_WIN32_WCE) # define SQLITE_OS_WINCE 1 #else # define SQLITE_OS_WINCE 0 #endif /* ** Determine if we are dealing with WinRT, which provides only a subset of ** the full Win32 API. */ #if !defined(SQLITE_OS_WINRT) # define SQLITE_OS_WINRT 0 #endif /* ** For WinCE, some API function parameters do not appear to be declared as ** volatile. 
*/ #if SQLITE_OS_WINCE # define SQLITE_WIN32_VOLATILE #else # define SQLITE_WIN32_VOLATILE volatile #endif /* ** For some Windows sub-platforms, the _beginthreadex() / _endthreadex() ** functions are not available (e.g. those not using MSVC, Cygwin, etc). */ #if SQLITE_OS_WIN && !SQLITE_OS_WINCE && !SQLITE_OS_WINRT && \ SQLITE_THREADSAFE>0 && !defined(__CYGWIN__) # define SQLITE_OS_WIN_THREADS 1 #else # define SQLITE_OS_WIN_THREADS 0 #endif #endif /* _OS_WIN_H_ */ /************** End of os_win.h **********************************************/ /************** Continuing where we left off in mutex_w32.c ******************/ #endif /* ** The code in this file is only used if we are compiling multithreaded ** on a Win32 system. */ #ifdef SQLITE_MUTEX_W32 /* ** Each recursive mutex is an instance of the following structure. */ struct sqlite3_mutex { CRITICAL_SECTION mutex; /* Mutex controlling the lock */ int id; /* Mutex type */ #ifdef SQLITE_DEBUG volatile int nRef; /* Number of enterances */ volatile DWORD owner; /* Thread holding this mutex */ volatile int trace; /* True to trace changes */ #endif }; /* ** These are the initializer values used when declaring a "static" mutex ** on Win32. It should be noted that all mutexes require initialization ** on the Win32 platform. */ #define SQLITE_W32_MUTEX_INITIALIZER { 0 } #ifdef SQLITE_DEBUG #define SQLITE3_MUTEX_INITIALIZER { SQLITE_W32_MUTEX_INITIALIZER, 0, \ 0L, (DWORD)0, 0 } #else #define SQLITE3_MUTEX_INITIALIZER { SQLITE_W32_MUTEX_INITIALIZER, 0 } #endif #ifdef SQLITE_DEBUG /* ** The sqlite3_mutex_held() and sqlite3_mutex_notheld() routine are ** intended for use only inside assert() statements. */ static int winMutexHeld(sqlite3_mutex *p){ return p->nRef!=0 && p->owner==GetCurrentThreadId(); } static int winMutexNotheld2(sqlite3_mutex *p, DWORD tid){ return p->nRef==0 || p->owner!=tid; } static int winMutexNotheld(sqlite3_mutex *p){ DWORD tid = GetCurrentThreadId(); return winMutexNotheld2(p, tid); } #endif /* ** Initialize and deinitialize the mutex subsystem. */ static sqlite3_mutex winMutex_staticMutexes[] = { SQLITE3_MUTEX_INITIALIZER, SQLITE3_MUTEX_INITIALIZER, SQLITE3_MUTEX_INITIALIZER, SQLITE3_MUTEX_INITIALIZER, SQLITE3_MUTEX_INITIALIZER, SQLITE3_MUTEX_INITIALIZER, SQLITE3_MUTEX_INITIALIZER, SQLITE3_MUTEX_INITIALIZER, SQLITE3_MUTEX_INITIALIZER, SQLITE3_MUTEX_INITIALIZER, SQLITE3_MUTEX_INITIALIZER, SQLITE3_MUTEX_INITIALIZER }; static int winMutex_isInit = 0; static int winMutex_isNt = -1; /* <0 means "need to query" */ /* As the winMutexInit() and winMutexEnd() functions are called as part ** of the sqlite3_initialize() and sqlite3_shutdown() processing, the ** "interlocked" magic used here is probably not strictly necessary. */ static LONG SQLITE_WIN32_VOLATILE winMutex_lock = 0; SQLITE_API int SQLITE_STDCALL sqlite3_win32_is_nt(void); /* os_win.c */ SQLITE_API void SQLITE_STDCALL sqlite3_win32_sleep(DWORD milliseconds); /* os_win.c */ static int winMutexInit(void){ /* The first to increment to 1 does actual initialization */ if( InterlockedCompareExchange(&winMutex_lock, 1, 0)==0 ){ int i; for(i=0; i **
**   SQLITE_MUTEX_FAST
**   SQLITE_MUTEX_RECURSIVE
**   SQLITE_MUTEX_STATIC_MASTER
**   SQLITE_MUTEX_STATIC_MEM
**   SQLITE_MUTEX_STATIC_OPEN
**   SQLITE_MUTEX_STATIC_PRNG
**   SQLITE_MUTEX_STATIC_LRU
**   SQLITE_MUTEX_STATIC_PMEM
**   SQLITE_MUTEX_STATIC_APP1
**   SQLITE_MUTEX_STATIC_APP2
**   SQLITE_MUTEX_STATIC_APP3
**   SQLITE_MUTEX_STATIC_VFS1
**   SQLITE_MUTEX_STATIC_VFS2
  • SQLITE_MUTEX_STATIC_VFS3 ** ** ** The first two constants cause sqlite3_mutex_alloc() to create ** a new mutex. The new mutex is recursive when SQLITE_MUTEX_RECURSIVE ** is used but not necessarily so when SQLITE_MUTEX_FAST is used. ** The mutex implementation does not need to make a distinction ** between SQLITE_MUTEX_RECURSIVE and SQLITE_MUTEX_FAST if it does ** not want to. But SQLite will only request a recursive mutex in ** cases where it really needs one. If a faster non-recursive mutex ** implementation is available on the host platform, the mutex subsystem ** might return such a mutex in response to SQLITE_MUTEX_FAST. ** ** The other allowed parameters to sqlite3_mutex_alloc() each return ** a pointer to a static preexisting mutex. Six static mutexes are ** used by the current version of SQLite. Future versions of SQLite ** may add additional static mutexes. Static mutexes are for internal ** use by SQLite only. Applications that use SQLite mutexes should ** use only the dynamic mutexes returned by SQLITE_MUTEX_FAST or ** SQLITE_MUTEX_RECURSIVE. ** ** Note that if one of the dynamic mutex parameters (SQLITE_MUTEX_FAST ** or SQLITE_MUTEX_RECURSIVE) is used then sqlite3_mutex_alloc() ** returns a different mutex on every call. But for the static ** mutex types, the same mutex is returned on every call that has ** the same type number. */ static sqlite3_mutex *winMutexAlloc(int iType){ sqlite3_mutex *p; switch( iType ){ case SQLITE_MUTEX_FAST: case SQLITE_MUTEX_RECURSIVE: { p = sqlite3MallocZero( sizeof(*p) ); if( p ){ p->id = iType; #ifdef SQLITE_DEBUG #ifdef SQLITE_WIN32_MUTEX_TRACE_DYNAMIC p->trace = 1; #endif #endif #if SQLITE_OS_WINRT InitializeCriticalSectionEx(&p->mutex, 0, 0); #else InitializeCriticalSection(&p->mutex); #endif } break; } default: { #ifdef SQLITE_ENABLE_API_ARMOR if( iType-2<0 || iType-2>=ArraySize(winMutex_staticMutexes) ){ (void)SQLITE_MISUSE_BKPT; return 0; } #endif p = &winMutex_staticMutexes[iType-2]; p->id = iType; #ifdef SQLITE_DEBUG #ifdef SQLITE_WIN32_MUTEX_TRACE_STATIC p->trace = 1; #endif #endif break; } } return p; } /* ** This routine deallocates a previously ** allocated mutex. SQLite is careful to deallocate every ** mutex that it allocates. */ static void winMutexFree(sqlite3_mutex *p){ assert( p ); assert( p->nRef==0 && p->owner==0 ); if( p->id==SQLITE_MUTEX_FAST || p->id==SQLITE_MUTEX_RECURSIVE ){ DeleteCriticalSection(&p->mutex); sqlite3_free(p); }else{ #ifdef SQLITE_ENABLE_API_ARMOR (void)SQLITE_MISUSE_BKPT; #endif } } /* ** The sqlite3_mutex_enter() and sqlite3_mutex_try() routines attempt ** to enter a mutex. If another thread is already within the mutex, ** sqlite3_mutex_enter() will block and sqlite3_mutex_try() will return ** SQLITE_BUSY. The sqlite3_mutex_try() interface returns SQLITE_OK ** upon successful entry. Mutexes created using SQLITE_MUTEX_RECURSIVE can ** be entered multiple times by the same thread. In such cases the, ** mutex must be exited an equal number of times before another thread ** can enter. If the same thread tries to enter any other kind of mutex ** more than once, the behavior is undefined. 
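**
** A minimal sketch of the intended calling pattern for the try interface
** (doOptionalWork() is a hypothetical placeholder, not a routine from this
** file): SQLITE_BUSY simply means the caller should skip the optional work
** or fall back to a blocking sqlite3_mutex_enter():
**
**     if( sqlite3_mutex_try(pM)==SQLITE_OK ){
**       doOptionalWork();
**       sqlite3_mutex_leave(pM);
**     }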
*/ static void winMutexEnter(sqlite3_mutex *p){ #if defined(SQLITE_DEBUG) || defined(SQLITE_TEST) DWORD tid = GetCurrentThreadId(); #endif #ifdef SQLITE_DEBUG assert( p ); assert( p->id==SQLITE_MUTEX_RECURSIVE || winMutexNotheld2(p, tid) ); #else assert( p ); #endif assert( winMutex_isInit==1 ); EnterCriticalSection(&p->mutex); #ifdef SQLITE_DEBUG assert( p->nRef>0 || p->owner==0 ); p->owner = tid; p->nRef++; if( p->trace ){ OSTRACE(("ENTER-MUTEX tid=%lu, mutex=%p (%d), nRef=%d\n", tid, p, p->trace, p->nRef)); } #endif } static int winMutexTry(sqlite3_mutex *p){ #if defined(SQLITE_DEBUG) || defined(SQLITE_TEST) DWORD tid = GetCurrentThreadId(); #endif int rc = SQLITE_BUSY; assert( p ); assert( p->id==SQLITE_MUTEX_RECURSIVE || winMutexNotheld2(p, tid) ); /* ** The sqlite3_mutex_try() routine is very rarely used, and when it ** is used it is merely an optimization. So it is OK for it to always ** fail. ** ** The TryEnterCriticalSection() interface is only available on WinNT. ** And some windows compilers complain if you try to use it without ** first doing some #defines that prevent SQLite from building on Win98. ** For that reason, we will omit this optimization for now. See ** ticket #2685. */ #if defined(_WIN32_WINNT) && _WIN32_WINNT >= 0x0400 assert( winMutex_isInit==1 ); assert( winMutex_isNt>=-1 && winMutex_isNt<=1 ); if( winMutex_isNt<0 ){ winMutex_isNt = sqlite3_win32_is_nt(); } assert( winMutex_isNt==0 || winMutex_isNt==1 ); if( winMutex_isNt && TryEnterCriticalSection(&p->mutex) ){ #ifdef SQLITE_DEBUG p->owner = tid; p->nRef++; #endif rc = SQLITE_OK; } #else UNUSED_PARAMETER(p); #endif #ifdef SQLITE_DEBUG if( p->trace ){ OSTRACE(("TRY-MUTEX tid=%lu, mutex=%p (%d), owner=%lu, nRef=%d, rc=%s\n", tid, p, p->trace, p->owner, p->nRef, sqlite3ErrName(rc))); } #endif return rc; } /* ** The sqlite3_mutex_leave() routine exits a mutex that was ** previously entered by the same thread. The behavior ** is undefined if the mutex is not currently entered or ** is not currently allocated. SQLite will never do either. */ static void winMutexLeave(sqlite3_mutex *p){ #if defined(SQLITE_DEBUG) || defined(SQLITE_TEST) DWORD tid = GetCurrentThreadId(); #endif assert( p ); #ifdef SQLITE_DEBUG assert( p->nRef>0 ); assert( p->owner==tid ); p->nRef--; if( p->nRef==0 ) p->owner = 0; assert( p->nRef==0 || p->id==SQLITE_MUTEX_RECURSIVE ); #endif assert( winMutex_isInit==1 ); LeaveCriticalSection(&p->mutex); #ifdef SQLITE_DEBUG if( p->trace ){ OSTRACE(("LEAVE-MUTEX tid=%lu, mutex=%p (%d), nRef=%d\n", tid, p, p->trace, p->nRef)); } #endif } SQLITE_PRIVATE sqlite3_mutex_methods const *sqlite3DefaultMutex(void){ static const sqlite3_mutex_methods sMutex = { winMutexInit, winMutexEnd, winMutexAlloc, winMutexFree, winMutexEnter, winMutexTry, winMutexLeave, #ifdef SQLITE_DEBUG winMutexHeld, winMutexNotheld #else 0, 0 #endif }; return &sMutex; } #endif /* SQLITE_MUTEX_W32 */ /************** End of mutex_w32.c *******************************************/ /************** Begin file malloc.c ******************************************/ /* ** 2001 September 15 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** ** Memory allocation functions used throughout sqlite. 
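**
** For orientation, a minimal sketch of the public entry points implemented
** below (illustrative only): every successful allocation is eventually
** handed back to sqlite3_free():
**
**     char *z = sqlite3_malloc64(100);
**     if( z ){
**       char *zNew = sqlite3_realloc64(z, 200);
**       if( zNew ) z = zNew;      // on failure the original z survives
**       sqlite3_free(z);
**     }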
*/ /* #include "sqliteInt.h" */ /* #include */ /* ** Attempt to release up to n bytes of non-essential memory currently ** held by SQLite. An example of non-essential memory is memory used to ** cache database pages that are not currently in use. */ SQLITE_API int SQLITE_STDCALL sqlite3_release_memory(int n){ #ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT return sqlite3PcacheReleaseMemory(n); #else /* IMPLEMENTATION-OF: R-34391-24921 The sqlite3_release_memory() routine ** is a no-op returning zero if SQLite is not compiled with ** SQLITE_ENABLE_MEMORY_MANAGEMENT. */ UNUSED_PARAMETER(n); return 0; #endif } /* ** An instance of the following object records the location of ** each unused scratch buffer. */ typedef struct ScratchFreeslot { struct ScratchFreeslot *pNext; /* Next unused scratch buffer */ } ScratchFreeslot; /* ** State information local to the memory allocation subsystem. */ static SQLITE_WSD struct Mem0Global { sqlite3_mutex *mutex; /* Mutex to serialize access */ /* ** The alarm callback and its arguments. The mem0.mutex lock will ** be held while the callback is running. Recursive calls into ** the memory subsystem are allowed, but no new callbacks will be ** issued. */ sqlite3_int64 alarmThreshold; void (*alarmCallback)(void*, sqlite3_int64,int); void *alarmArg; /* ** Pointers to the end of sqlite3GlobalConfig.pScratch memory ** (so that a range test can be used to determine if an allocation ** being freed came from pScratch) and a pointer to the list of ** unused scratch allocations. */ void *pScratchEnd; ScratchFreeslot *pScratchFree; u32 nScratchFree; /* ** True if heap is nearly "full" where "full" is defined by the ** sqlite3_soft_heap_limit() setting. */ int nearlyFull; } mem0 = { 0, 0, 0, 0, 0, 0, 0, 0 }; #define mem0 GLOBAL(struct Mem0Global, mem0) /* ** Return the memory allocator mutex. sqlite3_status() needs it. */ SQLITE_PRIVATE sqlite3_mutex *sqlite3MallocMutex(void){ return mem0.mutex; } /* ** This routine runs when the memory allocator sees that the ** total memory allocation is about to exceed the soft heap ** limit. */ static void softHeapLimitEnforcer( void *NotUsed, sqlite3_int64 NotUsed2, int allocSize ){ UNUSED_PARAMETER2(NotUsed, NotUsed2); sqlite3_release_memory(allocSize); } /* ** Change the alarm callback */ static int sqlite3MemoryAlarm( void(*xCallback)(void *pArg, sqlite3_int64 used,int N), void *pArg, sqlite3_int64 iThreshold ){ sqlite3_int64 nUsed; sqlite3_mutex_enter(mem0.mutex); mem0.alarmCallback = xCallback; mem0.alarmArg = pArg; mem0.alarmThreshold = iThreshold; nUsed = sqlite3StatusValue(SQLITE_STATUS_MEMORY_USED); mem0.nearlyFull = (iThreshold>0 && iThreshold<=nUsed); sqlite3_mutex_leave(mem0.mutex); return SQLITE_OK; } #ifndef SQLITE_OMIT_DEPRECATED /* ** Deprecated external interface. Internal/core SQLite code ** should call sqlite3MemoryAlarm. */ SQLITE_API int SQLITE_STDCALL sqlite3_memory_alarm( void(*xCallback)(void *pArg, sqlite3_int64 used,int N), void *pArg, sqlite3_int64 iThreshold ){ return sqlite3MemoryAlarm(xCallback, pArg, iThreshold); } #endif /* ** Set the soft heap-size limit for the library. Passing a zero or ** negative value indicates no limit. 
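**
** Illustrative usage sketch:
**
**     sqlite3_int64 prior;
**     prior = sqlite3_soft_heap_limit64(8*1024*1024);  // impose an 8MB limit
**     prior = sqlite3_soft_heap_limit64(-1);           // query, change nothing
**     sqlite3_soft_heap_limit64(0);                    // remove the limit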
*/ SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_soft_heap_limit64(sqlite3_int64 n){ sqlite3_int64 priorLimit; sqlite3_int64 excess; #ifndef SQLITE_OMIT_AUTOINIT int rc = sqlite3_initialize(); if( rc ) return -1; #endif sqlite3_mutex_enter(mem0.mutex); priorLimit = mem0.alarmThreshold; sqlite3_mutex_leave(mem0.mutex); if( n<0 ) return priorLimit; if( n>0 ){ sqlite3MemoryAlarm(softHeapLimitEnforcer, 0, n); }else{ sqlite3MemoryAlarm(0, 0, 0); } excess = sqlite3_memory_used() - n; if( excess>0 ) sqlite3_release_memory((int)(excess & 0x7fffffff)); return priorLimit; } SQLITE_API void SQLITE_STDCALL sqlite3_soft_heap_limit(int n){ if( n<0 ) n = 0; sqlite3_soft_heap_limit64(n); } /* ** Initialize the memory allocation subsystem. */ SQLITE_PRIVATE int sqlite3MallocInit(void){ int rc; if( sqlite3GlobalConfig.m.xMalloc==0 ){ sqlite3MemSetDefault(); } memset(&mem0, 0, sizeof(mem0)); if( sqlite3GlobalConfig.bCoreMutex ){ mem0.mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MEM); } if( sqlite3GlobalConfig.pScratch && sqlite3GlobalConfig.szScratch>=100 && sqlite3GlobalConfig.nScratch>0 ){ int i, n, sz; ScratchFreeslot *pSlot; sz = ROUNDDOWN8(sqlite3GlobalConfig.szScratch); sqlite3GlobalConfig.szScratch = sz; pSlot = (ScratchFreeslot*)sqlite3GlobalConfig.pScratch; n = sqlite3GlobalConfig.nScratch; mem0.pScratchFree = pSlot; mem0.nScratchFree = n; for(i=0; ipNext = (ScratchFreeslot*)(sz+(char*)pSlot); pSlot = pSlot->pNext; } pSlot->pNext = 0; mem0.pScratchEnd = (void*)&pSlot[1]; }else{ mem0.pScratchEnd = 0; sqlite3GlobalConfig.pScratch = 0; sqlite3GlobalConfig.szScratch = 0; sqlite3GlobalConfig.nScratch = 0; } if( sqlite3GlobalConfig.pPage==0 || sqlite3GlobalConfig.szPage<512 || sqlite3GlobalConfig.nPage<=0 ){ sqlite3GlobalConfig.pPage = 0; sqlite3GlobalConfig.szPage = 0; } rc = sqlite3GlobalConfig.m.xInit(sqlite3GlobalConfig.m.pAppData); if( rc!=SQLITE_OK ) memset(&mem0, 0, sizeof(mem0)); return rc; } /* ** Return true if the heap is currently under memory pressure - in other ** words if the amount of heap used is close to the limit set by ** sqlite3_soft_heap_limit(). */ SQLITE_PRIVATE int sqlite3HeapNearlyFull(void){ return mem0.nearlyFull; } /* ** Deinitialize the memory allocation subsystem. */ SQLITE_PRIVATE void sqlite3MallocEnd(void){ if( sqlite3GlobalConfig.m.xShutdown ){ sqlite3GlobalConfig.m.xShutdown(sqlite3GlobalConfig.m.pAppData); } memset(&mem0, 0, sizeof(mem0)); } /* ** Return the amount of memory currently checked out. */ SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_memory_used(void){ sqlite3_int64 res, mx; sqlite3_status64(SQLITE_STATUS_MEMORY_USED, &res, &mx, 0); return res; } /* ** Return the maximum amount of memory that has ever been ** checked out since either the beginning of this process ** or since the most recent reset. */ SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_memory_highwater(int resetFlag){ sqlite3_int64 res, mx; sqlite3_status64(SQLITE_STATUS_MEMORY_USED, &res, &mx, resetFlag); return mx; } /* ** Trigger the alarm */ static void sqlite3MallocAlarm(int nByte){ void (*xCallback)(void*,sqlite3_int64,int); sqlite3_int64 nowUsed; void *pArg; if( mem0.alarmCallback==0 ) return; xCallback = mem0.alarmCallback; nowUsed = sqlite3StatusValue(SQLITE_STATUS_MEMORY_USED); pArg = mem0.alarmArg; mem0.alarmCallback = 0; sqlite3_mutex_leave(mem0.mutex); xCallback(pArg, nowUsed, nByte); sqlite3_mutex_enter(mem0.mutex); mem0.alarmCallback = xCallback; mem0.alarmArg = pArg; } /* ** Do a memory allocation with statistics and alarms. Assume the ** lock is already held. 
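**
** The counters maintained here are what the sqlite3_memory_used() and
** sqlite3_memory_highwater() interfaces above report. An illustrative
** read-out:
**
**     sqlite3_int64 now  = sqlite3_memory_used();
**     sqlite3_int64 peak = sqlite3_memory_highwater(0);  // 0: do not reset
**     sqlite3_memory_highwater(1);                       // reset the peak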
*/ static int mallocWithAlarm(int n, void **pp){ int nFull; void *p; assert( sqlite3_mutex_held(mem0.mutex) ); nFull = sqlite3GlobalConfig.m.xRoundup(n); sqlite3StatusSet(SQLITE_STATUS_MALLOC_SIZE, n); if( mem0.alarmCallback!=0 ){ sqlite3_int64 nUsed = sqlite3StatusValue(SQLITE_STATUS_MEMORY_USED); if( nUsed >= mem0.alarmThreshold - nFull ){ mem0.nearlyFull = 1; sqlite3MallocAlarm(nFull); }else{ mem0.nearlyFull = 0; } } p = sqlite3GlobalConfig.m.xMalloc(nFull); #ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT if( p==0 && mem0.alarmCallback ){ sqlite3MallocAlarm(nFull); p = sqlite3GlobalConfig.m.xMalloc(nFull); } #endif if( p ){ nFull = sqlite3MallocSize(p); sqlite3StatusUp(SQLITE_STATUS_MEMORY_USED, nFull); sqlite3StatusUp(SQLITE_STATUS_MALLOC_COUNT, 1); } *pp = p; return nFull; } /* ** Allocate memory. This routine is like sqlite3_malloc() except that it ** assumes the memory subsystem has already been initialized. */ SQLITE_PRIVATE void *sqlite3Malloc(u64 n){ void *p; if( n==0 || n>=0x7fffff00 ){ /* A memory allocation of a number of bytes which is near the maximum ** signed integer value might cause an integer overflow inside of the ** xMalloc(). Hence we limit the maximum size to 0x7fffff00, giving ** 255 bytes of overhead. SQLite itself will never use anything near ** this amount. The only way to reach the limit is with sqlite3_malloc() */ p = 0; }else if( sqlite3GlobalConfig.bMemstat ){ sqlite3_mutex_enter(mem0.mutex); mallocWithAlarm((int)n, &p); sqlite3_mutex_leave(mem0.mutex); }else{ p = sqlite3GlobalConfig.m.xMalloc((int)n); } assert( EIGHT_BYTE_ALIGNMENT(p) ); /* IMP: R-11148-40995 */ return p; } /* ** This version of the memory allocation is for use by the application. ** First make sure the memory subsystem is initialized, then do the ** allocation. */ SQLITE_API void *SQLITE_STDCALL sqlite3_malloc(int n){ #ifndef SQLITE_OMIT_AUTOINIT if( sqlite3_initialize() ) return 0; #endif return n<=0 ? 0 : sqlite3Malloc(n); } SQLITE_API void *SQLITE_STDCALL sqlite3_malloc64(sqlite3_uint64 n){ #ifndef SQLITE_OMIT_AUTOINIT if( sqlite3_initialize() ) return 0; #endif return sqlite3Malloc(n); } /* ** Each thread may only have a single outstanding allocation from ** xScratchMalloc(). We verify this constraint in the single-threaded ** case by setting scratchAllocOut to 1 when an allocation ** is outstanding clearing it when the allocation is freed. */ #if SQLITE_THREADSAFE==0 && !defined(NDEBUG) static int scratchAllocOut = 0; #endif /* ** Allocate memory that is to be used and released right away. ** This routine is similar to alloca() in that it is not intended ** for situations where the memory might be held long-term. This ** routine is intended to get memory to old large transient data ** structures that would not normally fit on the stack of an ** embedded processor. 
*/ SQLITE_PRIVATE void *sqlite3ScratchMalloc(int n){ void *p; assert( n>0 ); sqlite3_mutex_enter(mem0.mutex); sqlite3StatusSet(SQLITE_STATUS_SCRATCH_SIZE, n); if( mem0.nScratchFree && sqlite3GlobalConfig.szScratch>=n ){ p = mem0.pScratchFree; mem0.pScratchFree = mem0.pScratchFree->pNext; mem0.nScratchFree--; sqlite3StatusUp(SQLITE_STATUS_SCRATCH_USED, 1); sqlite3_mutex_leave(mem0.mutex); }else{ sqlite3_mutex_leave(mem0.mutex); p = sqlite3Malloc(n); if( sqlite3GlobalConfig.bMemstat && p ){ sqlite3_mutex_enter(mem0.mutex); sqlite3StatusUp(SQLITE_STATUS_SCRATCH_OVERFLOW, sqlite3MallocSize(p)); sqlite3_mutex_leave(mem0.mutex); } sqlite3MemdebugSetType(p, MEMTYPE_SCRATCH); } assert( sqlite3_mutex_notheld(mem0.mutex) ); #if SQLITE_THREADSAFE==0 && !defined(NDEBUG) /* EVIDENCE-OF: R-12970-05880 SQLite will not use more than one scratch ** buffers per thread. ** ** This can only be checked in single-threaded mode. */ assert( scratchAllocOut==0 ); if( p ) scratchAllocOut++; #endif return p; } SQLITE_PRIVATE void sqlite3ScratchFree(void *p){ if( p ){ #if SQLITE_THREADSAFE==0 && !defined(NDEBUG) /* Verify that no more than two scratch allocation per thread ** is outstanding at one time. (This is only checked in the ** single-threaded case since checking in the multi-threaded case ** would be much more complicated.) */ assert( scratchAllocOut>=1 && scratchAllocOut<=2 ); scratchAllocOut--; #endif if( p>=sqlite3GlobalConfig.pScratch && ppNext = mem0.pScratchFree; mem0.pScratchFree = pSlot; mem0.nScratchFree++; assert( mem0.nScratchFree <= (u32)sqlite3GlobalConfig.nScratch ); sqlite3StatusDown(SQLITE_STATUS_SCRATCH_USED, 1); sqlite3_mutex_leave(mem0.mutex); }else{ /* Release memory back to the heap */ assert( sqlite3MemdebugHasType(p, MEMTYPE_SCRATCH) ); assert( sqlite3MemdebugNoType(p, (u8)~MEMTYPE_SCRATCH) ); sqlite3MemdebugSetType(p, MEMTYPE_HEAP); if( sqlite3GlobalConfig.bMemstat ){ int iSize = sqlite3MallocSize(p); sqlite3_mutex_enter(mem0.mutex); sqlite3StatusDown(SQLITE_STATUS_SCRATCH_OVERFLOW, iSize); sqlite3StatusDown(SQLITE_STATUS_MEMORY_USED, iSize); sqlite3StatusDown(SQLITE_STATUS_MALLOC_COUNT, 1); sqlite3GlobalConfig.m.xFree(p); sqlite3_mutex_leave(mem0.mutex); }else{ sqlite3GlobalConfig.m.xFree(p); } } } } /* ** TRUE if p is a lookaside memory allocation from db */ #ifndef SQLITE_OMIT_LOOKASIDE static int isLookaside(sqlite3 *db, void *p){ return p>=db->lookaside.pStart && plookaside.pEnd; } #else #define isLookaside(A,B) 0 #endif /* ** Return the size of a memory allocation previously obtained from ** sqlite3Malloc() or sqlite3_malloc(). */ SQLITE_PRIVATE int sqlite3MallocSize(void *p){ assert( sqlite3MemdebugHasType(p, MEMTYPE_HEAP) ); return sqlite3GlobalConfig.m.xSize(p); } SQLITE_PRIVATE int sqlite3DbMallocSize(sqlite3 *db, void *p){ if( db==0 ){ assert( sqlite3MemdebugNoType(p, (u8)~MEMTYPE_HEAP) ); assert( sqlite3MemdebugHasType(p, MEMTYPE_HEAP) ); return sqlite3MallocSize(p); }else{ assert( sqlite3_mutex_held(db->mutex) ); if( isLookaside(db, p) ){ return db->lookaside.sz; }else{ assert( sqlite3MemdebugHasType(p, (MEMTYPE_LOOKASIDE|MEMTYPE_HEAP)) ); assert( sqlite3MemdebugNoType(p, (u8)~(MEMTYPE_LOOKASIDE|MEMTYPE_HEAP)) ); return sqlite3GlobalConfig.m.xSize(p); } } } SQLITE_API sqlite3_uint64 SQLITE_STDCALL sqlite3_msize(void *p){ assert( sqlite3MemdebugNoType(p, (u8)~MEMTYPE_HEAP) ); assert( sqlite3MemdebugHasType(p, MEMTYPE_HEAP) ); return (sqlite3_uint64)sqlite3GlobalConfig.m.xSize(p); } /* ** Free memory previously obtained from sqlite3Malloc(). 
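**
** Illustrative sketch: the usable size of an allocation can exceed the
** requested size and may be queried with sqlite3_msize() before the buffer
** is released:
**
**     char *p = sqlite3_malloc64(100);
**     if( p ){
**       sqlite3_uint64 sz = sqlite3_msize(p);   // sz is at least 100
**       sqlite3_free(p);
**     }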
*/ SQLITE_API void SQLITE_STDCALL sqlite3_free(void *p){ if( p==0 ) return; /* IMP: R-49053-54554 */ assert( sqlite3MemdebugHasType(p, MEMTYPE_HEAP) ); assert( sqlite3MemdebugNoType(p, (u8)~MEMTYPE_HEAP) ); if( sqlite3GlobalConfig.bMemstat ){ sqlite3_mutex_enter(mem0.mutex); sqlite3StatusDown(SQLITE_STATUS_MEMORY_USED, sqlite3MallocSize(p)); sqlite3StatusDown(SQLITE_STATUS_MALLOC_COUNT, 1); sqlite3GlobalConfig.m.xFree(p); sqlite3_mutex_leave(mem0.mutex); }else{ sqlite3GlobalConfig.m.xFree(p); } } /* ** Add the size of memory allocation "p" to the count in ** *db->pnBytesFreed. */ static SQLITE_NOINLINE void measureAllocationSize(sqlite3 *db, void *p){ *db->pnBytesFreed += sqlite3DbMallocSize(db,p); } /* ** Free memory that might be associated with a particular database ** connection. */ SQLITE_PRIVATE void sqlite3DbFree(sqlite3 *db, void *p){ assert( db==0 || sqlite3_mutex_held(db->mutex) ); if( p==0 ) return; if( db ){ if( db->pnBytesFreed ){ measureAllocationSize(db, p); return; } if( isLookaside(db, p) ){ LookasideSlot *pBuf = (LookasideSlot*)p; #if SQLITE_DEBUG /* Trash all content in the buffer being freed */ memset(p, 0xaa, db->lookaside.sz); #endif pBuf->pNext = db->lookaside.pFree; db->lookaside.pFree = pBuf; db->lookaside.nOut--; return; } } assert( sqlite3MemdebugHasType(p, (MEMTYPE_LOOKASIDE|MEMTYPE_HEAP)) ); assert( sqlite3MemdebugNoType(p, (u8)~(MEMTYPE_LOOKASIDE|MEMTYPE_HEAP)) ); assert( db!=0 || sqlite3MemdebugNoType(p, MEMTYPE_LOOKASIDE) ); sqlite3MemdebugSetType(p, MEMTYPE_HEAP); sqlite3_free(p); } /* ** Change the size of an existing memory allocation */ SQLITE_PRIVATE void *sqlite3Realloc(void *pOld, u64 nBytes){ int nOld, nNew, nDiff; void *pNew; assert( sqlite3MemdebugHasType(pOld, MEMTYPE_HEAP) ); assert( sqlite3MemdebugNoType(pOld, (u8)~MEMTYPE_HEAP) ); if( pOld==0 ){ return sqlite3Malloc(nBytes); /* IMP: R-04300-56712 */ } if( nBytes==0 ){ sqlite3_free(pOld); /* IMP: R-26507-47431 */ return 0; } if( nBytes>=0x7fffff00 ){ /* The 0x7ffff00 limit term is explained in comments on sqlite3Malloc() */ return 0; } nOld = sqlite3MallocSize(pOld); /* IMPLEMENTATION-OF: R-46199-30249 SQLite guarantees that the second ** argument to xRealloc is always a value returned by a prior call to ** xRoundup. */ nNew = sqlite3GlobalConfig.m.xRoundup((int)nBytes); if( nOld==nNew ){ pNew = pOld; }else if( sqlite3GlobalConfig.bMemstat ){ sqlite3_mutex_enter(mem0.mutex); sqlite3StatusSet(SQLITE_STATUS_MALLOC_SIZE, (int)nBytes); nDiff = nNew - nOld; if( sqlite3StatusValue(SQLITE_STATUS_MEMORY_USED) >= mem0.alarmThreshold-nDiff ){ sqlite3MallocAlarm(nDiff); } pNew = sqlite3GlobalConfig.m.xRealloc(pOld, nNew); if( pNew==0 && mem0.alarmCallback ){ sqlite3MallocAlarm((int)nBytes); pNew = sqlite3GlobalConfig.m.xRealloc(pOld, nNew); } if( pNew ){ nNew = sqlite3MallocSize(pNew); sqlite3StatusUp(SQLITE_STATUS_MEMORY_USED, nNew-nOld); } sqlite3_mutex_leave(mem0.mutex); }else{ pNew = sqlite3GlobalConfig.m.xRealloc(pOld, nNew); } assert( EIGHT_BYTE_ALIGNMENT(pNew) ); /* IMP: R-11148-40995 */ return pNew; } /* ** The public interface to sqlite3Realloc. Make sure that the memory ** subsystem is initialized prior to invoking sqliteRealloc. 
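**
** An illustrative calling pattern (a sketch only): keep the old pointer
** until the resize is known to have succeeded, so that an out-of-memory
** failure neither leaks nor loses the existing buffer:
**
**     char *pNew = sqlite3_realloc64(pOld, nNew);
**     if( pNew ){
**       pOld = pNew;            // resize succeeded
**     }else{
**       sqlite3_free(pOld);     // resize failed; pOld was left intact
**     }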
*/ SQLITE_API void *SQLITE_STDCALL sqlite3_realloc(void *pOld, int n){ #ifndef SQLITE_OMIT_AUTOINIT if( sqlite3_initialize() ) return 0; #endif if( n<0 ) n = 0; /* IMP: R-26507-47431 */ return sqlite3Realloc(pOld, n); } SQLITE_API void *SQLITE_STDCALL sqlite3_realloc64(void *pOld, sqlite3_uint64 n){ #ifndef SQLITE_OMIT_AUTOINIT if( sqlite3_initialize() ) return 0; #endif return sqlite3Realloc(pOld, n); } /* ** Allocate and zero memory. */ SQLITE_PRIVATE void *sqlite3MallocZero(u64 n){ void *p = sqlite3Malloc(n); if( p ){ memset(p, 0, (size_t)n); } return p; } /* ** Allocate and zero memory. If the allocation fails, make ** the mallocFailed flag in the connection pointer. */ SQLITE_PRIVATE void *sqlite3DbMallocZero(sqlite3 *db, u64 n){ void *p = sqlite3DbMallocRaw(db, n); if( p ){ memset(p, 0, (size_t)n); } return p; } /* ** Allocate and zero memory. If the allocation fails, make ** the mallocFailed flag in the connection pointer. ** ** If db!=0 and db->mallocFailed is true (indicating a prior malloc ** failure on the same database connection) then always return 0. ** Hence for a particular database connection, once malloc starts ** failing, it fails consistently until mallocFailed is reset. ** This is an important assumption. There are many places in the ** code that do things like this: ** ** int *a = (int*)sqlite3DbMallocRaw(db, 100); ** int *b = (int*)sqlite3DbMallocRaw(db, 200); ** if( b ) a[10] = 9; ** ** In other words, if a subsequent malloc (ex: "b") worked, it is assumed ** that all prior mallocs (ex: "a") worked too. */ SQLITE_PRIVATE void *sqlite3DbMallocRaw(sqlite3 *db, u64 n){ void *p; assert( db==0 || sqlite3_mutex_held(db->mutex) ); assert( db==0 || db->pnBytesFreed==0 ); #ifndef SQLITE_OMIT_LOOKASIDE if( db ){ LookasideSlot *pBuf; if( db->mallocFailed ){ return 0; } if( db->lookaside.bEnabled ){ if( n>db->lookaside.sz ){ db->lookaside.anStat[1]++; }else if( (pBuf = db->lookaside.pFree)==0 ){ db->lookaside.anStat[2]++; }else{ db->lookaside.pFree = pBuf->pNext; db->lookaside.nOut++; db->lookaside.anStat[0]++; if( db->lookaside.nOut>db->lookaside.mxOut ){ db->lookaside.mxOut = db->lookaside.nOut; } return (void*)pBuf; } } } #else if( db && db->mallocFailed ){ return 0; } #endif p = sqlite3Malloc(n); if( !p && db ){ db->mallocFailed = 1; } sqlite3MemdebugSetType(p, (db && db->lookaside.bEnabled) ? MEMTYPE_LOOKASIDE : MEMTYPE_HEAP); return p; } /* ** Resize the block of memory pointed to by p to n bytes. If the ** resize fails, set the mallocFailed flag in the connection object. */ SQLITE_PRIVATE void *sqlite3DbRealloc(sqlite3 *db, void *p, u64 n){ void *pNew = 0; assert( db!=0 ); assert( sqlite3_mutex_held(db->mutex) ); if( db->mallocFailed==0 ){ if( p==0 ){ return sqlite3DbMallocRaw(db, n); } if( isLookaside(db, p) ){ if( n<=db->lookaside.sz ){ return p; } pNew = sqlite3DbMallocRaw(db, n); if( pNew ){ memcpy(pNew, p, db->lookaside.sz); sqlite3DbFree(db, p); } }else{ assert( sqlite3MemdebugHasType(p, (MEMTYPE_LOOKASIDE|MEMTYPE_HEAP)) ); assert( sqlite3MemdebugNoType(p, (u8)~(MEMTYPE_LOOKASIDE|MEMTYPE_HEAP)) ); sqlite3MemdebugSetType(p, MEMTYPE_HEAP); pNew = sqlite3_realloc64(p, n); if( !pNew ){ db->mallocFailed = 1; } sqlite3MemdebugSetType(pNew, (db->lookaside.bEnabled ? MEMTYPE_LOOKASIDE : MEMTYPE_HEAP)); } } return pNew; } /* ** Attempt to reallocate p. If the reallocation fails, then free p ** and set the mallocFailed flag in the database connection. 
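**
** An illustrative calling pattern (a sketch, not a quote from this file):
** because the old allocation is released on failure, callers may simply
** overwrite their pointer and test the result:
**
**     z = sqlite3DbReallocOrFree(db, z, nNew);
**     if( z==0 ) return SQLITE_NOMEM;   // db->mallocFailed has been set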
*/ SQLITE_PRIVATE void *sqlite3DbReallocOrFree(sqlite3 *db, void *p, u64 n){ void *pNew; pNew = sqlite3DbRealloc(db, p, n); if( !pNew ){ sqlite3DbFree(db, p); } return pNew; } /* ** Make a copy of a string in memory obtained from sqliteMalloc(). These ** functions call sqlite3MallocRaw() directly instead of sqliteMalloc(). This ** is because when memory debugging is turned on, these two functions are ** called via macros that record the current file and line number in the ** ThreadData structure. */ SQLITE_PRIVATE char *sqlite3DbStrDup(sqlite3 *db, const char *z){ char *zNew; size_t n; if( z==0 ){ return 0; } n = sqlite3Strlen30(z) + 1; assert( (n&0x7fffffff)==n ); zNew = sqlite3DbMallocRaw(db, (int)n); if( zNew ){ memcpy(zNew, z, n); } return zNew; } SQLITE_PRIVATE char *sqlite3DbStrNDup(sqlite3 *db, const char *z, u64 n){ char *zNew; if( z==0 ){ return 0; } assert( (n&0x7fffffff)==n ); zNew = sqlite3DbMallocRaw(db, n+1); if( zNew ){ memcpy(zNew, z, (size_t)n); zNew[n] = 0; } return zNew; } /* ** Free any prior content in *pz and replace it with a copy of zNew. */ SQLITE_PRIVATE void sqlite3SetString(char **pz, sqlite3 *db, const char *zNew){ sqlite3DbFree(db, *pz); *pz = sqlite3DbStrDup(db, zNew); } /* ** Take actions at the end of an API call to indicate an OOM error */ static SQLITE_NOINLINE int apiOomError(sqlite3 *db){ db->mallocFailed = 0; sqlite3Error(db, SQLITE_NOMEM); return SQLITE_NOMEM; } /* ** This function must be called before exiting any API function (i.e. ** returning control to the user) that has called sqlite3_malloc or ** sqlite3_realloc. ** ** The returned value is normally a copy of the second argument to this ** function. However, if a malloc() failure has occurred since the previous ** invocation SQLITE_NOMEM is returned instead. ** ** If an OOM as occurred, then the connection error-code (the value ** returned by sqlite3_errcode()) is set to SQLITE_NOMEM. */ SQLITE_PRIVATE int sqlite3ApiExit(sqlite3* db, int rc){ /* If the db handle must hold the connection handle mutex here. ** Otherwise the read (and possible write) of db->mallocFailed ** is unsafe, as is the call to sqlite3Error(). */ assert( db!=0 ); assert( sqlite3_mutex_held(db->mutex) ); if( db->mallocFailed || rc==SQLITE_IOERR_NOMEM ){ return apiOomError(db); } return rc & db->errMask; } /************** End of malloc.c **********************************************/ /************** Begin file printf.c ******************************************/ /* ** The "printf" code that follows dates from the 1980's. It is in ** the public domain. ** ************************************************************************** ** ** This file contains code for a set of "printf"-like routines. These ** routines format strings much like the printf() from the standard C ** library, though the implementation here has enhancements to support ** SQLite. */ /* #include "sqliteInt.h" */ /* ** Conversion types fall into various categories as defined by the ** following enumeration. */ #define etRADIX 1 /* Integer types. %d, %x, %o, and so forth */ #define etFLOAT 2 /* Floating point. %f */ #define etEXP 3 /* Exponentional notation. %e and %E */ #define etGENERIC 4 /* Floating or exponential, depending on exponent. %g */ #define etSIZE 5 /* Return number of characters processed so far. %n */ #define etSTRING 6 /* Strings. %s */ #define etDYNSTRING 7 /* Dynamically allocated strings. %z */ #define etPERCENT 8 /* Percent symbol. %% */ #define etCHARX 9 /* Characters. 
%c */ /* The rest are extensions, not normally found in printf() */ #define etSQLESCAPE 10 /* Strings with '\'' doubled. %q */ #define etSQLESCAPE2 11 /* Strings with '\'' doubled and enclosed in '', NULL pointers replaced by SQL NULL. %Q */ #define etTOKEN 12 /* a pointer to a Token structure */ #define etSRCLIST 13 /* a pointer to a SrcList */ #define etPOINTER 14 /* The %p conversion */ #define etSQLESCAPE3 15 /* %w -> Strings with '\"' doubled */ #define etORDINAL 16 /* %r -> 1st, 2nd, 3rd, 4th, etc. English only */ #define etINVALID 0 /* Any unrecognized conversion type */ /* ** An "etByte" is an 8-bit unsigned value. */ typedef unsigned char etByte; /* ** Each builtin conversion character (ex: the 'd' in "%d") is described ** by an instance of the following structure */ typedef struct et_info { /* Information about each format field */ char fmttype; /* The format field code letter */ etByte base; /* The base for radix conversion */ etByte flags; /* One or more of FLAG_ constants below */ etByte type; /* Conversion paradigm */ etByte charset; /* Offset into aDigits[] of the digits string */ etByte prefix; /* Offset into aPrefix[] of the prefix string */ } et_info; /* ** Allowed values for et_info.flags */ #define FLAG_SIGNED 1 /* True if the value to convert is signed */ #define FLAG_INTERN 2 /* True if for internal use only */ #define FLAG_STRING 4 /* Allow infinity precision */ /* ** The following table is searched linearly, so it is good to put the ** most frequently used conversion types first. */ static const char aDigits[] = "0123456789ABCDEF0123456789abcdef"; static const char aPrefix[] = "-x0\000X0"; static const et_info fmtinfo[] = { { 'd', 10, 1, etRADIX, 0, 0 }, { 's', 0, 4, etSTRING, 0, 0 }, { 'g', 0, 1, etGENERIC, 30, 0 }, { 'z', 0, 4, etDYNSTRING, 0, 0 }, { 'q', 0, 4, etSQLESCAPE, 0, 0 }, { 'Q', 0, 4, etSQLESCAPE2, 0, 0 }, { 'w', 0, 4, etSQLESCAPE3, 0, 0 }, { 'c', 0, 0, etCHARX, 0, 0 }, { 'o', 8, 0, etRADIX, 0, 2 }, { 'u', 10, 0, etRADIX, 0, 0 }, { 'x', 16, 0, etRADIX, 16, 1 }, { 'X', 16, 0, etRADIX, 0, 4 }, #ifndef SQLITE_OMIT_FLOATING_POINT { 'f', 0, 1, etFLOAT, 0, 0 }, { 'e', 0, 1, etEXP, 30, 0 }, { 'E', 0, 1, etEXP, 14, 0 }, { 'G', 0, 1, etGENERIC, 14, 0 }, #endif { 'i', 10, 1, etRADIX, 0, 0 }, { 'n', 0, 0, etSIZE, 0, 0 }, { '%', 0, 0, etPERCENT, 0, 0 }, { 'p', 16, 0, etPOINTER, 0, 1 }, /* All the rest have the FLAG_INTERN bit set and are thus for internal ** use only */ { 'T', 0, 2, etTOKEN, 0, 0 }, { 'S', 0, 2, etSRCLIST, 0, 0 }, { 'r', 10, 3, etORDINAL, 0, 0 }, }; /* ** If SQLITE_OMIT_FLOATING_POINT is defined, then none of the floating point ** conversions will work. */ #ifndef SQLITE_OMIT_FLOATING_POINT /* ** "*val" is a double such that 0.1 <= *val < 10.0 ** Return the ascii code for the leading digit of *val, then ** multiply "*val" by 10.0 to renormalize. ** ** Example: ** input: *val = 3.14159 ** output: *val = 1.4159 function return = '3' ** ** The counter *cnt is incremented each time. After counter exceeds ** 16 (the number of significant digits in a 64-bit float) '0' is ** always returned. */ static char et_getdigit(LONGDOUBLE_TYPE *val, int *cnt){ int digit; LONGDOUBLE_TYPE d; if( (*cnt)<=0 ) return '0'; (*cnt)--; digit = (int)*val; d = digit; digit += '0'; *val = (*val - d)*10.0; return (char)digit; } #endif /* SQLITE_OMIT_FLOATING_POINT */ /* ** Set the StrAccum object to an error mode. 
*/ static void setStrAccumError(StrAccum *p, u8 eError){ assert( eError==STRACCUM_NOMEM || eError==STRACCUM_TOOBIG ); p->accError = eError; p->nAlloc = 0; } /* ** Extra argument values from a PrintfArguments object */ static sqlite3_int64 getIntArg(PrintfArguments *p){ if( p->nArg<=p->nUsed ) return 0; return sqlite3_value_int64(p->apArg[p->nUsed++]); } static double getDoubleArg(PrintfArguments *p){ if( p->nArg<=p->nUsed ) return 0.0; return sqlite3_value_double(p->apArg[p->nUsed++]); } static char *getTextArg(PrintfArguments *p){ if( p->nArg<=p->nUsed ) return 0; return (char*)sqlite3_value_text(p->apArg[p->nUsed++]); } /* ** On machines with a small stack size, you can redefine the ** SQLITE_PRINT_BUF_SIZE to be something smaller, if desired. */ #ifndef SQLITE_PRINT_BUF_SIZE # define SQLITE_PRINT_BUF_SIZE 70 #endif #define etBUFSIZE SQLITE_PRINT_BUF_SIZE /* Size of the output buffer */ /* ** Render a string given by "fmt" into the StrAccum object. */ SQLITE_PRIVATE void sqlite3VXPrintf( StrAccum *pAccum, /* Accumulate results here */ u32 bFlags, /* SQLITE_PRINTF_* flags */ const char *fmt, /* Format string */ va_list ap /* arguments */ ){ int c; /* Next character in the format string */ char *bufpt; /* Pointer to the conversion buffer */ int precision; /* Precision of the current field */ int length; /* Length of the field */ int idx; /* A general purpose loop counter */ int width; /* Width of the current field */ etByte flag_leftjustify; /* True if "-" flag is present */ etByte flag_plussign; /* True if "+" flag is present */ etByte flag_blanksign; /* True if " " flag is present */ etByte flag_alternateform; /* True if "#" flag is present */ etByte flag_altform2; /* True if "!" flag is present */ etByte flag_zeropad; /* True if field width constant starts with zero */ etByte flag_long; /* True if "l" flag is present */ etByte flag_longlong; /* True if the "ll" flag is present */ etByte done; /* Loop termination flag */ etByte xtype = 0; /* Conversion paradigm */ u8 bArgList; /* True for SQLITE_PRINTF_SQLFUNC */ u8 useIntern; /* Ok to use internal conversions (ex: %T) */ char prefix; /* Prefix character. "+" or "-" or " " or '\0'. 
*/ sqlite_uint64 longvalue; /* Value for integer types */ LONGDOUBLE_TYPE realvalue; /* Value for real types */ const et_info *infop; /* Pointer to the appropriate info structure */ char *zOut; /* Rendering buffer */ int nOut; /* Size of the rendering buffer */ char *zExtra = 0; /* Malloced memory used by some conversion */ #ifndef SQLITE_OMIT_FLOATING_POINT int exp, e2; /* exponent of real numbers */ int nsd; /* Number of significant digits returned */ double rounder; /* Used for rounding floating point values */ etByte flag_dp; /* True if decimal point should be shown */ etByte flag_rtz; /* True if trailing zeros should be removed */ #endif PrintfArguments *pArgList = 0; /* Arguments for SQLITE_PRINTF_SQLFUNC */ char buf[etBUFSIZE]; /* Conversion buffer */ bufpt = 0; if( bFlags ){ if( (bArgList = (bFlags & SQLITE_PRINTF_SQLFUNC))!=0 ){ pArgList = va_arg(ap, PrintfArguments*); } useIntern = bFlags & SQLITE_PRINTF_INTERNAL; }else{ bArgList = useIntern = 0; } for(; (c=(*fmt))!=0; ++fmt){ if( c!='%' ){ bufpt = (char *)fmt; #if HAVE_STRCHRNUL fmt = strchrnul(fmt, '%'); #else do{ fmt++; }while( *fmt && *fmt != '%' ); #endif sqlite3StrAccumAppend(pAccum, bufpt, (int)(fmt - bufpt)); if( *fmt==0 ) break; } if( (c=(*++fmt))==0 ){ sqlite3StrAccumAppend(pAccum, "%", 1); break; } /* Find out what flags are present */ flag_leftjustify = flag_plussign = flag_blanksign = flag_alternateform = flag_altform2 = flag_zeropad = 0; done = 0; do{ switch( c ){ case '-': flag_leftjustify = 1; break; case '+': flag_plussign = 1; break; case ' ': flag_blanksign = 1; break; case '#': flag_alternateform = 1; break; case '!': flag_altform2 = 1; break; case '0': flag_zeropad = 1; break; default: done = 1; break; } }while( !done && (c=(*++fmt))!=0 ); /* Get the field width */ if( c=='*' ){ if( bArgList ){ width = (int)getIntArg(pArgList); }else{ width = va_arg(ap,int); } if( width<0 ){ flag_leftjustify = 1; width = width >= -2147483647 ? -width : 0; } c = *++fmt; }else{ unsigned wx = 0; while( c>='0' && c<='9' ){ wx = wx*10 + c - '0'; c = *++fmt; } testcase( wx>0x7fffffff ); width = wx & 0x7fffffff; } /* Get the precision */ if( c=='.' ){ c = *++fmt; if( c=='*' ){ if( bArgList ){ precision = (int)getIntArg(pArgList); }else{ precision = va_arg(ap,int); } c = *++fmt; if( precision<0 ){ precision = precision >= -2147483647 ? -precision : -1; } }else{ unsigned px = 0; while( c>='0' && c<='9' ){ px = px*10 + c - '0'; c = *++fmt; } testcase( px>0x7fffffff ); precision = px & 0x7fffffff; } }else{ precision = -1; } /* Get the conversion type modifier */ if( c=='l' ){ flag_long = 1; c = *++fmt; if( c=='l' ){ flag_longlong = 1; c = *++fmt; }else{ flag_longlong = 0; } }else{ flag_long = flag_longlong = 0; } /* Fetch the info entry for the field */ infop = &fmtinfo[0]; xtype = etINVALID; for(idx=0; idxflags & FLAG_INTERN)==0 ){ xtype = infop->type; }else{ return; } break; } } /* ** At this point, variables are initialized as follows: ** ** flag_alternateform TRUE if a '#' is present. ** flag_altform2 TRUE if a '!' is present. ** flag_plussign TRUE if a '+' is present. ** flag_leftjustify TRUE if a '-' is present or if the ** field width was negative. ** flag_zeropad TRUE if the width began with 0. ** flag_long TRUE if the letter 'l' (ell) prefixed ** the conversion character. ** flag_longlong TRUE if the letter 'll' (ell ell) prefixed ** the conversion character. ** flag_blanksign TRUE if a ' ' is present. ** width The specified field width. This is ** always non-negative. Zero is the default. ** precision The specified precision. 
The default ** is -1. ** xtype The class of the conversion. ** infop Pointer to the appropriate info struct. */ switch( xtype ){ case etPOINTER: flag_longlong = sizeof(char*)==sizeof(i64); flag_long = sizeof(char*)==sizeof(long int); /* Fall through into the next case */ case etORDINAL: case etRADIX: if( infop->flags & FLAG_SIGNED ){ i64 v; if( bArgList ){ v = getIntArg(pArgList); }else if( flag_longlong ){ v = va_arg(ap,i64); }else if( flag_long ){ v = va_arg(ap,long int); }else{ v = va_arg(ap,int); } if( v<0 ){ if( v==SMALLEST_INT64 ){ longvalue = ((u64)1)<<63; }else{ longvalue = -v; } prefix = '-'; }else{ longvalue = v; if( flag_plussign ) prefix = '+'; else if( flag_blanksign ) prefix = ' '; else prefix = 0; } }else{ if( bArgList ){ longvalue = (u64)getIntArg(pArgList); }else if( flag_longlong ){ longvalue = va_arg(ap,u64); }else if( flag_long ){ longvalue = va_arg(ap,unsigned long int); }else{ longvalue = va_arg(ap,unsigned int); } prefix = 0; } if( longvalue==0 ) flag_alternateform = 0; if( flag_zeropad && precision=4 || (longvalue/10)%10==1 ){ x = 0; } *(--bufpt) = zOrd[x*2+1]; *(--bufpt) = zOrd[x*2]; } { const char *cset = &aDigits[infop->charset]; u8 base = infop->base; do{ /* Convert to ascii */ *(--bufpt) = cset[longvalue%base]; longvalue = longvalue/base; }while( longvalue>0 ); } length = (int)(&zOut[nOut-1]-bufpt); for(idx=precision-length; idx>0; idx--){ *(--bufpt) = '0'; /* Zero pad */ } if( prefix ) *(--bufpt) = prefix; /* Add sign */ if( flag_alternateform && infop->prefix ){ /* Add "0" or "0x" */ const char *pre; char x; pre = &aPrefix[infop->prefix]; for(; (x=(*pre))!=0; pre++) *(--bufpt) = x; } length = (int)(&zOut[nOut-1]-bufpt); break; case etFLOAT: case etEXP: case etGENERIC: if( bArgList ){ realvalue = getDoubleArg(pArgList); }else{ realvalue = va_arg(ap,double); } #ifdef SQLITE_OMIT_FLOATING_POINT length = 0; #else if( precision<0 ) precision = 6; /* Set default precision */ if( realvalue<0.0 ){ realvalue = -realvalue; prefix = '-'; }else{ if( flag_plussign ) prefix = '+'; else if( flag_blanksign ) prefix = ' '; else prefix = 0; } if( xtype==etGENERIC && precision>0 ) precision--; testcase( precision>0xfff ); for(idx=precision&0xfff, rounder=0.5; idx>0; idx--, rounder*=0.1){} if( xtype==etFLOAT ) realvalue += rounder; /* Normalize realvalue to within 10.0 > realvalue >= 1.0 */ exp = 0; if( sqlite3IsNaN((double)realvalue) ){ bufpt = "NaN"; length = 3; break; } if( realvalue>0.0 ){ LONGDOUBLE_TYPE scale = 1.0; while( realvalue>=1e100*scale && exp<=350 ){ scale *= 1e100;exp+=100;} while( realvalue>=1e64*scale && exp<=350 ){ scale *= 1e64; exp+=64; } while( realvalue>=1e8*scale && exp<=350 ){ scale *= 1e8; exp+=8; } while( realvalue>=10.0*scale && exp<=350 ){ scale *= 10.0; exp++; } realvalue /= scale; while( realvalue<1e-8 ){ realvalue *= 1e8; exp-=8; } while( realvalue<1.0 ){ realvalue *= 10.0; exp--; } if( exp>350 ){ if( prefix=='-' ){ bufpt = "-Inf"; }else if( prefix=='+' ){ bufpt = "+Inf"; }else{ bufpt = "Inf"; } length = sqlite3Strlen30(bufpt); break; } } bufpt = buf; /* ** If the field type is etGENERIC, then convert to either etEXP ** or etFLOAT, as appropriate. 
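** For example, with the default precision of 6 (reduced to 5 above for
** %g), the value 1234.5 normalizes to an exponent of 3, which is neither
** less than -4 nor greater than 5, so it is rendered through the etFLOAT
** path as "1234.5" once trailing zeros are removed.  The value
** 0.00001234 normalizes to an exponent of -5, so it takes the etEXP path
** and renders as "1.234e-05".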
*/ if( xtype!=etFLOAT ){ realvalue += rounder; if( realvalue>=10.0 ){ realvalue *= 0.1; exp++; } } if( xtype==etGENERIC ){ flag_rtz = !flag_alternateform; if( exp<-4 || exp>precision ){ xtype = etEXP; }else{ precision = precision - exp; xtype = etFLOAT; } }else{ flag_rtz = flag_altform2; } if( xtype==etEXP ){ e2 = 0; }else{ e2 = exp; } if( MAX(e2,0)+(i64)precision+(i64)width > etBUFSIZE - 15 ){ bufpt = zExtra = sqlite3Malloc( MAX(e2,0)+(i64)precision+(i64)width+15 ); if( bufpt==0 ){ setStrAccumError(pAccum, STRACCUM_NOMEM); return; } } zOut = bufpt; nsd = 16 + flag_altform2*10; flag_dp = (precision>0 ?1:0) | flag_alternateform | flag_altform2; /* The sign in front of the number */ if( prefix ){ *(bufpt++) = prefix; } /* Digits prior to the decimal point */ if( e2<0 ){ *(bufpt++) = '0'; }else{ for(; e2>=0; e2--){ *(bufpt++) = et_getdigit(&realvalue,&nsd); } } /* The decimal point */ if( flag_dp ){ *(bufpt++) = '.'; } /* "0" digits after the decimal point but before the first ** significant digit of the number */ for(e2++; e2<0; precision--, e2++){ assert( precision>0 ); *(bufpt++) = '0'; } /* Significant digits after the decimal point */ while( (precision--)>0 ){ *(bufpt++) = et_getdigit(&realvalue,&nsd); } /* Remove trailing zeros and the "." if no digits follow the "." */ if( flag_rtz && flag_dp ){ while( bufpt[-1]=='0' ) *(--bufpt) = 0; assert( bufpt>zOut ); if( bufpt[-1]=='.' ){ if( flag_altform2 ){ *(bufpt++) = '0'; }else{ *(--bufpt) = 0; } } } /* Add the "eNNN" suffix */ if( xtype==etEXP ){ *(bufpt++) = aDigits[infop->charset]; if( exp<0 ){ *(bufpt++) = '-'; exp = -exp; }else{ *(bufpt++) = '+'; } if( exp>=100 ){ *(bufpt++) = (char)((exp/100)+'0'); /* 100's digit */ exp %= 100; } *(bufpt++) = (char)(exp/10+'0'); /* 10's digit */ *(bufpt++) = (char)(exp%10+'0'); /* 1's digit */ } *bufpt = 0; /* The converted number is in buf[] and zero terminated. Output it. ** Note that the number is in the usual order, not reversed as with ** integer conversions. */ length = (int)(bufpt-zOut); bufpt = zOut; /* Special case: Add leading zeros if the flag_zeropad flag is ** set and we are not left justified */ if( flag_zeropad && !flag_leftjustify && length < width){ int i; int nPad = width - length; for(i=width; i>=nPad; i--){ bufpt[i] = bufpt[i-nPad]; } i = prefix!=0; while( nPad-- ) bufpt[i++] = '0'; length = width; } #endif /* !defined(SQLITE_OMIT_FLOATING_POINT) */ break; case etSIZE: if( !bArgList ){ *(va_arg(ap,int*)) = pAccum->nChar; } length = width = 0; break; case etPERCENT: buf[0] = '%'; bufpt = buf; length = 1; break; case etCHARX: if( bArgList ){ bufpt = getTextArg(pArgList); c = bufpt ? 
bufpt[0] : 0; }else{ c = va_arg(ap,int); } if( precision>1 ){ width -= precision-1; if( width>1 && !flag_leftjustify ){ sqlite3AppendChar(pAccum, width-1, ' '); width = 0; } sqlite3AppendChar(pAccum, precision-1, c); } length = 1; buf[0] = c; bufpt = buf; break; case etSTRING: case etDYNSTRING: if( bArgList ){ bufpt = getTextArg(pArgList); }else{ bufpt = va_arg(ap,char*); } if( bufpt==0 ){ bufpt = ""; }else if( xtype==etDYNSTRING && !bArgList ){ zExtra = bufpt; } if( precision>=0 ){ for(length=0; lengthetBUFSIZE ){ bufpt = zExtra = sqlite3Malloc( n ); if( bufpt==0 ){ setStrAccumError(pAccum, STRACCUM_NOMEM); return; } }else{ bufpt = buf; } j = 0; if( needQuote ) bufpt[j++] = q; k = i; for(i=0; i=0 && precisionn ){ sqlite3StrAccumAppend(pAccum, (const char*)pToken->z, pToken->n); } length = width = 0; break; } case etSRCLIST: { SrcList *pSrc = va_arg(ap, SrcList*); int k = va_arg(ap, int); struct SrcList_item *pItem = &pSrc->a[k]; assert( bArgList==0 ); assert( k>=0 && knSrc ); if( pItem->zDatabase ){ sqlite3StrAccumAppendAll(pAccum, pItem->zDatabase); sqlite3StrAccumAppend(pAccum, ".", 1); } sqlite3StrAccumAppendAll(pAccum, pItem->zName); length = width = 0; break; } default: { assert( xtype==etINVALID ); return; } }/* End switch over the format type */ /* ** The text of the conversion is pointed to by "bufpt" and is ** "length" characters long. The field width is "width". Do ** the output. */ width -= length; if( width>0 && !flag_leftjustify ) sqlite3AppendChar(pAccum, width, ' '); sqlite3StrAccumAppend(pAccum, bufpt, length); if( width>0 && flag_leftjustify ) sqlite3AppendChar(pAccum, width, ' '); if( zExtra ){ sqlite3_free(zExtra); zExtra = 0; } }/* End for loop over the format string */ } /* End of function */ /* ** Enlarge the memory allocation on a StrAccum object so that it is ** able to accept at least N more bytes of text. ** ** Return the number of bytes of text that StrAccum is able to accept ** after the attempted enlargement. The value returned might be zero. */ static int sqlite3StrAccumEnlarge(StrAccum *p, int N){ char *zNew; assert( p->nChar+(i64)N >= p->nAlloc ); /* Only called if really needed */ if( p->accError ){ testcase(p->accError==STRACCUM_TOOBIG); testcase(p->accError==STRACCUM_NOMEM); return 0; } if( p->mxAlloc==0 ){ N = p->nAlloc - p->nChar - 1; setStrAccumError(p, STRACCUM_TOOBIG); return N; }else{ char *zOld = (p->zText==p->zBase ? 0 : p->zText); i64 szNew = p->nChar; szNew += N + 1; if( szNew+p->nChar<=p->mxAlloc ){ /* Force exponential buffer size growth as long as it does not overflow, ** to avoid having to call this routine too often */ szNew += p->nChar; } if( szNew > p->mxAlloc ){ sqlite3StrAccumReset(p); setStrAccumError(p, STRACCUM_TOOBIG); return 0; }else{ p->nAlloc = (int)szNew; } if( p->db ){ zNew = sqlite3DbRealloc(p->db, zOld, p->nAlloc); }else{ zNew = sqlite3_realloc64(zOld, p->nAlloc); } if( zNew ){ assert( p->zText!=0 || p->nChar==0 ); if( zOld==0 && p->nChar>0 ) memcpy(zNew, p->zText, p->nChar); p->zText = zNew; p->nAlloc = sqlite3DbMallocSize(p->db, zNew); }else{ sqlite3StrAccumReset(p); setStrAccumError(p, STRACCUM_NOMEM); return 0; } } return N; } /* ** Append N copies of character c to the given string buffer. */ SQLITE_PRIVATE void sqlite3AppendChar(StrAccum *p, int N, char c){ testcase( p->nChar + (i64)N > 0x7fffffff ); if( p->nChar+(i64)N >= p->nAlloc && (N = sqlite3StrAccumEnlarge(p, N))<=0 ){ return; } while( (N--)>0 ) p->zText[p->nChar++] = c; } /* ** The StrAccum "p" is not large enough to accept N new bytes of z[]. 
** So enlarge if first, then do the append. ** ** This is a helper routine to sqlite3StrAccumAppend() that does special-case ** work (enlarging the buffer) using tail recursion, so that the ** sqlite3StrAccumAppend() routine can use fast calling semantics. */ static void SQLITE_NOINLINE enlargeAndAppend(StrAccum *p, const char *z, int N){ N = sqlite3StrAccumEnlarge(p, N); if( N>0 ){ memcpy(&p->zText[p->nChar], z, N); p->nChar += N; } } /* ** Append N bytes of text from z to the StrAccum object. Increase the ** size of the memory allocation for StrAccum if necessary. */ SQLITE_PRIVATE void sqlite3StrAccumAppend(StrAccum *p, const char *z, int N){ assert( z!=0 || N==0 ); assert( p->zText!=0 || p->nChar==0 || p->accError ); assert( N>=0 ); assert( p->accError==0 || p->nAlloc==0 ); if( p->nChar+N >= p->nAlloc ){ enlargeAndAppend(p,z,N); }else{ assert( p->zText ); p->nChar += N; memcpy(&p->zText[p->nChar-N], z, N); } } /* ** Append the complete text of zero-terminated string z[] to the p string. */ SQLITE_PRIVATE void sqlite3StrAccumAppendAll(StrAccum *p, const char *z){ sqlite3StrAccumAppend(p, z, sqlite3Strlen30(z)); } /* ** Finish off a string by making sure it is zero-terminated. ** Return a pointer to the resulting string. Return a NULL ** pointer if any kind of error was encountered. */ SQLITE_PRIVATE char *sqlite3StrAccumFinish(StrAccum *p){ if( p->zText ){ p->zText[p->nChar] = 0; if( p->mxAlloc>0 && p->zText==p->zBase ){ p->zText = sqlite3DbMallocRaw(p->db, p->nChar+1 ); if( p->zText ){ memcpy(p->zText, p->zBase, p->nChar+1); }else{ setStrAccumError(p, STRACCUM_NOMEM); } } } return p->zText; } /* ** Reset an StrAccum string. Reclaim all malloced memory. */ SQLITE_PRIVATE void sqlite3StrAccumReset(StrAccum *p){ if( p->zText!=p->zBase ){ sqlite3DbFree(p->db, p->zText); } p->zText = 0; } /* ** Initialize a string accumulator. ** ** p: The accumulator to be initialized. ** db: Pointer to a database connection. May be NULL. Lookaside ** memory is used if not NULL. db->mallocFailed is set appropriately ** when not NULL. ** zBase: An initial buffer. May be NULL in which case the initial buffer ** is malloced. ** n: Size of zBase in bytes. If total space requirements never exceed ** n then no memory allocations ever occur. ** mx: Maximum number of bytes to accumulate. If mx==0 then no memory ** allocations will ever occur. */ SQLITE_PRIVATE void sqlite3StrAccumInit(StrAccum *p, sqlite3 *db, char *zBase, int n, int mx){ p->zText = p->zBase = zBase; p->db = db; p->nChar = 0; p->nAlloc = n; p->mxAlloc = mx; p->accError = 0; } /* ** Print into memory obtained from sqliteMalloc(). Use the internal ** %-conversion extensions. */ SQLITE_PRIVATE char *sqlite3VMPrintf(sqlite3 *db, const char *zFormat, va_list ap){ char *z; char zBase[SQLITE_PRINT_BUF_SIZE]; StrAccum acc; assert( db!=0 ); sqlite3StrAccumInit(&acc, db, zBase, sizeof(zBase), db->aLimit[SQLITE_LIMIT_LENGTH]); sqlite3VXPrintf(&acc, SQLITE_PRINTF_INTERNAL, zFormat, ap); z = sqlite3StrAccumFinish(&acc); if( acc.accError==STRACCUM_NOMEM ){ db->mallocFailed = 1; } return z; } /* ** Print into memory obtained from sqliteMalloc(). Use the internal ** %-conversion extensions. */ SQLITE_PRIVATE char *sqlite3MPrintf(sqlite3 *db, const char *zFormat, ...){ va_list ap; char *z; va_start(ap, zFormat); z = sqlite3VMPrintf(db, zFormat, ap); va_end(ap); return z; } /* ** Print into memory obtained from sqlite3_malloc(). Omit the internal ** %-conversion extensions. 
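** The public wrappers sqlite3_vmprintf() and sqlite3_mprintf() below are
** the usual way applications reach this formatter.  A typical, purely
** illustrative use, where zName is an arbitrary and possibly hostile
** string (db, rc, and the table t are likewise illustrative):
**
**    char *zSql = sqlite3_mprintf("SELECT * FROM t WHERE name='%q'", zName);
**    if( zSql ){
**      rc = sqlite3_exec(db, zSql, 0, 0, 0);
**      sqlite3_free(zSql);
**    }
**
** The %q conversion doubles any single-quote characters in its argument,
** which is what makes the quoted literal above safe to embed in SQL.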
*/ SQLITE_API char *SQLITE_STDCALL sqlite3_vmprintf(const char *zFormat, va_list ap){ char *z; char zBase[SQLITE_PRINT_BUF_SIZE]; StrAccum acc; #ifdef SQLITE_ENABLE_API_ARMOR if( zFormat==0 ){ (void)SQLITE_MISUSE_BKPT; return 0; } #endif #ifndef SQLITE_OMIT_AUTOINIT if( sqlite3_initialize() ) return 0; #endif sqlite3StrAccumInit(&acc, 0, zBase, sizeof(zBase), SQLITE_MAX_LENGTH); sqlite3VXPrintf(&acc, 0, zFormat, ap); z = sqlite3StrAccumFinish(&acc); return z; } /* ** Print into memory obtained from sqlite3_malloc()(). Omit the internal ** %-conversion extensions. */ SQLITE_API char *SQLITE_CDECL sqlite3_mprintf(const char *zFormat, ...){ va_list ap; char *z; #ifndef SQLITE_OMIT_AUTOINIT if( sqlite3_initialize() ) return 0; #endif va_start(ap, zFormat); z = sqlite3_vmprintf(zFormat, ap); va_end(ap); return z; } /* ** sqlite3_snprintf() works like snprintf() except that it ignores the ** current locale settings. This is important for SQLite because we ** are not able to use a "," as the decimal point in place of "." as ** specified by some locales. ** ** Oops: The first two arguments of sqlite3_snprintf() are backwards ** from the snprintf() standard. Unfortunately, it is too late to change ** this without breaking compatibility, so we just have to live with the ** mistake. ** ** sqlite3_vsnprintf() is the varargs version. */ SQLITE_API char *SQLITE_STDCALL sqlite3_vsnprintf(int n, char *zBuf, const char *zFormat, va_list ap){ StrAccum acc; if( n<=0 ) return zBuf; #ifdef SQLITE_ENABLE_API_ARMOR if( zBuf==0 || zFormat==0 ) { (void)SQLITE_MISUSE_BKPT; if( zBuf ) zBuf[0] = 0; return zBuf; } #endif sqlite3StrAccumInit(&acc, 0, zBuf, n, 0); sqlite3VXPrintf(&acc, 0, zFormat, ap); return sqlite3StrAccumFinish(&acc); } SQLITE_API char *SQLITE_CDECL sqlite3_snprintf(int n, char *zBuf, const char *zFormat, ...){ char *z; va_list ap; va_start(ap,zFormat); z = sqlite3_vsnprintf(n, zBuf, zFormat, ap); va_end(ap); return z; } /* ** This is the routine that actually formats the sqlite3_log() message. ** We house it in a separate routine from sqlite3_log() to avoid using ** stack space on small-stack systems when logging is disabled. ** ** sqlite3_log() must render into a static buffer. It cannot dynamically ** allocate memory because it might be called while the memory allocator ** mutex is held. ** ** sqlite3VXPrintf() might ask for *temporary* memory allocations for ** certain format characters (%q) or for very large precisions or widths. ** Care must be taken that any sqlite3_log() calls that occur while the ** memory mutex is held do not use these mechanisms. */ static void renderLogMsg(int iErrCode, const char *zFormat, va_list ap){ StrAccum acc; /* String accumulator */ char zMsg[SQLITE_PRINT_BUF_SIZE*3]; /* Complete log message */ sqlite3StrAccumInit(&acc, 0, zMsg, sizeof(zMsg), 0); sqlite3VXPrintf(&acc, 0, zFormat, ap); sqlite3GlobalConfig.xLog(sqlite3GlobalConfig.pLogArg, iErrCode, sqlite3StrAccumFinish(&acc)); } /* ** Format and write a message to the log if logging is enabled. */ SQLITE_API void SQLITE_CDECL sqlite3_log(int iErrCode, const char *zFormat, ...){ va_list ap; /* Vararg list */ if( sqlite3GlobalConfig.xLog ){ va_start(ap, zFormat); renderLogMsg(iErrCode, zFormat, ap); va_end(ap); } } #if defined(SQLITE_DEBUG) || defined(SQLITE_HAVE_OS_TRACE) /* ** A version of printf() that understands %lld. Used for debugging. ** The printf() built into some versions of windows does not understand %lld ** and segfaults if you give it a long long int. 
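** A call from debugging code might look roughly like
**
**    sqlite3DebugPrintf("cursor %d has rowid %lld\n", iCur, iRowid);
**
** where iCur and iRowid stand in for whatever values are being traced.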
*/ SQLITE_PRIVATE void sqlite3DebugPrintf(const char *zFormat, ...){ va_list ap; StrAccum acc; char zBuf[500]; sqlite3StrAccumInit(&acc, 0, zBuf, sizeof(zBuf), 0); va_start(ap,zFormat); sqlite3VXPrintf(&acc, 0, zFormat, ap); va_end(ap); sqlite3StrAccumFinish(&acc); fprintf(stdout,"%s", zBuf); fflush(stdout); } #endif /* ** variable-argument wrapper around sqlite3VXPrintf(). */ SQLITE_PRIVATE void sqlite3XPrintf(StrAccum *p, u32 bFlags, const char *zFormat, ...){ va_list ap; va_start(ap,zFormat); sqlite3VXPrintf(p, bFlags, zFormat, ap); va_end(ap); } /************** End of printf.c **********************************************/ /************** Begin file treeview.c ****************************************/ /* ** 2015-06-08 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** ** This file contains C code to implement the TreeView debugging routines. ** These routines print a parse tree to standard output for debugging and ** analysis. ** ** The interfaces in this file is only available when compiling ** with SQLITE_DEBUG. */ /* #include "sqliteInt.h" */ #ifdef SQLITE_DEBUG /* ** Add a new subitem to the tree. The moreToFollow flag indicates that this ** is not the last item in the tree. */ static TreeView *sqlite3TreeViewPush(TreeView *p, u8 moreToFollow){ if( p==0 ){ p = sqlite3_malloc64( sizeof(*p) ); if( p==0 ) return 0; memset(p, 0, sizeof(*p)); }else{ p->iLevel++; } assert( moreToFollow==0 || moreToFollow==1 ); if( p->iLevelbLine) ) p->bLine[p->iLevel] = moreToFollow; return p; } /* ** Finished with one layer of the tree */ static void sqlite3TreeViewPop(TreeView *p){ if( p==0 ) return; p->iLevel--; if( p->iLevel<0 ) sqlite3_free(p); } /* ** Generate a single line of output for the tree, with a prefix that contains ** all the appropriate tree lines */ static void sqlite3TreeViewLine(TreeView *p, const char *zFormat, ...){ va_list ap; int i; StrAccum acc; char zBuf[500]; sqlite3StrAccumInit(&acc, 0, zBuf, sizeof(zBuf), 0); if( p ){ for(i=0; iiLevel && ibLine)-1; i++){ sqlite3StrAccumAppend(&acc, p->bLine[i] ? "| " : " ", 4); } sqlite3StrAccumAppend(&acc, p->bLine[i] ? "|-- " : "'-- ", 4); } va_start(ap, zFormat); sqlite3VXPrintf(&acc, 0, zFormat, ap); va_end(ap); if( zBuf[acc.nChar-1]!='\n' ) sqlite3StrAccumAppend(&acc, "\n", 1); sqlite3StrAccumFinish(&acc); fprintf(stdout,"%s", zBuf); fflush(stdout); } /* ** Shorthand for starting a new tree item that consists of a single label */ static void sqlite3TreeViewItem(TreeView *p, const char *zLabel,u8 moreFollows){ p = sqlite3TreeViewPush(p, moreFollows); sqlite3TreeViewLine(p, "%s", zLabel); } /* ** Generate a human-readable description of a the Select object. */ SQLITE_PRIVATE void sqlite3TreeViewSelect(TreeView *pView, const Select *p, u8 moreToFollow){ int n = 0; pView = sqlite3TreeViewPush(pView, moreToFollow); sqlite3TreeViewLine(pView, "SELECT%s%s (0x%p) selFlags=0x%x", ((p->selFlags & SF_Distinct) ? " DISTINCT" : ""), ((p->selFlags & SF_Aggregate) ? 
" agg_flag" : ""), p, p->selFlags ); if( p->pSrc && p->pSrc->nSrc ) n++; if( p->pWhere ) n++; if( p->pGroupBy ) n++; if( p->pHaving ) n++; if( p->pOrderBy ) n++; if( p->pLimit ) n++; if( p->pOffset ) n++; if( p->pPrior ) n++; sqlite3TreeViewExprList(pView, p->pEList, (n--)>0, "result-set"); if( p->pSrc && p->pSrc->nSrc ){ int i; pView = sqlite3TreeViewPush(pView, (n--)>0); sqlite3TreeViewLine(pView, "FROM"); for(i=0; ipSrc->nSrc; i++){ struct SrcList_item *pItem = &p->pSrc->a[i]; StrAccum x; char zLine[100]; sqlite3StrAccumInit(&x, 0, zLine, sizeof(zLine), 0); sqlite3XPrintf(&x, 0, "{%d,*}", pItem->iCursor); if( pItem->zDatabase ){ sqlite3XPrintf(&x, 0, " %s.%s", pItem->zDatabase, pItem->zName); }else if( pItem->zName ){ sqlite3XPrintf(&x, 0, " %s", pItem->zName); } if( pItem->pTab ){ sqlite3XPrintf(&x, 0, " tabname=%Q", pItem->pTab->zName); } if( pItem->zAlias ){ sqlite3XPrintf(&x, 0, " (AS %s)", pItem->zAlias); } if( pItem->jointype & JT_LEFT ){ sqlite3XPrintf(&x, 0, " LEFT-JOIN"); } sqlite3StrAccumFinish(&x); sqlite3TreeViewItem(pView, zLine, ipSrc->nSrc-1); if( pItem->pSelect ){ sqlite3TreeViewSelect(pView, pItem->pSelect, 0); } sqlite3TreeViewPop(pView); } sqlite3TreeViewPop(pView); } if( p->pWhere ){ sqlite3TreeViewItem(pView, "WHERE", (n--)>0); sqlite3TreeViewExpr(pView, p->pWhere, 0); sqlite3TreeViewPop(pView); } if( p->pGroupBy ){ sqlite3TreeViewExprList(pView, p->pGroupBy, (n--)>0, "GROUPBY"); } if( p->pHaving ){ sqlite3TreeViewItem(pView, "HAVING", (n--)>0); sqlite3TreeViewExpr(pView, p->pHaving, 0); sqlite3TreeViewPop(pView); } if( p->pOrderBy ){ sqlite3TreeViewExprList(pView, p->pOrderBy, (n--)>0, "ORDERBY"); } if( p->pLimit ){ sqlite3TreeViewItem(pView, "LIMIT", (n--)>0); sqlite3TreeViewExpr(pView, p->pLimit, 0); sqlite3TreeViewPop(pView); } if( p->pOffset ){ sqlite3TreeViewItem(pView, "OFFSET", (n--)>0); sqlite3TreeViewExpr(pView, p->pOffset, 0); sqlite3TreeViewPop(pView); } if( p->pPrior ){ const char *zOp = "UNION"; switch( p->op ){ case TK_ALL: zOp = "UNION ALL"; break; case TK_INTERSECT: zOp = "INTERSECT"; break; case TK_EXCEPT: zOp = "EXCEPT"; break; } sqlite3TreeViewItem(pView, zOp, (n--)>0); sqlite3TreeViewSelect(pView, p->pPrior, 0); sqlite3TreeViewPop(pView); } sqlite3TreeViewPop(pView); } /* ** Generate a human-readable explanation of an expression tree. 
*/ SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 moreToFollow){ const char *zBinOp = 0; /* Binary operator */ const char *zUniOp = 0; /* Unary operator */ char zFlgs[30]; pView = sqlite3TreeViewPush(pView, moreToFollow); if( pExpr==0 ){ sqlite3TreeViewLine(pView, "nil"); sqlite3TreeViewPop(pView); return; } if( pExpr->flags ){ sqlite3_snprintf(sizeof(zFlgs),zFlgs," flags=0x%x",pExpr->flags); }else{ zFlgs[0] = 0; } switch( pExpr->op ){ case TK_AGG_COLUMN: { sqlite3TreeViewLine(pView, "AGG{%d:%d}%s", pExpr->iTable, pExpr->iColumn, zFlgs); break; } case TK_COLUMN: { if( pExpr->iTable<0 ){ /* This only happens when coding check constraints */ sqlite3TreeViewLine(pView, "COLUMN(%d)%s", pExpr->iColumn, zFlgs); }else{ sqlite3TreeViewLine(pView, "{%d:%d}%s", pExpr->iTable, pExpr->iColumn, zFlgs); } break; } case TK_INTEGER: { if( pExpr->flags & EP_IntValue ){ sqlite3TreeViewLine(pView, "%d", pExpr->u.iValue); }else{ sqlite3TreeViewLine(pView, "%s", pExpr->u.zToken); } break; } #ifndef SQLITE_OMIT_FLOATING_POINT case TK_FLOAT: { sqlite3TreeViewLine(pView,"%s", pExpr->u.zToken); break; } #endif case TK_STRING: { sqlite3TreeViewLine(pView,"%Q", pExpr->u.zToken); break; } case TK_NULL: { sqlite3TreeViewLine(pView,"NULL"); break; } #ifndef SQLITE_OMIT_BLOB_LITERAL case TK_BLOB: { sqlite3TreeViewLine(pView,"%s", pExpr->u.zToken); break; } #endif case TK_VARIABLE: { sqlite3TreeViewLine(pView,"VARIABLE(%s,%d)", pExpr->u.zToken, pExpr->iColumn); break; } case TK_REGISTER: { sqlite3TreeViewLine(pView,"REGISTER(%d)", pExpr->iTable); break; } case TK_AS: { sqlite3TreeViewLine(pView,"AS %Q", pExpr->u.zToken); sqlite3TreeViewExpr(pView, pExpr->pLeft, 0); break; } case TK_ID: { sqlite3TreeViewLine(pView,"ID \"%w\"", pExpr->u.zToken); break; } #ifndef SQLITE_OMIT_CAST case TK_CAST: { /* Expressions of the form: CAST(pLeft AS token) */ sqlite3TreeViewLine(pView,"CAST %Q", pExpr->u.zToken); sqlite3TreeViewExpr(pView, pExpr->pLeft, 0); break; } #endif /* SQLITE_OMIT_CAST */ case TK_LT: zBinOp = "LT"; break; case TK_LE: zBinOp = "LE"; break; case TK_GT: zBinOp = "GT"; break; case TK_GE: zBinOp = "GE"; break; case TK_NE: zBinOp = "NE"; break; case TK_EQ: zBinOp = "EQ"; break; case TK_IS: zBinOp = "IS"; break; case TK_ISNOT: zBinOp = "ISNOT"; break; case TK_AND: zBinOp = "AND"; break; case TK_OR: zBinOp = "OR"; break; case TK_PLUS: zBinOp = "ADD"; break; case TK_STAR: zBinOp = "MUL"; break; case TK_MINUS: zBinOp = "SUB"; break; case TK_REM: zBinOp = "REM"; break; case TK_BITAND: zBinOp = "BITAND"; break; case TK_BITOR: zBinOp = "BITOR"; break; case TK_SLASH: zBinOp = "DIV"; break; case TK_LSHIFT: zBinOp = "LSHIFT"; break; case TK_RSHIFT: zBinOp = "RSHIFT"; break; case TK_CONCAT: zBinOp = "CONCAT"; break; case TK_DOT: zBinOp = "DOT"; break; case TK_UMINUS: zUniOp = "UMINUS"; break; case TK_UPLUS: zUniOp = "UPLUS"; break; case TK_BITNOT: zUniOp = "BITNOT"; break; case TK_NOT: zUniOp = "NOT"; break; case TK_ISNULL: zUniOp = "ISNULL"; break; case TK_NOTNULL: zUniOp = "NOTNULL"; break; case TK_COLLATE: { sqlite3TreeViewLine(pView, "COLLATE %Q", pExpr->u.zToken); sqlite3TreeViewExpr(pView, pExpr->pLeft, 0); break; } case TK_AGG_FUNCTION: case TK_FUNCTION: { ExprList *pFarg; /* List of function arguments */ if( ExprHasProperty(pExpr, EP_TokenOnly) ){ pFarg = 0; }else{ pFarg = pExpr->x.pList; } if( pExpr->op==TK_AGG_FUNCTION ){ sqlite3TreeViewLine(pView, "AGG_FUNCTION%d %Q", pExpr->op2, pExpr->u.zToken); }else{ sqlite3TreeViewLine(pView, "FUNCTION %Q", pExpr->u.zToken); } if( pFarg ){ 
sqlite3TreeViewExprList(pView, pFarg, 0, 0); } break; } #ifndef SQLITE_OMIT_SUBQUERY case TK_EXISTS: { sqlite3TreeViewLine(pView, "EXISTS-expr"); sqlite3TreeViewSelect(pView, pExpr->x.pSelect, 0); break; } case TK_SELECT: { sqlite3TreeViewLine(pView, "SELECT-expr"); sqlite3TreeViewSelect(pView, pExpr->x.pSelect, 0); break; } case TK_IN: { sqlite3TreeViewLine(pView, "IN"); sqlite3TreeViewExpr(pView, pExpr->pLeft, 1); if( ExprHasProperty(pExpr, EP_xIsSelect) ){ sqlite3TreeViewSelect(pView, pExpr->x.pSelect, 0); }else{ sqlite3TreeViewExprList(pView, pExpr->x.pList, 0, 0); } break; } #endif /* SQLITE_OMIT_SUBQUERY */ /* ** x BETWEEN y AND z ** ** This is equivalent to ** ** x>=y AND x<=z ** ** X is stored in pExpr->pLeft. ** Y is stored in pExpr->pList->a[0].pExpr. ** Z is stored in pExpr->pList->a[1].pExpr. */ case TK_BETWEEN: { Expr *pX = pExpr->pLeft; Expr *pY = pExpr->x.pList->a[0].pExpr; Expr *pZ = pExpr->x.pList->a[1].pExpr; sqlite3TreeViewLine(pView, "BETWEEN"); sqlite3TreeViewExpr(pView, pX, 1); sqlite3TreeViewExpr(pView, pY, 1); sqlite3TreeViewExpr(pView, pZ, 0); break; } case TK_TRIGGER: { /* If the opcode is TK_TRIGGER, then the expression is a reference ** to a column in the new.* or old.* pseudo-tables available to ** trigger programs. In this case Expr.iTable is set to 1 for the ** new.* pseudo-table, or 0 for the old.* pseudo-table. Expr.iColumn ** is set to the column of the pseudo-table to read, or to -1 to ** read the rowid field. */ sqlite3TreeViewLine(pView, "%s(%d)", pExpr->iTable ? "NEW" : "OLD", pExpr->iColumn); break; } case TK_CASE: { sqlite3TreeViewLine(pView, "CASE"); sqlite3TreeViewExpr(pView, pExpr->pLeft, 1); sqlite3TreeViewExprList(pView, pExpr->x.pList, 0, 0); break; } #ifndef SQLITE_OMIT_TRIGGER case TK_RAISE: { const char *zType = "unk"; switch( pExpr->affinity ){ case OE_Rollback: zType = "rollback"; break; case OE_Abort: zType = "abort"; break; case OE_Fail: zType = "fail"; break; case OE_Ignore: zType = "ignore"; break; } sqlite3TreeViewLine(pView, "RAISE %s(%Q)", zType, pExpr->u.zToken); break; } #endif default: { sqlite3TreeViewLine(pView, "op=%d", pExpr->op); break; } } if( zBinOp ){ sqlite3TreeViewLine(pView, "%s%s", zBinOp, zFlgs); sqlite3TreeViewExpr(pView, pExpr->pLeft, 1); sqlite3TreeViewExpr(pView, pExpr->pRight, 0); }else if( zUniOp ){ sqlite3TreeViewLine(pView, "%s%s", zUniOp, zFlgs); sqlite3TreeViewExpr(pView, pExpr->pLeft, 0); } sqlite3TreeViewPop(pView); } /* ** Generate a human-readable explanation of an expression list. */ SQLITE_PRIVATE void sqlite3TreeViewExprList( TreeView *pView, const ExprList *pList, u8 moreToFollow, const char *zLabel ){ int i; pView = sqlite3TreeViewPush(pView, moreToFollow); if( zLabel==0 || zLabel[0]==0 ) zLabel = "LIST"; if( pList==0 ){ sqlite3TreeViewLine(pView, "%s (empty)", zLabel); }else{ sqlite3TreeViewLine(pView, "%s", zLabel); for(i=0; inExpr; i++){ sqlite3TreeViewExpr(pView, pList->a[i].pExpr, inExpr-1); } } sqlite3TreeViewPop(pView); } #endif /* SQLITE_DEBUG */ /************** End of treeview.c ********************************************/ /************** Begin file random.c ******************************************/ /* ** 2001 September 15 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. 
** ************************************************************************* ** This file contains code to implement a pseudo-random number ** generator (PRNG) for SQLite. ** ** Random numbers are used by some of the database backends in order ** to generate random integer keys for tables or random filenames. */ /* #include "sqliteInt.h" */ /* All threads share a single random number generator. ** This structure is the current state of the generator. */ static SQLITE_WSD struct sqlite3PrngType { unsigned char isInit; /* True if initialized */ unsigned char i, j; /* State variables */ unsigned char s[256]; /* State variables */ } sqlite3Prng; /* ** Return N random bytes. */ SQLITE_API void SQLITE_STDCALL sqlite3_randomness(int N, void *pBuf){ unsigned char t; unsigned char *zBuf = pBuf; /* The "wsdPrng" macro will resolve to the pseudo-random number generator ** state vector. If writable static data is unsupported on the target, ** we have to locate the state vector at run-time. In the more common ** case where writable static data is supported, wsdPrng can refer directly ** to the "sqlite3Prng" state vector declared above. */ #ifdef SQLITE_OMIT_WSD struct sqlite3PrngType *p = &GLOBAL(struct sqlite3PrngType, sqlite3Prng); # define wsdPrng p[0] #else # define wsdPrng sqlite3Prng #endif #if SQLITE_THREADSAFE sqlite3_mutex *mutex; #endif #ifndef SQLITE_OMIT_AUTOINIT if( sqlite3_initialize() ) return; #endif #if SQLITE_THREADSAFE mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_PRNG); #endif sqlite3_mutex_enter(mutex); if( N<=0 || pBuf==0 ){ wsdPrng.isInit = 0; sqlite3_mutex_leave(mutex); return; } /* Initialize the state of the random number generator once, ** the first time this routine is called. The seed value does ** not need to contain a lot of randomness since we are not ** trying to do secure encryption or anything like that... ** ** Nothing in this file or anywhere else in SQLite does any kind of ** encryption. The RC4 algorithm is being used as a PRNG (pseudo-random ** number generator) not as an encryption device. */ if( !wsdPrng.isInit ){ int i; char k[256]; wsdPrng.j = 0; wsdPrng.i = 0; sqlite3OsRandomness(sqlite3_vfs_find(0), 256, k); for(i=0; i<256; i++){ wsdPrng.s[i] = (u8)i; } for(i=0; i<256; i++){ wsdPrng.j += wsdPrng.s[i] + k[i]; t = wsdPrng.s[wsdPrng.j]; wsdPrng.s[wsdPrng.j] = wsdPrng.s[i]; wsdPrng.s[i] = t; } wsdPrng.isInit = 1; } assert( N>0 ); do{ wsdPrng.i++; t = wsdPrng.s[wsdPrng.i]; wsdPrng.j += t; wsdPrng.s[wsdPrng.i] = wsdPrng.s[wsdPrng.j]; wsdPrng.s[wsdPrng.j] = t; t += wsdPrng.s[wsdPrng.i]; *(zBuf++) = wsdPrng.s[t]; }while( --N ); sqlite3_mutex_leave(mutex); } #ifndef SQLITE_OMIT_BUILTIN_TEST /* ** For testing purposes, we sometimes want to preserve the state of ** PRNG and restore the PRNG to its saved state at a later time, or ** to reset the PRNG to its initial state. These routines accomplish ** those tasks. ** ** The sqlite3_test_control() interface calls these routines to ** control the PRNG. 
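** For example, a test harness that wants a reproducible sequence of
** random bytes might bracket the interesting code with something like
**
**    sqlite3_test_control(SQLITE_TESTCTRL_PRNG_SAVE);
**    ...exercise code that consumes sqlite3_randomness()...
**    sqlite3_test_control(SQLITE_TESTCTRL_PRNG_RESTORE);
**
** or use sqlite3_test_control(SQLITE_TESTCTRL_PRNG_RESET) to reset the
** generator so that it reseeds itself on its next use.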
*/ static SQLITE_WSD struct sqlite3PrngType sqlite3SavedPrng; SQLITE_PRIVATE void sqlite3PrngSaveState(void){ memcpy( &GLOBAL(struct sqlite3PrngType, sqlite3SavedPrng), &GLOBAL(struct sqlite3PrngType, sqlite3Prng), sizeof(sqlite3Prng) ); } SQLITE_PRIVATE void sqlite3PrngRestoreState(void){ memcpy( &GLOBAL(struct sqlite3PrngType, sqlite3Prng), &GLOBAL(struct sqlite3PrngType, sqlite3SavedPrng), sizeof(sqlite3Prng) ); } #endif /* SQLITE_OMIT_BUILTIN_TEST */ /************** End of random.c **********************************************/ /************** Begin file threads.c *****************************************/ /* ** 2012 July 21 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ****************************************************************************** ** ** This file presents a simple cross-platform threading interface for ** use internally by SQLite. ** ** A "thread" can be created using sqlite3ThreadCreate(). This thread ** runs independently of its creator until it is joined using ** sqlite3ThreadJoin(), at which point it terminates. ** ** Threads do not have to be real. It could be that the work of the ** "thread" is done by the main thread at either the sqlite3ThreadCreate() ** or sqlite3ThreadJoin() call. This is, in fact, what happens in ** single threaded systems. Nothing in SQLite requires multiple threads. ** This interface exists so that applications that want to take advantage ** of multiple cores can do so, while also allowing applications to stay ** single-threaded if desired. */ /* #include "sqliteInt.h" */ #if SQLITE_OS_WIN /* # include "os_win.h" */ #endif #if SQLITE_MAX_WORKER_THREADS>0 /********************************* Unix Pthreads ****************************/ #if SQLITE_OS_UNIX && defined(SQLITE_MUTEX_PTHREADS) && SQLITE_THREADSAFE>0 #define SQLITE_THREADS_IMPLEMENTED 1 /* Prevent the single-thread code below */ /* #include */ /* A running thread */ struct SQLiteThread { pthread_t tid; /* Thread ID */ int done; /* Set to true when thread finishes */ void *pOut; /* Result returned by the thread */ void *(*xTask)(void*); /* The thread routine */ void *pIn; /* Argument to the thread */ }; /* Create a new thread */ SQLITE_PRIVATE int sqlite3ThreadCreate( SQLiteThread **ppThread, /* OUT: Write the thread object here */ void *(*xTask)(void*), /* Routine to run in a separate thread */ void *pIn /* Argument passed into xTask() */ ){ SQLiteThread *p; int rc; assert( ppThread!=0 ); assert( xTask!=0 ); /* This routine is never used in single-threaded mode */ assert( sqlite3GlobalConfig.bCoreMutex!=0 ); *ppThread = 0; p = sqlite3Malloc(sizeof(*p)); if( p==0 ) return SQLITE_NOMEM; memset(p, 0, sizeof(*p)); p->xTask = xTask; p->pIn = pIn; if( sqlite3FaultSim(200) ){ rc = 1; }else{ rc = pthread_create(&p->tid, 0, xTask, pIn); } if( rc ){ p->done = 1; p->pOut = xTask(pIn); } *ppThread = p; return SQLITE_OK; } /* Get the results of the thread */ SQLITE_PRIVATE int sqlite3ThreadJoin(SQLiteThread *p, void **ppOut){ int rc; assert( ppOut!=0 ); if( NEVER(p==0) ) return SQLITE_NOMEM; if( p->done ){ *ppOut = p->pOut; rc = SQLITE_OK; }else{ rc = pthread_join(p->tid, ppOut) ? 
SQLITE_ERROR : SQLITE_OK; } sqlite3_free(p); return rc; } #endif /* SQLITE_OS_UNIX && defined(SQLITE_MUTEX_PTHREADS) */ /******************************** End Unix Pthreads *************************/ /********************************* Win32 Threads ****************************/ #if SQLITE_OS_WIN_THREADS #define SQLITE_THREADS_IMPLEMENTED 1 /* Prevent the single-thread code below */ #include /* A running thread */ struct SQLiteThread { void *tid; /* The thread handle */ unsigned id; /* The thread identifier */ void *(*xTask)(void*); /* The routine to run as a thread */ void *pIn; /* Argument to xTask */ void *pResult; /* Result of xTask */ }; /* Thread procedure Win32 compatibility shim */ static unsigned __stdcall sqlite3ThreadProc( void *pArg /* IN: Pointer to the SQLiteThread structure */ ){ SQLiteThread *p = (SQLiteThread *)pArg; assert( p!=0 ); #if 0 /* ** This assert appears to trigger spuriously on certain ** versions of Windows, possibly due to _beginthreadex() ** and/or CreateThread() not fully setting their thread ** ID parameter before starting the thread. */ assert( p->id==GetCurrentThreadId() ); #endif assert( p->xTask!=0 ); p->pResult = p->xTask(p->pIn); _endthreadex(0); return 0; /* NOT REACHED */ } /* Create a new thread */ SQLITE_PRIVATE int sqlite3ThreadCreate( SQLiteThread **ppThread, /* OUT: Write the thread object here */ void *(*xTask)(void*), /* Routine to run in a separate thread */ void *pIn /* Argument passed into xTask() */ ){ SQLiteThread *p; assert( ppThread!=0 ); assert( xTask!=0 ); *ppThread = 0; p = sqlite3Malloc(sizeof(*p)); if( p==0 ) return SQLITE_NOMEM; if( sqlite3GlobalConfig.bCoreMutex==0 ){ memset(p, 0, sizeof(*p)); }else{ p->xTask = xTask; p->pIn = pIn; p->tid = (void*)_beginthreadex(0, 0, sqlite3ThreadProc, p, 0, &p->id); if( p->tid==0 ){ memset(p, 0, sizeof(*p)); } } if( p->xTask==0 ){ p->id = GetCurrentThreadId(); p->pResult = xTask(pIn); } *ppThread = p; return SQLITE_OK; } SQLITE_PRIVATE DWORD sqlite3Win32Wait(HANDLE hObject); /* os_win.c */ /* Get the results of the thread */ SQLITE_PRIVATE int sqlite3ThreadJoin(SQLiteThread *p, void **ppOut){ DWORD rc; BOOL bRc; assert( ppOut!=0 ); if( NEVER(p==0) ) return SQLITE_NOMEM; if( p->xTask==0 ){ assert( p->id==GetCurrentThreadId() ); rc = WAIT_OBJECT_0; assert( p->tid==0 ); }else{ assert( p->id!=0 && p->id!=GetCurrentThreadId() ); rc = sqlite3Win32Wait((HANDLE)p->tid); assert( rc!=WAIT_IO_COMPLETION ); bRc = CloseHandle((HANDLE)p->tid); assert( bRc ); } if( rc==WAIT_OBJECT_0 ) *ppOut = p->pResult; sqlite3_free(p); return (rc==WAIT_OBJECT_0) ? SQLITE_OK : SQLITE_ERROR; } #endif /* SQLITE_OS_WIN_THREADS */ /******************************** End Win32 Threads *************************/ /********************************* Single-Threaded **************************/ #ifndef SQLITE_THREADS_IMPLEMENTED /* ** This implementation does not actually create a new thread. 
It does the ** work of the thread in the main thread, when either the thread is created ** or when it is joined */ /* A running thread */ struct SQLiteThread { void *(*xTask)(void*); /* The routine to run as a thread */ void *pIn; /* Argument to xTask */ void *pResult; /* Result of xTask */ }; /* Create a new thread */ SQLITE_PRIVATE int sqlite3ThreadCreate( SQLiteThread **ppThread, /* OUT: Write the thread object here */ void *(*xTask)(void*), /* Routine to run in a separate thread */ void *pIn /* Argument passed into xTask() */ ){ SQLiteThread *p; assert( ppThread!=0 ); assert( xTask!=0 ); *ppThread = 0; p = sqlite3Malloc(sizeof(*p)); if( p==0 ) return SQLITE_NOMEM; if( (SQLITE_PTR_TO_INT(p)/17)&1 ){ p->xTask = xTask; p->pIn = pIn; }else{ p->xTask = 0; p->pResult = xTask(pIn); } *ppThread = p; return SQLITE_OK; } /* Get the results of the thread */ SQLITE_PRIVATE int sqlite3ThreadJoin(SQLiteThread *p, void **ppOut){ assert( ppOut!=0 ); if( NEVER(p==0) ) return SQLITE_NOMEM; if( p->xTask ){ *ppOut = p->xTask(p->pIn); }else{ *ppOut = p->pResult; } sqlite3_free(p); #if defined(SQLITE_TEST) { void *pTstAlloc = sqlite3Malloc(10); if (!pTstAlloc) return SQLITE_NOMEM; sqlite3_free(pTstAlloc); } #endif return SQLITE_OK; } #endif /* !defined(SQLITE_THREADS_IMPLEMENTED) */ /****************************** End Single-Threaded *************************/ #endif /* SQLITE_MAX_WORKER_THREADS>0 */ /************** End of threads.c *********************************************/ /************** Begin file utf.c *********************************************/ /* ** 2004 April 13 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** This file contains routines used to translate between UTF-8, ** UTF-16, UTF-16BE, and UTF-16LE. ** ** Notes on UTF-8: ** ** Byte-0 Byte-1 Byte-2 Byte-3 Value ** 0xxxxxxx 00000000 00000000 0xxxxxxx ** 110yyyyy 10xxxxxx 00000000 00000yyy yyxxxxxx ** 1110zzzz 10yyyyyy 10xxxxxx 00000000 zzzzyyyy yyxxxxxx ** 11110uuu 10uuzzzz 10yyyyyy 10xxxxxx 000uuuuu zzzzyyyy yyxxxxxx ** ** ** Notes on UTF-16: (with wwww+1==uuuuu) ** ** Word-0 Word-1 Value ** 110110ww wwzzzzyy 110111yy yyxxxxxx 000uuuuu zzzzyyyy yyxxxxxx ** zzzzyyyy yyxxxxxx 00000000 zzzzyyyy yyxxxxxx ** ** ** BOM or Byte Order Mark: ** 0xff 0xfe little-endian utf-16 follows ** 0xfe 0xff big-endian utf-16 follows ** */ /* #include "sqliteInt.h" */ /* #include */ /* #include "vdbeInt.h" */ #ifndef SQLITE_AMALGAMATION /* ** The following constant value is used by the SQLITE_BIGENDIAN and ** SQLITE_LITTLEENDIAN macros. */ SQLITE_PRIVATE const int sqlite3one = 1; #endif /* SQLITE_AMALGAMATION */ /* ** This lookup table is used to help decode the first byte of ** a multi-byte UTF8 character. 
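** For example, the lead byte 0xE4 (binary 1110 0100) selects entry
** 0xE4-0xC0 == 0x24 of the table below, which holds 0x04: the four
** payload bits of a three-byte sequence.  Each trailing byte then
** contributes six more bits, so the sequence 0xE4 0xBD 0xA0 decodes as
** ((0x04<<6 | 0x3D)<<6) | 0x20 == 0x4F60.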
*/ static const unsigned char sqlite3Utf8Trans1[] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x01, 0x02, 0x03, 0x00, 0x01, 0x00, 0x00, }; #define WRITE_UTF8(zOut, c) { \ if( c<0x00080 ){ \ *zOut++ = (u8)(c&0xFF); \ } \ else if( c<0x00800 ){ \ *zOut++ = 0xC0 + (u8)((c>>6)&0x1F); \ *zOut++ = 0x80 + (u8)(c & 0x3F); \ } \ else if( c<0x10000 ){ \ *zOut++ = 0xE0 + (u8)((c>>12)&0x0F); \ *zOut++ = 0x80 + (u8)((c>>6) & 0x3F); \ *zOut++ = 0x80 + (u8)(c & 0x3F); \ }else{ \ *zOut++ = 0xF0 + (u8)((c>>18) & 0x07); \ *zOut++ = 0x80 + (u8)((c>>12) & 0x3F); \ *zOut++ = 0x80 + (u8)((c>>6) & 0x3F); \ *zOut++ = 0x80 + (u8)(c & 0x3F); \ } \ } #define WRITE_UTF16LE(zOut, c) { \ if( c<=0xFFFF ){ \ *zOut++ = (u8)(c&0x00FF); \ *zOut++ = (u8)((c>>8)&0x00FF); \ }else{ \ *zOut++ = (u8)(((c>>10)&0x003F) + (((c-0x10000)>>10)&0x00C0)); \ *zOut++ = (u8)(0x00D8 + (((c-0x10000)>>18)&0x03)); \ *zOut++ = (u8)(c&0x00FF); \ *zOut++ = (u8)(0x00DC + ((c>>8)&0x03)); \ } \ } #define WRITE_UTF16BE(zOut, c) { \ if( c<=0xFFFF ){ \ *zOut++ = (u8)((c>>8)&0x00FF); \ *zOut++ = (u8)(c&0x00FF); \ }else{ \ *zOut++ = (u8)(0x00D8 + (((c-0x10000)>>18)&0x03)); \ *zOut++ = (u8)(((c>>10)&0x003F) + (((c-0x10000)>>10)&0x00C0)); \ *zOut++ = (u8)(0x00DC + ((c>>8)&0x03)); \ *zOut++ = (u8)(c&0x00FF); \ } \ } #define READ_UTF16LE(zIn, TERM, c){ \ c = (*zIn++); \ c += ((*zIn++)<<8); \ if( c>=0xD800 && c<0xE000 && TERM ){ \ int c2 = (*zIn++); \ c2 += ((*zIn++)<<8); \ c = (c2&0x03FF) + ((c&0x003F)<<10) + (((c&0x03C0)+0x0040)<<10); \ } \ } #define READ_UTF16BE(zIn, TERM, c){ \ c = ((*zIn++)<<8); \ c += (*zIn++); \ if( c>=0xD800 && c<0xE000 && TERM ){ \ int c2 = ((*zIn++)<<8); \ c2 += (*zIn++); \ c = (c2&0x03FF) + ((c&0x003F)<<10) + (((c&0x03C0)+0x0040)<<10); \ } \ } /* ** Translate a single UTF-8 character. Return the unicode value. ** ** During translation, assume that the byte that zTerm points ** is a 0x00. ** ** Write a pointer to the next unread byte back into *pzNext. ** ** Notes On Invalid UTF-8: ** ** * This routine never allows a 7-bit character (0x00 through 0x7f) to ** be encoded as a multi-byte character. Any multi-byte character that ** attempts to encode a value between 0x00 and 0x7f is rendered as 0xfffd. ** ** * This routine never allows a UTF16 surrogate value to be encoded. ** If a multi-byte character attempts to encode a value between ** 0xd800 and 0xe000 then it is rendered as 0xfffd. ** ** * Bytes in the range of 0x80 through 0xbf which occur as the first ** byte of a character are interpreted as single-byte characters ** and rendered as themselves even though they are technically ** invalid characters. ** ** * This routine accepts over-length UTF8 encodings ** for unicode values 0x80 and greater. It does not change over-length ** encodings to 0xfffd as some systems recommend. */ #define READ_UTF8(zIn, zTerm, c) \ c = *(zIn++); \ if( c>=0xc0 ){ \ c = sqlite3Utf8Trans1[c-0xc0]; \ while( zIn!=zTerm && (*zIn & 0xc0)==0x80 ){ \ c = (c<<6) + (0x3f & *(zIn++)); \ } \ if( c<0x80 \ || (c&0xFFFFF800)==0xD800 \ || (c&0xFFFFFFFE)==0xFFFE ){ c = 0xFFFD; } \ } SQLITE_PRIVATE u32 sqlite3Utf8Read( const unsigned char **pz /* Pointer to string from which to read char */ ){ unsigned int c; /* Same as READ_UTF8() above but without the zTerm parameter. 
** For this routine, we assume the UTF8 string is always zero-terminated. */ c = *((*pz)++); if( c>=0xc0 ){ c = sqlite3Utf8Trans1[c-0xc0]; while( (*(*pz) & 0xc0)==0x80 ){ c = (c<<6) + (0x3f & *((*pz)++)); } if( c<0x80 || (c&0xFFFFF800)==0xD800 || (c&0xFFFFFFFE)==0xFFFE ){ c = 0xFFFD; } } return c; } /* ** If the TRANSLATE_TRACE macro is defined, the value of each Mem is ** printed on stderr on the way into and out of sqlite3VdbeMemTranslate(). */ /* #define TRANSLATE_TRACE 1 */ #ifndef SQLITE_OMIT_UTF16 /* ** This routine transforms the internal text encoding used by pMem to ** desiredEnc. It is an error if the string is already of the desired ** encoding, or if *pMem does not contain a string value. */ SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3VdbeMemTranslate(Mem *pMem, u8 desiredEnc){ int len; /* Maximum length of output string in bytes */ unsigned char *zOut; /* Output buffer */ unsigned char *zIn; /* Input iterator */ unsigned char *zTerm; /* End of input */ unsigned char *z; /* Output iterator */ unsigned int c; assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); assert( pMem->flags&MEM_Str ); assert( pMem->enc!=desiredEnc ); assert( pMem->enc!=0 ); assert( pMem->n>=0 ); #if defined(TRANSLATE_TRACE) && defined(SQLITE_DEBUG) { char zBuf[100]; sqlite3VdbeMemPrettyPrint(pMem, zBuf); fprintf(stderr, "INPUT: %s\n", zBuf); } #endif /* If the translation is between UTF-16 little and big endian, then ** all that is required is to swap the byte order. This case is handled ** differently from the others. */ if( pMem->enc!=SQLITE_UTF8 && desiredEnc!=SQLITE_UTF8 ){ u8 temp; int rc; rc = sqlite3VdbeMemMakeWriteable(pMem); if( rc!=SQLITE_OK ){ assert( rc==SQLITE_NOMEM ); return SQLITE_NOMEM; } zIn = (u8*)pMem->z; zTerm = &zIn[pMem->n&~1]; while( zInenc = desiredEnc; goto translate_out; } /* Set len to the maximum number of bytes required in the output buffer. */ if( desiredEnc==SQLITE_UTF8 ){ /* When converting from UTF-16, the maximum growth results from ** translating a 2-byte character to a 4-byte UTF-8 character. ** A single byte is required for the output string ** nul-terminator. */ pMem->n &= ~1; len = pMem->n * 2 + 1; }else{ /* When converting from UTF-8 to UTF-16 the maximum growth is caused ** when a 1-byte UTF-8 character is translated into a 2-byte UTF-16 ** character. Two bytes are required in the output buffer for the ** nul-terminator. */ len = pMem->n * 2 + 2; } /* Set zIn to point at the start of the input buffer and zTerm to point 1 ** byte past the end. ** ** Variable zOut is set to point at the output buffer, space obtained ** from sqlite3_malloc(). 
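** (For instance, translating the 3-byte UTF-8 string "abc" to UTF-16
** needs 3*2 + 2 == 8 output bytes, including the two-byte terminator,
** which is exactly what the len computed above allows for.)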
*/ zIn = (u8*)pMem->z; zTerm = &zIn[pMem->n]; zOut = sqlite3DbMallocRaw(pMem->db, len); if( !zOut ){ return SQLITE_NOMEM; } z = zOut; if( pMem->enc==SQLITE_UTF8 ){ if( desiredEnc==SQLITE_UTF16LE ){ /* UTF-8 -> UTF-16 Little-endian */ while( zIn UTF-16 Big-endian */ while( zInn = (int)(z - zOut); *z++ = 0; }else{ assert( desiredEnc==SQLITE_UTF8 ); if( pMem->enc==SQLITE_UTF16LE ){ /* UTF-16 Little-endian -> UTF-8 */ while( zIn UTF-8 */ while( zInn = (int)(z - zOut); } *z = 0; assert( (pMem->n+(desiredEnc==SQLITE_UTF8?1:2))<=len ); c = pMem->flags; sqlite3VdbeMemRelease(pMem); pMem->flags = MEM_Str|MEM_Term|(c&MEM_AffMask); pMem->enc = desiredEnc; pMem->z = (char*)zOut; pMem->zMalloc = pMem->z; pMem->szMalloc = sqlite3DbMallocSize(pMem->db, pMem->z); translate_out: #if defined(TRANSLATE_TRACE) && defined(SQLITE_DEBUG) { char zBuf[100]; sqlite3VdbeMemPrettyPrint(pMem, zBuf); fprintf(stderr, "OUTPUT: %s\n", zBuf); } #endif return SQLITE_OK; } /* ** This routine checks for a byte-order mark at the beginning of the ** UTF-16 string stored in *pMem. If one is present, it is removed and ** the encoding of the Mem adjusted. This routine does not do any ** byte-swapping, it just sets Mem.enc appropriately. ** ** The allocation (static, dynamic etc.) and encoding of the Mem may be ** changed by this function. */ SQLITE_PRIVATE int sqlite3VdbeMemHandleBom(Mem *pMem){ int rc = SQLITE_OK; u8 bom = 0; assert( pMem->n>=0 ); if( pMem->n>1 ){ u8 b1 = *(u8 *)pMem->z; u8 b2 = *(((u8 *)pMem->z) + 1); if( b1==0xFE && b2==0xFF ){ bom = SQLITE_UTF16BE; } if( b1==0xFF && b2==0xFE ){ bom = SQLITE_UTF16LE; } } if( bom ){ rc = sqlite3VdbeMemMakeWriteable(pMem); if( rc==SQLITE_OK ){ pMem->n -= 2; memmove(pMem->z, &pMem->z[2], pMem->n); pMem->z[pMem->n] = '\0'; pMem->z[pMem->n+1] = '\0'; pMem->flags |= MEM_Term; pMem->enc = bom; } } return rc; } #endif /* SQLITE_OMIT_UTF16 */ /* ** pZ is a UTF-8 encoded unicode string. If nByte is less than zero, ** return the number of unicode characters in pZ up to (but not including) ** the first 0x00 byte. If nByte is not less than zero, return the ** number of unicode characters in the first nByte of pZ (or up to ** the first 0x00, whichever comes first). */ SQLITE_PRIVATE int sqlite3Utf8CharLen(const char *zIn, int nByte){ int r = 0; const u8 *z = (const u8*)zIn; const u8 *zTerm; if( nByte>=0 ){ zTerm = &z[nByte]; }else{ zTerm = (const u8*)(-1); } assert( z<=zTerm ); while( *z!=0 && zmallocFailed ){ sqlite3VdbeMemRelease(&m); m.z = 0; } assert( (m.flags & MEM_Term)!=0 || db->mallocFailed ); assert( (m.flags & MEM_Str)!=0 || db->mallocFailed ); assert( m.z || db->mallocFailed ); return m.z; } /* ** zIn is a UTF-16 encoded unicode string at least nChar characters long. ** Return the number of bytes in the first nChar unicode characters ** in pZ. nChar must be non-negative. 
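** For example, nChar characters drawn entirely from the basic
** multilingual plane occupy exactly 2*nChar bytes, while a character
** encoded as a surrogate pair still counts as one character but
** contributes 4 bytes to the total.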
*/ SQLITE_PRIVATE int sqlite3Utf16ByteLen(const void *zIn, int nChar){ int c; unsigned char const *z = zIn; int n = 0; if( SQLITE_UTF16NATIVE==SQLITE_UTF16BE ){ while( n0 && n<=4 ); z[0] = 0; z = zBuf; c = sqlite3Utf8Read((const u8**)&z); t = i; if( i>=0xD800 && i<=0xDFFF ) t = 0xFFFD; if( (i&0xFFFFFFFE)==0xFFFE ) t = 0xFFFD; assert( c==t ); assert( (z-zBuf)==n ); } for(i=0; i<0x00110000; i++){ if( i>=0xD800 && i<0xE000 ) continue; z = zBuf; WRITE_UTF16LE(z, i); n = (int)(z-zBuf); assert( n>0 && n<=4 ); z[0] = 0; z = zBuf; READ_UTF16LE(z, 1, c); assert( c==i ); assert( (z-zBuf)==n ); } for(i=0; i<0x00110000; i++){ if( i>=0xD800 && i<0xE000 ) continue; z = zBuf; WRITE_UTF16BE(z, i); n = (int)(z-zBuf); assert( n>0 && n<=4 ); z[0] = 0; z = zBuf; READ_UTF16BE(z, 1, c); assert( c==i ); assert( (z-zBuf)==n ); } } #endif /* SQLITE_TEST */ #endif /* SQLITE_OMIT_UTF16 */ /************** End of utf.c *************************************************/ /************** Begin file util.c ********************************************/ /* ** 2001 September 15 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** Utility functions used throughout sqlite. ** ** This file contains functions for allocating memory, comparing ** strings, and stuff like that. ** */ /* #include "sqliteInt.h" */ /* #include */ #if HAVE_ISNAN || SQLITE_HAVE_ISNAN # include #endif /* ** Routine needed to support the testcase() macro. */ #ifdef SQLITE_COVERAGE_TEST SQLITE_PRIVATE void sqlite3Coverage(int x){ static unsigned dummy = 0; dummy += (unsigned)x; } #endif /* ** Give a callback to the test harness that can be used to simulate faults ** in places where it is difficult or expensive to do so purely by means ** of inputs. ** ** The intent of the integer argument is to let the fault simulator know ** which of multiple sqlite3FaultSim() calls has been hit. ** ** Return whatever integer value the test callback returns, or return ** SQLITE_OK if no test callback is installed. */ #ifndef SQLITE_OMIT_BUILTIN_TEST SQLITE_PRIVATE int sqlite3FaultSim(int iTest){ int (*xCallback)(int) = sqlite3GlobalConfig.xTestCallback; return xCallback ? xCallback(iTest) : SQLITE_OK; } #endif #ifndef SQLITE_OMIT_FLOATING_POINT /* ** Return true if the floating point value is Not a Number (NaN). ** ** Use the math library isnan() function if compiled with SQLITE_HAVE_ISNAN. ** Otherwise, we have our own implementation that works on most systems. */ SQLITE_PRIVATE int sqlite3IsNaN(double x){ int rc; /* The value return */ #if !SQLITE_HAVE_ISNAN && !HAVE_ISNAN /* ** Systems that support the isnan() library function should probably ** make use of it by compiling with -DSQLITE_HAVE_ISNAN. But we have ** found that many systems do not have a working isnan() function so ** this implementation is provided as an alternative. ** ** This NaN test sometimes fails if compiled on GCC with -ffast-math. ** On the other hand, the use of -ffast-math comes with the following ** warning: ** ** This option [-ffast-math] should never be turned on by any ** -O option since it can result in incorrect output for programs ** which depend on an exact implementation of IEEE or ISO ** rules/specifications for math functions. 
** ** Under MSVC, this NaN test may fail if compiled with a floating- ** point precision mode other than /fp:precise. From the MSDN ** documentation: ** ** The compiler [with /fp:precise] will properly handle comparisons ** involving NaN. For example, x != x evaluates to true if x is NaN ** ... */ #ifdef __FAST_MATH__ # error SQLite will not work correctly with the -ffast-math option of GCC. #endif volatile double y = x; volatile double z = y; rc = (y!=z); #else /* if HAVE_ISNAN */ rc = isnan(x); #endif /* HAVE_ISNAN */ testcase( rc ); return rc; } #endif /* SQLITE_OMIT_FLOATING_POINT */ /* ** Compute a string length that is limited to what can be stored in ** lower 30 bits of a 32-bit signed integer. ** ** The value returned will never be negative. Nor will it ever be greater ** than the actual length of the string. For very long strings (greater ** than 1GiB) the value returned might be less than the true string length. */ SQLITE_PRIVATE int sqlite3Strlen30(const char *z){ if( z==0 ) return 0; return 0x3fffffff & (int)strlen(z); } /* ** Set the current error code to err_code and clear any prior error message. */ SQLITE_PRIVATE void sqlite3Error(sqlite3 *db, int err_code){ assert( db!=0 ); db->errCode = err_code; if( db->pErr ) sqlite3ValueSetNull(db->pErr); } /* ** Set the most recent error code and error string for the sqlite ** handle "db". The error code is set to "err_code". ** ** If it is not NULL, string zFormat specifies the format of the ** error string in the style of the printf functions: The following ** format characters are allowed: ** ** %s Insert a string ** %z A string that should be freed after use ** %d Insert an integer ** %T Insert a token ** %S Insert the first element of a SrcList ** ** zFormat and any string tokens that follow it are assumed to be ** encoded in UTF-8. ** ** To clear the most recent error for sqlite handle "db", sqlite3Error ** should be called with err_code set to SQLITE_OK and zFormat set ** to NULL. */ SQLITE_PRIVATE void sqlite3ErrorWithMsg(sqlite3 *db, int err_code, const char *zFormat, ...){ assert( db!=0 ); db->errCode = err_code; if( zFormat==0 ){ sqlite3Error(db, err_code); }else if( db->pErr || (db->pErr = sqlite3ValueNew(db))!=0 ){ char *z; va_list ap; va_start(ap, zFormat); z = sqlite3VMPrintf(db, zFormat, ap); va_end(ap); sqlite3ValueSetStr(db->pErr, -1, z, SQLITE_UTF8, SQLITE_DYNAMIC); } } /* ** Add an error message to pParse->zErrMsg and increment pParse->nErr. ** The following formatting characters are allowed: ** ** %s Insert a string ** %z A string that should be freed after use ** %d Insert an integer ** %T Insert a token ** %S Insert the first element of a SrcList ** ** This function should be used to report any error that occurs while ** compiling an SQL statement (i.e. within sqlite3_prepare()). The ** last thing the sqlite3_prepare() function does is copy the error ** stored by this function into the database handle using sqlite3Error(). ** Functions sqlite3Error() or sqlite3ErrorWithMsg() should be used ** during statement execution (sqlite3_step() etc.). */ SQLITE_PRIVATE void sqlite3ErrorMsg(Parse *pParse, const char *zFormat, ...){ char *zMsg; va_list ap; sqlite3 *db = pParse->db; va_start(ap, zFormat); zMsg = sqlite3VMPrintf(db, zFormat, ap); va_end(ap); if( db->suppressErr ){ sqlite3DbFree(db, zMsg); }else{ pParse->nErr++; sqlite3DbFree(db, pParse->zErrMsg); pParse->zErrMsg = zMsg; pParse->rc = SQLITE_ERROR; } } /* ** Convert an SQL-style quoted string into a normal string by removing ** the quote characters. 
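/*
** Editorial sketch (not part of the SQLite sources): the self-comparison
** trick described above.  Under IEEE-754, NaN is the only value that
** compares unequal to itself; the volatile copies keep an optimizer built
** with -ffast-math (or /fp:fast) from folding the comparison away.
** is_nan_portable is a hypothetical name.
*/
static int is_nan_portable(double x){
  volatile double y = x;
  volatile double z = y;
  return y!=z;         /* non-zero only when x is NaN */
}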
The conversion is done in-place. If the ** input does not begin with a quote character, then this routine ** is a no-op. ** ** The input string must be zero-terminated. A new zero-terminator ** is added to the dequoted string. ** ** The return value is -1 if no dequoting occurs or the length of the ** dequoted string, exclusive of the zero terminator, if dequoting does ** occur. ** ** 2002-Feb-14: This routine is extended to remove MS-Access style ** brackets from around identifiers. For example: "[a-b-c]" becomes ** "a-b-c". */ SQLITE_PRIVATE int sqlite3Dequote(char *z){ char quote; int i, j; if( z==0 ) return -1; quote = z[0]; switch( quote ){ case '\'': break; case '"': break; case '`': break; /* For MySQL compatibility */ case '[': quote = ']'; break; /* For MS SqlServer compatibility */ default: return -1; } for(i=1, j=0;; i++){ assert( z[i] ); if( z[i]==quote ){ if( z[i+1]==quote ){ z[j++] = quote; i++; }else{ break; } }else{ z[j++] = z[i]; } } z[j] = 0; return j; } /* Convenient short-hand */ #define UpperToLower sqlite3UpperToLower /* ** Some systems have stricmp(). Others have strcasecmp(). Because ** there is no consistency, we will define our own. ** ** IMPLEMENTATION-OF: R-30243-02494 The sqlite3_stricmp() and ** sqlite3_strnicmp() APIs allow applications and extensions to compare ** the contents of two buffers containing UTF-8 strings in a ** case-independent fashion, using the same definition of "case ** independence" that SQLite uses internally when comparing identifiers. */ SQLITE_API int SQLITE_STDCALL sqlite3_stricmp(const char *zLeft, const char *zRight){ register unsigned char *a, *b; if( zLeft==0 ){ return zRight ? -1 : 0; }else if( zRight==0 ){ return 1; } a = (unsigned char *)zLeft; b = (unsigned char *)zRight; while( *a!=0 && UpperToLower[*a]==UpperToLower[*b]){ a++; b++; } return UpperToLower[*a] - UpperToLower[*b]; } SQLITE_API int SQLITE_STDCALL sqlite3_strnicmp(const char *zLeft, const char *zRight, int N){ register unsigned char *a, *b; if( zLeft==0 ){ return zRight ? -1 : 0; }else if( zRight==0 ){ return 1; } a = (unsigned char *)zLeft; b = (unsigned char *)zRight; while( N-- > 0 && *a!=0 && UpperToLower[*a]==UpperToLower[*b]){ a++; b++; } return N<0 ? 0 : UpperToLower[*a] - UpperToLower[*b]; } /* ** The string z[] is an text representation of a real number. ** Convert this string to a double and write it into *pResult. ** ** The string z[] is length bytes in length (bytes, not characters) and ** uses the encoding enc. The string is not necessarily zero-terminated. ** ** Return TRUE if the result is a valid real number (or integer) and FALSE ** if the string is empty or contains extraneous text. Valid numbers ** are in one of these formats: ** ** [+-]digits[E[+-]digits] ** [+-]digits.[digits][E[+-]digits] ** [+-].digits[E[+-]digits] ** ** Leading and trailing whitespace is ignored for the purpose of determining ** validity. ** ** If some prefix of the input string is a valid number, this routine ** returns FALSE but it still converts the prefix and writes the result ** into *pResult. 
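/*
** Editorial sketch (not part of the SQLite sources): the observable contract
** of sqlite3Dequote() above, shown as a usage fragment.  It assumes the
** fragment is compiled inside the amalgamation where sqlite3Dequote() is
** visible; dequote_examples is a hypothetical name.
*/
static void dequote_examples(void){
  char a[] = "'it''s'";               /* doubled quote collapses to one  */
  char b[] = "[a-b-c]";               /* MS-Access style bracket quoting */
  char c[] = "plain";                 /* no leading quote character      */
  int na = sqlite3Dequote(a);         /* a becomes "it's",  na==4        */
  int nb = sqlite3Dequote(b);         /* b becomes "a-b-c", nb==5        */
  int nc = sqlite3Dequote(c);         /* nc==-1 and c is left unchanged  */
  (void)na; (void)nb; (void)nc;
}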
*/ SQLITE_PRIVATE int sqlite3AtoF(const char *z, double *pResult, int length, u8 enc){ #ifndef SQLITE_OMIT_FLOATING_POINT int incr; const char *zEnd = z + length; /* sign * significand * (10 ^ (esign * exponent)) */ int sign = 1; /* sign of significand */ i64 s = 0; /* significand */ int d = 0; /* adjust exponent for shifting decimal point */ int esign = 1; /* sign of exponent */ int e = 0; /* exponent */ int eValid = 1; /* True exponent is either not used or is well-formed */ double result; int nDigits = 0; int nonNum = 0; assert( enc==SQLITE_UTF8 || enc==SQLITE_UTF16LE || enc==SQLITE_UTF16BE ); *pResult = 0.0; /* Default return value, in case of an error */ if( enc==SQLITE_UTF8 ){ incr = 1; }else{ int i; incr = 2; assert( SQLITE_UTF16LE==2 && SQLITE_UTF16BE==3 ); for(i=3-enc; i=zEnd ) return 0; /* get sign of significand */ if( *z=='-' ){ sign = -1; z+=incr; }else if( *z=='+' ){ z+=incr; } /* skip leading zeroes */ while( z=zEnd ) goto do_atof_calc; /* if decimal point is present */ if( *z=='.' ){ z+=incr; /* copy digits from after decimal to significand ** (decrease exponent by d to shift decimal right) */ while( z=zEnd ) goto do_atof_calc; /* if exponent is present */ if( *z=='e' || *z=='E' ){ z+=incr; eValid = 0; if( z>=zEnd ) goto do_atof_calc; /* get sign of exponent */ if( *z=='-' ){ esign = -1; z+=incr; }else if( *z=='+' ){ z+=incr; } /* copy digits to exponent */ while( z0 ){ while( s<(LARGEST_INT64/10) && e>0 ) e--,s*=10; }else{ while( !(s%10) && e>0 ) e--,s/=10; } /* adjust the sign of significand */ s = sign<0 ? -s : s; /* if exponent, scale significand as appropriate ** and store in result. */ if( e ){ LONGDOUBLE_TYPE scale = 1.0; /* attempt to handle extremely small/large numbers better */ if( e>307 && e<342 ){ while( e%308 ) { scale *= 1.0e+1; e -= 1; } if( esign<0 ){ result = s / scale; result /= 1.0e+308; }else{ result = s * scale; result *= 1.0e+308; } }else if( e>=342 ){ if( esign<0 ){ result = 0.0*s; }else{ result = 1e308*1e308*s; /* Infinity */ } }else{ /* 1.0e+22 is the largest power of 10 than can be ** represented exactly. */ while( e%22 ) { scale *= 1.0e+1; e -= 1; } while( e>0 ) { scale *= 1.0e+22; e -= 22; } if( esign<0 ){ result = s / scale; }else{ result = s * scale; } } } else { result = (double)s; } } /* store the result */ *pResult = result; /* return true if number and no extra non-whitespace chracters after */ return z>=zEnd && nDigits>0 && eValid && nonNum==0; #else return !sqlite3Atoi64(z, pResult, length, enc); #endif /* SQLITE_OMIT_FLOATING_POINT */ } /* ** Compare the 19-character string zNum against the text representation ** value 2^63: 9223372036854775808. Return negative, zero, or positive ** if zNum is less than, equal to, or greater than the string. ** Note that zNum must contain exactly 19 characters. ** ** Unlike memcmp() this routine is guaranteed to return the difference ** in the values of the last digit if the only difference is in the ** last digit. So, for example, ** ** compare2pow63("9223372036854775800", 1) ** ** will return -8. */ static int compare2pow63(const char *zNum, int incr){ int c = 0; int i; /* 012345678901234567 */ const char *pow63 = "922337203685477580"; for(i=0; c==0 && i<18; i++){ c = (zNum[i*incr]-pow63[i])*10; } if( c==0 ){ c = zNum[18*incr] - '8'; testcase( c==(-1) ); testcase( c==0 ); testcase( c==(+1) ); } return c; } /* ** Convert zNum to a 64-bit signed integer. zNum must be decimal. This ** routine does *not* accept hexadecimal notation. 
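/*
** Editorial sketch (not part of the SQLite sources): the final step of the
** text-to-real conversion above.  Once sqlite3AtoF() has collected a
** significand s and a decimal exponent e, the result is s * 10^(esign*e),
** assembled by scaling with 1.0e+22 -- the largest power of ten a double can
** represent exactly.  scale_by_pow10 is a hypothetical helper and omits the
** very-large-exponent branches that the real code handles separately.
*/
static double scale_by_pow10(long long s, int e, int esign){
  double scale = 1.0;
  while( e%22 ){ scale *= 1.0e+1; e -= 1; }
  while( e>0 ){ scale *= 1.0e+22; e -= 22; }
  return esign<0 ? (double)s / scale : (double)s * scale;
}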
** ** If the zNum value is representable as a 64-bit twos-complement ** integer, then write that value into *pNum and return 0. ** ** If zNum is exactly 9223372036854775808, return 2. This special ** case is broken out because while 9223372036854775808 cannot be a ** signed 64-bit integer, its negative -9223372036854775808 can be. ** ** If zNum is too big for a 64-bit integer and is not ** 9223372036854775808 or if zNum contains any non-numeric text, ** then return 1. ** ** length is the number of bytes in the string (bytes, not characters). ** The string is not necessarily zero-terminated. The encoding is ** given by enc. */ SQLITE_PRIVATE int sqlite3Atoi64(const char *zNum, i64 *pNum, int length, u8 enc){ int incr; u64 u = 0; int neg = 0; /* assume positive */ int i; int c = 0; int nonNum = 0; const char *zStart; const char *zEnd = zNum + length; assert( enc==SQLITE_UTF8 || enc==SQLITE_UTF16LE || enc==SQLITE_UTF16BE ); if( enc==SQLITE_UTF8 ){ incr = 1; }else{ incr = 2; assert( SQLITE_UTF16LE==2 && SQLITE_UTF16BE==3 ); for(i=3-enc; i='0' && c<='9'; i+=incr){ u = u*10 + c - '0'; } if( u>LARGEST_INT64 ){ *pNum = neg ? SMALLEST_INT64 : LARGEST_INT64; }else if( neg ){ *pNum = -(i64)u; }else{ *pNum = (i64)u; } testcase( i==18 ); testcase( i==19 ); testcase( i==20 ); if( (c!=0 && &zNum[i]19*incr || nonNum ){ /* zNum is empty or contains non-numeric text or is longer ** than 19 digits (thus guaranteeing that it is too large) */ return 1; }else if( i<19*incr ){ /* Less than 19 digits, so we know that it fits in 64 bits */ assert( u<=LARGEST_INT64 ); return 0; }else{ /* zNum is a 19-digit numbers. Compare it against 9223372036854775808. */ c = compare2pow63(zNum, incr); if( c<0 ){ /* zNum is less than 9223372036854775808 so it fits */ assert( u<=LARGEST_INT64 ); return 0; }else if( c>0 ){ /* zNum is greater than 9223372036854775808 so it overflows */ return 1; }else{ /* zNum is exactly 9223372036854775808. Fits if negative. The ** special case 2 overflow if positive */ assert( u-1==LARGEST_INT64 ); return neg ? 0 : 2; } } } /* ** Transform a UTF-8 integer literal, in either decimal or hexadecimal, ** into a 64-bit signed integer. This routine accepts hexadecimal literals, ** whereas sqlite3Atoi64() does not. ** ** Returns: ** ** 0 Successful transformation. Fits in a 64-bit signed integer. ** 1 Integer too large for a 64-bit signed integer or is malformed ** 2 Special case of 9223372036854775808 */ SQLITE_PRIVATE int sqlite3DecOrHexToI64(const char *z, i64 *pOut){ #ifndef SQLITE_OMIT_HEX_INTEGER if( z[0]=='0' && (z[1]=='x' || z[1]=='X') && sqlite3Isxdigit(z[2]) ){ u64 u = 0; int i, k; for(i=2; z[i]=='0'; i++){} for(k=i; sqlite3Isxdigit(z[k]); k++){ u = u*16 + sqlite3HexToInt(z[k]); } memcpy(pOut, &u, 8); return (z[k]==0 && k-i<=16) ? 0 : 1; }else #endif /* SQLITE_OMIT_HEX_INTEGER */ { return sqlite3Atoi64(z, pOut, sqlite3Strlen30(z), SQLITE_UTF8); } } /* ** If zNum represents an integer that will fit in 32-bits, then set ** *pValue to that integer and return true. Otherwise return false. ** ** This routine accepts both decimal and hexadecimal notation for integers. ** ** Any non-numeric characters that following zNum are ignored. ** This is different from sqlite3Atoi64() which requires the ** input number to be zero-terminated. 
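/*
** Editorial sketch (not part of the SQLite sources): the hexadecimal branch
** of sqlite3DecOrHexToI64() above, written out as a standalone helper.  At
** most 16 significant hex digits fit in 64 bits, so anything longer (after
** leading zeros) is rejected.  hex_digit_value and parse_hex_u64 are
** hypothetical names.
*/
#include <stdint.h>

static int hex_digit_value(int h){
  if( h>='0' && h<='9' ) return h - '0';
  if( h>='a' && h<='f' ) return h - 'a' + 10;
  if( h>='A' && h<='F' ) return h - 'A' + 10;
  return -1;
}

/* Parse a "0x..." literal.  Returns 0 on success, 1 if malformed or too long. */
static int parse_hex_u64(const char *z, uint64_t *pOut){
  uint64_t u = 0;
  int i, k;
  if( z[0]!='0' || (z[1]!='x' && z[1]!='X') || hex_digit_value(z[2])<0 ) return 1;
  for(i=2; z[i]=='0'; i++){}                      /* skip leading zeros */
  for(k=i; hex_digit_value(z[k])>=0; k++){
    u = u*16 + (uint64_t)hex_digit_value(z[k]);
  }
  *pOut = u;
  return (z[k]==0 && k-i<=16) ? 0 : 1;
}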
*/ SQLITE_PRIVATE int sqlite3GetInt32(const char *zNum, int *pValue){ sqlite_int64 v = 0; int i, c; int neg = 0; if( zNum[0]=='-' ){ neg = 1; zNum++; }else if( zNum[0]=='+' ){ zNum++; } #ifndef SQLITE_OMIT_HEX_INTEGER else if( zNum[0]=='0' && (zNum[1]=='x' || zNum[1]=='X') && sqlite3Isxdigit(zNum[2]) ){ u32 u = 0; zNum += 2; while( zNum[0]=='0' ) zNum++; for(i=0; sqlite3Isxdigit(zNum[i]) && i<8; i++){ u = u*16 + sqlite3HexToInt(zNum[i]); } if( (u&0x80000000)==0 && sqlite3Isxdigit(zNum[i])==0 ){ memcpy(pValue, &u, 4); return 1; }else{ return 0; } } #endif while( zNum[0]=='0' ) zNum++; for(i=0; i<11 && (c = zNum[i] - '0')>=0 && c<=9; i++){ v = v*10 + c; } /* The longest decimal representation of a 32 bit integer is 10 digits: ** ** 1234567890 ** 2^31 -> 2147483648 */ testcase( i==10 ); if( i>10 ){ return 0; } testcase( v-neg==2147483647 ); if( v-neg>2147483647 ){ return 0; } if( neg ){ v = -v; } *pValue = (int)v; return 1; } /* ** Return a 32-bit integer value extracted from a string. If the ** string is not an integer, just return 0. */ SQLITE_PRIVATE int sqlite3Atoi(const char *z){ int x = 0; if( z ) sqlite3GetInt32(z, &x); return x; } /* ** The variable-length integer encoding is as follows: ** ** KEY: ** A = 0xxxxxxx 7 bits of data and one flag bit ** B = 1xxxxxxx 7 bits of data and one flag bit ** C = xxxxxxxx 8 bits of data ** ** 7 bits - A ** 14 bits - BA ** 21 bits - BBA ** 28 bits - BBBA ** 35 bits - BBBBA ** 42 bits - BBBBBA ** 49 bits - BBBBBBA ** 56 bits - BBBBBBBA ** 64 bits - BBBBBBBBC */ /* ** Write a 64-bit variable-length integer to memory starting at p[0]. ** The length of data write will be between 1 and 9 bytes. The number ** of bytes written is returned. ** ** A variable-length integer consists of the lower 7 bits of each byte ** for all bytes that have the 8th bit set and one byte with the 8th ** bit clear. Except, if we get to the 9th byte, it stores the full ** 8 bits and is the last byte. */ static int SQLITE_NOINLINE putVarint64(unsigned char *p, u64 v){ int i, j, n; u8 buf[10]; if( v & (((u64)0xff000000)<<32) ){ p[8] = (u8)v; v >>= 8; for(i=7; i>=0; i--){ p[i] = (u8)((v & 0x7f) | 0x80); v >>= 7; } return 9; } n = 0; do{ buf[n++] = (u8)((v & 0x7f) | 0x80); v >>= 7; }while( v!=0 ); buf[0] &= 0x7f; assert( n<=9 ); for(i=0, j=n-1; j>=0; j--, i++){ p[i] = buf[j]; } return n; } SQLITE_PRIVATE int sqlite3PutVarint(unsigned char *p, u64 v){ if( v<=0x7f ){ p[0] = v&0x7f; return 1; } if( v<=0x3fff ){ p[0] = ((v>>7)&0x7f)|0x80; p[1] = v&0x7f; return 2; } return putVarint64(p,v); } /* ** Bitmasks used by sqlite3GetVarint(). These precomputed constants ** are defined here rather than simply putting the constant expressions ** inline in order to work around bugs in the RVT compiler. ** ** SLOT_2_0 A mask for (0x7f<<14) | 0x7f ** ** SLOT_4_2_0 A mask for (0x7f<<28) | SLOT_2_0 */ #define SLOT_2_0 0x001fc07f #define SLOT_4_2_0 0xf01fc07f /* ** Read a 64-bit variable-length integer from memory starting at p[0]. ** Return the number of bytes read. The value is stored in *v. 
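/*
** Editorial sketch (not part of the SQLite sources): the varint format
** documented above in its plainest form.  Every byte carries 7 payload bits
** with the high bit set on all but the last byte; when a value needs more
** than 8*7 bits, a 9th byte carries a full 8 bits.  varint_put/varint_get
** are hypothetical, unoptimized helpers -- the real sqlite3PutVarint() and
** sqlite3GetVarint() above special-case the short encodings for speed.
*/
#include <stdint.h>

static int varint_put(unsigned char *p, uint64_t v){
  unsigned char buf[10];
  int n = 0, i;
  if( v>>56 ){                          /* needs 9 bytes: last byte holds 8 bits */
    p[8] = (unsigned char)v;  v >>= 8;
    for(i=7; i>=0; i--){ p[i] = (unsigned char)((v & 0x7f) | 0x80); v >>= 7; }
    return 9;
  }
  do{ buf[n++] = (unsigned char)((v & 0x7f) | 0x80); v >>= 7; }while( v );
  buf[0] &= 0x7f;                       /* clear the flag bit on the final byte */
  for(i=0; i<n; i++) p[i] = buf[n-1-i]; /* emit most-significant group first */
  return n;
}

static int varint_get(const unsigned char *p, uint64_t *pv){
  uint64_t v = 0;
  int i;
  for(i=0; i<8; i++){
    v = (v<<7) | (uint64_t)(p[i] & 0x7f);
    if( (p[i] & 0x80)==0 ){ *pv = v; return i+1; }
  }
  *pv = (v<<8) | p[8];                  /* 9th byte contributes 8 literal bits */
  return 9;
}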
*/ SQLITE_PRIVATE u8 sqlite3GetVarint(const unsigned char *p, u64 *v){ u32 a,b,s; a = *p; /* a: p0 (unmasked) */ if (!(a&0x80)) { *v = a; return 1; } p++; b = *p; /* b: p1 (unmasked) */ if (!(b&0x80)) { a &= 0x7f; a = a<<7; a |= b; *v = a; return 2; } /* Verify that constants are precomputed correctly */ assert( SLOT_2_0 == ((0x7f<<14) | (0x7f)) ); assert( SLOT_4_2_0 == ((0xfU<<28) | (0x7f<<14) | (0x7f)) ); p++; a = a<<14; a |= *p; /* a: p0<<14 | p2 (unmasked) */ if (!(a&0x80)) { a &= SLOT_2_0; b &= 0x7f; b = b<<7; a |= b; *v = a; return 3; } /* CSE1 from below */ a &= SLOT_2_0; p++; b = b<<14; b |= *p; /* b: p1<<14 | p3 (unmasked) */ if (!(b&0x80)) { b &= SLOT_2_0; /* moved CSE1 up */ /* a &= (0x7f<<14)|(0x7f); */ a = a<<7; a |= b; *v = a; return 4; } /* a: p0<<14 | p2 (masked) */ /* b: p1<<14 | p3 (unmasked) */ /* 1:save off p0<<21 | p1<<14 | p2<<7 | p3 (masked) */ /* moved CSE1 up */ /* a &= (0x7f<<14)|(0x7f); */ b &= SLOT_2_0; s = a; /* s: p0<<14 | p2 (masked) */ p++; a = a<<14; a |= *p; /* a: p0<<28 | p2<<14 | p4 (unmasked) */ if (!(a&0x80)) { /* we can skip these cause they were (effectively) done above in calc'ing s */ /* a &= (0x7f<<28)|(0x7f<<14)|(0x7f); */ /* b &= (0x7f<<14)|(0x7f); */ b = b<<7; a |= b; s = s>>18; *v = ((u64)s)<<32 | a; return 5; } /* 2:save off p0<<21 | p1<<14 | p2<<7 | p3 (masked) */ s = s<<7; s |= b; /* s: p0<<21 | p1<<14 | p2<<7 | p3 (masked) */ p++; b = b<<14; b |= *p; /* b: p1<<28 | p3<<14 | p5 (unmasked) */ if (!(b&0x80)) { /* we can skip this cause it was (effectively) done above in calc'ing s */ /* b &= (0x7f<<28)|(0x7f<<14)|(0x7f); */ a &= SLOT_2_0; a = a<<7; a |= b; s = s>>18; *v = ((u64)s)<<32 | a; return 6; } p++; a = a<<14; a |= *p; /* a: p2<<28 | p4<<14 | p6 (unmasked) */ if (!(a&0x80)) { a &= SLOT_4_2_0; b &= SLOT_2_0; b = b<<7; a |= b; s = s>>11; *v = ((u64)s)<<32 | a; return 7; } /* CSE2 from below */ a &= SLOT_2_0; p++; b = b<<14; b |= *p; /* b: p3<<28 | p5<<14 | p7 (unmasked) */ if (!(b&0x80)) { b &= SLOT_4_2_0; /* moved CSE2 up */ /* a &= (0x7f<<14)|(0x7f); */ a = a<<7; a |= b; s = s>>4; *v = ((u64)s)<<32 | a; return 8; } p++; a = a<<15; a |= *p; /* a: p4<<29 | p6<<15 | p8 (unmasked) */ /* moved CSE2 up */ /* a &= (0x7f<<29)|(0x7f<<15)|(0xff); */ b &= SLOT_2_0; b = b<<8; a |= b; s = s<<4; b = p[-4]; b &= 0x7f; b = b>>3; s |= b; *v = ((u64)s)<<32 | a; return 9; } /* ** Read a 32-bit variable-length integer from memory starting at p[0]. ** Return the number of bytes read. The value is stored in *v. ** ** If the varint stored in p[0] is larger than can fit in a 32-bit unsigned ** integer, then set *v to 0xffffffff. ** ** A MACRO version, getVarint32, is provided which inlines the ** single-byte case. All code should use the MACRO version as ** this function assumes the single-byte case has already been handled. */ SQLITE_PRIVATE u8 sqlite3GetVarint32(const unsigned char *p, u32 *v){ u32 a,b; /* The 1-byte case. Overwhelmingly the most common. Handled inline ** by the getVarin32() macro */ a = *p; /* a: p0 (unmasked) */ #ifndef getVarint32 if (!(a&0x80)) { /* Values between 0 and 127 */ *v = a; return 1; } #endif /* The 2-byte case */ p++; b = *p; /* b: p1 (unmasked) */ if (!(b&0x80)) { /* Values between 128 and 16383 */ a &= 0x7f; a = a<<7; *v = a | b; return 2; } /* The 3-byte case */ p++; a = a<<14; a |= *p; /* a: p0<<14 | p2 (unmasked) */ if (!(a&0x80)) { /* Values between 16384 and 2097151 */ a &= (0x7f<<14)|(0x7f); b &= 0x7f; b = b<<7; *v = a | b; return 3; } /* A 32-bit varint is used to store size information in btrees. 
** Objects are rarely larger than 2MiB limit of a 3-byte varint. ** A 3-byte varint is sufficient, for example, to record the size ** of a 1048569-byte BLOB or string. ** ** We only unroll the first 1-, 2-, and 3- byte cases. The very ** rare larger cases can be handled by the slower 64-bit varint ** routine. */ #if 1 { u64 v64; u8 n; p -= 2; n = sqlite3GetVarint(p, &v64); assert( n>3 && n<=9 ); if( (v64 & SQLITE_MAX_U32)!=v64 ){ *v = 0xffffffff; }else{ *v = (u32)v64; } return n; } #else /* For following code (kept for historical record only) shows an ** unrolling for the 3- and 4-byte varint cases. This code is ** slightly faster, but it is also larger and much harder to test. */ p++; b = b<<14; b |= *p; /* b: p1<<14 | p3 (unmasked) */ if (!(b&0x80)) { /* Values between 2097152 and 268435455 */ b &= (0x7f<<14)|(0x7f); a &= (0x7f<<14)|(0x7f); a = a<<7; *v = a | b; return 4; } p++; a = a<<14; a |= *p; /* a: p0<<28 | p2<<14 | p4 (unmasked) */ if (!(a&0x80)) { /* Values between 268435456 and 34359738367 */ a &= SLOT_4_2_0; b &= SLOT_4_2_0; b = b<<7; *v = a | b; return 5; } /* We can only reach this point when reading a corrupt database ** file. In that case we are not in any hurry. Use the (relatively ** slow) general-purpose sqlite3GetVarint() routine to extract the ** value. */ { u64 v64; u8 n; p -= 4; n = sqlite3GetVarint(p, &v64); assert( n>5 && n<=9 ); *v = (u32)v64; return n; } #endif } /* ** Return the number of bytes that will be needed to store the given ** 64-bit integer. */ SQLITE_PRIVATE int sqlite3VarintLen(u64 v){ int i = 0; do{ i++; v >>= 7; }while( v!=0 && ALWAYS(i<9) ); return i; } /* ** Read or write a four-byte big-endian integer value. */ SQLITE_PRIVATE u32 sqlite3Get4byte(const u8 *p){ #if SQLITE_BYTEORDER==4321 u32 x; memcpy(&x,p,4); return x; #elif SQLITE_BYTEORDER==1234 && defined(__GNUC__) && GCC_VERSION>=4003000 u32 x; memcpy(&x,p,4); return __builtin_bswap32(x); #elif SQLITE_BYTEORDER==1234 && defined(_MSC_VER) && _MSC_VER>=1300 u32 x; memcpy(&x,p,4); return _byteswap_ulong(x); #else testcase( p[0]&0x80 ); return ((unsigned)p[0]<<24) | (p[1]<<16) | (p[2]<<8) | p[3]; #endif } SQLITE_PRIVATE void sqlite3Put4byte(unsigned char *p, u32 v){ #if SQLITE_BYTEORDER==4321 memcpy(p,&v,4); #elif SQLITE_BYTEORDER==1234 && defined(__GNUC__) && GCC_VERSION>=4003000 u32 x = __builtin_bswap32(v); memcpy(p,&x,4); #elif SQLITE_BYTEORDER==1234 && defined(_MSC_VER) && _MSC_VER>=1300 u32 x = _byteswap_ulong(v); memcpy(p,&x,4); #else p[0] = (u8)(v>>24); p[1] = (u8)(v>>16); p[2] = (u8)(v>>8); p[3] = (u8)v; #endif } /* ** Translate a single byte of Hex into an integer. ** This routine only works if h really is a valid hexadecimal ** character: 0..9a..fA..F */ SQLITE_PRIVATE u8 sqlite3HexToInt(int h){ assert( (h>='0' && h<='9') || (h>='a' && h<='f') || (h>='A' && h<='F') ); #ifdef SQLITE_ASCII h += 9*(1&(h>>6)); #endif #ifdef SQLITE_EBCDIC h += 9*(1&~(h>>4)); #endif return (u8)(h & 0xf); } #if !defined(SQLITE_OMIT_BLOB_LITERAL) || defined(SQLITE_HAS_CODEC) /* ** Convert a BLOB literal of the form "x'hhhhhh'" into its binary ** value. Return a pointer to its binary value. Space to hold the ** binary value has been obtained from malloc and must be freed by ** the calling routine. 
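/*
** Editorial sketch (not part of the SQLite sources): the portable fallback
** used by sqlite3Get4byte()/sqlite3Put4byte() above.  File-format integers
** are stored big-endian regardless of host byte order, so the bytes are
** combined and split explicitly.  be32_get/be32_put are hypothetical names.
*/
#include <stdint.h>

static uint32_t be32_get(const unsigned char *p){
  return ((uint32_t)p[0]<<24) | ((uint32_t)p[1]<<16) | ((uint32_t)p[2]<<8) | (uint32_t)p[3];
}
static void be32_put(unsigned char *p, uint32_t v){
  p[0] = (unsigned char)(v>>24);
  p[1] = (unsigned char)(v>>16);
  p[2] = (unsigned char)(v>>8);
  p[3] = (unsigned char)(v);
}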
*/ SQLITE_PRIVATE void *sqlite3HexToBlob(sqlite3 *db, const char *z, int n){ char *zBlob; int i; zBlob = (char *)sqlite3DbMallocRaw(db, n/2 + 1); n--; if( zBlob ){ for(i=0; imagic; if( magic!=SQLITE_MAGIC_OPEN ){ if( sqlite3SafetyCheckSickOrOk(db) ){ testcase( sqlite3GlobalConfig.xLog!=0 ); logBadConnection("unopened"); } return 0; }else{ return 1; } } SQLITE_PRIVATE int sqlite3SafetyCheckSickOrOk(sqlite3 *db){ u32 magic; magic = db->magic; if( magic!=SQLITE_MAGIC_SICK && magic!=SQLITE_MAGIC_OPEN && magic!=SQLITE_MAGIC_BUSY ){ testcase( sqlite3GlobalConfig.xLog!=0 ); logBadConnection("invalid"); return 0; }else{ return 1; } } /* ** Attempt to add, substract, or multiply the 64-bit signed value iB against ** the other 64-bit signed integer at *pA and store the result in *pA. ** Return 0 on success. Or if the operation would have resulted in an ** overflow, leave *pA unchanged and return 1. */ SQLITE_PRIVATE int sqlite3AddInt64(i64 *pA, i64 iB){ i64 iA = *pA; testcase( iA==0 ); testcase( iA==1 ); testcase( iB==-1 ); testcase( iB==0 ); if( iB>=0 ){ testcase( iA>0 && LARGEST_INT64 - iA == iB ); testcase( iA>0 && LARGEST_INT64 - iA == iB - 1 ); if( iA>0 && LARGEST_INT64 - iA < iB ) return 1; }else{ testcase( iA<0 && -(iA + LARGEST_INT64) == iB + 1 ); testcase( iA<0 && -(iA + LARGEST_INT64) == iB + 2 ); if( iA<0 && -(iA + LARGEST_INT64) > iB + 1 ) return 1; } *pA += iB; return 0; } SQLITE_PRIVATE int sqlite3SubInt64(i64 *pA, i64 iB){ testcase( iB==SMALLEST_INT64+1 ); if( iB==SMALLEST_INT64 ){ testcase( (*pA)==(-1) ); testcase( (*pA)==0 ); if( (*pA)>=0 ) return 1; *pA -= iB; return 0; }else{ return sqlite3AddInt64(pA, -iB); } } #define TWOPOWER32 (((i64)1)<<32) #define TWOPOWER31 (((i64)1)<<31) SQLITE_PRIVATE int sqlite3MulInt64(i64 *pA, i64 iB){ i64 iA = *pA; i64 iA1, iA0, iB1, iB0, r; iA1 = iA/TWOPOWER32; iA0 = iA % TWOPOWER32; iB1 = iB/TWOPOWER32; iB0 = iB % TWOPOWER32; if( iA1==0 ){ if( iB1==0 ){ *pA *= iB; return 0; } r = iA0*iB1; }else if( iB1==0 ){ r = iA1*iB0; }else{ /* If both iA1 and iB1 are non-zero, overflow will result */ return 1; } testcase( r==(-TWOPOWER31)-1 ); testcase( r==(-TWOPOWER31) ); testcase( r==TWOPOWER31 ); testcase( r==TWOPOWER31-1 ); if( r<(-TWOPOWER31) || r>=TWOPOWER31 ) return 1; r *= TWOPOWER32; if( sqlite3AddInt64(&r, iA0*iB0) ) return 1; *pA = r; return 0; } /* ** Compute the absolute value of a 32-bit signed integer, of possible. Or ** if the integer has a value of -2147483648, return +2147483647 */ SQLITE_PRIVATE int sqlite3AbsInt32(int x){ if( x>=0 ) return x; if( x==(int)0x80000000 ) return 0x7fffffff; return -x; } #ifdef SQLITE_ENABLE_8_3_NAMES /* ** If SQLITE_ENABLE_8_3_NAMES is set at compile-time and if the database ** filename in zBaseFilename is a URI with the "8_3_names=1" parameter and ** if filename in z[] has a suffix (a.k.a. "extension") that is longer than ** three characters, then shorten the suffix on z[] to be the last three ** characters of the original suffix. ** ** If SQLITE_ENABLE_8_3_NAMES is set to 2 at compile-time, then always ** do the suffix shortening regardless of URI parameter. ** ** Examples: ** ** test.db-journal => test.nal ** test.db-wal => test.wal ** test.db-shm => test.shm ** test.db-mj7f3319fa => test.9fa */ SQLITE_PRIVATE void sqlite3FileSuffix3(const char *zBaseFilename, char *z){ #if SQLITE_ENABLE_8_3_NAMES<2 if( sqlite3_uri_boolean(zBaseFilename, "8_3_names", 0) ) #endif { int i, sz; sz = sqlite3Strlen30(z); for(i=sz-1; i>0 && z[i]!='/' && z[i]!='.'; i--){} if( z[i]=='.' 
&& ALWAYS(sz>i+4) ) memmove(&z[i+1], &z[sz-3], 4); } } #endif /* ** Find (an approximate) sum of two LogEst values. This computation is ** not a simple "+" operator because LogEst is stored as a logarithmic ** value. ** */ SQLITE_PRIVATE LogEst sqlite3LogEstAdd(LogEst a, LogEst b){ static const unsigned char x[] = { 10, 10, /* 0,1 */ 9, 9, /* 2,3 */ 8, 8, /* 4,5 */ 7, 7, 7, /* 6,7,8 */ 6, 6, 6, /* 9,10,11 */ 5, 5, 5, /* 12-14 */ 4, 4, 4, 4, /* 15-18 */ 3, 3, 3, 3, 3, 3, /* 19-24 */ 2, 2, 2, 2, 2, 2, 2, /* 25-31 */ }; if( a>=b ){ if( a>b+49 ) return a; if( a>b+31 ) return a+1; return a+x[a-b]; }else{ if( b>a+49 ) return b; if( b>a+31 ) return b+1; return b+x[b-a]; } } /* ** Convert an integer into a LogEst. In other words, compute an ** approximation for 10*log2(x). */ SQLITE_PRIVATE LogEst sqlite3LogEst(u64 x){ static LogEst a[] = { 0, 2, 3, 5, 6, 7, 8, 9 }; LogEst y = 40; if( x<8 ){ if( x<2 ) return 0; while( x<8 ){ y -= 10; x <<= 1; } }else{ while( x>255 ){ y += 40; x >>= 4; } while( x>15 ){ y += 10; x >>= 1; } } return a[x&7] + y - 10; } #ifndef SQLITE_OMIT_VIRTUALTABLE /* ** Convert a double into a LogEst ** In other words, compute an approximation for 10*log2(x). */ SQLITE_PRIVATE LogEst sqlite3LogEstFromDouble(double x){ u64 a; LogEst e; assert( sizeof(x)==8 && sizeof(a)==8 ); if( x<=1 ) return 0; if( x<=2000000000 ) return sqlite3LogEst((u64)x); memcpy(&a, &x, 8); e = (a>>52) - 1022; return e*10; } #endif /* SQLITE_OMIT_VIRTUALTABLE */ /* ** Convert a LogEst into an integer. */ SQLITE_PRIVATE u64 sqlite3LogEstToInt(LogEst x){ u64 n; if( x<10 ) return 1; n = x%10; x /= 10; if( n>=5 ) n -= 2; else if( n>=1 ) n -= 1; if( x>=3 ){ return x>60 ? (u64)LARGEST_INT64 : (n+8)<<(x-3); } return (n+8)>>(3-x); } /************** End of util.c ************************************************/ /************** Begin file hash.c ********************************************/ /* ** 2001 September 22 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** This is the implementation of generic hash-tables ** used in SQLite. */ /* #include "sqliteInt.h" */ /* #include */ /* Turn bulk memory into a hash table object by initializing the ** fields of the Hash structure. ** ** "pNew" is a pointer to the hash table that is to be initialized. */ SQLITE_PRIVATE void sqlite3HashInit(Hash *pNew){ assert( pNew!=0 ); pNew->first = 0; pNew->count = 0; pNew->htsize = 0; pNew->ht = 0; } /* Remove all entries from a hash table. Reclaim all memory. ** Call this routine to delete a hash table or to reset a hash table ** to the empty state. */ SQLITE_PRIVATE void sqlite3HashClear(Hash *pH){ HashElem *elem; /* For looping over all elements of the table */ assert( pH!=0 ); elem = pH->first; pH->first = 0; sqlite3_free(pH->ht); pH->ht = 0; pH->htsize = 0; while( elem ){ HashElem *next_elem = elem->next; sqlite3_free(elem); elem = next_elem; } pH->count = 0; } /* ** The hashing function. */ static unsigned int strHash(const char *z){ unsigned int h = 0; unsigned char c; while( (c = (unsigned char)*z++)!=0 ){ h = (h<<3) ^ h ^ sqlite3UpperToLower[c]; } return h; } /* Link pNew element into the hash table pH. If pEntry!=0 then also ** insert pNew into the pEntry hash bucket. 
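/*
** Editorial sketch (not part of the SQLite sources): what a LogEst value
** means.  A LogEst is an integer approximation of 10*log2(x), so multiplying
** two size estimates becomes a small integer addition and even huge row
** counts stay within a 16-bit value.  to_logest/from_logest are hypothetical
** helpers that lean on libm; the real sqlite3LogEst()/sqlite3LogEstToInt()
** above avoid floating point entirely.
**
**   ~1,000,000 rows       -> LogEst ~199   (10*log2(1e6) = 199.3)
**   ~1,000 rows           -> LogEst ~100
**   cross join of the two -> 199+100 = 299, i.e. roughly 2^29.9 ~ 1e9 rows
*/
#include <math.h>

static int to_logest(double x){
  return (int)(10.0*log2(x) + 0.5);
}
static double from_logest(int e){
  return pow(2.0, (double)e/10.0);
}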
*/ static void insertElement( Hash *pH, /* The complete hash table */ struct _ht *pEntry, /* The entry into which pNew is inserted */ HashElem *pNew /* The element to be inserted */ ){ HashElem *pHead; /* First element already in pEntry */ if( pEntry ){ pHead = pEntry->count ? pEntry->chain : 0; pEntry->count++; pEntry->chain = pNew; }else{ pHead = 0; } if( pHead ){ pNew->next = pHead; pNew->prev = pHead->prev; if( pHead->prev ){ pHead->prev->next = pNew; } else { pH->first = pNew; } pHead->prev = pNew; }else{ pNew->next = pH->first; if( pH->first ){ pH->first->prev = pNew; } pNew->prev = 0; pH->first = pNew; } } /* Resize the hash table so that it cantains "new_size" buckets. ** ** The hash table might fail to resize if sqlite3_malloc() fails or ** if the new size is the same as the prior size. ** Return TRUE if the resize occurs and false if not. */ static int rehash(Hash *pH, unsigned int new_size){ struct _ht *new_ht; /* The new hash table */ HashElem *elem, *next_elem; /* For looping over existing elements */ #if SQLITE_MALLOC_SOFT_LIMIT>0 if( new_size*sizeof(struct _ht)>SQLITE_MALLOC_SOFT_LIMIT ){ new_size = SQLITE_MALLOC_SOFT_LIMIT/sizeof(struct _ht); } if( new_size==pH->htsize ) return 0; #endif /* The inability to allocates space for a larger hash table is ** a performance hit but it is not a fatal error. So mark the ** allocation as a benign. Use sqlite3Malloc()/memset(0) instead of ** sqlite3MallocZero() to make the allocation, as sqlite3MallocZero() ** only zeroes the requested number of bytes whereas this module will ** use the actual amount of space allocated for the hash table (which ** may be larger than the requested amount). */ sqlite3BeginBenignMalloc(); new_ht = (struct _ht *)sqlite3Malloc( new_size*sizeof(struct _ht) ); sqlite3EndBenignMalloc(); if( new_ht==0 ) return 0; sqlite3_free(pH->ht); pH->ht = new_ht; pH->htsize = new_size = sqlite3MallocSize(new_ht)/sizeof(struct _ht); memset(new_ht, 0, new_size*sizeof(struct _ht)); for(elem=pH->first, pH->first=0; elem; elem = next_elem){ unsigned int h = strHash(elem->pKey) % new_size; next_elem = elem->next; insertElement(pH, &new_ht[h], elem); } return 1; } /* This function (for internal use only) locates an element in an ** hash table that matches the given key. The hash for this key is ** also computed and returned in the *pH parameter. */ static HashElem *findElementWithHash( const Hash *pH, /* The pH to be searched */ const char *pKey, /* The key we are searching for */ unsigned int *pHash /* Write the hash value here */ ){ HashElem *elem; /* Used to loop thru the element list */ int count; /* Number of elements left to test */ unsigned int h; /* The computed hash */ if( pH->ht ){ struct _ht *pEntry; h = strHash(pKey) % pH->htsize; pEntry = &pH->ht[h]; elem = pEntry->chain; count = pEntry->count; }else{ h = 0; elem = pH->first; count = pH->count; } *pHash = h; while( count-- ){ assert( elem!=0 ); if( sqlite3StrICmp(elem->pKey,pKey)==0 ){ return elem; } elem = elem->next; } return 0; } /* Remove a single entry from the hash table given a pointer to that ** element and a hash on the element's key. 
*/ static void removeElementGivenHash( Hash *pH, /* The pH containing "elem" */ HashElem* elem, /* The element to be removed from the pH */ unsigned int h /* Hash value for the element */ ){ struct _ht *pEntry; if( elem->prev ){ elem->prev->next = elem->next; }else{ pH->first = elem->next; } if( elem->next ){ elem->next->prev = elem->prev; } if( pH->ht ){ pEntry = &pH->ht[h]; if( pEntry->chain==elem ){ pEntry->chain = elem->next; } pEntry->count--; assert( pEntry->count>=0 ); } sqlite3_free( elem ); pH->count--; if( pH->count==0 ){ assert( pH->first==0 ); assert( pH->count==0 ); sqlite3HashClear(pH); } } /* Attempt to locate an element of the hash table pH with a key ** that matches pKey. Return the data for this element if it is ** found, or NULL if there is no match. */ SQLITE_PRIVATE void *sqlite3HashFind(const Hash *pH, const char *pKey){ HashElem *elem; /* The element that matches key */ unsigned int h; /* A hash on key */ assert( pH!=0 ); assert( pKey!=0 ); elem = findElementWithHash(pH, pKey, &h); return elem ? elem->data : 0; } /* Insert an element into the hash table pH. The key is pKey ** and the data is "data". ** ** If no element exists with a matching key, then a new ** element is created and NULL is returned. ** ** If another element already exists with the same key, then the ** new data replaces the old data and the old data is returned. ** The key is not copied in this instance. If a malloc fails, then ** the new data is returned and the hash table is unchanged. ** ** If the "data" parameter to this function is NULL, then the ** element corresponding to "key" is removed from the hash table. */ SQLITE_PRIVATE void *sqlite3HashInsert(Hash *pH, const char *pKey, void *data){ unsigned int h; /* the hash of the key modulo hash table size */ HashElem *elem; /* Used to loop thru the element list */ HashElem *new_elem; /* New element added to the pH */ assert( pH!=0 ); assert( pKey!=0 ); elem = findElementWithHash(pH,pKey,&h); if( elem ){ void *old_data = elem->data; if( data==0 ){ removeElementGivenHash(pH,elem,h); }else{ elem->data = data; elem->pKey = pKey; } return old_data; } if( data==0 ) return 0; new_elem = (HashElem*)sqlite3Malloc( sizeof(HashElem) ); if( new_elem==0 ) return data; new_elem->pKey = pKey; new_elem->data = data; pH->count++; if( pH->count>=10 && pH->count > 2*pH->htsize ){ if( rehash(pH, pH->count*2) ){ assert( pH->htsize>0 ); h = strHash(pKey) % pH->htsize; } } insertElement(pH, pH->ht ? &pH->ht[h] : 0, new_elem); return 0; } /************** End of hash.c ************************************************/ /************** Begin file opcodes.c *****************************************/ /* Automatically generated. Do not edit */ /* See the mkopcodec.awk script for details. 
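/*
** Editorial sketch (not part of the SQLite sources): the contract of the
** hash-table routines above, shown as a usage fragment.  It assumes the
** fragment is compiled inside the amalgamation where Hash, sqlite3HashInit(),
** sqlite3HashInsert(), sqlite3HashFind() and sqlite3HashClear() are visible.
** Note that only the key pointer is stored, so keys must outlive the table;
** hash_usage_example is a hypothetical name.
*/
#include <assert.h>

static void hash_usage_example(void){
  Hash h;
  static const char zKey[] = "main";             /* key storage must persist */
  static int payload = 42;
  void *pOld;
  sqlite3HashInit(&h);
  pOld = sqlite3HashInsert(&h, zKey, &payload);  /* new key: returns NULL    */
  assert( pOld==0 );
  assert( sqlite3HashFind(&h, "MAIN")==&payload );  /* lookups ignore case   */
  pOld = sqlite3HashInsert(&h, zKey, 0);         /* NULL data removes entry  */
  assert( pOld==&payload );
  sqlite3HashClear(&h);                          /* release remaining memory */
}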
*/ #if !defined(SQLITE_OMIT_EXPLAIN) || defined(VDBE_PROFILE) || defined(SQLITE_DEBUG) #if defined(SQLITE_ENABLE_EXPLAIN_COMMENTS) || defined(SQLITE_DEBUG) # define OpHelp(X) "\0" X #else # define OpHelp(X) #endif SQLITE_PRIVATE const char *sqlite3OpcodeName(int i){ static const char *const azName[] = { "?", /* 1 */ "Savepoint" OpHelp(""), /* 2 */ "AutoCommit" OpHelp(""), /* 3 */ "Transaction" OpHelp(""), /* 4 */ "SorterNext" OpHelp(""), /* 5 */ "PrevIfOpen" OpHelp(""), /* 6 */ "NextIfOpen" OpHelp(""), /* 7 */ "Prev" OpHelp(""), /* 8 */ "Next" OpHelp(""), /* 9 */ "Checkpoint" OpHelp(""), /* 10 */ "JournalMode" OpHelp(""), /* 11 */ "Vacuum" OpHelp(""), /* 12 */ "VFilter" OpHelp("iplan=r[P3] zplan='P4'"), /* 13 */ "VUpdate" OpHelp("data=r[P3@P2]"), /* 14 */ "Goto" OpHelp(""), /* 15 */ "Gosub" OpHelp(""), /* 16 */ "Return" OpHelp(""), /* 17 */ "InitCoroutine" OpHelp(""), /* 18 */ "EndCoroutine" OpHelp(""), /* 19 */ "Not" OpHelp("r[P2]= !r[P1]"), /* 20 */ "Yield" OpHelp(""), /* 21 */ "HaltIfNull" OpHelp("if r[P3]=null halt"), /* 22 */ "Halt" OpHelp(""), /* 23 */ "Integer" OpHelp("r[P2]=P1"), /* 24 */ "Int64" OpHelp("r[P2]=P4"), /* 25 */ "String" OpHelp("r[P2]='P4' (len=P1)"), /* 26 */ "Null" OpHelp("r[P2..P3]=NULL"), /* 27 */ "SoftNull" OpHelp("r[P1]=NULL"), /* 28 */ "Blob" OpHelp("r[P2]=P4 (len=P1)"), /* 29 */ "Variable" OpHelp("r[P2]=parameter(P1,P4)"), /* 30 */ "Move" OpHelp("r[P2@P3]=r[P1@P3]"), /* 31 */ "Copy" OpHelp("r[P2@P3+1]=r[P1@P3+1]"), /* 32 */ "SCopy" OpHelp("r[P2]=r[P1]"), /* 33 */ "ResultRow" OpHelp("output=r[P1@P2]"), /* 34 */ "CollSeq" OpHelp(""), /* 35 */ "Function0" OpHelp("r[P3]=func(r[P2@P5])"), /* 36 */ "Function" OpHelp("r[P3]=func(r[P2@P5])"), /* 37 */ "AddImm" OpHelp("r[P1]=r[P1]+P2"), /* 38 */ "MustBeInt" OpHelp(""), /* 39 */ "RealAffinity" OpHelp(""), /* 40 */ "Cast" OpHelp("affinity(r[P1])"), /* 41 */ "Permutation" OpHelp(""), /* 42 */ "Compare" OpHelp("r[P1@P3] <-> r[P2@P3]"), /* 43 */ "Jump" OpHelp(""), /* 44 */ "Once" OpHelp(""), /* 45 */ "If" OpHelp(""), /* 46 */ "IfNot" OpHelp(""), /* 47 */ "Column" OpHelp("r[P3]=PX"), /* 48 */ "Affinity" OpHelp("affinity(r[P1@P2])"), /* 49 */ "MakeRecord" OpHelp("r[P3]=mkrec(r[P1@P2])"), /* 50 */ "Count" OpHelp("r[P2]=count()"), /* 51 */ "ReadCookie" OpHelp(""), /* 52 */ "SetCookie" OpHelp(""), /* 53 */ "ReopenIdx" OpHelp("root=P2 iDb=P3"), /* 54 */ "OpenRead" OpHelp("root=P2 iDb=P3"), /* 55 */ "OpenWrite" OpHelp("root=P2 iDb=P3"), /* 56 */ "OpenAutoindex" OpHelp("nColumn=P2"), /* 57 */ "OpenEphemeral" OpHelp("nColumn=P2"), /* 58 */ "SorterOpen" OpHelp(""), /* 59 */ "SequenceTest" OpHelp("if( cursor[P1].ctr++ ) pc = P2"), /* 60 */ "OpenPseudo" OpHelp("P3 columns in r[P2]"), /* 61 */ "Close" OpHelp(""), /* 62 */ "ColumnsUsed" OpHelp(""), /* 63 */ "SeekLT" OpHelp("key=r[P3@P4]"), /* 64 */ "SeekLE" OpHelp("key=r[P3@P4]"), /* 65 */ "SeekGE" OpHelp("key=r[P3@P4]"), /* 66 */ "SeekGT" OpHelp("key=r[P3@P4]"), /* 67 */ "Seek" OpHelp("intkey=r[P2]"), /* 68 */ "NoConflict" OpHelp("key=r[P3@P4]"), /* 69 */ "NotFound" OpHelp("key=r[P3@P4]"), /* 70 */ "Found" OpHelp("key=r[P3@P4]"), /* 71 */ "Or" OpHelp("r[P3]=(r[P1] || r[P2])"), /* 72 */ "And" OpHelp("r[P3]=(r[P1] && r[P2])"), /* 73 */ "NotExists" OpHelp("intkey=r[P3]"), /* 74 */ "Sequence" OpHelp("r[P2]=cursor[P1].ctr++"), /* 75 */ "NewRowid" OpHelp("r[P2]=rowid"), /* 76 */ "IsNull" OpHelp("if r[P1]==NULL goto P2"), /* 77 */ "NotNull" OpHelp("if r[P1]!=NULL goto P2"), /* 78 */ "Ne" OpHelp("if r[P1]!=r[P3] goto P2"), /* 79 */ "Eq" OpHelp("if r[P1]==r[P3] goto P2"), /* 80 */ "Gt" OpHelp("if 
r[P1]>r[P3] goto P2"), /* 81 */ "Le" OpHelp("if r[P1]<=r[P3] goto P2"), /* 82 */ "Lt" OpHelp("if r[P1]=r[P3] goto P2"), /* 84 */ "Insert" OpHelp("intkey=r[P3] data=r[P2]"), /* 85 */ "BitAnd" OpHelp("r[P3]=r[P1]&r[P2]"), /* 86 */ "BitOr" OpHelp("r[P3]=r[P1]|r[P2]"), /* 87 */ "ShiftLeft" OpHelp("r[P3]=r[P2]<>r[P1]"), /* 89 */ "Add" OpHelp("r[P3]=r[P1]+r[P2]"), /* 90 */ "Subtract" OpHelp("r[P3]=r[P2]-r[P1]"), /* 91 */ "Multiply" OpHelp("r[P3]=r[P1]*r[P2]"), /* 92 */ "Divide" OpHelp("r[P3]=r[P2]/r[P1]"), /* 93 */ "Remainder" OpHelp("r[P3]=r[P2]%r[P1]"), /* 94 */ "Concat" OpHelp("r[P3]=r[P2]+r[P1]"), /* 95 */ "InsertInt" OpHelp("intkey=P3 data=r[P2]"), /* 96 */ "BitNot" OpHelp("r[P1]= ~r[P1]"), /* 97 */ "String8" OpHelp("r[P2]='P4'"), /* 98 */ "Delete" OpHelp(""), /* 99 */ "ResetCount" OpHelp(""), /* 100 */ "SorterCompare" OpHelp("if key(P1)!=trim(r[P3],P4) goto P2"), /* 101 */ "SorterData" OpHelp("r[P2]=data"), /* 102 */ "RowKey" OpHelp("r[P2]=key"), /* 103 */ "RowData" OpHelp("r[P2]=data"), /* 104 */ "Rowid" OpHelp("r[P2]=rowid"), /* 105 */ "NullRow" OpHelp(""), /* 106 */ "Last" OpHelp(""), /* 107 */ "SorterSort" OpHelp(""), /* 108 */ "Sort" OpHelp(""), /* 109 */ "Rewind" OpHelp(""), /* 110 */ "SorterInsert" OpHelp(""), /* 111 */ "IdxInsert" OpHelp("key=r[P2]"), /* 112 */ "IdxDelete" OpHelp("key=r[P2@P3]"), /* 113 */ "IdxRowid" OpHelp("r[P2]=rowid"), /* 114 */ "IdxLE" OpHelp("key=r[P3@P4]"), /* 115 */ "IdxGT" OpHelp("key=r[P3@P4]"), /* 116 */ "IdxLT" OpHelp("key=r[P3@P4]"), /* 117 */ "IdxGE" OpHelp("key=r[P3@P4]"), /* 118 */ "Destroy" OpHelp(""), /* 119 */ "Clear" OpHelp(""), /* 120 */ "ResetSorter" OpHelp(""), /* 121 */ "CreateIndex" OpHelp("r[P2]=root iDb=P1"), /* 122 */ "CreateTable" OpHelp("r[P2]=root iDb=P1"), /* 123 */ "ParseSchema" OpHelp(""), /* 124 */ "LoadAnalysis" OpHelp(""), /* 125 */ "DropTable" OpHelp(""), /* 126 */ "DropIndex" OpHelp(""), /* 127 */ "DropTrigger" OpHelp(""), /* 128 */ "IntegrityCk" OpHelp(""), /* 129 */ "RowSetAdd" OpHelp("rowset(P1)=r[P2]"), /* 130 */ "RowSetRead" OpHelp("r[P3]=rowset(P1)"), /* 131 */ "RowSetTest" OpHelp("if r[P3] in rowset(P1) goto P2"), /* 132 */ "Program" OpHelp(""), /* 133 */ "Real" OpHelp("r[P2]=P4"), /* 134 */ "Param" OpHelp(""), /* 135 */ "FkCounter" OpHelp("fkctr[P1]+=P2"), /* 136 */ "FkIfZero" OpHelp("if fkctr[P1]==0 goto P2"), /* 137 */ "MemMax" OpHelp("r[P1]=max(r[P1],r[P2])"), /* 138 */ "IfPos" OpHelp("if r[P1]>0 goto P2"), /* 139 */ "IfNeg" OpHelp("r[P1]+=P3, if r[P1]<0 goto P2"), /* 140 */ "IfNotZero" OpHelp("if r[P1]!=0 then r[P1]+=P3, goto P2"), /* 141 */ "DecrJumpZero" OpHelp("if (--r[P1])==0 goto P2"), /* 142 */ "JumpZeroIncr" OpHelp("if (r[P1]++)==0 ) goto P2"), /* 143 */ "AggStep0" OpHelp("accum=r[P3] step(r[P2@P5])"), /* 144 */ "AggStep" OpHelp("accum=r[P3] step(r[P2@P5])"), /* 145 */ "AggFinal" OpHelp("accum=r[P1] N=P2"), /* 146 */ "IncrVacuum" OpHelp(""), /* 147 */ "Expire" OpHelp(""), /* 148 */ "TableLock" OpHelp("iDb=P1 root=P2 write=P3"), /* 149 */ "VBegin" OpHelp(""), /* 150 */ "VCreate" OpHelp(""), /* 151 */ "VDestroy" OpHelp(""), /* 152 */ "VOpen" OpHelp(""), /* 153 */ "VColumn" OpHelp("r[P3]=vcolumn(P2)"), /* 154 */ "VNext" OpHelp(""), /* 155 */ "VRename" OpHelp(""), /* 156 */ "Pagecount" OpHelp(""), /* 157 */ "MaxPgcnt" OpHelp(""), /* 158 */ "Init" OpHelp("Start at P2"), /* 159 */ "Noop" OpHelp(""), /* 160 */ "Explain" OpHelp(""), }; return azName[i]; } #endif /************** End of opcodes.c *********************************************/ /************** Begin file os_unix.c 
*****************************************/ /* ** 2004 May 22 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ****************************************************************************** ** ** This file contains the VFS implementation for unix-like operating systems ** include Linux, MacOSX, *BSD, QNX, VxWorks, AIX, HPUX, and others. ** ** There are actually several different VFS implementations in this file. ** The differences are in the way that file locking is done. The default ** implementation uses Posix Advisory Locks. Alternative implementations ** use flock(), dot-files, various proprietary locking schemas, or simply ** skip locking all together. ** ** This source file is organized into divisions where the logic for various ** subfunctions is contained within the appropriate division. PLEASE ** KEEP THE STRUCTURE OF THIS FILE INTACT. New code should be placed ** in the correct division and should be clearly labeled. ** ** The layout of divisions is as follows: ** ** * General-purpose declarations and utility functions. ** * Unique file ID logic used by VxWorks. ** * Various locking primitive implementations (all except proxy locking): ** + for Posix Advisory Locks ** + for no-op locks ** + for dot-file locks ** + for flock() locking ** + for named semaphore locks (VxWorks only) ** + for AFP filesystem locks (MacOSX only) ** * sqlite3_file methods not associated with locking. ** * Definitions of sqlite3_io_methods objects for all locking ** methods plus "finder" functions for each locking method. ** * sqlite3_vfs method implementations. ** * Locking primitives for the proxy uber-locking-method. (MacOSX only) ** * Definitions of sqlite3_vfs objects for all locking methods ** plus implementations of sqlite3_os_init() and sqlite3_os_end(). */ /* #include "sqliteInt.h" */ #if SQLITE_OS_UNIX /* This file is used on unix only */ /* ** There are various methods for file locking used for concurrency ** control: ** ** 1. POSIX locking (the default), ** 2. No locking, ** 3. Dot-file locking, ** 4. flock() locking, ** 5. AFP locking (OSX only), ** 6. Named POSIX semaphores (VXWorks only), ** 7. proxy locking. (OSX only) ** ** Styles 4, 5, and 7 are only available of SQLITE_ENABLE_LOCKING_STYLE ** is defined to 1. The SQLITE_ENABLE_LOCKING_STYLE also enables automatic ** selection of the appropriate locking style based on the filesystem ** where the database is located. */ #if !defined(SQLITE_ENABLE_LOCKING_STYLE) # if defined(__APPLE__) # define SQLITE_ENABLE_LOCKING_STYLE 1 # else # define SQLITE_ENABLE_LOCKING_STYLE 0 # endif #endif /* ** standard include files. */ #include #include #include #include /* #include */ #include #include #if !defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0 # include #endif #if SQLITE_ENABLE_LOCKING_STYLE # include # include # include #endif /* SQLITE_ENABLE_LOCKING_STYLE */ #if defined(__APPLE__) && ((__MAC_OS_X_VERSION_MIN_REQUIRED > 1050) || \ (__IPHONE_OS_VERSION_MIN_REQUIRED > 2000)) # if (!defined(TARGET_OS_EMBEDDED) || (TARGET_OS_EMBEDDED==0)) \ && (!defined(TARGET_IPHONE_SIMULATOR) || (TARGET_IPHONE_SIMULATOR==0)) # define HAVE_GETHOSTUUID 1 # else # warning "gethostuuid() is disabled." 
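/*
** Editorial sketch (not part of the SQLite sources): what "POSIX advisory
** locking", the default style listed above, looks like at the system-call
** level.  A shared lock is requested on a byte range of an open descriptor;
** the call fails without blocking if another process holds an exclusive
** lock on an overlapping range.  take_shared_byte_lock is a hypothetical
** helper -- SQLite's real locking lives in the unix VFS xLock/xUnlock
** methods later in this file and targets reserved byte ranges of the
** database file.
*/
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int take_shared_byte_lock(int fd, off_t iOfst, off_t nByte){
  struct flock lk;
  memset(&lk, 0, sizeof(lk));
  lk.l_type   = F_RDLCK;            /* shared (read) lock          */
  lk.l_whence = SEEK_SET;
  lk.l_start  = iOfst;
  lk.l_len    = nByte;
  return fcntl(fd, F_SETLK, &lk);   /* 0 on success, -1 on conflict */
}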
# endif #endif #if OS_VXWORKS /* # include */ # include # include #endif /* OS_VXWORKS */ #if defined(__APPLE__) || SQLITE_ENABLE_LOCKING_STYLE # include #endif #ifdef HAVE_UTIME # include #endif /* ** Allowed values of unixFile.fsFlags */ #define SQLITE_FSFLAGS_IS_MSDOS 0x1 /* ** If we are to be thread-safe, include the pthreads header and define ** the SQLITE_UNIX_THREADS macro. */ #if SQLITE_THREADSAFE /* # include */ # define SQLITE_UNIX_THREADS 1 #endif /* ** Default permissions when creating a new file */ #ifndef SQLITE_DEFAULT_FILE_PERMISSIONS # define SQLITE_DEFAULT_FILE_PERMISSIONS 0644 #endif /* ** Default permissions when creating auto proxy dir */ #ifndef SQLITE_DEFAULT_PROXYDIR_PERMISSIONS # define SQLITE_DEFAULT_PROXYDIR_PERMISSIONS 0755 #endif /* ** Maximum supported path-length. */ #define MAX_PATHNAME 512 /* Always cast the getpid() return type for compatibility with ** kernel modules in VxWorks. */ #define osGetpid(X) (pid_t)getpid() /* ** Only set the lastErrno if the error code is a real error and not ** a normal expected return code of SQLITE_BUSY or SQLITE_OK */ #define IS_LOCK_ERROR(x) ((x != SQLITE_OK) && (x != SQLITE_BUSY)) /* Forward references */ typedef struct unixShm unixShm; /* Connection shared memory */ typedef struct unixShmNode unixShmNode; /* Shared memory instance */ typedef struct unixInodeInfo unixInodeInfo; /* An i-node */ typedef struct UnixUnusedFd UnixUnusedFd; /* An unused file descriptor */ /* ** Sometimes, after a file handle is closed by SQLite, the file descriptor ** cannot be closed immediately. In these cases, instances of the following ** structure are used to store the file descriptor while waiting for an ** opportunity to either close or reuse it. */ struct UnixUnusedFd { int fd; /* File descriptor to close */ int flags; /* Flags this file descriptor was opened with */ UnixUnusedFd *pNext; /* Next unused file descriptor on same file */ }; /* ** The unixFile structure is subclass of sqlite3_file specific to the unix ** VFS implementations. */ typedef struct unixFile unixFile; struct unixFile { sqlite3_io_methods const *pMethod; /* Always the first entry */ sqlite3_vfs *pVfs; /* The VFS that created this unixFile */ unixInodeInfo *pInode; /* Info about locks on this inode */ int h; /* The file descriptor */ unsigned char eFileLock; /* The type of lock held on this fd */ unsigned short int ctrlFlags; /* Behavioral bits. 
UNIXFILE_* flags */ int lastErrno; /* The unix errno from last I/O error */ void *lockingContext; /* Locking style specific state */ UnixUnusedFd *pUnused; /* Pre-allocated UnixUnusedFd */ const char *zPath; /* Name of the file */ unixShm *pShm; /* Shared memory segment information */ int szChunk; /* Configured by FCNTL_CHUNK_SIZE */ #if SQLITE_MAX_MMAP_SIZE>0 int nFetchOut; /* Number of outstanding xFetch refs */ sqlite3_int64 mmapSize; /* Usable size of mapping at pMapRegion */ sqlite3_int64 mmapSizeActual; /* Actual size of mapping at pMapRegion */ sqlite3_int64 mmapSizeMax; /* Configured FCNTL_MMAP_SIZE value */ void *pMapRegion; /* Memory mapped region */ #endif #ifdef __QNXNTO__ int sectorSize; /* Device sector size */ int deviceCharacteristics; /* Precomputed device characteristics */ #endif #if SQLITE_ENABLE_LOCKING_STYLE int openFlags; /* The flags specified at open() */ #endif #if SQLITE_ENABLE_LOCKING_STYLE || defined(__APPLE__) unsigned fsFlags; /* cached details from statfs() */ #endif #if OS_VXWORKS struct vxworksFileId *pId; /* Unique file ID */ #endif #ifdef SQLITE_DEBUG /* The next group of variables are used to track whether or not the ** transaction counter in bytes 24-27 of database files are updated ** whenever any part of the database changes. An assertion fault will ** occur if a file is updated without also updating the transaction ** counter. This test is made to avoid new problems similar to the ** one described by ticket #3584. */ unsigned char transCntrChng; /* True if the transaction counter changed */ unsigned char dbUpdate; /* True if any part of database file changed */ unsigned char inNormalWrite; /* True if in a normal write operation */ #endif #ifdef SQLITE_TEST /* In test mode, increase the size of this structure a bit so that ** it is larger than the struct CrashFile defined in test6.c. */ char aPadding[32]; #endif }; /* This variable holds the process id (pid) from when the xRandomness() ** method was called. If xOpen() is called from a different process id, ** indicating that a fork() has occurred, the PRNG will be reset. */ static pid_t randomnessPid = 0; /* ** Allowed values for the unixFile.ctrlFlags bitmask: */ #define UNIXFILE_EXCL 0x01 /* Connections from one process only */ #define UNIXFILE_RDONLY 0x02 /* Connection is read only */ #define UNIXFILE_PERSIST_WAL 0x04 /* Persistent WAL mode */ #ifndef SQLITE_DISABLE_DIRSYNC # define UNIXFILE_DIRSYNC 0x08 /* Directory sync needed */ #else # define UNIXFILE_DIRSYNC 0x00 #endif #define UNIXFILE_PSOW 0x10 /* SQLITE_IOCAP_POWERSAFE_OVERWRITE */ #define UNIXFILE_DELETE 0x20 /* Delete on close */ #define UNIXFILE_URI 0x40 /* Filename might have query parameters */ #define UNIXFILE_NOLOCK 0x80 /* Do no file locking */ #define UNIXFILE_WARNED 0x0100 /* verifyDbFile() warnings issued */ #define UNIXFILE_BLOCK 0x0200 /* Next SHM lock might block */ /* ** Include code that is common to all os_*.c files */ /************** Include os_common.h in the middle of os_unix.c ***************/ /************** Begin file os_common.h ***************************************/ /* ** 2004 May 22 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. 
** ****************************************************************************** ** ** This file contains macros and a little bit of code that is common to ** all of the platform-specific files (os_*.c) and is #included into those ** files. ** ** This file should be #included by the os_*.c files only. It is not a ** general purpose header file. */ #ifndef _OS_COMMON_H_ #define _OS_COMMON_H_ /* ** At least two bugs have slipped in because we changed the MEMORY_DEBUG ** macro to SQLITE_DEBUG and some older makefiles have not yet made the ** switch. The following code should catch this problem at compile-time. */ #ifdef MEMORY_DEBUG # error "The MEMORY_DEBUG macro is obsolete. Use SQLITE_DEBUG instead." #endif /* ** Macros for performance tracing. Normally turned off. Only works ** on i486 hardware. */ #ifdef SQLITE_PERFORMANCE_TRACE /* ** hwtime.h contains inline assembler code for implementing ** high-performance timing routines. */ /************** Include hwtime.h in the middle of os_common.h ****************/ /************** Begin file hwtime.h ******************************************/ /* ** 2008 May 27 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ****************************************************************************** ** ** This file contains inline asm code for retrieving "high-performance" ** counters for x86 class CPUs. */ #ifndef _HWTIME_H_ #define _HWTIME_H_ /* ** The following routine only works on pentium-class (or newer) processors. ** It uses the RDTSC opcode to read the cycle count value out of the ** processor and returns that value. This can be used for high-res ** profiling. */ #if (defined(__GNUC__) || defined(_MSC_VER)) && \ (defined(i386) || defined(__i386__) || defined(_M_IX86)) #if defined(__GNUC__) __inline__ sqlite_uint64 sqlite3Hwtime(void){ unsigned int lo, hi; __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi)); return (sqlite_uint64)hi << 32 | lo; } #elif defined(_MSC_VER) __declspec(naked) __inline sqlite_uint64 __cdecl sqlite3Hwtime(void){ __asm { rdtsc ret ; return value at EDX:EAX } } #endif #elif (defined(__GNUC__) && defined(__x86_64__)) __inline__ sqlite_uint64 sqlite3Hwtime(void){ unsigned long val; __asm__ __volatile__ ("rdtsc" : "=A" (val)); return val; } #elif (defined(__GNUC__) && defined(__ppc__)) __inline__ sqlite_uint64 sqlite3Hwtime(void){ unsigned long long retval; unsigned long junk; __asm__ __volatile__ ("\n\ 1: mftbu %1\n\ mftb %L0\n\ mftbu %0\n\ cmpw %0,%1\n\ bne 1b" : "=r" (retval), "=r" (junk)); return retval; } #else #error Need implementation of sqlite3Hwtime() for your platform. /* ** To compile without implementing sqlite3Hwtime() for your platform, ** you can remove the above #error and use the following ** stub function. You will lose timing support for many ** of the debugging and testing utilities, but it should at ** least compile and run. 
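/*
** Editorial sketch (not part of the SQLite sources): how a raw cycle counter
** such as sqlite3Hwtime() above is typically used.  The result is CPU
** cycles, not wall-clock time, and is only comparable on the same machine.
** The fragment is illustration only (do_some_work is hypothetical), so it
** is fenced off with #if 0.
*/
#if 0
static sqlite_uint64 time_some_work(void){
  sqlite_uint64 t0 = sqlite3Hwtime();   /* snapshot before              */
  do_some_work();                       /* hypothetical workload        */
  return sqlite3Hwtime() - t0;          /* elapsed cycles for the work  */
}
#endif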
*/ SQLITE_PRIVATE sqlite_uint64 sqlite3Hwtime(void){ return ((sqlite_uint64)0); } #endif #endif /* !defined(_HWTIME_H_) */ /************** End of hwtime.h **********************************************/ /************** Continuing where we left off in os_common.h ******************/ static sqlite_uint64 g_start; static sqlite_uint64 g_elapsed; #define TIMER_START g_start=sqlite3Hwtime() #define TIMER_END g_elapsed=sqlite3Hwtime()-g_start #define TIMER_ELAPSED g_elapsed #else #define TIMER_START #define TIMER_END #define TIMER_ELAPSED ((sqlite_uint64)0) #endif /* ** If we compile with the SQLITE_TEST macro set, then the following block ** of code will give us the ability to simulate a disk I/O error. This ** is used for testing the I/O recovery logic. */ #ifdef SQLITE_TEST SQLITE_API int sqlite3_io_error_hit = 0; /* Total number of I/O Errors */ SQLITE_API int sqlite3_io_error_hardhit = 0; /* Number of non-benign errors */ SQLITE_API int sqlite3_io_error_pending = 0; /* Count down to first I/O error */ SQLITE_API int sqlite3_io_error_persist = 0; /* True if I/O errors persist */ SQLITE_API int sqlite3_io_error_benign = 0; /* True if errors are benign */ SQLITE_API int sqlite3_diskfull_pending = 0; SQLITE_API int sqlite3_diskfull = 0; #define SimulateIOErrorBenign(X) sqlite3_io_error_benign=(X) #define SimulateIOError(CODE) \ if( (sqlite3_io_error_persist && sqlite3_io_error_hit) \ || sqlite3_io_error_pending-- == 1 ) \ { local_ioerr(); CODE; } static void local_ioerr(){ IOTRACE(("IOERR\n")); sqlite3_io_error_hit++; if( !sqlite3_io_error_benign ) sqlite3_io_error_hardhit++; } #define SimulateDiskfullError(CODE) \ if( sqlite3_diskfull_pending ){ \ if( sqlite3_diskfull_pending == 1 ){ \ local_ioerr(); \ sqlite3_diskfull = 1; \ sqlite3_io_error_hit = 1; \ CODE; \ }else{ \ sqlite3_diskfull_pending--; \ } \ } #else #define SimulateIOErrorBenign(X) #define SimulateIOError(A) #define SimulateDiskfullError(A) #endif /* ** When testing, keep a count of the number of open files. */ #ifdef SQLITE_TEST SQLITE_API int sqlite3_open_file_count = 0; #define OpenCounter(X) sqlite3_open_file_count+=(X) #else #define OpenCounter(X) #endif #endif /* !defined(_OS_COMMON_H_) */ /************** End of os_common.h *******************************************/ /************** Continuing where we left off in os_unix.c ********************/ /* ** Define various macros that are missing from some systems. */ #ifndef O_LARGEFILE # define O_LARGEFILE 0 #endif #ifdef SQLITE_DISABLE_LFS # undef O_LARGEFILE # define O_LARGEFILE 0 #endif #ifndef O_NOFOLLOW # define O_NOFOLLOW 0 #endif #ifndef O_BINARY # define O_BINARY 0 #endif /* ** The threadid macro resolves to the thread-id or to 0. Used for ** testing and debugging only. */ #if SQLITE_THREADSAFE #define threadid pthread_self() #else #define threadid 0 #endif /* ** HAVE_MREMAP defaults to true on Linux and false everywhere else. */ #if !defined(HAVE_MREMAP) # if defined(__linux__) && defined(_GNU_SOURCE) # define HAVE_MREMAP 1 # else # define HAVE_MREMAP 0 # endif #endif /* ** Explicitly call the 64-bit version of lseek() on Android. Otherwise, lseek() ** is the 32-bit version, even if _FILE_OFFSET_BITS=64 is defined. */ #ifdef __ANDROID__ # define lseek lseek64 #endif /* ** Different Unix systems declare open() in different ways. Same use ** open(const char*,int,mode_t). Others use open(const char*,int,...). ** The difference is important when using a pointer to the function. 
** ** The safest way to deal with the problem is to always use this wrapper ** which always has the same well-defined interface. */ static int posixOpen(const char *zFile, int flags, int mode){ return open(zFile, flags, mode); } /* ** On some systems, calls to fchown() will trigger a message in a security ** log if they come from non-root processes. So avoid calling fchown() if ** we are not running as root. */ static int posixFchown(int fd, uid_t uid, gid_t gid){ #if OS_VXWORKS return 0; #else return geteuid() ? 0 : fchown(fd,uid,gid); #endif } /* Forward reference */ static int openDirectory(const char*, int*); static int unixGetpagesize(void); /* ** Many system calls are accessed through pointer-to-functions so that ** they may be overridden at runtime to facilitate fault injection during ** testing and sandboxing. The following array holds the names and pointers ** to all overrideable system calls. */ static struct unix_syscall { const char *zName; /* Name of the system call */ sqlite3_syscall_ptr pCurrent; /* Current value of the system call */ sqlite3_syscall_ptr pDefault; /* Default value */ } aSyscall[] = { { "open", (sqlite3_syscall_ptr)posixOpen, 0 }, #define osOpen ((int(*)(const char*,int,int))aSyscall[0].pCurrent) { "close", (sqlite3_syscall_ptr)close, 0 }, #define osClose ((int(*)(int))aSyscall[1].pCurrent) { "access", (sqlite3_syscall_ptr)access, 0 }, #define osAccess ((int(*)(const char*,int))aSyscall[2].pCurrent) { "getcwd", (sqlite3_syscall_ptr)getcwd, 0 }, #define osGetcwd ((char*(*)(char*,size_t))aSyscall[3].pCurrent) { "stat", (sqlite3_syscall_ptr)stat, 0 }, #define osStat ((int(*)(const char*,struct stat*))aSyscall[4].pCurrent) /* ** The DJGPP compiler environment looks mostly like Unix, but it ** lacks the fcntl() system call. So redefine fcntl() to be something ** that always succeeds. This means that locking does not occur under ** DJGPP. But it is DOS - what did you expect? 
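**
** Platform quirks aside, the reason every entry in this table is a
** function pointer is that a test harness can replace individual system
** calls at run-time through the xSetSystemCall() method of the "unix"
** VFS, implemented below.  A minimal fault-injection sketch (illustrative
** only; myFailingOpen is a hypothetical replacement supplied by the
** harness, and the usual errno.h declarations are assumed):
**
**       static int myFailingOpen(const char *z, int f, int m){
**         (void)z; (void)f; (void)m;
**         errno = EIO;
**         return -1;
**       }
**
**       sqlite3_vfs *pVfs = sqlite3_vfs_find("unix");
**       if( pVfs && pVfs->iVersion>=3 && pVfs->xSetSystemCall ){
**         pVfs->xSetSystemCall(pVfs, "open",
**                              (sqlite3_syscall_ptr)myFailingOpen);
**       }
**
** Passing a NULL function pointer restores the default for the named
** call, and passing a NULL name restores every call at once.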
*/ #ifdef __DJGPP__ { "fstat", 0, 0 }, #define osFstat(a,b,c) 0 #else { "fstat", (sqlite3_syscall_ptr)fstat, 0 }, #define osFstat ((int(*)(int,struct stat*))aSyscall[5].pCurrent) #endif { "ftruncate", (sqlite3_syscall_ptr)ftruncate, 0 }, #define osFtruncate ((int(*)(int,off_t))aSyscall[6].pCurrent) { "fcntl", (sqlite3_syscall_ptr)fcntl, 0 }, #define osFcntl ((int(*)(int,int,...))aSyscall[7].pCurrent) { "read", (sqlite3_syscall_ptr)read, 0 }, #define osRead ((ssize_t(*)(int,void*,size_t))aSyscall[8].pCurrent) #if defined(USE_PREAD) || SQLITE_ENABLE_LOCKING_STYLE { "pread", (sqlite3_syscall_ptr)pread, 0 }, #else { "pread", (sqlite3_syscall_ptr)0, 0 }, #endif #define osPread ((ssize_t(*)(int,void*,size_t,off_t))aSyscall[9].pCurrent) #if defined(USE_PREAD64) { "pread64", (sqlite3_syscall_ptr)pread64, 0 }, #else { "pread64", (sqlite3_syscall_ptr)0, 0 }, #endif #define osPread64 ((ssize_t(*)(int,void*,size_t,off_t))aSyscall[10].pCurrent) { "write", (sqlite3_syscall_ptr)write, 0 }, #define osWrite ((ssize_t(*)(int,const void*,size_t))aSyscall[11].pCurrent) #if defined(USE_PREAD) || SQLITE_ENABLE_LOCKING_STYLE { "pwrite", (sqlite3_syscall_ptr)pwrite, 0 }, #else { "pwrite", (sqlite3_syscall_ptr)0, 0 }, #endif #define osPwrite ((ssize_t(*)(int,const void*,size_t,off_t))\ aSyscall[12].pCurrent) #if defined(USE_PREAD64) { "pwrite64", (sqlite3_syscall_ptr)pwrite64, 0 }, #else { "pwrite64", (sqlite3_syscall_ptr)0, 0 }, #endif #define osPwrite64 ((ssize_t(*)(int,const void*,size_t,off_t))\ aSyscall[13].pCurrent) { "fchmod", (sqlite3_syscall_ptr)fchmod, 0 }, #define osFchmod ((int(*)(int,mode_t))aSyscall[14].pCurrent) #if defined(HAVE_POSIX_FALLOCATE) && HAVE_POSIX_FALLOCATE { "fallocate", (sqlite3_syscall_ptr)posix_fallocate, 0 }, #else { "fallocate", (sqlite3_syscall_ptr)0, 0 }, #endif #define osFallocate ((int(*)(int,off_t,off_t))aSyscall[15].pCurrent) { "unlink", (sqlite3_syscall_ptr)unlink, 0 }, #define osUnlink ((int(*)(const char*))aSyscall[16].pCurrent) { "openDirectory", (sqlite3_syscall_ptr)openDirectory, 0 }, #define osOpenDirectory ((int(*)(const char*,int*))aSyscall[17].pCurrent) { "mkdir", (sqlite3_syscall_ptr)mkdir, 0 }, #define osMkdir ((int(*)(const char*,mode_t))aSyscall[18].pCurrent) { "rmdir", (sqlite3_syscall_ptr)rmdir, 0 }, #define osRmdir ((int(*)(const char*))aSyscall[19].pCurrent) { "fchown", (sqlite3_syscall_ptr)posixFchown, 0 }, #define osFchown ((int(*)(int,uid_t,gid_t))aSyscall[20].pCurrent) #if !defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0 { "mmap", (sqlite3_syscall_ptr)mmap, 0 }, #define osMmap ((void*(*)(void*,size_t,int,int,int,off_t))aSyscall[21].pCurrent) { "munmap", (sqlite3_syscall_ptr)munmap, 0 }, #define osMunmap ((void*(*)(void*,size_t))aSyscall[22].pCurrent) #if HAVE_MREMAP { "mremap", (sqlite3_syscall_ptr)mremap, 0 }, #else { "mremap", (sqlite3_syscall_ptr)0, 0 }, #endif #define osMremap ((void*(*)(void*,size_t,size_t,int,...))aSyscall[23].pCurrent) { "getpagesize", (sqlite3_syscall_ptr)unixGetpagesize, 0 }, #define osGetpagesize ((int(*)(void))aSyscall[24].pCurrent) #endif }; /* End of the overrideable system calls */ /* ** This is the xSetSystemCall() method of sqlite3_vfs for all of the ** "unix" VFSes. Return SQLITE_OK opon successfully updating the ** system call pointer, or SQLITE_NOTFOUND if there is no configurable ** system call named zName. */ static int unixSetSystemCall( sqlite3_vfs *pNotUsed, /* The VFS pointer. 
Not used */ const char *zName, /* Name of system call to override */ sqlite3_syscall_ptr pNewFunc /* Pointer to new system call value */ ){ unsigned int i; int rc = SQLITE_NOTFOUND; UNUSED_PARAMETER(pNotUsed); if( zName==0 ){ /* If no zName is given, restore all system calls to their default ** settings and return NULL */ rc = SQLITE_OK; for(i=0; i=SQLITE_MINIMUM_FILE_DESCRIPTOR ) break; osClose(fd); sqlite3_log(SQLITE_WARNING, "attempt to open \"%s\" as file descriptor %d", z, fd); fd = -1; if( osOpen("/dev/null", f, m)<0 ) break; } if( fd>=0 ){ if( m!=0 ){ struct stat statbuf; if( osFstat(fd, &statbuf)==0 && statbuf.st_size==0 && (statbuf.st_mode&0777)!=m ){ osFchmod(fd, m); } } #if defined(FD_CLOEXEC) && (!defined(O_CLOEXEC) || O_CLOEXEC==0) osFcntl(fd, F_SETFD, osFcntl(fd, F_GETFD, 0) | FD_CLOEXEC); #endif } return fd; } /* ** Helper functions to obtain and relinquish the global mutex. The ** global mutex is used to protect the unixInodeInfo and ** vxworksFileId objects used by this file, all of which may be ** shared by multiple threads. ** ** Function unixMutexHeld() is used to assert() that the global mutex ** is held when required. This function is only used as part of assert() ** statements. e.g. ** ** unixEnterMutex() ** assert( unixMutexHeld() ); ** unixEnterLeave() */ static void unixEnterMutex(void){ sqlite3_mutex_enter(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_VFS1)); } static void unixLeaveMutex(void){ sqlite3_mutex_leave(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_VFS1)); } #ifdef SQLITE_DEBUG static int unixMutexHeld(void) { return sqlite3_mutex_held(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_VFS1)); } #endif #ifdef SQLITE_HAVE_OS_TRACE /* ** Helper function for printing out trace information from debugging ** binaries. This returns the string representation of the supplied ** integer lock-type. */ static const char *azFileLock(int eFileLock){ switch( eFileLock ){ case NO_LOCK: return "NONE"; case SHARED_LOCK: return "SHARED"; case RESERVED_LOCK: return "RESERVED"; case PENDING_LOCK: return "PENDING"; case EXCLUSIVE_LOCK: return "EXCLUSIVE"; } return "ERROR"; } #endif #ifdef SQLITE_LOCK_TRACE /* ** Print out information about all locking operations. ** ** This routine is used for troubleshooting locks on multithreaded ** platforms. Enable by compiling with the -DSQLITE_LOCK_TRACE ** command-line option on the compiler. This code is normally ** turned off. 
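**
** For example (an illustrative configuration, not part of this source),
** adding
**
**       #define SQLITE_DEBUG 1
**       #define SQLITE_LOCK_TRACE 1
**
** before the amalgamation is compiled (or the equivalent -D options on
** the compiler command line) activates the wrapper below.  SQLITE_DEBUG
** is needed as well because the trace output goes through
** sqlite3DebugPrintf(), which is only compiled into debugging builds.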
*/ static int lockTrace(int fd, int op, struct flock *p){ char *zOpName, *zType; int s; int savedErrno; if( op==F_GETLK ){ zOpName = "GETLK"; }else if( op==F_SETLK ){ zOpName = "SETLK"; }else{ s = osFcntl(fd, op, p); sqlite3DebugPrintf("fcntl unknown %d %d %d\n", fd, op, s); return s; } if( p->l_type==F_RDLCK ){ zType = "RDLCK"; }else if( p->l_type==F_WRLCK ){ zType = "WRLCK"; }else if( p->l_type==F_UNLCK ){ zType = "UNLCK"; }else{ assert( 0 ); } assert( p->l_whence==SEEK_SET ); s = osFcntl(fd, op, p); savedErrno = errno; sqlite3DebugPrintf("fcntl %d %d %s %s %d %d %d %d\n", threadid, fd, zOpName, zType, (int)p->l_start, (int)p->l_len, (int)p->l_pid, s); if( s==(-1) && op==F_SETLK && (p->l_type==F_RDLCK || p->l_type==F_WRLCK) ){ struct flock l2; l2 = *p; osFcntl(fd, F_GETLK, &l2); if( l2.l_type==F_RDLCK ){ zType = "RDLCK"; }else if( l2.l_type==F_WRLCK ){ zType = "WRLCK"; }else if( l2.l_type==F_UNLCK ){ zType = "UNLCK"; }else{ assert( 0 ); } sqlite3DebugPrintf("fcntl-failure-reason: %s %d %d %d\n", zType, (int)l2.l_start, (int)l2.l_len, (int)l2.l_pid); } errno = savedErrno; return s; } #undef osFcntl #define osFcntl lockTrace #endif /* SQLITE_LOCK_TRACE */ /* ** Retry ftruncate() calls that fail due to EINTR ** ** All calls to ftruncate() within this file should be made through ** this wrapper. On the Android platform, bypassing the logic below ** could lead to a corrupt database. */ static int robust_ftruncate(int h, sqlite3_int64 sz){ int rc; #ifdef __ANDROID__ /* On Android, ftruncate() always uses 32-bit offsets, even if ** _FILE_OFFSET_BITS=64 is defined. This means it is unsafe to attempt to ** truncate a file to any size larger than 2GiB. Silently ignore any ** such attempts. */ if( sz>(sqlite3_int64)0x7FFFFFFF ){ rc = SQLITE_OK; }else #endif do{ rc = osFtruncate(h,sz); }while( rc<0 && errno==EINTR ); return rc; } /* ** This routine translates a standard POSIX errno code into something ** useful to the clients of the sqlite3 functions. Specifically, it is ** intended to translate a variety of "try again" errors into SQLITE_BUSY ** and a variety of "please close the file descriptor NOW" errors into ** SQLITE_IOERR ** ** Errors during initialization of locks, or file system support for locks, ** should handle ENOLCK, ENOTSUP, EOPNOTSUPP separately. */ static int sqliteErrorFromPosixError(int posixError, int sqliteIOErr) { switch (posixError) { #if 0 /* At one point this code was not commented out. In theory, this branch ** should never be hit, as this function should only be called after ** a locking-related function (i.e. fcntl()) has returned non-zero with ** the value of errno as the first argument. Since a system call has failed, ** errno should be non-zero. ** ** Despite this, if errno really is zero, we still don't want to return ** SQLITE_OK. The system call failed, and *some* SQLite error should be ** propagated back to the caller. Commenting this branch out means errno==0 ** will be handled by the "default:" case below. 
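**
** For a sense of the mapping performed below, two illustrative calls:
**
**       sqliteErrorFromPosixError(EAGAIN, SQLITE_IOERR_LOCK);
**       sqliteErrorFromPosixError(EIO,    SQLITE_IOERR_LOCK);
**
** The first returns SQLITE_BUSY (a "try again" condition during locking);
** the second returns the SQLITE_IOERR_LOCK code that was passed in,
** telling the caller to give up on the file descriptor.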
*/ case 0: return SQLITE_OK; #endif case EAGAIN: case ETIMEDOUT: case EBUSY: case EINTR: case ENOLCK: /* random NFS retry error, unless during file system support * introspection, in which it actually means what it says */ return SQLITE_BUSY; case EACCES: /* EACCES is like EAGAIN during locking operations, but not any other time*/ if( (sqliteIOErr == SQLITE_IOERR_LOCK) || (sqliteIOErr == SQLITE_IOERR_UNLOCK) || (sqliteIOErr == SQLITE_IOERR_RDLOCK) || (sqliteIOErr == SQLITE_IOERR_CHECKRESERVEDLOCK) ){ return SQLITE_BUSY; } /* else fall through */ case EPERM: return SQLITE_PERM; #if EOPNOTSUPP!=ENOTSUP case EOPNOTSUPP: /* something went terribly awry, unless during file system support * introspection, in which it actually means what it says */ #endif #ifdef ENOTSUP case ENOTSUP: /* invalid fd, unless during file system support introspection, in which * it actually means what it says */ #endif case EIO: case EBADF: case EINVAL: case ENOTCONN: case ENODEV: case ENXIO: case ENOENT: #ifdef ESTALE /* ESTALE is not defined on Interix systems */ case ESTALE: #endif case ENOSYS: /* these should force the client to close the file and reconnect */ default: return sqliteIOErr; } } /****************************************************************************** ****************** Begin Unique File ID Utility Used By VxWorks *************** ** ** On most versions of unix, we can get a unique ID for a file by concatenating ** the device number and the inode number. But this does not work on VxWorks. ** On VxWorks, a unique file id must be based on the canonical filename. ** ** A pointer to an instance of the following structure can be used as a ** unique file ID in VxWorks. Each instance of this structure contains ** a copy of the canonical filename. There is also a reference count. ** The structure is reclaimed when the number of pointers to it drops to ** zero. ** ** There are never very many files open at one time and lookups are not ** a performance-critical path, so it is sufficient to put these ** structures on a linked list. */ struct vxworksFileId { struct vxworksFileId *pNext; /* Next in a list of them all */ int nRef; /* Number of references to this one */ int nName; /* Length of the zCanonicalName[] string */ char *zCanonicalName; /* Canonical filename */ }; #if OS_VXWORKS /* ** All unique filenames are held on a linked list headed by this ** variable: */ static struct vxworksFileId *vxworksFileList = 0; /* ** Simplify a filename into its canonical form ** by making the following changes: ** ** * removing any trailing and duplicate / ** * convert /./ into just / ** * convert /A/../ where A is any simple name into just / ** ** Changes are made in-place. Return the new name length. ** ** The original filename is in z[0..n-1]. Return the number of ** characters in the simplified name. */ static int vxworksSimplifyName(char *z, int n){ int i, j; while( n>1 && z[n-1]=='/' ){ n--; } for(i=j=0; i0 && z[j-1]!='/' ){ j--; } if( j>0 ){ j--; } i += 2; continue; } } z[j++] = z[i]; } z[j] = 0; return j; } /* ** Find a unique file ID for the given absolute pathname. Return ** a pointer to the vxworksFileId object. This pointer is the unique ** file ID. ** ** The nRef field of the vxworksFileId object is incremented before ** the object is returned. A new vxworksFileId object is created ** and added to the global list if necessary. ** ** If a memory allocation error occurs, return NULL. 
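**
** For reference, the canonical form produced by vxworksSimplifyName()
** above collapses duplicate and trailing slashes and resolves "." and
** simple ".." components in place.  One worked example (hypothetical
** path):
**
**       "/tmp//logs/./x/../db/test.db/"   simplifies to   "/tmp/logs/db/test.db"
**
** so two different spellings of the same absolute pathname map to the
** same vxworksFileId object.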
*/ static struct vxworksFileId *vxworksFindFileId(const char *zAbsoluteName){ struct vxworksFileId *pNew; /* search key and new file ID */ struct vxworksFileId *pCandidate; /* For looping over existing file IDs */ int n; /* Length of zAbsoluteName string */ assert( zAbsoluteName[0]=='/' ); n = (int)strlen(zAbsoluteName); pNew = sqlite3_malloc64( sizeof(*pNew) + (n+1) ); if( pNew==0 ) return 0; pNew->zCanonicalName = (char*)&pNew[1]; memcpy(pNew->zCanonicalName, zAbsoluteName, n+1); n = vxworksSimplifyName(pNew->zCanonicalName, n); /* Search for an existing entry that matching the canonical name. ** If found, increment the reference count and return a pointer to ** the existing file ID. */ unixEnterMutex(); for(pCandidate=vxworksFileList; pCandidate; pCandidate=pCandidate->pNext){ if( pCandidate->nName==n && memcmp(pCandidate->zCanonicalName, pNew->zCanonicalName, n)==0 ){ sqlite3_free(pNew); pCandidate->nRef++; unixLeaveMutex(); return pCandidate; } } /* No match was found. We will make a new file ID */ pNew->nRef = 1; pNew->nName = n; pNew->pNext = vxworksFileList; vxworksFileList = pNew; unixLeaveMutex(); return pNew; } /* ** Decrement the reference count on a vxworksFileId object. Free ** the object when the reference count reaches zero. */ static void vxworksReleaseFileId(struct vxworksFileId *pId){ unixEnterMutex(); assert( pId->nRef>0 ); pId->nRef--; if( pId->nRef==0 ){ struct vxworksFileId **pp; for(pp=&vxworksFileList; *pp && *pp!=pId; pp = &((*pp)->pNext)){} assert( *pp==pId ); *pp = pId->pNext; sqlite3_free(pId); } unixLeaveMutex(); } #endif /* OS_VXWORKS */ /*************** End of Unique File ID Utility Used By VxWorks **************** ******************************************************************************/ /****************************************************************************** *************************** Posix Advisory Locking **************************** ** ** POSIX advisory locks are broken by design. ANSI STD 1003.1 (1996) ** section 6.5.2.2 lines 483 through 490 specify that when a process ** sets or clears a lock, that operation overrides any prior locks set ** by the same process. It does not explicitly say so, but this implies ** that it overrides locks set by the same process using a different ** file descriptor. Consider this test case: ** ** int fd1 = open("./file1", O_RDWR|O_CREAT, 0644); ** int fd2 = open("./file2", O_RDWR|O_CREAT, 0644); ** ** Suppose ./file1 and ./file2 are really the same file (because ** one is a hard or symbolic link to the other) then if you set ** an exclusive lock on fd1, then try to get an exclusive lock ** on fd2, it works. I would have expected the second lock to ** fail since there was already a lock on the file due to fd1. ** But not so. Since both locks came from the same process, the ** second overrides the first, even though they were on different ** file descriptors opened on different file names. ** ** This means that we cannot use POSIX locks to synchronize file access ** among competing threads of the same process. POSIX locks will work fine ** to synchronize access for threads in separate processes, but not ** threads within the same process. ** ** To work around the problem, SQLite has to manage file locks internally ** on its own. Whenever a new database is opened, we have to find the ** specific inode of the database file (the inode is determined by the ** st_dev and st_ino fields of the stat structure that fstat() fills in) ** and check for locks already existing on that inode. 
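**
** The same-process behavior described above is easy to reproduce outside
** of SQLite.  In the following stand-alone sketch (illustrative only; the
** path is hypothetical and the usual fcntl.h, string.h and unistd.h
** declarations are assumed) both F_SETLK requests succeed, even though
** each asks for an exclusive lock on the same byte of the same file:
**
**       int fd1 = open("/tmp/demo.db", O_RDWR|O_CREAT, 0644);
**       int fd2 = open("/tmp/demo.db", O_RDWR|O_CREAT, 0644);
**       struct flock l;
**       memset(&l, 0, sizeof(l));
**       l.l_type = F_WRLCK;
**       l.l_whence = SEEK_SET;
**       l.l_start = 0;
**       l.l_len = 1;
**       fcntl(fd1, F_SETLK, &l);
**       fcntl(fd2, F_SETLK, &l);
**
** Both calls return 0.  Hence the bookkeeping described next, which is
** keyed by inode rather than by file descriptor.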
When locks are ** created or removed, we have to look at our own internal record of the ** locks to see if another thread has previously set a lock on that same ** inode. ** ** (Aside: The use of inode numbers as unique IDs does not work on VxWorks. ** For VxWorks, we have to use the alternative unique ID system based on ** canonical filename and implemented in the previous division.) ** ** The sqlite3_file structure for POSIX is no longer just an integer file ** descriptor. It is now a structure that holds the integer file ** descriptor and a pointer to a structure that describes the internal ** locks on the corresponding inode. There is one locking structure ** per inode, so if the same inode is opened twice, both unixFile structures ** point to the same locking structure. The locking structure keeps ** a reference count (so we will know when to delete it) and a "cnt" ** field that tells us its internal lock status. cnt==0 means the ** file is unlocked. cnt==-1 means the file has an exclusive lock. ** cnt>0 means there are cnt shared locks on the file. ** ** Any attempt to lock or unlock a file first checks the locking ** structure. The fcntl() system call is only invoked to set a ** POSIX lock if the internal lock structure transitions between ** a locked and an unlocked state. ** ** But wait: there are yet more problems with POSIX advisory locks. ** ** If you close a file descriptor that points to a file that has locks, ** all locks on that file that are owned by the current process are ** released. To work around this problem, each unixInodeInfo object ** maintains a count of the number of pending locks on tha inode. ** When an attempt is made to close an unixFile, if there are ** other unixFile open on the same inode that are holding locks, the call ** to close() the file descriptor is deferred until all of the locks clear. ** The unixInodeInfo structure keeps a list of file descriptors that need to ** be closed and that list is walked (and cleared) when the last lock ** clears. ** ** Yet another problem: LinuxThreads do not play well with posix locks. ** ** Many older versions of linux use the LinuxThreads library which is ** not posix compliant. Under LinuxThreads, a lock created by thread ** A cannot be modified or overridden by a different thread B. ** Only thread A can modify the lock. Locking behavior is correct ** if the appliation uses the newer Native Posix Thread Library (NPTL) ** on linux - with NPTL a lock created by thread A can override locks ** in thread B. But there is no way to know at compile-time which ** threading library is being used. So there is no way to know at ** compile-time whether or not thread A can override locks on thread B. ** One has to do a run-time check to discover the behavior of the ** current process. ** ** SQLite used to support LinuxThreads. But support for LinuxThreads ** was dropped beginning with version 3.7.0. SQLite will still work with ** LinuxThreads provided that (1) there is no more than one connection ** per database file in the same process and (2) database connections ** do not move across threads. */ /* ** An instance of the following structure serves as the key used ** to locate a particular unixInodeInfo object. */ struct unixFileId { dev_t dev; /* Device number */ #if OS_VXWORKS struct vxworksFileId *pId; /* Unique file ID for vxworks. */ #else ino_t ino; /* Inode number */ #endif }; /* ** An instance of the following structure is allocated for each open ** inode. 
Or, on LinuxThreads, there is one of these structures for ** each inode opened by each thread. ** ** A single inode can have multiple file descriptors, so each unixFile ** structure contains a pointer to an instance of this object and this ** object keeps a count of the number of unixFile pointing to it. */ struct unixInodeInfo { struct unixFileId fileId; /* The lookup key */ int nShared; /* Number of SHARED locks held */ unsigned char eFileLock; /* One of SHARED_LOCK, RESERVED_LOCK etc. */ unsigned char bProcessLock; /* An exclusive process lock is held */ int nRef; /* Number of pointers to this structure */ unixShmNode *pShmNode; /* Shared memory associated with this inode */ int nLock; /* Number of outstanding file locks */ UnixUnusedFd *pUnused; /* Unused file descriptors to close */ unixInodeInfo *pNext; /* List of all unixInodeInfo objects */ unixInodeInfo *pPrev; /* .... doubly linked */ #if SQLITE_ENABLE_LOCKING_STYLE unsigned long long sharedByte; /* for AFP simulated shared lock */ #endif #if OS_VXWORKS sem_t *pSem; /* Named POSIX semaphore */ char aSemName[MAX_PATHNAME+2]; /* Name of that semaphore */ #endif }; /* ** A lists of all unixInodeInfo objects. */ static unixInodeInfo *inodeList = 0; /* ** ** This function - unixLogError_x(), is only ever called via the macro ** unixLogError(). ** ** It is invoked after an error occurs in an OS function and errno has been ** set. It logs a message using sqlite3_log() containing the current value of ** errno and, if possible, the human-readable equivalent from strerror() or ** strerror_r(). ** ** The first argument passed to the macro should be the error code that ** will be returned to SQLite (e.g. SQLITE_IOERR_DELETE, SQLITE_CANTOPEN). ** The two subsequent arguments should be the name of the OS function that ** failed (e.g. "unlink", "open") and the associated file-system path, ** if any. */ #define unixLogError(a,b,c) unixLogErrorAtLine(a,b,c,__LINE__) static int unixLogErrorAtLine( int errcode, /* SQLite error code */ const char *zFunc, /* Name of OS function that failed */ const char *zPath, /* File path associated with error */ int iLine /* Source line number where error occurred */ ){ char *zErr; /* Message from strerror() or equivalent */ int iErrno = errno; /* Saved syscall error number */ /* If this is not a threadsafe build (SQLITE_THREADSAFE==0), then use ** the strerror() function to obtain the human-readable error message ** equivalent to errno. Otherwise, use strerror_r(). */ #if SQLITE_THREADSAFE && defined(HAVE_STRERROR_R) char aErr[80]; memset(aErr, 0, sizeof(aErr)); zErr = aErr; /* If STRERROR_R_CHAR_P (set by autoconf scripts) or __USE_GNU is defined, ** assume that the system provides the GNU version of strerror_r() that ** returns a pointer to a buffer containing the error message. That pointer ** may point to aErr[], or it may point to some static storage somewhere. ** Otherwise, assume that the system provides the POSIX version of ** strerror_r(), which always writes an error message into aErr[]. ** ** If the code incorrectly assumes that it is the POSIX version that is ** available, the error message will often be an empty string. Not a ** huge problem. Incorrectly concluding that the GNU version is available ** could lead to a segfault though. */ #if defined(STRERROR_R_CHAR_P) || defined(__USE_GNU) zErr = # endif strerror_r(iErrno, aErr, sizeof(aErr)-1); #elif SQLITE_THREADSAFE /* This is a threadsafe build, but strerror_r() is not available. */ zErr = ""; #else /* Non-threadsafe build, use strerror(). 
*/ zErr = strerror(iErrno); #endif if( zPath==0 ) zPath = ""; sqlite3_log(errcode, "os_unix.c:%d: (%d) %s(%s) - %s", iLine, iErrno, zFunc, zPath, zErr ); return errcode; } /* ** Close a file descriptor. ** ** We assume that close() almost always works, since it is only in a ** very sick application or on a very sick platform that it might fail. ** If it does fail, simply leak the file descriptor, but do log the ** error. ** ** Note that it is not safe to retry close() after EINTR since the ** file descriptor might have already been reused by another thread. ** So we don't even try to recover from an EINTR. Just log the error ** and move on. */ static void robust_close(unixFile *pFile, int h, int lineno){ if( osClose(h) ){ unixLogErrorAtLine(SQLITE_IOERR_CLOSE, "close", pFile ? pFile->zPath : 0, lineno); } } /* ** Set the pFile->lastErrno. Do this in a subroutine as that provides ** a convenient place to set a breakpoint. */ static void storeLastErrno(unixFile *pFile, int error){ pFile->lastErrno = error; } /* ** Close all file descriptors accumuated in the unixInodeInfo->pUnused list. */ static void closePendingFds(unixFile *pFile){ unixInodeInfo *pInode = pFile->pInode; UnixUnusedFd *p; UnixUnusedFd *pNext; for(p=pInode->pUnused; p; p=pNext){ pNext = p->pNext; robust_close(pFile, p->fd, __LINE__); sqlite3_free(p); } pInode->pUnused = 0; } /* ** Release a unixInodeInfo structure previously allocated by findInodeInfo(). ** ** The mutex entered using the unixEnterMutex() function must be held ** when this function is called. */ static void releaseInodeInfo(unixFile *pFile){ unixInodeInfo *pInode = pFile->pInode; assert( unixMutexHeld() ); if( ALWAYS(pInode) ){ pInode->nRef--; if( pInode->nRef==0 ){ assert( pInode->pShmNode==0 ); closePendingFds(pFile); if( pInode->pPrev ){ assert( pInode->pPrev->pNext==pInode ); pInode->pPrev->pNext = pInode->pNext; }else{ assert( inodeList==pInode ); inodeList = pInode->pNext; } if( pInode->pNext ){ assert( pInode->pNext->pPrev==pInode ); pInode->pNext->pPrev = pInode->pPrev; } sqlite3_free(pInode); } } } /* ** Given a file descriptor, locate the unixInodeInfo object that ** describes that file descriptor. Create a new one if necessary. The ** return value might be uninitialized if an error occurs. ** ** The mutex entered using the unixEnterMutex() function must be held ** when this function is called. ** ** Return an appropriate error code. */ static int findInodeInfo( unixFile *pFile, /* Unix file with file desc used in the key */ unixInodeInfo **ppInode /* Return the unixInodeInfo object here */ ){ int rc; /* System call return code */ int fd; /* The file descriptor for pFile */ struct unixFileId fileId; /* Lookup key for the unixInodeInfo */ struct stat statbuf; /* Low-level file information */ unixInodeInfo *pInode = 0; /* Candidate unixInodeInfo object */ assert( unixMutexHeld() ); /* Get low-level information about the file that we can used to ** create a unique name for the file. */ fd = pFile->h; rc = osFstat(fd, &statbuf); if( rc!=0 ){ storeLastErrno(pFile, errno); #ifdef EOVERFLOW if( pFile->lastErrno==EOVERFLOW ) return SQLITE_NOLFS; #endif return SQLITE_IOERR; } #ifdef __APPLE__ /* On OS X on an msdos filesystem, the inode number is reported ** incorrectly for zero-size files. See ticket #3260. To work ** around this problem (we consider it a bug in OS X, not SQLite) ** we always increase the file size to 1 by writing a single byte ** prior to accessing the inode number. 
The one byte written is ** an ASCII 'S' character which also happens to be the first byte ** in the header of every SQLite database. In this way, if there ** is a race condition such that another thread has already populated ** the first page of the database, no damage is done. */ if( statbuf.st_size==0 && (pFile->fsFlags & SQLITE_FSFLAGS_IS_MSDOS)!=0 ){ do{ rc = osWrite(fd, "S", 1); }while( rc<0 && errno==EINTR ); if( rc!=1 ){ storeLastErrno(pFile, errno); return SQLITE_IOERR; } rc = osFstat(fd, &statbuf); if( rc!=0 ){ storeLastErrno(pFile, errno); return SQLITE_IOERR; } } #endif memset(&fileId, 0, sizeof(fileId)); fileId.dev = statbuf.st_dev; #if OS_VXWORKS fileId.pId = pFile->pId; #else fileId.ino = statbuf.st_ino; #endif pInode = inodeList; while( pInode && memcmp(&fileId, &pInode->fileId, sizeof(fileId)) ){ pInode = pInode->pNext; } if( pInode==0 ){ pInode = sqlite3_malloc64( sizeof(*pInode) ); if( pInode==0 ){ return SQLITE_NOMEM; } memset(pInode, 0, sizeof(*pInode)); memcpy(&pInode->fileId, &fileId, sizeof(fileId)); pInode->nRef = 1; pInode->pNext = inodeList; pInode->pPrev = 0; if( inodeList ) inodeList->pPrev = pInode; inodeList = pInode; }else{ pInode->nRef++; } *ppInode = pInode; return SQLITE_OK; } /* ** Return TRUE if pFile has been renamed or unlinked since it was first opened. */ static int fileHasMoved(unixFile *pFile){ #if OS_VXWORKS return pFile->pInode!=0 && pFile->pId!=pFile->pInode->fileId.pId; #else struct stat buf; return pFile->pInode!=0 && (osStat(pFile->zPath, &buf)!=0 || buf.st_ino!=pFile->pInode->fileId.ino); #endif } /* ** Check a unixFile that is a database. Verify the following: ** ** (1) There is exactly one hard link on the file ** (2) The file is not a symbolic link ** (3) The file has not been renamed or unlinked ** ** Issue sqlite3_log(SQLITE_WARNING,...) messages if anything is not right. */ static void verifyDbFile(unixFile *pFile){ struct stat buf; int rc; if( pFile->ctrlFlags & UNIXFILE_WARNED ){ /* One or more of the following warnings have already been issued. Do not ** repeat them so as not to clutter the error log */ return; } rc = osFstat(pFile->h, &buf); if( rc!=0 ){ sqlite3_log(SQLITE_WARNING, "cannot fstat db file %s", pFile->zPath); pFile->ctrlFlags |= UNIXFILE_WARNED; return; } if( buf.st_nlink==0 && (pFile->ctrlFlags & UNIXFILE_DELETE)==0 ){ sqlite3_log(SQLITE_WARNING, "file unlinked while open: %s", pFile->zPath); pFile->ctrlFlags |= UNIXFILE_WARNED; return; } if( buf.st_nlink>1 ){ sqlite3_log(SQLITE_WARNING, "multiple links to file: %s", pFile->zPath); pFile->ctrlFlags |= UNIXFILE_WARNED; return; } if( fileHasMoved(pFile) ){ sqlite3_log(SQLITE_WARNING, "file renamed while open: %s", pFile->zPath); pFile->ctrlFlags |= UNIXFILE_WARNED; return; } } /* ** This routine checks if there is a RESERVED lock held on the specified ** file by this or any other process. If such a lock is held, set *pResOut ** to a non-zero value otherwise *pResOut is set to zero. The return value ** is set to SQLITE_OK unless an I/O error occurs during lock checking. */ static int unixCheckReservedLock(sqlite3_file *id, int *pResOut){ int rc = SQLITE_OK; int reserved = 0; unixFile *pFile = (unixFile*)id; SimulateIOError( return SQLITE_IOERR_CHECKRESERVEDLOCK; ); assert( pFile ); unixEnterMutex(); /* Because pFile->pInode is shared across threads */ /* Check if a thread in this process holds such a lock */ if( pFile->pInode->eFileLock>SHARED_LOCK ){ reserved = 1; } /* Otherwise see if some other process holds it. 
*/ #ifndef __DJGPP__ if( !reserved && !pFile->pInode->bProcessLock ){ struct flock lock; lock.l_whence = SEEK_SET; lock.l_start = RESERVED_BYTE; lock.l_len = 1; lock.l_type = F_WRLCK; if( osFcntl(pFile->h, F_GETLK, &lock) ){ rc = SQLITE_IOERR_CHECKRESERVEDLOCK; storeLastErrno(pFile, errno); } else if( lock.l_type!=F_UNLCK ){ reserved = 1; } } #endif unixLeaveMutex(); OSTRACE(("TEST WR-LOCK %d %d %d (unix)\n", pFile->h, rc, reserved)); *pResOut = reserved; return rc; } /* ** Attempt to set a system-lock on the file pFile. The lock is ** described by pLock. ** ** If the pFile was opened read/write from unix-excl, then the only lock ** ever obtained is an exclusive lock, and it is obtained exactly once ** the first time any lock is attempted. All subsequent system locking ** operations become no-ops. Locking operations still happen internally, ** in order to coordinate access between separate database connections ** within this process, but all of that is handled in memory and the ** operating system does not participate. ** ** This function is a pass-through to fcntl(F_SETLK) if pFile is using ** any VFS other than "unix-excl" or if pFile is opened on "unix-excl" ** and is read-only. ** ** Zero is returned if the call completes successfully, or -1 if a call ** to fcntl() fails. In this case, errno is set appropriately (by fcntl()). */ static int unixFileLock(unixFile *pFile, struct flock *pLock){ int rc; unixInodeInfo *pInode = pFile->pInode; assert( unixMutexHeld() ); assert( pInode!=0 ); if( ((pFile->ctrlFlags & UNIXFILE_EXCL)!=0 || pInode->bProcessLock) && ((pFile->ctrlFlags & UNIXFILE_RDONLY)==0) ){ if( pInode->bProcessLock==0 ){ struct flock lock; assert( pInode->nLock==0 ); lock.l_whence = SEEK_SET; lock.l_start = SHARED_FIRST; lock.l_len = SHARED_SIZE; lock.l_type = F_WRLCK; rc = osFcntl(pFile->h, F_SETLK, &lock); if( rc<0 ) return rc; pInode->bProcessLock = 1; pInode->nLock++; }else{ rc = 0; } }else{ rc = osFcntl(pFile->h, F_SETLK, pLock); } return rc; } /* ** Lock the file with the lock specified by parameter eFileLock - one ** of the following: ** ** (1) SHARED_LOCK ** (2) RESERVED_LOCK ** (3) PENDING_LOCK ** (4) EXCLUSIVE_LOCK ** ** Sometimes when requesting one lock state, additional lock states ** are inserted in between. The locking might fail on one of the later ** transitions leaving the lock state different from what it started but ** still short of its goal. The following chart shows the allowed ** transitions and the inserted intermediate states: ** ** UNLOCKED -> SHARED ** SHARED -> RESERVED ** SHARED -> (PENDING) -> EXCLUSIVE ** RESERVED -> (PENDING) -> EXCLUSIVE ** PENDING -> EXCLUSIVE ** ** This routine will only increase a lock. Use the sqlite3OsUnlock() ** routine to lower a locking level. */ static int unixLock(sqlite3_file *id, int eFileLock){ /* The following describes the implementation of the various locks and ** lock transitions in terms of the POSIX advisory shared and exclusive ** lock primitives (called read-locks and write-locks below, to avoid ** confusion with SQLite lock names). The algorithms are complicated ** slightly in order to be compatible with windows systems simultaneously ** accessing the same database file, in case that is ever required. ** ** Symbols defined in os.h indentify the 'pending byte' and the 'reserved ** byte', each single bytes at well known offsets, and the 'shared byte ** range', a range of 510 bytes at a well known offset. ** ** To obtain a SHARED lock, a read-lock is obtained on the 'pending ** byte'. 
If this is successful, a random byte from the 'shared byte ** range' is read-locked and the lock on the 'pending byte' released. ** ** A process may only obtain a RESERVED lock after it has a SHARED lock. ** A RESERVED lock is implemented by grabbing a write-lock on the ** 'reserved byte'. ** ** A process may only obtain a PENDING lock after it has obtained a ** SHARED lock. A PENDING lock is implemented by obtaining a write-lock ** on the 'pending byte'. This ensures that no new SHARED locks can be ** obtained, but existing SHARED locks are allowed to persist. A process ** does not have to obtain a RESERVED lock on the way to a PENDING lock. ** This property is used by the algorithm for rolling back a journal file ** after a crash. ** ** An EXCLUSIVE lock, obtained after a PENDING lock is held, is ** implemented by obtaining a write-lock on the entire 'shared byte ** range'. Since all other locks require a read-lock on one of the bytes ** within this range, this ensures that no other locks are held on the ** database. ** ** The reason a single byte cannot be used instead of the 'shared byte ** range' is that some versions of windows do not support read-locks. By ** locking a random byte from a range, concurrent SHARED locks may exist ** even if the locking primitive used is always a write-lock. */ int rc = SQLITE_OK; unixFile *pFile = (unixFile*)id; unixInodeInfo *pInode; struct flock lock; int tErrno = 0; assert( pFile ); OSTRACE(("LOCK %d %s was %s(%s,%d) pid=%d (unix)\n", pFile->h, azFileLock(eFileLock), azFileLock(pFile->eFileLock), azFileLock(pFile->pInode->eFileLock), pFile->pInode->nShared, osGetpid(0))); /* If there is already a lock of this type or more restrictive on the ** unixFile, do nothing. Don't use the end_lock: exit path, as ** unixEnterMutex() hasn't been called yet. */ if( pFile->eFileLock>=eFileLock ){ OSTRACE(("LOCK %d %s ok (already held) (unix)\n", pFile->h, azFileLock(eFileLock))); return SQLITE_OK; } /* Make sure the locking sequence is correct. ** (1) We never move from unlocked to anything higher than shared lock. ** (2) SQLite never explicitly requests a pendig lock. ** (3) A shared lock is always held when a reserve lock is requested. */ assert( pFile->eFileLock!=NO_LOCK || eFileLock==SHARED_LOCK ); assert( eFileLock!=PENDING_LOCK ); assert( eFileLock!=RESERVED_LOCK || pFile->eFileLock==SHARED_LOCK ); /* This mutex is needed because pFile->pInode is shared across threads */ unixEnterMutex(); pInode = pFile->pInode; /* If some thread using this PID has a lock via a different unixFile* ** handle that precludes the requested lock, return BUSY. */ if( (pFile->eFileLock!=pInode->eFileLock && (pInode->eFileLock>=PENDING_LOCK || eFileLock>SHARED_LOCK)) ){ rc = SQLITE_BUSY; goto end_lock; } /* If a SHARED lock is requested, and some thread using this PID already ** has a SHARED or RESERVED lock, then increment reference counts and ** return SQLITE_OK. */ if( eFileLock==SHARED_LOCK && (pInode->eFileLock==SHARED_LOCK || pInode->eFileLock==RESERVED_LOCK) ){ assert( eFileLock==SHARED_LOCK ); assert( pFile->eFileLock==0 ); assert( pInode->nShared>0 ); pFile->eFileLock = SHARED_LOCK; pInode->nShared++; pInode->nLock++; goto end_lock; } /* A PENDING lock is needed before acquiring a SHARED lock and before ** acquiring an EXCLUSIVE lock. For the SHARED lock, the PENDING will ** be released. 
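**
** For concreteness, the 'pending byte', 'reserved byte' and 'shared byte
** range' mentioned above correspond to the following symbols, defined
** earlier in this amalgamation (in the os.h section):
**
**       PENDING_BYTE    sqlite3PendingByte   (0x40000000 unless changed
**                                             by the test harness)
**       RESERVED_BYTE   PENDING_BYTE+1
**       SHARED_FIRST    PENDING_BYTE+2
**       SHARED_SIZE     510
**
** SQLite never stores database content in the page that contains these
** bytes, so taking operating-system locks on them never conflicts with
** ordinary reads and writes of data.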
*/ lock.l_len = 1L; lock.l_whence = SEEK_SET; if( eFileLock==SHARED_LOCK || (eFileLock==EXCLUSIVE_LOCK && pFile->eFileLocknShared==0 ); assert( pInode->eFileLock==0 ); assert( rc==SQLITE_OK ); /* Now get the read-lock */ lock.l_start = SHARED_FIRST; lock.l_len = SHARED_SIZE; if( unixFileLock(pFile, &lock) ){ tErrno = errno; rc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_LOCK); } /* Drop the temporary PENDING lock */ lock.l_start = PENDING_BYTE; lock.l_len = 1L; lock.l_type = F_UNLCK; if( unixFileLock(pFile, &lock) && rc==SQLITE_OK ){ /* This could happen with a network mount */ tErrno = errno; rc = SQLITE_IOERR_UNLOCK; } if( rc ){ if( rc!=SQLITE_BUSY ){ storeLastErrno(pFile, tErrno); } goto end_lock; }else{ pFile->eFileLock = SHARED_LOCK; pInode->nLock++; pInode->nShared = 1; } }else if( eFileLock==EXCLUSIVE_LOCK && pInode->nShared>1 ){ /* We are trying for an exclusive lock but another thread in this ** same process is still holding a shared lock. */ rc = SQLITE_BUSY; }else{ /* The request was for a RESERVED or EXCLUSIVE lock. It is ** assumed that there is a SHARED or greater lock on the file ** already. */ assert( 0!=pFile->eFileLock ); lock.l_type = F_WRLCK; assert( eFileLock==RESERVED_LOCK || eFileLock==EXCLUSIVE_LOCK ); if( eFileLock==RESERVED_LOCK ){ lock.l_start = RESERVED_BYTE; lock.l_len = 1L; }else{ lock.l_start = SHARED_FIRST; lock.l_len = SHARED_SIZE; } if( unixFileLock(pFile, &lock) ){ tErrno = errno; rc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_LOCK); if( rc!=SQLITE_BUSY ){ storeLastErrno(pFile, tErrno); } } } #ifdef SQLITE_DEBUG /* Set up the transaction-counter change checking flags when ** transitioning from a SHARED to a RESERVED lock. The change ** from SHARED to RESERVED marks the beginning of a normal ** write operation (not a hot journal rollback). */ if( rc==SQLITE_OK && pFile->eFileLock<=SHARED_LOCK && eFileLock==RESERVED_LOCK ){ pFile->transCntrChng = 0; pFile->dbUpdate = 0; pFile->inNormalWrite = 1; } #endif if( rc==SQLITE_OK ){ pFile->eFileLock = eFileLock; pInode->eFileLock = eFileLock; }else if( eFileLock==EXCLUSIVE_LOCK ){ pFile->eFileLock = PENDING_LOCK; pInode->eFileLock = PENDING_LOCK; } end_lock: unixLeaveMutex(); OSTRACE(("LOCK %d %s %s (unix)\n", pFile->h, azFileLock(eFileLock), rc==SQLITE_OK ? "ok" : "failed")); return rc; } /* ** Add the file descriptor used by file handle pFile to the corresponding ** pUnused list. */ static void setPendingFd(unixFile *pFile){ unixInodeInfo *pInode = pFile->pInode; UnixUnusedFd *p = pFile->pUnused; p->pNext = pInode->pUnused; pInode->pUnused = p; pFile->h = -1; pFile->pUnused = 0; } /* ** Lower the locking level on file descriptor pFile to eFileLock. eFileLock ** must be either NO_LOCK or SHARED_LOCK. ** ** If the locking level of the file descriptor is already at or below ** the requested locking level, this routine is a no-op. ** ** If handleNFSUnlock is true, then on downgrading an EXCLUSIVE_LOCK to SHARED ** the byte range is divided into 2 parts and the first part is unlocked then ** set to a read lock, then the other part is simply unlocked. This works ** around a bug in BSD NFS lockd (also seen on MacOSX 10.3+) that fails to ** remove the write lock on a region when a read lock is set. 
*/ static int posixUnlock(sqlite3_file *id, int eFileLock, int handleNFSUnlock){ unixFile *pFile = (unixFile*)id; unixInodeInfo *pInode; struct flock lock; int rc = SQLITE_OK; assert( pFile ); OSTRACE(("UNLOCK %d %d was %d(%d,%d) pid=%d (unix)\n", pFile->h, eFileLock, pFile->eFileLock, pFile->pInode->eFileLock, pFile->pInode->nShared, osGetpid(0))); assert( eFileLock<=SHARED_LOCK ); if( pFile->eFileLock<=eFileLock ){ return SQLITE_OK; } unixEnterMutex(); pInode = pFile->pInode; assert( pInode->nShared!=0 ); if( pFile->eFileLock>SHARED_LOCK ){ assert( pInode->eFileLock==pFile->eFileLock ); #ifdef SQLITE_DEBUG /* When reducing a lock such that other processes can start ** reading the database file again, make sure that the ** transaction counter was updated if any part of the database ** file changed. If the transaction counter is not updated, ** other connections to the same file might not realize that ** the file has changed and hence might not know to flush their ** cache. The use of a stale cache can lead to database corruption. */ pFile->inNormalWrite = 0; #endif /* downgrading to a shared lock on NFS involves clearing the write lock ** before establishing the readlock - to avoid a race condition we downgrade ** the lock in 2 blocks, so that part of the range will be covered by a ** write lock until the rest is covered by a read lock: ** 1: [WWWWW] ** 2: [....W] ** 3: [RRRRW] ** 4: [RRRR.] */ if( eFileLock==SHARED_LOCK ){ #if !defined(__APPLE__) || !SQLITE_ENABLE_LOCKING_STYLE (void)handleNFSUnlock; assert( handleNFSUnlock==0 ); #endif #if defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE if( handleNFSUnlock ){ int tErrno; /* Error code from system call errors */ off_t divSize = SHARED_SIZE - 1; lock.l_type = F_UNLCK; lock.l_whence = SEEK_SET; lock.l_start = SHARED_FIRST; lock.l_len = divSize; if( unixFileLock(pFile, &lock)==(-1) ){ tErrno = errno; rc = SQLITE_IOERR_UNLOCK; if( IS_LOCK_ERROR(rc) ){ storeLastErrno(pFile, tErrno); } goto end_unlock; } lock.l_type = F_RDLCK; lock.l_whence = SEEK_SET; lock.l_start = SHARED_FIRST; lock.l_len = divSize; if( unixFileLock(pFile, &lock)==(-1) ){ tErrno = errno; rc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_RDLOCK); if( IS_LOCK_ERROR(rc) ){ storeLastErrno(pFile, tErrno); } goto end_unlock; } lock.l_type = F_UNLCK; lock.l_whence = SEEK_SET; lock.l_start = SHARED_FIRST+divSize; lock.l_len = SHARED_SIZE-divSize; if( unixFileLock(pFile, &lock)==(-1) ){ tErrno = errno; rc = SQLITE_IOERR_UNLOCK; if( IS_LOCK_ERROR(rc) ){ storeLastErrno(pFile, tErrno); } goto end_unlock; } }else #endif /* defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE */ { lock.l_type = F_RDLCK; lock.l_whence = SEEK_SET; lock.l_start = SHARED_FIRST; lock.l_len = SHARED_SIZE; if( unixFileLock(pFile, &lock) ){ /* In theory, the call to unixFileLock() cannot fail because another ** process is holding an incompatible lock. If it does, this ** indicates that the other process is not following the locking ** protocol. If this happens, return SQLITE_IOERR_RDLOCK. Returning ** SQLITE_BUSY would confuse the upper layer (in practice it causes ** an assert to fail). 
*/ rc = SQLITE_IOERR_RDLOCK; storeLastErrno(pFile, errno); goto end_unlock; } } } lock.l_type = F_UNLCK; lock.l_whence = SEEK_SET; lock.l_start = PENDING_BYTE; lock.l_len = 2L; assert( PENDING_BYTE+1==RESERVED_BYTE ); if( unixFileLock(pFile, &lock)==0 ){ pInode->eFileLock = SHARED_LOCK; }else{ rc = SQLITE_IOERR_UNLOCK; storeLastErrno(pFile, errno); goto end_unlock; } } if( eFileLock==NO_LOCK ){ /* Decrement the shared lock counter. Release the lock using an ** OS call only when all threads in this same process have released ** the lock. */ pInode->nShared--; if( pInode->nShared==0 ){ lock.l_type = F_UNLCK; lock.l_whence = SEEK_SET; lock.l_start = lock.l_len = 0L; if( unixFileLock(pFile, &lock)==0 ){ pInode->eFileLock = NO_LOCK; }else{ rc = SQLITE_IOERR_UNLOCK; storeLastErrno(pFile, errno); pInode->eFileLock = NO_LOCK; pFile->eFileLock = NO_LOCK; } } /* Decrement the count of locks against this same file. When the ** count reaches zero, close any other file descriptors whose close ** was deferred because of outstanding locks. */ pInode->nLock--; assert( pInode->nLock>=0 ); if( pInode->nLock==0 ){ closePendingFds(pFile); } } end_unlock: unixLeaveMutex(); if( rc==SQLITE_OK ) pFile->eFileLock = eFileLock; return rc; } /* ** Lower the locking level on file descriptor pFile to eFileLock. eFileLock ** must be either NO_LOCK or SHARED_LOCK. ** ** If the locking level of the file descriptor is already at or below ** the requested locking level, this routine is a no-op. */ static int unixUnlock(sqlite3_file *id, int eFileLock){ #if SQLITE_MAX_MMAP_SIZE>0 assert( eFileLock==SHARED_LOCK || ((unixFile *)id)->nFetchOut==0 ); #endif return posixUnlock(id, eFileLock, 0); } #if SQLITE_MAX_MMAP_SIZE>0 static int unixMapfile(unixFile *pFd, i64 nByte); static void unixUnmapfile(unixFile *pFd); #endif /* ** This function performs the parts of the "close file" operation ** common to all locking schemes. It closes the directory and file ** handles, if they are valid, and sets all fields of the unixFile ** structure to 0. ** ** It is *not* necessary to hold the mutex when this routine is called, ** even on VxWorks. A mutex will be acquired on VxWorks by the ** vxworksReleaseFileId() routine. */ static int closeUnixFile(sqlite3_file *id){ unixFile *pFile = (unixFile*)id; #if SQLITE_MAX_MMAP_SIZE>0 unixUnmapfile(pFile); #endif if( pFile->h>=0 ){ robust_close(pFile, pFile->h, __LINE__); pFile->h = -1; } #if OS_VXWORKS if( pFile->pId ){ if( pFile->ctrlFlags & UNIXFILE_DELETE ){ osUnlink(pFile->pId->zCanonicalName); } vxworksReleaseFileId(pFile->pId); pFile->pId = 0; } #endif #ifdef SQLITE_UNLINK_AFTER_CLOSE if( pFile->ctrlFlags & UNIXFILE_DELETE ){ osUnlink(pFile->zPath); sqlite3_free(*(char**)&pFile->zPath); pFile->zPath = 0; } #endif OSTRACE(("CLOSE %-3d\n", pFile->h)); OpenCounter(-1); sqlite3_free(pFile->pUnused); memset(pFile, 0, sizeof(unixFile)); return SQLITE_OK; } /* ** Close a file. */ static int unixClose(sqlite3_file *id){ int rc = SQLITE_OK; unixFile *pFile = (unixFile *)id; verifyDbFile(pFile); unixUnlock(id, NO_LOCK); unixEnterMutex(); /* unixFile.pInode is always valid here. Otherwise, a different close ** routine (e.g. nolockClose()) would be called instead. */ assert( pFile->pInode->nLock>0 || pFile->pInode->bProcessLock==0 ); if( ALWAYS(pFile->pInode) && pFile->pInode->nLock ){ /* If there are outstanding locks, do not actually close the file just ** yet because that would clear those locks. Instead, add the file ** descriptor to pInode->pUnused list. 
It will be automatically closed ** when the last lock is cleared. */ setPendingFd(pFile); } releaseInodeInfo(pFile); rc = closeUnixFile(id); unixLeaveMutex(); return rc; } /************** End of the posix advisory lock implementation ***************** ******************************************************************************/ /****************************************************************************** ****************************** No-op Locking ********************************** ** ** Of the various locking implementations available, this is by far the ** simplest: locking is ignored. No attempt is made to lock the database ** file for reading or writing. ** ** This locking mode is appropriate for use on read-only databases ** (ex: databases that are burned into CD-ROM, for example.) It can ** also be used if the application employs some external mechanism to ** prevent simultaneous access of the same database by two or more ** database connections. But there is a serious risk of database ** corruption if this locking mode is used in situations where multiple ** database connections are accessing the same database file at the same ** time and one or more of those connections are writing. */ static int nolockCheckReservedLock(sqlite3_file *NotUsed, int *pResOut){ UNUSED_PARAMETER(NotUsed); *pResOut = 0; return SQLITE_OK; } static int nolockLock(sqlite3_file *NotUsed, int NotUsed2){ UNUSED_PARAMETER2(NotUsed, NotUsed2); return SQLITE_OK; } static int nolockUnlock(sqlite3_file *NotUsed, int NotUsed2){ UNUSED_PARAMETER2(NotUsed, NotUsed2); return SQLITE_OK; } /* ** Close the file. */ static int nolockClose(sqlite3_file *id) { return closeUnixFile(id); } /******************* End of the no-op lock implementation ********************* ******************************************************************************/ /****************************************************************************** ************************* Begin dot-file Locking ****************************** ** ** The dotfile locking implementation uses the existence of separate lock ** files (really a directory) to control access to the database. This works ** on just about every filesystem imaginable. But there are serious downsides: ** ** (1) There is zero concurrency. A single reader blocks all other ** connections from reading or writing the database. ** ** (2) An application crash or power loss can leave stale lock files ** sitting around that need to be cleared manually. ** ** Nevertheless, a dotlock is an appropriate locking mode for use if no ** other locking strategy is available. ** ** Dotfile locking works by creating a subdirectory in the same directory as ** the database and with the same name but with a ".lock" extension added. ** The existence of a lock directory implies an EXCLUSIVE lock. All other ** lock types (SHARED, RESERVED, PENDING) are mapped into EXCLUSIVE. */ /* ** The file suffix added to the data base filename in order to create the ** lock directory. */ #define DOTLOCK_SUFFIX ".lock" /* ** This routine checks if there is a RESERVED lock held on the specified ** file by this or any other process. If such a lock is held, set *pResOut ** to a non-zero value otherwise *pResOut is set to zero. The return value ** is set to SQLITE_OK unless an I/O error occurs during lock checking. ** ** In dotfile locking, either a lock exists or it does not. So in this ** variation of CheckReservedLock(), *pResOut is set to true if any lock ** is held on the file and false if the file is unlocked. 
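**
** Mechanically, the routines in this division reduce to three operations
** on a lock directory whose name is the database path with DOTLOCK_SUFFIX
** appended (a simplified sketch; the path is hypothetical):
**
**       rc   = mkdir("/data/app.db.lock", 0777);
**       held = ( access("/data/app.db.lock", 0)==0 );
**       rc   = rmdir("/data/app.db.lock");
**
** mkdir() takes the EXCLUSIVE lock and fails with EEXIST if another
** process already holds it; access() is the probe used by the check for
** a reserved lock; rmdir() releases the lock.  A stale lock directory
** left behind by a crash can therefore be cleared by hand with a plain
** rmdir.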
*/ static int dotlockCheckReservedLock(sqlite3_file *id, int *pResOut) { int rc = SQLITE_OK; int reserved = 0; unixFile *pFile = (unixFile*)id; SimulateIOError( return SQLITE_IOERR_CHECKRESERVEDLOCK; ); assert( pFile ); /* Check if a thread in this process holds such a lock */ if( pFile->eFileLock>SHARED_LOCK ){ /* Either this connection or some other connection in the same process ** holds a lock on the file. No need to check further. */ reserved = 1; }else{ /* The lock is held if and only if the lockfile exists */ const char *zLockFile = (const char*)pFile->lockingContext; reserved = osAccess(zLockFile, 0)==0; } OSTRACE(("TEST WR-LOCK %d %d %d (dotlock)\n", pFile->h, rc, reserved)); *pResOut = reserved; return rc; } /* ** Lock the file with the lock specified by parameter eFileLock - one ** of the following: ** ** (1) SHARED_LOCK ** (2) RESERVED_LOCK ** (3) PENDING_LOCK ** (4) EXCLUSIVE_LOCK ** ** Sometimes when requesting one lock state, additional lock states ** are inserted in between. The locking might fail on one of the later ** transitions leaving the lock state different from what it started but ** still short of its goal. The following chart shows the allowed ** transitions and the inserted intermediate states: ** ** UNLOCKED -> SHARED ** SHARED -> RESERVED ** SHARED -> (PENDING) -> EXCLUSIVE ** RESERVED -> (PENDING) -> EXCLUSIVE ** PENDING -> EXCLUSIVE ** ** This routine will only increase a lock. Use the sqlite3OsUnlock() ** routine to lower a locking level. ** ** With dotfile locking, we really only support state (4): EXCLUSIVE. ** But we track the other locking levels internally. */ static int dotlockLock(sqlite3_file *id, int eFileLock) { unixFile *pFile = (unixFile*)id; char *zLockFile = (char *)pFile->lockingContext; int rc = SQLITE_OK; /* If we have any lock, then the lock file already exists. All we have ** to do is adjust our internal record of the lock level. */ if( pFile->eFileLock > NO_LOCK ){ pFile->eFileLock = eFileLock; /* Always update the timestamp on the old file */ #ifdef HAVE_UTIME utime(zLockFile, NULL); #else utimes(zLockFile, NULL); #endif return SQLITE_OK; } /* grab an exclusive lock */ rc = osMkdir(zLockFile, 0777); if( rc<0 ){ /* failed to open/create the lock directory */ int tErrno = errno; if( EEXIST == tErrno ){ rc = SQLITE_BUSY; } else { rc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_LOCK); if( IS_LOCK_ERROR(rc) ){ storeLastErrno(pFile, tErrno); } } return rc; } /* got it, set the type and return ok */ pFile->eFileLock = eFileLock; return rc; } /* ** Lower the locking level on file descriptor pFile to eFileLock. eFileLock ** must be either NO_LOCK or SHARED_LOCK. ** ** If the locking level of the file descriptor is already at or below ** the requested locking level, this routine is a no-op. ** ** When the locking level reaches NO_LOCK, delete the lock file. */ static int dotlockUnlock(sqlite3_file *id, int eFileLock) { unixFile *pFile = (unixFile*)id; char *zLockFile = (char *)pFile->lockingContext; int rc; assert( pFile ); OSTRACE(("UNLOCK %d %d was %d pid=%d (dotlock)\n", pFile->h, eFileLock, pFile->eFileLock, osGetpid(0))); assert( eFileLock<=SHARED_LOCK ); /* no-op if possible */ if( pFile->eFileLock==eFileLock ){ return SQLITE_OK; } /* To downgrade to shared, simply update our internal notion of the ** lock state. No need to mess with the file on disk. 
*/ if( eFileLock==SHARED_LOCK ){ pFile->eFileLock = SHARED_LOCK; return SQLITE_OK; } /* To fully unlock the database, delete the lock file */ assert( eFileLock==NO_LOCK ); rc = osRmdir(zLockFile); if( rc<0 && errno==ENOTDIR ) rc = osUnlink(zLockFile); if( rc<0 ){ int tErrno = errno; rc = 0; if( ENOENT != tErrno ){ rc = SQLITE_IOERR_UNLOCK; } if( IS_LOCK_ERROR(rc) ){ storeLastErrno(pFile, tErrno); } return rc; } pFile->eFileLock = NO_LOCK; return SQLITE_OK; } /* ** Close a file. Make sure the lock has been released before closing. */ static int dotlockClose(sqlite3_file *id) { int rc = SQLITE_OK; if( id ){ unixFile *pFile = (unixFile*)id; dotlockUnlock(id, NO_LOCK); sqlite3_free(pFile->lockingContext); rc = closeUnixFile(id); } return rc; } /****************** End of the dot-file lock implementation ******************* ******************************************************************************/ /****************************************************************************** ************************** Begin flock Locking ******************************** ** ** Use the flock() system call to do file locking. ** ** flock() locking is like dot-file locking in that the various ** fine-grain locking levels supported by SQLite are collapsed into ** a single exclusive lock. In other words, SHARED, RESERVED, and ** PENDING locks are the same thing as an EXCLUSIVE lock. SQLite ** still works when you do this, but concurrency is reduced since ** only a single process can be reading the database at a time. ** ** Omit this section if SQLITE_ENABLE_LOCKING_STYLE is turned off */ #if SQLITE_ENABLE_LOCKING_STYLE /* ** Retry flock() calls that fail with EINTR */ #ifdef EINTR static int robust_flock(int fd, int op){ int rc; do{ rc = flock(fd,op); }while( rc<0 && errno==EINTR ); return rc; } #else # define robust_flock(a,b) flock(a,b) #endif /* ** This routine checks if there is a RESERVED lock held on the specified ** file by this or any other process. If such a lock is held, set *pResOut ** to a non-zero value otherwise *pResOut is set to zero. The return value ** is set to SQLITE_OK unless an I/O error occurs during lock checking. */ static int flockCheckReservedLock(sqlite3_file *id, int *pResOut){ int rc = SQLITE_OK; int reserved = 0; unixFile *pFile = (unixFile*)id; SimulateIOError( return SQLITE_IOERR_CHECKRESERVEDLOCK; ); assert( pFile ); /* Check if a thread in this process holds such a lock */ if( pFile->eFileLock>SHARED_LOCK ){ reserved = 1; } /* Otherwise see if some other process holds it. 
*/ if( !reserved ){ /* attempt to get the lock */ int lrc = robust_flock(pFile->h, LOCK_EX | LOCK_NB); if( !lrc ){ /* got the lock, unlock it */ lrc = robust_flock(pFile->h, LOCK_UN); if ( lrc ) { int tErrno = errno; /* unlock failed with an error */ lrc = SQLITE_IOERR_UNLOCK; if( IS_LOCK_ERROR(lrc) ){ storeLastErrno(pFile, tErrno); rc = lrc; } } } else { int tErrno = errno; reserved = 1; /* someone else might have it reserved */ lrc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_LOCK); if( IS_LOCK_ERROR(lrc) ){ storeLastErrno(pFile, tErrno); rc = lrc; } } } OSTRACE(("TEST WR-LOCK %d %d %d (flock)\n", pFile->h, rc, reserved)); #ifdef SQLITE_IGNORE_FLOCK_LOCK_ERRORS if( (rc & SQLITE_IOERR) == SQLITE_IOERR ){ rc = SQLITE_OK; reserved=1; } #endif /* SQLITE_IGNORE_FLOCK_LOCK_ERRORS */ *pResOut = reserved; return rc; } /* ** Lock the file with the lock specified by parameter eFileLock - one ** of the following: ** ** (1) SHARED_LOCK ** (2) RESERVED_LOCK ** (3) PENDING_LOCK ** (4) EXCLUSIVE_LOCK ** ** Sometimes when requesting one lock state, additional lock states ** are inserted in between. The locking might fail on one of the later ** transitions leaving the lock state different from what it started but ** still short of its goal. The following chart shows the allowed ** transitions and the inserted intermediate states: ** ** UNLOCKED -> SHARED ** SHARED -> RESERVED ** SHARED -> (PENDING) -> EXCLUSIVE ** RESERVED -> (PENDING) -> EXCLUSIVE ** PENDING -> EXCLUSIVE ** ** flock() only really support EXCLUSIVE locks. We track intermediate ** lock states in the sqlite3_file structure, but all locks SHARED or ** above are really EXCLUSIVE locks and exclude all other processes from ** access the file. ** ** This routine will only increase a lock. Use the sqlite3OsUnlock() ** routine to lower a locking level. */ static int flockLock(sqlite3_file *id, int eFileLock) { int rc = SQLITE_OK; unixFile *pFile = (unixFile*)id; assert( pFile ); /* if we already have a lock, it is exclusive. ** Just adjust level and punt on outta here. */ if (pFile->eFileLock > NO_LOCK) { pFile->eFileLock = eFileLock; return SQLITE_OK; } /* grab an exclusive lock */ if (robust_flock(pFile->h, LOCK_EX | LOCK_NB)) { int tErrno = errno; /* didn't get, must be busy */ rc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_LOCK); if( IS_LOCK_ERROR(rc) ){ storeLastErrno(pFile, tErrno); } } else { /* got it, set the type and return ok */ pFile->eFileLock = eFileLock; } OSTRACE(("LOCK %d %s %s (flock)\n", pFile->h, azFileLock(eFileLock), rc==SQLITE_OK ? "ok" : "failed")); #ifdef SQLITE_IGNORE_FLOCK_LOCK_ERRORS if( (rc & SQLITE_IOERR) == SQLITE_IOERR ){ rc = SQLITE_BUSY; } #endif /* SQLITE_IGNORE_FLOCK_LOCK_ERRORS */ return rc; } /* ** Lower the locking level on file descriptor pFile to eFileLock. eFileLock ** must be either NO_LOCK or SHARED_LOCK. ** ** If the locking level of the file descriptor is already at or below ** the requested locking level, this routine is a no-op. */ static int flockUnlock(sqlite3_file *id, int eFileLock) { unixFile *pFile = (unixFile*)id; assert( pFile ); OSTRACE(("UNLOCK %d %d was %d pid=%d (flock)\n", pFile->h, eFileLock, pFile->eFileLock, osGetpid(0))); assert( eFileLock<=SHARED_LOCK ); /* no-op if possible */ if( pFile->eFileLock==eFileLock ){ return SQLITE_OK; } /* shared can just be set because we always have an exclusive */ if (eFileLock==SHARED_LOCK) { pFile->eFileLock = eFileLock; return SQLITE_OK; } /* no, really, unlock. 
*/ if( robust_flock(pFile->h, LOCK_UN) ){ #ifdef SQLITE_IGNORE_FLOCK_LOCK_ERRORS return SQLITE_OK; #endif /* SQLITE_IGNORE_FLOCK_LOCK_ERRORS */ return SQLITE_IOERR_UNLOCK; }else{ pFile->eFileLock = NO_LOCK; return SQLITE_OK; } } /* ** Close a file. */ static int flockClose(sqlite3_file *id) { int rc = SQLITE_OK; if( id ){ flockUnlock(id, NO_LOCK); rc = closeUnixFile(id); } return rc; } #endif /* SQLITE_ENABLE_LOCKING_STYLE && !OS_VXWORK */ /******************* End of the flock lock implementation ********************* ******************************************************************************/ /****************************************************************************** ************************ Begin Named Semaphore Locking ************************ ** ** Named semaphore locking is only supported on VxWorks. ** ** Semaphore locking is like dot-lock and flock in that it really only ** supports EXCLUSIVE locking. Only a single process can read or write ** the database file at a time. This reduces potential concurrency, but ** makes the lock implementation much easier. */ #if OS_VXWORKS /* ** This routine checks if there is a RESERVED lock held on the specified ** file by this or any other process. If such a lock is held, set *pResOut ** to a non-zero value otherwise *pResOut is set to zero. The return value ** is set to SQLITE_OK unless an I/O error occurs during lock checking. */ static int semXCheckReservedLock(sqlite3_file *id, int *pResOut) { int rc = SQLITE_OK; int reserved = 0; unixFile *pFile = (unixFile*)id; SimulateIOError( return SQLITE_IOERR_CHECKRESERVEDLOCK; ); assert( pFile ); /* Check if a thread in this process holds such a lock */ if( pFile->eFileLock>SHARED_LOCK ){ reserved = 1; } /* Otherwise see if some other process holds it. */ if( !reserved ){ sem_t *pSem = pFile->pInode->pSem; if( sem_trywait(pSem)==-1 ){ int tErrno = errno; if( EAGAIN != tErrno ){ rc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_CHECKRESERVEDLOCK); storeLastErrno(pFile, tErrno); } else { /* someone else has the lock when we are in NO_LOCK */ reserved = (pFile->eFileLock < SHARED_LOCK); } }else{ /* we could have it if we want it */ sem_post(pSem); } } OSTRACE(("TEST WR-LOCK %d %d %d (sem)\n", pFile->h, rc, reserved)); *pResOut = reserved; return rc; } /* ** Lock the file with the lock specified by parameter eFileLock - one ** of the following: ** ** (1) SHARED_LOCK ** (2) RESERVED_LOCK ** (3) PENDING_LOCK ** (4) EXCLUSIVE_LOCK ** ** Sometimes when requesting one lock state, additional lock states ** are inserted in between. The locking might fail on one of the later ** transitions leaving the lock state different from what it started but ** still short of its goal. The following chart shows the allowed ** transitions and the inserted intermediate states: ** ** UNLOCKED -> SHARED ** SHARED -> RESERVED ** SHARED -> (PENDING) -> EXCLUSIVE ** RESERVED -> (PENDING) -> EXCLUSIVE ** PENDING -> EXCLUSIVE ** ** Semaphore locks only really support EXCLUSIVE locks. We track intermediate ** lock states in the sqlite3_file structure, but all locks SHARED or ** above are really EXCLUSIVE locks and exclude all other processes from ** access the file. ** ** This routine will only increase a lock. Use the sqlite3OsUnlock() ** routine to lower a locking level. */ static int semXLock(sqlite3_file *id, int eFileLock) { unixFile *pFile = (unixFile*)id; sem_t *pSem = pFile->pInode->pSem; int rc = SQLITE_OK; /* if we already have a lock, it is exclusive. ** Just adjust level and punt on outta here. 
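*/

#if 0
/* Illustrative sketch (editorial, not part of SQLite): the sem_trywait()/
** sem_post() pattern used by semXLock()/semXUnlock() above, shown with a
** portable named POSIX semaphore.  SQLite's own version is VxWorks-only and
** keeps the sem_t in the shared unixInodeInfo; the semaphore name below is
** hypothetical.  Some systems need -pthread or -lrt to link. */
#include <errno.h>
#include <fcntl.h>
#include <semaphore.h>
#include <stdio.h>

int main(void){
  /* One token: whoever holds the token holds the (exclusive) lock. */
  sem_t *pSem = sem_open("/demo-db-sem", O_CREAT, 0644, 1);
  if( pSem==SEM_FAILED ){ perror("sem_open"); return 1; }

  if( sem_trywait(pSem)==0 ){
    printf("lock acquired\n");
    /* ... read or write the database here ... */
    sem_post(pSem);                     /* release, as semXUnlock() does */
  }else if( errno==EAGAIN ){
    printf("database is busy\n");       /* maps to SQLITE_BUSY in semXLock() */
  }else{
    perror("sem_trywait");
  }
  sem_close(pSem);
  return 0;
}
#endif

/*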
*/ if (pFile->eFileLock > NO_LOCK) { pFile->eFileLock = eFileLock; rc = SQLITE_OK; goto sem_end_lock; } /* lock semaphore now but bail out when already locked. */ if( sem_trywait(pSem)==-1 ){ rc = SQLITE_BUSY; goto sem_end_lock; } /* got it, set the type and return ok */ pFile->eFileLock = eFileLock; sem_end_lock: return rc; } /* ** Lower the locking level on file descriptor pFile to eFileLock. eFileLock ** must be either NO_LOCK or SHARED_LOCK. ** ** If the locking level of the file descriptor is already at or below ** the requested locking level, this routine is a no-op. */ static int semXUnlock(sqlite3_file *id, int eFileLock) { unixFile *pFile = (unixFile*)id; sem_t *pSem = pFile->pInode->pSem; assert( pFile ); assert( pSem ); OSTRACE(("UNLOCK %d %d was %d pid=%d (sem)\n", pFile->h, eFileLock, pFile->eFileLock, osGetpid(0))); assert( eFileLock<=SHARED_LOCK ); /* no-op if possible */ if( pFile->eFileLock==eFileLock ){ return SQLITE_OK; } /* shared can just be set because we always have an exclusive */ if (eFileLock==SHARED_LOCK) { pFile->eFileLock = eFileLock; return SQLITE_OK; } /* no, really unlock. */ if ( sem_post(pSem)==-1 ) { int rc, tErrno = errno; rc = sqliteErrorFromPosixError(tErrno, SQLITE_IOERR_UNLOCK); if( IS_LOCK_ERROR(rc) ){ storeLastErrno(pFile, tErrno); } return rc; } pFile->eFileLock = NO_LOCK; return SQLITE_OK; } /* ** Close a file. */ static int semXClose(sqlite3_file *id) { if( id ){ unixFile *pFile = (unixFile*)id; semXUnlock(id, NO_LOCK); assert( pFile ); unixEnterMutex(); releaseInodeInfo(pFile); unixLeaveMutex(); closeUnixFile(id); } return SQLITE_OK; } #endif /* OS_VXWORKS */ /* ** Named semaphore locking is only available on VxWorks. ** *************** End of the named semaphore lock implementation **************** ******************************************************************************/ /****************************************************************************** *************************** Begin AFP Locking ********************************* ** ** AFP is the Apple Filing Protocol. AFP is a network filesystem found ** on Apple Macintosh computers - both OS9 and OSX. ** ** Third-party implementations of AFP are available. But this code here ** only works on OSX. */ #if defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE /* ** The afpLockingContext structure contains all afp lock specific state */ typedef struct afpLockingContext afpLockingContext; struct afpLockingContext { int reserved; const char *dbPath; /* Name of the open file */ }; struct ByteRangeLockPB2 { unsigned long long offset; /* offset to first byte to lock */ unsigned long long length; /* nbr of bytes to lock */ unsigned long long retRangeStart; /* nbr of 1st byte locked if successful */ unsigned char unLockFlag; /* 1 = unlock, 0 = lock */ unsigned char startEndFlag; /* 1=rel to end of fork, 0=rel to start */ int fd; /* file desc to assoc this lock with */ }; #define afpfsByteRangeLock2FSCTL _IOWR('z', 23, struct ByteRangeLockPB2) /* ** This is a utility for setting or clearing a bit-range lock on an ** AFP filesystem. ** ** Return SQLITE_OK on success, SQLITE_BUSY on failure. */ static int afpSetLock( const char *path, /* Name of the file to be locked or unlocked */ unixFile *pFile, /* Open file descriptor on path */ unsigned long long offset, /* First byte to be locked */ unsigned long long length, /* Number of bytes to lock */ int setLockFlag /* True to set lock. False to clear lock */ ){ struct ByteRangeLockPB2 pb; int err; pb.unLockFlag = setLockFlag ? 
0 : 1; pb.startEndFlag = 0; pb.offset = offset; pb.length = length; pb.fd = pFile->h; OSTRACE(("AFPSETLOCK [%s] for %d%s in range %llx:%llx\n", (setLockFlag?"ON":"OFF"), pFile->h, (pb.fd==-1?"[testval-1]":""), offset, length)); err = fsctl(path, afpfsByteRangeLock2FSCTL, &pb, 0); if ( err==-1 ) { int rc; int tErrno = errno; OSTRACE(("AFPSETLOCK failed to fsctl() '%s' %d %s\n", path, tErrno, strerror(tErrno))); #ifdef SQLITE_IGNORE_AFP_LOCK_ERRORS rc = SQLITE_BUSY; #else rc = sqliteErrorFromPosixError(tErrno, setLockFlag ? SQLITE_IOERR_LOCK : SQLITE_IOERR_UNLOCK); #endif /* SQLITE_IGNORE_AFP_LOCK_ERRORS */ if( IS_LOCK_ERROR(rc) ){ storeLastErrno(pFile, tErrno); } return rc; } else { return SQLITE_OK; } } /* ** This routine checks if there is a RESERVED lock held on the specified ** file by this or any other process. If such a lock is held, set *pResOut ** to a non-zero value otherwise *pResOut is set to zero. The return value ** is set to SQLITE_OK unless an I/O error occurs during lock checking. */ static int afpCheckReservedLock(sqlite3_file *id, int *pResOut){ int rc = SQLITE_OK; int reserved = 0; unixFile *pFile = (unixFile*)id; afpLockingContext *context; SimulateIOError( return SQLITE_IOERR_CHECKRESERVEDLOCK; ); assert( pFile ); context = (afpLockingContext *) pFile->lockingContext; if( context->reserved ){ *pResOut = 1; return SQLITE_OK; } unixEnterMutex(); /* Because pFile->pInode is shared across threads */ /* Check if a thread in this process holds such a lock */ if( pFile->pInode->eFileLock>SHARED_LOCK ){ reserved = 1; } /* Otherwise see if some other process holds it. */ if( !reserved ){ /* lock the RESERVED byte */ int lrc = afpSetLock(context->dbPath, pFile, RESERVED_BYTE, 1,1); if( SQLITE_OK==lrc ){ /* if we succeeded in taking the reserved lock, unlock it to restore ** the original state */ lrc = afpSetLock(context->dbPath, pFile, RESERVED_BYTE, 1, 0); } else { /* if we failed to get the lock then someone else must have it */ reserved = 1; } if( IS_LOCK_ERROR(lrc) ){ rc=lrc; } } unixLeaveMutex(); OSTRACE(("TEST WR-LOCK %d %d %d (afp)\n", pFile->h, rc, reserved)); *pResOut = reserved; return rc; } /* ** Lock the file with the lock specified by parameter eFileLock - one ** of the following: ** ** (1) SHARED_LOCK ** (2) RESERVED_LOCK ** (3) PENDING_LOCK ** (4) EXCLUSIVE_LOCK ** ** Sometimes when requesting one lock state, additional lock states ** are inserted in between. The locking might fail on one of the later ** transitions leaving the lock state different from what it started but ** still short of its goal. The following chart shows the allowed ** transitions and the inserted intermediate states: ** ** UNLOCKED -> SHARED ** SHARED -> RESERVED ** SHARED -> (PENDING) -> EXCLUSIVE ** RESERVED -> (PENDING) -> EXCLUSIVE ** PENDING -> EXCLUSIVE ** ** This routine will only increase a lock. Use the sqlite3OsUnlock() ** routine to lower a locking level. */ static int afpLock(sqlite3_file *id, int eFileLock){ int rc = SQLITE_OK; unixFile *pFile = (unixFile*)id; unixInodeInfo *pInode = pFile->pInode; afpLockingContext *context = (afpLockingContext *) pFile->lockingContext; assert( pFile ); OSTRACE(("LOCK %d %s was %s(%s,%d) pid=%d (afp)\n", pFile->h, azFileLock(eFileLock), azFileLock(pFile->eFileLock), azFileLock(pInode->eFileLock), pInode->nShared , osGetpid(0))); /* If there is already a lock of this type or more restrictive on the ** unixFile, do nothing. Don't use the afp_end_lock: exit path, as ** unixEnterMutex() hasn't been called yet. 
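*/

#if 0
/* Illustrative sketch (editorial, not part of SQLite): checking whether some
** other process holds a lock on one designated byte, in the spirit of
** afpCheckReservedLock() above.  The AFP code does this through fsctl() and
** afpfsByteRangeLock2FSCTL, which only exist on OS X; this sketch substitutes
** plain POSIX fcntl() range locks, and the byte offset below is a made-up
** stand-in for SQLite's RESERVED_BYTE. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define DEMO_RESERVED_BYTE  1073741825   /* hypothetical lock-byte offset */

/* Return 1 if another process holds the byte, 0 if not, -1 on error. */
static int demo_reserved_held(int fd){
  struct flock f;
  memset(&f, 0, sizeof(f));
  f.l_type = F_WRLCK;                 /* ask: could I write-lock this byte? */
  f.l_whence = SEEK_SET;
  f.l_start = DEMO_RESERVED_BYTE;
  f.l_len = 1;
  if( fcntl(fd, F_GETLK, &f)<0 ) return -1;
  return f.l_type!=F_UNLCK;           /* F_UNLCK back means nobody holds it */
}

int main(void){
  int fd = open("demo.db", O_RDWR | O_CREAT, 0644);
  if( fd<0 ){ perror("open"); return 1; }
  printf("reserved lock held elsewhere: %d\n", demo_reserved_held(fd));
  close(fd);
  return 0;
}
#endif

/*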
*/ if( pFile->eFileLock>=eFileLock ){ OSTRACE(("LOCK %d %s ok (already held) (afp)\n", pFile->h, azFileLock(eFileLock))); return SQLITE_OK; } /* Make sure the locking sequence is correct ** (1) We never move from unlocked to anything higher than shared lock. ** (2) SQLite never explicitly requests a pendig lock. ** (3) A shared lock is always held when a reserve lock is requested. */ assert( pFile->eFileLock!=NO_LOCK || eFileLock==SHARED_LOCK ); assert( eFileLock!=PENDING_LOCK ); assert( eFileLock!=RESERVED_LOCK || pFile->eFileLock==SHARED_LOCK ); /* This mutex is needed because pFile->pInode is shared across threads */ unixEnterMutex(); pInode = pFile->pInode; /* If some thread using this PID has a lock via a different unixFile* ** handle that precludes the requested lock, return BUSY. */ if( (pFile->eFileLock!=pInode->eFileLock && (pInode->eFileLock>=PENDING_LOCK || eFileLock>SHARED_LOCK)) ){ rc = SQLITE_BUSY; goto afp_end_lock; } /* If a SHARED lock is requested, and some thread using this PID already ** has a SHARED or RESERVED lock, then increment reference counts and ** return SQLITE_OK. */ if( eFileLock==SHARED_LOCK && (pInode->eFileLock==SHARED_LOCK || pInode->eFileLock==RESERVED_LOCK) ){ assert( eFileLock==SHARED_LOCK ); assert( pFile->eFileLock==0 ); assert( pInode->nShared>0 ); pFile->eFileLock = SHARED_LOCK; pInode->nShared++; pInode->nLock++; goto afp_end_lock; } /* A PENDING lock is needed before acquiring a SHARED lock and before ** acquiring an EXCLUSIVE lock. For the SHARED lock, the PENDING will ** be released. */ if( eFileLock==SHARED_LOCK || (eFileLock==EXCLUSIVE_LOCK && pFile->eFileLockdbPath, pFile, PENDING_BYTE, 1, 1); if (failed) { rc = failed; goto afp_end_lock; } } /* If control gets to this point, then actually go ahead and make ** operating system calls for the specified lock. */ if( eFileLock==SHARED_LOCK ){ int lrc1, lrc2, lrc1Errno = 0; long lk, mask; assert( pInode->nShared==0 ); assert( pInode->eFileLock==0 ); mask = (sizeof(long)==8) ? LARGEST_INT64 : 0x7fffffff; /* Now get the read-lock SHARED_LOCK */ /* note that the quality of the randomness doesn't matter that much */ lk = random(); pInode->sharedByte = (lk & mask)%(SHARED_SIZE - 1); lrc1 = afpSetLock(context->dbPath, pFile, SHARED_FIRST+pInode->sharedByte, 1, 1); if( IS_LOCK_ERROR(lrc1) ){ lrc1Errno = pFile->lastErrno; } /* Drop the temporary PENDING lock */ lrc2 = afpSetLock(context->dbPath, pFile, PENDING_BYTE, 1, 0); if( IS_LOCK_ERROR(lrc1) ) { storeLastErrno(pFile, lrc1Errno); rc = lrc1; goto afp_end_lock; } else if( IS_LOCK_ERROR(lrc2) ){ rc = lrc2; goto afp_end_lock; } else if( lrc1 != SQLITE_OK ) { rc = lrc1; } else { pFile->eFileLock = SHARED_LOCK; pInode->nLock++; pInode->nShared = 1; } }else if( eFileLock==EXCLUSIVE_LOCK && pInode->nShared>1 ){ /* We are trying for an exclusive lock but another thread in this ** same process is still holding a shared lock. */ rc = SQLITE_BUSY; }else{ /* The request was for a RESERVED or EXCLUSIVE lock. It is ** assumed that there is a SHARED or greater lock on the file ** already. */ int failed = 0; assert( 0!=pFile->eFileLock ); if (eFileLock >= RESERVED_LOCK && pFile->eFileLock < RESERVED_LOCK) { /* Acquire a RESERVED lock */ failed = afpSetLock(context->dbPath, pFile, RESERVED_BYTE, 1,1); if( !failed ){ context->reserved = 1; } } if (!failed && eFileLock == EXCLUSIVE_LOCK) { /* Acquire an EXCLUSIVE lock */ /* Remove the shared lock before trying the range. 
we'll need to ** reestablish the shared lock if we can't get the afpUnlock */ if( !(failed = afpSetLock(context->dbPath, pFile, SHARED_FIRST + pInode->sharedByte, 1, 0)) ){ int failed2 = SQLITE_OK; /* now attemmpt to get the exclusive lock range */ failed = afpSetLock(context->dbPath, pFile, SHARED_FIRST, SHARED_SIZE, 1); if( failed && (failed2 = afpSetLock(context->dbPath, pFile, SHARED_FIRST + pInode->sharedByte, 1, 1)) ){ /* Can't reestablish the shared lock. Sqlite can't deal, this is ** a critical I/O error */ rc = ((failed & SQLITE_IOERR) == SQLITE_IOERR) ? failed2 : SQLITE_IOERR_LOCK; goto afp_end_lock; } }else{ rc = failed; } } if( failed ){ rc = failed; } } if( rc==SQLITE_OK ){ pFile->eFileLock = eFileLock; pInode->eFileLock = eFileLock; }else if( eFileLock==EXCLUSIVE_LOCK ){ pFile->eFileLock = PENDING_LOCK; pInode->eFileLock = PENDING_LOCK; } afp_end_lock: unixLeaveMutex(); OSTRACE(("LOCK %d %s %s (afp)\n", pFile->h, azFileLock(eFileLock), rc==SQLITE_OK ? "ok" : "failed")); return rc; } /* ** Lower the locking level on file descriptor pFile to eFileLock. eFileLock ** must be either NO_LOCK or SHARED_LOCK. ** ** If the locking level of the file descriptor is already at or below ** the requested locking level, this routine is a no-op. */ static int afpUnlock(sqlite3_file *id, int eFileLock) { int rc = SQLITE_OK; unixFile *pFile = (unixFile*)id; unixInodeInfo *pInode; afpLockingContext *context = (afpLockingContext *) pFile->lockingContext; int skipShared = 0; #ifdef SQLITE_TEST int h = pFile->h; #endif assert( pFile ); OSTRACE(("UNLOCK %d %d was %d(%d,%d) pid=%d (afp)\n", pFile->h, eFileLock, pFile->eFileLock, pFile->pInode->eFileLock, pFile->pInode->nShared, osGetpid(0))); assert( eFileLock<=SHARED_LOCK ); if( pFile->eFileLock<=eFileLock ){ return SQLITE_OK; } unixEnterMutex(); pInode = pFile->pInode; assert( pInode->nShared!=0 ); if( pFile->eFileLock>SHARED_LOCK ){ assert( pInode->eFileLock==pFile->eFileLock ); SimulateIOErrorBenign(1); SimulateIOError( h=(-1) ) SimulateIOErrorBenign(0); #ifdef SQLITE_DEBUG /* When reducing a lock such that other processes can start ** reading the database file again, make sure that the ** transaction counter was updated if any part of the database ** file changed. If the transaction counter is not updated, ** other connections to the same file might not realize that ** the file has changed and hence might not know to flush their ** cache. The use of a stale cache can lead to database corruption. */ assert( pFile->inNormalWrite==0 || pFile->dbUpdate==0 || pFile->transCntrChng==1 ); pFile->inNormalWrite = 0; #endif if( pFile->eFileLock==EXCLUSIVE_LOCK ){ rc = afpSetLock(context->dbPath, pFile, SHARED_FIRST, SHARED_SIZE, 0); if( rc==SQLITE_OK && (eFileLock==SHARED_LOCK || pInode->nShared>1) ){ /* only re-establish the shared lock if necessary */ int sharedLockByte = SHARED_FIRST+pInode->sharedByte; rc = afpSetLock(context->dbPath, pFile, sharedLockByte, 1, 1); } else { skipShared = 1; } } if( rc==SQLITE_OK && pFile->eFileLock>=PENDING_LOCK ){ rc = afpSetLock(context->dbPath, pFile, PENDING_BYTE, 1, 0); } if( rc==SQLITE_OK && pFile->eFileLock>=RESERVED_LOCK && context->reserved ){ rc = afpSetLock(context->dbPath, pFile, RESERVED_BYTE, 1, 0); if( !rc ){ context->reserved = 0; } } if( rc==SQLITE_OK && (eFileLock==SHARED_LOCK || pInode->nShared>1)){ pInode->eFileLock = SHARED_LOCK; } } if( rc==SQLITE_OK && eFileLock==NO_LOCK ){ /* Decrement the shared lock counter. 
Release the lock using an ** OS call only when all threads in this same process have released ** the lock. */ unsigned long long sharedLockByte = SHARED_FIRST+pInode->sharedByte; pInode->nShared--; if( pInode->nShared==0 ){ SimulateIOErrorBenign(1); SimulateIOError( h=(-1) ) SimulateIOErrorBenign(0); if( !skipShared ){ rc = afpSetLock(context->dbPath, pFile, sharedLockByte, 1, 0); } if( !rc ){ pInode->eFileLock = NO_LOCK; pFile->eFileLock = NO_LOCK; } } if( rc==SQLITE_OK ){ pInode->nLock--; assert( pInode->nLock>=0 ); if( pInode->nLock==0 ){ closePendingFds(pFile); } } } unixLeaveMutex(); if( rc==SQLITE_OK ) pFile->eFileLock = eFileLock; return rc; } /* ** Close a file & cleanup AFP specific locking context */ static int afpClose(sqlite3_file *id) { int rc = SQLITE_OK; if( id ){ unixFile *pFile = (unixFile*)id; afpUnlock(id, NO_LOCK); unixEnterMutex(); if( pFile->pInode && pFile->pInode->nLock ){ /* If there are outstanding locks, do not actually close the file just ** yet because that would clear those locks. Instead, add the file ** descriptor to pInode->aPending. It will be automatically closed when ** the last lock is cleared. */ setPendingFd(pFile); } releaseInodeInfo(pFile); sqlite3_free(pFile->lockingContext); rc = closeUnixFile(id); unixLeaveMutex(); } return rc; } #endif /* defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE */ /* ** The code above is the AFP lock implementation. The code is specific ** to MacOSX and does not work on other unix platforms. No alternative ** is available. If you don't compile for a mac, then the "unix-afp" ** VFS is not available. ** ********************* End of the AFP lock implementation ********************** ******************************************************************************/ /****************************************************************************** *************************** Begin NFS Locking ********************************/ #if defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE /* ** Lower the locking level on file descriptor pFile to eFileLock. eFileLock ** must be either NO_LOCK or SHARED_LOCK. ** ** If the locking level of the file descriptor is already at or below ** the requested locking level, this routine is a no-op. */ static int nfsUnlock(sqlite3_file *id, int eFileLock){ return posixUnlock(id, eFileLock, 1); } #endif /* defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE */ /* ** The code above is the NFS lock implementation. The code is specific ** to MacOSX and does not work on other unix platforms. No alternative ** is available. ** ********************* End of the NFS lock implementation ********************** ******************************************************************************/ /****************************************************************************** **************** Non-locking sqlite3_file methods ***************************** ** ** The next division contains implementations for all methods of the ** sqlite3_file object other than the locking methods. The locking ** methods were defined in divisions above (one locking method per ** division). Those methods that are common to all locking modes ** are gather together into this division. */ /* ** Seek to the offset passed as the second argument, then read cnt ** bytes into pBuf. Return the number of bytes actually read. ** ** NB: If you define USE_PREAD or USE_PREAD64, then it might also ** be necessary to define _XOPEN_SOURCE to be 500. This varies from ** one system to another. 
Since SQLite does not define USE_PREAD ** in any form by default, we will not attempt to define _XOPEN_SOURCE. ** See tickets #2741 and #2681. ** ** To avoid stomping the errno value on a failed read the lastErrno value ** is set before returning. */ static int seekAndRead(unixFile *id, sqlite3_int64 offset, void *pBuf, int cnt){ int got; int prior = 0; #if (!defined(USE_PREAD) && !defined(USE_PREAD64)) i64 newOffset; #endif TIMER_START; assert( cnt==(cnt&0x1ffff) ); assert( id->h>2 ); cnt &= 0x1ffff; do{ #if defined(USE_PREAD) got = osPread(id->h, pBuf, cnt, offset); SimulateIOError( got = -1 ); #elif defined(USE_PREAD64) got = osPread64(id->h, pBuf, cnt, offset); SimulateIOError( got = -1 ); #else newOffset = lseek(id->h, offset, SEEK_SET); SimulateIOError( newOffset-- ); if( newOffset!=offset ){ if( newOffset == -1 ){ storeLastErrno((unixFile*)id, errno); }else{ storeLastErrno((unixFile*)id, 0); } return -1; } got = osRead(id->h, pBuf, cnt); #endif if( got==cnt ) break; if( got<0 ){ if( errno==EINTR ){ got = 1; continue; } prior = 0; storeLastErrno((unixFile*)id, errno); break; }else if( got>0 ){ cnt -= got; offset += got; prior += got; pBuf = (void*)(got + (char*)pBuf); } }while( got>0 ); TIMER_END; OSTRACE(("READ %-3d %5d %7lld %llu\n", id->h, got+prior, offset-prior, TIMER_ELAPSED)); return got+prior; } /* ** Read data from a file into a buffer. Return SQLITE_OK if all ** bytes were read successfully and SQLITE_IOERR if anything goes ** wrong. */ static int unixRead( sqlite3_file *id, void *pBuf, int amt, sqlite3_int64 offset ){ unixFile *pFile = (unixFile *)id; int got; assert( id ); assert( offset>=0 ); assert( amt>0 ); /* If this is a database file (not a journal, master-journal or temp ** file), the bytes in the locking range should never be read or written. */ #if 0 assert( pFile->pUnused==0 || offset>=PENDING_BYTE+512 || offset+amt<=PENDING_BYTE ); #endif #if SQLITE_MAX_MMAP_SIZE>0 /* Deal with as much of this read request as possible by transfering ** data from the memory mapping using memcpy(). */ if( offsetmmapSize ){ if( offset+amt <= pFile->mmapSize ){ memcpy(pBuf, &((u8 *)(pFile->pMapRegion))[offset], amt); return SQLITE_OK; }else{ int nCopy = pFile->mmapSize - offset; memcpy(pBuf, &((u8 *)(pFile->pMapRegion))[offset], nCopy); pBuf = &((u8 *)pBuf)[nCopy]; amt -= nCopy; offset += nCopy; } } #endif got = seekAndRead(pFile, offset, pBuf, amt); if( got==amt ){ return SQLITE_OK; }else if( got<0 ){ /* lastErrno set by seekAndRead */ return SQLITE_IOERR_READ; }else{ storeLastErrno(pFile, 0); /* not a system error */ /* Unread parts of the buffer must be zero-filled */ memset(&((char*)pBuf)[got], 0, amt-got); return SQLITE_IOERR_SHORT_READ; } } /* ** Attempt to seek the file-descriptor passed as the first argument to ** absolute offset iOff, then attempt to write nBuf bytes of data from ** pBuf to it. If an error occurs, return -1 and set *piErrno. Otherwise, ** return the actual number of bytes written (which may be less than ** nBuf). 
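*/

#if 0
/* Illustrative sketch (editorial, not part of SQLite): a pread() loop in the
** style of seekAndRead() above - restart on EINTR and keep reading after a
** short read until the requested byte count is reached or EOF/error.  The
** same shape works for pwrite() (compare seekAndWriteFd() below).  The file
** name is hypothetical; some systems need -D_XOPEN_SOURCE=500 for pread(). */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Read nBuf bytes starting at iOff.  Returns bytes actually read, or -1. */
static ssize_t demo_full_pread(int fd, void *pBuf, size_t nBuf, off_t iOff){
  size_t nDone = 0;
  while( nDone<nBuf ){
    ssize_t got = pread(fd, (char*)pBuf+nDone, nBuf-nDone, iOff+(off_t)nDone);
    if( got<0 ){
      if( errno==EINTR ) continue;    /* interrupted: just retry */
      return -1;
    }
    if( got==0 ) break;               /* EOF: short read */
    nDone += (size_t)got;
  }
  return (ssize_t)nDone;
}

int main(void){
  char aBuf[512];
  int fd = open("demo.db", O_RDONLY);
  if( fd<0 ){ perror("open"); return 1; }
  printf("read %zd bytes\n", demo_full_pread(fd, aBuf, sizeof(aBuf), 0));
  close(fd);
  return 0;
}
#endif

/*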
*/ static int seekAndWriteFd( int fd, /* File descriptor to write to */ i64 iOff, /* File offset to begin writing at */ const void *pBuf, /* Copy data from this buffer to the file */ int nBuf, /* Size of buffer pBuf in bytes */ int *piErrno /* OUT: Error number if error occurs */ ){ int rc = 0; /* Value returned by system call */ assert( nBuf==(nBuf&0x1ffff) ); assert( fd>2 ); nBuf &= 0x1ffff; TIMER_START; #if defined(USE_PREAD) do{ rc = (int)osPwrite(fd, pBuf, nBuf, iOff); }while( rc<0 && errno==EINTR ); #elif defined(USE_PREAD64) do{ rc = (int)osPwrite64(fd, pBuf, nBuf, iOff);}while( rc<0 && errno==EINTR); #else do{ i64 iSeek = lseek(fd, iOff, SEEK_SET); SimulateIOError( iSeek-- ); if( iSeek!=iOff ){ if( piErrno ) *piErrno = (iSeek==-1 ? errno : 0); return -1; } rc = osWrite(fd, pBuf, nBuf); }while( rc<0 && errno==EINTR ); #endif TIMER_END; OSTRACE(("WRITE %-3d %5d %7lld %llu\n", fd, rc, iOff, TIMER_ELAPSED)); if( rc<0 && piErrno ) *piErrno = errno; return rc; } /* ** Seek to the offset in id->offset then read cnt bytes into pBuf. ** Return the number of bytes actually read. Update the offset. ** ** To avoid stomping the errno value on a failed write the lastErrno value ** is set before returning. */ static int seekAndWrite(unixFile *id, i64 offset, const void *pBuf, int cnt){ return seekAndWriteFd(id->h, offset, pBuf, cnt, &id->lastErrno); } /* ** Write data from a buffer into a file. Return SQLITE_OK on success ** or some other error code on failure. */ static int unixWrite( sqlite3_file *id, const void *pBuf, int amt, sqlite3_int64 offset ){ unixFile *pFile = (unixFile*)id; int wrote = 0; assert( id ); assert( amt>0 ); /* If this is a database file (not a journal, master-journal or temp ** file), the bytes in the locking range should never be read or written. */ #if 0 assert( pFile->pUnused==0 || offset>=PENDING_BYTE+512 || offset+amt<=PENDING_BYTE ); #endif #ifdef SQLITE_DEBUG /* If we are doing a normal write to a database file (as opposed to ** doing a hot-journal rollback or a write to some file other than a ** normal database file) then record the fact that the database ** has changed. If the transaction counter is modified, record that ** fact too. */ if( pFile->inNormalWrite ){ pFile->dbUpdate = 1; /* The database has been modified */ if( offset<=24 && offset+amt>=27 ){ int rc; char oldCntr[4]; SimulateIOErrorBenign(1); rc = seekAndRead(pFile, 24, oldCntr, 4); SimulateIOErrorBenign(0); if( rc!=4 || memcmp(oldCntr, &((char*)pBuf)[24-offset], 4)!=0 ){ pFile->transCntrChng = 1; /* The transaction counter has changed */ } } } #endif #if SQLITE_MAX_MMAP_SIZE>0 /* Deal with as much of this write request as possible by transfering ** data from the memory mapping using memcpy(). */ if( offsetmmapSize ){ if( offset+amt <= pFile->mmapSize ){ memcpy(&((u8 *)(pFile->pMapRegion))[offset], pBuf, amt); return SQLITE_OK; }else{ int nCopy = pFile->mmapSize - offset; memcpy(&((u8 *)(pFile->pMapRegion))[offset], pBuf, nCopy); pBuf = &((u8 *)pBuf)[nCopy]; amt -= nCopy; offset += nCopy; } } #endif while( amt>0 && (wrote = seekAndWrite(pFile, offset, pBuf, amt))>0 ){ amt -= wrote; offset += wrote; pBuf = &((char*)pBuf)[wrote]; } SimulateIOError(( wrote=(-1), amt=1 )); SimulateDiskfullError(( wrote=0, amt=1 )); if( amt>0 ){ if( wrote<0 && pFile->lastErrno!=ENOSPC ){ /* lastErrno set by seekAndWrite */ return SQLITE_IOERR_WRITE; }else{ storeLastErrno(pFile, 0); /* not a system error */ return SQLITE_FULL; } } return SQLITE_OK; } #ifdef SQLITE_TEST /* ** Count the number of fullsyncs and normal syncs. 
This is used to test ** that syncs and fullsyncs are occurring at the right times. */ SQLITE_API int sqlite3_sync_count = 0; SQLITE_API int sqlite3_fullsync_count = 0; #endif /* ** We do not trust systems to provide a working fdatasync(). Some do. ** Others do no. To be safe, we will stick with the (slightly slower) ** fsync(). If you know that your system does support fdatasync() correctly, ** then simply compile with -Dfdatasync=fdatasync or -DHAVE_FDATASYNC */ #if !defined(fdatasync) && !HAVE_FDATASYNC # define fdatasync fsync #endif /* ** Define HAVE_FULLFSYNC to 0 or 1 depending on whether or not ** the F_FULLFSYNC macro is defined. F_FULLFSYNC is currently ** only available on Mac OS X. But that could change. */ #ifdef F_FULLFSYNC # define HAVE_FULLFSYNC 1 #else # define HAVE_FULLFSYNC 0 #endif /* ** The fsync() system call does not work as advertised on many ** unix systems. The following procedure is an attempt to make ** it work better. ** ** The SQLITE_NO_SYNC macro disables all fsync()s. This is useful ** for testing when we want to run through the test suite quickly. ** You are strongly advised *not* to deploy with SQLITE_NO_SYNC ** enabled, however, since with SQLITE_NO_SYNC enabled, an OS crash ** or power failure will likely corrupt the database file. ** ** SQLite sets the dataOnly flag if the size of the file is unchanged. ** The idea behind dataOnly is that it should only write the file content ** to disk, not the inode. We only set dataOnly if the file size is ** unchanged since the file size is part of the inode. However, ** Ted Ts'o tells us that fdatasync() will also write the inode if the ** file size has changed. The only real difference between fdatasync() ** and fsync(), Ted tells us, is that fdatasync() will not flush the ** inode if the mtime or owner or other inode attributes have changed. ** We only care about the file size, not the other file attributes, so ** as far as SQLite is concerned, an fdatasync() is always adequate. ** So, we always use fdatasync() if it is available, regardless of ** the value of the dataOnly flag. */ static int full_fsync(int fd, int fullSync, int dataOnly){ int rc; /* The following "ifdef/elif/else/" block has the same structure as ** the one below. It is replicated here solely to avoid cluttering ** up the real code with the UNUSED_PARAMETER() macros. */ #ifdef SQLITE_NO_SYNC UNUSED_PARAMETER(fd); UNUSED_PARAMETER(fullSync); UNUSED_PARAMETER(dataOnly); #elif HAVE_FULLFSYNC UNUSED_PARAMETER(dataOnly); #else UNUSED_PARAMETER(fullSync); UNUSED_PARAMETER(dataOnly); #endif /* Record the number of times that we do a normal fsync() and ** FULLSYNC. This is used during testing to verify that this procedure ** gets called with the correct arguments. */ #ifdef SQLITE_TEST if( fullSync ) sqlite3_fullsync_count++; sqlite3_sync_count++; #endif /* If we compiled with the SQLITE_NO_SYNC flag, then syncing is a ** no-op */ #ifdef SQLITE_NO_SYNC rc = SQLITE_OK; #elif HAVE_FULLFSYNC if( fullSync ){ rc = osFcntl(fd, F_FULLFSYNC, 0); }else{ rc = 1; } /* If the FULLFSYNC failed, fall back to attempting an fsync(). ** It shouldn't be possible for fullfsync to fail on the local ** file system (on OSX), so failure indicates that FULLFSYNC ** isn't supported for this file system. So, attempt an fsync ** and (for now) ignore the overhead of a superfluous fcntl call. ** It'd be better to detect fullfsync support once and avoid ** the fcntl call every time sync is called. 
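*/

#if 0
/* Illustrative sketch (editorial, not part of SQLite): the sync-selection
** policy described above, reduced to a stand-alone helper - prefer
** F_FULLFSYNC where it exists (OS X), fall back to fdatasync() when the
** build says it is trustworthy, otherwise use plain fsync().  The
** DEMO_HAVE_FDATASYNC macro is a made-up stand-in for HAVE_FDATASYNC. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int demo_full_sync(int fd, int fullSync){
  int rc = -1;
#ifdef F_FULLFSYNC
  if( fullSync ){
    rc = fcntl(fd, F_FULLFSYNC, 0);   /* push data through the drive cache */
  }
  if( rc ) rc = fsync(fd);            /* not requested or unsupported: fsync */
#elif defined(DEMO_HAVE_FDATASYNC)
  (void)fullSync;
  rc = fdatasync(fd);                 /* data and size only, not all metadata */
#else
  (void)fullSync;
  rc = fsync(fd);                     /* the safe default */
#endif
  return rc;
}

int main(void){
  int fd = open("demo.db", O_RDWR | O_CREAT, 0644);
  if( fd<0 ){ perror("open"); return 1; }
  if( demo_full_sync(fd, 1) ) perror("sync");
  close(fd);
  return 0;
}
#endif

/*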
*/ if( rc ) rc = fsync(fd); #elif defined(__APPLE__) /* fdatasync() on HFS+ doesn't yet flush the file size if it changed correctly ** so currently we default to the macro that redefines fdatasync to fsync */ rc = fsync(fd); #else rc = fdatasync(fd); #if OS_VXWORKS if( rc==-1 && errno==ENOTSUP ){ rc = fsync(fd); } #endif /* OS_VXWORKS */ #endif /* ifdef SQLITE_NO_SYNC elif HAVE_FULLFSYNC */ if( OS_VXWORKS && rc!= -1 ){ rc = 0; } return rc; } /* ** Open a file descriptor to the directory containing file zFilename. ** If successful, *pFd is set to the opened file descriptor and ** SQLITE_OK is returned. If an error occurs, either SQLITE_NOMEM ** or SQLITE_CANTOPEN is returned and *pFd is set to an undefined ** value. ** ** The directory file descriptor is used for only one thing - to ** fsync() a directory to make sure file creation and deletion events ** are flushed to disk. Such fsyncs are not needed on newer ** journaling filesystems, but are required on older filesystems. ** ** This routine can be overridden using the xSetSysCall interface. ** The ability to override this routine was added in support of the ** chromium sandbox. Opening a directory is a security risk (we are ** told) so making it overrideable allows the chromium sandbox to ** replace this routine with a harmless no-op. To make this routine ** a no-op, replace it with a stub that returns SQLITE_OK but leaves ** *pFd set to a negative number. ** ** If SQLITE_OK is returned, the caller is responsible for closing ** the file descriptor *pFd using close(). */ static int openDirectory(const char *zFilename, int *pFd){ int ii; int fd = -1; char zDirname[MAX_PATHNAME+1]; sqlite3_snprintf(MAX_PATHNAME, zDirname, "%s", zFilename); for(ii=(int)strlen(zDirname); ii>1 && zDirname[ii]!='/'; ii--); if( ii>0 ){ zDirname[ii] = '\0'; fd = robust_open(zDirname, O_RDONLY|O_BINARY, 0); if( fd>=0 ){ OSTRACE(("OPENDIR %-3d %s\n", fd, zDirname)); } } *pFd = fd; return (fd>=0?SQLITE_OK:unixLogError(SQLITE_CANTOPEN_BKPT, "open", zDirname)); } /* ** Make sure all writes to a particular file are committed to disk. ** ** If dataOnly==0 then both the file itself and its metadata (file ** size, access time, etc) are synced. If dataOnly!=0 then only the ** file data is synced. ** ** Under Unix, also make sure that the directory entry for the file ** has been created by fsync-ing the directory that contains the file. ** If we do not do this and we encounter a power failure, the directory ** entry for the journal might not exist after we reboot. The next ** SQLite to access the file will not know that the journal exists (because ** the directory entry for the journal was never created) and the transaction ** will not roll back - possibly leading to database corruption. */ static int unixSync(sqlite3_file *id, int flags){ int rc; unixFile *pFile = (unixFile*)id; int isDataOnly = (flags&SQLITE_SYNC_DATAONLY); int isFullsync = (flags&0x0F)==SQLITE_SYNC_FULL; /* Check that one of SQLITE_SYNC_NORMAL or FULL was passed */ assert((flags&0x0F)==SQLITE_SYNC_NORMAL || (flags&0x0F)==SQLITE_SYNC_FULL ); /* Unix cannot, but some systems may return SQLITE_FULL from here. This ** line is to test that doing so does not cause any problems. 
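*/

#if 0
/* Illustrative sketch (editorial, not part of SQLite): making a freshly
** created file durable across a power failure by fsync()ing both the file
** itself and the directory that holds its name - the behaviour unixSync()
** implements when the UNIXFILE_DIRSYNC flag is set, using openDirectory()
** above.  Paths are hypothetical and error handling is abbreviated. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void){
  int fd, dirfd;

  fd = open("journal-demo", O_RDWR | O_CREAT, 0644);
  if( fd<0 ){ perror("open file"); return 1; }
  (void)write(fd, "hello", 5);
  fsync(fd);                              /* flush the file contents */
  close(fd);

  /* Now flush the directory entry so the name itself survives a crash. */
  dirfd = open(".", O_RDONLY);
  if( dirfd<0 ){ perror("open dir"); return 1; }
  if( fsync(dirfd) ) perror("fsync dir"); /* some filesystems refuse; ignore */
  close(dirfd);
  return 0;
}
#endif

/*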
*/ SimulateDiskfullError( return SQLITE_FULL ); assert( pFile ); OSTRACE(("SYNC %-3d\n", pFile->h)); rc = full_fsync(pFile->h, isFullsync, isDataOnly); SimulateIOError( rc=1 ); if( rc ){ storeLastErrno(pFile, errno); return unixLogError(SQLITE_IOERR_FSYNC, "full_fsync", pFile->zPath); } /* Also fsync the directory containing the file if the DIRSYNC flag ** is set. This is a one-time occurrence. Many systems (examples: AIX) ** are unable to fsync a directory, so ignore errors on the fsync. */ if( pFile->ctrlFlags & UNIXFILE_DIRSYNC ){ int dirfd; OSTRACE(("DIRSYNC %s (have_fullfsync=%d fullsync=%d)\n", pFile->zPath, HAVE_FULLFSYNC, isFullsync)); rc = osOpenDirectory(pFile->zPath, &dirfd); if( rc==SQLITE_OK && dirfd>=0 ){ full_fsync(dirfd, 0, 0); robust_close(pFile, dirfd, __LINE__); }else if( rc==SQLITE_CANTOPEN ){ rc = SQLITE_OK; } pFile->ctrlFlags &= ~UNIXFILE_DIRSYNC; } return rc; } /* ** Truncate an open file to a specified size */ static int unixTruncate(sqlite3_file *id, i64 nByte){ unixFile *pFile = (unixFile *)id; int rc; assert( pFile ); SimulateIOError( return SQLITE_IOERR_TRUNCATE ); /* If the user has configured a chunk-size for this file, truncate the ** file so that it consists of an integer number of chunks (i.e. the ** actual file size after the operation may be larger than the requested ** size). */ if( pFile->szChunk>0 ){ nByte = ((nByte + pFile->szChunk - 1)/pFile->szChunk) * pFile->szChunk; } rc = robust_ftruncate(pFile->h, nByte); if( rc ){ storeLastErrno(pFile, errno); return unixLogError(SQLITE_IOERR_TRUNCATE, "ftruncate", pFile->zPath); }else{ #ifdef SQLITE_DEBUG /* If we are doing a normal write to a database file (as opposed to ** doing a hot-journal rollback or a write to some file other than a ** normal database file) and we truncate the file to zero length, ** that effectively updates the change counter. This might happen ** when restoring a database using the backup API from a zero-length ** source. */ if( pFile->inNormalWrite && nByte==0 ){ pFile->transCntrChng = 1; } #endif #if SQLITE_MAX_MMAP_SIZE>0 /* If the file was just truncated to a size smaller than the currently ** mapped region, reduce the effective mapping size as well. SQLite will ** use read() and write() to access data beyond this point from now on. */ if( nBytemmapSize ){ pFile->mmapSize = nByte; } #endif return SQLITE_OK; } } /* ** Determine the current size of a file in bytes */ static int unixFileSize(sqlite3_file *id, i64 *pSize){ int rc; struct stat buf; assert( id ); rc = osFstat(((unixFile*)id)->h, &buf); SimulateIOError( rc=1 ); if( rc!=0 ){ storeLastErrno((unixFile*)id, errno); return SQLITE_IOERR_FSTAT; } *pSize = buf.st_size; /* When opening a zero-size database, the findInodeInfo() procedure ** writes a single byte into that file in order to work around a bug ** in the OS-X msdos filesystem. In order to avoid problems with upper ** layers, we need to report this file size as zero even though it is ** really 1. Ticket #3260. */ if( *pSize==1 ) *pSize = 0; return SQLITE_OK; } #if SQLITE_ENABLE_LOCKING_STYLE && defined(__APPLE__) /* ** Handler for proxy-locking file-control verbs. Defined below in the ** proxying locking division. */ static int proxyFileControl(sqlite3_file*,int,void*); #endif /* ** This function is called to handle the SQLITE_FCNTL_SIZE_HINT ** file-control operation. Enlarge the database to nBytes in size ** (rounded up to the next chunk-size). If the database is already ** nBytes or larger, this routine is a no-op. 
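*/

#if 0
/* Illustrative sketch (editorial, not part of SQLite): the chunk-size
** rounding used by unixTruncate() above and fcntlSizeHint() below - sizes
** are rounded up to the next multiple of szChunk.  For example, with
** szChunk==4096 a request for 10000 bytes becomes 12288. */
#include <stdio.h>

static long long demo_round_up(long long nByte, int szChunk){
  return ((nByte + szChunk - 1) / szChunk) * szChunk;
}

int main(void){
  printf("%lld\n", demo_round_up(10000, 4096));   /* prints 12288 */
  printf("%lld\n", demo_round_up(12288, 4096));   /* already aligned: 12288 */
  return 0;
}
#endif

/*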
*/ static int fcntlSizeHint(unixFile *pFile, i64 nByte){ if( pFile->szChunk>0 ){ i64 nSize; /* Required file size */ struct stat buf; /* Used to hold return values of fstat() */ if( osFstat(pFile->h, &buf) ){ return SQLITE_IOERR_FSTAT; } nSize = ((nByte+pFile->szChunk-1) / pFile->szChunk) * pFile->szChunk; if( nSize>(i64)buf.st_size ){ #if defined(HAVE_POSIX_FALLOCATE) && HAVE_POSIX_FALLOCATE /* The code below is handling the return value of osFallocate() ** correctly. posix_fallocate() is defined to "returns zero on success, ** or an error number on failure". See the manpage for details. */ int err; do{ err = osFallocate(pFile->h, buf.st_size, nSize-buf.st_size); }while( err==EINTR ); if( err ) return SQLITE_IOERR_WRITE; #else /* If the OS does not have posix_fallocate(), fake it. Write a ** single byte to the last byte in each block that falls entirely ** within the extended region. Then, if required, a single byte ** at offset (nSize-1), to set the size of the file correctly. ** This is a similar technique to that used by glibc on systems ** that do not have a real fallocate() call. */ int nBlk = buf.st_blksize; /* File-system block size */ int nWrite = 0; /* Number of bytes written by seekAndWrite */ i64 iWrite; /* Next offset to write to */ iWrite = ((buf.st_size + 2*nBlk - 1)/nBlk)*nBlk-1; assert( iWrite>=buf.st_size ); assert( (iWrite/nBlk)==((buf.st_size+nBlk-1)/nBlk) ); assert( ((iWrite+1)%nBlk)==0 ); for(/*no-op*/; iWrite0 if( pFile->mmapSizeMax>0 && nByte>pFile->mmapSize ){ int rc; if( pFile->szChunk<=0 ){ if( robust_ftruncate(pFile->h, nByte) ){ storeLastErrno(pFile, errno); return unixLogError(SQLITE_IOERR_TRUNCATE, "ftruncate", pFile->zPath); } } rc = unixMapfile(pFile, nByte); return rc; } #endif return SQLITE_OK; } /* ** If *pArg is initially negative then this is a query. Set *pArg to ** 1 or 0 depending on whether or not bit mask of pFile->ctrlFlags is set. ** ** If *pArg is 0 or 1, then clear or set the mask bit of pFile->ctrlFlags. */ static void unixModeBit(unixFile *pFile, unsigned char mask, int *pArg){ if( *pArg<0 ){ *pArg = (pFile->ctrlFlags & mask)!=0; }else if( (*pArg)==0 ){ pFile->ctrlFlags &= ~mask; }else{ pFile->ctrlFlags |= mask; } } /* Forward declaration */ static int unixGetTempname(int nBuf, char *zBuf); /* ** Information and control of an open file handle. 
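*/

#if 0
/* Illustrative sketch (editorial, not part of SQLite): pre-allocating space
** the way fcntlSizeHint() above does - call posix_fallocate(), retrying on
** EINTR (it returns the error number directly rather than setting errno),
** and fall back to a plain ftruncate() if the filesystem does not support
** it.  The real code's fallback instead writes one byte per block, as
** described above.  File name and helper are hypothetical. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int demo_size_hint(int fd, off_t nByte){
  int err;
  do{
    err = posix_fallocate(fd, 0, nByte);
  }while( err==EINTR );
  if( err==EINVAL || err==EOPNOTSUPP ){
    /* Not supported here: just set the size without reserving blocks. */
    return ftruncate(fd, nByte);
  }
  return err ? -1 : 0;
}

int main(void){
  int fd = open("demo.db", O_RDWR | O_CREAT, 0644);
  if( fd<0 ){ perror("open"); return 1; }
  if( demo_size_hint(fd, 1<<20) ) perror("size hint");  /* grow to 1 MiB */
  close(fd);
  return 0;
}
#endif

/*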
*/ static int unixFileControl(sqlite3_file *id, int op, void *pArg){ unixFile *pFile = (unixFile*)id; switch( op ){ case SQLITE_FCNTL_WAL_BLOCK: { /* pFile->ctrlFlags |= UNIXFILE_BLOCK; // Deferred feature */ return SQLITE_OK; } case SQLITE_FCNTL_LOCKSTATE: { *(int*)pArg = pFile->eFileLock; return SQLITE_OK; } case SQLITE_FCNTL_LAST_ERRNO: { *(int*)pArg = pFile->lastErrno; return SQLITE_OK; } case SQLITE_FCNTL_CHUNK_SIZE: { pFile->szChunk = *(int *)pArg; return SQLITE_OK; } case SQLITE_FCNTL_SIZE_HINT: { int rc; SimulateIOErrorBenign(1); rc = fcntlSizeHint(pFile, *(i64 *)pArg); SimulateIOErrorBenign(0); return rc; } case SQLITE_FCNTL_PERSIST_WAL: { unixModeBit(pFile, UNIXFILE_PERSIST_WAL, (int*)pArg); return SQLITE_OK; } case SQLITE_FCNTL_POWERSAFE_OVERWRITE: { unixModeBit(pFile, UNIXFILE_PSOW, (int*)pArg); return SQLITE_OK; } case SQLITE_FCNTL_VFSNAME: { *(char**)pArg = sqlite3_mprintf("%s", pFile->pVfs->zName); return SQLITE_OK; } case SQLITE_FCNTL_TEMPFILENAME: { char *zTFile = sqlite3_malloc64( pFile->pVfs->mxPathname ); if( zTFile ){ unixGetTempname(pFile->pVfs->mxPathname, zTFile); *(char**)pArg = zTFile; } return SQLITE_OK; } case SQLITE_FCNTL_HAS_MOVED: { *(int*)pArg = fileHasMoved(pFile); return SQLITE_OK; } #if SQLITE_MAX_MMAP_SIZE>0 case SQLITE_FCNTL_MMAP_SIZE: { i64 newLimit = *(i64*)pArg; int rc = SQLITE_OK; if( newLimit>sqlite3GlobalConfig.mxMmap ){ newLimit = sqlite3GlobalConfig.mxMmap; } *(i64*)pArg = pFile->mmapSizeMax; if( newLimit>=0 && newLimit!=pFile->mmapSizeMax && pFile->nFetchOut==0 ){ pFile->mmapSizeMax = newLimit; if( pFile->mmapSize>0 ){ unixUnmapfile(pFile); rc = unixMapfile(pFile, -1); } } return rc; } #endif #ifdef SQLITE_DEBUG /* The pager calls this method to signal that it has done ** a rollback and that the database is therefore unchanged and ** it hence it is OK for the transaction change counter to be ** unchanged. */ case SQLITE_FCNTL_DB_UNCHANGED: { ((unixFile*)id)->dbUpdate = 0; return SQLITE_OK; } #endif #if SQLITE_ENABLE_LOCKING_STYLE && defined(__APPLE__) case SQLITE_FCNTL_SET_LOCKPROXYFILE: case SQLITE_FCNTL_GET_LOCKPROXYFILE: { return proxyFileControl(id,op,pArg); } #endif /* SQLITE_ENABLE_LOCKING_STYLE && defined(__APPLE__) */ } return SQLITE_NOTFOUND; } /* ** Return the sector size in bytes of the underlying block device for ** the specified file. This is almost always 512 bytes, but may be ** larger for some devices. ** ** SQLite code assumes this function cannot fail. It also assumes that ** if two files are created in the same file-system directory (i.e. ** a database and its journal file) that the sector size will be the ** same for both. */ #ifndef __QNXNTO__ static int unixSectorSize(sqlite3_file *NotUsed){ UNUSED_PARAMETER(NotUsed); return SQLITE_DEFAULT_SECTOR_SIZE; } #endif /* ** The following version of unixSectorSize() is optimized for QNX. 
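*/

#if 0
/* Illustrative sketch (editorial, not part of SQLite's VFS code): how an
** application reaches the unixFileControl() verbs above through the public
** sqlite3_file_control() API.  Here a 1 MiB chunk size is set (so the file
** grows in large steps) and the name of the VFS handling the "main" database
** is queried.  The database file name is hypothetical. */
#include <stdio.h>
#include "sqlite3.h"

int main(void){
  sqlite3 *db;
  int szChunk = 1024*1024;
  char *zVfsName = 0;

  if( sqlite3_open("demo.db", &db)!=SQLITE_OK ) return 1;

  /* Handled by the SQLITE_FCNTL_CHUNK_SIZE case above. */
  sqlite3_file_control(db, "main", SQLITE_FCNTL_CHUNK_SIZE, &szChunk);

  /* Handled by the SQLITE_FCNTL_VFSNAME case above; the string is ours
  ** to free with sqlite3_free(). */
  if( sqlite3_file_control(db, "main", SQLITE_FCNTL_VFSNAME, &zVfsName)==SQLITE_OK
   && zVfsName ){
    printf("VFS in use: %s\n", zVfsName);
    sqlite3_free(zVfsName);
  }
  sqlite3_close(db);
  return 0;
}
#endif

/*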
*/ #ifdef __QNXNTO__ #include #include static int unixSectorSize(sqlite3_file *id){ unixFile *pFile = (unixFile*)id; if( pFile->sectorSize == 0 ){ struct statvfs fsInfo; /* Set defaults for non-supported filesystems */ pFile->sectorSize = SQLITE_DEFAULT_SECTOR_SIZE; pFile->deviceCharacteristics = 0; if( fstatvfs(pFile->h, &fsInfo) == -1 ) { return pFile->sectorSize; } if( !strcmp(fsInfo.f_basetype, "tmp") ) { pFile->sectorSize = fsInfo.f_bsize; pFile->deviceCharacteristics = SQLITE_IOCAP_ATOMIC4K | /* All ram filesystem writes are atomic */ SQLITE_IOCAP_SAFE_APPEND | /* growing the file does not occur until ** the write succeeds */ SQLITE_IOCAP_SEQUENTIAL | /* The ram filesystem has no write behind ** so it is ordered */ 0; }else if( strstr(fsInfo.f_basetype, "etfs") ){ pFile->sectorSize = fsInfo.f_bsize; pFile->deviceCharacteristics = /* etfs cluster size writes are atomic */ (pFile->sectorSize / 512 * SQLITE_IOCAP_ATOMIC512) | SQLITE_IOCAP_SAFE_APPEND | /* growing the file does not occur until ** the write succeeds */ SQLITE_IOCAP_SEQUENTIAL | /* The ram filesystem has no write behind ** so it is ordered */ 0; }else if( !strcmp(fsInfo.f_basetype, "qnx6") ){ pFile->sectorSize = fsInfo.f_bsize; pFile->deviceCharacteristics = SQLITE_IOCAP_ATOMIC | /* All filesystem writes are atomic */ SQLITE_IOCAP_SAFE_APPEND | /* growing the file does not occur until ** the write succeeds */ SQLITE_IOCAP_SEQUENTIAL | /* The ram filesystem has no write behind ** so it is ordered */ 0; }else if( !strcmp(fsInfo.f_basetype, "qnx4") ){ pFile->sectorSize = fsInfo.f_bsize; pFile->deviceCharacteristics = /* full bitset of atomics from max sector size and smaller */ ((pFile->sectorSize / 512 * SQLITE_IOCAP_ATOMIC512) << 1) - 2 | SQLITE_IOCAP_SEQUENTIAL | /* The ram filesystem has no write behind ** so it is ordered */ 0; }else if( strstr(fsInfo.f_basetype, "dos") ){ pFile->sectorSize = fsInfo.f_bsize; pFile->deviceCharacteristics = /* full bitset of atomics from max sector size and smaller */ ((pFile->sectorSize / 512 * SQLITE_IOCAP_ATOMIC512) << 1) - 2 | SQLITE_IOCAP_SEQUENTIAL | /* The ram filesystem has no write behind ** so it is ordered */ 0; }else{ pFile->deviceCharacteristics = SQLITE_IOCAP_ATOMIC512 | /* blocks are atomic */ SQLITE_IOCAP_SAFE_APPEND | /* growing the file does not occur until ** the write succeeds */ 0; } } /* Last chance verification. If the sector size isn't a multiple of 512 ** then it isn't valid.*/ if( pFile->sectorSize % 512 != 0 ){ pFile->deviceCharacteristics = 0; pFile->sectorSize = SQLITE_DEFAULT_SECTOR_SIZE; } return pFile->sectorSize; } #endif /* __QNXNTO__ */ /* ** Return the device characteristics for the file. ** ** This VFS is set up to return SQLITE_IOCAP_POWERSAFE_OVERWRITE by default. ** However, that choice is controversial since technically the underlying ** file system does not always provide powersafe overwrites. (In other ** words, after a power-loss event, parts of the file that were never ** written might end up being altered.) However, non-PSOW behavior is very, ** very rare. And asserting PSOW makes a large reduction in the amount ** of required I/O for journaling, since a lot of padding is eliminated. ** Hence, while POWERSAFE_OVERWRITE is on by default, there is a file-control ** available to turn it off and URI query parameter available to turn it off. 
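*/

#if 0
/* Illustrative sketch (editorial, not part of SQLite): querying the
** filesystem for its preferred block size with fstatvfs(), the same call the
** QNX-specific unixSectorSize() above builds on.  Only the portable f_bsize
** field is used here; the f_basetype checks above are QNX-specific.  The
** file name is hypothetical. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/statvfs.h>
#include <unistd.h>

int main(void){
  struct statvfs fsInfo;
  int fd = open("demo.db", O_RDWR | O_CREAT, 0644);
  if( fd<0 ){ perror("open"); return 1; }
  if( fstatvfs(fd, &fsInfo)==0 ){
    printf("filesystem block size: %lu\n", (unsigned long)fsInfo.f_bsize);
  }else{
    perror("fstatvfs");
  }
  close(fd);
  return 0;
}
#endif

/*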
*/ static int unixDeviceCharacteristics(sqlite3_file *id){ unixFile *p = (unixFile*)id; int rc = 0; #ifdef __QNXNTO__ if( p->sectorSize==0 ) unixSectorSize(id); rc = p->deviceCharacteristics; #endif if( p->ctrlFlags & UNIXFILE_PSOW ){ rc |= SQLITE_IOCAP_POWERSAFE_OVERWRITE; } return rc; } #if !defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0 /* ** Return the system page size. ** ** This function should not be called directly by other code in this file. ** Instead, it should be called via macro osGetpagesize(). */ static int unixGetpagesize(void){ #if OS_VXWORKS return 1024; #elif defined(_BSD_SOURCE) return getpagesize(); #else return (int)sysconf(_SC_PAGESIZE); #endif } #endif /* !defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0 */ #ifndef SQLITE_OMIT_WAL /* ** Object used to represent an shared memory buffer. ** ** When multiple threads all reference the same wal-index, each thread ** has its own unixShm object, but they all point to a single instance ** of this unixShmNode object. In other words, each wal-index is opened ** only once per process. ** ** Each unixShmNode object is connected to a single unixInodeInfo object. ** We could coalesce this object into unixInodeInfo, but that would mean ** every open file that does not use shared memory (in other words, most ** open files) would have to carry around this extra information. So ** the unixInodeInfo object contains a pointer to this unixShmNode object ** and the unixShmNode object is created only when needed. ** ** unixMutexHeld() must be true when creating or destroying ** this object or while reading or writing the following fields: ** ** nRef ** ** The following fields are read-only after the object is created: ** ** fid ** zFilename ** ** Either unixShmNode.mutex must be held or unixShmNode.nRef==0 and ** unixMutexHeld() is true when reading or writing any other field ** in this structure. */ struct unixShmNode { unixInodeInfo *pInode; /* unixInodeInfo that owns this SHM node */ sqlite3_mutex *mutex; /* Mutex to access this object */ char *zFilename; /* Name of the mmapped file */ int h; /* Open file descriptor */ int szRegion; /* Size of shared-memory regions */ u16 nRegion; /* Size of array apRegion */ u8 isReadonly; /* True if read-only */ char **apRegion; /* Array of mapped shared-memory regions */ int nRef; /* Number of unixShm objects pointing to this */ unixShm *pFirst; /* All unixShm objects pointing to this */ #ifdef SQLITE_DEBUG u8 exclMask; /* Mask of exclusive locks held */ u8 sharedMask; /* Mask of shared locks held */ u8 nextShmId; /* Next available unixShm.id value */ #endif }; /* ** Structure used internally by this VFS to record the state of an ** open shared memory connection. ** ** The following fields are initialized when this object is created and ** are read-only thereafter: ** ** unixShm.pFile ** unixShm.id ** ** All other fields are read/write. The unixShm.pFile->mutex must be held ** while accessing any read/write fields. 
*/ struct unixShm { unixShmNode *pShmNode; /* The underlying unixShmNode object */ unixShm *pNext; /* Next unixShm with the same unixShmNode */ u8 hasMutex; /* True if holding the unixShmNode mutex */ u8 id; /* Id of this connection within its unixShmNode */ u16 sharedMask; /* Mask of shared locks held */ u16 exclMask; /* Mask of exclusive locks held */ }; /* ** Constants used for locking */ #define UNIX_SHM_BASE ((22+SQLITE_SHM_NLOCK)*4) /* first lock byte */ #define UNIX_SHM_DMS (UNIX_SHM_BASE+SQLITE_SHM_NLOCK) /* deadman switch */ /* ** Apply posix advisory locks for all bytes from ofst through ofst+n-1. ** ** Locks block if the mask is exactly UNIX_SHM_C and are non-blocking ** otherwise. */ static int unixShmSystemLock( unixFile *pFile, /* Open connection to the WAL file */ int lockType, /* F_UNLCK, F_RDLCK, or F_WRLCK */ int ofst, /* First byte of the locking range */ int n /* Number of bytes to lock */ ){ unixShmNode *pShmNode; /* Apply locks to this open shared-memory segment */ struct flock f; /* The posix advisory locking structure */ int rc = SQLITE_OK; /* Result code form fcntl() */ /* Access to the unixShmNode object is serialized by the caller */ pShmNode = pFile->pInode->pShmNode; assert( sqlite3_mutex_held(pShmNode->mutex) || pShmNode->nRef==0 ); /* Shared locks never span more than one byte */ assert( n==1 || lockType!=F_RDLCK ); /* Locks are within range */ assert( n>=1 && nh>=0 ){ int lkType; /* Initialize the locking parameters */ memset(&f, 0, sizeof(f)); f.l_type = lockType; f.l_whence = SEEK_SET; f.l_start = ofst; f.l_len = n; lkType = (pFile->ctrlFlags & UNIXFILE_BLOCK)!=0 ? F_SETLKW : F_SETLK; rc = osFcntl(pShmNode->h, lkType, &f); rc = (rc!=(-1)) ? SQLITE_OK : SQLITE_BUSY; pFile->ctrlFlags &= ~UNIXFILE_BLOCK; } /* Update the global lock state and do debug tracing */ #ifdef SQLITE_DEBUG { u16 mask; OSTRACE(("SHM-LOCK ")); mask = ofst>31 ? 0xffff : (1<<(ofst+n)) - (1<exclMask &= ~mask; pShmNode->sharedMask &= ~mask; }else if( lockType==F_RDLCK ){ OSTRACE(("read-lock %d ok", ofst)); pShmNode->exclMask &= ~mask; pShmNode->sharedMask |= mask; }else{ assert( lockType==F_WRLCK ); OSTRACE(("write-lock %d ok", ofst)); pShmNode->exclMask |= mask; pShmNode->sharedMask &= ~mask; } }else{ if( lockType==F_UNLCK ){ OSTRACE(("unlock %d failed", ofst)); }else if( lockType==F_RDLCK ){ OSTRACE(("read-lock failed")); }else{ assert( lockType==F_WRLCK ); OSTRACE(("write-lock %d failed", ofst)); } } OSTRACE((" - afterwards %03x,%03x\n", pShmNode->sharedMask, pShmNode->exclMask)); } #endif return rc; } /* ** Return the minimum number of 32KB shm regions that should be mapped at ** a time, assuming that each mapping must be an integer multiple of the ** current system page-size. ** ** Usually, this is 1. The exception seems to be systems that are configured ** to use 64KB pages - in this case each mapping must cover at least two ** shm regions. 
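*/

#if 0
/* Illustrative sketch (editorial, not part of SQLite): the fcntl() advisory
** range-lock shape used by unixShmSystemLock() above - fill in a struct
** flock, then apply it with F_SETLK (non-blocking) or F_SETLKW (blocking).
** The offset below is a made-up stand-in for the UNIX_SHM_BASE lock bytes,
** and the file name is hypothetical. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void){
  struct flock f;
  int fd = open("demo.db-shm", O_RDWR | O_CREAT, 0644);
  if( fd<0 ){ perror("open"); return 1; }

  memset(&f, 0, sizeof(f));
  f.l_type = F_RDLCK;                 /* shared lock, as for SQLITE_SHM_SHARED */
  f.l_whence = SEEK_SET;
  f.l_start = 120;                    /* hypothetical lock-byte offset */
  f.l_len = 1;
  if( fcntl(fd, F_SETLK, &f)==-1 ){
    printf("busy\n");                 /* a conflicting lock is held elsewhere */
  }else{
    printf("locked\n");
    f.l_type = F_UNLCK;               /* release it again */
    fcntl(fd, F_SETLK, &f);
  }
  close(fd);
  return 0;
}
#endif

/*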
*/ static int unixShmRegionPerMap(void){ int shmsz = 32*1024; /* SHM region size */ int pgsz = osGetpagesize(); /* System page size */ assert( ((pgsz-1)&pgsz)==0 ); /* Page size must be a power of 2 */ if( pgszpInode->pShmNode; assert( unixMutexHeld() ); if( p && p->nRef==0 ){ int nShmPerMap = unixShmRegionPerMap(); int i; assert( p->pInode==pFd->pInode ); sqlite3_mutex_free(p->mutex); for(i=0; inRegion; i+=nShmPerMap){ if( p->h>=0 ){ osMunmap(p->apRegion[i], p->szRegion); }else{ sqlite3_free(p->apRegion[i]); } } sqlite3_free(p->apRegion); if( p->h>=0 ){ robust_close(pFd, p->h, __LINE__); p->h = -1; } p->pInode->pShmNode = 0; sqlite3_free(p); } } /* ** Open a shared-memory area associated with open database file pDbFd. ** This particular implementation uses mmapped files. ** ** The file used to implement shared-memory is in the same directory ** as the open database file and has the same name as the open database ** file with the "-shm" suffix added. For example, if the database file ** is "/home/user1/config.db" then the file that is created and mmapped ** for shared memory will be called "/home/user1/config.db-shm". ** ** Another approach to is to use files in /dev/shm or /dev/tmp or an ** some other tmpfs mount. But if a file in a different directory ** from the database file is used, then differing access permissions ** or a chroot() might cause two different processes on the same ** database to end up using different files for shared memory - ** meaning that their memory would not really be shared - resulting ** in database corruption. Nevertheless, this tmpfs file usage ** can be enabled at compile-time using -DSQLITE_SHM_DIRECTORY="/dev/shm" ** or the equivalent. The use of the SQLITE_SHM_DIRECTORY compile-time ** option results in an incompatible build of SQLite; builds of SQLite ** that with differing SQLITE_SHM_DIRECTORY settings attempt to use the ** same database file at the same time, database corruption will likely ** result. The SQLITE_SHM_DIRECTORY compile-time option is considered ** "unsupported" and may go away in a future SQLite release. ** ** When opening a new shared-memory file, if no other instances of that ** file are currently open, in this process or in other processes, then ** the file must be truncated to zero length or have its header cleared. ** ** If the original database file (pDbFd) is using the "unix-excl" VFS ** that means that an exclusive lock is held on the database file and ** that no other processes are able to read or write the database. In ** that case, we do not really need shared memory. No shared memory ** file is created. The shared memory will be simulated with heap memory. */ static int unixOpenSharedMemory(unixFile *pDbFd){ struct unixShm *p = 0; /* The connection to be opened */ struct unixShmNode *pShmNode; /* The underlying mmapped file */ int rc; /* Result code */ unixInodeInfo *pInode; /* The inode of fd */ char *zShmFilename; /* Name of the file used for SHM */ int nShmFilename; /* Size of the SHM filename in bytes */ /* Allocate space for the new unixShm object. */ p = sqlite3_malloc64( sizeof(*p) ); if( p==0 ) return SQLITE_NOMEM; memset(p, 0, sizeof(*p)); assert( pDbFd->pShm==0 ); /* Check to see if a unixShmNode object already exists. Reuse an existing ** one if present. Create a new one if necessary. 
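*/

#if 0
/* Illustrative sketch (editorial, not part of SQLite): the mechanics that
** unixOpenSharedMemory() and unixShmMap() below are built on - open a
** companion "-shm" file, make sure it is large enough, and mmap() it
** MAP_SHARED so every process mapping the same file sees the same bytes.
** The file name and the 32 KB region size follow the convention described
** above but are used here only for illustration; error handling is brief. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void){
  const int szRegion = 32*1024;
  void *pMap;
  int fd = open("demo.db-shm", O_RDWR | O_CREAT, 0644);
  if( fd<0 ){ perror("open"); return 1; }
  if( ftruncate(fd, szRegion) ){ perror("ftruncate"); return 1; }

  pMap = mmap(0, szRegion, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
  if( pMap==MAP_FAILED ){ perror("mmap"); return 1; }

  ((volatile unsigned char*)pMap)[0] = 1;   /* visible to other mappers */
  munmap(pMap, szRegion);
  close(fd);
  return 0;
}
#endif

/*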
*/ unixEnterMutex(); pInode = pDbFd->pInode; pShmNode = pInode->pShmNode; if( pShmNode==0 ){ struct stat sStat; /* fstat() info for database file */ #ifndef SQLITE_SHM_DIRECTORY const char *zBasePath = pDbFd->zPath; #endif /* Call fstat() to figure out the permissions on the database file. If ** a new *-shm file is created, an attempt will be made to create it ** with the same permissions. */ if( osFstat(pDbFd->h, &sStat) && pInode->bProcessLock==0 ){ rc = SQLITE_IOERR_FSTAT; goto shm_open_err; } #ifdef SQLITE_SHM_DIRECTORY nShmFilename = sizeof(SQLITE_SHM_DIRECTORY) + 31; #else nShmFilename = 6 + (int)strlen(zBasePath); #endif pShmNode = sqlite3_malloc64( sizeof(*pShmNode) + nShmFilename ); if( pShmNode==0 ){ rc = SQLITE_NOMEM; goto shm_open_err; } memset(pShmNode, 0, sizeof(*pShmNode)+nShmFilename); zShmFilename = pShmNode->zFilename = (char*)&pShmNode[1]; #ifdef SQLITE_SHM_DIRECTORY sqlite3_snprintf(nShmFilename, zShmFilename, SQLITE_SHM_DIRECTORY "/sqlite-shm-%x-%x", (u32)sStat.st_ino, (u32)sStat.st_dev); #else sqlite3_snprintf(nShmFilename, zShmFilename, "%s-shm", zBasePath); sqlite3FileSuffix3(pDbFd->zPath, zShmFilename); #endif pShmNode->h = -1; pDbFd->pInode->pShmNode = pShmNode; pShmNode->pInode = pDbFd->pInode; pShmNode->mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST); if( pShmNode->mutex==0 ){ rc = SQLITE_NOMEM; goto shm_open_err; } if( pInode->bProcessLock==0 ){ int openFlags = O_RDWR | O_CREAT; if( sqlite3_uri_boolean(pDbFd->zPath, "readonly_shm", 0) ){ openFlags = O_RDONLY; pShmNode->isReadonly = 1; } pShmNode->h = robust_open(zShmFilename, openFlags, (sStat.st_mode&0777)); if( pShmNode->h<0 ){ rc = unixLogError(SQLITE_CANTOPEN_BKPT, "open", zShmFilename); goto shm_open_err; } /* If this process is running as root, make sure that the SHM file ** is owned by the same user that owns the original database. Otherwise, ** the original owner will not be able to connect. */ osFchown(pShmNode->h, sStat.st_uid, sStat.st_gid); /* Check to see if another process is holding the dead-man switch. ** If not, truncate the file to zero length. */ rc = SQLITE_OK; if( unixShmSystemLock(pDbFd, F_WRLCK, UNIX_SHM_DMS, 1)==SQLITE_OK ){ if( robust_ftruncate(pShmNode->h, 0) ){ rc = unixLogError(SQLITE_IOERR_SHMOPEN, "ftruncate", zShmFilename); } } if( rc==SQLITE_OK ){ rc = unixShmSystemLock(pDbFd, F_RDLCK, UNIX_SHM_DMS, 1); } if( rc ) goto shm_open_err; } } /* Make the new connection a child of the unixShmNode */ p->pShmNode = pShmNode; #ifdef SQLITE_DEBUG p->id = pShmNode->nextShmId++; #endif pShmNode->nRef++; pDbFd->pShm = p; unixLeaveMutex(); /* The reference count on pShmNode has already been incremented under ** the cover of the unixEnterMutex() mutex and the pointer from the ** new (struct unixShm) object to the pShmNode has been set. All that is ** left to do is to link the new object into the linked list starting ** at pShmNode->pFirst. This must be done while holding the pShmNode->mutex ** mutex. */ sqlite3_mutex_enter(pShmNode->mutex); p->pNext = pShmNode->pFirst; pShmNode->pFirst = p; sqlite3_mutex_leave(pShmNode->mutex); return SQLITE_OK; /* Jump here on any error */ shm_open_err: unixShmPurge(pDbFd); /* This call frees pShmNode if required */ sqlite3_free(p); unixLeaveMutex(); return rc; } /* ** This function is called to obtain a pointer to region iRegion of the ** shared-memory associated with the database file fd. Shared-memory regions ** are numbered starting from zero. Each shared-memory region is szRegion ** bytes in size. 
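**
** As a sketch of the region arithmetic used below (nShmPerMap is the value
** computed by unixShmRegionPerMap(); the numbers are purely illustrative):
** requests are always rounded up to a whole multiple of nShmPerMap regions,
** so on a system with 64KB pages (nShmPerMap==2) a request for region 3,
** starting from an empty mapping, causes regions 0 through 3 to be mapped
** as two 64KB mmap() calls:
**
**     nReqRegion = ((iRegion+nShmPerMap) / nShmPerMap) * nShmPerMap;
**     // iRegion==3, nShmPerMap==2  =>  nReqRegion==4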
** ** If an error occurs, an error code is returned and *pp is set to NULL. ** ** Otherwise, if the bExtend parameter is 0 and the requested shared-memory ** region has not been allocated (by any client, including one running in a ** separate process), then *pp is set to NULL and SQLITE_OK returned. If ** bExtend is non-zero and the requested shared-memory region has not yet ** been allocated, it is allocated by this function. ** ** If the shared-memory region has already been allocated or is allocated by ** this call as described above, then it is mapped into this processes ** address space (if it is not already), *pp is set to point to the mapped ** memory and SQLITE_OK returned. */ static int unixShmMap( sqlite3_file *fd, /* Handle open on database file */ int iRegion, /* Region to retrieve */ int szRegion, /* Size of regions */ int bExtend, /* True to extend file if necessary */ void volatile **pp /* OUT: Mapped memory */ ){ unixFile *pDbFd = (unixFile*)fd; unixShm *p; unixShmNode *pShmNode; int rc = SQLITE_OK; int nShmPerMap = unixShmRegionPerMap(); int nReqRegion; /* If the shared-memory file has not yet been opened, open it now. */ if( pDbFd->pShm==0 ){ rc = unixOpenSharedMemory(pDbFd); if( rc!=SQLITE_OK ) return rc; } p = pDbFd->pShm; pShmNode = p->pShmNode; sqlite3_mutex_enter(pShmNode->mutex); assert( szRegion==pShmNode->szRegion || pShmNode->nRegion==0 ); assert( pShmNode->pInode==pDbFd->pInode ); assert( pShmNode->h>=0 || pDbFd->pInode->bProcessLock==1 ); assert( pShmNode->h<0 || pDbFd->pInode->bProcessLock==0 ); /* Minimum number of regions required to be mapped. */ nReqRegion = ((iRegion+nShmPerMap) / nShmPerMap) * nShmPerMap; if( pShmNode->nRegionszRegion = szRegion; if( pShmNode->h>=0 ){ /* The requested region is not mapped into this processes address space. ** Check to see if it has been allocated (i.e. if the wal-index file is ** large enough to contain the requested region). */ if( osFstat(pShmNode->h, &sStat) ){ rc = SQLITE_IOERR_SHMSIZE; goto shmpage_out; } if( sStat.st_sizeh, iPg*pgsz + pgsz-1, "", 1, 0)!=1 ){ const char *zFile = pShmNode->zFilename; rc = unixLogError(SQLITE_IOERR_SHMSIZE, "write", zFile); goto shmpage_out; } } } } } /* Map the requested memory region into this processes address space. */ apNew = (char **)sqlite3_realloc( pShmNode->apRegion, nReqRegion*sizeof(char *) ); if( !apNew ){ rc = SQLITE_IOERR_NOMEM; goto shmpage_out; } pShmNode->apRegion = apNew; while( pShmNode->nRegionh>=0 ){ pMem = osMmap(0, nMap, pShmNode->isReadonly ? PROT_READ : PROT_READ|PROT_WRITE, MAP_SHARED, pShmNode->h, szRegion*(i64)pShmNode->nRegion ); if( pMem==MAP_FAILED ){ rc = unixLogError(SQLITE_IOERR_SHMMAP, "mmap", pShmNode->zFilename); goto shmpage_out; } }else{ pMem = sqlite3_malloc64(szRegion); if( pMem==0 ){ rc = SQLITE_NOMEM; goto shmpage_out; } memset(pMem, 0, szRegion); } for(i=0; iapRegion[pShmNode->nRegion+i] = &((char*)pMem)[szRegion*i]; } pShmNode->nRegion += nShmPerMap; } } shmpage_out: if( pShmNode->nRegion>iRegion ){ *pp = pShmNode->apRegion[iRegion]; }else{ *pp = 0; } if( pShmNode->isReadonly && rc==SQLITE_OK ) rc = SQLITE_READONLY; sqlite3_mutex_leave(pShmNode->mutex); return rc; } /* ** Change the lock state for a shared-memory segment. ** ** Note that the relationship between SHAREd and EXCLUSIVE locks is a little ** different here than in posix. In xShmLock(), one can go from unlocked ** to shared and back or from unlocked to exclusive and back. But one may ** not go from shared to exclusive or from exclusive to shared. 
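**
** For example, expressed in terms of the flag combinations accepted below
** (a sketch only; f, ofst and n are placeholders for a real sqlite3_file
** handle and lock range):
**
**     // unlocked -> shared -> unlocked:        allowed
**     xShmLock(f, ofst, n, SQLITE_SHM_LOCK   | SQLITE_SHM_SHARED);
**     xShmLock(f, ofst, n, SQLITE_SHM_UNLOCK | SQLITE_SHM_SHARED);
**
**     // unlocked -> exclusive -> unlocked:     allowed
**     xShmLock(f, ofst, n, SQLITE_SHM_LOCK   | SQLITE_SHM_EXCLUSIVE);
**     xShmLock(f, ofst, n, SQLITE_SHM_UNLOCK | SQLITE_SHM_EXCLUSIVE);
**
**     // shared -> exclusive on the same bytes: not allowed; the shared
**     // lock must be released before the exclusive lock is requested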
*/ static int unixShmLock( sqlite3_file *fd, /* Database file holding the shared memory */ int ofst, /* First lock to acquire or release */ int n, /* Number of locks to acquire or release */ int flags /* What to do with the lock */ ){ unixFile *pDbFd = (unixFile*)fd; /* Connection holding shared memory */ unixShm *p = pDbFd->pShm; /* The shared memory being locked */ unixShm *pX; /* For looping over all siblings */ unixShmNode *pShmNode = p->pShmNode; /* The underlying file iNode */ int rc = SQLITE_OK; /* Result code */ u16 mask; /* Mask of locks to take or release */ assert( pShmNode==pDbFd->pInode->pShmNode ); assert( pShmNode->pInode==pDbFd->pInode ); assert( ofst>=0 && ofst+n<=SQLITE_SHM_NLOCK ); assert( n>=1 ); assert( flags==(SQLITE_SHM_LOCK | SQLITE_SHM_SHARED) || flags==(SQLITE_SHM_LOCK | SQLITE_SHM_EXCLUSIVE) || flags==(SQLITE_SHM_UNLOCK | SQLITE_SHM_SHARED) || flags==(SQLITE_SHM_UNLOCK | SQLITE_SHM_EXCLUSIVE) ); assert( n==1 || (flags & SQLITE_SHM_EXCLUSIVE)!=0 ); assert( pShmNode->h>=0 || pDbFd->pInode->bProcessLock==1 ); assert( pShmNode->h<0 || pDbFd->pInode->bProcessLock==0 ); mask = (1<<(ofst+n)) - (1<<ofst); assert( n>1 || mask==(1<<ofst) ); sqlite3_mutex_enter(pShmNode->mutex); if( flags & SQLITE_SHM_UNLOCK ){ u16 allMask = 0; /* Mask of locks held by siblings */ /* See if any siblings hold this same lock */ for(pX=pShmNode->pFirst; pX; pX=pX->pNext){ if( pX==p ) continue; assert( (pX->exclMask & (p->exclMask|p->sharedMask))==0 ); allMask |= pX->sharedMask; } /* Unlock the system-level locks */ if( (mask & allMask)==0 ){ rc = unixShmSystemLock(pDbFd, F_UNLCK, ofst+UNIX_SHM_BASE, n); }else{ rc = SQLITE_OK; } /* Undo the local locks */ if( rc==SQLITE_OK ){ p->exclMask &= ~mask; p->sharedMask &= ~mask; } }else if( flags & SQLITE_SHM_SHARED ){ u16 allShared = 0; /* Union of locks held by connections other than "p" */ /* Find out which shared locks are already held by sibling connections. ** If any sibling already holds an exclusive lock, go ahead and return ** SQLITE_BUSY. */ for(pX=pShmNode->pFirst; pX; pX=pX->pNext){ if( (pX->exclMask & mask)!=0 ){ rc = SQLITE_BUSY; break; } allShared |= pX->sharedMask; } /* Get shared locks at the system level, if necessary */ if( rc==SQLITE_OK ){ if( (allShared & mask)==0 ){ rc = unixShmSystemLock(pDbFd, F_RDLCK, ofst+UNIX_SHM_BASE, n); }else{ rc = SQLITE_OK; } } /* Get the local shared locks */ if( rc==SQLITE_OK ){ p->sharedMask |= mask; } }else{ /* Make sure no sibling connections hold locks that will block this ** lock. If any do, return SQLITE_BUSY right away. */ for(pX=pShmNode->pFirst; pX; pX=pX->pNext){ if( (pX->exclMask & mask)!=0 || (pX->sharedMask & mask)!=0 ){ rc = SQLITE_BUSY; break; } } /* Get the exclusive locks at the system level. Then if successful ** also mark the local connection as being locked. */ if( rc==SQLITE_OK ){ rc = unixShmSystemLock(pDbFd, F_WRLCK, ofst+UNIX_SHM_BASE, n); if( rc==SQLITE_OK ){ assert( (p->sharedMask & mask)==0 ); p->exclMask |= mask; } } } sqlite3_mutex_leave(pShmNode->mutex); OSTRACE(("SHM-LOCK shmid-%d, pid-%d got %03x,%03x\n", p->id, osGetpid(0), p->sharedMask, p->exclMask)); return rc; } /* ** Implement a memory barrier or memory fence on shared memory. ** ** All loads and stores begun before the barrier must complete before ** any load or store begun after the barrier. */ static void unixShmBarrier( sqlite3_file *fd /* Database file holding the shared memory */ ){ UNUSED_PARAMETER(fd); unixEnterMutex(); unixLeaveMutex(); } /* ** Close a connection to shared-memory. Delete the underlying ** storage if deleteFlag is true.
** ** If there is no shared memory associated with the connection then this ** routine is a harmless no-op. */ static int unixShmUnmap( sqlite3_file *fd, /* The underlying database file */ int deleteFlag /* Delete shared-memory if true */ ){ unixShm *p; /* The connection to be closed */ unixShmNode *pShmNode; /* The underlying shared-memory file */ unixShm **pp; /* For looping over sibling connections */ unixFile *pDbFd; /* The underlying database file */ pDbFd = (unixFile*)fd; p = pDbFd->pShm; if( p==0 ) return SQLITE_OK; pShmNode = p->pShmNode; assert( pShmNode==pDbFd->pInode->pShmNode ); assert( pShmNode->pInode==pDbFd->pInode ); /* Remove connection p from the set of connections associated ** with pShmNode */ sqlite3_mutex_enter(pShmNode->mutex); for(pp=&pShmNode->pFirst; (*pp)!=p; pp = &(*pp)->pNext){} *pp = p->pNext; /* Free the connection p */ sqlite3_free(p); pDbFd->pShm = 0; sqlite3_mutex_leave(pShmNode->mutex); /* If pShmNode->nRef has reached 0, then close the underlying ** shared-memory file, too */ unixEnterMutex(); assert( pShmNode->nRef>0 ); pShmNode->nRef--; if( pShmNode->nRef==0 ){ if( deleteFlag && pShmNode->h>=0 ){ osUnlink(pShmNode->zFilename); } unixShmPurge(pDbFd); } unixLeaveMutex(); return SQLITE_OK; } #else # define unixShmMap 0 # define unixShmLock 0 # define unixShmBarrier 0 # define unixShmUnmap 0 #endif /* #ifndef SQLITE_OMIT_WAL */ #if SQLITE_MAX_MMAP_SIZE>0 /* ** If it is currently memory mapped, unmap file pFd. */ static void unixUnmapfile(unixFile *pFd){ assert( pFd->nFetchOut==0 ); if( pFd->pMapRegion ){ osMunmap(pFd->pMapRegion, pFd->mmapSizeActual); pFd->pMapRegion = 0; pFd->mmapSize = 0; pFd->mmapSizeActual = 0; } } /* ** Attempt to set the size of the memory mapping maintained by file ** descriptor pFd to nNew bytes. Any existing mapping is discarded. ** ** If successful, this function sets the following variables: ** ** unixFile.pMapRegion ** unixFile.mmapSize ** unixFile.mmapSizeActual ** ** If unsuccessful, an error message is logged via sqlite3_log() and ** the three variables above are zeroed. In this case SQLite should ** continue accessing the database using the xRead() and xWrite() ** methods. */ static void unixRemapfile( unixFile *pFd, /* File descriptor object */ i64 nNew /* Required mapping size */ ){ const char *zErr = "mmap"; int h = pFd->h; /* File descriptor open on db file */ u8 *pOrig = (u8 *)pFd->pMapRegion; /* Pointer to current file mapping */ i64 nOrig = pFd->mmapSizeActual; /* Size of pOrig region in bytes */ u8 *pNew = 0; /* Location of new mapping */ int flags = PROT_READ; /* Flags to pass to mmap() */ assert( pFd->nFetchOut==0 ); assert( nNew>pFd->mmapSize ); assert( nNew<=pFd->mmapSizeMax ); assert( nNew>0 ); assert( pFd->mmapSizeActual>=pFd->mmapSize ); assert( MAP_FAILED!=0 ); if( (pFd->ctrlFlags & UNIXFILE_RDONLY)==0 ) flags |= PROT_WRITE; if( pOrig ){ #if HAVE_MREMAP i64 nReuse = pFd->mmapSize; #else const int szSyspage = osGetpagesize(); i64 nReuse = (pFd->mmapSize & ~(szSyspage-1)); #endif u8 *pReq = &pOrig[nReuse]; /* Unmap any pages of the existing mapping that cannot be reused. */ if( nReuse!=nOrig ){ osMunmap(pReq, nOrig-nReuse); } #if HAVE_MREMAP pNew = osMremap(pOrig, nReuse, nNew, MREMAP_MAYMOVE); zErr = "mremap"; #else pNew = osMmap(pReq, nNew-nReuse, flags, MAP_SHARED, h, nReuse); if( pNew!=MAP_FAILED ){ if( pNew!=pReq ){ osMunmap(pNew, nNew - nReuse); pNew = 0; }else{ pNew = pOrig; } } #endif /* The attempt to extend the existing mapping failed. Free it. 
*/ if( pNew==MAP_FAILED || pNew==0 ){ osMunmap(pOrig, nReuse); } } /* If pNew is still NULL, try to create an entirely new mapping. */ if( pNew==0 ){ pNew = osMmap(0, nNew, flags, MAP_SHARED, h, 0); } if( pNew==MAP_FAILED ){ pNew = 0; nNew = 0; unixLogError(SQLITE_OK, zErr, pFd->zPath); /* If the mmap() above failed, assume that all subsequent mmap() calls ** will probably fail too. Fall back to using xRead/xWrite exclusively ** in this case. */ pFd->mmapSizeMax = 0; } pFd->pMapRegion = (void *)pNew; pFd->mmapSize = pFd->mmapSizeActual = nNew; } /* ** Memory map or remap the file opened by file-descriptor pFd (if the file ** is already mapped, the existing mapping is replaced by the new). Or, if ** there already exists a mapping for this file, and there are still ** outstanding xFetch() references to it, this function is a no-op. ** ** If parameter nByte is non-negative, then it is the requested size of ** the mapping to create. Otherwise, if nByte is less than zero, then the ** requested size is the size of the file on disk. The actual size of the ** created mapping is either the requested size or the value configured ** using SQLITE_FCNTL_MMAP_LIMIT, whichever is smaller. ** ** SQLITE_OK is returned if no error occurs (even if the mapping is not ** recreated as a result of outstanding references) or an SQLite error ** code otherwise. */ static int unixMapfile(unixFile *pFd, i64 nByte){ i64 nMap = nByte; int rc; assert( nMap>=0 || pFd->nFetchOut==0 ); if( pFd->nFetchOut>0 ) return SQLITE_OK; if( nMap<0 ){ struct stat statbuf; /* Low-level file information */ rc = osFstat(pFd->h, &statbuf); if( rc!=SQLITE_OK ){ return SQLITE_IOERR_FSTAT; } nMap = statbuf.st_size; } if( nMap>pFd->mmapSizeMax ){ nMap = pFd->mmapSizeMax; } if( nMap!=pFd->mmapSize ){ if( nMap>0 ){ unixRemapfile(pFd, nMap); }else{ unixUnmapfile(pFd); } } return SQLITE_OK; } #endif /* SQLITE_MAX_MMAP_SIZE>0 */ /* ** If possible, return a pointer to a mapping of file fd starting at offset ** iOff. The mapping must be valid for at least nAmt bytes. ** ** If such a pointer can be obtained, store it in *pp and return SQLITE_OK. ** Or, if one cannot but no error occurs, set *pp to 0 and return SQLITE_OK. ** Finally, if an error does occur, return an SQLite error code. The final ** value of *pp is undefined in this case. ** ** If this function does return a pointer, the caller must eventually ** release the reference by calling unixUnfetch(). */ static int unixFetch(sqlite3_file *fd, i64 iOff, int nAmt, void **pp){ #if SQLITE_MAX_MMAP_SIZE>0 unixFile *pFd = (unixFile *)fd; /* The underlying database file */ #endif *pp = 0; #if SQLITE_MAX_MMAP_SIZE>0 if( pFd->mmapSizeMax>0 ){ if( pFd->pMapRegion==0 ){ int rc = unixMapfile(pFd, -1); if( rc!=SQLITE_OK ) return rc; } if( pFd->mmapSize >= iOff+nAmt ){ *pp = &((u8 *)pFd->pMapRegion)[iOff]; pFd->nFetchOut++; } } #endif return SQLITE_OK; } /* ** If the third argument is non-NULL, then this function releases a ** reference obtained by an earlier call to unixFetch(). The second ** argument passed to this function must be the same as the corresponding ** argument that was passed to the unixFetch() invocation. ** ** Or, if the third argument is NULL, then this function is being called ** to inform the VFS layer that, according to POSIX, any existing mapping ** may now be invalid and should be unmapped. 
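**
** The xFetch/xUnfetch methods in this division only come into play when
** memory-mapped I/O is enabled.  As a sketch of how an application enables
** it using only the documented interfaces (db is an open database handle
** and the byte counts are illustrative values, not recommendations):
**
**     sqlite3_config(SQLITE_CONFIG_MMAP_SIZE,
**                    (sqlite3_int64)256*1024*1024,     // default mmap limit
**                    (sqlite3_int64)1024*1024*1024);   // hard upper bound
**     ...
**     sqlite3_exec(db, "PRAGMA mmap_size=268435456;", 0, 0, 0);
**
** The sqlite3_config() call must be made before the library is initialized;
** the PRAGMA may be issued on a per-connection basis at any time.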
*/ static int unixUnfetch(sqlite3_file *fd, i64 iOff, void *p){ #if SQLITE_MAX_MMAP_SIZE>0 unixFile *pFd = (unixFile *)fd; /* The underlying database file */ UNUSED_PARAMETER(iOff); /* If p==0 (unmap the entire file) then there must be no outstanding ** xFetch references. Or, if p!=0 (meaning it is an xFetch reference), ** then there must be at least one outstanding. */ assert( (p==0)==(pFd->nFetchOut==0) ); /* If p!=0, it must match the iOff value. */ assert( p==0 || p==&((u8 *)pFd->pMapRegion)[iOff] ); if( p ){ pFd->nFetchOut--; }else{ unixUnmapfile(pFd); } assert( pFd->nFetchOut>=0 ); #else UNUSED_PARAMETER(fd); UNUSED_PARAMETER(p); UNUSED_PARAMETER(iOff); #endif return SQLITE_OK; } /* ** Here ends the implementation of all sqlite3_file methods. ** ********************** End sqlite3_file Methods ******************************* ******************************************************************************/ /* ** This division contains definitions of sqlite3_io_methods objects that ** implement various file locking strategies. It also contains definitions ** of "finder" functions. A finder-function is used to locate the appropriate ** sqlite3_io_methods object for a particular database file. The pAppData ** field of the sqlite3_vfs VFS objects are initialized to be pointers to ** the correct finder-function for that VFS. ** ** Most finder functions return a pointer to a fixed sqlite3_io_methods ** object. The only interesting finder-function is autolockIoFinder, which ** looks at the filesystem type and tries to guess the best locking ** strategy from that. ** ** For finder-function F, two objects are created: ** ** (1) The real finder-function named "FImpt()". ** ** (2) A constant pointer to this function named just "F". ** ** ** A pointer to the F pointer is used as the pAppData value for VFS ** objects. We have to do this instead of letting pAppData point ** directly at the finder-function since C90 rules prevent a void* ** from be cast into a function pointer. ** ** ** Each instance of this macro generates two objects: ** ** * A constant sqlite3_io_methods object call METHOD that has locking ** methods CLOSE, LOCK, UNLOCK, CKRESLOCK. ** ** * An I/O method finder function called FINDER that returns a pointer ** to the METHOD object in the previous bullet. */ #define IOMETHODS(FINDER,METHOD,VERSION,CLOSE,LOCK,UNLOCK,CKLOCK,SHMMAP) \ static const sqlite3_io_methods METHOD = { \ VERSION, /* iVersion */ \ CLOSE, /* xClose */ \ unixRead, /* xRead */ \ unixWrite, /* xWrite */ \ unixTruncate, /* xTruncate */ \ unixSync, /* xSync */ \ unixFileSize, /* xFileSize */ \ LOCK, /* xLock */ \ UNLOCK, /* xUnlock */ \ CKLOCK, /* xCheckReservedLock */ \ unixFileControl, /* xFileControl */ \ unixSectorSize, /* xSectorSize */ \ unixDeviceCharacteristics, /* xDeviceCapabilities */ \ SHMMAP, /* xShmMap */ \ unixShmLock, /* xShmLock */ \ unixShmBarrier, /* xShmBarrier */ \ unixShmUnmap, /* xShmUnmap */ \ unixFetch, /* xFetch */ \ unixUnfetch, /* xUnfetch */ \ }; \ static const sqlite3_io_methods *FINDER##Impl(const char *z, unixFile *p){ \ UNUSED_PARAMETER(z); UNUSED_PARAMETER(p); \ return &METHOD; \ } \ static const sqlite3_io_methods *(*const FINDER)(const char*,unixFile *p) \ = FINDER##Impl; /* ** Here are all of the sqlite3_io_methods objects for each of the ** locking strategies. Functions that return pointers to these methods ** are also created. 
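**
** A minimal sketch of that pointer-to-a-function-pointer idiom (gFinder,
** someFinderImpl, zName and pFile are placeholder names, not objects
** defined in this file):
**
**     typedef const sqlite3_io_methods *(*finder_type)(const char*,unixFile*);
**     static const finder_type gFinder = someFinderImpl;
**
**     void *pAppData = (void*)&gFinder;          // a data pointer: fine in C90
**     ...
**     finder_type f = *(finder_type*)pAppData;   // recover the function pointer
**     const sqlite3_io_methods *pM = f(zName, pFile);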
*/ IOMETHODS( posixIoFinder, /* Finder function name */ posixIoMethods, /* sqlite3_io_methods object name */ 3, /* shared memory and mmap are enabled */ unixClose, /* xClose method */ unixLock, /* xLock method */ unixUnlock, /* xUnlock method */ unixCheckReservedLock, /* xCheckReservedLock method */ unixShmMap /* xShmMap method */ ) IOMETHODS( nolockIoFinder, /* Finder function name */ nolockIoMethods, /* sqlite3_io_methods object name */ 3, /* shared memory is disabled */ nolockClose, /* xClose method */ nolockLock, /* xLock method */ nolockUnlock, /* xUnlock method */ nolockCheckReservedLock, /* xCheckReservedLock method */ 0 /* xShmMap method */ ) IOMETHODS( dotlockIoFinder, /* Finder function name */ dotlockIoMethods, /* sqlite3_io_methods object name */ 1, /* shared memory is disabled */ dotlockClose, /* xClose method */ dotlockLock, /* xLock method */ dotlockUnlock, /* xUnlock method */ dotlockCheckReservedLock, /* xCheckReservedLock method */ 0 /* xShmMap method */ ) #if SQLITE_ENABLE_LOCKING_STYLE IOMETHODS( flockIoFinder, /* Finder function name */ flockIoMethods, /* sqlite3_io_methods object name */ 1, /* shared memory is disabled */ flockClose, /* xClose method */ flockLock, /* xLock method */ flockUnlock, /* xUnlock method */ flockCheckReservedLock, /* xCheckReservedLock method */ 0 /* xShmMap method */ ) #endif #if OS_VXWORKS IOMETHODS( semIoFinder, /* Finder function name */ semIoMethods, /* sqlite3_io_methods object name */ 1, /* shared memory is disabled */ semXClose, /* xClose method */ semXLock, /* xLock method */ semXUnlock, /* xUnlock method */ semXCheckReservedLock, /* xCheckReservedLock method */ 0 /* xShmMap method */ ) #endif #if defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE IOMETHODS( afpIoFinder, /* Finder function name */ afpIoMethods, /* sqlite3_io_methods object name */ 1, /* shared memory is disabled */ afpClose, /* xClose method */ afpLock, /* xLock method */ afpUnlock, /* xUnlock method */ afpCheckReservedLock, /* xCheckReservedLock method */ 0 /* xShmMap method */ ) #endif /* ** The proxy locking method is a "super-method" in the sense that it ** opens secondary file descriptors for the conch and lock files and ** it uses proxy, dot-file, AFP, and flock() locking methods on those ** secondary files. For this reason, the division that implements ** proxy locking is located much further down in the file. But we need ** to go ahead and define the sqlite3_io_methods and finder function ** for proxy locking here. So we forward declare the I/O methods. 
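**
** From application code, each of the locking strategies above is normally
** reached through one of the "unix*" VFS objects registered by
** sqlite3_os_init() later in this file.  A sketch of selecting one
** explicitly, using only the public API ("test.db" is a placeholder path):
**
**     sqlite3 *db;
**     int rc = sqlite3_open_v2("test.db", &db,
**                              SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE,
**                              "unix-dotfile");    // use dotfile locking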
*/ #if defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE static int proxyClose(sqlite3_file*); static int proxyLock(sqlite3_file*, int); static int proxyUnlock(sqlite3_file*, int); static int proxyCheckReservedLock(sqlite3_file*, int*); IOMETHODS( proxyIoFinder, /* Finder function name */ proxyIoMethods, /* sqlite3_io_methods object name */ 1, /* shared memory is disabled */ proxyClose, /* xClose method */ proxyLock, /* xLock method */ proxyUnlock, /* xUnlock method */ proxyCheckReservedLock, /* xCheckReservedLock method */ 0 /* xShmMap method */ ) #endif /* nfs lockd on OSX 10.3+ doesn't clear write locks when a read lock is set */ #if defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE IOMETHODS( nfsIoFinder, /* Finder function name */ nfsIoMethods, /* sqlite3_io_methods object name */ 1, /* shared memory is disabled */ unixClose, /* xClose method */ unixLock, /* xLock method */ nfsUnlock, /* xUnlock method */ unixCheckReservedLock, /* xCheckReservedLock method */ 0 /* xShmMap method */ ) #endif #if defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE /* ** This "finder" function attempts to determine the best locking strategy ** for the database file "filePath". It then returns the sqlite3_io_methods ** object that implements that strategy. ** ** This is for MacOSX only. */ static const sqlite3_io_methods *autolockIoFinderImpl( const char *filePath, /* name of the database file */ unixFile *pNew /* open file object for the database file */ ){ static const struct Mapping { const char *zFilesystem; /* Filesystem type name */ const sqlite3_io_methods *pMethods; /* Appropriate locking method */ } aMap[] = { { "hfs", &posixIoMethods }, { "ufs", &posixIoMethods }, { "afpfs", &afpIoMethods }, { "smbfs", &afpIoMethods }, { "webdav", &nolockIoMethods }, { 0, 0 } }; int i; struct statfs fsInfo; struct flock lockInfo; if( !filePath ){ /* If filePath==NULL that means we are dealing with a transient file ** that does not need to be locked. */ return &nolockIoMethods; } if( statfs(filePath, &fsInfo) != -1 ){ if( fsInfo.f_flags & MNT_RDONLY ){ return &nolockIoMethods; } for(i=0; aMap[i].zFilesystem; i++){ if( strcmp(fsInfo.f_fstypename, aMap[i].zFilesystem)==0 ){ return aMap[i].pMethods; } } } /* Default case. Handles, amongst others, "nfs". ** Test byte-range lock using fcntl(). If the call succeeds, ** assume that the file-system supports POSIX style locks. */ lockInfo.l_len = 1; lockInfo.l_start = 0; lockInfo.l_whence = SEEK_SET; lockInfo.l_type = F_RDLCK; if( osFcntl(pNew->h, F_GETLK, &lockInfo)!=-1 ) { if( strcmp(fsInfo.f_fstypename, "nfs")==0 ){ return &nfsIoMethods; } else { return &posixIoMethods; } }else{ return &dotlockIoMethods; } } static const sqlite3_io_methods *(*const autolockIoFinder)(const char*,unixFile*) = autolockIoFinderImpl; #endif /* defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE */ #if OS_VXWORKS /* ** This "finder" function for VxWorks checks to see if posix advisory ** locking works. If it does, then that is what is used. If it does not ** work, then fallback to named semaphore locking. */ static const sqlite3_io_methods *vxworksIoFinderImpl( const char *filePath, /* name of the database file */ unixFile *pNew /* the open file object */ ){ struct flock lockInfo; if( !filePath ){ /* If filePath==NULL that means we are dealing with a transient file ** that does not need to be locked. */ return &nolockIoMethods; } /* Test if fcntl() is supported and use POSIX style locks. ** Otherwise fall back to the named semaphore method. 
*/ lockInfo.l_len = 1; lockInfo.l_start = 0; lockInfo.l_whence = SEEK_SET; lockInfo.l_type = F_RDLCK; if( osFcntl(pNew->h, F_GETLK, &lockInfo)!=-1 ) { return &posixIoMethods; }else{ return &semIoMethods; } } static const sqlite3_io_methods *(*const vxworksIoFinder)(const char*,unixFile*) = vxworksIoFinderImpl; #endif /* OS_VXWORKS */ /* ** An abstract type for a pointer to an IO method finder function: */ typedef const sqlite3_io_methods *(*finder_type)(const char*,unixFile*); /**************************************************************************** **************************** sqlite3_vfs methods **************************** ** ** This division contains the implementation of methods on the ** sqlite3_vfs object. */ /* ** Initialize the contents of the unixFile structure pointed to by pId. */ static int fillInUnixFile( sqlite3_vfs *pVfs, /* Pointer to vfs object */ int h, /* Open file descriptor of file being opened */ sqlite3_file *pId, /* Write to the unixFile structure here */ const char *zFilename, /* Name of the file being opened */ int ctrlFlags /* Zero or more UNIXFILE_* values */ ){ const sqlite3_io_methods *pLockingStyle; unixFile *pNew = (unixFile *)pId; int rc = SQLITE_OK; assert( pNew->pInode==NULL ); /* Usually the path zFilename should not be a relative pathname. The ** exception is when opening the proxy "conch" file in builds that ** include the special Apple locking styles. */ #if defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE assert( zFilename==0 || zFilename[0]=='/' || pVfs->pAppData==(void*)&autolockIoFinder ); #else assert( zFilename==0 || zFilename[0]=='/' ); #endif /* No locking occurs in temporary files */ assert( zFilename!=0 || (ctrlFlags & UNIXFILE_NOLOCK)!=0 ); OSTRACE(("OPEN %-3d %s\n", h, zFilename)); pNew->h = h; pNew->pVfs = pVfs; pNew->zPath = zFilename; pNew->ctrlFlags = (u8)ctrlFlags; #if SQLITE_MAX_MMAP_SIZE>0 pNew->mmapSizeMax = sqlite3GlobalConfig.szMmap; #endif if( sqlite3_uri_boolean(((ctrlFlags & UNIXFILE_URI) ? zFilename : 0), "psow", SQLITE_POWERSAFE_OVERWRITE) ){ pNew->ctrlFlags |= UNIXFILE_PSOW; } if( strcmp(pVfs->zName,"unix-excl")==0 ){ pNew->ctrlFlags |= UNIXFILE_EXCL; } #if OS_VXWORKS pNew->pId = vxworksFindFileId(zFilename); if( pNew->pId==0 ){ ctrlFlags |= UNIXFILE_NOLOCK; rc = SQLITE_NOMEM; } #endif if( ctrlFlags & UNIXFILE_NOLOCK ){ pLockingStyle = &nolockIoMethods; }else{ pLockingStyle = (**(finder_type*)pVfs->pAppData)(zFilename, pNew); #if SQLITE_ENABLE_LOCKING_STYLE /* Cache zFilename in the locking context (AFP and dotlock override) for ** proxyLock activation is possible (remote proxy is based on db name) ** zFilename remains valid until file is closed, to support */ pNew->lockingContext = (void*)zFilename; #endif } if( pLockingStyle == &posixIoMethods #if defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE || pLockingStyle == &nfsIoMethods #endif ){ unixEnterMutex(); rc = findInodeInfo(pNew, &pNew->pInode); if( rc!=SQLITE_OK ){ /* If an error occurred in findInodeInfo(), close the file descriptor ** immediately, before releasing the mutex. findInodeInfo() may fail ** in two scenarios: ** ** (a) A call to fstat() failed. ** (b) A malloc failed. ** ** Scenario (b) may only occur if the process is holding no other ** file descriptors open on the same file. If there were other file ** descriptors on this file, then no malloc would be required by ** findInodeInfo(). If this is the case, it is quite safe to close ** handle h - as it is guaranteed that no posix locks will be released ** by doing so. 
** ** If scenario (a) caused the error then things are not so safe. The ** implicit assumption here is that if fstat() fails, things are in ** such bad shape that dropping a lock or two doesn't matter much. */ robust_close(pNew, h, __LINE__); h = -1; } unixLeaveMutex(); } #if SQLITE_ENABLE_LOCKING_STYLE && defined(__APPLE__) else if( pLockingStyle == &afpIoMethods ){ /* AFP locking uses the file path so it needs to be included in ** the afpLockingContext. */ afpLockingContext *pCtx; pNew->lockingContext = pCtx = sqlite3_malloc64( sizeof(*pCtx) ); if( pCtx==0 ){ rc = SQLITE_NOMEM; }else{ /* NB: zFilename exists and remains valid until the file is closed ** according to requirement F11141. So we do not need to make a ** copy of the filename. */ pCtx->dbPath = zFilename; pCtx->reserved = 0; srandomdev(); unixEnterMutex(); rc = findInodeInfo(pNew, &pNew->pInode); if( rc!=SQLITE_OK ){ sqlite3_free(pNew->lockingContext); robust_close(pNew, h, __LINE__); h = -1; } unixLeaveMutex(); } } #endif else if( pLockingStyle == &dotlockIoMethods ){ /* Dotfile locking uses the file path so it needs to be included in ** the dotlockLockingContext */ char *zLockFile; int nFilename; assert( zFilename!=0 ); nFilename = (int)strlen(zFilename) + 6; zLockFile = (char *)sqlite3_malloc64(nFilename); if( zLockFile==0 ){ rc = SQLITE_NOMEM; }else{ sqlite3_snprintf(nFilename, zLockFile, "%s" DOTLOCK_SUFFIX, zFilename); } pNew->lockingContext = zLockFile; } #if OS_VXWORKS else if( pLockingStyle == &semIoMethods ){ /* Named semaphore locking uses the file path so it needs to be ** included in the semLockingContext */ unixEnterMutex(); rc = findInodeInfo(pNew, &pNew->pInode); if( (rc==SQLITE_OK) && (pNew->pInode->pSem==NULL) ){ char *zSemName = pNew->pInode->aSemName; int n; sqlite3_snprintf(MAX_PATHNAME, zSemName, "/%s.sem", pNew->pId->zCanonicalName); for( n=1; zSemName[n]; n++ ) if( zSemName[n]=='/' ) zSemName[n] = '_'; pNew->pInode->pSem = sem_open(zSemName, O_CREAT, 0666, 1); if( pNew->pInode->pSem == SEM_FAILED ){ rc = SQLITE_NOMEM; pNew->pInode->aSemName[0] = '\0'; } } unixLeaveMutex(); } #endif storeLastErrno(pNew, 0); #if OS_VXWORKS if( rc!=SQLITE_OK ){ if( h>=0 ) robust_close(pNew, h, __LINE__); h = -1; osUnlink(zFilename); pNew->ctrlFlags |= UNIXFILE_DELETE; } #endif if( rc!=SQLITE_OK ){ if( h>=0 ) robust_close(pNew, h, __LINE__); }else{ pNew->pMethod = pLockingStyle; OpenCounter(+1); verifyDbFile(pNew); } return rc; } /* ** Return the name of a directory in which to put temporary files. ** If no suitable temporary file directory can be found, return NULL. */ static const char *unixTempFileDir(void){ static const char *azDirs[] = { 0, 0, 0, "/var/tmp", "/usr/tmp", "/tmp", 0 /* List terminator */ }; unsigned int i; struct stat buf; const char *zDir = 0; azDirs[0] = sqlite3_temp_directory; if( !azDirs[1] ) azDirs[1] = getenv("SQLITE_TMPDIR"); if( !azDirs[2] ) azDirs[2] = getenv("TMPDIR"); for(i=0; imxPathname bytes. */ static int unixGetTempname(int nBuf, char *zBuf){ static const unsigned char zChars[] = "abcdefghijklmnopqrstuvwxyz" "ABCDEFGHIJKLMNOPQRSTUVWXYZ" "0123456789"; unsigned int i, j; const char *zDir; /* It's odd to simulate an io-error here, but really this is just ** using the io-error infrastructure to test that SQLite handles this ** function failing. */ SimulateIOError( return SQLITE_IOERR ); zDir = unixTempFileDir(); if( zDir==0 ) zDir = "."; /* Check that the output buffer is large enough for the temporary file ** name. If it is not, return SQLITE_ERROR. 
*/ if( (strlen(zDir) + strlen(SQLITE_TEMP_FILE_PREFIX) + 18) >= (size_t)nBuf ){ return SQLITE_ERROR; } do{ sqlite3_snprintf(nBuf-18, zBuf, "%s/"SQLITE_TEMP_FILE_PREFIX, zDir); j = (int)strlen(zBuf); sqlite3_randomness(15, &zBuf[j]); for(i=0; i<15; i++, j++){ zBuf[j] = (char)zChars[ ((unsigned char)zBuf[j])%(sizeof(zChars)-1) ]; } zBuf[j] = 0; zBuf[j+1] = 0; }while( osAccess(zBuf,0)==0 ); return SQLITE_OK; } #if SQLITE_ENABLE_LOCKING_STYLE && defined(__APPLE__) /* ** Routine to transform a unixFile into a proxy-locking unixFile. ** Implementation in the proxy-lock division, but used by unixOpen() ** if SQLITE_PREFER_PROXY_LOCKING is defined. */ static int proxyTransformUnixFile(unixFile*, const char*); #endif /* ** Search for an unused file descriptor that was opened on the database ** file (not a journal or master-journal file) identified by pathname ** zPath with SQLITE_OPEN_XXX flags matching those passed as the second ** argument to this function. ** ** Such a file descriptor may exist if a database connection was closed ** but the associated file descriptor could not be closed because some ** other file descriptor open on the same file is holding a file-lock. ** Refer to comments in the unixClose() function and the lengthy comment ** describing "Posix Advisory Locking" at the start of this file for ** further details. Also, ticket #4018. ** ** If a suitable file descriptor is found, then it is returned. If no ** such file descriptor is located, -1 is returned. */ static UnixUnusedFd *findReusableFd(const char *zPath, int flags){ UnixUnusedFd *pUnused = 0; /* Do not search for an unused file descriptor on vxworks. Not because ** vxworks would not benefit from the change (it might, we're not sure), ** but because no way to test it is currently available. It is better ** not to risk breaking vxworks support for the sake of such an obscure ** feature. */ #if !OS_VXWORKS struct stat sStat; /* Results of stat() call */ /* A stat() call may fail for various reasons. If this happens, it is ** almost certain that an open() call on the same path will also fail. ** For this reason, if an error occurs in the stat() call here, it is ** ignored and -1 is returned. The caller will try to open a new file ** descriptor on the same path, fail, and return an error to SQLite. ** ** Even if a subsequent open() call does succeed, the consequences of ** not searching for a reusable file descriptor are not dire. */ if( 0==osStat(zPath, &sStat) ){ unixInodeInfo *pInode; unixEnterMutex(); pInode = inodeList; while( pInode && (pInode->fileId.dev!=sStat.st_dev || pInode->fileId.ino!=sStat.st_ino) ){ pInode = pInode->pNext; } if( pInode ){ UnixUnusedFd **pp; for(pp=&pInode->pUnused; *pp && (*pp)->flags!=flags; pp=&((*pp)->pNext)); pUnused = *pp; if( pUnused ){ *pp = pUnused->pNext; } } unixLeaveMutex(); } #endif /* if !OS_VXWORKS */ return pUnused; } /* ** This function is called by unixOpen() to determine the unix permissions ** to create new files with. If no error occurs, then SQLITE_OK is returned ** and a value suitable for passing as the third argument to open(2) is ** written to *pMode. If an IO error occurs, an SQLite error code is ** returned and the value of *pMode is not modified. ** ** In most cases, this routine sets *pMode to 0, which will become ** an indication to robust_open() to create the file using ** SQLITE_DEFAULT_FILE_PERMISSIONS adjusted by the umask. 
** But if the file being opened is a WAL or regular journal file, then ** this function queries the file-system for the permissions on the ** corresponding database file and sets *pMode to this value. Whenever ** possible, WAL and journal files are created using the same permissions ** as the associated database file. ** ** If the SQLITE_ENABLE_8_3_NAMES option is enabled, then the ** original filename is unavailable. But 8_3_NAMES is only used for ** FAT filesystems and permissions do not matter there, so just use ** the default permissions. */ static int findCreateFileMode( const char *zPath, /* Path of file (possibly) being created */ int flags, /* Flags passed as 4th argument to xOpen() */ mode_t *pMode, /* OUT: Permissions to open file with */ uid_t *pUid, /* OUT: uid to set on the file */ gid_t *pGid /* OUT: gid to set on the file */ ){ int rc = SQLITE_OK; /* Return Code */ *pMode = 0; *pUid = 0; *pGid = 0; if( flags & (SQLITE_OPEN_WAL|SQLITE_OPEN_MAIN_JOURNAL) ){ char zDb[MAX_PATHNAME+1]; /* Database file path */ int nDb; /* Number of valid bytes in zDb */ struct stat sStat; /* Output of stat() on database file */ /* zPath is a path to a WAL or journal file. The following block derives ** the path to the associated database file from zPath. This block handles ** the following naming conventions: ** ** "-journal" ** "-wal" ** "-journalNN" ** "-walNN" ** ** where NN is a decimal number. The NN naming schemes are ** used by the test_multiplex.c module. */ nDb = sqlite3Strlen30(zPath) - 1; #ifdef SQLITE_ENABLE_8_3_NAMES while( nDb>0 && sqlite3Isalnum(zPath[nDb]) ) nDb--; if( nDb==0 || zPath[nDb]!='-' ) return SQLITE_OK; #else while( zPath[nDb]!='-' ){ assert( nDb>0 ); assert( zPath[nDb]!='\n' ); nDb--; } #endif memcpy(zDb, zPath, nDb); zDb[nDb] = '\0'; if( 0==osStat(zDb, &sStat) ){ *pMode = sStat.st_mode & 0777; *pUid = sStat.st_uid; *pGid = sStat.st_gid; }else{ rc = SQLITE_IOERR_FSTAT; } }else if( flags & SQLITE_OPEN_DELETEONCLOSE ){ *pMode = 0600; } return rc; } /* ** Open the file zPath. ** ** Previously, the SQLite OS layer used three functions in place of this ** one: ** ** sqlite3OsOpenReadWrite(); ** sqlite3OsOpenReadOnly(); ** sqlite3OsOpenExclusive(); ** ** These calls correspond to the following combinations of flags: ** ** ReadWrite() -> (READWRITE | CREATE) ** ReadOnly() -> (READONLY) ** OpenExclusive() -> (READWRITE | CREATE | EXCLUSIVE) ** ** The old OpenExclusive() accepted a boolean argument - "delFlag". If ** true, the file was configured to be automatically deleted when the ** file handle closed. To achieve the same effect using this new ** interface, add the DELETEONCLOSE flag to those specified above for ** OpenExclusive(). 
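**
** For instance, what the old OpenExclusive() did with its delFlag argument
** set corresponds under this interface to an xOpen() call whose flags are
** (a sketch of the flag combination only):
**
**     flags = SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE
**           | SQLITE_OPEN_EXCLUSIVE | SQLITE_OPEN_DELETEONCLOSE;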
*/ static int unixOpen( sqlite3_vfs *pVfs, /* The VFS for which this is the xOpen method */ const char *zPath, /* Pathname of file to be opened */ sqlite3_file *pFile, /* The file descriptor to be filled in */ int flags, /* Input flags to control the opening */ int *pOutFlags /* Output flags returned to SQLite core */ ){ unixFile *p = (unixFile *)pFile; int fd = -1; /* File descriptor returned by open() */ int openFlags = 0; /* Flags to pass to open() */ int eType = flags&0xFFFFFF00; /* Type of file to open */ int noLock; /* True to omit locking primitives */ int rc = SQLITE_OK; /* Function Return Code */ int ctrlFlags = 0; /* UNIXFILE_* flags */ int isExclusive = (flags & SQLITE_OPEN_EXCLUSIVE); int isDelete = (flags & SQLITE_OPEN_DELETEONCLOSE); int isCreate = (flags & SQLITE_OPEN_CREATE); int isReadonly = (flags & SQLITE_OPEN_READONLY); int isReadWrite = (flags & SQLITE_OPEN_READWRITE); #if SQLITE_ENABLE_LOCKING_STYLE int isAutoProxy = (flags & SQLITE_OPEN_AUTOPROXY); #endif #if defined(__APPLE__) || SQLITE_ENABLE_LOCKING_STYLE struct statfs fsInfo; #endif /* If creating a master or main-file journal, this function will open ** a file-descriptor on the directory too. The first time unixSync() ** is called the directory file descriptor will be fsync()ed and close()d. */ int syncDir = (isCreate && ( eType==SQLITE_OPEN_MASTER_JOURNAL || eType==SQLITE_OPEN_MAIN_JOURNAL || eType==SQLITE_OPEN_WAL )); /* If argument zPath is a NULL pointer, this function is required to open ** a temporary file. Use this buffer to store the file name in. */ char zTmpname[MAX_PATHNAME+2]; const char *zName = zPath; /* Check the following statements are true: ** ** (a) Exactly one of the READWRITE and READONLY flags must be set, and ** (b) if CREATE is set, then READWRITE must also be set, and ** (c) if EXCLUSIVE is set, then CREATE must also be set. ** (d) if DELETEONCLOSE is set, then CREATE must also be set. */ assert((isReadonly==0 || isReadWrite==0) && (isReadWrite || isReadonly)); assert(isCreate==0 || isReadWrite); assert(isExclusive==0 || isCreate); assert(isDelete==0 || isCreate); /* The main DB, main journal, WAL file and master journal are never ** automatically deleted. Nor are they ever temporary files. */ assert( (!isDelete && zName) || eType!=SQLITE_OPEN_MAIN_DB ); assert( (!isDelete && zName) || eType!=SQLITE_OPEN_MAIN_JOURNAL ); assert( (!isDelete && zName) || eType!=SQLITE_OPEN_MASTER_JOURNAL ); assert( (!isDelete && zName) || eType!=SQLITE_OPEN_WAL ); /* Assert that the upper layer has set one of the "file-type" flags. */ assert( eType==SQLITE_OPEN_MAIN_DB || eType==SQLITE_OPEN_TEMP_DB || eType==SQLITE_OPEN_MAIN_JOURNAL || eType==SQLITE_OPEN_TEMP_JOURNAL || eType==SQLITE_OPEN_SUBJOURNAL || eType==SQLITE_OPEN_MASTER_JOURNAL || eType==SQLITE_OPEN_TRANSIENT_DB || eType==SQLITE_OPEN_WAL ); /* Detect a pid change and reset the PRNG. There is a race condition ** here such that two or more threads all trying to open databases at ** the same instant might all reset the PRNG. But multiple resets ** are harmless. */ if( randomnessPid!=osGetpid(0) ){ randomnessPid = osGetpid(0); sqlite3_randomness(0,0); } memset(p, 0, sizeof(unixFile)); if( eType==SQLITE_OPEN_MAIN_DB ){ UnixUnusedFd *pUnused; pUnused = findReusableFd(zName, flags); if( pUnused ){ fd = pUnused->fd; }else{ pUnused = sqlite3_malloc64(sizeof(*pUnused)); if( !pUnused ){ return SQLITE_NOMEM; } } p->pUnused = pUnused; /* Database filenames are double-zero terminated if they are not ** URIs with parameters. 
Hence, they can always be passed into ** sqlite3_uri_parameter(). */ assert( (flags & SQLITE_OPEN_URI) || zName[strlen(zName)+1]==0 ); }else if( !zName ){ /* If zName is NULL, the upper layer is requesting a temp file. */ assert(isDelete && !syncDir); rc = unixGetTempname(MAX_PATHNAME+2, zTmpname); if( rc!=SQLITE_OK ){ return rc; } zName = zTmpname; /* Generated temporary filenames are always double-zero terminated ** for use by sqlite3_uri_parameter(). */ assert( zName[strlen(zName)+1]==0 ); } /* Determine the value of the flags parameter passed to POSIX function ** open(). These must be calculated even if open() is not called, as ** they may be stored as part of the file handle and used by the ** 'conch file' locking functions later on. */ if( isReadonly ) openFlags |= O_RDONLY; if( isReadWrite ) openFlags |= O_RDWR; if( isCreate ) openFlags |= O_CREAT; if( isExclusive ) openFlags |= (O_EXCL|O_NOFOLLOW); openFlags |= (O_LARGEFILE|O_BINARY); if( fd<0 ){ mode_t openMode; /* Permissions to create file with */ uid_t uid; /* Userid for the file */ gid_t gid; /* Groupid for the file */ rc = findCreateFileMode(zName, flags, &openMode, &uid, &gid); if( rc!=SQLITE_OK ){ assert( !p->pUnused ); assert( eType==SQLITE_OPEN_WAL || eType==SQLITE_OPEN_MAIN_JOURNAL ); return rc; } fd = robust_open(zName, openFlags, openMode); OSTRACE(("OPENX %-3d %s 0%o\n", fd, zName, openFlags)); if( fd<0 && errno!=EISDIR && isReadWrite && !isExclusive ){ /* Failed to open the file for read/write access. Try read-only. */ flags &= ~(SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE); openFlags &= ~(O_RDWR|O_CREAT); flags |= SQLITE_OPEN_READONLY; openFlags |= O_RDONLY; isReadonly = 1; fd = robust_open(zName, openFlags, openMode); } if( fd<0 ){ rc = unixLogError(SQLITE_CANTOPEN_BKPT, "open", zName); goto open_finished; } /* If this process is running as root and if creating a new rollback ** journal or WAL file, set the ownership of the journal or WAL to be ** the same as the original database. 
*/ if( flags & (SQLITE_OPEN_WAL|SQLITE_OPEN_MAIN_JOURNAL) ){ osFchown(fd, uid, gid); } } assert( fd>=0 ); if( pOutFlags ){ *pOutFlags = flags; } if( p->pUnused ){ p->pUnused->fd = fd; p->pUnused->flags = flags; } if( isDelete ){ #if OS_VXWORKS zPath = zName; #elif defined(SQLITE_UNLINK_AFTER_CLOSE) zPath = sqlite3_mprintf("%s", zName); if( zPath==0 ){ robust_close(p, fd, __LINE__); return SQLITE_NOMEM; } #else osUnlink(zName); #endif } #if SQLITE_ENABLE_LOCKING_STYLE else{ p->openFlags = openFlags; } #endif noLock = eType!=SQLITE_OPEN_MAIN_DB; #if defined(__APPLE__) || SQLITE_ENABLE_LOCKING_STYLE if( fstatfs(fd, &fsInfo) == -1 ){ storeLastErrno(p, errno); robust_close(p, fd, __LINE__); return SQLITE_IOERR_ACCESS; } if (0 == strncmp("msdos", fsInfo.f_fstypename, 5)) { ((unixFile*)pFile)->fsFlags |= SQLITE_FSFLAGS_IS_MSDOS; } if (0 == strncmp("exfat", fsInfo.f_fstypename, 5)) { ((unixFile*)pFile)->fsFlags |= SQLITE_FSFLAGS_IS_MSDOS; } #endif /* Set up appropriate ctrlFlags */ if( isDelete ) ctrlFlags |= UNIXFILE_DELETE; if( isReadonly ) ctrlFlags |= UNIXFILE_RDONLY; if( noLock ) ctrlFlags |= UNIXFILE_NOLOCK; if( syncDir ) ctrlFlags |= UNIXFILE_DIRSYNC; if( flags & SQLITE_OPEN_URI ) ctrlFlags |= UNIXFILE_URI; #if SQLITE_ENABLE_LOCKING_STYLE #if SQLITE_PREFER_PROXY_LOCKING isAutoProxy = 1; #endif if( isAutoProxy && (zPath!=NULL) && (!noLock) && pVfs->xOpen ){ char *envforce = getenv("SQLITE_FORCE_PROXY_LOCKING"); int useProxy = 0; /* SQLITE_FORCE_PROXY_LOCKING==1 means force always use proxy, 0 means ** never use proxy, NULL means use proxy for non-local files only. */ if( envforce!=NULL ){ useProxy = atoi(envforce)>0; }else{ useProxy = !(fsInfo.f_flags&MNT_LOCAL); } if( useProxy ){ rc = fillInUnixFile(pVfs, fd, pFile, zPath, ctrlFlags); if( rc==SQLITE_OK ){ rc = proxyTransformUnixFile((unixFile*)pFile, ":auto:"); if( rc!=SQLITE_OK ){ /* Use unixClose to clean up the resources added in fillInUnixFile ** and clear all the structure's references. Specifically, ** pFile->pMethods will be NULL so sqlite3OsClose will be a no-op */ unixClose(pFile); return rc; } } goto open_finished; } } #endif rc = fillInUnixFile(pVfs, fd, pFile, zPath, ctrlFlags); open_finished: if( rc!=SQLITE_OK ){ sqlite3_free(p->pUnused); } return rc; } /* ** Delete the file at zPath. If the dirSync argument is true, fsync() ** the directory after deleting the file. */ static int unixDelete( sqlite3_vfs *NotUsed, /* VFS containing this as the xDelete method */ const char *zPath, /* Name of file to be deleted */ int dirSync /* If true, fsync() directory after deleting file */ ){ int rc = SQLITE_OK; UNUSED_PARAMETER(NotUsed); SimulateIOError(return SQLITE_IOERR_DELETE); if( osUnlink(zPath)==(-1) ){ if( errno==ENOENT #if OS_VXWORKS || osAccess(zPath,0)!=0 #endif ){ rc = SQLITE_IOERR_DELETE_NOENT; }else{ rc = unixLogError(SQLITE_IOERR_DELETE, "unlink", zPath); } return rc; } #ifndef SQLITE_DISABLE_DIRSYNC if( (dirSync & 1)!=0 ){ int fd; rc = osOpenDirectory(zPath, &fd); if( rc==SQLITE_OK ){ #if OS_VXWORKS if( fsync(fd)==-1 ) #else if( fsync(fd) ) #endif { rc = unixLogError(SQLITE_IOERR_DIR_FSYNC, "fsync", zPath); } robust_close(0, fd, __LINE__); }else if( rc==SQLITE_CANTOPEN ){ rc = SQLITE_OK; } } #endif return rc; } /* ** Test the existence of or access permissions of file zPath. The ** test performed depends on the value of flags: ** ** SQLITE_ACCESS_EXISTS: Return 1 if the file exists ** SQLITE_ACCESS_READWRITE: Return 1 if the file is read and writable. ** SQLITE_ACCESS_READONLY: Return 1 if the file is readable. 
** ** Otherwise return 0. */ static int unixAccess( sqlite3_vfs *NotUsed, /* The VFS containing this xAccess method */ const char *zPath, /* Path of the file to examine */ int flags, /* What do we want to learn about the zPath file? */ int *pResOut /* Write result boolean here */ ){ int amode = 0; UNUSED_PARAMETER(NotUsed); SimulateIOError( return SQLITE_IOERR_ACCESS; ); switch( flags ){ case SQLITE_ACCESS_EXISTS: amode = F_OK; break; case SQLITE_ACCESS_READWRITE: amode = W_OK|R_OK; break; case SQLITE_ACCESS_READ: amode = R_OK; break; default: assert(!"Invalid flags argument"); } *pResOut = (osAccess(zPath, amode)==0); if( flags==SQLITE_ACCESS_EXISTS && *pResOut ){ struct stat buf; if( 0==osStat(zPath, &buf) && buf.st_size==0 ){ *pResOut = 0; } } return SQLITE_OK; } /* ** Turn a relative pathname into a full pathname. The relative path ** is stored as a nul-terminated string in the buffer pointed to by ** zPath. ** ** zOut points to a buffer of at least sqlite3_vfs.mxPathname bytes ** (in this case, MAX_PATHNAME bytes). The full-path is written to ** this buffer before returning. */ static int unixFullPathname( sqlite3_vfs *pVfs, /* Pointer to vfs object */ const char *zPath, /* Possibly relative input path */ int nOut, /* Size of output buffer in bytes */ char *zOut /* Output buffer */ ){ /* It's odd to simulate an io-error here, but really this is just ** using the io-error infrastructure to test that SQLite handles this ** function failing. This function could fail if, for example, the ** current working directory has been unlinked. */ SimulateIOError( return SQLITE_ERROR ); assert( pVfs->mxPathname==MAX_PATHNAME ); UNUSED_PARAMETER(pVfs); zOut[nOut-1] = '\0'; if( zPath[0]=='/' ){ sqlite3_snprintf(nOut, zOut, "%s", zPath); }else{ int nCwd; if( osGetcwd(zOut, nOut-1)==0 ){ return unixLogError(SQLITE_CANTOPEN_BKPT, "getcwd", zPath); } nCwd = (int)strlen(zOut); sqlite3_snprintf(nOut-nCwd, &zOut[nCwd], "/%s", zPath); } return SQLITE_OK; } #ifndef SQLITE_OMIT_LOAD_EXTENSION /* ** Interfaces for opening a shared library, finding entry points ** within the shared library, and closing the shared library. */ #include static void *unixDlOpen(sqlite3_vfs *NotUsed, const char *zFilename){ UNUSED_PARAMETER(NotUsed); return dlopen(zFilename, RTLD_NOW | RTLD_GLOBAL); } /* ** SQLite calls this function immediately after a call to unixDlSym() or ** unixDlOpen() fails (returns a null pointer). If a more detailed error ** message is available, it is written to zBufOut. If no error message ** is available, zBufOut is left unmodified and SQLite uses a default ** error message. */ static void unixDlError(sqlite3_vfs *NotUsed, int nBuf, char *zBufOut){ const char *zErr; UNUSED_PARAMETER(NotUsed); unixEnterMutex(); zErr = dlerror(); if( zErr ){ sqlite3_snprintf(nBuf, zBufOut, "%s", zErr); } unixLeaveMutex(); } static void (*unixDlSym(sqlite3_vfs *NotUsed, void *p, const char*zSym))(void){ /* ** GCC with -pedantic-errors says that C90 does not allow a void* to be ** cast into a pointer to a function. And yet the library dlsym() routine ** returns a void* which is really a pointer to a function. So how do we ** use dlsym() with -pedantic-errors? ** ** Variable x below is defined to be a pointer to a function taking ** parameters void* and const char* and returning a pointer to a function. ** We initialize x by assigning it a pointer to the dlsym() function. ** (That assignment requires a cast.) Then we call the function that ** x points to. 
** ** This work-around is unlikely to work correctly on any system where ** you really cannot cast a function pointer into void*. But then, on the ** other hand, dlsym() will not work on such a system either, so we have ** not really lost anything. */ void (*(*x)(void*,const char*))(void); UNUSED_PARAMETER(NotUsed); x = (void(*(*)(void*,const char*))(void))dlsym; return (*x)(p, zSym); } static void unixDlClose(sqlite3_vfs *NotUsed, void *pHandle){ UNUSED_PARAMETER(NotUsed); dlclose(pHandle); } #else /* if SQLITE_OMIT_LOAD_EXTENSION is defined: */ #define unixDlOpen 0 #define unixDlError 0 #define unixDlSym 0 #define unixDlClose 0 #endif /* ** Write nBuf bytes of random data to the supplied buffer zBuf. */ static int unixRandomness(sqlite3_vfs *NotUsed, int nBuf, char *zBuf){ UNUSED_PARAMETER(NotUsed); assert((size_t)nBuf>=(sizeof(time_t)+sizeof(int))); /* We have to initialize zBuf to prevent valgrind from reporting ** errors. The reports issued by valgrind are incorrect - we would ** prefer that the randomness be increased by making use of the ** uninitialized space in zBuf - but valgrind errors tend to worry ** some users. Rather than argue, it seems easier just to initialize ** the whole array and silence valgrind, even if that means less randomness ** in the random seed. ** ** When testing, initializing zBuf[] to zero is all we do. That means ** that we always use the same random number sequence. This makes the ** tests repeatable. */ memset(zBuf, 0, nBuf); randomnessPid = osGetpid(0); #if !defined(SQLITE_TEST) && !defined(SQLITE_OMIT_RANDOMNESS) { int fd, got; fd = robust_open("/dev/urandom", O_RDONLY, 0); if( fd<0 ){ time_t t; time(&t); memcpy(zBuf, &t, sizeof(t)); memcpy(&zBuf[sizeof(t)], &randomnessPid, sizeof(randomnessPid)); assert( sizeof(t)+sizeof(randomnessPid)<=(size_t)nBuf ); nBuf = sizeof(t) + sizeof(randomnessPid); }else{ do{ got = osRead(fd, zBuf, nBuf); }while( got<0 && errno==EINTR ); robust_close(0, fd, __LINE__); } } #endif return nBuf; } /* ** Sleep for a little while. Return the amount of time slept. ** The argument is the number of microseconds we want to sleep. ** The return value is the number of microseconds of sleep actually ** requested from the underlying operating system, a number which ** might be greater than or equal to the argument, but not less ** than the argument. */ static int unixSleep(sqlite3_vfs *NotUsed, int microseconds){ #if OS_VXWORKS struct timespec sp; sp.tv_sec = microseconds / 1000000; sp.tv_nsec = (microseconds % 1000000) * 1000; nanosleep(&sp, NULL); UNUSED_PARAMETER(NotUsed); return microseconds; #elif defined(HAVE_USLEEP) && HAVE_USLEEP usleep(microseconds); UNUSED_PARAMETER(NotUsed); return microseconds; #else int seconds = (microseconds+999999)/1000000; sleep(seconds); UNUSED_PARAMETER(NotUsed); return seconds*1000000; #endif } /* ** The following variable, if set to a non-zero value, is interpreted as ** the number of seconds since 1970 and is used to set the result of ** sqlite3OsCurrentTime() during testing. */ #ifdef SQLITE_TEST SQLITE_API int sqlite3_current_time = 0; /* Fake system time in seconds since 1970. */ #endif /* ** Find the current time (in Universal Coordinated Time). Write into *piNow ** the current time and date as a Julian Day number times 86_400_000. In ** other words, write into *piNow the number of milliseconds since the Julian ** epoch of noon in Greenwich on November 24, 4714 B.C according to the ** proleptic Gregorian calendar. ** ** On success, return SQLITE_OK. 
Return SQLITE_ERROR if the time and date ** cannot be found. */ static int unixCurrentTimeInt64(sqlite3_vfs *NotUsed, sqlite3_int64 *piNow){ static const sqlite3_int64 unixEpoch = 24405875*(sqlite3_int64)8640000; int rc = SQLITE_OK; #if defined(NO_GETTOD) time_t t; time(&t); *piNow = ((sqlite3_int64)t)*1000 + unixEpoch; #elif OS_VXWORKS struct timespec sNow; clock_gettime(CLOCK_REALTIME, &sNow); *piNow = unixEpoch + 1000*(sqlite3_int64)sNow.tv_sec + sNow.tv_nsec/1000000; #else struct timeval sNow; if( gettimeofday(&sNow, 0)==0 ){ *piNow = unixEpoch + 1000*(sqlite3_int64)sNow.tv_sec + sNow.tv_usec/1000; }else{ rc = SQLITE_ERROR; } #endif #ifdef SQLITE_TEST if( sqlite3_current_time ){ *piNow = 1000*(sqlite3_int64)sqlite3_current_time + unixEpoch; } #endif UNUSED_PARAMETER(NotUsed); return rc; } /* ** Find the current time (in Universal Coordinated Time). Write the ** current time and date as a Julian Day number into *prNow and ** return 0. Return 1 if the time and date cannot be found. */ static int unixCurrentTime(sqlite3_vfs *NotUsed, double *prNow){ sqlite3_int64 i = 0; int rc; UNUSED_PARAMETER(NotUsed); rc = unixCurrentTimeInt64(0, &i); *prNow = i/86400000.0; return rc; } /* ** We added the xGetLastError() method with the intention of providing ** better low-level error messages when operating-system problems come up ** during SQLite operation. But so far, none of that has been implemented ** in the core. So this routine is never called. For now, it is merely ** a place-holder. */ static int unixGetLastError(sqlite3_vfs *NotUsed, int NotUsed2, char *NotUsed3){ UNUSED_PARAMETER(NotUsed); UNUSED_PARAMETER(NotUsed2); UNUSED_PARAMETER(NotUsed3); return 0; } /* ************************ End of sqlite3_vfs methods *************************** ******************************************************************************/ /****************************************************************************** ************************** Begin Proxy Locking ******************************** ** ** Proxy locking is a "uber-locking-method" in this sense: It uses the ** other locking methods on secondary lock files. Proxy locking is a ** meta-layer over top of the primitive locking implemented above. For ** this reason, the division that implements of proxy locking is deferred ** until late in the file (here) after all of the other I/O methods have ** been defined - so that the primitive locking methods are available ** as services to help with the implementation of proxy locking. ** **** ** ** The default locking schemes in SQLite use byte-range locks on the ** database file to coordinate safe, concurrent access by multiple readers ** and writers [http://sqlite.org/lockingv3.html]. The five file locking ** states (UNLOCKED, PENDING, SHARED, RESERVED, EXCLUSIVE) are implemented ** as POSIX read & write locks over fixed set of locations (via fsctl), ** on AFP and SMB only exclusive byte-range locks are available via fsctl ** with _IOWR('z', 23, struct ByteRangeLockPB2) to track the same 5 states. ** To simulate a F_RDLCK on the shared range, on AFP a randomly selected ** address in the shared range is taken for a SHARED lock, the entire ** shared range is taken for an EXCLUSIVE lock): ** ** PENDING_BYTE 0x40000000 ** RESERVED_BYTE 0x40000001 ** SHARED_RANGE 0x40000002 -> 0x40000200 ** ** This works well on the local file system, but shows a nearly 100x ** slowdown in read performance on AFP because the AFP client disables ** the read cache when byte-range locks are present. 
Enabling the read ** cache exposes a cache coherency problem that is present on all OS X ** supported network file systems. NFS and AFP both observe the ** close-to-open semantics for ensuring cache coherency ** [http://nfs.sourceforge.net/#faq_a8], which does not effectively ** address the requirements for concurrent database access by multiple ** readers and writers ** [http://www.nabble.com/SQLite-on-NFS-cache-coherency-td15655701.html]. ** ** To address the performance and cache coherency issues, proxy file locking ** changes the way database access is controlled by limiting access to a ** single host at a time and moving file locks off of the database file ** and onto a proxy file on the local file system. ** ** ** Using proxy locks ** ----------------- ** ** C APIs ** ** sqlite3_file_control(db, dbname, SQLITE_FCNTL_SET_LOCKPROXYFILE, ** | ":auto:"); ** sqlite3_file_control(db, dbname, SQLITE_FCNTL_GET_LOCKPROXYFILE, ** &); ** ** ** SQL pragmas ** ** PRAGMA [database.]lock_proxy_file= | :auto: ** PRAGMA [database.]lock_proxy_file ** ** Specifying ":auto:" means that if there is a conch file with a matching ** host ID in it, the proxy path in the conch file will be used, otherwise ** a proxy path based on the user's temp dir ** (via confstr(_CS_DARWIN_USER_TEMP_DIR,...)) will be used and the ** actual proxy file name is generated from the name and path of the ** database file. For example: ** ** For database path "/Users/me/foo.db" ** The lock path will be "/sqliteplocks/_Users_me_foo.db:auto:") ** ** Once a lock proxy is configured for a database connection, it can not ** be removed, however it may be switched to a different proxy path via ** the above APIs (assuming the conch file is not being held by another ** connection or process). ** ** ** How proxy locking works ** ----------------------- ** ** Proxy file locking relies primarily on two new supporting files: ** ** * conch file to limit access to the database file to a single host ** at a time ** ** * proxy file to act as a proxy for the advisory locks normally ** taken on the database ** ** The conch file - to use a proxy file, sqlite must first "hold the conch" ** by taking an sqlite-style shared lock on the conch file, reading the ** contents and comparing the host's unique host ID (see below) and lock ** proxy path against the values stored in the conch. The conch file is ** stored in the same directory as the database file and the file name ** is patterned after the database file name as ".-conch". ** If the conch file does not exist, or its contents do not match the ** host ID and/or proxy path, then the lock is escalated to an exclusive ** lock and the conch file contents is updated with the host ID and proxy ** path and the lock is downgraded to a shared lock again. If the conch ** is held by another process (with a shared lock), the exclusive lock ** will fail and SQLITE_BUSY is returned. ** ** The proxy file - a single-byte file used for all advisory file locks ** normally taken on the database file. This allows for safe sharing ** of the database file for multiple readers and writers on the same ** host (the conch ensures that they all use the same local lock file). ** ** Requesting the lock proxy does not immediately take the conch, it is ** only taken when the first request to lock database file is made. ** This matches the semantics of the traditional locking behavior, where ** opening a connection to a database file does not take a lock on it. 
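**
** Because opening the connection takes no lock, a connection is free to
** configure proxy locking right after the database is opened and before it
** runs any statements.  A minimal sketch (the handle "db" and the use of
** the "main" database are assumptions of the example, not something this
** file defines):
**
**     sqlite3_file_control(db, "main", SQLITE_FCNTL_SET_LOCKPROXYFILE,
**                          (void *)":auto:");
**     ...
**     const char *zProxyPath = 0;
**     sqlite3_file_control(db, "main", SQLITE_FCNTL_GET_LOCKPROXYFILE,
**                          (void *)&zProxyPath);
**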
** The shared lock and an open file descriptor are maintained until ** the connection to the database is closed. ** ** The proxy file and the lock file are never deleted so they only need ** to be created the first time they are used. ** ** Configuration options ** --------------------- ** ** SQLITE_PREFER_PROXY_LOCKING ** ** Database files accessed on non-local file systems are ** automatically configured for proxy locking, lock files are ** named automatically using the same logic as ** PRAGMA lock_proxy_file=":auto:" ** ** SQLITE_PROXY_DEBUG ** ** Enables the logging of error messages during host id file ** retrieval and creation ** ** LOCKPROXYDIR ** ** Overrides the default directory used for lock proxy files that ** are named automatically via the ":auto:" setting ** ** SQLITE_DEFAULT_PROXYDIR_PERMISSIONS ** ** Permissions to use when creating a directory for storing the ** lock proxy files, only used when LOCKPROXYDIR is not set. ** ** ** As mentioned above, when compiled with SQLITE_PREFER_PROXY_LOCKING, ** setting the environment variable SQLITE_FORCE_PROXY_LOCKING to 1 will ** force proxy locking to be used for every database file opened, and 0 ** will force automatic proxy locking to be disabled for all database ** files (explicitly calling the SQLITE_FCNTL_SET_LOCKPROXYFILE pragma or ** sqlite_file_control API is not affected by SQLITE_FORCE_PROXY_LOCKING). */ /* ** Proxy locking is only available on MacOSX */ #if defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE /* ** The proxyLockingContext has the path and file structures for the remote ** and local proxy files in it */ typedef struct proxyLockingContext proxyLockingContext; struct proxyLockingContext { unixFile *conchFile; /* Open conch file */ char *conchFilePath; /* Name of the conch file */ unixFile *lockProxy; /* Open proxy lock file */ char *lockProxyPath; /* Name of the proxy lock file */ char *dbPath; /* Name of the open file */ int conchHeld; /* 1 if the conch is held, -1 if lockless */ int nFails; /* Number of conch taking failures */ void *oldLockingContext; /* Original lockingcontext to restore on close */ sqlite3_io_methods const *pOldMethod; /* Original I/O methods for close */ }; /* ** The proxy lock file path for the database at dbPath is written into lPath, ** which must point to valid, writable memory large enough for a maxLen length ** file path. */ static int proxyGetLockPath(const char *dbPath, char *lPath, size_t maxLen){ int len; int dbLen; int i; #ifdef LOCKPROXYDIR len = strlcpy(lPath, LOCKPROXYDIR, maxLen); #else # ifdef _CS_DARWIN_USER_TEMP_DIR { if( !confstr(_CS_DARWIN_USER_TEMP_DIR, lPath, maxLen) ){ OSTRACE(("GETLOCKPATH failed %s errno=%d pid=%d\n", lPath, errno, osGetpid(0))); return SQLITE_IOERR_LOCK; } len = strlcat(lPath, "sqliteplocks", maxLen); } # else len = strlcpy(lPath, "/tmp/", maxLen); # endif #endif if( lPath[len-1]!='/' ){ len = strlcat(lPath, "/", maxLen); } /* transform the db path to a unique cache name */ dbLen = (int)strlen(dbPath); for( i=0; i 0) ){ /* only mkdir if leaf dir != "." or "/" or ".." */ if( i-start>2 || (i-start==1 && buf[start] != '.' && buf[start] != '/') || (i-start==2 && buf[start] != '.' 
&& buf[start+1] != '.') ){ buf[i]='\0'; if( osMkdir(buf, SQLITE_DEFAULT_PROXYDIR_PERMISSIONS) ){ int err=errno; if( err!=EEXIST ) { OSTRACE(("CREATELOCKPATH FAILED creating %s, " "'%s' proxy lock path=%s pid=%d\n", buf, strerror(err), lockPath, osGetpid(0))); return err; } } } start=i+1; } buf[i] = lockPath[i]; } OSTRACE(("CREATELOCKPATH proxy lock path=%s pid=%d\n", lockPath, osGetpid(0))); return 0; } /* ** Create a new VFS file descriptor (stored in memory obtained from ** sqlite3_malloc) and open the file named "path" in the file descriptor. ** ** The caller is responsible not only for closing the file descriptor ** but also for freeing the memory associated with the file descriptor. */ static int proxyCreateUnixFile( const char *path, /* path for the new unixFile */ unixFile **ppFile, /* unixFile created and returned by ref */ int islockfile /* if non zero missing dirs will be created */ ) { int fd = -1; unixFile *pNew; int rc = SQLITE_OK; int openFlags = O_RDWR | O_CREAT; sqlite3_vfs dummyVfs; int terrno = 0; UnixUnusedFd *pUnused = NULL; /* 1. first try to open/create the file ** 2. if that fails, and this is a lock file (not-conch), try creating ** the parent directories and then try again. ** 3. if that fails, try to open the file read-only ** otherwise return BUSY (if lock file) or CANTOPEN for the conch file */ pUnused = findReusableFd(path, openFlags); if( pUnused ){ fd = pUnused->fd; }else{ pUnused = sqlite3_malloc64(sizeof(*pUnused)); if( !pUnused ){ return SQLITE_NOMEM; } } if( fd<0 ){ fd = robust_open(path, openFlags, 0); terrno = errno; if( fd<0 && errno==ENOENT && islockfile ){ if( proxyCreateLockPath(path) == SQLITE_OK ){ fd = robust_open(path, openFlags, 0); } } } if( fd<0 ){ openFlags = O_RDONLY; fd = robust_open(path, openFlags, 0); terrno = errno; } if( fd<0 ){ if( islockfile ){ return SQLITE_BUSY; } switch (terrno) { case EACCES: return SQLITE_PERM; case EIO: return SQLITE_IOERR_LOCK; /* even though it is the conch */ default: return SQLITE_CANTOPEN_BKPT; } } pNew = (unixFile *)sqlite3_malloc64(sizeof(*pNew)); if( pNew==NULL ){ rc = SQLITE_NOMEM; goto end_create_proxy; } memset(pNew, 0, sizeof(unixFile)); pNew->openFlags = openFlags; memset(&dummyVfs, 0, sizeof(dummyVfs)); dummyVfs.pAppData = (void*)&autolockIoFinder; dummyVfs.zName = "dummy"; pUnused->fd = fd; pUnused->flags = openFlags; pNew->pUnused = pUnused; rc = fillInUnixFile(&dummyVfs, fd, (sqlite3_file*)pNew, path, 0); if( rc==SQLITE_OK ){ *ppFile = pNew; return SQLITE_OK; } end_create_proxy: robust_close(pNew, fd, __LINE__); sqlite3_free(pNew); sqlite3_free(pUnused); return rc; } #ifdef SQLITE_TEST /* simulate multiple hosts by creating unique hostid file paths */ SQLITE_API int sqlite3_hostid_num = 0; #endif #define PROXY_HOSTIDLEN 16 /* conch file host id length */ #ifdef HAVE_GETHOSTUUID /* Not always defined in the headers as it ought to be */ extern int gethostuuid(uuid_t id, const struct timespec *wait); #endif /* get the host ID via gethostuuid(), pHostID must point to PROXY_HOSTIDLEN ** bytes of writable memory. 
*/ static int proxyGetHostID(unsigned char *pHostID, int *pError){ assert(PROXY_HOSTIDLEN == sizeof(uuid_t)); memset(pHostID, 0, PROXY_HOSTIDLEN); #ifdef HAVE_GETHOSTUUID { struct timespec timeout = {1, 0}; /* 1 sec timeout */ if( gethostuuid(pHostID, &timeout) ){ int err = errno; if( pError ){ *pError = err; } return SQLITE_IOERR; } } #else UNUSED_PARAMETER(pError); #endif #ifdef SQLITE_TEST /* simulate multiple hosts by creating unique hostid file paths */ if( sqlite3_hostid_num != 0){ pHostID[0] = (char)(pHostID[0] + (char)(sqlite3_hostid_num & 0xFF)); } #endif return SQLITE_OK; } /* The conch file contains the header, host id and lock file path */ #define PROXY_CONCHVERSION 2 /* 1-byte header, 16-byte host id, path */ #define PROXY_HEADERLEN 1 /* conch file header length */ #define PROXY_PATHINDEX (PROXY_HEADERLEN+PROXY_HOSTIDLEN) #define PROXY_MAXCONCHLEN (PROXY_HEADERLEN+PROXY_HOSTIDLEN+MAXPATHLEN) /* ** Takes an open conch file, copies the contents to a new path and then moves ** it back. The newly created file's file descriptor is assigned to the ** conch file structure and finally the original conch file descriptor is ** closed. Returns zero if successful. */ static int proxyBreakConchLock(unixFile *pFile, uuid_t myHostID){ proxyLockingContext *pCtx = (proxyLockingContext *)pFile->lockingContext; unixFile *conchFile = pCtx->conchFile; char tPath[MAXPATHLEN]; char buf[PROXY_MAXCONCHLEN]; char *cPath = pCtx->conchFilePath; size_t readLen = 0; size_t pathLen = 0; char errmsg[64] = ""; int fd = -1; int rc = -1; UNUSED_PARAMETER(myHostID); /* create a new path by replace the trailing '-conch' with '-break' */ pathLen = strlcpy(tPath, cPath, MAXPATHLEN); if( pathLen>MAXPATHLEN || pathLen<6 || (strlcpy(&tPath[pathLen-5], "break", 6) != 5) ){ sqlite3_snprintf(sizeof(errmsg),errmsg,"path error (len %d)",(int)pathLen); goto end_breaklock; } /* read the conch content */ readLen = osPread(conchFile->h, buf, PROXY_MAXCONCHLEN, 0); if( readLenh, __LINE__); conchFile->h = fd; conchFile->openFlags = O_RDWR | O_CREAT; end_breaklock: if( rc ){ if( fd>=0 ){ osUnlink(tPath); robust_close(pFile, fd, __LINE__); } fprintf(stderr, "failed to break stale lock on %s, %s\n", cPath, errmsg); } return rc; } /* Take the requested lock on the conch file and break a stale lock if the ** host id matches. */ static int proxyConchLock(unixFile *pFile, uuid_t myHostID, int lockType){ proxyLockingContext *pCtx = (proxyLockingContext *)pFile->lockingContext; unixFile *conchFile = pCtx->conchFile; int rc = SQLITE_OK; int nTries = 0; struct timespec conchModTime; memset(&conchModTime, 0, sizeof(conchModTime)); do { rc = conchFile->pMethod->xLock((sqlite3_file*)conchFile, lockType); nTries ++; if( rc==SQLITE_BUSY ){ /* If the lock failed (busy): * 1st try: get the mod time of the conch, wait 0.5s and try again. * 2nd try: fail if the mod time changed or host id is different, wait * 10 sec and try again * 3rd try: break the lock unless the mod time has changed. 
*/ struct stat buf; if( osFstat(conchFile->h, &buf) ){ storeLastErrno(pFile, errno); return SQLITE_IOERR_LOCK; } if( nTries==1 ){ conchModTime = buf.st_mtimespec; usleep(500000); /* wait 0.5 sec and try the lock again*/ continue; } assert( nTries>1 ); if( conchModTime.tv_sec != buf.st_mtimespec.tv_sec || conchModTime.tv_nsec != buf.st_mtimespec.tv_nsec ){ return SQLITE_BUSY; } if( nTries==2 ){ char tBuf[PROXY_MAXCONCHLEN]; int len = osPread(conchFile->h, tBuf, PROXY_MAXCONCHLEN, 0); if( len<0 ){ storeLastErrno(pFile, errno); return SQLITE_IOERR_LOCK; } if( len>PROXY_PATHINDEX && tBuf[0]==(char)PROXY_CONCHVERSION){ /* don't break the lock if the host id doesn't match */ if( 0!=memcmp(&tBuf[PROXY_HEADERLEN], myHostID, PROXY_HOSTIDLEN) ){ return SQLITE_BUSY; } }else{ /* don't break the lock on short read or a version mismatch */ return SQLITE_BUSY; } usleep(10000000); /* wait 10 sec and try the lock again */ continue; } assert( nTries==3 ); if( 0==proxyBreakConchLock(pFile, myHostID) ){ rc = SQLITE_OK; if( lockType==EXCLUSIVE_LOCK ){ rc = conchFile->pMethod->xLock((sqlite3_file*)conchFile, SHARED_LOCK); } if( !rc ){ rc = conchFile->pMethod->xLock((sqlite3_file*)conchFile, lockType); } } } } while( rc==SQLITE_BUSY && nTries<3 ); return rc; } /* Takes the conch by taking a shared lock and read the contents conch, if ** lockPath is non-NULL, the host ID and lock file path must match. A NULL ** lockPath means that the lockPath in the conch file will be used if the ** host IDs match, or a new lock path will be generated automatically ** and written to the conch file. */ static int proxyTakeConch(unixFile *pFile){ proxyLockingContext *pCtx = (proxyLockingContext *)pFile->lockingContext; if( pCtx->conchHeld!=0 ){ return SQLITE_OK; }else{ unixFile *conchFile = pCtx->conchFile; uuid_t myHostID; int pError = 0; char readBuf[PROXY_MAXCONCHLEN]; char lockPath[MAXPATHLEN]; char *tempLockPath = NULL; int rc = SQLITE_OK; int createConch = 0; int hostIdMatch = 0; int readLen = 0; int tryOldLockPath = 0; int forceNewLockPath = 0; OSTRACE(("TAKECONCH %d for %s pid=%d\n", conchFile->h, (pCtx->lockProxyPath ? pCtx->lockProxyPath : ":auto:"), osGetpid(0))); rc = proxyGetHostID(myHostID, &pError); if( (rc&0xff)==SQLITE_IOERR ){ storeLastErrno(pFile, pError); goto end_takeconch; } rc = proxyConchLock(pFile, myHostID, SHARED_LOCK); if( rc!=SQLITE_OK ){ goto end_takeconch; } /* read the existing conch file */ readLen = seekAndRead((unixFile*)conchFile, 0, readBuf, PROXY_MAXCONCHLEN); if( readLen<0 ){ /* I/O error: lastErrno set by seekAndRead */ storeLastErrno(pFile, conchFile->lastErrno); rc = SQLITE_IOERR_READ; goto end_takeconch; }else if( readLen<=(PROXY_HEADERLEN+PROXY_HOSTIDLEN) || readBuf[0]!=(char)PROXY_CONCHVERSION ){ /* a short read or version format mismatch means we need to create a new ** conch file. 
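** A well-formed conch, as written further down in this routine, is laid
** out as follows (sizes follow the PROXY_* defines above):
**
**     byte  0          conch format version, PROXY_CONCHVERSION (2)
**     bytes 1..16      host UUID, PROXY_HOSTIDLEN bytes
**     bytes 17..end    lock proxy file path, not NUL terminated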
*/ createConch = 1; } /* if the host id matches and the lock path already exists in the conch ** we'll try to use the path there, if we can't open that path, we'll ** retry with a new auto-generated path */ do { /* in case we need to try again for an :auto: named lock file */ if( !createConch && !forceNewLockPath ){ hostIdMatch = !memcmp(&readBuf[PROXY_HEADERLEN], myHostID, PROXY_HOSTIDLEN); /* if the conch has data compare the contents */ if( !pCtx->lockProxyPath ){ /* for auto-named local lock file, just check the host ID and we'll ** use the local lock file path that's already in there */ if( hostIdMatch ){ size_t pathLen = (readLen - PROXY_PATHINDEX); if( pathLen>=MAXPATHLEN ){ pathLen=MAXPATHLEN-1; } memcpy(lockPath, &readBuf[PROXY_PATHINDEX], pathLen); lockPath[pathLen] = 0; tempLockPath = lockPath; tryOldLockPath = 1; /* create a copy of the lock path if the conch is taken */ goto end_takeconch; } }else if( hostIdMatch && !strncmp(pCtx->lockProxyPath, &readBuf[PROXY_PATHINDEX], readLen-PROXY_PATHINDEX) ){ /* conch host and lock path match */ goto end_takeconch; } } /* if the conch isn't writable and doesn't match, we can't take it */ if( (conchFile->openFlags&O_RDWR) == 0 ){ rc = SQLITE_BUSY; goto end_takeconch; } /* either the conch didn't match or we need to create a new one */ if( !pCtx->lockProxyPath ){ proxyGetLockPath(pCtx->dbPath, lockPath, MAXPATHLEN); tempLockPath = lockPath; /* create a copy of the lock path _only_ if the conch is taken */ } /* update conch with host and path (this will fail if other process ** has a shared lock already), if the host id matches, use the big ** stick. */ futimes(conchFile->h, NULL); if( hostIdMatch && !createConch ){ if( conchFile->pInode && conchFile->pInode->nShared>1 ){ /* We are trying for an exclusive lock but another thread in this ** same process is still holding a shared lock. 
*/ rc = SQLITE_BUSY; } else { rc = proxyConchLock(pFile, myHostID, EXCLUSIVE_LOCK); } }else{ rc = proxyConchLock(pFile, myHostID, EXCLUSIVE_LOCK); } if( rc==SQLITE_OK ){ char writeBuffer[PROXY_MAXCONCHLEN]; int writeSize = 0; writeBuffer[0] = (char)PROXY_CONCHVERSION; memcpy(&writeBuffer[PROXY_HEADERLEN], myHostID, PROXY_HOSTIDLEN); if( pCtx->lockProxyPath!=NULL ){ strlcpy(&writeBuffer[PROXY_PATHINDEX], pCtx->lockProxyPath, MAXPATHLEN); }else{ strlcpy(&writeBuffer[PROXY_PATHINDEX], tempLockPath, MAXPATHLEN); } writeSize = PROXY_PATHINDEX + strlen(&writeBuffer[PROXY_PATHINDEX]); robust_ftruncate(conchFile->h, writeSize); rc = unixWrite((sqlite3_file *)conchFile, writeBuffer, writeSize, 0); fsync(conchFile->h); /* If we created a new conch file (not just updated the contents of a ** valid conch file), try to match the permissions of the database */ if( rc==SQLITE_OK && createConch ){ struct stat buf; int err = osFstat(pFile->h, &buf); if( err==0 ){ mode_t cmode = buf.st_mode&(S_IRUSR|S_IWUSR | S_IRGRP|S_IWGRP | S_IROTH|S_IWOTH); /* try to match the database file R/W permissions, ignore failure */ #ifndef SQLITE_PROXY_DEBUG osFchmod(conchFile->h, cmode); #else do{ rc = osFchmod(conchFile->h, cmode); }while( rc==(-1) && errno==EINTR ); if( rc!=0 ){ int code = errno; fprintf(stderr, "fchmod %o FAILED with %d %s\n", cmode, code, strerror(code)); } else { fprintf(stderr, "fchmod %o SUCCEDED\n",cmode); } }else{ int code = errno; fprintf(stderr, "STAT FAILED[%d] with %d %s\n", err, code, strerror(code)); #endif } } } conchFile->pMethod->xUnlock((sqlite3_file*)conchFile, SHARED_LOCK); end_takeconch: OSTRACE(("TRANSPROXY: CLOSE %d\n", pFile->h)); if( rc==SQLITE_OK && pFile->openFlags ){ int fd; if( pFile->h>=0 ){ robust_close(pFile, pFile->h, __LINE__); } pFile->h = -1; fd = robust_open(pCtx->dbPath, pFile->openFlags, 0); OSTRACE(("TRANSPROXY: OPEN %d\n", fd)); if( fd>=0 ){ pFile->h = fd; }else{ rc=SQLITE_CANTOPEN_BKPT; /* SQLITE_BUSY? proxyTakeConch called during locking */ } } if( rc==SQLITE_OK && !pCtx->lockProxy ){ char *path = tempLockPath ? tempLockPath : pCtx->lockProxyPath; rc = proxyCreateUnixFile(path, &pCtx->lockProxy, 1); if( rc!=SQLITE_OK && rc!=SQLITE_NOMEM && tryOldLockPath ){ /* we couldn't create the proxy lock file with the old lock file path ** so try again via auto-naming */ forceNewLockPath = 1; tryOldLockPath = 0; continue; /* go back to the do {} while start point, try again */ } } if( rc==SQLITE_OK ){ /* Need to make a copy of path if we extracted the value ** from the conch file or the path was allocated on the stack */ if( tempLockPath ){ pCtx->lockProxyPath = sqlite3DbStrDup(0, tempLockPath); if( !pCtx->lockProxyPath ){ rc = SQLITE_NOMEM; } } } if( rc==SQLITE_OK ){ pCtx->conchHeld = 1; if( pCtx->lockProxy->pMethod == &afpIoMethods ){ afpLockingContext *afpCtx; afpCtx = (afpLockingContext *)pCtx->lockProxy->lockingContext; afpCtx->dbPath = pCtx->lockProxyPath; } } else { conchFile->pMethod->xUnlock((sqlite3_file*)conchFile, NO_LOCK); } OSTRACE(("TAKECONCH %d %s\n", conchFile->h, rc==SQLITE_OK?"ok":"failed")); return rc; } while (1); /* in case we need to retry the :auto: lock file - ** we should never get here except via the 'continue' call. */ } } /* ** If pFile holds a lock on a conch file, then release that lock. 
*/ static int proxyReleaseConch(unixFile *pFile){ int rc = SQLITE_OK; /* Subroutine return code */ proxyLockingContext *pCtx; /* The locking context for the proxy lock */ unixFile *conchFile; /* Name of the conch file */ pCtx = (proxyLockingContext *)pFile->lockingContext; conchFile = pCtx->conchFile; OSTRACE(("RELEASECONCH %d for %s pid=%d\n", conchFile->h, (pCtx->lockProxyPath ? pCtx->lockProxyPath : ":auto:"), osGetpid(0))); if( pCtx->conchHeld>0 ){ rc = conchFile->pMethod->xUnlock((sqlite3_file*)conchFile, NO_LOCK); } pCtx->conchHeld = 0; OSTRACE(("RELEASECONCH %d %s\n", conchFile->h, (rc==SQLITE_OK ? "ok" : "failed"))); return rc; } /* ** Given the name of a database file, compute the name of its conch file. ** Store the conch filename in memory obtained from sqlite3_malloc64(). ** Make *pConchPath point to the new name. Return SQLITE_OK on success ** or SQLITE_NOMEM if unable to obtain memory. ** ** The caller is responsible for ensuring that the allocated memory ** space is eventually freed. ** ** *pConchPath is set to NULL if a memory allocation error occurs. */ static int proxyCreateConchPathname(char *dbPath, char **pConchPath){ int i; /* Loop counter */ int len = (int)strlen(dbPath); /* Length of database filename - dbPath */ char *conchPath; /* buffer in which to construct conch name */ /* Allocate space for the conch filename and initialize the name to ** the name of the original database file. */ *pConchPath = conchPath = (char *)sqlite3_malloc64(len + 8); if( conchPath==0 ){ return SQLITE_NOMEM; } memcpy(conchPath, dbPath, len+1); /* now insert a "." before the last / character */ for( i=(len-1); i>=0; i-- ){ if( conchPath[i]=='/' ){ i++; break; } } conchPath[i]='.'; while ( ilockingContext; char *oldPath = pCtx->lockProxyPath; int rc = SQLITE_OK; if( pFile->eFileLock!=NO_LOCK ){ return SQLITE_BUSY; } /* nothing to do if the path is NULL, :auto: or matches the existing path */ if( !path || path[0]=='\0' || !strcmp(path, ":auto:") || (oldPath && !strncmp(oldPath, path, MAXPATHLEN)) ){ return SQLITE_OK; }else{ unixFile *lockProxy = pCtx->lockProxy; pCtx->lockProxy=NULL; pCtx->conchHeld = 0; if( lockProxy!=NULL ){ rc=lockProxy->pMethod->xClose((sqlite3_file *)lockProxy); if( rc ) return rc; sqlite3_free(lockProxy); } sqlite3_free(oldPath); pCtx->lockProxyPath = sqlite3DbStrDup(0, path); } return rc; } /* ** pFile is a file that has been opened by a prior xOpen call. dbPath ** is a string buffer at least MAXPATHLEN+1 characters in size. ** ** This routine find the filename associated with pFile and writes it ** int dbPath. 
*/ static int proxyGetDbPathForUnixFile(unixFile *pFile, char *dbPath){ #if defined(__APPLE__) if( pFile->pMethod == &afpIoMethods ){ /* afp style keeps a reference to the db path in the filePath field ** of the struct */ assert( (int)strlen((char*)pFile->lockingContext)<=MAXPATHLEN ); strlcpy(dbPath, ((afpLockingContext *)pFile->lockingContext)->dbPath, MAXPATHLEN); } else #endif if( pFile->pMethod == &dotlockIoMethods ){ /* dot lock style uses the locking context to store the dot lock ** file path */ int len = strlen((char *)pFile->lockingContext) - strlen(DOTLOCK_SUFFIX); memcpy(dbPath, (char *)pFile->lockingContext, len + 1); }else{ /* all other styles use the locking context to store the db file path */ assert( strlen((char*)pFile->lockingContext)<=MAXPATHLEN ); strlcpy(dbPath, (char *)pFile->lockingContext, MAXPATHLEN); } return SQLITE_OK; } /* ** Takes an already filled in unix file and alters it so all file locking ** will be performed on the local proxy lock file. The following fields ** are preserved in the locking context so that they can be restored and ** the unix structure properly cleaned up at close time: ** ->lockingContext ** ->pMethod */ static int proxyTransformUnixFile(unixFile *pFile, const char *path) { proxyLockingContext *pCtx; char dbPath[MAXPATHLEN+1]; /* Name of the database file */ char *lockPath=NULL; int rc = SQLITE_OK; if( pFile->eFileLock!=NO_LOCK ){ return SQLITE_BUSY; } proxyGetDbPathForUnixFile(pFile, dbPath); if( !path || path[0]=='\0' || !strcmp(path, ":auto:") ){ lockPath=NULL; }else{ lockPath=(char *)path; } OSTRACE(("TRANSPROXY %d for %s pid=%d\n", pFile->h, (lockPath ? lockPath : ":auto:"), osGetpid(0))); pCtx = sqlite3_malloc64( sizeof(*pCtx) ); if( pCtx==0 ){ return SQLITE_NOMEM; } memset(pCtx, 0, sizeof(*pCtx)); rc = proxyCreateConchPathname(dbPath, &pCtx->conchFilePath); if( rc==SQLITE_OK ){ rc = proxyCreateUnixFile(pCtx->conchFilePath, &pCtx->conchFile, 0); if( rc==SQLITE_CANTOPEN && ((pFile->openFlags&O_RDWR) == 0) ){ /* if (a) the open flags are not O_RDWR, (b) the conch isn't there, and ** (c) the file system is read-only, then enable no-locking access. ** Ugh, since O_RDONLY==0x0000 we test for !O_RDWR since unixOpen asserts ** that openFlags will have only one of O_RDONLY or O_RDWR. */ struct statfs fsInfo; struct stat conchInfo; int goLockless = 0; if( osStat(pCtx->conchFilePath, &conchInfo) == -1 ) { int err = errno; if( (err==ENOENT) && (statfs(dbPath, &fsInfo) != -1) ){ goLockless = (fsInfo.f_flags&MNT_RDONLY) == MNT_RDONLY; } } if( goLockless ){ pCtx->conchHeld = -1; /* read only FS/ lockless */ rc = SQLITE_OK; } } } if( rc==SQLITE_OK && lockPath ){ pCtx->lockProxyPath = sqlite3DbStrDup(0, lockPath); } if( rc==SQLITE_OK ){ pCtx->dbPath = sqlite3DbStrDup(0, dbPath); if( pCtx->dbPath==NULL ){ rc = SQLITE_NOMEM; } } if( rc==SQLITE_OK ){ /* all memory is allocated, proxys are created and assigned, ** switch the locking context and pMethod then return. */ pCtx->oldLockingContext = pFile->lockingContext; pFile->lockingContext = pCtx; pCtx->pOldMethod = pFile->pMethod; pFile->pMethod = &proxyIoMethods; }else{ if( pCtx->conchFile ){ pCtx->conchFile->pMethod->xClose((sqlite3_file *)pCtx->conchFile); sqlite3_free(pCtx->conchFile); } sqlite3DbFree(0, pCtx->lockProxyPath); sqlite3_free(pCtx->conchFilePath); sqlite3_free(pCtx); } OSTRACE(("TRANSPROXY %d %s\n", pFile->h, (rc==SQLITE_OK ? "ok" : "failed"))); return rc; } /* ** This routine handles sqlite3_file_control() calls that are specific ** to proxy locking. 
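**
** The same two operations are also reachable from SQL through the
** lock_proxy_file pragma described earlier in this file; an illustrative
** session might run:
**
**     PRAGMA main.lock_proxy_file=':auto:';    -- configure the proxy path
**     PRAGMA main.lock_proxy_file;             -- report the path in use
**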
*/ static int proxyFileControl(sqlite3_file *id, int op, void *pArg){ switch( op ){ case SQLITE_FCNTL_GET_LOCKPROXYFILE: { unixFile *pFile = (unixFile*)id; if( pFile->pMethod == &proxyIoMethods ){ proxyLockingContext *pCtx = (proxyLockingContext*)pFile->lockingContext; proxyTakeConch(pFile); if( pCtx->lockProxyPath ){ *(const char **)pArg = pCtx->lockProxyPath; }else{ *(const char **)pArg = ":auto: (not held)"; } } else { *(const char **)pArg = NULL; } return SQLITE_OK; } case SQLITE_FCNTL_SET_LOCKPROXYFILE: { unixFile *pFile = (unixFile*)id; int rc = SQLITE_OK; int isProxyStyle = (pFile->pMethod == &proxyIoMethods); if( pArg==NULL || (const char *)pArg==0 ){ if( isProxyStyle ){ /* turn off proxy locking - not supported. If support is added for ** switching proxy locking mode off then it will need to fail if ** the journal mode is WAL mode. */ rc = SQLITE_ERROR /*SQLITE_PROTOCOL? SQLITE_MISUSE?*/; }else{ /* turn off proxy locking - already off - NOOP */ rc = SQLITE_OK; } }else{ const char *proxyPath = (const char *)pArg; if( isProxyStyle ){ proxyLockingContext *pCtx = (proxyLockingContext*)pFile->lockingContext; if( !strcmp(pArg, ":auto:") || (pCtx->lockProxyPath && !strncmp(pCtx->lockProxyPath, proxyPath, MAXPATHLEN)) ){ rc = SQLITE_OK; }else{ rc = switchLockProxyPath(pFile, proxyPath); } }else{ /* turn on proxy file locking */ rc = proxyTransformUnixFile(pFile, proxyPath); } } return rc; } default: { assert( 0 ); /* The call assures that only valid opcodes are sent */ } } /*NOTREACHED*/ return SQLITE_ERROR; } /* ** Within this division (the proxying locking implementation) the procedures ** above this point are all utilities. The lock-related methods of the ** proxy-locking sqlite3_io_method object follow. */ /* ** This routine checks if there is a RESERVED lock held on the specified ** file by this or any other process. If such a lock is held, set *pResOut ** to a non-zero value otherwise *pResOut is set to zero. The return value ** is set to SQLITE_OK unless an I/O error occurs during lock checking. */ static int proxyCheckReservedLock(sqlite3_file *id, int *pResOut) { unixFile *pFile = (unixFile*)id; int rc = proxyTakeConch(pFile); if( rc==SQLITE_OK ){ proxyLockingContext *pCtx = (proxyLockingContext *)pFile->lockingContext; if( pCtx->conchHeld>0 ){ unixFile *proxy = pCtx->lockProxy; return proxy->pMethod->xCheckReservedLock((sqlite3_file*)proxy, pResOut); }else{ /* conchHeld < 0 is lockless */ pResOut=0; } } return rc; } /* ** Lock the file with the lock specified by parameter eFileLock - one ** of the following: ** ** (1) SHARED_LOCK ** (2) RESERVED_LOCK ** (3) PENDING_LOCK ** (4) EXCLUSIVE_LOCK ** ** Sometimes when requesting one lock state, additional lock states ** are inserted in between. The locking might fail on one of the later ** transitions leaving the lock state different from what it started but ** still short of its goal. The following chart shows the allowed ** transitions and the inserted intermediate states: ** ** UNLOCKED -> SHARED ** SHARED -> RESERVED ** SHARED -> (PENDING) -> EXCLUSIVE ** RESERVED -> (PENDING) -> EXCLUSIVE ** PENDING -> EXCLUSIVE ** ** This routine will only increase a lock. Use the sqlite3OsUnlock() ** routine to lower a locking level. 
*/ static int proxyLock(sqlite3_file *id, int eFileLock) { unixFile *pFile = (unixFile*)id; int rc = proxyTakeConch(pFile); if( rc==SQLITE_OK ){ proxyLockingContext *pCtx = (proxyLockingContext *)pFile->lockingContext; if( pCtx->conchHeld>0 ){ unixFile *proxy = pCtx->lockProxy; rc = proxy->pMethod->xLock((sqlite3_file*)proxy, eFileLock); pFile->eFileLock = proxy->eFileLock; }else{ /* conchHeld < 0 is lockless */ } } return rc; } /* ** Lower the locking level on file descriptor pFile to eFileLock. eFileLock ** must be either NO_LOCK or SHARED_LOCK. ** ** If the locking level of the file descriptor is already at or below ** the requested locking level, this routine is a no-op. */ static int proxyUnlock(sqlite3_file *id, int eFileLock) { unixFile *pFile = (unixFile*)id; int rc = proxyTakeConch(pFile); if( rc==SQLITE_OK ){ proxyLockingContext *pCtx = (proxyLockingContext *)pFile->lockingContext; if( pCtx->conchHeld>0 ){ unixFile *proxy = pCtx->lockProxy; rc = proxy->pMethod->xUnlock((sqlite3_file*)proxy, eFileLock); pFile->eFileLock = proxy->eFileLock; }else{ /* conchHeld < 0 is lockless */ } } return rc; } /* ** Close a file that uses proxy locks. */ static int proxyClose(sqlite3_file *id) { if( id ){ unixFile *pFile = (unixFile*)id; proxyLockingContext *pCtx = (proxyLockingContext *)pFile->lockingContext; unixFile *lockProxy = pCtx->lockProxy; unixFile *conchFile = pCtx->conchFile; int rc = SQLITE_OK; if( lockProxy ){ rc = lockProxy->pMethod->xUnlock((sqlite3_file*)lockProxy, NO_LOCK); if( rc ) return rc; rc = lockProxy->pMethod->xClose((sqlite3_file*)lockProxy); if( rc ) return rc; sqlite3_free(lockProxy); pCtx->lockProxy = 0; } if( conchFile ){ if( pCtx->conchHeld ){ rc = proxyReleaseConch(pFile); if( rc ) return rc; } rc = conchFile->pMethod->xClose((sqlite3_file*)conchFile); if( rc ) return rc; sqlite3_free(conchFile); } sqlite3DbFree(0, pCtx->lockProxyPath); sqlite3_free(pCtx->conchFilePath); sqlite3DbFree(0, pCtx->dbPath); /* restore the original locking context and pMethod then close it */ pFile->lockingContext = pCtx->oldLockingContext; pFile->pMethod = pCtx->pOldMethod; sqlite3_free(pCtx); return pFile->pMethod->xClose(id); } return SQLITE_OK; } #endif /* defined(__APPLE__) && SQLITE_ENABLE_LOCKING_STYLE */ /* ** The proxy locking style is intended for use with AFP filesystems. ** And since AFP is only supported on MacOSX, the proxy locking is also ** restricted to MacOSX. ** ** ******************* End of the proxy lock implementation ********************** ******************************************************************************/ /* ** Initialize the operating system interface. ** ** This routine registers all VFS implementations for unix-like operating ** systems. This routine, and the sqlite3_os_end() routine that follows, ** should be the only routines in this file that are visible from other ** files. ** ** This routine is called once during SQLite initialization and by a ** single thread. The memory allocation and mutex subsystems have not ** necessarily been initialized when this routine is called, and so they ** should not be used. */ SQLITE_API int SQLITE_STDCALL sqlite3_os_init(void){ /* ** The following macro defines an initializer for an sqlite3_vfs object. ** The name of the VFS is NAME. The pAppData is a pointer to a pointer ** to the "finder" function. 
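**
** Any of the VFS names registered in the aVfs[] array below can be handed
** to sqlite3_open_v2() or sqlite3_vfs_find() to select a specific locking
** strategy.  A sketch (the filename is illustrative only):
**
**     sqlite3 *db;
**     sqlite3_open_v2("test.db", &db,
**                     SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE,
**                     "unix-dotfile");
**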
(pAppData is a pointer to a pointer because ** silly C90 rules prohibit a void* from being cast to a function pointer ** and so we have to go through the intermediate pointer to avoid problems ** when compiling with -pedantic-errors on GCC.) ** ** The FINDER parameter to this macro is the name of the pointer to the ** finder-function. The finder-function returns a pointer to the ** sqlite_io_methods object that implements the desired locking ** behaviors. See the division above that contains the IOMETHODS ** macro for addition information on finder-functions. ** ** Most finders simply return a pointer to a fixed sqlite3_io_methods ** object. But the "autolockIoFinder" available on MacOSX does a little ** more than that; it looks at the filesystem type that hosts the ** database file and tries to choose an locking method appropriate for ** that filesystem time. */ #define UNIXVFS(VFSNAME, FINDER) { \ 3, /* iVersion */ \ sizeof(unixFile), /* szOsFile */ \ MAX_PATHNAME, /* mxPathname */ \ 0, /* pNext */ \ VFSNAME, /* zName */ \ (void*)&FINDER, /* pAppData */ \ unixOpen, /* xOpen */ \ unixDelete, /* xDelete */ \ unixAccess, /* xAccess */ \ unixFullPathname, /* xFullPathname */ \ unixDlOpen, /* xDlOpen */ \ unixDlError, /* xDlError */ \ unixDlSym, /* xDlSym */ \ unixDlClose, /* xDlClose */ \ unixRandomness, /* xRandomness */ \ unixSleep, /* xSleep */ \ unixCurrentTime, /* xCurrentTime */ \ unixGetLastError, /* xGetLastError */ \ unixCurrentTimeInt64, /* xCurrentTimeInt64 */ \ unixSetSystemCall, /* xSetSystemCall */ \ unixGetSystemCall, /* xGetSystemCall */ \ unixNextSystemCall, /* xNextSystemCall */ \ } /* ** All default VFSes for unix are contained in the following array. ** ** Note that the sqlite3_vfs.pNext field of the VFS object is modified ** by the SQLite core when the VFS is registered. So the following ** array cannot be const. */ static sqlite3_vfs aVfs[] = { #if SQLITE_ENABLE_LOCKING_STYLE && defined(__APPLE__) UNIXVFS("unix", autolockIoFinder ), #elif OS_VXWORKS UNIXVFS("unix", vxworksIoFinder ), #else UNIXVFS("unix", posixIoFinder ), #endif UNIXVFS("unix-none", nolockIoFinder ), UNIXVFS("unix-dotfile", dotlockIoFinder ), UNIXVFS("unix-excl", posixIoFinder ), #if OS_VXWORKS UNIXVFS("unix-namedsem", semIoFinder ), #endif #if SQLITE_ENABLE_LOCKING_STYLE || OS_VXWORKS UNIXVFS("unix-posix", posixIoFinder ), #endif #if SQLITE_ENABLE_LOCKING_STYLE UNIXVFS("unix-flock", flockIoFinder ), #endif #if SQLITE_ENABLE_LOCKING_STYLE && defined(__APPLE__) UNIXVFS("unix-afp", afpIoFinder ), UNIXVFS("unix-nfs", nfsIoFinder ), UNIXVFS("unix-proxy", proxyIoFinder ), #endif }; unsigned int i; /* Loop counter */ /* Double-check that the aSyscall[] array has been constructed ** correctly. See ticket [bb3a86e890c8e96ab] */ assert( ArraySize(aSyscall)==25 ); /* Register all VFSes defined in the aVfs[] array */ for(i=0; i<(sizeof(aVfs)/sizeof(sqlite3_vfs)); i++){ sqlite3_vfs_register(&aVfs[i], i==0); } return SQLITE_OK; } /* ** Shutdown the operating system interface. ** ** Some operating systems might need to do some cleanup in this routine, ** to release dynamically allocated objects. But not on unix. ** This routine is a no-op for unix. */ SQLITE_API int SQLITE_STDCALL sqlite3_os_end(void){ return SQLITE_OK; } #endif /* SQLITE_OS_UNIX */ /************** End of os_unix.c *********************************************/ /************** Begin file os_win.c ******************************************/ /* ** 2004 May 22 ** ** The author disclaims copyright to this source code. 
In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ****************************************************************************** ** ** This file contains code that is specific to Windows. */ /* #include "sqliteInt.h" */ #if SQLITE_OS_WIN /* This file is used for Windows only */ /* ** Include code that is common to all os_*.c files */ /************** Include os_common.h in the middle of os_win.c ****************/ /************** Begin file os_common.h ***************************************/ /* ** 2004 May 22 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ****************************************************************************** ** ** This file contains macros and a little bit of code that is common to ** all of the platform-specific files (os_*.c) and is #included into those ** files. ** ** This file should be #included by the os_*.c files only. It is not a ** general purpose header file. */ #ifndef _OS_COMMON_H_ #define _OS_COMMON_H_ /* ** At least two bugs have slipped in because we changed the MEMORY_DEBUG ** macro to SQLITE_DEBUG and some older makefiles have not yet made the ** switch. The following code should catch this problem at compile-time. */ #ifdef MEMORY_DEBUG # error "The MEMORY_DEBUG macro is obsolete. Use SQLITE_DEBUG instead." #endif /* ** Macros for performance tracing. Normally turned off. Only works ** on i486 hardware. */ #ifdef SQLITE_PERFORMANCE_TRACE /* ** hwtime.h contains inline assembler code for implementing ** high-performance timing routines. */ /************** Include hwtime.h in the middle of os_common.h ****************/ /************** Begin file hwtime.h ******************************************/ /* ** 2008 May 27 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ****************************************************************************** ** ** This file contains inline asm code for retrieving "high-performance" ** counters for x86 class CPUs. */ #ifndef _HWTIME_H_ #define _HWTIME_H_ /* ** The following routine only works on pentium-class (or newer) processors. ** It uses the RDTSC opcode to read the cycle count value out of the ** processor and returns that value. This can be used for high-res ** profiling. 
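**
** A minimal profiling sketch (the profiled call is illustrative only):
**
**     sqlite_uint64 tStart = sqlite3Hwtime();
**     do_something_interesting();
**     sqlite_uint64 nCycles = sqlite3Hwtime() - tStart;
**
** The TIMER_START, TIMER_END and TIMER_ELAPSED macros defined a little
** further down in os_common.h wrap this same pattern when
** SQLITE_PERFORMANCE_TRACE is defined.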
*/ #if (defined(__GNUC__) || defined(_MSC_VER)) && \ (defined(i386) || defined(__i386__) || defined(_M_IX86)) #if defined(__GNUC__) __inline__ sqlite_uint64 sqlite3Hwtime(void){ unsigned int lo, hi; __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi)); return (sqlite_uint64)hi << 32 | lo; } #elif defined(_MSC_VER) __declspec(naked) __inline sqlite_uint64 __cdecl sqlite3Hwtime(void){ __asm { rdtsc ret ; return value at EDX:EAX } } #endif #elif (defined(__GNUC__) && defined(__x86_64__)) __inline__ sqlite_uint64 sqlite3Hwtime(void){ unsigned long val; __asm__ __volatile__ ("rdtsc" : "=A" (val)); return val; } #elif (defined(__GNUC__) && defined(__ppc__)) __inline__ sqlite_uint64 sqlite3Hwtime(void){ unsigned long long retval; unsigned long junk; __asm__ __volatile__ ("\n\ 1: mftbu %1\n\ mftb %L0\n\ mftbu %0\n\ cmpw %0,%1\n\ bne 1b" : "=r" (retval), "=r" (junk)); return retval; } #else #error Need implementation of sqlite3Hwtime() for your platform. /* ** To compile without implementing sqlite3Hwtime() for your platform, ** you can remove the above #error and use the following ** stub function. You will lose timing support for many ** of the debugging and testing utilities, but it should at ** least compile and run. */ SQLITE_PRIVATE sqlite_uint64 sqlite3Hwtime(void){ return ((sqlite_uint64)0); } #endif #endif /* !defined(_HWTIME_H_) */ /************** End of hwtime.h **********************************************/ /************** Continuing where we left off in os_common.h ******************/ static sqlite_uint64 g_start; static sqlite_uint64 g_elapsed; #define TIMER_START g_start=sqlite3Hwtime() #define TIMER_END g_elapsed=sqlite3Hwtime()-g_start #define TIMER_ELAPSED g_elapsed #else #define TIMER_START #define TIMER_END #define TIMER_ELAPSED ((sqlite_uint64)0) #endif /* ** If we compile with the SQLITE_TEST macro set, then the following block ** of code will give us the ability to simulate a disk I/O error. This ** is used for testing the I/O recovery logic. */ #ifdef SQLITE_TEST SQLITE_API int sqlite3_io_error_hit = 0; /* Total number of I/O Errors */ SQLITE_API int sqlite3_io_error_hardhit = 0; /* Number of non-benign errors */ SQLITE_API int sqlite3_io_error_pending = 0; /* Count down to first I/O error */ SQLITE_API int sqlite3_io_error_persist = 0; /* True if I/O errors persist */ SQLITE_API int sqlite3_io_error_benign = 0; /* True if errors are benign */ SQLITE_API int sqlite3_diskfull_pending = 0; SQLITE_API int sqlite3_diskfull = 0; #define SimulateIOErrorBenign(X) sqlite3_io_error_benign=(X) #define SimulateIOError(CODE) \ if( (sqlite3_io_error_persist && sqlite3_io_error_hit) \ || sqlite3_io_error_pending-- == 1 ) \ { local_ioerr(); CODE; } static void local_ioerr(){ IOTRACE(("IOERR\n")); sqlite3_io_error_hit++; if( !sqlite3_io_error_benign ) sqlite3_io_error_hardhit++; } #define SimulateDiskfullError(CODE) \ if( sqlite3_diskfull_pending ){ \ if( sqlite3_diskfull_pending == 1 ){ \ local_ioerr(); \ sqlite3_diskfull = 1; \ sqlite3_io_error_hit = 1; \ CODE; \ }else{ \ sqlite3_diskfull_pending--; \ } \ } #else #define SimulateIOErrorBenign(X) #define SimulateIOError(A) #define SimulateDiskfullError(A) #endif /* ** When testing, keep a count of the number of open files. 
*/ #ifdef SQLITE_TEST SQLITE_API int sqlite3_open_file_count = 0; #define OpenCounter(X) sqlite3_open_file_count+=(X) #else #define OpenCounter(X) #endif #endif /* !defined(_OS_COMMON_H_) */ /************** End of os_common.h *******************************************/ /************** Continuing where we left off in os_win.c *********************/ /* ** Include the header file for the Windows VFS. */ /* #include "os_win.h" */ /* ** Compiling and using WAL mode requires several APIs that are only ** available in Windows platforms based on the NT kernel. */ #if !SQLITE_OS_WINNT && !defined(SQLITE_OMIT_WAL) # error "WAL mode requires support from the Windows NT kernel, compile\ with SQLITE_OMIT_WAL." #endif #if !SQLITE_OS_WINNT && SQLITE_MAX_MMAP_SIZE>0 # error "Memory mapped files require support from the Windows NT kernel,\ compile with SQLITE_MAX_MMAP_SIZE=0." #endif /* ** Are most of the Win32 ANSI APIs available (i.e. with certain exceptions ** based on the sub-platform)? */ #if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT && !defined(SQLITE_WIN32_NO_ANSI) # define SQLITE_WIN32_HAS_ANSI #endif /* ** Are most of the Win32 Unicode APIs available (i.e. with certain exceptions ** based on the sub-platform)? */ #if (SQLITE_OS_WINCE || SQLITE_OS_WINNT || SQLITE_OS_WINRT) && \ !defined(SQLITE_WIN32_NO_WIDE) # define SQLITE_WIN32_HAS_WIDE #endif /* ** Make sure at least one set of Win32 APIs is available. */ #if !defined(SQLITE_WIN32_HAS_ANSI) && !defined(SQLITE_WIN32_HAS_WIDE) # error "At least one of SQLITE_WIN32_HAS_ANSI and SQLITE_WIN32_HAS_WIDE\ must be defined." #endif /* ** Define the required Windows SDK version constants if they are not ** already available. */ #ifndef NTDDI_WIN8 # define NTDDI_WIN8 0x06020000 #endif #ifndef NTDDI_WINBLUE # define NTDDI_WINBLUE 0x06030000 #endif /* ** Check to see if the GetVersionEx[AW] functions are deprecated on the ** target system. GetVersionEx was first deprecated in Win8.1. */ #ifndef SQLITE_WIN32_GETVERSIONEX # if defined(NTDDI_VERSION) && NTDDI_VERSION >= NTDDI_WINBLUE # define SQLITE_WIN32_GETVERSIONEX 0 /* GetVersionEx() is deprecated */ # else # define SQLITE_WIN32_GETVERSIONEX 1 /* GetVersionEx() is current */ # endif #endif /* ** This constant should already be defined (in the "WinDef.h" SDK file). */ #ifndef MAX_PATH # define MAX_PATH (260) #endif /* ** Maximum pathname length (in chars) for Win32. This should normally be ** MAX_PATH. */ #ifndef SQLITE_WIN32_MAX_PATH_CHARS # define SQLITE_WIN32_MAX_PATH_CHARS (MAX_PATH) #endif /* ** This constant should already be defined (in the "WinNT.h" SDK file). */ #ifndef UNICODE_STRING_MAX_CHARS # define UNICODE_STRING_MAX_CHARS (32767) #endif /* ** Maximum pathname length (in chars) for WinNT. This should normally be ** UNICODE_STRING_MAX_CHARS. */ #ifndef SQLITE_WINNT_MAX_PATH_CHARS # define SQLITE_WINNT_MAX_PATH_CHARS (UNICODE_STRING_MAX_CHARS) #endif /* ** Maximum pathname length (in bytes) for Win32. The MAX_PATH macro is in ** characters, so we allocate 4 bytes per character assuming worst-case of ** 4-bytes-per-character for UTF8. */ #ifndef SQLITE_WIN32_MAX_PATH_BYTES # define SQLITE_WIN32_MAX_PATH_BYTES (SQLITE_WIN32_MAX_PATH_CHARS*4) #endif /* ** Maximum pathname length (in bytes) for WinNT. This should normally be ** UNICODE_STRING_MAX_CHARS * sizeof(WCHAR). */ #ifndef SQLITE_WINNT_MAX_PATH_BYTES # define SQLITE_WINNT_MAX_PATH_BYTES \ (sizeof(WCHAR) * SQLITE_WINNT_MAX_PATH_CHARS) #endif /* ** Maximum error message length (in chars) for WinRT. 
*/ #ifndef SQLITE_WIN32_MAX_ERRMSG_CHARS # define SQLITE_WIN32_MAX_ERRMSG_CHARS (1024) #endif /* ** Returns non-zero if the character should be treated as a directory ** separator. */ #ifndef winIsDirSep # define winIsDirSep(a) (((a) == '/') || ((a) == '\\')) #endif /* ** This macro is used when a local variable is set to a value that is ** [sometimes] not used by the code (e.g. via conditional compilation). */ #ifndef UNUSED_VARIABLE_VALUE # define UNUSED_VARIABLE_VALUE(x) (void)(x) #endif /* ** Returns the character that should be used as the directory separator. */ #ifndef winGetDirSep # define winGetDirSep() '\\' #endif /* ** Do we need to manually define the Win32 file mapping APIs for use with WAL ** mode or memory mapped files (e.g. these APIs are available in the Windows ** CE SDK; however, they are not present in the header file)? */ #if SQLITE_WIN32_FILEMAPPING_API && \ (!defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0) /* ** Two of the file mapping APIs are different under WinRT. Figure out which ** set we need. */ #if SQLITE_OS_WINRT WINBASEAPI HANDLE WINAPI CreateFileMappingFromApp(HANDLE, \ LPSECURITY_ATTRIBUTES, ULONG, ULONG64, LPCWSTR); WINBASEAPI LPVOID WINAPI MapViewOfFileFromApp(HANDLE, ULONG, ULONG64, SIZE_T); #else #if defined(SQLITE_WIN32_HAS_ANSI) WINBASEAPI HANDLE WINAPI CreateFileMappingA(HANDLE, LPSECURITY_ATTRIBUTES, \ DWORD, DWORD, DWORD, LPCSTR); #endif /* defined(SQLITE_WIN32_HAS_ANSI) */ #if defined(SQLITE_WIN32_HAS_WIDE) WINBASEAPI HANDLE WINAPI CreateFileMappingW(HANDLE, LPSECURITY_ATTRIBUTES, \ DWORD, DWORD, DWORD, LPCWSTR); #endif /* defined(SQLITE_WIN32_HAS_WIDE) */ WINBASEAPI LPVOID WINAPI MapViewOfFile(HANDLE, DWORD, DWORD, DWORD, SIZE_T); #endif /* SQLITE_OS_WINRT */ /* ** These file mapping APIs are common to both Win32 and WinRT. */ WINBASEAPI BOOL WINAPI FlushViewOfFile(LPCVOID, SIZE_T); WINBASEAPI BOOL WINAPI UnmapViewOfFile(LPCVOID); #endif /* SQLITE_WIN32_FILEMAPPING_API */ /* ** Some Microsoft compilers lack this definition. */ #ifndef INVALID_FILE_ATTRIBUTES # define INVALID_FILE_ATTRIBUTES ((DWORD)-1) #endif #ifndef FILE_FLAG_MASK # define FILE_FLAG_MASK (0xFF3C0000) #endif #ifndef FILE_ATTRIBUTE_MASK # define FILE_ATTRIBUTE_MASK (0x0003FFF7) #endif #ifndef SQLITE_OMIT_WAL /* Forward references to structures used for WAL */ typedef struct winShm winShm; /* A connection to shared-memory */ typedef struct winShmNode winShmNode; /* A region of shared-memory */ #endif /* ** WinCE lacks native support for file locking so we have to fake it ** with some code of our own. */ #if SQLITE_OS_WINCE typedef struct winceLock { int nReaders; /* Number of reader locks obtained */ BOOL bPending; /* Indicates a pending lock has been obtained */ BOOL bReserved; /* Indicates a reserved lock has been obtained */ BOOL bExclusive; /* Indicates an exclusive lock has been obtained */ } winceLock; #endif /* ** The winFile structure is a subclass of sqlite3_file* specific to the win32 ** portability layer. */ typedef struct winFile winFile; struct winFile { const sqlite3_io_methods *pMethod; /*** Must be first ***/ sqlite3_vfs *pVfs; /* The VFS used to open this file */ HANDLE h; /* Handle for accessing the file */ u8 locktype; /* Type of lock currently held on this file */ short sharedLockByte; /* Randomly chosen byte used as a shared lock */ u8 ctrlFlags; /* Flags. 
See WINFILE_* below */ DWORD lastErrno; /* The Windows errno from the last I/O error */ #ifndef SQLITE_OMIT_WAL winShm *pShm; /* Instance of shared memory on this file */ #endif const char *zPath; /* Full pathname of this file */ int szChunk; /* Chunk size configured by FCNTL_CHUNK_SIZE */ #if SQLITE_OS_WINCE LPWSTR zDeleteOnClose; /* Name of file to delete when closing */ HANDLE hMutex; /* Mutex used to control access to shared lock */ HANDLE hShared; /* Shared memory segment used for locking */ winceLock local; /* Locks obtained by this instance of winFile */ winceLock *shared; /* Global shared lock memory for the file */ #endif #if SQLITE_MAX_MMAP_SIZE>0 int nFetchOut; /* Number of outstanding xFetch references */ HANDLE hMap; /* Handle for accessing memory mapping */ void *pMapRegion; /* Area memory mapped */ sqlite3_int64 mmapSize; /* Usable size of mapped region */ sqlite3_int64 mmapSizeActual; /* Actual size of mapped region */ sqlite3_int64 mmapSizeMax; /* Configured FCNTL_MMAP_SIZE value */ #endif }; /* ** Allowed values for winFile.ctrlFlags */ #define WINFILE_RDONLY 0x02 /* Connection is read only */ #define WINFILE_PERSIST_WAL 0x04 /* Persistent WAL mode */ #define WINFILE_PSOW 0x10 /* SQLITE_IOCAP_POWERSAFE_OVERWRITE */ /* * The size of the buffer used by sqlite3_win32_write_debug(). */ #ifndef SQLITE_WIN32_DBG_BUF_SIZE # define SQLITE_WIN32_DBG_BUF_SIZE ((int)(4096-sizeof(DWORD))) #endif /* * The value used with sqlite3_win32_set_directory() to specify that * the data directory should be changed. */ #ifndef SQLITE_WIN32_DATA_DIRECTORY_TYPE # define SQLITE_WIN32_DATA_DIRECTORY_TYPE (1) #endif /* * The value used with sqlite3_win32_set_directory() to specify that * the temporary directory should be changed. */ #ifndef SQLITE_WIN32_TEMP_DIRECTORY_TYPE # define SQLITE_WIN32_TEMP_DIRECTORY_TYPE (2) #endif /* * If compiled with SQLITE_WIN32_MALLOC on Windows, we will use the * various Win32 API heap functions instead of our own. */ #ifdef SQLITE_WIN32_MALLOC /* * If this is non-zero, an isolated heap will be created by the native Win32 * allocator subsystem; otherwise, the default process heap will be used. This * setting has no effect when compiling for WinRT. By default, this is enabled * and an isolated heap will be created to store all allocated data. * ****************************************************************************** * WARNING: It is important to note that when this setting is non-zero and the * winMemShutdown function is called (e.g. by the sqlite3_shutdown * function), all data that was allocated using the isolated heap will * be freed immediately and any attempt to access any of that freed * data will almost certainly result in an immediate access violation. ****************************************************************************** */ #ifndef SQLITE_WIN32_HEAP_CREATE # define SQLITE_WIN32_HEAP_CREATE (TRUE) #endif /* * The initial size of the Win32-specific heap. This value may be zero. */ #ifndef SQLITE_WIN32_HEAP_INIT_SIZE # define SQLITE_WIN32_HEAP_INIT_SIZE ((SQLITE_DEFAULT_CACHE_SIZE) * \ (SQLITE_DEFAULT_PAGE_SIZE) + 4194304) #endif /* * The maximum size of the Win32-specific heap. This value may be zero. */ #ifndef SQLITE_WIN32_HEAP_MAX_SIZE # define SQLITE_WIN32_HEAP_MAX_SIZE (0) #endif /* * The extra flags to use in calls to the Win32 heap APIs. This value may be * zero for the default behavior. 
*/ #ifndef SQLITE_WIN32_HEAP_FLAGS # define SQLITE_WIN32_HEAP_FLAGS (0) #endif /* ** The winMemData structure stores information required by the Win32-specific ** sqlite3_mem_methods implementation. */ typedef struct winMemData winMemData; struct winMemData { #ifndef NDEBUG u32 magic1; /* Magic number to detect structure corruption. */ #endif HANDLE hHeap; /* The handle to our heap. */ BOOL bOwned; /* Do we own the heap (i.e. destroy it on shutdown)? */ #ifndef NDEBUG u32 magic2; /* Magic number to detect structure corruption. */ #endif }; #ifndef NDEBUG #define WINMEM_MAGIC1 0x42b2830b #define WINMEM_MAGIC2 0xbd4d7cf4 #endif static struct winMemData win_mem_data = { #ifndef NDEBUG WINMEM_MAGIC1, #endif NULL, FALSE #ifndef NDEBUG ,WINMEM_MAGIC2 #endif }; #ifndef NDEBUG #define winMemAssertMagic1() assert( win_mem_data.magic1==WINMEM_MAGIC1 ) #define winMemAssertMagic2() assert( win_mem_data.magic2==WINMEM_MAGIC2 ) #define winMemAssertMagic() winMemAssertMagic1(); winMemAssertMagic2(); #else #define winMemAssertMagic() #endif #define winMemGetDataPtr() &win_mem_data #define winMemGetHeap() win_mem_data.hHeap #define winMemGetOwned() win_mem_data.bOwned static void *winMemMalloc(int nBytes); static void winMemFree(void *pPrior); static void *winMemRealloc(void *pPrior, int nBytes); static int winMemSize(void *p); static int winMemRoundup(int n); static int winMemInit(void *pAppData); static void winMemShutdown(void *pAppData); SQLITE_PRIVATE const sqlite3_mem_methods *sqlite3MemGetWin32(void); #endif /* SQLITE_WIN32_MALLOC */ /* ** The following variable is (normally) set once and never changes ** thereafter. It records whether the operating system is Win9x ** or WinNT. ** ** 0: Operating system unknown. ** 1: Operating system is Win9x. ** 2: Operating system is WinNT. ** ** In order to facilitate testing on a WinNT system, the test fixture ** can manually set this value to 1 to emulate Win98 behavior. */ #ifdef SQLITE_TEST SQLITE_API LONG SQLITE_WIN32_VOLATILE sqlite3_os_type = 0; #else static LONG SQLITE_WIN32_VOLATILE sqlite3_os_type = 0; #endif #ifndef SYSCALL # define SYSCALL sqlite3_syscall_ptr #endif /* ** This function is not available on Windows CE or WinRT. */ #if SQLITE_OS_WINCE || SQLITE_OS_WINRT # define osAreFileApisANSI() 1 #endif /* ** Many system calls are accessed through pointer-to-functions so that ** they may be overridden at runtime to facilitate fault injection during ** testing and sandboxing. The following array holds the names and pointers ** to all overrideable system calls. 
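**
** For example, a test harness could reroute a single entry through the
** public VFS interface and later restore it; the replacement function
** "faultyCloseHandle" is an assumption of this sketch:
**
**     sqlite3_vfs *pVfs = sqlite3_vfs_find("win32");
**     pVfs->xSetSystemCall(pVfs, "CloseHandle",
**                          (sqlite3_syscall_ptr)faultyCloseHandle);
**     ...
**     pVfs->xSetSystemCall(pVfs, "CloseHandle", 0);
**
** Passing a NULL function pointer, as in the last call above, restores the
** built-in default for that entry.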
*/ static struct win_syscall { const char *zName; /* Name of the system call */ sqlite3_syscall_ptr pCurrent; /* Current value of the system call */ sqlite3_syscall_ptr pDefault; /* Default value */ } aSyscall[] = { #if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT { "AreFileApisANSI", (SYSCALL)AreFileApisANSI, 0 }, #else { "AreFileApisANSI", (SYSCALL)0, 0 }, #endif #ifndef osAreFileApisANSI #define osAreFileApisANSI ((BOOL(WINAPI*)(VOID))aSyscall[0].pCurrent) #endif #if SQLITE_OS_WINCE && defined(SQLITE_WIN32_HAS_WIDE) { "CharLowerW", (SYSCALL)CharLowerW, 0 }, #else { "CharLowerW", (SYSCALL)0, 0 }, #endif #define osCharLowerW ((LPWSTR(WINAPI*)(LPWSTR))aSyscall[1].pCurrent) #if SQLITE_OS_WINCE && defined(SQLITE_WIN32_HAS_WIDE) { "CharUpperW", (SYSCALL)CharUpperW, 0 }, #else { "CharUpperW", (SYSCALL)0, 0 }, #endif #define osCharUpperW ((LPWSTR(WINAPI*)(LPWSTR))aSyscall[2].pCurrent) { "CloseHandle", (SYSCALL)CloseHandle, 0 }, #define osCloseHandle ((BOOL(WINAPI*)(HANDLE))aSyscall[3].pCurrent) #if defined(SQLITE_WIN32_HAS_ANSI) { "CreateFileA", (SYSCALL)CreateFileA, 0 }, #else { "CreateFileA", (SYSCALL)0, 0 }, #endif #define osCreateFileA ((HANDLE(WINAPI*)(LPCSTR,DWORD,DWORD, \ LPSECURITY_ATTRIBUTES,DWORD,DWORD,HANDLE))aSyscall[4].pCurrent) #if !SQLITE_OS_WINRT && defined(SQLITE_WIN32_HAS_WIDE) { "CreateFileW", (SYSCALL)CreateFileW, 0 }, #else { "CreateFileW", (SYSCALL)0, 0 }, #endif #define osCreateFileW ((HANDLE(WINAPI*)(LPCWSTR,DWORD,DWORD, \ LPSECURITY_ATTRIBUTES,DWORD,DWORD,HANDLE))aSyscall[5].pCurrent) #if (!SQLITE_OS_WINRT && defined(SQLITE_WIN32_HAS_ANSI) && \ (!defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0)) { "CreateFileMappingA", (SYSCALL)CreateFileMappingA, 0 }, #else { "CreateFileMappingA", (SYSCALL)0, 0 }, #endif #define osCreateFileMappingA ((HANDLE(WINAPI*)(HANDLE,LPSECURITY_ATTRIBUTES, \ DWORD,DWORD,DWORD,LPCSTR))aSyscall[6].pCurrent) #if SQLITE_OS_WINCE || (!SQLITE_OS_WINRT && defined(SQLITE_WIN32_HAS_WIDE) && \ (!defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0)) { "CreateFileMappingW", (SYSCALL)CreateFileMappingW, 0 }, #else { "CreateFileMappingW", (SYSCALL)0, 0 }, #endif #define osCreateFileMappingW ((HANDLE(WINAPI*)(HANDLE,LPSECURITY_ATTRIBUTES, \ DWORD,DWORD,DWORD,LPCWSTR))aSyscall[7].pCurrent) #if !SQLITE_OS_WINRT && defined(SQLITE_WIN32_HAS_WIDE) { "CreateMutexW", (SYSCALL)CreateMutexW, 0 }, #else { "CreateMutexW", (SYSCALL)0, 0 }, #endif #define osCreateMutexW ((HANDLE(WINAPI*)(LPSECURITY_ATTRIBUTES,BOOL, \ LPCWSTR))aSyscall[8].pCurrent) #if defined(SQLITE_WIN32_HAS_ANSI) { "DeleteFileA", (SYSCALL)DeleteFileA, 0 }, #else { "DeleteFileA", (SYSCALL)0, 0 }, #endif #define osDeleteFileA ((BOOL(WINAPI*)(LPCSTR))aSyscall[9].pCurrent) #if defined(SQLITE_WIN32_HAS_WIDE) { "DeleteFileW", (SYSCALL)DeleteFileW, 0 }, #else { "DeleteFileW", (SYSCALL)0, 0 }, #endif #define osDeleteFileW ((BOOL(WINAPI*)(LPCWSTR))aSyscall[10].pCurrent) #if SQLITE_OS_WINCE { "FileTimeToLocalFileTime", (SYSCALL)FileTimeToLocalFileTime, 0 }, #else { "FileTimeToLocalFileTime", (SYSCALL)0, 0 }, #endif #define osFileTimeToLocalFileTime ((BOOL(WINAPI*)(CONST FILETIME*, \ LPFILETIME))aSyscall[11].pCurrent) #if SQLITE_OS_WINCE { "FileTimeToSystemTime", (SYSCALL)FileTimeToSystemTime, 0 }, #else { "FileTimeToSystemTime", (SYSCALL)0, 0 }, #endif #define osFileTimeToSystemTime ((BOOL(WINAPI*)(CONST FILETIME*, \ LPSYSTEMTIME))aSyscall[12].pCurrent) { "FlushFileBuffers", (SYSCALL)FlushFileBuffers, 0 }, #define osFlushFileBuffers ((BOOL(WINAPI*)(HANDLE))aSyscall[13].pCurrent) #if defined(SQLITE_WIN32_HAS_ANSI) { 
"FormatMessageA", (SYSCALL)FormatMessageA, 0 }, #else { "FormatMessageA", (SYSCALL)0, 0 }, #endif #define osFormatMessageA ((DWORD(WINAPI*)(DWORD,LPCVOID,DWORD,DWORD,LPSTR, \ DWORD,va_list*))aSyscall[14].pCurrent) #if defined(SQLITE_WIN32_HAS_WIDE) { "FormatMessageW", (SYSCALL)FormatMessageW, 0 }, #else { "FormatMessageW", (SYSCALL)0, 0 }, #endif #define osFormatMessageW ((DWORD(WINAPI*)(DWORD,LPCVOID,DWORD,DWORD,LPWSTR, \ DWORD,va_list*))aSyscall[15].pCurrent) #if !defined(SQLITE_OMIT_LOAD_EXTENSION) { "FreeLibrary", (SYSCALL)FreeLibrary, 0 }, #else { "FreeLibrary", (SYSCALL)0, 0 }, #endif #define osFreeLibrary ((BOOL(WINAPI*)(HMODULE))aSyscall[16].pCurrent) { "GetCurrentProcessId", (SYSCALL)GetCurrentProcessId, 0 }, #define osGetCurrentProcessId ((DWORD(WINAPI*)(VOID))aSyscall[17].pCurrent) #if !SQLITE_OS_WINCE && defined(SQLITE_WIN32_HAS_ANSI) { "GetDiskFreeSpaceA", (SYSCALL)GetDiskFreeSpaceA, 0 }, #else { "GetDiskFreeSpaceA", (SYSCALL)0, 0 }, #endif #define osGetDiskFreeSpaceA ((BOOL(WINAPI*)(LPCSTR,LPDWORD,LPDWORD,LPDWORD, \ LPDWORD))aSyscall[18].pCurrent) #if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT && defined(SQLITE_WIN32_HAS_WIDE) { "GetDiskFreeSpaceW", (SYSCALL)GetDiskFreeSpaceW, 0 }, #else { "GetDiskFreeSpaceW", (SYSCALL)0, 0 }, #endif #define osGetDiskFreeSpaceW ((BOOL(WINAPI*)(LPCWSTR,LPDWORD,LPDWORD,LPDWORD, \ LPDWORD))aSyscall[19].pCurrent) #if defined(SQLITE_WIN32_HAS_ANSI) { "GetFileAttributesA", (SYSCALL)GetFileAttributesA, 0 }, #else { "GetFileAttributesA", (SYSCALL)0, 0 }, #endif #define osGetFileAttributesA ((DWORD(WINAPI*)(LPCSTR))aSyscall[20].pCurrent) #if !SQLITE_OS_WINRT && defined(SQLITE_WIN32_HAS_WIDE) { "GetFileAttributesW", (SYSCALL)GetFileAttributesW, 0 }, #else { "GetFileAttributesW", (SYSCALL)0, 0 }, #endif #define osGetFileAttributesW ((DWORD(WINAPI*)(LPCWSTR))aSyscall[21].pCurrent) #if defined(SQLITE_WIN32_HAS_WIDE) { "GetFileAttributesExW", (SYSCALL)GetFileAttributesExW, 0 }, #else { "GetFileAttributesExW", (SYSCALL)0, 0 }, #endif #define osGetFileAttributesExW ((BOOL(WINAPI*)(LPCWSTR,GET_FILEEX_INFO_LEVELS, \ LPVOID))aSyscall[22].pCurrent) #if !SQLITE_OS_WINRT { "GetFileSize", (SYSCALL)GetFileSize, 0 }, #else { "GetFileSize", (SYSCALL)0, 0 }, #endif #define osGetFileSize ((DWORD(WINAPI*)(HANDLE,LPDWORD))aSyscall[23].pCurrent) #if !SQLITE_OS_WINCE && defined(SQLITE_WIN32_HAS_ANSI) { "GetFullPathNameA", (SYSCALL)GetFullPathNameA, 0 }, #else { "GetFullPathNameA", (SYSCALL)0, 0 }, #endif #define osGetFullPathNameA ((DWORD(WINAPI*)(LPCSTR,DWORD,LPSTR, \ LPSTR*))aSyscall[24].pCurrent) #if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT && defined(SQLITE_WIN32_HAS_WIDE) { "GetFullPathNameW", (SYSCALL)GetFullPathNameW, 0 }, #else { "GetFullPathNameW", (SYSCALL)0, 0 }, #endif #define osGetFullPathNameW ((DWORD(WINAPI*)(LPCWSTR,DWORD,LPWSTR, \ LPWSTR*))aSyscall[25].pCurrent) { "GetLastError", (SYSCALL)GetLastError, 0 }, #define osGetLastError ((DWORD(WINAPI*)(VOID))aSyscall[26].pCurrent) #if !defined(SQLITE_OMIT_LOAD_EXTENSION) #if SQLITE_OS_WINCE /* The GetProcAddressA() routine is only available on Windows CE. 
*/ { "GetProcAddressA", (SYSCALL)GetProcAddressA, 0 }, #else /* All other Windows platforms expect GetProcAddress() to take ** an ANSI string regardless of the _UNICODE setting */ { "GetProcAddressA", (SYSCALL)GetProcAddress, 0 }, #endif #else { "GetProcAddressA", (SYSCALL)0, 0 }, #endif #define osGetProcAddressA ((FARPROC(WINAPI*)(HMODULE, \ LPCSTR))aSyscall[27].pCurrent) #if !SQLITE_OS_WINRT { "GetSystemInfo", (SYSCALL)GetSystemInfo, 0 }, #else { "GetSystemInfo", (SYSCALL)0, 0 }, #endif #define osGetSystemInfo ((VOID(WINAPI*)(LPSYSTEM_INFO))aSyscall[28].pCurrent) { "GetSystemTime", (SYSCALL)GetSystemTime, 0 }, #define osGetSystemTime ((VOID(WINAPI*)(LPSYSTEMTIME))aSyscall[29].pCurrent) #if !SQLITE_OS_WINCE { "GetSystemTimeAsFileTime", (SYSCALL)GetSystemTimeAsFileTime, 0 }, #else { "GetSystemTimeAsFileTime", (SYSCALL)0, 0 }, #endif #define osGetSystemTimeAsFileTime ((VOID(WINAPI*)( \ LPFILETIME))aSyscall[30].pCurrent) #if defined(SQLITE_WIN32_HAS_ANSI) { "GetTempPathA", (SYSCALL)GetTempPathA, 0 }, #else { "GetTempPathA", (SYSCALL)0, 0 }, #endif #define osGetTempPathA ((DWORD(WINAPI*)(DWORD,LPSTR))aSyscall[31].pCurrent) #if !SQLITE_OS_WINRT && defined(SQLITE_WIN32_HAS_WIDE) { "GetTempPathW", (SYSCALL)GetTempPathW, 0 }, #else { "GetTempPathW", (SYSCALL)0, 0 }, #endif #define osGetTempPathW ((DWORD(WINAPI*)(DWORD,LPWSTR))aSyscall[32].pCurrent) #if !SQLITE_OS_WINRT { "GetTickCount", (SYSCALL)GetTickCount, 0 }, #else { "GetTickCount", (SYSCALL)0, 0 }, #endif #define osGetTickCount ((DWORD(WINAPI*)(VOID))aSyscall[33].pCurrent) #if defined(SQLITE_WIN32_HAS_ANSI) && defined(SQLITE_WIN32_GETVERSIONEX) && \ SQLITE_WIN32_GETVERSIONEX { "GetVersionExA", (SYSCALL)GetVersionExA, 0 }, #else { "GetVersionExA", (SYSCALL)0, 0 }, #endif #define osGetVersionExA ((BOOL(WINAPI*)( \ LPOSVERSIONINFOA))aSyscall[34].pCurrent) #if !SQLITE_OS_WINRT && defined(SQLITE_WIN32_HAS_WIDE) && \ defined(SQLITE_WIN32_GETVERSIONEX) && SQLITE_WIN32_GETVERSIONEX { "GetVersionExW", (SYSCALL)GetVersionExW, 0 }, #else { "GetVersionExW", (SYSCALL)0, 0 }, #endif #define osGetVersionExW ((BOOL(WINAPI*)( \ LPOSVERSIONINFOW))aSyscall[35].pCurrent) { "HeapAlloc", (SYSCALL)HeapAlloc, 0 }, #define osHeapAlloc ((LPVOID(WINAPI*)(HANDLE,DWORD, \ SIZE_T))aSyscall[36].pCurrent) #if !SQLITE_OS_WINRT { "HeapCreate", (SYSCALL)HeapCreate, 0 }, #else { "HeapCreate", (SYSCALL)0, 0 }, #endif #define osHeapCreate ((HANDLE(WINAPI*)(DWORD,SIZE_T, \ SIZE_T))aSyscall[37].pCurrent) #if !SQLITE_OS_WINRT { "HeapDestroy", (SYSCALL)HeapDestroy, 0 }, #else { "HeapDestroy", (SYSCALL)0, 0 }, #endif #define osHeapDestroy ((BOOL(WINAPI*)(HANDLE))aSyscall[38].pCurrent) { "HeapFree", (SYSCALL)HeapFree, 0 }, #define osHeapFree ((BOOL(WINAPI*)(HANDLE,DWORD,LPVOID))aSyscall[39].pCurrent) { "HeapReAlloc", (SYSCALL)HeapReAlloc, 0 }, #define osHeapReAlloc ((LPVOID(WINAPI*)(HANDLE,DWORD,LPVOID, \ SIZE_T))aSyscall[40].pCurrent) { "HeapSize", (SYSCALL)HeapSize, 0 }, #define osHeapSize ((SIZE_T(WINAPI*)(HANDLE,DWORD, \ LPCVOID))aSyscall[41].pCurrent) #if !SQLITE_OS_WINRT { "HeapValidate", (SYSCALL)HeapValidate, 0 }, #else { "HeapValidate", (SYSCALL)0, 0 }, #endif #define osHeapValidate ((BOOL(WINAPI*)(HANDLE,DWORD, \ LPCVOID))aSyscall[42].pCurrent) #if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT { "HeapCompact", (SYSCALL)HeapCompact, 0 }, #else { "HeapCompact", (SYSCALL)0, 0 }, #endif #define osHeapCompact ((UINT(WINAPI*)(HANDLE,DWORD))aSyscall[43].pCurrent) #if defined(SQLITE_WIN32_HAS_ANSI) && !defined(SQLITE_OMIT_LOAD_EXTENSION) { "LoadLibraryA", (SYSCALL)LoadLibraryA, 0 }, #else { 
"LoadLibraryA", (SYSCALL)0, 0 }, #endif #define osLoadLibraryA ((HMODULE(WINAPI*)(LPCSTR))aSyscall[44].pCurrent) #if !SQLITE_OS_WINRT && defined(SQLITE_WIN32_HAS_WIDE) && \ !defined(SQLITE_OMIT_LOAD_EXTENSION) { "LoadLibraryW", (SYSCALL)LoadLibraryW, 0 }, #else { "LoadLibraryW", (SYSCALL)0, 0 }, #endif #define osLoadLibraryW ((HMODULE(WINAPI*)(LPCWSTR))aSyscall[45].pCurrent) #if !SQLITE_OS_WINRT { "LocalFree", (SYSCALL)LocalFree, 0 }, #else { "LocalFree", (SYSCALL)0, 0 }, #endif #define osLocalFree ((HLOCAL(WINAPI*)(HLOCAL))aSyscall[46].pCurrent) #if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT { "LockFile", (SYSCALL)LockFile, 0 }, #else { "LockFile", (SYSCALL)0, 0 }, #endif #ifndef osLockFile #define osLockFile ((BOOL(WINAPI*)(HANDLE,DWORD,DWORD,DWORD, \ DWORD))aSyscall[47].pCurrent) #endif #if !SQLITE_OS_WINCE { "LockFileEx", (SYSCALL)LockFileEx, 0 }, #else { "LockFileEx", (SYSCALL)0, 0 }, #endif #ifndef osLockFileEx #define osLockFileEx ((BOOL(WINAPI*)(HANDLE,DWORD,DWORD,DWORD,DWORD, \ LPOVERLAPPED))aSyscall[48].pCurrent) #endif #if SQLITE_OS_WINCE || (!SQLITE_OS_WINRT && \ (!defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0)) { "MapViewOfFile", (SYSCALL)MapViewOfFile, 0 }, #else { "MapViewOfFile", (SYSCALL)0, 0 }, #endif #define osMapViewOfFile ((LPVOID(WINAPI*)(HANDLE,DWORD,DWORD,DWORD, \ SIZE_T))aSyscall[49].pCurrent) { "MultiByteToWideChar", (SYSCALL)MultiByteToWideChar, 0 }, #define osMultiByteToWideChar ((int(WINAPI*)(UINT,DWORD,LPCSTR,int,LPWSTR, \ int))aSyscall[50].pCurrent) { "QueryPerformanceCounter", (SYSCALL)QueryPerformanceCounter, 0 }, #define osQueryPerformanceCounter ((BOOL(WINAPI*)( \ LARGE_INTEGER*))aSyscall[51].pCurrent) { "ReadFile", (SYSCALL)ReadFile, 0 }, #define osReadFile ((BOOL(WINAPI*)(HANDLE,LPVOID,DWORD,LPDWORD, \ LPOVERLAPPED))aSyscall[52].pCurrent) { "SetEndOfFile", (SYSCALL)SetEndOfFile, 0 }, #define osSetEndOfFile ((BOOL(WINAPI*)(HANDLE))aSyscall[53].pCurrent) #if !SQLITE_OS_WINRT { "SetFilePointer", (SYSCALL)SetFilePointer, 0 }, #else { "SetFilePointer", (SYSCALL)0, 0 }, #endif #define osSetFilePointer ((DWORD(WINAPI*)(HANDLE,LONG,PLONG, \ DWORD))aSyscall[54].pCurrent) #if !SQLITE_OS_WINRT { "Sleep", (SYSCALL)Sleep, 0 }, #else { "Sleep", (SYSCALL)0, 0 }, #endif #define osSleep ((VOID(WINAPI*)(DWORD))aSyscall[55].pCurrent) { "SystemTimeToFileTime", (SYSCALL)SystemTimeToFileTime, 0 }, #define osSystemTimeToFileTime ((BOOL(WINAPI*)(CONST SYSTEMTIME*, \ LPFILETIME))aSyscall[56].pCurrent) #if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT { "UnlockFile", (SYSCALL)UnlockFile, 0 }, #else { "UnlockFile", (SYSCALL)0, 0 }, #endif #ifndef osUnlockFile #define osUnlockFile ((BOOL(WINAPI*)(HANDLE,DWORD,DWORD,DWORD, \ DWORD))aSyscall[57].pCurrent) #endif #if !SQLITE_OS_WINCE { "UnlockFileEx", (SYSCALL)UnlockFileEx, 0 }, #else { "UnlockFileEx", (SYSCALL)0, 0 }, #endif #define osUnlockFileEx ((BOOL(WINAPI*)(HANDLE,DWORD,DWORD,DWORD, \ LPOVERLAPPED))aSyscall[58].pCurrent) #if SQLITE_OS_WINCE || !defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0 { "UnmapViewOfFile", (SYSCALL)UnmapViewOfFile, 0 }, #else { "UnmapViewOfFile", (SYSCALL)0, 0 }, #endif #define osUnmapViewOfFile ((BOOL(WINAPI*)(LPCVOID))aSyscall[59].pCurrent) { "WideCharToMultiByte", (SYSCALL)WideCharToMultiByte, 0 }, #define osWideCharToMultiByte ((int(WINAPI*)(UINT,DWORD,LPCWSTR,int,LPSTR,int, \ LPCSTR,LPBOOL))aSyscall[60].pCurrent) { "WriteFile", (SYSCALL)WriteFile, 0 }, #define osWriteFile ((BOOL(WINAPI*)(HANDLE,LPCVOID,DWORD,LPDWORD, \ LPOVERLAPPED))aSyscall[61].pCurrent) #if SQLITE_OS_WINRT { "CreateEventExW", 
(SYSCALL)CreateEventExW, 0 }, #else { "CreateEventExW", (SYSCALL)0, 0 }, #endif #define osCreateEventExW ((HANDLE(WINAPI*)(LPSECURITY_ATTRIBUTES,LPCWSTR, \ DWORD,DWORD))aSyscall[62].pCurrent) #if !SQLITE_OS_WINRT { "WaitForSingleObject", (SYSCALL)WaitForSingleObject, 0 }, #else { "WaitForSingleObject", (SYSCALL)0, 0 }, #endif #define osWaitForSingleObject ((DWORD(WINAPI*)(HANDLE, \ DWORD))aSyscall[63].pCurrent) #if !SQLITE_OS_WINCE { "WaitForSingleObjectEx", (SYSCALL)WaitForSingleObjectEx, 0 }, #else { "WaitForSingleObjectEx", (SYSCALL)0, 0 }, #endif #define osWaitForSingleObjectEx ((DWORD(WINAPI*)(HANDLE,DWORD, \ BOOL))aSyscall[64].pCurrent) #if SQLITE_OS_WINRT { "SetFilePointerEx", (SYSCALL)SetFilePointerEx, 0 }, #else { "SetFilePointerEx", (SYSCALL)0, 0 }, #endif #define osSetFilePointerEx ((BOOL(WINAPI*)(HANDLE,LARGE_INTEGER, \ PLARGE_INTEGER,DWORD))aSyscall[65].pCurrent) #if SQLITE_OS_WINRT { "GetFileInformationByHandleEx", (SYSCALL)GetFileInformationByHandleEx, 0 }, #else { "GetFileInformationByHandleEx", (SYSCALL)0, 0 }, #endif #define osGetFileInformationByHandleEx ((BOOL(WINAPI*)(HANDLE, \ FILE_INFO_BY_HANDLE_CLASS,LPVOID,DWORD))aSyscall[66].pCurrent) #if SQLITE_OS_WINRT && (!defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0) { "MapViewOfFileFromApp", (SYSCALL)MapViewOfFileFromApp, 0 }, #else { "MapViewOfFileFromApp", (SYSCALL)0, 0 }, #endif #define osMapViewOfFileFromApp ((LPVOID(WINAPI*)(HANDLE,ULONG,ULONG64, \ SIZE_T))aSyscall[67].pCurrent) #if SQLITE_OS_WINRT { "CreateFile2", (SYSCALL)CreateFile2, 0 }, #else { "CreateFile2", (SYSCALL)0, 0 }, #endif #define osCreateFile2 ((HANDLE(WINAPI*)(LPCWSTR,DWORD,DWORD,DWORD, \ LPCREATEFILE2_EXTENDED_PARAMETERS))aSyscall[68].pCurrent) #if SQLITE_OS_WINRT && !defined(SQLITE_OMIT_LOAD_EXTENSION) { "LoadPackagedLibrary", (SYSCALL)LoadPackagedLibrary, 0 }, #else { "LoadPackagedLibrary", (SYSCALL)0, 0 }, #endif #define osLoadPackagedLibrary ((HMODULE(WINAPI*)(LPCWSTR, \ DWORD))aSyscall[69].pCurrent) #if SQLITE_OS_WINRT { "GetTickCount64", (SYSCALL)GetTickCount64, 0 }, #else { "GetTickCount64", (SYSCALL)0, 0 }, #endif #define osGetTickCount64 ((ULONGLONG(WINAPI*)(VOID))aSyscall[70].pCurrent) #if SQLITE_OS_WINRT { "GetNativeSystemInfo", (SYSCALL)GetNativeSystemInfo, 0 }, #else { "GetNativeSystemInfo", (SYSCALL)0, 0 }, #endif #define osGetNativeSystemInfo ((VOID(WINAPI*)( \ LPSYSTEM_INFO))aSyscall[71].pCurrent) #if defined(SQLITE_WIN32_HAS_ANSI) { "OutputDebugStringA", (SYSCALL)OutputDebugStringA, 0 }, #else { "OutputDebugStringA", (SYSCALL)0, 0 }, #endif #define osOutputDebugStringA ((VOID(WINAPI*)(LPCSTR))aSyscall[72].pCurrent) #if defined(SQLITE_WIN32_HAS_WIDE) { "OutputDebugStringW", (SYSCALL)OutputDebugStringW, 0 }, #else { "OutputDebugStringW", (SYSCALL)0, 0 }, #endif #define osOutputDebugStringW ((VOID(WINAPI*)(LPCWSTR))aSyscall[73].pCurrent) { "GetProcessHeap", (SYSCALL)GetProcessHeap, 0 }, #define osGetProcessHeap ((HANDLE(WINAPI*)(VOID))aSyscall[74].pCurrent) #if SQLITE_OS_WINRT && (!defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0) { "CreateFileMappingFromApp", (SYSCALL)CreateFileMappingFromApp, 0 }, #else { "CreateFileMappingFromApp", (SYSCALL)0, 0 }, #endif #define osCreateFileMappingFromApp ((HANDLE(WINAPI*)(HANDLE, \ LPSECURITY_ATTRIBUTES,ULONG,ULONG64,LPCWSTR))aSyscall[75].pCurrent) /* ** NOTE: On some sub-platforms, the InterlockedCompareExchange "function" ** is really just a macro that uses a compiler intrinsic (e.g. x64). ** So do not try to make this is into a redefinable interface. 
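*/
/*
** Illustrative sketch (not part of the original SQLite sources): the
** compare-and-swap semantics relied on below.  InterlockedCompareExchange()
** returns the value that was in the target before the call and stores the
** new value only when the old value equals the comparand, so an exchange
** whose new value equals the comparand is simply an atomic read.
*/
#if 0  /* example only -- not compiled with this file */
static LONG SQLITE_WIN32_VOLATILE demoState = 0;   /* 0 = not yet decided */

static void demoOneTimeInit(void){
  if( InterlockedCompareExchange(&demoState, 0, 0)==0 ){   /* atomic read */
    LONG newValue = 2;                  /* result of some detection step */
    /* Publish newValue only if no other thread has published first. */
    InterlockedCompareExchange(&demoState, newValue, 0);
  }
}
#endif
/*
**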
*/ #if defined(InterlockedCompareExchange) { "InterlockedCompareExchange", (SYSCALL)0, 0 }, #define osInterlockedCompareExchange InterlockedCompareExchange #else { "InterlockedCompareExchange", (SYSCALL)InterlockedCompareExchange, 0 }, #define osInterlockedCompareExchange ((LONG(WINAPI*)(LONG \ SQLITE_WIN32_VOLATILE*, LONG,LONG))aSyscall[76].pCurrent) #endif /* defined(InterlockedCompareExchange) */ #if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT && SQLITE_WIN32_USE_UUID { "UuidCreate", (SYSCALL)UuidCreate, 0 }, #else { "UuidCreate", (SYSCALL)0, 0 }, #endif #define osUuidCreate ((RPC_STATUS(RPC_ENTRY*)(UUID*))aSyscall[77].pCurrent) #if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT && SQLITE_WIN32_USE_UUID { "UuidCreateSequential", (SYSCALL)UuidCreateSequential, 0 }, #else { "UuidCreateSequential", (SYSCALL)0, 0 }, #endif #define osUuidCreateSequential \ ((RPC_STATUS(RPC_ENTRY*)(UUID*))aSyscall[78].pCurrent) #if !defined(SQLITE_NO_SYNC) && SQLITE_MAX_MMAP_SIZE>0 { "FlushViewOfFile", (SYSCALL)FlushViewOfFile, 0 }, #else { "FlushViewOfFile", (SYSCALL)0, 0 }, #endif #define osFlushViewOfFile \ ((BOOL(WINAPI*)(LPCVOID,SIZE_T))aSyscall[79].pCurrent) }; /* End of the overrideable system calls */ /* ** This is the xSetSystemCall() method of sqlite3_vfs for all of the ** "win32" VFSes. Return SQLITE_OK opon successfully updating the ** system call pointer, or SQLITE_NOTFOUND if there is no configurable ** system call named zName. */ static int winSetSystemCall( sqlite3_vfs *pNotUsed, /* The VFS pointer. Not used */ const char *zName, /* Name of system call to override */ sqlite3_syscall_ptr pNewFunc /* Pointer to new system call value */ ){ unsigned int i; int rc = SQLITE_NOTFOUND; UNUSED_PARAMETER(pNotUsed); if( zName==0 ){ /* If no zName is given, restore all system calls to their default ** settings and return NULL */ rc = SQLITE_OK; for(i=0; i0 ){ memset(zDbgBuf, 0, SQLITE_WIN32_DBG_BUF_SIZE); memcpy(zDbgBuf, zBuf, nMin); osOutputDebugStringA(zDbgBuf); }else{ osOutputDebugStringA(zBuf); } #elif defined(SQLITE_WIN32_HAS_WIDE) memset(zDbgBuf, 0, SQLITE_WIN32_DBG_BUF_SIZE); if ( osMultiByteToWideChar( osAreFileApisANSI() ? CP_ACP : CP_OEMCP, 0, zBuf, nMin, (LPWSTR)zDbgBuf, SQLITE_WIN32_DBG_BUF_SIZE/sizeof(WCHAR))<=0 ){ return; } osOutputDebugStringW((LPCWSTR)zDbgBuf); #else if( nMin>0 ){ memset(zDbgBuf, 0, SQLITE_WIN32_DBG_BUF_SIZE); memcpy(zDbgBuf, zBuf, nMin); fprintf(stderr, "%s", zDbgBuf); }else{ fprintf(stderr, "%s", zBuf); } #endif } /* ** The following routine suspends the current thread for at least ms ** milliseconds. This is equivalent to the Win32 Sleep() interface. */ #if SQLITE_OS_WINRT static HANDLE sleepObj = NULL; #endif SQLITE_API void SQLITE_STDCALL sqlite3_win32_sleep(DWORD milliseconds){ #if SQLITE_OS_WINRT if ( sleepObj==NULL ){ sleepObj = osCreateEventExW(NULL, NULL, CREATE_EVENT_MANUAL_RESET, SYNCHRONIZE); } assert( sleepObj!=NULL ); osWaitForSingleObjectEx(sleepObj, milliseconds, FALSE); #else osSleep(milliseconds); #endif } #if SQLITE_MAX_WORKER_THREADS>0 && !SQLITE_OS_WINCE && !SQLITE_OS_WINRT && \ SQLITE_THREADSAFE>0 SQLITE_PRIVATE DWORD sqlite3Win32Wait(HANDLE hObject){ DWORD rc; while( (rc = osWaitForSingleObjectEx(hObject, INFINITE, TRUE))==WAIT_IO_COMPLETION ){} return rc; } #endif /* ** Return true (non-zero) if we are running under WinNT, Win2K, WinXP, ** or WinCE. Return false (zero) for Win95, Win98, or WinME. ** ** Here is an interesting observation: Win95, Win98, and WinME lack ** the LockFileEx() API. 
But we can still statically link against that ** API as long as we don't call it when running Win95/98/ME. A call to ** this routine is used to determine if the host is Win95/98/ME or ** WinNT/2K/XP so that we will know whether or not we can safely call ** the LockFileEx() API. */ #if !defined(SQLITE_WIN32_GETVERSIONEX) || !SQLITE_WIN32_GETVERSIONEX # define osIsNT() (1) #elif SQLITE_OS_WINCE || SQLITE_OS_WINRT || !defined(SQLITE_WIN32_HAS_ANSI) # define osIsNT() (1) #elif !defined(SQLITE_WIN32_HAS_WIDE) # define osIsNT() (0) #else # define osIsNT() ((sqlite3_os_type==2) || sqlite3_win32_is_nt()) #endif /* ** This function determines if the machine is running a version of Windows ** based on the NT kernel. */ SQLITE_API int SQLITE_STDCALL sqlite3_win32_is_nt(void){ #if SQLITE_OS_WINRT /* ** NOTE: The WinRT sub-platform is always assumed to be based on the NT ** kernel. */ return 1; #elif defined(SQLITE_WIN32_GETVERSIONEX) && SQLITE_WIN32_GETVERSIONEX if( osInterlockedCompareExchange(&sqlite3_os_type, 0, 0)==0 ){ #if defined(SQLITE_WIN32_HAS_ANSI) OSVERSIONINFOA sInfo; sInfo.dwOSVersionInfoSize = sizeof(sInfo); osGetVersionExA(&sInfo); osInterlockedCompareExchange(&sqlite3_os_type, (sInfo.dwPlatformId == VER_PLATFORM_WIN32_NT) ? 2 : 1, 0); #elif defined(SQLITE_WIN32_HAS_WIDE) OSVERSIONINFOW sInfo; sInfo.dwOSVersionInfoSize = sizeof(sInfo); osGetVersionExW(&sInfo); osInterlockedCompareExchange(&sqlite3_os_type, (sInfo.dwPlatformId == VER_PLATFORM_WIN32_NT) ? 2 : 1, 0); #endif } return osInterlockedCompareExchange(&sqlite3_os_type, 2, 2)==2; #elif SQLITE_TEST return osInterlockedCompareExchange(&sqlite3_os_type, 2, 2)==2; #else /* ** NOTE: All sub-platforms where the GetVersionEx[AW] functions are ** deprecated are always assumed to be based on the NT kernel. */ return 1; #endif } #ifdef SQLITE_WIN32_MALLOC /* ** Allocate nBytes of memory. */ static void *winMemMalloc(int nBytes){ HANDLE hHeap; void *p; winMemAssertMagic(); hHeap = winMemGetHeap(); assert( hHeap!=0 ); assert( hHeap!=INVALID_HANDLE_VALUE ); #if !SQLITE_OS_WINRT && defined(SQLITE_WIN32_MALLOC_VALIDATE) assert( osHeapValidate(hHeap, SQLITE_WIN32_HEAP_FLAGS, NULL) ); #endif assert( nBytes>=0 ); p = osHeapAlloc(hHeap, SQLITE_WIN32_HEAP_FLAGS, (SIZE_T)nBytes); if( !p ){ sqlite3_log(SQLITE_NOMEM, "failed to HeapAlloc %u bytes (%lu), heap=%p", nBytes, osGetLastError(), (void*)hHeap); } return p; } /* ** Free memory. */ static void winMemFree(void *pPrior){ HANDLE hHeap; winMemAssertMagic(); hHeap = winMemGetHeap(); assert( hHeap!=0 ); assert( hHeap!=INVALID_HANDLE_VALUE ); #if !SQLITE_OS_WINRT && defined(SQLITE_WIN32_MALLOC_VALIDATE) assert( osHeapValidate(hHeap, SQLITE_WIN32_HEAP_FLAGS, pPrior) ); #endif if( !pPrior ) return; /* Passing NULL to HeapFree is undefined. 
*/ if( !osHeapFree(hHeap, SQLITE_WIN32_HEAP_FLAGS, pPrior) ){ sqlite3_log(SQLITE_NOMEM, "failed to HeapFree block %p (%lu), heap=%p", pPrior, osGetLastError(), (void*)hHeap); } } /* ** Change the size of an existing memory allocation */ static void *winMemRealloc(void *pPrior, int nBytes){ HANDLE hHeap; void *p; winMemAssertMagic(); hHeap = winMemGetHeap(); assert( hHeap!=0 ); assert( hHeap!=INVALID_HANDLE_VALUE ); #if !SQLITE_OS_WINRT && defined(SQLITE_WIN32_MALLOC_VALIDATE) assert( osHeapValidate(hHeap, SQLITE_WIN32_HEAP_FLAGS, pPrior) ); #endif assert( nBytes>=0 ); if( !pPrior ){ p = osHeapAlloc(hHeap, SQLITE_WIN32_HEAP_FLAGS, (SIZE_T)nBytes); }else{ p = osHeapReAlloc(hHeap, SQLITE_WIN32_HEAP_FLAGS, pPrior, (SIZE_T)nBytes); } if( !p ){ sqlite3_log(SQLITE_NOMEM, "failed to %s %u bytes (%lu), heap=%p", pPrior ? "HeapReAlloc" : "HeapAlloc", nBytes, osGetLastError(), (void*)hHeap); } return p; } /* ** Return the size of an outstanding allocation, in bytes. */ static int winMemSize(void *p){ HANDLE hHeap; SIZE_T n; winMemAssertMagic(); hHeap = winMemGetHeap(); assert( hHeap!=0 ); assert( hHeap!=INVALID_HANDLE_VALUE ); #if !SQLITE_OS_WINRT && defined(SQLITE_WIN32_MALLOC_VALIDATE) assert( osHeapValidate(hHeap, SQLITE_WIN32_HEAP_FLAGS, p) ); #endif if( !p ) return 0; n = osHeapSize(hHeap, SQLITE_WIN32_HEAP_FLAGS, p); if( n==(SIZE_T)-1 ){ sqlite3_log(SQLITE_NOMEM, "failed to HeapSize block %p (%lu), heap=%p", p, osGetLastError(), (void*)hHeap); return 0; } return (int)n; } /* ** Round up a request size to the next valid allocation size. */ static int winMemRoundup(int n){ return n; } /* ** Initialize this module. */ static int winMemInit(void *pAppData){ winMemData *pWinMemData = (winMemData *)pAppData; if( !pWinMemData ) return SQLITE_ERROR; assert( pWinMemData->magic1==WINMEM_MAGIC1 ); assert( pWinMemData->magic2==WINMEM_MAGIC2 ); #if !SQLITE_OS_WINRT && SQLITE_WIN32_HEAP_CREATE if( !pWinMemData->hHeap ){ DWORD dwInitialSize = SQLITE_WIN32_HEAP_INIT_SIZE; DWORD dwMaximumSize = (DWORD)sqlite3GlobalConfig.nHeap; if( dwMaximumSize==0 ){ dwMaximumSize = SQLITE_WIN32_HEAP_MAX_SIZE; }else if( dwInitialSize>dwMaximumSize ){ dwInitialSize = dwMaximumSize; } pWinMemData->hHeap = osHeapCreate(SQLITE_WIN32_HEAP_FLAGS, dwInitialSize, dwMaximumSize); if( !pWinMemData->hHeap ){ sqlite3_log(SQLITE_NOMEM, "failed to HeapCreate (%lu), flags=%u, initSize=%lu, maxSize=%lu", osGetLastError(), SQLITE_WIN32_HEAP_FLAGS, dwInitialSize, dwMaximumSize); return SQLITE_NOMEM; } pWinMemData->bOwned = TRUE; assert( pWinMemData->bOwned ); } #else pWinMemData->hHeap = osGetProcessHeap(); if( !pWinMemData->hHeap ){ sqlite3_log(SQLITE_NOMEM, "failed to GetProcessHeap (%lu)", osGetLastError()); return SQLITE_NOMEM; } pWinMemData->bOwned = FALSE; assert( !pWinMemData->bOwned ); #endif assert( pWinMemData->hHeap!=0 ); assert( pWinMemData->hHeap!=INVALID_HANDLE_VALUE ); #if !SQLITE_OS_WINRT && defined(SQLITE_WIN32_MALLOC_VALIDATE) assert( osHeapValidate(pWinMemData->hHeap, SQLITE_WIN32_HEAP_FLAGS, NULL) ); #endif return SQLITE_OK; } /* ** Deinitialize this module. 
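*/
/*
** Illustrative sketch (not part of the original SQLite sources): the same
** sqlite3_mem_methods mechanism is available to applications through the
** public sqlite3_config() interface.  All demo* names are hypothetical; the
** allocator below simply forwards to the default process heap.
*/
#if 0  /* example only -- not compiled with this file */
static void *demoMalloc(int n){
  return HeapAlloc(GetProcessHeap(), 0, (SIZE_T)n);
}
static void demoFree(void *p){
  if( p ) HeapFree(GetProcessHeap(), 0, p);
}
static void *demoRealloc(void *p, int n){
  return p ? HeapReAlloc(GetProcessHeap(), 0, p, (SIZE_T)n) : demoMalloc(n);
}
static int demoSize(void *p){
  return p ? (int)HeapSize(GetProcessHeap(), 0, p) : 0;
}
static int demoRoundup(int n){ return n; }
static int demoInit(void *pAppData){ (void)pAppData; return SQLITE_OK; }
static void demoShutdown(void *pAppData){ (void)pAppData; }

static int demoInstallAllocator(void){
  static const sqlite3_mem_methods demoMethods = {
    demoMalloc, demoFree, demoRealloc, demoSize, demoRoundup,
    demoInit, demoShutdown, 0
  };
  /* Must run before sqlite3_initialize() or any other use of the library. */
  return sqlite3_config(SQLITE_CONFIG_MALLOC, &demoMethods);
}
#endif
/*
**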
*/ static void winMemShutdown(void *pAppData){ winMemData *pWinMemData = (winMemData *)pAppData; if( !pWinMemData ) return; assert( pWinMemData->magic1==WINMEM_MAGIC1 ); assert( pWinMemData->magic2==WINMEM_MAGIC2 ); if( pWinMemData->hHeap ){ assert( pWinMemData->hHeap!=INVALID_HANDLE_VALUE ); #if !SQLITE_OS_WINRT && defined(SQLITE_WIN32_MALLOC_VALIDATE) assert( osHeapValidate(pWinMemData->hHeap, SQLITE_WIN32_HEAP_FLAGS, NULL) ); #endif if( pWinMemData->bOwned ){ if( !osHeapDestroy(pWinMemData->hHeap) ){ sqlite3_log(SQLITE_NOMEM, "failed to HeapDestroy (%lu), heap=%p", osGetLastError(), (void*)pWinMemData->hHeap); } pWinMemData->bOwned = FALSE; } pWinMemData->hHeap = NULL; } } /* ** Populate the low-level memory allocation function pointers in ** sqlite3GlobalConfig.m with pointers to the routines in this file. The ** arguments specify the block of memory to manage. ** ** This routine is only called by sqlite3_config(), and therefore ** is not required to be threadsafe (it is not). */ SQLITE_PRIVATE const sqlite3_mem_methods *sqlite3MemGetWin32(void){ static const sqlite3_mem_methods winMemMethods = { winMemMalloc, winMemFree, winMemRealloc, winMemSize, winMemRoundup, winMemInit, winMemShutdown, &win_mem_data }; return &winMemMethods; } SQLITE_PRIVATE void sqlite3MemSetDefault(void){ sqlite3_config(SQLITE_CONFIG_MALLOC, sqlite3MemGetWin32()); } #endif /* SQLITE_WIN32_MALLOC */ /* ** Convert a UTF-8 string to Microsoft Unicode (UTF-16?). ** ** Space to hold the returned string is obtained from malloc. */ static LPWSTR winUtf8ToUnicode(const char *zFilename){ int nChar; LPWSTR zWideFilename; nChar = osMultiByteToWideChar(CP_UTF8, 0, zFilename, -1, NULL, 0); if( nChar==0 ){ return 0; } zWideFilename = sqlite3MallocZero( nChar*sizeof(zWideFilename[0]) ); if( zWideFilename==0 ){ return 0; } nChar = osMultiByteToWideChar(CP_UTF8, 0, zFilename, -1, zWideFilename, nChar); if( nChar==0 ){ sqlite3_free(zWideFilename); zWideFilename = 0; } return zWideFilename; } /* ** Convert Microsoft Unicode to UTF-8. Space to hold the returned string is ** obtained from sqlite3_malloc(). */ static char *winUnicodeToUtf8(LPCWSTR zWideFilename){ int nByte; char *zFilename; nByte = osWideCharToMultiByte(CP_UTF8, 0, zWideFilename, -1, 0, 0, 0, 0); if( nByte == 0 ){ return 0; } zFilename = sqlite3MallocZero( nByte ); if( zFilename==0 ){ return 0; } nByte = osWideCharToMultiByte(CP_UTF8, 0, zWideFilename, -1, zFilename, nByte, 0, 0); if( nByte == 0 ){ sqlite3_free(zFilename); zFilename = 0; } return zFilename; } /* ** Convert an ANSI string to Microsoft Unicode, based on the ** current codepage settings for file apis. ** ** Space to hold the returned string is obtained ** from sqlite3_malloc. */ static LPWSTR winMbcsToUnicode(const char *zFilename){ int nByte; LPWSTR zMbcsFilename; int codepage = osAreFileApisANSI() ? CP_ACP : CP_OEMCP; nByte = osMultiByteToWideChar(codepage, 0, zFilename, -1, NULL, 0)*sizeof(WCHAR); if( nByte==0 ){ return 0; } zMbcsFilename = sqlite3MallocZero( nByte*sizeof(zMbcsFilename[0]) ); if( zMbcsFilename==0 ){ return 0; } nByte = osMultiByteToWideChar(codepage, 0, zFilename, -1, zMbcsFilename, nByte); if( nByte==0 ){ sqlite3_free(zMbcsFilename); zMbcsFilename = 0; } return zMbcsFilename; } /* ** Convert Microsoft Unicode to multi-byte character string, based on the ** user's ANSI codepage. ** ** Space to hold the returned string is obtained from ** sqlite3_malloc(). */ static char *winUnicodeToMbcs(LPCWSTR zWideFilename){ int nByte; char *zFilename; int codepage = osAreFileApisANSI() ? 
CP_ACP : CP_OEMCP; nByte = osWideCharToMultiByte(codepage, 0, zWideFilename, -1, 0, 0, 0, 0); if( nByte == 0 ){ return 0; } zFilename = sqlite3MallocZero( nByte ); if( zFilename==0 ){ return 0; } nByte = osWideCharToMultiByte(codepage, 0, zWideFilename, -1, zFilename, nByte, 0, 0); if( nByte == 0 ){ sqlite3_free(zFilename); zFilename = 0; } return zFilename; } /* ** Convert multibyte character string to UTF-8. Space to hold the ** returned string is obtained from sqlite3_malloc(). */ SQLITE_API char *SQLITE_STDCALL sqlite3_win32_mbcs_to_utf8(const char *zFilename){ char *zFilenameUtf8; LPWSTR zTmpWide; zTmpWide = winMbcsToUnicode(zFilename); if( zTmpWide==0 ){ return 0; } zFilenameUtf8 = winUnicodeToUtf8(zTmpWide); sqlite3_free(zTmpWide); return zFilenameUtf8; } /* ** Convert UTF-8 to multibyte character string. Space to hold the ** returned string is obtained from sqlite3_malloc(). */ SQLITE_API char *SQLITE_STDCALL sqlite3_win32_utf8_to_mbcs(const char *zFilename){ char *zFilenameMbcs; LPWSTR zTmpWide; zTmpWide = winUtf8ToUnicode(zFilename); if( zTmpWide==0 ){ return 0; } zFilenameMbcs = winUnicodeToMbcs(zTmpWide); sqlite3_free(zTmpWide); return zFilenameMbcs; } /* ** This function sets the data directory or the temporary directory based on ** the provided arguments. The type argument must be 1 in order to set the ** data directory or 2 in order to set the temporary directory. The zValue ** argument is the name of the directory to use. The return value will be ** SQLITE_OK if successful. */ SQLITE_API int SQLITE_STDCALL sqlite3_win32_set_directory(DWORD type, LPCWSTR zValue){ char **ppDirectory = 0; #ifndef SQLITE_OMIT_AUTOINIT int rc = sqlite3_initialize(); if( rc ) return rc; #endif if( type==SQLITE_WIN32_DATA_DIRECTORY_TYPE ){ ppDirectory = &sqlite3_data_directory; }else if( type==SQLITE_WIN32_TEMP_DIRECTORY_TYPE ){ ppDirectory = &sqlite3_temp_directory; } assert( !ppDirectory || type==SQLITE_WIN32_DATA_DIRECTORY_TYPE || type==SQLITE_WIN32_TEMP_DIRECTORY_TYPE ); assert( !ppDirectory || sqlite3MemdebugHasType(*ppDirectory, MEMTYPE_HEAP) ); if( ppDirectory ){ char *zValueUtf8 = 0; if( zValue && zValue[0] ){ zValueUtf8 = winUnicodeToUtf8(zValue); if ( zValueUtf8==0 ){ return SQLITE_NOMEM; } } sqlite3_free(*ppDirectory); *ppDirectory = zValueUtf8; return SQLITE_OK; } return SQLITE_ERROR; } /* ** The return value of winGetLastErrorMsg ** is zero if the error message fits in the buffer, or non-zero ** otherwise (if the message was truncated). */ static int winGetLastErrorMsg(DWORD lastErrno, int nBuf, char *zBuf){ /* FormatMessage returns 0 on failure. Otherwise it ** returns the number of TCHARs written to the output ** buffer, excluding the terminating null char. 
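*/
/*
** Illustrative sketch (not part of the original SQLite sources): the public
** sqlite3_win32_set_directory() entry point defined above can redirect
** temporary files before any database is opened.  The directory path is
** hypothetical; passing a NULL value restores the built-in default.
*/
#if 0  /* example only -- not compiled with this file */
static int demoSetTempDirectory(void){
  return sqlite3_win32_set_directory(SQLITE_WIN32_TEMP_DIRECTORY_TYPE,
                                     L"C:\\MyApp\\Temp");
}
#endif
/*
**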
*/ DWORD dwLen = 0; char *zOut = 0; if( osIsNT() ){ #if SQLITE_OS_WINRT WCHAR zTempWide[SQLITE_WIN32_MAX_ERRMSG_CHARS+1]; dwLen = osFormatMessageW(FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, NULL, lastErrno, 0, zTempWide, SQLITE_WIN32_MAX_ERRMSG_CHARS, 0); #else LPWSTR zTempWide = NULL; dwLen = osFormatMessageW(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, NULL, lastErrno, 0, (LPWSTR) &zTempWide, 0, 0); #endif if( dwLen > 0 ){ /* allocate a buffer and convert to UTF8 */ sqlite3BeginBenignMalloc(); zOut = winUnicodeToUtf8(zTempWide); sqlite3EndBenignMalloc(); #if !SQLITE_OS_WINRT /* free the system buffer allocated by FormatMessage */ osLocalFree(zTempWide); #endif } } #ifdef SQLITE_WIN32_HAS_ANSI else{ char *zTemp = NULL; dwLen = osFormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, NULL, lastErrno, 0, (LPSTR) &zTemp, 0, 0); if( dwLen > 0 ){ /* allocate a buffer and convert to UTF8 */ sqlite3BeginBenignMalloc(); zOut = sqlite3_win32_mbcs_to_utf8(zTemp); sqlite3EndBenignMalloc(); /* free the system buffer allocated by FormatMessage */ osLocalFree(zTemp); } } #endif if( 0 == dwLen ){ sqlite3_snprintf(nBuf, zBuf, "OsError 0x%lx (%lu)", lastErrno, lastErrno); }else{ /* copy a maximum of nBuf chars to output buffer */ sqlite3_snprintf(nBuf, zBuf, "%s", zOut); /* free the UTF8 buffer */ sqlite3_free(zOut); } return 0; } /* ** ** This function - winLogErrorAtLine() - is only ever called via the macro ** winLogError(). ** ** This routine is invoked after an error occurs in an OS function. ** It logs a message using sqlite3_log() containing the current value of ** error code and, if possible, the human-readable equivalent from ** FormatMessage. ** ** The first argument passed to the macro should be the error code that ** will be returned to SQLite (e.g. SQLITE_IOERR_DELETE, SQLITE_CANTOPEN). ** The two subsequent arguments should be the name of the OS function that ** failed and the associated file-system path, if any. */ #define winLogError(a,b,c,d) winLogErrorAtLine(a,b,c,d,__LINE__) static int winLogErrorAtLine( int errcode, /* SQLite error code */ DWORD lastErrno, /* Win32 last error */ const char *zFunc, /* Name of OS function that failed */ const char *zPath, /* File path associated with error */ int iLine /* Source line number where error occurred */ ){ char zMsg[500]; /* Human readable error text */ int i; /* Loop counter */ zMsg[0] = 0; winGetLastErrorMsg(lastErrno, sizeof(zMsg), zMsg); assert( errcode!=SQLITE_OK ); if( zPath==0 ) zPath = ""; for(i=0; zMsg[i] && zMsg[i]!='\r' && zMsg[i]!='\n'; i++){} zMsg[i] = 0; sqlite3_log(errcode, "os_win.c:%d: (%lu) %s(%s) - %s", iLine, lastErrno, zFunc, zPath, zMsg ); return errcode; } /* ** The number of times that a ReadFile(), WriteFile(), and DeleteFile() ** will be retried following a locking error - probably caused by ** antivirus software. Also the initial delay before the first retry. ** The delay increases linearly with each retry. */ #ifndef SQLITE_WIN32_IOERR_RETRY # define SQLITE_WIN32_IOERR_RETRY 10 #endif #ifndef SQLITE_WIN32_IOERR_RETRY_DELAY # define SQLITE_WIN32_IOERR_RETRY_DELAY 25 #endif static int winIoerrRetry = SQLITE_WIN32_IOERR_RETRY; static int winIoerrRetryDelay = SQLITE_WIN32_IOERR_RETRY_DELAY; /* ** The "winIoerrCanRetry1" macro is used to determine if a particular I/O ** error code obtained via GetLastError() is eligible to be retried. 
It ** must accept the error code DWORD as its only argument and should return ** non-zero if the error code is transient in nature and the operation ** responsible for generating the original error might succeed upon being ** retried. The argument to this macro should be a variable. ** ** Additionally, a macro named "winIoerrCanRetry2" may be defined. If it ** is defined, it will be consulted only when the macro "winIoerrCanRetry1" ** returns zero. The "winIoerrCanRetry2" macro is completely optional and ** may be used to include additional error codes in the set that should ** result in the failing I/O operation being retried by the caller. If ** defined, the "winIoerrCanRetry2" macro must exhibit external semantics ** identical to those of the "winIoerrCanRetry1" macro. */ #if !defined(winIoerrCanRetry1) #define winIoerrCanRetry1(a) (((a)==ERROR_ACCESS_DENIED) || \ ((a)==ERROR_SHARING_VIOLATION) || \ ((a)==ERROR_LOCK_VIOLATION) || \ ((a)==ERROR_DEV_NOT_EXIST) || \ ((a)==ERROR_NETNAME_DELETED) || \ ((a)==ERROR_SEM_TIMEOUT) || \ ((a)==ERROR_NETWORK_UNREACHABLE)) #endif /* ** If a ReadFile() or WriteFile() error occurs, invoke this routine ** to see if it should be retried. Return TRUE to retry. Return FALSE ** to give up with an error. */ static int winRetryIoerr(int *pnRetry, DWORD *pError){ DWORD e = osGetLastError(); if( *pnRetry>=winIoerrRetry ){ if( pError ){ *pError = e; } return 0; } if( winIoerrCanRetry1(e) ){ sqlite3_win32_sleep(winIoerrRetryDelay*(1+*pnRetry)); ++*pnRetry; return 1; } #if defined(winIoerrCanRetry2) else if( winIoerrCanRetry2(e) ){ sqlite3_win32_sleep(winIoerrRetryDelay*(1+*pnRetry)); ++*pnRetry; return 1; } #endif if( pError ){ *pError = e; } return 0; } /* ** Log a I/O error retry episode. */ static void winLogIoerr(int nRetry, int lineno){ if( nRetry ){ sqlite3_log(SQLITE_NOTICE, "delayed %dms for lock/sharing conflict at line %d", winIoerrRetryDelay*nRetry*(nRetry+1)/2, lineno ); } } #if SQLITE_OS_WINCE /************************************************************************* ** This section contains code for WinCE only. */ #if !defined(SQLITE_MSVC_LOCALTIME_API) || !SQLITE_MSVC_LOCALTIME_API /* ** The MSVC CRT on Windows CE may not have a localtime() function. So ** create a substitute. 
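*/
/*
** Illustrative sketch (not part of the original SQLite sources): a build
** that wants additional error codes treated as transient can define the
** optional winIoerrCanRetry2 hook before the retry logic above is compiled,
** for example from the compiler command line.  ERROR_NOT_READY is used here
** purely to show the expected shape of such a definition.
*/
#if 0  /* example only -- not compiled with this file */
#define winIoerrCanRetry2(a) ((a)==ERROR_NOT_READY)
#endif
/*
**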
*/ /* #include */ struct tm *__cdecl localtime(const time_t *t) { static struct tm y; FILETIME uTm, lTm; SYSTEMTIME pTm; sqlite3_int64 t64; t64 = *t; t64 = (t64 + 11644473600)*10000000; uTm.dwLowDateTime = (DWORD)(t64 & 0xFFFFFFFF); uTm.dwHighDateTime= (DWORD)(t64 >> 32); osFileTimeToLocalFileTime(&uTm,&lTm); osFileTimeToSystemTime(&lTm,&pTm); y.tm_year = pTm.wYear - 1900; y.tm_mon = pTm.wMonth - 1; y.tm_wday = pTm.wDayOfWeek; y.tm_mday = pTm.wDay; y.tm_hour = pTm.wHour; y.tm_min = pTm.wMinute; y.tm_sec = pTm.wSecond; return &y; } #endif #define HANDLE_TO_WINFILE(a) (winFile*)&((char*)a)[-(int)offsetof(winFile,h)] /* ** Acquire a lock on the handle h */ static void winceMutexAcquire(HANDLE h){ DWORD dwErr; do { dwErr = osWaitForSingleObject(h, INFINITE); } while (dwErr != WAIT_OBJECT_0 && dwErr != WAIT_ABANDONED); } /* ** Release a lock acquired by winceMutexAcquire() */ #define winceMutexRelease(h) ReleaseMutex(h) /* ** Create the mutex and shared memory used for locking in the file ** descriptor pFile */ static int winceCreateLock(const char *zFilename, winFile *pFile){ LPWSTR zTok; LPWSTR zName; DWORD lastErrno; BOOL bLogged = FALSE; BOOL bInit = TRUE; zName = winUtf8ToUnicode(zFilename); if( zName==0 ){ /* out of memory */ return SQLITE_IOERR_NOMEM; } /* Initialize the local lockdata */ memset(&pFile->local, 0, sizeof(pFile->local)); /* Replace the backslashes from the filename and lowercase it ** to derive a mutex name. */ zTok = osCharLowerW(zName); for (;*zTok;zTok++){ if (*zTok == '\\') *zTok = '_'; } /* Create/open the named mutex */ pFile->hMutex = osCreateMutexW(NULL, FALSE, zName); if (!pFile->hMutex){ pFile->lastErrno = osGetLastError(); sqlite3_free(zName); return winLogError(SQLITE_IOERR, pFile->lastErrno, "winceCreateLock1", zFilename); } /* Acquire the mutex before continuing */ winceMutexAcquire(pFile->hMutex); /* Since the names of named mutexes, semaphores, file mappings etc are ** case-sensitive, take advantage of that by uppercasing the mutex name ** and using that as the shared filemapping name. */ osCharUpperW(zName); pFile->hShared = osCreateFileMappingW(INVALID_HANDLE_VALUE, NULL, PAGE_READWRITE, 0, sizeof(winceLock), zName); /* Set a flag that indicates we're the first to create the memory so it ** must be zero-initialized */ lastErrno = osGetLastError(); if (lastErrno == ERROR_ALREADY_EXISTS){ bInit = FALSE; } sqlite3_free(zName); /* If we succeeded in making the shared memory handle, map it. 
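*/
/*
** Illustrative sketch (not part of the original SQLite sources): the inverse
** of the epoch shift used by the localtime() substitute above.  A FILETIME
** counts 100-nanosecond ticks since 1601-01-01, so dividing by 10,000,000
** and subtracting 11644473600 seconds yields a Unix time_t value.
*/
#if 0  /* example only -- not compiled with this file */
static sqlite3_int64 demoFileTimeToUnix(const FILETIME *pFt){
  sqlite3_int64 t64 = ((sqlite3_int64)pFt->dwHighDateTime<<32)
                         | pFt->dwLowDateTime;
  return t64/10000000 - 11644473600LL;   /* ticks to seconds, shift epoch */
}
#endif
/*
**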
*/ if( pFile->hShared ){ pFile->shared = (winceLock*)osMapViewOfFile(pFile->hShared, FILE_MAP_READ|FILE_MAP_WRITE, 0, 0, sizeof(winceLock)); /* If mapping failed, close the shared memory handle and erase it */ if( !pFile->shared ){ pFile->lastErrno = osGetLastError(); winLogError(SQLITE_IOERR, pFile->lastErrno, "winceCreateLock2", zFilename); bLogged = TRUE; osCloseHandle(pFile->hShared); pFile->hShared = NULL; } } /* If shared memory could not be created, then close the mutex and fail */ if( pFile->hShared==NULL ){ if( !bLogged ){ pFile->lastErrno = lastErrno; winLogError(SQLITE_IOERR, pFile->lastErrno, "winceCreateLock3", zFilename); bLogged = TRUE; } winceMutexRelease(pFile->hMutex); osCloseHandle(pFile->hMutex); pFile->hMutex = NULL; return SQLITE_IOERR; } /* Initialize the shared memory if we're supposed to */ if( bInit ){ memset(pFile->shared, 0, sizeof(winceLock)); } winceMutexRelease(pFile->hMutex); return SQLITE_OK; } /* ** Destroy the part of winFile that deals with wince locks */ static void winceDestroyLock(winFile *pFile){ if (pFile->hMutex){ /* Acquire the mutex */ winceMutexAcquire(pFile->hMutex); /* The following blocks should probably assert in debug mode, but they are to cleanup in case any locks remained open */ if (pFile->local.nReaders){ pFile->shared->nReaders --; } if (pFile->local.bReserved){ pFile->shared->bReserved = FALSE; } if (pFile->local.bPending){ pFile->shared->bPending = FALSE; } if (pFile->local.bExclusive){ pFile->shared->bExclusive = FALSE; } /* De-reference and close our copy of the shared memory handle */ osUnmapViewOfFile(pFile->shared); osCloseHandle(pFile->hShared); /* Done with the mutex */ winceMutexRelease(pFile->hMutex); osCloseHandle(pFile->hMutex); pFile->hMutex = NULL; } } /* ** An implementation of the LockFile() API of Windows for CE */ static BOOL winceLockFile( LPHANDLE phFile, DWORD dwFileOffsetLow, DWORD dwFileOffsetHigh, DWORD nNumberOfBytesToLockLow, DWORD nNumberOfBytesToLockHigh ){ winFile *pFile = HANDLE_TO_WINFILE(phFile); BOOL bReturn = FALSE; UNUSED_PARAMETER(dwFileOffsetHigh); UNUSED_PARAMETER(nNumberOfBytesToLockHigh); if (!pFile->hMutex) return TRUE; winceMutexAcquire(pFile->hMutex); /* Wanting an exclusive lock? */ if (dwFileOffsetLow == (DWORD)SHARED_FIRST && nNumberOfBytesToLockLow == (DWORD)SHARED_SIZE){ if (pFile->shared->nReaders == 0 && pFile->shared->bExclusive == 0){ pFile->shared->bExclusive = TRUE; pFile->local.bExclusive = TRUE; bReturn = TRUE; } } /* Want a read-only lock? */ else if (dwFileOffsetLow == (DWORD)SHARED_FIRST && nNumberOfBytesToLockLow == 1){ if (pFile->shared->bExclusive == 0){ pFile->local.nReaders ++; if (pFile->local.nReaders == 1){ pFile->shared->nReaders ++; } bReturn = TRUE; } } /* Want a pending lock? */ else if (dwFileOffsetLow == (DWORD)PENDING_BYTE && nNumberOfBytesToLockLow == 1){ /* If no pending lock has been acquired, then acquire it */ if (pFile->shared->bPending == 0) { pFile->shared->bPending = TRUE; pFile->local.bPending = TRUE; bReturn = TRUE; } } /* Want a reserved lock? 
*/ else if (dwFileOffsetLow == (DWORD)RESERVED_BYTE && nNumberOfBytesToLockLow == 1){ if (pFile->shared->bReserved == 0) { pFile->shared->bReserved = TRUE; pFile->local.bReserved = TRUE; bReturn = TRUE; } } winceMutexRelease(pFile->hMutex); return bReturn; } /* ** An implementation of the UnlockFile API of Windows for CE */ static BOOL winceUnlockFile( LPHANDLE phFile, DWORD dwFileOffsetLow, DWORD dwFileOffsetHigh, DWORD nNumberOfBytesToUnlockLow, DWORD nNumberOfBytesToUnlockHigh ){ winFile *pFile = HANDLE_TO_WINFILE(phFile); BOOL bReturn = FALSE; UNUSED_PARAMETER(dwFileOffsetHigh); UNUSED_PARAMETER(nNumberOfBytesToUnlockHigh); if (!pFile->hMutex) return TRUE; winceMutexAcquire(pFile->hMutex); /* Releasing a reader lock or an exclusive lock */ if (dwFileOffsetLow == (DWORD)SHARED_FIRST){ /* Did we have an exclusive lock? */ if (pFile->local.bExclusive){ assert(nNumberOfBytesToUnlockLow == (DWORD)SHARED_SIZE); pFile->local.bExclusive = FALSE; pFile->shared->bExclusive = FALSE; bReturn = TRUE; } /* Did we just have a reader lock? */ else if (pFile->local.nReaders){ assert(nNumberOfBytesToUnlockLow == (DWORD)SHARED_SIZE || nNumberOfBytesToUnlockLow == 1); pFile->local.nReaders --; if (pFile->local.nReaders == 0) { pFile->shared->nReaders --; } bReturn = TRUE; } } /* Releasing a pending lock */ else if (dwFileOffsetLow == (DWORD)PENDING_BYTE && nNumberOfBytesToUnlockLow == 1){ if (pFile->local.bPending){ pFile->local.bPending = FALSE; pFile->shared->bPending = FALSE; bReturn = TRUE; } } /* Releasing a reserved lock */ else if (dwFileOffsetLow == (DWORD)RESERVED_BYTE && nNumberOfBytesToUnlockLow == 1){ if (pFile->local.bReserved) { pFile->local.bReserved = FALSE; pFile->shared->bReserved = FALSE; bReturn = TRUE; } } winceMutexRelease(pFile->hMutex); return bReturn; } /* ** End of the special code for wince *****************************************************************************/ #endif /* SQLITE_OS_WINCE */ /* ** Lock a file region. */ static BOOL winLockFile( LPHANDLE phFile, DWORD flags, DWORD offsetLow, DWORD offsetHigh, DWORD numBytesLow, DWORD numBytesHigh ){ #if SQLITE_OS_WINCE /* ** NOTE: Windows CE is handled differently here due its lack of the Win32 ** API LockFile. */ return winceLockFile(phFile, offsetLow, offsetHigh, numBytesLow, numBytesHigh); #else if( osIsNT() ){ OVERLAPPED ovlp; memset(&ovlp, 0, sizeof(OVERLAPPED)); ovlp.Offset = offsetLow; ovlp.OffsetHigh = offsetHigh; return osLockFileEx(*phFile, flags, 0, numBytesLow, numBytesHigh, &ovlp); }else{ return osLockFile(*phFile, offsetLow, offsetHigh, numBytesLow, numBytesHigh); } #endif } /* ** Unlock a file region. */ static BOOL winUnlockFile( LPHANDLE phFile, DWORD offsetLow, DWORD offsetHigh, DWORD numBytesLow, DWORD numBytesHigh ){ #if SQLITE_OS_WINCE /* ** NOTE: Windows CE is handled differently here due its lack of the Win32 ** API UnlockFile. */ return winceUnlockFile(phFile, offsetLow, offsetHigh, numBytesLow, numBytesHigh); #else if( osIsNT() ){ OVERLAPPED ovlp; memset(&ovlp, 0, sizeof(OVERLAPPED)); ovlp.Offset = offsetLow; ovlp.OffsetHigh = offsetHigh; return osUnlockFileEx(*phFile, 0, numBytesLow, numBytesHigh, &ovlp); }else{ return osUnlockFile(*phFile, offsetLow, offsetHigh, numBytesLow, numBytesHigh); } #endif } /***************************************************************************** ** The next group of routines implement the I/O methods specified ** by the sqlite3_io_methods object. 
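*/
/*
** Illustrative sketch (not part of the original SQLite sources): the shape
** of an NT-family byte-range lock as issued by winLockFile() above.  The
** 64-bit starting offset travels in the OVERLAPPED structure rather than as
** separate arguments; demoLockRange is a hypothetical helper.
*/
#if 0  /* example only -- not compiled with this file */
static BOOL demoLockRange(HANDLE h, sqlite3_int64 iOfst, DWORD nByte){
  OVERLAPPED ovlp;
  memset(&ovlp, 0, sizeof(OVERLAPPED));
  ovlp.Offset     = (DWORD)(iOfst & 0xffffffff);
  ovlp.OffsetHigh = (DWORD)(iOfst>>32);
  return LockFileEx(h, LOCKFILE_EXCLUSIVE_LOCK|LOCKFILE_FAIL_IMMEDIATELY,
                    0, nByte, 0, &ovlp);
}
#endif
/*
**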
******************************************************************************/ /* ** Some Microsoft compilers lack this definition. */ #ifndef INVALID_SET_FILE_POINTER # define INVALID_SET_FILE_POINTER ((DWORD)-1) #endif /* ** Move the current position of the file handle passed as the first ** argument to offset iOffset within the file. If successful, return 0. ** Otherwise, set pFile->lastErrno and return non-zero. */ static int winSeekFile(winFile *pFile, sqlite3_int64 iOffset){ #if !SQLITE_OS_WINRT LONG upperBits; /* Most sig. 32 bits of new offset */ LONG lowerBits; /* Least sig. 32 bits of new offset */ DWORD dwRet; /* Value returned by SetFilePointer() */ DWORD lastErrno; /* Value returned by GetLastError() */ OSTRACE(("SEEK file=%p, offset=%lld\n", pFile->h, iOffset)); upperBits = (LONG)((iOffset>>32) & 0x7fffffff); lowerBits = (LONG)(iOffset & 0xffffffff); /* API oddity: If successful, SetFilePointer() returns a dword ** containing the lower 32-bits of the new file-offset. Or, if it fails, ** it returns INVALID_SET_FILE_POINTER. However according to MSDN, ** INVALID_SET_FILE_POINTER may also be a valid new offset. So to determine ** whether an error has actually occurred, it is also necessary to call ** GetLastError(). */ dwRet = osSetFilePointer(pFile->h, lowerBits, &upperBits, FILE_BEGIN); if( (dwRet==INVALID_SET_FILE_POINTER && ((lastErrno = osGetLastError())!=NO_ERROR)) ){ pFile->lastErrno = lastErrno; winLogError(SQLITE_IOERR_SEEK, pFile->lastErrno, "winSeekFile", pFile->zPath); OSTRACE(("SEEK file=%p, rc=SQLITE_IOERR_SEEK\n", pFile->h)); return 1; } OSTRACE(("SEEK file=%p, rc=SQLITE_OK\n", pFile->h)); return 0; #else /* ** Same as above, except that this implementation works for WinRT. */ LARGE_INTEGER x; /* The new offset */ BOOL bRet; /* Value returned by SetFilePointerEx() */ x.QuadPart = iOffset; bRet = osSetFilePointerEx(pFile->h, x, 0, FILE_BEGIN); if(!bRet){ pFile->lastErrno = osGetLastError(); winLogError(SQLITE_IOERR_SEEK, pFile->lastErrno, "winSeekFile", pFile->zPath); OSTRACE(("SEEK file=%p, rc=SQLITE_IOERR_SEEK\n", pFile->h)); return 1; } OSTRACE(("SEEK file=%p, rc=SQLITE_OK\n", pFile->h)); return 0; #endif } #if SQLITE_MAX_MMAP_SIZE>0 /* Forward references to VFS helper methods used for memory mapped files */ static int winMapfile(winFile*, sqlite3_int64); static int winUnmapfile(winFile*); #endif /* ** Close a file. ** ** It is reported that an attempt to close a handle might sometimes ** fail. This is a very unreasonable result, but Windows is notorious ** for being unreasonable so I do not doubt that it might happen. If ** the close fails, we pause for 100 milliseconds and try again. As ** many as MX_CLOSE_ATTEMPT attempts to close the handle are made before ** giving up and returning an error. 
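*/
/*
** Illustrative sketch (not part of the original SQLite sources): the
** error-detection pattern that winSeekFile() above relies on.  Because
** INVALID_SET_FILE_POINTER can also be the low dword of a legitimate 64-bit
** offset, a failure is reported only when GetLastError() confirms it.
*/
#if 0  /* example only -- not compiled with this file */
static int demoSeek(HANDLE h, sqlite3_int64 iOfst){
  LONG upper = (LONG)(iOfst>>32);
  DWORD dwRet = SetFilePointer(h, (LONG)(iOfst & 0xffffffff),
                               &upper, FILE_BEGIN);
  if( dwRet==INVALID_SET_FILE_POINTER && GetLastError()!=NO_ERROR ){
    return 1;   /* the seek really failed */
  }
  return 0;     /* the seek succeeded */
}
#endif
/*
**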
*/ #define MX_CLOSE_ATTEMPT 3 static int winClose(sqlite3_file *id){ int rc, cnt = 0; winFile *pFile = (winFile*)id; assert( id!=0 ); #ifndef SQLITE_OMIT_WAL assert( pFile->pShm==0 ); #endif assert( pFile->h!=NULL && pFile->h!=INVALID_HANDLE_VALUE ); OSTRACE(("CLOSE pid=%lu, pFile=%p, file=%p\n", osGetCurrentProcessId(), pFile, pFile->h)); #if SQLITE_MAX_MMAP_SIZE>0 winUnmapfile(pFile); #endif do{ rc = osCloseHandle(pFile->h); /* SimulateIOError( rc=0; cnt=MX_CLOSE_ATTEMPT; ); */ }while( rc==0 && ++cnt < MX_CLOSE_ATTEMPT && (sqlite3_win32_sleep(100), 1) ); #if SQLITE_OS_WINCE #define WINCE_DELETION_ATTEMPTS 3 winceDestroyLock(pFile); if( pFile->zDeleteOnClose ){ int cnt = 0; while( osDeleteFileW(pFile->zDeleteOnClose)==0 && osGetFileAttributesW(pFile->zDeleteOnClose)!=0xffffffff && cnt++ < WINCE_DELETION_ATTEMPTS ){ sqlite3_win32_sleep(100); /* Wait a little before trying again */ } sqlite3_free(pFile->zDeleteOnClose); } #endif if( rc ){ pFile->h = NULL; } OpenCounter(-1); OSTRACE(("CLOSE pid=%lu, pFile=%p, file=%p, rc=%s\n", osGetCurrentProcessId(), pFile, pFile->h, rc ? "ok" : "failed")); return rc ? SQLITE_OK : winLogError(SQLITE_IOERR_CLOSE, osGetLastError(), "winClose", pFile->zPath); } /* ** Read data from a file into a buffer. Return SQLITE_OK if all ** bytes were read successfully and SQLITE_IOERR if anything goes ** wrong. */ static int winRead( sqlite3_file *id, /* File to read from */ void *pBuf, /* Write content into this buffer */ int amt, /* Number of bytes to read */ sqlite3_int64 offset /* Begin reading at this offset */ ){ #if !SQLITE_OS_WINCE && !defined(SQLITE_WIN32_NO_OVERLAPPED) OVERLAPPED overlapped; /* The offset for ReadFile. */ #endif winFile *pFile = (winFile*)id; /* file handle */ DWORD nRead; /* Number of bytes actually read from file */ int nRetry = 0; /* Number of retrys */ assert( id!=0 ); assert( amt>0 ); assert( offset>=0 ); SimulateIOError(return SQLITE_IOERR_READ); OSTRACE(("READ pid=%lu, pFile=%p, file=%p, buffer=%p, amount=%d, " "offset=%lld, lock=%d\n", osGetCurrentProcessId(), pFile, pFile->h, pBuf, amt, offset, pFile->locktype)); #if SQLITE_MAX_MMAP_SIZE>0 /* Deal with as much of this read request as possible by transfering ** data from the memory mapping using memcpy(). 
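*/
/*
** Illustrative sketch (not part of the original SQLite sources): how a
** request that straddles the end of the mapped region is split.  The first
** nCopy bytes are served from the mapping with memcpy() and the remainder
** falls through to the ordinary ReadFile()/WriteFile() path below.
*/
#if 0  /* example only -- not compiled with this file */
static int demoMmapCopyLength(sqlite3_int64 mmapSize,
                              sqlite3_int64 offset, int amt){
  if( offset>=mmapSize ) return 0;                 /* nothing in the map    */
  if( offset+amt<=mmapSize ) return amt;           /* entirely in the map   */
  return (int)(mmapSize - offset);                 /* partial: nCopy bytes  */
}
#endif
/*
**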
*/ if( offsetmmapSize ){ if( offset+amt <= pFile->mmapSize ){ memcpy(pBuf, &((u8 *)(pFile->pMapRegion))[offset], amt); OSTRACE(("READ-MMAP pid=%lu, pFile=%p, file=%p, rc=SQLITE_OK\n", osGetCurrentProcessId(), pFile, pFile->h)); return SQLITE_OK; }else{ int nCopy = (int)(pFile->mmapSize - offset); memcpy(pBuf, &((u8 *)(pFile->pMapRegion))[offset], nCopy); pBuf = &((u8 *)pBuf)[nCopy]; amt -= nCopy; offset += nCopy; } } #endif #if SQLITE_OS_WINCE || defined(SQLITE_WIN32_NO_OVERLAPPED) if( winSeekFile(pFile, offset) ){ OSTRACE(("READ pid=%lu, pFile=%p, file=%p, rc=SQLITE_FULL\n", osGetCurrentProcessId(), pFile, pFile->h)); return SQLITE_FULL; } while( !osReadFile(pFile->h, pBuf, amt, &nRead, 0) ){ #else memset(&overlapped, 0, sizeof(OVERLAPPED)); overlapped.Offset = (LONG)(offset & 0xffffffff); overlapped.OffsetHigh = (LONG)((offset>>32) & 0x7fffffff); while( !osReadFile(pFile->h, pBuf, amt, &nRead, &overlapped) && osGetLastError()!=ERROR_HANDLE_EOF ){ #endif DWORD lastErrno; if( winRetryIoerr(&nRetry, &lastErrno) ) continue; pFile->lastErrno = lastErrno; OSTRACE(("READ pid=%lu, pFile=%p, file=%p, rc=SQLITE_IOERR_READ\n", osGetCurrentProcessId(), pFile, pFile->h)); return winLogError(SQLITE_IOERR_READ, pFile->lastErrno, "winRead", pFile->zPath); } winLogIoerr(nRetry, __LINE__); if( nRead<(DWORD)amt ){ /* Unread parts of the buffer must be zero-filled */ memset(&((char*)pBuf)[nRead], 0, amt-nRead); OSTRACE(("READ pid=%lu, pFile=%p, file=%p, rc=SQLITE_IOERR_SHORT_READ\n", osGetCurrentProcessId(), pFile, pFile->h)); return SQLITE_IOERR_SHORT_READ; } OSTRACE(("READ pid=%lu, pFile=%p, file=%p, rc=SQLITE_OK\n", osGetCurrentProcessId(), pFile, pFile->h)); return SQLITE_OK; } /* ** Write data from a buffer into a file. Return SQLITE_OK on success ** or some other error code on failure. */ static int winWrite( sqlite3_file *id, /* File to write into */ const void *pBuf, /* The bytes to be written */ int amt, /* Number of bytes to write */ sqlite3_int64 offset /* Offset into the file to begin writing at */ ){ int rc = 0; /* True if error has occurred, else false */ winFile *pFile = (winFile*)id; /* File handle */ int nRetry = 0; /* Number of retries */ assert( amt>0 ); assert( pFile ); SimulateIOError(return SQLITE_IOERR_WRITE); SimulateDiskfullError(return SQLITE_FULL); OSTRACE(("WRITE pid=%lu, pFile=%p, file=%p, buffer=%p, amount=%d, " "offset=%lld, lock=%d\n", osGetCurrentProcessId(), pFile, pFile->h, pBuf, amt, offset, pFile->locktype)); #if SQLITE_MAX_MMAP_SIZE>0 /* Deal with as much of this write request as possible by transfering ** data from the memory mapping using memcpy(). */ if( offsetmmapSize ){ if( offset+amt <= pFile->mmapSize ){ memcpy(&((u8 *)(pFile->pMapRegion))[offset], pBuf, amt); OSTRACE(("WRITE-MMAP pid=%lu, pFile=%p, file=%p, rc=SQLITE_OK\n", osGetCurrentProcessId(), pFile, pFile->h)); return SQLITE_OK; }else{ int nCopy = (int)(pFile->mmapSize - offset); memcpy(&((u8 *)(pFile->pMapRegion))[offset], pBuf, nCopy); pBuf = &((u8 *)pBuf)[nCopy]; amt -= nCopy; offset += nCopy; } } #endif #if SQLITE_OS_WINCE || defined(SQLITE_WIN32_NO_OVERLAPPED) rc = winSeekFile(pFile, offset); if( rc==0 ){ #else { #endif #if !SQLITE_OS_WINCE && !defined(SQLITE_WIN32_NO_OVERLAPPED) OVERLAPPED overlapped; /* The offset for WriteFile. 
*/ #endif u8 *aRem = (u8 *)pBuf; /* Data yet to be written */ int nRem = amt; /* Number of bytes yet to be written */ DWORD nWrite; /* Bytes written by each WriteFile() call */ DWORD lastErrno = NO_ERROR; /* Value returned by GetLastError() */ #if !SQLITE_OS_WINCE && !defined(SQLITE_WIN32_NO_OVERLAPPED) memset(&overlapped, 0, sizeof(OVERLAPPED)); overlapped.Offset = (LONG)(offset & 0xffffffff); overlapped.OffsetHigh = (LONG)((offset>>32) & 0x7fffffff); #endif while( nRem>0 ){ #if SQLITE_OS_WINCE || defined(SQLITE_WIN32_NO_OVERLAPPED) if( !osWriteFile(pFile->h, aRem, nRem, &nWrite, 0) ){ #else if( !osWriteFile(pFile->h, aRem, nRem, &nWrite, &overlapped) ){ #endif if( winRetryIoerr(&nRetry, &lastErrno) ) continue; break; } assert( nWrite==0 || nWrite<=(DWORD)nRem ); if( nWrite==0 || nWrite>(DWORD)nRem ){ lastErrno = osGetLastError(); break; } #if !SQLITE_OS_WINCE && !defined(SQLITE_WIN32_NO_OVERLAPPED) offset += nWrite; overlapped.Offset = (LONG)(offset & 0xffffffff); overlapped.OffsetHigh = (LONG)((offset>>32) & 0x7fffffff); #endif aRem += nWrite; nRem -= nWrite; } if( nRem>0 ){ pFile->lastErrno = lastErrno; rc = 1; } } if( rc ){ if( ( pFile->lastErrno==ERROR_HANDLE_DISK_FULL ) || ( pFile->lastErrno==ERROR_DISK_FULL )){ OSTRACE(("WRITE pid=%lu, pFile=%p, file=%p, rc=SQLITE_FULL\n", osGetCurrentProcessId(), pFile, pFile->h)); return winLogError(SQLITE_FULL, pFile->lastErrno, "winWrite1", pFile->zPath); } OSTRACE(("WRITE pid=%lu, pFile=%p, file=%p, rc=SQLITE_IOERR_WRITE\n", osGetCurrentProcessId(), pFile, pFile->h)); return winLogError(SQLITE_IOERR_WRITE, pFile->lastErrno, "winWrite2", pFile->zPath); }else{ winLogIoerr(nRetry, __LINE__); } OSTRACE(("WRITE pid=%lu, pFile=%p, file=%p, rc=SQLITE_OK\n", osGetCurrentProcessId(), pFile, pFile->h)); return SQLITE_OK; } /* ** Truncate an open file to a specified size */ static int winTruncate(sqlite3_file *id, sqlite3_int64 nByte){ winFile *pFile = (winFile*)id; /* File handle object */ int rc = SQLITE_OK; /* Return code for this function */ DWORD lastErrno; assert( pFile ); SimulateIOError(return SQLITE_IOERR_TRUNCATE); OSTRACE(("TRUNCATE pid=%lu, pFile=%p, file=%p, size=%lld, lock=%d\n", osGetCurrentProcessId(), pFile, pFile->h, nByte, pFile->locktype)); /* If the user has configured a chunk-size for this file, truncate the ** file so that it consists of an integer number of chunks (i.e. the ** actual file size after the operation may be larger than the requested ** size). */ if( pFile->szChunk>0 ){ nByte = ((nByte + pFile->szChunk - 1)/pFile->szChunk) * pFile->szChunk; } /* SetEndOfFile() returns non-zero when successful, or zero when it fails. */ if( winSeekFile(pFile, nByte) ){ rc = winLogError(SQLITE_IOERR_TRUNCATE, pFile->lastErrno, "winTruncate1", pFile->zPath); }else if( 0==osSetEndOfFile(pFile->h) && ((lastErrno = osGetLastError())!=ERROR_USER_MAPPED_FILE) ){ pFile->lastErrno = lastErrno; rc = winLogError(SQLITE_IOERR_TRUNCATE, pFile->lastErrno, "winTruncate2", pFile->zPath); } #if SQLITE_MAX_MMAP_SIZE>0 /* If the file was truncated to a size smaller than the currently ** mapped region, reduce the effective mapping size as well. SQLite will ** use read() and write() to access data beyond this point from now on. */ if( pFile->pMapRegion && nBytemmapSize ){ pFile->mmapSize = nByte; } #endif OSTRACE(("TRUNCATE pid=%lu, pFile=%p, file=%p, rc=%s\n", osGetCurrentProcessId(), pFile, pFile->h, sqlite3ErrName(rc))); return rc; } #ifdef SQLITE_TEST /* ** Count the number of fullsyncs and normal syncs. 
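**
** (Illustrative aside, not part of the original sources: the chunk-size
** rounding performed by winTruncate() above is ordinary round-up-to-a-
** multiple arithmetic.  With szChunk = 4096:
**
**      nByte = ((nByte + szChunk - 1)/szChunk) * szChunk;
**      e.g.  nByte = 1 or 4096  ->  4096,   nByte = 4097  ->  8192.)
**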
This is used to test ** that syncs and fullsyncs are occuring at the right times. */ SQLITE_API int sqlite3_sync_count = 0; SQLITE_API int sqlite3_fullsync_count = 0; #endif /* ** Make sure all writes to a particular file are committed to disk. */ static int winSync(sqlite3_file *id, int flags){ #ifndef SQLITE_NO_SYNC /* ** Used only when SQLITE_NO_SYNC is not defined. */ BOOL rc; #endif #if !defined(NDEBUG) || !defined(SQLITE_NO_SYNC) || \ defined(SQLITE_HAVE_OS_TRACE) /* ** Used when SQLITE_NO_SYNC is not defined and by the assert() and/or ** OSTRACE() macros. */ winFile *pFile = (winFile*)id; #else UNUSED_PARAMETER(id); #endif assert( pFile ); /* Check that one of SQLITE_SYNC_NORMAL or FULL was passed */ assert((flags&0x0F)==SQLITE_SYNC_NORMAL || (flags&0x0F)==SQLITE_SYNC_FULL ); /* Unix cannot, but some systems may return SQLITE_FULL from here. This ** line is to test that doing so does not cause any problems. */ SimulateDiskfullError( return SQLITE_FULL ); OSTRACE(("SYNC pid=%lu, pFile=%p, file=%p, flags=%x, lock=%d\n", osGetCurrentProcessId(), pFile, pFile->h, flags, pFile->locktype)); #ifndef SQLITE_TEST UNUSED_PARAMETER(flags); #else if( (flags&0x0F)==SQLITE_SYNC_FULL ){ sqlite3_fullsync_count++; } sqlite3_sync_count++; #endif /* If we compiled with the SQLITE_NO_SYNC flag, then syncing is a ** no-op */ #ifdef SQLITE_NO_SYNC OSTRACE(("SYNC-NOP pid=%lu, pFile=%p, file=%p, rc=SQLITE_OK\n", osGetCurrentProcessId(), pFile, pFile->h)); return SQLITE_OK; #else #if SQLITE_MAX_MMAP_SIZE>0 if( pFile->pMapRegion ){ if( osFlushViewOfFile(pFile->pMapRegion, 0) ){ OSTRACE(("SYNC-MMAP pid=%lu, pFile=%p, pMapRegion=%p, " "rc=SQLITE_OK\n", osGetCurrentProcessId(), pFile, pFile->pMapRegion)); }else{ pFile->lastErrno = osGetLastError(); OSTRACE(("SYNC-MMAP pid=%lu, pFile=%p, pMapRegion=%p, " "rc=SQLITE_IOERR_MMAP\n", osGetCurrentProcessId(), pFile, pFile->pMapRegion)); return winLogError(SQLITE_IOERR_MMAP, pFile->lastErrno, "winSync1", pFile->zPath); } } #endif rc = osFlushFileBuffers(pFile->h); SimulateIOError( rc=FALSE ); if( rc ){ OSTRACE(("SYNC pid=%lu, pFile=%p, file=%p, rc=SQLITE_OK\n", osGetCurrentProcessId(), pFile, pFile->h)); return SQLITE_OK; }else{ pFile->lastErrno = osGetLastError(); OSTRACE(("SYNC pid=%lu, pFile=%p, file=%p, rc=SQLITE_IOERR_FSYNC\n", osGetCurrentProcessId(), pFile, pFile->h)); return winLogError(SQLITE_IOERR_FSYNC, pFile->lastErrno, "winSync2", pFile->zPath); } #endif } /* ** Determine the current size of a file in bytes */ static int winFileSize(sqlite3_file *id, sqlite3_int64 *pSize){ winFile *pFile = (winFile*)id; int rc = SQLITE_OK; assert( id!=0 ); assert( pSize!=0 ); SimulateIOError(return SQLITE_IOERR_FSTAT); OSTRACE(("SIZE file=%p, pSize=%p\n", pFile->h, pSize)); #if SQLITE_OS_WINRT { FILE_STANDARD_INFO info; if( osGetFileInformationByHandleEx(pFile->h, FileStandardInfo, &info, sizeof(info)) ){ *pSize = info.EndOfFile.QuadPart; }else{ pFile->lastErrno = osGetLastError(); rc = winLogError(SQLITE_IOERR_FSTAT, pFile->lastErrno, "winFileSize", pFile->zPath); } } #else { DWORD upperBits; DWORD lowerBits; DWORD lastErrno; lowerBits = osGetFileSize(pFile->h, &upperBits); *pSize = (((sqlite3_int64)upperBits)<<32) + lowerBits; if( (lowerBits == INVALID_FILE_SIZE) && ((lastErrno = osGetLastError())!=NO_ERROR) ){ pFile->lastErrno = lastErrno; rc = winLogError(SQLITE_IOERR_FSTAT, pFile->lastErrno, "winFileSize", pFile->zPath); } } #endif OSTRACE(("SIZE file=%p, pSize=%p, *pSize=%lld, rc=%s\n", pFile->h, pSize, *pSize, sqlite3ErrName(rc))); return rc; } /* ** 
LOCKFILE_FAIL_IMMEDIATELY is undefined on some Windows systems. */ #ifndef LOCKFILE_FAIL_IMMEDIATELY # define LOCKFILE_FAIL_IMMEDIATELY 1 #endif #ifndef LOCKFILE_EXCLUSIVE_LOCK # define LOCKFILE_EXCLUSIVE_LOCK 2 #endif /* ** Historically, SQLite has used both the LockFile and LockFileEx functions. ** When the LockFile function was used, it was always expected to fail ** immediately if the lock could not be obtained. Also, it always expected to ** obtain an exclusive lock. These flags are used with the LockFileEx function ** and reflect those expectations; therefore, they should not be changed. */ #ifndef SQLITE_LOCKFILE_FLAGS # define SQLITE_LOCKFILE_FLAGS (LOCKFILE_FAIL_IMMEDIATELY | \ LOCKFILE_EXCLUSIVE_LOCK) #endif /* ** Currently, SQLite never calls the LockFileEx function without wanting the ** call to fail immediately if the lock cannot be obtained. */ #ifndef SQLITE_LOCKFILEEX_FLAGS # define SQLITE_LOCKFILEEX_FLAGS (LOCKFILE_FAIL_IMMEDIATELY) #endif /* ** Acquire a reader lock. ** Different API routines are called depending on whether or not this ** is Win9x or WinNT. */ static int winGetReadLock(winFile *pFile){ int res; OSTRACE(("READ-LOCK file=%p, lock=%d\n", pFile->h, pFile->locktype)); if( osIsNT() ){ #if SQLITE_OS_WINCE /* ** NOTE: Windows CE is handled differently here due its lack of the Win32 ** API LockFileEx. */ res = winceLockFile(&pFile->h, SHARED_FIRST, 0, 1, 0); #else res = winLockFile(&pFile->h, SQLITE_LOCKFILEEX_FLAGS, SHARED_FIRST, 0, SHARED_SIZE, 0); #endif } #ifdef SQLITE_WIN32_HAS_ANSI else{ int lk; sqlite3_randomness(sizeof(lk), &lk); pFile->sharedLockByte = (short)((lk & 0x7fffffff)%(SHARED_SIZE - 1)); res = winLockFile(&pFile->h, SQLITE_LOCKFILE_FLAGS, SHARED_FIRST+pFile->sharedLockByte, 0, 1, 0); } #endif if( res == 0 ){ pFile->lastErrno = osGetLastError(); /* No need to log a failure to lock */ } OSTRACE(("READ-LOCK file=%p, result=%d\n", pFile->h, res)); return res; } /* ** Undo a readlock */ static int winUnlockReadLock(winFile *pFile){ int res; DWORD lastErrno; OSTRACE(("READ-UNLOCK file=%p, lock=%d\n", pFile->h, pFile->locktype)); if( osIsNT() ){ res = winUnlockFile(&pFile->h, SHARED_FIRST, 0, SHARED_SIZE, 0); } #ifdef SQLITE_WIN32_HAS_ANSI else{ res = winUnlockFile(&pFile->h, SHARED_FIRST+pFile->sharedLockByte, 0, 1, 0); } #endif if( res==0 && ((lastErrno = osGetLastError())!=ERROR_NOT_LOCKED) ){ pFile->lastErrno = lastErrno; winLogError(SQLITE_IOERR_UNLOCK, pFile->lastErrno, "winUnlockReadLock", pFile->zPath); } OSTRACE(("READ-UNLOCK file=%p, result=%d\n", pFile->h, res)); return res; } /* ** Lock the file with the lock specified by parameter locktype - one ** of the following: ** ** (1) SHARED_LOCK ** (2) RESERVED_LOCK ** (3) PENDING_LOCK ** (4) EXCLUSIVE_LOCK ** ** Sometimes when requesting one lock state, additional lock states ** are inserted in between. The locking might fail on one of the later ** transitions leaving the lock state different from what it started but ** still short of its goal. The following chart shows the allowed ** transitions and the inserted intermediate states: ** ** UNLOCKED -> SHARED ** SHARED -> RESERVED ** SHARED -> (PENDING) -> EXCLUSIVE ** RESERVED -> (PENDING) -> EXCLUSIVE ** PENDING -> EXCLUSIVE ** ** This routine will only increase a lock. The winUnlock() routine ** erases all locks at once and returns us immediately to locking level 0. ** It is not possible to lower the locking level one step at a time. You ** must go straight to locking level 0. 
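**
** For illustration only: a typical write transaction driven through this
** VFS escalates and then releases locks roughly as follows, where "id" is
** an sqlite3_file opened by winOpen() (a sketch, not code from this file):
**
**   winLock(id, SHARED_LOCK);      (start reading)
**   winLock(id, RESERVED_LOCK);    (intend to write)
**   winLock(id, EXCLUSIVE_LOCK);   (passes through PENDING internally)
**   winUnlock(id, SHARED_LOCK);    (after commit, drop back to SHARED)
**   winUnlock(id, NO_LOCK);        (release everything)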
*/ static int winLock(sqlite3_file *id, int locktype){ int rc = SQLITE_OK; /* Return code from subroutines */ int res = 1; /* Result of a Windows lock call */ int newLocktype; /* Set pFile->locktype to this value before exiting */ int gotPendingLock = 0;/* True if we acquired a PENDING lock this time */ winFile *pFile = (winFile*)id; DWORD lastErrno = NO_ERROR; assert( id!=0 ); OSTRACE(("LOCK file=%p, oldLock=%d(%d), newLock=%d\n", pFile->h, pFile->locktype, pFile->sharedLockByte, locktype)); /* If there is already a lock of this type or more restrictive on the ** OsFile, do nothing. Don't use the end_lock: exit path, as ** sqlite3OsEnterMutex() hasn't been called yet. */ if( pFile->locktype>=locktype ){ OSTRACE(("LOCK-HELD file=%p, rc=SQLITE_OK\n", pFile->h)); return SQLITE_OK; } /* Do not allow any kind of write-lock on a read-only database */ if( (pFile->ctrlFlags & WINFILE_RDONLY)!=0 && locktype>=RESERVED_LOCK ){ return SQLITE_IOERR_LOCK; } /* Make sure the locking sequence is correct */ assert( pFile->locktype!=NO_LOCK || locktype==SHARED_LOCK ); assert( locktype!=PENDING_LOCK ); assert( locktype!=RESERVED_LOCK || pFile->locktype==SHARED_LOCK ); /* Lock the PENDING_LOCK byte if we need to acquire a PENDING lock or ** a SHARED lock. If we are acquiring a SHARED lock, the acquisition of ** the PENDING_LOCK byte is temporary. */ newLocktype = pFile->locktype; if( (pFile->locktype==NO_LOCK) || ( (locktype==EXCLUSIVE_LOCK) && (pFile->locktype==RESERVED_LOCK)) ){ int cnt = 3; while( cnt-->0 && (res = winLockFile(&pFile->h, SQLITE_LOCKFILE_FLAGS, PENDING_BYTE, 0, 1, 0))==0 ){ /* Try 3 times to get the pending lock. This is needed to work ** around problems caused by indexing and/or anti-virus software on ** Windows systems. ** If you are using this code as a model for alternative VFSes, do not ** copy this retry logic. It is a hack intended for Windows only. */ lastErrno = osGetLastError(); OSTRACE(("LOCK-PENDING-FAIL file=%p, count=%d, result=%d\n", pFile->h, cnt, res)); if( lastErrno==ERROR_INVALID_HANDLE ){ pFile->lastErrno = lastErrno; rc = SQLITE_IOERR_LOCK; OSTRACE(("LOCK-FAIL file=%p, count=%d, rc=%s\n", pFile->h, cnt, sqlite3ErrName(rc))); return rc; } if( cnt ) sqlite3_win32_sleep(1); } gotPendingLock = res; if( !res ){ lastErrno = osGetLastError(); } } /* Acquire a shared lock */ if( locktype==SHARED_LOCK && res ){ assert( pFile->locktype==NO_LOCK ); res = winGetReadLock(pFile); if( res ){ newLocktype = SHARED_LOCK; }else{ lastErrno = osGetLastError(); } } /* Acquire a RESERVED lock */ if( locktype==RESERVED_LOCK && res ){ assert( pFile->locktype==SHARED_LOCK ); res = winLockFile(&pFile->h, SQLITE_LOCKFILE_FLAGS, RESERVED_BYTE, 0, 1, 0); if( res ){ newLocktype = RESERVED_LOCK; }else{ lastErrno = osGetLastError(); } } /* Acquire a PENDING lock */ if( locktype==EXCLUSIVE_LOCK && res ){ newLocktype = PENDING_LOCK; gotPendingLock = 0; } /* Acquire an EXCLUSIVE lock */ if( locktype==EXCLUSIVE_LOCK && res ){ assert( pFile->locktype>=SHARED_LOCK ); res = winUnlockReadLock(pFile); res = winLockFile(&pFile->h, SQLITE_LOCKFILE_FLAGS, SHARED_FIRST, 0, SHARED_SIZE, 0); if( res ){ newLocktype = EXCLUSIVE_LOCK; }else{ lastErrno = osGetLastError(); winGetReadLock(pFile); } } /* If we are holding a PENDING lock that ought to be released, then ** release it now. */ if( gotPendingLock && locktype==SHARED_LOCK ){ winUnlockFile(&pFile->h, PENDING_BYTE, 0, 1, 0); } /* Update the state of the lock has held in the file descriptor then ** return the appropriate result code. 
*/ if( res ){ rc = SQLITE_OK; }else{ pFile->lastErrno = lastErrno; rc = SQLITE_BUSY; OSTRACE(("LOCK-FAIL file=%p, wanted=%d, got=%d\n", pFile->h, locktype, newLocktype)); } pFile->locktype = (u8)newLocktype; OSTRACE(("LOCK file=%p, lock=%d, rc=%s\n", pFile->h, pFile->locktype, sqlite3ErrName(rc))); return rc; } /* ** This routine checks if there is a RESERVED lock held on the specified ** file by this or any other process. If such a lock is held, return ** non-zero, otherwise zero. */ static int winCheckReservedLock(sqlite3_file *id, int *pResOut){ int res; winFile *pFile = (winFile*)id; SimulateIOError( return SQLITE_IOERR_CHECKRESERVEDLOCK; ); OSTRACE(("TEST-WR-LOCK file=%p, pResOut=%p\n", pFile->h, pResOut)); assert( id!=0 ); if( pFile->locktype>=RESERVED_LOCK ){ res = 1; OSTRACE(("TEST-WR-LOCK file=%p, result=%d (local)\n", pFile->h, res)); }else{ res = winLockFile(&pFile->h, SQLITE_LOCKFILEEX_FLAGS,RESERVED_BYTE, 0, 1, 0); if( res ){ winUnlockFile(&pFile->h, RESERVED_BYTE, 0, 1, 0); } res = !res; OSTRACE(("TEST-WR-LOCK file=%p, result=%d (remote)\n", pFile->h, res)); } *pResOut = res; OSTRACE(("TEST-WR-LOCK file=%p, pResOut=%p, *pResOut=%d, rc=SQLITE_OK\n", pFile->h, pResOut, *pResOut)); return SQLITE_OK; } /* ** Lower the locking level on file descriptor id to locktype. locktype ** must be either NO_LOCK or SHARED_LOCK. ** ** If the locking level of the file descriptor is already at or below ** the requested locking level, this routine is a no-op. ** ** It is not possible for this routine to fail if the second argument ** is NO_LOCK. If the second argument is SHARED_LOCK then this routine ** might return SQLITE_IOERR; */ static int winUnlock(sqlite3_file *id, int locktype){ int type; winFile *pFile = (winFile*)id; int rc = SQLITE_OK; assert( pFile!=0 ); assert( locktype<=SHARED_LOCK ); OSTRACE(("UNLOCK file=%p, oldLock=%d(%d), newLock=%d\n", pFile->h, pFile->locktype, pFile->sharedLockByte, locktype)); type = pFile->locktype; if( type>=EXCLUSIVE_LOCK ){ winUnlockFile(&pFile->h, SHARED_FIRST, 0, SHARED_SIZE, 0); if( locktype==SHARED_LOCK && !winGetReadLock(pFile) ){ /* This should never happen. We should always be able to ** reacquire the read lock */ rc = winLogError(SQLITE_IOERR_UNLOCK, osGetLastError(), "winUnlock", pFile->zPath); } } if( type>=RESERVED_LOCK ){ winUnlockFile(&pFile->h, RESERVED_BYTE, 0, 1, 0); } if( locktype==NO_LOCK && type>=SHARED_LOCK ){ winUnlockReadLock(pFile); } if( type>=PENDING_LOCK ){ winUnlockFile(&pFile->h, PENDING_BYTE, 0, 1, 0); } pFile->locktype = (u8)locktype; OSTRACE(("UNLOCK file=%p, lock=%d, rc=%s\n", pFile->h, pFile->locktype, sqlite3ErrName(rc))); return rc; } /* ** If *pArg is initially negative then this is a query. Set *pArg to ** 1 or 0 depending on whether or not bit mask of pFile->ctrlFlags is set. ** ** If *pArg is 0 or 1, then clear or set the mask bit of pFile->ctrlFlags. */ static void winModeBit(winFile *pFile, unsigned char mask, int *pArg){ if( *pArg<0 ){ *pArg = (pFile->ctrlFlags & mask)!=0; }else if( (*pArg)==0 ){ pFile->ctrlFlags &= ~mask; }else{ pFile->ctrlFlags |= mask; } } /* Forward references to VFS helper methods used for temporary files */ static int winGetTempname(sqlite3_vfs *, char **); static int winIsDir(const void *); static BOOL winIsDriveLetterAndColon(const char *); /* ** Control and query of the open file handle. 
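**
** Applications normally reach this handler through the public
** sqlite3_file_control() interface. As a sketch (the "db" handle below is
** hypothetical), this sets a one-megabyte chunk size on the main database:
**
**   int szChunk = 1024*1024;
**   sqlite3_file_control(db, "main", SQLITE_FCNTL_CHUNK_SIZE, &szChunk);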
*/ static int winFileControl(sqlite3_file *id, int op, void *pArg){ winFile *pFile = (winFile*)id; OSTRACE(("FCNTL file=%p, op=%d, pArg=%p\n", pFile->h, op, pArg)); switch( op ){ case SQLITE_FCNTL_LOCKSTATE: { *(int*)pArg = pFile->locktype; OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h)); return SQLITE_OK; } case SQLITE_LAST_ERRNO: { *(int*)pArg = (int)pFile->lastErrno; OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h)); return SQLITE_OK; } case SQLITE_FCNTL_CHUNK_SIZE: { pFile->szChunk = *(int *)pArg; OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h)); return SQLITE_OK; } case SQLITE_FCNTL_SIZE_HINT: { if( pFile->szChunk>0 ){ sqlite3_int64 oldSz; int rc = winFileSize(id, &oldSz); if( rc==SQLITE_OK ){ sqlite3_int64 newSz = *(sqlite3_int64*)pArg; if( newSz>oldSz ){ SimulateIOErrorBenign(1); rc = winTruncate(id, newSz); SimulateIOErrorBenign(0); } } OSTRACE(("FCNTL file=%p, rc=%s\n", pFile->h, sqlite3ErrName(rc))); return rc; } OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h)); return SQLITE_OK; } case SQLITE_FCNTL_PERSIST_WAL: { winModeBit(pFile, WINFILE_PERSIST_WAL, (int*)pArg); OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h)); return SQLITE_OK; } case SQLITE_FCNTL_POWERSAFE_OVERWRITE: { winModeBit(pFile, WINFILE_PSOW, (int*)pArg); OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h)); return SQLITE_OK; } case SQLITE_FCNTL_VFSNAME: { *(char**)pArg = sqlite3_mprintf("%s", pFile->pVfs->zName); OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h)); return SQLITE_OK; } case SQLITE_FCNTL_WIN32_AV_RETRY: { int *a = (int*)pArg; if( a[0]>0 ){ winIoerrRetry = a[0]; }else{ a[0] = winIoerrRetry; } if( a[1]>0 ){ winIoerrRetryDelay = a[1]; }else{ a[1] = winIoerrRetryDelay; } OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h)); return SQLITE_OK; } #ifdef SQLITE_TEST case SQLITE_FCNTL_WIN32_SET_HANDLE: { LPHANDLE phFile = (LPHANDLE)pArg; HANDLE hOldFile = pFile->h; pFile->h = *phFile; *phFile = hOldFile; OSTRACE(("FCNTL oldFile=%p, newFile=%p, rc=SQLITE_OK\n", hOldFile, pFile->h)); return SQLITE_OK; } #endif case SQLITE_FCNTL_TEMPFILENAME: { char *zTFile = 0; int rc = winGetTempname(pFile->pVfs, &zTFile); if( rc==SQLITE_OK ){ *(char**)pArg = zTFile; } OSTRACE(("FCNTL file=%p, rc=%s\n", pFile->h, sqlite3ErrName(rc))); return rc; } #if SQLITE_MAX_MMAP_SIZE>0 case SQLITE_FCNTL_MMAP_SIZE: { i64 newLimit = *(i64*)pArg; int rc = SQLITE_OK; if( newLimit>sqlite3GlobalConfig.mxMmap ){ newLimit = sqlite3GlobalConfig.mxMmap; } *(i64*)pArg = pFile->mmapSizeMax; if( newLimit>=0 && newLimit!=pFile->mmapSizeMax && pFile->nFetchOut==0 ){ pFile->mmapSizeMax = newLimit; if( pFile->mmapSize>0 ){ winUnmapfile(pFile); rc = winMapfile(pFile, -1); } } OSTRACE(("FCNTL file=%p, rc=%s\n", pFile->h, sqlite3ErrName(rc))); return rc; } #endif } OSTRACE(("FCNTL file=%p, rc=SQLITE_NOTFOUND\n", pFile->h)); return SQLITE_NOTFOUND; } /* ** Return the sector size in bytes of the underlying block device for ** the specified file. This is almost always 512 bytes, but may be ** larger for some devices. ** ** SQLite code assumes this function cannot fail. It also assumes that ** if two files are created in the same file-system directory (i.e. ** a database and its journal file) that the sector size will be the ** same for both. */ static int winSectorSize(sqlite3_file *id){ (void)id; return SQLITE_DEFAULT_SECTOR_SIZE; } /* ** Return a vector of device characteristics. 
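**
** Of the bits returned below, only SQLITE_IOCAP_POWERSAFE_OVERWRITE varies
** per file handle; it follows the WINFILE_PSOW flag, which winOpen() seeds
** from the "psow" URI parameter. A sketch of querying it afterwards through
** the SQLITE_FCNTL_POWERSAFE_OVERWRITE file control (hypothetical "db"):
**
**   int bPsow = -1;    (a negative value asks for the current setting)
**   sqlite3_file_control(db, "main", SQLITE_FCNTL_POWERSAFE_OVERWRITE, &bPsow);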
*/ static int winDeviceCharacteristics(sqlite3_file *id){ winFile *p = (winFile*)id; return SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN | ((p->ctrlFlags & WINFILE_PSOW)?SQLITE_IOCAP_POWERSAFE_OVERWRITE:0); } /* ** Windows will only let you create file view mappings ** on allocation size granularity boundaries. ** During sqlite3_os_init() we do a GetSystemInfo() ** to get the granularity size. */ static SYSTEM_INFO winSysInfo; #ifndef SQLITE_OMIT_WAL /* ** Helper functions to obtain and relinquish the global mutex. The ** global mutex is used to protect the winLockInfo objects used by ** this file, all of which may be shared by multiple threads. ** ** Function winShmMutexHeld() is used to assert() that the global mutex ** is held when required. This function is only used as part of assert() ** statements. e.g. ** ** winShmEnterMutex() ** assert( winShmMutexHeld() ); ** winShmLeaveMutex() */ static void winShmEnterMutex(void){ sqlite3_mutex_enter(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_VFS1)); } static void winShmLeaveMutex(void){ sqlite3_mutex_leave(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_VFS1)); } #ifndef NDEBUG static int winShmMutexHeld(void) { return sqlite3_mutex_held(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_VFS1)); } #endif /* ** Object used to represent a single file opened and mmapped to provide ** shared memory. When multiple threads all reference the same ** log-summary, each thread has its own winFile object, but they all ** point to a single instance of this object. In other words, each ** log-summary is opened only once per process. ** ** winShmMutexHeld() must be true when creating or destroying ** this object or while reading or writing the following fields: ** ** nRef ** pNext ** ** The following fields are read-only after the object is created: ** ** fid ** zFilename ** ** Either winShmNode.mutex must be held or winShmNode.nRef==0 and ** winShmMutexHeld() is true when reading or writing any other field ** in this structure. ** */ struct winShmNode { sqlite3_mutex *mutex; /* Mutex to access this object */ char *zFilename; /* Name of the file */ winFile hFile; /* File handle from winOpen */ int szRegion; /* Size of shared-memory regions */ int nRegion; /* Size of array apRegion */ struct ShmRegion { HANDLE hMap; /* File handle from CreateFileMapping */ void *pMap; } *aRegion; DWORD lastErrno; /* The Windows errno from the last I/O error */ int nRef; /* Number of winShm objects pointing to this */ winShm *pFirst; /* All winShm objects pointing to this */ winShmNode *pNext; /* Next in list of all winShmNode objects */ #if defined(SQLITE_DEBUG) || defined(SQLITE_HAVE_OS_TRACE) u8 nextShmId; /* Next available winShm.id value */ #endif }; /* ** A global array of all winShmNode objects. ** ** The winShmMutexHeld() must be true while reading or writing this list. */ static winShmNode *winShmNodeList = 0; /* ** Structure used internally by this VFS to record the state of an ** open shared memory connection. ** ** The following fields are initialized when this object is created and ** are read-only thereafter: ** ** winShm.pShmNode ** winShm.id ** ** All other fields are read/write. The winShm.pShmNode->mutex must be held ** while accessing any read/write fields. 
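**
** For example, winShmLock() below follows this rule when it updates the
** lock masks of a connection (shown here only as a sketch):
**
**   sqlite3_mutex_enter(pShmNode->mutex);
**   p->sharedMask |= mask;         (or clear bits when unlocking)
**   sqlite3_mutex_leave(pShmNode->mutex);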
*/ struct winShm { winShmNode *pShmNode; /* The underlying winShmNode object */ winShm *pNext; /* Next winShm with the same winShmNode */ u8 hasMutex; /* True if holding the winShmNode mutex */ u16 sharedMask; /* Mask of shared locks held */ u16 exclMask; /* Mask of exclusive locks held */ #if defined(SQLITE_DEBUG) || defined(SQLITE_HAVE_OS_TRACE) u8 id; /* Id of this connection with its winShmNode */ #endif }; /* ** Constants used for locking */ #define WIN_SHM_BASE ((22+SQLITE_SHM_NLOCK)*4) /* first lock byte */ #define WIN_SHM_DMS (WIN_SHM_BASE+SQLITE_SHM_NLOCK) /* deadman switch */ /* ** Apply advisory locks for all n bytes beginning at ofst. */ #define _SHM_UNLCK 1 #define _SHM_RDLCK 2 #define _SHM_WRLCK 3 static int winShmSystemLock( winShmNode *pFile, /* Apply locks to this open shared-memory segment */ int lockType, /* _SHM_UNLCK, _SHM_RDLCK, or _SHM_WRLCK */ int ofst, /* Offset to first byte to be locked/unlocked */ int nByte /* Number of bytes to lock or unlock */ ){ int rc = 0; /* Result code form Lock/UnlockFileEx() */ /* Access to the winShmNode object is serialized by the caller */ assert( sqlite3_mutex_held(pFile->mutex) || pFile->nRef==0 ); OSTRACE(("SHM-LOCK file=%p, lock=%d, offset=%d, size=%d\n", pFile->hFile.h, lockType, ofst, nByte)); /* Release/Acquire the system-level lock */ if( lockType==_SHM_UNLCK ){ rc = winUnlockFile(&pFile->hFile.h, ofst, 0, nByte, 0); }else{ /* Initialize the locking parameters */ DWORD dwFlags = LOCKFILE_FAIL_IMMEDIATELY; if( lockType == _SHM_WRLCK ) dwFlags |= LOCKFILE_EXCLUSIVE_LOCK; rc = winLockFile(&pFile->hFile.h, dwFlags, ofst, 0, nByte, 0); } if( rc!= 0 ){ rc = SQLITE_OK; }else{ pFile->lastErrno = osGetLastError(); rc = SQLITE_BUSY; } OSTRACE(("SHM-LOCK file=%p, func=%s, errno=%lu, rc=%s\n", pFile->hFile.h, (lockType == _SHM_UNLCK) ? "winUnlockFile" : "winLockFile", pFile->lastErrno, sqlite3ErrName(rc))); return rc; } /* Forward references to VFS methods */ static int winOpen(sqlite3_vfs*,const char*,sqlite3_file*,int,int*); static int winDelete(sqlite3_vfs *,const char*,int); /* ** Purge the winShmNodeList list of all entries with winShmNode.nRef==0. ** ** This is not a VFS shared-memory method; it is a utility function called ** by VFS shared-memory methods. */ static void winShmPurge(sqlite3_vfs *pVfs, int deleteFlag){ winShmNode **pp; winShmNode *p; assert( winShmMutexHeld() ); OSTRACE(("SHM-PURGE pid=%lu, deleteFlag=%d\n", osGetCurrentProcessId(), deleteFlag)); pp = &winShmNodeList; while( (p = *pp)!=0 ){ if( p->nRef==0 ){ int i; if( p->mutex ){ sqlite3_mutex_free(p->mutex); } for(i=0; inRegion; i++){ BOOL bRc = osUnmapViewOfFile(p->aRegion[i].pMap); OSTRACE(("SHM-PURGE-UNMAP pid=%lu, region=%d, rc=%s\n", osGetCurrentProcessId(), i, bRc ? "ok" : "failed")); UNUSED_VARIABLE_VALUE(bRc); bRc = osCloseHandle(p->aRegion[i].hMap); OSTRACE(("SHM-PURGE-CLOSE pid=%lu, region=%d, rc=%s\n", osGetCurrentProcessId(), i, bRc ? "ok" : "failed")); UNUSED_VARIABLE_VALUE(bRc); } if( p->hFile.h!=NULL && p->hFile.h!=INVALID_HANDLE_VALUE ){ SimulateIOErrorBenign(1); winClose((sqlite3_file *)&p->hFile); SimulateIOErrorBenign(0); } if( deleteFlag ){ SimulateIOErrorBenign(1); sqlite3BeginBenignMalloc(); winDelete(pVfs, p->zFilename, 0); sqlite3EndBenignMalloc(); SimulateIOErrorBenign(0); } *pp = p->pNext; sqlite3_free(p->aRegion); sqlite3_free(p); }else{ pp = &p->pNext; } } } /* ** Open the shared-memory area associated with database file pDbFd. 
** ** When opening a new shared-memory file, if no other instances of that ** file are currently open, in this process or in other processes, then ** the file must be truncated to zero length or have its header cleared. */ static int winOpenSharedMemory(winFile *pDbFd){ struct winShm *p; /* The connection to be opened */ struct winShmNode *pShmNode = 0; /* The underlying mmapped file */ int rc; /* Result code */ struct winShmNode *pNew; /* Newly allocated winShmNode */ int nName; /* Size of zName in bytes */ assert( pDbFd->pShm==0 ); /* Not previously opened */ /* Allocate space for the new sqlite3_shm object. Also speculatively ** allocate space for a new winShmNode and filename. */ p = sqlite3MallocZero( sizeof(*p) ); if( p==0 ) return SQLITE_IOERR_NOMEM; nName = sqlite3Strlen30(pDbFd->zPath); pNew = sqlite3MallocZero( sizeof(*pShmNode) + nName + 17 ); if( pNew==0 ){ sqlite3_free(p); return SQLITE_IOERR_NOMEM; } pNew->zFilename = (char*)&pNew[1]; sqlite3_snprintf(nName+15, pNew->zFilename, "%s-shm", pDbFd->zPath); sqlite3FileSuffix3(pDbFd->zPath, pNew->zFilename); /* Look to see if there is an existing winShmNode that can be used. ** If no matching winShmNode currently exists, create a new one. */ winShmEnterMutex(); for(pShmNode = winShmNodeList; pShmNode; pShmNode=pShmNode->pNext){ /* TBD need to come up with better match here. Perhaps ** use FILE_ID_BOTH_DIR_INFO Structure. */ if( sqlite3StrICmp(pShmNode->zFilename, pNew->zFilename)==0 ) break; } if( pShmNode ){ sqlite3_free(pNew); }else{ pShmNode = pNew; pNew = 0; ((winFile*)(&pShmNode->hFile))->h = INVALID_HANDLE_VALUE; pShmNode->pNext = winShmNodeList; winShmNodeList = pShmNode; pShmNode->mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST); if( pShmNode->mutex==0 ){ rc = SQLITE_IOERR_NOMEM; goto shm_open_err; } rc = winOpen(pDbFd->pVfs, pShmNode->zFilename, /* Name of the file (UTF-8) */ (sqlite3_file*)&pShmNode->hFile, /* File handle here */ SQLITE_OPEN_WAL | SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, 0); if( SQLITE_OK!=rc ){ goto shm_open_err; } /* Check to see if another process is holding the dead-man switch. ** If not, truncate the file to zero length. */ if( winShmSystemLock(pShmNode, _SHM_WRLCK, WIN_SHM_DMS, 1)==SQLITE_OK ){ rc = winTruncate((sqlite3_file *)&pShmNode->hFile, 0); if( rc!=SQLITE_OK ){ rc = winLogError(SQLITE_IOERR_SHMOPEN, osGetLastError(), "winOpenShm", pDbFd->zPath); } } if( rc==SQLITE_OK ){ winShmSystemLock(pShmNode, _SHM_UNLCK, WIN_SHM_DMS, 1); rc = winShmSystemLock(pShmNode, _SHM_RDLCK, WIN_SHM_DMS, 1); } if( rc ) goto shm_open_err; } /* Make the new connection a child of the winShmNode */ p->pShmNode = pShmNode; #if defined(SQLITE_DEBUG) || defined(SQLITE_HAVE_OS_TRACE) p->id = pShmNode->nextShmId++; #endif pShmNode->nRef++; pDbFd->pShm = p; winShmLeaveMutex(); /* The reference count on pShmNode has already been incremented under ** the cover of the winShmEnterMutex() mutex and the pointer from the ** new (struct winShm) object to the pShmNode has been set. All that is ** left to do is to link the new object into the linked list starting ** at pShmNode->pFirst. This must be done while holding the pShmNode->mutex ** mutex. 
*/ sqlite3_mutex_enter(pShmNode->mutex); p->pNext = pShmNode->pFirst; pShmNode->pFirst = p; sqlite3_mutex_leave(pShmNode->mutex); return SQLITE_OK; /* Jump here on any error */ shm_open_err: winShmSystemLock(pShmNode, _SHM_UNLCK, WIN_SHM_DMS, 1); winShmPurge(pDbFd->pVfs, 0); /* This call frees pShmNode if required */ sqlite3_free(p); sqlite3_free(pNew); winShmLeaveMutex(); return rc; } /* ** Close a connection to shared-memory. Delete the underlying ** storage if deleteFlag is true. */ static int winShmUnmap( sqlite3_file *fd, /* Database holding shared memory */ int deleteFlag /* Delete after closing if true */ ){ winFile *pDbFd; /* Database holding shared-memory */ winShm *p; /* The connection to be closed */ winShmNode *pShmNode; /* The underlying shared-memory file */ winShm **pp; /* For looping over sibling connections */ pDbFd = (winFile*)fd; p = pDbFd->pShm; if( p==0 ) return SQLITE_OK; pShmNode = p->pShmNode; /* Remove connection p from the set of connections associated ** with pShmNode */ sqlite3_mutex_enter(pShmNode->mutex); for(pp=&pShmNode->pFirst; (*pp)!=p; pp = &(*pp)->pNext){} *pp = p->pNext; /* Free the connection p */ sqlite3_free(p); pDbFd->pShm = 0; sqlite3_mutex_leave(pShmNode->mutex); /* If pShmNode->nRef has reached 0, then close the underlying ** shared-memory file, too */ winShmEnterMutex(); assert( pShmNode->nRef>0 ); pShmNode->nRef--; if( pShmNode->nRef==0 ){ winShmPurge(pDbFd->pVfs, deleteFlag); } winShmLeaveMutex(); return SQLITE_OK; } /* ** Change the lock state for a shared-memory segment. */ static int winShmLock( sqlite3_file *fd, /* Database file holding the shared memory */ int ofst, /* First lock to acquire or release */ int n, /* Number of locks to acquire or release */ int flags /* What to do with the lock */ ){ winFile *pDbFd = (winFile*)fd; /* Connection holding shared memory */ winShm *p = pDbFd->pShm; /* The shared memory being locked */ winShm *pX; /* For looping over all siblings */ winShmNode *pShmNode = p->pShmNode; int rc = SQLITE_OK; /* Result code */ u16 mask; /* Mask of locks to take or release */ assert( ofst>=0 && ofst+n<=SQLITE_SHM_NLOCK ); assert( n>=1 ); assert( flags==(SQLITE_SHM_LOCK | SQLITE_SHM_SHARED) || flags==(SQLITE_SHM_LOCK | SQLITE_SHM_EXCLUSIVE) || flags==(SQLITE_SHM_UNLOCK | SQLITE_SHM_SHARED) || flags==(SQLITE_SHM_UNLOCK | SQLITE_SHM_EXCLUSIVE) ); assert( n==1 || (flags & SQLITE_SHM_EXCLUSIVE)!=0 ); mask = (u16)((1U<<(ofst+n)) - (1U<1 || mask==(1<mutex); if( flags & SQLITE_SHM_UNLOCK ){ u16 allMask = 0; /* Mask of locks held by siblings */ /* See if any siblings hold this same lock */ for(pX=pShmNode->pFirst; pX; pX=pX->pNext){ if( pX==p ) continue; assert( (pX->exclMask & (p->exclMask|p->sharedMask))==0 ); allMask |= pX->sharedMask; } /* Unlock the system-level locks */ if( (mask & allMask)==0 ){ rc = winShmSystemLock(pShmNode, _SHM_UNLCK, ofst+WIN_SHM_BASE, n); }else{ rc = SQLITE_OK; } /* Undo the local locks */ if( rc==SQLITE_OK ){ p->exclMask &= ~mask; p->sharedMask &= ~mask; } }else if( flags & SQLITE_SHM_SHARED ){ u16 allShared = 0; /* Union of locks held by connections other than "p" */ /* Find out which shared locks are already held by sibling connections. ** If any sibling already holds an exclusive lock, go ahead and return ** SQLITE_BUSY. 
*/ for(pX=pShmNode->pFirst; pX; pX=pX->pNext){ if( (pX->exclMask & mask)!=0 ){ rc = SQLITE_BUSY; break; } allShared |= pX->sharedMask; } /* Get shared locks at the system level, if necessary */ if( rc==SQLITE_OK ){ if( (allShared & mask)==0 ){ rc = winShmSystemLock(pShmNode, _SHM_RDLCK, ofst+WIN_SHM_BASE, n); }else{ rc = SQLITE_OK; } } /* Get the local shared locks */ if( rc==SQLITE_OK ){ p->sharedMask |= mask; } }else{ /* Make sure no sibling connections hold locks that will block this ** lock. If any do, return SQLITE_BUSY right away. */ for(pX=pShmNode->pFirst; pX; pX=pX->pNext){ if( (pX->exclMask & mask)!=0 || (pX->sharedMask & mask)!=0 ){ rc = SQLITE_BUSY; break; } } /* Get the exclusive locks at the system level. Then if successful ** also mark the local connection as being locked. */ if( rc==SQLITE_OK ){ rc = winShmSystemLock(pShmNode, _SHM_WRLCK, ofst+WIN_SHM_BASE, n); if( rc==SQLITE_OK ){ assert( (p->sharedMask & mask)==0 ); p->exclMask |= mask; } } } sqlite3_mutex_leave(pShmNode->mutex); OSTRACE(("SHM-LOCK pid=%lu, id=%d, sharedMask=%03x, exclMask=%03x, rc=%s\n", osGetCurrentProcessId(), p->id, p->sharedMask, p->exclMask, sqlite3ErrName(rc))); return rc; } /* ** Implement a memory barrier or memory fence on shared memory. ** ** All loads and stores begun before the barrier must complete before ** any load or store begun after the barrier. */ static void winShmBarrier( sqlite3_file *fd /* Database holding the shared memory */ ){ UNUSED_PARAMETER(fd); /* MemoryBarrier(); // does not work -- do not know why not */ winShmEnterMutex(); winShmLeaveMutex(); } /* ** This function is called to obtain a pointer to region iRegion of the ** shared-memory associated with the database file fd. Shared-memory regions ** are numbered starting from zero. Each shared-memory region is szRegion ** bytes in size. ** ** If an error occurs, an error code is returned and *pp is set to NULL. ** ** Otherwise, if the isWrite parameter is 0 and the requested shared-memory ** region has not been allocated (by any client, including one running in a ** separate process), then *pp is set to NULL and SQLITE_OK returned. If ** isWrite is non-zero and the requested shared-memory region has not yet ** been allocated, it is allocated by this function. ** ** If the shared-memory region has already been allocated or is allocated by ** this call as described above, then it is mapped into this processes ** address space (if it is not already), *pp is set to point to the mapped ** memory and SQLITE_OK returned. */ static int winShmMap( sqlite3_file *fd, /* Handle open on database file */ int iRegion, /* Region to retrieve */ int szRegion, /* Size of regions */ int isWrite, /* True to extend file if necessary */ void volatile **pp /* OUT: Mapped memory */ ){ winFile *pDbFd = (winFile*)fd; winShm *pShm = pDbFd->pShm; winShmNode *pShmNode; int rc = SQLITE_OK; if( !pShm ){ rc = winOpenSharedMemory(pDbFd); if( rc!=SQLITE_OK ) return rc; pShm = pDbFd->pShm; } pShmNode = pShm->pShmNode; sqlite3_mutex_enter(pShmNode->mutex); assert( szRegion==pShmNode->szRegion || pShmNode->nRegion==0 ); if( pShmNode->nRegion<=iRegion ){ struct ShmRegion *apNew; /* New aRegion[] array */ int nByte = (iRegion+1)*szRegion; /* Minimum required file size */ sqlite3_int64 sz; /* Current size of wal-index file */ pShmNode->szRegion = szRegion; /* The requested region is not mapped into this processes address space. ** Check to see if it has been allocated (i.e. if the wal-index file is ** large enough to contain the requested region). 
*/ rc = winFileSize((sqlite3_file *)&pShmNode->hFile, &sz); if( rc!=SQLITE_OK ){ rc = winLogError(SQLITE_IOERR_SHMSIZE, osGetLastError(), "winShmMap1", pDbFd->zPath); goto shmpage_out; } if( szhFile, nByte); if( rc!=SQLITE_OK ){ rc = winLogError(SQLITE_IOERR_SHMSIZE, osGetLastError(), "winShmMap2", pDbFd->zPath); goto shmpage_out; } } /* Map the requested memory region into this processes address space. */ apNew = (struct ShmRegion *)sqlite3_realloc64( pShmNode->aRegion, (iRegion+1)*sizeof(apNew[0]) ); if( !apNew ){ rc = SQLITE_IOERR_NOMEM; goto shmpage_out; } pShmNode->aRegion = apNew; while( pShmNode->nRegion<=iRegion ){ HANDLE hMap = NULL; /* file-mapping handle */ void *pMap = 0; /* Mapped memory region */ #if SQLITE_OS_WINRT hMap = osCreateFileMappingFromApp(pShmNode->hFile.h, NULL, PAGE_READWRITE, nByte, NULL ); #elif defined(SQLITE_WIN32_HAS_WIDE) hMap = osCreateFileMappingW(pShmNode->hFile.h, NULL, PAGE_READWRITE, 0, nByte, NULL ); #elif defined(SQLITE_WIN32_HAS_ANSI) hMap = osCreateFileMappingA(pShmNode->hFile.h, NULL, PAGE_READWRITE, 0, nByte, NULL ); #endif OSTRACE(("SHM-MAP-CREATE pid=%lu, region=%d, size=%d, rc=%s\n", osGetCurrentProcessId(), pShmNode->nRegion, nByte, hMap ? "ok" : "failed")); if( hMap ){ int iOffset = pShmNode->nRegion*szRegion; int iOffsetShift = iOffset % winSysInfo.dwAllocationGranularity; #if SQLITE_OS_WINRT pMap = osMapViewOfFileFromApp(hMap, FILE_MAP_WRITE | FILE_MAP_READ, iOffset - iOffsetShift, szRegion + iOffsetShift ); #else pMap = osMapViewOfFile(hMap, FILE_MAP_WRITE | FILE_MAP_READ, 0, iOffset - iOffsetShift, szRegion + iOffsetShift ); #endif OSTRACE(("SHM-MAP-MAP pid=%lu, region=%d, offset=%d, size=%d, rc=%s\n", osGetCurrentProcessId(), pShmNode->nRegion, iOffset, szRegion, pMap ? "ok" : "failed")); } if( !pMap ){ pShmNode->lastErrno = osGetLastError(); rc = winLogError(SQLITE_IOERR_SHMMAP, pShmNode->lastErrno, "winShmMap3", pDbFd->zPath); if( hMap ) osCloseHandle(hMap); goto shmpage_out; } pShmNode->aRegion[pShmNode->nRegion].pMap = pMap; pShmNode->aRegion[pShmNode->nRegion].hMap = hMap; pShmNode->nRegion++; } } shmpage_out: if( pShmNode->nRegion>iRegion ){ int iOffset = iRegion*szRegion; int iOffsetShift = iOffset % winSysInfo.dwAllocationGranularity; char *p = (char *)pShmNode->aRegion[iRegion].pMap; *pp = (void *)&p[iOffsetShift]; }else{ *pp = 0; } sqlite3_mutex_leave(pShmNode->mutex); return rc; } #else # define winShmMap 0 # define winShmLock 0 # define winShmBarrier 0 # define winShmUnmap 0 #endif /* #ifndef SQLITE_OMIT_WAL */ /* ** Cleans up the mapped region of the specified file, if any. 
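**
** Callers in this file pair this routine with winMapfile(). For example,
** the SQLITE_FCNTL_MMAP_SIZE handler above does, in outline:
**
**   winUnmapfile(pFile);           (drop the existing mapping)
**   rc = winMapfile(pFile, -1);    (remap under the new size limit)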
*/ #if SQLITE_MAX_MMAP_SIZE>0 static int winUnmapfile(winFile *pFile){ assert( pFile!=0 ); OSTRACE(("UNMAP-FILE pid=%lu, pFile=%p, hMap=%p, pMapRegion=%p, " "mmapSize=%lld, mmapSizeActual=%lld, mmapSizeMax=%lld\n", osGetCurrentProcessId(), pFile, pFile->hMap, pFile->pMapRegion, pFile->mmapSize, pFile->mmapSizeActual, pFile->mmapSizeMax)); if( pFile->pMapRegion ){ if( !osUnmapViewOfFile(pFile->pMapRegion) ){ pFile->lastErrno = osGetLastError(); OSTRACE(("UNMAP-FILE pid=%lu, pFile=%p, pMapRegion=%p, " "rc=SQLITE_IOERR_MMAP\n", osGetCurrentProcessId(), pFile, pFile->pMapRegion)); return winLogError(SQLITE_IOERR_MMAP, pFile->lastErrno, "winUnmapfile1", pFile->zPath); } pFile->pMapRegion = 0; pFile->mmapSize = 0; pFile->mmapSizeActual = 0; } if( pFile->hMap!=NULL ){ if( !osCloseHandle(pFile->hMap) ){ pFile->lastErrno = osGetLastError(); OSTRACE(("UNMAP-FILE pid=%lu, pFile=%p, hMap=%p, rc=SQLITE_IOERR_MMAP\n", osGetCurrentProcessId(), pFile, pFile->hMap)); return winLogError(SQLITE_IOERR_MMAP, pFile->lastErrno, "winUnmapfile2", pFile->zPath); } pFile->hMap = NULL; } OSTRACE(("UNMAP-FILE pid=%lu, pFile=%p, rc=SQLITE_OK\n", osGetCurrentProcessId(), pFile)); return SQLITE_OK; } /* ** Memory map or remap the file opened by file-descriptor pFd (if the file ** is already mapped, the existing mapping is replaced by the new). Or, if ** there already exists a mapping for this file, and there are still ** outstanding xFetch() references to it, this function is a no-op. ** ** If parameter nByte is non-negative, then it is the requested size of ** the mapping to create. Otherwise, if nByte is less than zero, then the ** requested size is the size of the file on disk. The actual size of the ** created mapping is either the requested size or the value configured ** using SQLITE_FCNTL_MMAP_SIZE, whichever is smaller. ** ** SQLITE_OK is returned if no error occurs (even if the mapping is not ** recreated as a result of outstanding references) or an SQLite error ** code otherwise. 
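**
** As a worked example (assuming the common 4096-byte system page size):
** winFetch() below calls winMapfile(pFd, -1), so for a 1,000,000 byte file
** whose SQLITE_FCNTL_MMAP_SIZE limit is larger than the file, the mapping
** created is 999,424 bytes, i.e. the file size rounded down to a page
** boundary.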
*/ static int winMapfile(winFile *pFd, sqlite3_int64 nByte){ sqlite3_int64 nMap = nByte; int rc; assert( nMap>=0 || pFd->nFetchOut==0 ); OSTRACE(("MAP-FILE pid=%lu, pFile=%p, size=%lld\n", osGetCurrentProcessId(), pFd, nByte)); if( pFd->nFetchOut>0 ) return SQLITE_OK; if( nMap<0 ){ rc = winFileSize((sqlite3_file*)pFd, &nMap); if( rc ){ OSTRACE(("MAP-FILE pid=%lu, pFile=%p, rc=SQLITE_IOERR_FSTAT\n", osGetCurrentProcessId(), pFd)); return SQLITE_IOERR_FSTAT; } } if( nMap>pFd->mmapSizeMax ){ nMap = pFd->mmapSizeMax; } nMap &= ~(sqlite3_int64)(winSysInfo.dwPageSize - 1); if( nMap==0 && pFd->mmapSize>0 ){ winUnmapfile(pFd); } if( nMap!=pFd->mmapSize ){ void *pNew = 0; DWORD protect = PAGE_READONLY; DWORD flags = FILE_MAP_READ; winUnmapfile(pFd); if( (pFd->ctrlFlags & WINFILE_RDONLY)==0 ){ protect = PAGE_READWRITE; flags |= FILE_MAP_WRITE; } #if SQLITE_OS_WINRT pFd->hMap = osCreateFileMappingFromApp(pFd->h, NULL, protect, nMap, NULL); #elif defined(SQLITE_WIN32_HAS_WIDE) pFd->hMap = osCreateFileMappingW(pFd->h, NULL, protect, (DWORD)((nMap>>32) & 0xffffffff), (DWORD)(nMap & 0xffffffff), NULL); #elif defined(SQLITE_WIN32_HAS_ANSI) pFd->hMap = osCreateFileMappingA(pFd->h, NULL, protect, (DWORD)((nMap>>32) & 0xffffffff), (DWORD)(nMap & 0xffffffff), NULL); #endif if( pFd->hMap==NULL ){ pFd->lastErrno = osGetLastError(); rc = winLogError(SQLITE_IOERR_MMAP, pFd->lastErrno, "winMapfile1", pFd->zPath); /* Log the error, but continue normal operation using xRead/xWrite */ OSTRACE(("MAP-FILE-CREATE pid=%lu, pFile=%p, rc=%s\n", osGetCurrentProcessId(), pFd, sqlite3ErrName(rc))); return SQLITE_OK; } assert( (nMap % winSysInfo.dwPageSize)==0 ); assert( sizeof(SIZE_T)==sizeof(sqlite3_int64) || nMap<=0xffffffff ); #if SQLITE_OS_WINRT pNew = osMapViewOfFileFromApp(pFd->hMap, flags, 0, (SIZE_T)nMap); #else pNew = osMapViewOfFile(pFd->hMap, flags, 0, 0, (SIZE_T)nMap); #endif if( pNew==NULL ){ osCloseHandle(pFd->hMap); pFd->hMap = NULL; pFd->lastErrno = osGetLastError(); rc = winLogError(SQLITE_IOERR_MMAP, pFd->lastErrno, "winMapfile2", pFd->zPath); /* Log the error, but continue normal operation using xRead/xWrite */ OSTRACE(("MAP-FILE-MAP pid=%lu, pFile=%p, rc=%s\n", osGetCurrentProcessId(), pFd, sqlite3ErrName(rc))); return SQLITE_OK; } pFd->pMapRegion = pNew; pFd->mmapSize = nMap; pFd->mmapSizeActual = nMap; } OSTRACE(("MAP-FILE pid=%lu, pFile=%p, rc=SQLITE_OK\n", osGetCurrentProcessId(), pFd)); return SQLITE_OK; } #endif /* SQLITE_MAX_MMAP_SIZE>0 */ /* ** If possible, return a pointer to a mapping of file fd starting at offset ** iOff. The mapping must be valid for at least nAmt bytes. ** ** If such a pointer can be obtained, store it in *pp and return SQLITE_OK. ** Or, if one cannot but no error occurs, set *pp to 0 and return SQLITE_OK. ** Finally, if an error does occur, return an SQLite error code. The final ** value of *pp is undefined in this case. ** ** If this function does return a pointer, the caller must eventually ** release the reference by calling winUnfetch(). 
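**
** In outline, the caller-side pattern looks like this (a sketch; pFd is an
** sqlite3_file opened by this VFS and pMethods is its method table):
**
**   void *pMap = 0;
**   rc = pFd->pMethods->xFetch(pFd, iOff, nAmt, &pMap);
**   if( rc==SQLITE_OK && pMap ){
**     (read nAmt bytes directly from pMap)
**     pFd->pMethods->xUnfetch(pFd, iOff, pMap);
**   }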
*/ static int winFetch(sqlite3_file *fd, i64 iOff, int nAmt, void **pp){ #if SQLITE_MAX_MMAP_SIZE>0 winFile *pFd = (winFile*)fd; /* The underlying database file */ #endif *pp = 0; OSTRACE(("FETCH pid=%lu, pFile=%p, offset=%lld, amount=%d, pp=%p\n", osGetCurrentProcessId(), fd, iOff, nAmt, pp)); #if SQLITE_MAX_MMAP_SIZE>0 if( pFd->mmapSizeMax>0 ){ if( pFd->pMapRegion==0 ){ int rc = winMapfile(pFd, -1); if( rc!=SQLITE_OK ){ OSTRACE(("FETCH pid=%lu, pFile=%p, rc=%s\n", osGetCurrentProcessId(), pFd, sqlite3ErrName(rc))); return rc; } } if( pFd->mmapSize >= iOff+nAmt ){ *pp = &((u8 *)pFd->pMapRegion)[iOff]; pFd->nFetchOut++; } } #endif OSTRACE(("FETCH pid=%lu, pFile=%p, pp=%p, *pp=%p, rc=SQLITE_OK\n", osGetCurrentProcessId(), fd, pp, *pp)); return SQLITE_OK; } /* ** If the third argument is non-NULL, then this function releases a ** reference obtained by an earlier call to winFetch(). The second ** argument passed to this function must be the same as the corresponding ** argument that was passed to the winFetch() invocation. ** ** Or, if the third argument is NULL, then this function is being called ** to inform the VFS layer that, according to POSIX, any existing mapping ** may now be invalid and should be unmapped. */ static int winUnfetch(sqlite3_file *fd, i64 iOff, void *p){ #if SQLITE_MAX_MMAP_SIZE>0 winFile *pFd = (winFile*)fd; /* The underlying database file */ /* If p==0 (unmap the entire file) then there must be no outstanding ** xFetch references. Or, if p!=0 (meaning it is an xFetch reference), ** then there must be at least one outstanding. */ assert( (p==0)==(pFd->nFetchOut==0) ); /* If p!=0, it must match the iOff value. */ assert( p==0 || p==&((u8 *)pFd->pMapRegion)[iOff] ); OSTRACE(("UNFETCH pid=%lu, pFile=%p, offset=%lld, p=%p\n", osGetCurrentProcessId(), pFd, iOff, p)); if( p ){ pFd->nFetchOut--; }else{ /* FIXME: If Windows truly always prevents truncating or deleting a ** file while a mapping is held, then the following winUnmapfile() call ** is unnecessary can be omitted - potentially improving ** performance. */ winUnmapfile(pFd); } assert( pFd->nFetchOut>=0 ); #endif OSTRACE(("UNFETCH pid=%lu, pFile=%p, rc=SQLITE_OK\n", osGetCurrentProcessId(), fd)); return SQLITE_OK; } /* ** Here ends the implementation of all sqlite3_file methods. ** ********************** End sqlite3_file Methods ******************************* ******************************************************************************/ /* ** This vector defines all the methods that can operate on an ** sqlite3_file for win32. */ static const sqlite3_io_methods winIoMethod = { 3, /* iVersion */ winClose, /* xClose */ winRead, /* xRead */ winWrite, /* xWrite */ winTruncate, /* xTruncate */ winSync, /* xSync */ winFileSize, /* xFileSize */ winLock, /* xLock */ winUnlock, /* xUnlock */ winCheckReservedLock, /* xCheckReservedLock */ winFileControl, /* xFileControl */ winSectorSize, /* xSectorSize */ winDeviceCharacteristics, /* xDeviceCharacteristics */ winShmMap, /* xShmMap */ winShmLock, /* xShmLock */ winShmBarrier, /* xShmBarrier */ winShmUnmap, /* xShmUnmap */ winFetch, /* xFetch */ winUnfetch /* xUnfetch */ }; /**************************************************************************** **************************** sqlite3_vfs methods **************************** ** ** This division contains the implementation of methods on the ** sqlite3_vfs object. */ #if defined(__CYGWIN__) /* ** Convert a filename from whatever the underlying operating system ** supports for filenames into UTF-8. 
Space to hold the result is ** obtained from malloc and must be freed by the calling function. */ static char *winConvertToUtf8Filename(const void *zFilename){ char *zConverted = 0; if( osIsNT() ){ zConverted = winUnicodeToUtf8(zFilename); } #ifdef SQLITE_WIN32_HAS_ANSI else{ zConverted = sqlite3_win32_mbcs_to_utf8(zFilename); } #endif /* caller will handle out of memory */ return zConverted; } #endif /* ** Convert a UTF-8 filename into whatever form the underlying ** operating system wants filenames in. Space to hold the result ** is obtained from malloc and must be freed by the calling ** function. */ static void *winConvertFromUtf8Filename(const char *zFilename){ void *zConverted = 0; if( osIsNT() ){ zConverted = winUtf8ToUnicode(zFilename); } #ifdef SQLITE_WIN32_HAS_ANSI else{ zConverted = sqlite3_win32_utf8_to_mbcs(zFilename); } #endif /* caller will handle out of memory */ return zConverted; } /* ** This function returns non-zero if the specified UTF-8 string buffer ** ends with a directory separator character or one was successfully ** added to it. */ static int winMakeEndInDirSep(int nBuf, char *zBuf){ if( zBuf ){ int nLen = sqlite3Strlen30(zBuf); if( nLen>0 ){ if( winIsDirSep(zBuf[nLen-1]) ){ return 1; }else if( nLen+1mxPathname; nBuf = nMax + 2; zBuf = sqlite3MallocZero( nBuf ); if( !zBuf ){ OSTRACE(("TEMP-FILENAME rc=SQLITE_IOERR_NOMEM\n")); return SQLITE_IOERR_NOMEM; } /* Figure out the effective temporary directory. First, check if one ** has been explicitly set by the application; otherwise, use the one ** configured by the operating system. */ nDir = nMax - (nPre + 15); assert( nDir>0 ); if( sqlite3_temp_directory ){ int nDirLen = sqlite3Strlen30(sqlite3_temp_directory); if( nDirLen>0 ){ if( !winIsDirSep(sqlite3_temp_directory[nDirLen-1]) ){ nDirLen++; } if( nDirLen>nDir ){ sqlite3_free(zBuf); OSTRACE(("TEMP-FILENAME rc=SQLITE_ERROR\n")); return winLogError(SQLITE_ERROR, 0, "winGetTempname1", 0); } sqlite3_snprintf(nMax, zBuf, "%s", sqlite3_temp_directory); } } #if defined(__CYGWIN__) else{ static const char *azDirs[] = { 0, /* getenv("SQLITE_TMPDIR") */ 0, /* getenv("TMPDIR") */ 0, /* getenv("TMP") */ 0, /* getenv("TEMP") */ 0, /* getenv("USERPROFILE") */ "/var/tmp", "/usr/tmp", "/tmp", ".", 0 /* List terminator */ }; unsigned int i; const char *zDir = 0; if( !azDirs[0] ) azDirs[0] = getenv("SQLITE_TMPDIR"); if( !azDirs[1] ) azDirs[1] = getenv("TMPDIR"); if( !azDirs[2] ) azDirs[2] = getenv("TMP"); if( !azDirs[3] ) azDirs[3] = getenv("TEMP"); if( !azDirs[4] ) azDirs[4] = getenv("USERPROFILE"); for(i=0; i/etilqs_XXXXXXXXXXXXXXX\0\0" ** ** If not, return SQLITE_ERROR. The number 17 is used here in order to ** account for the space used by the 15 character random suffix and the ** two trailing NUL characters. The final directory separator character ** has already added if it was not already present. */ nLen = sqlite3Strlen30(zBuf); if( (nLen + nPre + 17) > nBuf ){ sqlite3_free(zBuf); OSTRACE(("TEMP-FILENAME rc=SQLITE_ERROR\n")); return winLogError(SQLITE_ERROR, 0, "winGetTempname5", 0); } sqlite3_snprintf(nBuf-16-nLen, zBuf+nLen, SQLITE_TEMP_FILE_PREFIX); j = sqlite3Strlen30(zBuf); sqlite3_randomness(15, &zBuf[j]); for(i=0; i<15; i++, j++){ zBuf[j] = (char)zChars[ ((unsigned char)zBuf[j])%(sizeof(zChars)-1) ]; } zBuf[j] = 0; zBuf[j+1] = 0; *pzBuf = zBuf; OSTRACE(("TEMP-FILENAME name=%s, rc=SQLITE_OK\n", zBuf)); return SQLITE_OK; } /* ** Return TRUE if the named file is really a directory. 
Return false if ** it is something other than a directory, or if there is any kind of memory ** allocation failure. */ static int winIsDir(const void *zConverted){ DWORD attr; int rc = 0; DWORD lastErrno; if( osIsNT() ){ int cnt = 0; WIN32_FILE_ATTRIBUTE_DATA sAttrData; memset(&sAttrData, 0, sizeof(sAttrData)); while( !(rc = osGetFileAttributesExW((LPCWSTR)zConverted, GetFileExInfoStandard, &sAttrData)) && winRetryIoerr(&cnt, &lastErrno) ){} if( !rc ){ return 0; /* Invalid name? */ } attr = sAttrData.dwFileAttributes; #if SQLITE_OS_WINCE==0 }else{ attr = osGetFileAttributesA((char*)zConverted); #endif } return (attr!=INVALID_FILE_ATTRIBUTES) && (attr&FILE_ATTRIBUTE_DIRECTORY); } /* ** Open a file. */ static int winOpen( sqlite3_vfs *pVfs, /* Used to get maximum path name length */ const char *zName, /* Name of the file (UTF-8) */ sqlite3_file *id, /* Write the SQLite file handle here */ int flags, /* Open mode flags */ int *pOutFlags /* Status return flags */ ){ HANDLE h; DWORD lastErrno = 0; DWORD dwDesiredAccess; DWORD dwShareMode; DWORD dwCreationDisposition; DWORD dwFlagsAndAttributes = 0; #if SQLITE_OS_WINCE int isTemp = 0; #endif winFile *pFile = (winFile*)id; void *zConverted; /* Filename in OS encoding */ const char *zUtf8Name = zName; /* Filename in UTF-8 encoding */ int cnt = 0; /* If argument zPath is a NULL pointer, this function is required to open ** a temporary file. Use this buffer to store the file name in. */ char *zTmpname = 0; /* For temporary filename, if necessary. */ int rc = SQLITE_OK; /* Function Return Code */ #if !defined(NDEBUG) || SQLITE_OS_WINCE int eType = flags&0xFFFFFF00; /* Type of file to open */ #endif int isExclusive = (flags & SQLITE_OPEN_EXCLUSIVE); int isDelete = (flags & SQLITE_OPEN_DELETEONCLOSE); int isCreate = (flags & SQLITE_OPEN_CREATE); int isReadonly = (flags & SQLITE_OPEN_READONLY); int isReadWrite = (flags & SQLITE_OPEN_READWRITE); #ifndef NDEBUG int isOpenJournal = (isCreate && ( eType==SQLITE_OPEN_MASTER_JOURNAL || eType==SQLITE_OPEN_MAIN_JOURNAL || eType==SQLITE_OPEN_WAL )); #endif OSTRACE(("OPEN name=%s, pFile=%p, flags=%x, pOutFlags=%p\n", zUtf8Name, id, flags, pOutFlags)); /* Check the following statements are true: ** ** (a) Exactly one of the READWRITE and READONLY flags must be set, and ** (b) if CREATE is set, then READWRITE must also be set, and ** (c) if EXCLUSIVE is set, then CREATE must also be set. ** (d) if DELETEONCLOSE is set, then CREATE must also be set. */ assert((isReadonly==0 || isReadWrite==0) && (isReadWrite || isReadonly)); assert(isCreate==0 || isReadWrite); assert(isExclusive==0 || isCreate); assert(isDelete==0 || isCreate); /* The main DB, main journal, WAL file and master journal are never ** automatically deleted. Nor are they ever temporary files. */ assert( (!isDelete && zName) || eType!=SQLITE_OPEN_MAIN_DB ); assert( (!isDelete && zName) || eType!=SQLITE_OPEN_MAIN_JOURNAL ); assert( (!isDelete && zName) || eType!=SQLITE_OPEN_MASTER_JOURNAL ); assert( (!isDelete && zName) || eType!=SQLITE_OPEN_WAL ); /* Assert that the upper layer has set one of the "file-type" flags. 
*/ assert( eType==SQLITE_OPEN_MAIN_DB || eType==SQLITE_OPEN_TEMP_DB || eType==SQLITE_OPEN_MAIN_JOURNAL || eType==SQLITE_OPEN_TEMP_JOURNAL || eType==SQLITE_OPEN_SUBJOURNAL || eType==SQLITE_OPEN_MASTER_JOURNAL || eType==SQLITE_OPEN_TRANSIENT_DB || eType==SQLITE_OPEN_WAL ); assert( pFile!=0 ); memset(pFile, 0, sizeof(winFile)); pFile->h = INVALID_HANDLE_VALUE; #if SQLITE_OS_WINRT if( !zUtf8Name && !sqlite3_temp_directory ){ sqlite3_log(SQLITE_ERROR, "sqlite3_temp_directory variable should be set for WinRT"); } #endif /* If the second argument to this function is NULL, generate a ** temporary file name to use */ if( !zUtf8Name ){ assert( isDelete && !isOpenJournal ); rc = winGetTempname(pVfs, &zTmpname); if( rc!=SQLITE_OK ){ OSTRACE(("OPEN name=%s, rc=%s", zUtf8Name, sqlite3ErrName(rc))); return rc; } zUtf8Name = zTmpname; } /* Database filenames are double-zero terminated if they are not ** URIs with parameters. Hence, they can always be passed into ** sqlite3_uri_parameter(). */ assert( (eType!=SQLITE_OPEN_MAIN_DB) || (flags & SQLITE_OPEN_URI) || zUtf8Name[sqlite3Strlen30(zUtf8Name)+1]==0 ); /* Convert the filename to the system encoding. */ zConverted = winConvertFromUtf8Filename(zUtf8Name); if( zConverted==0 ){ sqlite3_free(zTmpname); OSTRACE(("OPEN name=%s, rc=SQLITE_IOERR_NOMEM", zUtf8Name)); return SQLITE_IOERR_NOMEM; } if( winIsDir(zConverted) ){ sqlite3_free(zConverted); sqlite3_free(zTmpname); OSTRACE(("OPEN name=%s, rc=SQLITE_CANTOPEN_ISDIR", zUtf8Name)); return SQLITE_CANTOPEN_ISDIR; } if( isReadWrite ){ dwDesiredAccess = GENERIC_READ | GENERIC_WRITE; }else{ dwDesiredAccess = GENERIC_READ; } /* SQLITE_OPEN_EXCLUSIVE is used to make sure that a new file is ** created. SQLite doesn't use it to indicate "exclusive access" ** as it is usually understood. */ if( isExclusive ){ /* Creates a new file, only if it does not already exist. */ /* If the file exists, it fails. */ dwCreationDisposition = CREATE_NEW; }else if( isCreate ){ /* Open existing file, or create if it doesn't exist */ dwCreationDisposition = OPEN_ALWAYS; }else{ /* Opens a file, only if it exists. */ dwCreationDisposition = OPEN_EXISTING; } dwShareMode = FILE_SHARE_READ | FILE_SHARE_WRITE; if( isDelete ){ #if SQLITE_OS_WINCE dwFlagsAndAttributes = FILE_ATTRIBUTE_HIDDEN; isTemp = 1; #else dwFlagsAndAttributes = FILE_ATTRIBUTE_TEMPORARY | FILE_ATTRIBUTE_HIDDEN | FILE_FLAG_DELETE_ON_CLOSE; #endif }else{ dwFlagsAndAttributes = FILE_ATTRIBUTE_NORMAL; } /* Reports from the internet are that performance is always ** better if FILE_FLAG_RANDOM_ACCESS is used. Ticket #2699. 
*/ #if SQLITE_OS_WINCE dwFlagsAndAttributes |= FILE_FLAG_RANDOM_ACCESS; #endif if( osIsNT() ){ #if SQLITE_OS_WINRT CREATEFILE2_EXTENDED_PARAMETERS extendedParameters; extendedParameters.dwSize = sizeof(CREATEFILE2_EXTENDED_PARAMETERS); extendedParameters.dwFileAttributes = dwFlagsAndAttributes & FILE_ATTRIBUTE_MASK; extendedParameters.dwFileFlags = dwFlagsAndAttributes & FILE_FLAG_MASK; extendedParameters.dwSecurityQosFlags = SECURITY_ANONYMOUS; extendedParameters.lpSecurityAttributes = NULL; extendedParameters.hTemplateFile = NULL; while( (h = osCreateFile2((LPCWSTR)zConverted, dwDesiredAccess, dwShareMode, dwCreationDisposition, &extendedParameters))==INVALID_HANDLE_VALUE && winRetryIoerr(&cnt, &lastErrno) ){ /* Noop */ } #else while( (h = osCreateFileW((LPCWSTR)zConverted, dwDesiredAccess, dwShareMode, NULL, dwCreationDisposition, dwFlagsAndAttributes, NULL))==INVALID_HANDLE_VALUE && winRetryIoerr(&cnt, &lastErrno) ){ /* Noop */ } #endif } #ifdef SQLITE_WIN32_HAS_ANSI else{ while( (h = osCreateFileA((LPCSTR)zConverted, dwDesiredAccess, dwShareMode, NULL, dwCreationDisposition, dwFlagsAndAttributes, NULL))==INVALID_HANDLE_VALUE && winRetryIoerr(&cnt, &lastErrno) ){ /* Noop */ } } #endif winLogIoerr(cnt, __LINE__); OSTRACE(("OPEN file=%p, name=%s, access=%lx, rc=%s\n", h, zUtf8Name, dwDesiredAccess, (h==INVALID_HANDLE_VALUE) ? "failed" : "ok")); if( h==INVALID_HANDLE_VALUE ){ pFile->lastErrno = lastErrno; winLogError(SQLITE_CANTOPEN, pFile->lastErrno, "winOpen", zUtf8Name); sqlite3_free(zConverted); sqlite3_free(zTmpname); if( isReadWrite && !isExclusive ){ return winOpen(pVfs, zName, id, ((flags|SQLITE_OPEN_READONLY) & ~(SQLITE_OPEN_CREATE|SQLITE_OPEN_READWRITE)), pOutFlags); }else{ return SQLITE_CANTOPEN_BKPT; } } if( pOutFlags ){ if( isReadWrite ){ *pOutFlags = SQLITE_OPEN_READWRITE; }else{ *pOutFlags = SQLITE_OPEN_READONLY; } } OSTRACE(("OPEN file=%p, name=%s, access=%lx, pOutFlags=%p, *pOutFlags=%d, " "rc=%s\n", h, zUtf8Name, dwDesiredAccess, pOutFlags, pOutFlags ? *pOutFlags : 0, (h==INVALID_HANDLE_VALUE) ? "failed" : "ok")); #if SQLITE_OS_WINCE if( isReadWrite && eType==SQLITE_OPEN_MAIN_DB && (rc = winceCreateLock(zName, pFile))!=SQLITE_OK ){ osCloseHandle(h); sqlite3_free(zConverted); sqlite3_free(zTmpname); OSTRACE(("OPEN-CE-LOCK name=%s, rc=%s\n", zName, sqlite3ErrName(rc))); return rc; } if( isTemp ){ pFile->zDeleteOnClose = zConverted; }else #endif { sqlite3_free(zConverted); } sqlite3_free(zTmpname); pFile->pMethod = &winIoMethod; pFile->pVfs = pVfs; pFile->h = h; if( isReadonly ){ pFile->ctrlFlags |= WINFILE_RDONLY; } if( sqlite3_uri_boolean(zName, "psow", SQLITE_POWERSAFE_OVERWRITE) ){ pFile->ctrlFlags |= WINFILE_PSOW; } pFile->lastErrno = NO_ERROR; pFile->zPath = zName; #if SQLITE_MAX_MMAP_SIZE>0 pFile->hMap = NULL; pFile->pMapRegion = 0; pFile->mmapSize = 0; pFile->mmapSizeActual = 0; pFile->mmapSizeMax = sqlite3GlobalConfig.szMmap; #endif OpenCounter(+1); return rc; } /* ** Delete the named file. ** ** Note that Windows does not allow a file to be deleted if some other ** process has it open. Sometimes a virus scanner or indexing program ** will open a journal file shortly after it is created in order to do ** whatever it does. While this other process is holding the ** file open, we will be unable to delete it. To work around this ** problem, we delay 100 milliseconds and try to delete again. Up ** to MX_DELETION_ATTEMPTs deletion attempts are run before giving ** up and returning an error. 
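**
** The number of attempts and the delay between them come from the
** winRetryIoerr() helper and can be tuned at run time through the
** SQLITE_FCNTL_WIN32_AV_RETRY file control handled above. A sketch (with a
** hypothetical "db" handle):
**
**   int aRetry[2] = { 20, 50 };    (20 attempts, 50 milliseconds apart)
**   sqlite3_file_control(db, "main", SQLITE_FCNTL_WIN32_AV_RETRY, aRetry);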
*/ static int winDelete( sqlite3_vfs *pVfs, /* Not used on win32 */ const char *zFilename, /* Name of file to delete */ int syncDir /* Not used on win32 */ ){ int cnt = 0; int rc; DWORD attr; DWORD lastErrno = 0; void *zConverted; UNUSED_PARAMETER(pVfs); UNUSED_PARAMETER(syncDir); SimulateIOError(return SQLITE_IOERR_DELETE); OSTRACE(("DELETE name=%s, syncDir=%d\n", zFilename, syncDir)); zConverted = winConvertFromUtf8Filename(zFilename); if( zConverted==0 ){ OSTRACE(("DELETE name=%s, rc=SQLITE_IOERR_NOMEM\n", zFilename)); return SQLITE_IOERR_NOMEM; } if( osIsNT() ){ do { #if SQLITE_OS_WINRT WIN32_FILE_ATTRIBUTE_DATA sAttrData; memset(&sAttrData, 0, sizeof(sAttrData)); if ( osGetFileAttributesExW(zConverted, GetFileExInfoStandard, &sAttrData) ){ attr = sAttrData.dwFileAttributes; }else{ lastErrno = osGetLastError(); if( lastErrno==ERROR_FILE_NOT_FOUND || lastErrno==ERROR_PATH_NOT_FOUND ){ rc = SQLITE_IOERR_DELETE_NOENT; /* Already gone? */ }else{ rc = SQLITE_ERROR; } break; } #else attr = osGetFileAttributesW(zConverted); #endif if ( attr==INVALID_FILE_ATTRIBUTES ){ lastErrno = osGetLastError(); if( lastErrno==ERROR_FILE_NOT_FOUND || lastErrno==ERROR_PATH_NOT_FOUND ){ rc = SQLITE_IOERR_DELETE_NOENT; /* Already gone? */ }else{ rc = SQLITE_ERROR; } break; } if ( attr&FILE_ATTRIBUTE_DIRECTORY ){ rc = SQLITE_ERROR; /* Files only. */ break; } if ( osDeleteFileW(zConverted) ){ rc = SQLITE_OK; /* Deleted OK. */ break; } if ( !winRetryIoerr(&cnt, &lastErrno) ){ rc = SQLITE_ERROR; /* No more retries. */ break; } } while(1); } #ifdef SQLITE_WIN32_HAS_ANSI else{ do { attr = osGetFileAttributesA(zConverted); if ( attr==INVALID_FILE_ATTRIBUTES ){ lastErrno = osGetLastError(); if( lastErrno==ERROR_FILE_NOT_FOUND || lastErrno==ERROR_PATH_NOT_FOUND ){ rc = SQLITE_IOERR_DELETE_NOENT; /* Already gone? */ }else{ rc = SQLITE_ERROR; } break; } if ( attr&FILE_ATTRIBUTE_DIRECTORY ){ rc = SQLITE_ERROR; /* Files only. */ break; } if ( osDeleteFileA(zConverted) ){ rc = SQLITE_OK; /* Deleted OK. */ break; } if ( !winRetryIoerr(&cnt, &lastErrno) ){ rc = SQLITE_ERROR; /* No more retries. */ break; } } while(1); } #endif if( rc && rc!=SQLITE_IOERR_DELETE_NOENT ){ rc = winLogError(SQLITE_IOERR_DELETE, lastErrno, "winDelete", zFilename); }else{ winLogIoerr(cnt, __LINE__); } sqlite3_free(zConverted); OSTRACE(("DELETE name=%s, rc=%s\n", zFilename, sqlite3ErrName(rc))); return rc; } /* ** Check the existence and status of a file. */ static int winAccess( sqlite3_vfs *pVfs, /* Not used on win32 */ const char *zFilename, /* Name of file to check */ int flags, /* Type of test to make on this file */ int *pResOut /* OUT: Result */ ){ DWORD attr; int rc = 0; DWORD lastErrno = 0; void *zConverted; UNUSED_PARAMETER(pVfs); SimulateIOError( return SQLITE_IOERR_ACCESS; ); OSTRACE(("ACCESS name=%s, flags=%x, pResOut=%p\n", zFilename, flags, pResOut)); zConverted = winConvertFromUtf8Filename(zFilename); if( zConverted==0 ){ OSTRACE(("ACCESS name=%s, rc=SQLITE_IOERR_NOMEM\n", zFilename)); return SQLITE_IOERR_NOMEM; } if( osIsNT() ){ int cnt = 0; WIN32_FILE_ATTRIBUTE_DATA sAttrData; memset(&sAttrData, 0, sizeof(sAttrData)); while( !(rc = osGetFileAttributesExW((LPCWSTR)zConverted, GetFileExInfoStandard, &sAttrData)) && winRetryIoerr(&cnt, &lastErrno) ){} if( rc ){ /* For an SQLITE_ACCESS_EXISTS query, treat a zero-length file ** as if it does not exist. 
*/ if( flags==SQLITE_ACCESS_EXISTS && sAttrData.nFileSizeHigh==0 && sAttrData.nFileSizeLow==0 ){ attr = INVALID_FILE_ATTRIBUTES; }else{ attr = sAttrData.dwFileAttributes; } }else{ winLogIoerr(cnt, __LINE__); if( lastErrno!=ERROR_FILE_NOT_FOUND && lastErrno!=ERROR_PATH_NOT_FOUND ){ sqlite3_free(zConverted); return winLogError(SQLITE_IOERR_ACCESS, lastErrno, "winAccess", zFilename); }else{ attr = INVALID_FILE_ATTRIBUTES; } } } #ifdef SQLITE_WIN32_HAS_ANSI else{ attr = osGetFileAttributesA((char*)zConverted); } #endif sqlite3_free(zConverted); switch( flags ){ case SQLITE_ACCESS_READ: case SQLITE_ACCESS_EXISTS: rc = attr!=INVALID_FILE_ATTRIBUTES; break; case SQLITE_ACCESS_READWRITE: rc = attr!=INVALID_FILE_ATTRIBUTES && (attr & FILE_ATTRIBUTE_READONLY)==0; break; default: assert(!"Invalid flags argument"); } *pResOut = rc; OSTRACE(("ACCESS name=%s, pResOut=%p, *pResOut=%d, rc=SQLITE_OK\n", zFilename, pResOut, *pResOut)); return SQLITE_OK; } /* ** Returns non-zero if the specified path name starts with a drive letter ** followed by a colon character. */ static BOOL winIsDriveLetterAndColon( const char *zPathname ){ return ( sqlite3Isalpha(zPathname[0]) && zPathname[1]==':' ); } /* ** Returns non-zero if the specified path name should be used verbatim. If ** non-zero is returned from this function, the calling function must simply ** use the provided path name verbatim -OR- resolve it into a full path name ** using the GetFullPathName Win32 API function (if available). */ static BOOL winIsVerbatimPathname( const char *zPathname ){ /* ** If the path name starts with a forward slash or a backslash, it is either ** a legal UNC name, a volume relative path, or an absolute path name in the ** "Unix" format on Windows. There is no easy way to differentiate between ** the final two cases; therefore, we return the safer return value of TRUE ** so that callers of this function will simply use it verbatim. */ if ( winIsDirSep(zPathname[0]) ){ return TRUE; } /* ** If the path name starts with a letter and a colon it is either a volume ** relative path or an absolute path. Callers of this function must not ** attempt to treat it as a relative path name (i.e. they should simply use ** it verbatim). */ if ( winIsDriveLetterAndColon(zPathname) ){ return TRUE; } /* ** If we get to this point, the path name should almost certainly be a purely ** relative one (i.e. not a UNC name, not absolute, and not volume relative). */ return FALSE; } /* ** Turn a relative pathname into a full pathname. Write the full ** pathname into zOut[]. zOut[] will be at least pVfs->mxPathname ** bytes in size. */ static int winFullPathname( sqlite3_vfs *pVfs, /* Pointer to vfs object */ const char *zRelative, /* Possibly relative input path */ int nFull, /* Size of output buffer in bytes */ char *zFull /* Output buffer */ ){ #if defined(__CYGWIN__) SimulateIOError( return SQLITE_ERROR ); UNUSED_PARAMETER(nFull); assert( nFull>=pVfs->mxPathname ); if ( sqlite3_data_directory && !winIsVerbatimPathname(zRelative) ){ /* ** NOTE: We are dealing with a relative path name and the data ** directory has been set. Therefore, use it as the basis ** for converting the relative path name to an absolute ** one by prepending the data directory and a slash. */ char *zOut = sqlite3MallocZero( pVfs->mxPathname+1 ); if( !zOut ){ return SQLITE_IOERR_NOMEM; } if( cygwin_conv_path( (osIsNT() ? 
CCP_POSIX_TO_WIN_W : CCP_POSIX_TO_WIN_A) | CCP_RELATIVE, zRelative, zOut, pVfs->mxPathname+1)<0 ){ sqlite3_free(zOut); return winLogError(SQLITE_CANTOPEN_CONVPATH, (DWORD)errno, "winFullPathname1", zRelative); }else{ char *zUtf8 = winConvertToUtf8Filename(zOut); if( !zUtf8 ){ sqlite3_free(zOut); return SQLITE_IOERR_NOMEM; } sqlite3_snprintf(MIN(nFull, pVfs->mxPathname), zFull, "%s%c%s", sqlite3_data_directory, winGetDirSep(), zUtf8); sqlite3_free(zUtf8); sqlite3_free(zOut); } }else{ char *zOut = sqlite3MallocZero( pVfs->mxPathname+1 ); if( !zOut ){ return SQLITE_IOERR_NOMEM; } if( cygwin_conv_path( (osIsNT() ? CCP_POSIX_TO_WIN_W : CCP_POSIX_TO_WIN_A), zRelative, zOut, pVfs->mxPathname+1)<0 ){ sqlite3_free(zOut); return winLogError(SQLITE_CANTOPEN_CONVPATH, (DWORD)errno, "winFullPathname2", zRelative); }else{ char *zUtf8 = winConvertToUtf8Filename(zOut); if( !zUtf8 ){ sqlite3_free(zOut); return SQLITE_IOERR_NOMEM; } sqlite3_snprintf(MIN(nFull, pVfs->mxPathname), zFull, "%s", zUtf8); sqlite3_free(zUtf8); sqlite3_free(zOut); } } return SQLITE_OK; #endif #if (SQLITE_OS_WINCE || SQLITE_OS_WINRT) && !defined(__CYGWIN__) SimulateIOError( return SQLITE_ERROR ); /* WinCE has no concept of a relative pathname, or so I am told. */ /* WinRT has no way to convert a relative path to an absolute one. */ if ( sqlite3_data_directory && !winIsVerbatimPathname(zRelative) ){ /* ** NOTE: We are dealing with a relative path name and the data ** directory has been set. Therefore, use it as the basis ** for converting the relative path name to an absolute ** one by prepending the data directory and a backslash. */ sqlite3_snprintf(MIN(nFull, pVfs->mxPathname), zFull, "%s%c%s", sqlite3_data_directory, winGetDirSep(), zRelative); }else{ sqlite3_snprintf(MIN(nFull, pVfs->mxPathname), zFull, "%s", zRelative); } return SQLITE_OK; #endif #if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT && !defined(__CYGWIN__) DWORD nByte; void *zConverted; char *zOut; /* If this path name begins with "/X:", where "X" is any alphabetic ** character, discard the initial "/" from the pathname. */ if( zRelative[0]=='/' && winIsDriveLetterAndColon(zRelative+1) ){ zRelative++; } /* It's odd to simulate an io-error here, but really this is just ** using the io-error infrastructure to test that SQLite handles this ** function failing. This function could fail if, for example, the ** current working directory has been unlinked. */ SimulateIOError( return SQLITE_ERROR ); if ( sqlite3_data_directory && !winIsVerbatimPathname(zRelative) ){ /* ** NOTE: We are dealing with a relative path name and the data ** directory has been set. Therefore, use it as the basis ** for converting the relative path name to an absolute ** one by prepending the data directory and a backslash. 
*/ sqlite3_snprintf(MIN(nFull, pVfs->mxPathname), zFull, "%s%c%s", sqlite3_data_directory, winGetDirSep(), zRelative); return SQLITE_OK; } zConverted = winConvertFromUtf8Filename(zRelative); if( zConverted==0 ){ return SQLITE_IOERR_NOMEM; } if( osIsNT() ){ LPWSTR zTemp; nByte = osGetFullPathNameW((LPCWSTR)zConverted, 0, 0, 0); if( nByte==0 ){ sqlite3_free(zConverted); return winLogError(SQLITE_CANTOPEN_FULLPATH, osGetLastError(), "winFullPathname1", zRelative); } nByte += 3; zTemp = sqlite3MallocZero( nByte*sizeof(zTemp[0]) ); if( zTemp==0 ){ sqlite3_free(zConverted); return SQLITE_IOERR_NOMEM; } nByte = osGetFullPathNameW((LPCWSTR)zConverted, nByte, zTemp, 0); if( nByte==0 ){ sqlite3_free(zConverted); sqlite3_free(zTemp); return winLogError(SQLITE_CANTOPEN_FULLPATH, osGetLastError(), "winFullPathname2", zRelative); } sqlite3_free(zConverted); zOut = winUnicodeToUtf8(zTemp); sqlite3_free(zTemp); } #ifdef SQLITE_WIN32_HAS_ANSI else{ char *zTemp; nByte = osGetFullPathNameA((char*)zConverted, 0, 0, 0); if( nByte==0 ){ sqlite3_free(zConverted); return winLogError(SQLITE_CANTOPEN_FULLPATH, osGetLastError(), "winFullPathname3", zRelative); } nByte += 3; zTemp = sqlite3MallocZero( nByte*sizeof(zTemp[0]) ); if( zTemp==0 ){ sqlite3_free(zConverted); return SQLITE_IOERR_NOMEM; } nByte = osGetFullPathNameA((char*)zConverted, nByte, zTemp, 0); if( nByte==0 ){ sqlite3_free(zConverted); sqlite3_free(zTemp); return winLogError(SQLITE_CANTOPEN_FULLPATH, osGetLastError(), "winFullPathname4", zRelative); } sqlite3_free(zConverted); zOut = sqlite3_win32_mbcs_to_utf8(zTemp); sqlite3_free(zTemp); } #endif if( zOut ){ sqlite3_snprintf(MIN(nFull, pVfs->mxPathname), zFull, "%s", zOut); sqlite3_free(zOut); return SQLITE_OK; }else{ return SQLITE_IOERR_NOMEM; } #endif } #ifndef SQLITE_OMIT_LOAD_EXTENSION /* ** Interfaces for opening a shared library, finding entry points ** within the shared library, and closing the shared library. 
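**
** These hooks back SQLite's run-time loadable extension mechanism. At the
** application level the same machinery is normally reached through
** sqlite3_load_extension(); a minimal sketch, assuming db is an open
** database handle and "myext.dll" is a placeholder file name:
**
**       char *zErrMsg = 0;
**       sqlite3_enable_load_extension(db, 1);
**       if( sqlite3_load_extension(db, "myext.dll", 0, &zErrMsg)!=SQLITE_OK ){
**         sqlite3_free(zErrMsg);
**       }
**
** Passing 0 as the entry-point argument lets the loader fall back to its
** default entry point.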
*/ static void *winDlOpen(sqlite3_vfs *pVfs, const char *zFilename){ HANDLE h; #if defined(__CYGWIN__) int nFull = pVfs->mxPathname+1; char *zFull = sqlite3MallocZero( nFull ); void *zConverted = 0; if( zFull==0 ){ OSTRACE(("DLOPEN name=%s, handle=%p\n", zFilename, (void*)0)); return 0; } if( winFullPathname(pVfs, zFilename, nFull, zFull)!=SQLITE_OK ){ sqlite3_free(zFull); OSTRACE(("DLOPEN name=%s, handle=%p\n", zFilename, (void*)0)); return 0; } zConverted = winConvertFromUtf8Filename(zFull); sqlite3_free(zFull); #else void *zConverted = winConvertFromUtf8Filename(zFilename); UNUSED_PARAMETER(pVfs); #endif if( zConverted==0 ){ OSTRACE(("DLOPEN name=%s, handle=%p\n", zFilename, (void*)0)); return 0; } if( osIsNT() ){ #if SQLITE_OS_WINRT h = osLoadPackagedLibrary((LPCWSTR)zConverted, 0); #else h = osLoadLibraryW((LPCWSTR)zConverted); #endif } #ifdef SQLITE_WIN32_HAS_ANSI else{ h = osLoadLibraryA((char*)zConverted); } #endif OSTRACE(("DLOPEN name=%s, handle=%p\n", zFilename, (void*)h)); sqlite3_free(zConverted); return (void*)h; } static void winDlError(sqlite3_vfs *pVfs, int nBuf, char *zBufOut){ UNUSED_PARAMETER(pVfs); winGetLastErrorMsg(osGetLastError(), nBuf, zBufOut); } static void (*winDlSym(sqlite3_vfs *pVfs,void *pH,const char *zSym))(void){ FARPROC proc; UNUSED_PARAMETER(pVfs); proc = osGetProcAddressA((HANDLE)pH, zSym); OSTRACE(("DLSYM handle=%p, symbol=%s, address=%p\n", (void*)pH, zSym, (void*)proc)); return (void(*)(void))proc; } static void winDlClose(sqlite3_vfs *pVfs, void *pHandle){ UNUSED_PARAMETER(pVfs); osFreeLibrary((HANDLE)pHandle); OSTRACE(("DLCLOSE handle=%p\n", (void*)pHandle)); } #else /* if SQLITE_OMIT_LOAD_EXTENSION is defined: */ #define winDlOpen 0 #define winDlError 0 #define winDlSym 0 #define winDlClose 0 #endif /* ** Write up to nBuf bytes of randomness into zBuf. */ static int winRandomness(sqlite3_vfs *pVfs, int nBuf, char *zBuf){ int n = 0; UNUSED_PARAMETER(pVfs); #if defined(SQLITE_TEST) || defined(SQLITE_OMIT_RANDOMNESS) n = nBuf; memset(zBuf, 0, nBuf); #else if( sizeof(SYSTEMTIME)<=nBuf-n ){ SYSTEMTIME x; osGetSystemTime(&x); memcpy(&zBuf[n], &x, sizeof(x)); n += sizeof(x); } if( sizeof(DWORD)<=nBuf-n ){ DWORD pid = osGetCurrentProcessId(); memcpy(&zBuf[n], &pid, sizeof(pid)); n += sizeof(pid); } #if SQLITE_OS_WINRT if( sizeof(ULONGLONG)<=nBuf-n ){ ULONGLONG cnt = osGetTickCount64(); memcpy(&zBuf[n], &cnt, sizeof(cnt)); n += sizeof(cnt); } #else if( sizeof(DWORD)<=nBuf-n ){ DWORD cnt = osGetTickCount(); memcpy(&zBuf[n], &cnt, sizeof(cnt)); n += sizeof(cnt); } #endif if( sizeof(LARGE_INTEGER)<=nBuf-n ){ LARGE_INTEGER i; osQueryPerformanceCounter(&i); memcpy(&zBuf[n], &i, sizeof(i)); n += sizeof(i); } #if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT && SQLITE_WIN32_USE_UUID if( sizeof(UUID)<=nBuf-n ){ UUID id; memset(&id, 0, sizeof(UUID)); osUuidCreate(&id); memcpy(&zBuf[n], &id, sizeof(UUID)); n += sizeof(UUID); } if( sizeof(UUID)<=nBuf-n ){ UUID id; memset(&id, 0, sizeof(UUID)); osUuidCreateSequential(&id); memcpy(&zBuf[n], &id, sizeof(UUID)); n += sizeof(UUID); } #endif #endif /* defined(SQLITE_TEST) || defined(SQLITE_ZERO_PRNG_SEED) */ return n; } /* ** Sleep for a little while. Return the amount of time slept. */ static int winSleep(sqlite3_vfs *pVfs, int microsec){ sqlite3_win32_sleep((microsec+999)/1000); UNUSED_PARAMETER(pVfs); return ((microsec+999)/1000)*1000; } /* ** The following variable, if set to a non-zero value, is interpreted as ** the number of seconds since 1970 and is used to set the result of ** sqlite3OsCurrentTime() during testing. 
*/ #ifdef SQLITE_TEST SQLITE_API int sqlite3_current_time = 0; /* Fake system time in seconds since 1970. */ #endif /* ** Find the current time (in Universal Coordinated Time). Write into *piNow ** the current time and date as a Julian Day number times 86_400_000. In ** other words, write into *piNow the number of milliseconds since the Julian ** epoch of noon in Greenwich on November 24, 4714 B.C according to the ** proleptic Gregorian calendar. ** ** On success, return SQLITE_OK. Return SQLITE_ERROR if the time and date ** cannot be found. */ static int winCurrentTimeInt64(sqlite3_vfs *pVfs, sqlite3_int64 *piNow){ /* FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601 (= JD 2305813.5). */ FILETIME ft; static const sqlite3_int64 winFiletimeEpoch = 23058135*(sqlite3_int64)8640000; #ifdef SQLITE_TEST static const sqlite3_int64 unixEpoch = 24405875*(sqlite3_int64)8640000; #endif /* 2^32 - to avoid use of LL and warnings in gcc */ static const sqlite3_int64 max32BitValue = (sqlite3_int64)2000000000 + (sqlite3_int64)2000000000 + (sqlite3_int64)294967296; #if SQLITE_OS_WINCE SYSTEMTIME time; osGetSystemTime(&time); /* if SystemTimeToFileTime() fails, it returns zero. */ if (!osSystemTimeToFileTime(&time,&ft)){ return SQLITE_ERROR; } #else osGetSystemTimeAsFileTime( &ft ); #endif *piNow = winFiletimeEpoch + ((((sqlite3_int64)ft.dwHighDateTime)*max32BitValue) + (sqlite3_int64)ft.dwLowDateTime)/(sqlite3_int64)10000; #ifdef SQLITE_TEST if( sqlite3_current_time ){ *piNow = 1000*(sqlite3_int64)sqlite3_current_time + unixEpoch; } #endif UNUSED_PARAMETER(pVfs); return SQLITE_OK; } /* ** Find the current time (in Universal Coordinated Time). Write the ** current time and date as a Julian Day number into *prNow and ** return 0. Return 1 if the time and date cannot be found. */ static int winCurrentTime(sqlite3_vfs *pVfs, double *prNow){ int rc; sqlite3_int64 i; rc = winCurrentTimeInt64(pVfs, &i); if( !rc ){ *prNow = i/86400000.0; } return rc; } /* ** The idea is that this function works like a combination of ** GetLastError() and FormatMessage() on Windows (or errno and ** strerror_r() on Unix). After an error is returned by an OS ** function, SQLite calls this function with zBuf pointing to ** a buffer of nBuf bytes. The OS layer should populate the ** buffer with a nul-terminated UTF-8 encoded error message ** describing the last IO error to have occurred within the calling ** thread. ** ** If the error message is too large for the supplied buffer, ** it should be truncated. The return value of xGetLastError ** is zero if the error message fits in the buffer, or non-zero ** otherwise (if the message was truncated). If non-zero is returned, ** then it is not necessary to include the nul-terminator character ** in the output buffer. ** ** Not supplying an error message will have no adverse effect ** on SQLite. It is fine to have an implementation that never ** returns an error message: ** ** int xGetLastError(sqlite3_vfs *pVfs, int nBuf, char *zBuf){ ** assert(zBuf[0]=='\0'); ** return 0; ** } ** ** However if an error message is supplied, it will be incorporated ** by sqlite into the error message available to the user using ** sqlite3_errmsg(), possibly making IO errors easier to debug. */ static int winGetLastError(sqlite3_vfs *pVfs, int nBuf, char *zBuf){ UNUSED_PARAMETER(pVfs); return winGetLastErrorMsg(osGetLastError(), nBuf, zBuf); } /* ** Initialize and deinitialize the operating system interface. 
*/ SQLITE_API int SQLITE_STDCALL sqlite3_os_init(void){ static sqlite3_vfs winVfs = { 3, /* iVersion */ sizeof(winFile), /* szOsFile */ SQLITE_WIN32_MAX_PATH_BYTES, /* mxPathname */ 0, /* pNext */ "win32", /* zName */ 0, /* pAppData */ winOpen, /* xOpen */ winDelete, /* xDelete */ winAccess, /* xAccess */ winFullPathname, /* xFullPathname */ winDlOpen, /* xDlOpen */ winDlError, /* xDlError */ winDlSym, /* xDlSym */ winDlClose, /* xDlClose */ winRandomness, /* xRandomness */ winSleep, /* xSleep */ winCurrentTime, /* xCurrentTime */ winGetLastError, /* xGetLastError */ winCurrentTimeInt64, /* xCurrentTimeInt64 */ winSetSystemCall, /* xSetSystemCall */ winGetSystemCall, /* xGetSystemCall */ winNextSystemCall, /* xNextSystemCall */ }; #if defined(SQLITE_WIN32_HAS_WIDE) static sqlite3_vfs winLongPathVfs = { 3, /* iVersion */ sizeof(winFile), /* szOsFile */ SQLITE_WINNT_MAX_PATH_BYTES, /* mxPathname */ 0, /* pNext */ "win32-longpath", /* zName */ 0, /* pAppData */ winOpen, /* xOpen */ winDelete, /* xDelete */ winAccess, /* xAccess */ winFullPathname, /* xFullPathname */ winDlOpen, /* xDlOpen */ winDlError, /* xDlError */ winDlSym, /* xDlSym */ winDlClose, /* xDlClose */ winRandomness, /* xRandomness */ winSleep, /* xSleep */ winCurrentTime, /* xCurrentTime */ winGetLastError, /* xGetLastError */ winCurrentTimeInt64, /* xCurrentTimeInt64 */ winSetSystemCall, /* xSetSystemCall */ winGetSystemCall, /* xGetSystemCall */ winNextSystemCall, /* xNextSystemCall */ }; #endif /* Double-check that the aSyscall[] array has been constructed ** correctly. See ticket [bb3a86e890c8e96ab] */ assert( ArraySize(aSyscall)==80 ); /* get memory map allocation granularity */ memset(&winSysInfo, 0, sizeof(SYSTEM_INFO)); #if SQLITE_OS_WINRT osGetNativeSystemInfo(&winSysInfo); #else osGetSystemInfo(&winSysInfo); #endif assert( winSysInfo.dwAllocationGranularity>0 ); assert( winSysInfo.dwPageSize>0 ); sqlite3_vfs_register(&winVfs, 1); #if defined(SQLITE_WIN32_HAS_WIDE) sqlite3_vfs_register(&winLongPathVfs, 0); #endif return SQLITE_OK; } SQLITE_API int SQLITE_STDCALL sqlite3_os_end(void){ #if SQLITE_OS_WINRT if( sleepObj!=NULL ){ osCloseHandle(sleepObj); sleepObj = NULL; } #endif return SQLITE_OK; } #endif /* SQLITE_OS_WIN */ /************** End of os_win.c **********************************************/ /************** Begin file bitvec.c ******************************************/ /* ** 2008 February 16 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** This file implements an object that represents a fixed-length ** bitmap. Bits are numbered starting with 1. ** ** A bitmap is used to record which pages of a database file have been ** journalled during a transaction, or which pages have the "dont-write" ** property. Usually only a few pages are meet either condition. ** So the bitmap is usually sparse and has low cardinality. ** But sometimes (for example when during a DROP of a large table) most ** or all of the pages in a database can get journalled. In those cases, ** the bitmap becomes dense with high cardinality. The algorithm needs ** to handle both cases well. ** ** The size of the bitmap is fixed when the object is created. ** ** All bits are clear when the bitmap is created. 
Individual bits ** may be set or cleared one at a time. ** ** Test operations are about 100 times more common that set operations. ** Clear operations are exceedingly rare. There are usually between ** 5 and 500 set operations per Bitvec object, though the number of sets can ** sometimes grow into tens of thousands or larger. The size of the ** Bitvec object is the number of pages in the database file at the ** start of a transaction, and is thus usually less than a few thousand, ** but can be as large as 2 billion for a really big database. */ /* #include "sqliteInt.h" */ /* Size of the Bitvec structure in bytes. */ #define BITVEC_SZ 512 /* Round the union size down to the nearest pointer boundary, since that's how ** it will be aligned within the Bitvec struct. */ #define BITVEC_USIZE (((BITVEC_SZ-(3*sizeof(u32)))/sizeof(Bitvec*))*sizeof(Bitvec*)) /* Type of the array "element" for the bitmap representation. ** Should be a power of 2, and ideally, evenly divide into BITVEC_USIZE. ** Setting this to the "natural word" size of your CPU may improve ** performance. */ #define BITVEC_TELEM u8 /* Size, in bits, of the bitmap element. */ #define BITVEC_SZELEM 8 /* Number of elements in a bitmap array. */ #define BITVEC_NELEM (BITVEC_USIZE/sizeof(BITVEC_TELEM)) /* Number of bits in the bitmap array. */ #define BITVEC_NBIT (BITVEC_NELEM*BITVEC_SZELEM) /* Number of u32 values in hash table. */ #define BITVEC_NINT (BITVEC_USIZE/sizeof(u32)) /* Maximum number of entries in hash table before ** sub-dividing and re-hashing. */ #define BITVEC_MXHASH (BITVEC_NINT/2) /* Hashing function for the aHash representation. ** Empirical testing showed that the *37 multiplier ** (an arbitrary prime)in the hash function provided ** no fewer collisions than the no-op *1. */ #define BITVEC_HASH(X) (((X)*1)%BITVEC_NINT) #define BITVEC_NPTR (BITVEC_USIZE/sizeof(Bitvec *)) /* ** A bitmap is an instance of the following structure. ** ** This bitmap records the existence of zero or more bits ** with values between 1 and iSize, inclusive. ** ** There are three possible representations of the bitmap. ** If iSize<=BITVEC_NBIT, then Bitvec.u.aBitmap[] is a straight ** bitmap. The least significant bit is bit 1. ** ** If iSize>BITVEC_NBIT and iDivisor==0 then Bitvec.u.aHash[] is ** a hash table that will hold up to BITVEC_MXHASH distinct values. ** ** Otherwise, the value i is redirected into one of BITVEC_NPTR ** sub-bitmaps pointed to by Bitvec.u.apSub[]. Each subbitmap ** handles up to iDivisor separate values of i. apSub[0] holds ** values between 1 and iDivisor. apSub[1] holds values between ** iDivisor+1 and 2*iDivisor. apSub[N] holds values between ** N*iDivisor+1 and (N+1)*iDivisor. Each subbitmap is normalized ** to hold deal with values between 1 and iDivisor. */ struct Bitvec { u32 iSize; /* Maximum bit index. Max iSize is 4,294,967,296. */ u32 nSet; /* Number of bits that are set - only valid for aHash ** element. Max is BITVEC_NINT. For BITVEC_SZ of 512, ** this would be 125. */ u32 iDivisor; /* Number of bits handled by each apSub[] entry. */ /* Should >=0 for apSub element. */ /* Max iDivisor is max(u32) / BITVEC_NPTR + 1. */ /* For a BITVEC_SZ of 512, this would be 34,359,739. */ union { BITVEC_TELEM aBitmap[BITVEC_NELEM]; /* Bitmap representation */ u32 aHash[BITVEC_NINT]; /* Hash table representation */ Bitvec *apSub[BITVEC_NPTR]; /* Recursive representation */ } u; }; /* ** Create a new bitmap object able to handle bits between 0 and iSize, ** inclusive. Return a pointer to the new object. 
Return NULL if ** malloc fails. */ SQLITE_PRIVATE Bitvec *sqlite3BitvecCreate(u32 iSize){ Bitvec *p; assert( sizeof(*p)==BITVEC_SZ ); p = sqlite3MallocZero( sizeof(*p) ); if( p ){ p->iSize = iSize; } return p; } /* ** Check to see if the i-th bit is set. Return true or false. ** If p is NULL (if the bitmap has not been created) or if ** i is out of range, then return false. */ SQLITE_PRIVATE int sqlite3BitvecTestNotNull(Bitvec *p, u32 i){ assert( p!=0 ); i--; if( i>=p->iSize ) return 0; while( p->iDivisor ){ u32 bin = i/p->iDivisor; i = i%p->iDivisor; p = p->u.apSub[bin]; if (!p) { return 0; } } if( p->iSize<=BITVEC_NBIT ){ return (p->u.aBitmap[i/BITVEC_SZELEM] & (1<<(i&(BITVEC_SZELEM-1))))!=0; } else{ u32 h = BITVEC_HASH(i++); while( p->u.aHash[h] ){ if( p->u.aHash[h]==i ) return 1; h = (h+1) % BITVEC_NINT; } return 0; } } SQLITE_PRIVATE int sqlite3BitvecTest(Bitvec *p, u32 i){ return p!=0 && sqlite3BitvecTestNotNull(p,i); } /* ** Set the i-th bit. Return 0 on success and an error code if ** anything goes wrong. ** ** This routine might cause sub-bitmaps to be allocated. Failing ** to get the memory needed to hold the sub-bitmap is the only ** that can go wrong with an insert, assuming p and i are valid. ** ** The calling function must ensure that p is a valid Bitvec object ** and that the value for "i" is within range of the Bitvec object. ** Otherwise the behavior is undefined. */ SQLITE_PRIVATE int sqlite3BitvecSet(Bitvec *p, u32 i){ u32 h; if( p==0 ) return SQLITE_OK; assert( i>0 ); assert( i<=p->iSize ); i--; while((p->iSize > BITVEC_NBIT) && p->iDivisor) { u32 bin = i/p->iDivisor; i = i%p->iDivisor; if( p->u.apSub[bin]==0 ){ p->u.apSub[bin] = sqlite3BitvecCreate( p->iDivisor ); if( p->u.apSub[bin]==0 ) return SQLITE_NOMEM; } p = p->u.apSub[bin]; } if( p->iSize<=BITVEC_NBIT ){ p->u.aBitmap[i/BITVEC_SZELEM] |= 1 << (i&(BITVEC_SZELEM-1)); return SQLITE_OK; } h = BITVEC_HASH(i++); /* if there wasn't a hash collision, and this doesn't */ /* completely fill the hash, then just add it without */ /* worring about sub-dividing and re-hashing. */ if( !p->u.aHash[h] ){ if (p->nSet<(BITVEC_NINT-1)) { goto bitvec_set_end; } else { goto bitvec_set_rehash; } } /* there was a collision, check to see if it's already */ /* in hash, if not, try to find a spot for it */ do { if( p->u.aHash[h]==i ) return SQLITE_OK; h++; if( h>=BITVEC_NINT ) h = 0; } while( p->u.aHash[h] ); /* we didn't find it in the hash. h points to the first */ /* available free spot. check to see if this is going to */ /* make our hash too "full". */ bitvec_set_rehash: if( p->nSet>=BITVEC_MXHASH ){ unsigned int j; int rc; u32 *aiValues = sqlite3StackAllocRaw(0, sizeof(p->u.aHash)); if( aiValues==0 ){ return SQLITE_NOMEM; }else{ memcpy(aiValues, p->u.aHash, sizeof(p->u.aHash)); memset(p->u.apSub, 0, sizeof(p->u.apSub)); p->iDivisor = (p->iSize + BITVEC_NPTR - 1)/BITVEC_NPTR; rc = sqlite3BitvecSet(p, i); for(j=0; jnSet++; p->u.aHash[h] = i; return SQLITE_OK; } /* ** Clear the i-th bit. ** ** pBuf must be a pointer to at least BITVEC_SZ bytes of temporary storage ** that BitvecClear can use to rebuilt its hash table. 
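**
** Taken together, the Bitvec primitives are used in roughly the following
** way (a schematic sketch; the size 4000 is arbitrary and pTmp supplies
** the scratch buffer described above):
**
**       Bitvec *p = sqlite3BitvecCreate(4000);
**       void *pTmp = sqlite3Malloc(BITVEC_SZ);
**       if( p && pTmp ){
**         sqlite3BitvecSet(p, 7);
**         assert( sqlite3BitvecTest(p, 7) );
**         sqlite3BitvecClear(p, 7, pTmp);
**         assert( !sqlite3BitvecTest(p, 7) );
**       }
**       sqlite3_free(pTmp);
**       sqlite3BitvecDestroy(p);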
*/ SQLITE_PRIVATE void sqlite3BitvecClear(Bitvec *p, u32 i, void *pBuf){ if( p==0 ) return; assert( i>0 ); i--; while( p->iDivisor ){ u32 bin = i/p->iDivisor; i = i%p->iDivisor; p = p->u.apSub[bin]; if (!p) { return; } } if( p->iSize<=BITVEC_NBIT ){ p->u.aBitmap[i/BITVEC_SZELEM] &= ~(1 << (i&(BITVEC_SZELEM-1))); }else{ unsigned int j; u32 *aiValues = pBuf; memcpy(aiValues, p->u.aHash, sizeof(p->u.aHash)); memset(p->u.aHash, 0, sizeof(p->u.aHash)); p->nSet = 0; for(j=0; jnSet++; while( p->u.aHash[h] ){ h++; if( h>=BITVEC_NINT ) h = 0; } p->u.aHash[h] = aiValues[j]; } } } } /* ** Destroy a bitmap object. Reclaim all memory used. */ SQLITE_PRIVATE void sqlite3BitvecDestroy(Bitvec *p){ if( p==0 ) return; if( p->iDivisor ){ unsigned int i; for(i=0; iu.apSub[i]); } } sqlite3_free(p); } /* ** Return the value of the iSize parameter specified when Bitvec *p ** was created. */ SQLITE_PRIVATE u32 sqlite3BitvecSize(Bitvec *p){ return p->iSize; } #ifndef SQLITE_OMIT_BUILTIN_TEST /* ** Let V[] be an array of unsigned characters sufficient to hold ** up to N bits. Let I be an integer between 0 and N. 0<=I>3] |= (1<<(I&7)) #define CLEARBIT(V,I) V[I>>3] &= ~(1<<(I&7)) #define TESTBIT(V,I) (V[I>>3]&(1<<(I&7)))!=0 /* ** This routine runs an extensive test of the Bitvec code. ** ** The input is an array of integers that acts as a program ** to test the Bitvec. The integers are opcodes followed ** by 0, 1, or 3 operands, depending on the opcode. Another ** opcode follows immediately after the last operand. ** ** There are 6 opcodes numbered from 0 through 5. 0 is the ** "halt" opcode and causes the test to end. ** ** 0 Halt and return the number of errors ** 1 N S X Set N bits beginning with S and incrementing by X ** 2 N S X Clear N bits beginning with S and incrementing by X ** 3 N Set N randomly chosen bits ** 4 N Clear N randomly chosen bits ** 5 N S X Set N bits from S increment X in array only, not in bitvec ** ** The opcodes 1 through 4 perform set and clear operations are performed ** on both a Bitvec object and on a linear array of bits obtained from malloc. ** Opcode 5 works on the linear array only, not on the Bitvec. ** Opcode 5 is used to deliberately induce a fault in order to ** confirm that error detection works. ** ** At the conclusion of the test the linear array is compared ** against the Bitvec object. If there are any differences, ** an error is returned. If they are the same, zero is returned. ** ** If a memory allocation error occurs, return -1. 
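**
** For example (a sketch; the program array and the bitmap size of 4000
** are arbitrary), the following program sets 100 bits starting at bit 1,
** clears 50 bits starting at bit 1 stepping by 2, sets 20 random bits,
** and then halts:
**
**       int aOp[] = { 1,100,1,1,  2,50,1,2,  3,20,  0 };
**       int nErr = sqlite3BitvecBuiltinTest(4000, aOp);
**
** A return value of zero means the Bitvec and the reference bit array
** agreed at every position.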
*/ SQLITE_PRIVATE int sqlite3BitvecBuiltinTest(int sz, int *aOp){ Bitvec *pBitvec = 0; unsigned char *pV = 0; int rc = -1; int i, nx, pc, op; void *pTmpSpace; /* Allocate the Bitvec to be tested and a linear array of ** bits to act as the reference */ pBitvec = sqlite3BitvecCreate( sz ); pV = sqlite3MallocZero( (sz+7)/8 + 1 ); pTmpSpace = sqlite3_malloc64(BITVEC_SZ); if( pBitvec==0 || pV==0 || pTmpSpace==0 ) goto bitvec_end; /* NULL pBitvec tests */ sqlite3BitvecSet(0, 1); sqlite3BitvecClear(0, 1, pTmpSpace); /* Run the program */ pc = 0; while( (op = aOp[pc])!=0 ){ switch( op ){ case 1: case 2: case 5: { nx = 4; i = aOp[pc+2] - 1; aOp[pc+2] += aOp[pc+3]; break; } case 3: case 4: default: { nx = 2; sqlite3_randomness(sizeof(i), &i); break; } } if( (--aOp[pc+1]) > 0 ) nx = 0; pc += nx; i = (i & 0x7fffffff)%sz; if( (op & 1)!=0 ){ SETBIT(pV, (i+1)); if( op!=5 ){ if( sqlite3BitvecSet(pBitvec, i+1) ) goto bitvec_end; } }else{ CLEARBIT(pV, (i+1)); sqlite3BitvecClear(pBitvec, i+1, pTmpSpace); } } /* Test to make sure the linear array exactly matches the ** Bitvec object. Start with the assumption that they do ** match (rc==0). Change rc to non-zero if a discrepancy ** is found. */ rc = sqlite3BitvecTest(0,0) + sqlite3BitvecTest(pBitvec, sz+1) + sqlite3BitvecTest(pBitvec, 0) + (sqlite3BitvecSize(pBitvec) - sz); for(i=1; i<=sz; i++){ if( (TESTBIT(pV,i))!=sqlite3BitvecTest(pBitvec,i) ){ rc = i; break; } } /* Free allocated structure */ bitvec_end: sqlite3_free(pTmpSpace); sqlite3_free(pV); sqlite3BitvecDestroy(pBitvec); return rc; } #endif /* SQLITE_OMIT_BUILTIN_TEST */ /************** End of bitvec.c **********************************************/ /************** Begin file pcache.c ******************************************/ /* ** 2008 August 05 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** This file implements that page cache. */ /* #include "sqliteInt.h" */ /* ** A complete page cache is an instance of this structure. */ struct PCache { PgHdr *pDirty, *pDirtyTail; /* List of dirty pages in LRU order */ PgHdr *pSynced; /* Last synced page in dirty page list */ int nRef; /* Number of referenced pages */ int szCache; /* Configured cache size */ int szPage; /* Size of every page in this cache */ int szExtra; /* Size of extra space for each page */ u8 bPurgeable; /* True if pages are on backing store */ u8 eCreate; /* eCreate value for for xFetch() */ int (*xStress)(void*,PgHdr*); /* Call to try make a page clean */ void *pStress; /* Argument to xStress */ sqlite3_pcache *pCache; /* Pluggable cache module */ }; /********************************** Linked List Management ********************/ /* Allowed values for second argument to pcacheManageDirtyList() */ #define PCACHE_DIRTYLIST_REMOVE 1 /* Remove pPage from dirty list */ #define PCACHE_DIRTYLIST_ADD 2 /* Add pPage to the dirty list */ #define PCACHE_DIRTYLIST_FRONT 3 /* Move pPage to the front of the list */ /* ** Manage pPage's participation on the dirty list. Bits of the addRemove ** argument determines what operation to do. The 0x01 bit means first ** remove pPage from the dirty list. The 0x02 means add pPage back to ** the dirty list. Doing both moves pPage to the front of the dirty list. 
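**
** In other words, PCACHE_DIRTYLIST_FRONT is simply
** (PCACHE_DIRTYLIST_REMOVE | PCACHE_DIRTYLIST_ADD), so a single call
**
**       pcacheManageDirtyList(pPg, PCACHE_DIRTYLIST_FRONT);
**
** is equivalent to removing pPg from the dirty list and then re-adding it
** at the head, which is how "move to the front" is carried out below.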
*/ static void pcacheManageDirtyList(PgHdr *pPage, u8 addRemove){ PCache *p = pPage->pCache; if( addRemove & PCACHE_DIRTYLIST_REMOVE ){ assert( pPage->pDirtyNext || pPage==p->pDirtyTail ); assert( pPage->pDirtyPrev || pPage==p->pDirty ); /* Update the PCache1.pSynced variable if necessary. */ if( p->pSynced==pPage ){ PgHdr *pSynced = pPage->pDirtyPrev; while( pSynced && (pSynced->flags&PGHDR_NEED_SYNC) ){ pSynced = pSynced->pDirtyPrev; } p->pSynced = pSynced; } if( pPage->pDirtyNext ){ pPage->pDirtyNext->pDirtyPrev = pPage->pDirtyPrev; }else{ assert( pPage==p->pDirtyTail ); p->pDirtyTail = pPage->pDirtyPrev; } if( pPage->pDirtyPrev ){ pPage->pDirtyPrev->pDirtyNext = pPage->pDirtyNext; }else{ assert( pPage==p->pDirty ); p->pDirty = pPage->pDirtyNext; if( p->pDirty==0 && p->bPurgeable ){ assert( p->eCreate==1 ); p->eCreate = 2; } } pPage->pDirtyNext = 0; pPage->pDirtyPrev = 0; } if( addRemove & PCACHE_DIRTYLIST_ADD ){ assert( pPage->pDirtyNext==0 && pPage->pDirtyPrev==0 && p->pDirty!=pPage ); pPage->pDirtyNext = p->pDirty; if( pPage->pDirtyNext ){ assert( pPage->pDirtyNext->pDirtyPrev==0 ); pPage->pDirtyNext->pDirtyPrev = pPage; }else{ p->pDirtyTail = pPage; if( p->bPurgeable ){ assert( p->eCreate==2 ); p->eCreate = 1; } } p->pDirty = pPage; if( !p->pSynced && 0==(pPage->flags&PGHDR_NEED_SYNC) ){ p->pSynced = pPage; } } } /* ** Wrapper around the pluggable caches xUnpin method. If the cache is ** being used for an in-memory database, this function is a no-op. */ static void pcacheUnpin(PgHdr *p){ if( p->pCache->bPurgeable ){ sqlite3GlobalConfig.pcache2.xUnpin(p->pCache->pCache, p->pPage, 0); } } /* ** Compute the number of pages of cache requested. p->szCache is the ** cache size requested by the "PRAGMA cache_size" statement. ** ** */ static int numberOfCachePages(PCache *p){ if( p->szCache>=0 ){ /* IMPLEMENTATION-OF: R-42059-47211 If the argument N is positive then the ** suggested cache size is set to N. */ return p->szCache; }else{ /* IMPLEMENTATION-OF: R-61436-13639 If the argument N is negative, then ** the number of cache pages is adjusted to use approximately abs(N*1024) ** bytes of memory. */ return (int)((-1024*(i64)p->szCache)/(p->szPage+p->szExtra)); } } /*************************************************** General Interfaces ****** ** ** Initialize and shutdown the page cache subsystem. Neither of these ** functions are threadsafe. */ SQLITE_PRIVATE int sqlite3PcacheInitialize(void){ if( sqlite3GlobalConfig.pcache2.xInit==0 ){ /* IMPLEMENTATION-OF: R-26801-64137 If the xInit() method is NULL, then the ** built-in default page cache is used instead of the application defined ** page cache. */ sqlite3PCacheSetDefault(); } return sqlite3GlobalConfig.pcache2.xInit(sqlite3GlobalConfig.pcache2.pArg); } SQLITE_PRIVATE void sqlite3PcacheShutdown(void){ if( sqlite3GlobalConfig.pcache2.xShutdown ){ /* IMPLEMENTATION-OF: R-26000-56589 The xShutdown() method may be NULL. */ sqlite3GlobalConfig.pcache2.xShutdown(sqlite3GlobalConfig.pcache2.pArg); } } /* ** Return the size in bytes of a PCache object. */ SQLITE_PRIVATE int sqlite3PcacheSize(void){ return sizeof(PCache); } /* ** Create a new PCache object. Storage space to hold the object ** has already been allocated and is passed in as the p pointer. ** The caller discovers how much space needs to be allocated by ** calling sqlite3PcacheSize(). 
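**
** A minimal sketch of that pattern (the page size of 1024, the zero
** szExtra, and the xStressCallback/pStressArg names are placeholders):
**
**       int rc = SQLITE_NOMEM;
**       PCache *pCache = (PCache *)sqlite3MallocZero( sqlite3PcacheSize() );
**       if( pCache ){
**         rc = sqlite3PcacheOpen(1024, 0, 1, xStressCallback, pStressArg,
**                                pCache);
**       }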
*/ SQLITE_PRIVATE int sqlite3PcacheOpen( int szPage, /* Size of every page */ int szExtra, /* Extra space associated with each page */ int bPurgeable, /* True if pages are on backing store */ int (*xStress)(void*,PgHdr*),/* Call to try to make pages clean */ void *pStress, /* Argument to xStress */ PCache *p /* Preallocated space for the PCache */ ){ memset(p, 0, sizeof(PCache)); p->szPage = 1; p->szExtra = szExtra; p->bPurgeable = bPurgeable; p->eCreate = 2; p->xStress = xStress; p->pStress = pStress; p->szCache = 100; return sqlite3PcacheSetPageSize(p, szPage); } /* ** Change the page size for PCache object. The caller must ensure that there ** are no outstanding page references when this function is called. */ SQLITE_PRIVATE int sqlite3PcacheSetPageSize(PCache *pCache, int szPage){ assert( pCache->nRef==0 && pCache->pDirty==0 ); if( pCache->szPage ){ sqlite3_pcache *pNew; pNew = sqlite3GlobalConfig.pcache2.xCreate( szPage, pCache->szExtra + ROUND8(sizeof(PgHdr)), pCache->bPurgeable ); if( pNew==0 ) return SQLITE_NOMEM; sqlite3GlobalConfig.pcache2.xCachesize(pNew, numberOfCachePages(pCache)); if( pCache->pCache ){ sqlite3GlobalConfig.pcache2.xDestroy(pCache->pCache); } pCache->pCache = pNew; pCache->szPage = szPage; } return SQLITE_OK; } /* ** Try to obtain a page from the cache. ** ** This routine returns a pointer to an sqlite3_pcache_page object if ** such an object is already in cache, or if a new one is created. ** This routine returns a NULL pointer if the object was not in cache ** and could not be created. ** ** The createFlags should be 0 to check for existing pages and should ** be 3 (not 1, but 3) to try to create a new page. ** ** If the createFlag is 0, then NULL is always returned if the page ** is not already in the cache. If createFlag is 1, then a new page ** is created only if that can be done without spilling dirty pages ** and without exceeding the cache size limit. ** ** The caller needs to invoke sqlite3PcacheFetchFinish() to properly ** initialize the sqlite3_pcache_page object and convert it into a ** PgHdr object. The sqlite3PcacheFetch() and sqlite3PcacheFetchFinish() ** routines are split this way for performance reasons. When separated ** they can both (usually) operate without having to push values to ** the stack on entry and pop them back off on exit, which saves a ** lot of pushing and popping. */ SQLITE_PRIVATE sqlite3_pcache_page *sqlite3PcacheFetch( PCache *pCache, /* Obtain the page from this cache */ Pgno pgno, /* Page number to obtain */ int createFlag /* If true, create page if it does not exist already */ ){ int eCreate; assert( pCache!=0 ); assert( pCache->pCache!=0 ); assert( createFlag==3 || createFlag==0 ); assert( pgno>0 ); /* eCreate defines what to do if the page does not exist. ** 0 Do not allocate a new page. (createFlag==0) ** 1 Allocate a new page if doing so is inexpensive. ** (createFlag==1 AND bPurgeable AND pDirty) ** 2 Allocate a new page even it doing so is difficult. 
** (createFlag==1 AND !(bPurgeable AND pDirty) */ eCreate = createFlag & pCache->eCreate; assert( eCreate==0 || eCreate==1 || eCreate==2 ); assert( createFlag==0 || pCache->eCreate==eCreate ); assert( createFlag==0 || eCreate==1+(!pCache->bPurgeable||!pCache->pDirty) ); return sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, pgno, eCreate); } /* ** If the sqlite3PcacheFetch() routine is unable to allocate a new ** page because new clean pages are available for reuse and the cache ** size limit has been reached, then this routine can be invoked to ** try harder to allocate a page. This routine might invoke the stress ** callback to spill dirty pages to the journal. It will then try to ** allocate the new page and will only fail to allocate a new page on ** an OOM error. ** ** This routine should be invoked only after sqlite3PcacheFetch() fails. */ SQLITE_PRIVATE int sqlite3PcacheFetchStress( PCache *pCache, /* Obtain the page from this cache */ Pgno pgno, /* Page number to obtain */ sqlite3_pcache_page **ppPage /* Write result here */ ){ PgHdr *pPg; if( pCache->eCreate==2 ) return 0; /* Find a dirty page to write-out and recycle. First try to find a ** page that does not require a journal-sync (one with PGHDR_NEED_SYNC ** cleared), but if that is not possible settle for any other ** unreferenced dirty page. */ for(pPg=pCache->pSynced; pPg && (pPg->nRef || (pPg->flags&PGHDR_NEED_SYNC)); pPg=pPg->pDirtyPrev ); pCache->pSynced = pPg; if( !pPg ){ for(pPg=pCache->pDirtyTail; pPg && pPg->nRef; pPg=pPg->pDirtyPrev); } if( pPg ){ int rc; #ifdef SQLITE_LOG_CACHE_SPILL sqlite3_log(SQLITE_FULL, "spill page %d making room for %d - cache used: %d/%d", pPg->pgno, pgno, sqlite3GlobalConfig.pcache.xPagecount(pCache->pCache), numberOfCachePages(pCache)); #endif rc = pCache->xStress(pCache->pStress, pPg); if( rc!=SQLITE_OK && rc!=SQLITE_BUSY ){ return rc; } } *ppPage = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, pgno, 2); return *ppPage==0 ? SQLITE_NOMEM : SQLITE_OK; } /* ** This is a helper routine for sqlite3PcacheFetchFinish() ** ** In the uncommon case where the page being fetched has not been ** initialized, this routine is invoked to do the initialization. ** This routine is broken out into a separate function since it ** requires extra stack manipulation that can be avoided in the common ** case. */ static SQLITE_NOINLINE PgHdr *pcacheFetchFinishWithInit( PCache *pCache, /* Obtain the page from this cache */ Pgno pgno, /* Page number obtained */ sqlite3_pcache_page *pPage /* Page obtained by prior PcacheFetch() call */ ){ PgHdr *pPgHdr; assert( pPage!=0 ); pPgHdr = (PgHdr*)pPage->pExtra; assert( pPgHdr->pPage==0 ); memset(pPgHdr, 0, sizeof(PgHdr)); pPgHdr->pPage = pPage; pPgHdr->pData = pPage->pBuf; pPgHdr->pExtra = (void *)&pPgHdr[1]; memset(pPgHdr->pExtra, 0, pCache->szExtra); pPgHdr->pCache = pCache; pPgHdr->pgno = pgno; pPgHdr->flags = PGHDR_CLEAN; return sqlite3PcacheFetchFinish(pCache,pgno,pPage); } /* ** This routine converts the sqlite3_pcache_page object returned by ** sqlite3PcacheFetch() into an initialized PgHdr object. This routine ** must be called after sqlite3PcacheFetch() in order to get a usable ** result. 
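**
** A caller acquiring a page therefore follows roughly this sequence
** (a sketch; pCache and pgno are assumed to be in scope):
**
**       sqlite3_pcache_page *pBase;
**       PgHdr *pPg;
**       pBase = sqlite3PcacheFetch(pCache, pgno, 3);
**       if( pBase==0 ){
**         int rc = sqlite3PcacheFetchStress(pCache, pgno, &pBase);
**         if( rc!=SQLITE_OK ) return rc;
**         if( pBase==0 ) return SQLITE_NOMEM;
**       }
**       pPg = sqlite3PcacheFetchFinish(pCache, pgno, pBase);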
*/ SQLITE_PRIVATE PgHdr *sqlite3PcacheFetchFinish( PCache *pCache, /* Obtain the page from this cache */ Pgno pgno, /* Page number obtained */ sqlite3_pcache_page *pPage /* Page obtained by prior PcacheFetch() call */ ){ PgHdr *pPgHdr; assert( pPage!=0 ); pPgHdr = (PgHdr *)pPage->pExtra; if( !pPgHdr->pPage ){ return pcacheFetchFinishWithInit(pCache, pgno, pPage); } if( 0==pPgHdr->nRef ){ pCache->nRef++; } pPgHdr->nRef++; return pPgHdr; } /* ** Decrement the reference count on a page. If the page is clean and the ** reference count drops to 0, then it is made eligible for recycling. */ SQLITE_PRIVATE void SQLITE_NOINLINE sqlite3PcacheRelease(PgHdr *p){ assert( p->nRef>0 ); p->nRef--; if( p->nRef==0 ){ p->pCache->nRef--; if( p->flags&PGHDR_CLEAN ){ pcacheUnpin(p); }else if( p->pDirtyPrev!=0 ){ /* Move the page to the head of the dirty list. */ pcacheManageDirtyList(p, PCACHE_DIRTYLIST_FRONT); } } } /* ** Increase the reference count of a supplied page by 1. */ SQLITE_PRIVATE void sqlite3PcacheRef(PgHdr *p){ assert(p->nRef>0); p->nRef++; } /* ** Drop a page from the cache. There must be exactly one reference to the ** page. This function deletes that reference, so after it returns the ** page pointed to by p is invalid. */ SQLITE_PRIVATE void sqlite3PcacheDrop(PgHdr *p){ assert( p->nRef==1 ); if( p->flags&PGHDR_DIRTY ){ pcacheManageDirtyList(p, PCACHE_DIRTYLIST_REMOVE); } p->pCache->nRef--; sqlite3GlobalConfig.pcache2.xUnpin(p->pCache->pCache, p->pPage, 1); } /* ** Make sure the page is marked as dirty. If it isn't dirty already, ** make it so. */ SQLITE_PRIVATE void sqlite3PcacheMakeDirty(PgHdr *p){ assert( p->nRef>0 ); if( p->flags & (PGHDR_CLEAN|PGHDR_DONT_WRITE) ){ p->flags &= ~PGHDR_DONT_WRITE; if( p->flags & PGHDR_CLEAN ){ p->flags ^= (PGHDR_DIRTY|PGHDR_CLEAN); assert( (p->flags & (PGHDR_DIRTY|PGHDR_CLEAN))==PGHDR_DIRTY ); pcacheManageDirtyList(p, PCACHE_DIRTYLIST_ADD); } } } /* ** Make sure the page is marked as clean. If it isn't clean already, ** make it so. */ SQLITE_PRIVATE void sqlite3PcacheMakeClean(PgHdr *p){ if( (p->flags & PGHDR_DIRTY) ){ assert( (p->flags & PGHDR_CLEAN)==0 ); pcacheManageDirtyList(p, PCACHE_DIRTYLIST_REMOVE); p->flags &= ~(PGHDR_DIRTY|PGHDR_NEED_SYNC|PGHDR_WRITEABLE); p->flags |= PGHDR_CLEAN; if( p->nRef==0 ){ pcacheUnpin(p); } } } /* ** Make every page in the cache clean. */ SQLITE_PRIVATE void sqlite3PcacheCleanAll(PCache *pCache){ PgHdr *p; while( (p = pCache->pDirty)!=0 ){ sqlite3PcacheMakeClean(p); } } /* ** Clear the PGHDR_NEED_SYNC flag from all dirty pages. */ SQLITE_PRIVATE void sqlite3PcacheClearSyncFlags(PCache *pCache){ PgHdr *p; for(p=pCache->pDirty; p; p=p->pDirtyNext){ p->flags &= ~PGHDR_NEED_SYNC; } pCache->pSynced = pCache->pDirtyTail; } /* ** Change the page number of page p to newPgno. */ SQLITE_PRIVATE void sqlite3PcacheMove(PgHdr *p, Pgno newPgno){ PCache *pCache = p->pCache; assert( p->nRef>0 ); assert( newPgno>0 ); sqlite3GlobalConfig.pcache2.xRekey(pCache->pCache, p->pPage, p->pgno,newPgno); p->pgno = newPgno; if( (p->flags&PGHDR_DIRTY) && (p->flags&PGHDR_NEED_SYNC) ){ pcacheManageDirtyList(p, PCACHE_DIRTYLIST_FRONT); } } /* ** Drop every cache entry whose page number is greater than "pgno". The ** caller must ensure that there are no outstanding references to any pages ** other than page 1 with a page number greater than pgno. ** ** If there is a reference to page 1 and the pgno parameter passed to this ** function is 0, then the data area associated with page 1 is zeroed, but ** the page object is not dropped. 
*/ SQLITE_PRIVATE void sqlite3PcacheTruncate(PCache *pCache, Pgno pgno){ if( pCache->pCache ){ PgHdr *p; PgHdr *pNext; for(p=pCache->pDirty; p; p=pNext){ pNext = p->pDirtyNext; /* This routine never gets call with a positive pgno except right ** after sqlite3PcacheCleanAll(). So if there are dirty pages, ** it must be that pgno==0. */ assert( p->pgno>0 ); if( ALWAYS(p->pgno>pgno) ){ assert( p->flags&PGHDR_DIRTY ); sqlite3PcacheMakeClean(p); } } if( pgno==0 && pCache->nRef ){ sqlite3_pcache_page *pPage1; pPage1 = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache,1,0); if( ALWAYS(pPage1) ){ /* Page 1 is always available in cache, because ** pCache->nRef>0 */ memset(pPage1->pBuf, 0, pCache->szPage); pgno = 1; } } sqlite3GlobalConfig.pcache2.xTruncate(pCache->pCache, pgno+1); } } /* ** Close a cache. */ SQLITE_PRIVATE void sqlite3PcacheClose(PCache *pCache){ assert( pCache->pCache!=0 ); sqlite3GlobalConfig.pcache2.xDestroy(pCache->pCache); } /* ** Discard the contents of the cache. */ SQLITE_PRIVATE void sqlite3PcacheClear(PCache *pCache){ sqlite3PcacheTruncate(pCache, 0); } /* ** Merge two lists of pages connected by pDirty and in pgno order. ** Do not both fixing the pDirtyPrev pointers. */ static PgHdr *pcacheMergeDirtyList(PgHdr *pA, PgHdr *pB){ PgHdr result, *pTail; pTail = &result; while( pA && pB ){ if( pA->pgnopgno ){ pTail->pDirty = pA; pTail = pA; pA = pA->pDirty; }else{ pTail->pDirty = pB; pTail = pB; pB = pB->pDirty; } } if( pA ){ pTail->pDirty = pA; }else if( pB ){ pTail->pDirty = pB; }else{ pTail->pDirty = 0; } return result.pDirty; } /* ** Sort the list of pages in accending order by pgno. Pages are ** connected by pDirty pointers. The pDirtyPrev pointers are ** corrupted by this sort. ** ** Since there cannot be more than 2^31 distinct pages in a database, ** there cannot be more than 31 buckets required by the merge sorter. ** One extra bucket is added to catch overflow in case something ** ever changes to make the previous sentence incorrect. */ #define N_SORT_BUCKET 32 static PgHdr *pcacheSortDirtyList(PgHdr *pIn){ PgHdr *a[N_SORT_BUCKET], *p; int i; memset(a, 0, sizeof(a)); while( pIn ){ p = pIn; pIn = p->pDirty; p->pDirty = 0; for(i=0; ALWAYS(ipDirty; p; p=p->pDirtyNext){ p->pDirty = p->pDirtyNext; } return pcacheSortDirtyList(pCache->pDirty); } /* ** Return the total number of referenced pages held by the cache. */ SQLITE_PRIVATE int sqlite3PcacheRefCount(PCache *pCache){ return pCache->nRef; } /* ** Return the number of references to the page supplied as an argument. */ SQLITE_PRIVATE int sqlite3PcachePageRefcount(PgHdr *p){ return p->nRef; } /* ** Return the total number of pages in the cache. */ SQLITE_PRIVATE int sqlite3PcachePagecount(PCache *pCache){ assert( pCache->pCache!=0 ); return sqlite3GlobalConfig.pcache2.xPagecount(pCache->pCache); } #ifdef SQLITE_TEST /* ** Get the suggested cache-size value. */ SQLITE_PRIVATE int sqlite3PcacheGetCachesize(PCache *pCache){ return numberOfCachePages(pCache); } #endif /* ** Set the suggested cache-size value. */ SQLITE_PRIVATE void sqlite3PcacheSetCachesize(PCache *pCache, int mxPage){ assert( pCache->pCache!=0 ); pCache->szCache = mxPage; sqlite3GlobalConfig.pcache2.xCachesize(pCache->pCache, numberOfCachePages(pCache)); } /* ** Free up as much memory as possible from the page cache. */ SQLITE_PRIVATE void sqlite3PcacheShrink(PCache *pCache){ assert( pCache->pCache!=0 ); sqlite3GlobalConfig.pcache2.xShrink(pCache->pCache); } /* ** Return the size of the header added by this middleware layer ** in the page-cache hierarchy. 
*/ SQLITE_PRIVATE int sqlite3HeaderSizePcache(void){ return ROUND8(sizeof(PgHdr)); } #if defined(SQLITE_CHECK_PAGES) || defined(SQLITE_DEBUG) /* ** For all dirty pages currently in the cache, invoke the specified ** callback. This is only used if the SQLITE_CHECK_PAGES macro is ** defined. */ SQLITE_PRIVATE void sqlite3PcacheIterateDirty(PCache *pCache, void (*xIter)(PgHdr *)){ PgHdr *pDirty; for(pDirty=pCache->pDirty; pDirty; pDirty=pDirty->pDirtyNext){ xIter(pDirty); } } #endif /************** End of pcache.c **********************************************/ /************** Begin file pcache1.c *****************************************/ /* ** 2008 November 05 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** ** This file implements the default page cache implementation (the ** sqlite3_pcache interface). It also contains part of the implementation ** of the SQLITE_CONFIG_PAGECACHE and sqlite3_release_memory() features. ** If the default page cache implementation is overridden, then neither of ** these two features are available. ** ** A Page cache line looks like this: ** ** ------------------------------------------------------------- ** | database page content | PgHdr1 | MemPage | PgHdr | ** ------------------------------------------------------------- ** ** The database page content is up front (so that buffer overreads tend to ** flow harmlessly into the PgHdr1, MemPage, and PgHdr extensions). MemPage ** is the extension added by the btree.c module containing information such ** as the database page number and how that database page is used. PgHdr ** is added by the pcache.c layer and contains information used to keep track ** of which pages are "dirty". PgHdr1 is an extension added by this ** module (pcache1.c). The PgHdr1 header is a subclass of sqlite3_pcache_page. ** PgHdr1 contains information needed to look up a page by its page number. ** The superclass sqlite3_pcache_page.pBuf points to the start of the ** database page content and sqlite3_pcache_page.pExtra points to PgHdr. ** ** The size of the extension (MemPage+PgHdr+PgHdr1) can be determined at ** runtime using sqlite3_config(SQLITE_CONFIG_PCACHE_HDRSZ, &size). The ** sizes of the extensions sum to 272 bytes on x64 for 3.8.10, but this ** size can vary according to architecture, compile-time options, and ** SQLite library version number. ** ** If SQLITE_PCACHE_SEPARATE_HEADER is defined, then the extension is obtained ** using a separate memory allocation from the database page content. This ** seeks to overcome the "clownshoe" problem (also called "internal ** fragmentation" in academic literature) of allocating a few bytes more ** than a power of two with the memory allocator rounding up to the next ** power of two, and leaving the rounded-up space unused. ** ** This module tracks pointers to PgHdr1 objects. Only pcache.c communicates ** with this module. Information is passed back and forth as PgHdr1 pointers. ** ** The pcache.c and pager.c modules deal pointers to PgHdr objects. ** The btree.c module deals with pointers to MemPage objects. 
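**
** For example, an application that wants to size its SQLITE_CONFIG_PAGECACHE
** slots to hold one full cache line can query the extension size at run
** time (szPage and szSlot stand for the application's own variables):
**
**       int szHdr = 0;
**       sqlite3_config(SQLITE_CONFIG_PCACHE_HDRSZ, &szHdr);
**       szSlot = szPage + szHdr;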
** ** SOURCE OF PAGE CACHE MEMORY: ** ** Memory for a page might come from any of three sources: ** ** (1) The general-purpose memory allocator - sqlite3Malloc() ** (2) Global page-cache memory provided using sqlite3_config() with ** SQLITE_CONFIG_PAGECACHE. ** (3) PCache-local bulk allocation. ** ** The third case is a chunk of heap memory (defaulting to 100 pages worth) ** that is allocated when the page cache is created. The size of the local ** bulk allocation can be adjusted using ** ** sqlite3_config(SQLITE_CONFIG_PCACHE, 0, 0, N). ** ** If N is positive, then N pages worth of memory are allocated using a single ** sqlite3Malloc() call and that memory is used for the first N pages allocated. ** Or if N is negative, then -1024*N bytes of memory are allocated and used ** for as many pages as can be accomodated. ** ** Only one of (2) or (3) can be used. Once the memory available to (2) or ** (3) is exhausted, subsequent allocations fail over to the general-purpose ** memory allocator (1). ** ** Earlier versions of SQLite used only methods (1) and (2). But experiments ** show that method (3) with N==100 provides about a 5% performance boost for ** common workloads. */ /* #include "sqliteInt.h" */ typedef struct PCache1 PCache1; typedef struct PgHdr1 PgHdr1; typedef struct PgFreeslot PgFreeslot; typedef struct PGroup PGroup; /* Each page cache (or PCache) belongs to a PGroup. A PGroup is a set ** of one or more PCaches that are able to recycle each other's unpinned ** pages when they are under memory pressure. A PGroup is an instance of ** the following object. ** ** This page cache implementation works in one of two modes: ** ** (1) Every PCache is the sole member of its own PGroup. There is ** one PGroup per PCache. ** ** (2) There is a single global PGroup that all PCaches are a member ** of. ** ** Mode 1 uses more memory (since PCache instances are not able to rob ** unused pages from other PCaches) but it also operates without a mutex, ** and is therefore often faster. Mode 2 requires a mutex in order to be ** threadsafe, but recycles pages more efficiently. ** ** For mode (1), PGroup.mutex is NULL. For mode (2) there is only a single ** PGroup which is the pcache1.grp global variable and its mutex is ** SQLITE_MUTEX_STATIC_LRU. */ struct PGroup { sqlite3_mutex *mutex; /* MUTEX_STATIC_LRU or NULL */ unsigned int nMaxPage; /* Sum of nMax for purgeable caches */ unsigned int nMinPage; /* Sum of nMin for purgeable caches */ unsigned int mxPinned; /* nMaxpage + 10 - nMinPage */ unsigned int nCurrentPage; /* Number of purgeable pages allocated */ PgHdr1 *pLruHead, *pLruTail; /* LRU list of unpinned pages */ }; /* Each page cache is an instance of the following object. Every ** open database file (including each in-memory database and each ** temporary or transient database) has a single page cache which ** is an instance of this object. ** ** Pointers to structures of this type are cast and returned as ** opaque sqlite3_pcache* handles. */ struct PCache1 { /* Cache configuration parameters. Page size (szPage) and the purgeable ** flag (bPurgeable) are set when the cache is created. nMax may be ** modified at any time by a call to the pcache1Cachesize() method. ** The PGroup mutex must be held when accessing nMax. 
*/ PGroup *pGroup; /* PGroup this cache belongs to */ int szPage; /* Size of database content section */ int szExtra; /* sizeof(MemPage)+sizeof(PgHdr) */ int szAlloc; /* Total size of one pcache line */ int bPurgeable; /* True if cache is purgeable */ unsigned int nMin; /* Minimum number of pages reserved */ unsigned int nMax; /* Configured "cache_size" value */ unsigned int n90pct; /* nMax*9/10 */ unsigned int iMaxKey; /* Largest key seen since xTruncate() */ /* Hash table of all pages. The following variables may only be accessed ** when the accessor is holding the PGroup mutex. */ unsigned int nRecyclable; /* Number of pages in the LRU list */ unsigned int nPage; /* Total number of pages in apHash */ unsigned int nHash; /* Number of slots in apHash[] */ PgHdr1 **apHash; /* Hash table for fast lookup by key */ PgHdr1 *pFree; /* List of unused pcache-local pages */ void *pBulk; /* Bulk memory used by pcache-local */ }; /* ** Each cache entry is represented by an instance of the following ** structure. Unless SQLITE_PCACHE_SEPARATE_HEADER is defined, a buffer of ** PgHdr1.pCache->szPage bytes is allocated directly before this structure ** in memory. */ struct PgHdr1 { sqlite3_pcache_page page; unsigned int iKey; /* Key value (page number) */ u8 isPinned; /* Page in use, not on the LRU list */ u8 isBulkLocal; /* This page from bulk local storage */ PgHdr1 *pNext; /* Next in hash table chain */ PCache1 *pCache; /* Cache that currently owns this page */ PgHdr1 *pLruNext; /* Next in LRU list of unpinned pages */ PgHdr1 *pLruPrev; /* Previous in LRU list of unpinned pages */ }; /* ** Free slots in the allocator used to divide up the global page cache ** buffer provided using the SQLITE_CONFIG_PAGECACHE mechanism. */ struct PgFreeslot { PgFreeslot *pNext; /* Next free slot */ }; /* ** Global data used by this cache. */ static SQLITE_WSD struct PCacheGlobal { PGroup grp; /* The global PGroup for mode (2) */ /* Variables related to SQLITE_CONFIG_PAGECACHE settings. The ** szSlot, nSlot, pStart, pEnd, nReserve, and isInit values are all ** fixed at sqlite3_initialize() time and do not require mutex protection. ** The nFreeSlot and pFree values do require mutex protection. */ int isInit; /* True if initialized */ int separateCache; /* Use a new PGroup for each PCache */ int nInitPage; /* Initial bulk allocation size */ int szSlot; /* Size of each free slot */ int nSlot; /* The number of pcache slots */ int nReserve; /* Try to keep nFreeSlot above this */ void *pStart, *pEnd; /* Bounds of global page cache memory */ /* Above requires no mutex. Use mutex below for variable that follow. */ sqlite3_mutex *mutex; /* Mutex for accessing the following: */ PgFreeslot *pFree; /* Free page blocks */ int nFreeSlot; /* Number of unused pcache slots */ /* The following value requires a mutex to change. We skip the mutex on ** reading because (1) most platforms read a 32-bit integer atomically and ** (2) even if an incorrect value is read, no great harm is done since this ** is really just an optimization. */ int bUnderPressure; /* True if low on PAGECACHE memory */ } pcache1_g; /* ** All code in this file should access the global structure above via the ** alias "pcache1". This ensures that the WSD emulation is used when ** compiling for systems that do not support real WSD. */ #define pcache1 (GLOBAL(struct PCacheGlobal, pcache1_g)) /* ** Macros to enter and leave the PCache LRU mutex. 
*/ #if !defined(SQLITE_ENABLE_MEMORY_MANAGEMENT) || SQLITE_THREADSAFE==0 # define pcache1EnterMutex(X) assert((X)->mutex==0) # define pcache1LeaveMutex(X) assert((X)->mutex==0) # define PCACHE1_MIGHT_USE_GROUP_MUTEX 0 #else # define pcache1EnterMutex(X) sqlite3_mutex_enter((X)->mutex) # define pcache1LeaveMutex(X) sqlite3_mutex_leave((X)->mutex) # define PCACHE1_MIGHT_USE_GROUP_MUTEX 1 #endif /******************************************************************************/ /******** Page Allocation/SQLITE_CONFIG_PCACHE Related Functions **************/ /* ** This function is called during initialization if a static buffer is ** supplied to use for the page-cache by passing the SQLITE_CONFIG_PAGECACHE ** verb to sqlite3_config(). Parameter pBuf points to an allocation large ** enough to contain 'n' buffers of 'sz' bytes each. ** ** This routine is called from sqlite3_initialize() and so it is guaranteed ** to be serialized already. There is no need for further mutexing. */ SQLITE_PRIVATE void sqlite3PCacheBufferSetup(void *pBuf, int sz, int n){ if( pcache1.isInit ){ PgFreeslot *p; if( pBuf==0 ) sz = n = 0; sz = ROUNDDOWN8(sz); pcache1.szSlot = sz; pcache1.nSlot = pcache1.nFreeSlot = n; pcache1.nReserve = n>90 ? 10 : (n/10 + 1); pcache1.pStart = pBuf; pcache1.pFree = 0; pcache1.bUnderPressure = 0; while( n-- ){ p = (PgFreeslot*)pBuf; p->pNext = pcache1.pFree; pcache1.pFree = p; pBuf = (void*)&((char*)pBuf)[sz]; } pcache1.pEnd = pBuf; } } /* ** Try to initialize the pCache->pFree and pCache->pBulk fields. Return ** true if pCache->pFree ends up containing one or more free pages. */ static int pcache1InitBulk(PCache1 *pCache){ i64 szBulk; char *zBulk; if( pcache1.nInitPage==0 ) return 0; /* Do not bother with a bulk allocation if the cache size very small */ if( pCache->nMax<3 ) return 0; sqlite3BeginBenignMalloc(); if( pcache1.nInitPage>0 ){ szBulk = pCache->szAlloc * (i64)pcache1.nInitPage; }else{ szBulk = -1024 * (i64)pcache1.nInitPage; } if( szBulk > pCache->szAlloc*(i64)pCache->nMax ){ szBulk = pCache->szAlloc*pCache->nMax; } zBulk = pCache->pBulk = sqlite3Malloc( szBulk ); sqlite3EndBenignMalloc(); if( zBulk ){ int nBulk = sqlite3MallocSize(zBulk)/pCache->szAlloc; int i; for(i=0; iszPage]; pX->page.pBuf = zBulk; pX->page.pExtra = &pX[1]; pX->isBulkLocal = 1; pX->pNext = pCache->pFree; pCache->pFree = pX; zBulk += pCache->szAlloc; } } return pCache->pFree!=0; } /* ** Malloc function used within this file to allocate space from the buffer ** configured using sqlite3_config(SQLITE_CONFIG_PAGECACHE) option. If no ** such buffer exists or there is no space left in it, this function falls ** back to sqlite3Malloc(). ** ** Multiple threads can run this routine at the same time. Global variables ** in pcache1 need to be protected via mutex. */ static void *pcache1Alloc(int nByte){ void *p = 0; assert( sqlite3_mutex_notheld(pcache1.grp.mutex) ); if( nByte<=pcache1.szSlot ){ sqlite3_mutex_enter(pcache1.mutex); p = (PgHdr1 *)pcache1.pFree; if( p ){ pcache1.pFree = pcache1.pFree->pNext; pcache1.nFreeSlot--; pcache1.bUnderPressure = pcache1.nFreeSlot=0 ); sqlite3StatusSet(SQLITE_STATUS_PAGECACHE_SIZE, nByte); sqlite3StatusUp(SQLITE_STATUS_PAGECACHE_USED, 1); } sqlite3_mutex_leave(pcache1.mutex); } if( p==0 ){ /* Memory is not available in the SQLITE_CONFIG_PAGECACHE pool. Get ** it from sqlite3Malloc instead. 
*/ p = sqlite3Malloc(nByte); #ifndef SQLITE_DISABLE_PAGECACHE_OVERFLOW_STATS if( p ){ int sz = sqlite3MallocSize(p); sqlite3_mutex_enter(pcache1.mutex); sqlite3StatusSet(SQLITE_STATUS_PAGECACHE_SIZE, nByte); sqlite3StatusUp(SQLITE_STATUS_PAGECACHE_OVERFLOW, sz); sqlite3_mutex_leave(pcache1.mutex); } #endif sqlite3MemdebugSetType(p, MEMTYPE_PCACHE); } return p; } /* ** Free an allocated buffer obtained from pcache1Alloc(). */ static void pcache1Free(void *p){ int nFreed = 0; if( p==0 ) return; if( p>=pcache1.pStart && ppNext = pcache1.pFree; pcache1.pFree = pSlot; pcache1.nFreeSlot++; pcache1.bUnderPressure = pcache1.nFreeSlot=pcache1.pStart && ppGroup->mutex) ); if( pCache->pFree || (pCache->nPage==0 && pcache1InitBulk(pCache)) ){ p = pCache->pFree; pCache->pFree = p->pNext; p->pNext = 0; }else{ #ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT /* The group mutex must be released before pcache1Alloc() is called. This ** is because it might call sqlite3_release_memory(), which assumes that ** this mutex is not held. */ assert( pcache1.separateCache==0 ); assert( pCache->pGroup==&pcache1.grp ); pcache1LeaveMutex(pCache->pGroup); #endif #ifdef SQLITE_PCACHE_SEPARATE_HEADER pPg = pcache1Alloc(pCache->szPage); p = sqlite3Malloc(sizeof(PgHdr1) + pCache->szExtra); if( !pPg || !p ){ pcache1Free(pPg); sqlite3_free(p); pPg = 0; } #else pPg = pcache1Alloc(pCache->szAlloc); p = (PgHdr1 *)&((u8 *)pPg)[pCache->szPage]; #endif #ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT pcache1EnterMutex(pCache->pGroup); #endif if( pPg==0 ) return 0; p->page.pBuf = pPg; p->page.pExtra = &p[1]; p->isBulkLocal = 0; } if( pCache->bPurgeable ){ pCache->pGroup->nCurrentPage++; } return p; } /* ** Free a page object allocated by pcache1AllocPage(). */ static void pcache1FreePage(PgHdr1 *p){ PCache1 *pCache; assert( p!=0 ); pCache = p->pCache; assert( sqlite3_mutex_held(p->pCache->pGroup->mutex) ); if( p->isBulkLocal ){ p->pNext = pCache->pFree; pCache->pFree = p; }else{ pcache1Free(p->page.pBuf); #ifdef SQLITE_PCACHE_SEPARATE_HEADER sqlite3_free(p); #endif } if( pCache->bPurgeable ){ pCache->pGroup->nCurrentPage--; } } /* ** Malloc function used by SQLite to obtain space from the buffer configured ** using sqlite3_config(SQLITE_CONFIG_PAGECACHE) option. If no such buffer ** exists, this function falls back to sqlite3Malloc(). */ SQLITE_PRIVATE void *sqlite3PageMalloc(int sz){ return pcache1Alloc(sz); } /* ** Free an allocated buffer obtained from sqlite3PageMalloc(). */ SQLITE_PRIVATE void sqlite3PageFree(void *p){ pcache1Free(p); } /* ** Return true if it desirable to avoid allocating a new page cache ** entry. ** ** If memory was allocated specifically to the page cache using ** SQLITE_CONFIG_PAGECACHE but that memory has all been used, then ** it is desirable to avoid allocating a new page cache entry because ** presumably SQLITE_CONFIG_PAGECACHE was suppose to be sufficient ** for all page cache needs and we should not need to spill the ** allocation onto the heap. ** ** Or, the heap is used for all page cache memory but the heap is ** under memory pressure, then again it is desirable to avoid ** allocating a new page cache entry in order to avoid stressing ** the heap even further. 
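**
** [Illustrative, application-level aside; not used by this file.]  The
** exhaustion of the SQLITE_CONFIG_PAGECACHE pool that this routine checks
** for can also be observed from outside through sqlite3_status():
**
**        int nCur = 0, nHi = 0;
**        sqlite3_status(SQLITE_STATUS_PAGECACHE_USED, &nCur, &nHi, 0);
**        // nCur: pool slots currently checked out
**        sqlite3_status(SQLITE_STATUS_PAGECACHE_OVERFLOW, &nCur, &nHi, 0);
**        // nCur: bytes that have already spilled to sqlite3Malloc()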
*/ static int pcache1UnderMemoryPressure(PCache1 *pCache){ if( pcache1.nSlot && (pCache->szPage+pCache->szExtra)<=pcache1.szSlot ){ return pcache1.bUnderPressure; }else{ return sqlite3HeapNearlyFull(); } } /******************************************************************************/ /******** General Implementation Functions ************************************/ /* ** This function is used to resize the hash table used by the cache passed ** as the first argument. ** ** The PCache mutex must be held when this function is called. */ static void pcache1ResizeHash(PCache1 *p){ PgHdr1 **apNew; unsigned int nNew; unsigned int i; assert( sqlite3_mutex_held(p->pGroup->mutex) ); nNew = p->nHash*2; if( nNew<256 ){ nNew = 256; } pcache1LeaveMutex(p->pGroup); if( p->nHash ){ sqlite3BeginBenignMalloc(); } apNew = (PgHdr1 **)sqlite3MallocZero(sizeof(PgHdr1 *)*nNew); if( p->nHash ){ sqlite3EndBenignMalloc(); } pcache1EnterMutex(p->pGroup); if( apNew ){ for(i=0; inHash; i++){ PgHdr1 *pPage; PgHdr1 *pNext = p->apHash[i]; while( (pPage = pNext)!=0 ){ unsigned int h = pPage->iKey % nNew; pNext = pPage->pNext; pPage->pNext = apNew[h]; apNew[h] = pPage; } } sqlite3_free(p->apHash); p->apHash = apNew; p->nHash = nNew; } } /* ** This function is used internally to remove the page pPage from the ** PGroup LRU list, if is part of it. If pPage is not part of the PGroup ** LRU list, then this function is a no-op. ** ** The PGroup mutex must be held when this function is called. */ static PgHdr1 *pcache1PinPage(PgHdr1 *pPage){ PCache1 *pCache; assert( pPage!=0 ); assert( pPage->isPinned==0 ); pCache = pPage->pCache; assert( pPage->pLruNext || pPage==pCache->pGroup->pLruTail ); assert( pPage->pLruPrev || pPage==pCache->pGroup->pLruHead ); assert( sqlite3_mutex_held(pCache->pGroup->mutex) ); if( pPage->pLruPrev ){ pPage->pLruPrev->pLruNext = pPage->pLruNext; }else{ pCache->pGroup->pLruHead = pPage->pLruNext; } if( pPage->pLruNext ){ pPage->pLruNext->pLruPrev = pPage->pLruPrev; }else{ pCache->pGroup->pLruTail = pPage->pLruPrev; } pPage->pLruNext = 0; pPage->pLruPrev = 0; pPage->isPinned = 1; pCache->nRecyclable--; return pPage; } /* ** Remove the page supplied as an argument from the hash table ** (PCache1.apHash structure) that it is currently stored in. ** Also free the page if freePage is true. ** ** The PGroup mutex must be held when this function is called. */ static void pcache1RemoveFromHash(PgHdr1 *pPage, int freeFlag){ unsigned int h; PCache1 *pCache = pPage->pCache; PgHdr1 **pp; assert( sqlite3_mutex_held(pCache->pGroup->mutex) ); h = pPage->iKey % pCache->nHash; for(pp=&pCache->apHash[h]; (*pp)!=pPage; pp=&(*pp)->pNext); *pp = (*pp)->pNext; pCache->nPage--; if( freeFlag ) pcache1FreePage(pPage); } /* ** If there are currently more than nMaxPage pages allocated, try ** to recycle pages to reduce the number allocated to nMaxPage. */ static void pcache1EnforceMaxPage(PCache1 *pCache){ PGroup *pGroup = pCache->pGroup; assert( sqlite3_mutex_held(pGroup->mutex) ); while( pGroup->nCurrentPage>pGroup->nMaxPage && pGroup->pLruTail ){ PgHdr1 *p = pGroup->pLruTail; assert( p->pCache->pGroup==pGroup ); assert( p->isPinned==0 ); pcache1PinPage(p); pcache1RemoveFromHash(p, 1); } if( pCache->nPage==0 && pCache->pBulk ){ sqlite3_free(pCache->pBulk); pCache->pBulk = pCache->pFree = 0; } } /* ** Discard all pages from cache pCache with a page number (key value) ** greater than or equal to iLimit. Any pinned pages that meet this ** criteria are unpinned before they are discarded. 
** ** The PCache mutex must be held when this function is called. */ static void pcache1TruncateUnsafe( PCache1 *pCache, /* The cache to truncate */ unsigned int iLimit /* Drop pages with this pgno or larger */ ){ TESTONLY( unsigned int nPage = 0; ) /* To assert pCache->nPage is correct */ unsigned int h; assert( sqlite3_mutex_held(pCache->pGroup->mutex) ); for(h=0; hnHash; h++){ PgHdr1 **pp = &pCache->apHash[h]; PgHdr1 *pPage; while( (pPage = *pp)!=0 ){ if( pPage->iKey>=iLimit ){ pCache->nPage--; *pp = pPage->pNext; if( !pPage->isPinned ) pcache1PinPage(pPage); pcache1FreePage(pPage); }else{ pp = &pPage->pNext; TESTONLY( nPage++; ) } } } assert( pCache->nPage==nPage ); } /******************************************************************************/ /******** sqlite3_pcache Methods **********************************************/ /* ** Implementation of the sqlite3_pcache.xInit method. */ static int pcache1Init(void *NotUsed){ UNUSED_PARAMETER(NotUsed); assert( pcache1.isInit==0 ); memset(&pcache1, 0, sizeof(pcache1)); /* ** The pcache1.separateCache variable is true if each PCache has its own ** private PGroup (mode-1). pcache1.separateCache is false if the single ** PGroup in pcache1.grp is used for all page caches (mode-2). ** ** * Always use a unified cache (mode-2) if ENABLE_MEMORY_MANAGEMENT ** ** * Use a unified cache in single-threaded applications that have ** configured a start-time buffer for use as page-cache memory using ** sqlite3_config(SQLITE_CONFIG_PAGECACHE, pBuf, sz, N) with non-NULL ** pBuf argument. ** ** * Otherwise use separate caches (mode-1) */ #if defined(SQLITE_ENABLE_MEMORY_MANAGEMENT) pcache1.separateCache = 0; #elif SQLITE_THREADSAFE pcache1.separateCache = sqlite3GlobalConfig.pPage==0 || sqlite3GlobalConfig.bCoreMutex>0; #else pcache1.separateCache = sqlite3GlobalConfig.pPage==0; #endif #if SQLITE_THREADSAFE if( sqlite3GlobalConfig.bCoreMutex ){ pcache1.grp.mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_LRU); pcache1.mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_PMEM); } #endif if( pcache1.separateCache && sqlite3GlobalConfig.nPage!=0 && sqlite3GlobalConfig.pPage==0 ){ pcache1.nInitPage = sqlite3GlobalConfig.nPage; }else{ pcache1.nInitPage = 0; } pcache1.grp.mxPinned = 10; pcache1.isInit = 1; return SQLITE_OK; } /* ** Implementation of the sqlite3_pcache.xShutdown method. ** Note that the static mutex allocated in xInit does ** not need to be freed. */ static void pcache1Shutdown(void *NotUsed){ UNUSED_PARAMETER(NotUsed); assert( pcache1.isInit!=0 ); memset(&pcache1, 0, sizeof(pcache1)); } /* forward declaration */ static void pcache1Destroy(sqlite3_pcache *p); /* ** Implementation of the sqlite3_pcache.xCreate method. ** ** Allocate a new cache. */ static sqlite3_pcache *pcache1Create(int szPage, int szExtra, int bPurgeable){ PCache1 *pCache; /* The newly created page cache */ PGroup *pGroup; /* The group the new page cache will belong to */ int sz; /* Bytes of memory required to allocate the new cache */ assert( (szPage & (szPage-1))==0 && szPage>=512 && szPage<=65536 ); assert( szExtra < 300 ); sz = sizeof(PCache1) + sizeof(PGroup)*pcache1.separateCache; pCache = (PCache1 *)sqlite3MallocZero(sz); if( pCache ){ if( pcache1.separateCache ){ pGroup = (PGroup*)&pCache[1]; pGroup->mxPinned = 10; }else{ pGroup = &pcache1.grp; } pCache->pGroup = pGroup; pCache->szPage = szPage; pCache->szExtra = szExtra; pCache->szAlloc = szPage + szExtra + ROUND8(sizeof(PgHdr1)); pCache->bPurgeable = (bPurgeable ? 
1 : 0); pcache1EnterMutex(pGroup); pcache1ResizeHash(pCache); if( bPurgeable ){ pCache->nMin = 10; pGroup->nMinPage += pCache->nMin; pGroup->mxPinned = pGroup->nMaxPage + 10 - pGroup->nMinPage; } pcache1LeaveMutex(pGroup); if( pCache->nHash==0 ){ pcache1Destroy((sqlite3_pcache*)pCache); pCache = 0; } } return (sqlite3_pcache *)pCache; } /* ** Implementation of the sqlite3_pcache.xCachesize method. ** ** Configure the cache_size limit for a cache. */ static void pcache1Cachesize(sqlite3_pcache *p, int nMax){ PCache1 *pCache = (PCache1 *)p; if( pCache->bPurgeable ){ PGroup *pGroup = pCache->pGroup; pcache1EnterMutex(pGroup); pGroup->nMaxPage += (nMax - pCache->nMax); pGroup->mxPinned = pGroup->nMaxPage + 10 - pGroup->nMinPage; pCache->nMax = nMax; pCache->n90pct = pCache->nMax*9/10; pcache1EnforceMaxPage(pCache); pcache1LeaveMutex(pGroup); } } /* ** Implementation of the sqlite3_pcache.xShrink method. ** ** Free up as much memory as possible. */ static void pcache1Shrink(sqlite3_pcache *p){ PCache1 *pCache = (PCache1*)p; if( pCache->bPurgeable ){ PGroup *pGroup = pCache->pGroup; int savedMaxPage; pcache1EnterMutex(pGroup); savedMaxPage = pGroup->nMaxPage; pGroup->nMaxPage = 0; pcache1EnforceMaxPage(pCache); pGroup->nMaxPage = savedMaxPage; pcache1LeaveMutex(pGroup); } } /* ** Implementation of the sqlite3_pcache.xPagecount method. */ static int pcache1Pagecount(sqlite3_pcache *p){ int n; PCache1 *pCache = (PCache1*)p; pcache1EnterMutex(pCache->pGroup); n = pCache->nPage; pcache1LeaveMutex(pCache->pGroup); return n; } /* ** Implement steps 3, 4, and 5 of the pcache1Fetch() algorithm described ** in the header of the pcache1Fetch() procedure. ** ** This steps are broken out into a separate procedure because they are ** usually not needed, and by avoiding the stack initialization required ** for these steps, the main pcache1Fetch() procedure can run faster. */ static SQLITE_NOINLINE PgHdr1 *pcache1FetchStage2( PCache1 *pCache, unsigned int iKey, int createFlag ){ unsigned int nPinned; PGroup *pGroup = pCache->pGroup; PgHdr1 *pPage = 0; /* Step 3: Abort if createFlag is 1 but the cache is nearly full */ assert( pCache->nPage >= pCache->nRecyclable ); nPinned = pCache->nPage - pCache->nRecyclable; assert( pGroup->mxPinned == pGroup->nMaxPage + 10 - pGroup->nMinPage ); assert( pCache->n90pct == pCache->nMax*9/10 ); if( createFlag==1 && ( nPinned>=pGroup->mxPinned || nPinned>=pCache->n90pct || (pcache1UnderMemoryPressure(pCache) && pCache->nRecyclablenPage>=pCache->nHash ) pcache1ResizeHash(pCache); assert( pCache->nHash>0 && pCache->apHash ); /* Step 4. Try to recycle a page. */ if( pCache->bPurgeable && pGroup->pLruTail && ((pCache->nPage+1>=pCache->nMax) || pcache1UnderMemoryPressure(pCache)) ){ PCache1 *pOther; pPage = pGroup->pLruTail; assert( pPage->isPinned==0 ); pcache1RemoveFromHash(pPage, 0); pcache1PinPage(pPage); pOther = pPage->pCache; if( pOther->szAlloc != pCache->szAlloc ){ pcache1FreePage(pPage); pPage = 0; }else{ pGroup->nCurrentPage -= (pOther->bPurgeable - pCache->bPurgeable); } } /* Step 5. If a usable page buffer has still not been found, ** attempt to allocate a new one. 
*/ if( !pPage ){ if( createFlag==1 ){ sqlite3BeginBenignMalloc(); } pPage = pcache1AllocPage(pCache); if( createFlag==1 ){ sqlite3EndBenignMalloc(); } } if( pPage ){ unsigned int h = iKey % pCache->nHash; pCache->nPage++; pPage->iKey = iKey; pPage->pNext = pCache->apHash[h]; pPage->pCache = pCache; pPage->pLruPrev = 0; pPage->pLruNext = 0; pPage->isPinned = 1; *(void **)pPage->page.pExtra = 0; pCache->apHash[h] = pPage; if( iKey>pCache->iMaxKey ){ pCache->iMaxKey = iKey; } } return pPage; } /* ** Implementation of the sqlite3_pcache.xFetch method. ** ** Fetch a page by key value. ** ** Whether or not a new page may be allocated by this function depends on ** the value of the createFlag argument. 0 means do not allocate a new ** page. 1 means allocate a new page if space is easily available. 2 ** means to try really hard to allocate a new page. ** ** For a non-purgeable cache (a cache used as the storage for an in-memory ** database) there is really no difference between createFlag 1 and 2. So ** the calling function (pcache.c) will never have a createFlag of 1 on ** a non-purgeable cache. ** ** There are three different approaches to obtaining space for a page, ** depending on the value of parameter createFlag (which may be 0, 1 or 2). ** ** 1. Regardless of the value of createFlag, the cache is searched for a ** copy of the requested page. If one is found, it is returned. ** ** 2. If createFlag==0 and the page is not already in the cache, NULL is ** returned. ** ** 3. If createFlag is 1, and the page is not already in the cache, then ** return NULL (do not allocate a new page) if any of the following ** conditions are true: ** ** (a) the number of pages pinned by the cache is greater than ** PCache1.nMax, or ** ** (b) the number of pages pinned by the cache is greater than ** the sum of nMax for all purgeable caches, less the sum of ** nMin for all other purgeable caches, or ** ** 4. If none of the first three conditions apply and the cache is marked ** as purgeable, and if one of the following is true: ** ** (a) The number of pages allocated for the cache is already ** PCache1.nMax, or ** ** (b) The number of pages allocated for all purgeable caches is ** already equal to or greater than the sum of nMax for all ** purgeable caches, ** ** (c) The system is under memory pressure and wants to avoid ** unnecessary pages cache entry allocations ** ** then attempt to recycle a page from the LRU list. If it is the right ** size, return the recycled buffer. Otherwise, free the buffer and ** proceed to step 5. ** ** 5. Otherwise, allocate and return a new page buffer. ** ** There are two versions of this routine. pcache1FetchWithMutex() is ** the general case. pcache1FetchNoMutex() is a faster implementation for ** the common case where pGroup->mutex is NULL. The pcache1Fetch() wrapper ** invokes the appropriate routine. */ static PgHdr1 *pcache1FetchNoMutex( sqlite3_pcache *p, unsigned int iKey, int createFlag ){ PCache1 *pCache = (PCache1 *)p; PgHdr1 *pPage = 0; /* Step 1: Search the hash table for an existing entry. 
*/ pPage = pCache->apHash[iKey % pCache->nHash]; while( pPage && pPage->iKey!=iKey ){ pPage = pPage->pNext; } /* Step 2: Abort if no existing page is found and createFlag is 0 */ if( pPage ){ if( !pPage->isPinned ){ return pcache1PinPage(pPage); }else{ return pPage; } }else if( createFlag ){ /* Steps 3, 4, and 5 implemented by this subroutine */ return pcache1FetchStage2(pCache, iKey, createFlag); }else{ return 0; } } #if PCACHE1_MIGHT_USE_GROUP_MUTEX static PgHdr1 *pcache1FetchWithMutex( sqlite3_pcache *p, unsigned int iKey, int createFlag ){ PCache1 *pCache = (PCache1 *)p; PgHdr1 *pPage; pcache1EnterMutex(pCache->pGroup); pPage = pcache1FetchNoMutex(p, iKey, createFlag); assert( pPage==0 || pCache->iMaxKey>=iKey ); pcache1LeaveMutex(pCache->pGroup); return pPage; } #endif static sqlite3_pcache_page *pcache1Fetch( sqlite3_pcache *p, unsigned int iKey, int createFlag ){ #if PCACHE1_MIGHT_USE_GROUP_MUTEX || defined(SQLITE_DEBUG) PCache1 *pCache = (PCache1 *)p; #endif assert( offsetof(PgHdr1,page)==0 ); assert( pCache->bPurgeable || createFlag!=1 ); assert( pCache->bPurgeable || pCache->nMin==0 ); assert( pCache->bPurgeable==0 || pCache->nMin==10 ); assert( pCache->nMin==0 || pCache->bPurgeable ); assert( pCache->nHash>0 ); #if PCACHE1_MIGHT_USE_GROUP_MUTEX if( pCache->pGroup->mutex ){ return (sqlite3_pcache_page*)pcache1FetchWithMutex(p, iKey, createFlag); }else #endif { return (sqlite3_pcache_page*)pcache1FetchNoMutex(p, iKey, createFlag); } } /* ** Implementation of the sqlite3_pcache.xUnpin method. ** ** Mark a page as unpinned (eligible for asynchronous recycling). */ static void pcache1Unpin( sqlite3_pcache *p, sqlite3_pcache_page *pPg, int reuseUnlikely ){ PCache1 *pCache = (PCache1 *)p; PgHdr1 *pPage = (PgHdr1 *)pPg; PGroup *pGroup = pCache->pGroup; assert( pPage->pCache==pCache ); pcache1EnterMutex(pGroup); /* It is an error to call this function if the page is already ** part of the PGroup LRU list. */ assert( pPage->pLruPrev==0 && pPage->pLruNext==0 ); assert( pGroup->pLruHead!=pPage && pGroup->pLruTail!=pPage ); assert( pPage->isPinned==1 ); if( reuseUnlikely || pGroup->nCurrentPage>pGroup->nMaxPage ){ pcache1RemoveFromHash(pPage, 1); }else{ /* Add the page to the PGroup LRU list. */ if( pGroup->pLruHead ){ pGroup->pLruHead->pLruPrev = pPage; pPage->pLruNext = pGroup->pLruHead; pGroup->pLruHead = pPage; }else{ pGroup->pLruTail = pPage; pGroup->pLruHead = pPage; } pCache->nRecyclable++; pPage->isPinned = 0; } pcache1LeaveMutex(pCache->pGroup); } /* ** Implementation of the sqlite3_pcache.xRekey method. */ static void pcache1Rekey( sqlite3_pcache *p, sqlite3_pcache_page *pPg, unsigned int iOld, unsigned int iNew ){ PCache1 *pCache = (PCache1 *)p; PgHdr1 *pPage = (PgHdr1 *)pPg; PgHdr1 **pp; unsigned int h; assert( pPage->iKey==iOld ); assert( pPage->pCache==pCache ); pcache1EnterMutex(pCache->pGroup); h = iOld%pCache->nHash; pp = &pCache->apHash[h]; while( (*pp)!=pPage ){ pp = &(*pp)->pNext; } *pp = pPage->pNext; h = iNew%pCache->nHash; pPage->iKey = iNew; pPage->pNext = pCache->apHash[h]; pCache->apHash[h] = pPage; if( iNew>pCache->iMaxKey ){ pCache->iMaxKey = iNew; } pcache1LeaveMutex(pCache->pGroup); } /* ** Implementation of the sqlite3_pcache.xTruncate method. ** ** Discard all unpinned pages in the cache with a page number equal to ** or greater than parameter iLimit. Any pinned pages with a page number ** equal to or greater than iLimit are implicitly unpinned. 
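**
** A small worked example (page numbers invented): if the cache currently
** holds pages {1, 2, 5, 7} and xTruncate is called with iLimit==5, then
** pages 5 and 7 are discarded (being unpinned first if necessary), pages
** 1 and 2 are untouched, and the wrapper below resets iMaxKey to 4.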
*/ static void pcache1Truncate(sqlite3_pcache *p, unsigned int iLimit){ PCache1 *pCache = (PCache1 *)p; pcache1EnterMutex(pCache->pGroup); if( iLimit<=pCache->iMaxKey ){ pcache1TruncateUnsafe(pCache, iLimit); pCache->iMaxKey = iLimit-1; } pcache1LeaveMutex(pCache->pGroup); } /* ** Implementation of the sqlite3_pcache.xDestroy method. ** ** Destroy a cache allocated using pcache1Create(). */ static void pcache1Destroy(sqlite3_pcache *p){ PCache1 *pCache = (PCache1 *)p; PGroup *pGroup = pCache->pGroup; assert( pCache->bPurgeable || (pCache->nMax==0 && pCache->nMin==0) ); pcache1EnterMutex(pGroup); pcache1TruncateUnsafe(pCache, 0); assert( pGroup->nMaxPage >= pCache->nMax ); pGroup->nMaxPage -= pCache->nMax; assert( pGroup->nMinPage >= pCache->nMin ); pGroup->nMinPage -= pCache->nMin; pGroup->mxPinned = pGroup->nMaxPage + 10 - pGroup->nMinPage; pcache1EnforceMaxPage(pCache); pcache1LeaveMutex(pGroup); sqlite3_free(pCache->pBulk); sqlite3_free(pCache->apHash); sqlite3_free(pCache); } /* ** This function is called during initialization (sqlite3_initialize()) to ** install the default pluggable cache module, assuming the user has not ** already provided an alternative. */ SQLITE_PRIVATE void sqlite3PCacheSetDefault(void){ static const sqlite3_pcache_methods2 defaultMethods = { 1, /* iVersion */ 0, /* pArg */ pcache1Init, /* xInit */ pcache1Shutdown, /* xShutdown */ pcache1Create, /* xCreate */ pcache1Cachesize, /* xCachesize */ pcache1Pagecount, /* xPagecount */ pcache1Fetch, /* xFetch */ pcache1Unpin, /* xUnpin */ pcache1Rekey, /* xRekey */ pcache1Truncate, /* xTruncate */ pcache1Destroy, /* xDestroy */ pcache1Shrink /* xShrink */ }; sqlite3_config(SQLITE_CONFIG_PCACHE2, &defaultMethods); } /* ** Return the size of the header on each page of this PCACHE implementation. */ SQLITE_PRIVATE int sqlite3HeaderSizePcache1(void){ return ROUND8(sizeof(PgHdr1)); } /* ** Return the global mutex used by this PCACHE implementation. The ** sqlite3_status() routine needs access to this mutex. */ SQLITE_PRIVATE sqlite3_mutex *sqlite3Pcache1Mutex(void){ return pcache1.mutex; } #ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT /* ** This function is called to free superfluous dynamically allocated memory ** held by the pager system. Memory in use by any SQLite pager allocated ** by the current thread may be sqlite3_free()ed. ** ** nReq is the number of bytes of memory required. Once this much has ** been released, the function returns. The return value is the total number ** of bytes of memory released. */ SQLITE_PRIVATE int sqlite3PcacheReleaseMemory(int nReq){ int nFree = 0; assert( sqlite3_mutex_notheld(pcache1.grp.mutex) ); assert( sqlite3_mutex_notheld(pcache1.mutex) ); if( sqlite3GlobalConfig.nPage==0 ){ PgHdr1 *p; pcache1EnterMutex(&pcache1.grp); while( (nReq<0 || nFreepage.pBuf); #ifdef SQLITE_PCACHE_SEPARATE_HEADER nFree += sqlite3MemSize(p); #endif assert( p->isPinned==0 ); pcache1PinPage(p); pcache1RemoveFromHash(p, 1); } pcache1LeaveMutex(&pcache1.grp); } return nFree; } #endif /* SQLITE_ENABLE_MEMORY_MANAGEMENT */ #ifdef SQLITE_TEST /* ** This function is used by test procedures to inspect the internal state ** of the global cache. 
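**
** [Illustrative aside, unrelated to the test hook that follows.]  The
** defaultMethods table installed above can be replaced by an application
** through the same configuration interface; every my* name below stands
** for a hypothetical application-supplied implementation of the
** corresponding method:
**
**        static sqlite3_pcache_methods2 myMethods = {
**          1, 0,                               // iVersion, pArg
**          myInit, myShutdown, myCreate, myCachesize, myPagecount,
**          myFetch, myUnpin, myRekey, myTruncate, myDestroy, myShrink
**        };
**        sqlite3_config(SQLITE_CONFIG_PCACHE2, &myMethods);
**        sqlite3_initialize();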
*/ SQLITE_PRIVATE void sqlite3PcacheStats( int *pnCurrent, /* OUT: Total number of pages cached */ int *pnMax, /* OUT: Global maximum cache size */ int *pnMin, /* OUT: Sum of PCache1.nMin for purgeable caches */ int *pnRecyclable /* OUT: Total number of pages available for recycling */ ){ PgHdr1 *p; int nRecyclable = 0; for(p=pcache1.grp.pLruHead; p; p=p->pLruNext){ assert( p->isPinned==0 ); nRecyclable++; } *pnCurrent = pcache1.grp.nCurrentPage; *pnMax = (int)pcache1.grp.nMaxPage; *pnMin = (int)pcache1.grp.nMinPage; *pnRecyclable = nRecyclable; } #endif /************** End of pcache1.c *********************************************/ /************** Begin file rowset.c ******************************************/ /* ** 2008 December 3 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** ** This module implements an object we call a "RowSet". ** ** The RowSet object is a collection of rowids. Rowids ** are inserted into the RowSet in an arbitrary order. Inserts ** can be intermixed with tests to see if a given rowid has been ** previously inserted into the RowSet. ** ** After all inserts are finished, it is possible to extract the ** elements of the RowSet in sorted order. Once this extraction ** process has started, no new elements may be inserted. ** ** Hence, the primitive operations for a RowSet are: ** ** CREATE ** INSERT ** TEST ** SMALLEST ** DESTROY ** ** The CREATE and DESTROY primitives are the constructor and destructor, ** obviously. The INSERT primitive adds a new element to the RowSet. ** TEST checks to see if an element is already in the RowSet. SMALLEST ** extracts the least value from the RowSet. ** ** The INSERT primitive might allocate additional memory. Memory is ** allocated in chunks so most INSERTs do no allocation. There is an ** upper bound on the size of allocated memory. No memory is freed ** until DESTROY. ** ** The TEST primitive includes a "batch" number. The TEST primitive ** will only see elements that were inserted before the last change ** in the batch number. In other words, if an INSERT occurs between ** two TESTs where the TESTs have the same batch nubmer, then the ** value added by the INSERT will not be visible to the second TEST. ** The initial batch number is zero, so if the very first TEST contains ** a non-zero batch number, it will see all prior INSERTs. ** ** No INSERTs may occurs after a SMALLEST. An assertion will fail if ** that is attempted. ** ** The cost of an INSERT is roughly constant. (Sometimes new memory ** has to be allocated on an INSERT.) The cost of a TEST with a new ** batch number is O(NlogN) where N is the number of elements in the RowSet. ** The cost of a TEST using the same batch number is O(logN). The cost ** of the first SMALLEST is O(NlogN). Second and subsequent SMALLEST ** primitives are constant time. The cost of DESTROY is O(N). ** ** There is an added cost of O(N) when switching between TEST and ** SMALLEST primitives. */ /* #include "sqliteInt.h" */ /* ** Target size for allocation chunks. */ #define ROWSET_ALLOCATION_SIZE 1024 /* ** The number of rowset entries per allocation chunk. 
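**
** As a concrete, assumption-laden sanity check of the sizing below: on a
** typical 64-bit build sizeof(struct RowSetEntry) is 24 bytes (one i64
** plus two pointers), and 8 bytes of each chunk are set aside for the
** pNextChunk link, so
**
**        ROWSET_ENTRY_PER_CHUNK = (1024 - 8)/24 = 42 entries per chunk.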
*/ #define ROWSET_ENTRY_PER_CHUNK \ ((ROWSET_ALLOCATION_SIZE-8)/sizeof(struct RowSetEntry)) /* ** Each entry in a RowSet is an instance of the following object. ** ** This same object is reused to store a linked list of trees of RowSetEntry ** objects. In that alternative use, pRight points to the next entry ** in the list, pLeft points to the tree, and v is unused. The ** RowSet.pForest value points to the head of this forest list. */ struct RowSetEntry { i64 v; /* ROWID value for this entry */ struct RowSetEntry *pRight; /* Right subtree (larger entries) or list */ struct RowSetEntry *pLeft; /* Left subtree (smaller entries) */ }; /* ** RowSetEntry objects are allocated in large chunks (instances of the ** following structure) to reduce memory allocation overhead. The ** chunks are kept on a linked list so that they can be deallocated ** when the RowSet is destroyed. */ struct RowSetChunk { struct RowSetChunk *pNextChunk; /* Next chunk on list of them all */ struct RowSetEntry aEntry[ROWSET_ENTRY_PER_CHUNK]; /* Allocated entries */ }; /* ** A RowSet in an instance of the following structure. ** ** A typedef of this structure if found in sqliteInt.h. */ struct RowSet { struct RowSetChunk *pChunk; /* List of all chunk allocations */ sqlite3 *db; /* The database connection */ struct RowSetEntry *pEntry; /* List of entries using pRight */ struct RowSetEntry *pLast; /* Last entry on the pEntry list */ struct RowSetEntry *pFresh; /* Source of new entry objects */ struct RowSetEntry *pForest; /* List of binary trees of entries */ u16 nFresh; /* Number of objects on pFresh */ u16 rsFlags; /* Various flags */ int iBatch; /* Current insert batch */ }; /* ** Allowed values for RowSet.rsFlags */ #define ROWSET_SORTED 0x01 /* True if RowSet.pEntry is sorted */ #define ROWSET_NEXT 0x02 /* True if sqlite3RowSetNext() has been called */ /* ** Turn bulk memory into a RowSet object. N bytes of memory ** are available at pSpace. The db pointer is used as a memory context ** for any subsequent allocations that need to occur. ** Return a pointer to the new RowSet object. ** ** It must be the case that N is sufficient to make a Rowset. If not ** an assertion fault occurs. ** ** If N is larger than the minimum, use the surplus as an initial ** allocation of entries available to be filled. */ SQLITE_PRIVATE RowSet *sqlite3RowSetInit(sqlite3 *db, void *pSpace, unsigned int N){ RowSet *p; assert( N >= ROUND8(sizeof(*p)) ); p = pSpace; p->pChunk = 0; p->db = db; p->pEntry = 0; p->pLast = 0; p->pForest = 0; p->pFresh = (struct RowSetEntry*)(ROUND8(sizeof(*p)) + (char*)p); p->nFresh = (u16)((N - ROUND8(sizeof(*p)))/sizeof(struct RowSetEntry)); p->rsFlags = ROWSET_SORTED; p->iBatch = 0; return p; } /* ** Deallocate all chunks from a RowSet. This frees all memory that ** the RowSet has allocated over its lifetime. This routine is ** the destructor for the RowSet. */ SQLITE_PRIVATE void sqlite3RowSetClear(RowSet *p){ struct RowSetChunk *pChunk, *pNextChunk; for(pChunk=p->pChunk; pChunk; pChunk = pNextChunk){ pNextChunk = pChunk->pNextChunk; sqlite3DbFree(p->db, pChunk); } p->pChunk = 0; p->nFresh = 0; p->pEntry = 0; p->pLast = 0; p->pForest = 0; p->rsFlags = ROWSET_SORTED; } /* ** Allocate a new RowSetEntry object that is associated with the ** given RowSet. Return a pointer to the new and completely uninitialized ** objected. ** ** In an OOM situation, the RowSet.db->mallocFailed flag is set and this ** routine returns NULL. 
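**
** [Illustrative sketch only; the buffer size and rowid values are
** invented.]  Stepping back from the allocator internals, the primitives
** described in the module header are typically driven like this:
**
**        int N = ROUND8(sizeof(RowSet)) + 64;
**        void *pSpace = sqlite3DbMallocRaw(db, N);
**        if( pSpace ){
**          RowSet *pSet = sqlite3RowSetInit(db, pSpace, N);   // CREATE
**          sqlite3RowSetInsert(pSet, 42);                     // INSERT
**          sqlite3RowSetInsert(pSet, 7);
**          i64 rowid;
**          while( sqlite3RowSetNext(pSet, &rowid) ){}         // SMALLEST: 7, then 42
**          sqlite3RowSetClear(pSet);                          // DESTROY
**          sqlite3DbFree(db, pSpace);
**        }
**
** Note that a given RowSet is used either this way (SMALLEST) or with
** sqlite3RowSetTest(); calls to the two are not interleaved on the same
** object (see the assert in rowSetToList() below).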
*/ static struct RowSetEntry *rowSetEntryAlloc(RowSet *p){ assert( p!=0 ); if( p->nFresh==0 ){ struct RowSetChunk *pNew; pNew = sqlite3DbMallocRaw(p->db, sizeof(*pNew)); if( pNew==0 ){ return 0; } pNew->pNextChunk = p->pChunk; p->pChunk = pNew; p->pFresh = pNew->aEntry; p->nFresh = ROWSET_ENTRY_PER_CHUNK; } p->nFresh--; return p->pFresh++; } /* ** Insert a new value into a RowSet. ** ** The mallocFailed flag of the database connection is set if a ** memory allocation fails. */ SQLITE_PRIVATE void sqlite3RowSetInsert(RowSet *p, i64 rowid){ struct RowSetEntry *pEntry; /* The new entry */ struct RowSetEntry *pLast; /* The last prior entry */ /* This routine is never called after sqlite3RowSetNext() */ assert( p!=0 && (p->rsFlags & ROWSET_NEXT)==0 ); pEntry = rowSetEntryAlloc(p); if( pEntry==0 ) return; pEntry->v = rowid; pEntry->pRight = 0; pLast = p->pLast; if( pLast ){ if( (p->rsFlags & ROWSET_SORTED)!=0 && rowid<=pLast->v ){ p->rsFlags &= ~ROWSET_SORTED; } pLast->pRight = pEntry; }else{ p->pEntry = pEntry; } p->pLast = pEntry; } /* ** Merge two lists of RowSetEntry objects. Remove duplicates. ** ** The input lists are connected via pRight pointers and are ** assumed to each already be in sorted order. */ static struct RowSetEntry *rowSetEntryMerge( struct RowSetEntry *pA, /* First sorted list to be merged */ struct RowSetEntry *pB /* Second sorted list to be merged */ ){ struct RowSetEntry head; struct RowSetEntry *pTail; pTail = &head; while( pA && pB ){ assert( pA->pRight==0 || pA->v<=pA->pRight->v ); assert( pB->pRight==0 || pB->v<=pB->pRight->v ); if( pA->vv ){ pTail->pRight = pA; pA = pA->pRight; pTail = pTail->pRight; }else if( pB->vv ){ pTail->pRight = pB; pB = pB->pRight; pTail = pTail->pRight; }else{ pA = pA->pRight; } } if( pA ){ assert( pA->pRight==0 || pA->v<=pA->pRight->v ); pTail->pRight = pA; }else{ assert( pB==0 || pB->pRight==0 || pB->v<=pB->pRight->v ); pTail->pRight = pB; } return head.pRight; } /* ** Sort all elements on the list of RowSetEntry objects into order of ** increasing v. */ static struct RowSetEntry *rowSetEntrySort(struct RowSetEntry *pIn){ unsigned int i; struct RowSetEntry *pNext, *aBucket[40]; memset(aBucket, 0, sizeof(aBucket)); while( pIn ){ pNext = pIn->pRight; pIn->pRight = 0; for(i=0; aBucket[i]; i++){ pIn = rowSetEntryMerge(aBucket[i], pIn); aBucket[i] = 0; } aBucket[i] = pIn; pIn = pNext; } pIn = 0; for(i=0; ipLeft ){ struct RowSetEntry *p; rowSetTreeToList(pIn->pLeft, ppFirst, &p); p->pRight = pIn; }else{ *ppFirst = pIn; } if( pIn->pRight ){ rowSetTreeToList(pIn->pRight, &pIn->pRight, ppLast); }else{ *ppLast = pIn; } assert( (*ppLast)->pRight==0 ); } /* ** Convert a sorted list of elements (connected by pRight) into a binary ** tree with depth of iDepth. A depth of 1 means the tree contains a single ** node taken from the head of *ppList. A depth of 2 means a tree with ** three nodes. And so forth. ** ** Use as many entries from the input list as required and update the ** *ppList to point to the unused elements of the list. If the input ** list contains too few elements, then construct an incomplete tree ** and leave *ppList set to NULL. ** ** Return a pointer to the root of the constructed binary tree. 
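**
** A worked example (list contents invented): given the sorted list
** 1,2,3,4,5,6,7 and iDepth==3, this routine consumes all seven entries
** and returns the balanced tree
**
**                       4
**                      / \
**                     2   6
**                    / \ / \
**                   1  3 5  7
**
** and, assuming the list held exactly these seven entries, leaves
** *ppList set to NULL.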
*/ static struct RowSetEntry *rowSetNDeepTree( struct RowSetEntry **ppList, int iDepth ){ struct RowSetEntry *p; /* Root of the new tree */ struct RowSetEntry *pLeft; /* Left subtree */ if( *ppList==0 ){ return 0; } if( iDepth==1 ){ p = *ppList; *ppList = p->pRight; p->pLeft = p->pRight = 0; return p; } pLeft = rowSetNDeepTree(ppList, iDepth-1); p = *ppList; if( p==0 ){ return pLeft; } p->pLeft = pLeft; *ppList = p->pRight; p->pRight = rowSetNDeepTree(ppList, iDepth-1); return p; } /* ** Convert a sorted list of elements into a binary tree. Make the tree ** as deep as it needs to be in order to contain the entire list. */ static struct RowSetEntry *rowSetListToTree(struct RowSetEntry *pList){ int iDepth; /* Depth of the tree so far */ struct RowSetEntry *p; /* Current tree root */ struct RowSetEntry *pLeft; /* Left subtree */ assert( pList!=0 ); p = pList; pList = p->pRight; p->pLeft = p->pRight = 0; for(iDepth=1; pList; iDepth++){ pLeft = p; p = pList; pList = p->pRight; p->pLeft = pLeft; p->pRight = rowSetNDeepTree(&pList, iDepth); } return p; } /* ** Take all the entries on p->pEntry and on the trees in p->pForest and ** sort them all together into one big ordered list on p->pEntry. ** ** This routine should only be called once in the life of a RowSet. */ static void rowSetToList(RowSet *p){ /* This routine is called only once */ assert( p!=0 && (p->rsFlags & ROWSET_NEXT)==0 ); if( (p->rsFlags & ROWSET_SORTED)==0 ){ p->pEntry = rowSetEntrySort(p->pEntry); } /* While this module could theoretically support it, sqlite3RowSetNext() ** is never called after sqlite3RowSetText() for the same RowSet. So ** there is never a forest to deal with. Should this change, simply ** remove the assert() and the #if 0. */ assert( p->pForest==0 ); #if 0 while( p->pForest ){ struct RowSetEntry *pTree = p->pForest->pLeft; if( pTree ){ struct RowSetEntry *pHead, *pTail; rowSetTreeToList(pTree, &pHead, &pTail); p->pEntry = rowSetEntryMerge(p->pEntry, pHead); } p->pForest = p->pForest->pRight; } #endif p->rsFlags |= ROWSET_NEXT; /* Verify this routine is never called again */ } /* ** Extract the smallest element from the RowSet. ** Write the element into *pRowid. Return 1 on success. Return ** 0 if the RowSet is already empty. ** ** After this routine has been called, the sqlite3RowSetInsert() ** routine may not be called again. */ SQLITE_PRIVATE int sqlite3RowSetNext(RowSet *p, i64 *pRowid){ assert( p!=0 ); /* Merge the forest into a single sorted list on first call */ if( (p->rsFlags & ROWSET_NEXT)==0 ) rowSetToList(p); /* Return the next entry on the list */ if( p->pEntry ){ *pRowid = p->pEntry->v; p->pEntry = p->pEntry->pRight; if( p->pEntry==0 ){ sqlite3RowSetClear(p); } return 1; }else{ return 0; } } /* ** Check to see if element iRowid was inserted into the rowset as ** part of any insert batch prior to iBatch. Return 1 or 0. ** ** If this is the first test of a new batch and if there exist entries ** on pRowSet->pEntry, then sort those entries into the forest at ** pRowSet->pForest so that they can be tested. 
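**
** A short worked example of the batch rule (rowids invented):
**
**        sqlite3RowSetInsert(pRowSet, 10);
**        sqlite3RowSetTest(pRowSet, 1, 10);    // batch goes 0 to 1: returns 1
**        sqlite3RowSetInsert(pRowSet, 11);
**        sqlite3RowSetTest(pRowSet, 1, 11);    // same batch: returns 0
**        sqlite3RowSetTest(pRowSet, 2, 11);    // batch goes 1 to 2: returns 1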
*/ SQLITE_PRIVATE int sqlite3RowSetTest(RowSet *pRowSet, int iBatch, sqlite3_int64 iRowid){ struct RowSetEntry *p, *pTree; /* This routine is never called after sqlite3RowSetNext() */ assert( pRowSet!=0 && (pRowSet->rsFlags & ROWSET_NEXT)==0 ); /* Sort entries into the forest on the first test of a new batch */ if( iBatch!=pRowSet->iBatch ){ p = pRowSet->pEntry; if( p ){ struct RowSetEntry **ppPrevTree = &pRowSet->pForest; if( (pRowSet->rsFlags & ROWSET_SORTED)==0 ){ p = rowSetEntrySort(p); } for(pTree = pRowSet->pForest; pTree; pTree=pTree->pRight){ ppPrevTree = &pTree->pRight; if( pTree->pLeft==0 ){ pTree->pLeft = rowSetListToTree(p); break; }else{ struct RowSetEntry *pAux, *pTail; rowSetTreeToList(pTree->pLeft, &pAux, &pTail); pTree->pLeft = 0; p = rowSetEntryMerge(pAux, p); } } if( pTree==0 ){ *ppPrevTree = pTree = rowSetEntryAlloc(pRowSet); if( pTree ){ pTree->v = 0; pTree->pRight = 0; pTree->pLeft = rowSetListToTree(p); } } pRowSet->pEntry = 0; pRowSet->pLast = 0; pRowSet->rsFlags |= ROWSET_SORTED; } pRowSet->iBatch = iBatch; } /* Test to see if the iRowid value appears anywhere in the forest. ** Return 1 if it does and 0 if not. */ for(pTree = pRowSet->pForest; pTree; pTree=pTree->pRight){ p = pTree->pLeft; while( p ){ if( p->vpRight; }else if( p->v>iRowid ){ p = p->pLeft; }else{ return 1; } } } return 0; } /************** End of rowset.c **********************************************/ /************** Begin file pager.c *******************************************/ /* ** 2001 September 15 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** This is the implementation of the page cache subsystem or "pager". ** ** The pager is used to access a database disk file. It implements ** atomic commit and rollback through the use of a journal file that ** is separate from the database file. The pager also implements file ** locking to prevent two processes from writing the same database ** file simultaneously, or one process from reading the database while ** another is writing. */ #ifndef SQLITE_OMIT_DISKIO /* #include "sqliteInt.h" */ /************** Include wal.h in the middle of pager.c ***********************/ /************** Begin file wal.h *********************************************/ /* ** 2010 February 1 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** This header file defines the interface to the write-ahead logging ** system. Refer to the comments below and the header comment attached to ** the implementation of each function in log.c for further details. 
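**
** [Application-level context, illustrative only; the filename is made
** up.]  A database is switched into the journaling mode that uses this
** layer with the journal_mode pragma:
**
**        sqlite3 *db;
**        sqlite3_open("test.db", &db);
**        sqlite3_exec(db, "PRAGMA journal_mode=WAL;", 0, 0, 0);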
*/ #ifndef _WAL_H_ #define _WAL_H_ /* #include "sqliteInt.h" */ /* Additional values that can be added to the sync_flags argument of ** sqlite3WalFrames(): */ #define WAL_SYNC_TRANSACTIONS 0x20 /* Sync at the end of each transaction */ #define SQLITE_SYNC_MASK 0x13 /* Mask off the SQLITE_SYNC_* values */ #ifdef SQLITE_OMIT_WAL # define sqlite3WalOpen(x,y,z) 0 # define sqlite3WalLimit(x,y) # define sqlite3WalClose(w,x,y,z) 0 # define sqlite3WalBeginReadTransaction(y,z) 0 # define sqlite3WalEndReadTransaction(z) # define sqlite3WalDbsize(y) 0 # define sqlite3WalBeginWriteTransaction(y) 0 # define sqlite3WalEndWriteTransaction(x) 0 # define sqlite3WalUndo(x,y,z) 0 # define sqlite3WalSavepoint(y,z) # define sqlite3WalSavepointUndo(y,z) 0 # define sqlite3WalFrames(u,v,w,x,y,z) 0 # define sqlite3WalCheckpoint(r,s,t,u,v,w,x,y,z) 0 # define sqlite3WalCallback(z) 0 # define sqlite3WalExclusiveMode(y,z) 0 # define sqlite3WalHeapMemory(z) 0 # define sqlite3WalFramesize(z) 0 # define sqlite3WalFindFrame(x,y,z) 0 #else #define WAL_SAVEPOINT_NDATA 4 /* Connection to a write-ahead log (WAL) file. ** There is one object of this type for each pager. */ typedef struct Wal Wal; /* Open and close a connection to a write-ahead log. */ SQLITE_PRIVATE int sqlite3WalOpen(sqlite3_vfs*, sqlite3_file*, const char *, int, i64, Wal**); SQLITE_PRIVATE int sqlite3WalClose(Wal *pWal, int sync_flags, int, u8 *); /* Set the limiting size of a WAL file. */ SQLITE_PRIVATE void sqlite3WalLimit(Wal*, i64); /* Used by readers to open (lock) and close (unlock) a snapshot. A ** snapshot is like a read-transaction. It is the state of the database ** at an instant in time. sqlite3WalOpenSnapshot gets a read lock and ** preserves the current state even if the other threads or processes ** write to or checkpoint the WAL. sqlite3WalCloseSnapshot() closes the ** transaction and releases the lock. */ SQLITE_PRIVATE int sqlite3WalBeginReadTransaction(Wal *pWal, int *); SQLITE_PRIVATE void sqlite3WalEndReadTransaction(Wal *pWal); /* Read a page from the write-ahead log, if it is present. */ SQLITE_PRIVATE int sqlite3WalFindFrame(Wal *, Pgno, u32 *); SQLITE_PRIVATE int sqlite3WalReadFrame(Wal *, u32, int, u8 *); /* If the WAL is not empty, return the size of the database. */ SQLITE_PRIVATE Pgno sqlite3WalDbsize(Wal *pWal); /* Obtain or release the WRITER lock. */ SQLITE_PRIVATE int sqlite3WalBeginWriteTransaction(Wal *pWal); SQLITE_PRIVATE int sqlite3WalEndWriteTransaction(Wal *pWal); /* Undo any frames written (but not committed) to the log */ SQLITE_PRIVATE int sqlite3WalUndo(Wal *pWal, int (*xUndo)(void *, Pgno), void *pUndoCtx); /* Return an integer that records the current (uncommitted) write ** position in the WAL */ SQLITE_PRIVATE void sqlite3WalSavepoint(Wal *pWal, u32 *aWalData); /* Move the write position of the WAL back to iFrame. Called in ** response to a ROLLBACK TO command. */ SQLITE_PRIVATE int sqlite3WalSavepointUndo(Wal *pWal, u32 *aWalData); /* Write a frame or frames to the log. 
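**
** [Illustrative aside.]  Frames written by the function below are later
** copied back into the database file by sqlite3WalCheckpoint().  From the
** application's side a checkpoint can be requested through the public
** wrapper, e.g. (a zDb of 0 means every attached WAL database):
**
**        int nLog = 0, nCkpt = 0;
**        sqlite3_wal_checkpoint_v2(db, 0, SQLITE_CHECKPOINT_PASSIVE,
**                                  &nLog, &nCkpt);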
*/ SQLITE_PRIVATE int sqlite3WalFrames(Wal *pWal, int, PgHdr *, Pgno, int, int); /* Copy pages from the log to the database file */ SQLITE_PRIVATE int sqlite3WalCheckpoint( Wal *pWal, /* Write-ahead log connection */ int eMode, /* One of PASSIVE, FULL and RESTART */ int (*xBusy)(void*), /* Function to call when busy */ void *pBusyArg, /* Context argument for xBusyHandler */ int sync_flags, /* Flags to sync db file with (or 0) */ int nBuf, /* Size of buffer nBuf */ u8 *zBuf, /* Temporary buffer to use */ int *pnLog, /* OUT: Number of frames in WAL */ int *pnCkpt /* OUT: Number of backfilled frames in WAL */ ); /* Return the value to pass to a sqlite3_wal_hook callback, the ** number of frames in the WAL at the point of the last commit since ** sqlite3WalCallback() was called. If no commits have occurred since ** the last call, then return 0. */ SQLITE_PRIVATE int sqlite3WalCallback(Wal *pWal); /* Tell the wal layer that an EXCLUSIVE lock has been obtained (or released) ** by the pager layer on the database file. */ SQLITE_PRIVATE int sqlite3WalExclusiveMode(Wal *pWal, int op); /* Return true if the argument is non-NULL and the WAL module is using ** heap-memory for the wal-index. Otherwise, if the argument is NULL or the ** WAL module is using shared-memory, return false. */ SQLITE_PRIVATE int sqlite3WalHeapMemory(Wal *pWal); #ifdef SQLITE_ENABLE_ZIPVFS /* If the WAL file is not empty, return the number of bytes of content ** stored in each frame (i.e. the db page-size when the WAL was created). */ SQLITE_PRIVATE int sqlite3WalFramesize(Wal *pWal); #endif #endif /* ifndef SQLITE_OMIT_WAL */ #endif /* _WAL_H_ */ /************** End of wal.h *************************************************/ /************** Continuing where we left off in pager.c **********************/ /******************* NOTES ON THE DESIGN OF THE PAGER ************************ ** ** This comment block describes invariants that hold when using a rollback ** journal. These invariants do not apply for journal_mode=WAL, ** journal_mode=MEMORY, or journal_mode=OFF. ** ** Within this comment block, a page is deemed to have been synced ** automatically as soon as it is written when PRAGMA synchronous=OFF. ** Otherwise, the page is not synced until the xSync method of the VFS ** is called successfully on the file containing the page. ** ** Definition: A page of the database file is said to be "overwriteable" if ** one or more of the following are true about the page: ** ** (a) The original content of the page as it was at the beginning of ** the transaction has been written into the rollback journal and ** synced. ** ** (b) The page was a freelist leaf page at the start of the transaction. ** ** (c) The page number is greater than the largest page that existed in ** the database file at the start of the transaction. ** ** (1) A page of the database file is never overwritten unless one of the ** following are true: ** ** (a) The page and all other pages on the same sector are overwriteable. ** ** (b) The atomic page write optimization is enabled, and the entire ** transaction other than the update of the transaction sequence ** number consists of a single page change. ** ** (2) The content of a page written into the rollback journal exactly matches ** both the content in the database when the rollback journal was written ** and the content in the database at the beginning of the current ** transaction. ** ** (3) Writes to the database file are an integer multiple of the page size ** in length and are aligned on a page boundary. 
** ** (4) Reads from the database file are either aligned on a page boundary and ** an integer multiple of the page size in length or are taken from the ** first 100 bytes of the database file. ** ** (5) All writes to the database file are synced prior to the rollback journal ** being deleted, truncated, or zeroed. ** ** (6) If a master journal file is used, then all writes to the database file ** are synced prior to the master journal being deleted. ** ** Definition: Two databases (or the same database at two points it time) ** are said to be "logically equivalent" if they give the same answer to ** all queries. Note in particular the content of freelist leaf ** pages can be changed arbitrarily without affecting the logical equivalence ** of the database. ** ** (7) At any time, if any subset, including the empty set and the total set, ** of the unsynced changes to a rollback journal are removed and the ** journal is rolled back, the resulting database file will be logically ** equivalent to the database file at the beginning of the transaction. ** ** (8) When a transaction is rolled back, the xTruncate method of the VFS ** is called to restore the database file to the same size it was at ** the beginning of the transaction. (In some VFSes, the xTruncate ** method is a no-op, but that does not change the fact the SQLite will ** invoke it.) ** ** (9) Whenever the database file is modified, at least one bit in the range ** of bytes from 24 through 39 inclusive will be changed prior to releasing ** the EXCLUSIVE lock, thus signaling other connections on the same ** database to flush their caches. ** ** (10) The pattern of bits in bytes 24 through 39 shall not repeat in less ** than one billion transactions. ** ** (11) A database file is well-formed at the beginning and at the conclusion ** of every transaction. ** ** (12) An EXCLUSIVE lock is held on the database file when writing to ** the database file. ** ** (13) A SHARED lock is held on the database file while reading any ** content out of the database file. ** ******************************************************************************/ /* ** Macros for troubleshooting. Normally turned off */ #if 0 int sqlite3PagerTrace=1; /* True to enable tracing */ #define sqlite3DebugPrintf printf #define PAGERTRACE(X) if( sqlite3PagerTrace ){ sqlite3DebugPrintf X; } #else #define PAGERTRACE(X) #endif /* ** The following two macros are used within the PAGERTRACE() macros above ** to print out file-descriptors. ** ** PAGERID() takes a pointer to a Pager struct as its argument. The ** associated file-descriptor is returned. FILEHANDLEID() takes an sqlite3_file ** struct as its argument. */ #define PAGERID(p) ((int)(p->fd)) #define FILEHANDLEID(fd) ((int)fd) /* ** The Pager.eState variable stores the current 'state' of a pager. A ** pager may be in any one of the seven states shown in the following ** state diagram. 
** ** OPEN <------+------+ ** | | | ** V | | ** +---------> READER-------+ | ** | | | ** | V | ** |<-------WRITER_LOCKED------> ERROR ** | | ^ ** | V | ** |<------WRITER_CACHEMOD-------->| ** | | | ** | V | ** |<-------WRITER_DBMOD---------->| ** | | | ** | V | ** +<------WRITER_FINISHED-------->+ ** ** ** List of state transitions and the C [function] that performs each: ** ** OPEN -> READER [sqlite3PagerSharedLock] ** READER -> OPEN [pager_unlock] ** ** READER -> WRITER_LOCKED [sqlite3PagerBegin] ** WRITER_LOCKED -> WRITER_CACHEMOD [pager_open_journal] ** WRITER_CACHEMOD -> WRITER_DBMOD [syncJournal] ** WRITER_DBMOD -> WRITER_FINISHED [sqlite3PagerCommitPhaseOne] ** WRITER_*** -> READER [pager_end_transaction] ** ** WRITER_*** -> ERROR [pager_error] ** ERROR -> OPEN [pager_unlock] ** ** ** OPEN: ** ** The pager starts up in this state. Nothing is guaranteed in this ** state - the file may or may not be locked and the database size is ** unknown. The database may not be read or written. ** ** * No read or write transaction is active. ** * Any lock, or no lock at all, may be held on the database file. ** * The dbSize, dbOrigSize and dbFileSize variables may not be trusted. ** ** READER: ** ** In this state all the requirements for reading the database in ** rollback (non-WAL) mode are met. Unless the pager is (or recently ** was) in exclusive-locking mode, a user-level read transaction is ** open. The database size is known in this state. ** ** A connection running with locking_mode=normal enters this state when ** it opens a read-transaction on the database and returns to state ** OPEN after the read-transaction is completed. However a connection ** running in locking_mode=exclusive (including temp databases) remains in ** this state even after the read-transaction is closed. The only way ** a locking_mode=exclusive connection can transition from READER to OPEN ** is via the ERROR state (see below). ** ** * A read transaction may be active (but a write-transaction cannot). ** * A SHARED or greater lock is held on the database file. ** * The dbSize variable may be trusted (even if a user-level read ** transaction is not active). The dbOrigSize and dbFileSize variables ** may not be trusted at this point. ** * If the database is a WAL database, then the WAL connection is open. ** * Even if a read-transaction is not open, it is guaranteed that ** there is no hot-journal in the file-system. ** ** WRITER_LOCKED: ** ** The pager moves to this state from READER when a write-transaction ** is first opened on the database. In WRITER_LOCKED state, all locks ** required to start a write-transaction are held, but no actual ** modifications to the cache or database have taken place. ** ** In rollback mode, a RESERVED or (if the transaction was opened with ** BEGIN EXCLUSIVE) EXCLUSIVE lock is obtained on the database file when ** moving to this state, but the journal file is not written to or opened ** to in this state. If the transaction is committed or rolled back while ** in WRITER_LOCKED state, all that is required is to unlock the database ** file. ** ** IN WAL mode, WalBeginWriteTransaction() is called to lock the log file. ** If the connection is running with locking_mode=exclusive, an attempt ** is made to obtain an EXCLUSIVE lock on the database file. ** ** * A write transaction is active. ** * If the connection is open in rollback-mode, a RESERVED or greater ** lock is held on the database file. ** * If the connection is open in WAL-mode, a WAL write transaction ** is open (i.e. 
sqlite3WalBeginWriteTransaction() has been successfully ** called). ** * The dbSize, dbOrigSize and dbFileSize variables are all valid. ** * The contents of the pager cache have not been modified. ** * The journal file may or may not be open. ** * Nothing (not even the first header) has been written to the journal. ** ** WRITER_CACHEMOD: ** ** A pager moves from WRITER_LOCKED state to this state when a page is ** first modified by the upper layer. In rollback mode the journal file ** is opened (if it is not already open) and a header written to the ** start of it. The database file on disk has not been modified. ** ** * A write transaction is active. ** * A RESERVED or greater lock is held on the database file. ** * The journal file is open and the first header has been written ** to it, but the header has not been synced to disk. ** * The contents of the page cache have been modified. ** ** WRITER_DBMOD: ** ** The pager transitions from WRITER_CACHEMOD into WRITER_DBMOD state ** when it modifies the contents of the database file. WAL connections ** never enter this state (since they do not modify the database file, ** just the log file). ** ** * A write transaction is active. ** * An EXCLUSIVE or greater lock is held on the database file. ** * The journal file is open and the first header has been written ** and synced to disk. ** * The contents of the page cache have been modified (and possibly ** written to disk). ** ** WRITER_FINISHED: ** ** It is not possible for a WAL connection to enter this state. ** ** A rollback-mode pager changes to WRITER_FINISHED state from WRITER_DBMOD ** state after the entire transaction has been successfully written into the ** database file. In this state the transaction may be committed simply ** by finalizing the journal file. Once in WRITER_FINISHED state, it is ** not possible to modify the database further. At this point, the upper ** layer must either commit or rollback the transaction. ** ** * A write transaction is active. ** * An EXCLUSIVE or greater lock is held on the database file. ** * All writing and syncing of journal and database data has finished. ** If no error occurred, all that remains is to finalize the journal to ** commit the transaction. If an error did occur, the caller will need ** to rollback the transaction. ** ** ERROR: ** ** The ERROR state is entered when an IO or disk-full error (including ** SQLITE_IOERR_NOMEM) occurs at a point in the code that makes it ** difficult to be sure that the in-memory pager state (cache contents, ** db size etc.) are consistent with the contents of the file-system. ** ** Temporary pager files may enter the ERROR state, but in-memory pagers ** cannot. ** ** For example, if an IO error occurs while performing a rollback, ** the contents of the page-cache may be left in an inconsistent state. ** At this point it would be dangerous to change back to READER state ** (as usually happens after a rollback). Any subsequent readers might ** report database corruption (due to the inconsistent cache), and if ** they upgrade to writers, they may inadvertently corrupt the database ** file. To avoid this hazard, the pager switches into the ERROR state ** instead of READER following such an error. ** ** Once it has entered the ERROR state, any attempt to use the pager ** to read or write data returns an error. 
Eventually, once all ** outstanding transactions have been abandoned, the pager is able to ** transition back to OPEN state, discarding the contents of the ** page-cache and any other in-memory state at the same time. Everything ** is reloaded from disk (and, if necessary, hot-journal rollback peformed) ** when a read-transaction is next opened on the pager (transitioning ** the pager into READER state). At that point the system has recovered ** from the error. ** ** Specifically, the pager jumps into the ERROR state if: ** ** 1. An error occurs while attempting a rollback. This happens in ** function sqlite3PagerRollback(). ** ** 2. An error occurs while attempting to finalize a journal file ** following a commit in function sqlite3PagerCommitPhaseTwo(). ** ** 3. An error occurs while attempting to write to the journal or ** database file in function pagerStress() in order to free up ** memory. ** ** In other cases, the error is returned to the b-tree layer. The b-tree ** layer then attempts a rollback operation. If the error condition ** persists, the pager enters the ERROR state via condition (1) above. ** ** Condition (3) is necessary because it can be triggered by a read-only ** statement executed within a transaction. In this case, if the error ** code were simply returned to the user, the b-tree layer would not ** automatically attempt a rollback, as it assumes that an error in a ** read-only statement cannot leave the pager in an internally inconsistent ** state. ** ** * The Pager.errCode variable is set to something other than SQLITE_OK. ** * There are one or more outstanding references to pages (after the ** last reference is dropped the pager should move back to OPEN state). ** * The pager is not an in-memory pager. ** ** ** Notes: ** ** * A pager is never in WRITER_DBMOD or WRITER_FINISHED state if the ** connection is open in WAL mode. A WAL connection is always in one ** of the first four states. ** ** * Normally, a connection open in exclusive mode is never in PAGER_OPEN ** state. There are two exceptions: immediately after exclusive-mode has ** been turned on (and before any read or write transactions are ** executed), and when the pager is leaving the "error state". ** ** * See also: assert_pager_state(). */ #define PAGER_OPEN 0 #define PAGER_READER 1 #define PAGER_WRITER_LOCKED 2 #define PAGER_WRITER_CACHEMOD 3 #define PAGER_WRITER_DBMOD 4 #define PAGER_WRITER_FINISHED 5 #define PAGER_ERROR 6 /* ** The Pager.eLock variable is almost always set to one of the ** following locking-states, according to the lock currently held on ** the database file: NO_LOCK, SHARED_LOCK, RESERVED_LOCK or EXCLUSIVE_LOCK. ** This variable is kept up to date as locks are taken and released by ** the pagerLockDb() and pagerUnlockDb() wrappers. ** ** If the VFS xLock() or xUnlock() returns an error other than SQLITE_BUSY ** (i.e. one of the SQLITE_IOERR subtypes), it is not clear whether or not ** the operation was successful. In these circumstances pagerLockDb() and ** pagerUnlockDb() take a conservative approach - eLock is always updated ** when unlocking the file, and only updated when locking the file if the ** VFS call is successful. This way, the Pager.eLock variable may be set ** to a less exclusive (lower) value than the lock that is actually held ** at the system level, but it is never set to a more exclusive value. ** ** This is usually safe. 
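**
** As a compact summary of the state transitions listed above, here is a
** small standalone sketch.  It is not part of SQLite; the EX_* names and
** the helper function are hypothetical and exist only to illustrate which
** eState transitions are legal:
**
**     #include <stdio.h>
**     enum { EX_OPEN, EX_READER, EX_WRITER_LOCKED, EX_WRITER_CACHEMOD,
**            EX_WRITER_DBMOD, EX_WRITER_FINISHED, EX_ERROR };
**     /* Return 1 if the transition eFrom -> eTo appears in the list above */
**     static int exampleTransitionOk(int eFrom, int eTo){
**       switch( eFrom ){
**         case EX_OPEN:            return eTo==EX_READER;
**         case EX_READER:          return eTo==EX_OPEN || eTo==EX_WRITER_LOCKED;
**         case EX_WRITER_LOCKED:
**         case EX_WRITER_CACHEMOD:
**         case EX_WRITER_DBMOD:    return eTo==eFrom+1
**                                      || eTo==EX_READER || eTo==EX_ERROR;
**         case EX_WRITER_FINISHED: return eTo==EX_READER || eTo==EX_ERROR;
**         case EX_ERROR:           return eTo==EX_OPEN;
**       }
**       return 0;
**     }
**     int main(void){
**       printf("%d\n", exampleTransitionOk(EX_OPEN, EX_READER));   /* 1 */
**       printf("%d\n", exampleTransitionOk(EX_ERROR, EX_READER));  /* 0 */
**       return 0;
**     }
**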
If an xUnlock fails or appears to fail, there may ** be a few redundant xLock() calls or a lock may be held for longer than ** required, but nothing really goes wrong. ** ** The exception is when the database file is unlocked as the pager moves ** from ERROR to OPEN state. At this point there may be a hot-journal file ** in the file-system that needs to be rolled back (as part of an OPEN->SHARED ** transition, by the same pager or any other). If the call to xUnlock() ** fails at this point and the pager is left holding an EXCLUSIVE lock, this ** can confuse the call to xCheckReservedLock() call made later as part ** of hot-journal detection. ** ** xCheckReservedLock() is defined as returning true "if there is a RESERVED ** lock held by this process or any others". So xCheckReservedLock may ** return true because the caller itself is holding an EXCLUSIVE lock (but ** doesn't know it because of a previous error in xUnlock). If this happens ** a hot-journal may be mistaken for a journal being created by an active ** transaction in another process, causing SQLite to read from the database ** without rolling it back. ** ** To work around this, if a call to xUnlock() fails when unlocking the ** database in the ERROR state, Pager.eLock is set to UNKNOWN_LOCK. It ** is only changed back to a real locking state after a successful call ** to xLock(EXCLUSIVE). Also, the code to do the OPEN->SHARED state transition ** omits the check for a hot-journal if Pager.eLock is set to UNKNOWN_LOCK ** lock. Instead, it assumes a hot-journal exists and obtains an EXCLUSIVE ** lock on the database file before attempting to roll it back. See function ** PagerSharedLock() for more detail. ** ** Pager.eLock may only be set to UNKNOWN_LOCK when the pager is in ** PAGER_OPEN state. */ #define UNKNOWN_LOCK (EXCLUSIVE_LOCK+1) /* ** A macro used for invoking the codec if there is one */ #ifdef SQLITE_HAS_CODEC # define CODEC1(P,D,N,X,E) \ if( P->xCodec && P->xCodec(P->pCodec,D,N,X)==0 ){ E; } # define CODEC2(P,D,N,X,E,O) \ if( P->xCodec==0 ){ O=(char*)D; }else \ if( (O=(char*)(P->xCodec(P->pCodec,D,N,X)))==0 ){ E; } #else # define CODEC1(P,D,N,X,E) /* NO-OP */ # define CODEC2(P,D,N,X,E,O) O=(char*)D #endif /* ** The maximum allowed sector size. 64KiB. If the xSectorsize() method ** returns a value larger than this, then MAX_SECTOR_SIZE is used instead. ** This could conceivably cause corruption following a power failure on ** such a system. This is currently an undocumented limit. */ #define MAX_SECTOR_SIZE 0x10000 /* ** An instance of the following structure is allocated for each active ** savepoint and statement transaction in the system. All such structures ** are stored in the Pager.aSavepoint[] array, which is allocated and ** resized using sqlite3Realloc(). ** ** When a savepoint is created, the PagerSavepoint.iHdrOffset field is ** set to 0. If a journal-header is written into the main journal while ** the savepoint is active, then iHdrOffset is set to the byte offset ** immediately following the last journal record written into the main ** journal before the journal-header. This is required during savepoint ** rollback (see pagerPlaybackSavepoint()). 
*/ typedef struct PagerSavepoint PagerSavepoint; struct PagerSavepoint { i64 iOffset; /* Starting offset in main journal */ i64 iHdrOffset; /* See above */ Bitvec *pInSavepoint; /* Set of pages in this savepoint */ Pgno nOrig; /* Original number of pages in file */ Pgno iSubRec; /* Index of first record in sub-journal */ #ifndef SQLITE_OMIT_WAL u32 aWalData[WAL_SAVEPOINT_NDATA]; /* WAL savepoint context */ #endif }; /* ** Bits of the Pager.doNotSpill flag. See further description below. */ #define SPILLFLAG_OFF 0x01 /* Never spill cache. Set via pragma */ #define SPILLFLAG_ROLLBACK 0x02 /* Current rolling back, so do not spill */ #define SPILLFLAG_NOSYNC 0x04 /* Spill is ok, but do not sync */ /* ** An open page cache is an instance of struct Pager. A description of ** some of the more important member variables follows: ** ** eState ** ** The current 'state' of the pager object. See the comment and state ** diagram above for a description of the pager state. ** ** eLock ** ** For a real on-disk database, the current lock held on the database file - ** NO_LOCK, SHARED_LOCK, RESERVED_LOCK or EXCLUSIVE_LOCK. ** ** For a temporary or in-memory database (neither of which require any ** locks), this variable is always set to EXCLUSIVE_LOCK. Since such ** databases always have Pager.exclusiveMode==1, this tricks the pager ** logic into thinking that it already has all the locks it will ever ** need (and no reason to release them). ** ** In some (obscure) circumstances, this variable may also be set to ** UNKNOWN_LOCK. See the comment above the #define of UNKNOWN_LOCK for ** details. ** ** changeCountDone ** ** This boolean variable is used to make sure that the change-counter ** (the 4-byte header field at byte offset 24 of the database file) is ** not updated more often than necessary. ** ** It is set to true when the change-counter field is updated, which ** can only happen if an exclusive lock is held on the database file. ** It is cleared (set to false) whenever an exclusive lock is ** relinquished on the database file. Each time a transaction is committed, ** The changeCountDone flag is inspected. If it is true, the work of ** updating the change-counter is omitted for the current transaction. ** ** This mechanism means that when running in exclusive mode, a connection ** need only update the change-counter once, for the first transaction ** committed. ** ** setMaster ** ** When PagerCommitPhaseOne() is called to commit a transaction, it may ** (or may not) specify a master-journal name to be written into the ** journal file before it is synced to disk. ** ** Whether or not a journal file contains a master-journal pointer affects ** the way in which the journal file is finalized after the transaction is ** committed or rolled back when running in "journal_mode=PERSIST" mode. ** If a journal file does not contain a master-journal pointer, it is ** finalized by overwriting the first journal header with zeroes. If ** it does contain a master-journal pointer the journal file is finalized ** by truncating it to zero bytes, just as if the connection were ** running in "journal_mode=truncate" mode. ** ** Journal files that contain master journal pointers cannot be finalized ** simply by overwriting the first journal-header with zeroes, as the ** master journal pointer could interfere with hot-journal rollback of any ** subsequently interrupted transaction that reuses the journal file. 
** ** The flag is cleared as soon as the journal file is finalized (either ** by PagerCommitPhaseTwo or PagerRollback). If an IO error prevents the ** journal file from being successfully finalized, the setMaster flag ** is cleared anyway (and the pager will move to ERROR state). ** ** doNotSpill ** ** This variables control the behavior of cache-spills (calls made by ** the pcache module to the pagerStress() routine to write cached data ** to the file-system in order to free up memory). ** ** When bits SPILLFLAG_OFF or SPILLFLAG_ROLLBACK of doNotSpill are set, ** writing to the database from pagerStress() is disabled altogether. ** The SPILLFLAG_ROLLBACK case is done in a very obscure case that ** comes up during savepoint rollback that requires the pcache module ** to allocate a new page to prevent the journal file from being written ** while it is being traversed by code in pager_playback(). The SPILLFLAG_OFF ** case is a user preference. ** ** If the SPILLFLAG_NOSYNC bit is set, writing to the database from ** pagerStress() is permitted, but syncing the journal file is not. ** This flag is set by sqlite3PagerWrite() when the file-system sector-size ** is larger than the database page-size in order to prevent a journal sync ** from happening in between the journalling of two pages on the same sector. ** ** subjInMemory ** ** This is a boolean variable. If true, then any required sub-journal ** is opened as an in-memory journal file. If false, then in-memory ** sub-journals are only used for in-memory pager files. ** ** This variable is updated by the upper layer each time a new ** write-transaction is opened. ** ** dbSize, dbOrigSize, dbFileSize ** ** Variable dbSize is set to the number of pages in the database file. ** It is valid in PAGER_READER and higher states (all states except for ** OPEN and ERROR). ** ** dbSize is set based on the size of the database file, which may be ** larger than the size of the database (the value stored at offset ** 28 of the database header by the btree). If the size of the file ** is not an integer multiple of the page-size, the value stored in ** dbSize is rounded down (i.e. a 5KB file with 2K page-size has dbSize==2). ** Except, any file that is greater than 0 bytes in size is considered ** to have at least one page. (i.e. a 1KB file with 2K page-size leads ** to dbSize==1). ** ** During a write-transaction, if pages with page-numbers greater than ** dbSize are modified in the cache, dbSize is updated accordingly. ** Similarly, if the database is truncated using PagerTruncateImage(), ** dbSize is updated. ** ** Variables dbOrigSize and dbFileSize are valid in states ** PAGER_WRITER_LOCKED and higher. dbOrigSize is a copy of the dbSize ** variable at the start of the transaction. It is used during rollback, ** and to determine whether or not pages need to be journalled before ** being modified. ** ** Throughout a write-transaction, dbFileSize contains the size of ** the file on disk in pages. It is set to a copy of dbSize when the ** write-transaction is first opened, and updated when VFS calls are made ** to write or truncate the database file on disk. ** ** The only reason the dbFileSize variable is required is to suppress ** unnecessary calls to xTruncate() after committing a transaction. If, ** when a transaction is committed, the dbFileSize variable indicates ** that the database file is larger than the database image (Pager.dbSize), ** pager_truncate() is called. 
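**
**   As an illustration of the dbSize rounding rule above, the following
**   standalone sketch (not part of SQLite; the helper name is hypothetical)
**   reproduces the two examples given:
**
**       #include <stdio.h>
**       /* Round down to whole pages; a non-empty file has at least 1 page */
**       static unsigned examplePageCount(long long szFile, int szPage){
**         unsigned nPage = (unsigned)(szFile/szPage);
**         if( nPage==0 && szFile>0 ) nPage = 1;
**         return nPage;
**       }
**       int main(void){
**         printf("%u\n", examplePageCount(5*1024, 2048));  /* prints 2 */
**         printf("%u\n", examplePageCount(1*1024, 2048));  /* prints 1 */
**         return 0;
**       }
**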
The pager_truncate() call uses xFilesize() ** to measure the database file on disk, and then truncates it if required. ** dbFileSize is not used when rolling back a transaction. In this case ** pager_truncate() is called unconditionally (which means there may be ** a call to xFilesize() that is not strictly required). In either case, ** pager_truncate() may cause the file to become smaller or larger. ** ** dbHintSize ** ** The dbHintSize variable is used to limit the number of calls made to ** the VFS xFileControl(FCNTL_SIZE_HINT) method. ** ** dbHintSize is set to a copy of the dbSize variable when a ** write-transaction is opened (at the same time as dbFileSize and ** dbOrigSize). If the xFileControl(FCNTL_SIZE_HINT) method is called, ** dbHintSize is increased to the number of pages that correspond to the ** size-hint passed to the method call. See pager_write_pagelist() for ** details. ** ** errCode ** ** The Pager.errCode variable is only ever used in PAGER_ERROR state. It ** is set to zero in all other states. In PAGER_ERROR state, Pager.errCode ** is always set to SQLITE_FULL, SQLITE_IOERR or one of the SQLITE_IOERR_XXX ** sub-codes. */ struct Pager { sqlite3_vfs *pVfs; /* OS functions to use for IO */ u8 exclusiveMode; /* Boolean. True if locking_mode==EXCLUSIVE */ u8 journalMode; /* One of the PAGER_JOURNALMODE_* values */ u8 useJournal; /* Use a rollback journal on this file */ u8 noSync; /* Do not sync the journal if true */ u8 fullSync; /* Do extra syncs of the journal for robustness */ u8 ckptSyncFlags; /* SYNC_NORMAL or SYNC_FULL for checkpoint */ u8 walSyncFlags; /* SYNC_NORMAL or SYNC_FULL for wal writes */ u8 syncFlags; /* SYNC_NORMAL or SYNC_FULL otherwise */ u8 tempFile; /* zFilename is a temporary or immutable file */ u8 noLock; /* Do not lock (except in WAL mode) */ u8 readOnly; /* True for a read-only database */ u8 memDb; /* True to inhibit all file I/O */ /************************************************************************** ** The following block contains those class members that change during ** routine operation. Class members not in this block are either fixed ** when the pager is first created or else only change when there is a ** significant mode change (such as changing the page_size, locking_mode, ** or the journal_mode). From another view, these class members describe ** the "state" of the pager, while other class members describe the ** "configuration" of the pager. */ u8 eState; /* Pager state (OPEN, READER, WRITER_LOCKED..) 
*/ u8 eLock; /* Current lock held on database file */ u8 changeCountDone; /* Set after incrementing the change-counter */ u8 setMaster; /* True if a m-j name has been written to jrnl */ u8 doNotSpill; /* Do not spill the cache when non-zero */ u8 subjInMemory; /* True to use in-memory sub-journals */ u8 bUseFetch; /* True to use xFetch() */ u8 hasBeenUsed; /* True if any content previously read */ Pgno dbSize; /* Number of pages in the database */ Pgno dbOrigSize; /* dbSize before the current transaction */ Pgno dbFileSize; /* Number of pages in the database file */ Pgno dbHintSize; /* Value passed to FCNTL_SIZE_HINT call */ int errCode; /* One of several kinds of errors */ int nRec; /* Pages journalled since last j-header written */ u32 cksumInit; /* Quasi-random value added to every checksum */ u32 nSubRec; /* Number of records written to sub-journal */ Bitvec *pInJournal; /* One bit for each page in the database file */ sqlite3_file *fd; /* File descriptor for database */ sqlite3_file *jfd; /* File descriptor for main journal */ sqlite3_file *sjfd; /* File descriptor for sub-journal */ i64 journalOff; /* Current write offset in the journal file */ i64 journalHdr; /* Byte offset to previous journal header */ sqlite3_backup *pBackup; /* Pointer to list of ongoing backup processes */ PagerSavepoint *aSavepoint; /* Array of active savepoints */ int nSavepoint; /* Number of elements in aSavepoint[] */ u32 iDataVersion; /* Changes whenever database content changes */ char dbFileVers[16]; /* Changes whenever database file changes */ int nMmapOut; /* Number of mmap pages currently outstanding */ sqlite3_int64 szMmap; /* Desired maximum mmap size */ PgHdr *pMmapFreelist; /* List of free mmap page headers (pDirty) */ /* ** End of the routinely-changing class members ***************************************************************************/ u16 nExtra; /* Add this many bytes to each in-memory page */ i16 nReserve; /* Number of unused bytes at end of each page */ u32 vfsFlags; /* Flags for sqlite3_vfs.xOpen() */ u32 sectorSize; /* Assumed sector size during rollback */ int pageSize; /* Number of bytes in a page */ Pgno mxPgno; /* Maximum allowed size of the database */ i64 journalSizeLimit; /* Size limit for persistent journal files */ char *zFilename; /* Name of the database file */ char *zJournal; /* Name of the journal file */ int (*xBusyHandler)(void*); /* Function to call when busy */ void *pBusyHandlerArg; /* Context argument for xBusyHandler */ int aStat[3]; /* Total cache hits, misses and writes */ #ifdef SQLITE_TEST int nRead; /* Database pages read */ #endif void (*xReiniter)(DbPage*); /* Call this routine when reloading pages */ #ifdef SQLITE_HAS_CODEC void *(*xCodec)(void*,void*,Pgno,int); /* Routine for en/decoding data */ void (*xCodecSizeChng)(void*,int,int); /* Notify of page size changes */ void (*xCodecFree)(void*); /* Destructor for the codec */ void *pCodec; /* First argument to xCodec... methods */ #endif char *pTmpSpace; /* Pager.pageSize bytes of space for tmp use */ PCache *pPCache; /* Pointer to page cache object */ #ifndef SQLITE_OMIT_WAL Wal *pWal; /* Write-ahead log used by "journal_mode=wal" */ char *zWal; /* File name for write-ahead log */ #endif }; /* ** Indexes for use with Pager.aStat[]. The Pager.aStat[] array contains ** the values accessed by passing SQLITE_DBSTATUS_CACHE_HIT, CACHE_MISS ** or CACHE_WRITE to sqlite3_db_status(). 
*/ #define PAGER_STAT_HIT 0 #define PAGER_STAT_MISS 1 #define PAGER_STAT_WRITE 2 /* ** The following global variables hold counters used for ** testing purposes only. These variables do not exist in ** a non-testing build. These variables are not thread-safe. */ #ifdef SQLITE_TEST SQLITE_API int sqlite3_pager_readdb_count = 0; /* Number of full pages read from DB */ SQLITE_API int sqlite3_pager_writedb_count = 0; /* Number of full pages written to DB */ SQLITE_API int sqlite3_pager_writej_count = 0; /* Number of pages written to journal */ # define PAGER_INCR(v) v++ #else # define PAGER_INCR(v) #endif /* ** Journal files begin with the following magic string. The data ** was obtained from /dev/random. It is used only as a sanity check. ** ** Since version 2.8.0, the journal format contains additional sanity ** checking information. If the power fails while the journal is being ** written, semi-random garbage data might appear in the journal ** file after power is restored. If an attempt is then made ** to roll the journal back, the database could be corrupted. The additional ** sanity checking data is an attempt to discover the garbage in the ** journal and ignore it. ** ** The sanity checking information for the new journal format consists ** of a 32-bit checksum on each page of data. The checksum covers both ** the page number and the pPager->pageSize bytes of data for the page. ** This cksum is initialized to a 32-bit random value that appears in the ** journal file right after the header. The random initializer is important, ** because garbage data that appears at the end of a journal is likely ** data that was once in other files that have now been deleted. If the ** garbage data came from an obsolete journal file, the checksums might ** be correct. But by initializing the checksum to random value which ** is different for every journal, we minimize that risk. */ static const unsigned char aJournalMagic[] = { 0xd9, 0xd5, 0x05, 0xf9, 0x20, 0xa1, 0x63, 0xd7, }; /* ** The size of the of each page record in the journal is given by ** the following macro. */ #define JOURNAL_PG_SZ(pPager) ((pPager->pageSize) + 8) /* ** The journal header size for this pager. This is usually the same ** size as a single disk sector. See also setSectorSize(). */ #define JOURNAL_HDR_SZ(pPager) (pPager->sectorSize) /* ** The macro MEMDB is true if we are dealing with an in-memory database. ** We do this as a macro so that if the SQLITE_OMIT_MEMORYDB macro is set, ** the value of MEMDB will be a constant and the compiler will optimize ** out code that would never execute. */ #ifdef SQLITE_OMIT_MEMORYDB # define MEMDB 0 #else # define MEMDB pPager->memDb #endif /* ** The macro USEFETCH is true if we are allowed to use the xFetch and xUnfetch ** interfaces to access the database using memory-mapped I/O. */ #if SQLITE_MAX_MMAP_SIZE>0 # define USEFETCH(x) ((x)->bUseFetch) #else # define USEFETCH(x) 0 #endif /* ** The maximum legal page number is (2^31 - 1). */ #define PAGER_MAX_PGNO 2147483647 /* ** The argument to this macro is a file descriptor (type sqlite3_file*). ** Return 0 if it is not open, or non-zero (but not 1) if it is. ** ** This is so that expressions can be written as: ** ** if( isOpen(pPager->jfd) ){ ... ** ** instead of ** ** if( pPager->jfd->pMethods ){ ... */ #define isOpen(pFd) ((pFd)->pMethods!=0) /* ** Return true if this pager uses a write-ahead log instead of the usual ** rollback journal. Otherwise false. 
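**
** The 8-byte magic defined in aJournalMagic[] above can be recognized with
** a plain memcmp(), which is how the journal-reading code later in this
** file detects a valid journal header.  A standalone sketch (not part of
** SQLite; the names are hypothetical):
**
**     #include <stdio.h>
**     #include <string.h>
**     static const unsigned char exampleMagic[8] = {
**       0xd9, 0xd5, 0x05, 0xf9, 0x20, 0xa1, 0x63, 0xd7,
**     };
**     /* Return 1 if the first 8 bytes of aHdr carry the journal magic */
**     static int exampleLooksLikeJournalHdr(const unsigned char *aHdr){
**       return memcmp(aHdr, exampleMagic, 8)==0;
**     }
**     int main(void){
**       unsigned char aHdr[8] = {0xd9,0xd5,0x05,0xf9,0x20,0xa1,0x63,0xd7};
**       printf("%d\n", exampleLooksLikeJournalHdr(aHdr));  /* prints 1 */
**       return 0;
**     }
**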
*/ #ifndef SQLITE_OMIT_WAL static int pagerUseWal(Pager *pPager){ return (pPager->pWal!=0); } #else # define pagerUseWal(x) 0 # define pagerRollbackWal(x) 0 # define pagerWalFrames(v,w,x,y) 0 # define pagerOpenWalIfPresent(z) SQLITE_OK # define pagerBeginReadTransaction(z) SQLITE_OK #endif #ifndef NDEBUG /* ** Usage: ** ** assert( assert_pager_state(pPager) ); ** ** This function runs many asserts to try to find inconsistencies in ** the internal state of the Pager object. */ static int assert_pager_state(Pager *p){ Pager *pPager = p; /* State must be valid. */ assert( p->eState==PAGER_OPEN || p->eState==PAGER_READER || p->eState==PAGER_WRITER_LOCKED || p->eState==PAGER_WRITER_CACHEMOD || p->eState==PAGER_WRITER_DBMOD || p->eState==PAGER_WRITER_FINISHED || p->eState==PAGER_ERROR ); /* Regardless of the current state, a temp-file connection always behaves ** as if it has an exclusive lock on the database file. It never updates ** the change-counter field, so the changeCountDone flag is always set. */ assert( p->tempFile==0 || p->eLock==EXCLUSIVE_LOCK ); assert( p->tempFile==0 || pPager->changeCountDone ); /* If the useJournal flag is clear, the journal-mode must be "OFF". ** And if the journal-mode is "OFF", the journal file must not be open. */ assert( p->journalMode==PAGER_JOURNALMODE_OFF || p->useJournal ); assert( p->journalMode!=PAGER_JOURNALMODE_OFF || !isOpen(p->jfd) ); /* Check that MEMDB implies noSync. And an in-memory journal. Since ** this means an in-memory pager performs no IO at all, it cannot encounter ** either SQLITE_IOERR or SQLITE_FULL during rollback or while finalizing ** a journal file. (although the in-memory journal implementation may ** return SQLITE_IOERR_NOMEM while the journal file is being written). It ** is therefore not possible for an in-memory pager to enter the ERROR ** state. */ if( MEMDB ){ assert( p->noSync ); assert( p->journalMode==PAGER_JOURNALMODE_OFF || p->journalMode==PAGER_JOURNALMODE_MEMORY ); assert( p->eState!=PAGER_ERROR && p->eState!=PAGER_OPEN ); assert( pagerUseWal(p)==0 ); } /* If changeCountDone is set, a RESERVED lock or greater must be held ** on the file. */ assert( pPager->changeCountDone==0 || pPager->eLock>=RESERVED_LOCK ); assert( p->eLock!=PENDING_LOCK ); switch( p->eState ){ case PAGER_OPEN: assert( !MEMDB ); assert( pPager->errCode==SQLITE_OK ); assert( sqlite3PcacheRefCount(pPager->pPCache)==0 || pPager->tempFile ); break; case PAGER_READER: assert( pPager->errCode==SQLITE_OK ); assert( p->eLock!=UNKNOWN_LOCK ); assert( p->eLock>=SHARED_LOCK ); break; case PAGER_WRITER_LOCKED: assert( p->eLock!=UNKNOWN_LOCK ); assert( pPager->errCode==SQLITE_OK ); if( !pagerUseWal(pPager) ){ assert( p->eLock>=RESERVED_LOCK ); } assert( pPager->dbSize==pPager->dbOrigSize ); assert( pPager->dbOrigSize==pPager->dbFileSize ); assert( pPager->dbOrigSize==pPager->dbHintSize ); assert( pPager->setMaster==0 ); break; case PAGER_WRITER_CACHEMOD: assert( p->eLock!=UNKNOWN_LOCK ); assert( pPager->errCode==SQLITE_OK ); if( !pagerUseWal(pPager) ){ /* It is possible that if journal_mode=wal here that neither the ** journal file nor the WAL file are open. This happens during ** a rollback transaction that switches from journal_mode=off ** to journal_mode=wal. 
*/ assert( p->eLock>=RESERVED_LOCK ); assert( isOpen(p->jfd) || p->journalMode==PAGER_JOURNALMODE_OFF || p->journalMode==PAGER_JOURNALMODE_WAL ); } assert( pPager->dbOrigSize==pPager->dbFileSize ); assert( pPager->dbOrigSize==pPager->dbHintSize ); break; case PAGER_WRITER_DBMOD: assert( p->eLock==EXCLUSIVE_LOCK ); assert( pPager->errCode==SQLITE_OK ); assert( !pagerUseWal(pPager) ); assert( p->eLock>=EXCLUSIVE_LOCK ); assert( isOpen(p->jfd) || p->journalMode==PAGER_JOURNALMODE_OFF || p->journalMode==PAGER_JOURNALMODE_WAL ); assert( pPager->dbOrigSize<=pPager->dbHintSize ); break; case PAGER_WRITER_FINISHED: assert( p->eLock==EXCLUSIVE_LOCK ); assert( pPager->errCode==SQLITE_OK ); assert( !pagerUseWal(pPager) ); assert( isOpen(p->jfd) || p->journalMode==PAGER_JOURNALMODE_OFF || p->journalMode==PAGER_JOURNALMODE_WAL ); break; case PAGER_ERROR: /* There must be at least one outstanding reference to the pager if ** in ERROR state. Otherwise the pager should have already dropped ** back to OPEN state. */ assert( pPager->errCode!=SQLITE_OK ); assert( sqlite3PcacheRefCount(pPager->pPCache)>0 ); break; } return 1; } #endif /* ifndef NDEBUG */ #ifdef SQLITE_DEBUG /* ** Return a pointer to a human readable string in a static buffer ** containing the state of the Pager object passed as an argument. This ** is intended to be used within debuggers. For example, as an alternative ** to "print *pPager" in gdb: ** ** (gdb) printf "%s", print_pager_state(pPager) */ static char *print_pager_state(Pager *p){ static char zRet[1024]; sqlite3_snprintf(1024, zRet, "Filename: %s\n" "State: %s errCode=%d\n" "Lock: %s\n" "Locking mode: locking_mode=%s\n" "Journal mode: journal_mode=%s\n" "Backing store: tempFile=%d memDb=%d useJournal=%d\n" "Journal: journalOff=%lld journalHdr=%lld\n" "Size: dbsize=%d dbOrigSize=%d dbFileSize=%d\n" , p->zFilename , p->eState==PAGER_OPEN ? "OPEN" : p->eState==PAGER_READER ? "READER" : p->eState==PAGER_WRITER_LOCKED ? "WRITER_LOCKED" : p->eState==PAGER_WRITER_CACHEMOD ? "WRITER_CACHEMOD" : p->eState==PAGER_WRITER_DBMOD ? "WRITER_DBMOD" : p->eState==PAGER_WRITER_FINISHED ? "WRITER_FINISHED" : p->eState==PAGER_ERROR ? "ERROR" : "?error?" , (int)p->errCode , p->eLock==NO_LOCK ? "NO_LOCK" : p->eLock==RESERVED_LOCK ? "RESERVED" : p->eLock==EXCLUSIVE_LOCK ? "EXCLUSIVE" : p->eLock==SHARED_LOCK ? "SHARED" : p->eLock==UNKNOWN_LOCK ? "UNKNOWN" : "?error?" , p->exclusiveMode ? "exclusive" : "normal" , p->journalMode==PAGER_JOURNALMODE_MEMORY ? "memory" : p->journalMode==PAGER_JOURNALMODE_OFF ? "off" : p->journalMode==PAGER_JOURNALMODE_DELETE ? "delete" : p->journalMode==PAGER_JOURNALMODE_PERSIST ? "persist" : p->journalMode==PAGER_JOURNALMODE_TRUNCATE ? "truncate" : p->journalMode==PAGER_JOURNALMODE_WAL ? "wal" : "?error?" , (int)p->tempFile, (int)p->memDb, (int)p->useJournal , p->journalOff, p->journalHdr , (int)p->dbSize, (int)p->dbOrigSize, (int)p->dbFileSize ); return zRet; } #endif /* ** Return true if it is necessary to write page *pPg into the sub-journal. ** A page needs to be written into the sub-journal if there exists one ** or more open savepoints for which: ** ** * The page-number is less than or equal to PagerSavepoint.nOrig, and ** * The bit corresponding to the page-number is not set in ** PagerSavepoint.pInSavepoint. 
*/
static int subjRequiresPage(PgHdr *pPg){
  Pager *pPager = pPg->pPager;
  PagerSavepoint *p;
  Pgno pgno = pPg->pgno;
  int i;
  for(i=0; i<pPager->nSavepoint; i++){
    p = &pPager->aSavepoint[i];
    if( p->nOrig>=pgno && 0==sqlite3BitvecTestNotNull(p->pInSavepoint, pgno) ){
      return 1;
    }
  }
  return 0;
}

#ifdef SQLITE_DEBUG
/*
** Return true if the page is already in the journal file.
*/
static int pageInJournal(Pager *pPager, PgHdr *pPg){
  return sqlite3BitvecTest(pPager->pInJournal, pPg->pgno);
}
#endif

/*
** Read a 32-bit integer from the given file descriptor. Store the integer
** that is read in *pRes. Return SQLITE_OK if everything worked, or an
** error code if something goes wrong.
**
** All values are stored on disk as big-endian.
*/
static int read32bits(sqlite3_file *fd, i64 offset, u32 *pRes){
  unsigned char ac[4];
  int rc = sqlite3OsRead(fd, ac, sizeof(ac), offset);
  if( rc==SQLITE_OK ){
    *pRes = sqlite3Get4byte(ac);
  }
  return rc;
}

/*
** Write a 32-bit integer into a string buffer in big-endian byte order.
*/
#define put32bits(A,B)  sqlite3Put4byte((u8*)A,B)

/*
** Write a 32-bit integer into the given file descriptor. Return SQLITE_OK
** on success or an error code if something goes wrong.
*/
static int write32bits(sqlite3_file *fd, i64 offset, u32 val){
  char ac[4];
  put32bits(ac, val);
  return sqlite3OsWrite(fd, ac, 4, offset);
}

/*
** Unlock the database file to level eLock, which must be either NO_LOCK
** or SHARED_LOCK. Regardless of whether or not the call to xUnlock()
** succeeds, set the Pager.eLock variable to match the (attempted) new lock.
**
** Except, if Pager.eLock is set to UNKNOWN_LOCK when this function is
** called, do not modify it. See the comment above the #define of
** UNKNOWN_LOCK for an explanation of this.
*/
static int pagerUnlockDb(Pager *pPager, int eLock){
  int rc = SQLITE_OK;

  assert( !pPager->exclusiveMode || pPager->eLock==eLock );
  assert( eLock==NO_LOCK || eLock==SHARED_LOCK );
  assert( eLock!=NO_LOCK || pagerUseWal(pPager)==0 );
  if( isOpen(pPager->fd) ){
    assert( pPager->eLock>=eLock );
    rc = pPager->noLock ? SQLITE_OK : sqlite3OsUnlock(pPager->fd, eLock);
    if( pPager->eLock!=UNKNOWN_LOCK ){
      pPager->eLock = (u8)eLock;
    }
    IOTRACE(("UNLOCK %p %d\n", pPager, eLock))
  }
  return rc;
}

/*
** Lock the database file to level eLock, which must be either SHARED_LOCK,
** RESERVED_LOCK or EXCLUSIVE_LOCK. If the caller is successful, set the
** Pager.eLock variable to the new locking state.
**
** Except, if Pager.eLock is set to UNKNOWN_LOCK when this function is
** called, do not modify it unless the new locking state is EXCLUSIVE_LOCK.
** See the comment above the #define of UNKNOWN_LOCK for an explanation
** of this.
*/
static int pagerLockDb(Pager *pPager, int eLock){
  int rc = SQLITE_OK;

  assert( eLock==SHARED_LOCK || eLock==RESERVED_LOCK || eLock==EXCLUSIVE_LOCK );
  if( pPager->eLock<eLock || pPager->eLock==UNKNOWN_LOCK ){
    rc = pPager->noLock ? SQLITE_OK : sqlite3OsLock(pPager->fd, eLock);
    if( rc==SQLITE_OK && (pPager->eLock!=UNKNOWN_LOCK||eLock==EXCLUSIVE_LOCK) ){
      pPager->eLock = (u8)eLock;
      IOTRACE(("LOCK %p %d\n", pPager, eLock))
    }
  }
  return rc;
}

/*
** This function determines whether or not the atomic-write optimization
** can be used with this pager. The optimization can be used if:
**
**  (a) the value returned by OsDeviceCharacteristics() indicates that
**      a database page may be written atomically, and
**  (b) the value returned by OsSectorSize() is less than or equal
**      to the page size.
**
** The optimization is also always enabled for temporary files.
** It is an error to call this function if pPager is opened on an in-memory
** database.
**
** If the optimization cannot be used, 0 is returned. If it can be used,
** then the value returned is the size of the journal file when it
** contains rollback data for exactly one page.
*/
#ifdef SQLITE_ENABLE_ATOMIC_WRITE
static int jrnlBufferSize(Pager *pPager){
  assert( !MEMDB );
  if( !pPager->tempFile ){
    int dc;                           /* Device characteristics */
    int nSector;                      /* Sector size */
    int szPage;                       /* Page size */

    assert( isOpen(pPager->fd) );
    dc = sqlite3OsDeviceCharacteristics(pPager->fd);
    nSector = pPager->sectorSize;
    szPage = pPager->pageSize;

    assert(SQLITE_IOCAP_ATOMIC512==(512>>8));
    assert(SQLITE_IOCAP_ATOMIC64K==(65536>>8));
    if( 0==(dc&(SQLITE_IOCAP_ATOMIC|(szPage>>8)) || nSector>szPage) ){
      return 0;
    }
  }

  return JOURNAL_HDR_SZ(pPager) + JOURNAL_PG_SZ(pPager);
}
#endif

/*
** If SQLITE_CHECK_PAGES is defined then we do some sanity checking
** on the cache using a hash function. This is used for testing
** and debugging only.
*/
#ifdef SQLITE_CHECK_PAGES
/*
** Return a 32-bit hash of the page data for pPage.
*/
static u32 pager_datahash(int nByte, unsigned char *pData){
  u32 hash = 0;
  int i;
  for(i=0; i<nByte; i++){
    hash = (hash*1039) + pData[i];
  }
  return hash;
}
static u32 pager_pagehash(PgHdr *pPage){
  return pager_datahash(pPage->pPager->pageSize, (unsigned char *)pPage->pData);
}
static void pager_set_pagehash(PgHdr *pPage){
  pPage->pageHash = pager_pagehash(pPage);
}

/*
** The CHECK_PAGE macro takes a PgHdr* as an argument. If SQLITE_CHECK_PAGES
** is defined, and NDEBUG is not defined, an assert() statement checks
** that the page is either dirty or still matches the calculated page-hash.
*/
#define CHECK_PAGE(x) checkPage(x)
static void checkPage(PgHdr *pPg){
  Pager *pPager = pPg->pPager;
  assert( pPager->eState!=PAGER_ERROR );
  assert( (pPg->flags&PGHDR_DIRTY) || pPg->pageHash==pager_pagehash(pPg) );
}

#else
#define pager_datahash(X,Y)  0
#define pager_pagehash(X)  0
#define pager_set_pagehash(X)
#define CHECK_PAGE(x)
#endif  /* SQLITE_CHECK_PAGES */

/*
** When this is called the journal file for pager pPager must be open.
** This function attempts to read a master journal file name from the
** end of the file and, if successful, copies it into memory supplied
** by the caller. See comments above writeMasterJournal() for the format
** used to store a master journal file name at the end of a journal file.
**
** zMaster must point to a buffer of at least nMaster bytes allocated by
** the caller. This should be sqlite3_vfs.mxPathname+1 (to ensure there is
** enough space to write the master journal name). If the master journal
** name in the journal is longer than nMaster bytes (including a
** nul-terminator), then this is handled as if no master journal name
** were present in the journal.
**
** If a master journal file name is present at the end of the journal
** file, then it is copied into the buffer pointed to by zMaster. A
** nul-terminator byte is appended to the buffer following the master
** journal file name.
**
** If it is determined that no master journal file name is present
** zMaster[0] is set to 0 and SQLITE_OK returned.
**
** If an error occurs while reading from the journal file, an SQLite
** error code is returned.
*/
static int readMasterJournal(sqlite3_file *pJrnl, char *zMaster, u32 nMaster){
  int rc;                    /* Return code */
  u32 len;                   /* Length in bytes of master journal name */
  i64 szJ;                   /* Total size in bytes of journal file pJrnl */
  u32 cksum;                 /* MJ checksum value read from journal */
  u32 u;                     /* Unsigned loop counter */
  unsigned char aMagic[8];   /* A buffer to hold the magic header */
  zMaster[0] = '\0';

  if( SQLITE_OK!=(rc = sqlite3OsFileSize(pJrnl, &szJ))
   || szJ<16
   || SQLITE_OK!=(rc = read32bits(pJrnl, szJ-16, &len))
   || len>=nMaster
   || len==0
   || SQLITE_OK!=(rc = read32bits(pJrnl, szJ-12, &cksum))
   || SQLITE_OK!=(rc = sqlite3OsRead(pJrnl, aMagic, 8, szJ-8))
   || memcmp(aMagic, aJournalMagic, 8)
   || SQLITE_OK!=(rc = sqlite3OsRead(pJrnl, zMaster, len, szJ-16-len))
  ){
    return rc;
  }

  /* See if the checksum matches the master journal name.  If it does not,
  ** the name cannot be trusted, so behave as if no master journal name
  ** were present (zMaster is left as an empty string). */
  for(u=0; u<len; u++){
    cksum -= zMaster[u];
  }
  if( cksum ){
    len = 0;
  }
  zMaster[len] = '\0';

  return SQLITE_OK;
}

/*
** Return the offset of the sector boundary at or immediately
** following the value in pPager->journalOff, assuming a sector
** size of pPager->sectorSize bytes.
**
** i.e. for a sector size of 512:
**
**   Pager.journalOff          Return value
**   ---------------------------------------
**   0                         0
**   512                       512
**   100                       512
**   2000                      2048
**
*/
static i64 journalHdrOffset(Pager *pPager){
  i64 offset = 0;
  i64 c = pPager->journalOff;
  if( c ){
    offset = ((c-1)/JOURNAL_HDR_SZ(pPager) + 1) * JOURNAL_HDR_SZ(pPager);
  }
  assert( offset%JOURNAL_HDR_SZ(pPager)==0 );
  assert( offset>=c );
  assert( (offset-c)<JOURNAL_HDR_SZ(pPager) );
  return offset;
}

/*
** The journal file must be open when this is called.  Depending on the
** doTruncate argument and the Pager.journalSizeLimit setting, either
** truncate the journal file to zero bytes or zero the first 28 bytes of
** it (invalidating the first journal header), syncing afterwards unless
** the pager is in no-sync mode.  If a journal size limit is configured
** and the file is still larger than that limit, truncate it to the limit.
*/
static int zeroJournalHdr(Pager *pPager, int doTruncate){
  int rc = SQLITE_OK;                               /* Return code */
  assert( isOpen(pPager->jfd) );
  if( pPager->journalOff ){
    const i64 iLimit = pPager->journalSizeLimit;    /* Local cache of jsl */

    IOTRACE(("JZEROHDR %p\n", pPager))
    if( doTruncate || iLimit==0 ){
      rc = sqlite3OsTruncate(pPager->jfd, 0);
    }else{
      static const char zeroHdr[28] = {0};
      rc = sqlite3OsWrite(pPager->jfd, zeroHdr, sizeof(zeroHdr), 0);
    }
    if( rc==SQLITE_OK && !pPager->noSync ){
      rc = sqlite3OsSync(pPager->jfd, SQLITE_SYNC_DATAONLY|pPager->syncFlags);
    }

    /* At this point the transaction is committed but the write lock
    ** is still held on the file. If there is a size limit configured for
    ** the persistent journal and the journal file currently consumes more
    ** space than that limit allows for, truncate it now. There is no need
    ** to sync the file following this operation.
    */
    if( rc==SQLITE_OK && iLimit>0 ){
      i64 sz;
      rc = sqlite3OsFileSize(pPager->jfd, &sz);
      if( rc==SQLITE_OK && sz>iLimit ){
        rc = sqlite3OsTruncate(pPager->jfd, iLimit);
      }
    }
  }
  return rc;
}

/*
** The journal file must be open when this routine is called. A journal
** header (JOURNAL_HDR_SZ bytes) is written into the journal file at the
** current location.
**
** The format for the journal header is as follows:
** - 8 bytes: Magic identifying journal format.
** - 4 bytes: Number of records in journal, or -1 if no-sync mode is on.
** - 4 bytes: Random number used for page hash.
** - 4 bytes: Initial database page count.
** - 4 bytes: Sector size used by the process that wrote this journal.
** - 4 bytes: Database page size.
**
** Followed by (JOURNAL_HDR_SZ - 28) bytes of unused space.
*/
static int writeJournalHdr(Pager *pPager){
  int rc = SQLITE_OK;                 /* Return code */
  char *zHeader = pPager->pTmpSpace;  /* Temporary space used to build header */
  u32 nHeader = (u32)pPager->pageSize;/* Size of buffer pointed to by zHeader */
  u32 nWrite;                         /* Bytes of header sector written */
  int ii;                             /* Loop counter */

  assert( isOpen(pPager->jfd) );      /* Journal file must be open. */

  if( nHeader>JOURNAL_HDR_SZ(pPager) ){
    nHeader = JOURNAL_HDR_SZ(pPager);
  }

  /* If there are active savepoints and any of them were created
  ** since the most recent journal header was written, update the
  ** PagerSavepoint.iHdrOffset fields now.
  */
  for(ii=0; ii<pPager->nSavepoint; ii++){
    if( pPager->aSavepoint[ii].iHdrOffset==0 ){
      pPager->aSavepoint[ii].iHdrOffset = pPager->journalOff;
    }
  }

  pPager->journalHdr = pPager->journalOff = journalHdrOffset(pPager);

  /*
  ** Write the nRec Field - the number of page records that follow this
  ** journal header. Normally, zero is written to this value at this time.
  ** After the records are added to the journal (and the journal synced,
  ** if in full-sync mode), the zero is overwritten with the true number
  ** of records (see syncJournal()).
  **
  ** A faster alternative is to write 0xFFFFFFFF to the nRec field. When
  ** reading the journal this value tells SQLite to assume that the
  ** rest of the journal file contains valid page records. This assumption
  ** is dangerous, as if a failure occurred whilst writing to the journal
  ** file it may contain some garbage data. There are two scenarios
  ** where this risk can be ignored:
  **
  **   * When the pager is in no-sync mode. Corruption can follow a
  **     power failure in this case anyway.
  **
  **   * When the SQLITE_IOCAP_SAFE_APPEND flag is set. This guarantees
  **     that garbage data is never appended to the journal file.
  */
  assert( isOpen(pPager->fd) || pPager->noSync );
  if( pPager->noSync || (pPager->journalMode==PAGER_JOURNALMODE_MEMORY)
   || (sqlite3OsDeviceCharacteristics(pPager->fd)&SQLITE_IOCAP_SAFE_APPEND)
  ){
    memcpy(zHeader, aJournalMagic, sizeof(aJournalMagic));
    put32bits(&zHeader[sizeof(aJournalMagic)], 0xffffffff);
  }else{
    memset(zHeader, 0, sizeof(aJournalMagic)+4);
  }

  /* The random check-hash initializer */
  sqlite3_randomness(sizeof(pPager->cksumInit), &pPager->cksumInit);
  put32bits(&zHeader[sizeof(aJournalMagic)+4], pPager->cksumInit);
  /* The initial database size */
  put32bits(&zHeader[sizeof(aJournalMagic)+8], pPager->dbOrigSize);
  /* The assumed sector size for this process */
  put32bits(&zHeader[sizeof(aJournalMagic)+12], pPager->sectorSize);
  /* The page size */
  put32bits(&zHeader[sizeof(aJournalMagic)+16], pPager->pageSize);

  /* Initializing the tail of the buffer is not necessary. Everything
  ** works fine if the following memset() is omitted. But initializing
  ** the memory prevents valgrind from complaining, so we are willing to
  ** take the performance hit.
  */
  memset(&zHeader[sizeof(aJournalMagic)+20], 0,
         nHeader-(sizeof(aJournalMagic)+20));

  /* In theory, it is only necessary to write the 28 bytes that the
  ** journal header consumes to the journal file here. Then increment the
  ** Pager.journalOff variable by JOURNAL_HDR_SZ so that the next
  ** record is written to the following sector (leaving a gap in the file
  ** that will be implicitly filled in by the OS).
  **
  ** However it has been discovered that on some systems this pattern can
  ** be significantly slower than contiguously writing data to the file,
  ** even if that means explicitly writing data to the block of
  ** (JOURNAL_HDR_SZ - 28) bytes that will not be used. So that is what
  ** is done.
  **
  ** The loop is required here in case the sector-size is larger than the
  ** database page size. Since the zHeader buffer is only Pager.pageSize
  ** bytes in size, more than one call to sqlite3OsWrite() may be required
  ** to populate the entire journal header sector.
  */
  for(nWrite=0; rc==SQLITE_OK&&nWrite<JOURNAL_HDR_SZ(pPager); nWrite+=nHeader){
    IOTRACE(("JHDR %p %lld %d\n", pPager, pPager->journalHdr, nHeader))
    rc = sqlite3OsWrite(pPager->jfd, zHeader, nHeader, pPager->journalOff);
    assert( pPager->journalHdr <= pPager->journalOff );
    pPager->journalOff += nHeader;
  }

  return rc;
}

/*
** The journal file must be open when this is called.
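**
** The sector-boundary rounding performed by journalHdrOffset() (see the
** table above) can be reproduced standalone.  This sketch is not part of
** SQLite; the helper name is hypothetical and 512 is used as the sector
** size, as in the table:
**
**     #include <stdio.h>
**     /* Round iOff up to the next multiple of nSector (0 stays 0) */
**     static long long exampleHdrOffset(long long iOff, long long nSector){
**       return iOff ? ((iOff-1)/nSector + 1)*nSector : 0;
**     }
**     int main(void){
**       printf("%lld\n", exampleHdrOffset(0,    512));   /* 0    */
**       printf("%lld\n", exampleHdrOffset(512,  512));   /* 512  */
**       printf("%lld\n", exampleHdrOffset(100,  512));   /* 512  */
**       printf("%lld\n", exampleHdrOffset(2000, 512));   /* 2048 */
**       return 0;
**     }
**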
A journal header file ** (JOURNAL_HDR_SZ bytes) is read from the current location in the journal ** file. The current location in the journal file is given by ** pPager->journalOff. See comments above function writeJournalHdr() for ** a description of the journal header format. ** ** If the header is read successfully, *pNRec is set to the number of ** page records following this header and *pDbSize is set to the size of the ** database before the transaction began, in pages. Also, pPager->cksumInit ** is set to the value read from the journal header. SQLITE_OK is returned ** in this case. ** ** If the journal header file appears to be corrupted, SQLITE_DONE is ** returned and *pNRec and *PDbSize are undefined. If JOURNAL_HDR_SZ bytes ** cannot be read from the journal file an error code is returned. */ static int readJournalHdr( Pager *pPager, /* Pager object */ int isHot, i64 journalSize, /* Size of the open journal file in bytes */ u32 *pNRec, /* OUT: Value read from the nRec field */ u32 *pDbSize /* OUT: Value of original database size field */ ){ int rc; /* Return code */ unsigned char aMagic[8]; /* A buffer to hold the magic header */ i64 iHdrOff; /* Offset of journal header being read */ assert( isOpen(pPager->jfd) ); /* Journal file must be open. */ /* Advance Pager.journalOff to the start of the next sector. If the ** journal file is too small for there to be a header stored at this ** point, return SQLITE_DONE. */ pPager->journalOff = journalHdrOffset(pPager); if( pPager->journalOff+JOURNAL_HDR_SZ(pPager) > journalSize ){ return SQLITE_DONE; } iHdrOff = pPager->journalOff; /* Read in the first 8 bytes of the journal header. If they do not match ** the magic string found at the start of each journal header, return ** SQLITE_DONE. If an IO error occurs, return an error code. Otherwise, ** proceed. */ if( isHot || iHdrOff!=pPager->journalHdr ){ rc = sqlite3OsRead(pPager->jfd, aMagic, sizeof(aMagic), iHdrOff); if( rc ){ return rc; } if( memcmp(aMagic, aJournalMagic, sizeof(aMagic))!=0 ){ return SQLITE_DONE; } } /* Read the first three 32-bit fields of the journal header: The nRec ** field, the checksum-initializer and the database size at the start ** of the transaction. Return an error code if anything goes wrong. */ if( SQLITE_OK!=(rc = read32bits(pPager->jfd, iHdrOff+8, pNRec)) || SQLITE_OK!=(rc = read32bits(pPager->jfd, iHdrOff+12, &pPager->cksumInit)) || SQLITE_OK!=(rc = read32bits(pPager->jfd, iHdrOff+16, pDbSize)) ){ return rc; } if( pPager->journalOff==0 ){ u32 iPageSize; /* Page-size field of journal header */ u32 iSectorSize; /* Sector-size field of journal header */ /* Read the page-size and sector-size journal header fields. */ if( SQLITE_OK!=(rc = read32bits(pPager->jfd, iHdrOff+20, &iSectorSize)) || SQLITE_OK!=(rc = read32bits(pPager->jfd, iHdrOff+24, &iPageSize)) ){ return rc; } /* Versions of SQLite prior to 3.5.8 set the page-size field of the ** journal header to zero. In this case, assume that the Pager.pageSize ** variable is already set to the correct page size. */ if( iPageSize==0 ){ iPageSize = pPager->pageSize; } /* Check that the values read from the page-size and sector-size fields ** are within range. To be 'in range', both values need to be a power ** of two greater than or equal to 512 or 32, and not greater than their ** respective compile time maximum limits. 
*/ if( iPageSize<512 || iSectorSize<32 || iPageSize>SQLITE_MAX_PAGE_SIZE || iSectorSize>MAX_SECTOR_SIZE || ((iPageSize-1)&iPageSize)!=0 || ((iSectorSize-1)&iSectorSize)!=0 ){ /* If the either the page-size or sector-size in the journal-header is ** invalid, then the process that wrote the journal-header must have ** crashed before the header was synced. In this case stop reading ** the journal file here. */ return SQLITE_DONE; } /* Update the page-size to match the value read from the journal. ** Use a testcase() macro to make sure that malloc failure within ** PagerSetPagesize() is tested. */ rc = sqlite3PagerSetPagesize(pPager, &iPageSize, -1); testcase( rc!=SQLITE_OK ); /* Update the assumed sector-size to match the value used by ** the process that created this journal. If this journal was ** created by a process other than this one, then this routine ** is being called from within pager_playback(). The local value ** of Pager.sectorSize is restored at the end of that routine. */ pPager->sectorSize = iSectorSize; } pPager->journalOff += JOURNAL_HDR_SZ(pPager); return rc; } /* ** Write the supplied master journal name into the journal file for pager ** pPager at the current location. The master journal name must be the last ** thing written to a journal file. If the pager is in full-sync mode, the ** journal file descriptor is advanced to the next sector boundary before ** anything is written. The format is: ** ** + 4 bytes: PAGER_MJ_PGNO. ** + N bytes: Master journal filename in utf-8. ** + 4 bytes: N (length of master journal name in bytes, no nul-terminator). ** + 4 bytes: Master journal name checksum. ** + 8 bytes: aJournalMagic[]. ** ** The master journal page checksum is the sum of the bytes in the master ** journal name, where each byte is interpreted as a signed 8-bit integer. ** ** If zMaster is a NULL pointer (occurs for a single database transaction), ** this call is a no-op. */ static int writeMasterJournal(Pager *pPager, const char *zMaster){ int rc; /* Return code */ int nMaster; /* Length of string zMaster */ i64 iHdrOff; /* Offset of header in journal file */ i64 jrnlSize; /* Size of journal file on disk */ u32 cksum = 0; /* Checksum of string zMaster */ assert( pPager->setMaster==0 ); assert( !pagerUseWal(pPager) ); if( !zMaster || pPager->journalMode==PAGER_JOURNALMODE_MEMORY || !isOpen(pPager->jfd) ){ return SQLITE_OK; } pPager->setMaster = 1; assert( pPager->journalHdr <= pPager->journalOff ); /* Calculate the length in bytes and the checksum of zMaster */ for(nMaster=0; zMaster[nMaster]; nMaster++){ cksum += zMaster[nMaster]; } /* If in full-sync mode, advance to the next disk sector before writing ** the master journal name. This is in case the previous page written to ** the journal has already been synced. */ if( pPager->fullSync ){ pPager->journalOff = journalHdrOffset(pPager); } iHdrOff = pPager->journalOff; /* Write the master journal data to the end of the journal file. If ** an error occurs, return the error code to the caller. 
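  **
  ** A master-journal record, as laid out above, occupies 4+N+4+4+8 = N+20
  ** bytes, and its checksum is simply the byte-sum of the name.  The
  ** following standalone sketch (not part of SQLite; the file name is
  ** hypothetical) computes both values:
  **
  **     #include <stdio.h>
  **     #include <string.h>
  **     static unsigned exampleMjChecksum(const char *zMaster){
  **       unsigned cksum = 0;
  **       int i;
  **       for(i=0; zMaster[i]; i++) cksum += zMaster[i];  /* sum of name bytes */
  **       return cksum;
  **     }
  **     int main(void){
  **       const char *zMaster = "example.db-mj0123ABCD";  /* hypothetical */
  **       int nMaster = (int)strlen(zMaster);
  **       printf("record bytes: %d\n", nMaster+20);       /* 4+N+4+4+8 */
  **       printf("checksum: %u\n", exampleMjChecksum(zMaster));
  **       return 0;
  **     }
  **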
  */
  if( (0 != (rc = write32bits(pPager->jfd, iHdrOff, PAGER_MJ_PGNO(pPager))))
   || (0 != (rc = sqlite3OsWrite(pPager->jfd, zMaster, nMaster, iHdrOff+4)))
   || (0 != (rc = write32bits(pPager->jfd, iHdrOff+4+nMaster, nMaster)))
   || (0 != (rc = write32bits(pPager->jfd, iHdrOff+4+nMaster+4, cksum)))
   || (0 != (rc = sqlite3OsWrite(pPager->jfd, aJournalMagic, 8,
                                 iHdrOff+4+nMaster+8)))
  ){
    return rc;
  }
  pPager->journalOff += (nMaster+20);

  /* If the pager is in persistent-journal mode, then the physical
  ** journal-file may extend past the end of the master-journal name
  ** and 8 bytes of magic data just written to the file. This is
  ** dangerous because the code to rollback a hot-journal file
  ** will not be able to find the master-journal name to determine
  ** whether or not the journal is hot.
  **
  ** Easiest thing to do in this scenario is to truncate the journal
  ** file to the required size.
  */
  if( SQLITE_OK==(rc = sqlite3OsFileSize(pPager->jfd, &jrnlSize))
   && jrnlSize>pPager->journalOff
  ){
    rc = sqlite3OsTruncate(pPager->jfd, pPager->journalOff);
  }
  return rc;
}

/*
** Discard the entire contents of the in-memory page-cache.
*/
static void pager_reset(Pager *pPager){
  pPager->iDataVersion++;
  sqlite3BackupRestart(pPager->pBackup);
  sqlite3PcacheClear(pPager->pPCache);
}

/*
** Return the pPager->iDataVersion value
*/
SQLITE_PRIVATE u32 sqlite3PagerDataVersion(Pager *pPager){
  assert( pPager->eState>PAGER_OPEN );
  return pPager->iDataVersion;
}

/*
** Free all structures in the Pager.aSavepoint[] array and set both
** Pager.aSavepoint and Pager.nSavepoint to zero. Close the sub-journal
** if it is open and the pager is not in exclusive mode.
*/
static void releaseAllSavepoints(Pager *pPager){
  int ii;               /* Iterator for looping through Pager.aSavepoint */
  for(ii=0; ii<pPager->nSavepoint; ii++){
    sqlite3BitvecDestroy(pPager->aSavepoint[ii].pInSavepoint);
  }
  if( !pPager->exclusiveMode || sqlite3IsMemJournal(pPager->sjfd) ){
    sqlite3OsClose(pPager->sjfd);
  }
  sqlite3_free(pPager->aSavepoint);
  pPager->aSavepoint = 0;
  pPager->nSavepoint = 0;
  pPager->nSubRec = 0;
}

/*
** Set the bit number pgno in the PagerSavepoint.pInSavepoint
** bitvecs of all open savepoints. Return SQLITE_OK if successful
** or SQLITE_NOMEM if a malloc failure occurs.
*/
static int addToSavepointBitvecs(Pager *pPager, Pgno pgno){
  int ii;                   /* Loop counter */
  int rc = SQLITE_OK;       /* Result code */

  for(ii=0; ii<pPager->nSavepoint; ii++){
    PagerSavepoint *p = &pPager->aSavepoint[ii];
    if( pgno<=p->nOrig ){
      rc |= sqlite3BitvecSet(p->pInSavepoint, pgno);
      testcase( rc==SQLITE_NOMEM );
      assert( rc==SQLITE_OK || rc==SQLITE_NOMEM );
    }
  }
  return rc;
}

/*
** This function is a no-op if the pager is in exclusive mode and not
** in the ERROR state. Otherwise, it switches the pager to PAGER_OPEN
** state.
**
** If the pager is not in exclusive-access mode, the database file is
** completely unlocked. If the file is unlocked and the file-system does
** not exhibit the UNDELETABLE_WHEN_OPEN property, the journal file is
** closed (if it is open).
**
** If the pager is in ERROR state when this function is called, the
** contents of the pager cache are discarded before switching back to
** the OPEN state. Regardless of whether the pager is in exclusive-mode
** or not, any journal file left in the file-system will be treated
** as a hot-journal and rolled back the next time a read-transaction
** is opened (by this or by any other connection).
*/ static void pager_unlock(Pager *pPager){ assert( pPager->eState==PAGER_READER || pPager->eState==PAGER_OPEN || pPager->eState==PAGER_ERROR ); sqlite3BitvecDestroy(pPager->pInJournal); pPager->pInJournal = 0; releaseAllSavepoints(pPager); if( pagerUseWal(pPager) ){ assert( !isOpen(pPager->jfd) ); sqlite3WalEndReadTransaction(pPager->pWal); pPager->eState = PAGER_OPEN; }else if( !pPager->exclusiveMode ){ int rc; /* Error code returned by pagerUnlockDb() */ int iDc = isOpen(pPager->fd)?sqlite3OsDeviceCharacteristics(pPager->fd):0; /* If the operating system support deletion of open files, then ** close the journal file when dropping the database lock. Otherwise ** another connection with journal_mode=delete might delete the file ** out from under us. */ assert( (PAGER_JOURNALMODE_MEMORY & 5)!=1 ); assert( (PAGER_JOURNALMODE_OFF & 5)!=1 ); assert( (PAGER_JOURNALMODE_WAL & 5)!=1 ); assert( (PAGER_JOURNALMODE_DELETE & 5)!=1 ); assert( (PAGER_JOURNALMODE_TRUNCATE & 5)==1 ); assert( (PAGER_JOURNALMODE_PERSIST & 5)==1 ); if( 0==(iDc & SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN) || 1!=(pPager->journalMode & 5) ){ sqlite3OsClose(pPager->jfd); } /* If the pager is in the ERROR state and the call to unlock the database ** file fails, set the current lock to UNKNOWN_LOCK. See the comment ** above the #define for UNKNOWN_LOCK for an explanation of why this ** is necessary. */ rc = pagerUnlockDb(pPager, NO_LOCK); if( rc!=SQLITE_OK && pPager->eState==PAGER_ERROR ){ pPager->eLock = UNKNOWN_LOCK; } /* The pager state may be changed from PAGER_ERROR to PAGER_OPEN here ** without clearing the error code. This is intentional - the error ** code is cleared and the cache reset in the block below. */ assert( pPager->errCode || pPager->eState!=PAGER_ERROR ); pPager->changeCountDone = 0; pPager->eState = PAGER_OPEN; } /* If Pager.errCode is set, the contents of the pager cache cannot be ** trusted. Now that there are no outstanding references to the pager, ** it can safely move back to PAGER_OPEN state. This happens in both ** normal and exclusive-locking mode. */ if( pPager->errCode ){ assert( !MEMDB ); pager_reset(pPager); pPager->changeCountDone = pPager->tempFile; pPager->eState = PAGER_OPEN; pPager->errCode = SQLITE_OK; if( USEFETCH(pPager) ) sqlite3OsUnfetch(pPager->fd, 0, 0); } pPager->journalOff = 0; pPager->journalHdr = 0; pPager->setMaster = 0; } /* ** This function is called whenever an IOERR or FULL error that requires ** the pager to transition into the ERROR state may ahve occurred. ** The first argument is a pointer to the pager structure, the second ** the error-code about to be returned by a pager API function. The ** value returned is a copy of the second argument to this function. ** ** If the second argument is SQLITE_FULL, SQLITE_IOERR or one of the ** IOERR sub-codes, the pager enters the ERROR state and the error code ** is stored in Pager.errCode. While the pager remains in the ERROR state, ** all major API calls on the Pager will immediately return Pager.errCode. ** ** The ERROR state indicates that the contents of the pager-cache ** cannot be trusted. This state can be cleared by completely discarding ** the contents of the pager-cache. If a transaction was active when ** the persistent error occurred, then the rollback journal may need ** to be replayed to restore the contents of the database file (as if ** it were a hot-journal). 
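**
** Extended result codes such as the SQLITE_IOERR_XXX family keep the
** primary code in their low byte, so masking with 0xff (as pager_error()
** does below) recovers the primary code.  A standalone sketch, using the
** standard numeric values SQLITE_IOERR==10 and SQLITE_FULL==13 (not part
** of SQLite itself):
**
**     #include <stdio.h>
**     int main(void){
**       int rcExtended = 10 | (3<<8);             /* some SQLITE_IOERR_* subcode */
**       printf("%d\n", (rcExtended & 0xff)==10);  /* 1: treated as IOERR    */
**       printf("%d\n", (13 & 0xff)==13);          /* 1: FULL has no subcode */
**       return 0;
**     }
**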
*/ static int pager_error(Pager *pPager, int rc){ int rc2 = rc & 0xff; assert( rc==SQLITE_OK || !MEMDB ); assert( pPager->errCode==SQLITE_FULL || pPager->errCode==SQLITE_OK || (pPager->errCode & 0xff)==SQLITE_IOERR ); if( rc2==SQLITE_FULL || rc2==SQLITE_IOERR ){ pPager->errCode = rc; pPager->eState = PAGER_ERROR; } return rc; } static int pager_truncate(Pager *pPager, Pgno nPage); /* ** This routine ends a transaction. A transaction is usually ended by ** either a COMMIT or a ROLLBACK operation. This routine may be called ** after rollback of a hot-journal, or if an error occurs while opening ** the journal file or writing the very first journal-header of a ** database transaction. ** ** This routine is never called in PAGER_ERROR state. If it is called ** in PAGER_NONE or PAGER_SHARED state and the lock held is less ** exclusive than a RESERVED lock, it is a no-op. ** ** Otherwise, any active savepoints are released. ** ** If the journal file is open, then it is "finalized". Once a journal ** file has been finalized it is not possible to use it to roll back a ** transaction. Nor will it be considered to be a hot-journal by this ** or any other database connection. Exactly how a journal is finalized ** depends on whether or not the pager is running in exclusive mode and ** the current journal-mode (Pager.journalMode value), as follows: ** ** journalMode==MEMORY ** Journal file descriptor is simply closed. This destroys an ** in-memory journal. ** ** journalMode==TRUNCATE ** Journal file is truncated to zero bytes in size. ** ** journalMode==PERSIST ** The first 28 bytes of the journal file are zeroed. This invalidates ** the first journal header in the file, and hence the entire journal ** file. An invalid journal file cannot be rolled back. ** ** journalMode==DELETE ** The journal file is closed and deleted using sqlite3OsDelete(). ** ** If the pager is running in exclusive mode, this method of finalizing ** the journal file is never used. Instead, if the journalMode is ** DELETE and the pager is in exclusive mode, the method described under ** journalMode==PERSIST is used instead. ** ** After the journal is finalized, the pager moves to PAGER_READER state. ** If running in non-exclusive rollback mode, the lock on the file is ** downgraded to a SHARED_LOCK. ** ** SQLITE_OK is returned if no error occurs. If an error occurs during ** any of the IO operations to finalize the journal file or unlock the ** database then the IO error code is returned to the user. If the ** operation to finalize the journal file fails, then the code still ** tries to unlock the database file if not in exclusive mode. If the ** unlock operation fails as well, then the first error code related ** to the first error encountered (the journal finalization one) is ** returned. */ static int pager_end_transaction(Pager *pPager, int hasMaster, int bCommit){ int rc = SQLITE_OK; /* Error code from journal finalization operation */ int rc2 = SQLITE_OK; /* Error code from db file unlock operation */ /* Do nothing if the pager does not have an open write transaction ** or at least a RESERVED lock. This function may be called when there ** is no write-transaction active but a RESERVED or greater lock is ** held under two circumstances: ** ** 1. After a successful hot-journal rollback, it is called with ** eState==PAGER_NONE and eLock==EXCLUSIVE_LOCK. ** ** 2. 
If a connection with locking_mode=exclusive holding an EXCLUSIVE ** lock switches back to locking_mode=normal and then executes a ** read-transaction, this function is called with eState==PAGER_READER ** and eLock==EXCLUSIVE_LOCK when the read-transaction is closed. */ assert( assert_pager_state(pPager) ); assert( pPager->eState!=PAGER_ERROR ); if( pPager->eState<PAGER_WRITER_LOCKED && pPager->eLock<RESERVED_LOCK ){ return SQLITE_OK; } releaseAllSavepoints(pPager); assert( isOpen(pPager->jfd) || pPager->pInJournal==0 ); if( isOpen(pPager->jfd) ){ assert( !pagerUseWal(pPager) ); /* Finalize the journal file. */ if( sqlite3IsMemJournal(pPager->jfd) ){ assert( pPager->journalMode==PAGER_JOURNALMODE_MEMORY ); sqlite3OsClose(pPager->jfd); }else if( pPager->journalMode==PAGER_JOURNALMODE_TRUNCATE ){ if( pPager->journalOff==0 ){ rc = SQLITE_OK; }else{ rc = sqlite3OsTruncate(pPager->jfd, 0); if( rc==SQLITE_OK && pPager->fullSync ){ /* Make sure the new file size is written into the inode right away. ** Otherwise the journal might resurrect following a power loss and ** cause the last transaction to roll back. See ** https://bugzilla.mozilla.org/show_bug.cgi?id=1072773 */ rc = sqlite3OsSync(pPager->jfd, pPager->syncFlags); } } pPager->journalOff = 0; }else if( pPager->journalMode==PAGER_JOURNALMODE_PERSIST || (pPager->exclusiveMode && pPager->journalMode!=PAGER_JOURNALMODE_WAL) ){ rc = zeroJournalHdr(pPager, hasMaster); pPager->journalOff = 0; }else{ /* This branch may be executed with Pager.journalMode==MEMORY if ** a hot-journal was just rolled back. In this case the journal ** file should be closed and deleted. If this connection writes to ** the database file, it will do so using an in-memory journal. */ int bDelete = (!pPager->tempFile && sqlite3JournalExists(pPager->jfd)); assert( pPager->journalMode==PAGER_JOURNALMODE_DELETE || pPager->journalMode==PAGER_JOURNALMODE_MEMORY || pPager->journalMode==PAGER_JOURNALMODE_WAL ); sqlite3OsClose(pPager->jfd); if( bDelete ){ rc = sqlite3OsDelete(pPager->pVfs, pPager->zJournal, 0); } } } #ifdef SQLITE_CHECK_PAGES sqlite3PcacheIterateDirty(pPager->pPCache, pager_set_pagehash); if( pPager->dbSize==0 && sqlite3PcacheRefCount(pPager->pPCache)>0 ){ PgHdr *p = sqlite3PagerLookup(pPager, 1); if( p ){ p->pageHash = 0; sqlite3PagerUnrefNotNull(p); } } #endif sqlite3BitvecDestroy(pPager->pInJournal); pPager->pInJournal = 0; pPager->nRec = 0; sqlite3PcacheCleanAll(pPager->pPCache); sqlite3PcacheTruncate(pPager->pPCache, pPager->dbSize); if( pagerUseWal(pPager) ){ /* Drop the WAL write-lock, if any. Also, if the connection was in ** locking_mode=exclusive mode but is no longer, drop the EXCLUSIVE ** lock held on the database file. */ rc2 = sqlite3WalEndWriteTransaction(pPager->pWal); assert( rc2==SQLITE_OK ); }else if( rc==SQLITE_OK && bCommit && pPager->dbFileSize>pPager->dbSize ){ /* This branch is taken when committing a transaction in rollback-journal ** mode if the database file on disk is larger than the database image. ** At this point the journal has been finalized and the transaction ** successfully committed, but the EXCLUSIVE lock is still held on the ** file. So it is safe to truncate the database file to its minimum ** required size.
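**
** For example (an illustrative walk-through, not from the original
** comments): with pageSize==1024, a file that still occupies 12 pages on
** disk (dbFileSize==12) while the committed image has shrunk to 10 pages
** (dbSize==10) is trimmed here from 12288 bytes down to 10240 bytes by the
** call below, i.e. roughly:
**
**     if( rc==SQLITE_OK && bCommit && pPager->dbFileSize>pPager->dbSize ){
**       rc = pager_truncate(pPager, pPager->dbSize);   // 12*1024 -> 10*1024
**     }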
*/ assert( pPager->eLock==EXCLUSIVE_LOCK ); rc = pager_truncate(pPager, pPager->dbSize); } if( rc==SQLITE_OK && bCommit && isOpen(pPager->fd) ){ rc = sqlite3OsFileControl(pPager->fd, SQLITE_FCNTL_COMMIT_PHASETWO, 0); if( rc==SQLITE_NOTFOUND ) rc = SQLITE_OK; } if( !pPager->exclusiveMode && (!pagerUseWal(pPager) || sqlite3WalExclusiveMode(pPager->pWal, 0)) ){ rc2 = pagerUnlockDb(pPager, SHARED_LOCK); pPager->changeCountDone = 0; } pPager->eState = PAGER_READER; pPager->setMaster = 0; return (rc==SQLITE_OK?rc2:rc); } /* ** Execute a rollback if a transaction is active and unlock the ** database file. ** ** If the pager has already entered the ERROR state, do not attempt ** the rollback at this time. Instead, pager_unlock() is called. The ** call to pager_unlock() will discard all in-memory pages, unlock ** the database file and move the pager back to OPEN state. If this ** means that there is a hot-journal left in the file-system, the next ** connection to obtain a shared lock on the pager (which may be this one) ** will roll it back. ** ** If the pager has not already entered the ERROR state, but an IO or ** malloc error occurs during a rollback, then this will itself cause ** the pager to enter the ERROR state. Which will be cleared by the ** call to pager_unlock(), as described above. */ static void pagerUnlockAndRollback(Pager *pPager){ if( pPager->eState!=PAGER_ERROR && pPager->eState!=PAGER_OPEN ){ assert( assert_pager_state(pPager) ); if( pPager->eState>=PAGER_WRITER_LOCKED ){ sqlite3BeginBenignMalloc(); sqlite3PagerRollback(pPager); sqlite3EndBenignMalloc(); }else if( !pPager->exclusiveMode ){ assert( pPager->eState==PAGER_READER ); pager_end_transaction(pPager, 0, 0); } } pager_unlock(pPager); } /* ** Parameter aData must point to a buffer of pPager->pageSize bytes ** of data. Compute and return a checksum based ont the contents of the ** page of data and the current value of pPager->cksumInit. ** ** This is not a real checksum. It is really just the sum of the ** random initial value (pPager->cksumInit) and every 200th byte ** of the page data, starting with byte offset (pPager->pageSize%200). ** Each byte is interpreted as an 8-bit unsigned integer. ** ** Changing the formula used to compute this checksum results in an ** incompatible journal file format. ** ** If journal corruption occurs due to a power failure, the most likely ** scenario is that one end or the other of the record will be changed. ** It is much less likely that the two ends of the journal record will be ** correct and the middle be corrupt. Thus, this "checksum" scheme, ** though fast and simple, catches the mostly likely kind of corruption. */ static u32 pager_cksum(Pager *pPager, const u8 *aData){ u32 cksum = pPager->cksumInit; /* Checksum value to return */ int i = pPager->pageSize-200; /* Loop counter */ while( i>0 ){ cksum += aData[i]; i -= 200; } return cksum; } /* ** Report the current page size and number of reserved bytes back ** to the codec. */ #ifdef SQLITE_HAS_CODEC static void pagerReportSize(Pager *pPager){ if( pPager->xCodecSizeChng ){ pPager->xCodecSizeChng(pPager->pCodec, pPager->pageSize, (int)pPager->nReserve); } } #else # define pagerReportSize(X) /* No-op if we do not support a codec */ #endif /* ** Read a single page from either the journal file (if isMainJrnl==1) or ** from the sub-journal (if isMainJrnl==0) and playback that page. ** The page begins at offset *pOffset into the file. The *pOffset ** value is increased to the start of the next page in the journal. 
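**
** As a sketch of how the offset advances over one record (the helper name
** is hypothetical; the layout is the one used by the journal, namely a
** 4 byte page number, pPager->pageSize bytes of data and, for the main
** journal only, a trailing 4 byte checksum):
**
**     static i64 exampleNextRecordOffset(Pager *pPager, i64 iOff, int isMainJrnl){
**       return iOff + 4 + pPager->pageSize + (isMainJrnl ? 4 : 0);
**     }
**
** This mirrors the "*pOffset += pPager->pageSize + 4 + isMainJrnl*4"
** update performed by the routine below.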
** ** The main rollback journal uses checksums - the statement journal does ** not. ** ** If the page number of the page record read from the (sub-)journal file ** is greater than the current value of Pager.dbSize, then playback is ** skipped and SQLITE_OK is returned. ** ** If pDone is not NULL, then it is a record of pages that have already ** been played back. If the page at *pOffset has already been played back ** (if the corresponding pDone bit is set) then skip the playback. ** Make sure the pDone bit corresponding to the *pOffset page is set ** prior to returning. ** ** If the page record is successfully read from the (sub-)journal file ** and played back, then SQLITE_OK is returned. If an IO error occurs ** while reading the record from the (sub-)journal file or while writing ** to the database file, then the IO error code is returned. If data ** is successfully read from the (sub-)journal file but appears to be ** corrupted, SQLITE_DONE is returned. Data is considered corrupted in ** two circumstances: ** ** * If the record page-number is illegal (0 or PAGER_MJ_PGNO), or ** * If the record is being rolled back from the main journal file ** and the checksum field does not match the record content. ** ** Neither of these two scenarios are possible during a savepoint rollback. ** ** If this is a savepoint rollback, then memory may have to be dynamically ** allocated by this function. If this is the case and an allocation fails, ** SQLITE_NOMEM is returned. */ static int pager_playback_one_page( Pager *pPager, /* The pager being played back */ i64 *pOffset, /* Offset of record to playback */ Bitvec *pDone, /* Bitvec of pages already played back */ int isMainJrnl, /* 1 -> main journal. 0 -> sub-journal. */ int isSavepnt /* True for a savepoint rollback */ ){ int rc; PgHdr *pPg; /* An existing page in the cache */ Pgno pgno; /* The page number of a page in journal */ u32 cksum; /* Checksum used for sanity checking */ char *aData; /* Temporary storage for the page */ sqlite3_file *jfd; /* The file descriptor for the journal file */ int isSynced; /* True if journal page is synced */ assert( (isMainJrnl&~1)==0 ); /* isMainJrnl is 0 or 1 */ assert( (isSavepnt&~1)==0 ); /* isSavepnt is 0 or 1 */ assert( isMainJrnl || pDone ); /* pDone always used on sub-journals */ assert( isSavepnt || pDone==0 ); /* pDone never used on non-savepoint */ aData = pPager->pTmpSpace; assert( aData ); /* Temp storage must have already been allocated */ assert( pagerUseWal(pPager)==0 || (!isMainJrnl && isSavepnt) ); /* Either the state is greater than PAGER_WRITER_CACHEMOD (a transaction ** or savepoint rollback done at the request of the caller) or this is ** a hot-journal rollback. If it is a hot-journal rollback, the pager ** is in state OPEN and holds an EXCLUSIVE lock. Hot-journal rollback ** only reads from the main journal, not the sub-journal. */ assert( pPager->eState>=PAGER_WRITER_CACHEMOD || (pPager->eState==PAGER_OPEN && pPager->eLock==EXCLUSIVE_LOCK) ); assert( pPager->eState>=PAGER_WRITER_CACHEMOD || isMainJrnl ); /* Read the page number and page data from the journal or sub-journal ** file. Return an error code to the caller if an IO error occurs. */ jfd = isMainJrnl ? pPager->jfd : pPager->sjfd; rc = read32bits(jfd, *pOffset, &pgno); if( rc!=SQLITE_OK ) return rc; rc = sqlite3OsRead(jfd, (u8*)aData, pPager->pageSize, (*pOffset)+4); if( rc!=SQLITE_OK ) return rc; *pOffset += pPager->pageSize + 4 + isMainJrnl*4; /* Sanity checking on the page. This is more important that I originally ** thought. 
If a power failure occurs while the journal is being written, ** it could cause invalid data to be written into the journal. We need to ** detect this invalid data (with high probability) and ignore it. */ if( pgno==0 || pgno==PAGER_MJ_PGNO(pPager) ){ assert( !isSavepnt ); return SQLITE_DONE; } if( pgno>(Pgno)pPager->dbSize || sqlite3BitvecTest(pDone, pgno) ){ return SQLITE_OK; } if( isMainJrnl ){ rc = read32bits(jfd, (*pOffset)-4, &cksum); if( rc ) return rc; if( !isSavepnt && pager_cksum(pPager, (u8*)aData)!=cksum ){ return SQLITE_DONE; } } /* If this page has already been played back before during the current ** rollback, then don't bother to play it back again. */ if( pDone && (rc = sqlite3BitvecSet(pDone, pgno))!=SQLITE_OK ){ return rc; } /* When playing back page 1, restore the nReserve setting */ if( pgno==1 && pPager->nReserve!=((u8*)aData)[20] ){ pPager->nReserve = ((u8*)aData)[20]; pagerReportSize(pPager); } /* If the pager is in CACHEMOD state, then there must be a copy of this ** page in the pager cache. In this case just update the pager cache, ** not the database file. The page is left marked dirty in this case. ** ** An exception to the above rule: If the database is in no-sync mode ** and a page is moved during an incremental vacuum then the page may ** not be in the pager cache. Later: if a malloc() or IO error occurs ** during a Movepage() call, then the page may not be in the cache ** either. So the condition described in the above paragraph is not ** assert()able. ** ** If in WRITER_DBMOD, WRITER_FINISHED or OPEN state, then we update the ** pager cache if it exists and the main file. The page is then marked ** not dirty. Since this code is only executed in PAGER_OPEN state for ** a hot-journal rollback, it is guaranteed that the page-cache is empty ** if the pager is in OPEN state. ** ** Ticket #1171: The statement journal might contain page content that is ** different from the page content at the start of the transaction. ** This occurs when a page is changed prior to the start of a statement ** then changed again within the statement. When rolling back such a ** statement we must not write to the original database unless we know ** for certain that original page contents are synced into the main rollback ** journal. Otherwise, a power loss might leave modified data in the ** database file without an entry in the rollback journal that can ** restore the database to its original form. Two conditions must be ** met before writing to the database files. (1) the database must be ** locked. (2) we know that the original page content is fully synced ** in the main journal either because the page is not in cache or else ** the page is marked as needSync==0. ** ** 2008-04-14: When attempting to vacuum a corrupt database file, it ** is possible to fail a statement on a database that does not yet exist. ** Do not attempt to write if database file has never been opened. 
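**
** A compact restatement of the write-gate described above (illustrative
** only; the names mirror the locals used in the code that follows): the
** database file is written only when
**
**     isOpen(pPager->fd)
**       && (pPager->eState>=PAGER_WRITER_DBMOD || pPager->eState==PAGER_OPEN)
**       && isSynced
**
** is true, i.e. the file is open, the pager holds the database in a
** writable (or hot-journal rollback) state, and the original page content
** is known to be safely synced into the main journal.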
*/ if( pagerUseWal(pPager) ){ pPg = 0; }else{ pPg = sqlite3PagerLookup(pPager, pgno); } assert( pPg || !MEMDB ); assert( pPager->eState!=PAGER_OPEN || pPg==0 ); PAGERTRACE(("PLAYBACK %d page %d hash(%08x) %s\n", PAGERID(pPager), pgno, pager_datahash(pPager->pageSize, (u8*)aData), (isMainJrnl?"main-journal":"sub-journal") )); if( isMainJrnl ){ isSynced = pPager->noSync || (*pOffset <= pPager->journalHdr); }else{ isSynced = (pPg==0 || 0==(pPg->flags & PGHDR_NEED_SYNC)); } if( isOpen(pPager->fd) && (pPager->eState>=PAGER_WRITER_DBMOD || pPager->eState==PAGER_OPEN) && isSynced ){ i64 ofst = (pgno-1)*(i64)pPager->pageSize; testcase( !isSavepnt && pPg!=0 && (pPg->flags&PGHDR_NEED_SYNC)!=0 ); assert( !pagerUseWal(pPager) ); rc = sqlite3OsWrite(pPager->fd, (u8 *)aData, pPager->pageSize, ofst); if( pgno>pPager->dbFileSize ){ pPager->dbFileSize = pgno; } if( pPager->pBackup ){ CODEC1(pPager, aData, pgno, 3, rc=SQLITE_NOMEM); sqlite3BackupUpdate(pPager->pBackup, pgno, (u8*)aData); CODEC2(pPager, aData, pgno, 7, rc=SQLITE_NOMEM, aData); } }else if( !isMainJrnl && pPg==0 ){ /* If this is a rollback of a savepoint and data was not written to ** the database and the page is not in-memory, there is a potential ** problem. When the page is next fetched by the b-tree layer, it ** will be read from the database file, which may or may not be ** current. ** ** There are a couple of different ways this can happen. All are quite ** obscure. When running in synchronous mode, this can only happen ** if the page is on the free-list at the start of the transaction, then ** populated, then moved using sqlite3PagerMovepage(). ** ** The solution is to add an in-memory page to the cache containing ** the data just read from the sub-journal. Mark the page as dirty ** and if the pager requires a journal-sync, then mark the page as ** requiring a journal-sync before it is written. */ assert( isSavepnt ); assert( (pPager->doNotSpill & SPILLFLAG_ROLLBACK)==0 ); pPager->doNotSpill |= SPILLFLAG_ROLLBACK; rc = sqlite3PagerAcquire(pPager, pgno, &pPg, 1); assert( (pPager->doNotSpill & SPILLFLAG_ROLLBACK)!=0 ); pPager->doNotSpill &= ~SPILLFLAG_ROLLBACK; if( rc!=SQLITE_OK ) return rc; pPg->flags &= ~PGHDR_NEED_READ; sqlite3PcacheMakeDirty(pPg); } if( pPg ){ /* No page should ever be explicitly rolled back that is in use, except ** for page 1 which is held in use in order to keep the lock on the ** database active. However such a page may be rolled back as a result ** of an internal error resulting in an automatic call to ** sqlite3PagerRollback(). */ void *pData; pData = pPg->pData; memcpy(pData, (u8*)aData, pPager->pageSize); pPager->xReiniter(pPg); if( isMainJrnl && (!isSavepnt || *pOffset<=pPager->journalHdr) ){ /* If the contents of this page were just restored from the main ** journal file, then its content must be as they were when the ** transaction was first opened. In this case we can mark the page ** as clean, since there will be no need to write it out to the ** database. ** ** There is one exception to this rule. If the page is being rolled ** back as part of a savepoint (or statement) rollback from an ** unsynced portion of the main journal file, then it is not safe ** to mark the page as clean. This is because marking the page as ** clean will clear the PGHDR_NEED_SYNC flag. 
Since the page is ** already in the journal file (recorded in Pager.pInJournal) and ** the PGHDR_NEED_SYNC flag is cleared, if the page is written to ** again within this transaction, it will be marked as dirty but ** the PGHDR_NEED_SYNC flag will not be set. It could then potentially ** be written out into the database file before its journal file ** segment is synced. If a crash occurs during or following this, ** database corruption may ensue. */ assert( !pagerUseWal(pPager) ); sqlite3PcacheMakeClean(pPg); } pager_set_pagehash(pPg); /* If this was page 1, then restore the value of Pager.dbFileVers. ** Do this before any decoding. */ if( pgno==1 ){ memcpy(&pPager->dbFileVers, &((u8*)pData)[24],sizeof(pPager->dbFileVers)); } /* Decode the page just read from disk */ CODEC1(pPager, pData, pPg->pgno, 3, rc=SQLITE_NOMEM); sqlite3PcacheRelease(pPg); } return rc; } /* ** Parameter zMaster is the name of a master journal file. A single journal ** file that referred to the master journal file has just been rolled back. ** This routine checks if it is possible to delete the master journal file, ** and does so if it is. ** ** Argument zMaster may point to Pager.pTmpSpace. So that buffer is not ** available for use within this function. ** ** When a master journal file is created, it is populated with the names ** of all of its child journals, one after another, formatted as utf-8 ** encoded text. The end of each child journal file is marked with a ** nul-terminator byte (0x00). i.e. the entire contents of a master journal ** file for a transaction involving two databases might be: ** ** "/home/bill/a.db-journal\x00/home/bill/b.db-journal\x00" ** ** A master journal file may only be deleted once all of its child ** journals have been rolled back. ** ** This function reads the contents of the master-journal file into ** memory and loops through each of the child journal names. For ** each child journal, it checks if: ** ** * if the child journal exists, and if so ** * if the child journal contains a reference to master journal ** file zMaster ** ** If a child journal can be found that matches both of the criteria ** above, this function returns without doing anything. Otherwise, if ** no such child journal can be found, file zMaster is deleted from ** the file-system using sqlite3OsDelete(). ** ** If an IO error within this function, an error code is returned. This ** function allocates memory by calling sqlite3Malloc(). If an allocation ** fails, SQLITE_NOMEM is returned. Otherwise, if no IO or malloc errors ** occur, SQLITE_OK is returned. ** ** TODO: This function allocates a single block of memory to load ** the entire contents of the master journal file. This could be ** a couple of kilobytes or so - potentially larger than the page ** size. */ static int pager_delmaster(Pager *pPager, const char *zMaster){ sqlite3_vfs *pVfs = pPager->pVfs; int rc; /* Return code */ sqlite3_file *pMaster; /* Malloc'd master-journal file descriptor */ sqlite3_file *pJournal; /* Malloc'd child-journal file descriptor */ char *zMasterJournal = 0; /* Contents of master journal file */ i64 nMasterJournal; /* Size of master journal file */ char *zJournal; /* Pointer to one journal within MJ file */ char *zMasterPtr; /* Space to hold MJ filename from a journal file */ int nMasterPtr; /* Amount of space allocated to zMasterPtr[] */ /* Allocate space for both the pJournal and pMaster file descriptors. ** If successful, open the master journal file for reading. 
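**
** For illustration, walking the nul-separated child-journal names once the
** master journal has been read into a buffer might look like the sketch
** below (a hypothetical helper, using the sqlite3Strlen30() length helper
** defined elsewhere in this file):
**
**     static void exampleVisitChildJournals(const char *zBuf, i64 nBuf){
**       const char *z = zBuf;
**       while( (z - zBuf)<nBuf ){
**         // z points at one nul-terminated child journal path,
**         // e.g. "/home/bill/a.db-journal"
**         z += sqlite3Strlen30(z) + 1;   // skip the name and its 0x00 byte
**       }
**     }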
*/ pMaster = (sqlite3_file *)sqlite3MallocZero(pVfs->szOsFile * 2); pJournal = (sqlite3_file *)(((u8 *)pMaster) + pVfs->szOsFile); if( !pMaster ){ rc = SQLITE_NOMEM; }else{ const int flags = (SQLITE_OPEN_READONLY|SQLITE_OPEN_MASTER_JOURNAL); rc = sqlite3OsOpen(pVfs, zMaster, pMaster, flags, 0); } if( rc!=SQLITE_OK ) goto delmaster_out; /* Load the entire master journal file into space obtained from ** sqlite3_malloc() and pointed to by zMasterJournal. Also obtain ** sufficient space (in zMasterPtr) to hold the names of master ** journal files extracted from regular rollback-journals. */ rc = sqlite3OsFileSize(pMaster, &nMasterJournal); if( rc!=SQLITE_OK ) goto delmaster_out; nMasterPtr = pVfs->mxPathname+1; zMasterJournal = sqlite3Malloc(nMasterJournal + nMasterPtr + 1); if( !zMasterJournal ){ rc = SQLITE_NOMEM; goto delmaster_out; } zMasterPtr = &zMasterJournal[nMasterJournal+1]; rc = sqlite3OsRead(pMaster, zMasterJournal, (int)nMasterJournal, 0); if( rc!=SQLITE_OK ) goto delmaster_out; zMasterJournal[nMasterJournal] = 0; zJournal = zMasterJournal; while( (zJournal-zMasterJournal)pageSize bytes). ** If the file on disk is currently larger than nPage pages, then use the VFS ** xTruncate() method to truncate it. ** ** Or, it might be the case that the file on disk is smaller than ** nPage pages. Some operating system implementations can get confused if ** you try to truncate a file to some size that is larger than it ** currently is, so detect this case and write a single zero byte to ** the end of the new file instead. ** ** If successful, return SQLITE_OK. If an IO error occurs while modifying ** the database file, return the error code to the caller. */ static int pager_truncate(Pager *pPager, Pgno nPage){ int rc = SQLITE_OK; assert( pPager->eState!=PAGER_ERROR ); assert( pPager->eState!=PAGER_READER ); if( isOpen(pPager->fd) && (pPager->eState>=PAGER_WRITER_DBMOD || pPager->eState==PAGER_OPEN) ){ i64 currentSize, newSize; int szPage = pPager->pageSize; assert( pPager->eLock==EXCLUSIVE_LOCK ); /* TODO: Is it safe to use Pager.dbFileSize here? */ rc = sqlite3OsFileSize(pPager->fd, ¤tSize); newSize = szPage*(i64)nPage; if( rc==SQLITE_OK && currentSize!=newSize ){ if( currentSize>newSize ){ rc = sqlite3OsTruncate(pPager->fd, newSize); }else if( (currentSize+szPage)<=newSize ){ char *pTmp = pPager->pTmpSpace; memset(pTmp, 0, szPage); testcase( (newSize-szPage) == currentSize ); testcase( (newSize-szPage) > currentSize ); rc = sqlite3OsWrite(pPager->fd, pTmp, szPage, newSize-szPage); } if( rc==SQLITE_OK ){ pPager->dbFileSize = nPage; } } } return rc; } /* ** Return a sanitized version of the sector-size of OS file pFile. The ** return value is guaranteed to lie between 32 and MAX_SECTOR_SIZE. */ SQLITE_PRIVATE int sqlite3SectorSize(sqlite3_file *pFile){ int iRet = sqlite3OsSectorSize(pFile); if( iRet<32 ){ iRet = 512; }else if( iRet>MAX_SECTOR_SIZE ){ assert( MAX_SECTOR_SIZE>=512 ); iRet = MAX_SECTOR_SIZE; } return iRet; } /* ** Set the value of the Pager.sectorSize variable for the given ** pager based on the value returned by the xSectorSize method ** of the open database file. The sector size will be used ** to determine the size and alignment of journal header and ** master journal pointers within created journal files. ** ** For temporary files the effective sector size is always 512 bytes. 
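**
** For illustration (the values are hypothetical): with an effective sector
** size of 512 and pageSize==1024, JOURNAL_HDR_SZ(pPager) is 512, so the
** first journal header occupies bytes 0..511 of the journal file and the
** first page record begins at offset 512, keeping every journal header
** aligned to a sector boundary.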
** ** Otherwise, for non-temporary files, the effective sector size is ** the value returned by the xSectorSize() method rounded up to 32 if ** it is less than 32, or rounded down to MAX_SECTOR_SIZE if it ** is greater than MAX_SECTOR_SIZE. ** ** If the file has the SQLITE_IOCAP_POWERSAFE_OVERWRITE property, then set ** the effective sector size to its minimum value (512). The purpose of ** pPager->sectorSize is to define the "blast radius" of bytes that ** might change if a crash occurs while writing to a single byte in ** that range. But with POWERSAFE_OVERWRITE, the blast radius is zero ** (that is what POWERSAFE_OVERWRITE means), so we minimize the sector ** size. For backwards compatibility of the rollback journal file format, ** we cannot reduce the effective sector size below 512. */ static void setSectorSize(Pager *pPager){ assert( isOpen(pPager->fd) || pPager->tempFile ); if( pPager->tempFile || (sqlite3OsDeviceCharacteristics(pPager->fd) & SQLITE_IOCAP_POWERSAFE_OVERWRITE)!=0 ){ /* Sector size doesn't matter for temporary files. Also, the file ** may not have been opened yet, in which case the OsSectorSize() ** call will segfault. */ pPager->sectorSize = 512; }else{ pPager->sectorSize = sqlite3SectorSize(pPager->fd); } } /* ** Playback the journal and thus restore the database file to ** the state it was in before we started making changes. ** ** The journal file format is as follows: ** ** (1) 8 byte prefix. A copy of aJournalMagic[]. ** (2) 4 byte big-endian integer which is the number of valid page records ** in the journal. If this value is 0xffffffff, then compute the ** number of page records from the journal size. ** (3) 4 byte big-endian integer which is the initial value for the ** sanity checksum. ** (4) 4 byte integer which is the number of pages to truncate the ** database to during a rollback. ** (5) 4 byte big-endian integer which is the sector size. The header ** is this many bytes in size. ** (6) 4 byte big-endian integer which is the page size. ** (7) zero padding out to the next sector size. ** (8) Zero or more pages instances, each as follows: ** + 4 byte page number. ** + pPager->pageSize bytes of data. ** + 4 byte checksum ** ** When we speak of the journal header, we mean the first 7 items above. ** Each entry in the journal is an instance of the 8th item. ** ** Call the value from the second bullet "nRec". nRec is the number of ** valid page entries in the journal. In most cases, you can compute the ** value of nRec from the size of the journal file. But if a power ** failure occurred while the journal was being written, it could be the ** case that the size of the journal file had already been increased but ** the extra entries had not yet made it safely to disk. In such a case, ** the value of nRec computed from the file size would be too large. For ** that reason, we always use the nRec value in the header. ** ** If the nRec value is 0xffffffff it means that nRec should be computed ** from the file size. This value is used when the user selects the ** no-sync option for the journal. A power failure could lead to corruption ** in this case. But for things like temporary table (which will be ** deleted when the power is restored) we don't care. ** ** If the file opened as the journal file is not a well-formed ** journal file then all pages up to the first corrupted page are rolled ** back (or no pages if the journal header is corrupted). The journal file ** is then deleted and SQLITE_OK returned, just as if no corruption had ** been encountered. 
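**
** To make the header layout above concrete, a read-side sketch (hedged:
** the local names are illustrative; read32bits() is the helper used
** earlier in this file, and iHdrOff is the offset of the journal header
** being examined):
**
**     rc = read32bits(pPager->jfd, iHdrOff+8,  &nRec);                           // item (2)
**     if( rc==SQLITE_OK ) rc = read32bits(pPager->jfd, iHdrOff+12, &cksumInit);  // item (3)
**     if( rc==SQLITE_OK ) rc = read32bits(pPager->jfd, iHdrOff+16, &mxPg);       // item (4)
**     if( rc==SQLITE_OK ) rc = read32bits(pPager->jfd, iHdrOff+20, &iSectorSz);  // item (5)
**     if( rc==SQLITE_OK ) rc = read32bits(pPager->jfd, iHdrOff+24, &iPageSz);    // item (6)
**
** with the 8 byte aJournalMagic prefix (item (1)) occupying offsets 0..7.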
** ** If an I/O or malloc() error occurs, the journal-file is not deleted ** and an error code is returned. ** ** The isHot parameter indicates that we are trying to rollback a journal ** that might be a hot journal. Or, it could be that the journal is ** preserved because of JOURNALMODE_PERSIST or JOURNALMODE_TRUNCATE. ** If the journal really is hot, reset the pager cache prior rolling ** back any content. If the journal is merely persistent, no reset is ** needed. */ static int pager_playback(Pager *pPager, int isHot){ sqlite3_vfs *pVfs = pPager->pVfs; i64 szJ; /* Size of the journal file in bytes */ u32 nRec; /* Number of Records in the journal */ u32 u; /* Unsigned loop counter */ Pgno mxPg = 0; /* Size of the original file in pages */ int rc; /* Result code of a subroutine */ int res = 1; /* Value returned by sqlite3OsAccess() */ char *zMaster = 0; /* Name of master journal file if any */ int needPagerReset; /* True to reset page prior to first page rollback */ int nPlayback = 0; /* Total number of pages restored from journal */ /* Figure out how many records are in the journal. Abort early if ** the journal is empty. */ assert( isOpen(pPager->jfd) ); rc = sqlite3OsFileSize(pPager->jfd, &szJ); if( rc!=SQLITE_OK ){ goto end_playback; } /* Read the master journal name from the journal, if it is present. ** If a master journal file name is specified, but the file is not ** present on disk, then the journal is not hot and does not need to be ** played back. ** ** TODO: Technically the following is an error because it assumes that ** buffer Pager.pTmpSpace is (mxPathname+1) bytes or larger. i.e. that ** (pPager->pageSize >= pPager->pVfs->mxPathname+1). Using os_unix.c, ** mxPathname is 512, which is the same as the minimum allowable value ** for pageSize. */ zMaster = pPager->pTmpSpace; rc = readMasterJournal(pPager->jfd, zMaster, pPager->pVfs->mxPathname+1); if( rc==SQLITE_OK && zMaster[0] ){ rc = sqlite3OsAccess(pVfs, zMaster, SQLITE_ACCESS_EXISTS, &res); } zMaster = 0; if( rc!=SQLITE_OK || !res ){ goto end_playback; } pPager->journalOff = 0; needPagerReset = isHot; /* This loop terminates either when a readJournalHdr() or ** pager_playback_one_page() call returns SQLITE_DONE or an IO error ** occurs. */ while( 1 ){ /* Read the next journal header from the journal file. If there are ** not enough bytes left in the journal file for a complete header, or ** it is corrupted, then a process must have failed while writing it. ** This indicates nothing more needs to be rolled back. */ rc = readJournalHdr(pPager, isHot, szJ, &nRec, &mxPg); if( rc!=SQLITE_OK ){ if( rc==SQLITE_DONE ){ rc = SQLITE_OK; } goto end_playback; } /* If nRec is 0xffffffff, then this journal was created by a process ** working in no-sync mode. This means that the rest of the journal ** file consists of pages, there are no more journal headers. Compute ** the value of nRec based on this assumption. */ if( nRec==0xffffffff ){ assert( pPager->journalOff==JOURNAL_HDR_SZ(pPager) ); nRec = (int)((szJ - JOURNAL_HDR_SZ(pPager))/JOURNAL_PG_SZ(pPager)); } /* If nRec is 0 and this rollback is of a transaction created by this ** process and if this is the final header in the journal, then it means ** that this part of the journal was being filled but has not yet been ** synced to disk. Compute the number of pages based on the remaining ** size of the file. ** ** The third term of the test was added to fix ticket #2565. 
** When rolling back a hot journal, nRec==0 always means that the next ** chunk of the journal contains zero pages to be rolled back. But ** when doing a ROLLBACK and the nRec==0 chunk is the last chunk in ** the journal, it means that the journal might contain additional ** pages that need to be rolled back and that the number of pages ** should be computed based on the journal file size. */ if( nRec==0 && !isHot && pPager->journalHdr+JOURNAL_HDR_SZ(pPager)==pPager->journalOff ){ nRec = (int)((szJ - pPager->journalOff) / JOURNAL_PG_SZ(pPager)); } /* If this is the first header read from the journal, truncate the ** database file back to its original size. */ if( pPager->journalOff==JOURNAL_HDR_SZ(pPager) ){ rc = pager_truncate(pPager, mxPg); if( rc!=SQLITE_OK ){ goto end_playback; } pPager->dbSize = mxPg; } /* Copy original pages out of the journal and back into the ** database file and/or page cache. */ for(u=0; u<nRec && rc==SQLITE_OK; u++){ if( needPagerReset ){ pager_reset(pPager); needPagerReset = 0; } rc = pager_playback_one_page(pPager,&pPager->journalOff,0,1,0); if( rc==SQLITE_OK ){ nPlayback++; }else{ if( rc==SQLITE_DONE ){ pPager->journalOff = szJ; break; }else if( rc==SQLITE_IOERR_SHORT_READ ){ /* If the journal has been truncated, simply stop reading and ** processing the journal. This might happen if the journal was ** not completely written and synced prior to a crash. In that ** case, the database should have never been written in the ** first place so it is OK to simply abandon the rollback. */ rc = SQLITE_OK; goto end_playback; }else{ /* If we are unable to rollback, quit and return the error ** code. This will cause the pager to enter the error state ** so that no further harm will be done. Perhaps the next ** process to come along will be able to rollback the database. */ goto end_playback; } } } } /*NOTREACHED*/ assert( 0 ); end_playback: /* Following a rollback, the database file should be back in its original ** state prior to the start of the transaction, so invoke the ** SQLITE_FCNTL_DB_UNCHANGED file-control method to disable the ** assertion that the transaction counter was modified. */ #ifdef SQLITE_DEBUG if( pPager->fd->pMethods ){ sqlite3OsFileControlHint(pPager->fd,SQLITE_FCNTL_DB_UNCHANGED,0); } #endif /* If this playback is happening automatically as a result of an IO or ** malloc error that occurred after the change-counter was updated but ** before the transaction was committed, then the change-counter ** modification may just have been reverted. If this happens in exclusive ** mode, then subsequent transactions performed by the connection will not ** update the change-counter at all. This may lead to cache inconsistency ** problems for other processes at some point in the future. So, just ** in case this has happened, clear the changeCountDone flag now. */ pPager->changeCountDone = pPager->tempFile; if( rc==SQLITE_OK ){ zMaster = pPager->pTmpSpace; rc = readMasterJournal(pPager->jfd, zMaster, pPager->pVfs->mxPathname+1); testcase( rc!=SQLITE_OK ); } if( rc==SQLITE_OK && (pPager->eState>=PAGER_WRITER_DBMOD || pPager->eState==PAGER_OPEN) ){ rc = sqlite3PagerSync(pPager, 0); } if( rc==SQLITE_OK ){ rc = pager_end_transaction(pPager, zMaster[0]!='\0', 0); testcase( rc!=SQLITE_OK ); } if( rc==SQLITE_OK && zMaster[0] && res ){ /* If there was a master journal and this routine will return success, ** see if it is possible to delete the master journal.
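**
** (As a recap of the nRec handling earlier in this routine, a worked
** instance with illustrative numbers: with a 512 byte sector size
** JOURNAL_HDR_SZ(pPager) is 512, and with pageSize==1024 each record
** occupies JOURNAL_PG_SZ(pPager) = 1024+8 = 1032 bytes. If the journal
** file is szJ==10832 bytes and its header stores nRec==0xffffffff, the
** rollback code recovers
**
**     nRec = (szJ - JOURNAL_HDR_SZ(pPager)) / JOURNAL_PG_SZ(pPager)
**          = (10832 - 512) / 1032
**          = 10
**
** pages to play back from that chunk of the journal.)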
*/ rc = pager_delmaster(pPager, zMaster); testcase( rc!=SQLITE_OK ); } if( isHot && nPlayback ){ sqlite3_log(SQLITE_NOTICE_RECOVER_ROLLBACK, "recovered %d pages from %s", nPlayback, pPager->zJournal); } /* The Pager.sectorSize variable may have been updated while rolling ** back a journal created by a process with a different sector size ** value. Reset it to the correct value for this process. */ setSectorSize(pPager); return rc; } /* ** Read the content for page pPg out of the database file and into ** pPg->pData. A shared lock or greater must be held on the database ** file before this function is called. ** ** If page 1 is read, then the value of Pager.dbFileVers[] is set to ** the value read from the database file. ** ** If an IO error occurs, then the IO error is returned to the caller. ** Otherwise, SQLITE_OK is returned. */ static int readDbPage(PgHdr *pPg, u32 iFrame){ Pager *pPager = pPg->pPager; /* Pager object associated with page pPg */ Pgno pgno = pPg->pgno; /* Page number to read */ int rc = SQLITE_OK; /* Return code */ int pgsz = pPager->pageSize; /* Number of bytes to read */ assert( pPager->eState>=PAGER_READER && !MEMDB ); assert( isOpen(pPager->fd) ); #ifndef SQLITE_OMIT_WAL if( iFrame ){ /* Try to pull the page from the write-ahead log. */ rc = sqlite3WalReadFrame(pPager->pWal, iFrame, pgsz, pPg->pData); }else #endif { i64 iOffset = (pgno-1)*(i64)pPager->pageSize; rc = sqlite3OsRead(pPager->fd, pPg->pData, pgsz, iOffset); if( rc==SQLITE_IOERR_SHORT_READ ){ rc = SQLITE_OK; } } if( pgno==1 ){ if( rc ){ /* If the read is unsuccessful, set the dbFileVers[] to something ** that will never be a valid file version. dbFileVers[] is a copy ** of bytes 24..39 of the database. Bytes 28..31 should always be ** zero or the size of the database in page. Bytes 32..35 and 35..39 ** should be page numbers which are never 0xffffffff. So filling ** pPager->dbFileVers[] with all 0xff bytes should suffice. ** ** For an encrypted database, the situation is more complex: bytes ** 24..39 of the database are white noise. But the probability of ** white noise equaling 16 bytes of 0xff is vanishingly small so ** we should still be ok. */ memset(pPager->dbFileVers, 0xff, sizeof(pPager->dbFileVers)); }else{ u8 *dbFileVers = &((u8*)pPg->pData)[24]; memcpy(&pPager->dbFileVers, dbFileVers, sizeof(pPager->dbFileVers)); } } CODEC1(pPager, pPg->pData, pgno, 3, rc = SQLITE_NOMEM); PAGER_INCR(sqlite3_pager_readdb_count); PAGER_INCR(pPager->nRead); IOTRACE(("PGIN %p %d\n", pPager, pgno)); PAGERTRACE(("FETCH %d page %d hash(%08x)\n", PAGERID(pPager), pgno, pager_pagehash(pPg))); return rc; } /* ** Update the value of the change-counter at offsets 24 and 92 in ** the header and the sqlite version number at offset 96. ** ** This is an unconditional update. See also the pager_incr_changecounter() ** routine which only updates the change-counter if the update is actually ** needed, as determined by the pPager->changeCountDone state variable. */ static void pager_write_changecounter(PgHdr *pPg){ u32 change_counter; /* Increment the value just read and write it back to byte 24. */ change_counter = sqlite3Get4byte((u8*)pPg->pPager->dbFileVers)+1; put32bits(((char*)pPg->pData)+24, change_counter); /* Also store the SQLite version number in bytes 96..99 and in ** bytes 92..95 store the change counter for which the version number ** is valid. 
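**
** For reference, a sketch of the header bytes involved (a restatement of
** the description above, not additional behavior): the counter is read
** from the saved copy in Pager.dbFileVers (which mirrors bytes 24..39 of
** the database header), incremented by one, and then written to
**
**     offset 24..27   the file change counter
**     offset 92..95   the "version-valid-for" change counter
**     offset 96..99   SQLITE_VERSION_NUMBER of the library doing the write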
*/ put32bits(((char*)pPg->pData)+92, change_counter); put32bits(((char*)pPg->pData)+96, SQLITE_VERSION_NUMBER); } #ifndef SQLITE_OMIT_WAL /* ** This function is invoked once for each page that has already been ** written into the log file when a WAL transaction is rolled back. ** Parameter iPg is the page number of said page. The pCtx argument ** is actually a pointer to the Pager structure. ** ** If page iPg is present in the cache, and has no outstanding references, ** it is discarded. Otherwise, if there are one or more outstanding ** references, the page content is reloaded from the database. If the ** attempt to reload content from the database is required and fails, ** return an SQLite error code. Otherwise, SQLITE_OK. */ static int pagerUndoCallback(void *pCtx, Pgno iPg){ int rc = SQLITE_OK; Pager *pPager = (Pager *)pCtx; PgHdr *pPg; assert( pagerUseWal(pPager) ); pPg = sqlite3PagerLookup(pPager, iPg); if( pPg ){ if( sqlite3PcachePageRefcount(pPg)==1 ){ sqlite3PcacheDrop(pPg); }else{ u32 iFrame = 0; rc = sqlite3WalFindFrame(pPager->pWal, pPg->pgno, &iFrame); if( rc==SQLITE_OK ){ rc = readDbPage(pPg, iFrame); } if( rc==SQLITE_OK ){ pPager->xReiniter(pPg); } sqlite3PagerUnrefNotNull(pPg); } } /* Normally, if a transaction is rolled back, any backup processes are ** updated as data is copied out of the rollback journal and into the ** database. This is not generally possible with a WAL database, as ** rollback involves simply truncating the log file. Therefore, if one ** or more frames have already been written to the log (and therefore ** also copied into the backup databases) as part of this transaction, ** the backups must be restarted. */ sqlite3BackupRestart(pPager->pBackup); return rc; } /* ** This function is called to rollback a transaction on a WAL database. */ static int pagerRollbackWal(Pager *pPager){ int rc; /* Return Code */ PgHdr *pList; /* List of dirty pages to revert */ /* For all pages in the cache that are currently dirty or have already ** been written (but not committed) to the log file, do one of the ** following: ** ** + Discard the cached page (if refcount==0), or ** + Reload page content from the database (if refcount>0). */ pPager->dbSize = pPager->dbOrigSize; rc = sqlite3WalUndo(pPager->pWal, pagerUndoCallback, (void *)pPager); pList = sqlite3PcacheDirtyList(pPager->pPCache); while( pList && rc==SQLITE_OK ){ PgHdr *pNext = pList->pDirty; rc = pagerUndoCallback((void *)pPager, pList->pgno); pList = pNext; } return rc; } /* ** This function is a wrapper around sqlite3WalFrames(). As well as logging ** the contents of the list of pages headed by pList (connected by pDirty), ** this function notifies any active backup processes that the pages have ** changed. ** ** The list of pages passed into this routine is always sorted by page number. ** Hence, if page 1 appears anywhere on the list, it will be the first page. 
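**
** Illustrative only (the helper name is hypothetical): the frames handed to
** this routine form a singly linked list joined by PgHdr.pDirty and sorted
** by page number, so a caller could count the frames to be logged with:
**
**     static int exampleCountFrames(PgHdr *pList){
**       int n = 0;
**       PgHdr *p;
**       for(p=pList; p; p=p->pDirty) n++;
**       return n;
**     }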
*/ static int pagerWalFrames( Pager *pPager, /* Pager object */ PgHdr *pList, /* List of frames to log */ Pgno nTruncate, /* Database size after this commit */ int isCommit /* True if this is a commit */ ){ int rc; /* Return code */ int nList; /* Number of pages in pList */ PgHdr *p; /* For looping over pages */ assert( pPager->pWal ); assert( pList ); #ifdef SQLITE_DEBUG /* Verify that the page list is in accending order */ for(p=pList; p && p->pDirty; p=p->pDirty){ assert( p->pgno < p->pDirty->pgno ); } #endif assert( pList->pDirty==0 || isCommit ); if( isCommit ){ /* If a WAL transaction is being committed, there is no point in writing ** any pages with page numbers greater than nTruncate into the WAL file. ** They will never be read by any client. So remove them from the pDirty ** list here. */ PgHdr **ppNext = &pList; nList = 0; for(p=pList; (*ppNext = p)!=0; p=p->pDirty){ if( p->pgno<=nTruncate ){ ppNext = &p->pDirty; nList++; } } assert( pList ); }else{ nList = 1; } pPager->aStat[PAGER_STAT_WRITE] += nList; if( pList->pgno==1 ) pager_write_changecounter(pList); rc = sqlite3WalFrames(pPager->pWal, pPager->pageSize, pList, nTruncate, isCommit, pPager->walSyncFlags ); if( rc==SQLITE_OK && pPager->pBackup ){ for(p=pList; p; p=p->pDirty){ sqlite3BackupUpdate(pPager->pBackup, p->pgno, (u8 *)p->pData); } } #ifdef SQLITE_CHECK_PAGES pList = sqlite3PcacheDirtyList(pPager->pPCache); for(p=pList; p; p=p->pDirty){ pager_set_pagehash(p); } #endif return rc; } /* ** Begin a read transaction on the WAL. ** ** This routine used to be called "pagerOpenSnapshot()" because it essentially ** makes a snapshot of the database at the current point in time and preserves ** that snapshot for use by the reader in spite of concurrently changes by ** other writers or checkpointers. */ static int pagerBeginReadTransaction(Pager *pPager){ int rc; /* Return code */ int changed = 0; /* True if cache must be reset */ assert( pagerUseWal(pPager) ); assert( pPager->eState==PAGER_OPEN || pPager->eState==PAGER_READER ); /* sqlite3WalEndReadTransaction() was not called for the previous ** transaction in locking_mode=EXCLUSIVE. So call it now. If we ** are in locking_mode=NORMAL and EndRead() was previously called, ** the duplicate call is harmless. */ sqlite3WalEndReadTransaction(pPager->pWal); rc = sqlite3WalBeginReadTransaction(pPager->pWal, &changed); if( rc!=SQLITE_OK || changed ){ pager_reset(pPager); if( USEFETCH(pPager) ) sqlite3OsUnfetch(pPager->fd, 0, 0); } return rc; } #endif /* ** This function is called as part of the transition from PAGER_OPEN ** to PAGER_READER state to determine the size of the database file ** in pages (assuming the page size currently stored in Pager.pageSize). ** ** If no error occurs, SQLITE_OK is returned and the size of the database ** in pages is stored in *pnPage. Otherwise, an error code (perhaps ** SQLITE_IOERR_FSTAT) is returned and *pnPage is left unmodified. */ static int pagerPagecount(Pager *pPager, Pgno *pnPage){ Pgno nPage; /* Value to return via *pnPage */ /* Query the WAL sub-system for the database size. The WalDbsize() ** function returns zero if the WAL is not open (i.e. Pager.pWal==0), or ** if the database size is not available. The database size is not ** available from the WAL sub-system if the log file is empty or ** contains no valid committed transactions. 
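**
** A worked instance of the fallback size calculation performed below
** (numbers are illustrative): with pageSize==4096 and a database file of
** n==12289 bytes, the result is rounded up rather than truncated:
**
**     nPage = (Pgno)((n + pPager->pageSize - 1) / pPager->pageSize)
**           = (12289 + 4095) / 4096
**           = 4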
*/ assert( pPager->eState==PAGER_OPEN ); assert( pPager->eLock>=SHARED_LOCK ); nPage = sqlite3WalDbsize(pPager->pWal); /* If the number of pages in the database is not available from the ** WAL sub-system, determine the page counte based on the size of ** the database file. If the size of the database file is not an ** integer multiple of the page-size, round up the result. */ if( nPage==0 ){ i64 n = 0; /* Size of db file in bytes */ assert( isOpen(pPager->fd) || pPager->tempFile ); if( isOpen(pPager->fd) ){ int rc = sqlite3OsFileSize(pPager->fd, &n); if( rc!=SQLITE_OK ){ return rc; } } nPage = (Pgno)((n+pPager->pageSize-1) / pPager->pageSize); } /* If the current number of pages in the file is greater than the ** configured maximum pager number, increase the allowed limit so ** that the file can be read. */ if( nPage>pPager->mxPgno ){ pPager->mxPgno = (Pgno)nPage; } *pnPage = nPage; return SQLITE_OK; } #ifndef SQLITE_OMIT_WAL /* ** Check if the *-wal file that corresponds to the database opened by pPager ** exists if the database is not empy, or verify that the *-wal file does ** not exist (by deleting it) if the database file is empty. ** ** If the database is not empty and the *-wal file exists, open the pager ** in WAL mode. If the database is empty or if no *-wal file exists and ** if no error occurs, make sure Pager.journalMode is not set to ** PAGER_JOURNALMODE_WAL. ** ** Return SQLITE_OK or an error code. ** ** The caller must hold a SHARED lock on the database file to call this ** function. Because an EXCLUSIVE lock on the db file is required to delete ** a WAL on a none-empty database, this ensures there is no race condition ** between the xAccess() below and an xDelete() being executed by some ** other connection. */ static int pagerOpenWalIfPresent(Pager *pPager){ int rc = SQLITE_OK; assert( pPager->eState==PAGER_OPEN ); assert( pPager->eLock>=SHARED_LOCK ); if( !pPager->tempFile ){ int isWal; /* True if WAL file exists */ Pgno nPage; /* Size of the database file */ rc = pagerPagecount(pPager, &nPage); if( rc ) return rc; if( nPage==0 ){ rc = sqlite3OsDelete(pPager->pVfs, pPager->zWal, 0); if( rc==SQLITE_IOERR_DELETE_NOENT ) rc = SQLITE_OK; isWal = 0; }else{ rc = sqlite3OsAccess( pPager->pVfs, pPager->zWal, SQLITE_ACCESS_EXISTS, &isWal ); } if( rc==SQLITE_OK ){ if( isWal ){ testcase( sqlite3PcachePagecount(pPager->pPCache)==0 ); rc = sqlite3PagerOpenWal(pPager, 0); }else if( pPager->journalMode==PAGER_JOURNALMODE_WAL ){ pPager->journalMode = PAGER_JOURNALMODE_DELETE; } } } return rc; } #endif /* ** Playback savepoint pSavepoint. Or, if pSavepoint==NULL, then playback ** the entire master journal file. The case pSavepoint==NULL occurs when ** a ROLLBACK TO command is invoked on a SAVEPOINT that is a transaction ** savepoint. ** ** When pSavepoint is not NULL (meaning a non-transaction savepoint is ** being rolled back), then the rollback consists of up to three stages, ** performed in the order specified: ** ** * Pages are played back from the main journal starting at byte ** offset PagerSavepoint.iOffset and continuing to ** PagerSavepoint.iHdrOffset, or to the end of the main journal ** file if PagerSavepoint.iHdrOffset is zero. ** ** * If PagerSavepoint.iHdrOffset is not zero, then pages are played ** back starting from the journal header immediately following ** PagerSavepoint.iHdrOffset to the end of the main journal file. 
** * Pages are then played back from the sub-journal file, starting ** with record PagerSavepoint.iSubRec and continuing to the end of ** the journal file. ** ** Throughout the rollback process, each time a page is rolled back, the ** corresponding bit is set in a bitvec structure (variable pDone in the ** implementation below). This is used to ensure that a page is only ** rolled back the first time it is encountered in either journal. ** ** If pSavepoint is NULL, then pages are only played back from the main ** journal file. There is no need for a bitvec in this case. ** ** In either case, before playback commences the Pager.dbSize variable ** is reset to the value that it held at the start of the savepoint ** (or transaction). No page with a page-number greater than this value ** is played back. If one is encountered it is simply skipped. */ static int pagerPlaybackSavepoint(Pager *pPager, PagerSavepoint *pSavepoint){ i64 szJ; /* Effective size of the main journal */ i64 iHdrOff; /* End of first segment of main-journal records */ int rc = SQLITE_OK; /* Return code */ Bitvec *pDone = 0; /* Bitvec to ensure pages played back only once */ assert( pPager->eState!=PAGER_ERROR ); assert( pPager->eState>=PAGER_WRITER_LOCKED ); /* Allocate a bitvec to use to store the set of pages rolled back */ if( pSavepoint ){ pDone = sqlite3BitvecCreate(pSavepoint->nOrig); if( !pDone ){ return SQLITE_NOMEM; } } /* Set the database size back to the value it was before the savepoint ** being reverted was opened. */ pPager->dbSize = pSavepoint ? pSavepoint->nOrig : pPager->dbOrigSize; pPager->changeCountDone = pPager->tempFile; if( !pSavepoint && pagerUseWal(pPager) ){ return pagerRollbackWal(pPager); } /* Use pPager->journalOff as the effective size of the main rollback ** journal. The actual file might be larger than this in ** PAGER_JOURNALMODE_TRUNCATE or PAGER_JOURNALMODE_PERSIST. But anything ** past pPager->journalOff is off-limits to us. */ szJ = pPager->journalOff; assert( pagerUseWal(pPager)==0 || szJ==0 ); /* Begin by rolling back records from the main journal starting at ** PagerSavepoint.iOffset and continuing to the next journal header. ** There might be records in the main journal that have a page number ** greater than the current database size (pPager->dbSize) but those ** will be skipped automatically. Pages are added to pDone as they ** are played back. */ if( pSavepoint && !pagerUseWal(pPager) ){ iHdrOff = pSavepoint->iHdrOffset ? pSavepoint->iHdrOffset : szJ; pPager->journalOff = pSavepoint->iOffset; while( rc==SQLITE_OK && pPager->journalOff<iHdrOff ){ rc = pager_playback_one_page(pPager, &pPager->journalOff, pDone, 1, 1); } assert( rc!=SQLITE_DONE ); }else{ pPager->journalOff = 0; } /* Continue rolling back records out of the main journal starting at ** the first journal header seen and continuing until the effective end ** of the main journal file. Continue to skip out-of-range pages and ** continue adding pages rolled back to pDone. */ while( rc==SQLITE_OK && pPager->journalOff<szJ ){ u32 ii; /* Loop counter */ u32 nJRec = 0; /* Number of Journal Records */ u32 dummy; rc = readJournalHdr(pPager, 0, szJ, &nJRec, &dummy); assert( rc!=SQLITE_DONE ); /* ** The "pPager->journalHdr+JOURNAL_HDR_SZ(pPager)==pPager->journalOff" ** test is related to ticket #2565. See the discussion in the ** pager_playback() function for additional information. */ if( nJRec==0 && pPager->journalHdr+JOURNAL_HDR_SZ(pPager)==pPager->journalOff ){ nJRec = (u32)((szJ - pPager->journalOff)/JOURNAL_PG_SZ(pPager)); } for(ii=0; rc==SQLITE_OK && ii<nJRec && pPager->journalOff<szJ; ii++){ rc = pager_playback_one_page(pPager, &pPager->journalOff, pDone, 1, 1); } assert( rc!=SQLITE_DONE ); } assert( rc!=SQLITE_OK || pPager->journalOff>=szJ ); /* Finally, rollback pages from the sub-journal.
Pages that were ** previously rolled back out of the main journal (and are hence in pDone) ** will be skipped. Out-of-range pages are also skipped. */ if( pSavepoint ){ u32 ii; /* Loop counter */ i64 offset = (i64)pSavepoint->iSubRec*(4+pPager->pageSize); if( pagerUseWal(pPager) ){ rc = sqlite3WalSavepointUndo(pPager->pWal, pSavepoint->aWalData); } for(ii=pSavepoint->iSubRec; rc==SQLITE_OK && ii<pPager->nSubRec; ii++){ assert( offset==(i64)ii*(4+pPager->pageSize) ); rc = pager_playback_one_page(pPager, &offset, pDone, 0, 1); } assert( rc!=SQLITE_DONE ); } sqlite3BitvecDestroy(pDone); if( rc==SQLITE_OK ){ pPager->journalOff = szJ; } return rc; } /* ** Change the maximum number of in-memory pages that are allowed. */ SQLITE_PRIVATE void sqlite3PagerSetCachesize(Pager *pPager, int mxPage){ sqlite3PcacheSetCachesize(pPager->pPCache, mxPage); } /* ** Invoke SQLITE_FCNTL_MMAP_SIZE based on the current value of szMmap. */ static void pagerFixMaplimit(Pager *pPager){ #if SQLITE_MAX_MMAP_SIZE>0 sqlite3_file *fd = pPager->fd; if( isOpen(fd) && fd->pMethods->iVersion>=3 ){ sqlite3_int64 sz; sz = pPager->szMmap; pPager->bUseFetch = (sz>0); sqlite3OsFileControlHint(pPager->fd, SQLITE_FCNTL_MMAP_SIZE, &sz); } #endif } /* ** Change the maximum size of any memory mapping made of the database file. */ SQLITE_PRIVATE void sqlite3PagerSetMmapLimit(Pager *pPager, sqlite3_int64 szMmap){ pPager->szMmap = szMmap; pagerFixMaplimit(pPager); } /* ** Free as much memory as possible from the pager. */ SQLITE_PRIVATE void sqlite3PagerShrink(Pager *pPager){ sqlite3PcacheShrink(pPager->pPCache); } /* ** Adjust settings of the pager to those specified in the pgFlags parameter. ** ** The "level" in pgFlags & PAGER_SYNCHRONOUS_MASK sets the robustness ** of the database to damage due to OS crashes or power failures by ** changing the number of sync() calls made while writing the journals. ** There are three levels: ** ** OFF sqlite3OsSync() is never called. This is the default ** for temporary and transient files. ** ** NORMAL The journal is synced once before writes begin on the ** database. This is normally adequate protection, but ** it is theoretically possible, though very unlikely, ** that an inopportune power failure could leave the journal ** in a state which would cause damage to the database ** when it is rolled back. ** ** FULL The journal is synced twice before writes begin on the ** database (with some additional information - the nRec field ** of the journal header - being written in between the two ** syncs). If we assume that writing a ** single disk sector is atomic, then this mode provides ** assurance that the journal will not be corrupted to the ** point of causing damage to the database during rollback. ** ** The above is for a rollback-journal mode. For WAL mode, OFF continues ** to mean that no syncs ever occur. NORMAL means that the WAL is synced ** prior to the start of checkpoint and that the database file is synced ** at the conclusion of the checkpoint if the entire content of the WAL ** was written back into the database. But no sync operations occur for ** an ordinary commit in NORMAL mode with WAL. FULL means that the WAL ** file is synced following each commit operation, in addition to the ** syncs associated with NORMAL. ** ** Do not confuse synchronous=FULL with SQLITE_SYNC_FULL. The ** SQLITE_SYNC_FULL macro means to use the MacOSX-style full-fsync ** using fcntl(F_FULLFSYNC). SQLITE_SYNC_NORMAL means to do an ** ordinary fsync() call.
There is no difference between SQLITE_SYNC_FULL ** and SQLITE_SYNC_NORMAL on platforms other than MacOSX. But the ** synchronous=FULL versus synchronous=NORMAL setting determines when ** the xSync primitive is called and is relevant to all platforms. ** ** Numeric values associated with these states are OFF==1, NORMAL=2, ** and FULL=3. */ #ifndef SQLITE_OMIT_PAGER_PRAGMAS SQLITE_PRIVATE void sqlite3PagerSetFlags( Pager *pPager, /* The pager to set safety level for */ unsigned pgFlags /* Various flags */ ){ unsigned level = pgFlags & PAGER_SYNCHRONOUS_MASK; assert( level>=1 && level<=3 ); pPager->noSync = (level==1 || pPager->tempFile) ?1:0; pPager->fullSync = (level==3 && !pPager->tempFile) ?1:0; if( pPager->noSync ){ pPager->syncFlags = 0; pPager->ckptSyncFlags = 0; }else if( pgFlags & PAGER_FULLFSYNC ){ pPager->syncFlags = SQLITE_SYNC_FULL; pPager->ckptSyncFlags = SQLITE_SYNC_FULL; }else if( pgFlags & PAGER_CKPT_FULLFSYNC ){ pPager->syncFlags = SQLITE_SYNC_NORMAL; pPager->ckptSyncFlags = SQLITE_SYNC_FULL; }else{ pPager->syncFlags = SQLITE_SYNC_NORMAL; pPager->ckptSyncFlags = SQLITE_SYNC_NORMAL; } pPager->walSyncFlags = pPager->syncFlags; if( pPager->fullSync ){ pPager->walSyncFlags |= WAL_SYNC_TRANSACTIONS; } if( pgFlags & PAGER_CACHESPILL ){ pPager->doNotSpill &= ~SPILLFLAG_OFF; }else{ pPager->doNotSpill |= SPILLFLAG_OFF; } } #endif /* ** The following global variable is incremented whenever the library ** attempts to open a temporary file. This information is used for ** testing and analysis only. */ #ifdef SQLITE_TEST SQLITE_API int sqlite3_opentemp_count = 0; #endif /* ** Open a temporary file. ** ** Write the file descriptor into *pFile. Return SQLITE_OK on success ** or some other error code if we fail. The OS will automatically ** delete the temporary file when it is closed. ** ** The flags passed to the VFS layer xOpen() call are those specified ** by parameter vfsFlags ORed with the following: ** ** SQLITE_OPEN_READWRITE ** SQLITE_OPEN_CREATE ** SQLITE_OPEN_EXCLUSIVE ** SQLITE_OPEN_DELETEONCLOSE */ static int pagerOpentemp( Pager *pPager, /* The pager object */ sqlite3_file *pFile, /* Write the file descriptor here */ int vfsFlags /* Flags passed through to the VFS */ ){ int rc; /* Return code */ #ifdef SQLITE_TEST sqlite3_opentemp_count++; /* Used for testing and analysis only */ #endif vfsFlags |= SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | SQLITE_OPEN_EXCLUSIVE | SQLITE_OPEN_DELETEONCLOSE; rc = sqlite3OsOpen(pPager->pVfs, 0, pFile, vfsFlags, 0); assert( rc!=SQLITE_OK || isOpen(pFile) ); return rc; } /* ** Set the busy handler function. ** ** The pager invokes the busy-handler if sqlite3OsLock() returns ** SQLITE_BUSY when trying to upgrade from no-lock to a SHARED lock, ** or when trying to upgrade from a RESERVED lock to an EXCLUSIVE ** lock. It does *not* invoke the busy handler when upgrading from ** SHARED to RESERVED, or when upgrading from SHARED to EXCLUSIVE ** (which occurs during hot-journal rollback). Summary: ** ** Transition | Invokes xBusyHandler ** -------------------------------------------------------- ** NO_LOCK -> SHARED_LOCK | Yes ** SHARED_LOCK -> RESERVED_LOCK | No ** SHARED_LOCK -> EXCLUSIVE_LOCK | No ** RESERVED_LOCK -> EXCLUSIVE_LOCK | Yes ** ** If the busy-handler callback returns non-zero, the lock is ** retried. If it returns zero, then the SQLITE_BUSY error is ** returned to the caller of the pager API function. 
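**
** A minimal sketch of a handler with the expected signature (hypothetical
** and illustrative only; a real handler normally waits or yields before
** requesting another attempt):
**
**     static int exampleBusyHandler(void *pArg){
**       int *pnTries = (int*)pArg;
**       if( (*pnTries)++ < 10 ) return 1;    // non-zero: retry the lock
**       return 0;                            // zero: fail with SQLITE_BUSY
**     }
**
** which would be registered by calling
** sqlite3PagerSetBusyhandler(pPager, exampleBusyHandler, (void*)&nTries).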
*/ SQLITE_PRIVATE void sqlite3PagerSetBusyhandler( Pager *pPager, /* Pager object */ int (*xBusyHandler)(void *), /* Pointer to busy-handler function */ void *pBusyHandlerArg /* Argument to pass to xBusyHandler */ ){ pPager->xBusyHandler = xBusyHandler; pPager->pBusyHandlerArg = pBusyHandlerArg; if( isOpen(pPager->fd) ){ void **ap = (void **)&pPager->xBusyHandler; assert( ((int(*)(void *))(ap[0]))==xBusyHandler ); assert( ap[1]==pBusyHandlerArg ); sqlite3OsFileControlHint(pPager->fd, SQLITE_FCNTL_BUSYHANDLER, (void *)ap); } } /* ** Change the page size used by the Pager object. The new page size ** is passed in *pPageSize. ** ** If the pager is in the error state when this function is called, it ** is a no-op. The value returned is the error state error code (i.e. ** one of SQLITE_IOERR, an SQLITE_IOERR_xxx sub-code or SQLITE_FULL). ** ** Otherwise, if all of the following are true: ** ** * the new page size (value of *pPageSize) is valid (a power ** of two between 512 and SQLITE_MAX_PAGE_SIZE, inclusive), and ** ** * there are no outstanding page references, and ** ** * the database is either not an in-memory database or it is ** an in-memory database that currently consists of zero pages. ** ** then the pager object page size is set to *pPageSize. ** ** If the page size is changed, then this function uses sqlite3PagerMalloc() ** to obtain a new Pager.pTmpSpace buffer. If this allocation attempt ** fails, SQLITE_NOMEM is returned and the page size remains unchanged. ** In all other cases, SQLITE_OK is returned. ** ** If the page size is not changed, either because one of the enumerated ** conditions above is not true, the pager was in error state when this ** function was called, or because the memory allocation attempt failed, ** then *pPageSize is set to the old, retained page size before returning. */ SQLITE_PRIVATE int sqlite3PagerSetPagesize(Pager *pPager, u32 *pPageSize, int nReserve){ int rc = SQLITE_OK; /* It is not possible to do a full assert_pager_state() here, as this ** function may be called from within PagerOpen(), before the state ** of the Pager object is internally consistent. ** ** At one point this function returned an error if the pager was in ** PAGER_ERROR state. But since PAGER_ERROR state guarantees that ** there is at least one outstanding page reference, this function ** is a no-op for that case anyhow. */ u32 pageSize = *pPageSize; assert( pageSize==0 || (pageSize>=512 && pageSize<=SQLITE_MAX_PAGE_SIZE) ); if( (pPager->memDb==0 || pPager->dbSize==0) && sqlite3PcacheRefCount(pPager->pPCache)==0 && pageSize && pageSize!=(u32)pPager->pageSize ){ char *pNew = NULL; /* New temp space */ i64 nByte = 0; if( pPager->eState>PAGER_OPEN && isOpen(pPager->fd) ){ rc = sqlite3OsFileSize(pPager->fd, &nByte); } if( rc==SQLITE_OK ){ pNew = (char *)sqlite3PageMalloc(pageSize); if( !pNew ) rc = SQLITE_NOMEM; } if( rc==SQLITE_OK ){ pager_reset(pPager); rc = sqlite3PcacheSetPageSize(pPager->pPCache, pageSize); } if( rc==SQLITE_OK ){ sqlite3PageFree(pPager->pTmpSpace); pPager->pTmpSpace = pNew; pPager->dbSize = (Pgno)((nByte+pageSize-1)/pageSize); pPager->pageSize = pageSize; }else{ sqlite3PageFree(pNew); } } *pPageSize = pPager->pageSize; if( rc==SQLITE_OK ){ if( nReserve<0 ) nReserve = pPager->nReserve; assert( nReserve>=0 && nReserve<1000 ); pPager->nReserve = (i16)nReserve; pagerReportSize(pPager); pagerFixMaplimit(pPager); } return rc; } /* ** Return a pointer to the "temporary page" buffer held internally ** by the pager. 
This is a buffer that is big enough to hold the ** entire content of a database page. This buffer is used internally ** during rollback and will be overwritten whenever a rollback ** occurs. But other modules are free to use it too, as long as ** no rollbacks are happening. */ SQLITE_PRIVATE void *sqlite3PagerTempSpace(Pager *pPager){ return pPager->pTmpSpace; } /* ** Attempt to set the maximum database page count if mxPage is positive. ** Make no changes if mxPage is zero or negative. And never reduce the ** maximum page count below the current size of the database. ** ** Regardless of mxPage, return the current maximum page count. */ SQLITE_PRIVATE int sqlite3PagerMaxPageCount(Pager *pPager, int mxPage){ if( mxPage>0 ){ pPager->mxPgno = mxPage; } assert( pPager->eState!=PAGER_OPEN ); /* Called only by OP_MaxPgcnt */ assert( pPager->mxPgno>=pPager->dbSize ); /* OP_MaxPgcnt enforces this */ return pPager->mxPgno; } /* ** The following set of routines are used to disable the simulated ** I/O error mechanism. These routines are used to avoid simulated ** errors in places where we do not care about errors. ** ** Unless -DSQLITE_TEST=1 is used, these routines are all no-ops ** and generate no code. */ #ifdef SQLITE_TEST SQLITE_API extern int sqlite3_io_error_pending; SQLITE_API extern int sqlite3_io_error_hit; static int saved_cnt; void disable_simulated_io_errors(void){ saved_cnt = sqlite3_io_error_pending; sqlite3_io_error_pending = -1; } void enable_simulated_io_errors(void){ sqlite3_io_error_pending = saved_cnt; } #else # define disable_simulated_io_errors() # define enable_simulated_io_errors() #endif /* ** Read the first N bytes from the beginning of the file into memory ** that pDest points to. ** ** If the pager was opened on a transient file (zFilename==""), or ** opened on a file less than N bytes in size, the output buffer is ** zeroed and SQLITE_OK returned. The rationale for this is that this ** function is used to read database headers, and a new transient or ** zero sized database has a header than consists entirely of zeroes. ** ** If any IO error apart from SQLITE_IOERR_SHORT_READ is encountered, ** the error code is returned to the caller and the contents of the ** output buffer undefined. */ SQLITE_PRIVATE int sqlite3PagerReadFileheader(Pager *pPager, int N, unsigned char *pDest){ int rc = SQLITE_OK; memset(pDest, 0, N); assert( isOpen(pPager->fd) || pPager->tempFile ); /* This routine is only called by btree immediately after creating ** the Pager object. There has not been an opportunity to transition ** to WAL mode yet. */ assert( !pagerUseWal(pPager) ); if( isOpen(pPager->fd) ){ IOTRACE(("DBHDR %p 0 %d\n", pPager, N)) rc = sqlite3OsRead(pPager->fd, pDest, N, 0); if( rc==SQLITE_IOERR_SHORT_READ ){ rc = SQLITE_OK; } } return rc; } /* ** This function may only be called when a read-transaction is open on ** the pager. It returns the total number of pages in the database. ** ** However, if the file is between 1 and bytes in size, then ** this is considered a 1 page file. */ SQLITE_PRIVATE void sqlite3PagerPagecount(Pager *pPager, int *pnPage){ assert( pPager->eState>=PAGER_READER ); assert( pPager->eState!=PAGER_WRITER_FINISHED ); *pnPage = (int)pPager->dbSize; } /* ** Try to obtain a lock of type locktype on the database file. If ** a similar or greater lock is already held, this function is a no-op ** (returning SQLITE_OK immediately). ** ** Otherwise, attempt to obtain the lock using sqlite3OsLock(). Invoke ** the busy callback if the lock is currently not available. 
Repeat ** until the busy callback returns false or until the attempt to ** obtain the lock succeeds. ** ** Return SQLITE_OK on success and an error code if we cannot obtain ** the lock. If the lock is obtained successfully, set the Pager.state ** variable to locktype before returning. */ static int pager_wait_on_lock(Pager *pPager, int locktype){ int rc; /* Return code */ /* Check that this is either a no-op (because the requested lock is ** already held), or one of the transitions that the busy-handler ** may be invoked during, according to the comment above ** sqlite3PagerSetBusyhandler(). */ assert( (pPager->eLock>=locktype) || (pPager->eLock==NO_LOCK && locktype==SHARED_LOCK) || (pPager->eLock==RESERVED_LOCK && locktype==EXCLUSIVE_LOCK) ); do { rc = pagerLockDb(pPager, locktype); }while( rc==SQLITE_BUSY && pPager->xBusyHandler(pPager->pBusyHandlerArg) ); return rc; } /* ** Function assertTruncateConstraint(pPager) checks that one of the ** following is true for all dirty pages currently in the page-cache: ** ** a) The page number is less than or equal to the size of the ** current database image, in pages, OR ** ** b) if the page content were written at this time, it would not ** be necessary to write the current content out to the sub-journal ** (as determined by function subjRequiresPage()). ** ** If the condition asserted by this function were not true, and the ** dirty page were to be discarded from the cache via the pagerStress() ** routine, pagerStress() would not write the current page content to ** the database file. If a savepoint transaction were rolled back after ** this happened, the correct behavior would be to restore the current ** content of the page. However, since this content is not present in either ** the database file or the portion of the rollback journal and ** sub-journal rolled back the content could not be restored and the ** database image would become corrupt. It is therefore fortunate that ** this circumstance cannot arise. */ #if defined(SQLITE_DEBUG) static void assertTruncateConstraintCb(PgHdr *pPg){ assert( pPg->flags&PGHDR_DIRTY ); assert( !subjRequiresPage(pPg) || pPg->pgno<=pPg->pPager->dbSize ); } static void assertTruncateConstraint(Pager *pPager){ sqlite3PcacheIterateDirty(pPager->pPCache, assertTruncateConstraintCb); } #else # define assertTruncateConstraint(pPager) #endif /* ** Truncate the in-memory database file image to nPage pages. This ** function does not actually modify the database file on disk. It ** just sets the internal state of the pager object so that the ** truncation will be done when the current transaction is committed. ** ** This function is only called right before committing a transaction. ** Once this function has been called, the transaction must either be ** rolled back or committed. It is not safe to call this function and ** then continue writing to the database. */ SQLITE_PRIVATE void sqlite3PagerTruncateImage(Pager *pPager, Pgno nPage){ assert( pPager->dbSize>=nPage ); assert( pPager->eState>=PAGER_WRITER_CACHEMOD ); pPager->dbSize = nPage; /* At one point the code here called assertTruncateConstraint() to ** ensure that all pages being truncated away by this operation are, ** if one or more savepoints are open, present in the savepoint ** journal so that they can be restored if the savepoint is rolled ** back. This is no longer necessary as this function is now only ** called right before committing a transaction. 
So although the ** Pager object may still have open savepoints (Pager.nSavepoint!=0), ** they cannot be rolled back. So the assertTruncateConstraint() call ** is no longer correct. */ } /* ** This function is called before attempting a hot-journal rollback. It ** syncs the journal file to disk, then sets pPager->journalHdr to the ** size of the journal file so that the pager_playback() routine knows ** that the entire journal file has been synced. ** ** Syncing a hot-journal to disk before attempting to roll it back ensures ** that if a power-failure occurs during the rollback, the process that ** attempts rollback following system recovery sees the same journal ** content as this process. ** ** If everything goes as planned, SQLITE_OK is returned. Otherwise, ** an SQLite error code. */ static int pagerSyncHotJournal(Pager *pPager){ int rc = SQLITE_OK; if( !pPager->noSync ){ rc = sqlite3OsSync(pPager->jfd, SQLITE_SYNC_NORMAL); } if( rc==SQLITE_OK ){ rc = sqlite3OsFileSize(pPager->jfd, &pPager->journalHdr); } return rc; } /* ** Obtain a reference to a memory mapped page object for page number pgno. ** The new object will use the pointer pData, obtained from xFetch(). ** If successful, set *ppPage to point to the new page reference ** and return SQLITE_OK. Otherwise, return an SQLite error code and set ** *ppPage to zero. ** ** Page references obtained by calling this function should be released ** by calling pagerReleaseMapPage(). */ static int pagerAcquireMapPage( Pager *pPager, /* Pager object */ Pgno pgno, /* Page number */ void *pData, /* xFetch()'d data for this page */ PgHdr **ppPage /* OUT: Acquired page object */ ){ PgHdr *p; /* Memory mapped page to return */ if( pPager->pMmapFreelist ){ *ppPage = p = pPager->pMmapFreelist; pPager->pMmapFreelist = p->pDirty; p->pDirty = 0; memset(p->pExtra, 0, pPager->nExtra); }else{ *ppPage = p = (PgHdr *)sqlite3MallocZero(sizeof(PgHdr) + pPager->nExtra); if( p==0 ){ sqlite3OsUnfetch(pPager->fd, (i64)(pgno-1) * pPager->pageSize, pData); return SQLITE_NOMEM; } p->pExtra = (void *)&p[1]; p->flags = PGHDR_MMAP; p->nRef = 1; p->pPager = pPager; } assert( p->pExtra==(void *)&p[1] ); assert( p->pPage==0 ); assert( p->flags==PGHDR_MMAP ); assert( p->pPager==pPager ); assert( p->nRef==1 ); p->pgno = pgno; p->pData = pData; pPager->nMmapOut++; return SQLITE_OK; } /* ** Release a reference to page pPg. pPg must have been returned by an ** earlier call to pagerAcquireMapPage(). */ static void pagerReleaseMapPage(PgHdr *pPg){ Pager *pPager = pPg->pPager; pPager->nMmapOut--; pPg->pDirty = pPager->pMmapFreelist; pPager->pMmapFreelist = pPg; assert( pPager->fd->pMethods->iVersion>=3 ); sqlite3OsUnfetch(pPager->fd, (i64)(pPg->pgno-1)*pPager->pageSize, pPg->pData); } /* ** Free all PgHdr objects stored in the Pager.pMmapFreelist list. */ static void pagerFreeMapHdrs(Pager *pPager){ PgHdr *p; PgHdr *pNext; for(p=pPager->pMmapFreelist; p; p=pNext){ pNext = p->pDirty; sqlite3_free(p); } } /* ** Shutdown the page cache. Free all memory and close all files. ** ** If a transaction was in progress when this routine is called, that ** transaction is rolled back. All outstanding pages are invalidated ** and their memory is freed. Any attempt to use a page associated ** with this page cache after this function returns will likely ** result in a coredump. ** ** This function always succeeds. If a transaction is active an attempt ** is made to roll it back. 
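**
** (For context, a minimal sketch of the expected open/use/close sequence
** as driven by the b-tree layer. The names pVfs and xReinit stand for the
** caller's VFS and page-reinitializer, error handling is omitted, and a
** NULL filename is passed so that a temporary-file pager is created:
**
**     Pager *pPager = 0;
**     int rc = sqlite3PagerOpen(pVfs, &pPager, 0, 0, 0,
**                   SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE, xReinit);
**     ... sqlite3PagerSharedLock(), sqlite3PagerAcquire(), page usage ...
**     sqlite3PagerClose(pPager);
**
** The final sqlite3PagerClose() call rolls back any transaction that is
** still open.)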
If an error occurs during the rollback ** a hot journal may be left in the filesystem but no error is returned ** to the caller. */ SQLITE_PRIVATE int sqlite3PagerClose(Pager *pPager){ u8 *pTmp = (u8 *)pPager->pTmpSpace; assert( assert_pager_state(pPager) ); disable_simulated_io_errors(); sqlite3BeginBenignMalloc(); pagerFreeMapHdrs(pPager); /* pPager->errCode = 0; */ pPager->exclusiveMode = 0; #ifndef SQLITE_OMIT_WAL sqlite3WalClose(pPager->pWal, pPager->ckptSyncFlags, pPager->pageSize, pTmp); pPager->pWal = 0; #endif pager_reset(pPager); if( MEMDB ){ pager_unlock(pPager); }else{ /* If it is open, sync the journal file before calling UnlockAndRollback. ** If this is not done, then an unsynced portion of the open journal ** file may be played back into the database. If a power failure occurs ** while this is happening, the database could become corrupt. ** ** If an error occurs while trying to sync the journal, shift the pager ** into the ERROR state. This causes UnlockAndRollback to unlock the ** database and close the journal file without attempting to roll it ** back or finalize it. The next database user will have to do hot-journal ** rollback before accessing the database file. */ if( isOpen(pPager->jfd) ){ pager_error(pPager, pagerSyncHotJournal(pPager)); } pagerUnlockAndRollback(pPager); } sqlite3EndBenignMalloc(); enable_simulated_io_errors(); PAGERTRACE(("CLOSE %d\n", PAGERID(pPager))); IOTRACE(("CLOSE %p\n", pPager)) sqlite3OsClose(pPager->jfd); sqlite3OsClose(pPager->fd); sqlite3PageFree(pTmp); sqlite3PcacheClose(pPager->pPCache); #ifdef SQLITE_HAS_CODEC if( pPager->xCodecFree ) pPager->xCodecFree(pPager->pCodec); #endif assert( !pPager->aSavepoint && !pPager->pInJournal ); assert( !isOpen(pPager->jfd) && !isOpen(pPager->sjfd) ); sqlite3_free(pPager); return SQLITE_OK; } #if !defined(NDEBUG) || defined(SQLITE_TEST) /* ** Return the page number for page pPg. */ SQLITE_PRIVATE Pgno sqlite3PagerPagenumber(DbPage *pPg){ return pPg->pgno; } #endif /* ** Increment the reference count for page pPg. */ SQLITE_PRIVATE void sqlite3PagerRef(DbPage *pPg){ sqlite3PcacheRef(pPg); } /* ** Sync the journal. In other words, make sure all the pages that have ** been written to the journal have actually reached the surface of the ** disk and can be restored in the event of a hot-journal rollback. ** ** If the Pager.noSync flag is set, then this function is a no-op. ** Otherwise, the actions required depend on the journal-mode and the ** device characteristics of the file-system, as follows: ** ** * If the journal file is an in-memory journal file, no action need ** be taken. ** ** * Otherwise, if the device does not support the SAFE_APPEND property, ** then the nRec field of the most recently written journal header ** is updated to contain the number of journal records that have ** been written following it. If the pager is operating in full-sync ** mode, then the journal file is synced before this field is updated. ** ** * If the device does not support the SEQUENTIAL property, then ** journal file is synced. ** ** Or, in pseudo-code: ** ** if( NOT ){ ** if( NOT SAFE_APPEND ){ ** if( ) xSync(); ** ** } ** if( NOT SEQUENTIAL ) xSync(); ** } ** ** If successful, this routine clears the PGHDR_NEED_SYNC flag of every ** page currently held in memory before returning SQLITE_OK. If an IO ** error is encountered, then the IO error code is returned to the caller. 
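**
** Restated with the device properties spelled out (this mirrors the
** pseudo-code above, assuming an ordinary on-disk rollback journal):
**
**     if the journal is not an in-memory journal:
**       if the device does NOT have the SAFE_APPEND property:
**         if in full-sync mode, xSync() the journal file
**         update the nRec field of the most recent journal header
**       if the device does NOT have the SEQUENTIAL property:
**         xSync() the journal file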
*/ static int syncJournal(Pager *pPager, int newHdr){ int rc; /* Return code */ assert( pPager->eState==PAGER_WRITER_CACHEMOD || pPager->eState==PAGER_WRITER_DBMOD ); assert( assert_pager_state(pPager) ); assert( !pagerUseWal(pPager) ); rc = sqlite3PagerExclusiveLock(pPager); if( rc!=SQLITE_OK ) return rc; if( !pPager->noSync ){ assert( !pPager->tempFile ); if( isOpen(pPager->jfd) && pPager->journalMode!=PAGER_JOURNALMODE_MEMORY ){ const int iDc = sqlite3OsDeviceCharacteristics(pPager->fd); assert( isOpen(pPager->jfd) ); if( 0==(iDc&SQLITE_IOCAP_SAFE_APPEND) ){ /* This block deals with an obscure problem. If the last connection ** that wrote to this database was operating in persistent-journal ** mode, then the journal file may at this point actually be larger ** than Pager.journalOff bytes. If the next thing in the journal ** file happens to be a journal-header (written as part of the ** previous connection's transaction), and a crash or power-failure ** occurs after nRec is updated but before this connection writes ** anything else to the journal file (or commits/rolls back its ** transaction), then SQLite may become confused when doing the ** hot-journal rollback following recovery. It may roll back all ** of this connections data, then proceed to rolling back the old, ** out-of-date data that follows it. Database corruption. ** ** To work around this, if the journal file does appear to contain ** a valid header following Pager.journalOff, then write a 0x00 ** byte to the start of it to prevent it from being recognized. ** ** Variable iNextHdrOffset is set to the offset at which this ** problematic header will occur, if it exists. aMagic is used ** as a temporary buffer to inspect the first couple of bytes of ** the potential journal header. */ i64 iNextHdrOffset; u8 aMagic[8]; u8 zHeader[sizeof(aJournalMagic)+4]; memcpy(zHeader, aJournalMagic, sizeof(aJournalMagic)); put32bits(&zHeader[sizeof(aJournalMagic)], pPager->nRec); iNextHdrOffset = journalHdrOffset(pPager); rc = sqlite3OsRead(pPager->jfd, aMagic, 8, iNextHdrOffset); if( rc==SQLITE_OK && 0==memcmp(aMagic, aJournalMagic, 8) ){ static const u8 zerobyte = 0; rc = sqlite3OsWrite(pPager->jfd, &zerobyte, 1, iNextHdrOffset); } if( rc!=SQLITE_OK && rc!=SQLITE_IOERR_SHORT_READ ){ return rc; } /* Write the nRec value into the journal file header. If in ** full-synchronous mode, sync the journal first. This ensures that ** all data has really hit the disk before nRec is updated to mark ** it as a candidate for rollback. ** ** This is not required if the persistent media supports the ** SAFE_APPEND property. Because in this case it is not possible ** for garbage data to be appended to the file, the nRec field ** is populated with 0xFFFFFFFF when the journal header is written ** and never needs to be updated. 
*/ if( pPager->fullSync && 0==(iDc&SQLITE_IOCAP_SEQUENTIAL) ){ PAGERTRACE(("SYNC journal of %d\n", PAGERID(pPager))); IOTRACE(("JSYNC %p\n", pPager)) rc = sqlite3OsSync(pPager->jfd, pPager->syncFlags); if( rc!=SQLITE_OK ) return rc; } IOTRACE(("JHDR %p %lld\n", pPager, pPager->journalHdr)); rc = sqlite3OsWrite( pPager->jfd, zHeader, sizeof(zHeader), pPager->journalHdr ); if( rc!=SQLITE_OK ) return rc; } if( 0==(iDc&SQLITE_IOCAP_SEQUENTIAL) ){ PAGERTRACE(("SYNC journal of %d\n", PAGERID(pPager))); IOTRACE(("JSYNC %p\n", pPager)) rc = sqlite3OsSync(pPager->jfd, pPager->syncFlags| (pPager->syncFlags==SQLITE_SYNC_FULL?SQLITE_SYNC_DATAONLY:0) ); if( rc!=SQLITE_OK ) return rc; } pPager->journalHdr = pPager->journalOff; if( newHdr && 0==(iDc&SQLITE_IOCAP_SAFE_APPEND) ){ pPager->nRec = 0; rc = writeJournalHdr(pPager); if( rc!=SQLITE_OK ) return rc; } }else{ pPager->journalHdr = pPager->journalOff; } } /* Unless the pager is in noSync mode, the journal file was just ** successfully synced. Either way, clear the PGHDR_NEED_SYNC flag on ** all pages. */ sqlite3PcacheClearSyncFlags(pPager->pPCache); pPager->eState = PAGER_WRITER_DBMOD; assert( assert_pager_state(pPager) ); return SQLITE_OK; } /* ** The argument is the first in a linked list of dirty pages connected ** by the PgHdr.pDirty pointer. This function writes each one of the ** in-memory pages in the list to the database file. The argument may ** be NULL, representing an empty list. In this case this function is ** a no-op. ** ** The pager must hold at least a RESERVED lock when this function ** is called. Before writing anything to the database file, this lock ** is upgraded to an EXCLUSIVE lock. If the lock cannot be obtained, ** SQLITE_BUSY is returned and no data is written to the database file. ** ** If the pager is a temp-file pager and the actual file-system file ** is not yet open, it is created and opened before any data is ** written out. ** ** Once the lock has been upgraded and, if necessary, the file opened, ** the pages are written out to the database file in list order. Writing ** a page is skipped if it meets either of the following criteria: ** ** * The page number is greater than Pager.dbSize, or ** * The PGHDR_DONT_WRITE flag is set on the page. ** ** If writing out a page causes the database file to grow, Pager.dbFileSize ** is updated accordingly. If page 1 is written out, then the value cached ** in Pager.dbFileVers[] is updated to match the new value stored in ** the database file. ** ** If everything is successful, SQLITE_OK is returned. If an IO error ** occurs, an IO error code is returned. Or, if the EXCLUSIVE lock cannot ** be obtained, SQLITE_BUSY is returned. */ static int pager_write_pagelist(Pager *pPager, PgHdr *pList){ int rc = SQLITE_OK; /* Return code */ /* This function is only called for rollback pagers in WRITER_DBMOD state. */ assert( !pagerUseWal(pPager) ); assert( pPager->eState==PAGER_WRITER_DBMOD ); assert( pPager->eLock==EXCLUSIVE_LOCK ); /* If the file is a temp-file has not yet been opened, open it now. It ** is not possible for rc to be other than SQLITE_OK if this branch ** is taken, as pager_wait_on_lock() is a no-op for temp-files. */ if( !isOpen(pPager->fd) ){ assert( pPager->tempFile && rc==SQLITE_OK ); rc = pagerOpentemp(pPager, pPager->fd, pPager->vfsFlags); } /* Before the first write, give the VFS a hint of what the final ** file size will be. 
*/ assert( rc!=SQLITE_OK || isOpen(pPager->fd) ); if( rc==SQLITE_OK && pPager->dbHintSize<pPager->dbSize && (pList->pDirty || pList->pgno>pPager->dbHintSize) ){ sqlite3_int64 szFile = pPager->pageSize * (sqlite3_int64)pPager->dbSize; sqlite3OsFileControlHint(pPager->fd, SQLITE_FCNTL_SIZE_HINT, &szFile); pPager->dbHintSize = pPager->dbSize; }
while( rc==SQLITE_OK && pList ){ Pgno pgno = pList->pgno; /* If there are dirty pages in the page cache with page numbers greater ** than Pager.dbSize, this means sqlite3PagerTruncateImage() was called to ** make the file smaller (presumably by auto-vacuum code). Do not write ** any such pages to the file. ** ** Also, do not write out any page that has the PGHDR_DONT_WRITE flag ** set (set by sqlite3PagerDontWrite()). */ if( pgno<=pPager->dbSize && 0==(pList->flags&PGHDR_DONT_WRITE) ){ i64 offset = (pgno-1)*(i64)pPager->pageSize; /* Offset to write */ char *pData; /* Data to write */ assert( (pList->flags&PGHDR_NEED_SYNC)==0 ); if( pList->pgno==1 ) pager_write_changecounter(pList); /* Encode the database */ CODEC2(pPager, pList->pData, pgno, 6, return SQLITE_NOMEM, pData); /* Write out the page data. */ rc = sqlite3OsWrite(pPager->fd, pData, pPager->pageSize, offset); /* If page 1 was just written, update Pager.dbFileVers to match ** the value now stored in the database file. If writing this ** page caused the database file to grow, update dbFileSize. */ if( pgno==1 ){ memcpy(&pPager->dbFileVers, &pData[24], sizeof(pPager->dbFileVers)); } if( pgno>pPager->dbFileSize ){ pPager->dbFileSize = pgno; } pPager->aStat[PAGER_STAT_WRITE]++; /* Update any backup objects copying the contents of this pager. */ sqlite3BackupUpdate(pPager->pBackup, pgno, (u8*)pList->pData); PAGERTRACE(("STORE %d page %d hash(%08x)\n", PAGERID(pPager), pgno, pager_pagehash(pList))); IOTRACE(("PGOUT %p %d\n", pPager, pgno)); PAGER_INCR(sqlite3_pager_writedb_count); }else{ PAGERTRACE(("NOSTORE %d page %d\n", PAGERID(pPager), pgno)); } pager_set_pagehash(pList); pList = pList->pDirty; } return rc; }
/* ** Ensure that the sub-journal file is open. If it is already open, this ** function is a no-op. ** ** SQLITE_OK is returned if everything goes according to plan. An ** SQLITE_IOERR_XXX error code is returned if a call to sqlite3OsOpen() ** fails. */ static int openSubJournal(Pager *pPager){ int rc = SQLITE_OK; if( !isOpen(pPager->sjfd) ){ if( pPager->journalMode==PAGER_JOURNALMODE_MEMORY || pPager->subjInMemory ){ sqlite3MemJournalOpen(pPager->sjfd); }else{ rc = pagerOpentemp(pPager, pPager->sjfd, SQLITE_OPEN_SUBJOURNAL); } } return rc; }
/* ** Append a record of the current state of page pPg to the sub-journal. ** ** If successful, set the bit corresponding to pPg->pgno in the bitvecs ** for all open savepoints before returning. ** ** This function returns SQLITE_OK if everything is successful, an IO ** error code if the attempt to write to the sub-journal fails, or ** SQLITE_NOMEM if a malloc fails while setting a bit in a savepoint ** bitvec.
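**
** For reference, each sub-journal record written by this function is a
** 4-byte big-endian page number followed by the raw page image, so the
** i-th record (counting from zero) begins at byte offset
**
**     i*(4 + pPager->pageSize)
**
** which matches the offset arithmetic used when savepoint records are
** played back earlier in this file.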
*/ static int subjournalPage(PgHdr *pPg){ int rc = SQLITE_OK; Pager *pPager = pPg->pPager; if( pPager->journalMode!=PAGER_JOURNALMODE_OFF ){ /* Open the sub-journal, if it has not already been opened */ assert( pPager->useJournal ); assert( isOpen(pPager->jfd) || pagerUseWal(pPager) ); assert( isOpen(pPager->sjfd) || pPager->nSubRec==0 ); assert( pagerUseWal(pPager) || pageInJournal(pPager, pPg) || pPg->pgno>pPager->dbOrigSize ); rc = openSubJournal(pPager); /* If the sub-journal was opened successfully (or was already open), ** write the journal record into the file. */ if( rc==SQLITE_OK ){ void *pData = pPg->pData; i64 offset = (i64)pPager->nSubRec*(4+pPager->pageSize); char *pData2; CODEC2(pPager, pData, pPg->pgno, 7, return SQLITE_NOMEM, pData2); PAGERTRACE(("STMT-JOURNAL %d page %d\n", PAGERID(pPager), pPg->pgno)); rc = write32bits(pPager->sjfd, offset, pPg->pgno); if( rc==SQLITE_OK ){ rc = sqlite3OsWrite(pPager->sjfd, pData2, pPager->pageSize, offset+4); } } } if( rc==SQLITE_OK ){ pPager->nSubRec++; assert( pPager->nSavepoint>0 ); rc = addToSavepointBitvecs(pPager, pPg->pgno); } return rc; } static int subjournalPageIfRequired(PgHdr *pPg){ if( subjRequiresPage(pPg) ){ return subjournalPage(pPg); }else{ return SQLITE_OK; } } /* ** This function is called by the pcache layer when it has reached some ** soft memory limit. The first argument is a pointer to a Pager object ** (cast as a void*). The pager is always 'purgeable' (not an in-memory ** database). The second argument is a reference to a page that is ** currently dirty but has no outstanding references. The page ** is always associated with the Pager object passed as the first ** argument. ** ** The job of this function is to make pPg clean by writing its contents ** out to the database file, if possible. This may involve syncing the ** journal file. ** ** If successful, sqlite3PcacheMakeClean() is called on the page and ** SQLITE_OK returned. If an IO error occurs while trying to make the ** page clean, the IO error code is returned. If the page cannot be ** made clean for some other reason, but no error occurs, then SQLITE_OK ** is returned by sqlite3PcacheMakeClean() is not called. */ static int pagerStress(void *p, PgHdr *pPg){ Pager *pPager = (Pager *)p; int rc = SQLITE_OK; assert( pPg->pPager==pPager ); assert( pPg->flags&PGHDR_DIRTY ); /* The doNotSpill NOSYNC bit is set during times when doing a sync of ** journal (and adding a new header) is not allowed. This occurs ** during calls to sqlite3PagerWrite() while trying to journal multiple ** pages belonging to the same sector. ** ** The doNotSpill ROLLBACK and OFF bits inhibits all cache spilling ** regardless of whether or not a sync is required. This is set during ** a rollback or by user request, respectively. ** ** Spilling is also prohibited when in an error state since that could ** lead to database corruption. In the current implementation it ** is impossible for sqlite3PcacheFetch() to be called with createFlag==3 ** while in the error state, hence it is impossible for this routine to ** be called in the error state. Nevertheless, we include a NEVER() ** test for the error state as a safeguard against future changes. 
*/ if( NEVER(pPager->errCode) ) return SQLITE_OK; testcase( pPager->doNotSpill & SPILLFLAG_ROLLBACK ); testcase( pPager->doNotSpill & SPILLFLAG_OFF ); testcase( pPager->doNotSpill & SPILLFLAG_NOSYNC ); if( pPager->doNotSpill && ((pPager->doNotSpill & (SPILLFLAG_ROLLBACK|SPILLFLAG_OFF))!=0 || (pPg->flags & PGHDR_NEED_SYNC)!=0) ){ return SQLITE_OK; } pPg->pDirty = 0; if( pagerUseWal(pPager) ){ /* Write a single frame for this page to the log. */ rc = subjournalPageIfRequired(pPg); if( rc==SQLITE_OK ){ rc = pagerWalFrames(pPager, pPg, 0, 0); } }else{ /* Sync the journal file if required. */ if( pPg->flags&PGHDR_NEED_SYNC || pPager->eState==PAGER_WRITER_CACHEMOD ){ rc = syncJournal(pPager, 1); } /* Write the contents of the page out to the database file. */ if( rc==SQLITE_OK ){ assert( (pPg->flags&PGHDR_NEED_SYNC)==0 ); rc = pager_write_pagelist(pPager, pPg); } } /* Mark the page as clean. */ if( rc==SQLITE_OK ){ PAGERTRACE(("STRESS %d page %d\n", PAGERID(pPager), pPg->pgno)); sqlite3PcacheMakeClean(pPg); } return pager_error(pPager, rc); } /* ** Allocate and initialize a new Pager object and put a pointer to it ** in *ppPager. The pager should eventually be freed by passing it ** to sqlite3PagerClose(). ** ** The zFilename argument is the path to the database file to open. ** If zFilename is NULL then a randomly-named temporary file is created ** and used as the file to be cached. Temporary files are be deleted ** automatically when they are closed. If zFilename is ":memory:" then ** all information is held in cache. It is never written to disk. ** This can be used to implement an in-memory database. ** ** The nExtra parameter specifies the number of bytes of space allocated ** along with each page reference. This space is available to the user ** via the sqlite3PagerGetExtra() API. ** ** The flags argument is used to specify properties that affect the ** operation of the pager. It should be passed some bitwise combination ** of the PAGER_* flags. ** ** The vfsFlags parameter is a bitmask to pass to the flags parameter ** of the xOpen() method of the supplied VFS when opening files. ** ** If the pager object is allocated and the specified file opened ** successfully, SQLITE_OK is returned and *ppPager set to point to ** the new pager object. If an error occurs, *ppPager is set to NULL ** and error code returned. This function may return SQLITE_NOMEM ** (sqlite3Malloc() is used to allocate memory), SQLITE_CANTOPEN or ** various SQLITE_IO_XXX errors. */ SQLITE_PRIVATE int sqlite3PagerOpen( sqlite3_vfs *pVfs, /* The virtual file system to use */ Pager **ppPager, /* OUT: Return the Pager structure here */ const char *zFilename, /* Name of the database file to open */ int nExtra, /* Extra bytes append to each in-memory page */ int flags, /* flags controlling this file */ int vfsFlags, /* flags passed through to sqlite3_vfs.xOpen() */ void (*xReinit)(DbPage*) /* Function to reinitialize pages */ ){ u8 *pPtr; Pager *pPager = 0; /* Pager object to allocate and return */ int rc = SQLITE_OK; /* Return code */ int tempFile = 0; /* True for temp files (incl. 
in-memory files) */ int memDb = 0; /* True if this is an in-memory file */ int readOnly = 0; /* True if this is a read-only file */ int journalFileSize; /* Bytes to allocate for each journal fd */ char *zPathname = 0; /* Full path to database file */ int nPathname = 0; /* Number of bytes in zPathname */ int useJournal = (flags & PAGER_OMIT_JOURNAL)==0; /* False to omit journal */ int pcacheSize = sqlite3PcacheSize(); /* Bytes to allocate for PCache */ u32 szPageDflt = SQLITE_DEFAULT_PAGE_SIZE; /* Default page size */ const char *zUri = 0; /* URI args to copy */ int nUri = 0; /* Number of bytes of URI args at *zUri */ /* Figure out how much space is required for each journal file-handle ** (there are two of them, the main journal and the sub-journal). This ** is the maximum space required for an in-memory journal file handle ** and a regular journal file-handle. Note that a "regular journal-handle" ** may be a wrapper capable of caching the first portion of the journal ** file in memory to implement the atomic-write optimization (see ** source file journal.c). */ if( sqlite3JournalSize(pVfs)>sqlite3MemJournalSize() ){ journalFileSize = ROUND8(sqlite3JournalSize(pVfs)); }else{ journalFileSize = ROUND8(sqlite3MemJournalSize()); } /* Set the output variable to NULL in case an error occurs. */ *ppPager = 0; #ifndef SQLITE_OMIT_MEMORYDB if( flags & PAGER_MEMORY ){ memDb = 1; if( zFilename && zFilename[0] ){ zPathname = sqlite3DbStrDup(0, zFilename); if( zPathname==0 ) return SQLITE_NOMEM; nPathname = sqlite3Strlen30(zPathname); zFilename = 0; } } #endif /* Compute and store the full pathname in an allocated buffer pointed ** to by zPathname, length nPathname. Or, if this is a temporary file, ** leave both nPathname and zPathname set to 0. */ if( zFilename && zFilename[0] ){ const char *z; nPathname = pVfs->mxPathname+1; zPathname = sqlite3DbMallocRaw(0, nPathname*2); if( zPathname==0 ){ return SQLITE_NOMEM; } zPathname[0] = 0; /* Make sure initialized even if FullPathname() fails */ rc = sqlite3OsFullPathname(pVfs, zFilename, nPathname, zPathname); nPathname = sqlite3Strlen30(zPathname); z = zUri = &zFilename[sqlite3Strlen30(zFilename)+1]; while( *z ){ z += sqlite3Strlen30(z)+1; z += sqlite3Strlen30(z)+1; } nUri = (int)(&z[1] - zUri); assert( nUri>=0 ); if( rc==SQLITE_OK && nPathname+8>pVfs->mxPathname ){ /* This branch is taken when the journal path required by ** the database being opened will be more than pVfs->mxPathname ** bytes in length. This means the database cannot be opened, ** as it will not be possible to open the journal file or even ** check for a hot-journal before reading. */ rc = SQLITE_CANTOPEN_BKPT; } if( rc!=SQLITE_OK ){ sqlite3DbFree(0, zPathname); return rc; } } /* Allocate memory for the Pager structure, PCache object, the ** three file descriptors, the database file name and the journal ** file name. 
The layout in memory is as follows: ** ** Pager object (sizeof(Pager) bytes) ** PCache object (sqlite3PcacheSize() bytes) ** Database file handle (pVfs->szOsFile bytes) ** Sub-journal file handle (journalFileSize bytes) ** Main journal file handle (journalFileSize bytes) ** Database file name (nPathname+1 bytes) ** Journal file name (nPathname+8+1 bytes) */
pPtr = (u8 *)sqlite3MallocZero( ROUND8(sizeof(*pPager)) + /* Pager structure */ ROUND8(pcacheSize) + /* PCache object */ ROUND8(pVfs->szOsFile) + /* The main db file */ journalFileSize * 2 + /* The two journal files */ nPathname + 1 + nUri + /* zFilename */ nPathname + 8 + 2 /* zJournal */
#ifndef SQLITE_OMIT_WAL
+ nPathname + 4 + 2 /* zWal */
#endif
); assert( EIGHT_BYTE_ALIGNMENT(SQLITE_INT_TO_PTR(journalFileSize)) ); if( !pPtr ){ sqlite3DbFree(0, zPathname); return SQLITE_NOMEM; } pPager = (Pager*)(pPtr); pPager->pPCache = (PCache*)(pPtr += ROUND8(sizeof(*pPager))); pPager->fd = (sqlite3_file*)(pPtr += ROUND8(pcacheSize)); pPager->sjfd = (sqlite3_file*)(pPtr += ROUND8(pVfs->szOsFile)); pPager->jfd = (sqlite3_file*)(pPtr += journalFileSize); pPager->zFilename = (char*)(pPtr += journalFileSize); assert( EIGHT_BYTE_ALIGNMENT(pPager->jfd) );
/* Fill in the Pager.zFilename and Pager.zJournal buffers, if required. */ if( zPathname ){ assert( nPathname>0 ); pPager->zJournal = (char*)(pPtr += nPathname + 1 + nUri); memcpy(pPager->zFilename, zPathname, nPathname); if( nUri ) memcpy(&pPager->zFilename[nPathname+1], zUri, nUri); memcpy(pPager->zJournal, zPathname, nPathname); memcpy(&pPager->zJournal[nPathname], "-journal\000", 8+2); sqlite3FileSuffix3(pPager->zFilename, pPager->zJournal);
#ifndef SQLITE_OMIT_WAL
pPager->zWal = &pPager->zJournal[nPathname+8+1]; memcpy(pPager->zWal, zPathname, nPathname); memcpy(&pPager->zWal[nPathname], "-wal\000", 4+1); sqlite3FileSuffix3(pPager->zFilename, pPager->zWal);
#endif
sqlite3DbFree(0, zPathname); } pPager->pVfs = pVfs; pPager->vfsFlags = vfsFlags;
/* Open the pager file. */ if( zFilename && zFilename[0] ){ int fout = 0; /* VFS flags returned by xOpen() */ rc = sqlite3OsOpen(pVfs, pPager->zFilename, pPager->fd, vfsFlags, &fout); assert( !memDb ); readOnly = (fout&SQLITE_OPEN_READONLY); /* If the file was successfully opened for read/write access, ** choose a default page size in case we have to create the ** database file. The default page size is the maximum of: ** ** + SQLITE_DEFAULT_PAGE_SIZE, ** + The value returned by sqlite3OsSectorSize() ** + The largest page size that can be written atomically. */ if( rc==SQLITE_OK ){ int iDc = sqlite3OsDeviceCharacteristics(pPager->fd); if( !readOnly ){ setSectorSize(pPager); assert(SQLITE_DEFAULT_PAGE_SIZE<=SQLITE_MAX_DEFAULT_PAGE_SIZE); if( szPageDflt<pPager->sectorSize ){ if( pPager->sectorSize>SQLITE_MAX_DEFAULT_PAGE_SIZE ){ szPageDflt = SQLITE_MAX_DEFAULT_PAGE_SIZE; }else{ szPageDflt = (u32)pPager->sectorSize; } }
#ifdef SQLITE_ENABLE_ATOMIC_WRITE
{ int ii; assert(SQLITE_IOCAP_ATOMIC512==(512>>8)); assert(SQLITE_IOCAP_ATOMIC64K==(65536>>8)); assert(SQLITE_MAX_DEFAULT_PAGE_SIZE<=65536); for(ii=szPageDflt; ii<=SQLITE_MAX_DEFAULT_PAGE_SIZE; ii=ii*2){ if( iDc&(SQLITE_IOCAP_ATOMIC|(ii>>8)) ){ szPageDflt = ii; } } }
#endif
} pPager->noLock = sqlite3_uri_boolean(zFilename, "nolock", 0); if( (iDc & SQLITE_IOCAP_IMMUTABLE)!=0 || sqlite3_uri_boolean(zFilename, "immutable", 0) ){ vfsFlags |= SQLITE_OPEN_READONLY; goto act_like_temp_file; } } }else{ /* If a temporary file is requested, it is not opened immediately.
** In this case we accept the default page size and delay actually ** opening the file until the first call to OsWrite(). ** ** This branch is also run for an in-memory database. An in-memory ** database is the same as a temp-file that is never written out to ** disk and uses an in-memory rollback journal. ** ** This branch also runs for files marked as immutable. */ act_like_temp_file: tempFile = 1; pPager->eState = PAGER_READER; /* Pretend we already have a lock */ pPager->eLock = EXCLUSIVE_LOCK; /* Pretend we are in EXCLUSIVE mode */ pPager->noLock = 1; /* Do no locking */ readOnly = (vfsFlags&SQLITE_OPEN_READONLY); } /* The following call to PagerSetPagesize() serves to set the value of ** Pager.pageSize and to allocate the Pager.pTmpSpace buffer. */ if( rc==SQLITE_OK ){ assert( pPager->memDb==0 ); rc = sqlite3PagerSetPagesize(pPager, &szPageDflt, -1); testcase( rc!=SQLITE_OK ); } /* Initialize the PCache object. */ if( rc==SQLITE_OK ){ assert( nExtra<1000 ); nExtra = ROUND8(nExtra); rc = sqlite3PcacheOpen(szPageDflt, nExtra, !memDb, !memDb?pagerStress:0, (void *)pPager, pPager->pPCache); } /* If an error occurred above, free the Pager structure and close the file. */ if( rc!=SQLITE_OK ){ sqlite3OsClose(pPager->fd); sqlite3PageFree(pPager->pTmpSpace); sqlite3_free(pPager); return rc; } PAGERTRACE(("OPEN %d %s\n", FILEHANDLEID(pPager->fd), pPager->zFilename)); IOTRACE(("OPEN %p %s\n", pPager, pPager->zFilename)) pPager->useJournal = (u8)useJournal; /* pPager->stmtOpen = 0; */ /* pPager->stmtInUse = 0; */ /* pPager->nRef = 0; */ /* pPager->stmtSize = 0; */ /* pPager->stmtJSize = 0; */ /* pPager->nPage = 0; */ pPager->mxPgno = SQLITE_MAX_PAGE_COUNT; /* pPager->state = PAGER_UNLOCK; */ /* pPager->errMask = 0; */ pPager->tempFile = (u8)tempFile; assert( tempFile==PAGER_LOCKINGMODE_NORMAL || tempFile==PAGER_LOCKINGMODE_EXCLUSIVE ); assert( PAGER_LOCKINGMODE_EXCLUSIVE==1 ); pPager->exclusiveMode = (u8)tempFile; pPager->changeCountDone = pPager->tempFile; pPager->memDb = (u8)memDb; pPager->readOnly = (u8)readOnly; assert( useJournal || pPager->tempFile ); pPager->noSync = pPager->tempFile; if( pPager->noSync ){ assert( pPager->fullSync==0 ); assert( pPager->syncFlags==0 ); assert( pPager->walSyncFlags==0 ); assert( pPager->ckptSyncFlags==0 ); }else{ pPager->fullSync = 1; pPager->syncFlags = SQLITE_SYNC_NORMAL; pPager->walSyncFlags = SQLITE_SYNC_NORMAL | WAL_SYNC_TRANSACTIONS; pPager->ckptSyncFlags = SQLITE_SYNC_NORMAL; } /* pPager->pFirst = 0; */ /* pPager->pFirstSynced = 0; */ /* pPager->pLast = 0; */ pPager->nExtra = (u16)nExtra; pPager->journalSizeLimit = SQLITE_DEFAULT_JOURNAL_SIZE_LIMIT; assert( isOpen(pPager->fd) || tempFile ); setSectorSize(pPager); if( !useJournal ){ pPager->journalMode = PAGER_JOURNALMODE_OFF; }else if( memDb ){ pPager->journalMode = PAGER_JOURNALMODE_MEMORY; } /* pPager->xBusyHandler = 0; */ /* pPager->pBusyHandlerArg = 0; */ pPager->xReiniter = xReinit; /* memset(pPager->aHash, 0, sizeof(pPager->aHash)); */ /* pPager->szMmap = SQLITE_DEFAULT_MMAP_SIZE // will be set by btree.c */ *ppPager = pPager; return SQLITE_OK; } /* Verify that the database file has not be deleted or renamed out from ** under the pager. Return SQLITE_OK if the database is still were it ought ** to be on disk. Return non-zero (SQLITE_READONLY_DBMOVED or some other error ** code from sqlite3OsAccess()) if the database has gone missing. 
*/ static int databaseIsUnmoved(Pager *pPager){ int bHasMoved = 0; int rc; if( pPager->tempFile ) return SQLITE_OK; if( pPager->dbSize==0 ) return SQLITE_OK; assert( pPager->zFilename && pPager->zFilename[0] ); rc = sqlite3OsFileControl(pPager->fd, SQLITE_FCNTL_HAS_MOVED, &bHasMoved); if( rc==SQLITE_NOTFOUND ){ /* If the HAS_MOVED file-control is unimplemented, assume that the file ** has not been moved. That is the historical behavior of SQLite: prior to ** version 3.8.3, it never checked */ rc = SQLITE_OK; }else if( rc==SQLITE_OK && bHasMoved ){ rc = SQLITE_READONLY_DBMOVED; } return rc; } /* ** This function is called after transitioning from PAGER_UNLOCK to ** PAGER_SHARED state. It tests if there is a hot journal present in ** the file-system for the given pager. A hot journal is one that ** needs to be played back. According to this function, a hot-journal ** file exists if the following criteria are met: ** ** * The journal file exists in the file system, and ** * No process holds a RESERVED or greater lock on the database file, and ** * The database file itself is greater than 0 bytes in size, and ** * The first byte of the journal file exists and is not 0x00. ** ** If the current size of the database file is 0 but a journal file ** exists, that is probably an old journal left over from a prior ** database with the same name. In this case the journal file is ** just deleted using OsDelete, *pExists is set to 0 and SQLITE_OK ** is returned. ** ** This routine does not check if there is a master journal filename ** at the end of the file. If there is, and that master journal file ** does not exist, then the journal file is not really hot. In this ** case this routine will return a false-positive. The pager_playback() ** routine will discover that the journal file is not really hot and ** will not roll it back. ** ** If a hot-journal file is found to exist, *pExists is set to 1 and ** SQLITE_OK returned. If no hot-journal file is present, *pExists is ** set to 0 and SQLITE_OK returned. If an IO error occurs while trying ** to determine whether or not a hot-journal file exists, the IO error ** code is returned and the value of *pExists is undefined. */ static int hasHotJournal(Pager *pPager, int *pExists){ sqlite3_vfs * const pVfs = pPager->pVfs; int rc = SQLITE_OK; /* Return code */ int exists = 1; /* True if a journal file is present */ int jrnlOpen = !!isOpen(pPager->jfd); assert( pPager->useJournal ); assert( isOpen(pPager->fd) ); assert( pPager->eState==PAGER_OPEN ); assert( jrnlOpen==0 || ( sqlite3OsDeviceCharacteristics(pPager->jfd) & SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN )); *pExists = 0; if( !jrnlOpen ){ rc = sqlite3OsAccess(pVfs, pPager->zJournal, SQLITE_ACCESS_EXISTS, &exists); } if( rc==SQLITE_OK && exists ){ int locked = 0; /* True if some process holds a RESERVED lock */ /* Race condition here: Another process might have been holding the ** the RESERVED lock and have a journal open at the sqlite3OsAccess() ** call above, but then delete the journal and drop the lock before ** we get to the following sqlite3OsCheckReservedLock() call. If that ** is the case, this routine might think there is a hot journal when ** in fact there is none. This results in a false-positive which will ** be dealt with by the playback routine. Ticket #3883. 
*/ rc = sqlite3OsCheckReservedLock(pPager->fd, &locked); if( rc==SQLITE_OK && !locked ){ Pgno nPage; /* Number of pages in database file */ rc = pagerPagecount(pPager, &nPage); if( rc==SQLITE_OK ){ /* If the database is zero pages in size, that means that either (1) the ** journal is a remnant from a prior database with the same name where ** the database file but not the journal was deleted, or (2) the initial ** transaction that populates a new database is being rolled back. ** In either case, the journal file can be deleted. However, take care ** not to delete the journal file if it is already open due to ** journal_mode=PERSIST. */ if( nPage==0 && !jrnlOpen ){ sqlite3BeginBenignMalloc(); if( pagerLockDb(pPager, RESERVED_LOCK)==SQLITE_OK ){ sqlite3OsDelete(pVfs, pPager->zJournal, 0); if( !pPager->exclusiveMode ) pagerUnlockDb(pPager, SHARED_LOCK); } sqlite3EndBenignMalloc(); }else{ /* The journal file exists and no other connection has a reserved ** or greater lock on the database file. Now check that there is ** at least one non-zero bytes at the start of the journal file. ** If there is, then we consider this journal to be hot. If not, ** it can be ignored. */ if( !jrnlOpen ){ int f = SQLITE_OPEN_READONLY|SQLITE_OPEN_MAIN_JOURNAL; rc = sqlite3OsOpen(pVfs, pPager->zJournal, pPager->jfd, f, &f); } if( rc==SQLITE_OK ){ u8 first = 0; rc = sqlite3OsRead(pPager->jfd, (void *)&first, 1, 0); if( rc==SQLITE_IOERR_SHORT_READ ){ rc = SQLITE_OK; } if( !jrnlOpen ){ sqlite3OsClose(pPager->jfd); } *pExists = (first!=0); }else if( rc==SQLITE_CANTOPEN ){ /* If we cannot open the rollback journal file in order to see if ** it has a zero header, that might be due to an I/O error, or ** it might be due to the race condition described above and in ** ticket #3883. Either way, assume that the journal is hot. ** This might be a false positive. But if it is, then the ** automatic journal playback and recovery mechanism will deal ** with it under an EXCLUSIVE lock where we do not need to ** worry so much with race conditions. */ *pExists = 1; rc = SQLITE_OK; } } } } } return rc; } /* ** This function is called to obtain a shared lock on the database file. ** It is illegal to call sqlite3PagerAcquire() until after this function ** has been successfully called. If a shared-lock is already held when ** this function is called, it is a no-op. ** ** The following operations are also performed by this function. ** ** 1) If the pager is currently in PAGER_OPEN state (no lock held ** on the database file), then an attempt is made to obtain a ** SHARED lock on the database file. Immediately after obtaining ** the SHARED lock, the file-system is checked for a hot-journal, ** which is played back if present. Following any hot-journal ** rollback, the contents of the cache are validated by checking ** the 'change-counter' field of the database file header and ** discarded if they are found to be invalid. ** ** 2) If the pager is running in exclusive-mode, and there are currently ** no outstanding references to any pages, and is in the error state, ** then an attempt is made to clear the error state by discarding ** the contents of the page cache and rolling back any open journal ** file. ** ** If everything is successful, SQLITE_OK is returned. If an IO error ** occurs while locking the database, checking for a hot-journal file or ** rolling back a journal file, the IO error code is returned. 
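**
** A minimal sketch of how a caller is expected to drive this routine
** (assuming pPager came from sqlite3PagerOpen() and no page references
** are outstanding; error handling abbreviated):
**
**     int rc = sqlite3PagerSharedLock(pPager);
**     if( rc==SQLITE_OK ){
**       DbPage *pPg1 = 0;
**       rc = sqlite3PagerAcquire(pPager, 1, &pPg1, 0);
**       if( rc==SQLITE_OK ) sqlite3PagerUnref(pPg1);
**     }
**
** On success the pager is left in PAGER_READER state; on failure it is
** unlocked and returned to PAGER_OPEN state.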
*/ SQLITE_PRIVATE int sqlite3PagerSharedLock(Pager *pPager){ int rc = SQLITE_OK; /* Return code */ /* This routine is only called from b-tree and only when there are no ** outstanding pages. This implies that the pager state should either ** be OPEN or READER. READER is only possible if the pager is or was in ** exclusive access mode. */ assert( sqlite3PcacheRefCount(pPager->pPCache)==0 ); assert( assert_pager_state(pPager) ); assert( pPager->eState==PAGER_OPEN || pPager->eState==PAGER_READER ); if( NEVER(MEMDB && pPager->errCode) ){ return pPager->errCode; } if( !pagerUseWal(pPager) && pPager->eState==PAGER_OPEN ){ int bHotJournal = 1; /* True if there exists a hot journal-file */ assert( !MEMDB ); rc = pager_wait_on_lock(pPager, SHARED_LOCK); if( rc!=SQLITE_OK ){ assert( pPager->eLock==NO_LOCK || pPager->eLock==UNKNOWN_LOCK ); goto failed; } /* If a journal file exists, and there is no RESERVED lock on the ** database file, then it either needs to be played back or deleted. */ if( pPager->eLock<=SHARED_LOCK ){ rc = hasHotJournal(pPager, &bHotJournal); } if( rc!=SQLITE_OK ){ goto failed; } if( bHotJournal ){ if( pPager->readOnly ){ rc = SQLITE_READONLY_ROLLBACK; goto failed; } /* Get an EXCLUSIVE lock on the database file. At this point it is ** important that a RESERVED lock is not obtained on the way to the ** EXCLUSIVE lock. If it were, another process might open the ** database file, detect the RESERVED lock, and conclude that the ** database is safe to read while this process is still rolling the ** hot-journal back. ** ** Because the intermediate RESERVED lock is not requested, any ** other process attempting to access the database file will get to ** this point in the code and fail to obtain its own EXCLUSIVE lock ** on the database file. ** ** Unless the pager is in locking_mode=exclusive mode, the lock is ** downgraded to SHARED_LOCK before this function returns. */ rc = pagerLockDb(pPager, EXCLUSIVE_LOCK); if( rc!=SQLITE_OK ){ goto failed; } /* If it is not already open and the file exists on disk, open the ** journal for read/write access. Write access is required because ** in exclusive-access mode the file descriptor will be kept open ** and possibly used for a transaction later on. Also, write-access ** is usually required to finalize the journal in journal_mode=persist ** mode (and also for journal_mode=truncate on some systems). ** ** If the journal does not exist, it usually means that some ** other connection managed to get in and roll it back before ** this connection obtained the exclusive lock above. Or, it ** may mean that the pager was in the error-state when this ** function was called and the journal file does not exist. */ if( !isOpen(pPager->jfd) ){ sqlite3_vfs * const pVfs = pPager->pVfs; int bExists; /* True if journal file exists */ rc = sqlite3OsAccess( pVfs, pPager->zJournal, SQLITE_ACCESS_EXISTS, &bExists); if( rc==SQLITE_OK && bExists ){ int fout = 0; int f = SQLITE_OPEN_READWRITE|SQLITE_OPEN_MAIN_JOURNAL; assert( !pPager->tempFile ); rc = sqlite3OsOpen(pVfs, pPager->zJournal, pPager->jfd, f, &fout); assert( rc!=SQLITE_OK || isOpen(pPager->jfd) ); if( rc==SQLITE_OK && fout&SQLITE_OPEN_READONLY ){ rc = SQLITE_CANTOPEN_BKPT; sqlite3OsClose(pPager->jfd); } } } /* Playback and delete the journal. Drop the database write ** lock and reacquire the read lock. Purge the cache before ** playing back the hot-journal so that we don't end up with ** an inconsistent cache. 
Sync the hot journal before playing ** it back since the process that crashed and left the hot journal ** probably did not sync it and we are required to always sync ** the journal before playing it back. */ if( isOpen(pPager->jfd) ){ assert( rc==SQLITE_OK ); rc = pagerSyncHotJournal(pPager); if( rc==SQLITE_OK ){ rc = pager_playback(pPager, 1); pPager->eState = PAGER_OPEN; } }else if( !pPager->exclusiveMode ){ pagerUnlockDb(pPager, SHARED_LOCK); } if( rc!=SQLITE_OK ){ /* This branch is taken if an error occurs while trying to open ** or roll back a hot-journal while holding an EXCLUSIVE lock. The ** pager_unlock() routine will be called before returning to unlock ** the file. If the unlock attempt fails, then Pager.eLock must be ** set to UNKNOWN_LOCK (see the comment above the #define for ** UNKNOWN_LOCK above for an explanation). ** ** In order to get pager_unlock() to do this, set Pager.eState to ** PAGER_ERROR now. This is not actually counted as a transition ** to ERROR state in the state diagram at the top of this file, ** since we know that the same call to pager_unlock() will very ** shortly transition the pager object to the OPEN state. Calling ** assert_pager_state() would fail now, as it should not be possible ** to be in ERROR state when there are zero outstanding page ** references. */ pager_error(pPager, rc); goto failed; } assert( pPager->eState==PAGER_OPEN ); assert( (pPager->eLock==SHARED_LOCK) || (pPager->exclusiveMode && pPager->eLock>SHARED_LOCK) ); } if( !pPager->tempFile && pPager->hasBeenUsed ){ /* The shared-lock has just been acquired then check to ** see if the database has been modified. If the database has changed, ** flush the cache. The pPager->hasBeenUsed flag prevents this from ** occurring on the very first access to a file, in order to save a ** single unnecessary sqlite3OsRead() call at the start-up. ** ** Database changes are detected by looking at 15 bytes beginning ** at offset 24 into the file. The first 4 of these 16 bytes are ** a 32-bit counter that is incremented with each change. The ** other bytes change randomly with each file change when ** a codec is in use. ** ** There is a vanishingly small chance that a change will not be ** detected. The chance of an undetected change is so small that ** it can be neglected. */ Pgno nPage = 0; char dbFileVers[sizeof(pPager->dbFileVers)]; rc = pagerPagecount(pPager, &nPage); if( rc ) goto failed; if( nPage>0 ){ IOTRACE(("CKVERS %p %d\n", pPager, sizeof(dbFileVers))); rc = sqlite3OsRead(pPager->fd, &dbFileVers, sizeof(dbFileVers), 24); if( rc!=SQLITE_OK && rc!=SQLITE_IOERR_SHORT_READ ){ goto failed; } }else{ memset(dbFileVers, 0, sizeof(dbFileVers)); } if( memcmp(pPager->dbFileVers, dbFileVers, sizeof(dbFileVers))!=0 ){ pager_reset(pPager); /* Unmap the database file. It is possible that external processes ** may have truncated the database file and then extended it back ** to its original size while this process was not holding a lock. ** In this case there may exist a Pager.pMap mapping that appears ** to be the right size but is not actually valid. Avoid this ** possibility by unmapping the db here. */ if( USEFETCH(pPager) ){ sqlite3OsUnfetch(pPager->fd, 0, 0); } } } /* If there is a WAL file in the file-system, open this database in WAL ** mode. Otherwise, the following function call is a no-op. 
*/ rc = pagerOpenWalIfPresent(pPager); #ifndef SQLITE_OMIT_WAL assert( pPager->pWal==0 || rc==SQLITE_OK ); #endif } if( pagerUseWal(pPager) ){ assert( rc==SQLITE_OK ); rc = pagerBeginReadTransaction(pPager); } if( pPager->eState==PAGER_OPEN && rc==SQLITE_OK ){ rc = pagerPagecount(pPager, &pPager->dbSize); } failed: if( rc!=SQLITE_OK ){ assert( !MEMDB ); pager_unlock(pPager); assert( pPager->eState==PAGER_OPEN ); }else{ pPager->eState = PAGER_READER; } return rc; } /* ** If the reference count has reached zero, rollback any active ** transaction and unlock the pager. ** ** Except, in locking_mode=EXCLUSIVE when there is nothing to in ** the rollback journal, the unlock is not performed and there is ** nothing to rollback, so this routine is a no-op. */ static void pagerUnlockIfUnused(Pager *pPager){ if( pPager->nMmapOut==0 && (sqlite3PcacheRefCount(pPager->pPCache)==0) ){ pagerUnlockAndRollback(pPager); } } /* ** Acquire a reference to page number pgno in pager pPager (a page ** reference has type DbPage*). If the requested reference is ** successfully obtained, it is copied to *ppPage and SQLITE_OK returned. ** ** If the requested page is already in the cache, it is returned. ** Otherwise, a new page object is allocated and populated with data ** read from the database file. In some cases, the pcache module may ** choose not to allocate a new page object and may reuse an existing ** object with no outstanding references. ** ** The extra data appended to a page is always initialized to zeros the ** first time a page is loaded into memory. If the page requested is ** already in the cache when this function is called, then the extra ** data is left as it was when the page object was last used. ** ** If the database image is smaller than the requested page or if a ** non-zero value is passed as the noContent parameter and the ** requested page is not already stored in the cache, then no ** actual disk read occurs. In this case the memory image of the ** page is initialized to all zeros. ** ** If noContent is true, it means that we do not care about the contents ** of the page. This occurs in two scenarios: ** ** a) When reading a free-list leaf page from the database, and ** ** b) When a savepoint is being rolled back and we need to load ** a new page into the cache to be filled with the data read ** from the savepoint journal. ** ** If noContent is true, then the data returned is zeroed instead of ** being read from the database. Additionally, the bits corresponding ** to pgno in Pager.pInJournal (bitvec of pages already written to the ** journal file) and the PagerSavepoint.pInSavepoint bitvecs of any open ** savepoints are set. This means if the page is made writable at any ** point in the future, using a call to sqlite3PagerWrite(), its contents ** will not be journaled. This saves IO. ** ** The acquisition might fail for several reasons. In all cases, ** an appropriate error code is returned and *ppPage is set to NULL. ** ** See also sqlite3PagerLookup(). Both this routine and Lookup() attempt ** to find a page in the in-memory cache first. If the page is not already ** in memory, this routine goes to disk to read it in whereas Lookup() ** just returns 0. This routine acquires a read-lock the first time it ** has to go to disk, and could also playback an old journal if necessary. ** Since Lookup() never goes to disk, it never has to deal with locks ** or journal files. 
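**
** An illustrative caller-side sketch of that difference (hypothetical
** code, error handling omitted):
**
**     int rc = SQLITE_OK;
**     DbPage *pPg = sqlite3PagerLookup(pPager, pgno);
**     if( pPg==0 ){
**       rc = sqlite3PagerAcquire(pPager, pgno, &pPg, 0);
**     }
**     if( pPg ) sqlite3PagerUnrefNotNull(pPg);
**
** Lookup() alone never touches the disk or the locking state, while the
** Acquire() fallback may read the page image from the database file.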
*/ SQLITE_PRIVATE int sqlite3PagerAcquire( Pager *pPager, /* The pager open on the database file */ Pgno pgno, /* Page number to fetch */ DbPage **ppPage, /* Write a pointer to the page here */ int flags /* PAGER_GET_XXX flags */ ){ int rc = SQLITE_OK; PgHdr *pPg = 0; u32 iFrame = 0; /* Frame to read from WAL file */ const int noContent = (flags & PAGER_GET_NOCONTENT);
/* It is acceptable to use a read-only (mmap) page for any page except ** page 1 if there is no write-transaction open or the ACQUIRE_READONLY ** flag was specified by the caller. And so long as the db is not a ** temporary or in-memory database. */ const int bMmapOk = (pgno!=1 && USEFETCH(pPager) && (pPager->eState==PAGER_READER || (flags & PAGER_GET_READONLY)) #ifdef SQLITE_HAS_CODEC && pPager->xCodec==0 #endif );
assert( pPager->eState>=PAGER_READER ); assert( assert_pager_state(pPager) ); assert( noContent==0 || bMmapOk==0 ); if( pgno==0 ){ return SQLITE_CORRUPT_BKPT; } pPager->hasBeenUsed = 1;
/* If the pager is in the error state, return an error immediately. ** Otherwise, request the page from the PCache layer. */ if( pPager->errCode!=SQLITE_OK ){ rc = pPager->errCode; }else{ if( bMmapOk && pagerUseWal(pPager) ){ rc = sqlite3WalFindFrame(pPager->pWal, pgno, &iFrame); if( rc!=SQLITE_OK ) goto pager_acquire_err; }
if( bMmapOk && iFrame==0 ){ void *pData = 0; rc = sqlite3OsFetch(pPager->fd, (i64)(pgno-1) * pPager->pageSize, pPager->pageSize, &pData ); if( rc==SQLITE_OK && pData ){ if( pPager->eState>PAGER_READER ){ pPg = sqlite3PagerLookup(pPager, pgno); } if( pPg==0 ){ rc = pagerAcquireMapPage(pPager, pgno, pData, &pPg); }else{ sqlite3OsUnfetch(pPager->fd, (i64)(pgno-1)*pPager->pageSize, pData); } if( pPg ){ assert( rc==SQLITE_OK ); *ppPage = pPg; return SQLITE_OK; } } if( rc!=SQLITE_OK ){ goto pager_acquire_err; } }
{ sqlite3_pcache_page *pBase; pBase = sqlite3PcacheFetch(pPager->pPCache, pgno, 3); if( pBase==0 ){ rc = sqlite3PcacheFetchStress(pPager->pPCache, pgno, &pBase); if( rc!=SQLITE_OK ) goto pager_acquire_err; if( pBase==0 ){ pPg = *ppPage = 0; rc = SQLITE_NOMEM; goto pager_acquire_err; } } pPg = *ppPage = sqlite3PcacheFetchFinish(pPager->pPCache, pgno, pBase); assert( pPg!=0 ); } }
if( rc!=SQLITE_OK ){ /* Either the call to sqlite3PcacheFetch() returned an error or the ** pager was already in the error-state when this function was called. ** Set pPg to 0 and jump to the exception handler. */ pPg = 0; goto pager_acquire_err; } assert( pPg==(*ppPage) ); assert( pPg->pgno==pgno ); assert( pPg->pPager==pPager || pPg->pPager==0 );
if( pPg->pPager && !noContent ){ /* In this case the pcache already contains an initialized copy of ** the page. Return without further ado. */ assert( pgno<=PAGER_MAX_PGNO && pgno!=PAGER_MJ_PGNO(pPager) ); pPager->aStat[PAGER_STAT_HIT]++; return SQLITE_OK; }else{ /* The pager cache has created a new page. Its content needs to ** be initialized. */ pPg->pPager = pPager;
/* The maximum page number is 2^31. Return SQLITE_CORRUPT if a page ** number greater than this, or the unused locking-page, is requested. */ if( pgno>PAGER_MAX_PGNO || pgno==PAGER_MJ_PGNO(pPager) ){ rc = SQLITE_CORRUPT_BKPT; goto pager_acquire_err; }
if( MEMDB || pPager->dbSize<(Pgno)pgno || noContent || !isOpen(pPager->fd) ){ if( pgno>pPager->mxPgno ){ rc = SQLITE_FULL; goto pager_acquire_err; } if( noContent ){ /* Failure to set the bits in the InJournal bit-vectors is benign. ** It merely means that we might do some extra work to journal a ** page that does not need to be journaled.
Nevertheless, be sure ** to test the case where a malloc error occurs while trying to set ** a bit in a bit vector. */ sqlite3BeginBenignMalloc(); if( pgno<=pPager->dbOrigSize ){ TESTONLY( rc = ) sqlite3BitvecSet(pPager->pInJournal, pgno); testcase( rc==SQLITE_NOMEM ); } TESTONLY( rc = ) addToSavepointBitvecs(pPager, pgno); testcase( rc==SQLITE_NOMEM ); sqlite3EndBenignMalloc(); } memset(pPg->pData, 0, pPager->pageSize); IOTRACE(("ZERO %p %d\n", pPager, pgno)); }else{ if( pagerUseWal(pPager) && bMmapOk==0 ){ rc = sqlite3WalFindFrame(pPager->pWal, pgno, &iFrame); if( rc!=SQLITE_OK ) goto pager_acquire_err; } assert( pPg->pPager==pPager ); pPager->aStat[PAGER_STAT_MISS]++; rc = readDbPage(pPg, iFrame); if( rc!=SQLITE_OK ){ goto pager_acquire_err; } } pager_set_pagehash(pPg); } return SQLITE_OK; pager_acquire_err: assert( rc!=SQLITE_OK ); if( pPg ){ sqlite3PcacheDrop(pPg); } pagerUnlockIfUnused(pPager); *ppPage = 0; return rc; } /* ** Acquire a page if it is already in the in-memory cache. Do ** not read the page from disk. Return a pointer to the page, ** or 0 if the page is not in cache. ** ** See also sqlite3PagerGet(). The difference between this routine ** and sqlite3PagerGet() is that _get() will go to the disk and read ** in the page if the page is not already in cache. This routine ** returns NULL if the page is not in cache or if a disk I/O error ** has ever happened. */ SQLITE_PRIVATE DbPage *sqlite3PagerLookup(Pager *pPager, Pgno pgno){ sqlite3_pcache_page *pPage; assert( pPager!=0 ); assert( pgno!=0 ); assert( pPager->pPCache!=0 ); pPage = sqlite3PcacheFetch(pPager->pPCache, pgno, 0); assert( pPage==0 || pPager->hasBeenUsed ); if( pPage==0 ) return 0; return sqlite3PcacheFetchFinish(pPager->pPCache, pgno, pPage); } /* ** Release a page reference. ** ** If the number of references to the page drop to zero, then the ** page is added to the LRU list. When all references to all pages ** are released, a rollback occurs and the lock on the database is ** removed. */ SQLITE_PRIVATE void sqlite3PagerUnrefNotNull(DbPage *pPg){ Pager *pPager; assert( pPg!=0 ); pPager = pPg->pPager; if( pPg->flags & PGHDR_MMAP ){ pagerReleaseMapPage(pPg); }else{ sqlite3PcacheRelease(pPg); } pagerUnlockIfUnused(pPager); } SQLITE_PRIVATE void sqlite3PagerUnref(DbPage *pPg){ if( pPg ) sqlite3PagerUnrefNotNull(pPg); } /* ** This function is called at the start of every write transaction. ** There must already be a RESERVED or EXCLUSIVE lock on the database ** file when this routine is called. ** ** Open the journal file for pager pPager and write a journal header ** to the start of it. If there are active savepoints, open the sub-journal ** as well. This function is only used when the journal file is being ** opened to write a rollback log for a transaction. It is not used ** when opening a hot journal file to roll it back. ** ** If the journal file is already open (as it may be in exclusive mode), ** then this function just writes a journal header to the start of the ** already open file. ** ** Whether or not the journal file is opened by this function, the ** Pager.pInJournal bitvec structure is allocated. ** ** Return SQLITE_OK if everything is successful. Otherwise, return ** SQLITE_NOMEM if the attempt to allocate Pager.pInJournal fails, or ** an IO error code if opening or writing the journal file fails. 
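**
** As a rough sketch of the open logic below (illustrative only), the VFS
** flags used when the rollback journal is first created follow this
** pattern:
**
**   int flags = SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE;
**   if( pPager->tempFile ){
**     flags |= SQLITE_OPEN_DELETEONCLOSE | SQLITE_OPEN_TEMP_JOURNAL;
**   }else{
**     flags |= SQLITE_OPEN_MAIN_JOURNAL;
**   }
**   rc = sqlite3OsOpen(pVfs, pPager->zJournal, pPager->jfd, flags, 0);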
*/ static int pager_open_journal(Pager *pPager){ int rc = SQLITE_OK; /* Return code */ sqlite3_vfs * const pVfs = pPager->pVfs; /* Local cache of vfs pointer */ assert( pPager->eState==PAGER_WRITER_LOCKED ); assert( assert_pager_state(pPager) ); assert( pPager->pInJournal==0 );
/* If already in the error state, this function is a no-op. But on ** the other hand, this routine is never called if we are already in ** an error state. */ if( NEVER(pPager->errCode) ) return pPager->errCode;
if( !pagerUseWal(pPager) && pPager->journalMode!=PAGER_JOURNALMODE_OFF ){ pPager->pInJournal = sqlite3BitvecCreate(pPager->dbSize); if( pPager->pInJournal==0 ){ return SQLITE_NOMEM; }
/* Open the journal file if it is not already open. */ if( !isOpen(pPager->jfd) ){ if( pPager->journalMode==PAGER_JOURNALMODE_MEMORY ){ sqlite3MemJournalOpen(pPager->jfd); }else{ const int flags = /* VFS flags to open journal file */ SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE| (pPager->tempFile ? (SQLITE_OPEN_DELETEONCLOSE|SQLITE_OPEN_TEMP_JOURNAL): (SQLITE_OPEN_MAIN_JOURNAL) );
/* Verify that the database still has the same name as it did when ** it was originally opened. */ rc = databaseIsUnmoved(pPager); if( rc==SQLITE_OK ){ #ifdef SQLITE_ENABLE_ATOMIC_WRITE rc = sqlite3JournalOpen( pVfs, pPager->zJournal, pPager->jfd, flags, jrnlBufferSize(pPager) ); #else rc = sqlite3OsOpen(pVfs, pPager->zJournal, pPager->jfd, flags, 0); #endif } } assert( rc!=SQLITE_OK || isOpen(pPager->jfd) ); }
/* Write the first journal header to the journal file and open ** the sub-journal if necessary. */ if( rc==SQLITE_OK ){ /* TODO: Check if all of these are really required. */ pPager->nRec = 0; pPager->journalOff = 0; pPager->setMaster = 0; pPager->journalHdr = 0; rc = writeJournalHdr(pPager); } }
if( rc!=SQLITE_OK ){ sqlite3BitvecDestroy(pPager->pInJournal); pPager->pInJournal = 0; }else{ assert( pPager->eState==PAGER_WRITER_LOCKED ); pPager->eState = PAGER_WRITER_CACHEMOD; } return rc; }
/* ** Begin a write-transaction on the specified pager object. If a ** write-transaction has already been opened, this function is a no-op. ** ** If the exFlag argument is false, then acquire at least a RESERVED ** lock on the database file. If exFlag is true, then acquire at least ** an EXCLUSIVE lock. If such a lock is already held, no locking ** functions need be called. ** ** If the subjInMemory argument is non-zero, then any sub-journal opened ** within this transaction will be opened as an in-memory file. This ** has no effect if the sub-journal is already opened (as it may be when ** running in exclusive mode) or if the transaction does not require a ** sub-journal. If the subjInMemory argument is zero, then any required ** sub-journal is implemented in-memory if pPager is an in-memory database, ** or using a temporary file otherwise. */ SQLITE_PRIVATE int sqlite3PagerBegin(Pager *pPager, int exFlag, int subjInMemory){ int rc = SQLITE_OK; if( pPager->errCode ) return pPager->errCode; assert( pPager->eState>=PAGER_READER && pPager->eState<PAGER_ERROR ); pPager->subjInMemory = (u8)subjInMemory;
if( ALWAYS(pPager->eState==PAGER_READER) ){ assert( pPager->pInJournal==0 ); if( pagerUseWal(pPager) ){ /* If the pager is configured to use locking_mode=exclusive, and an ** exclusive lock on the database is not already held, obtain it now. */ if( pPager->exclusiveMode && sqlite3WalExclusiveMode(pPager->pWal, -1) ){ rc = pagerLockDb(pPager, EXCLUSIVE_LOCK); if( rc!=SQLITE_OK ){ return rc; } sqlite3WalExclusiveMode(pPager->pWal, 1); }
/* Grab the write lock on the log file.
If successful, upgrade to ** PAGER_RESERVED state. Otherwise, return an error code to the caller. ** The busy-handler is not invoked if another connection already ** holds the write-lock. If possible, the upper layer will call it. */ rc = sqlite3WalBeginWriteTransaction(pPager->pWal); }else{ /* Obtain a RESERVED lock on the database file. If the exFlag parameter ** is true, then immediately upgrade this to an EXCLUSIVE lock. The ** busy-handler callback can be used when upgrading to the EXCLUSIVE ** lock, but not when obtaining the RESERVED lock. */ rc = pagerLockDb(pPager, RESERVED_LOCK); if( rc==SQLITE_OK && exFlag ){ rc = pager_wait_on_lock(pPager, EXCLUSIVE_LOCK); } } if( rc==SQLITE_OK ){ /* Change to WRITER_LOCKED state. ** ** WAL mode sets Pager.eState to PAGER_WRITER_LOCKED or CACHEMOD ** when it has an open transaction, but never to DBMOD or FINISHED. ** This is because in those states the code to roll back savepoint ** transactions may copy data from the sub-journal into the database ** file as well as into the page cache. Which would be incorrect in ** WAL mode. */ pPager->eState = PAGER_WRITER_LOCKED; pPager->dbHintSize = pPager->dbSize; pPager->dbFileSize = pPager->dbSize; pPager->dbOrigSize = pPager->dbSize; pPager->journalOff = 0; } assert( rc==SQLITE_OK || pPager->eState==PAGER_READER ); assert( rc!=SQLITE_OK || pPager->eState==PAGER_WRITER_LOCKED ); assert( assert_pager_state(pPager) ); } PAGERTRACE(("TRANSACTION %d\n", PAGERID(pPager))); return rc; } /* ** Write page pPg onto the end of the rollback journal. */ static SQLITE_NOINLINE int pagerAddPageToRollbackJournal(PgHdr *pPg){ Pager *pPager = pPg->pPager; int rc; u32 cksum; char *pData2; i64 iOff = pPager->journalOff; /* We should never write to the journal file the page that ** contains the database locks. The following assert verifies ** that we do not. */ assert( pPg->pgno!=PAGER_MJ_PGNO(pPager) ); assert( pPager->journalHdr<=pPager->journalOff ); CODEC2(pPager, pPg->pData, pPg->pgno, 7, return SQLITE_NOMEM, pData2); cksum = pager_cksum(pPager, (u8*)pData2); /* Even if an IO or diskfull error occurs while journalling the ** page in the block above, set the need-sync flag for the page. ** Otherwise, when the transaction is rolled back, the logic in ** playback_one_page() will think that the page needs to be restored ** in the database file. And if an IO error occurs while doing so, ** then corruption may follow. */ pPg->flags |= PGHDR_NEED_SYNC; rc = write32bits(pPager->jfd, iOff, pPg->pgno); if( rc!=SQLITE_OK ) return rc; rc = sqlite3OsWrite(pPager->jfd, pData2, pPager->pageSize, iOff+4); if( rc!=SQLITE_OK ) return rc; rc = write32bits(pPager->jfd, iOff+pPager->pageSize+4, cksum); if( rc!=SQLITE_OK ) return rc; IOTRACE(("JOUT %p %d %lld %d\n", pPager, pPg->pgno, pPager->journalOff, pPager->pageSize)); PAGER_INCR(sqlite3_pager_writej_count); PAGERTRACE(("JOURNAL %d page %d needSync=%d hash(%08x)\n", PAGERID(pPager), pPg->pgno, ((pPg->flags&PGHDR_NEED_SYNC)?1:0), pager_pagehash(pPg))); pPager->journalOff += 8 + pPager->pageSize; pPager->nRec++; assert( pPager->pInJournal!=0 ); rc = sqlite3BitvecSet(pPager->pInJournal, pPg->pgno); testcase( rc==SQLITE_NOMEM ); assert( rc==SQLITE_OK || rc==SQLITE_NOMEM ); rc |= addToSavepointBitvecs(pPager, pPg->pgno); assert( rc==SQLITE_OK || rc==SQLITE_NOMEM ); return rc; } /* ** Mark a single data page as writeable. The page is written into the ** main journal or sub-journal as required. 
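**
** For reference, a single record written by pagerAddPageToRollbackJournal()
** above has the following on-disk layout (sketch, offsets relative to the
** start of the record):
**
**   +0               4-byte big-endian page number
**   +4               page image (Pager.pageSize bytes)
**   +4+pageSize      4-byte checksum of the page image
**
** which is why the journal offset advances by 8 + pageSize per record.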
If the page is written into ** one of the journals, the corresponding bit is set in the ** Pager.pInJournal bitvec and the PagerSavepoint.pInSavepoint bitvecs ** of any open savepoints as appropriate. */ static int pager_write(PgHdr *pPg){ Pager *pPager = pPg->pPager; int rc = SQLITE_OK;
/* This routine is not called unless a write-transaction has already ** been started. The journal file may or may not be open at this point. ** It is never called in the ERROR state. */ assert( pPager->eState==PAGER_WRITER_LOCKED || pPager->eState==PAGER_WRITER_CACHEMOD || pPager->eState==PAGER_WRITER_DBMOD ); assert( assert_pager_state(pPager) ); assert( pPager->errCode==0 ); assert( pPager->readOnly==0 ); CHECK_PAGE(pPg);
/* The journal file needs to be opened. Higher level routines have already ** obtained the necessary locks to begin the write-transaction, but the ** rollback journal might not yet be open. Open it now if this is the case. ** ** This is done before calling sqlite3PcacheMakeDirty() on the page. ** Otherwise, if it were done after calling sqlite3PcacheMakeDirty(), then ** an error might occur and the pager would end up in WRITER_LOCKED state ** with pages marked as dirty in the cache. */ if( pPager->eState==PAGER_WRITER_LOCKED ){ rc = pager_open_journal(pPager); if( rc!=SQLITE_OK ) return rc; } assert( pPager->eState>=PAGER_WRITER_CACHEMOD ); assert( assert_pager_state(pPager) );
/* Mark the page that is about to be modified as dirty. */ sqlite3PcacheMakeDirty(pPg);
/* If a rollback journal is in use, then make sure the page that is about ** to change is in the rollback journal, or if the page is a new page off ** the end of the file, make sure it is marked as PGHDR_NEED_SYNC. */ assert( (pPager->pInJournal!=0) == isOpen(pPager->jfd) ); if( pPager->pInJournal!=0 && sqlite3BitvecTestNotNull(pPager->pInJournal, pPg->pgno)==0 ){ assert( pagerUseWal(pPager)==0 ); if( pPg->pgno<=pPager->dbOrigSize ){ rc = pagerAddPageToRollbackJournal(pPg); if( rc!=SQLITE_OK ){ return rc; } }else{ if( pPager->eState!=PAGER_WRITER_DBMOD ){ pPg->flags |= PGHDR_NEED_SYNC; } PAGERTRACE(("APPEND %d page %d needSync=%d\n", PAGERID(pPager), pPg->pgno, ((pPg->flags&PGHDR_NEED_SYNC)?1:0))); } }
/* The PGHDR_DIRTY bit is set above when the page was added to the dirty-list ** and before writing the page into the rollback journal. Wait until now, ** after the page has been successfully journalled, before setting the ** PGHDR_WRITEABLE bit that indicates that the page can be safely modified. */ pPg->flags |= PGHDR_WRITEABLE;
/* If the statement journal is open and the page is not in it, ** then write the page into the statement journal. */ if( pPager->nSavepoint>0 ){ rc = subjournalPageIfRequired(pPg); }
/* Update the database size and return. */ if( pPager->dbSize<pPg->pgno ){ pPager->dbSize = pPg->pgno; } return rc; }
/* ** This is a variant of sqlite3PagerWrite() that runs when the sector size ** is larger than the page size. SQLite makes the (reasonable) assumption that ** all bytes of a sector are written together by hardware. Hence, all bytes of ** a sector need to be journalled in case of a power loss in the middle of ** a write. ** ** Usually, the sector size is less than or equal to the page size, in which ** case pages can be individually written. This routine only runs in the ** exceptional case where the page size is smaller than the sector size.
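**
** Sketch of the arithmetic used below (illustrative only, and assuming
** both sizes are powers of two): with nPagePerSector = sectorSize/pageSize,
** the first page of the sector containing page pgno is
**
**   pg1 = ((pgno-1) & ~(nPagePerSector-1)) + 1;
**
** e.g. with 4096-byte sectors and 1024-byte pages (nPagePerSector==4),
** page 7 maps to pg1==5, so pages 5..8 are journalled together.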
*/ static SQLITE_NOINLINE int pagerWriteLargeSector(PgHdr *pPg){ int rc = SQLITE_OK; /* Return code */ Pgno nPageCount; /* Total number of pages in database file */ Pgno pg1; /* First page of the sector pPg is located on. */ int nPage = 0; /* Number of pages starting at pg1 to journal */ int ii; /* Loop counter */ int needSync = 0; /* True if any page has PGHDR_NEED_SYNC */ Pager *pPager = pPg->pPager; /* The pager that owns pPg */ Pgno nPagePerSector = (pPager->sectorSize/pPager->pageSize);
/* Set the doNotSpill NOSYNC bit to 1. This is because we cannot allow ** a journal header to be written between the pages journaled by ** this function. */ assert( !MEMDB ); assert( (pPager->doNotSpill & SPILLFLAG_NOSYNC)==0 ); pPager->doNotSpill |= SPILLFLAG_NOSYNC;
/* This trick assumes that both the page-size and sector-size are ** an integer power of 2. It sets variable pg1 to the identifier ** of the first page of the sector pPg is located on. */ pg1 = ((pPg->pgno-1) & ~(nPagePerSector-1)) + 1;
nPageCount = pPager->dbSize; if( pPg->pgno>nPageCount ){ nPage = (pPg->pgno - pg1)+1; }else if( (pg1+nPagePerSector-1)>nPageCount ){ nPage = nPageCount+1-pg1; }else{ nPage = nPagePerSector; } assert(nPage>0); assert(pg1<=pPg->pgno); assert((pg1+nPage)>pPg->pgno);
for(ii=0; ii<nPage && rc==SQLITE_OK; ii++){ Pgno pg = pg1+ii; PgHdr *pPage; if( pg==pPg->pgno || !sqlite3BitvecTest(pPager->pInJournal, pg) ){ if( pg!=PAGER_MJ_PGNO(pPager) ){ rc = sqlite3PagerGet(pPager, pg, &pPage); if( rc==SQLITE_OK ){ rc = pager_write(pPage); if( pPage->flags&PGHDR_NEED_SYNC ){ needSync = 1; } sqlite3PagerUnrefNotNull(pPage); } } }else if( (pPage = sqlite3PagerLookup(pPager, pg))!=0 ){ if( pPage->flags&PGHDR_NEED_SYNC ){ needSync = 1; } sqlite3PagerUnrefNotNull(pPage); } }
/* If the PGHDR_NEED_SYNC flag is set for any of the nPage pages ** starting at pg1, then it needs to be set for all of them. Because ** writing to any of these nPage pages may damage the others, the ** journal file must contain sync()ed copies of all of them ** before any of them can be written out to the database file. */ if( rc==SQLITE_OK && needSync ){ assert( !MEMDB ); for(ii=0; ii<nPage; ii++){ PgHdr *pPage = sqlite3PagerLookup(pPager, pg1+ii); if( pPage ){ pPage->flags |= PGHDR_NEED_SYNC; sqlite3PagerUnrefNotNull(pPage); } } }
assert( (pPager->doNotSpill & SPILLFLAG_NOSYNC)!=0 ); pPager->doNotSpill &= ~SPILLFLAG_NOSYNC; return rc; }
/* ** Mark a data page as writeable. This routine must be called before ** making changes to a page. The caller must check the return value ** of this function and be careful not to change any page data unless ** this routine returns SQLITE_OK. ** ** The difference between this function and pager_write() is that this ** function also deals with the special case where 2 or more pages ** fit on a single disk sector. In this case all co-resident pages ** must have been written to the journal file before returning. ** ** If an error occurs, SQLITE_NOMEM or an IO error code is returned ** as appropriate. Otherwise, SQLITE_OK. */ SQLITE_PRIVATE int sqlite3PagerWrite(PgHdr *pPg){ Pager *pPager = pPg->pPager; assert( (pPg->flags & PGHDR_MMAP)==0 ); assert( pPager->eState>=PAGER_WRITER_LOCKED ); assert( pPager->eState!=PAGER_ERROR ); assert( assert_pager_state(pPager) ); if( (pPg->flags & PGHDR_WRITEABLE)!=0 && pPager->dbSize>=pPg->pgno ){ if( pPager->nSavepoint ) return subjournalPageIfRequired(pPg); return SQLITE_OK; }else if( pPager->sectorSize > (u32)pPager->pageSize ){ return pagerWriteLargeSector(pPg); }else{ return pager_write(pPg); } }
/* ** Return TRUE if the page given in the argument was previously passed ** to sqlite3PagerWrite().
In other words, return TRUE if it is ok ** to change the content of the page. */ #ifndef NDEBUG SQLITE_PRIVATE int sqlite3PagerIswriteable(DbPage *pPg){ return pPg->flags & PGHDR_WRITEABLE; } #endif /* ** A call to this routine tells the pager that it is not necessary to ** write the information on page pPg back to the disk, even though ** that page might be marked as dirty. This happens, for example, when ** the page has been added as a leaf of the freelist and so its ** content no longer matters. ** ** The overlying software layer calls this routine when all of the data ** on the given page is unused. The pager marks the page as clean so ** that it does not get written to disk. ** ** Tests show that this optimization can quadruple the speed of large ** DELETE operations. */ SQLITE_PRIVATE void sqlite3PagerDontWrite(PgHdr *pPg){ Pager *pPager = pPg->pPager; if( (pPg->flags&PGHDR_DIRTY) && pPager->nSavepoint==0 ){ PAGERTRACE(("DONT_WRITE page %d of %d\n", pPg->pgno, PAGERID(pPager))); IOTRACE(("CLEAN %p %d\n", pPager, pPg->pgno)) pPg->flags |= PGHDR_DONT_WRITE; pPg->flags &= ~PGHDR_WRITEABLE; pager_set_pagehash(pPg); } } /* ** This routine is called to increment the value of the database file ** change-counter, stored as a 4-byte big-endian integer starting at ** byte offset 24 of the pager file. The secondary change counter at ** 92 is also updated, as is the SQLite version number at offset 96. ** ** But this only happens if the pPager->changeCountDone flag is false. ** To avoid excess churning of page 1, the update only happens once. ** See also the pager_write_changecounter() routine that does an ** unconditional update of the change counters. ** ** If the isDirectMode flag is zero, then this is done by calling ** sqlite3PagerWrite() on page 1, then modifying the contents of the ** page data. In this case the file will be updated when the current ** transaction is committed. ** ** The isDirectMode flag may only be non-zero if the library was compiled ** with the SQLITE_ENABLE_ATOMIC_WRITE macro defined. In this case, ** if isDirect is non-zero, then the database file is updated directly ** by writing an updated version of page 1 using a call to the ** sqlite3OsWrite() function. */ static int pager_incr_changecounter(Pager *pPager, int isDirectMode){ int rc = SQLITE_OK; assert( pPager->eState==PAGER_WRITER_CACHEMOD || pPager->eState==PAGER_WRITER_DBMOD ); assert( assert_pager_state(pPager) ); /* Declare and initialize constant integer 'isDirect'. If the ** atomic-write optimization is enabled in this build, then isDirect ** is initialized to the value passed as the isDirectMode parameter ** to this function. Otherwise, it is always set to zero. ** ** The idea is that if the atomic-write optimization is not ** enabled at compile time, the compiler can omit the tests of ** 'isDirect' below, as well as the block enclosed in the ** "if( isDirect )" condition. */ #ifndef SQLITE_ENABLE_ATOMIC_WRITE # define DIRECT_MODE 0 assert( isDirectMode==0 ); UNUSED_PARAMETER(isDirectMode); #else # define DIRECT_MODE isDirectMode #endif if( !pPager->changeCountDone && ALWAYS(pPager->dbSize>0) ){ PgHdr *pPgHdr; /* Reference to page 1 */ assert( !pPager->tempFile && isOpen(pPager->fd) ); /* Open page 1 of the file for writing. */ rc = sqlite3PagerGet(pPager, 1, &pPgHdr); assert( pPgHdr==0 || rc==SQLITE_OK ); /* If page one was fetched successfully, and this function is not ** operating in direct-mode, make page 1 writable. 
When not in ** direct mode, page 1 is always held in cache and hence the PagerGet() ** above is always successful - hence the ALWAYS on rc==SQLITE_OK. */ if( !DIRECT_MODE && ALWAYS(rc==SQLITE_OK) ){ rc = sqlite3PagerWrite(pPgHdr); } if( rc==SQLITE_OK ){ /* Actually do the update of the change counter */ pager_write_changecounter(pPgHdr); /* If running in direct mode, write the contents of page 1 to the file. */ if( DIRECT_MODE ){ const void *zBuf; assert( pPager->dbFileSize>0 ); CODEC2(pPager, pPgHdr->pData, 1, 6, rc=SQLITE_NOMEM, zBuf); if( rc==SQLITE_OK ){ rc = sqlite3OsWrite(pPager->fd, zBuf, pPager->pageSize, 0); pPager->aStat[PAGER_STAT_WRITE]++; } if( rc==SQLITE_OK ){ /* Update the pager's copy of the change-counter. Otherwise, the ** next time a read transaction is opened the cache will be ** flushed (as the change-counter values will not match). */ const void *pCopy = (const void *)&((const char *)zBuf)[24]; memcpy(&pPager->dbFileVers, pCopy, sizeof(pPager->dbFileVers)); pPager->changeCountDone = 1; } }else{ pPager->changeCountDone = 1; } } /* Release the page reference. */ sqlite3PagerUnref(pPgHdr); } return rc; } /* ** Sync the database file to disk. This is a no-op for in-memory databases ** or pages with the Pager.noSync flag set. ** ** If successful, or if called on a pager for which it is a no-op, this ** function returns SQLITE_OK. Otherwise, an IO error code is returned. */ SQLITE_PRIVATE int sqlite3PagerSync(Pager *pPager, const char *zMaster){ int rc = SQLITE_OK; if( isOpen(pPager->fd) ){ void *pArg = (void*)zMaster; rc = sqlite3OsFileControl(pPager->fd, SQLITE_FCNTL_SYNC, pArg); if( rc==SQLITE_NOTFOUND ) rc = SQLITE_OK; } if( rc==SQLITE_OK && !pPager->noSync ){ assert( !MEMDB ); rc = sqlite3OsSync(pPager->fd, pPager->syncFlags); } return rc; } /* ** This function may only be called while a write-transaction is active in ** rollback. If the connection is in WAL mode, this call is a no-op. ** Otherwise, if the connection does not already have an EXCLUSIVE lock on ** the database file, an attempt is made to obtain one. ** ** If the EXCLUSIVE lock is already held or the attempt to obtain it is ** successful, or the connection is in WAL mode, SQLITE_OK is returned. ** Otherwise, either SQLITE_BUSY or an SQLITE_IOERR_XXX error code is ** returned. */ SQLITE_PRIVATE int sqlite3PagerExclusiveLock(Pager *pPager){ int rc = SQLITE_OK; assert( pPager->eState==PAGER_WRITER_CACHEMOD || pPager->eState==PAGER_WRITER_DBMOD || pPager->eState==PAGER_WRITER_LOCKED ); assert( assert_pager_state(pPager) ); if( 0==pagerUseWal(pPager) ){ rc = pager_wait_on_lock(pPager, EXCLUSIVE_LOCK); } return rc; } /* ** Sync the database file for the pager pPager. zMaster points to the name ** of a master journal file that should be written into the individual ** journal file. zMaster may be NULL, which is interpreted as no master ** journal (a single database transaction). ** ** This routine ensures that: ** ** * The database file change-counter is updated, ** * the journal is synced (unless the atomic-write optimization is used), ** * all dirty pages are written to the database file, ** * the database file is truncated (if required), and ** * the database file synced. ** ** The only thing that remains to commit the transaction is to finalize ** (delete, truncate or zero the first part of) the journal file (or ** delete the master journal file if specified). ** ** Note that if zMaster==NULL, this does not overwrite a previous value ** passed to an sqlite3PagerCommitPhaseOne() call. 
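**
** A simplified caller-side sketch (hypothetical, not from this file) of
** how the two commit phases fit together:
**
**   rc = sqlite3PagerCommitPhaseOne(pPager, 0, 0);  /* no master journal */
**   if( rc==SQLITE_OK ){
**     rc = sqlite3PagerCommitPhaseTwo(pPager);      /* finalize journal */
**   }
**   if( rc!=SQLITE_OK ){
**     sqlite3PagerRollback(pPager);
**   }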
** ** If the final parameter - noSync - is true, then the database file itself ** is not synced. The caller must call sqlite3PagerSync() directly to ** sync the database file before calling CommitPhaseTwo() to delete the ** journal file in this case. */ SQLITE_PRIVATE int sqlite3PagerCommitPhaseOne( Pager *pPager, /* Pager object */ const char *zMaster, /* If not NULL, the master journal name */ int noSync /* True to omit the xSync on the db file */ ){ int rc = SQLITE_OK; /* Return code */ assert( pPager->eState==PAGER_WRITER_LOCKED || pPager->eState==PAGER_WRITER_CACHEMOD || pPager->eState==PAGER_WRITER_DBMOD || pPager->eState==PAGER_ERROR ); assert( assert_pager_state(pPager) );
/* If a prior error occurred, report that error again. */ if( NEVER(pPager->errCode) ) return pPager->errCode; PAGERTRACE(("DATABASE SYNC: File=%s zMaster=%s nSize=%d\n", pPager->zFilename, zMaster, pPager->dbSize));
/* If no database changes have been made, return early. */ if( pPager->eState<PAGER_WRITER_CACHEMOD ) return SQLITE_OK;
if( MEMDB ){ /* If this is an in-memory db, or no pages have been written to, or this ** function has already been called, it is mostly a no-op. However, any ** backup in progress needs to be restarted. */ sqlite3BackupRestart(pPager->pBackup); }else{ if( pagerUseWal(pPager) ){ PgHdr *pList = sqlite3PcacheDirtyList(pPager->pPCache); PgHdr *pPageOne = 0; if( pList==0 ){ /* Must have at least one page for the WAL commit flag. ** Ticket [2d1a5c67dfc2363e44f29d9bbd57f] 2011-05-18 */ rc = sqlite3PagerGet(pPager, 1, &pPageOne); pList = pPageOne; pList->pDirty = 0; } assert( rc==SQLITE_OK ); if( ALWAYS(pList) ){ rc = pagerWalFrames(pPager, pList, pPager->dbSize, 1); } sqlite3PagerUnref(pPageOne); if( rc==SQLITE_OK ){ sqlite3PcacheCleanAll(pPager->pPCache); } }else{
/* The following block updates the change-counter. Exactly how it ** does this depends on whether or not the atomic-update optimization ** was enabled at compile time, and if this transaction meets the ** runtime criteria to use the operation: ** ** * The file-system supports the atomic-write property for ** blocks of size page-size, and ** * This commit is not part of a multi-file transaction, and ** * Exactly one page has been modified and stored in the journal file. ** ** If the optimization was not enabled at compile time, then the ** pager_incr_changecounter() function is called to update the change ** counter in 'indirect-mode'. If the optimization is compiled in but ** is not applicable to this transaction, call sqlite3JournalCreate() ** to make sure the journal file has actually been created, then call ** pager_incr_changecounter() to update the change-counter in indirect ** mode. ** ** Otherwise, if the optimization is both enabled and applicable, ** then call pager_incr_changecounter() to update the change-counter ** in 'direct' mode. In this case the journal file will never be ** created for this transaction. */ #ifdef SQLITE_ENABLE_ATOMIC_WRITE PgHdr *pPg; assert( isOpen(pPager->jfd) || pPager->journalMode==PAGER_JOURNALMODE_OFF || pPager->journalMode==PAGER_JOURNALMODE_WAL ); if( !zMaster && isOpen(pPager->jfd) && pPager->journalOff==jrnlBufferSize(pPager) && pPager->dbSize>=pPager->dbOrigSize && (0==(pPg = sqlite3PcacheDirtyList(pPager->pPCache)) || 0==pPg->pDirty) ){ /* Update the db file change counter via the direct-write method. The ** following call will modify the in-memory representation of page 1 ** to include the updated change counter and then write page 1 ** directly to the database file. Because of the atomic-write ** property of the host file-system, this is safe.
*/ rc = pager_incr_changecounter(pPager, 1); }else{ rc = sqlite3JournalCreate(pPager->jfd); if( rc==SQLITE_OK ){ rc = pager_incr_changecounter(pPager, 0); } } #else rc = pager_incr_changecounter(pPager, 0); #endif if( rc!=SQLITE_OK ) goto commit_phase_one_exit; /* Write the master journal name into the journal file. If a master ** journal file name has already been written to the journal file, ** or if zMaster is NULL (no master journal), then this call is a no-op. */ rc = writeMasterJournal(pPager, zMaster); if( rc!=SQLITE_OK ) goto commit_phase_one_exit; /* Sync the journal file and write all dirty pages to the database. ** If the atomic-update optimization is being used, this sync will not ** create the journal file or perform any real IO. ** ** Because the change-counter page was just modified, unless the ** atomic-update optimization is used it is almost certain that the ** journal requires a sync here. However, in locking_mode=exclusive ** on a system under memory pressure it is just possible that this is ** not the case. In this case it is likely enough that the redundant ** xSync() call will be changed to a no-op by the OS anyhow. */ rc = syncJournal(pPager, 0); if( rc!=SQLITE_OK ) goto commit_phase_one_exit; rc = pager_write_pagelist(pPager,sqlite3PcacheDirtyList(pPager->pPCache)); if( rc!=SQLITE_OK ){ assert( rc!=SQLITE_IOERR_BLOCKED ); goto commit_phase_one_exit; } sqlite3PcacheCleanAll(pPager->pPCache); /* If the file on disk is smaller than the database image, use ** pager_truncate to grow the file here. This can happen if the database ** image was extended as part of the current transaction and then the ** last page in the db image moved to the free-list. In this case the ** last page is never written out to disk, leaving the database file ** undersized. Fix this now if it is the case. */ if( pPager->dbSize>pPager->dbFileSize ){ Pgno nNew = pPager->dbSize - (pPager->dbSize==PAGER_MJ_PGNO(pPager)); assert( pPager->eState==PAGER_WRITER_DBMOD ); rc = pager_truncate(pPager, nNew); if( rc!=SQLITE_OK ) goto commit_phase_one_exit; } /* Finally, sync the database file. */ if( !noSync ){ rc = sqlite3PagerSync(pPager, zMaster); } IOTRACE(("DBSYNC %p\n", pPager)) } } commit_phase_one_exit: if( rc==SQLITE_OK && !pagerUseWal(pPager) ){ pPager->eState = PAGER_WRITER_FINISHED; } return rc; } /* ** When this function is called, the database file has been completely ** updated to reflect the changes made by the current transaction and ** synced to disk. The journal file still exists in the file-system ** though, and if a failure occurs at this point it will eventually ** be used as a hot-journal and the current transaction rolled back. ** ** This function finalizes the journal file, either by deleting, ** truncating or partially zeroing it, so that it cannot be used ** for hot-journal rollback. Once this is done the transaction is ** irrevocably committed. ** ** If an error occurs, an IO error code is returned and the pager ** moves into the error state. Otherwise, SQLITE_OK is returned. */ SQLITE_PRIVATE int sqlite3PagerCommitPhaseTwo(Pager *pPager){ int rc = SQLITE_OK; /* Return code */ /* This routine should not be called if a prior error has occurred. ** But if (due to a coding error elsewhere in the system) it does get ** called, just return the same error code without doing anything. 
*/ if( NEVER(pPager->errCode) ) return pPager->errCode; assert( pPager->eState==PAGER_WRITER_LOCKED || pPager->eState==PAGER_WRITER_FINISHED || (pagerUseWal(pPager) && pPager->eState==PAGER_WRITER_CACHEMOD) ); assert( assert_pager_state(pPager) ); /* An optimization. If the database was not actually modified during ** this transaction, the pager is running in exclusive-mode and is ** using persistent journals, then this function is a no-op. ** ** The start of the journal file currently contains a single journal ** header with the nRec field set to 0. If such a journal is used as ** a hot-journal during hot-journal rollback, 0 changes will be made ** to the database file. So there is no need to zero the journal ** header. Since the pager is in exclusive mode, there is no need ** to drop any locks either. */ if( pPager->eState==PAGER_WRITER_LOCKED && pPager->exclusiveMode && pPager->journalMode==PAGER_JOURNALMODE_PERSIST ){ assert( pPager->journalOff==JOURNAL_HDR_SZ(pPager) || !pPager->journalOff ); pPager->eState = PAGER_READER; return SQLITE_OK; } PAGERTRACE(("COMMIT %d\n", PAGERID(pPager))); pPager->iDataVersion++; rc = pager_end_transaction(pPager, pPager->setMaster, 1); return pager_error(pPager, rc); } /* ** If a write transaction is open, then all changes made within the ** transaction are reverted and the current write-transaction is closed. ** The pager falls back to PAGER_READER state if successful, or PAGER_ERROR ** state if an error occurs. ** ** If the pager is already in PAGER_ERROR state when this function is called, ** it returns Pager.errCode immediately. No work is performed in this case. ** ** Otherwise, in rollback mode, this function performs two functions: ** ** 1) It rolls back the journal file, restoring all database file and ** in-memory cache pages to the state they were in when the transaction ** was opened, and ** ** 2) It finalizes the journal file, so that it is not used for hot ** rollback at any point in the future. ** ** Finalization of the journal file (task 2) is only performed if the ** rollback is successful. ** ** In WAL mode, all cache-entries containing data modified within the ** current transaction are either expelled from the cache or reverted to ** their pre-transaction state by re-reading data from the database or ** WAL files. The WAL transaction is then closed. */ SQLITE_PRIVATE int sqlite3PagerRollback(Pager *pPager){ int rc = SQLITE_OK; /* Return code */ PAGERTRACE(("ROLLBACK %d\n", PAGERID(pPager))); /* PagerRollback() is a no-op if called in READER or OPEN state. If ** the pager is already in the ERROR state, the rollback is not ** attempted here. Instead, the error code is returned to the caller. */ assert( assert_pager_state(pPager) ); if( pPager->eState==PAGER_ERROR ) return pPager->errCode; if( pPager->eState<=PAGER_READER ) return SQLITE_OK; if( pagerUseWal(pPager) ){ int rc2; rc = sqlite3PagerSavepoint(pPager, SAVEPOINT_ROLLBACK, -1); rc2 = pager_end_transaction(pPager, pPager->setMaster, 0); if( rc==SQLITE_OK ) rc = rc2; }else if( !isOpen(pPager->jfd) || pPager->eState==PAGER_WRITER_LOCKED ){ int eState = pPager->eState; rc = pager_end_transaction(pPager, 0, 0); if( !MEMDB && eState>PAGER_WRITER_LOCKED ){ /* This can happen using journal_mode=off. Move the pager to the error ** state to indicate that the contents of the cache may not be trusted. ** Any active readers will get SQLITE_ABORT. 
*/ pPager->errCode = SQLITE_ABORT; pPager->eState = PAGER_ERROR; return rc; } }else{ rc = pager_playback(pPager, 0); } assert( pPager->eState==PAGER_READER || rc!=SQLITE_OK ); assert( rc==SQLITE_OK || rc==SQLITE_FULL || rc==SQLITE_CORRUPT || rc==SQLITE_NOMEM || (rc&0xFF)==SQLITE_IOERR || rc==SQLITE_CANTOPEN ); /* If an error occurs during a ROLLBACK, we can no longer trust the pager ** cache. So call pager_error() on the way out to make any error persistent. */ return pager_error(pPager, rc); } /* ** Return TRUE if the database file is opened read-only. Return FALSE ** if the database is (in theory) writable. */ SQLITE_PRIVATE u8 sqlite3PagerIsreadonly(Pager *pPager){ return pPager->readOnly; } #ifdef SQLITE_DEBUG /* ** Return the number of references to the pager. */ SQLITE_PRIVATE int sqlite3PagerRefcount(Pager *pPager){ return sqlite3PcacheRefCount(pPager->pPCache); } #endif /* ** Return the approximate number of bytes of memory currently ** used by the pager and its associated cache. */ SQLITE_PRIVATE int sqlite3PagerMemUsed(Pager *pPager){ int perPageSize = pPager->pageSize + pPager->nExtra + sizeof(PgHdr) + 5*sizeof(void*); return perPageSize*sqlite3PcachePagecount(pPager->pPCache) + sqlite3MallocSize(pPager) + pPager->pageSize; } /* ** Return the number of references to the specified page. */ SQLITE_PRIVATE int sqlite3PagerPageRefcount(DbPage *pPage){ return sqlite3PcachePageRefcount(pPage); } #ifdef SQLITE_TEST /* ** This routine is used for testing and analysis only. */ SQLITE_PRIVATE int *sqlite3PagerStats(Pager *pPager){ static int a[11]; a[0] = sqlite3PcacheRefCount(pPager->pPCache); a[1] = sqlite3PcachePagecount(pPager->pPCache); a[2] = sqlite3PcacheGetCachesize(pPager->pPCache); a[3] = pPager->eState==PAGER_OPEN ? -1 : (int) pPager->dbSize; a[4] = pPager->eState; a[5] = pPager->errCode; a[6] = pPager->aStat[PAGER_STAT_HIT]; a[7] = pPager->aStat[PAGER_STAT_MISS]; a[8] = 0; /* Used to be pPager->nOvfl */ a[9] = pPager->nRead; a[10] = pPager->aStat[PAGER_STAT_WRITE]; return a; } #endif /* ** Parameter eStat must be either SQLITE_DBSTATUS_CACHE_HIT or ** SQLITE_DBSTATUS_CACHE_MISS. Before returning, *pnVal is incremented by the ** current cache hit or miss count, according to the value of eStat. If the ** reset parameter is non-zero, the cache hit or miss count is zeroed before ** returning. */ SQLITE_PRIVATE void sqlite3PagerCacheStat(Pager *pPager, int eStat, int reset, int *pnVal){ assert( eStat==SQLITE_DBSTATUS_CACHE_HIT || eStat==SQLITE_DBSTATUS_CACHE_MISS || eStat==SQLITE_DBSTATUS_CACHE_WRITE ); assert( SQLITE_DBSTATUS_CACHE_HIT+1==SQLITE_DBSTATUS_CACHE_MISS ); assert( SQLITE_DBSTATUS_CACHE_HIT+2==SQLITE_DBSTATUS_CACHE_WRITE ); assert( PAGER_STAT_HIT==0 && PAGER_STAT_MISS==1 && PAGER_STAT_WRITE==2 ); *pnVal += pPager->aStat[eStat - SQLITE_DBSTATUS_CACHE_HIT]; if( reset ){ pPager->aStat[eStat - SQLITE_DBSTATUS_CACHE_HIT] = 0; } } /* ** Return true if this is an in-memory pager. */ SQLITE_PRIVATE int sqlite3PagerIsMemdb(Pager *pPager){ return MEMDB; } /* ** Check that there are at least nSavepoint savepoints open. If there are ** currently less than nSavepoints open, then open one or more savepoints ** to make up the difference. If the number of savepoints is already ** equal to nSavepoint, then this function is a no-op. ** ** If a memory allocation fails, SQLITE_NOMEM is returned. If an error ** occurs while opening the sub-journal file, then an IO error code is ** returned. Otherwise, SQLITE_OK. 
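**
** Hypothetical usage sketch (not from this file): the caller keeps n
** savepoints open and later rolls back to, or releases, savepoint i:
**
**   rc = sqlite3PagerOpenSavepoint(pPager, n);             /* open 0..n-1 */
**   ...
**   rc = sqlite3PagerSavepoint(pPager, SAVEPOINT_ROLLBACK, i);
**   rc = sqlite3PagerSavepoint(pPager, SAVEPOINT_RELEASE, i);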
*/ static SQLITE_NOINLINE int pagerOpenSavepoint(Pager *pPager, int nSavepoint){ int rc = SQLITE_OK; /* Return code */ int nCurrent = pPager->nSavepoint; /* Current number of savepoints */ int ii; /* Iterator variable */ PagerSavepoint *aNew; /* New Pager.aSavepoint array */ assert( pPager->eState>=PAGER_WRITER_LOCKED ); assert( assert_pager_state(pPager) ); assert( nSavepoint>nCurrent && pPager->useJournal );
/* Grow the Pager.aSavepoint array using realloc(). Return SQLITE_NOMEM ** if the allocation fails. Otherwise, zero the new portion in case a ** malloc failure occurs while populating it in the for(...) loop below. */ aNew = (PagerSavepoint *)sqlite3Realloc( pPager->aSavepoint, sizeof(PagerSavepoint)*nSavepoint ); if( !aNew ){ return SQLITE_NOMEM; } memset(&aNew[nCurrent], 0, (nSavepoint-nCurrent) * sizeof(PagerSavepoint)); pPager->aSavepoint = aNew;
/* Populate the PagerSavepoint structures just allocated. */ for(ii=nCurrent; ii<nSavepoint; ii++){ aNew[ii].nOrig = pPager->dbSize; if( isOpen(pPager->jfd) && pPager->journalOff>0 ){ aNew[ii].iOffset = pPager->journalOff; }else{ aNew[ii].iOffset = JOURNAL_HDR_SZ(pPager); } aNew[ii].iSubRec = pPager->nSubRec; aNew[ii].pInSavepoint = sqlite3BitvecCreate(pPager->dbSize); if( !aNew[ii].pInSavepoint ){ return SQLITE_NOMEM; } if( pagerUseWal(pPager) ){ sqlite3WalSavepoint(pPager->pWal, aNew[ii].aWalData); } pPager->nSavepoint = ii+1; } assert( pPager->nSavepoint==nSavepoint ); assertTruncateConstraint(pPager); return rc; }
SQLITE_PRIVATE int sqlite3PagerOpenSavepoint(Pager *pPager, int nSavepoint){ assert( pPager->eState>=PAGER_WRITER_LOCKED ); assert( assert_pager_state(pPager) ); if( nSavepoint>pPager->nSavepoint && pPager->useJournal ){ return pagerOpenSavepoint(pPager, nSavepoint); }else{ return SQLITE_OK; } }
/* ** This function is called to rollback or release (commit) a savepoint. ** The savepoint to release or rollback need not be the most recently ** created savepoint. ** ** Parameter op is always either SAVEPOINT_ROLLBACK or SAVEPOINT_RELEASE. ** If it is SAVEPOINT_RELEASE, then release and destroy the savepoint with ** index iSavepoint. If it is SAVEPOINT_ROLLBACK, then rollback all changes ** that have occurred since the specified savepoint was created. ** ** The savepoint to rollback or release is identified by parameter ** iSavepoint. A value of 0 means to operate on the outermost savepoint ** (the first created). A value of (Pager.nSavepoint-1) means operate ** on the most recently created savepoint. If iSavepoint is greater than ** (Pager.nSavepoint-1), then this function is a no-op. ** ** If a negative value is passed to this function, then the current ** transaction is rolled back. This is different to calling ** sqlite3PagerRollback() because this function does not terminate ** the transaction or unlock the database, it just restores the ** contents of the database to its original state. ** ** In any case, all savepoints with an index greater than iSavepoint ** are destroyed. If this is a release operation (op==SAVEPOINT_RELEASE), ** then savepoint iSavepoint is also destroyed. ** ** This function may return SQLITE_NOMEM if a memory allocation fails, ** or an IO error code if an IO error occurs while rolling back a ** savepoint. If no errors occur, SQLITE_OK is returned.
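**
** Worked example of the index arithmetic below (illustrative only): with
** Pager.nSavepoint==3,
**
**   sqlite3PagerSavepoint(pPager, SAVEPOINT_RELEASE,  1)   /* nNew==1 */
**   sqlite3PagerSavepoint(pPager, SAVEPOINT_ROLLBACK, 1)   /* nNew==2 */
**
** i.e. a RELEASE destroys savepoint iSavepoint itself, while a ROLLBACK
** keeps it open and only destroys the savepoints created after it.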
*/ SQLITE_PRIVATE int sqlite3PagerSavepoint(Pager *pPager, int op, int iSavepoint){ int rc = pPager->errCode; /* Return code */ assert( op==SAVEPOINT_RELEASE || op==SAVEPOINT_ROLLBACK ); assert( iSavepoint>=0 || op==SAVEPOINT_ROLLBACK );
if( rc==SQLITE_OK && iSavepoint<pPager->nSavepoint ){ int ii; /* Iterator variable */ int nNew; /* Number of remaining savepoints after this op. */
/* Figure out how many savepoints will still be active after this ** operation. Store this value in nNew. Then free resources associated ** with any savepoints that are destroyed by this operation. */ nNew = iSavepoint + (( op==SAVEPOINT_RELEASE ) ? 0 : 1); for(ii=nNew; ii<pPager->nSavepoint; ii++){ sqlite3BitvecDestroy(pPager->aSavepoint[ii].pInSavepoint); } pPager->nSavepoint = nNew;
/* If this is a release of the outermost savepoint, truncate ** the sub-journal to zero bytes in size. */ if( op==SAVEPOINT_RELEASE ){ if( nNew==0 && isOpen(pPager->sjfd) ){ /* Only truncate if it is an in-memory sub-journal. */ if( sqlite3IsMemJournal(pPager->sjfd) ){ rc = sqlite3OsTruncate(pPager->sjfd, 0); assert( rc==SQLITE_OK ); } pPager->nSubRec = 0; } }
/* Else this is a rollback operation, playback the specified savepoint. ** If this is a temp-file, it is possible that the journal file has ** not yet been opened. In this case there have been no changes to ** the database file, so the playback operation can be skipped. */ else if( pagerUseWal(pPager) || isOpen(pPager->jfd) ){ PagerSavepoint *pSavepoint = (nNew==0)?0:&pPager->aSavepoint[nNew-1]; rc = pagerPlaybackSavepoint(pPager, pSavepoint); assert(rc!=SQLITE_DONE); } } return rc; }
/* ** Return the full pathname of the database file. ** ** Except, if the pager is in-memory only, then return an empty string if ** nullIfMemDb is true. This routine is called with nullIfMemDb==1 when ** used to report the filename to the user, for compatibility with legacy ** behavior. But when the Btree needs to know the filename for matching to ** shared cache, it uses nullIfMemDb==0 so that in-memory databases can ** participate in shared-cache. */ SQLITE_PRIVATE const char *sqlite3PagerFilename(Pager *pPager, int nullIfMemDb){ return (nullIfMemDb && pPager->memDb) ? "" : pPager->zFilename; }
/* ** Return the VFS structure for the pager. */ SQLITE_PRIVATE const sqlite3_vfs *sqlite3PagerVfs(Pager *pPager){ return pPager->pVfs; }
/* ** Return the file handle for the database file associated ** with the pager. This might return NULL if the file has ** not yet been opened. */ SQLITE_PRIVATE sqlite3_file *sqlite3PagerFile(Pager *pPager){ return pPager->fd; }
/* ** Return the full pathname of the journal file. */ SQLITE_PRIVATE const char *sqlite3PagerJournalname(Pager *pPager){ return pPager->zJournal; }
/* ** Return true if fsync() calls are disabled for this pager. Return FALSE ** if fsync()s are executed normally. */ SQLITE_PRIVATE int sqlite3PagerNosync(Pager *pPager){ return pPager->noSync; }
#ifdef SQLITE_HAS_CODEC /* ** Set or retrieve the codec for this pager */ SQLITE_PRIVATE void sqlite3PagerSetCodec( Pager *pPager, void *(*xCodec)(void*,void*,Pgno,int), void (*xCodecSizeChng)(void*,int,int), void (*xCodecFree)(void*), void *pCodec ){ if( pPager->xCodecFree ) pPager->xCodecFree(pPager->pCodec); pPager->xCodec = pPager->memDb ?
0 : xCodec; pPager->xCodecSizeChng = xCodecSizeChng; pPager->xCodecFree = xCodecFree; pPager->pCodec = pCodec; pagerReportSize(pPager); } SQLITE_PRIVATE void *sqlite3PagerGetCodec(Pager *pPager){ return pPager->pCodec; } /* ** This function is called by the wal module when writing page content ** into the log file. ** ** This function returns a pointer to a buffer containing the encrypted ** page content. If a malloc fails, this function may return NULL. */ SQLITE_PRIVATE void *sqlite3PagerCodec(PgHdr *pPg){ void *aData = 0; CODEC2(pPg->pPager, pPg->pData, pPg->pgno, 6, return 0, aData); return aData; } /* ** Return the current pager state */ SQLITE_PRIVATE int sqlite3PagerState(Pager *pPager){ return pPager->eState; } #endif /* SQLITE_HAS_CODEC */ #ifndef SQLITE_OMIT_AUTOVACUUM /* ** Move the page pPg to location pgno in the file. ** ** There must be no references to the page previously located at ** pgno (which we call pPgOld) though that page is allowed to be ** in cache. If the page previously located at pgno is not already ** in the rollback journal, it is not put there by by this routine. ** ** References to the page pPg remain valid. Updating any ** meta-data associated with pPg (i.e. data stored in the nExtra bytes ** allocated along with the page) is the responsibility of the caller. ** ** A transaction must be active when this routine is called. It used to be ** required that a statement transaction was not active, but this restriction ** has been removed (CREATE INDEX needs to move a page when a statement ** transaction is active). ** ** If the fourth argument, isCommit, is non-zero, then this page is being ** moved as part of a database reorganization just before the transaction ** is being committed. In this case, it is guaranteed that the database page ** pPg refers to will not be written to again within this transaction. ** ** This function may return SQLITE_NOMEM or an IO error code if an error ** occurs. Otherwise, it returns SQLITE_OK. */ SQLITE_PRIVATE int sqlite3PagerMovepage(Pager *pPager, DbPage *pPg, Pgno pgno, int isCommit){ PgHdr *pPgOld; /* The page being overwritten. */ Pgno needSyncPgno = 0; /* Old value of pPg->pgno, if sync is required */ int rc; /* Return code */ Pgno origPgno; /* The original page number */ assert( pPg->nRef>0 ); assert( pPager->eState==PAGER_WRITER_CACHEMOD || pPager->eState==PAGER_WRITER_DBMOD ); assert( assert_pager_state(pPager) ); /* In order to be able to rollback, an in-memory database must journal ** the page we are moving from. */ if( MEMDB ){ rc = sqlite3PagerWrite(pPg); if( rc ) return rc; } /* If the page being moved is dirty and has not been saved by the latest ** savepoint, then save the current contents of the page into the ** sub-journal now. This is required to handle the following scenario: ** ** BEGIN; ** ** SAVEPOINT one; ** ** ROLLBACK TO one; ** ** If page X were not written to the sub-journal here, it would not ** be possible to restore its contents when the "ROLLBACK TO one" ** statement were is processed. ** ** subjournalPage() may need to allocate space to store pPg->pgno into ** one or more savepoint bitvecs. This is the reason this function ** may return SQLITE_NOMEM. 
*/ if( (pPg->flags & PGHDR_DIRTY)!=0 && SQLITE_OK!=(rc = subjournalPageIfRequired(pPg)) ){ return rc; } PAGERTRACE(("MOVE %d page %d (needSync=%d) moves to %d\n", PAGERID(pPager), pPg->pgno, (pPg->flags&PGHDR_NEED_SYNC)?1:0, pgno)); IOTRACE(("MOVE %p %d %d\n", pPager, pPg->pgno, pgno)) /* If the journal needs to be sync()ed before page pPg->pgno can ** be written to, store pPg->pgno in local variable needSyncPgno. ** ** If the isCommit flag is set, there is no need to remember that ** the journal needs to be sync()ed before database page pPg->pgno ** can be written to. The caller has already promised not to write to it. */ if( (pPg->flags&PGHDR_NEED_SYNC) && !isCommit ){ needSyncPgno = pPg->pgno; assert( pPager->journalMode==PAGER_JOURNALMODE_OFF || pageInJournal(pPager, pPg) || pPg->pgno>pPager->dbOrigSize ); assert( pPg->flags&PGHDR_DIRTY ); } /* If the cache contains a page with page-number pgno, remove it ** from its hash chain. Also, if the PGHDR_NEED_SYNC flag was set for ** page pgno before the 'move' operation, it needs to be retained ** for the page moved there. */ pPg->flags &= ~PGHDR_NEED_SYNC; pPgOld = sqlite3PagerLookup(pPager, pgno); assert( !pPgOld || pPgOld->nRef==1 ); if( pPgOld ){ pPg->flags |= (pPgOld->flags&PGHDR_NEED_SYNC); if( MEMDB ){ /* Do not discard pages from an in-memory database since we might ** need to rollback later. Just move the page out of the way. */ sqlite3PcacheMove(pPgOld, pPager->dbSize+1); }else{ sqlite3PcacheDrop(pPgOld); } } origPgno = pPg->pgno; sqlite3PcacheMove(pPg, pgno); sqlite3PcacheMakeDirty(pPg); /* For an in-memory database, make sure the original page continues ** to exist, in case the transaction needs to roll back. Use pPgOld ** as the original page since it has already been allocated. */ if( MEMDB ){ assert( pPgOld ); sqlite3PcacheMove(pPgOld, origPgno); sqlite3PagerUnrefNotNull(pPgOld); } if( needSyncPgno ){ /* If needSyncPgno is non-zero, then the journal file needs to be ** sync()ed before any data is written to database file page needSyncPgno. ** Currently, no such page exists in the page-cache and the ** "is journaled" bitvec flag has been set. This needs to be remedied by ** loading the page into the pager-cache and setting the PGHDR_NEED_SYNC ** flag. ** ** If the attempt to load the page into the page-cache fails, (due ** to a malloc() or IO failure), clear the bit in the pInJournal[] ** array. Otherwise, if the page is loaded and written again in ** this transaction, it may be written to the database file before ** it is synced into the journal file. This way, it may end up in ** the journal file twice, but that is not a problem. */ PgHdr *pPgHdr; rc = sqlite3PagerGet(pPager, needSyncPgno, &pPgHdr); if( rc!=SQLITE_OK ){ if( needSyncPgno<=pPager->dbOrigSize ){ assert( pPager->pTmpSpace!=0 ); sqlite3BitvecClear(pPager->pInJournal, needSyncPgno, pPager->pTmpSpace); } return rc; } pPgHdr->flags |= PGHDR_NEED_SYNC; sqlite3PcacheMakeDirty(pPgHdr); sqlite3PagerUnrefNotNull(pPgHdr); } return SQLITE_OK; } #endif /* ** The page handle passed as the first argument refers to a dirty page ** with a page number other than iNew. This function changes the page's ** page number to iNew and sets the value of the PgHdr.flags field to ** the value passed as the third parameter. */ SQLITE_PRIVATE void sqlite3PagerRekey(DbPage *pPg, Pgno iNew, u16 flags){ assert( pPg->pgno!=iNew ); pPg->flags = flags; sqlite3PcacheMove(pPg, iNew); } /* ** Return a pointer to the data for the specified page. 
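**
** Minimal sketch of how a caller might use the data and extra pointers
** (hypothetical, not from this file):
**
**   DbPage *pPg;
**   if( sqlite3PagerGet(pPager, pgno, &pPg)==SQLITE_OK ){
**     void *aData  = sqlite3PagerGetData(pPg);    /* pageSize bytes */
**     void *aExtra = sqlite3PagerGetExtra(pPg);   /* Pager.nExtra bytes */
**     /* ... */
**     sqlite3PagerUnref(pPg);
**   }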
*/ SQLITE_PRIVATE void *sqlite3PagerGetData(DbPage *pPg){ assert( pPg->nRef>0 || pPg->pPager->memDb ); return pPg->pData; } /* ** Return a pointer to the Pager.nExtra bytes of "extra" space ** allocated along with the specified page. */ SQLITE_PRIVATE void *sqlite3PagerGetExtra(DbPage *pPg){ return pPg->pExtra; } /* ** Get/set the locking-mode for this pager. Parameter eMode must be one ** of PAGER_LOCKINGMODE_QUERY, PAGER_LOCKINGMODE_NORMAL or ** PAGER_LOCKINGMODE_EXCLUSIVE. If the parameter is not _QUERY, then ** the locking-mode is set to the value specified. ** ** The returned value is either PAGER_LOCKINGMODE_NORMAL or ** PAGER_LOCKINGMODE_EXCLUSIVE, indicating the current (possibly updated) ** locking-mode. */ SQLITE_PRIVATE int sqlite3PagerLockingMode(Pager *pPager, int eMode){ assert( eMode==PAGER_LOCKINGMODE_QUERY || eMode==PAGER_LOCKINGMODE_NORMAL || eMode==PAGER_LOCKINGMODE_EXCLUSIVE ); assert( PAGER_LOCKINGMODE_QUERY<0 ); assert( PAGER_LOCKINGMODE_NORMAL>=0 && PAGER_LOCKINGMODE_EXCLUSIVE>=0 ); assert( pPager->exclusiveMode || 0==sqlite3WalHeapMemory(pPager->pWal) ); if( eMode>=0 && !pPager->tempFile && !sqlite3WalHeapMemory(pPager->pWal) ){ pPager->exclusiveMode = (u8)eMode; } return (int)pPager->exclusiveMode; } /* ** Set the journal-mode for this pager. Parameter eMode must be one of: ** ** PAGER_JOURNALMODE_DELETE ** PAGER_JOURNALMODE_TRUNCATE ** PAGER_JOURNALMODE_PERSIST ** PAGER_JOURNALMODE_OFF ** PAGER_JOURNALMODE_MEMORY ** PAGER_JOURNALMODE_WAL ** ** The journalmode is set to the value specified if the change is allowed. ** The change may be disallowed for the following reasons: ** ** * An in-memory database can only have its journal_mode set to _OFF ** or _MEMORY. ** ** * Temporary databases cannot have _WAL journalmode. ** ** The returned indicate the current (possibly updated) journal-mode. */ SQLITE_PRIVATE int sqlite3PagerSetJournalMode(Pager *pPager, int eMode){ u8 eOld = pPager->journalMode; /* Prior journalmode */ #ifdef SQLITE_DEBUG /* The print_pager_state() routine is intended to be used by the debugger ** only. We invoke it once here to suppress a compiler warning. */ print_pager_state(pPager); #endif /* The eMode parameter is always valid */ assert( eMode==PAGER_JOURNALMODE_DELETE || eMode==PAGER_JOURNALMODE_TRUNCATE || eMode==PAGER_JOURNALMODE_PERSIST || eMode==PAGER_JOURNALMODE_OFF || eMode==PAGER_JOURNALMODE_WAL || eMode==PAGER_JOURNALMODE_MEMORY ); /* This routine is only called from the OP_JournalMode opcode, and ** the logic there will never allow a temporary file to be changed ** to WAL mode. */ assert( pPager->tempFile==0 || eMode!=PAGER_JOURNALMODE_WAL ); /* Do allow the journalmode of an in-memory database to be set to ** anything other than MEMORY or OFF */ if( MEMDB ){ assert( eOld==PAGER_JOURNALMODE_MEMORY || eOld==PAGER_JOURNALMODE_OFF ); if( eMode!=PAGER_JOURNALMODE_MEMORY && eMode!=PAGER_JOURNALMODE_OFF ){ eMode = eOld; } } if( eMode!=eOld ){ /* Change the journal mode. */ assert( pPager->eState!=PAGER_ERROR ); pPager->journalMode = (u8)eMode; /* When transistioning from TRUNCATE or PERSIST to any other journal ** mode except WAL, unless the pager is in locking_mode=exclusive mode, ** delete the journal file. 
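**
** Informal sketch of the bit test used below, based on the numeric
** encoding of the journal modes (see the asserts that follow):
**
**   (eOld  & 5)==1   <=>  eOld  is TRUNCATE or PERSIST (a journal file
**                         is left on disk after commit)
**   (eMode & 1)==0   <=>  eMode is DELETE, MEMORY or OFF (no persistent
**                         journal file, and not WAL)
**
** so the journal file is deleted exactly when moving from a mode that
** leaves a journal on disk to one that must not have one.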
*/ assert( (PAGER_JOURNALMODE_TRUNCATE & 5)==1 ); assert( (PAGER_JOURNALMODE_PERSIST & 5)==1 ); assert( (PAGER_JOURNALMODE_DELETE & 5)==0 ); assert( (PAGER_JOURNALMODE_MEMORY & 5)==4 ); assert( (PAGER_JOURNALMODE_OFF & 5)==0 ); assert( (PAGER_JOURNALMODE_WAL & 5)==5 ); assert( isOpen(pPager->fd) || pPager->exclusiveMode ); if( !pPager->exclusiveMode && (eOld & 5)==1 && (eMode & 1)==0 ){ /* In this case we would like to delete the journal file. If it is ** not possible, then that is not a problem. Deleting the journal file ** here is an optimization only. ** ** Before deleting the journal file, obtain a RESERVED lock on the ** database file. This ensures that the journal file is not deleted ** while it is in use by some other client. */ sqlite3OsClose(pPager->jfd); if( pPager->eLock>=RESERVED_LOCK ){ sqlite3OsDelete(pPager->pVfs, pPager->zJournal, 0); }else{ int rc = SQLITE_OK; int state = pPager->eState; assert( state==PAGER_OPEN || state==PAGER_READER ); if( state==PAGER_OPEN ){ rc = sqlite3PagerSharedLock(pPager); } if( pPager->eState==PAGER_READER ){ assert( rc==SQLITE_OK ); rc = pagerLockDb(pPager, RESERVED_LOCK); } if( rc==SQLITE_OK ){ sqlite3OsDelete(pPager->pVfs, pPager->zJournal, 0); } if( rc==SQLITE_OK && state==PAGER_READER ){ pagerUnlockDb(pPager, SHARED_LOCK); }else if( state==PAGER_OPEN ){ pager_unlock(pPager); } assert( state==pPager->eState ); } }else if( eMode==PAGER_JOURNALMODE_OFF ){ sqlite3OsClose(pPager->jfd); } } /* Return the new journal mode */ return (int)pPager->journalMode; } /* ** Return the current journal mode. */ SQLITE_PRIVATE int sqlite3PagerGetJournalMode(Pager *pPager){ return (int)pPager->journalMode; } /* ** Return TRUE if the pager is in a state where it is OK to change the ** journalmode. Journalmode changes can only happen when the database ** is unmodified. */ SQLITE_PRIVATE int sqlite3PagerOkToChangeJournalMode(Pager *pPager){ assert( assert_pager_state(pPager) ); if( pPager->eState>=PAGER_WRITER_CACHEMOD ) return 0; if( NEVER(isOpen(pPager->jfd) && pPager->journalOff>0) ) return 0; return 1; } /* ** Get/set the size-limit used for persistent journal files. ** ** Setting the size limit to -1 means no limit is enforced. ** An attempt to set a limit smaller than -1 is a no-op. */ SQLITE_PRIVATE i64 sqlite3PagerJournalSizeLimit(Pager *pPager, i64 iLimit){ if( iLimit>=-1 ){ pPager->journalSizeLimit = iLimit; sqlite3WalLimit(pPager->pWal, iLimit); } return pPager->journalSizeLimit; } /* ** Return a pointer to the pPager->pBackup variable. The backup module ** in backup.c maintains the content of this variable. This module ** uses it opaquely as an argument to sqlite3BackupRestart() and ** sqlite3BackupUpdate() only. */ SQLITE_PRIVATE sqlite3_backup **sqlite3PagerBackupPtr(Pager *pPager){ return &pPager->pBackup; } #ifndef SQLITE_OMIT_VACUUM /* ** Unless this is an in-memory or temporary database, clear the pager cache. */ SQLITE_PRIVATE void sqlite3PagerClearCache(Pager *pPager){ if( !MEMDB && pPager->tempFile==0 ) pager_reset(pPager); } #endif #ifndef SQLITE_OMIT_WAL /* ** This function is called when the user invokes "PRAGMA wal_checkpoint", ** "PRAGMA wal_blocking_checkpoint" or calls the sqlite3_wal_checkpoint() ** or wal_blocking_checkpoint() API functions. ** ** Parameter eMode is one of SQLITE_CHECKPOINT_PASSIVE, FULL or RESTART. */ SQLITE_PRIVATE int sqlite3PagerCheckpoint(Pager *pPager, int eMode, int *pnLog, int *pnCkpt){ int rc = SQLITE_OK; if( pPager->pWal ){ rc = sqlite3WalCheckpoint(pPager->pWal, eMode, (eMode==SQLITE_CHECKPOINT_PASSIVE ? 
0 : pPager->xBusyHandler), pPager->pBusyHandlerArg, pPager->ckptSyncFlags, pPager->pageSize, (u8 *)pPager->pTmpSpace, pnLog, pnCkpt ); } return rc; } SQLITE_PRIVATE int sqlite3PagerWalCallback(Pager *pPager){ return sqlite3WalCallback(pPager->pWal); } /* ** Return true if the underlying VFS for the given pager supports the ** primitives necessary for write-ahead logging. */ SQLITE_PRIVATE int sqlite3PagerWalSupported(Pager *pPager){ const sqlite3_io_methods *pMethods = pPager->fd->pMethods; return pPager->exclusiveMode || (pMethods->iVersion>=2 && pMethods->xShmMap); } /* ** Attempt to take an exclusive lock on the database file. If a PENDING lock ** is obtained instead, immediately release it. */ static int pagerExclusiveLock(Pager *pPager){ int rc; /* Return code */ assert( pPager->eLock==SHARED_LOCK || pPager->eLock==EXCLUSIVE_LOCK ); rc = pagerLockDb(pPager, EXCLUSIVE_LOCK); if( rc!=SQLITE_OK ){ /* If the attempt to grab the exclusive lock failed, release the ** pending lock that may have been obtained instead. */ pagerUnlockDb(pPager, SHARED_LOCK); } return rc; } /* ** Call sqlite3WalOpen() to open the WAL handle. If the pager is in ** exclusive-locking mode when this function is called, take an EXCLUSIVE ** lock on the database file and use heap-memory to store the wal-index ** in. Otherwise, use the normal shared-memory. */ static int pagerOpenWal(Pager *pPager){ int rc = SQLITE_OK; assert( pPager->pWal==0 && pPager->tempFile==0 ); assert( pPager->eLock==SHARED_LOCK || pPager->eLock==EXCLUSIVE_LOCK ); /* If the pager is already in exclusive-mode, the WAL module will use ** heap-memory for the wal-index instead of the VFS shared-memory ** implementation. Take the exclusive lock now, before opening the WAL ** file, to make sure this is safe. */ if( pPager->exclusiveMode ){ rc = pagerExclusiveLock(pPager); } /* Open the connection to the log file. If this operation fails, ** (e.g. due to malloc() failure), return an error code. */ if( rc==SQLITE_OK ){ rc = sqlite3WalOpen(pPager->pVfs, pPager->fd, pPager->zWal, pPager->exclusiveMode, pPager->journalSizeLimit, &pPager->pWal ); } pagerFixMaplimit(pPager); return rc; } /* ** The caller must be holding a SHARED lock on the database file to call ** this function. ** ** If the pager passed as the first argument is open on a real database ** file (not a temp file or an in-memory database), and the WAL file ** is not already open, make an attempt to open it now. If successful, ** return SQLITE_OK. If an error occurs or the VFS used by the pager does ** not support the xShmXXX() methods, return an error code. *pbOpen is ** not modified in either case. ** ** If the pager is open on a temp-file (or in-memory database), or if ** the WAL file is already open, set *pbOpen to 1 and return SQLITE_OK ** without doing anything. 
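**
** In outline (an informal restatement of the logic implemented just
** below, not a separate interface):
**
**   if( real database file && WAL not already open ){
**     if( !sqlite3PagerWalSupported(pPager) ) return SQLITE_CANTOPEN;
**     close the rollback journal, open the WAL, journalMode = WAL;
**   }else{
**     *pbOpen = 1;   /* temp file, in-memory db, or WAL already open */
**   }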
*/ SQLITE_PRIVATE int sqlite3PagerOpenWal( Pager *pPager, /* Pager object */ int *pbOpen /* OUT: Set to true if call is a no-op */ ){ int rc = SQLITE_OK; /* Return code */ assert( assert_pager_state(pPager) ); assert( pPager->eState==PAGER_OPEN || pbOpen ); assert( pPager->eState==PAGER_READER || !pbOpen ); assert( pbOpen==0 || *pbOpen==0 ); assert( pbOpen!=0 || (!pPager->tempFile && !pPager->pWal) ); if( !pPager->tempFile && !pPager->pWal ){ if( !sqlite3PagerWalSupported(pPager) ) return SQLITE_CANTOPEN; /* Close any rollback journal previously open */ sqlite3OsClose(pPager->jfd); rc = pagerOpenWal(pPager); if( rc==SQLITE_OK ){ pPager->journalMode = PAGER_JOURNALMODE_WAL; pPager->eState = PAGER_OPEN; } }else{ *pbOpen = 1; } return rc; } /* ** This function is called to close the connection to the log file prior ** to switching from WAL to rollback mode. ** ** Before closing the log file, this function attempts to take an ** EXCLUSIVE lock on the database file. If this cannot be obtained, an ** error (SQLITE_BUSY) is returned and the log connection is not closed. ** If successful, the EXCLUSIVE lock is not released before returning. */ SQLITE_PRIVATE int sqlite3PagerCloseWal(Pager *pPager){ int rc = SQLITE_OK; assert( pPager->journalMode==PAGER_JOURNALMODE_WAL ); /* If the log file is not already open, but does exist in the file-system, ** it may need to be checkpointed before the connection can switch to ** rollback mode. Open it now so this can happen. */ if( !pPager->pWal ){ int logexists = 0; rc = pagerLockDb(pPager, SHARED_LOCK); if( rc==SQLITE_OK ){ rc = sqlite3OsAccess( pPager->pVfs, pPager->zWal, SQLITE_ACCESS_EXISTS, &logexists ); } if( rc==SQLITE_OK && logexists ){ rc = pagerOpenWal(pPager); } } /* Checkpoint and close the log. Because an EXCLUSIVE lock is held on ** the database file, the log and log-summary files will be deleted. */ if( rc==SQLITE_OK && pPager->pWal ){ rc = pagerExclusiveLock(pPager); if( rc==SQLITE_OK ){ rc = sqlite3WalClose(pPager->pWal, pPager->ckptSyncFlags, pPager->pageSize, (u8*)pPager->pTmpSpace); pPager->pWal = 0; pagerFixMaplimit(pPager); } } return rc; } #endif /* !SQLITE_OMIT_WAL */ #ifdef SQLITE_ENABLE_ZIPVFS /* ** A read-lock must be held on the pager when this function is called. If ** the pager is in WAL mode and the WAL file currently contains one or more ** frames, return the size in bytes of the page images stored within the ** WAL frames. Otherwise, if this is not a WAL database or the WAL file ** is empty, return 0. */ SQLITE_PRIVATE int sqlite3PagerWalFramesize(Pager *pPager){ assert( pPager->eState>=PAGER_READER ); return sqlite3WalFramesize(pPager->pWal); } #endif #endif /* SQLITE_OMIT_DISKIO */ /************** End of pager.c ***********************************************/ /************** Begin file wal.c *********************************************/ /* ** 2010 February 1 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** ** This file contains the implementation of a write-ahead log (WAL) used in ** "journal_mode=WAL" mode. ** ** WRITE-AHEAD LOG (WAL) FILE FORMAT ** ** A WAL file consists of a header followed by zero or more "frames". ** Each frame records the revised content of a single page from the ** database file. 
All changes to the database are recorded by writing ** frames into the WAL. Transactions commit when a frame is written that ** contains a commit marker. A single WAL can and usually does record ** multiple transactions. Periodically, the content of the WAL is ** transferred back into the database file in an operation called a ** "checkpoint". ** ** A single WAL file can be used multiple times. In other words, the ** WAL can fill up with frames and then be checkpointed and then new ** frames can overwrite the old ones. A WAL always grows from beginning ** toward the end. Checksums and counters attached to each frame are ** used to determine which frames within the WAL are valid and which ** are leftovers from prior checkpoints. ** ** The WAL header is 32 bytes in size and consists of the following eight ** big-endian 32-bit unsigned integer values: ** ** 0: Magic number. 0x377f0682 or 0x377f0683 ** 4: File format version. Currently 3007000 ** 8: Database page size. Example: 1024 ** 12: Checkpoint sequence number ** 16: Salt-1, random integer incremented with each checkpoint ** 20: Salt-2, a different random integer changing with each ckpt ** 24: Checksum-1 (first part of checksum for first 24 bytes of header). ** 28: Checksum-2 (second part of checksum for first 24 bytes of header). ** ** Immediately following the wal-header are zero or more frames. Each ** frame consists of a 24-byte frame-header followed by a bytes ** of page data. The frame-header is six big-endian 32-bit unsigned ** integer values, as follows: ** ** 0: Page number. ** 4: For commit records, the size of the database image in pages ** after the commit. For all other records, zero. ** 8: Salt-1 (copied from the header) ** 12: Salt-2 (copied from the header) ** 16: Checksum-1. ** 20: Checksum-2. ** ** A frame is considered valid if and only if the following conditions are ** true: ** ** (1) The salt-1 and salt-2 values in the frame-header match ** salt values in the wal-header ** ** (2) The checksum values in the final 8 bytes of the frame-header ** exactly match the checksum computed consecutively on the ** WAL header and the first 8 bytes and the content of all frames ** up to and including the current frame. ** ** The checksum is computed using 32-bit big-endian integers if the ** magic number in the first 4 bytes of the WAL is 0x377f0683 and it ** is computed using little-endian if the magic number is 0x377f0682. ** The checksum values are always stored in the frame header in a ** big-endian format regardless of which byte order is used to compute ** the checksum. The checksum is computed by interpreting the input as ** an even number of unsigned 32-bit integers: x[0] through x[N]. The ** algorithm used for the checksum is as follows: ** ** for i from 0 to n-1 step 2: ** s0 += x[i] + s1; ** s1 += x[i+1] + s0; ** endfor ** ** Note that s0 and s1 are both weighted checksums using fibonacci weights ** in reverse order (the largest fibonacci weight occurs on the first element ** of the sequence being summed.) The s1 value spans all 32-bit ** terms of the sequence whereas s0 omits the final term. ** ** On a checkpoint, the WAL is first VFS.xSync-ed, then valid content of the ** WAL is transferred into the database, then the database is VFS.xSync-ed. ** The VFS.xSync operations serve as write barriers - all writes launched ** before the xSync must complete before any write that launches after the ** xSync begins. ** ** After each checkpoint, the salt-1 value is incremented and the salt-2 ** value is randomized. 
This prevents old and new frames in the WAL from ** being considered valid at the same time and being checkpointing together ** following a crash. ** ** READER ALGORITHM ** ** To read a page from the database (call it page number P), a reader ** first checks the WAL to see if it contains page P. If so, then the ** last valid instance of page P that is a followed by a commit frame ** or is a commit frame itself becomes the value read. If the WAL ** contains no copies of page P that are valid and which are a commit ** frame or are followed by a commit frame, then page P is read from ** the database file. ** ** To start a read transaction, the reader records the index of the last ** valid frame in the WAL. The reader uses this recorded "mxFrame" value ** for all subsequent read operations. New transactions can be appended ** to the WAL, but as long as the reader uses its original mxFrame value ** and ignores the newly appended content, it will see a consistent snapshot ** of the database from a single point in time. This technique allows ** multiple concurrent readers to view different versions of the database ** content simultaneously. ** ** The reader algorithm in the previous paragraphs works correctly, but ** because frames for page P can appear anywhere within the WAL, the ** reader has to scan the entire WAL looking for page P frames. If the ** WAL is large (multiple megabytes is typical) that scan can be slow, ** and read performance suffers. To overcome this problem, a separate ** data structure called the wal-index is maintained to expedite the ** search for frames of a particular page. ** ** WAL-INDEX FORMAT ** ** Conceptually, the wal-index is shared memory, though VFS implementations ** might choose to implement the wal-index using a mmapped file. Because ** the wal-index is shared memory, SQLite does not support journal_mode=WAL ** on a network filesystem. All users of the database must be able to ** share memory. ** ** The wal-index is transient. After a crash, the wal-index can (and should ** be) reconstructed from the original WAL file. In fact, the VFS is required ** to either truncate or zero the header of the wal-index when the last ** connection to it closes. Because the wal-index is transient, it can ** use an architecture-specific format; it does not have to be cross-platform. ** Hence, unlike the database and WAL file formats which store all values ** as big endian, the wal-index can store multi-byte values in the native ** byte order of the host computer. ** ** The purpose of the wal-index is to answer this question quickly: Given ** a page number P and a maximum frame index M, return the index of the ** last frame in the wal before frame M for page P in the WAL, or return ** NULL if there are no frames for page P in the WAL prior to M. ** ** The wal-index consists of a header region, followed by an one or ** more index blocks. ** ** The wal-index header contains the total number of frames within the WAL ** in the mxFrame field. ** ** Each index block except for the first contains information on ** HASHTABLE_NPAGE frames. The first index block contains information on ** HASHTABLE_NPAGE_ONE frames. The values of HASHTABLE_NPAGE_ONE and ** HASHTABLE_NPAGE are selected so that together the wal-index header and ** first index block are the same size as all other index blocks in the ** wal-index. 
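**
** As an illustrative example of that layout (using the HASHTABLE_NPAGE
** and HASHTABLE_NPAGE_ONE constants defined later in this file, and the
** same arithmetic as the walFramePage() routine): frames 1 through
** HASHTABLE_NPAGE_ONE are described by index block 0, the next
** HASHTABLE_NPAGE frames by index block 1, and so on, roughly
**
**   iBlock = (iFrame + HASHTABLE_NPAGE - HASHTABLE_NPAGE_ONE - 1)
**                                                   / HASHTABLE_NPAGE;
**
** Block 0 covers slightly fewer frames than the others precisely because
** the wal-index header occupies the start of the first 32KB page.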
** ** Each index block contains two sections, a page-mapping that contains the ** database page number associated with each wal frame, and a hash-table ** that allows readers to query an index block for a specific page number. ** The page-mapping is an array of HASHTABLE_NPAGE (or HASHTABLE_NPAGE_ONE ** for the first index block) 32-bit page numbers. The first entry in the ** first index-block contains the database page number corresponding to the ** first frame in the WAL file. The first entry in the second index block ** in the WAL file corresponds to the (HASHTABLE_NPAGE_ONE+1)th frame in ** the log, and so on. ** ** The last index block in a wal-index usually contains less than the full ** complement of HASHTABLE_NPAGE (or HASHTABLE_NPAGE_ONE) page-numbers, ** depending on the contents of the WAL file. This does not change the ** allocated size of the page-mapping array - the page-mapping array merely ** contains unused entries. ** ** Even without using the hash table, the last frame for page P ** can be found by scanning the page-mapping sections of each index block ** starting with the last index block and moving toward the first, and ** within each index block, starting at the end and moving toward the ** beginning. The first entry that equals P corresponds to the frame ** holding the content for that page. ** ** The hash table consists of HASHTABLE_NSLOT 16-bit unsigned integers. ** HASHTABLE_NSLOT = 2*HASHTABLE_NPAGE, and there is one entry in the ** hash table for each page number in the mapping section, so the hash ** table is never more than half full. The expected number of collisions ** prior to finding a match is 1. Each entry of the hash table is an ** 1-based index of an entry in the mapping section of the same ** index block. Let K be the 1-based index of the largest entry in ** the mapping section. (For index blocks other than the last, K will ** always be exactly HASHTABLE_NPAGE (4096) and for the last index block ** K will be (mxFrame%HASHTABLE_NPAGE).) Unused slots of the hash table ** contain a value of 0. ** ** To look for page P in the hash table, first compute a hash iKey on ** P as follows: ** ** iKey = (P * 383) % HASHTABLE_NSLOT ** ** Then start scanning entries of the hash table, starting with iKey ** (wrapping around to the beginning when the end of the hash table is ** reached) until an unused hash slot is found. Let the first unused slot ** be at index iUnused. (iUnused might be less than iKey if there was ** wrap-around.) Because the hash table is never more than half full, ** the search is guaranteed to eventually hit an unused entry. Let ** iMax be the value between iKey and iUnused, closest to iUnused, ** where aHash[iMax]==P. If there is no iMax entry (if there exists ** no hash slot such that aHash[i]==p) then page P is not in the ** current index block. Otherwise the iMax-th mapping entry of the ** current index block corresponds to the last entry that references ** page P. ** ** A hash search begins with the last index block and moves toward the ** first index block, looking for entries corresponding to page P. On ** average, only two or three slots in each index block need to be ** examined in order to either find the last entry for page P, or to ** establish that no such entry exists in the block. Each index block ** holds over 4000 entries. So two or three index blocks are sufficient ** to cover a typical 10 megabyte WAL file, assuming 1K pages. 
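**
** A sketch of the lookup within one index block, using the walHash() and
** walNextHash() helpers defined later in this file (illustrative only;
** the real search also honours the reader's maximum frame M):
**
**   int iKey, iMax = 0;
**   for(iKey=walHash(P); aHash[iKey]; iKey=walNextHash(iKey)){
**     if( aPgno[aHash[iKey]]==P ) iMax = aHash[iKey];  /* remember last hit */
**   }
**   /* iMax==0: page P has no frame in this block.  Otherwise the     */
**   /* iMax-th mapping entry is the latest frame for P in this block. */
**
** Because the table is never more than half full, the loop always stops
** at an empty slot after only a few probes per block.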
8 or 10 ** comparisons (on average) suffice to either locate a frame in the ** WAL or to establish that the frame does not exist in the WAL. This ** is much faster than scanning the entire 10MB WAL. ** ** Note that entries are added in order of increasing K. Hence, one ** reader might be using some value K0 and a second reader that started ** at a later time (after additional transactions were added to the WAL ** and to the wal-index) might be using a different value K1, where K1>K0. ** Both readers can use the same hash table and mapping section to get ** the correct result. There may be entries in the hash table with ** K>K0 but to the first reader, those entries will appear to be unused ** slots in the hash table and so the first reader will get an answer as ** if no values greater than K0 had ever been inserted into the hash table ** in the first place - which is what reader one wants. Meanwhile, the ** second reader using K1 will see additional values that were inserted ** later, which is exactly what reader two wants. ** ** When a rollback occurs, the value of K is decreased. Hash table entries ** that correspond to frames greater than the new K value are removed ** from the hash table at this point. */ #ifndef SQLITE_OMIT_WAL /* #include "wal.h" */ /* ** Trace output macros */ #if defined(SQLITE_TEST) && defined(SQLITE_DEBUG) SQLITE_PRIVATE int sqlite3WalTrace = 0; # define WALTRACE(X) if(sqlite3WalTrace) sqlite3DebugPrintf X #else # define WALTRACE(X) #endif /* ** The maximum (and only) versions of the wal and wal-index formats ** that may be interpreted by this version of SQLite. ** ** If a client begins recovering a WAL file and finds that (a) the checksum ** values in the wal-header are correct and (b) the version field is not ** WAL_MAX_VERSION, recovery fails and SQLite returns SQLITE_CANTOPEN. ** ** Similarly, if a client successfully reads a wal-index header (i.e. the ** checksum test is successful) and finds that the version field is not ** WALINDEX_MAX_VERSION, then no read-transaction is opened and SQLite ** returns SQLITE_CANTOPEN. */ #define WAL_MAX_VERSION 3007000 #define WALINDEX_MAX_VERSION 3007000 /* ** Indices of various locking bytes. WAL_NREADER is the number ** of available reader locks and should be at least 3. */ #define WAL_WRITE_LOCK 0 #define WAL_ALL_BUT_WRITE 1 #define WAL_CKPT_LOCK 1 #define WAL_RECOVER_LOCK 2 #define WAL_READ_LOCK(I) (3+(I)) #define WAL_NREADER (SQLITE_SHM_NLOCK-3) /* Object declarations */ typedef struct WalIndexHdr WalIndexHdr; typedef struct WalIterator WalIterator; typedef struct WalCkptInfo WalCkptInfo; /* ** The following object holds a copy of the wal-index header content. ** ** The actual header in the wal-index consists of two copies of this ** object. ** ** The szPage value can be any power of 2 between 512 and 32768, inclusive. ** Or it can be 1 to represent a 65536-byte page. The latter case was ** added in 3.7.1 when support for 64K pages was added. */ struct WalIndexHdr { u32 iVersion; /* Wal-index version */ u32 unused; /* Unused (padding) field */ u32 iChange; /* Counter incremented each transaction */ u8 isInit; /* 1 when initialized */ u8 bigEndCksum; /* True if checksums in WAL are big-endian */ u16 szPage; /* Database page size in bytes. 
1==64K */ u32 mxFrame; /* Index of last valid frame in the WAL */ u32 nPage; /* Size of database in pages */ u32 aFrameCksum[2]; /* Checksum of last frame in log */ u32 aSalt[2]; /* Two salt values copied from WAL header */ u32 aCksum[2]; /* Checksum over all prior fields */ }; /* ** A copy of the following object occurs in the wal-index immediately ** following the second copy of the WalIndexHdr. This object stores ** information used by checkpoint. ** ** nBackfill is the number of frames in the WAL that have been written ** back into the database. (We call the act of moving content from WAL to ** database "backfilling".) The nBackfill number is never greater than ** WalIndexHdr.mxFrame. nBackfill can only be increased by threads ** holding the WAL_CKPT_LOCK lock (which includes a recovery thread). ** However, a WAL_WRITE_LOCK thread can move the value of nBackfill from ** mxFrame back to zero when the WAL is reset. ** ** There is one entry in aReadMark[] for each reader lock. If a reader ** holds read-lock K, then the value in aReadMark[K] is no greater than ** the mxFrame for that reader. The value READMARK_NOT_USED (0xffffffff) ** for any aReadMark[] means that entry is unused. aReadMark[0] is ** a special case; its value is never used and it exists as a place-holder ** to avoid having to offset aReadMark[] indexs by one. Readers holding ** WAL_READ_LOCK(0) always ignore the entire WAL and read all content ** directly from the database. ** ** The value of aReadMark[K] may only be changed by a thread that ** is holding an exclusive lock on WAL_READ_LOCK(K). Thus, the value of ** aReadMark[K] cannot changed while there is a reader is using that mark ** since the reader will be holding a shared lock on WAL_READ_LOCK(K). ** ** The checkpointer may only transfer frames from WAL to database where ** the frame numbers are less than or equal to every aReadMark[] that is ** in use (that is, every aReadMark[j] for which there is a corresponding ** WAL_READ_LOCK(j)). New readers (usually) pick the aReadMark[] with the ** largest value and will increase an unused aReadMark[] to mxFrame if there ** is not already an aReadMark[] equal to mxFrame. The exception to the ** previous sentence is when nBackfill equals mxFrame (meaning that everything ** in the WAL has been backfilled into the database) then new readers ** will choose aReadMark[0] which has value 0 and hence such reader will ** get all their all content directly from the database file and ignore ** the WAL. ** ** Writers normally append new frames to the end of the WAL. However, ** if nBackfill equals mxFrame (meaning that all WAL content has been ** written back into the database) and if no readers are using the WAL ** (in other words, if there are no WAL_READ_LOCK(i) where i>0) then ** the writer will first "reset" the WAL back to the beginning and start ** writing new content beginning at frame 1. ** ** We assume that 32-bit loads are atomic and so no locks are needed in ** order to read from any aReadMark[] entries. */ struct WalCkptInfo { u32 nBackfill; /* Number of WAL frames backfilled into DB */ u32 aReadMark[WAL_NREADER]; /* Reader marks */ }; #define READMARK_NOT_USED 0xffffffff /* A block of WALINDEX_LOCK_RESERVED bytes beginning at ** WALINDEX_LOCK_OFFSET is reserved for locks. Since some systems ** only support mandatory file-locks, we do not read or write data ** from the region of the file on which locks are applied. 
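**
** For illustration, the beginning of the wal-index therefore has this
** shape (the WALINDEX_LOCK_OFFSET and WALINDEX_HDR_SIZE macros below
** compute exactly these boundaries):
**
**   [WalIndexHdr copy 1][WalIndexHdr copy 2][WalCkptInfo][lock bytes]
**
** and the page-mapping array of the first index block starts immediately
** after the WALINDEX_LOCK_RESERVED lock bytes.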
*/ #define WALINDEX_LOCK_OFFSET (sizeof(WalIndexHdr)*2 + sizeof(WalCkptInfo)) #define WALINDEX_LOCK_RESERVED 16 #define WALINDEX_HDR_SIZE (WALINDEX_LOCK_OFFSET+WALINDEX_LOCK_RESERVED) /* Size of header before each frame in wal */ #define WAL_FRAME_HDRSIZE 24 /* Size of write ahead log header, including checksum. */ /* #define WAL_HDRSIZE 24 */ #define WAL_HDRSIZE 32 /* WAL magic value. Either this value, or the same value with the least ** significant bit also set (WAL_MAGIC | 0x00000001) is stored in 32-bit ** big-endian format in the first 4 bytes of a WAL file. ** ** If the LSB is set, then the checksums for each frame within the WAL ** file are calculated by treating all data as an array of 32-bit ** big-endian words. Otherwise, they are calculated by interpreting ** all data as 32-bit little-endian words. */ #define WAL_MAGIC 0x377f0682 /* ** Return the offset of frame iFrame in the write-ahead log file, ** assuming a database page size of szPage bytes. The offset returned ** is to the start of the write-ahead log frame-header. */ #define walFrameOffset(iFrame, szPage) ( \ WAL_HDRSIZE + ((iFrame)-1)*(i64)((szPage)+WAL_FRAME_HDRSIZE) \ ) /* ** An open write-ahead log file is represented by an instance of the ** following object. */ struct Wal { sqlite3_vfs *pVfs; /* The VFS used to create pDbFd */ sqlite3_file *pDbFd; /* File handle for the database file */ sqlite3_file *pWalFd; /* File handle for WAL file */ u32 iCallback; /* Value to pass to log callback (or 0) */ i64 mxWalSize; /* Truncate WAL to this size upon reset */ int nWiData; /* Size of array apWiData */ int szFirstBlock; /* Size of first block written to WAL file */ volatile u32 **apWiData; /* Pointer to wal-index content in memory */ u32 szPage; /* Database page size */ i16 readLock; /* Which read lock is being held. -1 for none */ u8 syncFlags; /* Flags to use to sync header writes */ u8 exclusiveMode; /* Non-zero if connection is in exclusive mode */ u8 writeLock; /* True if in a write transaction */ u8 ckptLock; /* True if holding a checkpoint lock */ u8 readOnly; /* WAL_RDWR, WAL_RDONLY, or WAL_SHM_RDONLY */ u8 truncateOnCommit; /* True to truncate WAL file on commit */ u8 syncHeader; /* Fsync the WAL header if true */ u8 padToSectorBoundary; /* Pad transactions out to the next sector */ WalIndexHdr hdr; /* Wal-index header for current transaction */ const char *zWalName; /* Name of WAL file */ u32 nCkpt; /* Checkpoint sequence counter in the wal-header */ #ifdef SQLITE_DEBUG u8 lockError; /* True if a locking error has occurred */ #endif }; /* ** Candidate values for Wal.exclusiveMode. */ #define WAL_NORMAL_MODE 0 #define WAL_EXCLUSIVE_MODE 1 #define WAL_HEAPMEMORY_MODE 2 /* ** Possible values for WAL.readOnly */ #define WAL_RDWR 0 /* Normal read/write connection */ #define WAL_RDONLY 1 /* The WAL file is readonly */ #define WAL_SHM_RDONLY 2 /* The SHM file is readonly */ /* ** Each page of the wal-index mapping contains a hash-table made up of ** an array of HASHTABLE_NSLOT elements of the following type. */ typedef u16 ht_slot; /* ** This structure is used to implement an iterator that loops through ** all frames in the WAL in database page order. Where two or more frames ** correspond to the same database page, the iterator visits only the ** frame most recently written to the WAL (in other words, the frame with ** the largest index). 
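**
** An illustrative usage sketch (this mirrors, but is not a quote of, the
** way walCheckpoint() drives the iterator):
**
**   WalIterator *pIter = 0;
**   u32 iDbPage, iFrame;
**   rc = walIteratorInit(pWal, &pIter);
**   if( rc==SQLITE_OK ){
**     while( 0==walIteratorNext(pIter, &iDbPage, &iFrame) ){
**       /* copy the content of frame iFrame into page iDbPage of the db */
**     }
**     walIteratorFree(pIter);
**   }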
** ** The internals of this structure are only accessed by: ** ** walIteratorInit() - Create a new iterator, ** walIteratorNext() - Step an iterator, ** walIteratorFree() - Free an iterator. ** ** This functionality is used by the checkpoint code (see walCheckpoint()). */ struct WalIterator { int iPrior; /* Last result returned from the iterator */ int nSegment; /* Number of entries in aSegment[] */ struct WalSegment { int iNext; /* Next slot in aIndex[] not yet returned */ ht_slot *aIndex; /* i0, i1, i2... such that aPgno[iN] ascend */ u32 *aPgno; /* Array of page numbers. */ int nEntry; /* Nr. of entries in aPgno[] and aIndex[] */ int iZero; /* Frame number associated with aPgno[0] */ } aSegment[1]; /* One for every 32KB page in the wal-index */ }; /* ** Define the parameters of the hash tables in the wal-index file. There ** is a hash-table following every HASHTABLE_NPAGE page numbers in the ** wal-index. ** ** Changing any of these constants will alter the wal-index format and ** create incompatibilities. */ #define HASHTABLE_NPAGE 4096 /* Must be power of 2 */ #define HASHTABLE_HASH_1 383 /* Should be prime */ #define HASHTABLE_NSLOT (HASHTABLE_NPAGE*2) /* Must be a power of 2 */ /* ** The block of page numbers associated with the first hash-table in a ** wal-index is smaller than usual. This is so that there is a complete ** hash-table on each aligned 32KB page of the wal-index. */ #define HASHTABLE_NPAGE_ONE (HASHTABLE_NPAGE - (WALINDEX_HDR_SIZE/sizeof(u32))) /* The wal-index is divided into pages of WALINDEX_PGSZ bytes each. */ #define WALINDEX_PGSZ ( \ sizeof(ht_slot)*HASHTABLE_NSLOT + HASHTABLE_NPAGE*sizeof(u32) \ ) /* ** Obtain a pointer to the iPage'th page of the wal-index. The wal-index ** is broken into pages of WALINDEX_PGSZ bytes. Wal-index pages are ** numbered from zero. ** ** If this call is successful, *ppPage is set to point to the wal-index ** page and SQLITE_OK is returned. If an error (an OOM or VFS error) occurs, ** then an SQLite error code is returned and *ppPage is set to 0. */ static int walIndexPage(Wal *pWal, int iPage, volatile u32 **ppPage){ int rc = SQLITE_OK; /* Enlarge the pWal->apWiData[] array if required */ if( pWal->nWiData<=iPage ){ int nByte = sizeof(u32*)*(iPage+1); volatile u32 **apNew; apNew = (volatile u32 **)sqlite3_realloc64((void *)pWal->apWiData, nByte); if( !apNew ){ *ppPage = 0; return SQLITE_NOMEM; } memset((void*)&apNew[pWal->nWiData], 0, sizeof(u32*)*(iPage+1-pWal->nWiData)); pWal->apWiData = apNew; pWal->nWiData = iPage+1; } /* Request a pointer to the required page from the VFS */ if( pWal->apWiData[iPage]==0 ){ if( pWal->exclusiveMode==WAL_HEAPMEMORY_MODE ){ pWal->apWiData[iPage] = (u32 volatile *)sqlite3MallocZero(WALINDEX_PGSZ); if( !pWal->apWiData[iPage] ) rc = SQLITE_NOMEM; }else{ rc = sqlite3OsShmMap(pWal->pDbFd, iPage, WALINDEX_PGSZ, pWal->writeLock, (void volatile **)&pWal->apWiData[iPage] ); if( rc==SQLITE_READONLY ){ pWal->readOnly |= WAL_SHM_RDONLY; rc = SQLITE_OK; } } } *ppPage = pWal->apWiData[iPage]; assert( iPage==0 || *ppPage || rc!=SQLITE_OK ); return rc; } /* ** Return a pointer to the WalCkptInfo structure in the wal-index. */ static volatile WalCkptInfo *walCkptInfo(Wal *pWal){ assert( pWal->nWiData>0 && pWal->apWiData[0] ); return (volatile WalCkptInfo*)&(pWal->apWiData[0][sizeof(WalIndexHdr)/2]); } /* ** Return a pointer to the WalIndexHdr structure in the wal-index. 
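**
** A note on the pointer arithmetic used here and in walCkptInfo() above:
** apWiData[0] is an array of u32, so the sizeof(WalIndexHdr)/2 offset
** used by walCkptInfo() corresponds to 2*sizeof(WalIndexHdr) bytes.  It
** skips over both copies of the header described above and lands on the
** WalCkptInfo object that follows them; this routine simply returns the
** first header copy.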
*/ static volatile WalIndexHdr *walIndexHdr(Wal *pWal){ assert( pWal->nWiData>0 && pWal->apWiData[0] ); return (volatile WalIndexHdr*)pWal->apWiData[0]; } /* ** The argument to this macro must be of type u32. On a little-endian ** architecture, it returns the u32 value that results from interpreting ** the 4 bytes as a big-endian value. On a big-endian architecture, it ** returns the value that would be produced by interpreting the 4 bytes ** of the input value as a little-endian integer. */ #define BYTESWAP32(x) ( \ (((x)&0x000000FF)<<24) + (((x)&0x0000FF00)<<8) \ + (((x)&0x00FF0000)>>8) + (((x)&0xFF000000)>>24) \ ) /* ** Generate or extend an 8 byte checksum based on the data in ** array aByte[] and the initial values of aIn[0] and aIn[1] (or ** initial values of 0 and 0 if aIn==NULL). ** ** The checksum is written back into aOut[] before returning. ** ** nByte must be a positive multiple of 8. */ static void walChecksumBytes( int nativeCksum, /* True for native byte-order, false for non-native */ u8 *a, /* Content to be checksummed */ int nByte, /* Bytes of content in a[]. Must be a multiple of 8. */ const u32 *aIn, /* Initial checksum value input */ u32 *aOut /* OUT: Final checksum value output */ ){ u32 s1, s2; u32 *aData = (u32 *)a; u32 *aEnd = (u32 *)&a[nByte]; if( aIn ){ s1 = aIn[0]; s2 = aIn[1]; }else{ s1 = s2 = 0; } assert( nByte>=8 ); assert( (nByte&0x00000007)==0 ); if( nativeCksum ){ do { s1 += *aData++ + s2; s2 += *aData++ + s1; }while( aDataexclusiveMode!=WAL_HEAPMEMORY_MODE ){ sqlite3OsShmBarrier(pWal->pDbFd); } } /* ** Write the header information in pWal->hdr into the wal-index. ** ** The checksum on pWal->hdr is updated before it is written. */ static void walIndexWriteHdr(Wal *pWal){ volatile WalIndexHdr *aHdr = walIndexHdr(pWal); const int nCksum = offsetof(WalIndexHdr, aCksum); assert( pWal->writeLock ); pWal->hdr.isInit = 1; pWal->hdr.iVersion = WALINDEX_MAX_VERSION; walChecksumBytes(1, (u8*)&pWal->hdr, nCksum, 0, pWal->hdr.aCksum); memcpy((void*)&aHdr[1], (const void*)&pWal->hdr, sizeof(WalIndexHdr)); walShmBarrier(pWal); memcpy((void*)&aHdr[0], (const void*)&pWal->hdr, sizeof(WalIndexHdr)); } /* ** This function encodes a single frame header and writes it to a buffer ** supplied by the caller. A frame-header is made up of a series of ** 4-byte big-endian integers, as follows: ** ** 0: Page number. ** 4: For commit records, the size of the database image in pages ** after the commit. For all other records, zero. ** 8: Salt-1 (copied from the wal-header) ** 12: Salt-2 (copied from the wal-header) ** 16: Checksum-1. ** 20: Checksum-2. */ static void walEncodeFrame( Wal *pWal, /* The write-ahead log */ u32 iPage, /* Database page number for frame */ u32 nTruncate, /* New db size (or 0 for non-commit frames) */ u8 *aData, /* Pointer to page data */ u8 *aFrame /* OUT: Write encoded frame here */ ){ int nativeCksum; /* True for native byte-order checksums */ u32 *aCksum = pWal->hdr.aFrameCksum; assert( WAL_FRAME_HDRSIZE==24 ); sqlite3Put4byte(&aFrame[0], iPage); sqlite3Put4byte(&aFrame[4], nTruncate); memcpy(&aFrame[8], pWal->hdr.aSalt, 8); nativeCksum = (pWal->hdr.bigEndCksum==SQLITE_BIGENDIAN); walChecksumBytes(nativeCksum, aFrame, 8, aCksum, aCksum); walChecksumBytes(nativeCksum, aData, pWal->szPage, aCksum, aCksum); sqlite3Put4byte(&aFrame[16], aCksum[0]); sqlite3Put4byte(&aFrame[20], aCksum[1]); } /* ** Check to see if the frame with header in aFrame[] and content ** in aData[] is valid. If it is a valid frame, fill *piPage and ** *pnTruncate and return true. 
Return if the frame is not valid. */ static int walDecodeFrame( Wal *pWal, /* The write-ahead log */ u32 *piPage, /* OUT: Database page number for frame */ u32 *pnTruncate, /* OUT: New db size (or 0 if not commit) */ u8 *aData, /* Pointer to page data (for checksum) */ u8 *aFrame /* Frame data */ ){ int nativeCksum; /* True for native byte-order checksums */ u32 *aCksum = pWal->hdr.aFrameCksum; u32 pgno; /* Page number of the frame */ assert( WAL_FRAME_HDRSIZE==24 ); /* A frame is only valid if the salt values in the frame-header ** match the salt values in the wal-header. */ if( memcmp(&pWal->hdr.aSalt, &aFrame[8], 8)!=0 ){ return 0; } /* A frame is only valid if the page number is creater than zero. */ pgno = sqlite3Get4byte(&aFrame[0]); if( pgno==0 ){ return 0; } /* A frame is only valid if a checksum of the WAL header, ** all prior frams, the first 16 bytes of this frame-header, ** and the frame-data matches the checksum in the last 8 ** bytes of this frame-header. */ nativeCksum = (pWal->hdr.bigEndCksum==SQLITE_BIGENDIAN); walChecksumBytes(nativeCksum, aFrame, 8, aCksum, aCksum); walChecksumBytes(nativeCksum, aData, pWal->szPage, aCksum, aCksum); if( aCksum[0]!=sqlite3Get4byte(&aFrame[16]) || aCksum[1]!=sqlite3Get4byte(&aFrame[20]) ){ /* Checksum failed. */ return 0; } /* If we reach this point, the frame is valid. Return the page number ** and the new database size. */ *piPage = pgno; *pnTruncate = sqlite3Get4byte(&aFrame[4]); return 1; } #if defined(SQLITE_TEST) && defined(SQLITE_DEBUG) /* ** Names of locks. This routine is used to provide debugging output and is not ** a part of an ordinary build. */ static const char *walLockName(int lockIdx){ if( lockIdx==WAL_WRITE_LOCK ){ return "WRITE-LOCK"; }else if( lockIdx==WAL_CKPT_LOCK ){ return "CKPT-LOCK"; }else if( lockIdx==WAL_RECOVER_LOCK ){ return "RECOVER-LOCK"; }else{ static char zName[15]; sqlite3_snprintf(sizeof(zName), zName, "READ-LOCK[%d]", lockIdx-WAL_READ_LOCK(0)); return zName; } } #endif /*defined(SQLITE_TEST) || defined(SQLITE_DEBUG) */ /* ** Set or release locks on the WAL. Locks are either shared or exclusive. ** A lock cannot be moved directly between shared and exclusive - it must go ** through the unlocked state first. ** ** In locking_mode=EXCLUSIVE, all of these routines become no-ops. */ static int walLockShared(Wal *pWal, int lockIdx){ int rc; if( pWal->exclusiveMode ) return SQLITE_OK; rc = sqlite3OsShmLock(pWal->pDbFd, lockIdx, 1, SQLITE_SHM_LOCK | SQLITE_SHM_SHARED); WALTRACE(("WAL%p: acquire SHARED-%s %s\n", pWal, walLockName(lockIdx), rc ? "failed" : "ok")); VVA_ONLY( pWal->lockError = (u8)(rc!=SQLITE_OK && rc!=SQLITE_BUSY); ) return rc; } static void walUnlockShared(Wal *pWal, int lockIdx){ if( pWal->exclusiveMode ) return; (void)sqlite3OsShmLock(pWal->pDbFd, lockIdx, 1, SQLITE_SHM_UNLOCK | SQLITE_SHM_SHARED); WALTRACE(("WAL%p: release SHARED-%s\n", pWal, walLockName(lockIdx))); } static int walLockExclusive(Wal *pWal, int lockIdx, int n, int fBlock){ int rc; if( pWal->exclusiveMode ) return SQLITE_OK; if( fBlock ) sqlite3OsFileControl(pWal->pDbFd, SQLITE_FCNTL_WAL_BLOCK, 0); rc = sqlite3OsShmLock(pWal->pDbFd, lockIdx, n, SQLITE_SHM_LOCK | SQLITE_SHM_EXCLUSIVE); WALTRACE(("WAL%p: acquire EXCLUSIVE-%s cnt=%d %s\n", pWal, walLockName(lockIdx), n, rc ? 
"failed" : "ok")); VVA_ONLY( pWal->lockError = (u8)(rc!=SQLITE_OK && rc!=SQLITE_BUSY); ) return rc; } static void walUnlockExclusive(Wal *pWal, int lockIdx, int n){ if( pWal->exclusiveMode ) return; (void)sqlite3OsShmLock(pWal->pDbFd, lockIdx, n, SQLITE_SHM_UNLOCK | SQLITE_SHM_EXCLUSIVE); WALTRACE(("WAL%p: release EXCLUSIVE-%s cnt=%d\n", pWal, walLockName(lockIdx), n)); } /* ** Compute a hash on a page number. The resulting hash value must land ** between 0 and (HASHTABLE_NSLOT-1). The walHashNext() function advances ** the hash to the next value in the event of a collision. */ static int walHash(u32 iPage){ assert( iPage>0 ); assert( (HASHTABLE_NSLOT & (HASHTABLE_NSLOT-1))==0 ); return (iPage*HASHTABLE_HASH_1) & (HASHTABLE_NSLOT-1); } static int walNextHash(int iPriorHash){ return (iPriorHash+1)&(HASHTABLE_NSLOT-1); } /* ** Return pointers to the hash table and page number array stored on ** page iHash of the wal-index. The wal-index is broken into 32KB pages ** numbered starting from 0. ** ** Set output variable *paHash to point to the start of the hash table ** in the wal-index file. Set *piZero to one less than the frame ** number of the first frame indexed by this hash table. If a ** slot in the hash table is set to N, it refers to frame number ** (*piZero+N) in the log. ** ** Finally, set *paPgno so that *paPgno[1] is the page number of the ** first frame indexed by the hash table, frame (*piZero+1). */ static int walHashGet( Wal *pWal, /* WAL handle */ int iHash, /* Find the iHash'th table */ volatile ht_slot **paHash, /* OUT: Pointer to hash index */ volatile u32 **paPgno, /* OUT: Pointer to page number array */ u32 *piZero /* OUT: Frame associated with *paPgno[0] */ ){ int rc; /* Return code */ volatile u32 *aPgno; rc = walIndexPage(pWal, iHash, &aPgno); assert( rc==SQLITE_OK || iHash>0 ); if( rc==SQLITE_OK ){ u32 iZero; volatile ht_slot *aHash; aHash = (volatile ht_slot *)&aPgno[HASHTABLE_NPAGE]; if( iHash==0 ){ aPgno = &aPgno[WALINDEX_HDR_SIZE/sizeof(u32)]; iZero = 0; }else{ iZero = HASHTABLE_NPAGE_ONE + (iHash-1)*HASHTABLE_NPAGE; } *paPgno = &aPgno[-1]; *paHash = aHash; *piZero = iZero; } return rc; } /* ** Return the number of the wal-index page that contains the hash-table ** and page-number array that contain entries corresponding to WAL frame ** iFrame. The wal-index is broken up into 32KB pages. Wal-index pages ** are numbered starting from 0. */ static int walFramePage(u32 iFrame){ int iHash = (iFrame+HASHTABLE_NPAGE-HASHTABLE_NPAGE_ONE-1) / HASHTABLE_NPAGE; assert( (iHash==0 || iFrame>HASHTABLE_NPAGE_ONE) && (iHash>=1 || iFrame<=HASHTABLE_NPAGE_ONE) && (iHash<=1 || iFrame>(HASHTABLE_NPAGE_ONE+HASHTABLE_NPAGE)) && (iHash>=2 || iFrame<=HASHTABLE_NPAGE_ONE+HASHTABLE_NPAGE) && (iHash<=2 || iFrame>(HASHTABLE_NPAGE_ONE+2*HASHTABLE_NPAGE)) ); return iHash; } /* ** Return the page number associated with frame iFrame in this WAL. */ static u32 walFramePgno(Wal *pWal, u32 iFrame){ int iHash = walFramePage(iFrame); if( iHash==0 ){ return pWal->apWiData[0][WALINDEX_HDR_SIZE/sizeof(u32) + iFrame - 1]; } return pWal->apWiData[iHash][(iFrame-1-HASHTABLE_NPAGE_ONE)%HASHTABLE_NPAGE]; } /* ** Remove entries from the hash table that point to WAL slots greater ** than pWal->hdr.mxFrame. ** ** This function is called whenever pWal->hdr.mxFrame is decreased due ** to a rollback or savepoint. ** ** At most only the hash table containing pWal->hdr.mxFrame needs to be ** updated. 
Any later hash tables will be automatically cleared when ** pWal->hdr.mxFrame advances to the point where those hash tables are ** actually needed. */ static void walCleanupHash(Wal *pWal){ volatile ht_slot *aHash = 0; /* Pointer to hash table to clear */ volatile u32 *aPgno = 0; /* Page number array for hash table */ u32 iZero = 0; /* frame == (aHash[x]+iZero) */ int iLimit = 0; /* Zero values greater than this */ int nByte; /* Number of bytes to zero in aPgno[] */ int i; /* Used to iterate through aHash[] */ assert( pWal->writeLock ); testcase( pWal->hdr.mxFrame==HASHTABLE_NPAGE_ONE-1 ); testcase( pWal->hdr.mxFrame==HASHTABLE_NPAGE_ONE ); testcase( pWal->hdr.mxFrame==HASHTABLE_NPAGE_ONE+1 ); if( pWal->hdr.mxFrame==0 ) return; /* Obtain pointers to the hash-table and page-number array containing ** the entry that corresponds to frame pWal->hdr.mxFrame. It is guaranteed ** that the page said hash-table and array reside on is already mapped. */ assert( pWal->nWiData>walFramePage(pWal->hdr.mxFrame) ); assert( pWal->apWiData[walFramePage(pWal->hdr.mxFrame)] ); walHashGet(pWal, walFramePage(pWal->hdr.mxFrame), &aHash, &aPgno, &iZero); /* Zero all hash-table entries that correspond to frame numbers greater ** than pWal->hdr.mxFrame. */ iLimit = pWal->hdr.mxFrame - iZero; assert( iLimit>0 ); for(i=0; iiLimit ){ aHash[i] = 0; } } /* Zero the entries in the aPgno array that correspond to frames with ** frame numbers greater than pWal->hdr.mxFrame. */ nByte = (int)((char *)aHash - (char *)&aPgno[iLimit+1]); memset((void *)&aPgno[iLimit+1], 0, nByte); #ifdef SQLITE_ENABLE_EXPENSIVE_ASSERT /* Verify that the every entry in the mapping region is still reachable ** via the hash table even after the cleanup. */ if( iLimit ){ int j; /* Loop counter */ int iKey; /* Hash key */ for(j=1; j<=iLimit; j++){ for(iKey=walHash(aPgno[j]); aHash[iKey]; iKey=walNextHash(iKey)){ if( aHash[iKey]==j ) break; } assert( aHash[iKey]==j ); } } #endif /* SQLITE_ENABLE_EXPENSIVE_ASSERT */ } /* ** Set an entry in the wal-index that will map database page number ** pPage into WAL frame iFrame. */ static int walIndexAppend(Wal *pWal, u32 iFrame, u32 iPage){ int rc; /* Return code */ u32 iZero = 0; /* One less than frame number of aPgno[1] */ volatile u32 *aPgno = 0; /* Page number array */ volatile ht_slot *aHash = 0; /* Hash table */ rc = walHashGet(pWal, walFramePage(iFrame), &aHash, &aPgno, &iZero); /* Assuming the wal-index file was successfully mapped, populate the ** page number array and hash table entry. */ if( rc==SQLITE_OK ){ int iKey; /* Hash table key */ int idx; /* Value to write to hash-table slot */ int nCollide; /* Number of hash collisions */ idx = iFrame - iZero; assert( idx <= HASHTABLE_NSLOT/2 + 1 ); /* If this is the first entry to be added to this hash-table, zero the ** entire hash table and aPgno[] array before proceeding. */ if( idx==1 ){ int nByte = (int)((u8 *)&aHash[HASHTABLE_NSLOT] - (u8 *)&aPgno[1]); memset((void*)&aPgno[1], 0, nByte); } /* If the entry in aPgno[] is already set, then the previous writer ** must have exited unexpectedly in the middle of a transaction (after ** writing one or more dirty pages to the WAL to free up memory). ** Remove the remnants of that writers uncommitted transaction from ** the hash-table before writing any new entries. */ if( aPgno[idx] ){ walCleanupHash(pWal); assert( !aPgno[idx] ); } /* Write the aPgno[] array entry and the hash-table slot. 
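**
** The probe loop below starts at walHash(iPage) and advances with
** walNextHash() until it finds an empty slot for the new entry.  The
** nCollide budget is initialised to idx because a well-formed table for
** this block holds only idx-1 entries, so a probe can encounter at most
** idx-1 occupied slots before reaching a free one; exhausting the budget
** therefore signals a corrupt wal-index rather than bad luck.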
*/ nCollide = idx; for(iKey=walHash(iPage); aHash[iKey]; iKey=walNextHash(iKey)){ if( (nCollide--)==0 ) return SQLITE_CORRUPT_BKPT; } aPgno[idx] = iPage; aHash[iKey] = (ht_slot)idx; #ifdef SQLITE_ENABLE_EXPENSIVE_ASSERT /* Verify that the number of entries in the hash table exactly equals ** the number of entries in the mapping region. */ { int i; /* Loop counter */ int nEntry = 0; /* Number of entries in the hash table */ for(i=0; ickptLock==1 || pWal->ckptLock==0 ); assert( WAL_ALL_BUT_WRITE==WAL_WRITE_LOCK+1 ); assert( WAL_CKPT_LOCK==WAL_ALL_BUT_WRITE ); assert( pWal->writeLock ); iLock = WAL_ALL_BUT_WRITE + pWal->ckptLock; nLock = SQLITE_SHM_NLOCK - iLock; rc = walLockExclusive(pWal, iLock, nLock, 0); if( rc ){ return rc; } WALTRACE(("WAL%p: recovery begin...\n", pWal)); memset(&pWal->hdr, 0, sizeof(WalIndexHdr)); rc = sqlite3OsFileSize(pWal->pWalFd, &nSize); if( rc!=SQLITE_OK ){ goto recovery_error; } if( nSize>WAL_HDRSIZE ){ u8 aBuf[WAL_HDRSIZE]; /* Buffer to load WAL header into */ u8 *aFrame = 0; /* Malloc'd buffer to load entire frame */ int szFrame; /* Number of bytes in buffer aFrame[] */ u8 *aData; /* Pointer to data part of aFrame buffer */ int iFrame; /* Index of last frame read */ i64 iOffset; /* Next offset to read from log file */ int szPage; /* Page size according to the log */ u32 magic; /* Magic value read from WAL header */ u32 version; /* Magic value read from WAL header */ int isValid; /* True if this frame is valid */ /* Read in the WAL header. */ rc = sqlite3OsRead(pWal->pWalFd, aBuf, WAL_HDRSIZE, 0); if( rc!=SQLITE_OK ){ goto recovery_error; } /* If the database page size is not a power of two, or is greater than ** SQLITE_MAX_PAGE_SIZE, conclude that the WAL file contains no valid ** data. Similarly, if the 'magic' value is invalid, ignore the whole ** WAL file. */ magic = sqlite3Get4byte(&aBuf[0]); szPage = sqlite3Get4byte(&aBuf[8]); if( (magic&0xFFFFFFFE)!=WAL_MAGIC || szPage&(szPage-1) || szPage>SQLITE_MAX_PAGE_SIZE || szPage<512 ){ goto finished; } pWal->hdr.bigEndCksum = (u8)(magic&0x00000001); pWal->szPage = szPage; pWal->nCkpt = sqlite3Get4byte(&aBuf[12]); memcpy(&pWal->hdr.aSalt, &aBuf[16], 8); /* Verify that the WAL header checksum is correct */ walChecksumBytes(pWal->hdr.bigEndCksum==SQLITE_BIGENDIAN, aBuf, WAL_HDRSIZE-2*4, 0, pWal->hdr.aFrameCksum ); if( pWal->hdr.aFrameCksum[0]!=sqlite3Get4byte(&aBuf[24]) || pWal->hdr.aFrameCksum[1]!=sqlite3Get4byte(&aBuf[28]) ){ goto finished; } /* Verify that the version number on the WAL format is one that ** are able to understand */ version = sqlite3Get4byte(&aBuf[4]); if( version!=WAL_MAX_VERSION ){ rc = SQLITE_CANTOPEN_BKPT; goto finished; } /* Malloc a buffer to read frames into. */ szFrame = szPage + WAL_FRAME_HDRSIZE; aFrame = (u8 *)sqlite3_malloc64(szFrame); if( !aFrame ){ rc = SQLITE_NOMEM; goto recovery_error; } aData = &aFrame[WAL_FRAME_HDRSIZE]; /* Read all frames from the log file. */ iFrame = 0; for(iOffset=WAL_HDRSIZE; (iOffset+szFrame)<=nSize; iOffset+=szFrame){ u32 pgno; /* Database page number for frame */ u32 nTruncate; /* dbsize field from frame header */ /* Read and decode the next log frame. */ iFrame++; rc = sqlite3OsRead(pWal->pWalFd, aFrame, szFrame, iOffset); if( rc!=SQLITE_OK ) break; isValid = walDecodeFrame(pWal, &pgno, &nTruncate, aData, aFrame); if( !isValid ) break; rc = walIndexAppend(pWal, iFrame, pgno); if( rc!=SQLITE_OK ) break; /* If nTruncate is non-zero, this is a commit record. 
*/ if( nTruncate ){ pWal->hdr.mxFrame = iFrame; pWal->hdr.nPage = nTruncate; pWal->hdr.szPage = (u16)((szPage&0xff00) | (szPage>>16)); testcase( szPage<=32768 ); testcase( szPage>=65536 ); aFrameCksum[0] = pWal->hdr.aFrameCksum[0]; aFrameCksum[1] = pWal->hdr.aFrameCksum[1]; } } sqlite3_free(aFrame); } finished: if( rc==SQLITE_OK ){ volatile WalCkptInfo *pInfo; int i; pWal->hdr.aFrameCksum[0] = aFrameCksum[0]; pWal->hdr.aFrameCksum[1] = aFrameCksum[1]; walIndexWriteHdr(pWal); /* Reset the checkpoint-header. This is safe because this thread is ** currently holding locks that exclude all other readers, writers and ** checkpointers. */ pInfo = walCkptInfo(pWal); pInfo->nBackfill = 0; pInfo->aReadMark[0] = 0; for(i=1; iaReadMark[i] = READMARK_NOT_USED; if( pWal->hdr.mxFrame ) pInfo->aReadMark[1] = pWal->hdr.mxFrame; /* If more than one frame was recovered from the log file, report an ** event via sqlite3_log(). This is to help with identifying performance ** problems caused by applications routinely shutting down without ** checkpointing the log file. */ if( pWal->hdr.nPage ){ sqlite3_log(SQLITE_NOTICE_RECOVER_WAL, "recovered %d frames from WAL file %s", pWal->hdr.mxFrame, pWal->zWalName ); } } recovery_error: WALTRACE(("WAL%p: recovery %s\n", pWal, rc ? "failed" : "ok")); walUnlockExclusive(pWal, iLock, nLock); return rc; } /* ** Close an open wal-index. */ static void walIndexClose(Wal *pWal, int isDelete){ if( pWal->exclusiveMode==WAL_HEAPMEMORY_MODE ){ int i; for(i=0; inWiData; i++){ sqlite3_free((void *)pWal->apWiData[i]); pWal->apWiData[i] = 0; } }else{ sqlite3OsShmUnmap(pWal->pDbFd, isDelete); } } /* ** Open a connection to the WAL file zWalName. The database file must ** already be opened on connection pDbFd. The buffer that zWalName points ** to must remain valid for the lifetime of the returned Wal* handle. ** ** A SHARED lock should be held on the database file when this function ** is called. The purpose of this SHARED lock is to prevent any other ** client from unlinking the WAL or wal-index file. If another process ** were to do this just after this client opened one of these files, the ** system would be badly broken. ** ** If the log file is successfully opened, SQLITE_OK is returned and ** *ppWal is set to point to a new WAL handle. If an error occurs, ** an SQLite error code is returned and *ppWal is left unmodified. */ SQLITE_PRIVATE int sqlite3WalOpen( sqlite3_vfs *pVfs, /* vfs module to open wal and wal-index */ sqlite3_file *pDbFd, /* The open database file */ const char *zWalName, /* Name of the WAL file */ int bNoShm, /* True to run in heap-memory mode */ i64 mxWalSize, /* Truncate WAL to this size on reset */ Wal **ppWal /* OUT: Allocated Wal handle */ ){ int rc; /* Return Code */ Wal *pRet; /* Object to allocate and return */ int flags; /* Flags passed to OsOpen() */ assert( zWalName && zWalName[0] ); assert( pDbFd ); /* In the amalgamation, the os_unix.c and os_win.c source files come before ** this source file. Verify that the #defines of the locking byte offsets ** in os_unix.c and os_win.c agree with the WALINDEX_LOCK_OFFSET value. */ #ifdef WIN_SHM_BASE assert( WIN_SHM_BASE==WALINDEX_LOCK_OFFSET ); #endif #ifdef UNIX_SHM_BASE assert( UNIX_SHM_BASE==WALINDEX_LOCK_OFFSET ); #endif /* Allocate an instance of struct Wal to return. 
*/ *ppWal = 0; pRet = (Wal*)sqlite3MallocZero(sizeof(Wal) + pVfs->szOsFile); if( !pRet ){ return SQLITE_NOMEM; } pRet->pVfs = pVfs; pRet->pWalFd = (sqlite3_file *)&pRet[1]; pRet->pDbFd = pDbFd; pRet->readLock = -1; pRet->mxWalSize = mxWalSize; pRet->zWalName = zWalName; pRet->syncHeader = 1; pRet->padToSectorBoundary = 1; pRet->exclusiveMode = (bNoShm ? WAL_HEAPMEMORY_MODE: WAL_NORMAL_MODE); /* Open file handle on the write-ahead log file. */ flags = (SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE|SQLITE_OPEN_WAL); rc = sqlite3OsOpen(pVfs, zWalName, pRet->pWalFd, flags, &flags); if( rc==SQLITE_OK && flags&SQLITE_OPEN_READONLY ){ pRet->readOnly = WAL_RDONLY; } if( rc!=SQLITE_OK ){ walIndexClose(pRet, 0); sqlite3OsClose(pRet->pWalFd); sqlite3_free(pRet); }else{ int iDC = sqlite3OsDeviceCharacteristics(pDbFd); if( iDC & SQLITE_IOCAP_SEQUENTIAL ){ pRet->syncHeader = 0; } if( iDC & SQLITE_IOCAP_POWERSAFE_OVERWRITE ){ pRet->padToSectorBoundary = 0; } *ppWal = pRet; WALTRACE(("WAL%d: opened\n", pRet)); } return rc; } /* ** Change the size to which the WAL file is trucated on each reset. */ SQLITE_PRIVATE void sqlite3WalLimit(Wal *pWal, i64 iLimit){ if( pWal ) pWal->mxWalSize = iLimit; } /* ** Find the smallest page number out of all pages held in the WAL that ** has not been returned by any prior invocation of this method on the ** same WalIterator object. Write into *piFrame the frame index where ** that page was last written into the WAL. Write into *piPage the page ** number. ** ** Return 0 on success. If there are no pages in the WAL with a page ** number larger than *piPage, then return 1. */ static int walIteratorNext( WalIterator *p, /* Iterator */ u32 *piPage, /* OUT: The page number of the next page */ u32 *piFrame /* OUT: Wal frame index of next page */ ){ u32 iMin; /* Result pgno must be greater than iMin */ u32 iRet = 0xFFFFFFFF; /* 0xffffffff is never a valid page number */ int i; /* For looping through segments */ iMin = p->iPrior; assert( iMin<0xffffffff ); for(i=p->nSegment-1; i>=0; i--){ struct WalSegment *pSegment = &p->aSegment[i]; while( pSegment->iNextnEntry ){ u32 iPg = pSegment->aPgno[pSegment->aIndex[pSegment->iNext]]; if( iPg>iMin ){ if( iPgiZero + pSegment->aIndex[pSegment->iNext]; } break; } pSegment->iNext++; } } *piPage = p->iPrior = iRet; return (iRet==0xFFFFFFFF); } /* ** This function merges two sorted lists into a single sorted list. ** ** aLeft[] and aRight[] are arrays of indices. The sort key is ** aContent[aLeft[]] and aContent[aRight[]]. Upon entry, the following ** is guaranteed for all J0 && nRight>0 ); while( iRight=nRight || aContent[aLeft[iLeft]]=nLeft || aContent[aLeft[iLeft]]>dbpage ); assert( iRight>=nRight || aContent[aRight[iRight]]>dbpage ); } *paRight = aLeft; *pnRight = iOut; memcpy(aLeft, aTmp, sizeof(aTmp[0])*iOut); } /* ** Sort the elements in list aList using aContent[] as the sort key. ** Remove elements with duplicate keys, preferring to keep the ** larger aList[] values. ** ** The aList[] entries are indices into aContent[]. 
The values in ** aList[] are to be sorted so that for all J0 ); assert( HASHTABLE_NPAGE==(1<<(ArraySize(aSub)-1)) ); for(iList=0; iListaList && p->nList<=(1<aList==&aList[iList&~((2<aList, p->nList, &aMerge, &nMerge, aBuffer); } aSub[iSub].aList = aMerge; aSub[iSub].nList = nMerge; } for(iSub++; iSubnList<=(1<aList==&aList[nList&~((2<aList, p->nList, &aMerge, &nMerge, aBuffer); } } assert( aMerge==aList ); *pnList = nMerge; #ifdef SQLITE_DEBUG { int i; for(i=1; i<*pnList; i++){ assert( aContent[aList[i]] > aContent[aList[i-1]] ); } } #endif } /* ** Free an iterator allocated by walIteratorInit(). */ static void walIteratorFree(WalIterator *p){ sqlite3_free(p); } /* ** Construct a WalInterator object that can be used to loop over all ** pages in the WAL in ascending order. The caller must hold the checkpoint ** lock. ** ** On success, make *pp point to the newly allocated WalInterator object ** return SQLITE_OK. Otherwise, return an error code. If this routine ** returns an error, the value of *pp is undefined. ** ** The calling routine should invoke walIteratorFree() to destroy the ** WalIterator object when it has finished with it. */ static int walIteratorInit(Wal *pWal, WalIterator **pp){ WalIterator *p; /* Return value */ int nSegment; /* Number of segments to merge */ u32 iLast; /* Last frame in log */ int nByte; /* Number of bytes to allocate */ int i; /* Iterator variable */ ht_slot *aTmp; /* Temp space used by merge-sort */ int rc = SQLITE_OK; /* Return Code */ /* This routine only runs while holding the checkpoint lock. And ** it only runs if there is actually content in the log (mxFrame>0). */ assert( pWal->ckptLock && pWal->hdr.mxFrame>0 ); iLast = pWal->hdr.mxFrame; /* Allocate space for the WalIterator object. */ nSegment = walFramePage(iLast) + 1; nByte = sizeof(WalIterator) + (nSegment-1)*sizeof(struct WalSegment) + iLast*sizeof(ht_slot); p = (WalIterator *)sqlite3_malloc64(nByte); if( !p ){ return SQLITE_NOMEM; } memset(p, 0, nByte); p->nSegment = nSegment; /* Allocate temporary space used by the merge-sort routine. This block ** of memory will be freed before this function returns. */ aTmp = (ht_slot *)sqlite3_malloc64( sizeof(ht_slot) * (iLast>HASHTABLE_NPAGE?HASHTABLE_NPAGE:iLast) ); if( !aTmp ){ rc = SQLITE_NOMEM; } for(i=0; rc==SQLITE_OK && iaSegment[p->nSegment])[iZero]; iZero++; for(j=0; jaSegment[i].iZero = iZero; p->aSegment[i].nEntry = nEntry; p->aSegment[i].aIndex = aIndex; p->aSegment[i].aPgno = (u32 *)aPgno; } } sqlite3_free(aTmp); if( rc!=SQLITE_OK ){ walIteratorFree(p); } *pp = p; return rc; } /* ** Attempt to obtain the exclusive WAL lock defined by parameters lockIdx and ** n. If the attempt fails and parameter xBusy is not NULL, then it is a ** busy-handler function. Invoke it and retry the lock until either the ** lock is successfully obtained or the busy-handler returns 0. */ static int walBusyLock( Wal *pWal, /* WAL connection */ int (*xBusy)(void*), /* Function to call when busy */ void *pBusyArg, /* Context argument for xBusyHandler */ int lockIdx, /* Offset of first byte to lock */ int n /* Number of bytes to lock */ ){ int rc; do { rc = walLockExclusive(pWal, lockIdx, n, 0); }while( xBusy && rc==SQLITE_BUSY && xBusy(pBusyArg) ); return rc; } /* ** The cache of the wal-index header must be valid to call this function. ** Return the page-size in bytes used by the database. 
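**
** (Illustrative note, inferred from the store and load expressions used
** in this file: hdr.szPage holds the page size packed into 16 bits as
** (szPage & 0xff00)|(szPage >> 16), so a few sample values round-trip
** as follows:
**
**     real page size     stored u16     value decoded below
**            512            0x0200              512
**           4096            0x1000             4096
**          65536            0x0001            65536
**
** The 65536 special case exists because that size does not fit in a
** 16-bit field.)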
*/ static int walPagesize(Wal *pWal){ return (pWal->hdr.szPage&0xfe00) + ((pWal->hdr.szPage&0x0001)<<16); } /* ** The following is guaranteed when this function is called: ** ** a) the WRITER lock is held, ** b) the entire log file has been checkpointed, and ** c) any existing readers are reading exclusively from the database ** file - there are no readers that may attempt to read a frame from ** the log file. ** ** This function updates the shared-memory structures so that the next ** client to write to the database (which may be this one) does so by ** writing frames into the start of the log file. ** ** The value of parameter salt1 is used as the aSalt[1] value in the ** new wal-index header. It should be passed a pseudo-random value (i.e. ** one obtained from sqlite3_randomness()). */ static void walRestartHdr(Wal *pWal, u32 salt1){ volatile WalCkptInfo *pInfo = walCkptInfo(pWal); int i; /* Loop counter */ u32 *aSalt = pWal->hdr.aSalt; /* Big-endian salt values */ pWal->nCkpt++; pWal->hdr.mxFrame = 0; sqlite3Put4byte((u8*)&aSalt[0], 1 + sqlite3Get4byte((u8*)&aSalt[0])); memcpy(&pWal->hdr.aSalt[1], &salt1, 4); walIndexWriteHdr(pWal); pInfo->nBackfill = 0; pInfo->aReadMark[1] = 0; for(i=2; iaReadMark[i] = READMARK_NOT_USED; assert( pInfo->aReadMark[0]==0 ); } /* ** Copy as much content as we can from the WAL back into the database file ** in response to an sqlite3_wal_checkpoint() request or the equivalent. ** ** The amount of information copies from WAL to database might be limited ** by active readers. This routine will never overwrite a database page ** that a concurrent reader might be using. ** ** All I/O barrier operations (a.k.a fsyncs) occur in this routine when ** SQLite is in WAL-mode in synchronous=NORMAL. That means that if ** checkpoints are always run by a background thread or background ** process, foreground threads will never block on a lengthy fsync call. ** ** Fsync is called on the WAL before writing content out of the WAL and ** into the database. This ensures that if the new content is persistent ** in the WAL and can be recovered following a power-loss or hard reset. ** ** Fsync is also called on the database file if (and only if) the entire ** WAL content is copied into the database file. This second fsync makes ** it safe to delete the WAL since the new content will persist in the ** database file. ** ** This routine uses and updates the nBackfill field of the wal-index header. ** This is the only routine that will increase the value of nBackfill. ** (A WAL reset or recovery will revert nBackfill to zero, but not increase ** its value.) ** ** The caller must be holding sufficient locks to ensure that no other ** checkpoint is running (in any other thread or process) at the same ** time. 
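**
** (A rough usage sketch, for orientation only: applications normally do
** not call this routine directly.  A checkpoint is usually requested
** through the public API, e.g.
**
**     int nLog = 0, nCkpt = 0;
**     rc = sqlite3_wal_checkpoint_v2(db, "main",
**              SQLITE_CHECKPOINT_TRUNCATE, &nLog, &nCkpt);
**
** which reaches sqlite3WalCheckpoint() further down in this file; that
** wrapper takes the locks described above before invoking this routine.)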
*/ static int walCheckpoint( Wal *pWal, /* Wal connection */ int eMode, /* One of PASSIVE, FULL or RESTART */ int (*xBusy)(void*), /* Function to call when busy */ void *pBusyArg, /* Context argument for xBusyHandler */ int sync_flags, /* Flags for OsSync() (or 0) */ u8 *zBuf /* Temporary buffer to use */ ){ int rc = SQLITE_OK; /* Return code */ int szPage; /* Database page-size */ WalIterator *pIter = 0; /* Wal iterator context */ u32 iDbpage = 0; /* Next database page to write */ u32 iFrame = 0; /* Wal frame containing data for iDbpage */ u32 mxSafeFrame; /* Max frame that can be backfilled */ u32 mxPage; /* Max database page to write */ int i; /* Loop counter */ volatile WalCkptInfo *pInfo; /* The checkpoint status information */ szPage = walPagesize(pWal); testcase( szPage<=32768 ); testcase( szPage>=65536 ); pInfo = walCkptInfo(pWal); if( pInfo->nBackfillhdr.mxFrame ){ /* Allocate the iterator */ rc = walIteratorInit(pWal, &pIter); if( rc!=SQLITE_OK ){ return rc; } assert( pIter ); /* EVIDENCE-OF: R-62920-47450 The busy-handler callback is never invoked ** in the SQLITE_CHECKPOINT_PASSIVE mode. */ assert( eMode!=SQLITE_CHECKPOINT_PASSIVE || xBusy==0 ); /* Compute in mxSafeFrame the index of the last frame of the WAL that is ** safe to write into the database. Frames beyond mxSafeFrame might ** overwrite database pages that are in use by active readers and thus ** cannot be backfilled from the WAL. */ mxSafeFrame = pWal->hdr.mxFrame; mxPage = pWal->hdr.nPage; for(i=1; iaReadMark[i]; if( mxSafeFrame>y ){ assert( y<=pWal->hdr.mxFrame ); rc = walBusyLock(pWal, xBusy, pBusyArg, WAL_READ_LOCK(i), 1); if( rc==SQLITE_OK ){ pInfo->aReadMark[i] = (i==1 ? mxSafeFrame : READMARK_NOT_USED); walUnlockExclusive(pWal, WAL_READ_LOCK(i), 1); }else if( rc==SQLITE_BUSY ){ mxSafeFrame = y; xBusy = 0; }else{ goto walcheckpoint_out; } } } if( pInfo->nBackfillnBackfill; /* Sync the WAL to disk */ if( sync_flags ){ rc = sqlite3OsSync(pWal->pWalFd, sync_flags); } /* If the database may grow as a result of this checkpoint, hint ** about the eventual size of the db file to the VFS layer. */ if( rc==SQLITE_OK ){ i64 nReq = ((i64)mxPage * szPage); rc = sqlite3OsFileSize(pWal->pDbFd, &nSize); if( rc==SQLITE_OK && nSizepDbFd, SQLITE_FCNTL_SIZE_HINT, &nReq); } } /* Iterate through the contents of the WAL, copying data to the db file */ while( rc==SQLITE_OK && 0==walIteratorNext(pIter, &iDbpage, &iFrame) ){ i64 iOffset; assert( walFramePgno(pWal, iFrame)==iDbpage ); if( iFrame<=nBackfill || iFrame>mxSafeFrame || iDbpage>mxPage ){ continue; } iOffset = walFrameOffset(iFrame, szPage) + WAL_FRAME_HDRSIZE; /* testcase( IS_BIG_INT(iOffset) ); // requires a 4GiB WAL file */ rc = sqlite3OsRead(pWal->pWalFd, zBuf, szPage, iOffset); if( rc!=SQLITE_OK ) break; iOffset = (iDbpage-1)*(i64)szPage; testcase( IS_BIG_INT(iOffset) ); rc = sqlite3OsWrite(pWal->pDbFd, zBuf, szPage, iOffset); if( rc!=SQLITE_OK ) break; } /* If work was actually accomplished... */ if( rc==SQLITE_OK ){ if( mxSafeFrame==walIndexHdr(pWal)->mxFrame ){ i64 szDb = pWal->hdr.nPage*(i64)szPage; testcase( IS_BIG_INT(szDb) ); rc = sqlite3OsTruncate(pWal->pDbFd, szDb); if( rc==SQLITE_OK && sync_flags ){ rc = sqlite3OsSync(pWal->pDbFd, sync_flags); } } if( rc==SQLITE_OK ){ pInfo->nBackfill = mxSafeFrame; } } /* Release the reader lock held while backfilling */ walUnlockExclusive(pWal, WAL_READ_LOCK(0), 1); } if( rc==SQLITE_BUSY ){ /* Reset the return code so as not to report a checkpoint failure ** just because there are active readers. 
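    **
    ** (Clarifying note: this means a PASSIVE checkpoint can report
    ** SQLITE_OK even though some frames remain unbackfilled.  A caller
    ** that needs to know how far the checkpoint got can compare the
    ** pnLog and pnCkpt values reported back by sqlite3WalCheckpoint(),
    ** e.g. via sqlite3_wal_checkpoint_v2().)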
*/
      rc = SQLITE_OK;
    }
  }

  /* If this is an SQLITE_CHECKPOINT_RESTART or TRUNCATE operation, and the
  ** entire wal file has been copied into the database file, then block
  ** until all readers have finished using the wal file. This ensures that
  ** the next process to write to the database restarts the wal file.
  */
  if( rc==SQLITE_OK && eMode!=SQLITE_CHECKPOINT_PASSIVE ){
    assert( pWal->writeLock );
    if( pInfo->nBackfill<pWal->hdr.mxFrame ){
      rc = SQLITE_BUSY;
    }else if( eMode>=SQLITE_CHECKPOINT_RESTART ){
      u32 salt1;
      sqlite3_randomness(4, &salt1);
      assert( pInfo->nBackfill==pWal->hdr.mxFrame );
      rc = walBusyLock(pWal, xBusy, pBusyArg, WAL_READ_LOCK(1), WAL_NREADER-1);
      if( rc==SQLITE_OK ){
        if( eMode==SQLITE_CHECKPOINT_TRUNCATE ){
          /* IMPLEMENTATION-OF: R-44699-57140 This mode works the same way as
          ** SQLITE_CHECKPOINT_RESTART with the addition that it also
          ** truncates the log file to zero bytes just prior to a
          ** successful return.
          **
          ** In theory, it might be safe to do this without updating the
          ** wal-index header in shared memory, as all subsequent reader or
          ** writer clients should see that the entire log file has been
          ** checkpointed and behave accordingly. This seems unsafe though,
          ** as it would leave the system in a state where the contents of
          ** the wal-index header do not match the contents of the
          ** file-system. To avoid this, update the wal-index header to
          ** indicate that the log file contains zero valid frames.  */
          walRestartHdr(pWal, salt1);
          rc = sqlite3OsTruncate(pWal->pWalFd, 0);
        }
        walUnlockExclusive(pWal, WAL_READ_LOCK(1), WAL_NREADER-1);
      }
    }
  }

 walcheckpoint_out:
  walIteratorFree(pIter);
  return rc;
}

/*
** If the WAL file is currently larger than nMax bytes in size, truncate
** it to exactly nMax bytes. If an error occurs while doing so, ignore it.
*/
static void walLimitSize(Wal *pWal, i64 nMax){
  i64 sz;
  int rx;
  sqlite3BeginBenignMalloc();
  rx = sqlite3OsFileSize(pWal->pWalFd, &sz);
  if( rx==SQLITE_OK && (sz > nMax ) ){
    rx = sqlite3OsTruncate(pWal->pWalFd, nMax);
  }
  sqlite3EndBenignMalloc();
  if( rx ){
    sqlite3_log(rx, "cannot limit WAL size: %s", pWal->zWalName);
  }
}

/*
** Close a connection to a log file.
*/
SQLITE_PRIVATE int sqlite3WalClose(
  Wal *pWal,                      /* Wal to close */
  int sync_flags,                 /* Flags to pass to OsSync() (or 0) */
  int nBuf,
  u8 *zBuf                        /* Buffer of at least nBuf bytes */
){
  int rc = SQLITE_OK;
  if( pWal ){
    int isDelete = 0;             /* True to unlink wal and wal-index files */

    /* If an EXCLUSIVE lock can be obtained on the database file (using the
    ** ordinary, rollback-mode locking methods), this guarantees that the
    ** connection associated with this log file is the only connection to
    ** the database. In this case checkpoint the database and unlink both
    ** the wal and wal-index files.
    **
    ** The EXCLUSIVE lock is not released before returning.
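    **
    ** (Illustrative aside: whether the WAL file is actually unlinked also
    ** depends on the SQLITE_FCNTL_PERSIST_WAL setting queried below.  An
    ** application can inspect or change that setting with, for example,
    **
    **     int bPersist = -1;      (a negative value just queries)
    **     sqlite3_file_control(db, "main",
    **                          SQLITE_FCNTL_PERSIST_WAL, &bPersist);
    **
    ** When persistent-WAL mode is on, the file is truncated instead of
    ** deleted, subject to the journal_size_limit handling shown below.)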
*/ rc = sqlite3OsLock(pWal->pDbFd, SQLITE_LOCK_EXCLUSIVE); if( rc==SQLITE_OK ){ if( pWal->exclusiveMode==WAL_NORMAL_MODE ){ pWal->exclusiveMode = WAL_EXCLUSIVE_MODE; } rc = sqlite3WalCheckpoint( pWal, SQLITE_CHECKPOINT_PASSIVE, 0, 0, sync_flags, nBuf, zBuf, 0, 0 ); if( rc==SQLITE_OK ){ int bPersist = -1; sqlite3OsFileControlHint( pWal->pDbFd, SQLITE_FCNTL_PERSIST_WAL, &bPersist ); if( bPersist!=1 ){ /* Try to delete the WAL file if the checkpoint completed and ** fsyned (rc==SQLITE_OK) and if we are not in persistent-wal ** mode (!bPersist) */ isDelete = 1; }else if( pWal->mxWalSize>=0 ){ /* Try to truncate the WAL file to zero bytes if the checkpoint ** completed and fsynced (rc==SQLITE_OK) and we are in persistent ** WAL mode (bPersist) and if the PRAGMA journal_size_limit is a ** non-negative value (pWal->mxWalSize>=0). Note that we truncate ** to zero bytes as truncating to the journal_size_limit might ** leave a corrupt WAL file on disk. */ walLimitSize(pWal, 0); } } } walIndexClose(pWal, isDelete); sqlite3OsClose(pWal->pWalFd); if( isDelete ){ sqlite3BeginBenignMalloc(); sqlite3OsDelete(pWal->pVfs, pWal->zWalName, 0); sqlite3EndBenignMalloc(); } WALTRACE(("WAL%p: closed\n", pWal)); sqlite3_free((void *)pWal->apWiData); sqlite3_free(pWal); } return rc; } /* ** Try to read the wal-index header. Return 0 on success and 1 if ** there is a problem. ** ** The wal-index is in shared memory. Another thread or process might ** be writing the header at the same time this procedure is trying to ** read it, which might result in inconsistency. A dirty read is detected ** by verifying that both copies of the header are the same and also by ** a checksum on the header. ** ** If and only if the read is consistent and the header is different from ** pWal->hdr, then pWal->hdr is updated to the content of the new header ** and *pChanged is set to 1. ** ** If the checksum cannot be verified return non-zero. If the header ** is read successfully and the checksum verified, return zero. */ static int walIndexTryHdr(Wal *pWal, int *pChanged){ u32 aCksum[2]; /* Checksum on the header content */ WalIndexHdr h1, h2; /* Two copies of the header content */ WalIndexHdr volatile *aHdr; /* Header in shared memory */ /* The first page of the wal-index must be mapped at this point. */ assert( pWal->nWiData>0 && pWal->apWiData[0] ); /* Read the header. This might happen concurrently with a write to the ** same area of shared memory on a different CPU in a SMP, ** meaning it is possible that an inconsistent snapshot is read ** from the file. If this happens, return non-zero. ** ** There are two copies of the header at the beginning of the wal-index. ** When reading, read [0] first then [1]. Writes are in the reverse order. ** Memory barriers are used to prevent the compiler or the hardware from ** reordering the reads and writes. 
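  **
  ** (Added reasoning, for clarity: because the writer updates copy [1]
  ** before copy [0] while the reader reads [0] before [1], a reader that
  ** sees the new contents in copy [0] is guaranteed that copy [1] was
  ** already complete, so the two copies compare equal.  If the reader
  ** catches an update in progress it sees an old [0] with a new [1], the
  ** memcmp() below fails, and the caller retries the read.  The checksum
  ** additionally guards against a write torn within a single copy.)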
*/ aHdr = walIndexHdr(pWal); memcpy(&h1, (void *)&aHdr[0], sizeof(h1)); walShmBarrier(pWal); memcpy(&h2, (void *)&aHdr[1], sizeof(h2)); if( memcmp(&h1, &h2, sizeof(h1))!=0 ){ return 1; /* Dirty read */ } if( h1.isInit==0 ){ return 1; /* Malformed header - probably all zeros */ } walChecksumBytes(1, (u8*)&h1, sizeof(h1)-sizeof(h1.aCksum), 0, aCksum); if( aCksum[0]!=h1.aCksum[0] || aCksum[1]!=h1.aCksum[1] ){ return 1; /* Checksum does not match */ } if( memcmp(&pWal->hdr, &h1, sizeof(WalIndexHdr)) ){ *pChanged = 1; memcpy(&pWal->hdr, &h1, sizeof(WalIndexHdr)); pWal->szPage = (pWal->hdr.szPage&0xfe00) + ((pWal->hdr.szPage&0x0001)<<16); testcase( pWal->szPage<=32768 ); testcase( pWal->szPage>=65536 ); } /* The header was successfully read. Return zero. */ return 0; } /* ** Read the wal-index header from the wal-index and into pWal->hdr. ** If the wal-header appears to be corrupt, try to reconstruct the ** wal-index from the WAL before returning. ** ** Set *pChanged to 1 if the wal-index header value in pWal->hdr is ** changed by this operation. If pWal->hdr is unchanged, set *pChanged ** to 0. ** ** If the wal-index header is successfully read, return SQLITE_OK. ** Otherwise an SQLite error code. */ static int walIndexReadHdr(Wal *pWal, int *pChanged){ int rc; /* Return code */ int badHdr; /* True if a header read failed */ volatile u32 *page0; /* Chunk of wal-index containing header */ /* Ensure that page 0 of the wal-index (the page that contains the ** wal-index header) is mapped. Return early if an error occurs here. */ assert( pChanged ); rc = walIndexPage(pWal, 0, &page0); if( rc!=SQLITE_OK ){ return rc; }; assert( page0 || pWal->writeLock==0 ); /* If the first page of the wal-index has been mapped, try to read the ** wal-index header immediately, without holding any lock. This usually ** works, but may fail if the wal-index header is corrupt or currently ** being modified by another thread or process. */ badHdr = (page0 ? walIndexTryHdr(pWal, pChanged) : 1); /* If the first attempt failed, it might have been due to a race ** with a writer. So get a WRITE lock and try again. */ assert( badHdr==0 || pWal->writeLock==0 ); if( badHdr ){ if( pWal->readOnly & WAL_SHM_RDONLY ){ if( SQLITE_OK==(rc = walLockShared(pWal, WAL_WRITE_LOCK)) ){ walUnlockShared(pWal, WAL_WRITE_LOCK); rc = SQLITE_READONLY_RECOVERY; } }else if( SQLITE_OK==(rc = walLockExclusive(pWal, WAL_WRITE_LOCK, 1, 1)) ){ pWal->writeLock = 1; if( SQLITE_OK==(rc = walIndexPage(pWal, 0, &page0)) ){ badHdr = walIndexTryHdr(pWal, pChanged); if( badHdr ){ /* If the wal-index header is still malformed even while holding ** a WRITE lock, it can only mean that the header is corrupted and ** needs to be reconstructed. So run recovery to do exactly that. */ rc = walIndexRecover(pWal); *pChanged = 1; } } pWal->writeLock = 0; walUnlockExclusive(pWal, WAL_WRITE_LOCK, 1); } } /* If the header is read successfully, check the version number to make ** sure the wal-index was not constructed with some future format that ** this version of SQLite cannot understand. */ if( badHdr==0 && pWal->hdr.iVersion!=WALINDEX_MAX_VERSION ){ rc = SQLITE_CANTOPEN_BKPT; } return rc; } /* ** This is the value that walTryBeginRead returns when it needs to ** be retried. */ #define WAL_RETRY (-1) /* ** Attempt to start a read transaction. This might fail due to a race or ** other transient condition. When that happens, it returns WAL_RETRY to ** indicate to the caller that it is safe to retry immediately. ** ** On success return SQLITE_OK. 
On a permanent failure (such an ** I/O error or an SQLITE_BUSY because another process is running ** recovery) return a positive error code. ** ** The useWal parameter is true to force the use of the WAL and disable ** the case where the WAL is bypassed because it has been completely ** checkpointed. If useWal==0 then this routine calls walIndexReadHdr() ** to make a copy of the wal-index header into pWal->hdr. If the ** wal-index header has changed, *pChanged is set to 1 (as an indication ** to the caller that the local paget cache is obsolete and needs to be ** flushed.) When useWal==1, the wal-index header is assumed to already ** be loaded and the pChanged parameter is unused. ** ** The caller must set the cnt parameter to the number of prior calls to ** this routine during the current read attempt that returned WAL_RETRY. ** This routine will start taking more aggressive measures to clear the ** race conditions after multiple WAL_RETRY returns, and after an excessive ** number of errors will ultimately return SQLITE_PROTOCOL. The ** SQLITE_PROTOCOL return indicates that some other process has gone rogue ** and is not honoring the locking protocol. There is a vanishingly small ** chance that SQLITE_PROTOCOL could be returned because of a run of really ** bad luck when there is lots of contention for the wal-index, but that ** possibility is so small that it can be safely neglected, we believe. ** ** On success, this routine obtains a read lock on ** WAL_READ_LOCK(pWal->readLock). The pWal->readLock integer is ** in the range 0 <= pWal->readLock < WAL_NREADER. If pWal->readLock==(-1) ** that means the Wal does not hold any read lock. The reader must not ** access any database page that is modified by a WAL frame up to and ** including frame number aReadMark[pWal->readLock]. The reader will ** use WAL frames up to and including pWal->hdr.mxFrame if pWal->readLock>0 ** Or if pWal->readLock==0, then the reader will ignore the WAL ** completely and get all content directly from the database file. ** If the useWal parameter is 1 then the WAL will never be ignored and ** this routine will always set pWal->readLock>0 on success. ** When the read transaction is completed, the caller must release the ** lock on WAL_READ_LOCK(pWal->readLock) and set pWal->readLock to -1. ** ** This routine uses the nBackfill and aReadMark[] fields of the header ** to select a particular WAL_READ_LOCK() that strives to let the ** checkpoint process do as much work as possible. This routine might ** update values of the aReadMark[] array in the header, but if it does ** so it takes care to hold an exclusive lock on the corresponding ** WAL_READ_LOCK() while changing values. */ static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int cnt){ volatile WalCkptInfo *pInfo; /* Checkpoint information in wal-index */ u32 mxReadMark; /* Largest aReadMark[] value */ int mxI; /* Index of largest aReadMark[] value */ int i; /* Loop counter */ int rc = SQLITE_OK; /* Return code */ assert( pWal->readLock<0 ); /* Not currently locked */ /* Take steps to avoid spinning forever if there is a protocol error. ** ** Circumstances that cause a RETRY should only last for the briefest ** instances of time. No I/O or other system calls are done while the ** locks are held, so the locks should not be held for very long. But ** if we are unlucky, another process that is holding a lock might get ** paged out or take a page-fault that is time-consuming to resolve, ** during the few nanoseconds that it is holding the lock. 
In that case, ** it might take longer than normal for the lock to free. ** ** After 5 RETRYs, we begin calling sqlite3OsSleep(). The first few ** calls to sqlite3OsSleep() have a delay of 1 microsecond. Really this ** is more of a scheduler yield than an actual delay. But on the 10th ** an subsequent retries, the delays start becoming longer and longer, ** so that on the 100th (and last) RETRY we delay for 323 milliseconds. ** The total delay time before giving up is less than 10 seconds. */ if( cnt>5 ){ int nDelay = 1; /* Pause time in microseconds */ if( cnt>100 ){ VVA_ONLY( pWal->lockError = 1; ) return SQLITE_PROTOCOL; } if( cnt>=10 ) nDelay = (cnt-9)*(cnt-9)*39; sqlite3OsSleep(pWal->pVfs, nDelay); } if( !useWal ){ rc = walIndexReadHdr(pWal, pChanged); if( rc==SQLITE_BUSY ){ /* If there is not a recovery running in another thread or process ** then convert BUSY errors to WAL_RETRY. If recovery is known to ** be running, convert BUSY to BUSY_RECOVERY. There is a race here ** which might cause WAL_RETRY to be returned even if BUSY_RECOVERY ** would be technically correct. But the race is benign since with ** WAL_RETRY this routine will be called again and will probably be ** right on the second iteration. */ if( pWal->apWiData[0]==0 ){ /* This branch is taken when the xShmMap() method returns SQLITE_BUSY. ** We assume this is a transient condition, so return WAL_RETRY. The ** xShmMap() implementation used by the default unix and win32 VFS ** modules may return SQLITE_BUSY due to a race condition in the ** code that determines whether or not the shared-memory region ** must be zeroed before the requested page is returned. */ rc = WAL_RETRY; }else if( SQLITE_OK==(rc = walLockShared(pWal, WAL_RECOVER_LOCK)) ){ walUnlockShared(pWal, WAL_RECOVER_LOCK); rc = WAL_RETRY; }else if( rc==SQLITE_BUSY ){ rc = SQLITE_BUSY_RECOVERY; } } if( rc!=SQLITE_OK ){ return rc; } } pInfo = walCkptInfo(pWal); if( !useWal && pInfo->nBackfill==pWal->hdr.mxFrame ){ /* The WAL has been completely backfilled (or it is empty). ** and can be safely ignored. */ rc = walLockShared(pWal, WAL_READ_LOCK(0)); walShmBarrier(pWal); if( rc==SQLITE_OK ){ if( memcmp((void *)walIndexHdr(pWal), &pWal->hdr, sizeof(WalIndexHdr)) ){ /* It is not safe to allow the reader to continue here if frames ** may have been appended to the log before READ_LOCK(0) was obtained. ** When holding READ_LOCK(0), the reader ignores the entire log file, ** which implies that the database file contains a trustworthy ** snapshot. Since holding READ_LOCK(0) prevents a checkpoint from ** happening, this is usually correct. ** ** However, if frames have been appended to the log (or if the log ** is wrapped and written for that matter) before the READ_LOCK(0) ** is obtained, that is not necessarily true. A checkpointer may ** have started to backfill the appended frames but crashed before ** it finished. Leaving a corrupt image in the database file. */ walUnlockShared(pWal, WAL_READ_LOCK(0)); return WAL_RETRY; } pWal->readLock = 0; return SQLITE_OK; }else if( rc!=SQLITE_BUSY ){ return rc; } } /* If we get this far, it means that the reader will want to use ** the WAL to get at content from recent commits. The job now is ** to select one of the aReadMark[] entries that is closest to ** but not exceeding pWal->hdr.mxFrame and lock that entry. */ mxReadMark = 0; mxI = 0; for(i=1; iaReadMark[i]; if( mxReadMark<=thisMark && thisMark<=pWal->hdr.mxFrame ){ assert( thisMark!=READMARK_NOT_USED ); mxReadMark = thisMark; mxI = i; } } /* There was once an "if" here. 
The extra "{" is to preserve indentation. */ { if( (pWal->readOnly & WAL_SHM_RDONLY)==0 && (mxReadMarkhdr.mxFrame || mxI==0) ){ for(i=1; iaReadMark[i] = pWal->hdr.mxFrame; mxI = i; walUnlockExclusive(pWal, WAL_READ_LOCK(i), 1); break; }else if( rc!=SQLITE_BUSY ){ return rc; } } } if( mxI==0 ){ assert( rc==SQLITE_BUSY || (pWal->readOnly & WAL_SHM_RDONLY)!=0 ); return rc==SQLITE_BUSY ? WAL_RETRY : SQLITE_READONLY_CANTLOCK; } rc = walLockShared(pWal, WAL_READ_LOCK(mxI)); if( rc ){ return rc==SQLITE_BUSY ? WAL_RETRY : rc; } /* Now that the read-lock has been obtained, check that neither the ** value in the aReadMark[] array or the contents of the wal-index ** header have changed. ** ** It is necessary to check that the wal-index header did not change ** between the time it was read and when the shared-lock was obtained ** on WAL_READ_LOCK(mxI) was obtained to account for the possibility ** that the log file may have been wrapped by a writer, or that frames ** that occur later in the log than pWal->hdr.mxFrame may have been ** copied into the database by a checkpointer. If either of these things ** happened, then reading the database with the current value of ** pWal->hdr.mxFrame risks reading a corrupted snapshot. So, retry ** instead. ** ** This does not guarantee that the copy of the wal-index header is up to ** date before proceeding. That would not be possible without somehow ** blocking writers. It only guarantees that a dangerous checkpoint or ** log-wrap (either of which would require an exclusive lock on ** WAL_READ_LOCK(mxI)) has not occurred since the snapshot was valid. */ walShmBarrier(pWal); if( pInfo->aReadMark[mxI]!=mxReadMark || memcmp((void *)walIndexHdr(pWal), &pWal->hdr, sizeof(WalIndexHdr)) ){ walUnlockShared(pWal, WAL_READ_LOCK(mxI)); return WAL_RETRY; }else{ assert( mxReadMark<=pWal->hdr.mxFrame ); pWal->readLock = (i16)mxI; } } return rc; } /* ** Begin a read transaction on the database. ** ** This routine used to be called sqlite3OpenSnapshot() and with good reason: ** it takes a snapshot of the state of the WAL and wal-index for the current ** instant in time. The current thread will continue to use this snapshot. ** Other threads might append new content to the WAL and wal-index but ** that extra content is ignored by the current thread. ** ** If the database contents have changes since the previous read ** transaction, then *pChanged is set to 1 before returning. The ** Pager layer will use this to know that is cache is stale and ** needs to be flushed. */ SQLITE_PRIVATE int sqlite3WalBeginReadTransaction(Wal *pWal, int *pChanged){ int rc; /* Return code */ int cnt = 0; /* Number of TryBeginRead attempts */ do{ rc = walTryBeginRead(pWal, pChanged, 0, ++cnt); }while( rc==WAL_RETRY ); testcase( (rc&0xff)==SQLITE_BUSY ); testcase( (rc&0xff)==SQLITE_IOERR ); testcase( rc==SQLITE_PROTOCOL ); testcase( rc==SQLITE_OK ); return rc; } /* ** Finish with a read transaction. All this does is release the ** read-lock. */ SQLITE_PRIVATE void sqlite3WalEndReadTransaction(Wal *pWal){ sqlite3WalEndWriteTransaction(pWal); if( pWal->readLock>=0 ){ walUnlockShared(pWal, WAL_READ_LOCK(pWal->readLock)); pWal->readLock = -1; } } /* ** Search the wal file for page pgno. If found, set *piRead to the frame that ** contains the page. Otherwise, if pgno is not in the wal file, set *piRead ** to zero. ** ** Return SQLITE_OK if successful, or an error code if an error occurs. If an ** error does occur, the final value of *piRead is undefined. 
*/ SQLITE_PRIVATE int sqlite3WalFindFrame( Wal *pWal, /* WAL handle */ Pgno pgno, /* Database page number to read data for */ u32 *piRead /* OUT: Frame number (or zero) */ ){ u32 iRead = 0; /* If !=0, WAL frame to return data from */ u32 iLast = pWal->hdr.mxFrame; /* Last page in WAL for this reader */ int iHash; /* Used to loop through N hash tables */ /* This routine is only be called from within a read transaction. */ assert( pWal->readLock>=0 || pWal->lockError ); /* If the "last page" field of the wal-index header snapshot is 0, then ** no data will be read from the wal under any circumstances. Return early ** in this case as an optimization. Likewise, if pWal->readLock==0, ** then the WAL is ignored by the reader so return early, as if the ** WAL were empty. */ if( iLast==0 || pWal->readLock==0 ){ *piRead = 0; return SQLITE_OK; } /* Search the hash table or tables for an entry matching page number ** pgno. Each iteration of the following for() loop searches one ** hash table (each hash table indexes up to HASHTABLE_NPAGE frames). ** ** This code might run concurrently to the code in walIndexAppend() ** that adds entries to the wal-index (and possibly to this hash ** table). This means the value just read from the hash ** slot (aHash[iKey]) may have been added before or after the ** current read transaction was opened. Values added after the ** read transaction was opened may have been written incorrectly - ** i.e. these slots may contain garbage data. However, we assume ** that any slots written before the current read transaction was ** opened remain unmodified. ** ** For the reasons above, the if(...) condition featured in the inner ** loop of the following block is more stringent that would be required ** if we had exclusive access to the hash-table: ** ** (aPgno[iFrame]==pgno): ** This condition filters out normal hash-table collisions. ** ** (iFrame<=iLast): ** This condition filters out entries that were added to the hash ** table after the current read-transaction had started. */ for(iHash=walFramePage(iLast); iHash>=0 && iRead==0; iHash--){ volatile ht_slot *aHash; /* Pointer to hash table */ volatile u32 *aPgno; /* Pointer to array of page numbers */ u32 iZero; /* Frame number corresponding to aPgno[0] */ int iKey; /* Hash slot index */ int nCollide; /* Number of hash collisions remaining */ int rc; /* Error code */ rc = walHashGet(pWal, iHash, &aHash, &aPgno, &iZero); if( rc!=SQLITE_OK ){ return rc; } nCollide = HASHTABLE_NSLOT; for(iKey=walHash(pgno); aHash[iKey]; iKey=walNextHash(iKey)){ u32 iFrame = aHash[iKey] + iZero; if( iFrame<=iLast && aPgno[aHash[iKey]]==pgno ){ assert( iFrame>iRead || CORRUPT_DB ); iRead = iFrame; } if( (nCollide--)==0 ){ return SQLITE_CORRUPT_BKPT; } } } #ifdef SQLITE_ENABLE_EXPENSIVE_ASSERT /* If expensive assert() statements are available, do a linear search ** of the wal-index file content. Make sure the results agree with the ** result obtained using the hash indexes above. */ { u32 iRead2 = 0; u32 iTest; for(iTest=iLast; iTest>0; iTest--){ if( walFramePgno(pWal, iTest)==pgno ){ iRead2 = iTest; break; } } assert( iRead==iRead2 ); } #endif *piRead = iRead; return SQLITE_OK; } /* ** Read the contents of frame iRead from the wal file into buffer pOut ** (which is nOut bytes in size). Return SQLITE_OK if successful, or an ** error code otherwise. 
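**
** (Worked example, assuming the WAL_HDRSIZE==32 and WAL_FRAME_HDRSIZE==24
** values defined earlier in this file: with 4096-byte pages, frame 3
** begins at
**
**     walFrameOffset(3, 4096) = 32 + (3-1)*(4096+24) = 8272
**
** and its page image starts 24 bytes later, at offset 8296, which is the
** iOffset computed below.)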
*/ SQLITE_PRIVATE int sqlite3WalReadFrame( Wal *pWal, /* WAL handle */ u32 iRead, /* Frame to read */ int nOut, /* Size of buffer pOut in bytes */ u8 *pOut /* Buffer to write page data to */ ){ int sz; i64 iOffset; sz = pWal->hdr.szPage; sz = (sz&0xfe00) + ((sz&0x0001)<<16); testcase( sz<=32768 ); testcase( sz>=65536 ); iOffset = walFrameOffset(iRead, sz) + WAL_FRAME_HDRSIZE; /* testcase( IS_BIG_INT(iOffset) ); // requires a 4GiB WAL */ return sqlite3OsRead(pWal->pWalFd, pOut, (nOut>sz ? sz : nOut), iOffset); } /* ** Return the size of the database in pages (or zero, if unknown). */ SQLITE_PRIVATE Pgno sqlite3WalDbsize(Wal *pWal){ if( pWal && ALWAYS(pWal->readLock>=0) ){ return pWal->hdr.nPage; } return 0; } /* ** This function starts a write transaction on the WAL. ** ** A read transaction must have already been started by a prior call ** to sqlite3WalBeginReadTransaction(). ** ** If another thread or process has written into the database since ** the read transaction was started, then it is not possible for this ** thread to write as doing so would cause a fork. So this routine ** returns SQLITE_BUSY in that case and no write transaction is started. ** ** There can only be a single writer active at a time. */ SQLITE_PRIVATE int sqlite3WalBeginWriteTransaction(Wal *pWal){ int rc; /* Cannot start a write transaction without first holding a read ** transaction. */ assert( pWal->readLock>=0 ); if( pWal->readOnly ){ return SQLITE_READONLY; } /* Only one writer allowed at a time. Get the write lock. Return ** SQLITE_BUSY if unable. */ rc = walLockExclusive(pWal, WAL_WRITE_LOCK, 1, 0); if( rc ){ return rc; } pWal->writeLock = 1; /* If another connection has written to the database file since the ** time the read transaction on this connection was started, then ** the write is disallowed. */ if( memcmp(&pWal->hdr, (void *)walIndexHdr(pWal), sizeof(WalIndexHdr))!=0 ){ walUnlockExclusive(pWal, WAL_WRITE_LOCK, 1); pWal->writeLock = 0; rc = SQLITE_BUSY_SNAPSHOT; } return rc; } /* ** End a write transaction. The commit has already been done. This ** routine merely releases the lock. */ SQLITE_PRIVATE int sqlite3WalEndWriteTransaction(Wal *pWal){ if( pWal->writeLock ){ walUnlockExclusive(pWal, WAL_WRITE_LOCK, 1); pWal->writeLock = 0; pWal->truncateOnCommit = 0; } return SQLITE_OK; } /* ** If any data has been written (but not committed) to the log file, this ** function moves the write-pointer back to the start of the transaction. ** ** Additionally, the callback function is invoked for each frame written ** to the WAL since the start of the transaction. If the callback returns ** other than SQLITE_OK, it is not invoked again and the error code is ** returned to the caller. ** ** Otherwise, if the callback function does not return an error, this ** function returns SQLITE_OK. */ SQLITE_PRIVATE int sqlite3WalUndo(Wal *pWal, int (*xUndo)(void *, Pgno), void *pUndoCtx){ int rc = SQLITE_OK; if( ALWAYS(pWal->writeLock) ){ Pgno iMax = pWal->hdr.mxFrame; Pgno iFrame; /* Restore the clients cache of the wal-index header to the state it ** was in before the client began writing to the database. */ memcpy(&pWal->hdr, (void *)walIndexHdr(pWal), sizeof(WalIndexHdr)); for(iFrame=pWal->hdr.mxFrame+1; ALWAYS(rc==SQLITE_OK) && iFrame<=iMax; iFrame++ ){ /* This call cannot fail. 
Unless the page for which the page number ** is passed as the second argument is (a) in the cache and ** (b) has an outstanding reference, then xUndo is either a no-op ** (if (a) is false) or simply expels the page from the cache (if (b) ** is false). ** ** If the upper layer is doing a rollback, it is guaranteed that there ** are no outstanding references to any page other than page 1. And ** page 1 is never written to the log until the transaction is ** committed. As a result, the call to xUndo may not fail. */ assert( walFramePgno(pWal, iFrame)!=1 ); rc = xUndo(pUndoCtx, walFramePgno(pWal, iFrame)); } if( iMax!=pWal->hdr.mxFrame ) walCleanupHash(pWal); } return rc; } /* ** Argument aWalData must point to an array of WAL_SAVEPOINT_NDATA u32 ** values. This function populates the array with values required to ** "rollback" the write position of the WAL handle back to the current ** point in the event of a savepoint rollback (via WalSavepointUndo()). */ SQLITE_PRIVATE void sqlite3WalSavepoint(Wal *pWal, u32 *aWalData){ assert( pWal->writeLock ); aWalData[0] = pWal->hdr.mxFrame; aWalData[1] = pWal->hdr.aFrameCksum[0]; aWalData[2] = pWal->hdr.aFrameCksum[1]; aWalData[3] = pWal->nCkpt; } /* ** Move the write position of the WAL back to the point identified by ** the values in the aWalData[] array. aWalData must point to an array ** of WAL_SAVEPOINT_NDATA u32 values that has been previously populated ** by a call to WalSavepoint(). */ SQLITE_PRIVATE int sqlite3WalSavepointUndo(Wal *pWal, u32 *aWalData){ int rc = SQLITE_OK; assert( pWal->writeLock ); assert( aWalData[3]!=pWal->nCkpt || aWalData[0]<=pWal->hdr.mxFrame ); if( aWalData[3]!=pWal->nCkpt ){ /* This savepoint was opened immediately after the write-transaction ** was started. Right after that, the writer decided to wrap around ** to the start of the log. Update the savepoint values to match. */ aWalData[0] = 0; aWalData[3] = pWal->nCkpt; } if( aWalData[0]hdr.mxFrame ){ pWal->hdr.mxFrame = aWalData[0]; pWal->hdr.aFrameCksum[0] = aWalData[1]; pWal->hdr.aFrameCksum[1] = aWalData[2]; walCleanupHash(pWal); } return rc; } /* ** This function is called just before writing a set of frames to the log ** file (see sqlite3WalFrames()). It checks to see if, instead of appending ** to the current log file, it is possible to overwrite the start of the ** existing log file with the new frames (i.e. "reset" the log). If so, ** it sets pWal->hdr.mxFrame to 0. Otherwise, pWal->hdr.mxFrame is left ** unchanged. ** ** SQLITE_OK is returned if no error is encountered (regardless of whether ** or not pWal->hdr.mxFrame is modified). An SQLite error code is returned ** if an error occurs. */ static int walRestartLog(Wal *pWal){ int rc = SQLITE_OK; int cnt; if( pWal->readLock==0 ){ volatile WalCkptInfo *pInfo = walCkptInfo(pWal); assert( pInfo->nBackfill==pWal->hdr.mxFrame ); if( pInfo->nBackfill>0 ){ u32 salt1; sqlite3_randomness(4, &salt1); rc = walLockExclusive(pWal, WAL_READ_LOCK(1), WAL_NREADER-1, 0); if( rc==SQLITE_OK ){ /* If all readers are using WAL_READ_LOCK(0) (in other words if no ** readers are currently using the WAL), then the transactions ** frames will overwrite the start of the existing log. Update the ** wal-index header to reflect this. ** ** In theory it would be Ok to update the cache of the header only ** at this point. But updating the actual wal-index header is also ** safe and means there is no special case for sqlite3WalUndo() ** to handle if this transaction is rolled back. 
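      **
      ** (Clarifying note: walRestartHdr() below increments aSalt[0] and
      ** installs the random salt1 as aSalt[1].  Every frame records the
      ** salt values that were current when it was written, so any stale
      ** frames left over from the previous log generation will fail the
      ** salt comparison in walDecodeFrame() and be ignored if the file is
      ** ever scanned again, for example during recovery.)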
*/ walRestartHdr(pWal, salt1); walUnlockExclusive(pWal, WAL_READ_LOCK(1), WAL_NREADER-1); }else if( rc!=SQLITE_BUSY ){ return rc; } } walUnlockShared(pWal, WAL_READ_LOCK(0)); pWal->readLock = -1; cnt = 0; do{ int notUsed; rc = walTryBeginRead(pWal, ¬Used, 1, ++cnt); }while( rc==WAL_RETRY ); assert( (rc&0xff)!=SQLITE_BUSY ); /* BUSY not possible when useWal==1 */ testcase( (rc&0xff)==SQLITE_IOERR ); testcase( rc==SQLITE_PROTOCOL ); testcase( rc==SQLITE_OK ); } return rc; } /* ** Information about the current state of the WAL file and where ** the next fsync should occur - passed from sqlite3WalFrames() into ** walWriteToLog(). */ typedef struct WalWriter { Wal *pWal; /* The complete WAL information */ sqlite3_file *pFd; /* The WAL file to which we write */ sqlite3_int64 iSyncPoint; /* Fsync at this offset */ int syncFlags; /* Flags for the fsync */ int szPage; /* Size of one page */ } WalWriter; /* ** Write iAmt bytes of content into the WAL file beginning at iOffset. ** Do a sync when crossing the p->iSyncPoint boundary. ** ** In other words, if iSyncPoint is in between iOffset and iOffset+iAmt, ** first write the part before iSyncPoint, then sync, then write the ** rest. */ static int walWriteToLog( WalWriter *p, /* WAL to write to */ void *pContent, /* Content to be written */ int iAmt, /* Number of bytes to write */ sqlite3_int64 iOffset /* Start writing at this offset */ ){ int rc; if( iOffsetiSyncPoint && iOffset+iAmt>=p->iSyncPoint ){ int iFirstAmt = (int)(p->iSyncPoint - iOffset); rc = sqlite3OsWrite(p->pFd, pContent, iFirstAmt, iOffset); if( rc ) return rc; iOffset += iFirstAmt; iAmt -= iFirstAmt; pContent = (void*)(iFirstAmt + (char*)pContent); assert( p->syncFlags & (SQLITE_SYNC_NORMAL|SQLITE_SYNC_FULL) ); rc = sqlite3OsSync(p->pFd, p->syncFlags & SQLITE_SYNC_MASK); if( iAmt==0 || rc ) return rc; } rc = sqlite3OsWrite(p->pFd, pContent, iAmt, iOffset); return rc; } /* ** Write out a single frame of the WAL */ static int walWriteOneFrame( WalWriter *p, /* Where to write the frame */ PgHdr *pPage, /* The page of the frame to be written */ int nTruncate, /* The commit flag. Usually 0. >0 for commit */ sqlite3_int64 iOffset /* Byte offset at which to write */ ){ int rc; /* Result code from subfunctions */ void *pData; /* Data actually written */ u8 aFrame[WAL_FRAME_HDRSIZE]; /* Buffer to assemble frame-header in */ #if defined(SQLITE_HAS_CODEC) if( (pData = sqlite3PagerCodec(pPage))==0 ) return SQLITE_NOMEM; #else pData = pPage->pData; #endif walEncodeFrame(p->pWal, pPage->pgno, nTruncate, pData, aFrame); rc = walWriteToLog(p, aFrame, sizeof(aFrame), iOffset); if( rc ) return rc; /* Write the page data */ rc = walWriteToLog(p, pData, p->szPage, iOffset+sizeof(aFrame)); return rc; } /* ** Write a set of frames to the log. The caller must hold the write-lock ** on the log file (obtained using sqlite3WalBeginWriteTransaction()). */ SQLITE_PRIVATE int sqlite3WalFrames( Wal *pWal, /* Wal handle to write to */ int szPage, /* Database page-size in bytes */ PgHdr *pList, /* List of dirty pages to write */ Pgno nTruncate, /* Database size after this commit */ int isCommit, /* True if this is a commit */ int sync_flags /* Flags to pass to OsSync() (or 0) */ ){ int rc; /* Used to catch return codes */ u32 iFrame; /* Next frame address */ PgHdr *p; /* Iterator to run through pList with. 
*/ PgHdr *pLast = 0; /* Last frame in list */ int nExtra = 0; /* Number of extra copies of last page */ int szFrame; /* The size of a single frame */ i64 iOffset; /* Next byte to write in WAL file */ WalWriter w; /* The writer */ assert( pList ); assert( pWal->writeLock ); /* If this frame set completes a transaction, then nTruncate>0. If ** nTruncate==0 then this frame set does not complete the transaction. */ assert( (isCommit!=0)==(nTruncate!=0) ); #if defined(SQLITE_TEST) && defined(SQLITE_DEBUG) { int cnt; for(cnt=0, p=pList; p; p=p->pDirty, cnt++){} WALTRACE(("WAL%p: frame write begin. %d frames. mxFrame=%d. %s\n", pWal, cnt, pWal->hdr.mxFrame, isCommit ? "Commit" : "Spill")); } #endif /* See if it is possible to write these frames into the start of the ** log file, instead of appending to it at pWal->hdr.mxFrame. */ if( SQLITE_OK!=(rc = walRestartLog(pWal)) ){ return rc; } /* If this is the first frame written into the log, write the WAL ** header to the start of the WAL file. See comments at the top of ** this source file for a description of the WAL header format. */ iFrame = pWal->hdr.mxFrame; if( iFrame==0 ){ u8 aWalHdr[WAL_HDRSIZE]; /* Buffer to assemble wal-header in */ u32 aCksum[2]; /* Checksum for wal-header */ sqlite3Put4byte(&aWalHdr[0], (WAL_MAGIC | SQLITE_BIGENDIAN)); sqlite3Put4byte(&aWalHdr[4], WAL_MAX_VERSION); sqlite3Put4byte(&aWalHdr[8], szPage); sqlite3Put4byte(&aWalHdr[12], pWal->nCkpt); if( pWal->nCkpt==0 ) sqlite3_randomness(8, pWal->hdr.aSalt); memcpy(&aWalHdr[16], pWal->hdr.aSalt, 8); walChecksumBytes(1, aWalHdr, WAL_HDRSIZE-2*4, 0, aCksum); sqlite3Put4byte(&aWalHdr[24], aCksum[0]); sqlite3Put4byte(&aWalHdr[28], aCksum[1]); pWal->szPage = szPage; pWal->hdr.bigEndCksum = SQLITE_BIGENDIAN; pWal->hdr.aFrameCksum[0] = aCksum[0]; pWal->hdr.aFrameCksum[1] = aCksum[1]; pWal->truncateOnCommit = 1; rc = sqlite3OsWrite(pWal->pWalFd, aWalHdr, sizeof(aWalHdr), 0); WALTRACE(("WAL%p: wal-header write %s\n", pWal, rc ? "failed" : "ok")); if( rc!=SQLITE_OK ){ return rc; } /* Sync the header (unless SQLITE_IOCAP_SEQUENTIAL is true or unless ** all syncing is turned off by PRAGMA synchronous=OFF). Otherwise ** an out-of-order write following a WAL restart could result in ** database corruption. See the ticket: ** ** http://localhost:591/sqlite/info/ff5be73dee */ if( pWal->syncHeader && sync_flags ){ rc = sqlite3OsSync(pWal->pWalFd, sync_flags & SQLITE_SYNC_MASK); if( rc ) return rc; } } assert( (int)pWal->szPage==szPage ); /* Setup information needed to write frames into the WAL */ w.pWal = pWal; w.pFd = pWal->pWalFd; w.iSyncPoint = 0; w.syncFlags = sync_flags; w.szPage = szPage; iOffset = walFrameOffset(iFrame+1, szPage); szFrame = szPage + WAL_FRAME_HDRSIZE; /* Write all frames into the log file exactly once */ for(p=pList; p; p=p->pDirty){ int nDbSize; /* 0 normally. Positive == commit flag */ iFrame++; assert( iOffset==walFrameOffset(iFrame, szPage) ); nDbSize = (isCommit && p->pDirty==0) ? nTruncate : 0; rc = walWriteOneFrame(&w, p, nDbSize, iOffset); if( rc ) return rc; pLast = p; iOffset += szFrame; } /* If this is the end of a transaction, then we might need to pad ** the transaction and/or sync the WAL file. ** ** Padding and syncing only occur if this set of frames complete a ** transaction and if PRAGMA synchronous=FULL. If synchronous==NORMAL ** or synchronous==OFF, then no padding or syncing are needed. ** ** If SQLITE_IOCAP_POWERSAFE_OVERWRITE is defined, then padding is not ** needed and only the sync is done. 
If padding is needed, then the ** final frame is repeated (with its commit mark) until the next sector ** boundary is crossed. Only the part of the WAL prior to the last ** sector boundary is synced; the part of the last frame that extends ** past the sector boundary is written after the sync. */ if( isCommit && (sync_flags & WAL_SYNC_TRANSACTIONS)!=0 ){ if( pWal->padToSectorBoundary ){ int sectorSize = sqlite3SectorSize(pWal->pWalFd); w.iSyncPoint = ((iOffset+sectorSize-1)/sectorSize)*sectorSize; while( iOffsettruncateOnCommit && pWal->mxWalSize>=0 ){ i64 sz = pWal->mxWalSize; if( walFrameOffset(iFrame+nExtra+1, szPage)>pWal->mxWalSize ){ sz = walFrameOffset(iFrame+nExtra+1, szPage); } walLimitSize(pWal, sz); pWal->truncateOnCommit = 0; } /* Append data to the wal-index. It is not necessary to lock the ** wal-index to do this as the SQLITE_SHM_WRITE lock held on the wal-index ** guarantees that there are no other writers, and no data that may ** be in use by existing readers is being overwritten. */ iFrame = pWal->hdr.mxFrame; for(p=pList; p && rc==SQLITE_OK; p=p->pDirty){ iFrame++; rc = walIndexAppend(pWal, iFrame, p->pgno); } while( rc==SQLITE_OK && nExtra>0 ){ iFrame++; nExtra--; rc = walIndexAppend(pWal, iFrame, pLast->pgno); } if( rc==SQLITE_OK ){ /* Update the private copy of the header. */ pWal->hdr.szPage = (u16)((szPage&0xff00) | (szPage>>16)); testcase( szPage<=32768 ); testcase( szPage>=65536 ); pWal->hdr.mxFrame = iFrame; if( isCommit ){ pWal->hdr.iChange++; pWal->hdr.nPage = nTruncate; } /* If this is a commit, update the wal-index header too. */ if( isCommit ){ walIndexWriteHdr(pWal); pWal->iCallback = iFrame; } } WALTRACE(("WAL%p: frame write %s\n", pWal, rc ? "failed" : "ok")); return rc; } /* ** This routine is called to implement sqlite3_wal_checkpoint() and ** related interfaces. ** ** Obtain a CHECKPOINT lock and then backfill as much information as ** we can from WAL into the database. ** ** If parameter xBusy is not NULL, it is a pointer to a busy-handler ** callback. In this case this function runs a blocking checkpoint. */ SQLITE_PRIVATE int sqlite3WalCheckpoint( Wal *pWal, /* Wal connection */ int eMode, /* PASSIVE, FULL, RESTART, or TRUNCATE */ int (*xBusy)(void*), /* Function to call when busy */ void *pBusyArg, /* Context argument for xBusyHandler */ int sync_flags, /* Flags to sync db file with (or 0) */ int nBuf, /* Size of temporary buffer */ u8 *zBuf, /* Temporary buffer to use */ int *pnLog, /* OUT: Number of frames in WAL */ int *pnCkpt /* OUT: Number of backfilled frames in WAL */ ){ int rc; /* Return code */ int isChanged = 0; /* True if a new wal-index header is loaded */ int eMode2 = eMode; /* Mode to pass to walCheckpoint() */ int (*xBusy2)(void*) = xBusy; /* Busy handler for eMode2 */ assert( pWal->ckptLock==0 ); assert( pWal->writeLock==0 ); /* EVIDENCE-OF: R-62920-47450 The busy-handler callback is never invoked ** in the SQLITE_CHECKPOINT_PASSIVE mode. */ assert( eMode!=SQLITE_CHECKPOINT_PASSIVE || xBusy==0 ); if( pWal->readOnly ) return SQLITE_READONLY; WALTRACE(("WAL%p: checkpoint begins\n", pWal)); /* IMPLEMENTATION-OF: R-62028-47212 All calls obtain an exclusive ** "checkpoint" lock on the database file. */ rc = walLockExclusive(pWal, WAL_CKPT_LOCK, 1, 0); if( rc ){ /* EVIDENCE-OF: R-10421-19736 If any other process is running a ** checkpoint operation at the same time, the lock cannot be obtained and ** SQLITE_BUSY is returned. ** EVIDENCE-OF: R-53820-33897 Even if there is a busy-handler configured, ** it will not be invoked in this case. 
*/ testcase( rc==SQLITE_BUSY ); testcase( xBusy!=0 ); return rc; } pWal->ckptLock = 1; /* IMPLEMENTATION-OF: R-59782-36818 The SQLITE_CHECKPOINT_FULL, RESTART and ** TRUNCATE modes also obtain the exclusive "writer" lock on the database ** file. ** ** EVIDENCE-OF: R-60642-04082 If the writer lock cannot be obtained ** immediately, and a busy-handler is configured, it is invoked and the ** writer lock retried until either the busy-handler returns 0 or the ** lock is successfully obtained. */ if( eMode!=SQLITE_CHECKPOINT_PASSIVE ){ rc = walBusyLock(pWal, xBusy, pBusyArg, WAL_WRITE_LOCK, 1); if( rc==SQLITE_OK ){ pWal->writeLock = 1; }else if( rc==SQLITE_BUSY ){ eMode2 = SQLITE_CHECKPOINT_PASSIVE; xBusy2 = 0; rc = SQLITE_OK; } } /* Read the wal-index header. */ if( rc==SQLITE_OK ){ rc = walIndexReadHdr(pWal, &isChanged); if( isChanged && pWal->pDbFd->pMethods->iVersion>=3 ){ sqlite3OsUnfetch(pWal->pDbFd, 0, 0); } } /* Copy data from the log to the database file. */ if( rc==SQLITE_OK ){ if( pWal->hdr.mxFrame && walPagesize(pWal)!=nBuf ){ rc = SQLITE_CORRUPT_BKPT; }else{ rc = walCheckpoint(pWal, eMode2, xBusy2, pBusyArg, sync_flags, zBuf); } /* If no error occurred, set the output variables. */ if( rc==SQLITE_OK || rc==SQLITE_BUSY ){ if( pnLog ) *pnLog = (int)pWal->hdr.mxFrame; if( pnCkpt ) *pnCkpt = (int)(walCkptInfo(pWal)->nBackfill); } } if( isChanged ){ /* If a new wal-index header was loaded before the checkpoint was ** performed, then the pager-cache associated with pWal is now ** out of date. So zero the cached wal-index header to ensure that ** next time the pager opens a snapshot on this database it knows that ** the cache needs to be reset. */ memset(&pWal->hdr, 0, sizeof(WalIndexHdr)); } /* Release the locks. */ sqlite3WalEndWriteTransaction(pWal); walUnlockExclusive(pWal, WAL_CKPT_LOCK, 1); pWal->ckptLock = 0; WALTRACE(("WAL%p: checkpoint %s\n", pWal, rc ? "failed" : "ok")); return (rc==SQLITE_OK && eMode!=eMode2 ? SQLITE_BUSY : rc); } /* Return the value to pass to a sqlite3_wal_hook callback, the ** number of frames in the WAL at the point of the last commit since ** sqlite3WalCallback() was called. If no commits have occurred since ** the last call, then return 0. */ SQLITE_PRIVATE int sqlite3WalCallback(Wal *pWal){ u32 ret = 0; if( pWal ){ ret = pWal->iCallback; pWal->iCallback = 0; } return (int)ret; } /* ** This function is called to change the WAL subsystem into or out ** of locking_mode=EXCLUSIVE. ** ** If op is zero, then attempt to change from locking_mode=EXCLUSIVE ** into locking_mode=NORMAL. This means that we must acquire a lock ** on the pWal->readLock byte. If the WAL is already in locking_mode=NORMAL ** or if the acquisition of the lock fails, then return 0. If the ** transition out of exclusive-mode is successful, return 1. This ** operation must occur while the pager is still holding the exclusive ** lock on the main database file. ** ** If op is one, then change from locking_mode=NORMAL into ** locking_mode=EXCLUSIVE. This means that the pWal->readLock must ** be released. Return 1 if the transition is made and 0 if the ** WAL is already in exclusive-locking mode - meaning that this ** routine is a no-op. The pager must already hold the exclusive lock ** on the main database file before invoking this operation. ** ** If op is negative, then do a dry-run of the op==1 case but do ** not actually change anything. The pager uses this to see if it ** should acquire the database exclusive lock prior to invoking ** the op==1 case. 
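**
** (Summary of the three cases, restated here for quick reference; the
** code below is authoritative:
**
**     op==0   EXCLUSIVE -> NORMAL.  Re-acquire the shared read-lock;
**             return 1 on success, 0 if the lock cannot be obtained or
**             the connection was already in NORMAL mode.
**     op==1   NORMAL -> EXCLUSIVE.  Drop the shared read-lock and mark
**             the connection exclusive.
**     op<0    Dry run.  Return 1 if the connection is still in NORMAL
**             mode (so an op==1 transition would do real work), or 0 if
**             it is already exclusive.  No state is changed.)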
*/ SQLITE_PRIVATE int sqlite3WalExclusiveMode(Wal *pWal, int op){ int rc; assert( pWal->writeLock==0 ); assert( pWal->exclusiveMode!=WAL_HEAPMEMORY_MODE || op==-1 ); /* pWal->readLock is usually set, but might be -1 if there was a ** prior error while attempting to acquire are read-lock. This cannot ** happen if the connection is actually in exclusive mode (as no xShmLock ** locks are taken in this case). Nor should the pager attempt to ** upgrade to exclusive-mode following such an error. */ assert( pWal->readLock>=0 || pWal->lockError ); assert( pWal->readLock>=0 || (op<=0 && pWal->exclusiveMode==0) ); if( op==0 ){ if( pWal->exclusiveMode ){ pWal->exclusiveMode = 0; if( walLockShared(pWal, WAL_READ_LOCK(pWal->readLock))!=SQLITE_OK ){ pWal->exclusiveMode = 1; } rc = pWal->exclusiveMode==0; }else{ /* Already in locking_mode=NORMAL */ rc = 0; } }else if( op>0 ){ assert( pWal->exclusiveMode==0 ); assert( pWal->readLock>=0 ); walUnlockShared(pWal, WAL_READ_LOCK(pWal->readLock)); pWal->exclusiveMode = 1; rc = 1; }else{ rc = pWal->exclusiveMode==0; } return rc; } /* ** Return true if the argument is non-NULL and the WAL module is using ** heap-memory for the wal-index. Otherwise, if the argument is NULL or the ** WAL module is using shared-memory, return false. */ SQLITE_PRIVATE int sqlite3WalHeapMemory(Wal *pWal){ return (pWal && pWal->exclusiveMode==WAL_HEAPMEMORY_MODE ); } #ifdef SQLITE_ENABLE_ZIPVFS /* ** If the argument is not NULL, it points to a Wal object that holds a ** read-lock. This function returns the database page-size if it is known, ** or zero if it is not (or if pWal is NULL). */ SQLITE_PRIVATE int sqlite3WalFramesize(Wal *pWal){ assert( pWal==0 || pWal->readLock>=0 ); return (pWal ? pWal->szPage : 0); } #endif #endif /* #ifndef SQLITE_OMIT_WAL */ /************** End of wal.c *************************************************/ /************** Begin file btmutex.c *****************************************/ /* ** 2007 August 27 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** ** This file contains code used to implement mutexes on Btree objects. ** This code really belongs in btree.c. But btree.c is getting too ** big and we want to break it down some. This packaged seemed like ** a good breakout. */ /************** Include btreeInt.h in the middle of btmutex.c ****************/ /************** Begin file btreeInt.h ****************************************/ /* ** 2004 April 6 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** This file implements an external (disk-based) database using BTrees. ** For a detailed discussion of BTrees, refer to ** ** Donald E. Knuth, THE ART OF COMPUTER PROGRAMMING, Volume 3: ** "Sorting And Searching", pages 473-480. Addison-Wesley ** Publishing Company, Reading, Massachusetts. ** ** The basic idea is that each page of the file contains N database ** entries and N+1 pointers to subpages. 
** ** ---------------------------------------------------------------- ** | Ptr(0) | Key(0) | Ptr(1) | Key(1) | ... | Key(N-1) | Ptr(N) | ** ---------------------------------------------------------------- ** ** All of the keys on the page that Ptr(0) points to have values less ** than Key(0). All of the keys on page Ptr(1) and its subpages have ** values greater than Key(0) and less than Key(1). All of the keys ** on Ptr(N) and its subpages have values greater than Key(N-1). And ** so forth. ** ** Finding a particular key requires reading O(log(M)) pages from the ** disk where M is the number of entries in the tree. ** ** In this implementation, a single file can hold one or more separate ** BTrees. Each BTree is identified by the index of its root page. The ** key and data for any entry are combined to form the "payload". A ** fixed amount of payload can be carried directly on the database ** page. If the payload is larger than the preset amount then surplus ** bytes are stored on overflow pages. The payload for an entry ** and the preceding pointer are combined to form a "Cell". Each ** page has a small header which contains the Ptr(N) pointer and other ** information such as the size of key and data. ** ** FORMAT DETAILS ** ** The file is divided into pages. The first page is called page 1, ** the second is page 2, and so forth. A page number of zero indicates ** "no such page". The page size can be any power of 2 between 512 and 65536. ** Each page can be either a btree page, a freelist page, an overflow ** page, or a pointer-map page. ** ** The first page is always a btree page. The first 100 bytes of the first ** page contain a special header (the "file header") that describes the file. ** The format of the file header is as follows: ** ** OFFSET SIZE DESCRIPTION ** 0 16 Header string: "SQLite format 3\000" ** 16 2 Page size in bytes. (1 means 65536) ** 18 1 File format write version ** 19 1 File format read version ** 20 1 Bytes of unused space at the end of each page ** 21 1 Max embedded payload fraction (must be 64) ** 22 1 Min embedded payload fraction (must be 32) ** 23 1 Min leaf payload fraction (must be 32) ** 24 4 File change counter ** 28 4 Reserved for future use ** 32 4 First freelist page ** 36 4 Number of freelist pages in the file ** 40 60 15 4-byte meta values passed to higher layers ** ** 40 4 Schema cookie ** 44 4 File format of schema layer ** 48 4 Size of page cache ** 52 4 Largest root-page (auto/incr_vacuum) ** 56 4 1=UTF-8 2=UTF16le 3=UTF16be ** 60 4 User version ** 64 4 Incremental vacuum mode ** 68 4 Application-ID ** 72 20 unused ** 92 4 The version-valid-for number ** 96 4 SQLITE_VERSION_NUMBER ** ** All of the integer values are big-endian (most significant byte first). ** ** The file change counter is incremented when the database is changed ** This counter allows other processes to know when the file has changed ** and thus when they need to flush their cache. ** ** The max embedded payload fraction is the amount of the total usable ** space in a page that can be consumed by a single cell for standard ** B-tree (non-LEAFDATA) tables. A value of 255 means 100%. The default ** is to limit the maximum cell size so that at least 4 cells will fit ** on one page. Thus the default max embedded payload fraction is 64. ** ** If the payload for a cell is larger than the max payload, then extra ** payload is spilled to overflow pages. 
Once an overflow page is allocated, ** as many bytes as possible are moved into the overflow pages without letting ** the cell size drop below the min embedded payload fraction. ** ** The min leaf payload fraction is like the min embedded payload fraction ** except that it applies to leaf nodes in a LEAFDATA tree. The maximum ** payload fraction for a LEAFDATA tree is always 100% (or 255) and it ** not specified in the header. ** ** Each btree pages is divided into three sections: The header, the ** cell pointer array, and the cell content area. Page 1 also has a 100-byte ** file header that occurs before the page header. ** ** |----------------| ** | file header | 100 bytes. Page 1 only. ** |----------------| ** | page header | 8 bytes for leaves. 12 bytes for interior nodes ** |----------------| ** | cell pointer | | 2 bytes per cell. Sorted order. ** | array | | Grows downward ** | | v ** |----------------| ** | unallocated | ** | space | ** |----------------| ^ Grows upwards ** | cell content | | Arbitrary order interspersed with freeblocks. ** | area | | and free space fragments. ** |----------------| ** ** The page headers looks like this: ** ** OFFSET SIZE DESCRIPTION ** 0 1 Flags. 1: intkey, 2: zerodata, 4: leafdata, 8: leaf ** 1 2 byte offset to the first freeblock ** 3 2 number of cells on this page ** 5 2 first byte of the cell content area ** 7 1 number of fragmented free bytes ** 8 4 Right child (the Ptr(N) value). Omitted on leaves. ** ** The flags define the format of this btree page. The leaf flag means that ** this page has no children. The zerodata flag means that this page carries ** only keys and no data. The intkey flag means that the key is an integer ** which is stored in the key size entry of the cell header rather than in ** the payload area. ** ** The cell pointer array begins on the first byte after the page header. ** The cell pointer array contains zero or more 2-byte numbers which are ** offsets from the beginning of the page to the cell content in the cell ** content area. The cell pointers occur in sorted order. The system strives ** to keep free space after the last cell pointer so that new cells can ** be easily added without having to defragment the page. ** ** Cell content is stored at the very end of the page and grows toward the ** beginning of the page. ** ** Unused space within the cell content area is collected into a linked list of ** freeblocks. Each freeblock is at least 4 bytes in size. The byte offset ** to the first freeblock is given in the header. Freeblocks occur in ** increasing order. Because a freeblock must be at least 4 bytes in size, ** any group of 3 or fewer unused bytes in the cell content area cannot ** exist on the freeblock chain. A group of 3 or fewer free bytes is called ** a fragment. The total number of bytes in all fragments is recorded. ** in the page header at offset 7. ** ** SIZE DESCRIPTION ** 2 Byte offset of the next freeblock ** 2 Bytes in this freeblock ** ** Cells are of variable length. Cells are stored in the cell content area at ** the end of the page. Pointers to the cells are in the cell pointer array ** that immediately follows the page header. Cells is not necessarily ** contiguous or in order, but cell pointers are contiguous and in order. ** ** Cell content makes use of variable length integers. A variable ** length integer is 1 to 9 bytes where the lower 7 bits of each ** byte are used. The integer consists of all bytes that have bit 8 set and ** the first byte with bit 8 clear. 
The most significant byte of the integer ** appears first. A variable-length integer may not be more than 9 bytes long. ** As a special case, all 8 bytes of the 9th byte are used as data. This ** allows a 64-bit integer to be encoded in 9 bytes. ** ** 0x00 becomes 0x00000000 ** 0x7f becomes 0x0000007f ** 0x81 0x00 becomes 0x00000080 ** 0x82 0x00 becomes 0x00000100 ** 0x80 0x7f becomes 0x0000007f ** 0x8a 0x91 0xd1 0xac 0x78 becomes 0x12345678 ** 0x81 0x81 0x81 0x81 0x01 becomes 0x10204081 ** ** Variable length integers are used for rowids and to hold the number of ** bytes of key and data in a btree cell. ** ** The content of a cell looks like this: ** ** SIZE DESCRIPTION ** 4 Page number of the left child. Omitted if leaf flag is set. ** var Number of bytes of data. Omitted if the zerodata flag is set. ** var Number of bytes of key. Or the key itself if intkey flag is set. ** * Payload ** 4 First page of the overflow chain. Omitted if no overflow ** ** Overflow pages form a linked list. Each page except the last is completely ** filled with data (pagesize - 4 bytes). The last page can have as little ** as 1 byte of data. ** ** SIZE DESCRIPTION ** 4 Page number of next overflow page ** * Data ** ** Freelist pages come in two subtypes: trunk pages and leaf pages. The ** file header points to the first in a linked list of trunk page. Each trunk ** page points to multiple leaf pages. The content of a leaf page is ** unspecified. A trunk page looks like this: ** ** SIZE DESCRIPTION ** 4 Page number of next trunk page ** 4 Number of leaf pointers on this page ** * zero or more pages numbers of leaves */ /* #include "sqliteInt.h" */ /* The following value is the maximum cell size assuming a maximum page ** size give above. */ #define MX_CELL_SIZE(pBt) ((int)(pBt->pageSize-8)) /* The maximum number of cells on a single page of the database. This ** assumes a minimum cell size of 6 bytes (4 bytes for the cell itself ** plus 2 bytes for the index to the cell in the page header). Such ** small cells will be rare, but they are possible. */ #define MX_CELL(pBt) ((pBt->pageSize-8)/6) /* Forward declarations */ typedef struct MemPage MemPage; typedef struct BtLock BtLock; typedef struct CellInfo CellInfo; /* ** This is a magic string that appears at the beginning of every ** SQLite database in order to identify the file as a real database. ** ** You can change this value at compile-time by specifying a ** -DSQLITE_FILE_HEADER="..." on the compiler command-line. The ** header must be exactly 16 bytes including the zero-terminator so ** the string itself should be 15 characters long. If you change ** the header, then your custom library will not be able to read ** databases generated by the standard tools and the standard tools ** will not be able to read databases created by your custom library. */ #ifndef SQLITE_FILE_HEADER /* 123456789 123456 */ # define SQLITE_FILE_HEADER "SQLite format 3" #endif /* ** Page type flags. An ORed combination of these flags appear as the ** first byte of on-disk image of every BTree page. */ #define PTF_INTKEY 0x01 #define PTF_ZERODATA 0x02 #define PTF_LEAFDATA 0x04 #define PTF_LEAF 0x08 /* ** As each page of the file is loaded into memory, an instance of the following ** structure is appended and initialized to zero. This structure stores ** information about the page that is decoded from the raw file page. ** ** The pParent field points back to the parent page. This allows us to ** walk up the BTree from any leaf to the root. 
Care must be taken to ** unref() the parent page pointer when this page is no longer referenced. ** The pageDestructor() routine handles that chore. ** ** Access to all fields of this structure is controlled by the mutex ** stored in MemPage.pBt->mutex. */ struct MemPage { u8 isInit; /* True if previously initialized. MUST BE FIRST! */ u8 nOverflow; /* Number of overflow cell bodies in aCell[] */ u8 intKey; /* True if table b-trees. False for index b-trees */ u8 intKeyLeaf; /* True if the leaf of an intKey table */ u8 noPayload; /* True if internal intKey page (thus w/o data) */ u8 leaf; /* True if a leaf page */ u8 hdrOffset; /* 100 for page 1. 0 otherwise */ u8 childPtrSize; /* 0 if leaf==1. 4 if leaf==0 */ u8 max1bytePayload; /* min(maxLocal,127) */ u8 bBusy; /* Prevent endless loops on corrupt database files */ u16 maxLocal; /* Copy of BtShared.maxLocal or BtShared.maxLeaf */ u16 minLocal; /* Copy of BtShared.minLocal or BtShared.minLeaf */ u16 cellOffset; /* Index in aData of first cell pointer */ u16 nFree; /* Number of free bytes on the page */ u16 nCell; /* Number of cells on this page, local and ovfl */ u16 maskPage; /* Mask for page offset */ u16 aiOvfl[5]; /* Insert the i-th overflow cell before the aiOvfl-th ** non-overflow cell */ u8 *apOvfl[5]; /* Pointers to the body of overflow cells */ BtShared *pBt; /* Pointer to BtShared that this page is part of */ u8 *aData; /* Pointer to disk image of the page data */ u8 *aDataEnd; /* One byte past the end of usable data */ u8 *aCellIdx; /* The cell index area */ u8 *aDataOfst; /* Same as aData for leaves. aData+4 for interior */ DbPage *pDbPage; /* Pager page handle */ u16 (*xCellSize)(MemPage*,u8*); /* cellSizePtr method */ void (*xParseCell)(MemPage*,u8*,CellInfo*); /* btreeParseCell method */ Pgno pgno; /* Page number for this page */ }; /* ** The in-memory image of a disk page has the auxiliary information appended ** to the end. EXTRA_SIZE is the number of bytes of space needed to hold ** that extra information. */ #define EXTRA_SIZE sizeof(MemPage) /* ** A linked list of the following structures is stored at BtShared.pLock. ** Locks are added (or upgraded from READ_LOCK to WRITE_LOCK) when a cursor ** is opened on the table with root page BtShared.iTable. Locks are removed ** from this list when a transaction is committed or rolled back, or when ** a btree handle is closed. */ struct BtLock { Btree *pBtree; /* Btree handle holding this lock */ Pgno iTable; /* Root page of table */ u8 eLock; /* READ_LOCK or WRITE_LOCK */ BtLock *pNext; /* Next in BtShared.pLock list */ }; /* Candidate values for BtLock.eLock */ #define READ_LOCK 1 #define WRITE_LOCK 2 /* A Btree handle ** ** A database connection contains a pointer to an instance of ** this object for every database file that it has open. This structure ** is opaque to the database connection. The database connection cannot ** see the internals of this structure and only deals with pointers to ** this structure. ** ** For some database files, the same underlying database cache might be ** shared between multiple connections. In that case, each connection ** has it own instance of this object. But each instance of this object ** points to the same BtShared object. The database cache and the ** schema associated with the database file are all contained within ** the BtShared object. ** ** All fields in this structure are accessed under sqlite3.mutex. 
** The pBt pointer itself may not be changed while there exists cursors ** in the referenced BtShared that point back to this Btree since those ** cursors have to go through this Btree to find their BtShared and ** they often do so without holding sqlite3.mutex. */ struct Btree { sqlite3 *db; /* The database connection holding this btree */ BtShared *pBt; /* Sharable content of this btree */ u8 inTrans; /* TRANS_NONE, TRANS_READ or TRANS_WRITE */ u8 sharable; /* True if we can share pBt with another db */ u8 locked; /* True if db currently has pBt locked */ u8 hasIncrblobCur; /* True if there are one or more Incrblob cursors */ int wantToLock; /* Number of nested calls to sqlite3BtreeEnter() */ int nBackup; /* Number of backup operations reading this btree */ u32 iDataVersion; /* Combines with pBt->pPager->iDataVersion */ Btree *pNext; /* List of other sharable Btrees from the same db */ Btree *pPrev; /* Back pointer of the same list */ #ifndef SQLITE_OMIT_SHARED_CACHE BtLock lock; /* Object used to lock page 1 */ #endif }; /* ** Btree.inTrans may take one of the following values. ** ** If the shared-data extension is enabled, there may be multiple users ** of the Btree structure. At most one of these may open a write transaction, ** but any number may have active read transactions. */ #define TRANS_NONE 0 #define TRANS_READ 1 #define TRANS_WRITE 2 /* ** An instance of this object represents a single database file. ** ** A single database file can be in use at the same time by two ** or more database connections. When two or more connections are ** sharing the same database file, each connection has it own ** private Btree object for the file and each of those Btrees points ** to this one BtShared object. BtShared.nRef is the number of ** connections currently sharing this database file. ** ** Fields in this structure are accessed under the BtShared.mutex ** mutex, except for nRef and pNext which are accessed under the ** global SQLITE_MUTEX_STATIC_MASTER mutex. The pPager field ** may not be modified once it is initially set as long as nRef>0. ** The pSchema field may be set once under BtShared.mutex and ** thereafter is unchanged as long as nRef>0. ** ** isPending: ** ** If a BtShared client fails to obtain a write-lock on a database ** table (because there exists one or more read-locks on the table), ** the shared-cache enters 'pending-lock' state and isPending is ** set to true. ** ** The shared-cache leaves the 'pending lock' state when either of ** the following occur: ** ** 1) The current writer (BtShared.pWriter) concludes its transaction, OR ** 2) The number of locks held by other connections drops to zero. ** ** while in the 'pending-lock' state, no connection may start a new ** transaction. ** ** This feature is included to help prevent writer-starvation. */ struct BtShared { Pager *pPager; /* The page cache */ sqlite3 *db; /* Database connection currently using this Btree */ BtCursor *pCursor; /* A list of all open cursors */ MemPage *pPage1; /* First page of the database */ u8 openFlags; /* Flags to sqlite3BtreeOpen() */ #ifndef SQLITE_OMIT_AUTOVACUUM u8 autoVacuum; /* True if auto-vacuum is enabled */ u8 incrVacuum; /* True if incr-vacuum is enabled */ u8 bDoTruncate; /* True to truncate db on commit */ #endif u8 inTransaction; /* Transaction state */ u8 max1bytePayload; /* Maximum first byte of cell for a 1-byte payload */ #ifdef SQLITE_HAS_CODEC u8 optimalReserve; /* Desired amount of reserved space per page */ #endif u16 btsFlags; /* Boolean parameters. 
See BTS_* macros below */ u16 maxLocal; /* Maximum local payload in non-LEAFDATA tables */ u16 minLocal; /* Minimum local payload in non-LEAFDATA tables */ u16 maxLeaf; /* Maximum local payload in a LEAFDATA table */ u16 minLeaf; /* Minimum local payload in a LEAFDATA table */ u32 pageSize; /* Total number of bytes on a page */ u32 usableSize; /* Number of usable bytes on each page */ int nTransaction; /* Number of open transactions (read + write) */ u32 nPage; /* Number of pages in the database */ void *pSchema; /* Pointer to space allocated by sqlite3BtreeSchema() */ void (*xFreeSchema)(void*); /* Destructor for BtShared.pSchema */ sqlite3_mutex *mutex; /* Non-recursive mutex required to access this object */ Bitvec *pHasContent; /* Set of pages moved to free-list this transaction */ #ifndef SQLITE_OMIT_SHARED_CACHE int nRef; /* Number of references to this structure */ BtShared *pNext; /* Next on a list of sharable BtShared structs */ BtLock *pLock; /* List of locks held on this shared-btree struct */ Btree *pWriter; /* Btree with currently open write transaction */ #endif u8 *pTmpSpace; /* Temp space sufficient to hold a single cell */ }; /* ** Allowed values for BtShared.btsFlags */ #define BTS_READ_ONLY 0x0001 /* Underlying file is readonly */ #define BTS_PAGESIZE_FIXED 0x0002 /* Page size can no longer be changed */ #define BTS_SECURE_DELETE 0x0004 /* PRAGMA secure_delete is enabled */ #define BTS_INITIALLY_EMPTY 0x0008 /* Database was empty at trans start */ #define BTS_NO_WAL 0x0010 /* Do not open write-ahead-log files */ #define BTS_EXCLUSIVE 0x0020 /* pWriter has an exclusive lock */ #define BTS_PENDING 0x0040 /* Waiting for read-locks to clear */ /* ** An instance of the following structure is used to hold information ** about a cell. The parseCellPtr() function fills in this structure ** based on information extract from the raw disk page. */ struct CellInfo { i64 nKey; /* The key for INTKEY tables, or nPayload otherwise */ u8 *pPayload; /* Pointer to the start of payload */ u32 nPayload; /* Bytes of payload */ u16 nLocal; /* Amount of payload held locally, not on overflow */ u16 iOverflow; /* Offset to overflow page number. Zero if no overflow */ u16 nSize; /* Size of the cell content on the main b-tree page */ }; /* ** Maximum depth of an SQLite B-Tree structure. Any B-Tree deeper than ** this will be declared corrupt. This value is calculated based on a ** maximum database size of 2^31 pages a minimum fanout of 2 for a ** root-node and 3 for all other internal nodes. ** ** If a tree that appears to be taller than this is encountered, it is ** assumed that the database is corrupt. */ #define BTCURSOR_MAX_DEPTH 20 /* ** A cursor is a pointer to a particular entry within a particular ** b-tree within a database file. ** ** The entry is identified by its MemPage and the index in ** MemPage.aCell[] of the entry. ** ** A single database file can be shared by two more database connections, ** but cursors cannot be shared. Each cursor is associated with a ** particular database connection identified BtCursor.pBtree.db. ** ** Fields in this structure are accessed under the BtShared.mutex ** found at self->pBt->mutex. ** ** skipNext meaning: ** eState==SKIPNEXT && skipNext>0: Next sqlite3BtreeNext() is no-op. ** eState==SKIPNEXT && skipNext<0: Next sqlite3BtreePrevious() is no-op. ** eState==FAULT: Cursor fault with skipNext as error code. 
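**
** Editorial illustration (not part of the upstream comment): a caller that
** suspects its cursor has been moved or invalidated (see the CURSOR_*
** states documented below) typically re-seeks it like this, where pCur is
** a BtCursor*, rc an int, and isDifferentRow a hypothetical local;
** sqlite3BtreeCursorHasMoved() and sqlite3BtreeCursorRestore() are defined
** later in this file:
**
**     if( sqlite3BtreeCursorHasMoved(pCur) ){
**       int isDifferentRow;
**       rc = sqlite3BtreeCursorRestore(pCur, &isDifferentRow);
**       if( rc==SQLITE_OK && isDifferentRow ){
**         ...the saved row is gone; pCur now points at a nearby row...
**       }
**     }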
*/ struct BtCursor { Btree *pBtree; /* The Btree to which this cursor belongs */ BtShared *pBt; /* The BtShared this cursor points to */ BtCursor *pNext; /* Forms a linked list of all cursors */ Pgno *aOverflow; /* Cache of overflow page locations */ CellInfo info; /* A parse of the cell we are pointing at */ i64 nKey; /* Size of pKey, or last integer key */ void *pKey; /* Saved key that was cursor last known position */ Pgno pgnoRoot; /* The root page of this tree */ int nOvflAlloc; /* Allocated size of aOverflow[] array */ int skipNext; /* Prev() is noop if negative. Next() is noop if positive. ** Error code if eState==CURSOR_FAULT */ u8 curFlags; /* zero or more BTCF_* flags defined below */ u8 curPagerFlags; /* Flags to send to sqlite3PagerAcquire() */ u8 eState; /* One of the CURSOR_XXX constants (see below) */ u8 hints; /* As configured by CursorSetHints() */ /* All fields above are zeroed when the cursor is allocated. See ** sqlite3BtreeCursorZero(). Fields that follow must be manually ** initialized. */ i8 iPage; /* Index of current page in apPage */ u8 curIntKey; /* Value of apPage[0]->intKey */ struct KeyInfo *pKeyInfo; /* Argument passed to comparison function */ void *padding1; /* Make object size a multiple of 16 */ u16 aiIdx[BTCURSOR_MAX_DEPTH]; /* Current index in apPage[i] */ MemPage *apPage[BTCURSOR_MAX_DEPTH]; /* Pages from root to current page */ }; /* ** Legal values for BtCursor.curFlags */ #define BTCF_WriteFlag 0x01 /* True if a write cursor */ #define BTCF_ValidNKey 0x02 /* True if info.nKey is valid */ #define BTCF_ValidOvfl 0x04 /* True if aOverflow is valid */ #define BTCF_AtLast 0x08 /* Cursor is pointing ot the last entry */ #define BTCF_Incrblob 0x10 /* True if an incremental I/O handle */ #define BTCF_Multiple 0x20 /* Maybe another cursor on the same btree */ /* ** Potential values for BtCursor.eState. ** ** CURSOR_INVALID: ** Cursor does not point to a valid entry. This can happen (for example) ** because the table is empty or because BtreeCursorFirst() has not been ** called. ** ** CURSOR_VALID: ** Cursor points to a valid entry. getPayload() etc. may be called. ** ** CURSOR_SKIPNEXT: ** Cursor is valid except that the Cursor.skipNext field is non-zero ** indicating that the next sqlite3BtreeNext() or sqlite3BtreePrevious() ** operation should be a no-op. ** ** CURSOR_REQUIRESEEK: ** The table that this cursor was opened on still exists, but has been ** modified since the cursor was last used. The cursor position is saved ** in variables BtCursor.pKey and BtCursor.nKey. When a cursor is in ** this state, restoreCursorPosition() can be called to attempt to ** seek the cursor to the saved position. ** ** CURSOR_FAULT: ** An unrecoverable error (an I/O error or a malloc failure) has occurred ** on a different connection that shares the BtShared cache with this ** cursor. The error has left the cache in an inconsistent state. ** Do nothing else with this cursor. Any attempt to use the cursor ** should return the error code stored in BtCursor.skipNext */ #define CURSOR_INVALID 0 #define CURSOR_VALID 1 #define CURSOR_SKIPNEXT 2 #define CURSOR_REQUIRESEEK 3 #define CURSOR_FAULT 4 /* ** The database page the PENDING_BYTE occupies. This page is never used. */ # define PENDING_BYTE_PAGE(pBt) PAGER_MJ_PGNO(pBt) /* ** These macros define the location of the pointer-map entry for a ** database page. The first argument to each is the number of usable ** bytes on each page of the database (often 1024). The second is the ** page number to look up in the pointer map. 
** ** PTRMAP_PAGENO returns the database page number of the pointer-map ** page that stores the required pointer. PTRMAP_PTROFFSET returns ** the offset of the requested map entry. ** ** If the pgno argument passed to PTRMAP_PAGENO is a pointer-map page, ** then pgno is returned. So (pgno==PTRMAP_PAGENO(pgsz, pgno)) can be ** used to test if pgno is a pointer-map page. PTRMAP_ISPAGE implements ** this test. */ #define PTRMAP_PAGENO(pBt, pgno) ptrmapPageno(pBt, pgno) #define PTRMAP_PTROFFSET(pgptrmap, pgno) (5*(pgno-pgptrmap-1)) #define PTRMAP_ISPAGE(pBt, pgno) (PTRMAP_PAGENO((pBt),(pgno))==(pgno)) /* ** The pointer map is a lookup table that identifies the parent page for ** each child page in the database file. The parent page is the page that ** contains a pointer to the child. Every page in the database contains ** 0 or 1 parent pages. (In this context 'database page' refers ** to any page that is not part of the pointer map itself.) Each pointer map ** entry consists of a single byte 'type' and a 4 byte parent page number. ** The PTRMAP_XXX identifiers below are the valid types. ** ** The purpose of the pointer map is to facility moving pages from one ** position in the file to another as part of autovacuum. When a page ** is moved, the pointer in its parent must be updated to point to the ** new location. The pointer map is used to locate the parent page quickly. ** ** PTRMAP_ROOTPAGE: The database page is a root-page. The page-number is not ** used in this case. ** ** PTRMAP_FREEPAGE: The database page is an unused (free) page. The page-number ** is not used in this case. ** ** PTRMAP_OVERFLOW1: The database page is the first page in a list of ** overflow pages. The page number identifies the page that ** contains the cell with a pointer to this overflow page. ** ** PTRMAP_OVERFLOW2: The database page is the second or later page in a list of ** overflow pages. The page-number identifies the previous ** page in the overflow page list. ** ** PTRMAP_BTREE: The database page is a non-root btree page. The page number ** identifies the parent page in the btree. */ #define PTRMAP_ROOTPAGE 1 #define PTRMAP_FREEPAGE 2 #define PTRMAP_OVERFLOW1 3 #define PTRMAP_OVERFLOW2 4 #define PTRMAP_BTREE 5 /* A bunch of assert() statements to check the transaction state variables ** of handle p (type Btree*) are internally consistent. */ #define btreeIntegrity(p) \ assert( p->pBt->inTransaction!=TRANS_NONE || p->pBt->nTransaction==0 ); \ assert( p->pBt->inTransaction>=p->inTrans ); /* ** The ISAUTOVACUUM macro is used within balance_nonroot() to determine ** if the database supports auto-vacuum or not. Because it is used ** within an expression that is an argument to another macro ** (sqliteMallocRaw), it is not possible to use conditional compilation. ** So, this macro is defined instead. */ #ifndef SQLITE_OMIT_AUTOVACUUM #define ISAUTOVACUUM (pBt->autoVacuum) #else #define ISAUTOVACUUM 0 #endif /* ** This structure is passed around through all the sanity checking routines ** in order to keep track of some global state information. ** ** The aRef[] array is allocated so that there is 1 bit for each page in ** the database. As the integrity-check proceeds, for each page used in ** the database the corresponding bit is set. This allows integrity-check to ** detect pages that are used twice and orphaned pages (both of which ** indicate corruption). */ typedef struct IntegrityCk IntegrityCk; struct IntegrityCk { BtShared *pBt; /* The tree being checked out */ Pager *pPager; /* The associated pager. 
Also accessible by pBt->pPager */ u8 *aPgRef; /* 1 bit per page in the db (see above) */ Pgno nPage; /* Number of pages in the database */ int mxErr; /* Stop accumulating errors when this reaches zero */ int nErr; /* Number of messages written to zErrMsg so far */ int mallocFailed; /* A memory allocation error has occurred */ const char *zPfx; /* Error message prefix */ int v1, v2; /* Values for up to two %d fields in zPfx */ StrAccum errMsg; /* Accumulate the error message text here */ u32 *heap; /* Min-heap used for analyzing cell coverage */ }; /* ** Routines to read or write a two- and four-byte big-endian integer values. */ #define get2byte(x) ((x)[0]<<8 | (x)[1]) #define put2byte(p,v) ((p)[0] = (u8)((v)>>8), (p)[1] = (u8)(v)) #define get4byte sqlite3Get4byte #define put4byte sqlite3Put4byte /* ** get2byteAligned(), unlike get2byte(), requires that its argument point to a ** two-byte aligned address. get2bytea() is only used for accessing the ** cell addresses in a btree header. */ #if SQLITE_BYTEORDER==4321 # define get2byteAligned(x) (*(u16*)(x)) #elif SQLITE_BYTEORDER==1234 && GCC_VERSION>=4008000 # define get2byteAligned(x) __builtin_bswap16(*(u16*)(x)) #elif SQLITE_BYTEORDER==1234 && defined(_MSC_VER) && _MSC_VER>=1300 # define get2byteAligned(x) _byteswap_ushort(*(u16*)(x)) #else # define get2byteAligned(x) ((x)[0]<<8 | (x)[1]) #endif /************** End of btreeInt.h ********************************************/ /************** Continuing where we left off in btmutex.c ********************/ #ifndef SQLITE_OMIT_SHARED_CACHE #if SQLITE_THREADSAFE /* ** Obtain the BtShared mutex associated with B-Tree handle p. Also, ** set BtShared.db to the database handle associated with p and the ** p->locked boolean to true. */ static void lockBtreeMutex(Btree *p){ assert( p->locked==0 ); assert( sqlite3_mutex_notheld(p->pBt->mutex) ); assert( sqlite3_mutex_held(p->db->mutex) ); sqlite3_mutex_enter(p->pBt->mutex); p->pBt->db = p->db; p->locked = 1; } /* ** Release the BtShared mutex associated with B-Tree handle p and ** clear the p->locked boolean. */ static void SQLITE_NOINLINE unlockBtreeMutex(Btree *p){ BtShared *pBt = p->pBt; assert( p->locked==1 ); assert( sqlite3_mutex_held(pBt->mutex) ); assert( sqlite3_mutex_held(p->db->mutex) ); assert( p->db==pBt->db ); sqlite3_mutex_leave(pBt->mutex); p->locked = 0; } /* Forward reference */ static void SQLITE_NOINLINE btreeLockCarefully(Btree *p); /* ** Enter a mutex on the given BTree object. ** ** If the object is not sharable, then no mutex is ever required ** and this routine is a no-op. The underlying mutex is non-recursive. ** But we keep a reference count in Btree.wantToLock so the behavior ** of this interface is recursive. ** ** To avoid deadlocks, multiple Btrees are locked in the same order ** by all database connections. The p->pNext is a list of other ** Btrees belonging to the same database connection as the p Btree ** which need to be locked after p. If we cannot get a lock on ** p, then first unlock all of the others on p->pNext, then wait ** for the lock to become available on p, then relock all of the ** subsequent Btrees that desire a lock. */ SQLITE_PRIVATE void sqlite3BtreeEnter(Btree *p){ /* Some basic sanity checking on the Btree. The list of Btrees ** connected by pNext and pPrev should be in sorted order by ** Btree.pBt value. All elements of the list should belong to ** the same connection. Only shared Btrees are on the list. 
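**
** Editorial illustration (not part of the upstream comment): because
** Btree.wantToLock makes this interface behave recursively, nested calls
** are fine as long as every Enter is balanced by a Leave:
**
**     sqlite3BtreeEnter(p);      ...mutex acquired, wantToLock==1...
**     sqlite3BtreeEnter(p);      ...already held,   wantToLock==2...
**     sqlite3BtreeLeave(p);      ...still held,     wantToLock==1...
**     sqlite3BtreeLeave(p);      ...released,       wantToLock==0...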
  */
  assert( p->pNext==0 || p->pNext->pBt>p->pBt );
  assert( p->pPrev==0 || p->pPrev->pBt<p->pBt );
  assert( p->pNext==0 || p->pNext->db==p->db );
  assert( p->pPrev==0 || p->pPrev->db==p->db );
  assert( p->sharable || (p->pNext==0 && p->pPrev==0) );

  /* Check for locking consistency */
  assert( !p->locked || p->wantToLock>0 );
  assert( p->sharable || p->wantToLock==0 );

  /* We should already hold a lock on the database connection */
  assert( sqlite3_mutex_held(p->db->mutex) );

  /* Unless the database is sharable and unlocked, then BtShared.db
  ** should already be set correctly. */
  assert( (p->locked==0 && p->sharable) || p->pBt->db==p->db );

  if( !p->sharable ) return;
  p->wantToLock++;
  if( p->locked ) return;
  btreeLockCarefully(p);
}

/* This is a helper function for sqlite3BtreeLock(). By moving
** complex, but seldom used logic, out of sqlite3BtreeLock() and
** into this routine, we avoid unnecessary stack pointer changes
** and thus help the sqlite3BtreeLock() routine to run much faster
** in the common case.
*/
static void SQLITE_NOINLINE btreeLockCarefully(Btree *p){
  Btree *pLater;

  /* In most cases, we should be able to acquire the lock we
  ** want without having to go through the ascending lock
  ** procedure that follows. Just be sure not to block.
  */
  if( sqlite3_mutex_try(p->pBt->mutex)==SQLITE_OK ){
    p->pBt->db = p->db;
    p->locked = 1;
    return;
  }

  /* To avoid deadlock, first release all locks with a larger
  ** BtShared address. Then acquire our lock. Then reacquire
  ** the other BtShared locks that we used to hold in ascending
  ** order.
  */
  for(pLater=p->pNext; pLater; pLater=pLater->pNext){
    assert( pLater->sharable );
    assert( pLater->pNext==0 || pLater->pNext->pBt>pLater->pBt );
    assert( !pLater->locked || pLater->wantToLock>0 );
    if( pLater->locked ){
      unlockBtreeMutex(pLater);
    }
  }
  lockBtreeMutex(p);
  for(pLater=p->pNext; pLater; pLater=pLater->pNext){
    if( pLater->wantToLock ){
      lockBtreeMutex(pLater);
    }
  }
}

/*
** Exit the recursive mutex on a Btree.
*/
SQLITE_PRIVATE void sqlite3BtreeLeave(Btree *p){
  assert( sqlite3_mutex_held(p->db->mutex) );
  if( p->sharable ){
    assert( p->wantToLock>0 );
    p->wantToLock--;
    if( p->wantToLock==0 ){
      unlockBtreeMutex(p);
    }
  }
}

#ifndef NDEBUG
/*
** Return true if the BtShared mutex is held on the btree, or if the
** B-Tree is not marked as sharable.
**
** This routine is used only from within assert() statements.
*/
SQLITE_PRIVATE int sqlite3BtreeHoldsMutex(Btree *p){
  assert( p->sharable==0 || p->locked==0 || p->wantToLock>0 );
  assert( p->sharable==0 || p->locked==0 || p->db==p->pBt->db );
  assert( p->sharable==0 || p->locked==0 || sqlite3_mutex_held(p->pBt->mutex) );
  assert( p->sharable==0 || p->locked==0 || sqlite3_mutex_held(p->db->mutex) );

  return (p->sharable==0 || p->locked);
}
#endif

#ifndef SQLITE_OMIT_INCRBLOB
/*
** Enter and leave a mutex on a Btree given a cursor owned by that
** Btree. These entry points are used by incremental I/O and can be
** omitted if that module is not used.
*/
SQLITE_PRIVATE void sqlite3BtreeEnterCursor(BtCursor *pCur){
  sqlite3BtreeEnter(pCur->pBtree);
}
SQLITE_PRIVATE void sqlite3BtreeLeaveCursor(BtCursor *pCur){
  sqlite3BtreeLeave(pCur->pBtree);
}
#endif /* SQLITE_OMIT_INCRBLOB */

/*
** Enter the mutex on every Btree associated with a database
** connection. This is needed (for example) prior to parsing
** a statement since we will be comparing table and column names
** against all schemas and we do not want those schemas being
** reset out from under us.
**
** There is a corresponding leave-all procedure.
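**
** Editorial illustration (not part of the upstream comment): the expected
** pairing, where db is an open sqlite3* whose own mutex is already held:
**
**     sqlite3BtreeEnterAll(db);
**     ...compare names against the schemas, parse the statement...
**     sqlite3BtreeLeaveAll(db);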
**
** Enter the mutexes in ascending order by BtShared pointer address
** to avoid the possibility of deadlock when two threads with
** two or more btrees in common both try to lock all their btrees
** at the same instant.
*/
SQLITE_PRIVATE void sqlite3BtreeEnterAll(sqlite3 *db){
  int i;
  Btree *p;
  assert( sqlite3_mutex_held(db->mutex) );
  for(i=0; i<db->nDb; i++){
    p = db->aDb[i].pBt;
    if( p ) sqlite3BtreeEnter(p);
  }
}
SQLITE_PRIVATE void sqlite3BtreeLeaveAll(sqlite3 *db){
  int i;
  Btree *p;
  assert( sqlite3_mutex_held(db->mutex) );
  for(i=0; i<db->nDb; i++){
    p = db->aDb[i].pBt;
    if( p ) sqlite3BtreeLeave(p);
  }
}

/*
** Return true if a particular Btree requires a lock. Return FALSE if
** no lock is ever required since it is not sharable.
*/
SQLITE_PRIVATE int sqlite3BtreeSharable(Btree *p){
  return p->sharable;
}

#ifndef NDEBUG
/*
** Return true if the current thread holds the database connection
** mutex and all required BtShared mutexes.
**
** This routine is used inside assert() statements only.
*/
SQLITE_PRIVATE int sqlite3BtreeHoldsAllMutexes(sqlite3 *db){
  int i;
  if( !sqlite3_mutex_held(db->mutex) ){
    return 0;
  }
  for(i=0; i<db->nDb; i++){
    Btree *p;
    p = db->aDb[i].pBt;
    if( p && p->sharable &&
        (p->wantToLock==0 || !sqlite3_mutex_held(p->pBt->mutex)) ){
      return 0;
    }
  }
  return 1;
}
#endif /* NDEBUG */

#ifndef NDEBUG
/*
** Return true if the correct mutexes are held for accessing the
** db->aDb[iDb].pSchema structure. The mutexes required for schema
** access are:
**
**   (1) The mutex on db
**   (2) if iDb!=1, then the mutex on db->aDb[iDb].pBt.
**
** If pSchema is not NULL, then iDb is computed from pSchema and
** db using sqlite3SchemaToIndex().
*/
SQLITE_PRIVATE int sqlite3SchemaMutexHeld(sqlite3 *db, int iDb, Schema *pSchema){
  Btree *p;
  assert( db!=0 );
  if( pSchema ) iDb = sqlite3SchemaToIndex(db, pSchema);
  assert( iDb>=0 && iDb<db->nDb );
  if( !sqlite3_mutex_held(db->mutex) ) return 0;
  if( iDb==1 ) return 1;
  p = db->aDb[iDb].pBt;
  assert( p!=0 );
  return p->sharable==0 || p->locked==1;
}
#endif /* NDEBUG */

#else /* SQLITE_THREADSAFE>0 above. SQLITE_THREADSAFE==0 below */
/*
** The following are special cases for mutex enter routines for use
** in single threaded applications that use shared cache. Except for
** these two routines, all mutex operations are no-ops in that case and
** are null #defines in btree.h.
**
** If shared cache is disabled, then all btree mutex routines, including
** the ones below, are no-ops and are null #defines in btree.h.
*/
SQLITE_PRIVATE void sqlite3BtreeEnter(Btree *p){
  p->pBt->db = p->db;
}
SQLITE_PRIVATE void sqlite3BtreeEnterAll(sqlite3 *db){
  int i;
  for(i=0; i<db->nDb; i++){
    Btree *p = db->aDb[i].pBt;
    if( p ){
      p->pBt->db = p->db;
    }
  }
}
#endif /* if SQLITE_THREADSAFE */
#endif /* ifndef SQLITE_OMIT_SHARED_CACHE */

/************** End of btmutex.c *********************************************/
/************** Begin file btree.c *******************************************/
/*
** 2004 April 6
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
**    May you do good and not evil.
**    May you find forgiveness for yourself and forgive others.
**    May you share freely, never taking more than you give.
**
*************************************************************************
** This file implements an external (disk-based) database using BTrees.
** See the header comment on "btreeInt.h" for additional information,
** including a description of file format and an overview of operation.
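**
** Editorial illustration (not part of the upstream comment): decoding the
** page size from a raw copy of the 100-byte file header described in
** btreeInt.h, where aHdr is a hypothetical u8* pointing at byte 0 of
** page 1 and get2byte() is the macro defined in btreeInt.h:
**
**     u32 pageSize = get2byte(&aHdr[16]);
**     if( pageSize==1 ) pageSize = 65536;   ...a stored 1 encodes 65536...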
*/ /* #include "btreeInt.h" */ /* ** The header string that appears at the beginning of every ** SQLite database. */ static const char zMagicHeader[] = SQLITE_FILE_HEADER; /* ** Set this global variable to 1 to enable tracing using the TRACE ** macro. */ #if 0 int sqlite3BtreeTrace=1; /* True to enable tracing */ # define TRACE(X) if(sqlite3BtreeTrace){printf X;fflush(stdout);} #else # define TRACE(X) #endif /* ** Extract a 2-byte big-endian integer from an array of unsigned bytes. ** But if the value is zero, make it 65536. ** ** This routine is used to extract the "offset to cell content area" value ** from the header of a btree page. If the page size is 65536 and the page ** is empty, the offset should be 65536, but the 2-byte value stores zero. ** This routine makes the necessary adjustment to 65536. */ #define get2byteNotZero(X) (((((int)get2byte(X))-1)&0xffff)+1) /* ** Values passed as the 5th argument to allocateBtreePage() */ #define BTALLOC_ANY 0 /* Allocate any page */ #define BTALLOC_EXACT 1 /* Allocate exact page if possible */ #define BTALLOC_LE 2 /* Allocate any page <= the parameter */ /* ** Macro IfNotOmitAV(x) returns (x) if SQLITE_OMIT_AUTOVACUUM is not ** defined, or 0 if it is. For example: ** ** bIncrVacuum = IfNotOmitAV(pBtShared->incrVacuum); */ #ifndef SQLITE_OMIT_AUTOVACUUM #define IfNotOmitAV(expr) (expr) #else #define IfNotOmitAV(expr) 0 #endif #ifndef SQLITE_OMIT_SHARED_CACHE /* ** A list of BtShared objects that are eligible for participation ** in shared cache. This variable has file scope during normal builds, ** but the test harness needs to access it so we make it global for ** test builds. ** ** Access to this variable is protected by SQLITE_MUTEX_STATIC_MASTER. */ #ifdef SQLITE_TEST SQLITE_PRIVATE BtShared *SQLITE_WSD sqlite3SharedCacheList = 0; #else static BtShared *SQLITE_WSD sqlite3SharedCacheList = 0; #endif #endif /* SQLITE_OMIT_SHARED_CACHE */ #ifndef SQLITE_OMIT_SHARED_CACHE /* ** Enable or disable the shared pager and schema features. ** ** This routine has no effect on existing database connections. ** The shared cache setting effects only future calls to ** sqlite3_open(), sqlite3_open16(), or sqlite3_open_v2(). */ SQLITE_API int SQLITE_STDCALL sqlite3_enable_shared_cache(int enable){ sqlite3GlobalConfig.sharedCacheEnabled = enable; return SQLITE_OK; } #endif #ifdef SQLITE_OMIT_SHARED_CACHE /* ** The functions querySharedCacheTableLock(), setSharedCacheTableLock(), ** and clearAllSharedCacheTableLocks() ** manipulate entries in the BtShared.pLock linked list used to store ** shared-cache table level locks. If the library is compiled with the ** shared-cache feature disabled, then there is only ever one user ** of each BtShared structure and so this locking is not necessary. ** So define the lock related functions as no-ops. */ #define querySharedCacheTableLock(a,b,c) SQLITE_OK #define setSharedCacheTableLock(a,b,c) SQLITE_OK #define clearAllSharedCacheTableLocks(a) #define downgradeAllSharedCacheTableLocks(a) #define hasSharedCacheTableLock(a,b,c,d) 1 #define hasReadConflicts(a, b) 0 #endif #ifndef SQLITE_OMIT_SHARED_CACHE #ifdef SQLITE_DEBUG /* **** This function is only used as part of an assert() statement. *** ** ** Check to see if pBtree holds the required locks to read or write to the ** table with root page iRoot. Return 1 if it does and 0 if not. 
** ** For example, when writing to a table with root-page iRoot via ** Btree connection pBtree: ** ** assert( hasSharedCacheTableLock(pBtree, iRoot, 0, WRITE_LOCK) ); ** ** When writing to an index that resides in a sharable database, the ** caller should have first obtained a lock specifying the root page of ** the corresponding table. This makes things a bit more complicated, ** as this module treats each table as a separate structure. To determine ** the table corresponding to the index being written, this ** function has to search through the database schema. ** ** Instead of a lock on the table/index rooted at page iRoot, the caller may ** hold a write-lock on the schema table (root page 1). This is also ** acceptable. */ static int hasSharedCacheTableLock( Btree *pBtree, /* Handle that must hold lock */ Pgno iRoot, /* Root page of b-tree */ int isIndex, /* True if iRoot is the root of an index b-tree */ int eLockType /* Required lock type (READ_LOCK or WRITE_LOCK) */ ){ Schema *pSchema = (Schema *)pBtree->pBt->pSchema; Pgno iTab = 0; BtLock *pLock; /* If this database is not shareable, or if the client is reading ** and has the read-uncommitted flag set, then no lock is required. ** Return true immediately. */ if( (pBtree->sharable==0) || (eLockType==READ_LOCK && (pBtree->db->flags & SQLITE_ReadUncommitted)) ){ return 1; } /* If the client is reading or writing an index and the schema is ** not loaded, then it is too difficult to actually check to see if ** the correct locks are held. So do not bother - just return true. ** This case does not come up very often anyhow. */ if( isIndex && (!pSchema || (pSchema->schemaFlags&DB_SchemaLoaded)==0) ){ return 1; } /* Figure out the root-page that the lock should be held on. For table ** b-trees, this is just the root page of the b-tree being read or ** written. For index b-trees, it is the root page of the associated ** table. */ if( isIndex ){ HashElem *p; for(p=sqliteHashFirst(&pSchema->idxHash); p; p=sqliteHashNext(p)){ Index *pIdx = (Index *)sqliteHashData(p); if( pIdx->tnum==(int)iRoot ){ if( iTab ){ /* Two or more indexes share the same root page. There must ** be imposter tables. So just return true. The assert is not ** useful in that case. */ return 1; } iTab = pIdx->pTable->tnum; } } }else{ iTab = iRoot; } /* Search for the required lock. Either a write-lock on root-page iTab, a ** write-lock on the schema table, or (if the client is reading) a ** read-lock on iTab will suffice. Return 1 if any of these are found. */ for(pLock=pBtree->pBt->pLock; pLock; pLock=pLock->pNext){ if( pLock->pBtree==pBtree && (pLock->iTable==iTab || (pLock->eLock==WRITE_LOCK && pLock->iTable==1)) && pLock->eLock>=eLockType ){ return 1; } } /* Failed to find the required lock. */ return 0; } #endif /* SQLITE_DEBUG */ #ifdef SQLITE_DEBUG /* **** This function may be used as part of assert() statements only. **** ** ** Return true if it would be illegal for pBtree to write into the ** table or index rooted at iRoot because other shared connections are ** simultaneously reading that same table or index. ** ** It is illegal for pBtree to write if some other Btree object that ** shares the same BtShared object is currently reading or writing ** the iRoot table. Except, if the other Btree object has the ** read-uncommitted flag set, then it is OK for the other object to ** have a read cursor. 
** ** For example, before writing to any part of the table or index ** rooted at page iRoot, one should call: ** ** assert( !hasReadConflicts(pBtree, iRoot) ); */ static int hasReadConflicts(Btree *pBtree, Pgno iRoot){ BtCursor *p; for(p=pBtree->pBt->pCursor; p; p=p->pNext){ if( p->pgnoRoot==iRoot && p->pBtree!=pBtree && 0==(p->pBtree->db->flags & SQLITE_ReadUncommitted) ){ return 1; } } return 0; } #endif /* #ifdef SQLITE_DEBUG */ /* ** Query to see if Btree handle p may obtain a lock of type eLock ** (READ_LOCK or WRITE_LOCK) on the table with root-page iTab. Return ** SQLITE_OK if the lock may be obtained (by calling ** setSharedCacheTableLock()), or SQLITE_LOCKED if not. */ static int querySharedCacheTableLock(Btree *p, Pgno iTab, u8 eLock){ BtShared *pBt = p->pBt; BtLock *pIter; assert( sqlite3BtreeHoldsMutex(p) ); assert( eLock==READ_LOCK || eLock==WRITE_LOCK ); assert( p->db!=0 ); assert( !(p->db->flags&SQLITE_ReadUncommitted)||eLock==WRITE_LOCK||iTab==1 ); /* If requesting a write-lock, then the Btree must have an open write ** transaction on this file. And, obviously, for this to be so there ** must be an open write transaction on the file itself. */ assert( eLock==READ_LOCK || (p==pBt->pWriter && p->inTrans==TRANS_WRITE) ); assert( eLock==READ_LOCK || pBt->inTransaction==TRANS_WRITE ); /* This routine is a no-op if the shared-cache is not enabled */ if( !p->sharable ){ return SQLITE_OK; } /* If some other connection is holding an exclusive lock, the ** requested lock may not be obtained. */ if( pBt->pWriter!=p && (pBt->btsFlags & BTS_EXCLUSIVE)!=0 ){ sqlite3ConnectionBlocked(p->db, pBt->pWriter->db); return SQLITE_LOCKED_SHAREDCACHE; } for(pIter=pBt->pLock; pIter; pIter=pIter->pNext){ /* The condition (pIter->eLock!=eLock) in the following if(...) ** statement is a simplification of: ** ** (eLock==WRITE_LOCK || pIter->eLock==WRITE_LOCK) ** ** since we know that if eLock==WRITE_LOCK, then no other connection ** may hold a WRITE_LOCK on any table in this file (since there can ** only be a single writer). */ assert( pIter->eLock==READ_LOCK || pIter->eLock==WRITE_LOCK ); assert( eLock==READ_LOCK || pIter->pBtree==p || pIter->eLock==READ_LOCK); if( pIter->pBtree!=p && pIter->iTable==iTab && pIter->eLock!=eLock ){ sqlite3ConnectionBlocked(p->db, pIter->pBtree->db); if( eLock==WRITE_LOCK ){ assert( p==pBt->pWriter ); pBt->btsFlags |= BTS_PENDING; } return SQLITE_LOCKED_SHAREDCACHE; } } return SQLITE_OK; } #endif /* !SQLITE_OMIT_SHARED_CACHE */ #ifndef SQLITE_OMIT_SHARED_CACHE /* ** Add a lock on the table with root-page iTable to the shared-btree used ** by Btree handle p. Parameter eLock must be either READ_LOCK or ** WRITE_LOCK. ** ** This function assumes the following: ** ** (a) The specified Btree object p is connected to a sharable ** database (one with the BtShared.sharable flag set), and ** ** (b) No other Btree objects hold a lock that conflicts ** with the requested lock (i.e. querySharedCacheTableLock() has ** already been called and returned SQLITE_OK). ** ** SQLITE_OK is returned if the lock is added successfully. SQLITE_NOMEM ** is returned if a malloc attempt fails. */ static int setSharedCacheTableLock(Btree *p, Pgno iTable, u8 eLock){ BtShared *pBt = p->pBt; BtLock *pLock = 0; BtLock *pIter; assert( sqlite3BtreeHoldsMutex(p) ); assert( eLock==READ_LOCK || eLock==WRITE_LOCK ); assert( p->db!=0 ); /* A connection with the read-uncommitted flag set will never try to ** obtain a read-lock using this function. 
The only read-lock obtained ** by a connection in read-uncommitted mode is on the sqlite_master ** table, and that lock is obtained in BtreeBeginTrans(). */ assert( 0==(p->db->flags&SQLITE_ReadUncommitted) || eLock==WRITE_LOCK ); /* This function should only be called on a sharable b-tree after it ** has been determined that no other b-tree holds a conflicting lock. */ assert( p->sharable ); assert( SQLITE_OK==querySharedCacheTableLock(p, iTable, eLock) ); /* First search the list for an existing lock on this table. */ for(pIter=pBt->pLock; pIter; pIter=pIter->pNext){ if( pIter->iTable==iTable && pIter->pBtree==p ){ pLock = pIter; break; } } /* If the above search did not find a BtLock struct associating Btree p ** with table iTable, allocate one and link it into the list. */ if( !pLock ){ pLock = (BtLock *)sqlite3MallocZero(sizeof(BtLock)); if( !pLock ){ return SQLITE_NOMEM; } pLock->iTable = iTable; pLock->pBtree = p; pLock->pNext = pBt->pLock; pBt->pLock = pLock; } /* Set the BtLock.eLock variable to the maximum of the current lock ** and the requested lock. This means if a write-lock was already held ** and a read-lock requested, we don't incorrectly downgrade the lock. */ assert( WRITE_LOCK>READ_LOCK ); if( eLock>pLock->eLock ){ pLock->eLock = eLock; } return SQLITE_OK; } #endif /* !SQLITE_OMIT_SHARED_CACHE */ #ifndef SQLITE_OMIT_SHARED_CACHE /* ** Release all the table locks (locks obtained via calls to ** the setSharedCacheTableLock() procedure) held by Btree object p. ** ** This function assumes that Btree p has an open read or write ** transaction. If it does not, then the BTS_PENDING flag ** may be incorrectly cleared. */ static void clearAllSharedCacheTableLocks(Btree *p){ BtShared *pBt = p->pBt; BtLock **ppIter = &pBt->pLock; assert( sqlite3BtreeHoldsMutex(p) ); assert( p->sharable || 0==*ppIter ); assert( p->inTrans>0 ); while( *ppIter ){ BtLock *pLock = *ppIter; assert( (pBt->btsFlags & BTS_EXCLUSIVE)==0 || pBt->pWriter==pLock->pBtree ); assert( pLock->pBtree->inTrans>=pLock->eLock ); if( pLock->pBtree==p ){ *ppIter = pLock->pNext; assert( pLock->iTable!=1 || pLock==&p->lock ); if( pLock->iTable!=1 ){ sqlite3_free(pLock); } }else{ ppIter = &pLock->pNext; } } assert( (pBt->btsFlags & BTS_PENDING)==0 || pBt->pWriter ); if( pBt->pWriter==p ){ pBt->pWriter = 0; pBt->btsFlags &= ~(BTS_EXCLUSIVE|BTS_PENDING); }else if( pBt->nTransaction==2 ){ /* This function is called when Btree p is concluding its ** transaction. If there currently exists a writer, and p is not ** that writer, then the number of locks held by connections other ** than the writer must be about to drop to zero. In this case ** set the BTS_PENDING flag to 0. ** ** If there is not currently a writer, then BTS_PENDING must ** be zero already. So this next line is harmless in that case. */ pBt->btsFlags &= ~BTS_PENDING; } } /* ** This function changes all write-locks held by Btree p into read-locks. 
*/ static void downgradeAllSharedCacheTableLocks(Btree *p){ BtShared *pBt = p->pBt; if( pBt->pWriter==p ){ BtLock *pLock; pBt->pWriter = 0; pBt->btsFlags &= ~(BTS_EXCLUSIVE|BTS_PENDING); for(pLock=pBt->pLock; pLock; pLock=pLock->pNext){ assert( pLock->eLock==READ_LOCK || pLock->pBtree==p ); pLock->eLock = READ_LOCK; } } } #endif /* SQLITE_OMIT_SHARED_CACHE */ static void releasePage(MemPage *pPage); /* Forward reference */ /* ***** This routine is used inside of assert() only **** ** ** Verify that the cursor holds the mutex on its BtShared */ #ifdef SQLITE_DEBUG static int cursorHoldsMutex(BtCursor *p){ return sqlite3_mutex_held(p->pBt->mutex); } #endif /* ** Invalidate the overflow cache of the cursor passed as the first argument. ** on the shared btree structure pBt. */ #define invalidateOverflowCache(pCur) (pCur->curFlags &= ~BTCF_ValidOvfl) /* ** Invalidate the overflow page-list cache for all cursors opened ** on the shared btree structure pBt. */ static void invalidateAllOverflowCache(BtShared *pBt){ BtCursor *p; assert( sqlite3_mutex_held(pBt->mutex) ); for(p=pBt->pCursor; p; p=p->pNext){ invalidateOverflowCache(p); } } #ifndef SQLITE_OMIT_INCRBLOB /* ** This function is called before modifying the contents of a table ** to invalidate any incrblob cursors that are open on the ** row or one of the rows being modified. ** ** If argument isClearTable is true, then the entire contents of the ** table is about to be deleted. In this case invalidate all incrblob ** cursors open on any row within the table with root-page pgnoRoot. ** ** Otherwise, if argument isClearTable is false, then the row with ** rowid iRow is being replaced or deleted. In this case invalidate ** only those incrblob cursors open on that specific row. */ static void invalidateIncrblobCursors( Btree *pBtree, /* The database file to check */ i64 iRow, /* The rowid that might be changing */ int isClearTable /* True if all rows are being deleted */ ){ BtCursor *p; if( pBtree->hasIncrblobCur==0 ) return; assert( sqlite3BtreeHoldsMutex(pBtree) ); pBtree->hasIncrblobCur = 0; for(p=pBtree->pBt->pCursor; p; p=p->pNext){ if( (p->curFlags & BTCF_Incrblob)!=0 ){ pBtree->hasIncrblobCur = 1; if( isClearTable || p->info.nKey==iRow ){ p->eState = CURSOR_INVALID; } } } } #else /* Stub function when INCRBLOB is omitted */ #define invalidateIncrblobCursors(x,y,z) #endif /* SQLITE_OMIT_INCRBLOB */ /* ** Set bit pgno of the BtShared.pHasContent bitvec. This is called ** when a page that previously contained data becomes a free-list leaf ** page. ** ** The BtShared.pHasContent bitvec exists to work around an obscure ** bug caused by the interaction of two useful IO optimizations surrounding ** free-list leaf pages: ** ** 1) When all data is deleted from a page and the page becomes ** a free-list leaf page, the page is not written to the database ** (as free-list leaf pages contain no meaningful data). Sometimes ** such a page is not even journalled (as it will not be modified, ** why bother journalling it?). ** ** 2) When a free-list leaf page is reused, its content is not read ** from the database or written to the journal file (why should it ** be, if it is not at all meaningful?). ** ** By themselves, these optimizations work fine and provide a handy ** performance boost to bulk delete or insert operations. However, if ** a page is moved to the free-list and then reused within the same ** transaction, a problem comes up. 
If the page is not journalled when ** it is moved to the free-list and it is also not journalled when it ** is extracted from the free-list and reused, then the original data ** may be lost. In the event of a rollback, it may not be possible ** to restore the database to its original configuration. ** ** The solution is the BtShared.pHasContent bitvec. Whenever a page is ** moved to become a free-list leaf page, the corresponding bit is ** set in the bitvec. Whenever a leaf page is extracted from the free-list, ** optimization 2 above is omitted if the corresponding bit is already ** set in BtShared.pHasContent. The contents of the bitvec are cleared ** at the end of every transaction. */ static int btreeSetHasContent(BtShared *pBt, Pgno pgno){ int rc = SQLITE_OK; if( !pBt->pHasContent ){ assert( pgno<=pBt->nPage ); pBt->pHasContent = sqlite3BitvecCreate(pBt->nPage); if( !pBt->pHasContent ){ rc = SQLITE_NOMEM; } } if( rc==SQLITE_OK && pgno<=sqlite3BitvecSize(pBt->pHasContent) ){ rc = sqlite3BitvecSet(pBt->pHasContent, pgno); } return rc; } /* ** Query the BtShared.pHasContent vector. ** ** This function is called when a free-list leaf page is removed from the ** free-list for reuse. It returns false if it is safe to retrieve the ** page from the pager layer with the 'no-content' flag set. True otherwise. */ static int btreeGetHasContent(BtShared *pBt, Pgno pgno){ Bitvec *p = pBt->pHasContent; return (p && (pgno>sqlite3BitvecSize(p) || sqlite3BitvecTest(p, pgno))); } /* ** Clear (destroy) the BtShared.pHasContent bitvec. This should be ** invoked at the conclusion of each write-transaction. */ static void btreeClearHasContent(BtShared *pBt){ sqlite3BitvecDestroy(pBt->pHasContent); pBt->pHasContent = 0; } /* ** Release all of the apPage[] pages for a cursor. */ static void btreeReleaseAllCursorPages(BtCursor *pCur){ int i; for(i=0; i<=pCur->iPage; i++){ releasePage(pCur->apPage[i]); pCur->apPage[i] = 0; } pCur->iPage = -1; } /* ** Save the current cursor position in the variables BtCursor.nKey ** and BtCursor.pKey. The cursor's state is set to CURSOR_REQUIRESEEK. ** ** The caller must ensure that the cursor is valid (has eState==CURSOR_VALID) ** prior to calling this routine. */ static int saveCursorPosition(BtCursor *pCur){ int rc; assert( CURSOR_VALID==pCur->eState || CURSOR_SKIPNEXT==pCur->eState ); assert( 0==pCur->pKey ); assert( cursorHoldsMutex(pCur) ); if( pCur->eState==CURSOR_SKIPNEXT ){ pCur->eState = CURSOR_VALID; }else{ pCur->skipNext = 0; } rc = sqlite3BtreeKeySize(pCur, &pCur->nKey); assert( rc==SQLITE_OK ); /* KeySize() cannot fail */ /* If this is an intKey table, then the above call to BtreeKeySize() ** stores the integer key in pCur->nKey. In this case this value is ** all that is required. Otherwise, if pCur is not open on an intKey ** table, then malloc space for and store the pCur->nKey bytes of key ** data. */ if( 0==pCur->curIntKey ){ void *pKey = sqlite3Malloc( pCur->nKey ); if( pKey ){ rc = sqlite3BtreeKey(pCur, 0, (int)pCur->nKey, pKey); if( rc==SQLITE_OK ){ pCur->pKey = pKey; }else{ sqlite3_free(pKey); } }else{ rc = SQLITE_NOMEM; } } assert( !pCur->curIntKey || !pCur->pKey ); if( rc==SQLITE_OK ){ btreeReleaseAllCursorPages(pCur); pCur->eState = CURSOR_REQUIRESEEK; } invalidateOverflowCache(pCur); return rc; } /* Forward reference */ static int SQLITE_NOINLINE saveCursorsOnList(BtCursor*,Pgno,BtCursor*); /* ** Save the positions of all cursors (except pExcept) that are open on ** the table with root-page iRoot. 
"Saving the cursor position" means that ** the location in the btree is remembered in such a way that it can be ** moved back to the same spot after the btree has been modified. This ** routine is called just before cursor pExcept is used to modify the ** table, for example in BtreeDelete() or BtreeInsert(). ** ** If there are two or more cursors on the same btree, then all such ** cursors should have their BTCF_Multiple flag set. The btreeCursor() ** routine enforces that rule. This routine only needs to be called in ** the uncommon case when pExpect has the BTCF_Multiple flag set. ** ** If pExpect!=NULL and if no other cursors are found on the same root-page, ** then the BTCF_Multiple flag on pExpect is cleared, to avoid another ** pointless call to this routine. ** ** Implementation note: This routine merely checks to see if any cursors ** need to be saved. It calls out to saveCursorsOnList() in the (unusual) ** event that cursors are in need to being saved. */ static int saveAllCursors(BtShared *pBt, Pgno iRoot, BtCursor *pExcept){ BtCursor *p; assert( sqlite3_mutex_held(pBt->mutex) ); assert( pExcept==0 || pExcept->pBt==pBt ); for(p=pBt->pCursor; p; p=p->pNext){ if( p!=pExcept && (0==iRoot || p->pgnoRoot==iRoot) ) break; } if( p ) return saveCursorsOnList(p, iRoot, pExcept); if( pExcept ) pExcept->curFlags &= ~BTCF_Multiple; return SQLITE_OK; } /* This helper routine to saveAllCursors does the actual work of saving ** the cursors if and when a cursor is found that actually requires saving. ** The common case is that no cursors need to be saved, so this routine is ** broken out from its caller to avoid unnecessary stack pointer movement. */ static int SQLITE_NOINLINE saveCursorsOnList( BtCursor *p, /* The first cursor that needs saving */ Pgno iRoot, /* Only save cursor with this iRoot. Save all if zero */ BtCursor *pExcept /* Do not save this cursor */ ){ do{ if( p!=pExcept && (0==iRoot || p->pgnoRoot==iRoot) ){ if( p->eState==CURSOR_VALID || p->eState==CURSOR_SKIPNEXT ){ int rc = saveCursorPosition(p); if( SQLITE_OK!=rc ){ return rc; } }else{ testcase( p->iPage>0 ); btreeReleaseAllCursorPages(p); } } p = p->pNext; }while( p ); return SQLITE_OK; } /* ** Clear the current cursor position. */ SQLITE_PRIVATE void sqlite3BtreeClearCursor(BtCursor *pCur){ assert( cursorHoldsMutex(pCur) ); sqlite3_free(pCur->pKey); pCur->pKey = 0; pCur->eState = CURSOR_INVALID; } /* ** In this version of BtreeMoveto, pKey is a packed index record ** such as is generated by the OP_MakeRecord opcode. Unpack the ** record and then call BtreeMovetoUnpacked() to do the work. */ static int btreeMoveto( BtCursor *pCur, /* Cursor open on the btree to be searched */ const void *pKey, /* Packed key if the btree is an index */ i64 nKey, /* Integer key for tables. 
Size of pKey for indices */ int bias, /* Bias search to the high end */ int *pRes /* Write search results here */ ){ int rc; /* Status code */ UnpackedRecord *pIdxKey; /* Unpacked index key */ char aSpace[200]; /* Temp space for pIdxKey - to avoid a malloc */ char *pFree = 0; if( pKey ){ assert( nKey==(i64)(int)nKey ); pIdxKey = sqlite3VdbeAllocUnpackedRecord( pCur->pKeyInfo, aSpace, sizeof(aSpace), &pFree ); if( pIdxKey==0 ) return SQLITE_NOMEM; sqlite3VdbeRecordUnpack(pCur->pKeyInfo, (int)nKey, pKey, pIdxKey); if( pIdxKey->nField==0 ){ sqlite3DbFree(pCur->pKeyInfo->db, pFree); return SQLITE_CORRUPT_BKPT; } }else{ pIdxKey = 0; } rc = sqlite3BtreeMovetoUnpacked(pCur, pIdxKey, nKey, bias, pRes); if( pFree ){ sqlite3DbFree(pCur->pKeyInfo->db, pFree); } return rc; } /* ** Restore the cursor to the position it was in (or as close to as possible) ** when saveCursorPosition() was called. Note that this call deletes the ** saved position info stored by saveCursorPosition(), so there can be ** at most one effective restoreCursorPosition() call after each ** saveCursorPosition(). */ static int btreeRestoreCursorPosition(BtCursor *pCur){ int rc; int skipNext; assert( cursorHoldsMutex(pCur) ); assert( pCur->eState>=CURSOR_REQUIRESEEK ); if( pCur->eState==CURSOR_FAULT ){ return pCur->skipNext; } pCur->eState = CURSOR_INVALID; rc = btreeMoveto(pCur, pCur->pKey, pCur->nKey, 0, &skipNext); if( rc==SQLITE_OK ){ sqlite3_free(pCur->pKey); pCur->pKey = 0; assert( pCur->eState==CURSOR_VALID || pCur->eState==CURSOR_INVALID ); pCur->skipNext |= skipNext; if( pCur->skipNext && pCur->eState==CURSOR_VALID ){ pCur->eState = CURSOR_SKIPNEXT; } } return rc; } #define restoreCursorPosition(p) \ (p->eState>=CURSOR_REQUIRESEEK ? \ btreeRestoreCursorPosition(p) : \ SQLITE_OK) /* ** Determine whether or not a cursor has moved from the position where ** it was last placed, or has been invalidated for any other reason. ** Cursors can move when the row they are pointing at is deleted out ** from under them, for example. Cursor might also move if a btree ** is rebalanced. ** ** Calling this routine with a NULL cursor pointer returns false. ** ** Use the separate sqlite3BtreeCursorRestore() routine to restore a cursor ** back to where it ought to be if this routine returns true. */ SQLITE_PRIVATE int sqlite3BtreeCursorHasMoved(BtCursor *pCur){ return pCur->eState!=CURSOR_VALID; } /* ** This routine restores a cursor back to its original position after it ** has been moved by some outside activity (such as a btree rebalance or ** a row having been deleted out from under the cursor). ** ** On success, the *pDifferentRow parameter is false if the cursor is left ** pointing at exactly the same row. *pDifferntRow is the row the cursor ** was pointing to has been deleted, forcing the cursor to point to some ** nearby row. ** ** This routine should only be called for a cursor that just returned ** TRUE from sqlite3BtreeCursorHasMoved(). */ SQLITE_PRIVATE int sqlite3BtreeCursorRestore(BtCursor *pCur, int *pDifferentRow){ int rc; assert( pCur!=0 ); assert( pCur->eState!=CURSOR_VALID ); rc = restoreCursorPosition(pCur); if( rc ){ *pDifferentRow = 1; return rc; } if( pCur->eState!=CURSOR_VALID ){ *pDifferentRow = 1; }else{ assert( pCur->skipNext==0 ); *pDifferentRow = 0; } return SQLITE_OK; } #ifndef SQLITE_OMIT_AUTOVACUUM /* ** Given a page number of a regular database page, return the page ** number for the pointer-map page that contains the entry for the ** input page number. 
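** (A rough sketch of the arithmetic used below, assuming a usable page
** size of 1024 bytes so that each pointer-map page covers
** (1024/5)+1 = 205 database pages:
**
**     ptrmapPageno(pgno) = ((pgno-2)/205)*205 + 2
**
** Hence pages 2 through 206 map to pointer-map page 2, and page 208 maps
** to pointer-map page 207, subject to the PENDING_BYTE_PAGE adjustment
** made at the end of the routine.)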
** ** Return 0 (not a valid page) for pgno==1 since there is ** no pointer map associated with page 1. The integrity_check logic ** requires that ptrmapPageno(*,1)!=1. */ static Pgno ptrmapPageno(BtShared *pBt, Pgno pgno){ int nPagesPerMapPage; Pgno iPtrMap, ret; assert( sqlite3_mutex_held(pBt->mutex) ); if( pgno<2 ) return 0; nPagesPerMapPage = (pBt->usableSize/5)+1; iPtrMap = (pgno-2)/nPagesPerMapPage; ret = (iPtrMap*nPagesPerMapPage) + 2; if( ret==PENDING_BYTE_PAGE(pBt) ){ ret++; } return ret; } /* ** Write an entry into the pointer map. ** ** This routine updates the pointer map entry for page number 'key' ** so that it maps to type 'eType' and parent page number 'pgno'. ** ** If *pRC is initially non-zero (non-SQLITE_OK) then this routine is ** a no-op. If an error occurs, the appropriate error code is written ** into *pRC. */ static void ptrmapPut(BtShared *pBt, Pgno key, u8 eType, Pgno parent, int *pRC){ DbPage *pDbPage; /* The pointer map page */ u8 *pPtrmap; /* The pointer map data */ Pgno iPtrmap; /* The pointer map page number */ int offset; /* Offset in pointer map page */ int rc; /* Return code from subfunctions */ if( *pRC ) return; assert( sqlite3_mutex_held(pBt->mutex) ); /* The master-journal page number must never be used as a pointer map page */ assert( 0==PTRMAP_ISPAGE(pBt, PENDING_BYTE_PAGE(pBt)) ); assert( pBt->autoVacuum ); if( key==0 ){ *pRC = SQLITE_CORRUPT_BKPT; return; } iPtrmap = PTRMAP_PAGENO(pBt, key); rc = sqlite3PagerGet(pBt->pPager, iPtrmap, &pDbPage); if( rc!=SQLITE_OK ){ *pRC = rc; return; } offset = PTRMAP_PTROFFSET(iPtrmap, key); if( offset<0 ){ *pRC = SQLITE_CORRUPT_BKPT; goto ptrmap_exit; } assert( offset <= (int)pBt->usableSize-5 ); pPtrmap = (u8 *)sqlite3PagerGetData(pDbPage); if( eType!=pPtrmap[offset] || get4byte(&pPtrmap[offset+1])!=parent ){ TRACE(("PTRMAP_UPDATE: %d->(%d,%d)\n", key, eType, parent)); *pRC= rc = sqlite3PagerWrite(pDbPage); if( rc==SQLITE_OK ){ pPtrmap[offset] = eType; put4byte(&pPtrmap[offset+1], parent); } } ptrmap_exit: sqlite3PagerUnref(pDbPage); } /* ** Read an entry from the pointer map. ** ** This routine retrieves the pointer map entry for page 'key', writing ** the type and parent page number to *pEType and *pPgno respectively. ** An error code is returned if something goes wrong, otherwise SQLITE_OK. */ static int ptrmapGet(BtShared *pBt, Pgno key, u8 *pEType, Pgno *pPgno){ DbPage *pDbPage; /* The pointer map page */ int iPtrmap; /* Pointer map page index */ u8 *pPtrmap; /* Pointer map page data */ int offset; /* Offset of entry in pointer map */ int rc; assert( sqlite3_mutex_held(pBt->mutex) ); iPtrmap = PTRMAP_PAGENO(pBt, key); rc = sqlite3PagerGet(pBt->pPager, iPtrmap, &pDbPage); if( rc!=0 ){ return rc; } pPtrmap = (u8 *)sqlite3PagerGetData(pDbPage); offset = PTRMAP_PTROFFSET(iPtrmap, key); if( offset<0 ){ sqlite3PagerUnref(pDbPage); return SQLITE_CORRUPT_BKPT; } assert( offset <= (int)pBt->usableSize-5 ); assert( pEType!=0 ); *pEType = pPtrmap[offset]; if( pPgno ) *pPgno = get4byte(&pPtrmap[offset+1]); sqlite3PagerUnref(pDbPage); if( *pEType<1 || *pEType>5 ) return SQLITE_CORRUPT_BKPT; return SQLITE_OK; } #else /* if defined SQLITE_OMIT_AUTOVACUUM */ #define ptrmapPut(w,x,y,z,rc) #define ptrmapGet(w,x,y,z) SQLITE_OK #define ptrmapPutOvflPtr(x, y, rc) #endif /* ** Given a btree page and a cell index (0 means the first cell on ** the page, 1 means the second cell, and so forth) return a pointer ** to the cell content. 
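** (Each entry in the cell pointer array is a 2-byte big-endian offset from
** the start of the page to the cell; masking with maskPage, which is
** pageSize-1, keeps a corrupt offset from reaching outside the page
** buffer.)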
** ** findCellPastPtr() does the same except it skips past the initial ** 4-byte child pointer found on interior pages, if there is one. ** ** This routine works only for pages that do not contain overflow cells. */ #define findCell(P,I) \ ((P)->aData + ((P)->maskPage & get2byteAligned(&(P)->aCellIdx[2*(I)]))) #define findCellPastPtr(P,I) \ ((P)->aDataOfst + ((P)->maskPage & get2byteAligned(&(P)->aCellIdx[2*(I)]))) /* ** This is common tail processing for btreeParseCellPtr() and ** btreeParseCellPtrIndex() for the case when the cell does not fit entirely ** on a single B-tree page. Make necessary adjustments to the CellInfo ** structure. */ static SQLITE_NOINLINE void btreeParseCellAdjustSizeForOverflow( MemPage *pPage, /* Page containing the cell */ u8 *pCell, /* Pointer to the cell text. */ CellInfo *pInfo /* Fill in this structure */ ){ /* If the payload will not fit completely on the local page, we have ** to decide how much to store locally and how much to spill onto ** overflow pages. The strategy is to minimize the amount of unused ** space on overflow pages while keeping the amount of local storage ** in between minLocal and maxLocal. ** ** Warning: changing the way overflow payload is distributed in any ** way will result in an incompatible file format. */ int minLocal; /* Minimum amount of payload held locally */ int maxLocal; /* Maximum amount of payload held locally */ int surplus; /* Overflow payload available for local storage */ minLocal = pPage->minLocal; maxLocal = pPage->maxLocal; surplus = minLocal + (pInfo->nPayload - minLocal)%(pPage->pBt->usableSize-4); testcase( surplus==maxLocal ); testcase( surplus==maxLocal+1 ); if( surplus <= maxLocal ){ pInfo->nLocal = (u16)surplus; }else{ pInfo->nLocal = (u16)minLocal; } pInfo->iOverflow = (u16)(&pInfo->pPayload[pInfo->nLocal] - pCell); pInfo->nSize = pInfo->iOverflow + 4; } /* ** The following routines are implementations of the MemPage.xParseCell() ** method. ** ** Parse a cell content block and fill in the CellInfo structure. ** ** btreeParseCellPtr() => table btree leaf nodes ** btreeParseCellNoPayload() => table btree internal nodes ** btreeParseCellPtrIndex() => index btree nodes ** ** There is also a wrapper function btreeParseCell() that works for ** all MemPage types and that references the cell by index rather than ** by pointer. */ static void btreeParseCellPtrNoPayload( MemPage *pPage, /* Page containing the cell */ u8 *pCell, /* Pointer to the cell text. */ CellInfo *pInfo /* Fill in this structure */ ){ assert( sqlite3_mutex_held(pPage->pBt->mutex) ); assert( pPage->leaf==0 ); assert( pPage->noPayload ); assert( pPage->childPtrSize==4 ); #ifndef SQLITE_DEBUG UNUSED_PARAMETER(pPage); #endif pInfo->nSize = 4 + getVarint(&pCell[4], (u64*)&pInfo->nKey); pInfo->nPayload = 0; pInfo->nLocal = 0; pInfo->iOverflow = 0; pInfo->pPayload = 0; return; } static void btreeParseCellPtr( MemPage *pPage, /* Page containing the cell */ u8 *pCell, /* Pointer to the cell text. 
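** (For the table-leaf cells handled by this routine the cell text consists
** of a varint payload size, then a varint rowid, then the payload bytes
** themselves.)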
*/ CellInfo *pInfo /* Fill in this structure */ ){ u8 *pIter; /* For scanning through pCell */ u32 nPayload; /* Number of bytes of cell payload */ u64 iKey; /* Extracted Key value */ assert( sqlite3_mutex_held(pPage->pBt->mutex) ); assert( pPage->leaf==0 || pPage->leaf==1 ); assert( pPage->intKeyLeaf || pPage->noPayload ); assert( pPage->noPayload==0 ); assert( pPage->intKeyLeaf ); assert( pPage->childPtrSize==0 ); pIter = pCell; /* The next block of code is equivalent to: ** ** pIter += getVarint32(pIter, nPayload); ** ** The code is inlined to avoid a function call. */ nPayload = *pIter; if( nPayload>=0x80 ){ u8 *pEnd = &pIter[8]; nPayload &= 0x7f; do{ nPayload = (nPayload<<7) | (*++pIter & 0x7f); }while( (*pIter)>=0x80 && pIternKey); ** ** The code is inlined to avoid a function call. */ iKey = *pIter; if( iKey>=0x80 ){ u8 *pEnd = &pIter[7]; iKey &= 0x7f; while(1){ iKey = (iKey<<7) | (*++pIter & 0x7f); if( (*pIter)<0x80 ) break; if( pIter>=pEnd ){ iKey = (iKey<<8) | *++pIter; break; } } } pIter++; pInfo->nKey = *(i64*)&iKey; pInfo->nPayload = nPayload; pInfo->pPayload = pIter; testcase( nPayload==pPage->maxLocal ); testcase( nPayload==pPage->maxLocal+1 ); if( nPayload<=pPage->maxLocal ){ /* This is the (easy) common case where the entire payload fits ** on the local page. No overflow is required. */ pInfo->nSize = nPayload + (u16)(pIter - pCell); if( pInfo->nSize<4 ) pInfo->nSize = 4; pInfo->nLocal = (u16)nPayload; pInfo->iOverflow = 0; }else{ btreeParseCellAdjustSizeForOverflow(pPage, pCell, pInfo); } } static void btreeParseCellPtrIndex( MemPage *pPage, /* Page containing the cell */ u8 *pCell, /* Pointer to the cell text. */ CellInfo *pInfo /* Fill in this structure */ ){ u8 *pIter; /* For scanning through pCell */ u32 nPayload; /* Number of bytes of cell payload */ assert( sqlite3_mutex_held(pPage->pBt->mutex) ); assert( pPage->leaf==0 || pPage->leaf==1 ); assert( pPage->intKeyLeaf==0 ); assert( pPage->noPayload==0 ); pIter = pCell + pPage->childPtrSize; nPayload = *pIter; if( nPayload>=0x80 ){ u8 *pEnd = &pIter[8]; nPayload &= 0x7f; do{ nPayload = (nPayload<<7) | (*++pIter & 0x7f); }while( *(pIter)>=0x80 && pIternKey = nPayload; pInfo->nPayload = nPayload; pInfo->pPayload = pIter; testcase( nPayload==pPage->maxLocal ); testcase( nPayload==pPage->maxLocal+1 ); if( nPayload<=pPage->maxLocal ){ /* This is the (easy) common case where the entire payload fits ** on the local page. No overflow is required. */ pInfo->nSize = nPayload + (u16)(pIter - pCell); if( pInfo->nSize<4 ) pInfo->nSize = 4; pInfo->nLocal = (u16)nPayload; pInfo->iOverflow = 0; }else{ btreeParseCellAdjustSizeForOverflow(pPage, pCell, pInfo); } } static void btreeParseCell( MemPage *pPage, /* Page containing the cell */ int iCell, /* The cell index. First cell is 0 */ CellInfo *pInfo /* Fill in this structure */ ){ pPage->xParseCell(pPage, findCell(pPage, iCell), pInfo); } /* ** The following routines are implementations of the MemPage.xCellSize ** method. ** ** Compute the total number of bytes that a Cell needs in the cell ** data area of the btree-page. The return number includes the cell ** data header and the local payload, but not any overflow page or ** the space used by the cell pointer. 
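** (Both the parse and the size routines decode SQLite varints inline:
** seven payload bits per byte, most-significant group first, with the high
** bit set on every byte except the last. As a small worked example, the
** value 300 (0x12C) is stored as the two bytes 0x82 0x2C and decoded as
** (0x02<<7)|0x2C == 300. Only the ninth byte of a varint, when present,
** contributes a full eight bits.)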
** ** cellSizePtrNoPayload() => table internal nodes ** cellSizePtr() => all index nodes & table leaf nodes */ static u16 cellSizePtr(MemPage *pPage, u8 *pCell){ u8 *pIter = pCell + pPage->childPtrSize; /* For looping over bytes of pCell */ u8 *pEnd; /* End mark for a varint */ u32 nSize; /* Size value to return */ #ifdef SQLITE_DEBUG /* The value returned by this function should always be the same as ** the (CellInfo.nSize) value found by doing a full parse of the ** cell. If SQLITE_DEBUG is defined, an assert() at the bottom of ** this function verifies that this invariant is not violated. */ CellInfo debuginfo; pPage->xParseCell(pPage, pCell, &debuginfo); #endif assert( pPage->noPayload==0 ); nSize = *pIter; if( nSize>=0x80 ){ pEnd = &pIter[8]; nSize &= 0x7f; do{ nSize = (nSize<<7) | (*++pIter & 0x7f); }while( *(pIter)>=0x80 && pIterintKey ){ /* pIter now points at the 64-bit integer key value, a variable length ** integer. The following block moves pIter to point at the first byte ** past the end of the key value. */ pEnd = &pIter[9]; while( (*pIter++)&0x80 && pItermaxLocal ); testcase( nSize==pPage->maxLocal+1 ); if( nSize<=pPage->maxLocal ){ nSize += (u32)(pIter - pCell); if( nSize<4 ) nSize = 4; }else{ int minLocal = pPage->minLocal; nSize = minLocal + (nSize - minLocal) % (pPage->pBt->usableSize - 4); testcase( nSize==pPage->maxLocal ); testcase( nSize==pPage->maxLocal+1 ); if( nSize>pPage->maxLocal ){ nSize = minLocal; } nSize += 4 + (u16)(pIter - pCell); } assert( nSize==debuginfo.nSize || CORRUPT_DB ); return (u16)nSize; } static u16 cellSizePtrNoPayload(MemPage *pPage, u8 *pCell){ u8 *pIter = pCell + 4; /* For looping over bytes of pCell */ u8 *pEnd; /* End mark for a varint */ #ifdef SQLITE_DEBUG /* The value returned by this function should always be the same as ** the (CellInfo.nSize) value found by doing a full parse of the ** cell. If SQLITE_DEBUG is defined, an assert() at the bottom of ** this function verifies that this invariant is not violated. */ CellInfo debuginfo; pPage->xParseCell(pPage, pCell, &debuginfo); #else UNUSED_PARAMETER(pPage); #endif assert( pPage->childPtrSize==4 ); pEnd = pIter + 9; while( (*pIter++)&0x80 && pIterxCellSize(pPage, findCell(pPage, iCell)); } #endif #ifndef SQLITE_OMIT_AUTOVACUUM /* ** If the cell pCell, part of page pPage contains a pointer ** to an overflow page, insert an entry into the pointer-map ** for the overflow page. */ static void ptrmapPutOvflPtr(MemPage *pPage, u8 *pCell, int *pRC){ CellInfo info; if( *pRC ) return; assert( pCell!=0 ); pPage->xParseCell(pPage, pCell, &info); if( info.iOverflow ){ Pgno ovfl = get4byte(&pCell[info.iOverflow]); ptrmapPut(pPage->pBt, ovfl, PTRMAP_OVERFLOW1, pPage->pgno, pRC); } } #endif /* ** Defragment the page given. All Cells are moved to the ** end of the page and all free space is collected into one ** big FreeBlk that occurs in between the header and cell ** pointer array and the cell content area. ** ** EVIDENCE-OF: R-44582-60138 SQLite may from time to time reorganize a ** b-tree page so that there are no freeblocks or fragment bytes, all ** unused bytes are contained in the unallocated space region, and all ** cells are packed tightly at the end of the page. 
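** (After a successful call the freeblock list head at header offset 1 and
** the fragmented-byte counter at header offset 7 are both zero, and the
** only unused bytes on the page are the single gap between the cell
** pointer array and the cell content area.)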
*/ static int defragmentPage(MemPage *pPage){ int i; /* Loop counter */ int pc; /* Address of the i-th cell */ int hdr; /* Offset to the page header */ int size; /* Size of a cell */ int usableSize; /* Number of usable bytes on a page */ int cellOffset; /* Offset to the cell pointer array */ int cbrk; /* Offset to the cell content area */ int nCell; /* Number of cells on the page */ unsigned char *data; /* The page data */ unsigned char *temp; /* Temp area for cell content */ unsigned char *src; /* Source of content */ int iCellFirst; /* First allowable cell index */ int iCellLast; /* Last possible cell index */ assert( sqlite3PagerIswriteable(pPage->pDbPage) ); assert( pPage->pBt!=0 ); assert( pPage->pBt->usableSize <= SQLITE_MAX_PAGE_SIZE ); assert( pPage->nOverflow==0 ); assert( sqlite3_mutex_held(pPage->pBt->mutex) ); temp = 0; src = data = pPage->aData; hdr = pPage->hdrOffset; cellOffset = pPage->cellOffset; nCell = pPage->nCell; assert( nCell==get2byte(&data[hdr+3]) ); usableSize = pPage->pBt->usableSize; cbrk = usableSize; iCellFirst = cellOffset + 2*nCell; iCellLast = usableSize - 4; for(i=0; iiCellLast ){ return SQLITE_CORRUPT_BKPT; } assert( pc>=iCellFirst && pc<=iCellLast ); size = pPage->xCellSize(pPage, &src[pc]); cbrk -= size; if( cbrkusableSize ){ return SQLITE_CORRUPT_BKPT; } assert( cbrk+size<=usableSize && cbrk>=iCellFirst ); testcase( cbrk+size==usableSize ); testcase( pc+size==usableSize ); put2byte(pAddr, cbrk); if( temp==0 ){ int x; if( cbrk==pc ) continue; temp = sqlite3PagerTempSpace(pPage->pBt->pPager); x = get2byte(&data[hdr+5]); memcpy(&temp[x], &data[x], (cbrk+size) - x); src = temp; } memcpy(&data[cbrk], &src[pc], size); } assert( cbrk>=iCellFirst ); put2byte(&data[hdr+5], cbrk); data[hdr+1] = 0; data[hdr+2] = 0; data[hdr+7] = 0; memset(&data[iCellFirst], 0, cbrk-iCellFirst); assert( sqlite3PagerIswriteable(pPage->pDbPage) ); if( cbrk-iCellFirst!=pPage->nFree ){ return SQLITE_CORRUPT_BKPT; } return SQLITE_OK; } /* ** Search the free-list on page pPg for space to store a cell nByte bytes in ** size. If one can be found, return a pointer to the space and remove it ** from the free-list. ** ** If no suitable space can be found on the free-list, return NULL. ** ** This function may detect corruption within pPg. If corruption is ** detected then *pRc is set to SQLITE_CORRUPT and NULL is returned. ** ** Slots on the free list that are between 1 and 3 bytes larger than nByte ** will be ignored if adding the extra space to the fragmentation count ** causes the fragmentation count to exceed 60. */ static u8 *pageFindSlot(MemPage *pPg, int nByte, int *pRc){ const int hdr = pPg->hdrOffset; u8 * const aData = pPg->aData; int iAddr = hdr + 1; int pc = get2byte(&aData[iAddr]); int x; int usableSize = pPg->pBt->usableSize; assert( pc>0 ); do{ int size; /* Size of the free slot */ /* EVIDENCE-OF: R-06866-39125 Freeblocks are always connected in order of ** increasing offset. */ if( pc>usableSize-4 || pc=0 ){ testcase( x==4 ); testcase( x==3 ); if( pc < pPg->cellOffset+2*pPg->nCell || size+pc > usableSize ){ *pRc = SQLITE_CORRUPT_BKPT; return 0; }else if( x<4 ){ /* EVIDENCE-OF: R-11498-58022 In a well-formed b-tree page, the total ** number of bytes in fragments may not exceed 60. */ if( aData[hdr+7]>57 ) return 0; /* Remove the slot from the free-list. Update the number of ** fragmented bytes within the page. */ memcpy(&aData[iAddr], &aData[pc], 2); aData[hdr+7] += (u8)x; }else{ /* The slot remains on the free-list. 
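** (x is the number of surplus bytes at the start of the slot; the new
** allocation is carved from the tail of the slot, so only the size field
** of the remaining slot needs to change.)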
Reduce its size to account ** for the portion used by the new allocation. */ put2byte(&aData[pc+2], x); } return &aData[pc + x]; } iAddr = pc; pc = get2byte(&aData[pc]); }while( pc ); return 0; } /* ** Allocate nByte bytes of space from within the B-Tree page passed ** as the first argument. Write into *pIdx the index into pPage->aData[] ** of the first byte of allocated space. Return either SQLITE_OK or ** an error code (usually SQLITE_CORRUPT). ** ** The caller guarantees that there is sufficient space to make the ** allocation. This routine might need to defragment in order to bring ** all the space together, however. This routine will avoid using ** the first two bytes past the cell pointer area since presumably this ** allocation is being made in order to insert a new cell, so we will ** also end up needing a new cell pointer. */ static int allocateSpace(MemPage *pPage, int nByte, int *pIdx){ const int hdr = pPage->hdrOffset; /* Local cache of pPage->hdrOffset */ u8 * const data = pPage->aData; /* Local cache of pPage->aData */ int top; /* First byte of cell content area */ int rc = SQLITE_OK; /* Integer return code */ int gap; /* First byte of gap between cell pointers and cell content */ assert( sqlite3PagerIswriteable(pPage->pDbPage) ); assert( pPage->pBt ); assert( sqlite3_mutex_held(pPage->pBt->mutex) ); assert( nByte>=0 ); /* Minimum cell size is 4 */ assert( pPage->nFree>=nByte ); assert( pPage->nOverflow==0 ); assert( nByte < (int)(pPage->pBt->usableSize-8) ); assert( pPage->cellOffset == hdr + 12 - 4*pPage->leaf ); gap = pPage->cellOffset + 2*pPage->nCell; assert( gap<=65536 ); /* EVIDENCE-OF: R-29356-02391 If the database uses a 65536-byte page size ** and the reserved space is zero (the usual value for reserved space) ** then the cell content offset of an empty page wants to be 65536. ** However, that integer is too large to be stored in a 2-byte unsigned ** integer, so a value of 0 is used in its place. */ top = get2byte(&data[hdr+5]); assert( top<=(int)pPage->pBt->usableSize ); /* Prevent by getAndInitPage() */ if( gap>top ){ if( top==0 && pPage->pBt->usableSize==65536 ){ top = 65536; }else{ return SQLITE_CORRUPT_BKPT; } } /* If there is enough space between gap and top for one more cell pointer ** array entry offset, and if the freelist is not empty, then search the ** freelist looking for a free slot big enough to satisfy the request. */ testcase( gap+2==top ); testcase( gap+1==top ); testcase( gap==top ); if( (data[hdr+2] || data[hdr+1]) && gap+2<=top ){ u8 *pSpace = pageFindSlot(pPage, nByte, &rc); if( pSpace ){ assert( pSpace>=data && (pSpace - data)<65536 ); *pIdx = (int)(pSpace - data); return SQLITE_OK; }else if( rc ){ return rc; } } /* The request could not be fulfilled using a freelist slot. Check ** to see if defragmentation is necessary. */ testcase( gap+2+nByte==top ); if( gap+2+nByte>top ){ assert( pPage->nCell>0 || CORRUPT_DB ); rc = defragmentPage(pPage); if( rc ) return rc; top = get2byteNotZero(&data[hdr+5]); assert( gap+nByte<=top ); } /* Allocate memory from the gap in between the cell pointer array ** and the cell content area. The btreeInitPage() call has already ** validated the freelist. Given that the freelist is valid, there ** is no way that the allocation can extend off the end of the page. ** The assert() below verifies the previous sentence. */ top -= nByte; put2byte(&data[hdr+5], top); assert( top+nByte <= (int)pPage->pBt->usableSize ); *pIdx = top; return SQLITE_OK; } /* ** Return a section of the pPage->aData to the freelist. 
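** (On disk a freeblock carries a 4-byte header: bytes 0-1 hold the
** big-endian offset of the next freeblock, or zero for the last freeblock,
** and bytes 2-3 hold the big-endian size of this freeblock, header
** included.)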
** The first byte of the new free block is pPage->aData[iStart] ** and the size of the block is iSize bytes. ** ** Adjacent freeblocks are coalesced. ** ** Note that even though the freeblock list was checked by btreeInitPage(), ** that routine will not detect overlap between cells or freeblocks. Nor ** does it detect cells or freeblocks that encrouch into the reserved bytes ** at the end of the page. So do additional corruption checks inside this ** routine and return SQLITE_CORRUPT if any problems are found. */ static int freeSpace(MemPage *pPage, u16 iStart, u16 iSize){ u16 iPtr; /* Address of ptr to next freeblock */ u16 iFreeBlk; /* Address of the next freeblock */ u8 hdr; /* Page header size. 0 or 100 */ u8 nFrag = 0; /* Reduction in fragmentation */ u16 iOrigSize = iSize; /* Original value of iSize */ u32 iLast = pPage->pBt->usableSize-4; /* Largest possible freeblock offset */ u32 iEnd = iStart + iSize; /* First byte past the iStart buffer */ unsigned char *data = pPage->aData; /* Page content */ assert( pPage->pBt!=0 ); assert( sqlite3PagerIswriteable(pPage->pDbPage) ); assert( CORRUPT_DB || iStart>=pPage->hdrOffset+6+pPage->childPtrSize ); assert( CORRUPT_DB || iEnd <= pPage->pBt->usableSize ); assert( sqlite3_mutex_held(pPage->pBt->mutex) ); assert( iSize>=4 ); /* Minimum cell size is 4 */ assert( iStart<=iLast ); /* Overwrite deleted information with zeros when the secure_delete ** option is enabled */ if( pPage->pBt->btsFlags & BTS_SECURE_DELETE ){ memset(&data[iStart], 0, iSize); } /* The list of freeblocks must be in ascending order. Find the ** spot on the list where iStart should be inserted. */ hdr = pPage->hdrOffset; iPtr = hdr + 1; if( data[iPtr+1]==0 && data[iPtr]==0 ){ iFreeBlk = 0; /* Shortcut for the case when the freelist is empty */ }else{ while( (iFreeBlk = get2byte(&data[iPtr]))>0 && iFreeBlkiLast ) return SQLITE_CORRUPT_BKPT; assert( iFreeBlk>iPtr || iFreeBlk==0 ); /* At this point: ** iFreeBlk: First freeblock after iStart, or zero if none ** iPtr: The address of a pointer to iFreeBlk ** ** Check to see if iFreeBlk should be coalesced onto the end of iStart. */ if( iFreeBlk && iEnd+3>=iFreeBlk ){ nFrag = iFreeBlk - iEnd; if( iEnd>iFreeBlk ) return SQLITE_CORRUPT_BKPT; iEnd = iFreeBlk + get2byte(&data[iFreeBlk+2]); if( iEnd > pPage->pBt->usableSize ) return SQLITE_CORRUPT_BKPT; iSize = iEnd - iStart; iFreeBlk = get2byte(&data[iFreeBlk]); } /* If iPtr is another freeblock (that is, if iPtr is not the freelist ** pointer in the page header) then check to see if iStart should be ** coalesced onto the end of iPtr. */ if( iPtr>hdr+1 ){ int iPtrEnd = iPtr + get2byte(&data[iPtr+2]); if( iPtrEnd+3>=iStart ){ if( iPtrEnd>iStart ) return SQLITE_CORRUPT_BKPT; nFrag += iStart - iPtrEnd; iSize = iEnd - iPtr; iStart = iPtr; } } if( nFrag>data[hdr+7] ) return SQLITE_CORRUPT_BKPT; data[hdr+7] -= nFrag; } if( iStart==get2byte(&data[hdr+5]) ){ /* The new freeblock is at the beginning of the cell content area, ** so just extend the cell content area rather than create another ** freelist entry */ if( iPtr!=hdr+1 ) return SQLITE_CORRUPT_BKPT; put2byte(&data[hdr+1], iFreeBlk); put2byte(&data[hdr+5], iEnd); }else{ /* Insert the new freeblock into the freelist */ put2byte(&data[iPtr], iStart); put2byte(&data[iStart], iFreeBlk); put2byte(&data[iStart+2], iSize); } pPage->nFree += iOrigSize; return SQLITE_OK; } /* ** Decode the flags byte (the first byte of the header) for a page ** and initialize fields of the MemPage structure accordingly. 
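** (Numerically, given the PTF_ values asserted in the function body, the
** four legal flag bytes are 2 for an interior index page, 10 for a leaf
** index page, 5 for an interior table page and 13 for a leaf table page.)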
** ** Only the following combinations are supported. Anything different ** indicates a corrupt database files: ** ** PTF_ZERODATA ** PTF_ZERODATA | PTF_LEAF ** PTF_LEAFDATA | PTF_INTKEY ** PTF_LEAFDATA | PTF_INTKEY | PTF_LEAF */ static int decodeFlags(MemPage *pPage, int flagByte){ BtShared *pBt; /* A copy of pPage->pBt */ assert( pPage->hdrOffset==(pPage->pgno==1 ? 100 : 0) ); assert( sqlite3_mutex_held(pPage->pBt->mutex) ); pPage->leaf = (u8)(flagByte>>3); assert( PTF_LEAF == 1<<3 ); flagByte &= ~PTF_LEAF; pPage->childPtrSize = 4-4*pPage->leaf; pPage->xCellSize = cellSizePtr; pBt = pPage->pBt; if( flagByte==(PTF_LEAFDATA | PTF_INTKEY) ){ /* EVIDENCE-OF: R-03640-13415 A value of 5 means the page is an interior ** table b-tree page. */ assert( (PTF_LEAFDATA|PTF_INTKEY)==5 ); /* EVIDENCE-OF: R-20501-61796 A value of 13 means the page is a leaf ** table b-tree page. */ assert( (PTF_LEAFDATA|PTF_INTKEY|PTF_LEAF)==13 ); pPage->intKey = 1; if( pPage->leaf ){ pPage->intKeyLeaf = 1; pPage->noPayload = 0; pPage->xParseCell = btreeParseCellPtr; }else{ pPage->intKeyLeaf = 0; pPage->noPayload = 1; pPage->xCellSize = cellSizePtrNoPayload; pPage->xParseCell = btreeParseCellPtrNoPayload; } pPage->maxLocal = pBt->maxLeaf; pPage->minLocal = pBt->minLeaf; }else if( flagByte==PTF_ZERODATA ){ /* EVIDENCE-OF: R-27225-53936 A value of 2 means the page is an interior ** index b-tree page. */ assert( (PTF_ZERODATA)==2 ); /* EVIDENCE-OF: R-16571-11615 A value of 10 means the page is a leaf ** index b-tree page. */ assert( (PTF_ZERODATA|PTF_LEAF)==10 ); pPage->intKey = 0; pPage->intKeyLeaf = 0; pPage->noPayload = 0; pPage->xParseCell = btreeParseCellPtrIndex; pPage->maxLocal = pBt->maxLocal; pPage->minLocal = pBt->minLocal; }else{ /* EVIDENCE-OF: R-47608-56469 Any other value for the b-tree page type is ** an error. */ return SQLITE_CORRUPT_BKPT; } pPage->max1bytePayload = pBt->max1bytePayload; return SQLITE_OK; } /* ** Initialize the auxiliary information for a disk block. ** ** Return SQLITE_OK on success. If we see that the page does ** not contain a well-formed database page, then return ** SQLITE_CORRUPT. Note that a return of SQLITE_OK does not ** guarantee that the page is well-formed. It only shows that ** we failed to detect any corruption. */ static int btreeInitPage(MemPage *pPage){ assert( pPage->pBt!=0 ); assert( pPage->pBt->db!=0 ); assert( sqlite3_mutex_held(pPage->pBt->mutex) ); assert( pPage->pgno==sqlite3PagerPagenumber(pPage->pDbPage) ); assert( pPage == sqlite3PagerGetExtra(pPage->pDbPage) ); assert( pPage->aData == sqlite3PagerGetData(pPage->pDbPage) ); if( !pPage->isInit ){ u16 pc; /* Address of a freeblock within pPage->aData[] */ u8 hdr; /* Offset to beginning of page header */ u8 *data; /* Equal to pPage->aData */ BtShared *pBt; /* The main btree structure */ int usableSize; /* Amount of usable space on each page */ u16 cellOffset; /* Offset from start of page to first cell pointer */ int nFree; /* Number of unused bytes on the page */ int top; /* First byte of the cell content area */ int iCellFirst; /* First allowable cell or freeblock offset */ int iCellLast; /* Last possible cell or freeblock offset */ pBt = pPage->pBt; hdr = pPage->hdrOffset; data = pPage->aData; /* EVIDENCE-OF: R-28594-02890 The one-byte flag at offset 0 indicating ** the b-tree page type. 
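** (For reference, the other header fields consulted below sit at fixed
** offsets from hdr: bytes 1-2 give the first freeblock, bytes 3-4 the cell
** count, bytes 5-6 the start of the cell content area, byte 7 the number
** of fragmented free bytes, and bytes 8-11 the right-most child pointer on
** interior pages.)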
*/ if( decodeFlags(pPage, data[hdr]) ) return SQLITE_CORRUPT_BKPT; assert( pBt->pageSize>=512 && pBt->pageSize<=65536 ); pPage->maskPage = (u16)(pBt->pageSize - 1); pPage->nOverflow = 0; usableSize = pBt->usableSize; pPage->cellOffset = cellOffset = hdr + 8 + pPage->childPtrSize; pPage->aDataEnd = &data[usableSize]; pPage->aCellIdx = &data[cellOffset]; pPage->aDataOfst = &data[pPage->childPtrSize]; /* EVIDENCE-OF: R-58015-48175 The two-byte integer at offset 5 designates ** the start of the cell content area. A zero value for this integer is ** interpreted as 65536. */ top = get2byteNotZero(&data[hdr+5]); /* EVIDENCE-OF: R-37002-32774 The two-byte integer at offset 3 gives the ** number of cells on the page. */ pPage->nCell = get2byte(&data[hdr+3]); if( pPage->nCell>MX_CELL(pBt) ){ /* To many cells for a single page. The page must be corrupt */ return SQLITE_CORRUPT_BKPT; } testcase( pPage->nCell==MX_CELL(pBt) ); /* EVIDENCE-OF: R-24089-57979 If a page contains no cells (which is only ** possible for a root page of a table that contains no rows) then the ** offset to the cell content area will equal the page size minus the ** bytes of reserved space. */ assert( pPage->nCell>0 || top==usableSize || CORRUPT_DB ); /* A malformed database page might cause us to read past the end ** of page when parsing a cell. ** ** The following block of code checks early to see if a cell extends ** past the end of a page boundary and causes SQLITE_CORRUPT to be ** returned if it does. */ iCellFirst = cellOffset + 2*pPage->nCell; iCellLast = usableSize - 4; if( pBt->db->flags & SQLITE_CellSizeCk ){ int i; /* Index into the cell pointer array */ int sz; /* Size of a cell */ if( !pPage->leaf ) iCellLast--; for(i=0; inCell; i++){ pc = get2byteAligned(&data[cellOffset+i*2]); testcase( pc==iCellFirst ); testcase( pc==iCellLast ); if( pciCellLast ){ return SQLITE_CORRUPT_BKPT; } sz = pPage->xCellSize(pPage, &data[pc]); testcase( pc+sz==usableSize ); if( pc+sz>usableSize ){ return SQLITE_CORRUPT_BKPT; } } if( !pPage->leaf ) iCellLast++; } /* Compute the total free space on the page ** EVIDENCE-OF: R-23588-34450 The two-byte integer at offset 1 gives the ** start of the first freeblock on the page, or is zero if there are no ** freeblocks. */ pc = get2byte(&data[hdr+1]); nFree = data[hdr+7] + top; /* Init nFree to non-freeblock free space */ while( pc>0 ){ u16 next, size; if( pciCellLast ){ /* EVIDENCE-OF: R-55530-52930 In a well-formed b-tree page, there will ** always be at least one cell before the first freeblock. ** ** Or, the freeblock is off the end of the page */ return SQLITE_CORRUPT_BKPT; } next = get2byte(&data[pc]); size = get2byte(&data[pc+2]); if( (next>0 && next<=pc+size+3) || pc+size>usableSize ){ /* Free blocks must be in ascending order. And the last byte of ** the free-block must lie on the database page. */ return SQLITE_CORRUPT_BKPT; } nFree = nFree + size; pc = next; } /* At this point, nFree contains the sum of the offset to the start ** of the cell-content area plus the number of free bytes within ** the cell-content area. If this is greater than the usable-size ** of the page, then the page must be corrupted. This check also ** serves to verify that the offset to the start of the cell-content ** area, according to the page header, lies within the page. */ if( nFree>usableSize ){ return SQLITE_CORRUPT_BKPT; } pPage->nFree = (u16)(nFree - iCellFirst); pPage->isInit = 1; } return SQLITE_OK; } /* ** Set up a raw page so that it looks like a database page holding ** no entries. 
*/ static void zeroPage(MemPage *pPage, int flags){ unsigned char *data = pPage->aData; BtShared *pBt = pPage->pBt; u8 hdr = pPage->hdrOffset; u16 first; assert( sqlite3PagerPagenumber(pPage->pDbPage)==pPage->pgno ); assert( sqlite3PagerGetExtra(pPage->pDbPage) == (void*)pPage ); assert( sqlite3PagerGetData(pPage->pDbPage) == data ); assert( sqlite3PagerIswriteable(pPage->pDbPage) ); assert( sqlite3_mutex_held(pBt->mutex) ); if( pBt->btsFlags & BTS_SECURE_DELETE ){ memset(&data[hdr], 0, pBt->usableSize - hdr); } data[hdr] = (char)flags; first = hdr + ((flags&PTF_LEAF)==0 ? 12 : 8); memset(&data[hdr+1], 0, 4); data[hdr+7] = 0; put2byte(&data[hdr+5], pBt->usableSize); pPage->nFree = (u16)(pBt->usableSize - first); decodeFlags(pPage, flags); pPage->cellOffset = first; pPage->aDataEnd = &data[pBt->usableSize]; pPage->aCellIdx = &data[first]; pPage->aDataOfst = &data[pPage->childPtrSize]; pPage->nOverflow = 0; assert( pBt->pageSize>=512 && pBt->pageSize<=65536 ); pPage->maskPage = (u16)(pBt->pageSize - 1); pPage->nCell = 0; pPage->isInit = 1; } /* ** Convert a DbPage obtained from the pager into a MemPage used by ** the btree layer. */ static MemPage *btreePageFromDbPage(DbPage *pDbPage, Pgno pgno, BtShared *pBt){ MemPage *pPage = (MemPage*)sqlite3PagerGetExtra(pDbPage); pPage->aData = sqlite3PagerGetData(pDbPage); pPage->pDbPage = pDbPage; pPage->pBt = pBt; pPage->pgno = pgno; pPage->hdrOffset = pgno==1 ? 100 : 0; return pPage; } /* ** Get a page from the pager. Initialize the MemPage.pBt and ** MemPage.aData elements if needed. See also: btreeGetUnusedPage(). ** ** If the PAGER_GET_NOCONTENT flag is set, it means that we do not care ** about the content of the page at this time. So do not go to the disk ** to fetch the content. Just fill in the content with zeros for now. ** If in the future we call sqlite3PagerWrite() on this page, that ** means we have started to be concerned about content and the disk ** read should occur at that point. */ static int btreeGetPage( BtShared *pBt, /* The btree */ Pgno pgno, /* Number of the page to fetch */ MemPage **ppPage, /* Return the page in this parameter */ int flags /* PAGER_GET_NOCONTENT or PAGER_GET_READONLY */ ){ int rc; DbPage *pDbPage; assert( flags==0 || flags==PAGER_GET_NOCONTENT || flags==PAGER_GET_READONLY ); assert( sqlite3_mutex_held(pBt->mutex) ); rc = sqlite3PagerAcquire(pBt->pPager, pgno, (DbPage**)&pDbPage, flags); if( rc ) return rc; *ppPage = btreePageFromDbPage(pDbPage, pgno, pBt); return SQLITE_OK; } /* ** Retrieve a page from the pager cache. If the requested page is not ** already in the pager cache return NULL. Initialize the MemPage.pBt and ** MemPage.aData elements if needed. */ static MemPage *btreePageLookup(BtShared *pBt, Pgno pgno){ DbPage *pDbPage; assert( sqlite3_mutex_held(pBt->mutex) ); pDbPage = sqlite3PagerLookup(pBt->pPager, pgno); if( pDbPage ){ return btreePageFromDbPage(pDbPage, pgno, pBt); } return 0; } /* ** Return the size of the database file in pages. If there is any kind of ** error, return ((unsigned int)-1). */ static Pgno btreePagecount(BtShared *pBt){ return pBt->nPage; } SQLITE_PRIVATE u32 sqlite3BtreeLastPage(Btree *p){ assert( sqlite3BtreeHoldsMutex(p) ); assert( ((p->pBt->nPage)&0x8000000)==0 ); return btreePagecount(p->pBt); } /* ** Get a page from the pager and initialize it. ** ** If pCur!=0 then the page is being fetched as part of a moveToChild() ** call. Do additional sanity checking on the page in this case. ** And if the fetch fails, this routine must decrement pCur->iPage. 
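** (The decrement undoes the pCur->iPage++ that the caller performs before
** invoking this routine, so a failed descent leaves the cursor depth
** unchanged.)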
** ** The page is fetched as read-write unless pCur is not NULL and is ** a read-only cursor. ** ** If an error occurs, then *ppPage is undefined. It ** may remain unchanged, or it may be set to an invalid value. */ static int getAndInitPage( BtShared *pBt, /* The database file */ Pgno pgno, /* Number of the page to get */ MemPage **ppPage, /* Write the page pointer here */ BtCursor *pCur, /* Cursor to receive the page, or NULL */ int bReadOnly /* True for a read-only page */ ){ int rc; DbPage *pDbPage; assert( sqlite3_mutex_held(pBt->mutex) ); assert( pCur==0 || ppPage==&pCur->apPage[pCur->iPage] ); assert( pCur==0 || bReadOnly==pCur->curPagerFlags ); assert( pCur==0 || pCur->iPage>0 ); if( pgno>btreePagecount(pBt) ){ rc = SQLITE_CORRUPT_BKPT; goto getAndInitPage_error; } rc = sqlite3PagerAcquire(pBt->pPager, pgno, (DbPage**)&pDbPage, bReadOnly); if( rc ){ goto getAndInitPage_error; } *ppPage = btreePageFromDbPage(pDbPage, pgno, pBt); if( (*ppPage)->isInit==0 ){ rc = btreeInitPage(*ppPage); if( rc!=SQLITE_OK ){ releasePage(*ppPage); goto getAndInitPage_error; } } /* If obtaining a child page for a cursor, we must verify that the page is ** compatible with the root page. */ if( pCur && ((*ppPage)->nCell<1 || (*ppPage)->intKey!=pCur->curIntKey) ){ rc = SQLITE_CORRUPT_BKPT; releasePage(*ppPage); goto getAndInitPage_error; } return SQLITE_OK; getAndInitPage_error: if( pCur ) pCur->iPage--; testcase( pgno==0 ); assert( pgno!=0 || rc==SQLITE_CORRUPT ); return rc; } /* ** Release a MemPage. This should be called once for each prior ** call to btreeGetPage. */ static void releasePageNotNull(MemPage *pPage){ assert( pPage->aData ); assert( pPage->pBt ); assert( pPage->pDbPage!=0 ); assert( sqlite3PagerGetExtra(pPage->pDbPage) == (void*)pPage ); assert( sqlite3PagerGetData(pPage->pDbPage)==pPage->aData ); assert( sqlite3_mutex_held(pPage->pBt->mutex) ); sqlite3PagerUnrefNotNull(pPage->pDbPage); } static void releasePage(MemPage *pPage){ if( pPage ) releasePageNotNull(pPage); } /* ** Get an unused page. ** ** This works just like btreeGetPage() with the addition: ** ** * If the page is already in use for some other purpose, immediately ** release it and return an SQLITE_CURRUPT error. ** * Make sure the isInit flag is clear */ static int btreeGetUnusedPage( BtShared *pBt, /* The btree */ Pgno pgno, /* Number of the page to fetch */ MemPage **ppPage, /* Return the page in this parameter */ int flags /* PAGER_GET_NOCONTENT or PAGER_GET_READONLY */ ){ int rc = btreeGetPage(pBt, pgno, ppPage, flags); if( rc==SQLITE_OK ){ if( sqlite3PagerPageRefcount((*ppPage)->pDbPage)>1 ){ releasePage(*ppPage); *ppPage = 0; return SQLITE_CORRUPT_BKPT; } (*ppPage)->isInit = 0; }else{ *ppPage = 0; } return rc; } /* ** During a rollback, when the pager reloads information into the cache ** so that the cache is restored to its original state at the start of ** the transaction, for each page restored this routine is called. ** ** This routine needs to reset the extra data section at the end of the ** page to agree with the restored data. */ static void pageReinit(DbPage *pData){ MemPage *pPage; pPage = (MemPage *)sqlite3PagerGetExtra(pData); assert( sqlite3PagerPageRefcount(pData)>0 ); if( pPage->isInit ){ assert( sqlite3_mutex_held(pPage->pBt->mutex) ); pPage->isInit = 0; if( sqlite3PagerPageRefcount(pData)>1 ){ /* pPage might not be a btree page; it might be an overflow page ** or ptrmap page or a free page. In those cases, the following ** call to btreeInitPage() will likely return SQLITE_CORRUPT. 
** But no harm is done by this. And it is very important that ** btreeInitPage() be called on every btree page so we make ** the call for every page that comes in for re-initing. */ btreeInitPage(pPage); } } } /* ** Invoke the busy handler for a btree. */ static int btreeInvokeBusyHandler(void *pArg){ BtShared *pBt = (BtShared*)pArg; assert( pBt->db ); assert( sqlite3_mutex_held(pBt->db->mutex) ); return sqlite3InvokeBusyHandler(&pBt->db->busyHandler); } /* ** Open a database file. ** ** zFilename is the name of the database file. If zFilename is NULL ** then an ephemeral database is created. The ephemeral database might ** be exclusively in memory, or it might use a disk-based memory cache. ** Either way, the ephemeral database will be automatically deleted ** when sqlite3BtreeClose() is called. ** ** If zFilename is ":memory:" then an in-memory database is created ** that is automatically destroyed when it is closed. ** ** The "flags" parameter is a bitmask that might contain bits like ** BTREE_OMIT_JOURNAL and/or BTREE_MEMORY. ** ** If the database is already opened in the same database connection ** and we are in shared cache mode, then the open will fail with an ** SQLITE_CONSTRAINT error. We cannot allow two or more BtShared ** objects in the same database connection since doing so will lead ** to problems with locking. */ SQLITE_PRIVATE int sqlite3BtreeOpen( sqlite3_vfs *pVfs, /* VFS to use for this b-tree */ const char *zFilename, /* Name of the file containing the BTree database */ sqlite3 *db, /* Associated database handle */ Btree **ppBtree, /* Pointer to new Btree object written here */ int flags, /* Options */ int vfsFlags /* Flags passed through to sqlite3_vfs.xOpen() */ ){ BtShared *pBt = 0; /* Shared part of btree structure */ Btree *p; /* Handle to return */ sqlite3_mutex *mutexOpen = 0; /* Prevents a race condition. Ticket #3537 */ int rc = SQLITE_OK; /* Result code from this function */ u8 nReserve; /* Byte of unused space on each page */ unsigned char zDbHeader[100]; /* Database header content */ /* True if opening an ephemeral, temporary database */ const int isTempDb = zFilename==0 || zFilename[0]==0; /* Set the variable isMemdb to true for an in-memory database, or ** false for a file-based database. 
*/ #ifdef SQLITE_OMIT_MEMORYDB const int isMemdb = 0; #else const int isMemdb = (zFilename && strcmp(zFilename, ":memory:")==0) || (isTempDb && sqlite3TempInMemory(db)) || (vfsFlags & SQLITE_OPEN_MEMORY)!=0; #endif assert( db!=0 ); assert( pVfs!=0 ); assert( sqlite3_mutex_held(db->mutex) ); assert( (flags&0xff)==flags ); /* flags fit in 8 bits */ /* Only a BTREE_SINGLE database can be BTREE_UNORDERED */ assert( (flags & BTREE_UNORDERED)==0 || (flags & BTREE_SINGLE)!=0 ); /* A BTREE_SINGLE database is always a temporary and/or ephemeral */ assert( (flags & BTREE_SINGLE)==0 || isTempDb ); if( isMemdb ){ flags |= BTREE_MEMORY; } if( (vfsFlags & SQLITE_OPEN_MAIN_DB)!=0 && (isMemdb || isTempDb) ){ vfsFlags = (vfsFlags & ~SQLITE_OPEN_MAIN_DB) | SQLITE_OPEN_TEMP_DB; } p = sqlite3MallocZero(sizeof(Btree)); if( !p ){ return SQLITE_NOMEM; } p->inTrans = TRANS_NONE; p->db = db; #ifndef SQLITE_OMIT_SHARED_CACHE p->lock.pBtree = p; p->lock.iTable = 1; #endif #if !defined(SQLITE_OMIT_SHARED_CACHE) && !defined(SQLITE_OMIT_DISKIO) /* ** If this Btree is a candidate for shared cache, try to find an ** existing BtShared object that we can share with */ if( isTempDb==0 && (isMemdb==0 || (vfsFlags&SQLITE_OPEN_URI)!=0) ){ if( vfsFlags & SQLITE_OPEN_SHAREDCACHE ){ int nFilename = sqlite3Strlen30(zFilename)+1; int nFullPathname = pVfs->mxPathname+1; char *zFullPathname = sqlite3Malloc(MAX(nFullPathname,nFilename)); MUTEX_LOGIC( sqlite3_mutex *mutexShared; ) p->sharable = 1; if( !zFullPathname ){ sqlite3_free(p); return SQLITE_NOMEM; } if( isMemdb ){ memcpy(zFullPathname, zFilename, nFilename); }else{ rc = sqlite3OsFullPathname(pVfs, zFilename, nFullPathname, zFullPathname); if( rc ){ sqlite3_free(zFullPathname); sqlite3_free(p); return rc; } } #if SQLITE_THREADSAFE mutexOpen = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_OPEN); sqlite3_mutex_enter(mutexOpen); mutexShared = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER); sqlite3_mutex_enter(mutexShared); #endif for(pBt=GLOBAL(BtShared*,sqlite3SharedCacheList); pBt; pBt=pBt->pNext){ assert( pBt->nRef>0 ); if( 0==strcmp(zFullPathname, sqlite3PagerFilename(pBt->pPager, 0)) && sqlite3PagerVfs(pBt->pPager)==pVfs ){ int iDb; for(iDb=db->nDb-1; iDb>=0; iDb--){ Btree *pExisting = db->aDb[iDb].pBt; if( pExisting && pExisting->pBt==pBt ){ sqlite3_mutex_leave(mutexShared); sqlite3_mutex_leave(mutexOpen); sqlite3_free(zFullPathname); sqlite3_free(p); return SQLITE_CONSTRAINT; } } p->pBt = pBt; pBt->nRef++; break; } } sqlite3_mutex_leave(mutexShared); sqlite3_free(zFullPathname); } #ifdef SQLITE_DEBUG else{ /* In debug mode, we mark all persistent databases as sharable ** even when they are not. This exercises the locking code and ** gives more opportunity for asserts(sqlite3_mutex_held()) ** statements to find locking problems. */ p->sharable = 1; } #endif } #endif if( pBt==0 ){ /* ** The following asserts make sure that structures used by the btree are ** the right size. This is to guard against size changes that result ** when compiling on a different architecture. 
*/ assert( sizeof(i64)==8 ); assert( sizeof(u64)==8 ); assert( sizeof(u32)==4 ); assert( sizeof(u16)==2 ); assert( sizeof(Pgno)==4 ); pBt = sqlite3MallocZero( sizeof(*pBt) ); if( pBt==0 ){ rc = SQLITE_NOMEM; goto btree_open_out; } rc = sqlite3PagerOpen(pVfs, &pBt->pPager, zFilename, EXTRA_SIZE, flags, vfsFlags, pageReinit); if( rc==SQLITE_OK ){ sqlite3PagerSetMmapLimit(pBt->pPager, db->szMmap); rc = sqlite3PagerReadFileheader(pBt->pPager,sizeof(zDbHeader),zDbHeader); } if( rc!=SQLITE_OK ){ goto btree_open_out; } pBt->openFlags = (u8)flags; pBt->db = db; sqlite3PagerSetBusyhandler(pBt->pPager, btreeInvokeBusyHandler, pBt); p->pBt = pBt; pBt->pCursor = 0; pBt->pPage1 = 0; if( sqlite3PagerIsreadonly(pBt->pPager) ) pBt->btsFlags |= BTS_READ_ONLY; #ifdef SQLITE_SECURE_DELETE pBt->btsFlags |= BTS_SECURE_DELETE; #endif /* EVIDENCE-OF: R-51873-39618 The page size for a database file is ** determined by the 2-byte integer located at an offset of 16 bytes from ** the beginning of the database file. */ pBt->pageSize = (zDbHeader[16]<<8) | (zDbHeader[17]<<16); if( pBt->pageSize<512 || pBt->pageSize>SQLITE_MAX_PAGE_SIZE || ((pBt->pageSize-1)&pBt->pageSize)!=0 ){ pBt->pageSize = 0; #ifndef SQLITE_OMIT_AUTOVACUUM /* If the magic name ":memory:" will create an in-memory database, then ** leave the autoVacuum mode at 0 (do not auto-vacuum), even if ** SQLITE_DEFAULT_AUTOVACUUM is true. On the other hand, if ** SQLITE_OMIT_MEMORYDB has been defined, then ":memory:" is just a ** regular file-name. In this case the auto-vacuum applies as per normal. */ if( zFilename && !isMemdb ){ pBt->autoVacuum = (SQLITE_DEFAULT_AUTOVACUUM ? 1 : 0); pBt->incrVacuum = (SQLITE_DEFAULT_AUTOVACUUM==2 ? 1 : 0); } #endif nReserve = 0; }else{ /* EVIDENCE-OF: R-37497-42412 The size of the reserved region is ** determined by the one-byte unsigned integer found at an offset of 20 ** into the database file header. */ nReserve = zDbHeader[20]; pBt->btsFlags |= BTS_PAGESIZE_FIXED; #ifndef SQLITE_OMIT_AUTOVACUUM pBt->autoVacuum = (get4byte(&zDbHeader[36 + 4*4])?1:0); pBt->incrVacuum = (get4byte(&zDbHeader[36 + 7*4])?1:0); #endif } rc = sqlite3PagerSetPagesize(pBt->pPager, &pBt->pageSize, nReserve); if( rc ) goto btree_open_out; pBt->usableSize = pBt->pageSize - nReserve; assert( (pBt->pageSize & 7)==0 ); /* 8-byte alignment of pageSize */ #if !defined(SQLITE_OMIT_SHARED_CACHE) && !defined(SQLITE_OMIT_DISKIO) /* Add the new BtShared object to the linked list sharable BtShareds. */ if( p->sharable ){ MUTEX_LOGIC( sqlite3_mutex *mutexShared; ) pBt->nRef = 1; MUTEX_LOGIC( mutexShared = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER);) if( SQLITE_THREADSAFE && sqlite3GlobalConfig.bCoreMutex ){ pBt->mutex = sqlite3MutexAlloc(SQLITE_MUTEX_FAST); if( pBt->mutex==0 ){ rc = SQLITE_NOMEM; db->mallocFailed = 0; goto btree_open_out; } } sqlite3_mutex_enter(mutexShared); pBt->pNext = GLOBAL(BtShared*,sqlite3SharedCacheList); GLOBAL(BtShared*,sqlite3SharedCacheList) = pBt; sqlite3_mutex_leave(mutexShared); } #endif } #if !defined(SQLITE_OMIT_SHARED_CACHE) && !defined(SQLITE_OMIT_DISKIO) /* If the new Btree uses a sharable pBtShared, then link the new ** Btree into the list of all sharable Btrees for the same connection. ** The list is kept in ascending order by pBt address. 
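** (Keeping the list sorted by BtShared address gives every connection the
** same lock-acquisition order for its sharable Btrees, which is what lets
** code that must lock all of them, such as sqlite3BtreeEnterAll(), do so
** without risk of deadlock.)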
*/ if( p->sharable ){ int i; Btree *pSib; for(i=0; inDb; i++){ if( (pSib = db->aDb[i].pBt)!=0 && pSib->sharable ){ while( pSib->pPrev ){ pSib = pSib->pPrev; } if( p->pBtpBt ){ p->pNext = pSib; p->pPrev = 0; pSib->pPrev = p; }else{ while( pSib->pNext && pSib->pNext->pBtpBt ){ pSib = pSib->pNext; } p->pNext = pSib->pNext; p->pPrev = pSib; if( p->pNext ){ p->pNext->pPrev = p; } pSib->pNext = p; } break; } } } #endif *ppBtree = p; btree_open_out: if( rc!=SQLITE_OK ){ if( pBt && pBt->pPager ){ sqlite3PagerClose(pBt->pPager); } sqlite3_free(pBt); sqlite3_free(p); *ppBtree = 0; }else{ /* If the B-Tree was successfully opened, set the pager-cache size to the ** default value. Except, when opening on an existing shared pager-cache, ** do not change the pager-cache size. */ if( sqlite3BtreeSchema(p, 0, 0)==0 ){ sqlite3PagerSetCachesize(p->pBt->pPager, SQLITE_DEFAULT_CACHE_SIZE); } } if( mutexOpen ){ assert( sqlite3_mutex_held(mutexOpen) ); sqlite3_mutex_leave(mutexOpen); } return rc; } /* ** Decrement the BtShared.nRef counter. When it reaches zero, ** remove the BtShared structure from the sharing list. Return ** true if the BtShared.nRef counter reaches zero and return ** false if it is still positive. */ static int removeFromSharingList(BtShared *pBt){ #ifndef SQLITE_OMIT_SHARED_CACHE MUTEX_LOGIC( sqlite3_mutex *pMaster; ) BtShared *pList; int removed = 0; assert( sqlite3_mutex_notheld(pBt->mutex) ); MUTEX_LOGIC( pMaster = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER); ) sqlite3_mutex_enter(pMaster); pBt->nRef--; if( pBt->nRef<=0 ){ if( GLOBAL(BtShared*,sqlite3SharedCacheList)==pBt ){ GLOBAL(BtShared*,sqlite3SharedCacheList) = pBt->pNext; }else{ pList = GLOBAL(BtShared*,sqlite3SharedCacheList); while( ALWAYS(pList) && pList->pNext!=pBt ){ pList=pList->pNext; } if( ALWAYS(pList) ){ pList->pNext = pBt->pNext; } } if( SQLITE_THREADSAFE ){ sqlite3_mutex_free(pBt->mutex); } removed = 1; } sqlite3_mutex_leave(pMaster); return removed; #else return 1; #endif } /* ** Make sure pBt->pTmpSpace points to an allocation of ** MX_CELL_SIZE(pBt) bytes with a 4-byte prefix for a left-child ** pointer. */ static void allocateTempSpace(BtShared *pBt){ if( !pBt->pTmpSpace ){ pBt->pTmpSpace = sqlite3PageMalloc( pBt->pageSize ); /* One of the uses of pBt->pTmpSpace is to format cells before ** inserting them into a leaf page (function fillInCell()). If ** a cell is less than 4 bytes in size, it is rounded up to 4 bytes ** by the various routines that manipulate binary cells. Which ** can mean that fillInCell() only initializes the first 2 or 3 ** bytes of pTmpSpace, but that the first 4 bytes are copied from ** it into a database page. This is not actually a problem, but it ** does cause a valgrind error when the 1 or 2 bytes of unitialized ** data is passed to system call write(). So to avoid this error, ** zero the first 4 bytes of temp space here. ** ** Also: Provide four bytes of initialized space before the ** beginning of pTmpSpace as an area available to prepend the ** left-child pointer to the beginning of a cell. */ if( pBt->pTmpSpace ){ memset(pBt->pTmpSpace, 0, 8); pBt->pTmpSpace += 4; } } } /* ** Free the pBt->pTmpSpace allocation */ static void freeTempSpace(BtShared *pBt){ if( pBt->pTmpSpace ){ pBt->pTmpSpace -= 4; sqlite3PageFree(pBt->pTmpSpace); pBt->pTmpSpace = 0; } } /* ** Close an open database and invalidate all cursors. */ SQLITE_PRIVATE int sqlite3BtreeClose(Btree *p){ BtShared *pBt = p->pBt; BtCursor *pCur; /* Close all cursors opened via this handle. 
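** (Cursors that belong to other Btree handles sharing the same BtShared
** are deliberately left open; the loop below closes only those cursors
** whose pBtree field points back at this handle.)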
*/ assert( sqlite3_mutex_held(p->db->mutex) ); sqlite3BtreeEnter(p); pCur = pBt->pCursor; while( pCur ){ BtCursor *pTmp = pCur; pCur = pCur->pNext; if( pTmp->pBtree==p ){ sqlite3BtreeCloseCursor(pTmp); } } /* Rollback any active transaction and free the handle structure. ** The call to sqlite3BtreeRollback() drops any table-locks held by ** this handle. */ sqlite3BtreeRollback(p, SQLITE_OK, 0); sqlite3BtreeLeave(p); /* If there are still other outstanding references to the shared-btree ** structure, return now. The remainder of this procedure cleans ** up the shared-btree. */ assert( p->wantToLock==0 && p->locked==0 ); if( !p->sharable || removeFromSharingList(pBt) ){ /* The pBt is no longer on the sharing list, so we can access ** it without having to hold the mutex. ** ** Clean out and delete the BtShared object. */ assert( !pBt->pCursor ); sqlite3PagerClose(pBt->pPager); if( pBt->xFreeSchema && pBt->pSchema ){ pBt->xFreeSchema(pBt->pSchema); } sqlite3DbFree(0, pBt->pSchema); freeTempSpace(pBt); sqlite3_free(pBt); } #ifndef SQLITE_OMIT_SHARED_CACHE assert( p->wantToLock==0 ); assert( p->locked==0 ); if( p->pPrev ) p->pPrev->pNext = p->pNext; if( p->pNext ) p->pNext->pPrev = p->pPrev; #endif sqlite3_free(p); return SQLITE_OK; } /* ** Change the limit on the number of pages allowed in the cache. ** ** The maximum number of cache pages is set to the absolute ** value of mxPage. If mxPage is negative, the pager will ** operate asynchronously - it will not stop to do fsync()s ** to insure data is written to the disk surface before ** continuing. Transactions still work if synchronous is off, ** and the database cannot be corrupted if this program ** crashes. But if the operating system crashes or there is ** an abrupt power failure when synchronous is off, the database ** could be left in an inconsistent and unrecoverable state. ** Synchronous is on by default so database corruption is not ** normally a worry. */ SQLITE_PRIVATE int sqlite3BtreeSetCacheSize(Btree *p, int mxPage){ BtShared *pBt = p->pBt; assert( sqlite3_mutex_held(p->db->mutex) ); sqlite3BtreeEnter(p); sqlite3PagerSetCachesize(pBt->pPager, mxPage); sqlite3BtreeLeave(p); return SQLITE_OK; } #if SQLITE_MAX_MMAP_SIZE>0 /* ** Change the limit on the amount of the database file that may be ** memory mapped. */ SQLITE_PRIVATE int sqlite3BtreeSetMmapLimit(Btree *p, sqlite3_int64 szMmap){ BtShared *pBt = p->pBt; assert( sqlite3_mutex_held(p->db->mutex) ); sqlite3BtreeEnter(p); sqlite3PagerSetMmapLimit(pBt->pPager, szMmap); sqlite3BtreeLeave(p); return SQLITE_OK; } #endif /* SQLITE_MAX_MMAP_SIZE>0 */ /* ** Change the way data is synced to disk in order to increase or decrease ** how well the database resists damage due to OS crashes and power ** failures. Level 1 is the same as asynchronous (no syncs() occur and ** there is a high probability of damage) Level 2 is the default. There ** is a very low but non-zero probability of damage. Level 3 reduces the ** probability of damage to near zero but with a write performance reduction. */ #ifndef SQLITE_OMIT_PAGER_PRAGMAS SQLITE_PRIVATE int sqlite3BtreeSetPagerFlags( Btree *p, /* The btree to set the safety level on */ unsigned pgFlags /* Various PAGER_* flags */ ){ BtShared *pBt = p->pBt; assert( sqlite3_mutex_held(p->db->mutex) ); sqlite3BtreeEnter(p); sqlite3PagerSetFlags(pBt->pPager, pgFlags); sqlite3BtreeLeave(p); return SQLITE_OK; } #endif /* ** Return TRUE if the given btree is set to safety level 1. In other ** words, return TRUE if no sync() occurs on the disk files. 
*/ SQLITE_PRIVATE int sqlite3BtreeSyncDisabled(Btree *p){ BtShared *pBt = p->pBt; int rc; assert( sqlite3_mutex_held(p->db->mutex) ); sqlite3BtreeEnter(p); assert( pBt && pBt->pPager ); rc = sqlite3PagerNosync(pBt->pPager); sqlite3BtreeLeave(p); return rc; } /* ** Change the default pages size and the number of reserved bytes per page. ** Or, if the page size has already been fixed, return SQLITE_READONLY ** without changing anything. ** ** The page size must be a power of 2 between 512 and 65536. If the page ** size supplied does not meet this constraint then the page size is not ** changed. ** ** Page sizes are constrained to be a power of two so that the region ** of the database file used for locking (beginning at PENDING_BYTE, ** the first byte past the 1GB boundary, 0x40000000) needs to occur ** at the beginning of a page. ** ** If parameter nReserve is less than zero, then the number of reserved ** bytes per page is left unchanged. ** ** If the iFix!=0 then the BTS_PAGESIZE_FIXED flag is set so that the page size ** and autovacuum mode can no longer be changed. */ SQLITE_PRIVATE int sqlite3BtreeSetPageSize(Btree *p, int pageSize, int nReserve, int iFix){ int rc = SQLITE_OK; BtShared *pBt = p->pBt; assert( nReserve>=-1 && nReserve<=255 ); sqlite3BtreeEnter(p); #if SQLITE_HAS_CODEC if( nReserve>pBt->optimalReserve ) pBt->optimalReserve = (u8)nReserve; #endif if( pBt->btsFlags & BTS_PAGESIZE_FIXED ){ sqlite3BtreeLeave(p); return SQLITE_READONLY; } if( nReserve<0 ){ nReserve = pBt->pageSize - pBt->usableSize; } assert( nReserve>=0 && nReserve<=255 ); if( pageSize>=512 && pageSize<=SQLITE_MAX_PAGE_SIZE && ((pageSize-1)&pageSize)==0 ){ assert( (pageSize & 7)==0 ); assert( !pBt->pCursor ); pBt->pageSize = (u32)pageSize; freeTempSpace(pBt); } rc = sqlite3PagerSetPagesize(pBt->pPager, &pBt->pageSize, nReserve); pBt->usableSize = pBt->pageSize - (u16)nReserve; if( iFix ) pBt->btsFlags |= BTS_PAGESIZE_FIXED; sqlite3BtreeLeave(p); return rc; } /* ** Return the currently defined page size */ SQLITE_PRIVATE int sqlite3BtreeGetPageSize(Btree *p){ return p->pBt->pageSize; } /* ** This function is similar to sqlite3BtreeGetReserve(), except that it ** may only be called if it is guaranteed that the b-tree mutex is already ** held. ** ** This is useful in one special case in the backup API code where it is ** known that the shared b-tree mutex is held, but the mutex on the ** database handle that owns *p is not. In this case if sqlite3BtreeEnter() ** were to be called, it might collide with some other operation on the ** database handle that owns *p, causing undefined behavior. */ SQLITE_PRIVATE int sqlite3BtreeGetReserveNoMutex(Btree *p){ int n; assert( sqlite3_mutex_held(p->pBt->mutex) ); n = p->pBt->pageSize - p->pBt->usableSize; return n; } /* ** Return the number of bytes of space at the end of every page that ** are intentually left unused. This is the "reserved" space that is ** sometimes used by extensions. ** ** If SQLITE_HAS_MUTEX is defined then the number returned is the ** greater of the current reserved space and the maximum requested ** reserve space. */ SQLITE_PRIVATE int sqlite3BtreeGetOptimalReserve(Btree *p){ int n; sqlite3BtreeEnter(p); n = sqlite3BtreeGetReserveNoMutex(p); #ifdef SQLITE_HAS_CODEC if( npBt->optimalReserve ) n = p->pBt->optimalReserve; #endif sqlite3BtreeLeave(p); return n; } /* ** Set the maximum page count for a database if mxPage is positive. ** No changes are made if mxPage is 0 or negative. ** Regardless of the value of mxPage, return the maximum page count. 
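**
** Illustrative usage, with pBtree standing in for any open Btree*
** (hypothetical variable names, sketch only):
**
**     int nCur = sqlite3BtreeMaxPageCount(pBtree, 0);      query only
**     int nNew = sqlite3BtreeMaxPageCount(pBtree, 5000);   request a new limit
**
** In both calls the value returned is the limit in effect after the call.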
*/ SQLITE_PRIVATE int sqlite3BtreeMaxPageCount(Btree *p, int mxPage){ int n; sqlite3BtreeEnter(p); n = sqlite3PagerMaxPageCount(p->pBt->pPager, mxPage); sqlite3BtreeLeave(p); return n; } /* ** Set the BTS_SECURE_DELETE flag if newFlag is 0 or 1. If newFlag is -1, ** then make no changes. Always return the value of the BTS_SECURE_DELETE ** setting after the change. */ SQLITE_PRIVATE int sqlite3BtreeSecureDelete(Btree *p, int newFlag){ int b; if( p==0 ) return 0; sqlite3BtreeEnter(p); if( newFlag>=0 ){ p->pBt->btsFlags &= ~BTS_SECURE_DELETE; if( newFlag ) p->pBt->btsFlags |= BTS_SECURE_DELETE; } b = (p->pBt->btsFlags & BTS_SECURE_DELETE)!=0; sqlite3BtreeLeave(p); return b; } /* ** Change the 'auto-vacuum' property of the database. If the 'autoVacuum' ** parameter is non-zero, then auto-vacuum mode is enabled. If zero, it ** is disabled. The default value for the auto-vacuum property is ** determined by the SQLITE_DEFAULT_AUTOVACUUM macro. */ SQLITE_PRIVATE int sqlite3BtreeSetAutoVacuum(Btree *p, int autoVacuum){ #ifdef SQLITE_OMIT_AUTOVACUUM return SQLITE_READONLY; #else BtShared *pBt = p->pBt; int rc = SQLITE_OK; u8 av = (u8)autoVacuum; sqlite3BtreeEnter(p); if( (pBt->btsFlags & BTS_PAGESIZE_FIXED)!=0 && (av ?1:0)!=pBt->autoVacuum ){ rc = SQLITE_READONLY; }else{ pBt->autoVacuum = av ?1:0; pBt->incrVacuum = av==2 ?1:0; } sqlite3BtreeLeave(p); return rc; #endif } /* ** Return the value of the 'auto-vacuum' property. If auto-vacuum is ** enabled 1 is returned. Otherwise 0. */ SQLITE_PRIVATE int sqlite3BtreeGetAutoVacuum(Btree *p){ #ifdef SQLITE_OMIT_AUTOVACUUM return BTREE_AUTOVACUUM_NONE; #else int rc; sqlite3BtreeEnter(p); rc = ( (!p->pBt->autoVacuum)?BTREE_AUTOVACUUM_NONE: (!p->pBt->incrVacuum)?BTREE_AUTOVACUUM_FULL: BTREE_AUTOVACUUM_INCR ); sqlite3BtreeLeave(p); return rc; #endif } /* ** Get a reference to pPage1 of the database file. This will ** also acquire a readlock on that file. ** ** SQLITE_OK is returned on success. If the file is not a ** well-formed database file, then SQLITE_CORRUPT is returned. ** SQLITE_BUSY is returned if the database is locked. SQLITE_NOMEM ** is returned if we run out of memory. */ static int lockBtree(BtShared *pBt){ int rc; /* Result code from subfunctions */ MemPage *pPage1; /* Page 1 of the database file */ int nPage; /* Number of pages in the database */ int nPageFile = 0; /* Number of pages in the database file */ int nPageHeader; /* Number of pages in the database according to hdr */ assert( sqlite3_mutex_held(pBt->mutex) ); assert( pBt->pPage1==0 ); rc = sqlite3PagerSharedLock(pBt->pPager); if( rc!=SQLITE_OK ) return rc; rc = btreeGetPage(pBt, 1, &pPage1, 0); if( rc!=SQLITE_OK ) return rc; /* Do some checking to help insure the file we opened really is ** a valid database file. */ nPage = nPageHeader = get4byte(28+(u8*)pPage1->aData); sqlite3PagerPagecount(pBt->pPager, &nPageFile); if( nPage==0 || memcmp(24+(u8*)pPage1->aData, 92+(u8*)pPage1->aData,4)!=0 ){ nPage = nPageFile; } if( nPage>0 ){ u32 pageSize; u32 usableSize; u8 *page1 = pPage1->aData; rc = SQLITE_NOTADB; /* EVIDENCE-OF: R-43737-39999 Every valid SQLite database file begins ** with the following 16 bytes (in hex): 53 51 4c 69 74 65 20 66 6f 72 6d ** 61 74 20 33 00. 
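**
** Those sixteen bytes are the ASCII text "SQLite format 3" followed by a
** terminating nul character.  A minimal stand-alone version of the check
** performed below (sketch only; the real code compares against the
** zMagicHeader constant defined elsewhere in this file):
**
**     static const char zHdr[16] = "SQLite format 3";     15 chars + '\0'
**     int looksLikeSQLite3(const unsigned char *aHeader){
**       return memcmp(aHeader, zHdr, 16)==0;
**     }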
*/ if( memcmp(page1, zMagicHeader, 16)!=0 ){ goto page1_init_failed; } #ifdef SQLITE_OMIT_WAL if( page1[18]>1 ){ pBt->btsFlags |= BTS_READ_ONLY; } if( page1[19]>1 ){ goto page1_init_failed; } #else if( page1[18]>2 ){ pBt->btsFlags |= BTS_READ_ONLY; } if( page1[19]>2 ){ goto page1_init_failed; } /* If the write version is set to 2, this database should be accessed ** in WAL mode. If the log is not already open, open it now. Then ** return SQLITE_OK and return without populating BtShared.pPage1. ** The caller detects this and calls this function again. This is ** required as the version of page 1 currently in the page1 buffer ** may not be the latest version - there may be a newer one in the log ** file. */ if( page1[19]==2 && (pBt->btsFlags & BTS_NO_WAL)==0 ){ int isOpen = 0; rc = sqlite3PagerOpenWal(pBt->pPager, &isOpen); if( rc!=SQLITE_OK ){ goto page1_init_failed; }else if( isOpen==0 ){ releasePage(pPage1); return SQLITE_OK; } rc = SQLITE_NOTADB; } #endif /* EVIDENCE-OF: R-15465-20813 The maximum and minimum embedded payload ** fractions and the leaf payload fraction values must be 64, 32, and 32. ** ** The original design allowed these amounts to vary, but as of ** version 3.6.0, we require them to be fixed. */ if( memcmp(&page1[21], "\100\040\040",3)!=0 ){ goto page1_init_failed; } /* EVIDENCE-OF: R-51873-39618 The page size for a database file is ** determined by the 2-byte integer located at an offset of 16 bytes from ** the beginning of the database file. */ pageSize = (page1[16]<<8) | (page1[17]<<16); /* EVIDENCE-OF: R-25008-21688 The size of a page is a power of two ** between 512 and 65536 inclusive. */ if( ((pageSize-1)&pageSize)!=0 || pageSize>SQLITE_MAX_PAGE_SIZE || pageSize<=256 ){ goto page1_init_failed; } assert( (pageSize & 7)==0 ); /* EVIDENCE-OF: R-59310-51205 The "reserved space" size in the 1-byte ** integer at offset 20 is the number of bytes of space at the end of ** each page to reserve for extensions. ** ** EVIDENCE-OF: R-37497-42412 The size of the reserved region is ** determined by the one-byte unsigned integer found at an offset of 20 ** into the database file header. */ usableSize = pageSize - page1[20]; if( (u32)pageSize!=pBt->pageSize ){ /* After reading the first page of the database assuming a page size ** of BtShared.pageSize, we have discovered that the page-size is ** actually pageSize. Unlock the database, leave pBt->pPage1 at ** zero and return SQLITE_OK. The caller will call this function ** again with the correct page-size. */ releasePage(pPage1); pBt->usableSize = usableSize; pBt->pageSize = pageSize; freeTempSpace(pBt); rc = sqlite3PagerSetPagesize(pBt->pPager, &pBt->pageSize, pageSize-usableSize); return rc; } if( (pBt->db->flags & SQLITE_RecoveryMode)==0 && nPage>nPageFile ){ rc = SQLITE_CORRUPT_BKPT; goto page1_init_failed; } /* EVIDENCE-OF: R-28312-64704 However, the usable size is not allowed to ** be less than 480. In other words, if the page size is 512, then the ** reserved space size cannot exceed 32. */ if( usableSize<480 ){ goto page1_init_failed; } pBt->pageSize = pageSize; pBt->usableSize = usableSize; #ifndef SQLITE_OMIT_AUTOVACUUM pBt->autoVacuum = (get4byte(&page1[36 + 4*4])?1:0); pBt->incrVacuum = (get4byte(&page1[36 + 7*4])?1:0); #endif } /* maxLocal is the maximum amount of payload to store locally for ** a cell. Make sure it is small enough so that at least minFanout ** cells can will fit on one page. We assume a 10-byte page header. 
** Besides the payload, the cell must store: ** 2-byte pointer to the cell ** 4-byte child pointer ** 9-byte nKey value ** 4-byte nData value ** 4-byte overflow page pointer ** So a cell consists of a 2-byte pointer, a header which is as much as ** 17 bytes long, 0 to N bytes of payload, and an optional 4 byte overflow ** page pointer. */ pBt->maxLocal = (u16)((pBt->usableSize-12)*64/255 - 23); pBt->minLocal = (u16)((pBt->usableSize-12)*32/255 - 23); pBt->maxLeaf = (u16)(pBt->usableSize - 35); pBt->minLeaf = (u16)((pBt->usableSize-12)*32/255 - 23); if( pBt->maxLocal>127 ){ pBt->max1bytePayload = 127; }else{ pBt->max1bytePayload = (u8)pBt->maxLocal; } assert( pBt->maxLeaf + 23 <= MX_CELL_SIZE(pBt) ); pBt->pPage1 = pPage1; pBt->nPage = nPage; return SQLITE_OK; page1_init_failed: releasePage(pPage1); pBt->pPage1 = 0; return rc; } #ifndef NDEBUG /* ** Return the number of cursors open on pBt. This is for use ** in assert() expressions, so it is only compiled if NDEBUG is not ** defined. ** ** Only write cursors are counted if wrOnly is true. If wrOnly is ** false then all cursors are counted. ** ** For the purposes of this routine, a cursor is any cursor that ** is capable of reading or writing to the database. Cursors that ** have been tripped into the CURSOR_FAULT state are not counted. */ static int countValidCursors(BtShared *pBt, int wrOnly){ BtCursor *pCur; int r = 0; for(pCur=pBt->pCursor; pCur; pCur=pCur->pNext){ if( (wrOnly==0 || (pCur->curFlags & BTCF_WriteFlag)!=0) && pCur->eState!=CURSOR_FAULT ) r++; } return r; } #endif /* ** If there are no outstanding cursors and we are not in the middle ** of a transaction but there is a read lock on the database, then ** this routine unrefs the first page of the database file which ** has the effect of releasing the read lock. ** ** If there is a transaction in progress, this routine is a no-op. */ static void unlockBtreeIfUnused(BtShared *pBt){ assert( sqlite3_mutex_held(pBt->mutex) ); assert( countValidCursors(pBt,0)==0 || pBt->inTransaction>TRANS_NONE ); if( pBt->inTransaction==TRANS_NONE && pBt->pPage1!=0 ){ MemPage *pPage1 = pBt->pPage1; assert( pPage1->aData ); assert( sqlite3PagerRefcount(pBt->pPager)==1 ); pBt->pPage1 = 0; releasePageNotNull(pPage1); } } /* ** If pBt points to an empty file then convert that empty file ** into a new empty database by initializing the first page of ** the database. 
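**
** Sketch of the page-1 header that the routine below produces, derived
** from its assignments (offsets in bytes from the start of the file):
**
**     0..15   "SQLite format 3\000"                   (zMagicHeader)
**     16..17  page size, big-endian                   (data[16], data[17])
**     18,19   write and read format versions, both 1  (rollback-journal mode)
**     20      bytes of reserved space per page        (pageSize - usableSize)
**     21..23  64, 32, 32 payload fractions
**     24..99  zeroed, after which the database size at offset 28 is set
**             to 1 (data[31]=1) and the auto-vacuum flags are written at
**             offsets 52 and 64.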
*/ static int newDatabase(BtShared *pBt){ MemPage *pP1; unsigned char *data; int rc; assert( sqlite3_mutex_held(pBt->mutex) ); if( pBt->nPage>0 ){ return SQLITE_OK; } pP1 = pBt->pPage1; assert( pP1!=0 ); data = pP1->aData; rc = sqlite3PagerWrite(pP1->pDbPage); if( rc ) return rc; memcpy(data, zMagicHeader, sizeof(zMagicHeader)); assert( sizeof(zMagicHeader)==16 ); data[16] = (u8)((pBt->pageSize>>8)&0xff); data[17] = (u8)((pBt->pageSize>>16)&0xff); data[18] = 1; data[19] = 1; assert( pBt->usableSize<=pBt->pageSize && pBt->usableSize+255>=pBt->pageSize); data[20] = (u8)(pBt->pageSize - pBt->usableSize); data[21] = 64; data[22] = 32; data[23] = 32; memset(&data[24], 0, 100-24); zeroPage(pP1, PTF_INTKEY|PTF_LEAF|PTF_LEAFDATA ); pBt->btsFlags |= BTS_PAGESIZE_FIXED; #ifndef SQLITE_OMIT_AUTOVACUUM assert( pBt->autoVacuum==1 || pBt->autoVacuum==0 ); assert( pBt->incrVacuum==1 || pBt->incrVacuum==0 ); put4byte(&data[36 + 4*4], pBt->autoVacuum); put4byte(&data[36 + 7*4], pBt->incrVacuum); #endif pBt->nPage = 1; data[31] = 1; return SQLITE_OK; } /* ** Initialize the first page of the database file (creating a database ** consisting of a single page and no schema objects). Return SQLITE_OK ** if successful, or an SQLite error code otherwise. */ SQLITE_PRIVATE int sqlite3BtreeNewDb(Btree *p){ int rc; sqlite3BtreeEnter(p); p->pBt->nPage = 0; rc = newDatabase(p->pBt); sqlite3BtreeLeave(p); return rc; } /* ** Attempt to start a new transaction. A write-transaction ** is started if the second argument is nonzero, otherwise a read- ** transaction. If the second argument is 2 or more and exclusive ** transaction is started, meaning that no other process is allowed ** to access the database. A preexisting transaction may not be ** upgraded to exclusive by calling this routine a second time - the ** exclusivity flag only works for a new transaction. ** ** A write-transaction must be started before attempting any ** changes to the database. None of the following routines ** will work unless a transaction is started first: ** ** sqlite3BtreeCreateTable() ** sqlite3BtreeCreateIndex() ** sqlite3BtreeClearTable() ** sqlite3BtreeDropTable() ** sqlite3BtreeInsert() ** sqlite3BtreeDelete() ** sqlite3BtreeUpdateMeta() ** ** If an initial attempt to acquire the lock fails because of lock contention ** and the database was previously unlocked, then invoke the busy handler ** if there is one. But if there was previously a read-lock, do not ** invoke the busy handler - just return SQLITE_BUSY. SQLITE_BUSY is ** returned when there is already a read-lock in order to avoid a deadlock. ** ** Suppose there are two processes A and B. A has a read lock and B has ** a reserved lock. B tries to promote to exclusive but is blocked because ** of A's read lock. A tries to promote to reserved but is blocked by B. ** One or the other of the two processes must give way or there can be ** no progress. By returning SQLITE_BUSY and not invoking the busy callback ** when A already has a read lock, we encourage A to give up and let B ** proceed. */ SQLITE_PRIVATE int sqlite3BtreeBeginTrans(Btree *p, int wrflag){ sqlite3 *pBlock = 0; BtShared *pBt = p->pBt; int rc = SQLITE_OK; sqlite3BtreeEnter(p); btreeIntegrity(p); /* If the btree is already in a write-transaction, or it ** is already in a read-transaction and a read-transaction ** is requested, this is a no-op. 
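**
** For example, a request for a read-transaction made while this handle
** already holds a write-transaction lands here.  Using the ordering
** TRANS_NONE < TRANS_READ < TRANS_WRITE relied on throughout this file,
** the test below can be read as the sketch:
**
**     alreadyCovered = (p->inTrans==TRANS_WRITE)
**                   || (p->inTrans==TRANS_READ && wrflag==0);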
*/ if( p->inTrans==TRANS_WRITE || (p->inTrans==TRANS_READ && !wrflag) ){ goto trans_begun; } assert( pBt->inTransaction==TRANS_WRITE || IfNotOmitAV(pBt->bDoTruncate)==0 ); /* Write transactions are not possible on a read-only database */ if( (pBt->btsFlags & BTS_READ_ONLY)!=0 && wrflag ){ rc = SQLITE_READONLY; goto trans_begun; } #ifndef SQLITE_OMIT_SHARED_CACHE /* If another database handle has already opened a write transaction ** on this shared-btree structure and a second write transaction is ** requested, return SQLITE_LOCKED. */ if( (wrflag && pBt->inTransaction==TRANS_WRITE) || (pBt->btsFlags & BTS_PENDING)!=0 ){ pBlock = pBt->pWriter->db; }else if( wrflag>1 ){ BtLock *pIter; for(pIter=pBt->pLock; pIter; pIter=pIter->pNext){ if( pIter->pBtree!=p ){ pBlock = pIter->pBtree->db; break; } } } if( pBlock ){ sqlite3ConnectionBlocked(p->db, pBlock); rc = SQLITE_LOCKED_SHAREDCACHE; goto trans_begun; } #endif /* Any read-only or read-write transaction implies a read-lock on ** page 1. So if some other shared-cache client already has a write-lock ** on page 1, the transaction cannot be opened. */ rc = querySharedCacheTableLock(p, MASTER_ROOT, READ_LOCK); if( SQLITE_OK!=rc ) goto trans_begun; pBt->btsFlags &= ~BTS_INITIALLY_EMPTY; if( pBt->nPage==0 ) pBt->btsFlags |= BTS_INITIALLY_EMPTY; do { /* Call lockBtree() until either pBt->pPage1 is populated or ** lockBtree() returns something other than SQLITE_OK. lockBtree() ** may return SQLITE_OK but leave pBt->pPage1 set to 0 if after ** reading page 1 it discovers that the page-size of the database ** file is not pBt->pageSize. In this case lockBtree() will update ** pBt->pageSize to the page-size of the file on disk. */ while( pBt->pPage1==0 && SQLITE_OK==(rc = lockBtree(pBt)) ); if( rc==SQLITE_OK && wrflag ){ if( (pBt->btsFlags & BTS_READ_ONLY)!=0 ){ rc = SQLITE_READONLY; }else{ rc = sqlite3PagerBegin(pBt->pPager,wrflag>1,sqlite3TempInMemory(p->db)); if( rc==SQLITE_OK ){ rc = newDatabase(pBt); } } } if( rc!=SQLITE_OK ){ unlockBtreeIfUnused(pBt); } }while( (rc&0xFF)==SQLITE_BUSY && pBt->inTransaction==TRANS_NONE && btreeInvokeBusyHandler(pBt) ); if( rc==SQLITE_OK ){ if( p->inTrans==TRANS_NONE ){ pBt->nTransaction++; #ifndef SQLITE_OMIT_SHARED_CACHE if( p->sharable ){ assert( p->lock.pBtree==p && p->lock.iTable==1 ); p->lock.eLock = READ_LOCK; p->lock.pNext = pBt->pLock; pBt->pLock = &p->lock; } #endif } p->inTrans = (wrflag?TRANS_WRITE:TRANS_READ); if( p->inTrans>pBt->inTransaction ){ pBt->inTransaction = p->inTrans; } if( wrflag ){ MemPage *pPage1 = pBt->pPage1; #ifndef SQLITE_OMIT_SHARED_CACHE assert( !pBt->pWriter ); pBt->pWriter = p; pBt->btsFlags &= ~BTS_EXCLUSIVE; if( wrflag>1 ) pBt->btsFlags |= BTS_EXCLUSIVE; #endif /* If the db-size header field is incorrect (as it may be if an old ** client has been writing the database file), update it now. Doing ** this sooner rather than later means the database size can safely ** re-read the database size from page 1 if a savepoint or transaction ** rollback occurs within the transaction. */ if( pBt->nPage!=get4byte(&pPage1->aData[28]) ){ rc = sqlite3PagerWrite(pPage1->pDbPage); if( rc==SQLITE_OK ){ put4byte(&pPage1->aData[28], pBt->nPage); } } } } trans_begun: if( rc==SQLITE_OK && wrflag ){ /* This call makes sure that the pager has the correct number of ** open savepoints. If the second parameter is greater than 0 and ** the sub-journal is not already open, then it will be opened here. 
*/ rc = sqlite3PagerOpenSavepoint(pBt->pPager, p->db->nSavepoint); } btreeIntegrity(p); sqlite3BtreeLeave(p); return rc; } #ifndef SQLITE_OMIT_AUTOVACUUM /* ** Set the pointer-map entries for all children of page pPage. Also, if ** pPage contains cells that point to overflow pages, set the pointer ** map entries for the overflow pages as well. */ static int setChildPtrmaps(MemPage *pPage){ int i; /* Counter variable */ int nCell; /* Number of cells in page pPage */ int rc; /* Return code */ BtShared *pBt = pPage->pBt; u8 isInitOrig = pPage->isInit; Pgno pgno = pPage->pgno; assert( sqlite3_mutex_held(pPage->pBt->mutex) ); rc = btreeInitPage(pPage); if( rc!=SQLITE_OK ){ goto set_child_ptrmaps_out; } nCell = pPage->nCell; for(i=0; i<nCell; i++){ u8 *pCell = findCell(pPage, i); ptrmapPutOvflPtr(pPage, pCell, &rc); if( !pPage->leaf ){ Pgno childPgno = get4byte(pCell); ptrmapPut(pBt, childPgno, PTRMAP_BTREE, pgno, &rc); } } if( !pPage->leaf ){ Pgno childPgno = get4byte(&pPage->aData[pPage->hdrOffset+8]); ptrmapPut(pBt, childPgno, PTRMAP_BTREE, pgno, &rc); } set_child_ptrmaps_out: pPage->isInit = isInitOrig; return rc; } /* ** Somewhere on pPage is a pointer to page iFrom. Modify this pointer so ** that it points to iTo. Parameter eType describes the type of pointer to ** be modified, as follows: ** ** PTRMAP_BTREE: pPage is a btree-page. The pointer points at a child ** page of pPage. ** ** PTRMAP_OVERFLOW1: pPage is a btree-page. The pointer points at an overflow ** page pointed to by one of the cells on pPage. ** ** PTRMAP_OVERFLOW2: pPage is an overflow-page. The pointer points at the next ** overflow page in the list. */ static int modifyPagePointer(MemPage *pPage, Pgno iFrom, Pgno iTo, u8 eType){ assert( sqlite3_mutex_held(pPage->pBt->mutex) ); assert( sqlite3PagerIswriteable(pPage->pDbPage) ); if( eType==PTRMAP_OVERFLOW2 ){ /* The pointer is always the first 4 bytes of the page in this case. */ if( get4byte(pPage->aData)!=iFrom ){ return SQLITE_CORRUPT_BKPT; } put4byte(pPage->aData, iTo); }else{ u8 isInitOrig = pPage->isInit; int i; int nCell; int rc; rc = btreeInitPage(pPage); if( rc ) return rc; nCell = pPage->nCell; for(i=0; i<nCell; i++){ u8 *pCell = findCell(pPage, i); if( eType==PTRMAP_OVERFLOW1 ){ CellInfo info; pPage->xParseCell(pPage, pCell, &info); if( info.iOverflow && pCell+info.iOverflow+3<=pPage->aData+pPage->maskPage && iFrom==get4byte(&pCell[info.iOverflow]) ){ put4byte(&pCell[info.iOverflow], iTo); break; } }else{ if( get4byte(pCell)==iFrom ){ put4byte(pCell, iTo); break; } } } if( i==nCell ){ if( eType!=PTRMAP_BTREE || get4byte(&pPage->aData[pPage->hdrOffset+8])!=iFrom ){ return SQLITE_CORRUPT_BKPT; } put4byte(&pPage->aData[pPage->hdrOffset+8], iTo); } pPage->isInit = isInitOrig; } return SQLITE_OK; } /* ** Move the open database page pDbPage to location iFreePage in the ** database. The pDbPage reference remains valid. ** ** The isCommit flag indicates that there is no need to remember that ** the journal needs to be sync()ed before database page pDbPage->pgno ** can be written to. The caller has already promised not to write to that ** page.
*/ static int relocatePage( BtShared *pBt, /* Btree */ MemPage *pDbPage, /* Open page to move */ u8 eType, /* Pointer map 'type' entry for pDbPage */ Pgno iPtrPage, /* Pointer map 'page-no' entry for pDbPage */ Pgno iFreePage, /* The location to move pDbPage to */ int isCommit /* isCommit flag passed to sqlite3PagerMovepage */ ){ MemPage *pPtrPage; /* The page that contains a pointer to pDbPage */ Pgno iDbPage = pDbPage->pgno; Pager *pPager = pBt->pPager; int rc; assert( eType==PTRMAP_OVERFLOW2 || eType==PTRMAP_OVERFLOW1 || eType==PTRMAP_BTREE || eType==PTRMAP_ROOTPAGE ); assert( sqlite3_mutex_held(pBt->mutex) ); assert( pDbPage->pBt==pBt ); /* Move page iDbPage from its current location to page number iFreePage */ TRACE(("AUTOVACUUM: Moving %d to free page %d (ptr page %d type %d)\n", iDbPage, iFreePage, iPtrPage, eType)); rc = sqlite3PagerMovepage(pPager, pDbPage->pDbPage, iFreePage, isCommit); if( rc!=SQLITE_OK ){ return rc; } pDbPage->pgno = iFreePage; /* If pDbPage was a btree-page, then it may have child pages and/or cells ** that point to overflow pages. The pointer map entries for all these ** pages need to be changed. ** ** If pDbPage is an overflow page, then the first 4 bytes may store a ** pointer to a subsequent overflow page. If this is the case, then ** the pointer map needs to be updated for the subsequent overflow page. */ if( eType==PTRMAP_BTREE || eType==PTRMAP_ROOTPAGE ){ rc = setChildPtrmaps(pDbPage); if( rc!=SQLITE_OK ){ return rc; } }else{ Pgno nextOvfl = get4byte(pDbPage->aData); if( nextOvfl!=0 ){ ptrmapPut(pBt, nextOvfl, PTRMAP_OVERFLOW2, iFreePage, &rc); if( rc!=SQLITE_OK ){ return rc; } } } /* Fix the database pointer on page iPtrPage that pointed at iDbPage so ** that it points at iFreePage. Also fix the pointer map entry for ** iPtrPage. */ if( eType!=PTRMAP_ROOTPAGE ){ rc = btreeGetPage(pBt, iPtrPage, &pPtrPage, 0); if( rc!=SQLITE_OK ){ return rc; } rc = sqlite3PagerWrite(pPtrPage->pDbPage); if( rc!=SQLITE_OK ){ releasePage(pPtrPage); return rc; } rc = modifyPagePointer(pPtrPage, iDbPage, iFreePage, eType); releasePage(pPtrPage); if( rc==SQLITE_OK ){ ptrmapPut(pBt, iFreePage, eType, iPtrPage, &rc); } } return rc; } /* Forward declaration required by incrVacuumStep(). */ static int allocateBtreePage(BtShared *, MemPage **, Pgno *, Pgno, u8); /* ** Perform a single step of an incremental-vacuum. If successful, return ** SQLITE_OK. If there is no work to do (and therefore no point in ** calling this function again), return SQLITE_DONE. Or, if an error ** occurs, return some other error code. ** ** More specifically, this function attempts to re-organize the database so ** that the last page of the file currently in use is no longer in use. ** ** Parameter nFin is the number of pages that this database would contain ** were this function called until it returns SQLITE_DONE. ** ** If the bCommit parameter is non-zero, this function assumes that the ** caller will keep calling incrVacuumStep() until it returns SQLITE_DONE ** or an error. bCommit is passed true for an auto-vacuum-on-commit ** operation, or false for an incremental vacuum. 
*/ static int incrVacuumStep(BtShared *pBt, Pgno nFin, Pgno iLastPg, int bCommit){ Pgno nFreeList; /* Number of pages still on the free-list */ int rc; assert( sqlite3_mutex_held(pBt->mutex) ); assert( iLastPg>nFin ); if( !PTRMAP_ISPAGE(pBt, iLastPg) && iLastPg!=PENDING_BYTE_PAGE(pBt) ){ u8 eType; Pgno iPtrPage; nFreeList = get4byte(&pBt->pPage1->aData[36]); if( nFreeList==0 ){ return SQLITE_DONE; } rc = ptrmapGet(pBt, iLastPg, &eType, &iPtrPage); if( rc!=SQLITE_OK ){ return rc; } if( eType==PTRMAP_ROOTPAGE ){ return SQLITE_CORRUPT_BKPT; } if( eType==PTRMAP_FREEPAGE ){ if( bCommit==0 ){ /* Remove the page from the files free-list. This is not required ** if bCommit is non-zero. In that case, the free-list will be ** truncated to zero after this function returns, so it doesn't ** matter if it still contains some garbage entries. */ Pgno iFreePg; MemPage *pFreePg; rc = allocateBtreePage(pBt, &pFreePg, &iFreePg, iLastPg, BTALLOC_EXACT); if( rc!=SQLITE_OK ){ return rc; } assert( iFreePg==iLastPg ); releasePage(pFreePg); } } else { Pgno iFreePg; /* Index of free page to move pLastPg to */ MemPage *pLastPg; u8 eMode = BTALLOC_ANY; /* Mode parameter for allocateBtreePage() */ Pgno iNear = 0; /* nearby parameter for allocateBtreePage() */ rc = btreeGetPage(pBt, iLastPg, &pLastPg, 0); if( rc!=SQLITE_OK ){ return rc; } /* If bCommit is zero, this loop runs exactly once and page pLastPg ** is swapped with the first free page pulled off the free list. ** ** On the other hand, if bCommit is greater than zero, then keep ** looping until a free-page located within the first nFin pages ** of the file is found. */ if( bCommit==0 ){ eMode = BTALLOC_LE; iNear = nFin; } do { MemPage *pFreePg; rc = allocateBtreePage(pBt, &pFreePg, &iFreePg, iNear, eMode); if( rc!=SQLITE_OK ){ releasePage(pLastPg); return rc; } releasePage(pFreePg); }while( bCommit && iFreePg>nFin ); assert( iFreePg<iLastPg ); rc = relocatePage(pBt, pLastPg, eType, iPtrPage, iFreePg, bCommit); releasePage(pLastPg); if( rc!=SQLITE_OK ){ return rc; } } } if( bCommit==0 ){ do { iLastPg--; }while( iLastPg==PENDING_BYTE_PAGE(pBt) || PTRMAP_ISPAGE(pBt, iLastPg) ); pBt->bDoTruncate = 1; pBt->nPage = iLastPg; } return SQLITE_OK; } /* ** The database opened by the first argument is an auto-vacuum database ** nOrig pages in size containing nFree free pages. Return the expected ** size of the database in pages following an auto-vacuum operation. */ static Pgno finalDbSize(BtShared *pBt, Pgno nOrig, Pgno nFree){ int nEntry; /* Number of entries on one ptrmap page */ Pgno nPtrmap; /* Number of PtrMap pages to be freed */ Pgno nFin; /* Return value */ nEntry = pBt->usableSize/5; nPtrmap = (nFree-nOrig+PTRMAP_PAGENO(pBt, nOrig)+nEntry)/nEntry; nFin = nOrig - nFree - nPtrmap; if( nOrig>PENDING_BYTE_PAGE(pBt) && nFin<PENDING_BYTE_PAGE(pBt) ){ nFin--; } while( PTRMAP_ISPAGE(pBt, nFin) || nFin==PENDING_BYTE_PAGE(pBt) ){ nFin--; } return nFin; } /* ** A write-transaction must be opened before calling this function. ** It performs a single unit of work towards an incremental vacuum. ** ** If the incremental vacuum is finished after this function has run, ** SQLITE_DONE is returned. If it is not finished, but no error occurred, ** SQLITE_OK is returned. Otherwise an SQLite error code. */ SQLITE_PRIVATE int sqlite3BtreeIncrVacuum(Btree *p){ int rc; BtShared *pBt = p->pBt; sqlite3BtreeEnter(p); assert( pBt->inTransaction==TRANS_WRITE && p->inTrans==TRANS_WRITE ); if( !pBt->autoVacuum ){ rc = SQLITE_DONE; }else{ Pgno nOrig = btreePagecount(pBt); Pgno nFree = get4byte(&pBt->pPage1->aData[36]); Pgno nFin = finalDbSize(pBt, nOrig, nFree); if( nOrig<nFin ){ rc = SQLITE_CORRUPT_BKPT; }else if( nFree>0 ){ rc = saveAllCursors(pBt, 0, 0); if( rc==SQLITE_OK ){ invalidateAllOverflowCache(pBt); rc = incrVacuumStep(pBt, nFin, nOrig, 0); } if( rc==SQLITE_OK ){ rc = sqlite3PagerWrite(pBt->pPage1->pDbPage); put4byte(&pBt->pPage1->aData[28], pBt->nPage); } }else{ rc = SQLITE_DONE; } } sqlite3BtreeLeave(p); return rc; } /* ** This routine is called prior to sqlite3PagerCommit when a transaction ** is committed for an auto-vacuum database. ** ** If SQLITE_OK is returned, then *pnTrunc is set to the number of pages ** the database file should be truncated to during the commit process. ** i.e. the database has been reorganized so that only the first *pnTrunc ** pages are in use.
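**
** The truncated size comes from finalDbSize() above.  The "/5" in that
** routine reflects the pointer-map format: each pointer-map entry is five
** bytes (a one-byte type plus a four-byte parent page number), so a single
** pointer-map page describes usableSize/5 pages.  A rough worked example,
** assuming a usable page size of 1024 bytes:
**
**     nEntry = 1024/5 = 204 entries per pointer-map page
**
** so releasing nFree pages also releases on the order of nFree/204
** pointer-map pages, which is what the nPtrmap term accounts for.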
*/ static int autoVacuumCommit(BtShared *pBt){ int rc = SQLITE_OK; Pager *pPager = pBt->pPager; VVA_ONLY( int nRef = sqlite3PagerRefcount(pPager); ) assert( sqlite3_mutex_held(pBt->mutex) ); invalidateAllOverflowCache(pBt); assert(pBt->autoVacuum); if( !pBt->incrVacuum ){ Pgno nFin; /* Number of pages in database after autovacuuming */ Pgno nFree; /* Number of pages on the freelist initially */ Pgno iFree; /* The next page to be freed */ Pgno nOrig; /* Database size before freeing */ nOrig = btreePagecount(pBt); if( PTRMAP_ISPAGE(pBt, nOrig) || nOrig==PENDING_BYTE_PAGE(pBt) ){ /* It is not possible to create a database for which the final page ** is either a pointer-map page or the pending-byte page. If one ** is encountered, this indicates corruption. */ return SQLITE_CORRUPT_BKPT; } nFree = get4byte(&pBt->pPage1->aData[36]); nFin = finalDbSize(pBt, nOrig, nFree); if( nFin>nOrig ) return SQLITE_CORRUPT_BKPT; if( nFinnFin && rc==SQLITE_OK; iFree--){ rc = incrVacuumStep(pBt, nFin, iFree, 1); } if( (rc==SQLITE_DONE || rc==SQLITE_OK) && nFree>0 ){ rc = sqlite3PagerWrite(pBt->pPage1->pDbPage); put4byte(&pBt->pPage1->aData[32], 0); put4byte(&pBt->pPage1->aData[36], 0); put4byte(&pBt->pPage1->aData[28], nFin); pBt->bDoTruncate = 1; pBt->nPage = nFin; } if( rc!=SQLITE_OK ){ sqlite3PagerRollback(pPager); } } assert( nRef>=sqlite3PagerRefcount(pPager) ); return rc; } #else /* ifndef SQLITE_OMIT_AUTOVACUUM */ # define setChildPtrmaps(x) SQLITE_OK #endif /* ** This routine does the first phase of a two-phase commit. This routine ** causes a rollback journal to be created (if it does not already exist) ** and populated with enough information so that if a power loss occurs ** the database can be restored to its original state by playing back ** the journal. Then the contents of the journal are flushed out to ** the disk. After the journal is safely on oxide, the changes to the ** database are written into the database file and flushed to oxide. ** At the end of this call, the rollback journal still exists on the ** disk and we are still holding all locks, so the transaction has not ** committed. See sqlite3BtreeCommitPhaseTwo() for the second phase of the ** commit process. ** ** This call is a no-op if no write-transaction is currently active on pBt. ** ** Otherwise, sync the database file for the btree pBt. zMaster points to ** the name of a master journal file that should be written into the ** individual journal file, or is NULL, indicating no master journal file ** (single database transaction). ** ** When this is called, the master journal should already have been ** created, populated with this journal pointer and synced to disk. ** ** Once this is routine has returned, the only thing required to commit ** the write-transaction for this database file is to delete the journal. */ SQLITE_PRIVATE int sqlite3BtreeCommitPhaseOne(Btree *p, const char *zMaster){ int rc = SQLITE_OK; if( p->inTrans==TRANS_WRITE ){ BtShared *pBt = p->pBt; sqlite3BtreeEnter(p); #ifndef SQLITE_OMIT_AUTOVACUUM if( pBt->autoVacuum ){ rc = autoVacuumCommit(pBt); if( rc!=SQLITE_OK ){ sqlite3BtreeLeave(p); return rc; } } if( pBt->bDoTruncate ){ sqlite3PagerTruncateImage(pBt->pPager, pBt->nPage); } #endif rc = sqlite3PagerCommitPhaseOne(pBt->pPager, zMaster, 0); sqlite3BtreeLeave(p); } return rc; } /* ** This function is called from both BtreeCommitPhaseTwo() and BtreeRollback() ** at the conclusion of a transaction. 
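**
** For orientation, an ordinary single-database commit reaches this
** function through the following sequence (sketch; see sqlite3BtreeCommit()
** further down):
**
**     sqlite3BtreeCommitPhaseOne(p, 0);     journal and database synced
**     sqlite3BtreeCommitPhaseTwo(p, 0);     journal finalized
**       -> btreeEndTransaction(p);          locks dropped, state reset
**
** A rollback arrives here via sqlite3BtreeRollback() instead.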
*/ static void btreeEndTransaction(Btree *p){ BtShared *pBt = p->pBt; sqlite3 *db = p->db; assert( sqlite3BtreeHoldsMutex(p) ); #ifndef SQLITE_OMIT_AUTOVACUUM pBt->bDoTruncate = 0; #endif if( p->inTrans>TRANS_NONE && db->nVdbeRead>1 ){ /* If there are other active statements that belong to this database ** handle, downgrade to a read-only transaction. The other statements ** may still be reading from the database. */ downgradeAllSharedCacheTableLocks(p); p->inTrans = TRANS_READ; }else{ /* If the handle had any kind of transaction open, decrement the ** transaction count of the shared btree. If the transaction count ** reaches 0, set the shared state to TRANS_NONE. The unlockBtreeIfUnused() ** call below will unlock the pager. */ if( p->inTrans!=TRANS_NONE ){ clearAllSharedCacheTableLocks(p); pBt->nTransaction--; if( 0==pBt->nTransaction ){ pBt->inTransaction = TRANS_NONE; } } /* Set the current transaction state to TRANS_NONE and unlock the ** pager if this call closed the only read or write transaction. */ p->inTrans = TRANS_NONE; unlockBtreeIfUnused(pBt); } btreeIntegrity(p); } /* ** Commit the transaction currently in progress. ** ** This routine implements the second phase of a 2-phase commit. The ** sqlite3BtreeCommitPhaseOne() routine does the first phase and should ** be invoked prior to calling this routine. The sqlite3BtreeCommitPhaseOne() ** routine did all the work of writing information out to disk and flushing the ** contents so that they are written onto the disk platter. All this ** routine has to do is delete or truncate or zero the header in the ** the rollback journal (which causes the transaction to commit) and ** drop locks. ** ** Normally, if an error occurs while the pager layer is attempting to ** finalize the underlying journal file, this function returns an error and ** the upper layer will attempt a rollback. However, if the second argument ** is non-zero then this b-tree transaction is part of a multi-file ** transaction. In this case, the transaction has already been committed ** (by deleting a master journal file) and the caller will ignore this ** functions return code. So, even if an error occurs in the pager layer, ** reset the b-tree objects internal state to indicate that the write ** transaction has been closed. This is quite safe, as the pager will have ** transitioned to the error state. ** ** This will release the write lock on the database file. If there ** are no active cursors, it also releases the read lock. */ SQLITE_PRIVATE int sqlite3BtreeCommitPhaseTwo(Btree *p, int bCleanup){ if( p->inTrans==TRANS_NONE ) return SQLITE_OK; sqlite3BtreeEnter(p); btreeIntegrity(p); /* If the handle has a write-transaction open, commit the shared-btrees ** transaction and set the shared state to TRANS_READ. */ if( p->inTrans==TRANS_WRITE ){ int rc; BtShared *pBt = p->pBt; assert( pBt->inTransaction==TRANS_WRITE ); assert( pBt->nTransaction>0 ); rc = sqlite3PagerCommitPhaseTwo(pBt->pPager); if( rc!=SQLITE_OK && bCleanup==0 ){ sqlite3BtreeLeave(p); return rc; } p->iDataVersion--; /* Compensate for pPager->iDataVersion++; */ pBt->inTransaction = TRANS_READ; btreeClearHasContent(pBt); } btreeEndTransaction(p); sqlite3BtreeLeave(p); return SQLITE_OK; } /* ** Do both phases of a commit. 
*/ SQLITE_PRIVATE int sqlite3BtreeCommit(Btree *p){ int rc; sqlite3BtreeEnter(p); rc = sqlite3BtreeCommitPhaseOne(p, 0); if( rc==SQLITE_OK ){ rc = sqlite3BtreeCommitPhaseTwo(p, 0); } sqlite3BtreeLeave(p); return rc; } /* ** This routine sets the state to CURSOR_FAULT and the error ** code to errCode for every cursor on any BtShared that pBtree ** references. Or if the writeOnly flag is set to 1, then only ** trip write cursors and leave read cursors unchanged. ** ** Every cursor is a candidate to be tripped, including cursors ** that belong to other database connections that happen to be ** sharing the cache with pBtree. ** ** This routine gets called when a rollback occurs. If the writeOnly ** flag is true, then only write-cursors need be tripped - read-only ** cursors save their current positions so that they may continue ** following the rollback. Or, if writeOnly is false, all cursors are ** tripped. In general, writeOnly is false if the transaction being ** rolled back modified the database schema. In this case b-tree root ** pages may be moved or deleted from the database altogether, making ** it unsafe for read cursors to continue. ** ** If the writeOnly flag is true and an error is encountered while ** saving the current position of a read-only cursor, all cursors, ** including all read-cursors are tripped. ** ** SQLITE_OK is returned if successful, or if an error occurs while ** saving a cursor position, an SQLite error code. */ SQLITE_PRIVATE int sqlite3BtreeTripAllCursors(Btree *pBtree, int errCode, int writeOnly){ BtCursor *p; int rc = SQLITE_OK; assert( (writeOnly==0 || writeOnly==1) && BTCF_WriteFlag==1 ); if( pBtree ){ sqlite3BtreeEnter(pBtree); for(p=pBtree->pBt->pCursor; p; p=p->pNext){ int i; if( writeOnly && (p->curFlags & BTCF_WriteFlag)==0 ){ if( p->eState==CURSOR_VALID || p->eState==CURSOR_SKIPNEXT ){ rc = saveCursorPosition(p); if( rc!=SQLITE_OK ){ (void)sqlite3BtreeTripAllCursors(pBtree, rc, 0); break; } } }else{ sqlite3BtreeClearCursor(p); p->eState = CURSOR_FAULT; p->skipNext = errCode; } for(i=0; i<=p->iPage; i++){ releasePage(p->apPage[i]); p->apPage[i] = 0; } } sqlite3BtreeLeave(pBtree); } return rc; } /* ** Rollback the transaction in progress. ** ** If tripCode is not SQLITE_OK then cursors will be invalidated (tripped). ** Only write cursors are tripped if writeOnly is true but all cursors are ** tripped if writeOnly is false. Any attempt to use ** a tripped cursor will result in an error. ** ** This will release the write lock on the database file. If there ** are no active cursors, it also releases the read lock. */ SQLITE_PRIVATE int sqlite3BtreeRollback(Btree *p, int tripCode, int writeOnly){ int rc; BtShared *pBt = p->pBt; MemPage *pPage1; assert( writeOnly==1 || writeOnly==0 ); assert( tripCode==SQLITE_ABORT_ROLLBACK || tripCode==SQLITE_OK ); sqlite3BtreeEnter(p); if( tripCode==SQLITE_OK ){ rc = tripCode = saveAllCursors(pBt, 0, 0); if( rc ) writeOnly = 0; }else{ rc = SQLITE_OK; } if( tripCode ){ int rc2 = sqlite3BtreeTripAllCursors(p, tripCode, writeOnly); assert( rc==SQLITE_OK || (writeOnly==0 && rc2==SQLITE_OK) ); if( rc2!=SQLITE_OK ) rc = rc2; } btreeIntegrity(p); if( p->inTrans==TRANS_WRITE ){ int rc2; assert( TRANS_WRITE==pBt->inTransaction ); rc2 = sqlite3PagerRollback(pBt->pPager); if( rc2!=SQLITE_OK ){ rc = rc2; } /* The rollback may have destroyed the pPage1->aData value. So ** call btreeGetPage() on page 1 again to make ** sure pPage1->aData is set correctly. 
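**
** The database-size field consulted below is the 32-bit big-endian integer
** at byte offset 28 of page 1.  get4byte() decodes it as in this equivalent
** sketch:
**
**     u32 v = ((u32)a[0]<<24) | ((u32)a[1]<<16) | ((u32)a[2]<<8) | (u32)a[3];
**
** A value of zero there (as left by some older writers) makes the code
** below fall back to asking the pager for the file size in pages.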
*/ if( btreeGetPage(pBt, 1, &pPage1, 0)==SQLITE_OK ){ int nPage = get4byte(28+(u8*)pPage1->aData); testcase( nPage==0 ); if( nPage==0 ) sqlite3PagerPagecount(pBt->pPager, &nPage); testcase( pBt->nPage!=nPage ); pBt->nPage = nPage; releasePage(pPage1); } assert( countValidCursors(pBt, 1)==0 ); pBt->inTransaction = TRANS_READ; btreeClearHasContent(pBt); } btreeEndTransaction(p); sqlite3BtreeLeave(p); return rc; } /* ** Start a statement subtransaction. The subtransaction can be rolled ** back independently of the main transaction. You must start a transaction ** before starting a subtransaction. The subtransaction is ended automatically ** if the main transaction commits or rolls back. ** ** Statement subtransactions are used around individual SQL statements ** that are contained within a BEGIN...COMMIT block. If a constraint ** error occurs within the statement, the effect of that one statement ** can be rolled back without having to rollback the entire transaction. ** ** A statement sub-transaction is implemented as an anonymous savepoint. The ** value passed as the second parameter is the total number of savepoints, ** including the new anonymous savepoint, open on the B-Tree. i.e. if there ** are no active savepoints and no other statement-transactions open, ** iStatement is 1. This anonymous savepoint can be released or rolled back ** using the sqlite3BtreeSavepoint() function. */ SQLITE_PRIVATE int sqlite3BtreeBeginStmt(Btree *p, int iStatement){ int rc; BtShared *pBt = p->pBt; sqlite3BtreeEnter(p); assert( p->inTrans==TRANS_WRITE ); assert( (pBt->btsFlags & BTS_READ_ONLY)==0 ); assert( iStatement>0 ); assert( iStatement>p->db->nSavepoint ); assert( pBt->inTransaction==TRANS_WRITE ); /* At the pager level, a statement transaction is a savepoint with ** an index greater than all savepoints created explicitly using ** SQL statements. It is illegal to open, release or rollback any ** such savepoints while the statement transaction savepoint is active. */ rc = sqlite3PagerOpenSavepoint(pBt->pPager, iStatement); sqlite3BtreeLeave(p); return rc; } /* ** The second argument to this function, op, is always SAVEPOINT_ROLLBACK ** or SAVEPOINT_RELEASE. This function either releases or rolls back the ** savepoint identified by parameter iSavepoint, depending on the value ** of op. ** ** Normally, iSavepoint is greater than or equal to zero. However, if op is ** SAVEPOINT_ROLLBACK, then iSavepoint may also be -1. In this case the ** contents of the entire transaction are rolled back. This is different ** from a normal transaction rollback, as no locks are released and the ** transaction remains open. */ SQLITE_PRIVATE int sqlite3BtreeSavepoint(Btree *p, int op, int iSavepoint){ int rc = SQLITE_OK; if( p && p->inTrans==TRANS_WRITE ){ BtShared *pBt = p->pBt; assert( op==SAVEPOINT_RELEASE || op==SAVEPOINT_ROLLBACK ); assert( iSavepoint>=0 || (iSavepoint==-1 && op==SAVEPOINT_ROLLBACK) ); sqlite3BtreeEnter(p); rc = sqlite3PagerSavepoint(pBt->pPager, op, iSavepoint); if( rc==SQLITE_OK ){ if( iSavepoint<0 && (pBt->btsFlags & BTS_INITIALLY_EMPTY)!=0 ){ pBt->nPage = 0; } rc = newDatabase(pBt); pBt->nPage = get4byte(28 + pBt->pPage1->aData); /* The database size was written into the offset 28 of the header ** when the transaction started, so we know that the value at offset ** 28 is nonzero. */ assert( pBt->nPage>0 ); } sqlite3BtreeLeave(p); } return rc; } /* ** Create a new cursor for the BTree whose root is on the page ** iTable. 
If a read-only cursor is requested, it is assumed that ** the caller already has at least a read-only transaction open ** on the database already. If a write-cursor is requested, then ** the caller is assumed to have an open write transaction. ** ** If wrFlag==0, then the cursor can only be used for reading. ** If wrFlag==1, then the cursor can be used for reading or for ** writing if other conditions for writing are also met. These ** are the conditions that must be met in order for writing to ** be allowed: ** ** 1: The cursor must have been opened with wrFlag==1 ** ** 2: Other database connections that share the same pager cache ** but which are not in the READ_UNCOMMITTED state may not have ** cursors open with wrFlag==0 on the same table. Otherwise ** the changes made by this write cursor would be visible to ** the read cursors in the other database connection. ** ** 3: The database must be writable (not on read-only media) ** ** 4: There must be an active transaction. ** ** No checking is done to make sure that page iTable really is the ** root page of a b-tree. If it is not, then the cursor acquired ** will not work correctly. ** ** It is assumed that the sqlite3BtreeCursorZero() has been called ** on pCur to initialize the memory space prior to invoking this routine. */ static int btreeCursor( Btree *p, /* The btree */ int iTable, /* Root page of table to open */ int wrFlag, /* 1 to write. 0 read-only */ struct KeyInfo *pKeyInfo, /* First arg to comparison function */ BtCursor *pCur /* Space for new cursor */ ){ BtShared *pBt = p->pBt; /* Shared b-tree handle */ BtCursor *pX; /* Looping over other all cursors */ assert( sqlite3BtreeHoldsMutex(p) ); assert( wrFlag==0 || wrFlag==1 ); /* The following assert statements verify that if this is a sharable ** b-tree database, the connection is holding the required table locks, ** and that no other connection has any open cursor that conflicts with ** this lock. */ assert( hasSharedCacheTableLock(p, iTable, pKeyInfo!=0, wrFlag+1) ); assert( wrFlag==0 || !hasReadConflicts(p, iTable) ); /* Assert that the caller has opened the required transaction. */ assert( p->inTrans>TRANS_NONE ); assert( wrFlag==0 || p->inTrans==TRANS_WRITE ); assert( pBt->pPage1 && pBt->pPage1->aData ); assert( wrFlag==0 || (pBt->btsFlags & BTS_READ_ONLY)==0 ); if( wrFlag ){ allocateTempSpace(pBt); if( pBt->pTmpSpace==0 ) return SQLITE_NOMEM; } if( iTable==1 && btreePagecount(pBt)==0 ){ assert( wrFlag==0 ); iTable = 0; } /* Now that no other errors can occur, finish filling in the BtCursor ** variables and link the cursor into the BtShared list. */ pCur->pgnoRoot = (Pgno)iTable; pCur->iPage = -1; pCur->pKeyInfo = pKeyInfo; pCur->pBtree = p; pCur->pBt = pBt; assert( wrFlag==0 || wrFlag==BTCF_WriteFlag ); pCur->curFlags = wrFlag; pCur->curPagerFlags = wrFlag ? 0 : PAGER_GET_READONLY; /* If there are two or more cursors on the same btree, then all such ** cursors *must* have the BTCF_Multiple flag set. */ for(pX=pBt->pCursor; pX; pX=pX->pNext){ if( pX->pgnoRoot==(Pgno)iTable ){ pX->curFlags |= BTCF_Multiple; pCur->curFlags |= BTCF_Multiple; } } pCur->pNext = pBt->pCursor; pBt->pCursor = pCur; pCur->eState = CURSOR_INVALID; return SQLITE_OK; } SQLITE_PRIVATE int sqlite3BtreeCursor( Btree *p, /* The btree */ int iTable, /* Root page of table to open */ int wrFlag, /* 1 to write. 
0 read-only */ struct KeyInfo *pKeyInfo, /* First arg to xCompare() */ BtCursor *pCur /* Write new cursor here */ ){ int rc; if( iTable<1 ){ rc = SQLITE_CORRUPT_BKPT; }else{ sqlite3BtreeEnter(p); rc = btreeCursor(p, iTable, wrFlag, pKeyInfo, pCur); sqlite3BtreeLeave(p); } return rc; } /* ** Return the size of a BtCursor object in bytes. ** ** This interfaces is needed so that users of cursors can preallocate ** sufficient storage to hold a cursor. The BtCursor object is opaque ** to users so they cannot do the sizeof() themselves - they must call ** this routine. */ SQLITE_PRIVATE int sqlite3BtreeCursorSize(void){ return ROUND8(sizeof(BtCursor)); } /* ** Initialize memory that will be converted into a BtCursor object. ** ** The simple approach here would be to memset() the entire object ** to zero. But it turns out that the apPage[] and aiIdx[] arrays ** do not need to be zeroed and they are large, so we can save a lot ** of run-time by skipping the initialization of those elements. */ SQLITE_PRIVATE void sqlite3BtreeCursorZero(BtCursor *p){ memset(p, 0, offsetof(BtCursor, iPage)); } /* ** Close a cursor. The read lock on the database file is released ** when the last cursor is closed. */ SQLITE_PRIVATE int sqlite3BtreeCloseCursor(BtCursor *pCur){ Btree *pBtree = pCur->pBtree; if( pBtree ){ int i; BtShared *pBt = pCur->pBt; sqlite3BtreeEnter(pBtree); sqlite3BtreeClearCursor(pCur); assert( pBt->pCursor!=0 ); if( pBt->pCursor==pCur ){ pBt->pCursor = pCur->pNext; }else{ BtCursor *pPrev = pBt->pCursor; do{ if( pPrev->pNext==pCur ){ pPrev->pNext = pCur->pNext; break; } pPrev = pPrev->pNext; }while( ALWAYS(pPrev) ); } for(i=0; i<=pCur->iPage; i++){ releasePage(pCur->apPage[i]); } unlockBtreeIfUnused(pBt); sqlite3_free(pCur->aOverflow); /* sqlite3_free(pCur); */ sqlite3BtreeLeave(pBtree); } return SQLITE_OK; } /* ** Make sure the BtCursor* given in the argument has a valid ** BtCursor.info structure. If it is not already valid, call ** btreeParseCell() to fill it in. ** ** BtCursor.info is a cache of the information in the current cell. ** Using this cache reduces the number of calls to btreeParseCell(). */ #ifndef NDEBUG static void assertCellInfo(BtCursor *pCur){ CellInfo info; int iPage = pCur->iPage; memset(&info, 0, sizeof(info)); btreeParseCell(pCur->apPage[iPage], pCur->aiIdx[iPage], &info); assert( CORRUPT_DB || memcmp(&info, &pCur->info, sizeof(info))==0 ); } #else #define assertCellInfo(x) #endif static SQLITE_NOINLINE void getCellInfo(BtCursor *pCur){ if( pCur->info.nSize==0 ){ int iPage = pCur->iPage; pCur->curFlags |= BTCF_ValidNKey; btreeParseCell(pCur->apPage[iPage],pCur->aiIdx[iPage],&pCur->info); }else{ assertCellInfo(pCur); } } #ifndef NDEBUG /* The next routine used only within assert() statements */ /* ** Return true if the given BtCursor is valid. A valid cursor is one ** that is currently pointing to a row in a (non-empty) table. ** This is a verification routine is used only within assert() statements. */ SQLITE_PRIVATE int sqlite3BtreeCursorIsValid(BtCursor *pCur){ return pCur && pCur->eState==CURSOR_VALID; } #endif /* NDEBUG */ /* ** Set *pSize to the size of the buffer needed to hold the value of ** the key for the current entry. If the cursor is not pointing ** to a valid entry, *pSize is set to 0. ** ** For a table with the INTKEY flag set, this routine returns the key ** itself, not the number of bytes in the key. ** ** The caller must position the cursor prior to invoking this routine. ** ** This routine cannot fail. It always returns SQLITE_OK. 
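**
** Illustrative usage (sketch, nKey being a caller-supplied variable):
**
**     i64 nKey;
**     sqlite3BtreeKeySize(pCur, &nKey);
**
** For a table (intKey) btree, nKey then holds the rowid of the current
** row; for an index btree it holds the number of bytes in the key.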
*/ SQLITE_PRIVATE int sqlite3BtreeKeySize(BtCursor *pCur, i64 *pSize){ assert( cursorHoldsMutex(pCur) ); assert( pCur->eState==CURSOR_VALID ); getCellInfo(pCur); *pSize = pCur->info.nKey; return SQLITE_OK; } /* ** Set *pSize to the number of bytes of data in the entry the ** cursor currently points to. ** ** The caller must guarantee that the cursor is pointing to a non-NULL ** valid entry. In other words, the calling procedure must guarantee ** that the cursor has Cursor.eState==CURSOR_VALID. ** ** Failure is not possible. This function always returns SQLITE_OK. ** It might just as well be a procedure (returning void) but we continue ** to return an integer result code for historical reasons. */ SQLITE_PRIVATE int sqlite3BtreeDataSize(BtCursor *pCur, u32 *pSize){ assert( cursorHoldsMutex(pCur) ); assert( pCur->eState==CURSOR_VALID ); assert( pCur->iPage>=0 ); assert( pCur->iPageapPage[pCur->iPage]->intKeyLeaf==1 ); getCellInfo(pCur); *pSize = pCur->info.nPayload; return SQLITE_OK; } /* ** Given the page number of an overflow page in the database (parameter ** ovfl), this function finds the page number of the next page in the ** linked list of overflow pages. If possible, it uses the auto-vacuum ** pointer-map data instead of reading the content of page ovfl to do so. ** ** If an error occurs an SQLite error code is returned. Otherwise: ** ** The page number of the next overflow page in the linked list is ** written to *pPgnoNext. If page ovfl is the last page in its linked ** list, *pPgnoNext is set to zero. ** ** If ppPage is not NULL, and a reference to the MemPage object corresponding ** to page number pOvfl was obtained, then *ppPage is set to point to that ** reference. It is the responsibility of the caller to call releasePage() ** on *ppPage to free the reference. In no reference was obtained (because ** the pointer-map was used to obtain the value for *pPgnoNext), then ** *ppPage is set to zero. */ static int getOverflowPage( BtShared *pBt, /* The database file */ Pgno ovfl, /* Current overflow page number */ MemPage **ppPage, /* OUT: MemPage handle (may be NULL) */ Pgno *pPgnoNext /* OUT: Next overflow page number */ ){ Pgno next = 0; MemPage *pPage = 0; int rc = SQLITE_OK; assert( sqlite3_mutex_held(pBt->mutex) ); assert(pPgnoNext); #ifndef SQLITE_OMIT_AUTOVACUUM /* Try to find the next page in the overflow list using the ** autovacuum pointer-map pages. Guess that the next page in ** the overflow list is page number (ovfl+1). If that guess turns ** out to be wrong, fall back to loading the data of page ** number ovfl to determine the next page number. */ if( pBt->autoVacuum ){ Pgno pgno; Pgno iGuess = ovfl+1; u8 eType; while( PTRMAP_ISPAGE(pBt, iGuess) || iGuess==PENDING_BYTE_PAGE(pBt) ){ iGuess++; } if( iGuess<=btreePagecount(pBt) ){ rc = ptrmapGet(pBt, iGuess, &eType, &pgno); if( rc==SQLITE_OK && eType==PTRMAP_OVERFLOW2 && pgno==ovfl ){ next = iGuess; rc = SQLITE_DONE; } } } #endif assert( next==0 || rc==SQLITE_DONE ); if( rc==SQLITE_OK ){ rc = btreeGetPage(pBt, ovfl, &pPage, (ppPage==0) ? PAGER_GET_READONLY : 0); assert( rc==SQLITE_OK || pPage==0 ); if( rc==SQLITE_OK ){ next = get4byte(pPage->aData); } } *pPgnoNext = next; if( ppPage ){ *ppPage = pPage; }else{ releasePage(pPage); } return (rc==SQLITE_DONE ? SQLITE_OK : rc); } /* ** Copy data from a buffer to a page, or from a page to a buffer. ** ** pPayload is a pointer to data stored on database page pDbPage. ** If argument eOp is false, then nByte bytes of data are copied ** from pPayload to the buffer pointed at by pBuf. 
If eOp is true, ** then sqlite3PagerWrite() is called on pDbPage and nByte bytes ** of data are copied from the buffer pBuf to pPayload. ** ** SQLITE_OK is returned on success, otherwise an error code. */ static int copyPayload( void *pPayload, /* Pointer to page data */ void *pBuf, /* Pointer to buffer */ int nByte, /* Number of bytes to copy */ int eOp, /* 0 -> copy from page, 1 -> copy to page */ DbPage *pDbPage /* Page containing pPayload */ ){ if( eOp ){ /* Copy data from buffer to page (a write operation) */ int rc = sqlite3PagerWrite(pDbPage); if( rc!=SQLITE_OK ){ return rc; } memcpy(pPayload, pBuf, nByte); }else{ /* Copy data from page to buffer (a read operation) */ memcpy(pBuf, pPayload, nByte); } return SQLITE_OK; } /* ** This function is used to read or overwrite payload information ** for the entry that the pCur cursor is pointing to. The eOp ** argument is interpreted as follows: ** ** 0: The operation is a read. Populate the overflow cache. ** 1: The operation is a write. Populate the overflow cache. ** 2: The operation is a read. Do not populate the overflow cache. ** ** A total of "amt" bytes are read or written beginning at "offset". ** Data is read to or from the buffer pBuf. ** ** The content being read or written might appear on the main page ** or be scattered out on multiple overflow pages. ** ** If the current cursor entry uses one or more overflow pages and the ** eOp argument is not 2, this function may allocate space for and lazily ** populates the overflow page-list cache array (BtCursor.aOverflow). ** Subsequent calls use this cache to make seeking to the supplied offset ** more efficient. ** ** Once an overflow page-list cache has been allocated, it may be ** invalidated if some other cursor writes to the same table, or if ** the cursor is moved to a different row. Additionally, in auto-vacuum ** mode, the following events may invalidate an overflow page-list cache. ** ** * An incremental vacuum, ** * A commit in auto_vacuum="full" mode, ** * Creating a table (may require moving an overflow page). */ static int accessPayload( BtCursor *pCur, /* Cursor pointing to entry to read from */ u32 offset, /* Begin reading this far into payload */ u32 amt, /* Read this many bytes */ unsigned char *pBuf, /* Write the bytes into this buffer */ int eOp /* zero to read. non-zero to write. */ ){ unsigned char *aPayload; int rc = SQLITE_OK; int iIdx = 0; MemPage *pPage = pCur->apPage[pCur->iPage]; /* Btree page of current entry */ BtShared *pBt = pCur->pBt; /* Btree this cursor belongs to */ #ifdef SQLITE_DIRECT_OVERFLOW_READ unsigned char * const pBufStart = pBuf; int bEnd; /* True if reading to end of data */ #endif assert( pPage ); assert( pCur->eState==CURSOR_VALID ); assert( pCur->aiIdx[pCur->iPage]nCell ); assert( cursorHoldsMutex(pCur) ); assert( eOp!=2 || offset==0 ); /* Always start from beginning for eOp==2 */ getCellInfo(pCur); aPayload = pCur->info.pPayload; #ifdef SQLITE_DIRECT_OVERFLOW_READ bEnd = offset+amt==pCur->info.nPayload; #endif assert( offset+amt <= pCur->info.nPayload ); if( &aPayload[pCur->info.nLocal] > &pPage->aData[pBt->usableSize] ){ /* Trying to read or write past the end of the data is an error */ return SQLITE_CORRUPT_BKPT; } /* Check if data must be read/written to/from the btree page itself. 
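**
** A worked example of the split performed below, using hypothetical
** numbers: if info.nLocal==700 for the current cell, offset==600 and
** amt==300, then 600 < 700, so the first 700-600 = 100 bytes are copied
** straight from the local cell content on this page; the remaining 200
** bytes come from the overflow chain, whose first page number is stored
** in the four bytes that follow the local content.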
*/ if( offsetinfo.nLocal ){ int a = amt; if( a+offset>pCur->info.nLocal ){ a = pCur->info.nLocal - offset; } rc = copyPayload(&aPayload[offset], pBuf, a, (eOp & 0x01), pPage->pDbPage); offset = 0; pBuf += a; amt -= a; }else{ offset -= pCur->info.nLocal; } if( rc==SQLITE_OK && amt>0 ){ const u32 ovflSize = pBt->usableSize - 4; /* Bytes content per ovfl page */ Pgno nextPage; nextPage = get4byte(&aPayload[pCur->info.nLocal]); /* If the BtCursor.aOverflow[] has not been allocated, allocate it now. ** Except, do not allocate aOverflow[] for eOp==2. ** ** The aOverflow[] array is sized at one entry for each overflow page ** in the overflow chain. The page number of the first overflow page is ** stored in aOverflow[0], etc. A value of 0 in the aOverflow[] array ** means "not yet known" (the cache is lazily populated). */ if( eOp!=2 && (pCur->curFlags & BTCF_ValidOvfl)==0 ){ int nOvfl = (pCur->info.nPayload-pCur->info.nLocal+ovflSize-1)/ovflSize; if( nOvfl>pCur->nOvflAlloc ){ Pgno *aNew = (Pgno*)sqlite3Realloc( pCur->aOverflow, nOvfl*2*sizeof(Pgno) ); if( aNew==0 ){ rc = SQLITE_NOMEM; }else{ pCur->nOvflAlloc = nOvfl*2; pCur->aOverflow = aNew; } } if( rc==SQLITE_OK ){ memset(pCur->aOverflow, 0, nOvfl*sizeof(Pgno)); pCur->curFlags |= BTCF_ValidOvfl; } } /* If the overflow page-list cache has been allocated and the ** entry for the first required overflow page is valid, skip ** directly to it. */ if( (pCur->curFlags & BTCF_ValidOvfl)!=0 && pCur->aOverflow[offset/ovflSize] ){ iIdx = (offset/ovflSize); nextPage = pCur->aOverflow[iIdx]; offset = (offset%ovflSize); } for( ; rc==SQLITE_OK && amt>0 && nextPage; iIdx++){ /* If required, populate the overflow page-list cache. */ if( (pCur->curFlags & BTCF_ValidOvfl)!=0 ){ assert(!pCur->aOverflow[iIdx] || pCur->aOverflow[iIdx]==nextPage); pCur->aOverflow[iIdx] = nextPage; } if( offset>=ovflSize ){ /* The only reason to read this page is to obtain the page ** number for the next page in the overflow chain. The page ** data is not required. So first try to lookup the overflow ** page-list cache, if any, then fall back to the getOverflowPage() ** function. ** ** Note that the aOverflow[] array must be allocated because eOp!=2 ** here. If eOp==2, then offset==0 and this branch is never taken. */ assert( eOp!=2 ); assert( pCur->curFlags & BTCF_ValidOvfl ); assert( pCur->pBtree->db==pBt->db ); if( pCur->aOverflow[iIdx+1] ){ nextPage = pCur->aOverflow[iIdx+1]; }else{ rc = getOverflowPage(pBt, nextPage, 0, &nextPage); } offset -= ovflSize; }else{ /* Need to read this page properly. It contains some of the ** range of data that is being read (eOp==0) or written (eOp!=0). */ #ifdef SQLITE_DIRECT_OVERFLOW_READ sqlite3_file *fd; #endif int a = amt; if( a + offset > ovflSize ){ a = ovflSize - offset; } #ifdef SQLITE_DIRECT_OVERFLOW_READ /* If all the following are true: ** ** 1) this is a read operation, and ** 2) data is required from the start of this overflow page, and ** 3) the database is file-backed, and ** 4) there is no open write-transaction, and ** 5) the database is not a WAL database, ** 6) all data from the page is being read. ** 7) at least 4 bytes have already been read into the output buffer ** ** then data can be read directly from the database file into the ** output buffer, bypassing the page-cache altogether. This speeds ** up loading large records that span many overflow pages. 
*/ if( (eOp&0x01)==0 /* (1) */ && offset==0 /* (2) */ && (bEnd || a==ovflSize) /* (6) */ && pBt->inTransaction==TRANS_READ /* (4) */ && (fd = sqlite3PagerFile(pBt->pPager))->pMethods /* (3) */ && pBt->pPage1->aData[19]==0x01 /* (5) */ && &pBuf[-4]>=pBufStart /* (7) */ ){ u8 aSave[4]; u8 *aWrite = &pBuf[-4]; assert( aWrite>=pBufStart ); /* hence (7) */ memcpy(aSave, aWrite, 4); rc = sqlite3OsRead(fd, aWrite, a+4, (i64)pBt->pageSize*(nextPage-1)); nextPage = get4byte(aWrite); memcpy(aWrite, aSave, 4); }else #endif { DbPage *pDbPage; rc = sqlite3PagerAcquire(pBt->pPager, nextPage, &pDbPage, ((eOp&0x01)==0 ? PAGER_GET_READONLY : 0) ); if( rc==SQLITE_OK ){ aPayload = sqlite3PagerGetData(pDbPage); nextPage = get4byte(aPayload); rc = copyPayload(&aPayload[offset+4], pBuf, a, (eOp&0x01), pDbPage); sqlite3PagerUnref(pDbPage); offset = 0; } } amt -= a; pBuf += a; } } } if( rc==SQLITE_OK && amt>0 ){ return SQLITE_CORRUPT_BKPT; } return rc; } /* ** Read part of the key associated with cursor pCur. Exactly ** "amt" bytes will be transferred into pBuf[]. The transfer ** begins at "offset". ** ** The caller must ensure that pCur is pointing to a valid row ** in the table. ** ** Return SQLITE_OK on success or an error code if anything goes ** wrong. An error is returned if "offset+amt" is larger than ** the available payload. */ SQLITE_PRIVATE int sqlite3BtreeKey(BtCursor *pCur, u32 offset, u32 amt, void *pBuf){ assert( cursorHoldsMutex(pCur) ); assert( pCur->eState==CURSOR_VALID ); assert( pCur->iPage>=0 && pCur->apPage[pCur->iPage] ); assert( pCur->aiIdx[pCur->iPage]apPage[pCur->iPage]->nCell ); return accessPayload(pCur, offset, amt, (unsigned char*)pBuf, 0); } /* ** Read part of the data associated with cursor pCur. Exactly ** "amt" bytes will be transfered into pBuf[]. The transfer ** begins at "offset". ** ** Return SQLITE_OK on success or an error code if anything goes ** wrong. An error is returned if "offset+amt" is larger than ** the available payload. */ SQLITE_PRIVATE int sqlite3BtreeData(BtCursor *pCur, u32 offset, u32 amt, void *pBuf){ int rc; #ifndef SQLITE_OMIT_INCRBLOB if ( pCur->eState==CURSOR_INVALID ){ return SQLITE_ABORT; } #endif assert( cursorHoldsMutex(pCur) ); rc = restoreCursorPosition(pCur); if( rc==SQLITE_OK ){ assert( pCur->eState==CURSOR_VALID ); assert( pCur->iPage>=0 && pCur->apPage[pCur->iPage] ); assert( pCur->aiIdx[pCur->iPage]apPage[pCur->iPage]->nCell ); rc = accessPayload(pCur, offset, amt, pBuf, 0); } return rc; } /* ** Return a pointer to payload information from the entry that the ** pCur cursor is pointing to. The pointer is to the beginning of ** the key if index btrees (pPage->intKey==0) and is the data for ** table btrees (pPage->intKey==1). The number of bytes of available ** key/data is written into *pAmt. If *pAmt==0, then the value ** returned will not be a valid pointer. ** ** This routine is an optimization. It is common for the entire key ** and data to fit on the local page and for there to be no overflow ** pages. When that is so, this routine can be used to access the ** key and data without making a copy. If the key and/or data spills ** onto overflow pages, then accessPayload() must be used to reassemble ** the key/data and copy it into a preallocated buffer. ** ** The pointer returned by this routine looks directly into the cached ** page of the database. The data might change or move the next time ** any btree routine is called. 
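**
** As an informal usage sketch (the names nKeyWanted, pKeyBuf and rc are
** assumptions for illustration, not defined here): a caller that needs the
** whole key can try the no-copy path first and fall back to the copying
** interface, which goes through accessPayload(), when the key spills onto
** overflow pages:
**
**     u32 nAvail;
**     const void *pKey = sqlite3BtreeKeyFetch(pCur, &nAvail);
**     if( nAvail<nKeyWanted ){
**       rc = sqlite3BtreeKey(pCur, 0, nKeyWanted, pKeyBuf);
**       pKey = pKeyBuf;
**     }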
*/ static const void *fetchPayload( BtCursor *pCur, /* Cursor pointing to entry to read from */ u32 *pAmt /* Write the number of available bytes here */ ){ u32 amt; assert( pCur!=0 && pCur->iPage>=0 && pCur->apPage[pCur->iPage]); assert( pCur->eState==CURSOR_VALID ); assert( sqlite3_mutex_held(pCur->pBtree->db->mutex) ); assert( cursorHoldsMutex(pCur) ); assert( pCur->aiIdx[pCur->iPage]apPage[pCur->iPage]->nCell ); assert( pCur->info.nSize>0 ); assert( pCur->info.pPayload>pCur->apPage[pCur->iPage]->aData || CORRUPT_DB ); assert( pCur->info.pPayloadapPage[pCur->iPage]->aDataEnd ||CORRUPT_DB); amt = (int)(pCur->apPage[pCur->iPage]->aDataEnd - pCur->info.pPayload); if( pCur->info.nLocalinfo.nLocal; *pAmt = amt; return (void*)pCur->info.pPayload; } /* ** For the entry that cursor pCur is point to, return as ** many bytes of the key or data as are available on the local ** b-tree page. Write the number of available bytes into *pAmt. ** ** The pointer returned is ephemeral. The key/data may move ** or be destroyed on the next call to any Btree routine, ** including calls from other threads against the same cache. ** Hence, a mutex on the BtShared should be held prior to calling ** this routine. ** ** These routines is used to get quick access to key and data ** in the common case where no overflow pages are used. */ SQLITE_PRIVATE const void *sqlite3BtreeKeyFetch(BtCursor *pCur, u32 *pAmt){ return fetchPayload(pCur, pAmt); } SQLITE_PRIVATE const void *sqlite3BtreeDataFetch(BtCursor *pCur, u32 *pAmt){ return fetchPayload(pCur, pAmt); } /* ** Move the cursor down to a new child page. The newPgno argument is the ** page number of the child page to move to. ** ** This function returns SQLITE_CORRUPT if the page-header flags field of ** the new child page does not match the flags field of the parent (i.e. ** if an intkey page appears to be the parent of a non-intkey page, or ** vice-versa). */ static int moveToChild(BtCursor *pCur, u32 newPgno){ BtShared *pBt = pCur->pBt; assert( cursorHoldsMutex(pCur) ); assert( pCur->eState==CURSOR_VALID ); assert( pCur->iPageiPage>=0 ); if( pCur->iPage>=(BTCURSOR_MAX_DEPTH-1) ){ return SQLITE_CORRUPT_BKPT; } pCur->info.nSize = 0; pCur->curFlags &= ~(BTCF_ValidNKey|BTCF_ValidOvfl); pCur->iPage++; pCur->aiIdx[pCur->iPage] = 0; return getAndInitPage(pBt, newPgno, &pCur->apPage[pCur->iPage], pCur, pCur->curPagerFlags); } #if SQLITE_DEBUG /* ** Page pParent is an internal (non-leaf) tree page. This function ** asserts that page number iChild is the left-child if the iIdx'th ** cell in page pParent. Or, if iIdx is equal to the total number of ** cells in pParent, that page number iChild is the right-child of ** the page. */ static void assertParentIndex(MemPage *pParent, int iIdx, Pgno iChild){ if( CORRUPT_DB ) return; /* The conditions tested below might not be true ** in a corrupt database */ assert( iIdx<=pParent->nCell ); if( iIdx==pParent->nCell ){ assert( get4byte(&pParent->aData[pParent->hdrOffset+8])==iChild ); }else{ assert( get4byte(findCell(pParent, iIdx))==iChild ); } } #else # define assertParentIndex(x,y,z) #endif /* ** Move the cursor up to the parent page. ** ** pCur->idx is set to the cell index that contains the pointer ** to the page we are coming from. If we are coming from the ** right-most child page then pCur->idx is set to one more than ** the largest cell index. 
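**
** Stated another way (an added note, not original text): for every
** interior level i of the cursor's page stack,
**
**     pCur->aiIdx[i] <= pCur->apPage[i]->nCell
**
** and cell aiIdx[i] of apPage[i] (or the right-child pointer when
** aiIdx[i]==nCell) is the pointer that leads to apPage[i+1].  This is
** what the assertParentIndex() call at the top of moveToParent()
** verifies in debug builds.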
*/ static void moveToParent(BtCursor *pCur){ assert( cursorHoldsMutex(pCur) ); assert( pCur->eState==CURSOR_VALID ); assert( pCur->iPage>0 ); assert( pCur->apPage[pCur->iPage] ); assertParentIndex( pCur->apPage[pCur->iPage-1], pCur->aiIdx[pCur->iPage-1], pCur->apPage[pCur->iPage]->pgno ); testcase( pCur->aiIdx[pCur->iPage-1] > pCur->apPage[pCur->iPage-1]->nCell ); pCur->info.nSize = 0; pCur->curFlags &= ~(BTCF_ValidNKey|BTCF_ValidOvfl); releasePageNotNull(pCur->apPage[pCur->iPage--]); } /* ** Move the cursor to point to the root page of its b-tree structure. ** ** If the table has a virtual root page, then the cursor is moved to point ** to the virtual root page instead of the actual root page. A table has a ** virtual root page when the actual root page contains no cells and a ** single child page. This can only happen with the table rooted at page 1. ** ** If the b-tree structure is empty, the cursor state is set to ** CURSOR_INVALID. Otherwise, the cursor is set to point to the first ** cell located on the root (or virtual root) page and the cursor state ** is set to CURSOR_VALID. ** ** If this function returns successfully, it may be assumed that the ** page-header flags indicate that the [virtual] root-page is the expected ** kind of b-tree page (i.e. if when opening the cursor the caller did not ** specify a KeyInfo structure the flags byte is set to 0x05 or 0x0D, ** indicating a table b-tree, or if the caller did specify a KeyInfo ** structure the flags byte is set to 0x02 or 0x0A, indicating an index ** b-tree). */ static int moveToRoot(BtCursor *pCur){ MemPage *pRoot; int rc = SQLITE_OK; assert( cursorHoldsMutex(pCur) ); assert( CURSOR_INVALID < CURSOR_REQUIRESEEK ); assert( CURSOR_VALID < CURSOR_REQUIRESEEK ); assert( CURSOR_FAULT > CURSOR_REQUIRESEEK ); if( pCur->eState>=CURSOR_REQUIRESEEK ){ if( pCur->eState==CURSOR_FAULT ){ assert( pCur->skipNext!=SQLITE_OK ); return pCur->skipNext; } sqlite3BtreeClearCursor(pCur); } if( pCur->iPage>=0 ){ while( pCur->iPage ){ assert( pCur->apPage[pCur->iPage]!=0 ); releasePageNotNull(pCur->apPage[pCur->iPage--]); } }else if( pCur->pgnoRoot==0 ){ pCur->eState = CURSOR_INVALID; return SQLITE_OK; }else{ assert( pCur->iPage==(-1) ); rc = getAndInitPage(pCur->pBtree->pBt, pCur->pgnoRoot, &pCur->apPage[0], 0, pCur->curPagerFlags); if( rc!=SQLITE_OK ){ pCur->eState = CURSOR_INVALID; return rc; } pCur->iPage = 0; pCur->curIntKey = pCur->apPage[0]->intKey; } pRoot = pCur->apPage[0]; assert( pRoot->pgno==pCur->pgnoRoot ); /* If pCur->pKeyInfo is not NULL, then the caller that opened this cursor ** expected to open it on an index b-tree. Otherwise, if pKeyInfo is ** NULL, the caller expects a table b-tree. If this is not the case, ** return an SQLITE_CORRUPT error. ** ** Earlier versions of SQLite assumed that this test could not fail ** if the root page was already loaded when this function was called (i.e. ** if pCur->iPage>=0). But this is not so if the database is corrupted ** in such a way that page pRoot is linked into a second b-tree table ** (or the freelist). 
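**
** To summarize the test below (an added note): a cursor opened without a
** KeyInfo (pCur->pKeyInfo==0) expects a table b-tree, so pRoot->intKey
** must be 1 (flag byte 0x05 or 0x0D); a cursor opened with a KeyInfo
** expects an index b-tree, so pRoot->intKey must be 0 (flag byte 0x02 or
** 0x0A).  The page is therefore rejected when pRoot->isInit==0 or when
** (pCur->pKeyInfo==0)!=pRoot->intKey.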
*/ assert( pRoot->intKey==1 || pRoot->intKey==0 ); if( pRoot->isInit==0 || (pCur->pKeyInfo==0)!=pRoot->intKey ){ return SQLITE_CORRUPT_BKPT; } pCur->aiIdx[0] = 0; pCur->info.nSize = 0; pCur->curFlags &= ~(BTCF_AtLast|BTCF_ValidNKey|BTCF_ValidOvfl); if( pRoot->nCell>0 ){ pCur->eState = CURSOR_VALID; }else if( !pRoot->leaf ){ Pgno subpage; if( pRoot->pgno!=1 ) return SQLITE_CORRUPT_BKPT; subpage = get4byte(&pRoot->aData[pRoot->hdrOffset+8]); pCur->eState = CURSOR_VALID; rc = moveToChild(pCur, subpage); }else{ pCur->eState = CURSOR_INVALID; } return rc; } /* ** Move the cursor down to the left-most leaf entry beneath the ** entry to which it is currently pointing. ** ** The left-most leaf is the one with the smallest key - the first ** in ascending order. */ static int moveToLeftmost(BtCursor *pCur){ Pgno pgno; int rc = SQLITE_OK; MemPage *pPage; assert( cursorHoldsMutex(pCur) ); assert( pCur->eState==CURSOR_VALID ); while( rc==SQLITE_OK && !(pPage = pCur->apPage[pCur->iPage])->leaf ){ assert( pCur->aiIdx[pCur->iPage]nCell ); pgno = get4byte(findCell(pPage, pCur->aiIdx[pCur->iPage])); rc = moveToChild(pCur, pgno); } return rc; } /* ** Move the cursor down to the right-most leaf entry beneath the ** page to which it is currently pointing. Notice the difference ** between moveToLeftmost() and moveToRightmost(). moveToLeftmost() ** finds the left-most entry beneath the *entry* whereas moveToRightmost() ** finds the right-most entry beneath the *page*. ** ** The right-most entry is the one with the largest key - the last ** key in ascending order. */ static int moveToRightmost(BtCursor *pCur){ Pgno pgno; int rc = SQLITE_OK; MemPage *pPage = 0; assert( cursorHoldsMutex(pCur) ); assert( pCur->eState==CURSOR_VALID ); while( !(pPage = pCur->apPage[pCur->iPage])->leaf ){ pgno = get4byte(&pPage->aData[pPage->hdrOffset+8]); pCur->aiIdx[pCur->iPage] = pPage->nCell; rc = moveToChild(pCur, pgno); if( rc ) return rc; } pCur->aiIdx[pCur->iPage] = pPage->nCell-1; assert( pCur->info.nSize==0 ); assert( (pCur->curFlags & BTCF_ValidNKey)==0 ); return SQLITE_OK; } /* Move the cursor to the first entry in the table. Return SQLITE_OK ** on success. Set *pRes to 0 if the cursor actually points to something ** or set *pRes to 1 if the table is empty. */ SQLITE_PRIVATE int sqlite3BtreeFirst(BtCursor *pCur, int *pRes){ int rc; assert( cursorHoldsMutex(pCur) ); assert( sqlite3_mutex_held(pCur->pBtree->db->mutex) ); rc = moveToRoot(pCur); if( rc==SQLITE_OK ){ if( pCur->eState==CURSOR_INVALID ){ assert( pCur->pgnoRoot==0 || pCur->apPage[pCur->iPage]->nCell==0 ); *pRes = 1; }else{ assert( pCur->apPage[pCur->iPage]->nCell>0 ); *pRes = 0; rc = moveToLeftmost(pCur); } } return rc; } /* Move the cursor to the last entry in the table. Return SQLITE_OK ** on success. Set *pRes to 0 if the cursor actually points to something ** or set *pRes to 1 if the table is empty. */ SQLITE_PRIVATE int sqlite3BtreeLast(BtCursor *pCur, int *pRes){ int rc; assert( cursorHoldsMutex(pCur) ); assert( sqlite3_mutex_held(pCur->pBtree->db->mutex) ); /* If the cursor already points to the last entry, this is a no-op. */ if( CURSOR_VALID==pCur->eState && (pCur->curFlags & BTCF_AtLast)!=0 ){ #ifdef SQLITE_DEBUG /* This block serves to assert() that the cursor really does point ** to the last entry in the b-tree. 
*/ int ii; for(ii=0; iiiPage; ii++){ assert( pCur->aiIdx[ii]==pCur->apPage[ii]->nCell ); } assert( pCur->aiIdx[pCur->iPage]==pCur->apPage[pCur->iPage]->nCell-1 ); assert( pCur->apPage[pCur->iPage]->leaf ); #endif return SQLITE_OK; } rc = moveToRoot(pCur); if( rc==SQLITE_OK ){ if( CURSOR_INVALID==pCur->eState ){ assert( pCur->pgnoRoot==0 || pCur->apPage[pCur->iPage]->nCell==0 ); *pRes = 1; }else{ assert( pCur->eState==CURSOR_VALID ); *pRes = 0; rc = moveToRightmost(pCur); if( rc==SQLITE_OK ){ pCur->curFlags |= BTCF_AtLast; }else{ pCur->curFlags &= ~BTCF_AtLast; } } } return rc; } /* Move the cursor so that it points to an entry near the key ** specified by pIdxKey or intKey. Return a success code. ** ** For INTKEY tables, the intKey parameter is used. pIdxKey ** must be NULL. For index tables, pIdxKey is used and intKey ** is ignored. ** ** If an exact match is not found, then the cursor is always ** left pointing at a leaf page which would hold the entry if it ** were present. The cursor might point to an entry that comes ** before or after the key. ** ** An integer is written into *pRes which is the result of ** comparing the key with the entry to which the cursor is ** pointing. The meaning of the integer written into ** *pRes is as follows: ** ** *pRes<0 The cursor is left pointing at an entry that ** is smaller than intKey/pIdxKey or if the table is empty ** and the cursor is therefore left point to nothing. ** ** *pRes==0 The cursor is left pointing at an entry that ** exactly matches intKey/pIdxKey. ** ** *pRes>0 The cursor is left pointing at an entry that ** is larger than intKey/pIdxKey. ** */ SQLITE_PRIVATE int sqlite3BtreeMovetoUnpacked( BtCursor *pCur, /* The cursor to be moved */ UnpackedRecord *pIdxKey, /* Unpacked index key */ i64 intKey, /* The table key */ int biasRight, /* If true, bias the search to the high end */ int *pRes /* Write search results here */ ){ int rc; RecordCompare xRecordCompare; assert( cursorHoldsMutex(pCur) ); assert( sqlite3_mutex_held(pCur->pBtree->db->mutex) ); assert( pRes ); assert( (pIdxKey==0)==(pCur->pKeyInfo==0) ); /* If the cursor is already positioned at the point we are trying ** to move to, then just return without doing any work */ if( pCur->eState==CURSOR_VALID && (pCur->curFlags & BTCF_ValidNKey)!=0 && pCur->curIntKey ){ if( pCur->info.nKey==intKey ){ *pRes = 0; return SQLITE_OK; } if( (pCur->curFlags & BTCF_AtLast)!=0 && pCur->info.nKeyerrCode = 0; assert( pIdxKey->default_rc==1 || pIdxKey->default_rc==0 || pIdxKey->default_rc==-1 ); }else{ xRecordCompare = 0; /* All keys are integers */ } rc = moveToRoot(pCur); if( rc ){ return rc; } assert( pCur->pgnoRoot==0 || pCur->apPage[pCur->iPage] ); assert( pCur->pgnoRoot==0 || pCur->apPage[pCur->iPage]->isInit ); assert( pCur->eState==CURSOR_INVALID || pCur->apPage[pCur->iPage]->nCell>0 ); if( pCur->eState==CURSOR_INVALID ){ *pRes = -1; assert( pCur->pgnoRoot==0 || pCur->apPage[pCur->iPage]->nCell==0 ); return SQLITE_OK; } assert( pCur->apPage[0]->intKey==pCur->curIntKey ); assert( pCur->curIntKey || pIdxKey ); for(;;){ int lwr, upr, idx, c; Pgno chldPg; MemPage *pPage = pCur->apPage[pCur->iPage]; u8 *pCell; /* Pointer to current cell in pPage */ /* pPage->nCell must be greater than zero. If this is the root-page ** the cursor would have been INVALID above and this for(;;) loop ** not run. If this is not the root-page, then the moveToChild() routine ** would have already detected db corruption. Similarly, pPage must ** be the right kind (index or table) of b-tree page. 
Otherwise ** a moveToChild() or moveToRoot() call would have detected corruption. */ assert( pPage->nCell>0 ); assert( pPage->intKey==(pIdxKey==0) ); lwr = 0; upr = pPage->nCell-1; assert( biasRight==0 || biasRight==1 ); idx = upr>>(1-biasRight); /* idx = biasRight ? upr : (lwr+upr)/2; */ pCur->aiIdx[pCur->iPage] = (u16)idx; if( xRecordCompare==0 ){ for(;;){ i64 nCellKey; pCell = findCellPastPtr(pPage, idx); if( pPage->intKeyLeaf ){ while( 0x80 <= *(pCell++) ){ if( pCell>=pPage->aDataEnd ) return SQLITE_CORRUPT_BKPT; } } getVarint(pCell, (u64*)&nCellKey); if( nCellKeyupr ){ c = -1; break; } }else if( nCellKey>intKey ){ upr = idx-1; if( lwr>upr ){ c = +1; break; } }else{ assert( nCellKey==intKey ); pCur->curFlags |= BTCF_ValidNKey; pCur->info.nKey = nCellKey; pCur->aiIdx[pCur->iPage] = (u16)idx; if( !pPage->leaf ){ lwr = idx; goto moveto_next_layer; }else{ *pRes = 0; rc = SQLITE_OK; goto moveto_finish; } } assert( lwr+upr>=0 ); idx = (lwr+upr)>>1; /* idx = (lwr+upr)/2; */ } }else{ for(;;){ int nCell; /* Size of the pCell cell in bytes */ pCell = findCellPastPtr(pPage, idx); /* The maximum supported page-size is 65536 bytes. This means that ** the maximum number of record bytes stored on an index B-Tree ** page is less than 16384 bytes and may be stored as a 2-byte ** varint. This information is used to attempt to avoid parsing ** the entire cell by checking for the cases where the record is ** stored entirely within the b-tree page by inspecting the first ** 2 bytes of the cell. */ nCell = pCell[0]; if( nCell<=pPage->max1bytePayload ){ /* This branch runs if the record-size field of the cell is a ** single byte varint and the record fits entirely on the main ** b-tree page. */ testcase( pCell+nCell+1==pPage->aDataEnd ); c = xRecordCompare(nCell, (void*)&pCell[1], pIdxKey); }else if( !(pCell[1] & 0x80) && (nCell = ((nCell&0x7f)<<7) + pCell[1])<=pPage->maxLocal ){ /* The record-size field is a 2 byte varint and the record ** fits entirely on the main b-tree page. */ testcase( pCell+nCell+2==pPage->aDataEnd ); c = xRecordCompare(nCell, (void*)&pCell[2], pIdxKey); }else{ /* The record flows over onto one or more overflow pages. In ** this case the whole cell needs to be parsed, a buffer allocated ** and accessPayload() used to retrieve the record into the ** buffer before VdbeRecordCompare() can be called. ** ** If the record is corrupt, the xRecordCompare routine may read ** up to two varints past the end of the buffer. An extra 18 ** bytes of padding is allocated at the end of the buffer in ** case this happens. 
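**
** (Added note: a varint occupies at most 9 bytes, so two varints of
** worst-case over-read are covered by the 9*2 = 18 bytes of padding in
** the nCell+18 allocation below.)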
*/ void *pCellKey; u8 * const pCellBody = pCell - pPage->childPtrSize; pPage->xParseCell(pPage, pCellBody, &pCur->info); nCell = (int)pCur->info.nKey; testcase( nCell<0 ); /* True if key size is 2^32 or more */ testcase( nCell==0 ); /* Invalid key size: 0x80 0x80 0x00 */ testcase( nCell==1 ); /* Invalid key size: 0x80 0x80 0x01 */ testcase( nCell==2 ); /* Minimum legal index key size */ if( nCell<2 ){ rc = SQLITE_CORRUPT_BKPT; goto moveto_finish; } pCellKey = sqlite3Malloc( nCell+18 ); if( pCellKey==0 ){ rc = SQLITE_NOMEM; goto moveto_finish; } pCur->aiIdx[pCur->iPage] = (u16)idx; rc = accessPayload(pCur, 0, nCell, (unsigned char*)pCellKey, 2); if( rc ){ sqlite3_free(pCellKey); goto moveto_finish; } c = xRecordCompare(nCell, pCellKey, pIdxKey); sqlite3_free(pCellKey); } assert( (pIdxKey->errCode!=SQLITE_CORRUPT || c==0) && (pIdxKey->errCode!=SQLITE_NOMEM || pCur->pBtree->db->mallocFailed) ); if( c<0 ){ lwr = idx+1; }else if( c>0 ){ upr = idx-1; }else{ assert( c==0 ); *pRes = 0; rc = SQLITE_OK; pCur->aiIdx[pCur->iPage] = (u16)idx; if( pIdxKey->errCode ) rc = SQLITE_CORRUPT; goto moveto_finish; } if( lwr>upr ) break; assert( lwr+upr>=0 ); idx = (lwr+upr)>>1; /* idx = (lwr+upr)/2 */ } } assert( lwr==upr+1 || (pPage->intKey && !pPage->leaf) ); assert( pPage->isInit ); if( pPage->leaf ){ assert( pCur->aiIdx[pCur->iPage]apPage[pCur->iPage]->nCell ); pCur->aiIdx[pCur->iPage] = (u16)idx; *pRes = c; rc = SQLITE_OK; goto moveto_finish; } moveto_next_layer: if( lwr>=pPage->nCell ){ chldPg = get4byte(&pPage->aData[pPage->hdrOffset+8]); }else{ chldPg = get4byte(findCell(pPage, lwr)); } pCur->aiIdx[pCur->iPage] = (u16)lwr; rc = moveToChild(pCur, chldPg); if( rc ) break; } moveto_finish: pCur->info.nSize = 0; pCur->curFlags &= ~(BTCF_ValidNKey|BTCF_ValidOvfl); return rc; } /* ** Return TRUE if the cursor is not pointing at an entry of the table. ** ** TRUE will be returned after a call to sqlite3BtreeNext() moves ** past the last entry in the table or sqlite3BtreePrev() moves past ** the first entry. TRUE is also returned if the table is empty. */ SQLITE_PRIVATE int sqlite3BtreeEof(BtCursor *pCur){ /* TODO: What if the cursor is in CURSOR_REQUIRESEEK but all table entries ** have been deleted? This API will need to change to return an error code ** as well as the boolean result value. */ return (CURSOR_VALID!=pCur->eState); } /* ** Advance the cursor to the next entry in the database. If ** successful then set *pRes=0. If the cursor ** was already pointing to the last entry in the database before ** this routine was called, then set *pRes=1. ** ** The main entry point is sqlite3BtreeNext(). That routine is optimized ** for the common case of merely incrementing the cell counter BtCursor.aiIdx ** to the next cell on the current page. The (slower) btreeNext() helper ** routine is called when it is necessary to move to a different page or ** to restore the cursor. ** ** The calling function will set *pRes to 0 or 1. The initial *pRes value ** will be 1 if the cursor being stepped corresponds to an SQL index and ** if this routine could have been skipped if that SQL index had been ** a unique index. Otherwise the caller will have set *pRes to zero. ** Zero is the common case. The btree implementation is free to use the ** initial *pRes value as a hint to improve performance, but the current ** SQLite btree implementation does not. (Note that the comdb2 btree ** implementation does use this hint, however.) 
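**
** As an illustration only (the loop shape is an assumption about typical
** use, not something defined here), a full forward scan of a table looks
** roughly like:
**
**     int res = 0;
**     int rc = sqlite3BtreeFirst(pCur, &res);
**     while( rc==SQLITE_OK && res==0 ){
**       ... read the current entry, e.g. with sqlite3BtreeKey() ...
**       rc = sqlite3BtreeNext(pCur, &res);
**     }
**
** where res is set to 1 once the cursor has moved past the last entry
** (or the table was empty to begin with).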
*/ static SQLITE_NOINLINE int btreeNext(BtCursor *pCur, int *pRes){ int rc; int idx; MemPage *pPage; assert( cursorHoldsMutex(pCur) ); assert( pCur->skipNext==0 || pCur->eState!=CURSOR_VALID ); assert( *pRes==0 ); if( pCur->eState!=CURSOR_VALID ){ assert( (pCur->curFlags & BTCF_ValidOvfl)==0 ); rc = restoreCursorPosition(pCur); if( rc!=SQLITE_OK ){ return rc; } if( CURSOR_INVALID==pCur->eState ){ *pRes = 1; return SQLITE_OK; } if( pCur->skipNext ){ assert( pCur->eState==CURSOR_VALID || pCur->eState==CURSOR_SKIPNEXT ); pCur->eState = CURSOR_VALID; if( pCur->skipNext>0 ){ pCur->skipNext = 0; return SQLITE_OK; } pCur->skipNext = 0; } } pPage = pCur->apPage[pCur->iPage]; idx = ++pCur->aiIdx[pCur->iPage]; assert( pPage->isInit ); /* If the database file is corrupt, it is possible for the value of idx ** to be invalid here. This can only occur if a second cursor modifies ** the page while cursor pCur is holding a reference to it. Which can ** only happen if the database is corrupt in such a way as to link the ** page into more than one b-tree structure. */ testcase( idx>pPage->nCell ); if( idx>=pPage->nCell ){ if( !pPage->leaf ){ rc = moveToChild(pCur, get4byte(&pPage->aData[pPage->hdrOffset+8])); if( rc ) return rc; return moveToLeftmost(pCur); } do{ if( pCur->iPage==0 ){ *pRes = 1; pCur->eState = CURSOR_INVALID; return SQLITE_OK; } moveToParent(pCur); pPage = pCur->apPage[pCur->iPage]; }while( pCur->aiIdx[pCur->iPage]>=pPage->nCell ); if( pPage->intKey ){ return sqlite3BtreeNext(pCur, pRes); }else{ return SQLITE_OK; } } if( pPage->leaf ){ return SQLITE_OK; }else{ return moveToLeftmost(pCur); } } SQLITE_PRIVATE int sqlite3BtreeNext(BtCursor *pCur, int *pRes){ MemPage *pPage; assert( cursorHoldsMutex(pCur) ); assert( pRes!=0 ); assert( *pRes==0 || *pRes==1 ); assert( pCur->skipNext==0 || pCur->eState!=CURSOR_VALID ); pCur->info.nSize = 0; pCur->curFlags &= ~(BTCF_ValidNKey|BTCF_ValidOvfl); *pRes = 0; if( pCur->eState!=CURSOR_VALID ) return btreeNext(pCur, pRes); pPage = pCur->apPage[pCur->iPage]; if( (++pCur->aiIdx[pCur->iPage])>=pPage->nCell ){ pCur->aiIdx[pCur->iPage]--; return btreeNext(pCur, pRes); } if( pPage->leaf ){ return SQLITE_OK; }else{ return moveToLeftmost(pCur); } } /* ** Step the cursor to the back to the previous entry in the database. If ** successful then set *pRes=0. If the cursor ** was already pointing to the first entry in the database before ** this routine was called, then set *pRes=1. ** ** The main entry point is sqlite3BtreePrevious(). That routine is optimized ** for the common case of merely decrementing the cell counter BtCursor.aiIdx ** to the previous cell on the current page. The (slower) btreePrevious() ** helper routine is called when it is necessary to move to a different page ** or to restore the cursor. ** ** The calling function will set *pRes to 0 or 1. The initial *pRes value ** will be 1 if the cursor being stepped corresponds to an SQL index and ** if this routine could have been skipped if that SQL index had been ** a unique index. Otherwise the caller will have set *pRes to zero. ** Zero is the common case. The btree implementation is free to use the ** initial *pRes value as a hint to improve performance, but the current ** SQLite btree implementation does not. (Note that the comdb2 btree ** implementation does use this hint, however.) 
*/ static SQLITE_NOINLINE int btreePrevious(BtCursor *pCur, int *pRes){ int rc; MemPage *pPage; assert( cursorHoldsMutex(pCur) ); assert( pRes!=0 ); assert( *pRes==0 ); assert( pCur->skipNext==0 || pCur->eState!=CURSOR_VALID ); assert( (pCur->curFlags & (BTCF_AtLast|BTCF_ValidOvfl|BTCF_ValidNKey))==0 ); assert( pCur->info.nSize==0 ); if( pCur->eState!=CURSOR_VALID ){ rc = restoreCursorPosition(pCur); if( rc!=SQLITE_OK ){ return rc; } if( CURSOR_INVALID==pCur->eState ){ *pRes = 1; return SQLITE_OK; } if( pCur->skipNext ){ assert( pCur->eState==CURSOR_VALID || pCur->eState==CURSOR_SKIPNEXT ); pCur->eState = CURSOR_VALID; if( pCur->skipNext<0 ){ pCur->skipNext = 0; return SQLITE_OK; } pCur->skipNext = 0; } } pPage = pCur->apPage[pCur->iPage]; assert( pPage->isInit ); if( !pPage->leaf ){ int idx = pCur->aiIdx[pCur->iPage]; rc = moveToChild(pCur, get4byte(findCell(pPage, idx))); if( rc ) return rc; rc = moveToRightmost(pCur); }else{ while( pCur->aiIdx[pCur->iPage]==0 ){ if( pCur->iPage==0 ){ pCur->eState = CURSOR_INVALID; *pRes = 1; return SQLITE_OK; } moveToParent(pCur); } assert( pCur->info.nSize==0 ); assert( (pCur->curFlags & (BTCF_ValidNKey|BTCF_ValidOvfl))==0 ); pCur->aiIdx[pCur->iPage]--; pPage = pCur->apPage[pCur->iPage]; if( pPage->intKey && !pPage->leaf ){ rc = sqlite3BtreePrevious(pCur, pRes); }else{ rc = SQLITE_OK; } } return rc; } SQLITE_PRIVATE int sqlite3BtreePrevious(BtCursor *pCur, int *pRes){ assert( cursorHoldsMutex(pCur) ); assert( pRes!=0 ); assert( *pRes==0 || *pRes==1 ); assert( pCur->skipNext==0 || pCur->eState!=CURSOR_VALID ); *pRes = 0; pCur->curFlags &= ~(BTCF_AtLast|BTCF_ValidOvfl|BTCF_ValidNKey); pCur->info.nSize = 0; if( pCur->eState!=CURSOR_VALID || pCur->aiIdx[pCur->iPage]==0 || pCur->apPage[pCur->iPage]->leaf==0 ){ return btreePrevious(pCur, pRes); } pCur->aiIdx[pCur->iPage]--; return SQLITE_OK; } /* ** Allocate a new page from the database file. ** ** The new page is marked as dirty. (In other words, sqlite3PagerWrite() ** has already been called on the new page.) The new page has also ** been referenced and the calling routine is responsible for calling ** sqlite3PagerUnref() on the new page when it is done. ** ** SQLITE_OK is returned on success. Any other return value indicates ** an error. *ppPage is set to NULL in the event of an error. ** ** If the "nearby" parameter is not 0, then an effort is made to ** locate a page close to the page number "nearby". This can be used in an ** attempt to keep related pages close to each other in the database file, ** which in turn can make database access faster. ** ** If the eMode parameter is BTALLOC_EXACT and the nearby page exists ** anywhere on the free-list, then it is guaranteed to be returned. If ** eMode is BTALLOC_LT then the page returned will be less than or equal ** to nearby if any such page exists. If eMode is BTALLOC_ANY then there ** are no restrictions on which page is returned. 
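**
** For reference (an added summary derived from the trunk-page handling
** below), a freelist trunk page is laid out as:
**
**     offset 0    4-byte page number of the next trunk page (0 if none)
**     offset 4    4-byte count k of leaf page numbers that follow
**     offset 8    k 4-byte leaf page numbers
**
** which is why a value of k greater than usableSize/4 - 2 is treated as
** database corruption below.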
*/ static int allocateBtreePage( BtShared *pBt, /* The btree */ MemPage **ppPage, /* Store pointer to the allocated page here */ Pgno *pPgno, /* Store the page number here */ Pgno nearby, /* Search for a page near this one */ u8 eMode /* BTALLOC_EXACT, BTALLOC_LT, or BTALLOC_ANY */ ){ MemPage *pPage1; int rc; u32 n; /* Number of pages on the freelist */ u32 k; /* Number of leaves on the trunk of the freelist */ MemPage *pTrunk = 0; MemPage *pPrevTrunk = 0; Pgno mxPage; /* Total size of the database file */ assert( sqlite3_mutex_held(pBt->mutex) ); assert( eMode==BTALLOC_ANY || (nearby>0 && IfNotOmitAV(pBt->autoVacuum)) ); pPage1 = pBt->pPage1; mxPage = btreePagecount(pBt); /* EVIDENCE-OF: R-05119-02637 The 4-byte big-endian integer at offset 36 ** stores stores the total number of pages on the freelist. */ n = get4byte(&pPage1->aData[36]); testcase( n==mxPage-1 ); if( n>=mxPage ){ return SQLITE_CORRUPT_BKPT; } if( n>0 ){ /* There are pages on the freelist. Reuse one of those pages. */ Pgno iTrunk; u8 searchList = 0; /* If the free-list must be searched for 'nearby' */ u32 nSearch = 0; /* Count of the number of search attempts */ /* If eMode==BTALLOC_EXACT and a query of the pointer-map ** shows that the page 'nearby' is somewhere on the free-list, then ** the entire-list will be searched for that page. */ #ifndef SQLITE_OMIT_AUTOVACUUM if( eMode==BTALLOC_EXACT ){ if( nearby<=mxPage ){ u8 eType; assert( nearby>0 ); assert( pBt->autoVacuum ); rc = ptrmapGet(pBt, nearby, &eType, 0); if( rc ) return rc; if( eType==PTRMAP_FREEPAGE ){ searchList = 1; } } }else if( eMode==BTALLOC_LE ){ searchList = 1; } #endif /* Decrement the free-list count by 1. Set iTrunk to the index of the ** first free-list trunk page. iPrevTrunk is initially 1. */ rc = sqlite3PagerWrite(pPage1->pDbPage); if( rc ) return rc; put4byte(&pPage1->aData[36], n-1); /* The code within this loop is run only once if the 'searchList' variable ** is not true. Otherwise, it runs once for each trunk-page on the ** free-list until the page 'nearby' is located (eMode==BTALLOC_EXACT) ** or until a page less than 'nearby' is located (eMode==BTALLOC_LT) */ do { pPrevTrunk = pTrunk; if( pPrevTrunk ){ /* EVIDENCE-OF: R-01506-11053 The first integer on a freelist trunk page ** is the page number of the next freelist trunk page in the list or ** zero if this is the last freelist trunk page. */ iTrunk = get4byte(&pPrevTrunk->aData[0]); }else{ /* EVIDENCE-OF: R-59841-13798 The 4-byte big-endian integer at offset 32 ** stores the page number of the first page of the freelist, or zero if ** the freelist is empty. */ iTrunk = get4byte(&pPage1->aData[32]); } testcase( iTrunk==mxPage ); if( iTrunk>mxPage || nSearch++ > n ){ rc = SQLITE_CORRUPT_BKPT; }else{ rc = btreeGetUnusedPage(pBt, iTrunk, &pTrunk, 0); } if( rc ){ pTrunk = 0; goto end_allocate_page; } assert( pTrunk!=0 ); assert( pTrunk->aData!=0 ); /* EVIDENCE-OF: R-13523-04394 The second integer on a freelist trunk page ** is the number of leaf page pointers to follow. */ k = get4byte(&pTrunk->aData[4]); if( k==0 && !searchList ){ /* The trunk has no leaves and the list is not being searched. 
** So extract the trunk page itself and use it as the newly ** allocated page */ assert( pPrevTrunk==0 ); rc = sqlite3PagerWrite(pTrunk->pDbPage); if( rc ){ goto end_allocate_page; } *pPgno = iTrunk; memcpy(&pPage1->aData[32], &pTrunk->aData[0], 4); *ppPage = pTrunk; pTrunk = 0; TRACE(("ALLOCATE: %d trunk - %d free pages left\n", *pPgno, n-1)); }else if( k>(u32)(pBt->usableSize/4 - 2) ){ /* Value of k is out of range. Database corruption */ rc = SQLITE_CORRUPT_BKPT; goto end_allocate_page; #ifndef SQLITE_OMIT_AUTOVACUUM }else if( searchList && (nearby==iTrunk || (iTrunkpDbPage); if( rc ){ goto end_allocate_page; } if( k==0 ){ if( !pPrevTrunk ){ memcpy(&pPage1->aData[32], &pTrunk->aData[0], 4); }else{ rc = sqlite3PagerWrite(pPrevTrunk->pDbPage); if( rc!=SQLITE_OK ){ goto end_allocate_page; } memcpy(&pPrevTrunk->aData[0], &pTrunk->aData[0], 4); } }else{ /* The trunk page is required by the caller but it contains ** pointers to free-list leaves. The first leaf becomes a trunk ** page in this case. */ MemPage *pNewTrunk; Pgno iNewTrunk = get4byte(&pTrunk->aData[8]); if( iNewTrunk>mxPage ){ rc = SQLITE_CORRUPT_BKPT; goto end_allocate_page; } testcase( iNewTrunk==mxPage ); rc = btreeGetUnusedPage(pBt, iNewTrunk, &pNewTrunk, 0); if( rc!=SQLITE_OK ){ goto end_allocate_page; } rc = sqlite3PagerWrite(pNewTrunk->pDbPage); if( rc!=SQLITE_OK ){ releasePage(pNewTrunk); goto end_allocate_page; } memcpy(&pNewTrunk->aData[0], &pTrunk->aData[0], 4); put4byte(&pNewTrunk->aData[4], k-1); memcpy(&pNewTrunk->aData[8], &pTrunk->aData[12], (k-1)*4); releasePage(pNewTrunk); if( !pPrevTrunk ){ assert( sqlite3PagerIswriteable(pPage1->pDbPage) ); put4byte(&pPage1->aData[32], iNewTrunk); }else{ rc = sqlite3PagerWrite(pPrevTrunk->pDbPage); if( rc ){ goto end_allocate_page; } put4byte(&pPrevTrunk->aData[0], iNewTrunk); } } pTrunk = 0; TRACE(("ALLOCATE: %d trunk - %d free pages left\n", *pPgno, n-1)); #endif }else if( k>0 ){ /* Extract a leaf from the trunk */ u32 closest; Pgno iPage; unsigned char *aData = pTrunk->aData; if( nearby>0 ){ u32 i; closest = 0; if( eMode==BTALLOC_LE ){ for(i=0; imxPage ){ rc = SQLITE_CORRUPT_BKPT; goto end_allocate_page; } testcase( iPage==mxPage ); if( !searchList || (iPage==nearby || (iPagepgno, n-1)); rc = sqlite3PagerWrite(pTrunk->pDbPage); if( rc ) goto end_allocate_page; if( closestpDbPage); if( rc!=SQLITE_OK ){ releasePage(*ppPage); *ppPage = 0; } } searchList = 0; } } releasePage(pPrevTrunk); pPrevTrunk = 0; }while( searchList ); }else{ /* There are no pages on the freelist, so append a new page to the ** database image. ** ** Normally, new pages allocated by this block can be requested from the ** pager layer with the 'no-content' flag set. This prevents the pager ** from trying to read the pages content from disk. However, if the ** current transaction has already run one or more incremental-vacuum ** steps, then the page we are about to allocate may contain content ** that is required in the event of a rollback. In this case, do ** not set the no-content flag. This causes the pager to load and journal ** the current page content before overwriting it. ** ** Note that the pager will not actually attempt to load or journal ** content for any page that really does lie past the end of the database ** file on disk. So the effects of disabling the no-content optimization ** here are confined to those pages that lie between the end of the ** database image and the end of the database file. */ int bNoContent = (0==IfNotOmitAV(pBt->bDoTruncate))? 
PAGER_GET_NOCONTENT:0; rc = sqlite3PagerWrite(pBt->pPage1->pDbPage); if( rc ) return rc; pBt->nPage++; if( pBt->nPage==PENDING_BYTE_PAGE(pBt) ) pBt->nPage++; #ifndef SQLITE_OMIT_AUTOVACUUM if( pBt->autoVacuum && PTRMAP_ISPAGE(pBt, pBt->nPage) ){ /* If *pPgno refers to a pointer-map page, allocate two new pages ** at the end of the file instead of one. The first allocated page ** becomes a new pointer-map page, the second is used by the caller. */ MemPage *pPg = 0; TRACE(("ALLOCATE: %d from end of file (pointer-map page)\n", pBt->nPage)); assert( pBt->nPage!=PENDING_BYTE_PAGE(pBt) ); rc = btreeGetUnusedPage(pBt, pBt->nPage, &pPg, bNoContent); if( rc==SQLITE_OK ){ rc = sqlite3PagerWrite(pPg->pDbPage); releasePage(pPg); } if( rc ) return rc; pBt->nPage++; if( pBt->nPage==PENDING_BYTE_PAGE(pBt) ){ pBt->nPage++; } } #endif put4byte(28 + (u8*)pBt->pPage1->aData, pBt->nPage); *pPgno = pBt->nPage; assert( *pPgno!=PENDING_BYTE_PAGE(pBt) ); rc = btreeGetUnusedPage(pBt, *pPgno, ppPage, bNoContent); if( rc ) return rc; rc = sqlite3PagerWrite((*ppPage)->pDbPage); if( rc!=SQLITE_OK ){ releasePage(*ppPage); *ppPage = 0; } TRACE(("ALLOCATE: %d from end of file\n", *pPgno)); } assert( *pPgno!=PENDING_BYTE_PAGE(pBt) ); end_allocate_page: releasePage(pTrunk); releasePage(pPrevTrunk); assert( rc!=SQLITE_OK || sqlite3PagerPageRefcount((*ppPage)->pDbPage)<=1 ); assert( rc!=SQLITE_OK || (*ppPage)->isInit==0 ); return rc; } /* ** This function is used to add page iPage to the database file free-list. ** It is assumed that the page is not already a part of the free-list. ** ** The value passed as the second argument to this function is optional. ** If the caller happens to have a pointer to the MemPage object ** corresponding to page iPage handy, it may pass it as the second value. ** Otherwise, it may pass NULL. ** ** If a pointer to a MemPage object is passed as the second argument, ** its reference count is not altered by this function. */ static int freePage2(BtShared *pBt, MemPage *pMemPage, Pgno iPage){ MemPage *pTrunk = 0; /* Free-list trunk page */ Pgno iTrunk = 0; /* Page number of free-list trunk page */ MemPage *pPage1 = pBt->pPage1; /* Local reference to page 1 */ MemPage *pPage; /* Page being freed. May be NULL. */ int rc; /* Return Code */ int nFree; /* Initial number of pages on free-list */ assert( sqlite3_mutex_held(pBt->mutex) ); assert( CORRUPT_DB || iPage>1 ); assert( !pMemPage || pMemPage->pgno==iPage ); if( iPage<2 ) return SQLITE_CORRUPT_BKPT; if( pMemPage ){ pPage = pMemPage; sqlite3PagerRef(pPage->pDbPage); }else{ pPage = btreePageLookup(pBt, iPage); } /* Increment the free page count on pPage1 */ rc = sqlite3PagerWrite(pPage1->pDbPage); if( rc ) goto freepage_out; nFree = get4byte(&pPage1->aData[36]); put4byte(&pPage1->aData[36], nFree+1); if( pBt->btsFlags & BTS_SECURE_DELETE ){ /* If the secure_delete option is enabled, then ** always fully overwrite deleted information with zeros. */ if( (!pPage && ((rc = btreeGetPage(pBt, iPage, &pPage, 0))!=0) ) || ((rc = sqlite3PagerWrite(pPage->pDbPage))!=0) ){ goto freepage_out; } memset(pPage->aData, 0, pPage->pBt->pageSize); } /* If the database supports auto-vacuum, write an entry in the pointer-map ** to indicate that the page is free. */ if( ISAUTOVACUUM ){ ptrmapPut(pBt, iPage, PTRMAP_FREEPAGE, 0, &rc); if( rc ) goto freepage_out; } /* Now manipulate the actual database free-list structure. There are two ** possibilities. 
If the free-list is currently empty, or if the first ** trunk page in the free-list is full, then this page will become a ** new free-list trunk page. Otherwise, it will become a leaf of the ** first trunk page in the current free-list. This block tests if it ** is possible to add the page as a new free-list leaf. */ if( nFree!=0 ){ u32 nLeaf; /* Initial number of leaf cells on trunk page */ iTrunk = get4byte(&pPage1->aData[32]); rc = btreeGetPage(pBt, iTrunk, &pTrunk, 0); if( rc!=SQLITE_OK ){ goto freepage_out; } nLeaf = get4byte(&pTrunk->aData[4]); assert( pBt->usableSize>32 ); if( nLeaf > (u32)pBt->usableSize/4 - 2 ){ rc = SQLITE_CORRUPT_BKPT; goto freepage_out; } if( nLeaf < (u32)pBt->usableSize/4 - 8 ){ /* In this case there is room on the trunk page to insert the page ** being freed as a new leaf. ** ** Note that the trunk page is not really full until it contains ** usableSize/4 - 2 entries, not usableSize/4 - 8 entries as we have ** coded. But due to a coding error in versions of SQLite prior to ** 3.6.0, databases with freelist trunk pages holding more than ** usableSize/4 - 8 entries will be reported as corrupt. In order ** to maintain backwards compatibility with older versions of SQLite, ** we will continue to restrict the number of entries to usableSize/4 - 8 ** for now. At some point in the future (once everyone has upgraded ** to 3.6.0 or later) we should consider fixing the conditional above ** to read "usableSize/4-2" instead of "usableSize/4-8". ** ** EVIDENCE-OF: R-19920-11576 However, newer versions of SQLite still ** avoid using the last six entries in the freelist trunk page array in ** order that database files created by newer versions of SQLite can be ** read by older versions of SQLite. */ rc = sqlite3PagerWrite(pTrunk->pDbPage); if( rc==SQLITE_OK ){ put4byte(&pTrunk->aData[4], nLeaf+1); put4byte(&pTrunk->aData[8+nLeaf*4], iPage); if( pPage && (pBt->btsFlags & BTS_SECURE_DELETE)==0 ){ sqlite3PagerDontWrite(pPage->pDbPage); } rc = btreeSetHasContent(pBt, iPage); } TRACE(("FREE-PAGE: %d leaf on trunk page %d\n",pPage->pgno,pTrunk->pgno)); goto freepage_out; } } /* If control flows to this point, then it was not possible to add the ** the page being freed as a leaf page of the first trunk in the free-list. ** Possibly because the free-list is empty, or possibly because the ** first trunk in the free-list is full. Either way, the page being freed ** will become the new first trunk page in the free-list. */ if( pPage==0 && SQLITE_OK!=(rc = btreeGetPage(pBt, iPage, &pPage, 0)) ){ goto freepage_out; } rc = sqlite3PagerWrite(pPage->pDbPage); if( rc!=SQLITE_OK ){ goto freepage_out; } put4byte(pPage->aData, iTrunk); put4byte(&pPage->aData[4], 0); put4byte(&pPage1->aData[32], iPage); TRACE(("FREE-PAGE: %d new trunk page replacing %d\n", pPage->pgno, iTrunk)); freepage_out: if( pPage ){ pPage->isInit = 0; } releasePage(pPage); releasePage(pTrunk); return rc; } static void freePage(MemPage *pPage, int *pRC){ if( (*pRC)==SQLITE_OK ){ *pRC = freePage2(pPage->pBt, pPage, pPage->pgno); } } /* ** Free any overflow pages associated with the given Cell. Write the ** local Cell size (the number of bytes on the original page, omitting ** overflow) into *pnSize. 
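**
** For clarity (an added note): the cell stores the page number of the
** first overflow page in its last four local bytes, at offset
** info.iOverflow.  Each overflow page then begins with a 4-byte page
** number of the next page in the chain (zero on the last page), followed
** by up to ovflPageSize = usableSize-4 bytes of payload, so the chain
** contains
**
**     nOvfl = (info.nPayload - info.nLocal + ovflPageSize - 1)/ovflPageSize
**
** pages, exactly as computed in clearCell() below.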
*/ static int clearCell( MemPage *pPage, /* The page that contains the Cell */ unsigned char *pCell, /* First byte of the Cell */ u16 *pnSize /* Write the size of the Cell here */ ){ BtShared *pBt = pPage->pBt; CellInfo info; Pgno ovflPgno; int rc; int nOvfl; u32 ovflPageSize; assert( sqlite3_mutex_held(pPage->pBt->mutex) ); pPage->xParseCell(pPage, pCell, &info); *pnSize = info.nSize; if( info.iOverflow==0 ){ return SQLITE_OK; /* No overflow pages. Return without doing anything */ } if( pCell+info.iOverflow+3 > pPage->aData+pPage->maskPage ){ return SQLITE_CORRUPT_BKPT; /* Cell extends past end of page */ } ovflPgno = get4byte(&pCell[info.iOverflow]); assert( pBt->usableSize > 4 ); ovflPageSize = pBt->usableSize - 4; nOvfl = (info.nPayload - info.nLocal + ovflPageSize - 1)/ovflPageSize; assert( nOvfl>0 || (CORRUPT_DB && (info.nPayload + ovflPageSize)btreePagecount(pBt) ){ /* 0 is not a legal page number and page 1 cannot be an ** overflow page. Therefore if ovflPgno<2 or past the end of the ** file the database must be corrupt. */ return SQLITE_CORRUPT_BKPT; } if( nOvfl ){ rc = getOverflowPage(pBt, ovflPgno, &pOvfl, &iNext); if( rc ) return rc; } if( ( pOvfl || ((pOvfl = btreePageLookup(pBt, ovflPgno))!=0) ) && sqlite3PagerPageRefcount(pOvfl->pDbPage)!=1 ){ /* There is no reason any cursor should have an outstanding reference ** to an overflow page belonging to a cell that is being deleted/updated. ** So if there exists more than one reference to this page, then it ** must not really be an overflow page and the database must be corrupt. ** It is helpful to detect this before calling freePage2(), as ** freePage2() may zero the page contents if secure-delete mode is ** enabled. If this 'overflow' page happens to be a page that the ** caller is iterating through or using in some other way, this ** can be problematic. */ rc = SQLITE_CORRUPT_BKPT; }else{ rc = freePage2(pBt, pOvfl, ovflPgno); } if( pOvfl ){ sqlite3PagerUnref(pOvfl->pDbPage); } if( rc ) return rc; ovflPgno = iNext; } return SQLITE_OK; } /* ** Create the byte sequence used to represent a cell on page pPage ** and write that byte sequence into pCell[]. Overflow pages are ** allocated and filled in as necessary. The calling procedure ** is responsible for making sure sufficient space has been allocated ** for pCell[]. ** ** Note that pCell does not necessary need to point to the pPage->aData ** area. pCell might point to some temporary storage. The cell will ** be constructed in this temporary area then copied into pPage->aData ** later. */ static int fillInCell( MemPage *pPage, /* The page that contains the cell */ unsigned char *pCell, /* Complete text of the cell */ const void *pKey, i64 nKey, /* The key */ const void *pData,int nData, /* The data */ int nZero, /* Extra zero bytes to append to pData */ int *pnSize /* Write cell size here */ ){ int nPayload; const u8 *pSrc; int nSrc, n, rc; int spaceLeft; MemPage *pOvfl = 0; MemPage *pToRelease = 0; unsigned char *pPrior; unsigned char *pPayload; BtShared *pBt = pPage->pBt; Pgno pgnoOvfl = 0; int nHeader; assert( sqlite3_mutex_held(pPage->pBt->mutex) ); /* pPage is not necessarily writeable since pCell might be auxiliary ** buffer space that is separate from the pPage buffer area */ assert( pCellaData || pCell>=&pPage->aData[pBt->pageSize] || sqlite3PagerIswriteable(pPage->pDbPage) ); /* Fill in the header. 
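**
** An informal summary (added note) of the header assembled below:
**
**     4-byte child page number    present only when childPtrSize==4
**                                 (interior pages)
**     varint payload size         written only when pPage->intKeyLeaf
**     varint nKey                 the rowid on table b-trees; on index
**                                 b-trees nKey is the payload (key) size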
*/ nHeader = pPage->childPtrSize; nPayload = nData + nZero; if( pPage->intKeyLeaf ){ nHeader += putVarint32(&pCell[nHeader], nPayload); }else{ assert( nData==0 ); assert( nZero==0 ); } nHeader += putVarint(&pCell[nHeader], *(u64*)&nKey); /* Fill in the payload size */ if( pPage->intKey ){ pSrc = pData; nSrc = nData; nData = 0; }else{ assert( nKey<=0x7fffffff && pKey!=0 ); nPayload = (int)nKey; pSrc = pKey; nSrc = (int)nKey; } if( nPayload<=pPage->maxLocal ){ n = nHeader + nPayload; testcase( n==3 ); testcase( n==4 ); if( n<4 ) n = 4; *pnSize = n; spaceLeft = nPayload; pPrior = pCell; }else{ int mn = pPage->minLocal; n = mn + (nPayload - mn) % (pPage->pBt->usableSize - 4); testcase( n==pPage->maxLocal ); testcase( n==pPage->maxLocal+1 ); if( n > pPage->maxLocal ) n = mn; spaceLeft = n; *pnSize = n + nHeader + 4; pPrior = &pCell[nHeader+n]; } pPayload = &pCell[nHeader]; /* At this point variables should be set as follows: ** ** nPayload Total payload size in bytes ** pPayload Begin writing payload here ** spaceLeft Space available at pPayload. If nPayload>spaceLeft, ** that means content must spill into overflow pages. ** *pnSize Size of the local cell (not counting overflow pages) ** pPrior Where to write the pgno of the first overflow page ** ** Use a call to btreeParseCellPtr() to verify that the values above ** were computed correctly. */ #if SQLITE_DEBUG { CellInfo info; pPage->xParseCell(pPage, pCell, &info); assert( nHeader=(int)(info.pPayload - pCell) ); assert( info.nKey==nKey ); assert( *pnSize == info.nSize ); assert( spaceLeft == info.nLocal ); assert( pPrior == &pCell[info.iOverflow] ); } #endif /* Write the payload into the local Cell and any extra into overflow pages */ while( nPayload>0 ){ if( spaceLeft==0 ){ #ifndef SQLITE_OMIT_AUTOVACUUM Pgno pgnoPtrmap = pgnoOvfl; /* Overflow page pointer-map entry page */ if( pBt->autoVacuum ){ do{ pgnoOvfl++; } while( PTRMAP_ISPAGE(pBt, pgnoOvfl) || pgnoOvfl==PENDING_BYTE_PAGE(pBt) ); } #endif rc = allocateBtreePage(pBt, &pOvfl, &pgnoOvfl, pgnoOvfl, 0); #ifndef SQLITE_OMIT_AUTOVACUUM /* If the database supports auto-vacuum, and the second or subsequent ** overflow page is being allocated, add an entry to the pointer-map ** for that page now. ** ** If this is the first overflow page, then write a partial entry ** to the pointer-map. If we write nothing to this pointer-map slot, ** then the optimistic overflow chain processing in clearCell() ** may misinterpret the uninitialized values and delete the ** wrong pages from the database. */ if( pBt->autoVacuum && rc==SQLITE_OK ){ u8 eType = (pgnoPtrmap?PTRMAP_OVERFLOW2:PTRMAP_OVERFLOW1); ptrmapPut(pBt, pgnoOvfl, eType, pgnoPtrmap, &rc); if( rc ){ releasePage(pOvfl); } } #endif if( rc ){ releasePage(pToRelease); return rc; } /* If pToRelease is not zero than pPrior points into the data area ** of pToRelease. Make sure pToRelease is still writeable. */ assert( pToRelease==0 || sqlite3PagerIswriteable(pToRelease->pDbPage) ); /* If pPrior is part of the data area of pPage, then make sure pPage ** is still writeable */ assert( pPrioraData || pPrior>=&pPage->aData[pBt->pageSize] || sqlite3PagerIswriteable(pPage->pDbPage) ); put4byte(pPrior, pgnoOvfl); releasePage(pToRelease); pToRelease = pOvfl; pPrior = pOvfl->aData; put4byte(pPrior, 0); pPayload = &pOvfl->aData[4]; spaceLeft = pBt->usableSize - 4; } n = nPayload; if( n>spaceLeft ) n = spaceLeft; /* If pToRelease is not zero than pPayload points into the data area ** of pToRelease. Make sure pToRelease is still writeable. 
*/ assert( pToRelease==0 || sqlite3PagerIswriteable(pToRelease->pDbPage) ); /* If pPayload is part of the data area of pPage, then make sure pPage ** is still writeable */ assert( pPayloadaData || pPayload>=&pPage->aData[pBt->pageSize] || sqlite3PagerIswriteable(pPage->pDbPage) ); if( nSrc>0 ){ if( n>nSrc ) n = nSrc; assert( pSrc ); memcpy(pPayload, pSrc, n); }else{ memset(pPayload, 0, n); } nPayload -= n; pPayload += n; pSrc += n; nSrc -= n; spaceLeft -= n; if( nSrc==0 ){ nSrc = nData; pSrc = pData; } } releasePage(pToRelease); return SQLITE_OK; } /* ** Remove the i-th cell from pPage. This routine effects pPage only. ** The cell content is not freed or deallocated. It is assumed that ** the cell content has been copied someplace else. This routine just ** removes the reference to the cell from pPage. ** ** "sz" must be the number of bytes in the cell. */ static void dropCell(MemPage *pPage, int idx, int sz, int *pRC){ u32 pc; /* Offset to cell content of cell being deleted */ u8 *data; /* pPage->aData */ u8 *ptr; /* Used to move bytes around within data[] */ int rc; /* The return code */ int hdr; /* Beginning of the header. 0 most pages. 100 page 1 */ if( *pRC ) return; assert( idx>=0 && idxnCell ); assert( CORRUPT_DB || sz==cellSize(pPage, idx) ); assert( sqlite3PagerIswriteable(pPage->pDbPage) ); assert( sqlite3_mutex_held(pPage->pBt->mutex) ); data = pPage->aData; ptr = &pPage->aCellIdx[2*idx]; pc = get2byte(ptr); hdr = pPage->hdrOffset; testcase( pc==get2byte(&data[hdr+5]) ); testcase( pc+sz==pPage->pBt->usableSize ); if( pc < (u32)get2byte(&data[hdr+5]) || pc+sz > pPage->pBt->usableSize ){ *pRC = SQLITE_CORRUPT_BKPT; return; } rc = freeSpace(pPage, pc, sz); if( rc ){ *pRC = rc; return; } pPage->nCell--; if( pPage->nCell==0 ){ memset(&data[hdr+1], 0, 4); data[hdr+7] = 0; put2byte(&data[hdr+5], pPage->pBt->usableSize); pPage->nFree = pPage->pBt->usableSize - pPage->hdrOffset - pPage->childPtrSize - 8; }else{ memmove(ptr, ptr+2, 2*(pPage->nCell - idx)); put2byte(&data[hdr+3], pPage->nCell); pPage->nFree += 2; } } /* ** Insert a new cell on pPage at cell index "i". pCell points to the ** content of the cell. ** ** If the cell content will fit on the page, then put it there. If it ** will not fit, then make a copy of the cell content into pTemp if ** pTemp is not null. Regardless of pTemp, allocate a new entry ** in pPage->apOvfl[] and make it point to the cell content (either ** in pTemp or the original pCell) and also record its index. ** Allocating a new entry in pPage->aCell[] implies that ** pPage->nOverflow is incremented. 
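**
** Informally (an added note): after one or more cells have been pushed
** into apOvfl[], the logical content of the page is the stored cells with
** each apOvfl[j] occupying logical index aiOvfl[j].  For example, with
** nCell==3 stored cells s0,s1,s2 and one overflow cell with aiOvfl[0]==2,
** the page behaves as if it held s0, s1, apOvfl[0], s2; the balance
** routines below read the cells back in that combined order.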
*/ static void insertCell( MemPage *pPage, /* Page into which we are copying */ int i, /* New cell becomes the i-th cell of the page */ u8 *pCell, /* Content of the new cell */ int sz, /* Bytes of content in pCell */ u8 *pTemp, /* Temp storage space for pCell, if needed */ Pgno iChild, /* If non-zero, replace first 4 bytes with this value */ int *pRC /* Read and write return code from here */ ){ int idx = 0; /* Where to write new cell content in data[] */ int j; /* Loop counter */ u8 *data; /* The content of the whole page */ u8 *pIns; /* The point in pPage->aCellIdx[] where no cell inserted */ if( *pRC ) return; assert( i>=0 && i<=pPage->nCell+pPage->nOverflow ); assert( MX_CELL(pPage->pBt)<=10921 ); assert( pPage->nCell<=MX_CELL(pPage->pBt) || CORRUPT_DB ); assert( pPage->nOverflow<=ArraySize(pPage->apOvfl) ); assert( ArraySize(pPage->apOvfl)==ArraySize(pPage->aiOvfl) ); assert( sqlite3_mutex_held(pPage->pBt->mutex) ); /* The cell should normally be sized correctly. However, when moving a ** malformed cell from a leaf page to an interior page, if the cell size ** wanted to be less than 4 but got rounded up to 4 on the leaf, then size ** might be less than 8 (leaf-size + pointer) on the interior node. Hence ** the term after the || in the following assert(). */ assert( sz==pPage->xCellSize(pPage, pCell) || (sz==8 && iChild>0) ); if( pPage->nOverflow || sz+2>pPage->nFree ){ if( pTemp ){ memcpy(pTemp, pCell, sz); pCell = pTemp; } if( iChild ){ put4byte(pCell, iChild); } j = pPage->nOverflow++; assert( j<(int)(sizeof(pPage->apOvfl)/sizeof(pPage->apOvfl[0])) ); pPage->apOvfl[j] = pCell; pPage->aiOvfl[j] = (u16)i; /* When multiple overflows occur, they are always sequential and in ** sorted order. This invariants arise because multiple overflows can ** only occur when inserting divider cells into the parent page during ** balancing, and the dividers are adjacent and sorted. */ assert( j==0 || pPage->aiOvfl[j-1]<(u16)i ); /* Overflows in sorted order */ assert( j==0 || i==pPage->aiOvfl[j-1]+1 ); /* Overflows are sequential */ }else{ int rc = sqlite3PagerWrite(pPage->pDbPage); if( rc!=SQLITE_OK ){ *pRC = rc; return; } assert( sqlite3PagerIswriteable(pPage->pDbPage) ); data = pPage->aData; assert( &data[pPage->cellOffset]==pPage->aCellIdx ); rc = allocateSpace(pPage, sz, &idx); if( rc ){ *pRC = rc; return; } /* The allocateSpace() routine guarantees the following properties ** if it returns successfully */ assert( idx >= 0 ); assert( idx >= pPage->cellOffset+2*pPage->nCell+2 || CORRUPT_DB ); assert( idx+sz <= (int)pPage->pBt->usableSize ); pPage->nFree -= (u16)(2 + sz); memcpy(&data[idx], pCell, sz); if( iChild ){ put4byte(&data[idx], iChild); } pIns = pPage->aCellIdx + i*2; memmove(pIns+2, pIns, 2*(pPage->nCell - i)); put2byte(pIns, idx); pPage->nCell++; /* increment the cell count */ if( (++data[pPage->hdrOffset+4])==0 ) data[pPage->hdrOffset+3]++; assert( get2byte(&data[pPage->hdrOffset+3])==pPage->nCell ); #ifndef SQLITE_OMIT_AUTOVACUUM if( pPage->pBt->autoVacuum ){ /* The cell may contain a pointer to an overflow page. If so, write ** the entry for the overflow page into the pointer map. */ ptrmapPutOvflPtr(pPage, pCell, pRC); } #endif } } /* ** A CellArray object contains a cache of pointers and sizes for a ** consecutive sequence of cells that might be held multiple pages. 
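**
** Informally (an added note): apCell[] holds pointers into the pages
** being balanced (or into overflow-cell buffers) and szCell[] caches the
** size of each cell, with 0 meaning "not computed yet", so that
** cachedCellSize() below can fill a slot lazily on first use:
**
**     if( p->szCell[N]==0 ) p->szCell[N] = p->pRef->xCellSize(p->pRef, p->apCell[N]);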
*/ typedef struct CellArray CellArray; struct CellArray { int nCell; /* Number of cells in apCell[] */ MemPage *pRef; /* Reference page */ u8 **apCell; /* All cells begin balanced */ u16 *szCell; /* Local size of all cells in apCell[] */ }; /* ** Make sure the cell sizes at idx, idx+1, ..., idx+N-1 have been ** computed. */ static void populateCellCache(CellArray *p, int idx, int N){ assert( idx>=0 && idx+N<=p->nCell ); while( N>0 ){ assert( p->apCell[idx]!=0 ); if( p->szCell[idx]==0 ){ p->szCell[idx] = p->pRef->xCellSize(p->pRef, p->apCell[idx]); }else{ assert( CORRUPT_DB || p->szCell[idx]==p->pRef->xCellSize(p->pRef, p->apCell[idx]) ); } idx++; N--; } } /* ** Return the size of the Nth element of the cell array */ static SQLITE_NOINLINE u16 computeCellSize(CellArray *p, int N){ assert( N>=0 && NnCell ); assert( p->szCell[N]==0 ); p->szCell[N] = p->pRef->xCellSize(p->pRef, p->apCell[N]); return p->szCell[N]; } static u16 cachedCellSize(CellArray *p, int N){ assert( N>=0 && NnCell ); if( p->szCell[N] ) return p->szCell[N]; return computeCellSize(p, N); } /* ** Array apCell[] contains pointers to nCell b-tree page cells. The ** szCell[] array contains the size in bytes of each cell. This function ** replaces the current contents of page pPg with the contents of the cell ** array. ** ** Some of the cells in apCell[] may currently be stored in pPg. This ** function works around problems caused by this by making a copy of any ** such cells before overwriting the page data. ** ** The MemPage.nFree field is invalidated by this function. It is the ** responsibility of the caller to set it correctly. */ static int rebuildPage( MemPage *pPg, /* Edit this page */ int nCell, /* Final number of cells on page */ u8 **apCell, /* Array of cells */ u16 *szCell /* Array of cell sizes */ ){ const int hdr = pPg->hdrOffset; /* Offset of header on pPg */ u8 * const aData = pPg->aData; /* Pointer to data for pPg */ const int usableSize = pPg->pBt->usableSize; u8 * const pEnd = &aData[usableSize]; int i; u8 *pCellptr = pPg->aCellIdx; u8 *pTmp = sqlite3PagerTempSpace(pPg->pBt->pPager); u8 *pData; i = get2byte(&aData[hdr+5]); memcpy(&pTmp[i], &aData[i], usableSize - i); pData = pEnd; for(i=0; iaData && pCellxCellSize(pPg, pCell) || CORRUPT_DB ); testcase( szCell[i]!=pPg->xCellSize(pPg,pCell) ); } /* The pPg->nFree field is now set incorrectly. The caller will fix it. */ pPg->nCell = nCell; pPg->nOverflow = 0; put2byte(&aData[hdr+1], 0); put2byte(&aData[hdr+3], pPg->nCell); put2byte(&aData[hdr+5], pData - aData); aData[hdr+7] = 0x00; return SQLITE_OK; } /* ** Array apCell[] contains nCell pointers to b-tree cells. Array szCell ** contains the size in bytes of each such cell. This function attempts to ** add the cells stored in the array to page pPg. If it cannot (because ** the page needs to be defragmented before the cells will fit), non-zero ** is returned. Otherwise, if the cells are added successfully, zero is ** returned. ** ** Argument pCellptr points to the first entry in the cell-pointer array ** (part of page pPg) to populate. After cell apCell[0] is written to the ** page body, a 16-bit offset is written to pCellptr. And so on, for each ** cell in the array. It is the responsibility of the caller to ensure ** that it is safe to overwrite this part of the cell-pointer array. ** ** When this function is called, *ppData points to the start of the ** content area on page pPg. If the size of the content area is extended, ** *ppData is updated to point to the new start of the content area ** before returning. 
** ** Finally, argument pBegin points to the byte immediately following the ** end of the space required by this page for the cell-pointer area (for ** all cells - not just those inserted by the current call). If the content ** area must be extended to before this point in order to accomodate all ** cells in apCell[], then the cells do not fit and non-zero is returned. */ static int pageInsertArray( MemPage *pPg, /* Page to add cells to */ u8 *pBegin, /* End of cell-pointer array */ u8 **ppData, /* IN/OUT: Page content -area pointer */ u8 *pCellptr, /* Pointer to cell-pointer area */ int iFirst, /* Index of first cell to add */ int nCell, /* Number of cells to add to pPg */ CellArray *pCArray /* Array of cells */ ){ int i; u8 *aData = pPg->aData; u8 *pData = *ppData; int iEnd = iFirst + nCell; assert( CORRUPT_DB || pPg->hdrOffset==0 ); /* Never called on page 1 */ for(i=iFirst; iapCell[i], sz); put2byte(pCellptr, (pSlot - aData)); pCellptr += 2; } *ppData = pData; return 0; } /* ** Array apCell[] contains nCell pointers to b-tree cells. Array szCell ** contains the size in bytes of each such cell. This function adds the ** space associated with each cell in the array that is currently stored ** within the body of pPg to the pPg free-list. The cell-pointers and other ** fields of the page are not updated. ** ** This function returns the total number of cells added to the free-list. */ static int pageFreeArray( MemPage *pPg, /* Page to edit */ int iFirst, /* First cell to delete */ int nCell, /* Cells to delete */ CellArray *pCArray /* Array of cells */ ){ u8 * const aData = pPg->aData; u8 * const pEnd = &aData[pPg->pBt->usableSize]; u8 * const pStart = &aData[pPg->hdrOffset + 8 + pPg->childPtrSize]; int nRet = 0; int i; int iEnd = iFirst + nCell; u8 *pFree = 0; int szFree = 0; for(i=iFirst; iapCell[i]; if( pCell>=pStart && pCellszCell[i]; assert( sz>0 ); if( pFree!=(pCell + sz) ){ if( pFree ){ assert( pFree>aData && (pFree - aData)<65536 ); freeSpace(pPg, (u16)(pFree - aData), szFree); } pFree = pCell; szFree = sz; if( pFree+sz>pEnd ) return 0; }else{ pFree = pCell; szFree += sz; } nRet++; } } if( pFree ){ assert( pFree>aData && (pFree - aData)<65536 ); freeSpace(pPg, (u16)(pFree - aData), szFree); } return nRet; } /* ** apCell[] and szCell[] contains pointers to and sizes of all cells in the ** pages being balanced. The current page, pPg, has pPg->nCell cells starting ** with apCell[iOld]. After balancing, this page should hold nNew cells ** starting at apCell[iNew]. ** ** This routine makes the necessary adjustments to pPg so that it contains ** the correct cells after being balanced. ** ** The pPg->nFree field is invalid when this function returns. It is the ** responsibility of the caller to set it correctly. 
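**
** (Editorial example, not part of the original source.)  For instance, if
** pPg currently holds the 10 cells apCell[5..14] (iOld==5) and after the
** balance it should hold the 7 cells apCell[3..9] (iNew==3, nNew==7),
** then this routine frees cells 10..14 from the page body, prepends
** cells 3..4 to the cell-pointer array, and leaves cells 5..9 in place.
** Only if such an incremental edit fails (for example because the content
** area would first have to be defragmented) does it fall back to
** rebuildPage() and reconstruct the page from scratch.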
*/ static int editPage( MemPage *pPg, /* Edit this page */ int iOld, /* Index of first cell currently on page */ int iNew, /* Index of new first cell on page */ int nNew, /* Final number of cells on page */ CellArray *pCArray /* Array of cells and sizes */ ){ u8 * const aData = pPg->aData; const int hdr = pPg->hdrOffset; u8 *pBegin = &pPg->aCellIdx[nNew * 2]; int nCell = pPg->nCell; /* Cells stored on pPg */ u8 *pData; u8 *pCellptr; int i; int iOldEnd = iOld + pPg->nCell + pPg->nOverflow; int iNewEnd = iNew + nNew; #ifdef SQLITE_DEBUG u8 *pTmp = sqlite3PagerTempSpace(pPg->pBt->pPager); memcpy(pTmp, aData, pPg->pBt->usableSize); #endif /* Remove cells from the start and end of the page */ if( iOldaCellIdx, &pPg->aCellIdx[nShift*2], nCell*2); nCell -= nShift; } if( iNewEnd < iOldEnd ){ nCell -= pageFreeArray(pPg, iNewEnd, iOldEnd - iNewEnd, pCArray); } pData = &aData[get2byteNotZero(&aData[hdr+5])]; if( pDataaCellIdx; memmove(&pCellptr[nAdd*2], pCellptr, nCell*2); if( pageInsertArray( pPg, pBegin, &pData, pCellptr, iNew, nAdd, pCArray ) ) goto editpage_fail; nCell += nAdd; } /* Add any overflow cells */ for(i=0; inOverflow; i++){ int iCell = (iOld + pPg->aiOvfl[i]) - iNew; if( iCell>=0 && iCellaCellIdx[iCell * 2]; memmove(&pCellptr[2], pCellptr, (nCell - iCell) * 2); nCell++; if( pageInsertArray( pPg, pBegin, &pData, pCellptr, iCell+iNew, 1, pCArray ) ) goto editpage_fail; } } /* Append cells to the end of the page */ pCellptr = &pPg->aCellIdx[nCell*2]; if( pageInsertArray( pPg, pBegin, &pData, pCellptr, iNew+nCell, nNew-nCell, pCArray ) ) goto editpage_fail; pPg->nCell = nNew; pPg->nOverflow = 0; put2byte(&aData[hdr+3], pPg->nCell); put2byte(&aData[hdr+5], pData - aData); #ifdef SQLITE_DEBUG for(i=0; iapCell[i+iNew]; int iOff = get2byteAligned(&pPg->aCellIdx[i*2]); if( pCell>=aData && pCell<&aData[pPg->pBt->usableSize] ){ pCell = &pTmp[pCell - aData]; } assert( 0==memcmp(pCell, &aData[iOff], pCArray->pRef->xCellSize(pCArray->pRef, pCArray->apCell[i+iNew])) ); } #endif return SQLITE_OK; editpage_fail: /* Unable to edit this page. Rebuild it from scratch instead. */ populateCellCache(pCArray, iNew, nNew); return rebuildPage(pPg, nNew, &pCArray->apCell[iNew], &pCArray->szCell[iNew]); } /* ** The following parameters determine how many adjacent pages get involved ** in a balancing operation. NN is the number of neighbors on either side ** of the page that participate in the balancing operation. NB is the ** total number of pages that participate, including the target page and ** NN neighbors on either side. ** ** The minimum value of NN is 1 (of course). Increasing NN above 1 ** (to 2 or 3) gives a modest improvement in SELECT and DELETE performance ** in exchange for a larger degradation in INSERT and UPDATE performance. ** The value of NN appears to give the best results overall. */ #define NN 1 /* Number of neighbors on either side of pPage */ #define NB (NN*2+1) /* Total pages involved in the balance */ #ifndef SQLITE_OMIT_QUICKBALANCE /* ** This version of balance() handles the common special case where ** a new entry is being inserted on the extreme right-end of the ** tree, in other words, when the new entry will become the largest ** entry in the tree. ** ** Instead of trying to balance the 3 right-most leaf pages, just add ** a new page to the right-hand side and put the one new entry in ** that page. This leaves the right side of the tree somewhat ** unbalanced. 
But odds are that we will be inserting new entries ** at the end soon afterwards so the nearly empty page will quickly ** fill up. On average. ** ** pPage is the leaf page which is the right-most page in the tree. ** pParent is its parent. pPage must have a single overflow entry ** which is also the right-most entry on the page. ** ** The pSpace buffer is used to store a temporary copy of the divider ** cell that will be inserted into pParent. Such a cell consists of a 4 ** byte page number followed by a variable length integer. In other ** words, at most 13 bytes. Hence the pSpace buffer must be at ** least 13 bytes in size. */ static int balance_quick(MemPage *pParent, MemPage *pPage, u8 *pSpace){ BtShared *const pBt = pPage->pBt; /* B-Tree Database */ MemPage *pNew; /* Newly allocated page */ int rc; /* Return Code */ Pgno pgnoNew; /* Page number of pNew */ assert( sqlite3_mutex_held(pPage->pBt->mutex) ); assert( sqlite3PagerIswriteable(pParent->pDbPage) ); assert( pPage->nOverflow==1 ); /* This error condition is now caught prior to reaching this function */ if( NEVER(pPage->nCell==0) ) return SQLITE_CORRUPT_BKPT; /* Allocate a new page. This page will become the right-sibling of ** pPage. Make the parent page writable, so that the new divider cell ** may be inserted. If both these operations are successful, proceed. */ rc = allocateBtreePage(pBt, &pNew, &pgnoNew, 0, 0); if( rc==SQLITE_OK ){ u8 *pOut = &pSpace[4]; u8 *pCell = pPage->apOvfl[0]; u16 szCell = pPage->xCellSize(pPage, pCell); u8 *pStop; assert( sqlite3PagerIswriteable(pNew->pDbPage) ); assert( pPage->aData[0]==(PTF_INTKEY|PTF_LEAFDATA|PTF_LEAF) ); zeroPage(pNew, PTF_INTKEY|PTF_LEAFDATA|PTF_LEAF); rc = rebuildPage(pNew, 1, &pCell, &szCell); if( NEVER(rc) ) return rc; pNew->nFree = pBt->usableSize - pNew->cellOffset - 2 - szCell; /* If this is an auto-vacuum database, update the pointer map ** with entries for the new page, and any pointer from the ** cell on the page to an overflow page. If either of these ** operations fails, the return code is set, but the contents ** of the parent page are still manipulated by thh code below. ** That is Ok, at this point the parent page is guaranteed to ** be marked as dirty. Returning an error code will cause a ** rollback, undoing any changes made to the parent page. */ if( ISAUTOVACUUM ){ ptrmapPut(pBt, pgnoNew, PTRMAP_BTREE, pParent->pgno, &rc); if( szCell>pNew->minLocal ){ ptrmapPutOvflPtr(pNew, pCell, &rc); } } /* Create a divider cell to insert into pParent. The divider cell ** consists of a 4-byte page number (the page number of pPage) and ** a variable length key value (which must be the same value as the ** largest key on pPage). ** ** To find the largest key value on pPage, first find the right-most ** cell on pPage. The first two fields of this cell are the ** record-length (a variable length integer at most 32-bits in size) ** and the key value (a variable length integer, may have any value). ** The first of the while(...) loops below skips over the record-length ** field. The second while(...) loop copies the key value from the ** cell on pPage into the pSpace buffer. */ pCell = findCell(pPage, pPage->nCell-1); pStop = &pCell[9]; while( (*(pCell++)&0x80) && pCellnCell, pSpace, (int)(pOut-pSpace), 0, pPage->pgno, &rc); /* Set the right-child pointer of pParent to point to the new page. */ put4byte(&pParent->aData[pParent->hdrOffset+8], pgnoNew); /* Release the reference to the new page. 
*/ releasePage(pNew); } return rc; } #endif /* SQLITE_OMIT_QUICKBALANCE */ #if 0 /* ** This function does not contribute anything to the operation of SQLite. ** it is sometimes activated temporarily while debugging code responsible ** for setting pointer-map entries. */ static int ptrmapCheckPages(MemPage **apPage, int nPage){ int i, j; for(i=0; ipBt; assert( pPage->isInit ); for(j=0; jnCell; j++){ CellInfo info; u8 *z; z = findCell(pPage, j); pPage->xParseCell(pPage, z, &info); if( info.iOverflow ){ Pgno ovfl = get4byte(&z[info.iOverflow]); ptrmapGet(pBt, ovfl, &e, &n); assert( n==pPage->pgno && e==PTRMAP_OVERFLOW1 ); } if( !pPage->leaf ){ Pgno child = get4byte(z); ptrmapGet(pBt, child, &e, &n); assert( n==pPage->pgno && e==PTRMAP_BTREE ); } } if( !pPage->leaf ){ Pgno child = get4byte(&pPage->aData[pPage->hdrOffset+8]); ptrmapGet(pBt, child, &e, &n); assert( n==pPage->pgno && e==PTRMAP_BTREE ); } } return 1; } #endif /* ** This function is used to copy the contents of the b-tree node stored ** on page pFrom to page pTo. If page pFrom was not a leaf page, then ** the pointer-map entries for each child page are updated so that the ** parent page stored in the pointer map is page pTo. If pFrom contained ** any cells with overflow page pointers, then the corresponding pointer ** map entries are also updated so that the parent page is page pTo. ** ** If pFrom is currently carrying any overflow cells (entries in the ** MemPage.apOvfl[] array), they are not copied to pTo. ** ** Before returning, page pTo is reinitialized using btreeInitPage(). ** ** The performance of this function is not critical. It is only used by ** the balance_shallower() and balance_deeper() procedures, neither of ** which are called often under normal circumstances. */ static void copyNodeContent(MemPage *pFrom, MemPage *pTo, int *pRC){ if( (*pRC)==SQLITE_OK ){ BtShared * const pBt = pFrom->pBt; u8 * const aFrom = pFrom->aData; u8 * const aTo = pTo->aData; int const iFromHdr = pFrom->hdrOffset; int const iToHdr = ((pTo->pgno==1) ? 100 : 0); int rc; int iData; assert( pFrom->isInit ); assert( pFrom->nFree>=iToHdr ); assert( get2byte(&aFrom[iFromHdr+5]) <= (int)pBt->usableSize ); /* Copy the b-tree node content from page pFrom to page pTo. */ iData = get2byte(&aFrom[iFromHdr+5]); memcpy(&aTo[iData], &aFrom[iData], pBt->usableSize-iData); memcpy(&aTo[iToHdr], &aFrom[iFromHdr], pFrom->cellOffset + 2*pFrom->nCell); /* Reinitialize page pTo so that the contents of the MemPage structure ** match the new data. The initialization of pTo can actually fail under ** fairly obscure circumstances, even though it is a copy of initialized ** page pFrom. */ pTo->isInit = 0; rc = btreeInitPage(pTo); if( rc!=SQLITE_OK ){ *pRC = rc; return; } /* If this is an auto-vacuum database, update the pointer-map entries ** for any b-tree or overflow pages that pTo now contains the pointers to. */ if( ISAUTOVACUUM ){ *pRC = setChildPtrmaps(pTo); } } } /* ** This routine redistributes cells on the iParentIdx'th child of pParent ** (hereafter "the page") and up to 2 siblings so that all pages have about the ** same amount of free space. Usually a single sibling on either side of the ** page are used in the balancing, though both siblings might come from one ** side if the page is the first or last child of its parent. If the page ** has fewer than 2 siblings (something which can only happen if the page ** is a root page or a child of a root page) then all available siblings ** participate in the balancing. 
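**
** (Editorial example, not part of the original source.)  With NN==1 this
** means: for an interior child, the page plus one sibling on each side
** are balanced (three pages in total); for the left-most or right-most
** child, the two nearest siblings on the single available side are used
** instead, so three pages still take part whenever the parent has at
** least three children.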
** ** The number of siblings of the page might be increased or decreased by ** one or two in an effort to keep pages nearly full but not over full. ** ** Note that when this routine is called, some of the cells on the page ** might not actually be stored in MemPage.aData[]. This can happen ** if the page is overfull. This routine ensures that all cells allocated ** to the page and its siblings fit into MemPage.aData[] before returning. ** ** In the course of balancing the page and its siblings, cells may be ** inserted into or removed from the parent page (pParent). Doing so ** may cause the parent page to become overfull or underfull. If this ** happens, it is the responsibility of the caller to invoke the correct ** balancing routine to fix this problem (see the balance() routine). ** ** If this routine fails for any reason, it might leave the database ** in a corrupted state. So if this routine fails, the database should ** be rolled back. ** ** The third argument to this function, aOvflSpace, is a pointer to a ** buffer big enough to hold one page. If while inserting cells into the parent ** page (pParent) the parent page becomes overfull, this buffer is ** used to store the parent's overflow cells. Because this function inserts ** a maximum of four divider cells into the parent page, and the maximum ** size of a cell stored within an internal node is always less than 1/4 ** of the page-size, the aOvflSpace[] buffer is guaranteed to be large ** enough for all overflow cells. ** ** If aOvflSpace is set to a null pointer, this function returns ** SQLITE_NOMEM. */ #if defined(_MSC_VER) && _MSC_VER >= 1700 && defined(_M_ARM) #pragma optimize("", off) #endif static int balance_nonroot( MemPage *pParent, /* Parent page of siblings being balanced */ int iParentIdx, /* Index of "the page" in pParent */ u8 *aOvflSpace, /* page-size bytes of space for parent ovfl */ int isRoot, /* True if pParent is a root-page */ int bBulk /* True if this call is part of a bulk load */ ){ BtShared *pBt; /* The whole database */ int nMaxCells = 0; /* Allocated size of apCell, szCell, aFrom. */ int nNew = 0; /* Number of pages in apNew[] */ int nOld; /* Number of pages in apOld[] */ int i, j, k; /* Loop counters */ int nxDiv; /* Next divider slot in pParent->aCell[] */ int rc = SQLITE_OK; /* The return code */ u16 leafCorrection; /* 4 if pPage is a leaf. 
0 if not */ int leafData; /* True if pPage is a leaf of a LEAFDATA tree */ int usableSpace; /* Bytes in pPage beyond the header */ int pageFlags; /* Value of pPage->aData[0] */ int iSpace1 = 0; /* First unused byte of aSpace1[] */ int iOvflSpace = 0; /* First unused byte of aOvflSpace[] */ int szScratch; /* Size of scratch memory requested */ MemPage *apOld[NB]; /* pPage and up to two siblings */ MemPage *apNew[NB+2]; /* pPage and up to NB siblings after balancing */ u8 *pRight; /* Location in parent of right-sibling pointer */ u8 *apDiv[NB-1]; /* Divider cells in pParent */ int cntNew[NB+2]; /* Index in b.paCell[] of cell after i-th page */ int cntOld[NB+2]; /* Old index in b.apCell[] */ int szNew[NB+2]; /* Combined size of cells placed on i-th page */ u8 *aSpace1; /* Space for copies of dividers cells */ Pgno pgno; /* Temp var to store a page number in */ u8 abDone[NB+2]; /* True after i'th new page is populated */ Pgno aPgno[NB+2]; /* Page numbers of new pages before shuffling */ Pgno aPgOrder[NB+2]; /* Copy of aPgno[] used for sorting pages */ u16 aPgFlags[NB+2]; /* flags field of new pages before shuffling */ CellArray b; /* Parsed information on cells being balanced */ memset(abDone, 0, sizeof(abDone)); b.nCell = 0; b.apCell = 0; pBt = pParent->pBt; assert( sqlite3_mutex_held(pBt->mutex) ); assert( sqlite3PagerIswriteable(pParent->pDbPage) ); #if 0 TRACE(("BALANCE: begin page %d child of %d\n", pPage->pgno, pParent->pgno)); #endif /* At this point pParent may have at most one overflow cell. And if ** this overflow cell is present, it must be the cell with ** index iParentIdx. This scenario comes about when this function ** is called (indirectly) from sqlite3BtreeDelete(). */ assert( pParent->nOverflow==0 || pParent->nOverflow==1 ); assert( pParent->nOverflow==0 || pParent->aiOvfl[0]==iParentIdx ); if( !aOvflSpace ){ return SQLITE_NOMEM; } /* Find the sibling pages to balance. Also locate the cells in pParent ** that divide the siblings. An attempt is made to find NN siblings on ** either side of pPage. More siblings are taken from one side, however, ** if there are fewer than NN siblings on the other side. If pParent ** has NB or fewer children then all children of pParent are taken. ** ** This loop also drops the divider cells from the parent page. This ** way, the remainder of the function does not have to deal with any ** overflow cells in the parent page, since if any existed they will ** have already been removed. */ i = pParent->nOverflow + pParent->nCell; if( i<2 ){ nxDiv = 0; }else{ assert( bBulk==0 || bBulk==1 ); if( iParentIdx==0 ){ nxDiv = 0; }else if( iParentIdx==i ){ nxDiv = i-2+bBulk; }else{ nxDiv = iParentIdx-1; } i = 2-bBulk; } nOld = i+1; if( (i+nxDiv-pParent->nOverflow)==pParent->nCell ){ pRight = &pParent->aData[pParent->hdrOffset+8]; }else{ pRight = findCell(pParent, i+nxDiv-pParent->nOverflow); } pgno = get4byte(pRight); while( 1 ){ rc = getAndInitPage(pBt, pgno, &apOld[i], 0, 0); if( rc ){ memset(apOld, 0, (i+1)*sizeof(MemPage*)); goto balance_cleanup; } nMaxCells += 1+apOld[i]->nCell+apOld[i]->nOverflow; if( (i--)==0 ) break; if( i+nxDiv==pParent->aiOvfl[0] && pParent->nOverflow ){ apDiv[i] = pParent->apOvfl[0]; pgno = get4byte(apDiv[i]); szNew[i] = pParent->xCellSize(pParent, apDiv[i]); pParent->nOverflow = 0; }else{ apDiv[i] = findCell(pParent, i+nxDiv-pParent->nOverflow); pgno = get4byte(apDiv[i]); szNew[i] = pParent->xCellSize(pParent, apDiv[i]); /* Drop the cell from the parent page. 
apDiv[i] still points to ** the cell within the parent, even though it has been dropped. ** This is safe because dropping a cell only overwrites the first ** four bytes of it, and this function does not need the first ** four bytes of the divider cell. So the pointer is safe to use ** later on. ** ** But not if we are in secure-delete mode. In secure-delete mode, ** the dropCell() routine will overwrite the entire cell with zeroes. ** In this case, temporarily copy the cell into the aOvflSpace[] ** buffer. It will be copied out again as soon as the aSpace[] buffer ** is allocated. */ if( pBt->btsFlags & BTS_SECURE_DELETE ){ int iOff; iOff = SQLITE_PTR_TO_INT(apDiv[i]) - SQLITE_PTR_TO_INT(pParent->aData); if( (iOff+szNew[i])>(int)pBt->usableSize ){ rc = SQLITE_CORRUPT_BKPT; memset(apOld, 0, (i+1)*sizeof(MemPage*)); goto balance_cleanup; }else{ memcpy(&aOvflSpace[iOff], apDiv[i], szNew[i]); apDiv[i] = &aOvflSpace[apDiv[i]-pParent->aData]; } } dropCell(pParent, i+nxDiv-pParent->nOverflow, szNew[i], &rc); } } /* Make nMaxCells a multiple of 4 in order to preserve 8-byte ** alignment */ nMaxCells = (nMaxCells + 3)&~3; /* ** Allocate space for memory structures */ szScratch = nMaxCells*sizeof(u8*) /* b.apCell */ + nMaxCells*sizeof(u16) /* b.szCell */ + pBt->pageSize; /* aSpace1 */ /* EVIDENCE-OF: R-28375-38319 SQLite will never request a scratch buffer ** that is more than 6 times the database page size. */ assert( szScratch<=6*(int)pBt->pageSize ); b.apCell = sqlite3ScratchMalloc( szScratch ); if( b.apCell==0 ){ rc = SQLITE_NOMEM; goto balance_cleanup; } b.szCell = (u16*)&b.apCell[nMaxCells]; aSpace1 = (u8*)&b.szCell[nMaxCells]; assert( EIGHT_BYTE_ALIGNMENT(aSpace1) ); /* ** Load pointers to all cells on sibling pages and the divider cells ** into the local b.apCell[] array. Make copies of the divider cells ** into space obtained from aSpace1[]. The divider cells have already ** been removed from pParent. ** ** If the siblings are on leaf pages, then the child pointers of the ** divider cells are stripped from the cells before they are copied ** into aSpace1[]. In this way, all cells in b.apCell[] are without ** child pointers. If siblings are not leaves, then all cell in ** b.apCell[] include child pointers. Either way, all cells in b.apCell[] ** are alike. ** ** leafCorrection: 4 if pPage is a leaf. 0 if pPage is not a leaf. ** leafData: 1 if pPage holds key+data and pParent holds only keys. */ b.pRef = apOld[0]; leafCorrection = b.pRef->leaf*4; leafData = b.pRef->intKeyLeaf; for(i=0; inCell; u8 *aData = pOld->aData; u16 maskPage = pOld->maskPage; u8 *piCell = aData + pOld->cellOffset; u8 *piEnd; /* Verify that all sibling pages are of the same "type" (table-leaf, ** table-interior, index-leaf, or index-interior). */ if( pOld->aData[0]!=apOld[0]->aData[0] ){ rc = SQLITE_CORRUPT_BKPT; goto balance_cleanup; } /* Load b.apCell[] with pointers to all cells in pOld. If pOld ** constains overflow cells, include them in the b.apCell[] array ** in the correct spot. ** ** Note that when there are multiple overflow cells, it is always the ** case that they are sequential and adjacent. This invariant arises ** because multiple overflows can only occurs when inserting divider ** cells into a parent on a prior balance, and divider cells are always ** adjacent and are inserted in order. There is an assert() tagged ** with "NOTE 1" in the overflow cell insertion loop to prove this ** invariant. ** ** This must be done in advance. 
Once the balance starts, the cell ** offset section of the btree page will be overwritten and we will no ** long be able to find the cells if a pointer to each cell is not saved ** first. */ memset(&b.szCell[b.nCell], 0, sizeof(b.szCell[0])*limit); if( pOld->nOverflow>0 ){ memset(&b.szCell[b.nCell+limit], 0, sizeof(b.szCell[0])*pOld->nOverflow); limit = pOld->aiOvfl[0]; for(j=0; jnOverflow; k++){ assert( k==0 || pOld->aiOvfl[k-1]+1==pOld->aiOvfl[k] );/* NOTE 1 */ b.apCell[b.nCell] = pOld->apOvfl[k]; b.nCell++; } } piEnd = aData + pOld->cellOffset + 2*pOld->nCell; while( piCellmaxLocal+23 ); assert( iSpace1 <= (int)pBt->pageSize ); memcpy(pTemp, apDiv[i], sz); b.apCell[b.nCell] = pTemp+leafCorrection; assert( leafCorrection==0 || leafCorrection==4 ); b.szCell[b.nCell] = b.szCell[b.nCell] - leafCorrection; if( !pOld->leaf ){ assert( leafCorrection==0 ); assert( pOld->hdrOffset==0 ); /* The right pointer of the child page pOld becomes the left ** pointer of the divider cell */ memcpy(b.apCell[b.nCell], &pOld->aData[8], 4); }else{ assert( leafCorrection==4 ); while( b.szCell[b.nCell]<4 ){ /* Do not allow any cells smaller than 4 bytes. If a smaller cell ** does exist, pad it with 0x00 bytes. */ assert( b.szCell[b.nCell]==3 || CORRUPT_DB ); assert( b.apCell[b.nCell]==&aSpace1[iSpace1-3] || CORRUPT_DB ); aSpace1[iSpace1++] = 0x00; b.szCell[b.nCell]++; } } b.nCell++; } } /* ** Figure out the number of pages needed to hold all b.nCell cells. ** Store this number in "k". Also compute szNew[] which is the total ** size of all cells on the i-th page and cntNew[] which is the index ** in b.apCell[] of the cell that divides page i from page i+1. ** cntNew[k] should equal b.nCell. ** ** Values computed by this block: ** ** k: The total number of sibling pages ** szNew[i]: Spaced used on the i-th sibling page. ** cntNew[i]: Index in b.apCell[] and b.szCell[] for the first cell to ** the right of the i-th sibling page. ** usableSpace: Number of bytes of space available on each sibling. ** */ usableSpace = pBt->usableSize - 12 + leafCorrection; for(i=0; inFree; if( szNew[i]<0 ){ rc = SQLITE_CORRUPT_BKPT; goto balance_cleanup; } for(j=0; jnOverflow; j++){ szNew[i] += 2 + p->xCellSize(p, p->apOvfl[j]); } cntNew[i] = cntOld[i]; } k = nOld; for(i=0; iusableSpace ){ if( i+1>=k ){ k = i+2; if( k>NB+2 ){ rc = SQLITE_CORRUPT_BKPT; goto balance_cleanup; } szNew[k-1] = 0; cntNew[k-1] = b.nCell; } sz = 2 + cachedCellSize(&b, cntNew[i]-1); szNew[i] -= sz; if( !leafData ){ if( cntNew[i]usableSpace ) break; szNew[i] += sz; cntNew[i]++; if( !leafData ){ if( cntNew[i]=b.nCell ){ k = i+1; }else if( cntNew[i] <= (i>0 ? cntNew[i-1] : 0) ){ rc = SQLITE_CORRUPT_BKPT; goto balance_cleanup; } } /* ** The packing computed by the previous block is biased toward the siblings ** on the left side (siblings with smaller keys). The left siblings are ** always nearly full, while the right-most sibling might be nearly empty. ** The next block of code attempts to adjust the packing of siblings to ** get a better balance. ** ** This adjustment is more than an optimization. The packing above might ** be so out of balance as to be illegal. For example, the right-most ** sibling might be completely empty. This adjustment is not optional. 
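**
** (Editorial example, not part of the original source.)  For instance, if
** the first pass left two siblings packed as szNew[] = {980, 20} bytes,
** the loop below walks cells from the end of the left sibling over to the
** right sibling until the two are roughly even, e.g. about {510, 490}.
** Only cntNew[] and szNew[] are adjusted here; the cells themselves are
** moved later, when editPage()/rebuildPage() update the sibling pages.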
*/ for(i=k-1; i>0; i--){ int szRight = szNew[i]; /* Size of sibling on the right */ int szLeft = szNew[i-1]; /* Size of sibling on the left */ int r; /* Index of right-most cell in left sibling */ int d; /* Index of first cell to the left of right sibling */ r = cntNew[i-1] - 1; d = r + 1 - leafData; (void)cachedCellSize(&b, d); do{ assert( d szLeft-(b.szCell[r]+2)) ){ break; } szRight += b.szCell[d] + 2; szLeft -= b.szCell[r] + 2; cntNew[i-1] = r; r--; d--; }while( r>=0 ); szNew[i] = szRight; szNew[i-1] = szLeft; if( cntNew[i-1] <= (i>1 ? cntNew[i-2] : 0) ){ rc = SQLITE_CORRUPT_BKPT; goto balance_cleanup; } } /* Sanity check: For a non-corrupt database file one of the follwing ** must be true: ** (1) We found one or more cells (cntNew[0])>0), or ** (2) pPage is a virtual root page. A virtual root page is when ** the real root page is page 1 and we are the only child of ** that page. */ assert( cntNew[0]>0 || (pParent->pgno==1 && pParent->nCell==0) || CORRUPT_DB); TRACE(("BALANCE: old: %d(nc=%d) %d(nc=%d) %d(nc=%d)\n", apOld[0]->pgno, apOld[0]->nCell, nOld>=2 ? apOld[1]->pgno : 0, nOld>=2 ? apOld[1]->nCell : 0, nOld>=3 ? apOld[2]->pgno : 0, nOld>=3 ? apOld[2]->nCell : 0 )); /* ** Allocate k new pages. Reuse old pages where possible. */ pageFlags = apOld[0]->aData[0]; for(i=0; ipDbPage); nNew++; if( rc ) goto balance_cleanup; }else{ assert( i>0 ); rc = allocateBtreePage(pBt, &pNew, &pgno, (bBulk ? 1 : pgno), 0); if( rc ) goto balance_cleanup; zeroPage(pNew, pageFlags); apNew[i] = pNew; nNew++; cntOld[i] = b.nCell; /* Set the pointer-map entry for the new sibling page. */ if( ISAUTOVACUUM ){ ptrmapPut(pBt, pNew->pgno, PTRMAP_BTREE, pParent->pgno, &rc); if( rc!=SQLITE_OK ){ goto balance_cleanup; } } } } /* ** Reassign page numbers so that the new pages are in ascending order. ** This helps to keep entries in the disk file in order so that a scan ** of the table is closer to a linear scan through the file. That in turn ** helps the operating system to deliver pages from the disk more rapidly. ** ** An O(n^2) insertion sort algorithm is used, but since n is never more ** than (NB+2) (a small constant), that should not be a problem. ** ** When NB==3, this one optimization makes the database about 25% faster ** for large insertions and deletions. */ for(i=0; ipgno; aPgFlags[i] = apNew[i]->pDbPage->flags; for(j=0; ji ){ sqlite3PagerRekey(apNew[iBest]->pDbPage, pBt->nPage+iBest+1, 0); } sqlite3PagerRekey(apNew[i]->pDbPage, pgno, aPgFlags[iBest]); apNew[i]->pgno = pgno; } } TRACE(("BALANCE: new: %d(%d nc=%d) %d(%d nc=%d) %d(%d nc=%d) " "%d(%d nc=%d) %d(%d nc=%d)\n", apNew[0]->pgno, szNew[0], cntNew[0], nNew>=2 ? apNew[1]->pgno : 0, nNew>=2 ? szNew[1] : 0, nNew>=2 ? cntNew[1] - cntNew[0] - !leafData : 0, nNew>=3 ? apNew[2]->pgno : 0, nNew>=3 ? szNew[2] : 0, nNew>=3 ? cntNew[2] - cntNew[1] - !leafData : 0, nNew>=4 ? apNew[3]->pgno : 0, nNew>=4 ? szNew[3] : 0, nNew>=4 ? cntNew[3] - cntNew[2] - !leafData : 0, nNew>=5 ? apNew[4]->pgno : 0, nNew>=5 ? szNew[4] : 0, nNew>=5 ? cntNew[4] - cntNew[3] - !leafData : 0 )); assert( sqlite3PagerIswriteable(pParent->pDbPage) ); put4byte(pRight, apNew[nNew-1]->pgno); /* If the sibling pages are not leaves, ensure that the right-child pointer ** of the right-most new sibling page is set to the value that was ** originally in the same field of the right-most old sibling page. */ if( (pageFlags & PTF_LEAF)==0 && nOld!=nNew ){ MemPage *pOld = (nNew>nOld ? 
apNew : apOld)[nOld-1]; memcpy(&apNew[nNew-1]->aData[8], &pOld->aData[8], 4); } /* Make any required updates to pointer map entries associated with ** cells stored on sibling pages following the balance operation. Pointer ** map entries associated with divider cells are set by the insertCell() ** routine. The associated pointer map entries are: ** ** a) if the cell contains a reference to an overflow chain, the ** entry associated with the first page in the overflow chain, and ** ** b) if the sibling pages are not leaves, the child page associated ** with the cell. ** ** If the sibling pages are not leaves, then the pointer map entry ** associated with the right-child of each sibling may also need to be ** updated. This happens below, after the sibling pages have been ** populated, not here. */ if( ISAUTOVACUUM ){ MemPage *pNew = apNew[0]; u8 *aOld = pNew->aData; int cntOldNext = pNew->nCell + pNew->nOverflow; int usableSize = pBt->usableSize; int iNew = 0; int iOld = 0; for(i=0; inCell + pOld->nOverflow + !leafData; aOld = pOld->aData; } if( i==cntNew[iNew] ){ pNew = apNew[++iNew]; if( !leafData ) continue; } /* Cell pCell is destined for new sibling page pNew. Originally, it ** was either part of sibling page iOld (possibly an overflow cell), ** or else the divider cell to the left of sibling page iOld. So, ** if sibling page iOld had the same page number as pNew, and if ** pCell really was a part of sibling page iOld (not a divider or ** overflow cell), we can skip updating the pointer map entries. */ if( iOld>=nNew || pNew->pgno!=aPgno[iOld] || pCell=&aOld[usableSize] ){ if( !leafCorrection ){ ptrmapPut(pBt, get4byte(pCell), PTRMAP_BTREE, pNew->pgno, &rc); } if( cachedCellSize(&b,i)>pNew->minLocal ){ ptrmapPutOvflPtr(pNew, pCell, &rc); } if( rc ) goto balance_cleanup; } } } /* Insert new divider cells into pParent. */ for(i=0; ileaf ){ memcpy(&pNew->aData[8], pCell, 4); }else if( leafData ){ /* If the tree is a leaf-data tree, and the siblings are leaves, ** then there is no divider cell in b.apCell[]. Instead, the divider ** cell consists of the integer key for the right-most cell of ** the sibling-page assembled above only. */ CellInfo info; j--; pNew->xParseCell(pNew, b.apCell[j], &info); pCell = pTemp; sz = 4 + putVarint(&pCell[4], info.nKey); pTemp = 0; }else{ pCell -= 4; /* Obscure case for non-leaf-data trees: If the cell at pCell was ** previously stored on a leaf node, and its reported size was 4 ** bytes, then it may actually be smaller than this ** (see btreeParseCellPtr(), 4 bytes is the minimum size of ** any cell). But it is important to pass the correct size to ** insertCell(), so reparse the cell now. ** ** Note that this can never happen in an SQLite data file, as all ** cells are at least 4 bytes. It only happens in b-trees used ** to evaluate "IN (SELECT ...)" and similar clauses. */ if( b.szCell[j]==4 ){ assert(leafCorrection==4); sz = pParent->xCellSize(pParent, pCell); } } iOvflSpace += sz; assert( sz<=pBt->maxLocal+23 ); assert( iOvflSpace <= (int)pBt->pageSize ); insertCell(pParent, nxDiv+i, pCell, sz, pTemp, pNew->pgno, &rc); if( rc!=SQLITE_OK ) goto balance_cleanup; assert( sqlite3PagerIswriteable(pParent->pDbPage) ); } /* Now update the actual sibling pages. The order in which they are updated ** is important, as this code needs to avoid disrupting any page from which ** cells may still to be read. 
In practice, this means: ** ** (1) If cells are moving left (from apNew[iPg] to apNew[iPg-1]) ** then it is not safe to update page apNew[iPg] until after ** the left-hand sibling apNew[iPg-1] has been updated. ** ** (2) If cells are moving right (from apNew[iPg] to apNew[iPg+1]) ** then it is not safe to update page apNew[iPg] until after ** the right-hand sibling apNew[iPg+1] has been updated. ** ** If neither of the above apply, the page is safe to update. ** ** The iPg value in the following loop starts at nNew-1 goes down ** to 0, then back up to nNew-1 again, thus making two passes over ** the pages. On the initial downward pass, only condition (1) above ** needs to be tested because (2) will always be true from the previous ** step. On the upward pass, both conditions are always true, so the ** upwards pass simply processes pages that were missed on the downward ** pass. */ for(i=1-nNew; i=0 && iPg=0 /* On the upwards pass, or... */ || cntOld[iPg-1]>=cntNew[iPg-1] /* Condition (1) is true */ ){ int iNew; int iOld; int nNewCell; /* Verify condition (1): If cells are moving left, update iPg ** only after iPg-1 has already been updated. */ assert( iPg==0 || cntOld[iPg-1]>=cntNew[iPg-1] || abDone[iPg-1] ); /* Verify condition (2): If cells are moving right, update iPg ** only after iPg+1 has already been updated. */ assert( cntNew[iPg]>=cntOld[iPg] || abDone[iPg+1] ); if( iPg==0 ){ iNew = iOld = 0; nNewCell = cntNew[0]; }else{ iOld = iPgnFree = usableSpace-szNew[iPg]; assert( apNew[iPg]->nOverflow==0 ); assert( apNew[iPg]->nCell==nNewCell ); } } /* All pages have been processed exactly once */ assert( memcmp(abDone, "\01\01\01\01\01", nNew)==0 ); assert( nOld>0 ); assert( nNew>0 ); if( isRoot && pParent->nCell==0 && pParent->hdrOffset<=apNew[0]->nFree ){ /* The root page of the b-tree now contains no cells. The only sibling ** page is the right-child of the parent. Copy the contents of the ** child page into the parent, decreasing the overall height of the ** b-tree structure by one. This is described as the "balance-shallower" ** sub-algorithm in some documentation. ** ** If this is an auto-vacuum database, the call to copyNodeContent() ** sets all pointer-map entries corresponding to database image pages ** for which the pointer is stored within the content being copied. ** ** It is critical that the child page be defragmented before being ** copied into the parent, because if the parent is page 1 then it will ** by smaller than the child due to the database header, and so all the ** free space needs to be up front. */ assert( nNew==1 ); rc = defragmentPage(apNew[0]); testcase( rc!=SQLITE_OK ); assert( apNew[0]->nFree == (get2byte(&apNew[0]->aData[5])-apNew[0]->cellOffset-apNew[0]->nCell*2) || rc!=SQLITE_OK ); copyNodeContent(apNew[0], pParent, &rc); freePage(apNew[0], &rc); }else if( ISAUTOVACUUM && !leafCorrection ){ /* Fix the pointer map entries associated with the right-child of each ** sibling page. All other pointer map entries have already been taken ** care of. */ for(i=0; iaData[8]); ptrmapPut(pBt, key, PTRMAP_BTREE, apNew[i]->pgno, &rc); } } assert( pParent->isInit ); TRACE(("BALANCE: finished: old=%d new=%d cells=%d\n", nOld, nNew, b.nCell)); /* Free any old pages that were not reused as new pages. */ for(i=nNew; iisInit ){ /* The ptrmapCheckPages() contains assert() statements that verify that ** all pointer map pages are set correctly. This is helpful while ** debugging. This is usually disabled because a corrupt database may ** cause an assert() statement to fail. 
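**
** (Editorial note; the usual way of enabling this check is an
** assumption.)  Both this call site and the ptrmapCheckPages() definition
** earlier in this file normally sit inside "#if 0" blocks; switching them
** on verifies that every child page is recorded in the pointer map as
** PTRMAP_BTREE with its parent's page number, and that the first page of
** every overflow chain is recorded as PTRMAP_OVERFLOW1.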
*/ ptrmapCheckPages(apNew, nNew); ptrmapCheckPages(&pParent, 1); } #endif /* ** Cleanup before returning. */ balance_cleanup: sqlite3ScratchFree(b.apCell); for(i=0; i= 1700 && defined(_M_ARM) #pragma optimize("", on) #endif /* ** This function is called when the root page of a b-tree structure is ** overfull (has one or more overflow pages). ** ** A new child page is allocated and the contents of the current root ** page, including overflow cells, are copied into the child. The root ** page is then overwritten to make it an empty page with the right-child ** pointer pointing to the new page. ** ** Before returning, all pointer-map entries corresponding to pages ** that the new child-page now contains pointers to are updated. The ** entry corresponding to the new right-child pointer of the root ** page is also updated. ** ** If successful, *ppChild is set to contain a reference to the child ** page and SQLITE_OK is returned. In this case the caller is required ** to call releasePage() on *ppChild exactly once. If an error occurs, ** an error code is returned and *ppChild is set to 0. */ static int balance_deeper(MemPage *pRoot, MemPage **ppChild){ int rc; /* Return value from subprocedures */ MemPage *pChild = 0; /* Pointer to a new child page */ Pgno pgnoChild = 0; /* Page number of the new child page */ BtShared *pBt = pRoot->pBt; /* The BTree */ assert( pRoot->nOverflow>0 ); assert( sqlite3_mutex_held(pBt->mutex) ); /* Make pRoot, the root page of the b-tree, writable. Allocate a new ** page that will become the new right-child of pPage. Copy the contents ** of the node stored on pRoot into the new child page. */ rc = sqlite3PagerWrite(pRoot->pDbPage); if( rc==SQLITE_OK ){ rc = allocateBtreePage(pBt,&pChild,&pgnoChild,pRoot->pgno,0); copyNodeContent(pRoot, pChild, &rc); if( ISAUTOVACUUM ){ ptrmapPut(pBt, pgnoChild, PTRMAP_BTREE, pRoot->pgno, &rc); } } if( rc ){ *ppChild = 0; releasePage(pChild); return rc; } assert( sqlite3PagerIswriteable(pChild->pDbPage) ); assert( sqlite3PagerIswriteable(pRoot->pDbPage) ); assert( pChild->nCell==pRoot->nCell ); TRACE(("BALANCE: copy root %d into %d\n", pRoot->pgno, pChild->pgno)); /* Copy the overflow cells from pRoot to pChild */ memcpy(pChild->aiOvfl, pRoot->aiOvfl, pRoot->nOverflow*sizeof(pRoot->aiOvfl[0])); memcpy(pChild->apOvfl, pRoot->apOvfl, pRoot->nOverflow*sizeof(pRoot->apOvfl[0])); pChild->nOverflow = pRoot->nOverflow; /* Zero the contents of pRoot. Then install pChild as the right-child. */ zeroPage(pRoot, pChild->aData[0] & ~PTF_LEAF); put4byte(&pRoot->aData[pRoot->hdrOffset+8], pgnoChild); *ppChild = pChild; return SQLITE_OK; } /* ** The page that pCur currently points to has just been modified in ** some way. This function figures out if this modification means the ** tree needs to be balanced, and if so calls the appropriate balancing ** routine. Balancing routines are: ** ** balance_quick() ** balance_deeper() ** balance_nonroot() */ static int balance(BtCursor *pCur){ int rc = SQLITE_OK; const int nMin = pCur->pBt->usableSize * 2 / 3; u8 aBalanceQuickSpace[13]; u8 *pFree = 0; TESTONLY( int balance_quick_called = 0 ); TESTONLY( int balance_deeper_called = 0 ); do { int iPage = pCur->iPage; MemPage *pPage = pCur->apPage[iPage]; if( iPage==0 ){ if( pPage->nOverflow ){ /* The root page of the b-tree is overfull. In this case call the ** balance_deeper() function to create a new child for the root-page ** and copy the current contents of the root-page to it. The ** next iteration of the do-loop will balance the child page. 
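**
** (Editorial sketch, not part of the original source.)  Schematically:
**
**      before:   [ root R: overfull ]
**
**      after:    [ root R: empty, right-child -> C ]
**                               |
**                  [ child C: R's old cells, still overfull ]
**
** The tree grows by one level here; the following pass of the do-loop
** then balances the new child C with balance_nonroot() (or with
** balance_quick() in the common append case).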
*/ assert( (balance_deeper_called++)==0 ); rc = balance_deeper(pPage, &pCur->apPage[1]); if( rc==SQLITE_OK ){ pCur->iPage = 1; pCur->aiIdx[0] = 0; pCur->aiIdx[1] = 0; assert( pCur->apPage[1]->nOverflow ); } }else{ break; } }else if( pPage->nOverflow==0 && pPage->nFree<=nMin ){ break; }else{ MemPage * const pParent = pCur->apPage[iPage-1]; int const iIdx = pCur->aiIdx[iPage-1]; rc = sqlite3PagerWrite(pParent->pDbPage); if( rc==SQLITE_OK ){ #ifndef SQLITE_OMIT_QUICKBALANCE if( pPage->intKeyLeaf && pPage->nOverflow==1 && pPage->aiOvfl[0]==pPage->nCell && pParent->pgno!=1 && pParent->nCell==iIdx ){ /* Call balance_quick() to create a new sibling of pPage on which ** to store the overflow cell. balance_quick() inserts a new cell ** into pParent, which may cause pParent overflow. If this ** happens, the next iteration of the do-loop will balance pParent ** use either balance_nonroot() or balance_deeper(). Until this ** happens, the overflow cell is stored in the aBalanceQuickSpace[] ** buffer. ** ** The purpose of the following assert() is to check that only a ** single call to balance_quick() is made for each call to this ** function. If this were not verified, a subtle bug involving reuse ** of the aBalanceQuickSpace[] might sneak in. */ assert( (balance_quick_called++)==0 ); rc = balance_quick(pParent, pPage, aBalanceQuickSpace); }else #endif { /* In this case, call balance_nonroot() to redistribute cells ** between pPage and up to 2 of its sibling pages. This involves ** modifying the contents of pParent, which may cause pParent to ** become overfull or underfull. The next iteration of the do-loop ** will balance the parent page to correct this. ** ** If the parent page becomes overfull, the overflow cell or cells ** are stored in the pSpace buffer allocated immediately below. ** A subsequent iteration of the do-loop will deal with this by ** calling balance_nonroot() (balance_deeper() may be called first, ** but it doesn't deal with overflow cells - just moves them to a ** different page). Once this subsequent call to balance_nonroot() ** has completed, it is safe to release the pSpace buffer used by ** the previous call, as the overflow cell data will have been ** copied either into the body of a database page or into the new ** pSpace buffer passed to the latter call to balance_nonroot(). */ u8 *pSpace = sqlite3PageMalloc(pCur->pBt->pageSize); rc = balance_nonroot(pParent, iIdx, pSpace, iPage==1, pCur->hints&BTREE_BULKLOAD); if( pFree ){ /* If pFree is not NULL, it points to the pSpace buffer used ** by a previous call to balance_nonroot(). Its contents are ** now stored either on real database pages or within the ** new pSpace buffer, so it may be safely freed here. */ sqlite3PageFree(pFree); } /* The pSpace buffer will be freed after the next call to ** balance_nonroot(), or just before this function returns, whichever ** comes first. */ pFree = pSpace; } } pPage->nOverflow = 0; /* The next iteration of the do-loop balances the parent page. */ releasePage(pPage); pCur->iPage--; assert( pCur->iPage>=0 ); } }while( rc==SQLITE_OK ); if( pFree ){ sqlite3PageFree(pFree); } return rc; } /* ** Insert a new record into the BTree. The key is given by (pKey,nKey) ** and the data is given by (pData,nData). The cursor is used only to ** define what table the record should be inserted into. The cursor ** is left pointing at a random location. ** ** For an INTKEY table, only the nKey value of the key is used. pKey is ** ignored. For a ZERODATA table, the pData and nData are both ignored. 
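**
** (Editorial sketch, not part of the original source; pCur, iRowid,
** pRecord and nRecord are assumed to be set up by the caller.)  A typical
** insert into a rowid table looks like:
**
**     rc = sqlite3BtreeInsert(pCur,
**              0, iRowid,            /* pKey==0, nKey is the rowid     */
**              pRecord, nRecord,     /* serialized record to store     */
**              0,                    /* nZero: no zero-byte suffix     */
**              0,                    /* appendBias: not an append      */
**              0);                   /* seekResult: not pre-positioned */
**
** whereas an insert into an index b-tree passes the serialized key in
** (pKey, nKey) and no data.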
** ** If the seekResult parameter is non-zero, then a successful call to ** MovetoUnpacked() to seek cursor pCur to (pKey, nKey) has already ** been performed. seekResult is the search result returned (a negative ** number if pCur points at an entry that is smaller than (pKey, nKey), or ** a positive value if pCur points at an entry that is larger than ** (pKey, nKey)). ** ** If the seekResult parameter is non-zero, then the caller guarantees that ** cursor pCur is pointing at the existing copy of a row that is to be ** overwritten. If the seekResult parameter is 0, then cursor pCur may ** point to any entry or to no entry at all and so this function has to seek ** the cursor before the new key can be inserted. */ SQLITE_PRIVATE int sqlite3BtreeInsert( BtCursor *pCur, /* Insert data into the table of this cursor */ const void *pKey, i64 nKey, /* The key of the new record */ const void *pData, int nData, /* The data of the new record */ int nZero, /* Number of extra 0 bytes to append to data */ int appendBias, /* True if this is likely an append */ int seekResult /* Result of prior MovetoUnpacked() call */ ){ int rc; int loc = seekResult; /* -1: before desired location +1: after */ int szNew = 0; int idx; MemPage *pPage; Btree *p = pCur->pBtree; BtShared *pBt = p->pBt; unsigned char *oldCell; unsigned char *newCell = 0; if( pCur->eState==CURSOR_FAULT ){ assert( pCur->skipNext!=SQLITE_OK ); return pCur->skipNext; } assert( cursorHoldsMutex(pCur) ); assert( (pCur->curFlags & BTCF_WriteFlag)!=0 && pBt->inTransaction==TRANS_WRITE && (pBt->btsFlags & BTS_READ_ONLY)==0 ); assert( hasSharedCacheTableLock(p, pCur->pgnoRoot, pCur->pKeyInfo!=0, 2) ); /* Assert that the caller has been consistent. If this cursor was opened ** expecting an index b-tree, then the caller should be inserting blob ** keys with no associated data. If the cursor was opened expecting an ** intkey table, the caller should be inserting integer keys with a ** blob of associated data. */ assert( (pKey==0)==(pCur->pKeyInfo==0) ); /* Save the positions of any other cursors open on this table. ** ** In some cases, the call to btreeMoveto() below is a no-op. For ** example, when inserting data into a table with auto-generated integer ** keys, the VDBE layer invokes sqlite3BtreeLast() to figure out the ** integer key to use. It then calls this function to actually insert the ** data into the intkey B-Tree. In this case btreeMoveto() recognizes ** that the cursor is already where it needs to be and returns without ** doing any work. To avoid thwarting these optimizations, it is important ** not to clear the cursor here. 
*/ if( pCur->curFlags & BTCF_Multiple ){ rc = saveAllCursors(pBt, pCur->pgnoRoot, pCur); if( rc ) return rc; } if( pCur->pKeyInfo==0 ){ assert( pKey==0 ); /* If this is an insert into a table b-tree, invalidate any incrblob ** cursors open on the row being replaced */ invalidateIncrblobCursors(p, nKey, 0); /* If the cursor is currently on the last row and we are appending a ** new row onto the end, set the "loc" to avoid an unnecessary ** btreeMoveto() call */ if( (pCur->curFlags&BTCF_ValidNKey)!=0 && nKey>0 && pCur->info.nKey==nKey-1 ){ loc = -1; }else if( loc==0 ){ rc = sqlite3BtreeMovetoUnpacked(pCur, 0, nKey, appendBias, &loc); if( rc ) return rc; } }else if( loc==0 ){ rc = btreeMoveto(pCur, pKey, nKey, appendBias, &loc); if( rc ) return rc; } assert( pCur->eState==CURSOR_VALID || (pCur->eState==CURSOR_INVALID && loc) ); pPage = pCur->apPage[pCur->iPage]; assert( pPage->intKey || nKey>=0 ); assert( pPage->leaf || !pPage->intKey ); TRACE(("INSERT: table=%d nkey=%lld ndata=%d page=%d %s\n", pCur->pgnoRoot, nKey, nData, pPage->pgno, loc==0 ? "overwrite" : "new entry")); assert( pPage->isInit ); newCell = pBt->pTmpSpace; assert( newCell!=0 ); rc = fillInCell(pPage, newCell, pKey, nKey, pData, nData, nZero, &szNew); if( rc ) goto end_insert; assert( szNew==pPage->xCellSize(pPage, newCell) ); assert( szNew <= MX_CELL_SIZE(pBt) ); idx = pCur->aiIdx[pCur->iPage]; if( loc==0 ){ u16 szOld; assert( idxnCell ); rc = sqlite3PagerWrite(pPage->pDbPage); if( rc ){ goto end_insert; } oldCell = findCell(pPage, idx); if( !pPage->leaf ){ memcpy(newCell, oldCell, 4); } rc = clearCell(pPage, oldCell, &szOld); dropCell(pPage, idx, szOld, &rc); if( rc ) goto end_insert; }else if( loc<0 && pPage->nCell>0 ){ assert( pPage->leaf ); idx = ++pCur->aiIdx[pCur->iPage]; }else{ assert( pPage->leaf ); } insertCell(pPage, idx, newCell, szNew, 0, 0, &rc); assert( rc!=SQLITE_OK || pPage->nCell>0 || pPage->nOverflow>0 ); /* If no error has occurred and pPage has an overflow cell, call balance() ** to redistribute the cells within the tree. Since balance() may move ** the cursor, zero the BtCursor.info.nSize and BTCF_ValidNKey ** variables. ** ** Previous versions of SQLite called moveToRoot() to move the cursor ** back to the root page as balance() used to invalidate the contents ** of BtCursor.apPage[] and BtCursor.aiIdx[]. Instead of doing that, ** set the cursor state to "invalid". This makes common insert operations ** slightly faster. ** ** There is a subtle but important optimization here too. When inserting ** multiple records into an intkey b-tree using a single cursor (as can ** happen while processing an "INSERT INTO ... SELECT" statement), it ** is advantageous to leave the cursor pointing to the last entry in ** the b-tree if possible. If the cursor is left pointing to the last ** entry in the table, and the next row inserted has an integer key ** larger than the largest existing key, it is possible to insert the ** row without seeking the cursor. This can be a big performance boost. */ pCur->info.nSize = 0; if( rc==SQLITE_OK && pPage->nOverflow ){ pCur->curFlags &= ~(BTCF_ValidNKey); rc = balance(pCur); /* Must make sure nOverflow is reset to zero even if the balance() ** fails. Internal data structure corruption will result otherwise. ** Also, set the cursor state to invalid. This stops saveCursorPosition() ** from trying to save the current position of the cursor. 
*/ pCur->apPage[pCur->iPage]->nOverflow = 0; pCur->eState = CURSOR_INVALID; } assert( pCur->apPage[pCur->iPage]->nOverflow==0 ); end_insert: return rc; } /* ** Delete the entry that the cursor is pointing to. The cursor ** is left pointing at an arbitrary location. */ SQLITE_PRIVATE int sqlite3BtreeDelete(BtCursor *pCur){ Btree *p = pCur->pBtree; BtShared *pBt = p->pBt; int rc; /* Return code */ MemPage *pPage; /* Page to delete cell from */ unsigned char *pCell; /* Pointer to cell to delete */ int iCellIdx; /* Index of cell to delete */ int iCellDepth; /* Depth of node containing pCell */ u16 szCell; /* Size of the cell being deleted */ assert( cursorHoldsMutex(pCur) ); assert( pBt->inTransaction==TRANS_WRITE ); assert( (pBt->btsFlags & BTS_READ_ONLY)==0 ); assert( pCur->curFlags & BTCF_WriteFlag ); assert( hasSharedCacheTableLock(p, pCur->pgnoRoot, pCur->pKeyInfo!=0, 2) ); assert( !hasReadConflicts(p, pCur->pgnoRoot) ); assert( pCur->aiIdx[pCur->iPage]apPage[pCur->iPage]->nCell ); assert( pCur->eState==CURSOR_VALID ); iCellDepth = pCur->iPage; iCellIdx = pCur->aiIdx[iCellDepth]; pPage = pCur->apPage[iCellDepth]; pCell = findCell(pPage, iCellIdx); /* If the page containing the entry to delete is not a leaf page, move ** the cursor to the largest entry in the tree that is smaller than ** the entry being deleted. This cell will replace the cell being deleted ** from the internal node. The 'previous' entry is used for this instead ** of the 'next' entry, as the previous entry is always a part of the ** sub-tree headed by the child page of the cell being deleted. This makes ** balancing the tree following the delete operation easier. */ if( !pPage->leaf ){ int notUsed = 0; rc = sqlite3BtreePrevious(pCur, ¬Used); if( rc ) return rc; } /* Save the positions of any other cursors open on this table before ** making any modifications. Make the page containing the entry to be ** deleted writable. Then free any overflow pages associated with the ** entry and finally remove the cell itself from within the page. */ if( pCur->curFlags & BTCF_Multiple ){ rc = saveAllCursors(pBt, pCur->pgnoRoot, pCur); if( rc ) return rc; } /* If this is a delete operation to remove a row from a table b-tree, ** invalidate any incrblob cursors open on the row being deleted. */ if( pCur->pKeyInfo==0 ){ invalidateIncrblobCursors(p, pCur->info.nKey, 0); } rc = sqlite3PagerWrite(pPage->pDbPage); if( rc ) return rc; rc = clearCell(pPage, pCell, &szCell); dropCell(pPage, iCellIdx, szCell, &rc); if( rc ) return rc; /* If the cell deleted was not located on a leaf page, then the cursor ** is currently pointing to the largest entry in the sub-tree headed ** by the child-page of the cell that was just deleted from an internal ** node. The cell from the leaf node needs to be moved to the internal ** node to replace the deleted cell. */ if( !pPage->leaf ){ MemPage *pLeaf = pCur->apPage[pCur->iPage]; int nCell; Pgno n = pCur->apPage[iCellDepth+1]->pgno; unsigned char *pTmp; pCell = findCell(pLeaf, pLeaf->nCell-1); if( pCell<&pLeaf->aData[4] ) return SQLITE_CORRUPT_BKPT; nCell = pLeaf->xCellSize(pLeaf, pCell); assert( MX_CELL_SIZE(pBt) >= nCell ); pTmp = pBt->pTmpSpace; assert( pTmp!=0 ); rc = sqlite3PagerWrite(pLeaf->pDbPage); insertCell(pPage, iCellIdx, pCell-4, nCell+4, pTmp, n, &rc); dropCell(pLeaf, pLeaf->nCell-1, nCell, &rc); if( rc ) return rc; } /* Balance the tree. If the entry deleted was located on a leaf page, ** then the cursor still points to that page. 
In this case the first ** call to balance() repairs the tree, and the if(...) condition is ** never true. ** ** Otherwise, if the entry deleted was on an internal node page, then ** pCur is pointing to the leaf page from which a cell was removed to ** replace the cell deleted from the internal node. This is slightly ** tricky as the leaf node may be underfull, and the internal node may ** be either under or overfull. In this case run the balancing algorithm ** on the leaf node first. If the balance proceeds far enough up the ** tree that we can be sure that any problem in the internal node has ** been corrected, so be it. Otherwise, after balancing the leaf node, ** walk the cursor up the tree to the internal node and balance it as ** well. */ rc = balance(pCur); if( rc==SQLITE_OK && pCur->iPage>iCellDepth ){ while( pCur->iPage>iCellDepth ){ releasePage(pCur->apPage[pCur->iPage--]); } rc = balance(pCur); } if( rc==SQLITE_OK ){ moveToRoot(pCur); } return rc; } /* ** Create a new BTree table. Write into *piTable the page ** number for the root page of the new table. ** ** The type of type is determined by the flags parameter. Only the ** following values of flags are currently in use. Other values for ** flags might not work: ** ** BTREE_INTKEY|BTREE_LEAFDATA Used for SQL tables with rowid keys ** BTREE_ZERODATA Used for SQL indices */ static int btreeCreateTable(Btree *p, int *piTable, int createTabFlags){ BtShared *pBt = p->pBt; MemPage *pRoot; Pgno pgnoRoot; int rc; int ptfFlags; /* Page-type flage for the root page of new table */ assert( sqlite3BtreeHoldsMutex(p) ); assert( pBt->inTransaction==TRANS_WRITE ); assert( (pBt->btsFlags & BTS_READ_ONLY)==0 ); #ifdef SQLITE_OMIT_AUTOVACUUM rc = allocateBtreePage(pBt, &pRoot, &pgnoRoot, 1, 0); if( rc ){ return rc; } #else if( pBt->autoVacuum ){ Pgno pgnoMove; /* Move a page here to make room for the root-page */ MemPage *pPageMove; /* The page to move to. */ /* Creating a new table may probably require moving an existing database ** to make room for the new tables root page. In case this page turns ** out to be an overflow page, delete all overflow page-map caches ** held by open cursors. */ invalidateAllOverflowCache(pBt); /* Read the value of meta[3] from the database to determine where the ** root page of the new table should go. meta[3] is the largest root-page ** created so far, so the new root-page is (meta[3]+1). */ sqlite3BtreeGetMeta(p, BTREE_LARGEST_ROOT_PAGE, &pgnoRoot); pgnoRoot++; /* The new root-page may not be allocated on a pointer-map page, or the ** PENDING_BYTE page. */ while( pgnoRoot==PTRMAP_PAGENO(pBt, pgnoRoot) || pgnoRoot==PENDING_BYTE_PAGE(pBt) ){ pgnoRoot++; } assert( pgnoRoot>=3 || CORRUPT_DB ); testcase( pgnoRoot<3 ); /* Allocate a page. The page that currently resides at pgnoRoot will ** be moved to the allocated page (unless the allocated page happens ** to reside at pgnoRoot). */ rc = allocateBtreePage(pBt, &pPageMove, &pgnoMove, pgnoRoot, BTALLOC_EXACT); if( rc!=SQLITE_OK ){ return rc; } if( pgnoMove!=pgnoRoot ){ /* pgnoRoot is the page that will be used for the root-page of ** the new table (assuming an error did not occur). But we were ** allocated pgnoMove. If required (i.e. if it was not allocated ** by extending the file), the current page at position pgnoMove ** is already journaled. */ u8 eType = 0; Pgno iPtrPage = 0; /* Save the positions of any open cursors. This is required in ** case they are holding a reference to an xFetch reference ** corresponding to page pgnoRoot. 
*/ rc = saveAllCursors(pBt, 0, 0); releasePage(pPageMove); if( rc!=SQLITE_OK ){ return rc; } /* Move the page currently at pgnoRoot to pgnoMove. */ rc = btreeGetPage(pBt, pgnoRoot, &pRoot, 0); if( rc!=SQLITE_OK ){ return rc; } rc = ptrmapGet(pBt, pgnoRoot, &eType, &iPtrPage); if( eType==PTRMAP_ROOTPAGE || eType==PTRMAP_FREEPAGE ){ rc = SQLITE_CORRUPT_BKPT; } if( rc!=SQLITE_OK ){ releasePage(pRoot); return rc; } assert( eType!=PTRMAP_ROOTPAGE ); assert( eType!=PTRMAP_FREEPAGE ); rc = relocatePage(pBt, pRoot, eType, iPtrPage, pgnoMove, 0); releasePage(pRoot); /* Obtain the page at pgnoRoot */ if( rc!=SQLITE_OK ){ return rc; } rc = btreeGetPage(pBt, pgnoRoot, &pRoot, 0); if( rc!=SQLITE_OK ){ return rc; } rc = sqlite3PagerWrite(pRoot->pDbPage); if( rc!=SQLITE_OK ){ releasePage(pRoot); return rc; } }else{ pRoot = pPageMove; } /* Update the pointer-map and meta-data with the new root-page number. */ ptrmapPut(pBt, pgnoRoot, PTRMAP_ROOTPAGE, 0, &rc); if( rc ){ releasePage(pRoot); return rc; } /* When the new root page was allocated, page 1 was made writable in ** order either to increase the database filesize, or to decrement the ** freelist count. Hence, the sqlite3BtreeUpdateMeta() call cannot fail. */ assert( sqlite3PagerIswriteable(pBt->pPage1->pDbPage) ); rc = sqlite3BtreeUpdateMeta(p, 4, pgnoRoot); if( NEVER(rc) ){ releasePage(pRoot); return rc; } }else{ rc = allocateBtreePage(pBt, &pRoot, &pgnoRoot, 1, 0); if( rc ) return rc; } #endif assert( sqlite3PagerIswriteable(pRoot->pDbPage) ); if( createTabFlags & BTREE_INTKEY ){ ptfFlags = PTF_INTKEY | PTF_LEAFDATA | PTF_LEAF; }else{ ptfFlags = PTF_ZERODATA | PTF_LEAF; } zeroPage(pRoot, ptfFlags); sqlite3PagerUnref(pRoot->pDbPage); assert( (pBt->openFlags & BTREE_SINGLE)==0 || pgnoRoot==2 ); *piTable = (int)pgnoRoot; return SQLITE_OK; } SQLITE_PRIVATE int sqlite3BtreeCreateTable(Btree *p, int *piTable, int flags){ int rc; sqlite3BtreeEnter(p); rc = btreeCreateTable(p, piTable, flags); sqlite3BtreeLeave(p); return rc; } /* ** Erase the given database page and all its children. Return ** the page to the freelist. */ static int clearDatabasePage( BtShared *pBt, /* The BTree that contains the table */ Pgno pgno, /* Page number to clear */ int freePageFlag, /* Deallocate page if true */ int *pnChange /* Add number of Cells freed to this counter */ ){ MemPage *pPage; int rc; unsigned char *pCell; int i; int hdr; u16 szCell; assert( sqlite3_mutex_held(pBt->mutex) ); if( pgno>btreePagecount(pBt) ){ return SQLITE_CORRUPT_BKPT; } rc = getAndInitPage(pBt, pgno, &pPage, 0, 0); if( rc ) return rc; if( pPage->bBusy ){ rc = SQLITE_CORRUPT_BKPT; goto cleardatabasepage_out; } pPage->bBusy = 1; hdr = pPage->hdrOffset; for(i=0; inCell; i++){ pCell = findCell(pPage, i); if( !pPage->leaf ){ rc = clearDatabasePage(pBt, get4byte(pCell), 1, pnChange); if( rc ) goto cleardatabasepage_out; } rc = clearCell(pPage, pCell, &szCell); if( rc ) goto cleardatabasepage_out; } if( !pPage->leaf ){ rc = clearDatabasePage(pBt, get4byte(&pPage->aData[hdr+8]), 1, pnChange); if( rc ) goto cleardatabasepage_out; }else if( pnChange ){ assert( pPage->intKey || CORRUPT_DB ); testcase( !pPage->intKey ); *pnChange += pPage->nCell; } if( freePageFlag ){ freePage(pPage, &rc); }else if( (rc = sqlite3PagerWrite(pPage->pDbPage))==0 ){ zeroPage(pPage, pPage->aData[hdr] | PTF_LEAF); } cleardatabasepage_out: pPage->bBusy = 0; releasePage(pPage); return rc; } /* ** Delete all information from a single table in the database. iTable is ** the page number of the root of the table. 
** After this routine returns, the root page is empty, but still exists.
**
** This routine will fail with SQLITE_LOCKED if there are any open
** read cursors on the table.  Open write cursors are moved to the
** root of the table.
**
** If pnChange is not NULL, then table iTable must be an intkey table. The
** integer value pointed to by pnChange is incremented by the number of
** entries in the table.
*/
SQLITE_PRIVATE int sqlite3BtreeClearTable(Btree *p, int iTable, int *pnChange){
  int rc;
  BtShared *pBt = p->pBt;
  sqlite3BtreeEnter(p);
  assert( p->inTrans==TRANS_WRITE );

  rc = saveAllCursors(pBt, (Pgno)iTable, 0);

  if( SQLITE_OK==rc ){
    /* Invalidate all incrblob cursors open on table iTable (assuming iTable
    ** is the root of a table b-tree - if it is not, the following call is
    ** a no-op).  */
    invalidateIncrblobCursors(p, 0, 1);
    rc = clearDatabasePage(pBt, (Pgno)iTable, 0, pnChange);
  }
  sqlite3BtreeLeave(p);
  return rc;
}

/*
** Delete all information from the single table that pCur is open on.
**
** This routine only works for pCur on an ephemeral table.
*/
SQLITE_PRIVATE int sqlite3BtreeClearTableOfCursor(BtCursor *pCur){
  return sqlite3BtreeClearTable(pCur->pBtree, pCur->pgnoRoot, 0);
}

/*
** Erase all information in a table and add the root of the table to
** the freelist.  Except, the root of the principal table (the one on
** page 1) is never added to the freelist.
**
** This routine will fail with SQLITE_LOCKED if there are any open
** cursors on the table.
**
** If AUTOVACUUM is enabled and the page at iTable is not the last
** root page in the database file, then the last root page
** in the database file is moved into the slot formerly occupied by
** iTable and the slot formerly occupied by that last root page
** is added to the freelist instead of iTable.  In this way, all
** root pages are kept at the beginning of the database file, which
** is necessary for AUTOVACUUM to work right.  *piMoved is set to the
** page number that used to be the last root page in the file before
** the move.  If no page gets moved, *piMoved is set to 0.
** The last root page is recorded in meta[3] and the value of
** meta[3] is updated by this procedure.
*/
static int btreeDropTable(Btree *p, Pgno iTable, int *piMoved){
  int rc;
  MemPage *pPage = 0;
  BtShared *pBt = p->pBt;

  assert( sqlite3BtreeHoldsMutex(p) );
  assert( p->inTrans==TRANS_WRITE );

  /* It is illegal to drop a table if any cursors are open on the
  ** database. This is because in auto-vacuum mode the backend may
  ** need to move another root-page to fill a gap left by the deleted
  ** root page. If an open cursor was using this page a problem would
  ** occur.
  **
  ** This error is caught long before control reaches this point.
  */
  if( NEVER(pBt->pCursor) ){
    sqlite3ConnectionBlocked(p->db, pBt->pCursor->pBtree->db);
    return SQLITE_LOCKED_SHAREDCACHE;
  }

  rc = btreeGetPage(pBt, (Pgno)iTable, &pPage, 0);
  if( rc ) return rc;
  rc = sqlite3BtreeClearTable(p, iTable, 0);
  if( rc ){
    releasePage(pPage);
    return rc;
  }

  *piMoved = 0;

  if( iTable>1 ){
#ifdef SQLITE_OMIT_AUTOVACUUM
    freePage(pPage, &rc);
    releasePage(pPage);
#else
    if( pBt->autoVacuum ){
      Pgno maxRootPgno;
      sqlite3BtreeGetMeta(p, BTREE_LARGEST_ROOT_PAGE, &maxRootPgno);

      if( iTable==maxRootPgno ){
        /* If the table being dropped is the table with the largest root-page
        ** number in the database, put the root page on the free list.
        */
        freePage(pPage, &rc);
        releasePage(pPage);
        if( rc!=SQLITE_OK ){
          return rc;
        }
      }else{
        /* The table being dropped does not have the largest root-page
        ** number in the database.
So move the page that does into the ** gap left by the deleted root-page. */ MemPage *pMove; releasePage(pPage); rc = btreeGetPage(pBt, maxRootPgno, &pMove, 0); if( rc!=SQLITE_OK ){ return rc; } rc = relocatePage(pBt, pMove, PTRMAP_ROOTPAGE, 0, iTable, 0); releasePage(pMove); if( rc!=SQLITE_OK ){ return rc; } pMove = 0; rc = btreeGetPage(pBt, maxRootPgno, &pMove, 0); freePage(pMove, &rc); releasePage(pMove); if( rc!=SQLITE_OK ){ return rc; } *piMoved = maxRootPgno; } /* Set the new 'max-root-page' value in the database header. This ** is the old value less one, less one more if that happens to ** be a root-page number, less one again if that is the ** PENDING_BYTE_PAGE. */ maxRootPgno--; while( maxRootPgno==PENDING_BYTE_PAGE(pBt) || PTRMAP_ISPAGE(pBt, maxRootPgno) ){ maxRootPgno--; } assert( maxRootPgno!=PENDING_BYTE_PAGE(pBt) ); rc = sqlite3BtreeUpdateMeta(p, 4, maxRootPgno); }else{ freePage(pPage, &rc); releasePage(pPage); } #endif }else{ /* If sqlite3BtreeDropTable was called on page 1. ** This really never should happen except in a corrupt ** database. */ zeroPage(pPage, PTF_INTKEY|PTF_LEAF ); releasePage(pPage); } return rc; } SQLITE_PRIVATE int sqlite3BtreeDropTable(Btree *p, int iTable, int *piMoved){ int rc; sqlite3BtreeEnter(p); rc = btreeDropTable(p, iTable, piMoved); sqlite3BtreeLeave(p); return rc; } /* ** This function may only be called if the b-tree connection already ** has a read or write transaction open on the database. ** ** Read the meta-information out of a database file. Meta[0] ** is the number of free pages currently in the database. Meta[1] ** through meta[15] are available for use by higher layers. Meta[0] ** is read-only, the others are read/write. ** ** The schema layer numbers meta values differently. At the schema ** layer (and the SetCookie and ReadCookie opcodes) the number of ** free pages is not visible. So Cookie[0] is the same as Meta[1]. ** ** This routine treats Meta[BTREE_DATA_VERSION] as a special case. Instead ** of reading the value out of the header, it instead loads the "DataVersion" ** from the pager. The BTREE_DATA_VERSION value is not actually stored in the ** database file. It is a number computed by the pager. But its access ** pattern is the same as header meta values, and so it is convenient to ** read it from this routine. */ SQLITE_PRIVATE void sqlite3BtreeGetMeta(Btree *p, int idx, u32 *pMeta){ BtShared *pBt = p->pBt; sqlite3BtreeEnter(p); assert( p->inTrans>TRANS_NONE ); assert( SQLITE_OK==querySharedCacheTableLock(p, MASTER_ROOT, READ_LOCK) ); assert( pBt->pPage1 ); assert( idx>=0 && idx<=15 ); if( idx==BTREE_DATA_VERSION ){ *pMeta = sqlite3PagerDataVersion(pBt->pPager) + p->iDataVersion; }else{ *pMeta = get4byte(&pBt->pPage1->aData[36 + idx*4]); } /* If auto-vacuum is disabled in this build and this is an auto-vacuum ** database, mark the database as read-only. */ #ifdef SQLITE_OMIT_AUTOVACUUM if( idx==BTREE_LARGEST_ROOT_PAGE && *pMeta>0 ){ pBt->btsFlags |= BTS_READ_ONLY; } #endif sqlite3BtreeLeave(p); } /* ** Write meta-information back into the database. Meta[0] is ** read-only and may not be written. 
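**
** Each meta value occupies four big-endian bytes in the database header on
** page 1, at byte offset 36+idx*4 (see the put4byte() call below).  As an
** illustration, the auto-vacuum branch of btreeCreateTable() above records
** a newly allocated root page as the new largest root page (meta index 4,
** BTREE_LARGEST_ROOT_PAGE) with a call of the form:
**
**     rc = sqlite3BtreeUpdateMeta(p, 4, pgnoRoot);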
*/ SQLITE_PRIVATE int sqlite3BtreeUpdateMeta(Btree *p, int idx, u32 iMeta){ BtShared *pBt = p->pBt; unsigned char *pP1; int rc; assert( idx>=1 && idx<=15 ); sqlite3BtreeEnter(p); assert( p->inTrans==TRANS_WRITE ); assert( pBt->pPage1!=0 ); pP1 = pBt->pPage1->aData; rc = sqlite3PagerWrite(pBt->pPage1->pDbPage); if( rc==SQLITE_OK ){ put4byte(&pP1[36 + idx*4], iMeta); #ifndef SQLITE_OMIT_AUTOVACUUM if( idx==BTREE_INCR_VACUUM ){ assert( pBt->autoVacuum || iMeta==0 ); assert( iMeta==0 || iMeta==1 ); pBt->incrVacuum = (u8)iMeta; } #endif } sqlite3BtreeLeave(p); return rc; } #ifndef SQLITE_OMIT_BTREECOUNT /* ** The first argument, pCur, is a cursor opened on some b-tree. Count the ** number of entries in the b-tree and write the result to *pnEntry. ** ** SQLITE_OK is returned if the operation is successfully executed. ** Otherwise, if an error is encountered (i.e. an IO error or database ** corruption) an SQLite error code is returned. */ SQLITE_PRIVATE int sqlite3BtreeCount(BtCursor *pCur, i64 *pnEntry){ i64 nEntry = 0; /* Value to return in *pnEntry */ int rc; /* Return code */ if( pCur->pgnoRoot==0 ){ *pnEntry = 0; return SQLITE_OK; } rc = moveToRoot(pCur); /* Unless an error occurs, the following loop runs one iteration for each ** page in the B-Tree structure (not including overflow pages). */ while( rc==SQLITE_OK ){ int iIdx; /* Index of child node in parent */ MemPage *pPage; /* Current page of the b-tree */ /* If this is a leaf page or the tree is not an int-key tree, then ** this page contains countable entries. Increment the entry counter ** accordingly. */ pPage = pCur->apPage[pCur->iPage]; if( pPage->leaf || !pPage->intKey ){ nEntry += pPage->nCell; } /* pPage is a leaf node. This loop navigates the cursor so that it ** points to the first interior cell that it points to the parent of ** the next page in the tree that has not yet been visited. The ** pCur->aiIdx[pCur->iPage] value is set to the index of the parent cell ** of the page, or to the number of cells in the page if the next page ** to visit is the right-child of its parent. ** ** If all pages in the tree have been visited, return SQLITE_OK to the ** caller. */ if( pPage->leaf ){ do { if( pCur->iPage==0 ){ /* All pages of the b-tree have been visited. Return successfully. */ *pnEntry = nEntry; return moveToRoot(pCur); } moveToParent(pCur); }while ( pCur->aiIdx[pCur->iPage]>=pCur->apPage[pCur->iPage]->nCell ); pCur->aiIdx[pCur->iPage]++; pPage = pCur->apPage[pCur->iPage]; } /* Descend to the child node of the cell that the cursor currently ** points at. This is the right-child if (iIdx==pPage->nCell). */ iIdx = pCur->aiIdx[pCur->iPage]; if( iIdx==pPage->nCell ){ rc = moveToChild(pCur, get4byte(&pPage->aData[pPage->hdrOffset+8])); }else{ rc = moveToChild(pCur, get4byte(findCell(pPage, iIdx))); } } /* An error has occurred. Return an error code. */ return rc; } #endif /* ** Return the pager associated with a BTree. This routine is used for ** testing and debugging only. */ SQLITE_PRIVATE Pager *sqlite3BtreePager(Btree *p){ return p->pBt->pPager; } #ifndef SQLITE_OMIT_INTEGRITY_CHECK /* ** Append a message to the error message string. */ static void checkAppendMsg( IntegrityCk *pCheck, const char *zFormat, ... 
){ va_list ap; char zBuf[200]; if( !pCheck->mxErr ) return; pCheck->mxErr--; pCheck->nErr++; va_start(ap, zFormat); if( pCheck->errMsg.nChar ){ sqlite3StrAccumAppend(&pCheck->errMsg, "\n", 1); } if( pCheck->zPfx ){ sqlite3_snprintf(sizeof(zBuf), zBuf, pCheck->zPfx, pCheck->v1, pCheck->v2); sqlite3StrAccumAppendAll(&pCheck->errMsg, zBuf); } sqlite3VXPrintf(&pCheck->errMsg, 1, zFormat, ap); va_end(ap); if( pCheck->errMsg.accError==STRACCUM_NOMEM ){ pCheck->mallocFailed = 1; } } #endif /* SQLITE_OMIT_INTEGRITY_CHECK */ #ifndef SQLITE_OMIT_INTEGRITY_CHECK /* ** Return non-zero if the bit in the IntegrityCk.aPgRef[] array that ** corresponds to page iPg is already set. */ static int getPageReferenced(IntegrityCk *pCheck, Pgno iPg){ assert( iPg<=pCheck->nPage && sizeof(pCheck->aPgRef[0])==1 ); return (pCheck->aPgRef[iPg/8] & (1 << (iPg & 0x07))); } /* ** Set the bit in the IntegrityCk.aPgRef[] array that corresponds to page iPg. */ static void setPageReferenced(IntegrityCk *pCheck, Pgno iPg){ assert( iPg<=pCheck->nPage && sizeof(pCheck->aPgRef[0])==1 ); pCheck->aPgRef[iPg/8] |= (1 << (iPg & 0x07)); } /* ** Add 1 to the reference count for page iPage. If this is the second ** reference to the page, add an error message to pCheck->zErrMsg. ** Return 1 if there are 2 or more references to the page and 0 if ** if this is the first reference to the page. ** ** Also check that the page number is in bounds. */ static int checkRef(IntegrityCk *pCheck, Pgno iPage){ if( iPage==0 ) return 1; if( iPage>pCheck->nPage ){ checkAppendMsg(pCheck, "invalid page number %d", iPage); return 1; } if( getPageReferenced(pCheck, iPage) ){ checkAppendMsg(pCheck, "2nd reference to page %d", iPage); return 1; } setPageReferenced(pCheck, iPage); return 0; } #ifndef SQLITE_OMIT_AUTOVACUUM /* ** Check that the entry in the pointer-map for page iChild maps to ** page iParent, pointer type ptrType. If not, append an error message ** to pCheck. */ static void checkPtrmap( IntegrityCk *pCheck, /* Integrity check context */ Pgno iChild, /* Child page number */ u8 eType, /* Expected pointer map type */ Pgno iParent /* Expected pointer map parent page number */ ){ int rc; u8 ePtrmapType; Pgno iPtrmapParent; rc = ptrmapGet(pCheck->pBt, iChild, &ePtrmapType, &iPtrmapParent); if( rc!=SQLITE_OK ){ if( rc==SQLITE_NOMEM || rc==SQLITE_IOERR_NOMEM ) pCheck->mallocFailed = 1; checkAppendMsg(pCheck, "Failed to read ptrmap key=%d", iChild); return; } if( ePtrmapType!=eType || iPtrmapParent!=iParent ){ checkAppendMsg(pCheck, "Bad ptr map entry key=%d expected=(%d,%d) got=(%d,%d)", iChild, eType, iParent, ePtrmapType, iPtrmapParent); } } #endif /* ** Check the integrity of the freelist or of an overflow page list. ** Verify that the number of pages on the list is N. */ static void checkList( IntegrityCk *pCheck, /* Integrity checking context */ int isFreeList, /* True for a freelist. 
False for overflow page list */ int iPage, /* Page number for first page in the list */ int N /* Expected number of pages in the list */ ){ int i; int expected = N; int iFirst = iPage; while( N-- > 0 && pCheck->mxErr ){ DbPage *pOvflPage; unsigned char *pOvflData; if( iPage<1 ){ checkAppendMsg(pCheck, "%d of %d pages missing from overflow list starting at %d", N+1, expected, iFirst); break; } if( checkRef(pCheck, iPage) ) break; if( sqlite3PagerGet(pCheck->pPager, (Pgno)iPage, &pOvflPage) ){ checkAppendMsg(pCheck, "failed to get page %d", iPage); break; } pOvflData = (unsigned char *)sqlite3PagerGetData(pOvflPage); if( isFreeList ){ int n = get4byte(&pOvflData[4]); #ifndef SQLITE_OMIT_AUTOVACUUM if( pCheck->pBt->autoVacuum ){ checkPtrmap(pCheck, iPage, PTRMAP_FREEPAGE, 0); } #endif if( n>(int)pCheck->pBt->usableSize/4-2 ){ checkAppendMsg(pCheck, "freelist leaf count too big on page %d", iPage); N--; }else{ for(i=0; ipBt->autoVacuum ){ checkPtrmap(pCheck, iFreePage, PTRMAP_FREEPAGE, 0); } #endif checkRef(pCheck, iFreePage); } N -= n; } } #ifndef SQLITE_OMIT_AUTOVACUUM else{ /* If this database supports auto-vacuum and iPage is not the last ** page in this overflow list, check that the pointer-map entry for ** the following page matches iPage. */ if( pCheck->pBt->autoVacuum && N>0 ){ i = get4byte(pOvflData); checkPtrmap(pCheck, i, PTRMAP_OVERFLOW2, iPage); } } #endif iPage = get4byte(pOvflData); sqlite3PagerUnref(pOvflPage); } } #endif /* SQLITE_OMIT_INTEGRITY_CHECK */ /* ** An implementation of a min-heap. ** ** aHeap[0] is the number of elements on the heap. aHeap[1] is the ** root element. The daughter nodes of aHeap[N] are aHeap[N*2] ** and aHeap[N*2+1]. ** ** The heap property is this: Every node is less than or equal to both ** of its daughter nodes. A consequence of the heap property is that the ** root node aHeap[1] is always the minimum value currently in the heap. ** ** The btreeHeapInsert() routine inserts an unsigned 32-bit number onto ** the heap, preserving the heap property. The btreeHeapPull() routine ** removes the root element from the heap (the minimum value in the heap) ** and then moves other nodes around as necessary to preserve the heap ** property. ** ** This heap is used for cell overlap and coverage testing. Each u32 ** entry represents the span of a cell or freeblock on a btree page. ** The upper 16 bits are the index of the first byte of a range and the ** lower 16 bits are the index of the last byte of that range. */ static void btreeHeapInsert(u32 *aHeap, u32 x){ u32 j, i = ++aHeap[0]; aHeap[i] = x; while( (j = i/2)>0 && aHeap[j]>aHeap[i] ){ x = aHeap[j]; aHeap[j] = aHeap[i]; aHeap[i] = x; i = j; } } static int btreeHeapPull(u32 *aHeap, u32 *pOut){ u32 j, i, x; if( (x = aHeap[0])==0 ) return 0; *pOut = aHeap[1]; aHeap[1] = aHeap[x]; aHeap[x] = 0xffffffff; aHeap[0]--; i = 1; while( (j = i*2)<=aHeap[0] ){ if( aHeap[j]>aHeap[j+1] ) j++; if( aHeap[i]zPfx; int saved_v1 = pCheck->v1; int saved_v2 = pCheck->v2; u8 savedIsInit = 0; /* Check that the page exists */ pBt = pCheck->pBt; usableSize = pBt->usableSize; if( iPage==0 ) return 0; if( checkRef(pCheck, iPage) ) return 0; pCheck->zPfx = "Page %d: "; pCheck->v1 = iPage; if( (rc = btreeGetPage(pBt, (Pgno)iPage, &pPage, 0))!=0 ){ checkAppendMsg(pCheck, "unable to get the page. error code=%d", rc); goto end_of_check; } /* Clear MemPage.isInit to make sure the corruption detection code in ** btreeInitPage() is executed. 
*/ savedIsInit = pPage->isInit; pPage->isInit = 0; if( (rc = btreeInitPage(pPage))!=0 ){ assert( rc==SQLITE_CORRUPT ); /* The only possible error from InitPage */ checkAppendMsg(pCheck, "btreeInitPage() returns error code %d", rc); goto end_of_check; } data = pPage->aData; hdr = pPage->hdrOffset; /* Set up for cell analysis */ pCheck->zPfx = "On tree page %d cell %d: "; contentOffset = get2byteNotZero(&data[hdr+5]); assert( contentOffset<=usableSize ); /* Enforced by btreeInitPage() */ /* EVIDENCE-OF: R-37002-32774 The two-byte integer at offset 3 gives the ** number of cells on the page. */ nCell = get2byte(&data[hdr+3]); assert( pPage->nCell==nCell ); /* EVIDENCE-OF: R-23882-45353 The cell pointer array of a b-tree page ** immediately follows the b-tree page header. */ cellStart = hdr + 12 - 4*pPage->leaf; assert( pPage->aCellIdx==&data[cellStart] ); pCellIdx = &data[cellStart + 2*(nCell-1)]; if( !pPage->leaf ){ /* Analyze the right-child page of internal pages */ pgno = get4byte(&data[hdr+8]); #ifndef SQLITE_OMIT_AUTOVACUUM if( pBt->autoVacuum ){ pCheck->zPfx = "On page %d at right child: "; checkPtrmap(pCheck, pgno, PTRMAP_BTREE, iPage); } #endif depth = checkTreePage(pCheck, pgno, &maxKey, maxKey); keyCanBeEqual = 0; }else{ /* For leaf pages, the coverage check will occur in the same loop ** as the other cell checks, so initialize the heap. */ heap = pCheck->heap; heap[0] = 0; } /* EVIDENCE-OF: R-02776-14802 The cell pointer array consists of K 2-byte ** integer offsets to the cell contents. */ for(i=nCell-1; i>=0 && pCheck->mxErr; i--){ CellInfo info; /* Check cell size */ pCheck->v2 = i; assert( pCellIdx==&data[cellStart + i*2] ); pc = get2byteAligned(pCellIdx); pCellIdx -= 2; if( pcusableSize-4 ){ checkAppendMsg(pCheck, "Offset %d out of range %d..%d", pc, contentOffset, usableSize-4); doCoverageCheck = 0; continue; } pCell = &data[pc]; pPage->xParseCell(pPage, pCell, &info); if( pc+info.nSize>usableSize ){ checkAppendMsg(pCheck, "Extends off end of page"); doCoverageCheck = 0; continue; } /* Check for integer primary key out of range */ if( pPage->intKey ){ if( keyCanBeEqual ? (info.nKey > maxKey) : (info.nKey >= maxKey) ){ checkAppendMsg(pCheck, "Rowid %lld out of order", info.nKey); } maxKey = info.nKey; } /* Check the content overflow list */ if( info.nPayload>info.nLocal ){ int nPage; /* Number of pages on the overflow chain */ Pgno pgnoOvfl; /* First page of the overflow chain */ assert( pc + info.iOverflow <= usableSize ); nPage = (info.nPayload - info.nLocal + usableSize - 5)/(usableSize - 4); pgnoOvfl = get4byte(&pCell[info.iOverflow]); #ifndef SQLITE_OMIT_AUTOVACUUM if( pBt->autoVacuum ){ checkPtrmap(pCheck, pgnoOvfl, PTRMAP_OVERFLOW1, iPage); } #endif checkList(pCheck, 0, pgnoOvfl, nPage); } if( !pPage->leaf ){ /* Check sanity of left child page for internal pages */ pgno = get4byte(pCell); #ifndef SQLITE_OMIT_AUTOVACUUM if( pBt->autoVacuum ){ checkPtrmap(pCheck, pgno, PTRMAP_BTREE, iPage); } #endif d2 = checkTreePage(pCheck, pgno, &maxKey, maxKey); keyCanBeEqual = 0; if( d2!=depth ){ checkAppendMsg(pCheck, "Child page depth differs"); depth = d2; } }else{ /* Populate the coverage-checking heap for leaf pages */ btreeHeapInsert(heap, (pc<<16)|(pc+info.nSize-1)); } } *piMinKey = maxKey; /* Check for complete coverage of the page */ pCheck->zPfx = 0; if( doCoverageCheck && pCheck->mxErr>0 ){ /* For leaf pages, the min-heap has already been initialized and the ** cells have already been inserted. 
But for internal pages, that has ** not yet been done, so do it now */ if( !pPage->leaf ){ heap = pCheck->heap; heap[0] = 0; for(i=nCell-1; i>=0; i--){ u32 size; pc = get2byteAligned(&data[cellStart+i*2]); size = pPage->xCellSize(pPage, &data[pc]); btreeHeapInsert(heap, (pc<<16)|(pc+size-1)); } } /* Add the freeblocks to the min-heap ** ** EVIDENCE-OF: R-20690-50594 The second field of the b-tree page header ** is the offset of the first freeblock, or zero if there are no ** freeblocks on the page. */ i = get2byte(&data[hdr+1]); while( i>0 ){ int size, j; assert( (u32)i<=usableSize-4 ); /* Enforced by btreeInitPage() */ size = get2byte(&data[i+2]); assert( (u32)(i+size)<=usableSize ); /* Enforced by btreeInitPage() */ btreeHeapInsert(heap, (((u32)i)<<16)|(i+size-1)); /* EVIDENCE-OF: R-58208-19414 The first 2 bytes of a freeblock are a ** big-endian integer which is the offset in the b-tree page of the next ** freeblock in the chain, or zero if the freeblock is the last on the ** chain. */ j = get2byte(&data[i]); /* EVIDENCE-OF: R-06866-39125 Freeblocks are always connected in order of ** increasing offset. */ assert( j==0 || j>i+size ); /* Enforced by btreeInitPage() */ assert( (u32)j<=usableSize-4 ); /* Enforced by btreeInitPage() */ i = j; } /* Analyze the min-heap looking for overlap between cells and/or ** freeblocks, and counting the number of untracked bytes in nFrag. ** ** Each min-heap entry is of the form: (start_address<<16)|end_address. ** There is an implied first entry the covers the page header, the cell ** pointer index, and the gap between the cell pointer index and the start ** of cell content. ** ** The loop below pulls entries from the min-heap in order and compares ** the start_address against the previous end_address. If there is an ** overlap, that means bytes are used multiple times. If there is a gap, ** that gap is added to the fragmentation count. */ nFrag = 0; prev = contentOffset - 1; /* Implied first min-heap entry */ while( btreeHeapPull(heap,&x) ){ if( (prev&0xffff)>=(x>>16) ){ checkAppendMsg(pCheck, "Multiple uses for byte %u of page %d", x>>16, iPage); break; }else{ nFrag += (x>>16) - (prev&0xffff) - 1; prev = x; } } nFrag += usableSize - (prev&0xffff) - 1; /* EVIDENCE-OF: R-43263-13491 The total number of bytes in all fragments ** is stored in the fifth field of the b-tree page header. ** EVIDENCE-OF: R-07161-27322 The one-byte integer at offset 7 gives the ** number of fragmented free bytes within the cell content area. */ if( heap[0]==0 && nFrag!=data[hdr+7] ){ checkAppendMsg(pCheck, "Fragmentation of %d bytes reported as %d on page %d", nFrag, data[hdr+7], iPage); } } end_of_check: if( !doCoverageCheck ) pPage->isInit = savedIsInit; releasePage(pPage); pCheck->zPfx = saved_zPfx; pCheck->v1 = saved_v1; pCheck->v2 = saved_v2; return depth+1; } #endif /* SQLITE_OMIT_INTEGRITY_CHECK */ #ifndef SQLITE_OMIT_INTEGRITY_CHECK /* ** This routine does a complete check of the given BTree file. aRoot[] is ** an array of pages numbers were each page number is the root page of ** a table. nRoot is the number of entries in aRoot. ** ** A read-only or read-write transaction must be opened before calling ** this function. ** ** Write the number of error seen in *pnErr. Except for some memory ** allocation errors, an error message held in memory obtained from ** malloc is returned if *pnErr is non-zero. If *pnErr==0 then NULL is ** returned. If a memory allocation error occurs, NULL is returned. 
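**
** An illustrative invocation (a sketch only; the aRoot[] contents and the
** error limit below are hypothetical) that checks page 1 plus two other
** trees and allows at most 100 error messages:
**
**     int aRoot[] = { 1, 5, 6 };
**     int nErr = 0;
**     char *zReport = sqlite3BtreeIntegrityCheck(p, aRoot, 3, 100, &nErr);
**     if( zReport ){
**       ... report the nErr problems described in zReport, then ...
**       sqlite3_free(zReport);
**     }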
*/ SQLITE_PRIVATE char *sqlite3BtreeIntegrityCheck( Btree *p, /* The btree to be checked */ int *aRoot, /* An array of root pages numbers for individual trees */ int nRoot, /* Number of entries in aRoot[] */ int mxErr, /* Stop reporting errors after this many */ int *pnErr /* Write number of errors seen to this variable */ ){ Pgno i; IntegrityCk sCheck; BtShared *pBt = p->pBt; int savedDbFlags = pBt->db->flags; char zErr[100]; VVA_ONLY( int nRef ); sqlite3BtreeEnter(p); assert( p->inTrans>TRANS_NONE && pBt->inTransaction>TRANS_NONE ); assert( (nRef = sqlite3PagerRefcount(pBt->pPager))>=0 ); sCheck.pBt = pBt; sCheck.pPager = pBt->pPager; sCheck.nPage = btreePagecount(sCheck.pBt); sCheck.mxErr = mxErr; sCheck.nErr = 0; sCheck.mallocFailed = 0; sCheck.zPfx = 0; sCheck.v1 = 0; sCheck.v2 = 0; sCheck.aPgRef = 0; sCheck.heap = 0; sqlite3StrAccumInit(&sCheck.errMsg, 0, zErr, sizeof(zErr), SQLITE_MAX_LENGTH); if( sCheck.nPage==0 ){ goto integrity_ck_cleanup; } sCheck.aPgRef = sqlite3MallocZero((sCheck.nPage / 8)+ 1); if( !sCheck.aPgRef ){ sCheck.mallocFailed = 1; goto integrity_ck_cleanup; } sCheck.heap = (u32*)sqlite3PageMalloc( pBt->pageSize ); if( sCheck.heap==0 ){ sCheck.mallocFailed = 1; goto integrity_ck_cleanup; } i = PENDING_BYTE_PAGE(pBt); if( i<=sCheck.nPage ) setPageReferenced(&sCheck, i); /* Check the integrity of the freelist */ sCheck.zPfx = "Main freelist: "; checkList(&sCheck, 1, get4byte(&pBt->pPage1->aData[32]), get4byte(&pBt->pPage1->aData[36])); sCheck.zPfx = 0; /* Check all the tables. */ testcase( pBt->db->flags & SQLITE_CellSizeCk ); pBt->db->flags &= ~SQLITE_CellSizeCk; for(i=0; (int)iautoVacuum && aRoot[i]>1 ){ checkPtrmap(&sCheck, aRoot[i], PTRMAP_ROOTPAGE, 0); } #endif checkTreePage(&sCheck, aRoot[i], ¬Used, LARGEST_INT64); } pBt->db->flags = savedDbFlags; /* Make sure every page in the file is referenced */ for(i=1; i<=sCheck.nPage && sCheck.mxErr; i++){ #ifdef SQLITE_OMIT_AUTOVACUUM if( getPageReferenced(&sCheck, i)==0 ){ checkAppendMsg(&sCheck, "Page %d is never used", i); } #else /* If the database supports auto-vacuum, make sure no tables contain ** references to pointer-map pages. */ if( getPageReferenced(&sCheck, i)==0 && (PTRMAP_PAGENO(pBt, i)!=i || !pBt->autoVacuum) ){ checkAppendMsg(&sCheck, "Page %d is never used", i); } if( getPageReferenced(&sCheck, i)!=0 && (PTRMAP_PAGENO(pBt, i)==i && pBt->autoVacuum) ){ checkAppendMsg(&sCheck, "Pointer map page %d is referenced", i); } #endif } /* Clean up and report errors. */ integrity_ck_cleanup: sqlite3PageFree(sCheck.heap); sqlite3_free(sCheck.aPgRef); if( sCheck.mallocFailed ){ sqlite3StrAccumReset(&sCheck.errMsg); sCheck.nErr++; } *pnErr = sCheck.nErr; if( sCheck.nErr==0 ) sqlite3StrAccumReset(&sCheck.errMsg); /* Make sure this analysis did not leave any unref() pages. */ assert( nRef==sqlite3PagerRefcount(pBt->pPager) ); sqlite3BtreeLeave(p); return sqlite3StrAccumFinish(&sCheck.errMsg); } #endif /* SQLITE_OMIT_INTEGRITY_CHECK */ /* ** Return the full pathname of the underlying database file. Return ** an empty string if the database is in-memory or a TEMP database. ** ** The pager filename is invariant as long as the pager is ** open so it is safe to access without the BtShared mutex. */ SQLITE_PRIVATE const char *sqlite3BtreeGetFilename(Btree *p){ assert( p->pBt->pPager!=0 ); return sqlite3PagerFilename(p->pBt->pPager, 1); } /* ** Return the pathname of the journal file for this database. The return ** value of this routine is the same regardless of whether the journal file ** has been created or not. 
** ** The pager journal filename is invariant as long as the pager is ** open so it is safe to access without the BtShared mutex. */ SQLITE_PRIVATE const char *sqlite3BtreeGetJournalname(Btree *p){ assert( p->pBt->pPager!=0 ); return sqlite3PagerJournalname(p->pBt->pPager); } /* ** Return non-zero if a transaction is active. */ SQLITE_PRIVATE int sqlite3BtreeIsInTrans(Btree *p){ assert( p==0 || sqlite3_mutex_held(p->db->mutex) ); return (p && (p->inTrans==TRANS_WRITE)); } #ifndef SQLITE_OMIT_WAL /* ** Run a checkpoint on the Btree passed as the first argument. ** ** Return SQLITE_LOCKED if this or any other connection has an open ** transaction on the shared-cache the argument Btree is connected to. ** ** Parameter eMode is one of SQLITE_CHECKPOINT_PASSIVE, FULL or RESTART. */ SQLITE_PRIVATE int sqlite3BtreeCheckpoint(Btree *p, int eMode, int *pnLog, int *pnCkpt){ int rc = SQLITE_OK; if( p ){ BtShared *pBt = p->pBt; sqlite3BtreeEnter(p); if( pBt->inTransaction!=TRANS_NONE ){ rc = SQLITE_LOCKED; }else{ rc = sqlite3PagerCheckpoint(pBt->pPager, eMode, pnLog, pnCkpt); } sqlite3BtreeLeave(p); } return rc; } #endif /* ** Return non-zero if a read (or write) transaction is active. */ SQLITE_PRIVATE int sqlite3BtreeIsInReadTrans(Btree *p){ assert( p ); assert( sqlite3_mutex_held(p->db->mutex) ); return p->inTrans!=TRANS_NONE; } SQLITE_PRIVATE int sqlite3BtreeIsInBackup(Btree *p){ assert( p ); assert( sqlite3_mutex_held(p->db->mutex) ); return p->nBackup!=0; } /* ** This function returns a pointer to a blob of memory associated with ** a single shared-btree. The memory is used by client code for its own ** purposes (for example, to store a high-level schema associated with ** the shared-btree). The btree layer manages reference counting issues. ** ** The first time this is called on a shared-btree, nBytes bytes of memory ** are allocated, zeroed, and returned to the caller. For each subsequent ** call the nBytes parameter is ignored and a pointer to the same blob ** of memory returned. ** ** If the nBytes parameter is 0 and the blob of memory has not yet been ** allocated, a null pointer is returned. If the blob has already been ** allocated, it is returned as normal. ** ** Just before the shared-btree is closed, the function passed as the ** xFree argument when the memory allocation was made is invoked on the ** blob of allocated memory. The xFree function should not call sqlite3_free() ** on the memory, the btree layer does that. */ SQLITE_PRIVATE void *sqlite3BtreeSchema(Btree *p, int nBytes, void(*xFree)(void *)){ BtShared *pBt = p->pBt; sqlite3BtreeEnter(p); if( !pBt->pSchema && nBytes ){ pBt->pSchema = sqlite3DbMallocZero(0, nBytes); pBt->xFreeSchema = xFree; } sqlite3BtreeLeave(p); return pBt->pSchema; } /* ** Return SQLITE_LOCKED_SHAREDCACHE if another user of the same shared ** btree as the argument handle holds an exclusive lock on the ** sqlite_master table. Otherwise SQLITE_OK. */ SQLITE_PRIVATE int sqlite3BtreeSchemaLocked(Btree *p){ int rc; assert( sqlite3_mutex_held(p->db->mutex) ); sqlite3BtreeEnter(p); rc = querySharedCacheTableLock(p, MASTER_ROOT, READ_LOCK); assert( rc==SQLITE_OK || rc==SQLITE_LOCKED_SHAREDCACHE ); sqlite3BtreeLeave(p); return rc; } #ifndef SQLITE_OMIT_SHARED_CACHE /* ** Obtain a lock on the table whose root page is iTab. The ** lock is a write lock if isWritelock is true or a read lock ** if it is false. 
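**
** The numeric lock type is derived directly from the boolean flag: because
** WRITE_LOCK is defined as READ_LOCK+1 (the asserts below rely on this),
** the implementation simply computes
**
**     u8 lockType = READ_LOCK + isWriteLock;
**
** so isWriteLock==0 yields READ_LOCK and isWriteLock==1 yields WRITE_LOCK.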
*/ SQLITE_PRIVATE int sqlite3BtreeLockTable(Btree *p, int iTab, u8 isWriteLock){ int rc = SQLITE_OK; assert( p->inTrans!=TRANS_NONE ); if( p->sharable ){ u8 lockType = READ_LOCK + isWriteLock; assert( READ_LOCK+1==WRITE_LOCK ); assert( isWriteLock==0 || isWriteLock==1 ); sqlite3BtreeEnter(p); rc = querySharedCacheTableLock(p, iTab, lockType); if( rc==SQLITE_OK ){ rc = setSharedCacheTableLock(p, iTab, lockType); } sqlite3BtreeLeave(p); } return rc; } #endif #ifndef SQLITE_OMIT_INCRBLOB /* ** Argument pCsr must be a cursor opened for writing on an ** INTKEY table currently pointing at a valid table entry. ** This function modifies the data stored as part of that entry. ** ** Only the data content may only be modified, it is not possible to ** change the length of the data stored. If this function is called with ** parameters that attempt to write past the end of the existing data, ** no modifications are made and SQLITE_CORRUPT is returned. */ SQLITE_PRIVATE int sqlite3BtreePutData(BtCursor *pCsr, u32 offset, u32 amt, void *z){ int rc; assert( cursorHoldsMutex(pCsr) ); assert( sqlite3_mutex_held(pCsr->pBtree->db->mutex) ); assert( pCsr->curFlags & BTCF_Incrblob ); rc = restoreCursorPosition(pCsr); if( rc!=SQLITE_OK ){ return rc; } assert( pCsr->eState!=CURSOR_REQUIRESEEK ); if( pCsr->eState!=CURSOR_VALID ){ return SQLITE_ABORT; } /* Save the positions of all other cursors open on this table. This is ** required in case any of them are holding references to an xFetch ** version of the b-tree page modified by the accessPayload call below. ** ** Note that pCsr must be open on a INTKEY table and saveCursorPosition() ** and hence saveAllCursors() cannot fail on a BTREE_INTKEY table, hence ** saveAllCursors can only return SQLITE_OK. */ VVA_ONLY(rc =) saveAllCursors(pCsr->pBt, pCsr->pgnoRoot, pCsr); assert( rc==SQLITE_OK ); /* Check some assumptions: ** (a) the cursor is open for writing, ** (b) there is a read/write transaction open, ** (c) the connection holds a write-lock on the table (if required), ** (d) there are no conflicting read-locks, and ** (e) the cursor points at a valid row of an intKey table. */ if( (pCsr->curFlags & BTCF_WriteFlag)==0 ){ return SQLITE_READONLY; } assert( (pCsr->pBt->btsFlags & BTS_READ_ONLY)==0 && pCsr->pBt->inTransaction==TRANS_WRITE ); assert( hasSharedCacheTableLock(pCsr->pBtree, pCsr->pgnoRoot, 0, 2) ); assert( !hasReadConflicts(pCsr->pBtree, pCsr->pgnoRoot) ); assert( pCsr->apPage[pCsr->iPage]->intKey ); return accessPayload(pCsr, offset, amt, (unsigned char *)z, 1); } /* ** Mark this cursor as an incremental blob cursor. */ SQLITE_PRIVATE void sqlite3BtreeIncrblobCursor(BtCursor *pCur){ pCur->curFlags |= BTCF_Incrblob; pCur->pBtree->hasIncrblobCur = 1; } #endif /* ** Set both the "read version" (single byte at byte offset 18) and ** "write version" (single byte at byte offset 19) fields in the database ** header to iVersion. */ SQLITE_PRIVATE int sqlite3BtreeSetVersion(Btree *pBtree, int iVersion){ BtShared *pBt = pBtree->pBt; int rc; /* Return code */ assert( iVersion==1 || iVersion==2 ); /* If setting the version fields to 1, do not automatically open the ** WAL connection, even if the version fields are currently set to 2. 
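**
** For example, the backup logic later in this file upgrades the header of
** a WAL-mode destination database with a call of the form
**
**     rc = sqlite3BtreeSetVersion(pDest, 2);
**
** whereas passing iVersion==1 requests the legacy (rollback-journal only)
** header values.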
*/ pBt->btsFlags &= ~BTS_NO_WAL; if( iVersion==1 ) pBt->btsFlags |= BTS_NO_WAL; rc = sqlite3BtreeBeginTrans(pBtree, 0); if( rc==SQLITE_OK ){ u8 *aData = pBt->pPage1->aData; if( aData[18]!=(u8)iVersion || aData[19]!=(u8)iVersion ){ rc = sqlite3BtreeBeginTrans(pBtree, 2); if( rc==SQLITE_OK ){ rc = sqlite3PagerWrite(pBt->pPage1->pDbPage); if( rc==SQLITE_OK ){ aData[18] = (u8)iVersion; aData[19] = (u8)iVersion; } } } } pBt->btsFlags &= ~BTS_NO_WAL; return rc; } /* ** set the mask of hint flags for cursor pCsr. */ SQLITE_PRIVATE void sqlite3BtreeCursorHints(BtCursor *pCsr, unsigned int mask){ assert( mask==BTREE_BULKLOAD || mask==BTREE_SEEK_EQ || mask==0 ); pCsr->hints = mask; } #ifdef SQLITE_DEBUG /* ** Return true if the cursor has a hint specified. This routine is ** only used from within assert() statements */ SQLITE_PRIVATE int sqlite3BtreeCursorHasHint(BtCursor *pCsr, unsigned int mask){ return (pCsr->hints & mask)!=0; } #endif /* ** Return true if the given Btree is read-only. */ SQLITE_PRIVATE int sqlite3BtreeIsReadonly(Btree *p){ return (p->pBt->btsFlags & BTS_READ_ONLY)!=0; } /* ** Return the size of the header added to each page by this module. */ SQLITE_PRIVATE int sqlite3HeaderSizeBtree(void){ return ROUND8(sizeof(MemPage)); } /************** End of btree.c ***********************************************/ /************** Begin file backup.c ******************************************/ /* ** 2009 January 28 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** This file contains the implementation of the sqlite3_backup_XXX() ** API functions and the related features. */ /* #include "sqliteInt.h" */ /* #include "btreeInt.h" */ /* ** Structure allocated for each backup operation. */ struct sqlite3_backup { sqlite3* pDestDb; /* Destination database handle */ Btree *pDest; /* Destination b-tree file */ u32 iDestSchema; /* Original schema cookie in destination */ int bDestLocked; /* True once a write-transaction is open on pDest */ Pgno iNext; /* Page number of the next source page to copy */ sqlite3* pSrcDb; /* Source database handle */ Btree *pSrc; /* Source b-tree file */ int rc; /* Backup process error code */ /* These two variables are set by every call to backup_step(). They are ** read by calls to backup_remaining() and backup_pagecount(). */ Pgno nRemaining; /* Number of pages left to copy */ Pgno nPagecount; /* Total number of pages to copy */ int isAttached; /* True once backup has been registered with pager */ sqlite3_backup *pNext; /* Next backup associated with source pager */ }; /* ** THREAD SAFETY NOTES: ** ** Once it has been created using backup_init(), a single sqlite3_backup ** structure may be accessed via two groups of thread-safe entry points: ** ** * Via the sqlite3_backup_XXX() API function backup_step() and ** backup_finish(). Both these functions obtain the source database ** handle mutex and the mutex associated with the source BtShared ** structure, in that order. ** ** * Via the BackupUpdate() and BackupRestart() functions, which are ** invoked by the pager layer to report various state changes in ** the page cache associated with the source database. 
The mutex ** associated with the source database BtShared structure will always ** be held when either of these functions are invoked. ** ** The other sqlite3_backup_XXX() API functions, backup_remaining() and ** backup_pagecount() are not thread-safe functions. If they are called ** while some other thread is calling backup_step() or backup_finish(), ** the values returned may be invalid. There is no way for a call to ** BackupUpdate() or BackupRestart() to interfere with backup_remaining() ** or backup_pagecount(). ** ** Depending on the SQLite configuration, the database handles and/or ** the Btree objects may have their own mutexes that require locking. ** Non-sharable Btrees (in-memory databases for example), do not have ** associated mutexes. */ /* ** Return a pointer corresponding to database zDb (i.e. "main", "temp") ** in connection handle pDb. If such a database cannot be found, return ** a NULL pointer and write an error message to pErrorDb. ** ** If the "temp" database is requested, it may need to be opened by this ** function. If an error occurs while doing so, return 0 and write an ** error message to pErrorDb. */ static Btree *findBtree(sqlite3 *pErrorDb, sqlite3 *pDb, const char *zDb){ int i = sqlite3FindDbName(pDb, zDb); if( i==1 ){ Parse *pParse; int rc = 0; pParse = sqlite3StackAllocZero(pErrorDb, sizeof(*pParse)); if( pParse==0 ){ sqlite3ErrorWithMsg(pErrorDb, SQLITE_NOMEM, "out of memory"); rc = SQLITE_NOMEM; }else{ pParse->db = pDb; if( sqlite3OpenTempDatabase(pParse) ){ sqlite3ErrorWithMsg(pErrorDb, pParse->rc, "%s", pParse->zErrMsg); rc = SQLITE_ERROR; } sqlite3DbFree(pErrorDb, pParse->zErrMsg); sqlite3ParserReset(pParse); sqlite3StackFree(pErrorDb, pParse); } if( rc ){ return 0; } } if( i<0 ){ sqlite3ErrorWithMsg(pErrorDb, SQLITE_ERROR, "unknown database %s", zDb); return 0; } return pDb->aDb[i].pBt; } /* ** Attempt to set the page size of the destination to match the page size ** of the source. */ static int setDestPgsz(sqlite3_backup *p){ int rc; rc = sqlite3BtreeSetPageSize(p->pDest,sqlite3BtreeGetPageSize(p->pSrc),-1,0); return rc; } /* ** Check that there is no open read-transaction on the b-tree passed as the ** second argument. If there is not, return SQLITE_OK. Otherwise, if there ** is an open read-transaction, return SQLITE_ERROR and leave an error ** message in database handle db. */ static int checkReadTransaction(sqlite3 *db, Btree *p){ if( sqlite3BtreeIsInReadTrans(p) ){ sqlite3ErrorWithMsg(db, SQLITE_ERROR, "destination database is in use"); return SQLITE_ERROR; } return SQLITE_OK; } /* ** Create an sqlite3_backup process to copy the contents of zSrcDb from ** connection handle pSrcDb to zDestDb in pDestDb. If successful, return ** a pointer to the new sqlite3_backup object. ** ** If an error occurs, NULL is returned and an error code and error message ** stored in database handle pDestDb. */ SQLITE_API sqlite3_backup *SQLITE_STDCALL sqlite3_backup_init( sqlite3* pDestDb, /* Database to write to */ const char *zDestDb, /* Name of database within pDestDb */ sqlite3* pSrcDb, /* Database connection to read from */ const char *zSrcDb /* Name of database within pSrcDb */ ){ sqlite3_backup *p; /* Value to return */ #ifdef SQLITE_ENABLE_API_ARMOR if( !sqlite3SafetyCheckOk(pSrcDb)||!sqlite3SafetyCheckOk(pDestDb) ){ (void)SQLITE_MISUSE_BKPT; return 0; } #endif /* Lock the source database handle. The destination database ** handle is not locked in this routine, but it is locked in ** sqlite3_backup_step(). 
The user is required to ensure that no ** other thread accesses the destination handle for the duration ** of the backup operation. Any attempt to use the destination ** database connection while a backup is in progress may cause ** a malfunction or a deadlock. */ sqlite3_mutex_enter(pSrcDb->mutex); sqlite3_mutex_enter(pDestDb->mutex); if( pSrcDb==pDestDb ){ sqlite3ErrorWithMsg( pDestDb, SQLITE_ERROR, "source and destination must be distinct" ); p = 0; }else { /* Allocate space for a new sqlite3_backup object... ** EVIDENCE-OF: R-64852-21591 The sqlite3_backup object is created by a ** call to sqlite3_backup_init() and is destroyed by a call to ** sqlite3_backup_finish(). */ p = (sqlite3_backup *)sqlite3MallocZero(sizeof(sqlite3_backup)); if( !p ){ sqlite3Error(pDestDb, SQLITE_NOMEM); } } /* If the allocation succeeded, populate the new object. */ if( p ){ p->pSrc = findBtree(pDestDb, pSrcDb, zSrcDb); p->pDest = findBtree(pDestDb, pDestDb, zDestDb); p->pDestDb = pDestDb; p->pSrcDb = pSrcDb; p->iNext = 1; p->isAttached = 0; if( 0==p->pSrc || 0==p->pDest || setDestPgsz(p)==SQLITE_NOMEM || checkReadTransaction(pDestDb, p->pDest)!=SQLITE_OK ){ /* One (or both) of the named databases did not exist or an OOM ** error was hit. Or there is a transaction open on the destination ** database. The error has already been written into the pDestDb ** handle. All that is left to do here is free the sqlite3_backup ** structure. */ sqlite3_free(p); p = 0; } } if( p ){ p->pSrc->nBackup++; } sqlite3_mutex_leave(pDestDb->mutex); sqlite3_mutex_leave(pSrcDb->mutex); return p; } /* ** Argument rc is an SQLite error code. Return true if this error is ** considered fatal if encountered during a backup operation. All errors ** are considered fatal except for SQLITE_BUSY and SQLITE_LOCKED. */ static int isFatalError(int rc){ return (rc!=SQLITE_OK && rc!=SQLITE_BUSY && ALWAYS(rc!=SQLITE_LOCKED)); } /* ** Parameter zSrcData points to a buffer containing the data for ** page iSrcPg from the source database. Copy this data into the ** destination database. */ static int backupOnePage( sqlite3_backup *p, /* Backup handle */ Pgno iSrcPg, /* Source database page to backup */ const u8 *zSrcData, /* Source database page data */ int bUpdate /* True for an update, false otherwise */ ){ Pager * const pDestPager = sqlite3BtreePager(p->pDest); const int nSrcPgsz = sqlite3BtreeGetPageSize(p->pSrc); int nDestPgsz = sqlite3BtreeGetPageSize(p->pDest); const int nCopy = MIN(nSrcPgsz, nDestPgsz); const i64 iEnd = (i64)iSrcPg*(i64)nSrcPgsz; #ifdef SQLITE_HAS_CODEC /* Use BtreeGetReserveNoMutex() for the source b-tree, as although it is ** guaranteed that the shared-mutex is held by this thread, handle ** p->pSrc may not actually be the owner. */ int nSrcReserve = sqlite3BtreeGetReserveNoMutex(p->pSrc); int nDestReserve = sqlite3BtreeGetOptimalReserve(p->pDest); #endif int rc = SQLITE_OK; i64 iOff; assert( sqlite3BtreeGetReserveNoMutex(p->pSrc)>=0 ); assert( p->bDestLocked ); assert( !isFatalError(p->rc) ); assert( iSrcPg!=PENDING_BYTE_PAGE(p->pSrc->pBt) ); assert( zSrcData ); /* Catch the case where the destination is an in-memory database and the ** page sizes of the source and destination differ. */ if( nSrcPgsz!=nDestPgsz && sqlite3PagerIsMemdb(pDestPager) ){ rc = SQLITE_READONLY; } #ifdef SQLITE_HAS_CODEC /* Backup is not possible if the page size of the destination is changing ** and a codec is in use. 
*/ if( nSrcPgsz!=nDestPgsz && sqlite3PagerGetCodec(pDestPager)!=0 ){ rc = SQLITE_READONLY; } /* Backup is not possible if the number of bytes of reserve space differ ** between source and destination. If there is a difference, try to ** fix the destination to agree with the source. If that is not possible, ** then the backup cannot proceed. */ if( nSrcReserve!=nDestReserve ){ u32 newPgsz = nSrcPgsz; rc = sqlite3PagerSetPagesize(pDestPager, &newPgsz, nSrcReserve); if( rc==SQLITE_OK && newPgsz!=nSrcPgsz ) rc = SQLITE_READONLY; } #endif /* This loop runs once for each destination page spanned by the source ** page. For each iteration, variable iOff is set to the byte offset ** of the destination page. */ for(iOff=iEnd-(i64)nSrcPgsz; rc==SQLITE_OK && iOffpDest->pBt) ) continue; if( SQLITE_OK==(rc = sqlite3PagerGet(pDestPager, iDest, &pDestPg)) && SQLITE_OK==(rc = sqlite3PagerWrite(pDestPg)) ){ const u8 *zIn = &zSrcData[iOff%nSrcPgsz]; u8 *zDestData = sqlite3PagerGetData(pDestPg); u8 *zOut = &zDestData[iOff%nDestPgsz]; /* Copy the data from the source page into the destination page. ** Then clear the Btree layer MemPage.isInit flag. Both this module ** and the pager code use this trick (clearing the first byte ** of the page 'extra' space to invalidate the Btree layers ** cached parse of the page). MemPage.isInit is marked ** "MUST BE FIRST" for this purpose. */ memcpy(zOut, zIn, nCopy); ((u8 *)sqlite3PagerGetExtra(pDestPg))[0] = 0; if( iOff==0 && bUpdate==0 ){ sqlite3Put4byte(&zOut[28], sqlite3BtreeLastPage(p->pSrc)); } } sqlite3PagerUnref(pDestPg); } return rc; } /* ** If pFile is currently larger than iSize bytes, then truncate it to ** exactly iSize bytes. If pFile is not larger than iSize bytes, then ** this function is a no-op. ** ** Return SQLITE_OK if everything is successful, or an SQLite error ** code if an error occurs. */ static int backupTruncateFile(sqlite3_file *pFile, i64 iSize){ i64 iCurrent; int rc = sqlite3OsFileSize(pFile, &iCurrent); if( rc==SQLITE_OK && iCurrent>iSize ){ rc = sqlite3OsTruncate(pFile, iSize); } return rc; } /* ** Register this backup object with the associated source pager for ** callbacks when pages are changed or the cache invalidated. */ static void attachBackupObject(sqlite3_backup *p){ sqlite3_backup **pp; assert( sqlite3BtreeHoldsMutex(p->pSrc) ); pp = sqlite3PagerBackupPtr(sqlite3BtreePager(p->pSrc)); p->pNext = *pp; *pp = p; p->isAttached = 1; } /* ** Copy nPage pages from the source b-tree to the destination. */ SQLITE_API int SQLITE_STDCALL sqlite3_backup_step(sqlite3_backup *p, int nPage){ int rc; int destMode; /* Destination journal mode */ int pgszSrc = 0; /* Source page size */ int pgszDest = 0; /* Destination page size */ #ifdef SQLITE_ENABLE_API_ARMOR if( p==0 ) return SQLITE_MISUSE_BKPT; #endif sqlite3_mutex_enter(p->pSrcDb->mutex); sqlite3BtreeEnter(p->pSrc); if( p->pDestDb ){ sqlite3_mutex_enter(p->pDestDb->mutex); } rc = p->rc; if( !isFatalError(rc) ){ Pager * const pSrcPager = sqlite3BtreePager(p->pSrc); /* Source pager */ Pager * const pDestPager = sqlite3BtreePager(p->pDest); /* Dest pager */ int ii; /* Iterator variable */ int nSrcPage = -1; /* Size of source db in pages */ int bCloseTrans = 0; /* True if src db requires unlocking */ /* If the source pager is currently in a write-transaction, return ** SQLITE_BUSY immediately. */ if( p->pDestDb && p->pSrc->pBt->inTransaction==TRANS_WRITE ){ rc = SQLITE_BUSY; }else{ rc = SQLITE_OK; } /* Lock the destination database, if it is not locked already. 
*/ if( SQLITE_OK==rc && p->bDestLocked==0 && SQLITE_OK==(rc = sqlite3BtreeBeginTrans(p->pDest, 2)) ){ p->bDestLocked = 1; sqlite3BtreeGetMeta(p->pDest, BTREE_SCHEMA_VERSION, &p->iDestSchema); } /* If there is no open read-transaction on the source database, open ** one now. If a transaction is opened here, then it will be closed ** before this function exits. */ if( rc==SQLITE_OK && 0==sqlite3BtreeIsInReadTrans(p->pSrc) ){ rc = sqlite3BtreeBeginTrans(p->pSrc, 0); bCloseTrans = 1; } /* Do not allow backup if the destination database is in WAL mode ** and the page sizes are different between source and destination */ pgszSrc = sqlite3BtreeGetPageSize(p->pSrc); pgszDest = sqlite3BtreeGetPageSize(p->pDest); destMode = sqlite3PagerGetJournalMode(sqlite3BtreePager(p->pDest)); if( SQLITE_OK==rc && destMode==PAGER_JOURNALMODE_WAL && pgszSrc!=pgszDest ){ rc = SQLITE_READONLY; } /* Now that there is a read-lock on the source database, query the ** source pager for the number of pages in the database. */ nSrcPage = (int)sqlite3BtreeLastPage(p->pSrc); assert( nSrcPage>=0 ); for(ii=0; (nPage<0 || iiiNext<=(Pgno)nSrcPage && !rc; ii++){ const Pgno iSrcPg = p->iNext; /* Source page number */ if( iSrcPg!=PENDING_BYTE_PAGE(p->pSrc->pBt) ){ DbPage *pSrcPg; /* Source page object */ rc = sqlite3PagerAcquire(pSrcPager, iSrcPg, &pSrcPg, PAGER_GET_READONLY); if( rc==SQLITE_OK ){ rc = backupOnePage(p, iSrcPg, sqlite3PagerGetData(pSrcPg), 0); sqlite3PagerUnref(pSrcPg); } } p->iNext++; } if( rc==SQLITE_OK ){ p->nPagecount = nSrcPage; p->nRemaining = nSrcPage+1-p->iNext; if( p->iNext>(Pgno)nSrcPage ){ rc = SQLITE_DONE; }else if( !p->isAttached ){ attachBackupObject(p); } } /* Update the schema version field in the destination database. This ** is to make sure that the schema-version really does change in ** the case where the source and destination databases have the ** same schema version. */ if( rc==SQLITE_DONE ){ if( nSrcPage==0 ){ rc = sqlite3BtreeNewDb(p->pDest); nSrcPage = 1; } if( rc==SQLITE_OK || rc==SQLITE_DONE ){ rc = sqlite3BtreeUpdateMeta(p->pDest,1,p->iDestSchema+1); } if( rc==SQLITE_OK ){ if( p->pDestDb ){ sqlite3ResetAllSchemasOfConnection(p->pDestDb); } if( destMode==PAGER_JOURNALMODE_WAL ){ rc = sqlite3BtreeSetVersion(p->pDest, 2); } } if( rc==SQLITE_OK ){ int nDestTruncate; /* Set nDestTruncate to the final number of pages in the destination ** database. The complication here is that the destination page ** size may be different to the source page size. ** ** If the source page size is smaller than the destination page size, ** round up. In this case the call to sqlite3OsTruncate() below will ** fix the size of the file. However it is important to call ** sqlite3PagerTruncateImage() here so that any pages in the ** destination file that lie beyond the nDestTruncate page mark are ** journalled by PagerCommitPhaseOne() before they are destroyed ** by the file truncation. */ assert( pgszSrc==sqlite3BtreeGetPageSize(p->pSrc) ); assert( pgszDest==sqlite3BtreeGetPageSize(p->pDest) ); if( pgszSrcpDest->pBt) ){ nDestTruncate--; } }else{ nDestTruncate = nSrcPage * (pgszSrc/pgszDest); } assert( nDestTruncate>0 ); if( pgszSrc= iSize || ( nDestTruncate==(int)(PENDING_BYTE_PAGE(p->pDest->pBt)-1) && iSize>=PENDING_BYTE && iSize<=PENDING_BYTE+pgszDest )); /* This block ensures that all data required to recreate the original ** database has been stored in the journal for pDestPager and the ** journal synced to disk. 
So at this point we may safely modify ** the database file in any way, knowing that if a power failure ** occurs, the original database will be reconstructed from the ** journal file. */ sqlite3PagerPagecount(pDestPager, &nDstPage); for(iPg=nDestTruncate; rc==SQLITE_OK && iPg<=(Pgno)nDstPage; iPg++){ if( iPg!=PENDING_BYTE_PAGE(p->pDest->pBt) ){ DbPage *pPg; rc = sqlite3PagerGet(pDestPager, iPg, &pPg); if( rc==SQLITE_OK ){ rc = sqlite3PagerWrite(pPg); sqlite3PagerUnref(pPg); } } } if( rc==SQLITE_OK ){ rc = sqlite3PagerCommitPhaseOne(pDestPager, 0, 1); } /* Write the extra pages and truncate the database file as required */ iEnd = MIN(PENDING_BYTE + pgszDest, iSize); for( iOff=PENDING_BYTE+pgszSrc; rc==SQLITE_OK && iOffpDest, 0)) ){ rc = SQLITE_DONE; } } } /* If bCloseTrans is true, then this function opened a read transaction ** on the source database. Close the read transaction here. There is ** no need to check the return values of the btree methods here, as ** "committing" a read-only transaction cannot fail. */ if( bCloseTrans ){ TESTONLY( int rc2 ); TESTONLY( rc2 = ) sqlite3BtreeCommitPhaseOne(p->pSrc, 0); TESTONLY( rc2 |= ) sqlite3BtreeCommitPhaseTwo(p->pSrc, 0); assert( rc2==SQLITE_OK ); } if( rc==SQLITE_IOERR_NOMEM ){ rc = SQLITE_NOMEM; } p->rc = rc; } if( p->pDestDb ){ sqlite3_mutex_leave(p->pDestDb->mutex); } sqlite3BtreeLeave(p->pSrc); sqlite3_mutex_leave(p->pSrcDb->mutex); return rc; } /* ** Release all resources associated with an sqlite3_backup* handle. */ SQLITE_API int SQLITE_STDCALL sqlite3_backup_finish(sqlite3_backup *p){ sqlite3_backup **pp; /* Ptr to head of pagers backup list */ sqlite3 *pSrcDb; /* Source database connection */ int rc; /* Value to return */ /* Enter the mutexes */ if( p==0 ) return SQLITE_OK; pSrcDb = p->pSrcDb; sqlite3_mutex_enter(pSrcDb->mutex); sqlite3BtreeEnter(p->pSrc); if( p->pDestDb ){ sqlite3_mutex_enter(p->pDestDb->mutex); } /* Detach this backup from the source pager. */ if( p->pDestDb ){ p->pSrc->nBackup--; } if( p->isAttached ){ pp = sqlite3PagerBackupPtr(sqlite3BtreePager(p->pSrc)); while( *pp!=p ){ pp = &(*pp)->pNext; } *pp = p->pNext; } /* If a transaction is still open on the Btree, roll it back. */ sqlite3BtreeRollback(p->pDest, SQLITE_OK, 0); /* Set the error code of the destination database handle. */ rc = (p->rc==SQLITE_DONE) ? SQLITE_OK : p->rc; if( p->pDestDb ){ sqlite3Error(p->pDestDb, rc); /* Exit the mutexes and free the backup context structure. */ sqlite3LeaveMutexAndCloseZombie(p->pDestDb); } sqlite3BtreeLeave(p->pSrc); if( p->pDestDb ){ /* EVIDENCE-OF: R-64852-21591 The sqlite3_backup object is created by a ** call to sqlite3_backup_init() and is destroyed by a call to ** sqlite3_backup_finish(). */ sqlite3_free(p); } sqlite3LeaveMutexAndCloseZombie(pSrcDb); return rc; } /* ** Return the number of pages still to be backed up as of the most recent ** call to sqlite3_backup_step(). */ SQLITE_API int SQLITE_STDCALL sqlite3_backup_remaining(sqlite3_backup *p){ #ifdef SQLITE_ENABLE_API_ARMOR if( p==0 ){ (void)SQLITE_MISUSE_BKPT; return 0; } #endif return p->nRemaining; } /* ** Return the total number of pages in the source database as of the most ** recent call to sqlite3_backup_step(). */ SQLITE_API int SQLITE_STDCALL sqlite3_backup_pagecount(sqlite3_backup *p){ #ifdef SQLITE_ENABLE_API_ARMOR if( p==0 ){ (void)SQLITE_MISUSE_BKPT; return 0; } #endif return p->nPagecount; } /* ** This function is called after the contents of page iPage of the ** source database have been modified. 
If page iPage has already been ** copied into the destination database, then the data written to the ** destination is now invalidated. The destination copy of iPage needs ** to be updated with the new data before the backup operation is ** complete. ** ** It is assumed that the mutex associated with the BtShared object ** corresponding to the source database is held when this function is ** called. */ static SQLITE_NOINLINE void backupUpdate( sqlite3_backup *p, Pgno iPage, const u8 *aData ){ assert( p!=0 ); do{ assert( sqlite3_mutex_held(p->pSrc->pBt->mutex) ); if( !isFatalError(p->rc) && iPageiNext ){ /* The backup process p has already copied page iPage. But now it ** has been modified by a transaction on the source pager. Copy ** the new data into the backup. */ int rc; assert( p->pDestDb ); sqlite3_mutex_enter(p->pDestDb->mutex); rc = backupOnePage(p, iPage, aData, 1); sqlite3_mutex_leave(p->pDestDb->mutex); assert( rc!=SQLITE_BUSY && rc!=SQLITE_LOCKED ); if( rc!=SQLITE_OK ){ p->rc = rc; } } }while( (p = p->pNext)!=0 ); } SQLITE_PRIVATE void sqlite3BackupUpdate(sqlite3_backup *pBackup, Pgno iPage, const u8 *aData){ if( pBackup ) backupUpdate(pBackup, iPage, aData); } /* ** Restart the backup process. This is called when the pager layer ** detects that the database has been modified by an external database ** connection. In this case there is no way of knowing which of the ** pages that have been copied into the destination database are still ** valid and which are not, so the entire process needs to be restarted. ** ** It is assumed that the mutex associated with the BtShared object ** corresponding to the source database is held when this function is ** called. */ SQLITE_PRIVATE void sqlite3BackupRestart(sqlite3_backup *pBackup){ sqlite3_backup *p; /* Iterator variable */ for(p=pBackup; p; p=p->pNext){ assert( sqlite3_mutex_held(p->pSrc->pBt->mutex) ); p->iNext = 1; } } #ifndef SQLITE_OMIT_VACUUM /* ** Copy the complete content of pBtFrom into pBtTo. A transaction ** must be active for both files. ** ** The size of file pTo may be reduced by this operation. If anything ** goes wrong, the transaction on pTo is rolled back. If successful, the ** transaction is committed before returning. */ SQLITE_PRIVATE int sqlite3BtreeCopyFile(Btree *pTo, Btree *pFrom){ int rc; sqlite3_file *pFd; /* File descriptor for database pTo */ sqlite3_backup b; sqlite3BtreeEnter(pTo); sqlite3BtreeEnter(pFrom); assert( sqlite3BtreeIsInTrans(pTo) ); pFd = sqlite3PagerFile(sqlite3BtreePager(pTo)); if( pFd->pMethods ){ i64 nByte = sqlite3BtreeGetPageSize(pFrom)*(i64)sqlite3BtreeLastPage(pFrom); rc = sqlite3OsFileControl(pFd, SQLITE_FCNTL_OVERWRITE, &nByte); if( rc==SQLITE_NOTFOUND ) rc = SQLITE_OK; if( rc ) goto copy_finished; } /* Set up an sqlite3_backup object. sqlite3_backup.pDestDb must be set ** to 0. This is used by the implementations of sqlite3_backup_step() ** and sqlite3_backup_finish() to detect that they are being called ** from this function, not directly by the user. */ memset(&b, 0, sizeof(b)); b.pSrcDb = pFrom->db; b.pSrc = pFrom; b.pDest = pTo; b.iNext = 1; /* 0x7FFFFFFF is the hard limit for the number of pages in a database ** file. By passing this as the number of pages to copy to ** sqlite3_backup_step(), we can guarantee that the copy finishes ** within a single call (unless an error occurs). The assert() statement ** checks this assumption - (p->rc) should be set to either SQLITE_DONE ** or an error code. 
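**
** For comparison, the equivalent copy performed through the public backup
** API (an illustrative sketch only, not code used by this routine; the
** database handles and the "main" schema names are assumptions) is:
**
**     sqlite3_backup *pBk = sqlite3_backup_init(pToDb, "main", pFromDb, "main");
**     if( pBk ){
**       sqlite3_backup_step(pBk, -1);
**       rc = sqlite3_backup_finish(pBk);
**     }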
*/ sqlite3_backup_step(&b, 0x7FFFFFFF); assert( b.rc!=SQLITE_OK ); rc = sqlite3_backup_finish(&b); if( rc==SQLITE_OK ){ pTo->pBt->btsFlags &= ~BTS_PAGESIZE_FIXED; }else{ sqlite3PagerClearCache(sqlite3BtreePager(b.pDest)); } assert( sqlite3BtreeIsInTrans(pTo)==0 ); copy_finished: sqlite3BtreeLeave(pFrom); sqlite3BtreeLeave(pTo); return rc; } #endif /* SQLITE_OMIT_VACUUM */ /************** End of backup.c **********************************************/ /************** Begin file vdbemem.c *****************************************/ /* ** 2004 May 26 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** ** This file contains code use to manipulate "Mem" structure. A "Mem" ** stores a single value in the VDBE. Mem is an opaque structure visible ** only within the VDBE. Interface routines refer to a Mem using the ** name sqlite_value */ /* #include "sqliteInt.h" */ /* #include "vdbeInt.h" */ #ifdef SQLITE_DEBUG /* ** Check invariants on a Mem object. ** ** This routine is intended for use inside of assert() statements, like ** this: assert( sqlite3VdbeCheckMemInvariants(pMem) ); */ SQLITE_PRIVATE int sqlite3VdbeCheckMemInvariants(Mem *p){ /* If MEM_Dyn is set then Mem.xDel!=0. ** Mem.xDel is might not be initialized if MEM_Dyn is clear. */ assert( (p->flags & MEM_Dyn)==0 || p->xDel!=0 ); /* MEM_Dyn may only be set if Mem.szMalloc==0. In this way we ** ensure that if Mem.szMalloc>0 then it is safe to do ** Mem.z = Mem.zMalloc without having to check Mem.flags&MEM_Dyn. ** That saves a few cycles in inner loops. */ assert( (p->flags & MEM_Dyn)==0 || p->szMalloc==0 ); /* Cannot be both MEM_Int and MEM_Real at the same time */ assert( (p->flags & (MEM_Int|MEM_Real))!=(MEM_Int|MEM_Real) ); /* The szMalloc field holds the correct memory allocation size */ assert( p->szMalloc==0 || p->szMalloc==sqlite3DbMallocSize(p->db,p->zMalloc) ); /* If p holds a string or blob, the Mem.z must point to exactly ** one of the following: ** ** (1) Memory in Mem.zMalloc and managed by the Mem object ** (2) Memory to be freed using Mem.xDel ** (3) An ephemeral string or blob ** (4) A static string or blob */ if( (p->flags & (MEM_Str|MEM_Blob)) && p->n>0 ){ assert( ((p->szMalloc>0 && p->z==p->zMalloc)? 1 : 0) + ((p->flags&MEM_Dyn)!=0 ? 1 : 0) + ((p->flags&MEM_Ephem)!=0 ? 1 : 0) + ((p->flags&MEM_Static)!=0 ? 1 : 0) == 1 ); } return 1; } #endif /* ** If pMem is an object with a valid string representation, this routine ** ensures the internal encoding for the string representation is ** 'desiredEnc', one of SQLITE_UTF8, SQLITE_UTF16LE or SQLITE_UTF16BE. ** ** If pMem is not a string object, or the encoding of the string ** representation is already stored using the requested encoding, then this ** routine is a no-op. ** ** SQLITE_OK is returned if the conversion is successful (or not required). ** SQLITE_NOMEM may be returned if a malloc() fails during conversion ** between formats. 
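**
** As an informal illustration of the expected behaviour: a Mem holding the
** UTF-8 text "abc" (pMem->n==3) that is converted with
** desiredEnc==SQLITE_UTF16LE comes back with pMem->enc==SQLITE_UTF16LE and
** pMem->n==6, while a Mem carrying only MEM_Blob or a numeric type is
** returned unchanged with SQLITE_OK.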
*/ SQLITE_PRIVATE int sqlite3VdbeChangeEncoding(Mem *pMem, int desiredEnc){ #ifndef SQLITE_OMIT_UTF16 int rc; #endif assert( (pMem->flags&MEM_RowSet)==0 ); assert( desiredEnc==SQLITE_UTF8 || desiredEnc==SQLITE_UTF16LE || desiredEnc==SQLITE_UTF16BE ); if( !(pMem->flags&MEM_Str) || pMem->enc==desiredEnc ){ return SQLITE_OK; } assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); #ifdef SQLITE_OMIT_UTF16 return SQLITE_ERROR; #else /* MemTranslate() may return SQLITE_OK or SQLITE_NOMEM. If NOMEM is returned, ** then the encoding of the value may not have changed. */ rc = sqlite3VdbeMemTranslate(pMem, (u8)desiredEnc); assert(rc==SQLITE_OK || rc==SQLITE_NOMEM); assert(rc==SQLITE_OK || pMem->enc!=desiredEnc); assert(rc==SQLITE_NOMEM || pMem->enc==desiredEnc); return rc; #endif } /* ** Make sure pMem->z points to a writable allocation of at least ** min(n,32) bytes. ** ** If the bPreserve argument is true, then copy of the content of ** pMem->z into the new allocation. pMem must be either a string or ** blob if bPreserve is true. If bPreserve is false, any prior content ** in pMem->z is discarded. */ SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3VdbeMemGrow(Mem *pMem, int n, int bPreserve){ assert( sqlite3VdbeCheckMemInvariants(pMem) ); assert( (pMem->flags&MEM_RowSet)==0 ); /* If the bPreserve flag is set to true, then the memory cell must already ** contain a valid string or blob value. */ assert( bPreserve==0 || pMem->flags&(MEM_Blob|MEM_Str) ); testcase( bPreserve && pMem->z==0 ); assert( pMem->szMalloc==0 || pMem->szMalloc==sqlite3DbMallocSize(pMem->db, pMem->zMalloc) ); if( pMem->szMallocszMalloc>0 && pMem->z==pMem->zMalloc ){ pMem->z = pMem->zMalloc = sqlite3DbReallocOrFree(pMem->db, pMem->z, n); bPreserve = 0; }else{ if( pMem->szMalloc>0 ) sqlite3DbFree(pMem->db, pMem->zMalloc); pMem->zMalloc = sqlite3DbMallocRaw(pMem->db, n); } if( pMem->zMalloc==0 ){ sqlite3VdbeMemSetNull(pMem); pMem->z = 0; pMem->szMalloc = 0; return SQLITE_NOMEM; }else{ pMem->szMalloc = sqlite3DbMallocSize(pMem->db, pMem->zMalloc); } } if( bPreserve && pMem->z && pMem->z!=pMem->zMalloc ){ memcpy(pMem->zMalloc, pMem->z, pMem->n); } if( (pMem->flags&MEM_Dyn)!=0 ){ assert( pMem->xDel!=0 && pMem->xDel!=SQLITE_DYNAMIC ); pMem->xDel((void *)(pMem->z)); } pMem->z = pMem->zMalloc; pMem->flags &= ~(MEM_Dyn|MEM_Ephem|MEM_Static); return SQLITE_OK; } /* ** Change the pMem->zMalloc allocation to be at least szNew bytes. ** If pMem->zMalloc already meets or exceeds the requested size, this ** routine is a no-op. ** ** Any prior string or blob content in the pMem object may be discarded. ** The pMem->xDel destructor is called, if it exists. Though MEM_Str ** and MEM_Blob values may be discarded, MEM_Int, MEM_Real, and MEM_Null ** values are preserved. ** ** Return SQLITE_OK on success or an error code (probably SQLITE_NOMEM) ** if unable to complete the resizing. */ SQLITE_PRIVATE int sqlite3VdbeMemClearAndResize(Mem *pMem, int szNew){ assert( szNew>0 ); assert( (pMem->flags & MEM_Dyn)==0 || pMem->szMalloc==0 ); if( pMem->szMallocflags & MEM_Dyn)==0 ); pMem->z = pMem->zMalloc; pMem->flags &= (MEM_Null|MEM_Int|MEM_Real); return SQLITE_OK; } /* ** Change pMem so that its MEM_Str or MEM_Blob value is stored in ** MEM.zMalloc, where it can be safely written. ** ** Return SQLITE_OK on success or SQLITE_NOMEM if malloc fails. 
*/ SQLITE_PRIVATE int sqlite3VdbeMemMakeWriteable(Mem *pMem){ int f; assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); assert( (pMem->flags&MEM_RowSet)==0 ); ExpandBlob(pMem); f = pMem->flags; if( (f&(MEM_Str|MEM_Blob)) && (pMem->szMalloc==0 || pMem->z!=pMem->zMalloc) ){ if( sqlite3VdbeMemGrow(pMem, pMem->n + 2, 1) ){ return SQLITE_NOMEM; } pMem->z[pMem->n] = 0; pMem->z[pMem->n+1] = 0; pMem->flags |= MEM_Term; } pMem->flags &= ~MEM_Ephem; #ifdef SQLITE_DEBUG pMem->pScopyFrom = 0; #endif return SQLITE_OK; } /* ** If the given Mem* has a zero-filled tail, turn it into an ordinary ** blob stored in dynamically allocated space. */ #ifndef SQLITE_OMIT_INCRBLOB SQLITE_PRIVATE int sqlite3VdbeMemExpandBlob(Mem *pMem){ if( pMem->flags & MEM_Zero ){ int nByte; assert( pMem->flags&MEM_Blob ); assert( (pMem->flags&MEM_RowSet)==0 ); assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); /* Set nByte to the number of bytes required to store the expanded blob. */ nByte = pMem->n + pMem->u.nZero; if( nByte<=0 ){ nByte = 1; } if( sqlite3VdbeMemGrow(pMem, nByte, 1) ){ return SQLITE_NOMEM; } memset(&pMem->z[pMem->n], 0, pMem->u.nZero); pMem->n += pMem->u.nZero; pMem->flags &= ~(MEM_Zero|MEM_Term); } return SQLITE_OK; } #endif /* ** It is already known that pMem contains an unterminated string. ** Add the zero terminator. */ static SQLITE_NOINLINE int vdbeMemAddTerminator(Mem *pMem){ if( sqlite3VdbeMemGrow(pMem, pMem->n+2, 1) ){ return SQLITE_NOMEM; } pMem->z[pMem->n] = 0; pMem->z[pMem->n+1] = 0; pMem->flags |= MEM_Term; return SQLITE_OK; } /* ** Make sure the given Mem is \u0000 terminated. */ SQLITE_PRIVATE int sqlite3VdbeMemNulTerminate(Mem *pMem){ assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); testcase( (pMem->flags & (MEM_Term|MEM_Str))==(MEM_Term|MEM_Str) ); testcase( (pMem->flags & (MEM_Term|MEM_Str))==0 ); if( (pMem->flags & (MEM_Term|MEM_Str))!=MEM_Str ){ return SQLITE_OK; /* Nothing to do */ }else{ return vdbeMemAddTerminator(pMem); } } /* ** Add MEM_Str to the set of representations for the given Mem. Numbers ** are converted using sqlite3_snprintf(). Converting a BLOB to a string ** is a no-op. ** ** Existing representations MEM_Int and MEM_Real are invalidated if ** bForce is true but are retained if bForce is false. ** ** A MEM_Null value will never be passed to this function. This function is ** used for converting values to text for returning to the user (i.e. via ** sqlite3_value_text()), or for ensuring that values to be used as btree ** keys are strings. In the former case a NULL pointer is returned the ** user and the latter is an internal programming error. */ SQLITE_PRIVATE int sqlite3VdbeMemStringify(Mem *pMem, u8 enc, u8 bForce){ int fg = pMem->flags; const int nByte = 32; assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); assert( !(fg&MEM_Zero) ); assert( !(fg&(MEM_Str|MEM_Blob)) ); assert( fg&(MEM_Int|MEM_Real) ); assert( (pMem->flags&MEM_RowSet)==0 ); assert( EIGHT_BYTE_ALIGNMENT(pMem) ); if( sqlite3VdbeMemClearAndResize(pMem, nByte) ){ return SQLITE_NOMEM; } /* For a Real or Integer, use sqlite3_snprintf() to produce the UTF-8 ** string representation of the value. Then, if the required encoding ** is UTF-16le or UTF-16be do a translation. ** ** FIX ME: It would be better if sqlite3_snprintf() could do UTF-16. 
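**
** Concretely, the integer 42 first becomes the UTF-8 string "42" and the
** real value 0.25 becomes "0.25"; only after that UTF-8 conversion is the
** result translated when enc names one of the UTF-16 encodings.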
*/ if( fg & MEM_Int ){ sqlite3_snprintf(nByte, pMem->z, "%lld", pMem->u.i); }else{ assert( fg & MEM_Real ); sqlite3_snprintf(nByte, pMem->z, "%!.15g", pMem->u.r); } pMem->n = sqlite3Strlen30(pMem->z); pMem->enc = SQLITE_UTF8; pMem->flags |= MEM_Str|MEM_Term; if( bForce ) pMem->flags &= ~(MEM_Int|MEM_Real); sqlite3VdbeChangeEncoding(pMem, enc); return SQLITE_OK; } /* ** Memory cell pMem contains the context of an aggregate function. ** This routine calls the finalize method for that function. The ** result of the aggregate is stored back into pMem. ** ** Return SQLITE_ERROR if the finalizer reports an error. SQLITE_OK ** otherwise. */ SQLITE_PRIVATE int sqlite3VdbeMemFinalize(Mem *pMem, FuncDef *pFunc){ int rc = SQLITE_OK; if( ALWAYS(pFunc && pFunc->xFinalize) ){ sqlite3_context ctx; Mem t; assert( (pMem->flags & MEM_Null)!=0 || pFunc==pMem->u.pDef ); assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); memset(&ctx, 0, sizeof(ctx)); memset(&t, 0, sizeof(t)); t.flags = MEM_Null; t.db = pMem->db; ctx.pOut = &t; ctx.pMem = pMem; ctx.pFunc = pFunc; pFunc->xFinalize(&ctx); /* IMP: R-24505-23230 */ assert( (pMem->flags & MEM_Dyn)==0 ); if( pMem->szMalloc>0 ) sqlite3DbFree(pMem->db, pMem->zMalloc); memcpy(pMem, &t, sizeof(t)); rc = ctx.isError; } return rc; } /* ** If the memory cell contains a value that must be freed by ** invoking the external callback in Mem.xDel, then this routine ** will free that value. It also sets Mem.flags to MEM_Null. ** ** This is a helper routine for sqlite3VdbeMemSetNull() and ** for sqlite3VdbeMemRelease(). Use those other routines as the ** entry point for releasing Mem resources. */ static SQLITE_NOINLINE void vdbeMemClearExternAndSetNull(Mem *p){ assert( p->db==0 || sqlite3_mutex_held(p->db->mutex) ); assert( VdbeMemDynamic(p) ); if( p->flags&MEM_Agg ){ sqlite3VdbeMemFinalize(p, p->u.pDef); assert( (p->flags & MEM_Agg)==0 ); testcase( p->flags & MEM_Dyn ); } if( p->flags&MEM_Dyn ){ assert( (p->flags&MEM_RowSet)==0 ); assert( p->xDel!=SQLITE_DYNAMIC && p->xDel!=0 ); p->xDel((void *)p->z); }else if( p->flags&MEM_RowSet ){ sqlite3RowSetClear(p->u.pRowSet); }else if( p->flags&MEM_Frame ){ VdbeFrame *pFrame = p->u.pFrame; pFrame->pParent = pFrame->v->pDelFrame; pFrame->v->pDelFrame = pFrame; } p->flags = MEM_Null; } /* ** Release memory held by the Mem p, both external memory cleared ** by p->xDel and memory in p->zMalloc. ** ** This is a helper routine invoked by sqlite3VdbeMemRelease() in ** the unusual case where there really is memory in p that needs ** to be freed. */ static SQLITE_NOINLINE void vdbeMemClear(Mem *p){ if( VdbeMemDynamic(p) ){ vdbeMemClearExternAndSetNull(p); } if( p->szMalloc ){ sqlite3DbFree(p->db, p->zMalloc); p->szMalloc = 0; } p->z = 0; } /* ** Release any memory resources held by the Mem. Both the memory that is ** free by Mem.xDel and the Mem.zMalloc allocation are freed. ** ** Use this routine prior to clean up prior to abandoning a Mem, or to ** reset a Mem back to its minimum memory utilization. ** ** Use sqlite3VdbeMemSetNull() to release just the Mem.xDel space ** prior to inserting new content into the Mem. */ SQLITE_PRIVATE void sqlite3VdbeMemRelease(Mem *p){ assert( sqlite3VdbeCheckMemInvariants(p) ); if( VdbeMemDynamic(p) || p->szMalloc ){ vdbeMemClear(p); } } /* ** Convert a 64-bit IEEE double into a 64-bit signed integer. ** If the double is out of range of a 64-bit signed integer then ** return the closest available 64-bit signed integer. 
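**
** For example, doubleToInt64(1e300) returns LARGEST_INT64 and
** doubleToInt64(-1e300) returns SMALLEST_INT64, while doubleToInt64(3.9)
** simply truncates toward zero and returns 3.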
*/ static i64 doubleToInt64(double r){ #ifdef SQLITE_OMIT_FLOATING_POINT /* When floating-point is omitted, double and int64 are the same thing */ return r; #else /* ** Many compilers we encounter do not define constants for the ** minimum and maximum 64-bit integers, or they define them ** inconsistently. And many do not understand the "LL" notation. ** So we define our own static constants here using nothing ** larger than a 32-bit integer constant. */ static const i64 maxInt = LARGEST_INT64; static const i64 minInt = SMALLEST_INT64; if( r<=(double)minInt ){ return minInt; }else if( r>=(double)maxInt ){ return maxInt; }else{ return (i64)r; } #endif } /* ** Return some kind of integer value which is the best we can do ** at representing the value that *pMem describes as an integer. ** If pMem is an integer, then the value is exact. If pMem is ** a floating-point then the value returned is the integer part. ** If pMem is a string or blob, then we make an attempt to convert ** it into an integer and return that. If pMem represents an ** an SQL-NULL value, return 0. ** ** If pMem represents a string value, its encoding might be changed. */ SQLITE_PRIVATE i64 sqlite3VdbeIntValue(Mem *pMem){ int flags; assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); assert( EIGHT_BYTE_ALIGNMENT(pMem) ); flags = pMem->flags; if( flags & MEM_Int ){ return pMem->u.i; }else if( flags & MEM_Real ){ return doubleToInt64(pMem->u.r); }else if( flags & (MEM_Str|MEM_Blob) ){ i64 value = 0; assert( pMem->z || pMem->n==0 ); sqlite3Atoi64(pMem->z, &value, pMem->n, pMem->enc); return value; }else{ return 0; } } /* ** Return the best representation of pMem that we can get into a ** double. If pMem is already a double or an integer, return its ** value. If it is a string or blob, try to convert it to a double. ** If it is a NULL, return 0.0. */ SQLITE_PRIVATE double sqlite3VdbeRealValue(Mem *pMem){ assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); assert( EIGHT_BYTE_ALIGNMENT(pMem) ); if( pMem->flags & MEM_Real ){ return pMem->u.r; }else if( pMem->flags & MEM_Int ){ return (double)pMem->u.i; }else if( pMem->flags & (MEM_Str|MEM_Blob) ){ /* (double)0 In case of SQLITE_OMIT_FLOATING_POINT... */ double val = (double)0; sqlite3AtoF(pMem->z, &val, pMem->n, pMem->enc); return val; }else{ /* (double)0 In case of SQLITE_OMIT_FLOATING_POINT... */ return (double)0; } } /* ** The MEM structure is already a MEM_Real. Try to also make it a ** MEM_Int if we can. */ SQLITE_PRIVATE void sqlite3VdbeIntegerAffinity(Mem *pMem){ i64 ix; assert( pMem->flags & MEM_Real ); assert( (pMem->flags & MEM_RowSet)==0 ); assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); assert( EIGHT_BYTE_ALIGNMENT(pMem) ); ix = doubleToInt64(pMem->u.r); /* Only mark the value as an integer if ** ** (1) the round-trip conversion real->int->real is a no-op, and ** (2) The integer is neither the largest nor the smallest ** possible integer (ticket #3922) ** ** The second and third terms in the following conditional enforces ** the second condition under the assumption that addition overflow causes ** values to wrap around. */ if( pMem->u.r==ix && ix>SMALLEST_INT64 && ixu.i = ix; MemSetTypeFlag(pMem, MEM_Int); } } /* ** Convert pMem to type integer. Invalidate any prior representations. 
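**
** For instance, the text value ' 11abc' integerifies to 11 (sqlite3Atoi64()
** converts the longest numeric prefix), the real value 3.9 becomes 3 via
** doubleToInt64(), and an SQL NULL becomes the integer 0.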
*/ SQLITE_PRIVATE int sqlite3VdbeMemIntegerify(Mem *pMem){ assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); assert( (pMem->flags & MEM_RowSet)==0 ); assert( EIGHT_BYTE_ALIGNMENT(pMem) ); pMem->u.i = sqlite3VdbeIntValue(pMem); MemSetTypeFlag(pMem, MEM_Int); return SQLITE_OK; } /* ** Convert pMem so that it is of type MEM_Real. ** Invalidate any prior representations. */ SQLITE_PRIVATE int sqlite3VdbeMemRealify(Mem *pMem){ assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); assert( EIGHT_BYTE_ALIGNMENT(pMem) ); pMem->u.r = sqlite3VdbeRealValue(pMem); MemSetTypeFlag(pMem, MEM_Real); return SQLITE_OK; } /* ** Convert pMem so that it has types MEM_Real or MEM_Int or both. ** Invalidate any prior representations. ** ** Every effort is made to force the conversion, even if the input ** is a string that does not look completely like a number. Convert ** as much of the string as we can and ignore the rest. */ SQLITE_PRIVATE int sqlite3VdbeMemNumerify(Mem *pMem){ if( (pMem->flags & (MEM_Int|MEM_Real|MEM_Null))==0 ){ assert( (pMem->flags & (MEM_Blob|MEM_Str))!=0 ); assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); if( 0==sqlite3Atoi64(pMem->z, &pMem->u.i, pMem->n, pMem->enc) ){ MemSetTypeFlag(pMem, MEM_Int); }else{ pMem->u.r = sqlite3VdbeRealValue(pMem); MemSetTypeFlag(pMem, MEM_Real); sqlite3VdbeIntegerAffinity(pMem); } } assert( (pMem->flags & (MEM_Int|MEM_Real|MEM_Null))!=0 ); pMem->flags &= ~(MEM_Str|MEM_Blob); return SQLITE_OK; } /* ** Cast the datatype of the value in pMem according to the affinity ** "aff". Casting is different from applying affinity in that a cast ** is forced. In other words, the value is converted into the desired ** affinity even if that results in loss of data. This routine is ** used (for example) to implement the SQL "cast()" operator. */ SQLITE_PRIVATE void sqlite3VdbeMemCast(Mem *pMem, u8 aff, u8 encoding){ if( pMem->flags & MEM_Null ) return; switch( aff ){ case SQLITE_AFF_BLOB: { /* Really a cast to BLOB */ if( (pMem->flags & MEM_Blob)==0 ){ sqlite3ValueApplyAffinity(pMem, SQLITE_AFF_TEXT, encoding); assert( pMem->flags & MEM_Str || pMem->db->mallocFailed ); MemSetTypeFlag(pMem, MEM_Blob); }else{ pMem->flags &= ~(MEM_TypeMask&~MEM_Blob); } break; } case SQLITE_AFF_NUMERIC: { sqlite3VdbeMemNumerify(pMem); break; } case SQLITE_AFF_INTEGER: { sqlite3VdbeMemIntegerify(pMem); break; } case SQLITE_AFF_REAL: { sqlite3VdbeMemRealify(pMem); break; } default: { assert( aff==SQLITE_AFF_TEXT ); assert( MEM_Str==(MEM_Blob>>3) ); pMem->flags |= (pMem->flags&MEM_Blob)>>3; sqlite3ValueApplyAffinity(pMem, SQLITE_AFF_TEXT, encoding); assert( pMem->flags & MEM_Str || pMem->db->mallocFailed ); pMem->flags &= ~(MEM_Int|MEM_Real|MEM_Blob|MEM_Zero); break; } } } /* ** Initialize bulk memory to be a consistent Mem object. ** ** The minimum amount of initialization feasible is performed. */ SQLITE_PRIVATE void sqlite3VdbeMemInit(Mem *pMem, sqlite3 *db, u16 flags){ assert( (flags & ~MEM_TypeMask)==0 ); pMem->flags = flags; pMem->db = db; pMem->szMalloc = 0; } /* ** Delete any previous value and set the value stored in *pMem to NULL. ** ** This routine calls the Mem.xDel destructor to dispose of values that ** require the destructor. But it preserves the Mem.zMalloc memory allocation. ** To free all resources, use sqlite3VdbeMemRelease(), which both calls this ** routine to invoke the destructor and deallocates Mem.zMalloc. ** ** Use this routine to reset the Mem prior to insert a new value. 
** ** Use sqlite3VdbeMemRelease() to complete erase the Mem prior to abandoning it. */ SQLITE_PRIVATE void sqlite3VdbeMemSetNull(Mem *pMem){ if( VdbeMemDynamic(pMem) ){ vdbeMemClearExternAndSetNull(pMem); }else{ pMem->flags = MEM_Null; } } SQLITE_PRIVATE void sqlite3ValueSetNull(sqlite3_value *p){ sqlite3VdbeMemSetNull((Mem*)p); } /* ** Delete any previous value and set the value to be a BLOB of length ** n containing all zeros. */ SQLITE_PRIVATE void sqlite3VdbeMemSetZeroBlob(Mem *pMem, int n){ sqlite3VdbeMemRelease(pMem); pMem->flags = MEM_Blob|MEM_Zero; pMem->n = 0; if( n<0 ) n = 0; pMem->u.nZero = n; pMem->enc = SQLITE_UTF8; pMem->z = 0; } /* ** The pMem is known to contain content that needs to be destroyed prior ** to a value change. So invoke the destructor, then set the value to ** a 64-bit integer. */ static SQLITE_NOINLINE void vdbeReleaseAndSetInt64(Mem *pMem, i64 val){ sqlite3VdbeMemSetNull(pMem); pMem->u.i = val; pMem->flags = MEM_Int; } /* ** Delete any previous value and set the value stored in *pMem to val, ** manifest type INTEGER. */ SQLITE_PRIVATE void sqlite3VdbeMemSetInt64(Mem *pMem, i64 val){ if( VdbeMemDynamic(pMem) ){ vdbeReleaseAndSetInt64(pMem, val); }else{ pMem->u.i = val; pMem->flags = MEM_Int; } } #ifndef SQLITE_OMIT_FLOATING_POINT /* ** Delete any previous value and set the value stored in *pMem to val, ** manifest type REAL. */ SQLITE_PRIVATE void sqlite3VdbeMemSetDouble(Mem *pMem, double val){ sqlite3VdbeMemSetNull(pMem); if( !sqlite3IsNaN(val) ){ pMem->u.r = val; pMem->flags = MEM_Real; } } #endif /* ** Delete any previous value and set the value of pMem to be an ** empty boolean index. */ SQLITE_PRIVATE void sqlite3VdbeMemSetRowSet(Mem *pMem){ sqlite3 *db = pMem->db; assert( db!=0 ); assert( (pMem->flags & MEM_RowSet)==0 ); sqlite3VdbeMemRelease(pMem); pMem->zMalloc = sqlite3DbMallocRaw(db, 64); if( db->mallocFailed ){ pMem->flags = MEM_Null; pMem->szMalloc = 0; }else{ assert( pMem->zMalloc ); pMem->szMalloc = sqlite3DbMallocSize(db, pMem->zMalloc); pMem->u.pRowSet = sqlite3RowSetInit(db, pMem->zMalloc, pMem->szMalloc); assert( pMem->u.pRowSet!=0 ); pMem->flags = MEM_RowSet; } } /* ** Return true if the Mem object contains a TEXT or BLOB that is ** too large - whose size exceeds SQLITE_MAX_LENGTH. */ SQLITE_PRIVATE int sqlite3VdbeMemTooBig(Mem *p){ assert( p->db!=0 ); if( p->flags & (MEM_Str|MEM_Blob) ){ int n = p->n; if( p->flags & MEM_Zero ){ n += p->u.nZero; } return n>p->db->aLimit[SQLITE_LIMIT_LENGTH]; } return 0; } #ifdef SQLITE_DEBUG /* ** This routine prepares a memory cell for modification by breaking ** its link to a shallow copy and by marking any current shallow ** copies of this cell as invalid. ** ** This is used for testing and debugging only - to make sure shallow ** copies are not misused. */ SQLITE_PRIVATE void sqlite3VdbeMemAboutToChange(Vdbe *pVdbe, Mem *pMem){ int i; Mem *pX; for(i=1, pX=&pVdbe->aMem[1]; i<=pVdbe->nMem; i++, pX++){ if( pX->pScopyFrom==pMem ){ pX->flags |= MEM_Undefined; pX->pScopyFrom = 0; } } pMem->pScopyFrom = 0; } #endif /* SQLITE_DEBUG */ /* ** Make an shallow copy of pFrom into pTo. Prior contents of ** pTo are freed. The pFrom->z field is not duplicated. If ** pFrom->z is used, then pTo->z points to the same thing as pFrom->z ** and flags gets srcType (either MEM_Ephem or MEM_Static). 
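**
** A shallow copy is therefore only as durable as the buffer it aliases:
** a copy tagged MEM_Ephem must not be used after pFrom's storage is
** reused. sqlite3VdbeMemCopy() below is the deep-copy variant - it marks
** the copied string or blob MEM_Ephem and then calls
** sqlite3VdbeMemMakeWriteable() so the destination owns its own buffer.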
*/ static SQLITE_NOINLINE void vdbeClrCopy(Mem *pTo, const Mem *pFrom, int eType){ vdbeMemClearExternAndSetNull(pTo); assert( !VdbeMemDynamic(pTo) ); sqlite3VdbeMemShallowCopy(pTo, pFrom, eType); } SQLITE_PRIVATE void sqlite3VdbeMemShallowCopy(Mem *pTo, const Mem *pFrom, int srcType){ assert( (pFrom->flags & MEM_RowSet)==0 ); assert( pTo->db==pFrom->db ); if( VdbeMemDynamic(pTo) ){ vdbeClrCopy(pTo,pFrom,srcType); return; } memcpy(pTo, pFrom, MEMCELLSIZE); if( (pFrom->flags&MEM_Static)==0 ){ pTo->flags &= ~(MEM_Dyn|MEM_Static|MEM_Ephem); assert( srcType==MEM_Ephem || srcType==MEM_Static ); pTo->flags |= srcType; } } /* ** Make a full copy of pFrom into pTo. Prior contents of pTo are ** freed before the copy is made. */ SQLITE_PRIVATE int sqlite3VdbeMemCopy(Mem *pTo, const Mem *pFrom){ int rc = SQLITE_OK; /* The pFrom==0 case in the following assert() is when an sqlite3_value ** from sqlite3_value_dup() is used as the argument ** to sqlite3_result_value(). */ assert( pTo->db==pFrom->db || pFrom->db==0 ); assert( (pFrom->flags & MEM_RowSet)==0 ); if( VdbeMemDynamic(pTo) ) vdbeMemClearExternAndSetNull(pTo); memcpy(pTo, pFrom, MEMCELLSIZE); pTo->flags &= ~MEM_Dyn; if( pTo->flags&(MEM_Str|MEM_Blob) ){ if( 0==(pFrom->flags&MEM_Static) ){ pTo->flags |= MEM_Ephem; rc = sqlite3VdbeMemMakeWriteable(pTo); } } return rc; } /* ** Transfer the contents of pFrom to pTo. Any existing value in pTo is ** freed. If pFrom contains ephemeral data, a copy is made. ** ** pFrom contains an SQL NULL when this routine returns. */ SQLITE_PRIVATE void sqlite3VdbeMemMove(Mem *pTo, Mem *pFrom){ assert( pFrom->db==0 || sqlite3_mutex_held(pFrom->db->mutex) ); assert( pTo->db==0 || sqlite3_mutex_held(pTo->db->mutex) ); assert( pFrom->db==0 || pTo->db==0 || pFrom->db==pTo->db ); sqlite3VdbeMemRelease(pTo); memcpy(pTo, pFrom, sizeof(Mem)); pFrom->flags = MEM_Null; pFrom->szMalloc = 0; } /* ** Change the value of a Mem to be a string or a BLOB. ** ** The memory management strategy depends on the value of the xDel ** parameter. If the value passed is SQLITE_TRANSIENT, then the ** string is copied into a (possibly existing) buffer managed by the ** Mem structure. Otherwise, any existing buffer is freed and the ** pointer copied. ** ** If the string is too large (if it exceeds the SQLITE_LIMIT_LENGTH ** size limit) then no memory allocation occurs. If the string can be ** stored without allocating memory, then it is. If a memory allocation ** is required to store the string, then value of pMem is unchanged. In ** either case, SQLITE_TOOBIG is returned. */ SQLITE_PRIVATE int sqlite3VdbeMemSetStr( Mem *pMem, /* Memory cell to set to string value */ const char *z, /* String pointer */ int n, /* Bytes in string, or negative */ u8 enc, /* Encoding of z. 0 for BLOBs */ void (*xDel)(void*) /* Destructor function */ ){ int nByte = n; /* New value for pMem->n */ int iLimit; /* Maximum allowed string or blob size */ u16 flags = 0; /* New value for pMem->flags */ assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); assert( (pMem->flags & MEM_RowSet)==0 ); /* If z is a NULL pointer, set pMem to contain an SQL NULL. 
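** Otherwise the xDel argument selects the memory-management strategy. As
** a hypothetical illustration: sqlite3VdbeMemSetStr(pMem, "abc", -1,
** SQLITE_UTF8, SQLITE_STATIC) merely aliases the constant string,
** passing SQLITE_TRANSIENT instead copies the bytes into pMem->zMalloc,
** and SQLITE_DYNAMIC hands ownership of a buffer obtained from
** sqlite3DbMalloc() over to the Mem object.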
*/ if( !z ){ sqlite3VdbeMemSetNull(pMem); return SQLITE_OK; } if( pMem->db ){ iLimit = pMem->db->aLimit[SQLITE_LIMIT_LENGTH]; }else{ iLimit = SQLITE_MAX_LENGTH; } flags = (enc==0?MEM_Blob:MEM_Str); if( nByte<0 ){ assert( enc!=0 ); if( enc==SQLITE_UTF8 ){ nByte = sqlite3Strlen30(z); if( nByte>iLimit ) nByte = iLimit+1; }else{ for(nByte=0; nByte<=iLimit && (z[nByte] | z[nByte+1]); nByte+=2){} } flags |= MEM_Term; } /* The following block sets the new values of Mem.z and Mem.xDel. It ** also sets a flag in local variable "flags" to indicate the memory ** management (one of MEM_Dyn or MEM_Static). */ if( xDel==SQLITE_TRANSIENT ){ int nAlloc = nByte; if( flags&MEM_Term ){ nAlloc += (enc==SQLITE_UTF8?1:2); } if( nByte>iLimit ){ return SQLITE_TOOBIG; } testcase( nAlloc==0 ); testcase( nAlloc==31 ); testcase( nAlloc==32 ); if( sqlite3VdbeMemClearAndResize(pMem, MAX(nAlloc,32)) ){ return SQLITE_NOMEM; } memcpy(pMem->z, z, nAlloc); }else if( xDel==SQLITE_DYNAMIC ){ sqlite3VdbeMemRelease(pMem); pMem->zMalloc = pMem->z = (char *)z; pMem->szMalloc = sqlite3DbMallocSize(pMem->db, pMem->zMalloc); }else{ sqlite3VdbeMemRelease(pMem); pMem->z = (char *)z; pMem->xDel = xDel; flags |= ((xDel==SQLITE_STATIC)?MEM_Static:MEM_Dyn); } pMem->n = nByte; pMem->flags = flags; pMem->enc = (enc==0 ? SQLITE_UTF8 : enc); #ifndef SQLITE_OMIT_UTF16 if( pMem->enc!=SQLITE_UTF8 && sqlite3VdbeMemHandleBom(pMem) ){ return SQLITE_NOMEM; } #endif if( nByte>iLimit ){ return SQLITE_TOOBIG; } return SQLITE_OK; } /* ** Move data out of a btree key or data field and into a Mem structure. ** The data or key is taken from the entry that pCur is currently pointing ** to. offset and amt determine what portion of the data or key to retrieve. ** key is true to get the key or false to get data. The result is written ** into the pMem element. ** ** The pMem object must have been initialized. This routine will use ** pMem->zMalloc to hold the content from the btree, if possible. New ** pMem->zMalloc space will be allocated if necessary. The calling routine ** is responsible for making sure that the pMem object is eventually ** destroyed. ** ** If this routine fails for any reason (malloc returns NULL or unable ** to read from the disk) then the pMem is left in an inconsistent state. */ static SQLITE_NOINLINE int vdbeMemFromBtreeResize( BtCursor *pCur, /* Cursor pointing at record to retrieve. */ u32 offset, /* Offset from the start of data to return bytes from. */ u32 amt, /* Number of bytes to return. */ int key, /* If true, retrieve from the btree key, not data. */ Mem *pMem /* OUT: Return data in this Mem structure. */ ){ int rc; pMem->flags = MEM_Null; if( SQLITE_OK==(rc = sqlite3VdbeMemClearAndResize(pMem, amt+2)) ){ if( key ){ rc = sqlite3BtreeKey(pCur, offset, amt, pMem->z); }else{ rc = sqlite3BtreeData(pCur, offset, amt, pMem->z); } if( rc==SQLITE_OK ){ pMem->z[amt] = 0; pMem->z[amt+1] = 0; pMem->flags = MEM_Blob|MEM_Term; pMem->n = (int)amt; }else{ sqlite3VdbeMemRelease(pMem); } } return rc; } SQLITE_PRIVATE int sqlite3VdbeMemFromBtree( BtCursor *pCur, /* Cursor pointing at record to retrieve. */ u32 offset, /* Offset from the start of data to return bytes from. */ u32 amt, /* Number of bytes to return. */ int key, /* If true, retrieve from the btree key, not data. */ Mem *pMem /* OUT: Return data in this Mem structure. 
*/ ){ char *zData; /* Data from the btree layer */ u32 available = 0; /* Number of bytes available on the local btree page */ int rc = SQLITE_OK; /* Return code */ assert( sqlite3BtreeCursorIsValid(pCur) ); assert( !VdbeMemDynamic(pMem) ); /* Note: the calls to BtreeKeyFetch() and DataFetch() below assert() ** that both the BtShared and database handle mutexes are held. */ assert( (pMem->flags & MEM_RowSet)==0 ); if( key ){ zData = (char *)sqlite3BtreeKeyFetch(pCur, &available); }else{ zData = (char *)sqlite3BtreeDataFetch(pCur, &available); } assert( zData!=0 ); if( offset+amt<=available ){ pMem->z = &zData[offset]; pMem->flags = MEM_Blob|MEM_Ephem; pMem->n = (int)amt; }else{ rc = vdbeMemFromBtreeResize(pCur, offset, amt, key, pMem); } return rc; } /* ** The pVal argument is known to be a value other than NULL. ** Convert it into a string with encoding enc and return a pointer ** to a zero-terminated version of that string. */ static SQLITE_NOINLINE const void *valueToText(sqlite3_value* pVal, u8 enc){ assert( pVal!=0 ); assert( pVal->db==0 || sqlite3_mutex_held(pVal->db->mutex) ); assert( (enc&3)==(enc&~SQLITE_UTF16_ALIGNED) ); assert( (pVal->flags & MEM_RowSet)==0 ); assert( (pVal->flags & (MEM_Null))==0 ); if( pVal->flags & (MEM_Blob|MEM_Str) ){ pVal->flags |= MEM_Str; if( pVal->flags & MEM_Zero ){ sqlite3VdbeMemExpandBlob(pVal); } if( pVal->enc != (enc & ~SQLITE_UTF16_ALIGNED) ){ sqlite3VdbeChangeEncoding(pVal, enc & ~SQLITE_UTF16_ALIGNED); } if( (enc & SQLITE_UTF16_ALIGNED)!=0 && 1==(1&SQLITE_PTR_TO_INT(pVal->z)) ){ assert( (pVal->flags & (MEM_Ephem|MEM_Static))!=0 ); if( sqlite3VdbeMemMakeWriteable(pVal)!=SQLITE_OK ){ return 0; } } sqlite3VdbeMemNulTerminate(pVal); /* IMP: R-31275-44060 */ }else{ sqlite3VdbeMemStringify(pVal, enc, 0); assert( 0==(1&SQLITE_PTR_TO_INT(pVal->z)) ); } assert(pVal->enc==(enc & ~SQLITE_UTF16_ALIGNED) || pVal->db==0 || pVal->db->mallocFailed ); if( pVal->enc==(enc & ~SQLITE_UTF16_ALIGNED) ){ return pVal->z; }else{ return 0; } } /* This function is only available internally, it is not part of the ** external API. It works in a similar way to sqlite3_value_text(), ** except the data returned is in the encoding specified by the second ** parameter, which must be one of SQLITE_UTF16BE, SQLITE_UTF16LE or ** SQLITE_UTF8. ** ** (2006-02-16:) The enc value can be or-ed with SQLITE_UTF16_ALIGNED. ** If that is the case, then the result must be aligned on an even byte ** boundary. */ SQLITE_PRIVATE const void *sqlite3ValueText(sqlite3_value* pVal, u8 enc){ if( !pVal ) return 0; assert( pVal->db==0 || sqlite3_mutex_held(pVal->db->mutex) ); assert( (enc&3)==(enc&~SQLITE_UTF16_ALIGNED) ); assert( (pVal->flags & MEM_RowSet)==0 ); if( (pVal->flags&(MEM_Str|MEM_Term))==(MEM_Str|MEM_Term) && pVal->enc==enc ){ return pVal->z; } if( pVal->flags&MEM_Null ){ return 0; } return valueToText(pVal, enc); } /* ** Create a new sqlite3_value object. */ SQLITE_PRIVATE sqlite3_value *sqlite3ValueNew(sqlite3 *db){ Mem *p = sqlite3DbMallocZero(db, sizeof(*p)); if( p ){ p->flags = MEM_Null; p->db = db; } return p; } /* ** Context object passed by sqlite3Stat4ProbeSetValue() through to ** valueNew(). See comments above valueNew() for details. */ struct ValueNewStat4Ctx { Parse *pParse; Index *pIdx; UnpackedRecord **ppRec; int iVal; }; /* ** Allocate and return a pointer to a new sqlite3_value object. If ** the second argument to this function is NULL, the object is allocated ** by calling sqlite3ValueNew(). 
** ** Otherwise, if the second argument is non-zero, then this function is ** being called indirectly by sqlite3Stat4ProbeSetValue(). If it has not ** already been allocated, allocate the UnpackedRecord structure that ** that function will return to its caller here. Then return a pointer to ** an sqlite3_value within the UnpackedRecord.a[] array. */ static sqlite3_value *valueNew(sqlite3 *db, struct ValueNewStat4Ctx *p){ #ifdef SQLITE_ENABLE_STAT3_OR_STAT4 if( p ){ UnpackedRecord *pRec = p->ppRec[0]; if( pRec==0 ){ Index *pIdx = p->pIdx; /* Index being probed */ int nByte; /* Bytes of space to allocate */ int i; /* Counter variable */ int nCol = pIdx->nColumn; /* Number of index columns including rowid */ nByte = sizeof(Mem) * nCol + ROUND8(sizeof(UnpackedRecord)); pRec = (UnpackedRecord*)sqlite3DbMallocZero(db, nByte); if( pRec ){ pRec->pKeyInfo = sqlite3KeyInfoOfIndex(p->pParse, pIdx); if( pRec->pKeyInfo ){ assert( pRec->pKeyInfo->nField+pRec->pKeyInfo->nXField==nCol ); assert( pRec->pKeyInfo->enc==ENC(db) ); pRec->aMem = (Mem *)((u8*)pRec + ROUND8(sizeof(UnpackedRecord))); for(i=0; iaMem[i].flags = MEM_Null; pRec->aMem[i].db = db; } }else{ sqlite3DbFree(db, pRec); pRec = 0; } } if( pRec==0 ) return 0; p->ppRec[0] = pRec; } pRec->nField = p->iVal+1; return &pRec->aMem[p->iVal]; } #else UNUSED_PARAMETER(p); #endif /* defined(SQLITE_ENABLE_STAT3_OR_STAT4) */ return sqlite3ValueNew(db); } /* ** The expression object indicated by the second argument is guaranteed ** to be a scalar SQL function. If ** ** * all function arguments are SQL literals, ** * the SQLITE_FUNC_CONSTANT function flag is set, and ** * the SQLITE_FUNC_NEEDCOLL function flag is not set, ** ** then this routine attempts to invoke the SQL function. Assuming no ** error occurs, output parameter (*ppVal) is set to point to a value ** object containing the result before returning SQLITE_OK. ** ** Affinity aff is applied to the result of the function before returning. ** If the result is a text value, the sqlite3_value object uses encoding ** enc. ** ** If the conditions above are not met, this function returns SQLITE_OK ** and sets (*ppVal) to NULL. Or, if an error occurs, (*ppVal) is set to ** NULL and an SQLite error code returned. 
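**
** As an informal example: with STAT3/STAT4 enabled, a probe expression such
** as substr('abcdef',1,3) can be folded here to the text value 'abc',
** because substr() carries the SQLITE_FUNC_CONSTANT flag and needs no
** collating sequence, whereas a call to random(), or a call whose arguments
** are not literals, is left alone and (*ppVal) stays NULL.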
*/ #ifdef SQLITE_ENABLE_STAT3_OR_STAT4 static int valueFromFunction( sqlite3 *db, /* The database connection */ Expr *p, /* The expression to evaluate */ u8 enc, /* Encoding to use */ u8 aff, /* Affinity to use */ sqlite3_value **ppVal, /* Write the new value here */ struct ValueNewStat4Ctx *pCtx /* Second argument for valueNew() */ ){ sqlite3_context ctx; /* Context object for function invocation */ sqlite3_value **apVal = 0; /* Function arguments */ int nVal = 0; /* Size of apVal[] array */ FuncDef *pFunc = 0; /* Function definition */ sqlite3_value *pVal = 0; /* New value */ int rc = SQLITE_OK; /* Return code */ int nName; /* Size of function name in bytes */ ExprList *pList = 0; /* Function arguments */ int i; /* Iterator variable */ assert( pCtx!=0 ); assert( (p->flags & EP_TokenOnly)==0 ); pList = p->x.pList; if( pList ) nVal = pList->nExpr; nName = sqlite3Strlen30(p->u.zToken); pFunc = sqlite3FindFunction(db, p->u.zToken, nName, nVal, enc, 0); assert( pFunc ); if( (pFunc->funcFlags & SQLITE_FUNC_CONSTANT)==0 || (pFunc->funcFlags & SQLITE_FUNC_NEEDCOLL) ){ return SQLITE_OK; } if( pList ){ apVal = (sqlite3_value**)sqlite3DbMallocZero(db, sizeof(apVal[0]) * nVal); if( apVal==0 ){ rc = SQLITE_NOMEM; goto value_from_function_out; } for(i=0; ia[i].pExpr, enc, aff, &apVal[i]); if( apVal[i]==0 || rc!=SQLITE_OK ) goto value_from_function_out; } } pVal = valueNew(db, pCtx); if( pVal==0 ){ rc = SQLITE_NOMEM; goto value_from_function_out; } assert( pCtx->pParse->rc==SQLITE_OK ); memset(&ctx, 0, sizeof(ctx)); ctx.pOut = pVal; ctx.pFunc = pFunc; pFunc->xFunc(&ctx, nVal, apVal); if( ctx.isError ){ rc = ctx.isError; sqlite3ErrorMsg(pCtx->pParse, "%s", sqlite3_value_text(pVal)); }else{ sqlite3ValueApplyAffinity(pVal, aff, SQLITE_UTF8); assert( rc==SQLITE_OK ); rc = sqlite3VdbeChangeEncoding(pVal, enc); if( rc==SQLITE_OK && sqlite3VdbeMemTooBig(pVal) ){ rc = SQLITE_TOOBIG; pCtx->pParse->nErr++; } } pCtx->pParse->rc = rc; value_from_function_out: if( rc!=SQLITE_OK ){ pVal = 0; } if( apVal ){ for(i=0; iop)==TK_UPLUS ) pExpr = pExpr->pLeft; if( NEVER(op==TK_REGISTER) ) op = pExpr->op2; /* Compressed expressions only appear when parsing the DEFAULT clause ** on a table column definition, and hence only when pCtx==0. This ** check ensures that an EP_TokenOnly expression is never passed down ** into valueFromFunction(). */ assert( (pExpr->flags & EP_TokenOnly)==0 || pCtx==0 ); if( op==TK_CAST ){ u8 aff = sqlite3AffinityType(pExpr->u.zToken,0); rc = valueFromExpr(db, pExpr->pLeft, enc, aff, ppVal, pCtx); testcase( rc!=SQLITE_OK ); if( *ppVal ){ sqlite3VdbeMemCast(*ppVal, aff, SQLITE_UTF8); sqlite3ValueApplyAffinity(*ppVal, affinity, SQLITE_UTF8); } return rc; } /* Handle negative integers in a single step. This is needed in the ** case when the value is -9223372036854775808. 
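** (The positive literal 9223372036854775808 does not fit in a signed 64-bit
** integer, so parsing it first and negating afterwards would overflow;
** folding the minus sign into the literal keeps the value representable.)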
*/ if( op==TK_UMINUS && (pExpr->pLeft->op==TK_INTEGER || pExpr->pLeft->op==TK_FLOAT) ){ pExpr = pExpr->pLeft; op = pExpr->op; negInt = -1; zNeg = "-"; } if( op==TK_STRING || op==TK_FLOAT || op==TK_INTEGER ){ pVal = valueNew(db, pCtx); if( pVal==0 ) goto no_mem; if( ExprHasProperty(pExpr, EP_IntValue) ){ sqlite3VdbeMemSetInt64(pVal, (i64)pExpr->u.iValue*negInt); }else{ zVal = sqlite3MPrintf(db, "%s%s", zNeg, pExpr->u.zToken); if( zVal==0 ) goto no_mem; sqlite3ValueSetStr(pVal, -1, zVal, SQLITE_UTF8, SQLITE_DYNAMIC); } if( (op==TK_INTEGER || op==TK_FLOAT ) && affinity==SQLITE_AFF_BLOB ){ sqlite3ValueApplyAffinity(pVal, SQLITE_AFF_NUMERIC, SQLITE_UTF8); }else{ sqlite3ValueApplyAffinity(pVal, affinity, SQLITE_UTF8); } if( pVal->flags & (MEM_Int|MEM_Real) ) pVal->flags &= ~MEM_Str; if( enc!=SQLITE_UTF8 ){ rc = sqlite3VdbeChangeEncoding(pVal, enc); } }else if( op==TK_UMINUS ) { /* This branch happens for multiple negative signs. Ex: -(-5) */ if( SQLITE_OK==sqlite3ValueFromExpr(db,pExpr->pLeft,enc,affinity,&pVal) && pVal!=0 ){ sqlite3VdbeMemNumerify(pVal); if( pVal->flags & MEM_Real ){ pVal->u.r = -pVal->u.r; }else if( pVal->u.i==SMALLEST_INT64 ){ pVal->u.r = -(double)SMALLEST_INT64; MemSetTypeFlag(pVal, MEM_Real); }else{ pVal->u.i = -pVal->u.i; } sqlite3ValueApplyAffinity(pVal, affinity, enc); } }else if( op==TK_NULL ){ pVal = valueNew(db, pCtx); if( pVal==0 ) goto no_mem; } #ifndef SQLITE_OMIT_BLOB_LITERAL else if( op==TK_BLOB ){ int nVal; assert( pExpr->u.zToken[0]=='x' || pExpr->u.zToken[0]=='X' ); assert( pExpr->u.zToken[1]=='\'' ); pVal = valueNew(db, pCtx); if( !pVal ) goto no_mem; zVal = &pExpr->u.zToken[2]; nVal = sqlite3Strlen30(zVal)-1; assert( zVal[nVal]=='\'' ); sqlite3VdbeMemSetStr(pVal, sqlite3HexToBlob(db, zVal, nVal), nVal/2, 0, SQLITE_DYNAMIC); } #endif #ifdef SQLITE_ENABLE_STAT3_OR_STAT4 else if( op==TK_FUNCTION && pCtx!=0 ){ rc = valueFromFunction(db, pExpr, enc, affinity, &pVal, pCtx); } #endif *ppVal = pVal; return rc; no_mem: db->mallocFailed = 1; sqlite3DbFree(db, zVal); assert( *ppVal==0 ); #ifdef SQLITE_ENABLE_STAT3_OR_STAT4 if( pCtx==0 ) sqlite3ValueFree(pVal); #else assert( pCtx==0 ); sqlite3ValueFree(pVal); #endif return SQLITE_NOMEM; } /* ** Create a new sqlite3_value object, containing the value of pExpr. ** ** This only works for very simple expressions that consist of one constant ** token (i.e. "5", "5.1", "'a string'"). If the expression can ** be converted directly into a value, then the value is allocated and ** a pointer written to *ppVal. The caller is responsible for deallocating ** the value by passing it to sqlite3ValueFree() later on. If the expression ** cannot be converted to a value, then *ppVal is set to NULL. */ SQLITE_PRIVATE int sqlite3ValueFromExpr( sqlite3 *db, /* The database connection */ Expr *pExpr, /* The expression to evaluate */ u8 enc, /* Encoding to use */ u8 affinity, /* Affinity to use */ sqlite3_value **ppVal /* Write the new value here */ ){ return valueFromExpr(db, pExpr, enc, affinity, ppVal, 0); } #ifdef SQLITE_ENABLE_STAT3_OR_STAT4 /* ** The implementation of the sqlite_record() function. This function accepts ** a single argument of any type. The return value is a formatted database ** record (a blob) containing the argument value. ** ** This is used to convert the value stored in the 'sample' column of the ** sqlite_stat3 table to the record format SQLite uses internally. 
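**
** The blob built below follows the usual record layout: a one-byte
** header-size (nSerial+1), then the serial-type varint for the single
** value, then the serialized value itself - 1+nSerial+nVal bytes in total.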
*/ static void recordFunc( sqlite3_context *context, int argc, sqlite3_value **argv ){ const int file_format = 1; int iSerial; /* Serial type */ int nSerial; /* Bytes of space for iSerial as varint */ int nVal; /* Bytes of space required for argv[0] */ int nRet; sqlite3 *db; u8 *aRet; UNUSED_PARAMETER( argc ); iSerial = sqlite3VdbeSerialType(argv[0], file_format); nSerial = sqlite3VarintLen(iSerial); nVal = sqlite3VdbeSerialTypeLen(iSerial); db = sqlite3_context_db_handle(context); nRet = 1 + nSerial + nVal; aRet = sqlite3DbMallocRaw(db, nRet); if( aRet==0 ){ sqlite3_result_error_nomem(context); }else{ aRet[0] = nSerial+1; putVarint32(&aRet[1], iSerial); sqlite3VdbeSerialPut(&aRet[1+nSerial], argv[0], iSerial); sqlite3_result_blob(context, aRet, nRet, SQLITE_TRANSIENT); sqlite3DbFree(db, aRet); } } /* ** Register built-in functions used to help read ANALYZE data. */ SQLITE_PRIVATE void sqlite3AnalyzeFunctions(void){ static SQLITE_WSD FuncDef aAnalyzeTableFuncs[] = { FUNCTION(sqlite_record, 1, 0, 0, recordFunc), }; int i; FuncDefHash *pHash = &GLOBAL(FuncDefHash, sqlite3GlobalFunctions); FuncDef *aFunc = (FuncDef*)&GLOBAL(FuncDef, aAnalyzeTableFuncs); for(i=0; idb; /* Skip over any TK_COLLATE nodes */ pExpr = sqlite3ExprSkipCollate(pExpr); if( !pExpr ){ pVal = valueNew(db, pAlloc); if( pVal ){ sqlite3VdbeMemSetNull((Mem*)pVal); } }else if( pExpr->op==TK_VARIABLE || NEVER(pExpr->op==TK_REGISTER && pExpr->op2==TK_VARIABLE) ){ Vdbe *v; int iBindVar = pExpr->iColumn; sqlite3VdbeSetVarmask(pParse->pVdbe, iBindVar); if( (v = pParse->pReprepare)!=0 ){ pVal = valueNew(db, pAlloc); if( pVal ){ rc = sqlite3VdbeMemCopy((Mem*)pVal, &v->aVar[iBindVar-1]); if( rc==SQLITE_OK ){ sqlite3ValueApplyAffinity(pVal, affinity, ENC(db)); } pVal->db = pParse->db; } } }else{ rc = valueFromExpr(db, pExpr, ENC(db), affinity, &pVal, pAlloc); } assert( pVal==0 || pVal->db==db ); *ppVal = pVal; return rc; } /* ** This function is used to allocate and populate UnpackedRecord ** structures intended to be compared against sample index keys stored ** in the sqlite_stat4 table. ** ** A single call to this function attempts to populates field iVal (leftmost ** is 0 etc.) of the unpacked record with a value extracted from expression ** pExpr. Extraction of values is possible if: ** ** * (pExpr==0). In this case the value is assumed to be an SQL NULL, ** ** * The expression is a bound variable, and this is a reprepare, or ** ** * The sqlite3ValueFromExpr() function is able to extract a value ** from the expression (i.e. the expression is a literal value). ** ** If a value can be extracted, the affinity passed as the 5th argument ** is applied to it before it is copied into the UnpackedRecord. Output ** parameter *pbOk is set to true if a value is extracted, or false ** otherwise. ** ** When this function is called, *ppRec must either point to an object ** allocated by an earlier call to this function, or must be NULL. If it ** is NULL and a value can be successfully extracted, a new UnpackedRecord ** is allocated (and *ppRec set to point to it) before returning. ** ** Unless an error is encountered, SQLITE_OK is returned. It is not an ** error if a value cannot be extracted from pExpr. If an error does ** occur, an SQLite error code is returned. 
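**
** A caller inside the query planner might use it roughly as follows
** (illustrative sketch only; pTerm, affinity and the surrounding error
** handling are assumed names, not code from this file):
**
**     UnpackedRecord *pRec = 0;
**     int bOk = 0;
**     rc = sqlite3Stat4ProbeSetValue(pParse, pIdx, &pRec, pTerm->pExpr,
**                                    affinity, 0, &bOk);
**     if( rc==SQLITE_OK && bOk ){
**       // compare pRec against the sqlite_stat4 samples here
**     }
**     sqlite3Stat4ProbeFree(pRec);
**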
*/ SQLITE_PRIVATE int sqlite3Stat4ProbeSetValue( Parse *pParse, /* Parse context */ Index *pIdx, /* Index being probed */ UnpackedRecord **ppRec, /* IN/OUT: Probe record */ Expr *pExpr, /* The expression to extract a value from */ u8 affinity, /* Affinity to use */ int iVal, /* Array element to populate */ int *pbOk /* OUT: True if value was extracted */ ){ int rc; sqlite3_value *pVal = 0; struct ValueNewStat4Ctx alloc; alloc.pParse = pParse; alloc.pIdx = pIdx; alloc.ppRec = ppRec; alloc.iVal = iVal; rc = stat4ValueFromExpr(pParse, pExpr, affinity, &alloc, &pVal); assert( pVal==0 || pVal->db==pParse->db ); *pbOk = (pVal!=0); return rc; } /* ** Attempt to extract a value from expression pExpr using the methods ** as described for sqlite3Stat4ProbeSetValue() above. ** ** If successful, set *ppVal to point to a new value object and return ** SQLITE_OK. If no value can be extracted, but no other error occurs ** (e.g. OOM), return SQLITE_OK and set *ppVal to NULL. Or, if an error ** does occur, return an SQLite error code. The final value of *ppVal ** is undefined in this case. */ SQLITE_PRIVATE int sqlite3Stat4ValueFromExpr( Parse *pParse, /* Parse context */ Expr *pExpr, /* The expression to extract a value from */ u8 affinity, /* Affinity to use */ sqlite3_value **ppVal /* OUT: New value object (or NULL) */ ){ return stat4ValueFromExpr(pParse, pExpr, affinity, 0, ppVal); } /* ** Extract the iCol-th column from the nRec-byte record in pRec. Write ** the column value into *ppVal. If *ppVal is initially NULL then a new ** sqlite3_value object is allocated. ** ** If *ppVal is initially NULL then the caller is responsible for ** ensuring that the value written into *ppVal is eventually freed. */ SQLITE_PRIVATE int sqlite3Stat4Column( sqlite3 *db, /* Database handle */ const void *pRec, /* Pointer to buffer containing record */ int nRec, /* Size of buffer pRec in bytes */ int iCol, /* Column to extract */ sqlite3_value **ppVal /* OUT: Extracted value */ ){ u32 t; /* a column type code */ int nHdr; /* Size of the header in the record */ int iHdr; /* Next unread header byte */ int iField; /* Next unread data byte */ int szField; /* Size of the current data field */ int i; /* Column index */ u8 *a = (u8*)pRec; /* Typecast byte array */ Mem *pMem = *ppVal; /* Write result into this Mem object */ assert( iCol>0 ); iHdr = getVarint32(a, nHdr); if( nHdr>nRec || iHdr>=nHdr ) return SQLITE_CORRUPT_BKPT; iField = nHdr; for(i=0; i<=iCol; i++){ iHdr += getVarint32(&a[iHdr], t); testcase( iHdr==nHdr ); testcase( iHdr==nHdr+1 ); if( iHdr>nHdr ) return SQLITE_CORRUPT_BKPT; szField = sqlite3VdbeSerialTypeLen(t); iField += szField; } testcase( iField==nRec ); testcase( iField==nRec+1 ); if( iField>nRec ) return SQLITE_CORRUPT_BKPT; if( pMem==0 ){ pMem = *ppVal = sqlite3ValueNew(db); if( pMem==0 ) return SQLITE_NOMEM; } sqlite3VdbeSerialGet(&a[iField-szField], t, pMem); pMem->enc = ENC(db); return SQLITE_OK; } /* ** Unless it is NULL, the argument must be an UnpackedRecord object returned ** by an earlier call to sqlite3Stat4ProbeSetValue(). This call deletes ** the object. 
*/ SQLITE_PRIVATE void sqlite3Stat4ProbeFree(UnpackedRecord *pRec){ if( pRec ){ int i; int nCol = pRec->pKeyInfo->nField+pRec->pKeyInfo->nXField; Mem *aMem = pRec->aMem; sqlite3 *db = aMem[0].db; for(i=0; ipKeyInfo); sqlite3DbFree(db, pRec); } } #endif /* ifdef SQLITE_ENABLE_STAT4 */ /* ** Change the string value of an sqlite3_value object */ SQLITE_PRIVATE void sqlite3ValueSetStr( sqlite3_value *v, /* Value to be set */ int n, /* Length of string z */ const void *z, /* Text of the new string */ u8 enc, /* Encoding to use */ void (*xDel)(void*) /* Destructor for the string */ ){ if( v ) sqlite3VdbeMemSetStr((Mem *)v, z, n, enc, xDel); } /* ** Free an sqlite3_value object */ SQLITE_PRIVATE void sqlite3ValueFree(sqlite3_value *v){ if( !v ) return; sqlite3VdbeMemRelease((Mem *)v); sqlite3DbFree(((Mem*)v)->db, v); } /* ** The sqlite3ValueBytes() routine returns the number of bytes in the ** sqlite3_value object assuming that it uses the encoding "enc". ** The valueBytes() routine is a helper function. */ static SQLITE_NOINLINE int valueBytes(sqlite3_value *pVal, u8 enc){ return valueToText(pVal, enc)!=0 ? pVal->n : 0; } SQLITE_PRIVATE int sqlite3ValueBytes(sqlite3_value *pVal, u8 enc){ Mem *p = (Mem*)pVal; assert( (p->flags & MEM_Null)==0 || (p->flags & (MEM_Str|MEM_Blob))==0 ); if( (p->flags & MEM_Str)!=0 && pVal->enc==enc ){ return p->n; } if( (p->flags & MEM_Blob)!=0 ){ if( p->flags & MEM_Zero ){ return p->n + p->u.nZero; }else{ return p->n; } } if( p->flags & MEM_Null ) return 0; return valueBytes(pVal, enc); } /************** End of vdbemem.c *********************************************/ /************** Begin file vdbeaux.c *****************************************/ /* ** 2003 September 6 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** This file contains code used for creating, destroying, and populating ** a VDBE (or an "sqlite3_stmt" as it is known to the outside world.) */ /* #include "sqliteInt.h" */ /* #include "vdbeInt.h" */ /* ** Create a new virtual database engine. */ SQLITE_PRIVATE Vdbe *sqlite3VdbeCreate(Parse *pParse){ sqlite3 *db = pParse->db; Vdbe *p; p = sqlite3DbMallocZero(db, sizeof(Vdbe) ); if( p==0 ) return 0; p->db = db; if( db->pVdbe ){ db->pVdbe->pPrev = p; } p->pNext = db->pVdbe; p->pPrev = 0; db->pVdbe = p; p->magic = VDBE_MAGIC_INIT; p->pParse = pParse; assert( pParse->aLabel==0 ); assert( pParse->nLabel==0 ); assert( pParse->nOpAlloc==0 ); return p; } /* ** Change the error string stored in Vdbe.zErrMsg */ SQLITE_PRIVATE void sqlite3VdbeError(Vdbe *p, const char *zFormat, ...){ va_list ap; sqlite3DbFree(p->db, p->zErrMsg); va_start(ap, zFormat); p->zErrMsg = sqlite3VMPrintf(p->db, zFormat, ap); va_end(ap); } /* ** Remember the SQL string for a prepared statement. 
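**
** Note that the saved text is only exposed through sqlite3_sql() when the
** statement was prepared with one of the _v2 interfaces (isPrepareV2!=0);
** for a statement from the legacy sqlite3_prepare() the public function
** below returns NULL.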
*/ SQLITE_PRIVATE void sqlite3VdbeSetSql(Vdbe *p, const char *z, int n, int isPrepareV2){ assert( isPrepareV2==1 || isPrepareV2==0 ); if( p==0 ) return; #if defined(SQLITE_OMIT_TRACE) && !defined(SQLITE_ENABLE_SQLLOG) if( !isPrepareV2 ) return; #endif assert( p->zSql==0 ); p->zSql = sqlite3DbStrNDup(p->db, z, n); p->isPrepareV2 = (u8)isPrepareV2; } /* ** Return the SQL associated with a prepared statement */ SQLITE_API const char *SQLITE_STDCALL sqlite3_sql(sqlite3_stmt *pStmt){ Vdbe *p = (Vdbe *)pStmt; return (p && p->isPrepareV2) ? p->zSql : 0; } /* ** Swap all content between two VDBE structures. */ SQLITE_PRIVATE void sqlite3VdbeSwap(Vdbe *pA, Vdbe *pB){ Vdbe tmp, *pTmp; char *zTmp; tmp = *pA; *pA = *pB; *pB = tmp; pTmp = pA->pNext; pA->pNext = pB->pNext; pB->pNext = pTmp; pTmp = pA->pPrev; pA->pPrev = pB->pPrev; pB->pPrev = pTmp; zTmp = pA->zSql; pA->zSql = pB->zSql; pB->zSql = zTmp; pB->isPrepareV2 = pA->isPrepareV2; } /* ** Resize the Vdbe.aOp array so that it is at least nOp elements larger ** than its current size. nOp is guaranteed to be less than or equal ** to 1024/sizeof(Op). ** ** If an out-of-memory error occurs while resizing the array, return ** SQLITE_NOMEM. In this case Vdbe.aOp and Parse.nOpAlloc remain ** unchanged (this is so that any opcodes already allocated can be ** correctly deallocated along with the rest of the Vdbe). */ static int growOpArray(Vdbe *v, int nOp){ VdbeOp *pNew; Parse *p = v->pParse; /* The SQLITE_TEST_REALLOC_STRESS compile-time option is designed to force ** more frequent reallocs and hence provide more opportunities for ** simulated OOM faults. SQLITE_TEST_REALLOC_STRESS is generally used ** during testing only. With SQLITE_TEST_REALLOC_STRESS grow the op array ** by the minimum* amount required until the size reaches 512. Normal ** operation (without SQLITE_TEST_REALLOC_STRESS) is to double the current ** size of the op array or add 1KB of space, whichever is smaller. */ #ifdef SQLITE_TEST_REALLOC_STRESS int nNew = (p->nOpAlloc>=512 ? p->nOpAlloc*2 : p->nOpAlloc+nOp); #else int nNew = (p->nOpAlloc ? p->nOpAlloc*2 : (int)(1024/sizeof(Op))); UNUSED_PARAMETER(nOp); #endif assert( nOp<=(1024/sizeof(Op)) ); assert( nNew>=(p->nOpAlloc+nOp) ); pNew = sqlite3DbRealloc(p->db, v->aOp, nNew*sizeof(Op)); if( pNew ){ p->nOpAlloc = sqlite3DbMallocSize(p->db, pNew)/sizeof(Op); v->aOp = pNew; } return (pNew ? SQLITE_OK : SQLITE_NOMEM); } #ifdef SQLITE_DEBUG /* This routine is just a convenient place to set a breakpoint that will ** fire after each opcode is inserted and displayed using ** "PRAGMA vdbe_addoptrace=on". */ static void test_addop_breakpoint(void){ static int n = 0; n++; } #endif /* ** Add a new instruction to the list of instructions current in the ** VDBE. Return the address of the new instruction. ** ** Parameters: ** ** p Pointer to the VDBE ** ** op The opcode for this instruction ** ** p1, p2, p3 Operands ** ** Use the sqlite3VdbeResolveLabel() function to fix an address and ** the sqlite3VdbeChangeP4() function to change the value of the P4 ** operand. 
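**
** A typical code-generator fragment (an illustrative sketch; v, iReg and
** the surrounding context are assumed) combines these calls with the label
** mechanism described further down:
**
**     int lbl = sqlite3VdbeMakeLabel(v);
**     sqlite3VdbeAddOp2(v, OP_IsNull, iReg, lbl);  // jump taken if r[iReg] is NULL
**     // ... generate code for the non-NULL case ...
**     sqlite3VdbeResolveLabel(v, lbl);
**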
*/ SQLITE_PRIVATE int sqlite3VdbeAddOp3(Vdbe *p, int op, int p1, int p2, int p3){ int i; VdbeOp *pOp; i = p->nOp; assert( p->magic==VDBE_MAGIC_INIT ); assert( op>0 && op<0xff ); if( p->pParse->nOpAlloc<=i ){ if( growOpArray(p, 1) ){ return 1; } } p->nOp++; pOp = &p->aOp[i]; pOp->opcode = (u8)op; pOp->p5 = 0; pOp->p1 = p1; pOp->p2 = p2; pOp->p3 = p3; pOp->p4.p = 0; pOp->p4type = P4_NOTUSED; #ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS pOp->zComment = 0; #endif #ifdef SQLITE_DEBUG if( p->db->flags & SQLITE_VdbeAddopTrace ){ int jj, kk; Parse *pParse = p->pParse; for(jj=kk=0; jjaColCache + jj; if( x->iLevel>pParse->iCacheLevel || x->iReg==0 ) continue; printf(" r[%d]={%d:%d}", x->iReg, x->iTable, x->iColumn); kk++; } if( kk ) printf("\n"); sqlite3VdbePrintOp(0, i, &p->aOp[i]); test_addop_breakpoint(); } #endif #ifdef VDBE_PROFILE pOp->cycles = 0; pOp->cnt = 0; #endif #ifdef SQLITE_VDBE_COVERAGE pOp->iSrcLine = 0; #endif return i; } SQLITE_PRIVATE int sqlite3VdbeAddOp0(Vdbe *p, int op){ return sqlite3VdbeAddOp3(p, op, 0, 0, 0); } SQLITE_PRIVATE int sqlite3VdbeAddOp1(Vdbe *p, int op, int p1){ return sqlite3VdbeAddOp3(p, op, p1, 0, 0); } SQLITE_PRIVATE int sqlite3VdbeAddOp2(Vdbe *p, int op, int p1, int p2){ return sqlite3VdbeAddOp3(p, op, p1, p2, 0); } /* ** Add an opcode that includes the p4 value as a pointer. */ SQLITE_PRIVATE int sqlite3VdbeAddOp4( Vdbe *p, /* Add the opcode to this VM */ int op, /* The new opcode */ int p1, /* The P1 operand */ int p2, /* The P2 operand */ int p3, /* The P3 operand */ const char *zP4, /* The P4 operand */ int p4type /* P4 operand type */ ){ int addr = sqlite3VdbeAddOp3(p, op, p1, p2, p3); sqlite3VdbeChangeP4(p, addr, zP4, p4type); return addr; } /* ** Add an opcode that includes the p4 value with a P4_INT64 type. */ SQLITE_PRIVATE int sqlite3VdbeAddOp4Dup8( Vdbe *p, /* Add the opcode to this VM */ int op, /* The new opcode */ int p1, /* The P1 operand */ int p2, /* The P2 operand */ int p3, /* The P3 operand */ const u8 *zP4, /* The P4 operand */ int p4type /* P4 operand type */ ){ char *p4copy = sqlite3DbMallocRaw(sqlite3VdbeDb(p), 8); if( p4copy ) memcpy(p4copy, zP4, 8); return sqlite3VdbeAddOp4(p, op, p1, p2, p3, p4copy, p4type); } /* ** Add an OP_ParseSchema opcode. This routine is broken out from ** sqlite3VdbeAddOp4() since it needs to also needs to mark all btrees ** as having been used. ** ** The zWhere string must have been obtained from sqlite3_malloc(). ** This routine will take ownership of the allocated memory. */ SQLITE_PRIVATE void sqlite3VdbeAddParseSchemaOp(Vdbe *p, int iDb, char *zWhere){ int j; int addr = sqlite3VdbeAddOp3(p, OP_ParseSchema, iDb, 0, 0); sqlite3VdbeChangeP4(p, addr, zWhere, P4_DYNAMIC); for(j=0; jdb->nDb; j++) sqlite3VdbeUsesBtree(p, j); } /* ** Add an opcode that includes the p4 value as an integer. */ SQLITE_PRIVATE int sqlite3VdbeAddOp4Int( Vdbe *p, /* Add the opcode to this VM */ int op, /* The new opcode */ int p1, /* The P1 operand */ int p2, /* The P2 operand */ int p3, /* The P3 operand */ int p4 /* The P4 operand as an integer */ ){ int addr = sqlite3VdbeAddOp3(p, op, p1, p2, p3); sqlite3VdbeChangeP4(p, addr, SQLITE_INT_TO_PTR(p4), P4_INT32); return addr; } /* ** Create a new symbolic label for an instruction that has yet to be ** coded. The symbolic label is really just a negative number. The ** label can be used as the P2 value of an operation. 
Later, when ** the label is resolved to a specific address, the VDBE will scan ** through its operation list and change all values of P2 which match ** the label into the resolved address. ** ** The VDBE knows that a P2 value is a label because labels are ** always negative and P2 values are suppose to be non-negative. ** Hence, a negative P2 value is a label that has yet to be resolved. ** ** Zero is returned if a malloc() fails. */ SQLITE_PRIVATE int sqlite3VdbeMakeLabel(Vdbe *v){ Parse *p = v->pParse; int i = p->nLabel++; assert( v->magic==VDBE_MAGIC_INIT ); if( (i & (i-1))==0 ){ p->aLabel = sqlite3DbReallocOrFree(p->db, p->aLabel, (i*2+1)*sizeof(p->aLabel[0])); } if( p->aLabel ){ p->aLabel[i] = -1; } return -1-i; } /* ** Resolve label "x" to be the address of the next instruction to ** be inserted. The parameter "x" must have been obtained from ** a prior call to sqlite3VdbeMakeLabel(). */ SQLITE_PRIVATE void sqlite3VdbeResolveLabel(Vdbe *v, int x){ Parse *p = v->pParse; int j = -1-x; assert( v->magic==VDBE_MAGIC_INIT ); assert( jnLabel ); if( ALWAYS(j>=0) && p->aLabel ){ p->aLabel[j] = v->nOp; } p->iFixedOp = v->nOp - 1; } /* ** Mark the VDBE as one that can only be run one time. */ SQLITE_PRIVATE void sqlite3VdbeRunOnlyOnce(Vdbe *p){ p->runOnlyOnce = 1; } #ifdef SQLITE_DEBUG /* sqlite3AssertMayAbort() logic */ /* ** The following type and function are used to iterate through all opcodes ** in a Vdbe main program and each of the sub-programs (triggers) it may ** invoke directly or indirectly. It should be used as follows: ** ** Op *pOp; ** VdbeOpIter sIter; ** ** memset(&sIter, 0, sizeof(sIter)); ** sIter.v = v; // v is of type Vdbe* ** while( (pOp = opIterNext(&sIter)) ){ ** // Do something with pOp ** } ** sqlite3DbFree(v->db, sIter.apSub); ** */ typedef struct VdbeOpIter VdbeOpIter; struct VdbeOpIter { Vdbe *v; /* Vdbe to iterate through the opcodes of */ SubProgram **apSub; /* Array of subprograms */ int nSub; /* Number of entries in apSub */ int iAddr; /* Address of next instruction to return */ int iSub; /* 0 = main program, 1 = first sub-program etc. */ }; static Op *opIterNext(VdbeOpIter *p){ Vdbe *v = p->v; Op *pRet = 0; Op *aOp; int nOp; if( p->iSub<=p->nSub ){ if( p->iSub==0 ){ aOp = v->aOp; nOp = v->nOp; }else{ aOp = p->apSub[p->iSub-1]->aOp; nOp = p->apSub[p->iSub-1]->nOp; } assert( p->iAddriAddr]; p->iAddr++; if( p->iAddr==nOp ){ p->iSub++; p->iAddr = 0; } if( pRet->p4type==P4_SUBPROGRAM ){ int nByte = (p->nSub+1)*sizeof(SubProgram*); int j; for(j=0; jnSub; j++){ if( p->apSub[j]==pRet->p4.pProgram ) break; } if( j==p->nSub ){ p->apSub = sqlite3DbReallocOrFree(v->db, p->apSub, nByte); if( !p->apSub ){ pRet = 0; }else{ p->apSub[p->nSub++] = pRet->p4.pProgram; } } } } return pRet; } /* ** Check if the program stored in the VM associated with pParse may ** throw an ABORT exception (causing the statement, but not entire transaction ** to be rolled back). This condition is true if the main program or any ** sub-programs contains any of the following: ** ** * OP_Halt with P1=SQLITE_CONSTRAINT and P2=OE_Abort. ** * OP_HaltIfNull with P1=SQLITE_CONSTRAINT and P2=OE_Abort. ** * OP_Destroy ** * OP_VUpdate ** * OP_VRename ** * OP_FkCounter with P2==0 (immediate foreign key constraint) ** * OP_CreateTable and OP_InitCoroutine (for CREATE TABLE AS SELECT ...) ** ** Then check that the value of Parse.mayAbort is true if an ** ABORT may be thrown, or false otherwise. Return true if it does ** match, or false otherwise. 
This function is intended to be used as ** part of an assert statement in the compiler. Similar to: ** ** assert( sqlite3VdbeAssertMayAbort(pParse->pVdbe, pParse->mayAbort) ); */ SQLITE_PRIVATE int sqlite3VdbeAssertMayAbort(Vdbe *v, int mayAbort){ int hasAbort = 0; int hasFkCounter = 0; int hasCreateTable = 0; int hasInitCoroutine = 0; Op *pOp; VdbeOpIter sIter; memset(&sIter, 0, sizeof(sIter)); sIter.v = v; while( (pOp = opIterNext(&sIter))!=0 ){ int opcode = pOp->opcode; if( opcode==OP_Destroy || opcode==OP_VUpdate || opcode==OP_VRename || ((opcode==OP_Halt || opcode==OP_HaltIfNull) && ((pOp->p1&0xff)==SQLITE_CONSTRAINT && pOp->p2==OE_Abort)) ){ hasAbort = 1; break; } if( opcode==OP_CreateTable ) hasCreateTable = 1; if( opcode==OP_InitCoroutine ) hasInitCoroutine = 1; #ifndef SQLITE_OMIT_FOREIGN_KEY if( opcode==OP_FkCounter && pOp->p1==0 && pOp->p2==1 ){ hasFkCounter = 1; } #endif } sqlite3DbFree(v->db, sIter.apSub); /* Return true if hasAbort==mayAbort. Or if a malloc failure occurred. ** If malloc failed, then the while() loop above may not have iterated ** through all opcodes and hasAbort may be set incorrectly. Return ** true for this case to prevent the assert() in the callers frame ** from failing. */ return ( v->db->mallocFailed || hasAbort==mayAbort || hasFkCounter || (hasCreateTable && hasInitCoroutine) ); } #endif /* SQLITE_DEBUG - the sqlite3AssertMayAbort() function */ /* ** Loop through the program looking for P2 values that are negative ** on jump instructions. Each such value is a label. Resolve the ** label by setting the P2 value to its correct non-zero value. ** ** This routine is called once after all opcodes have been inserted. ** ** Variable *pMaxFuncArgs is set to the maximum value of any P2 argument ** to an OP_Function, OP_AggStep or OP_VFilter opcode. This is used by ** sqlite3VdbeMakeReady() to size the Vdbe.apArg[] array. ** ** The Op.opflags field is set on all opcodes. */ static void resolveP2Values(Vdbe *p, int *pMaxFuncArgs){ int i; int nMaxArgs = *pMaxFuncArgs; Op *pOp; Parse *pParse = p->pParse; int *aLabel = pParse->aLabel; p->readOnly = 1; p->bIsReader = 0; for(pOp=p->aOp, i=p->nOp-1; i>=0; i--, pOp++){ u8 opcode = pOp->opcode; /* NOTE: Be sure to update mkopcodeh.awk when adding or removing ** cases from this switch! */ switch( opcode ){ case OP_Transaction: { if( pOp->p2!=0 ) p->readOnly = 0; /* fall thru */ } case OP_AutoCommit: case OP_Savepoint: { p->bIsReader = 1; break; } #ifndef SQLITE_OMIT_WAL case OP_Checkpoint: #endif case OP_Vacuum: case OP_JournalMode: { p->readOnly = 0; p->bIsReader = 1; break; } #ifndef SQLITE_OMIT_VIRTUALTABLE case OP_VUpdate: { if( pOp->p2>nMaxArgs ) nMaxArgs = pOp->p2; break; } case OP_VFilter: { int n; assert( p->nOp - i >= 3 ); assert( pOp[-1].opcode==OP_Integer ); n = pOp[-1].p1; if( n>nMaxArgs ) nMaxArgs = n; break; } #endif case OP_Next: case OP_NextIfOpen: case OP_SorterNext: { pOp->p4.xAdvance = sqlite3BtreeNext; pOp->p4type = P4_ADVANCE; break; } case OP_Prev: case OP_PrevIfOpen: { pOp->p4.xAdvance = sqlite3BtreePrevious; pOp->p4type = P4_ADVANCE; break; } } pOp->opflags = sqlite3OpcodeProperty[opcode]; if( (pOp->opflags & OPFLG_JUMP)!=0 && pOp->p2<0 ){ assert( -1-pOp->p2nLabel ); pOp->p2 = aLabel[-1-pOp->p2]; } } sqlite3DbFree(p->db, pParse->aLabel); pParse->aLabel = 0; pParse->nLabel = 0; *pMaxFuncArgs = nMaxArgs; assert( p->bIsReader!=0 || DbMaskAllZero(p->btreeMask) ); } /* ** Return the address of the next instruction to be inserted. 
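**
** For illustration only (a hypothetical fragment, assuming a Vdbe *v and an
** open cursor number iCur): the current address is typically saved so that
** a later jump instruction can target the top of a loop:
**
**     int addrTop = sqlite3VdbeCurrentAddr(v);
**     ...                                            // emit the loop body
**     sqlite3VdbeAddOp2(v, OP_Next, iCur, addrTop);  // jump back while rows remain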
*/ SQLITE_PRIVATE int sqlite3VdbeCurrentAddr(Vdbe *p){ assert( p->magic==VDBE_MAGIC_INIT ); return p->nOp; } /* ** This function returns a pointer to the array of opcodes associated with ** the Vdbe passed as the first argument. It is the callers responsibility ** to arrange for the returned array to be eventually freed using the ** vdbeFreeOpArray() function. ** ** Before returning, *pnOp is set to the number of entries in the returned ** array. Also, *pnMaxArg is set to the larger of its current value and ** the number of entries in the Vdbe.apArg[] array required to execute the ** returned program. */ SQLITE_PRIVATE VdbeOp *sqlite3VdbeTakeOpArray(Vdbe *p, int *pnOp, int *pnMaxArg){ VdbeOp *aOp = p->aOp; assert( aOp && !p->db->mallocFailed ); /* Check that sqlite3VdbeUsesBtree() was not called on this VM */ assert( DbMaskAllZero(p->btreeMask) ); resolveP2Values(p, pnMaxArg); *pnOp = p->nOp; p->aOp = 0; return aOp; } /* ** Add a whole list of operations to the operation stack. Return the ** address of the first operation added. */ SQLITE_PRIVATE int sqlite3VdbeAddOpList(Vdbe *p, int nOp, VdbeOpList const *aOp, int iLineno){ int addr; assert( p->magic==VDBE_MAGIC_INIT ); if( p->nOp + nOp > p->pParse->nOpAlloc && growOpArray(p, nOp) ){ return 0; } addr = p->nOp; if( ALWAYS(nOp>0) ){ int i; VdbeOpList const *pIn = aOp; for(i=0; ip2; VdbeOp *pOut = &p->aOp[i+addr]; pOut->opcode = pIn->opcode; pOut->p1 = pIn->p1; if( p2<0 ){ assert( sqlite3OpcodeProperty[pOut->opcode] & OPFLG_JUMP ); pOut->p2 = addr + ADDR(p2); }else{ pOut->p2 = p2; } pOut->p3 = pIn->p3; pOut->p4type = P4_NOTUSED; pOut->p4.p = 0; pOut->p5 = 0; #ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS pOut->zComment = 0; #endif #ifdef SQLITE_VDBE_COVERAGE pOut->iSrcLine = iLineno+i; #else (void)iLineno; #endif #ifdef SQLITE_DEBUG if( p->db->flags & SQLITE_VdbeAddopTrace ){ sqlite3VdbePrintOp(0, i+addr, &p->aOp[i+addr]); } #endif } p->nOp += nOp; } return addr; } #if defined(SQLITE_ENABLE_STMT_SCANSTATUS) /* ** Add an entry to the array of counters managed by sqlite3_stmt_scanstatus(). */ SQLITE_PRIVATE void sqlite3VdbeScanStatus( Vdbe *p, /* VM to add scanstatus() to */ int addrExplain, /* Address of OP_Explain (or 0) */ int addrLoop, /* Address of loop counter */ int addrVisit, /* Address of rows visited counter */ LogEst nEst, /* Estimated number of output rows */ const char *zName /* Name of table or index being scanned */ ){ int nByte = (p->nScan+1) * sizeof(ScanStatus); ScanStatus *aNew; aNew = (ScanStatus*)sqlite3DbRealloc(p->db, p->aScan, nByte); if( aNew ){ ScanStatus *pNew = &aNew[p->nScan++]; pNew->addrExplain = addrExplain; pNew->addrLoop = addrLoop; pNew->addrVisit = addrVisit; pNew->nEst = nEst; pNew->zName = sqlite3DbStrDup(p->db, zName); p->aScan = aNew; } } #endif /* ** Change the value of the P1 operand for a specific instruction. ** This routine is useful when a large program is loaded from a ** static array using sqlite3VdbeAddOpList but we want to make a ** few minor changes to the program. */ SQLITE_PRIVATE void sqlite3VdbeChangeP1(Vdbe *p, u32 addr, int val){ assert( p!=0 ); if( ((u32)p->nOp)>addr ){ p->aOp[addr].p1 = val; } } /* ** Change the value of the P2 operand for a specific instruction. ** This routine is useful for setting a jump destination. */ SQLITE_PRIVATE void sqlite3VdbeChangeP2(Vdbe *p, u32 addr, int val){ assert( p!=0 ); if( ((u32)p->nOp)>addr ){ p->aOp[addr].p2 = val; } } /* ** Change the value of the P3 operand for a specific instruction. 
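**
** Like sqlite3VdbeChangeP1() and sqlite3VdbeChangeP2() above, this is a
** patch-after-emit helper.  For illustration only (a hypothetical forward
** jump whose destination is not yet known when the opcode is emitted):
**
**     int addrGoto = sqlite3VdbeAddOp2(v, OP_Goto, 0, 0);  // P2 filled in later
**     ...                                                  // emit the skipped code
**     sqlite3VdbeChangeP2(v, addrGoto, sqlite3VdbeCurrentAddr(v));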
*/ SQLITE_PRIVATE void sqlite3VdbeChangeP3(Vdbe *p, u32 addr, int val){ assert( p!=0 ); if( ((u32)p->nOp)>addr ){ p->aOp[addr].p3 = val; } } /* ** Change the value of the P5 operand for the most recently ** added operation. */ SQLITE_PRIVATE void sqlite3VdbeChangeP5(Vdbe *p, u8 val){ assert( p!=0 ); if( p->aOp ){ assert( p->nOp>0 ); p->aOp[p->nOp-1].p5 = val; } } /* ** Change the P2 operand of instruction addr so that it points to ** the address of the next instruction to be coded. */ SQLITE_PRIVATE void sqlite3VdbeJumpHere(Vdbe *p, int addr){ sqlite3VdbeChangeP2(p, addr, p->nOp); p->pParse->iFixedOp = p->nOp - 1; } /* ** If the input FuncDef structure is ephemeral, then free it. If ** the FuncDef is not ephermal, then do nothing. */ static void freeEphemeralFunction(sqlite3 *db, FuncDef *pDef){ if( ALWAYS(pDef) && (pDef->funcFlags & SQLITE_FUNC_EPHEM)!=0 ){ sqlite3DbFree(db, pDef); } } static void vdbeFreeOpArray(sqlite3 *, Op *, int); /* ** Delete a P4 value if necessary. */ static void freeP4(sqlite3 *db, int p4type, void *p4){ if( p4 ){ assert( db ); switch( p4type ){ case P4_FUNCCTX: { freeEphemeralFunction(db, ((sqlite3_context*)p4)->pFunc); /* Fall through into the next case */ } case P4_REAL: case P4_INT64: case P4_DYNAMIC: case P4_INTARRAY: { sqlite3DbFree(db, p4); break; } case P4_KEYINFO: { if( db->pnBytesFreed==0 ) sqlite3KeyInfoUnref((KeyInfo*)p4); break; } case P4_MPRINTF: { if( db->pnBytesFreed==0 ) sqlite3_free(p4); break; } case P4_FUNCDEF: { freeEphemeralFunction(db, (FuncDef*)p4); break; } case P4_MEM: { if( db->pnBytesFreed==0 ){ sqlite3ValueFree((sqlite3_value*)p4); }else{ Mem *p = (Mem*)p4; if( p->szMalloc ) sqlite3DbFree(db, p->zMalloc); sqlite3DbFree(db, p); } break; } case P4_VTAB : { if( db->pnBytesFreed==0 ) sqlite3VtabUnlock((VTable *)p4); break; } } } } /* ** Free the space allocated for aOp and any p4 values allocated for the ** opcodes contained within. If aOp is not NULL it is assumed to contain ** nOp entries. */ static void vdbeFreeOpArray(sqlite3 *db, Op *aOp, int nOp){ if( aOp ){ Op *pOp; for(pOp=aOp; pOp<&aOp[nOp]; pOp++){ freeP4(db, pOp->p4type, pOp->p4.p); #ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS sqlite3DbFree(db, pOp->zComment); #endif } } sqlite3DbFree(db, aOp); } /* ** Link the SubProgram object passed as the second argument into the linked ** list at Vdbe.pSubProgram. This list is used to delete all sub-program ** objects when the VM is no longer required. */ SQLITE_PRIVATE void sqlite3VdbeLinkSubProgram(Vdbe *pVdbe, SubProgram *p){ p->pNext = pVdbe->pProgram; pVdbe->pProgram = p; } /* ** Change the opcode at addr into OP_Noop */ SQLITE_PRIVATE void sqlite3VdbeChangeToNoop(Vdbe *p, int addr){ if( addrnOp ){ VdbeOp *pOp = &p->aOp[addr]; sqlite3 *db = p->db; freeP4(db, pOp->p4type, pOp->p4.p); memset(pOp, 0, sizeof(pOp[0])); pOp->opcode = OP_Noop; if( addr==p->nOp-1 ) p->nOp--; } } /* ** If the last opcode is "op" and it is not a jump destination, ** then remove it. Return true if and only if an opcode was removed. */ SQLITE_PRIVATE int sqlite3VdbeDeletePriorOpcode(Vdbe *p, u8 op){ if( (p->nOp-1)>(p->pParse->iFixedOp) && p->aOp[p->nOp-1].opcode==op ){ sqlite3VdbeChangeToNoop(p, p->nOp-1); return 1; }else{ return 0; } } /* ** Change the value of the P4 operand for a specific instruction. ** This routine is useful when a large program is loaded from a ** static array using sqlite3VdbeAddOpList but we want to make a ** few minor changes to the program. 
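**
** For illustration only (hypothetical calls; the meaning of the final
** argument n is spelled out in the paragraphs that follow):
**
**     sqlite3VdbeChangeP4(v, -1, (char*)pKeyInfo, P4_KEYINFO); // store the KeyInfo pointer
**     sqlite3VdbeChangeP4(v, addr, zName, 0);  // n==0: copy zName through its nul byte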
** ** If n>=0 then the P4 operand is dynamic, meaning that a copy of ** the string is made into memory obtained from sqlite3_malloc(). ** A value of n==0 means copy bytes of zP4 up to and including the ** first null byte. If n>0 then copy n+1 bytes of zP4. ** ** Other values of n (P4_STATIC, P4_COLLSEQ etc.) indicate that zP4 points ** to a string or structure that is guaranteed to exist for the lifetime of ** the Vdbe. In these cases we can just copy the pointer. ** ** If addr<0 then change P4 on the most recently inserted instruction. */ SQLITE_PRIVATE void sqlite3VdbeChangeP4(Vdbe *p, int addr, const char *zP4, int n){ Op *pOp; sqlite3 *db; assert( p!=0 ); db = p->db; assert( p->magic==VDBE_MAGIC_INIT ); if( p->aOp==0 || db->mallocFailed ){ if( n!=P4_VTAB ){ freeP4(db, n, (void*)*(char**)&zP4); } return; } assert( p->nOp>0 ); assert( addrnOp ); if( addr<0 ){ addr = p->nOp - 1; } pOp = &p->aOp[addr]; assert( pOp->p4type==P4_NOTUSED || pOp->p4type==P4_INT32 || pOp->p4type==P4_KEYINFO ); freeP4(db, pOp->p4type, pOp->p4.p); pOp->p4.p = 0; if( n==P4_INT32 ){ /* Note: this cast is safe, because the origin data point was an int ** that was cast to a (const char *). */ pOp->p4.i = SQLITE_PTR_TO_INT(zP4); pOp->p4type = P4_INT32; }else if( zP4==0 ){ pOp->p4.p = 0; pOp->p4type = P4_NOTUSED; }else if( n==P4_KEYINFO ){ pOp->p4.p = (void*)zP4; pOp->p4type = P4_KEYINFO; }else if( n==P4_VTAB ){ pOp->p4.p = (void*)zP4; pOp->p4type = P4_VTAB; sqlite3VtabLock((VTable *)zP4); assert( ((VTable *)zP4)->db==p->db ); }else if( n<0 ){ pOp->p4.p = (void*)zP4; pOp->p4type = (signed char)n; }else{ if( n==0 ) n = sqlite3Strlen30(zP4); pOp->p4.z = sqlite3DbStrNDup(p->db, zP4, n); pOp->p4type = P4_DYNAMIC; } } /* ** Set the P4 on the most recently added opcode to the KeyInfo for the ** index given. */ SQLITE_PRIVATE void sqlite3VdbeSetP4KeyInfo(Parse *pParse, Index *pIdx){ Vdbe *v = pParse->pVdbe; assert( v!=0 ); assert( pIdx!=0 ); sqlite3VdbeChangeP4(v, -1, (char*)sqlite3KeyInfoOfIndex(pParse, pIdx), P4_KEYINFO); } #ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS /* ** Change the comment on the most recently coded instruction. Or ** insert a No-op and add the comment to that new instruction. This ** makes the code easier to read during debugging. None of this happens ** in a production build. */ static void vdbeVComment(Vdbe *p, const char *zFormat, va_list ap){ assert( p->nOp>0 || p->aOp==0 ); assert( p->aOp==0 || p->aOp[p->nOp-1].zComment==0 || p->db->mallocFailed ); if( p->nOp ){ assert( p->aOp ); sqlite3DbFree(p->db, p->aOp[p->nOp-1].zComment); p->aOp[p->nOp-1].zComment = sqlite3VMPrintf(p->db, zFormat, ap); } } SQLITE_PRIVATE void sqlite3VdbeComment(Vdbe *p, const char *zFormat, ...){ va_list ap; if( p ){ va_start(ap, zFormat); vdbeVComment(p, zFormat, ap); va_end(ap); } } SQLITE_PRIVATE void sqlite3VdbeNoopComment(Vdbe *p, const char *zFormat, ...){ va_list ap; if( p ){ sqlite3VdbeAddOp0(p, OP_Noop); va_start(ap, zFormat); vdbeVComment(p, zFormat, ap); va_end(ap); } } #endif /* NDEBUG */ #ifdef SQLITE_VDBE_COVERAGE /* ** Set the value if the iSrcLine field for the previously coded instruction. */ SQLITE_PRIVATE void sqlite3VdbeSetLineNumber(Vdbe *v, int iLine){ sqlite3VdbeGetOp(v,-1)->iSrcLine = iLine; } #endif /* SQLITE_VDBE_COVERAGE */ /* ** Return the opcode for a given address. If the address is -1, then ** return the most recently inserted opcode. ** ** If a memory allocation error has occurred prior to the calling of this ** routine, then a pointer to a dummy VdbeOp will be returned. 
That opcode ** is readable but not writable, though it is cast to a writable value. ** The return of a dummy opcode allows the call to continue functioning ** after an OOM fault without having to check to see if the return from ** this routine is a valid pointer. But because the dummy.opcode is 0, ** dummy will never be written to. This is verified by code inspection and ** by running with Valgrind. */ SQLITE_PRIVATE VdbeOp *sqlite3VdbeGetOp(Vdbe *p, int addr){ /* C89 specifies that the constant "dummy" will be initialized to all ** zeros, which is correct. MSVC generates a warning, nevertheless. */ static VdbeOp dummy; /* Ignore the MSVC warning about no initializer */ assert( p->magic==VDBE_MAGIC_INIT ); if( addr<0 ){ addr = p->nOp - 1; } assert( (addr>=0 && addrnOp) || p->db->mallocFailed ); if( p->db->mallocFailed ){ return (VdbeOp*)&dummy; }else{ return &p->aOp[addr]; } } #if defined(SQLITE_ENABLE_EXPLAIN_COMMENTS) /* ** Return an integer value for one of the parameters to the opcode pOp ** determined by character c. */ static int translateP(char c, const Op *pOp){ if( c=='1' ) return pOp->p1; if( c=='2' ) return pOp->p2; if( c=='3' ) return pOp->p3; if( c=='4' ) return pOp->p4.i; return pOp->p5; } /* ** Compute a string for the "comment" field of a VDBE opcode listing. ** ** The Synopsis: field in comments in the vdbe.c source file gets converted ** to an extra string that is appended to the sqlite3OpcodeName(). In the ** absence of other comments, this synopsis becomes the comment on the opcode. ** Some translation occurs: ** ** "PX" -> "r[X]" ** "PX@PY" -> "r[X..X+Y-1]" or "r[x]" if y is 0 or 1 ** "PX@PY+1" -> "r[X..X+Y]" or "r[x]" if y is 0 ** "PY..PY" -> "r[X..Y]" or "r[x]" if y<=x */ static int displayComment( const Op *pOp, /* The opcode to be commented */ const char *zP4, /* Previously obtained value for P4 */ char *zTemp, /* Write result here */ int nTemp /* Space available in zTemp[] */ ){ const char *zOpName; const char *zSynopsis; int nOpName; int ii, jj; zOpName = sqlite3OpcodeName(pOp->opcode); nOpName = sqlite3Strlen30(zOpName); if( zOpName[nOpName+1] ){ int seenCom = 0; char c; zSynopsis = zOpName += nOpName + 1; for(ii=jj=0; jjzComment); seenCom = 1; }else{ int v1 = translateP(c, pOp); int v2; sqlite3_snprintf(nTemp-jj, zTemp+jj, "%d", v1); if( strncmp(zSynopsis+ii+1, "@P", 2)==0 ){ ii += 3; jj += sqlite3Strlen30(zTemp+jj); v2 = translateP(zSynopsis[ii], pOp); if( strncmp(zSynopsis+ii+1,"+1",2)==0 ){ ii += 2; v2++; } if( v2>1 ){ sqlite3_snprintf(nTemp-jj, zTemp+jj, "..%d", v1+v2-1); } }else if( strncmp(zSynopsis+ii+1, "..P3", 4)==0 && pOp->p3==0 ){ ii += 4; } } jj += sqlite3Strlen30(zTemp+jj); }else{ zTemp[jj++] = c; } } if( !seenCom && jjzComment ){ sqlite3_snprintf(nTemp-jj, zTemp+jj, "; %s", pOp->zComment); jj += sqlite3Strlen30(zTemp+jj); } if( jjzComment ){ sqlite3_snprintf(nTemp, zTemp, "%s", pOp->zComment); jj = sqlite3Strlen30(zTemp); }else{ zTemp[0] = 0; jj = 0; } return jj; } #endif /* SQLITE_DEBUG */ #if !defined(SQLITE_OMIT_EXPLAIN) || !defined(NDEBUG) \ || defined(VDBE_PROFILE) || defined(SQLITE_DEBUG) /* ** Compute a string that describes the P4 parameter for an opcode. ** Use zTemp for any required temporary buffer space. 
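**
** For example (illustrative values only): a P4_KEYINFO value describing a
** two-column index with BINARY collations, the second column descending,
** is rendered as "k(2,B,-B)"; a P4_COLLSEQ value for the NOCASE collation
** is rendered as "(NOCASE)"; and P4_INT32, P4_INT64 and P4_REAL values are
** rendered as plain numbers.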
*/ static char *displayP4(Op *pOp, char *zTemp, int nTemp){ char *zP4 = zTemp; assert( nTemp>=20 ); switch( pOp->p4type ){ case P4_KEYINFO: { int i, j; KeyInfo *pKeyInfo = pOp->p4.pKeyInfo; assert( pKeyInfo->aSortOrder!=0 ); sqlite3_snprintf(nTemp, zTemp, "k(%d", pKeyInfo->nField); i = sqlite3Strlen30(zTemp); for(j=0; jnField; j++){ CollSeq *pColl = pKeyInfo->aColl[j]; const char *zColl = pColl ? pColl->zName : "nil"; int n = sqlite3Strlen30(zColl); if( n==6 && memcmp(zColl,"BINARY",6)==0 ){ zColl = "B"; n = 1; } if( i+n>nTemp-6 ){ memcpy(&zTemp[i],",...",4); break; } zTemp[i++] = ','; if( pKeyInfo->aSortOrder[j] ){ zTemp[i++] = '-'; } memcpy(&zTemp[i], zColl, n+1); i += n; } zTemp[i++] = ')'; zTemp[i] = 0; assert( ip4.pColl; sqlite3_snprintf(nTemp, zTemp, "(%.20s)", pColl->zName); break; } case P4_FUNCDEF: { FuncDef *pDef = pOp->p4.pFunc; sqlite3_snprintf(nTemp, zTemp, "%s(%d)", pDef->zName, pDef->nArg); break; } #ifdef SQLITE_DEBUG case P4_FUNCCTX: { FuncDef *pDef = pOp->p4.pCtx->pFunc; sqlite3_snprintf(nTemp, zTemp, "%s(%d)", pDef->zName, pDef->nArg); break; } #endif case P4_INT64: { sqlite3_snprintf(nTemp, zTemp, "%lld", *pOp->p4.pI64); break; } case P4_INT32: { sqlite3_snprintf(nTemp, zTemp, "%d", pOp->p4.i); break; } case P4_REAL: { sqlite3_snprintf(nTemp, zTemp, "%.16g", *pOp->p4.pReal); break; } case P4_MEM: { Mem *pMem = pOp->p4.pMem; if( pMem->flags & MEM_Str ){ zP4 = pMem->z; }else if( pMem->flags & MEM_Int ){ sqlite3_snprintf(nTemp, zTemp, "%lld", pMem->u.i); }else if( pMem->flags & MEM_Real ){ sqlite3_snprintf(nTemp, zTemp, "%.16g", pMem->u.r); }else if( pMem->flags & MEM_Null ){ sqlite3_snprintf(nTemp, zTemp, "NULL"); }else{ assert( pMem->flags & MEM_Blob ); zP4 = "(blob)"; } break; } #ifndef SQLITE_OMIT_VIRTUALTABLE case P4_VTAB: { sqlite3_vtab *pVtab = pOp->p4.pVtab->pVtab; sqlite3_snprintf(nTemp, zTemp, "vtab:%p", pVtab); break; } #endif case P4_INTARRAY: { sqlite3_snprintf(nTemp, zTemp, "intarray"); break; } case P4_SUBPROGRAM: { sqlite3_snprintf(nTemp, zTemp, "program"); break; } case P4_ADVANCE: { zTemp[0] = 0; break; } default: { zP4 = pOp->p4.z; if( zP4==0 ){ zP4 = zTemp; zTemp[0] = 0; } } } assert( zP4!=0 ); return zP4; } #endif /* ** Declare to the Vdbe that the BTree object at db->aDb[i] is used. ** ** The prepared statements need to know in advance the complete set of ** attached databases that will be use. A mask of these databases ** is maintained in p->btreeMask. The p->lockMask value is the subset of ** p->btreeMask of databases that will require a lock. */ SQLITE_PRIVATE void sqlite3VdbeUsesBtree(Vdbe *p, int i){ assert( i>=0 && idb->nDb && i<(int)sizeof(yDbMask)*8 ); assert( i<(int)sizeof(p->btreeMask)*8 ); DbMaskSet(p->btreeMask, i); if( i!=1 && sqlite3BtreeSharable(p->db->aDb[i].pBt) ){ DbMaskSet(p->lockMask, i); } } #if !defined(SQLITE_OMIT_SHARED_CACHE) && SQLITE_THREADSAFE>0 /* ** If SQLite is compiled to support shared-cache mode and to be threadsafe, ** this routine obtains the mutex associated with each BtShared structure ** that may be accessed by the VM passed as an argument. In doing so it also ** sets the BtShared.db member of each of the BtShared structures, ensuring ** that the correct busy-handler callback is invoked if required. ** ** If SQLite is not threadsafe but does support shared-cache mode, then ** sqlite3BtreeEnter() is invoked to set the BtShared.db variables ** of all of BtShared structures accessible via the database handle ** associated with the VM. 
** ** If SQLite is not threadsafe and does not support shared-cache mode, this ** function is a no-op. ** ** The p->btreeMask field is a bitmask of all btrees that the prepared ** statement p will ever use. Let N be the number of bits in p->btreeMask ** corresponding to btrees that use shared cache. Then the runtime of ** this routine is N*N. But as N is rarely more than 1, this should not ** be a problem. */ SQLITE_PRIVATE void sqlite3VdbeEnter(Vdbe *p){ int i; sqlite3 *db; Db *aDb; int nDb; if( DbMaskAllZero(p->lockMask) ) return; /* The common case */ db = p->db; aDb = db->aDb; nDb = db->nDb; for(i=0; ilockMask,i) && ALWAYS(aDb[i].pBt!=0) ){ sqlite3BtreeEnter(aDb[i].pBt); } } } #endif #if !defined(SQLITE_OMIT_SHARED_CACHE) && SQLITE_THREADSAFE>0 /* ** Unlock all of the btrees previously locked by a call to sqlite3VdbeEnter(). */ static SQLITE_NOINLINE void vdbeLeave(Vdbe *p){ int i; sqlite3 *db; Db *aDb; int nDb; db = p->db; aDb = db->aDb; nDb = db->nDb; for(i=0; ilockMask,i) && ALWAYS(aDb[i].pBt!=0) ){ sqlite3BtreeLeave(aDb[i].pBt); } } } SQLITE_PRIVATE void sqlite3VdbeLeave(Vdbe *p){ if( DbMaskAllZero(p->lockMask) ) return; /* The common case */ vdbeLeave(p); } #endif #if defined(VDBE_PROFILE) || defined(SQLITE_DEBUG) /* ** Print a single opcode. This routine is used for debugging only. */ SQLITE_PRIVATE void sqlite3VdbePrintOp(FILE *pOut, int pc, Op *pOp){ char *zP4; char zPtr[50]; char zCom[100]; static const char *zFormat1 = "%4d %-13s %4d %4d %4d %-13s %.2X %s\n"; if( pOut==0 ) pOut = stdout; zP4 = displayP4(pOp, zPtr, sizeof(zPtr)); #ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS displayComment(pOp, zP4, zCom, sizeof(zCom)); #else zCom[0] = 0; #endif /* NB: The sqlite3OpcodeName() function is implemented by code created ** by the mkopcodeh.awk and mkopcodec.awk scripts which extract the ** information from the vdbe.c source text */ fprintf(pOut, zFormat1, pc, sqlite3OpcodeName(pOp->opcode), pOp->p1, pOp->p2, pOp->p3, zP4, pOp->p5, zCom ); fflush(pOut); } #endif /* ** Release an array of N Mem elements */ static void releaseMemArray(Mem *p, int N){ if( p && N ){ Mem *pEnd = &p[N]; sqlite3 *db = p->db; u8 malloc_failed = db->mallocFailed; if( db->pnBytesFreed ){ do{ if( p->szMalloc ) sqlite3DbFree(db, p->zMalloc); }while( (++p)flags & MEM_Agg ); testcase( p->flags & MEM_Dyn ); testcase( p->flags & MEM_Frame ); testcase( p->flags & MEM_RowSet ); if( p->flags&(MEM_Agg|MEM_Dyn|MEM_Frame|MEM_RowSet) ){ sqlite3VdbeMemRelease(p); }else if( p->szMalloc ){ sqlite3DbFree(db, p->zMalloc); p->szMalloc = 0; } p->flags = MEM_Undefined; }while( (++p)mallocFailed = malloc_failed; } } /* ** Delete a VdbeFrame object and its contents. VdbeFrame objects are ** allocated by the OP_Program opcode in sqlite3VdbeExec(). */ SQLITE_PRIVATE void sqlite3VdbeFrameDelete(VdbeFrame *p){ int i; Mem *aMem = VdbeFrameMem(p); VdbeCursor **apCsr = (VdbeCursor **)&aMem[p->nChildMem]; for(i=0; inChildCsr; i++){ sqlite3VdbeFreeCursor(p->v, apCsr[i]); } releaseMemArray(aMem, p->nChildMem); sqlite3DbFree(p->v->db, p); } #ifndef SQLITE_OMIT_EXPLAIN /* ** Give a listing of the program in the virtual machine. ** ** The interface is the same as sqlite3VdbeExec(). But instead of ** running the code, it invokes the callback once for each instruction. ** This feature is used to implement "EXPLAIN". ** ** When p->explain==1, each instruction is listed. When ** p->explain==2, only OP_Explain instructions are listed and these ** are shown in a different format. p->explain==2 is used to implement ** EXPLAIN QUERY PLAN. 
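**
** For example, in this build "EXPLAIN ..." (p->explain==1) produces eight
** columns per instruction (addr, opcode, p1, p2, p3, p4, p5 and comment),
** while "EXPLAIN QUERY PLAN ..." (p->explain==2) produces only the four
** columns p1, p2, p3 and p4 of each OP_Explain instruction, as reflected
** by the "p->nResColumn = 8 - 4*(p->explain-1)" computation below.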
** ** When p->explain==1, first the main program is listed, then each of ** the trigger subprograms are listed one by one. */ SQLITE_PRIVATE int sqlite3VdbeList( Vdbe *p /* The VDBE */ ){ int nRow; /* Stop when row count reaches this */ int nSub = 0; /* Number of sub-vdbes seen so far */ SubProgram **apSub = 0; /* Array of sub-vdbes */ Mem *pSub = 0; /* Memory cell hold array of subprogs */ sqlite3 *db = p->db; /* The database connection */ int i; /* Loop counter */ int rc = SQLITE_OK; /* Return code */ Mem *pMem = &p->aMem[1]; /* First Mem of result set */ assert( p->explain ); assert( p->magic==VDBE_MAGIC_RUN ); assert( p->rc==SQLITE_OK || p->rc==SQLITE_BUSY || p->rc==SQLITE_NOMEM ); /* Even though this opcode does not use dynamic strings for ** the result, result columns may become dynamic if the user calls ** sqlite3_column_text16(), causing a translation to UTF-16 encoding. */ releaseMemArray(pMem, 8); p->pResultSet = 0; if( p->rc==SQLITE_NOMEM ){ /* This happens if a malloc() inside a call to sqlite3_column_text() or ** sqlite3_column_text16() failed. */ db->mallocFailed = 1; return SQLITE_ERROR; } /* When the number of output rows reaches nRow, that means the ** listing has finished and sqlite3_step() should return SQLITE_DONE. ** nRow is the sum of the number of rows in the main program, plus ** the sum of the number of rows in all trigger subprograms encountered ** so far. The nRow value will increase as new trigger subprograms are ** encountered, but p->pc will eventually catch up to nRow. */ nRow = p->nOp; if( p->explain==1 ){ /* The first 8 memory cells are used for the result set. So we will ** commandeer the 9th cell to use as storage for an array of pointers ** to trigger subprograms. The VDBE is guaranteed to have at least 9 ** cells. */ assert( p->nMem>9 ); pSub = &p->aMem[9]; if( pSub->flags&MEM_Blob ){ /* On the first call to sqlite3_step(), pSub will hold a NULL. It is ** initialized to a BLOB by the P4_SUBPROGRAM processing logic below */ nSub = pSub->n/sizeof(Vdbe*); apSub = (SubProgram **)pSub->z; } for(i=0; inOp; } } do{ i = p->pc++; }while( iexplain==2 && p->aOp[i].opcode!=OP_Explain ); if( i>=nRow ){ p->rc = SQLITE_OK; rc = SQLITE_DONE; }else if( db->u1.isInterrupted ){ p->rc = SQLITE_INTERRUPT; rc = SQLITE_ERROR; sqlite3VdbeError(p, sqlite3ErrStr(p->rc)); }else{ char *zP4; Op *pOp; if( inOp ){ /* The output line number is small enough that we are still in the ** main program. */ pOp = &p->aOp[i]; }else{ /* We are currently listing subprograms. Figure out which one and ** pick up the appropriate opcode. */ int j; i -= p->nOp; for(j=0; i>=apSub[j]->nOp; j++){ i -= apSub[j]->nOp; } pOp = &apSub[j]->aOp[i]; } if( p->explain==1 ){ pMem->flags = MEM_Int; pMem->u.i = i; /* Program counter */ pMem++; pMem->flags = MEM_Static|MEM_Str|MEM_Term; pMem->z = (char*)sqlite3OpcodeName(pOp->opcode); /* Opcode */ assert( pMem->z!=0 ); pMem->n = sqlite3Strlen30(pMem->z); pMem->enc = SQLITE_UTF8; pMem++; /* When an OP_Program opcode is encounter (the only opcode that has ** a P4_SUBPROGRAM argument), expand the size of the array of subprograms ** kept in p->aMem[9].z to hold the new program - assuming this subprogram ** has not already been seen. 
*/ if( pOp->p4type==P4_SUBPROGRAM ){ int nByte = (nSub+1)*sizeof(SubProgram*); int j; for(j=0; jp4.pProgram ) break; } if( j==nSub && SQLITE_OK==sqlite3VdbeMemGrow(pSub, nByte, nSub!=0) ){ apSub = (SubProgram **)pSub->z; apSub[nSub++] = pOp->p4.pProgram; pSub->flags |= MEM_Blob; pSub->n = nSub*sizeof(SubProgram*); } } } pMem->flags = MEM_Int; pMem->u.i = pOp->p1; /* P1 */ pMem++; pMem->flags = MEM_Int; pMem->u.i = pOp->p2; /* P2 */ pMem++; pMem->flags = MEM_Int; pMem->u.i = pOp->p3; /* P3 */ pMem++; if( sqlite3VdbeMemClearAndResize(pMem, 32) ){ /* P4 */ assert( p->db->mallocFailed ); return SQLITE_ERROR; } pMem->flags = MEM_Str|MEM_Term; zP4 = displayP4(pOp, pMem->z, 32); if( zP4!=pMem->z ){ sqlite3VdbeMemSetStr(pMem, zP4, -1, SQLITE_UTF8, 0); }else{ assert( pMem->z!=0 ); pMem->n = sqlite3Strlen30(pMem->z); pMem->enc = SQLITE_UTF8; } pMem++; if( p->explain==1 ){ if( sqlite3VdbeMemClearAndResize(pMem, 4) ){ assert( p->db->mallocFailed ); return SQLITE_ERROR; } pMem->flags = MEM_Str|MEM_Term; pMem->n = 2; sqlite3_snprintf(3, pMem->z, "%.2x", pOp->p5); /* P5 */ pMem->enc = SQLITE_UTF8; pMem++; #ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS if( sqlite3VdbeMemClearAndResize(pMem, 500) ){ assert( p->db->mallocFailed ); return SQLITE_ERROR; } pMem->flags = MEM_Str|MEM_Term; pMem->n = displayComment(pOp, zP4, pMem->z, 500); pMem->enc = SQLITE_UTF8; #else pMem->flags = MEM_Null; /* Comment */ #endif } p->nResColumn = 8 - 4*(p->explain-1); p->pResultSet = &p->aMem[1]; p->rc = SQLITE_OK; rc = SQLITE_ROW; } return rc; } #endif /* SQLITE_OMIT_EXPLAIN */ #ifdef SQLITE_DEBUG /* ** Print the SQL that was used to generate a VDBE program. */ SQLITE_PRIVATE void sqlite3VdbePrintSql(Vdbe *p){ const char *z = 0; if( p->zSql ){ z = p->zSql; }else if( p->nOp>=1 ){ const VdbeOp *pOp = &p->aOp[0]; if( pOp->opcode==OP_Init && pOp->p4.z!=0 ){ z = pOp->p4.z; while( sqlite3Isspace(*z) ) z++; } } if( z ) printf("SQL: [%s]\n", z); } #endif #if !defined(SQLITE_OMIT_TRACE) && defined(SQLITE_ENABLE_IOTRACE) /* ** Print an IOTRACE message showing SQL content. */ SQLITE_PRIVATE void sqlite3VdbeIOTraceSql(Vdbe *p){ int nOp = p->nOp; VdbeOp *pOp; if( sqlite3IoTrace==0 ) return; if( nOp<1 ) return; pOp = &p->aOp[0]; if( pOp->opcode==OP_Init && pOp->p4.z!=0 ){ int i, j; char z[1000]; sqlite3_snprintf(sizeof(z), z, "%s", pOp->p4.z); for(i=0; sqlite3Isspace(z[i]); i++){} for(j=0; z[i]; i++){ if( sqlite3Isspace(z[i]) ){ if( z[i-1]!=' ' ){ z[j++] = ' '; } }else{ z[j++] = z[i]; } } z[j] = 0; sqlite3IoTrace("SQL %s\n", z); } } #endif /* !SQLITE_OMIT_TRACE && SQLITE_ENABLE_IOTRACE */ /* ** Allocate space from a fixed size buffer and return a pointer to ** that space. If insufficient space is available, return NULL. ** ** The pBuf parameter is the initial value of a pointer which will ** receive the new memory. pBuf is normally NULL. If pBuf is not ** NULL, it means that memory space has already been allocated and that ** this routine should not allocate any new memory. When pBuf is not ** NULL simply return pBuf. Only allocate new memory space when pBuf ** is NULL. ** ** nByte is the number of bytes of space needed. ** ** *ppFrom points to available space and pEnd points to the end of the ** available space. When space is allocated, *ppFrom is advanced past ** the end of the allocated space. ** ** *pnByte is a counter of the number of bytes of space that have failed ** to allocate. If there is insufficient space in *ppFrom to satisfy the ** request, then increment *pnByte by the amount of the request. 
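**
** A typical call, taken from sqlite3VdbeMakeReady() below, looks like:
**
**     p->aMem = allocSpace(p->aMem, nMem*sizeof(Mem), &zCsr, zEnd, &nByte);
**
** On the first pass nByte accumulates the total shortfall; the caller then
** makes one fresh allocation of nByte bytes and repeats the same calls so
** that any request that could not be satisfied from the opcode-array tail
** is served from the new buffer.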
*/ static void *allocSpace( void *pBuf, /* Where return pointer will be stored */ int nByte, /* Number of bytes to allocate */ u8 **ppFrom, /* IN/OUT: Allocate from *ppFrom */ u8 *pEnd, /* Pointer to 1 byte past the end of *ppFrom buffer */ int *pnByte /* If allocation cannot be made, increment *pnByte */ ){ assert( EIGHT_BYTE_ALIGNMENT(*ppFrom) ); if( pBuf ) return pBuf; nByte = ROUND8(nByte); if( &(*ppFrom)[nByte] <= pEnd ){ pBuf = (void*)*ppFrom; *ppFrom += nByte; }else{ *pnByte += nByte; } return pBuf; } /* ** Rewind the VDBE back to the beginning in preparation for ** running it. */ SQLITE_PRIVATE void sqlite3VdbeRewind(Vdbe *p){ #if defined(SQLITE_DEBUG) || defined(VDBE_PROFILE) int i; #endif assert( p!=0 ); assert( p->magic==VDBE_MAGIC_INIT ); /* There should be at least one opcode. */ assert( p->nOp>0 ); /* Set the magic to VDBE_MAGIC_RUN sooner rather than later. */ p->magic = VDBE_MAGIC_RUN; #ifdef SQLITE_DEBUG for(i=1; inMem; i++){ assert( p->aMem[i].db==p->db ); } #endif p->pc = -1; p->rc = SQLITE_OK; p->errorAction = OE_Abort; p->magic = VDBE_MAGIC_RUN; p->nChange = 0; p->cacheCtr = 1; p->minWriteFileFormat = 255; p->iStatement = 0; p->nFkConstraint = 0; #ifdef VDBE_PROFILE for(i=0; inOp; i++){ p->aOp[i].cnt = 0; p->aOp[i].cycles = 0; } #endif } /* ** Prepare a virtual machine for execution for the first time after ** creating the virtual machine. This involves things such ** as allocating registers and initializing the program counter. ** After the VDBE has be prepped, it can be executed by one or more ** calls to sqlite3VdbeExec(). ** ** This function may be called exactly once on each virtual machine. ** After this routine is called the VM has been "packaged" and is ready ** to run. After this routine is called, further calls to ** sqlite3VdbeAddOp() functions are prohibited. This routine disconnects ** the Vdbe from the Parse object that helped generate it so that the ** the Vdbe becomes an independent entity and the Parse object can be ** destroyed. ** ** Use the sqlite3VdbeRewind() procedure to restore a virtual machine back ** to its initial state after it has been run. */ SQLITE_PRIVATE void sqlite3VdbeMakeReady( Vdbe *p, /* The VDBE */ Parse *pParse /* Parsing context */ ){ sqlite3 *db; /* The database connection */ int nVar; /* Number of parameters */ int nMem; /* Number of VM memory registers */ int nCursor; /* Number of cursors required */ int nArg; /* Number of arguments in subprograms */ int nOnce; /* Number of OP_Once instructions */ int n; /* Loop counter */ u8 *zCsr; /* Memory available for allocation */ u8 *zEnd; /* First byte past allocated memory */ int nByte; /* How much extra memory is needed */ assert( p!=0 ); assert( p->nOp>0 ); assert( pParse!=0 ); assert( p->magic==VDBE_MAGIC_INIT ); assert( pParse==p->pParse ); db = p->db; assert( db->mallocFailed==0 ); nVar = pParse->nVar; nMem = pParse->nMem; nCursor = pParse->nTab; nArg = pParse->nMaxArg; nOnce = pParse->nOnce; if( nOnce==0 ) nOnce = 1; /* Ensure at least one byte in p->aOnceFlag[] */ /* For each cursor required, also allocate a memory cell. Memory ** cells (nMem+1-nCursor)..nMem, inclusive, will never be used by ** the vdbe program. Instead they are used to allocate space for ** VdbeCursor/BtCursor structures. The blob of memory associated with ** cursor 0 is stored in memory cell nMem. Memory cell (nMem-1) ** stores the blob of memory associated with cursor 1, etc. ** ** See also: allocateCursor(). 
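**
** Worked example (illustrative numbers only): if the program itself needs
** nMem==10 registers and nCursor==3 cursors, then nMem is bumped to 13 by
** the statement below; cells 11..13 are reserved, with cursor 0 using cell
** 13, cursor 1 using cell 12 and cursor 2 using cell 11.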
*/ nMem += nCursor; /* Allocate space for memory registers, SQL variables, VDBE cursors and ** an array to marshal SQL function arguments in. */ zCsr = (u8*)&p->aOp[p->nOp]; /* Memory avaliable for allocation */ zEnd = (u8*)&p->aOp[pParse->nOpAlloc]; /* First byte past end of zCsr[] */ resolveP2Values(p, &nArg); p->usesStmtJournal = (u8)(pParse->isMultiWrite && pParse->mayAbort); if( pParse->explain && nMem<10 ){ nMem = 10; } memset(zCsr, 0, zEnd-zCsr); zCsr += (zCsr - (u8*)0)&7; assert( EIGHT_BYTE_ALIGNMENT(zCsr) ); p->expired = 0; /* Memory for registers, parameters, cursor, etc, is allocated in two ** passes. On the first pass, we try to reuse unused space at the ** end of the opcode array. If we are unable to satisfy all memory ** requirements by reusing the opcode array tail, then the second ** pass will fill in the rest using a fresh allocation. ** ** This two-pass approach that reuses as much memory as possible from ** the leftover space at the end of the opcode array can significantly ** reduce the amount of memory held by a prepared statement. */ do { nByte = 0; p->aMem = allocSpace(p->aMem, nMem*sizeof(Mem), &zCsr, zEnd, &nByte); p->aVar = allocSpace(p->aVar, nVar*sizeof(Mem), &zCsr, zEnd, &nByte); p->apArg = allocSpace(p->apArg, nArg*sizeof(Mem*), &zCsr, zEnd, &nByte); p->azVar = allocSpace(p->azVar, nVar*sizeof(char*), &zCsr, zEnd, &nByte); p->apCsr = allocSpace(p->apCsr, nCursor*sizeof(VdbeCursor*), &zCsr, zEnd, &nByte); p->aOnceFlag = allocSpace(p->aOnceFlag, nOnce, &zCsr, zEnd, &nByte); #ifdef SQLITE_ENABLE_STMT_SCANSTATUS p->anExec = allocSpace(p->anExec, p->nOp*sizeof(i64), &zCsr, zEnd, &nByte); #endif if( nByte ){ p->pFree = sqlite3DbMallocZero(db, nByte); } zCsr = p->pFree; zEnd = &zCsr[nByte]; }while( nByte && !db->mallocFailed ); p->nCursor = nCursor; p->nOnceFlag = nOnce; if( p->aVar ){ p->nVar = (ynVar)nVar; for(n=0; naVar[n].flags = MEM_Null; p->aVar[n].db = db; } } if( p->azVar && pParse->nzVar>0 ){ p->nzVar = pParse->nzVar; memcpy(p->azVar, pParse->azVar, p->nzVar*sizeof(p->azVar[0])); memset(pParse->azVar, 0, pParse->nzVar*sizeof(pParse->azVar[0])); } if( p->aMem ){ p->aMem--; /* aMem[] goes from 1..nMem */ p->nMem = nMem; /* not from 0..nMem-1 */ for(n=1; n<=nMem; n++){ p->aMem[n].flags = MEM_Undefined; p->aMem[n].db = db; } } p->explain = pParse->explain; sqlite3VdbeRewind(p); } /* ** Close a VDBE cursor and release all the resources that cursor ** happens to hold. */ SQLITE_PRIVATE void sqlite3VdbeFreeCursor(Vdbe *p, VdbeCursor *pCx){ if( pCx==0 ){ return; } sqlite3VdbeSorterClose(p->db, pCx); if( pCx->pBt ){ sqlite3BtreeClose(pCx->pBt); /* The pCx->pCursor will be close automatically, if it exists, by ** the call above. */ }else if( pCx->pCursor ){ sqlite3BtreeCloseCursor(pCx->pCursor); } #ifndef SQLITE_OMIT_VIRTUALTABLE else if( pCx->pVtabCursor ){ sqlite3_vtab_cursor *pVtabCursor = pCx->pVtabCursor; const sqlite3_module *pModule = pVtabCursor->pVtab->pModule; assert( pVtabCursor->pVtab->nRef>0 ); pVtabCursor->pVtab->nRef--; pModule->xClose(pVtabCursor); } #endif } /* ** Close all cursors in the current frame. */ static void closeCursorsInFrame(Vdbe *p){ if( p->apCsr ){ int i; for(i=0; inCursor; i++){ VdbeCursor *pC = p->apCsr[i]; if( pC ){ sqlite3VdbeFreeCursor(p, pC); p->apCsr[i] = 0; } } } } /* ** Copy the values stored in the VdbeFrame structure to its Vdbe. This ** is used, for example, when a trigger sub-program is halted to restore ** control to the main program. 
*/ SQLITE_PRIVATE int sqlite3VdbeFrameRestore(VdbeFrame *pFrame){ Vdbe *v = pFrame->v; closeCursorsInFrame(v); #ifdef SQLITE_ENABLE_STMT_SCANSTATUS v->anExec = pFrame->anExec; #endif v->aOnceFlag = pFrame->aOnceFlag; v->nOnceFlag = pFrame->nOnceFlag; v->aOp = pFrame->aOp; v->nOp = pFrame->nOp; v->aMem = pFrame->aMem; v->nMem = pFrame->nMem; v->apCsr = pFrame->apCsr; v->nCursor = pFrame->nCursor; v->db->lastRowid = pFrame->lastRowid; v->nChange = pFrame->nChange; v->db->nChange = pFrame->nDbChange; return pFrame->pc; } /* ** Close all cursors. ** ** Also release any dynamic memory held by the VM in the Vdbe.aMem memory ** cell array. This is necessary as the memory cell array may contain ** pointers to VdbeFrame objects, which may in turn contain pointers to ** open cursors. */ static void closeAllCursors(Vdbe *p){ if( p->pFrame ){ VdbeFrame *pFrame; for(pFrame=p->pFrame; pFrame->pParent; pFrame=pFrame->pParent); sqlite3VdbeFrameRestore(pFrame); p->pFrame = 0; p->nFrame = 0; } assert( p->nFrame==0 ); closeCursorsInFrame(p); if( p->aMem ){ releaseMemArray(&p->aMem[1], p->nMem); } while( p->pDelFrame ){ VdbeFrame *pDel = p->pDelFrame; p->pDelFrame = pDel->pParent; sqlite3VdbeFrameDelete(pDel); } /* Delete any auxdata allocations made by the VM */ if( p->pAuxData ) sqlite3VdbeDeleteAuxData(p, -1, 0); assert( p->pAuxData==0 ); } /* ** Clean up the VM after a single run. */ static void Cleanup(Vdbe *p){ sqlite3 *db = p->db; #ifdef SQLITE_DEBUG /* Execute assert() statements to ensure that the Vdbe.apCsr[] and ** Vdbe.aMem[] arrays have already been cleaned up. */ int i; if( p->apCsr ) for(i=0; inCursor; i++) assert( p->apCsr[i]==0 ); if( p->aMem ){ for(i=1; i<=p->nMem; i++) assert( p->aMem[i].flags==MEM_Undefined ); } #endif sqlite3DbFree(db, p->zErrMsg); p->zErrMsg = 0; p->pResultSet = 0; } /* ** Set the number of result columns that will be returned by this SQL ** statement. This is now set at compile time, rather than during ** execution of the vdbe program so that sqlite3_column_count() can ** be called on an SQL statement before sqlite3_step(). */ SQLITE_PRIVATE void sqlite3VdbeSetNumCols(Vdbe *p, int nResColumn){ Mem *pColName; int n; sqlite3 *db = p->db; releaseMemArray(p->aColName, p->nResColumn*COLNAME_N); sqlite3DbFree(db, p->aColName); n = nResColumn*COLNAME_N; p->nResColumn = (u16)nResColumn; p->aColName = pColName = (Mem*)sqlite3DbMallocZero(db, sizeof(Mem)*n ); if( p->aColName==0 ) return; while( n-- > 0 ){ pColName->flags = MEM_Null; pColName->db = p->db; pColName++; } } /* ** Set the name of the idx'th column to be returned by the SQL statement. ** zName must be a pointer to a nul terminated string. ** ** This call must be made after a call to sqlite3VdbeSetNumCols(). ** ** The final parameter, xDel, must be one of SQLITE_DYNAMIC, SQLITE_STATIC ** or SQLITE_TRANSIENT. If it is SQLITE_DYNAMIC, then the buffer pointed ** to by zName will be freed by sqlite3DbFree() when the vdbe is destroyed. 
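**
** For illustration only (a hypothetical two-column result set, using the
** COLNAME_NAME constant for the column-name slot):
**
**     sqlite3VdbeSetNumCols(v, 2);
**     sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "id",   SQLITE_STATIC);
**     sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "name", SQLITE_STATIC);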
*/ SQLITE_PRIVATE int sqlite3VdbeSetColName( Vdbe *p, /* Vdbe being configured */ int idx, /* Index of column zName applies to */ int var, /* One of the COLNAME_* constants */ const char *zName, /* Pointer to buffer containing name */ void (*xDel)(void*) /* Memory management strategy for zName */ ){ int rc; Mem *pColName; assert( idxnResColumn ); assert( vardb->mallocFailed ){ assert( !zName || xDel!=SQLITE_DYNAMIC ); return SQLITE_NOMEM; } assert( p->aColName!=0 ); pColName = &(p->aColName[idx+var*p->nResColumn]); rc = sqlite3VdbeMemSetStr(pColName, zName, -1, SQLITE_UTF8, xDel); assert( rc!=0 || !zName || (pColName->flags&MEM_Term)!=0 ); return rc; } /* ** A read or write transaction may or may not be active on database handle ** db. If a transaction is active, commit it. If there is a ** write-transaction spanning more than one database file, this routine ** takes care of the master journal trickery. */ static int vdbeCommit(sqlite3 *db, Vdbe *p){ int i; int nTrans = 0; /* Number of databases with an active write-transaction */ int rc = SQLITE_OK; int needXcommit = 0; #ifdef SQLITE_OMIT_VIRTUALTABLE /* With this option, sqlite3VtabSync() is defined to be simply ** SQLITE_OK so p is not used. */ UNUSED_PARAMETER(p); #endif /* Before doing anything else, call the xSync() callback for any ** virtual module tables written in this transaction. This has to ** be done before determining whether a master journal file is ** required, as an xSync() callback may add an attached database ** to the transaction. */ rc = sqlite3VtabSync(db, p); /* This loop determines (a) if the commit hook should be invoked and ** (b) how many database files have open write transactions, not ** including the temp database. (b) is important because if more than ** one database file has an open write transaction, a master journal ** file is required for an atomic commit. */ for(i=0; rc==SQLITE_OK && inDb; i++){ Btree *pBt = db->aDb[i].pBt; if( sqlite3BtreeIsInTrans(pBt) ){ needXcommit = 1; if( i!=1 ) nTrans++; sqlite3BtreeEnter(pBt); rc = sqlite3PagerExclusiveLock(sqlite3BtreePager(pBt)); sqlite3BtreeLeave(pBt); } } if( rc!=SQLITE_OK ){ return rc; } /* If there are any write-transactions at all, invoke the commit hook */ if( needXcommit && db->xCommitCallback ){ rc = db->xCommitCallback(db->pCommitArg); if( rc ){ return SQLITE_CONSTRAINT_COMMITHOOK; } } /* The simple case - no more than one database file (not counting the ** TEMP database) has a transaction active. There is no need for the ** master-journal. ** ** If the return value of sqlite3BtreeGetFilename() is a zero length ** string, it means the main database is :memory: or a temp file. In ** that case we do not support atomic multi-file commits, so use the ** simple case then too. */ if( 0==sqlite3Strlen30(sqlite3BtreeGetFilename(db->aDb[0].pBt)) || nTrans<=1 ){ for(i=0; rc==SQLITE_OK && inDb; i++){ Btree *pBt = db->aDb[i].pBt; if( pBt ){ rc = sqlite3BtreeCommitPhaseOne(pBt, 0); } } /* Do the commit only if all databases successfully complete phase 1. ** If one of the BtreeCommitPhaseOne() calls fails, this indicates an ** IO error while deleting or truncating a journal file. It is unlikely, ** but could happen. In this case abandon processing and return the error. */ for(i=0; rc==SQLITE_OK && inDb; i++){ Btree *pBt = db->aDb[i].pBt; if( pBt ){ rc = sqlite3BtreeCommitPhaseTwo(pBt, 0); } } if( rc==SQLITE_OK ){ sqlite3VtabCommit(db); } } /* The complex case - There is a multi-file write-transaction active. 
** This requires a master journal file to ensure the transaction is ** committed atomically. */ #ifndef SQLITE_OMIT_DISKIO else{ sqlite3_vfs *pVfs = db->pVfs; int needSync = 0; char *zMaster = 0; /* File-name for the master journal */ char const *zMainFile = sqlite3BtreeGetFilename(db->aDb[0].pBt); sqlite3_file *pMaster = 0; i64 offset = 0; int res; int retryCount = 0; int nMainFile; /* Select a master journal file name */ nMainFile = sqlite3Strlen30(zMainFile); zMaster = sqlite3MPrintf(db, "%s-mjXXXXXX9XXz", zMainFile); if( zMaster==0 ) return SQLITE_NOMEM; do { u32 iRandom; if( retryCount ){ if( retryCount>100 ){ sqlite3_log(SQLITE_FULL, "MJ delete: %s", zMaster); sqlite3OsDelete(pVfs, zMaster, 0); break; }else if( retryCount==1 ){ sqlite3_log(SQLITE_FULL, "MJ collide: %s", zMaster); } } retryCount++; sqlite3_randomness(sizeof(iRandom), &iRandom); sqlite3_snprintf(13, &zMaster[nMainFile], "-mj%06X9%02X", (iRandom>>8)&0xffffff, iRandom&0xff); /* The antipenultimate character of the master journal name must ** be "9" to avoid name collisions when using 8+3 filenames. */ assert( zMaster[sqlite3Strlen30(zMaster)-3]=='9' ); sqlite3FileSuffix3(zMainFile, zMaster); rc = sqlite3OsAccess(pVfs, zMaster, SQLITE_ACCESS_EXISTS, &res); }while( rc==SQLITE_OK && res ); if( rc==SQLITE_OK ){ /* Open the master journal. */ rc = sqlite3OsOpenMalloc(pVfs, zMaster, &pMaster, SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE| SQLITE_OPEN_EXCLUSIVE|SQLITE_OPEN_MASTER_JOURNAL, 0 ); } if( rc!=SQLITE_OK ){ sqlite3DbFree(db, zMaster); return rc; } /* Write the name of each database file in the transaction into the new ** master journal file. If an error occurs at this point close ** and delete the master journal file. All the individual journal files ** still have 'null' as the master journal pointer, so they will roll ** back independently if a failure occurs. */ for(i=0; inDb; i++){ Btree *pBt = db->aDb[i].pBt; if( sqlite3BtreeIsInTrans(pBt) ){ char const *zFile = sqlite3BtreeGetJournalname(pBt); if( zFile==0 ){ continue; /* Ignore TEMP and :memory: databases */ } assert( zFile[0]!=0 ); if( !needSync && !sqlite3BtreeSyncDisabled(pBt) ){ needSync = 1; } rc = sqlite3OsWrite(pMaster, zFile, sqlite3Strlen30(zFile)+1, offset); offset += sqlite3Strlen30(zFile)+1; if( rc!=SQLITE_OK ){ sqlite3OsCloseFree(pMaster); sqlite3OsDelete(pVfs, zMaster, 0); sqlite3DbFree(db, zMaster); return rc; } } } /* Sync the master journal file. If the IOCAP_SEQUENTIAL device ** flag is set this is not required. */ if( needSync && 0==(sqlite3OsDeviceCharacteristics(pMaster)&SQLITE_IOCAP_SEQUENTIAL) && SQLITE_OK!=(rc = sqlite3OsSync(pMaster, SQLITE_SYNC_NORMAL)) ){ sqlite3OsCloseFree(pMaster); sqlite3OsDelete(pVfs, zMaster, 0); sqlite3DbFree(db, zMaster); return rc; } /* Sync all the db files involved in the transaction. The same call ** sets the master journal pointer in each individual journal. If ** an error occurs here, do not delete the master journal file. ** ** If the error occurs during the first call to ** sqlite3BtreeCommitPhaseOne(), then there is a chance that the ** master journal file will be orphaned. But we cannot delete it, ** in case the master journal file name was written into the journal ** file before the failure occurred. */ for(i=0; rc==SQLITE_OK && inDb; i++){ Btree *pBt = db->aDb[i].pBt; if( pBt ){ rc = sqlite3BtreeCommitPhaseOne(pBt, zMaster); } } sqlite3OsCloseFree(pMaster); assert( rc!=SQLITE_BUSY ); if( rc!=SQLITE_OK ){ sqlite3DbFree(db, zMaster); return rc; } /* Delete the master journal file. 
This commits the transaction. After ** doing this the directory is synced again before any individual ** transaction files are deleted. */ rc = sqlite3OsDelete(pVfs, zMaster, needSync); sqlite3DbFree(db, zMaster); zMaster = 0; if( rc ){ return rc; } /* All files and directories have already been synced, so the following ** calls to sqlite3BtreeCommitPhaseTwo() are only closing files and ** deleting or truncating journals. If something goes wrong while ** this is happening we don't really care. The integrity of the ** transaction is already guaranteed, but some stray 'cold' journals ** may be lying around. Returning an error code won't help matters. */ disable_simulated_io_errors(); sqlite3BeginBenignMalloc(); for(i=0; inDb; i++){ Btree *pBt = db->aDb[i].pBt; if( pBt ){ sqlite3BtreeCommitPhaseTwo(pBt, 1); } } sqlite3EndBenignMalloc(); enable_simulated_io_errors(); sqlite3VtabCommit(db); } #endif return rc; } /* ** This routine checks that the sqlite3.nVdbeActive count variable ** matches the number of vdbe's in the list sqlite3.pVdbe that are ** currently active. An assertion fails if the two counts do not match. ** This is an internal self-check only - it is not an essential processing ** step. ** ** This is a no-op if NDEBUG is defined. */ #ifndef NDEBUG static void checkActiveVdbeCnt(sqlite3 *db){ Vdbe *p; int cnt = 0; int nWrite = 0; int nRead = 0; p = db->pVdbe; while( p ){ if( sqlite3_stmt_busy((sqlite3_stmt*)p) ){ cnt++; if( p->readOnly==0 ) nWrite++; if( p->bIsReader ) nRead++; } p = p->pNext; } assert( cnt==db->nVdbeActive ); assert( nWrite==db->nVdbeWrite ); assert( nRead==db->nVdbeRead ); } #else #define checkActiveVdbeCnt(x) #endif /* ** If the Vdbe passed as the first argument opened a statement-transaction, ** close it now. Argument eOp must be either SAVEPOINT_ROLLBACK or ** SAVEPOINT_RELEASE. If it is SAVEPOINT_ROLLBACK, then the statement ** transaction is rolled back. If eOp is SAVEPOINT_RELEASE, then the ** statement transaction is committed. ** ** If an IO error occurs, an SQLITE_IOERR_XXX error code is returned. ** Otherwise SQLITE_OK. */ SQLITE_PRIVATE int sqlite3VdbeCloseStatement(Vdbe *p, int eOp){ sqlite3 *const db = p->db; int rc = SQLITE_OK; /* If p->iStatement is greater than zero, then this Vdbe opened a ** statement transaction that should be closed here. The only exception ** is that an IO error may have occurred, causing an emergency rollback. ** In this case (db->nStatement==0), and there is nothing to do. */ if( db->nStatement && p->iStatement ){ int i; const int iSavepoint = p->iStatement-1; assert( eOp==SAVEPOINT_ROLLBACK || eOp==SAVEPOINT_RELEASE); assert( db->nStatement>0 ); assert( p->iStatement==(db->nStatement+db->nSavepoint) ); for(i=0; inDb; i++){ int rc2 = SQLITE_OK; Btree *pBt = db->aDb[i].pBt; if( pBt ){ if( eOp==SAVEPOINT_ROLLBACK ){ rc2 = sqlite3BtreeSavepoint(pBt, SAVEPOINT_ROLLBACK, iSavepoint); } if( rc2==SQLITE_OK ){ rc2 = sqlite3BtreeSavepoint(pBt, SAVEPOINT_RELEASE, iSavepoint); } if( rc==SQLITE_OK ){ rc = rc2; } } } db->nStatement--; p->iStatement = 0; if( rc==SQLITE_OK ){ if( eOp==SAVEPOINT_ROLLBACK ){ rc = sqlite3VtabSavepoint(db, SAVEPOINT_ROLLBACK, iSavepoint); } if( rc==SQLITE_OK ){ rc = sqlite3VtabSavepoint(db, SAVEPOINT_RELEASE, iSavepoint); } } /* If the statement transaction is being rolled back, also restore the ** database handles deferred constraint counter to the value it had when ** the statement transaction was opened. 
*/ if( eOp==SAVEPOINT_ROLLBACK ){ db->nDeferredCons = p->nStmtDefCons; db->nDeferredImmCons = p->nStmtDefImmCons; } } return rc; } /* ** This function is called when a transaction opened by the database ** handle associated with the VM passed as an argument is about to be ** committed. If there are outstanding deferred foreign key constraint ** violations, return SQLITE_ERROR. Otherwise, SQLITE_OK. ** ** If there are outstanding FK violations and this function returns ** SQLITE_ERROR, set the result of the VM to SQLITE_CONSTRAINT_FOREIGNKEY ** and write an error message to it. Then return SQLITE_ERROR. */ #ifndef SQLITE_OMIT_FOREIGN_KEY SQLITE_PRIVATE int sqlite3VdbeCheckFk(Vdbe *p, int deferred){ sqlite3 *db = p->db; if( (deferred && (db->nDeferredCons+db->nDeferredImmCons)>0) || (!deferred && p->nFkConstraint>0) ){ p->rc = SQLITE_CONSTRAINT_FOREIGNKEY; p->errorAction = OE_Abort; sqlite3VdbeError(p, "FOREIGN KEY constraint failed"); return SQLITE_ERROR; } return SQLITE_OK; } #endif /* ** This routine is called the when a VDBE tries to halt. If the VDBE ** has made changes and is in autocommit mode, then commit those ** changes. If a rollback is needed, then do the rollback. ** ** This routine is the only way to move the state of a VM from ** SQLITE_MAGIC_RUN to SQLITE_MAGIC_HALT. It is harmless to ** call this on a VM that is in the SQLITE_MAGIC_HALT state. ** ** Return an error code. If the commit could not complete because of ** lock contention, return SQLITE_BUSY. If SQLITE_BUSY is returned, it ** means the close did not happen and needs to be repeated. */ SQLITE_PRIVATE int sqlite3VdbeHalt(Vdbe *p){ int rc; /* Used to store transient return codes */ sqlite3 *db = p->db; /* This function contains the logic that determines if a statement or ** transaction will be committed or rolled back as a result of the ** execution of this virtual machine. ** ** If any of the following errors occur: ** ** SQLITE_NOMEM ** SQLITE_IOERR ** SQLITE_FULL ** SQLITE_INTERRUPT ** ** Then the internal cache might have been left in an inconsistent ** state. We need to rollback the statement transaction, if there is ** one, or the complete transaction if there is no statement transaction. */ if( p->db->mallocFailed ){ p->rc = SQLITE_NOMEM; } if( p->aOnceFlag ) memset(p->aOnceFlag, 0, p->nOnceFlag); closeAllCursors(p); if( p->magic!=VDBE_MAGIC_RUN ){ return SQLITE_OK; } checkActiveVdbeCnt(db); /* No commit or rollback needed if the program never started or if the ** SQL statement does not read or write a database file. */ if( p->pc>=0 && p->bIsReader ){ int mrc; /* Primary error code from p->rc */ int eStatementOp = 0; int isSpecialError; /* Set to true if a 'special' error */ /* Lock all btrees used by the statement */ sqlite3VdbeEnter(p); /* Check for one of the special errors */ mrc = p->rc & 0xff; isSpecialError = mrc==SQLITE_NOMEM || mrc==SQLITE_IOERR || mrc==SQLITE_INTERRUPT || mrc==SQLITE_FULL; if( isSpecialError ){ /* If the query was read-only and the error code is SQLITE_INTERRUPT, ** no rollback is necessary. Otherwise, at least a savepoint ** transaction must be rolled back to restore the database to a ** consistent state. ** ** Even if the statement is read-only, it is important to perform ** a statement or transaction rollback operation. If the error ** occurred while writing to the journal, sub-journal or database ** file as part of an effort to free up cache space (see function ** pagerStress() in pager.c), the rollback is required to restore ** the pager to a consistent state. 
*/ if( !p->readOnly || mrc!=SQLITE_INTERRUPT ){ if( (mrc==SQLITE_NOMEM || mrc==SQLITE_FULL) && p->usesStmtJournal ){ eStatementOp = SAVEPOINT_ROLLBACK; }else{ /* We are forced to roll back the active transaction. Before doing ** so, abort any other statements this handle currently has active. */ sqlite3RollbackAll(db, SQLITE_ABORT_ROLLBACK); sqlite3CloseSavepoints(db); db->autoCommit = 1; p->nChange = 0; } } } /* Check for immediate foreign key violations. */ if( p->rc==SQLITE_OK ){ sqlite3VdbeCheckFk(p, 0); } /* If the auto-commit flag is set and this is the only active writer ** VM, then we do either a commit or rollback of the current transaction. ** ** Note: This block also runs if one of the special errors handled ** above has occurred. */ if( !sqlite3VtabInSync(db) && db->autoCommit && db->nVdbeWrite==(p->readOnly==0) ){ if( p->rc==SQLITE_OK || (p->errorAction==OE_Fail && !isSpecialError) ){ rc = sqlite3VdbeCheckFk(p, 1); if( rc!=SQLITE_OK ){ if( NEVER(p->readOnly) ){ sqlite3VdbeLeave(p); return SQLITE_ERROR; } rc = SQLITE_CONSTRAINT_FOREIGNKEY; }else{ /* The auto-commit flag is true, the vdbe program was successful ** or hit an 'OR FAIL' constraint and there are no deferred foreign ** key constraints to hold up the transaction. This means a commit ** is required. */ rc = vdbeCommit(db, p); } if( rc==SQLITE_BUSY && p->readOnly ){ sqlite3VdbeLeave(p); return SQLITE_BUSY; }else if( rc!=SQLITE_OK ){ p->rc = rc; sqlite3RollbackAll(db, SQLITE_OK); p->nChange = 0; }else{ db->nDeferredCons = 0; db->nDeferredImmCons = 0; db->flags &= ~SQLITE_DeferFKs; sqlite3CommitInternalChanges(db); } }else{ sqlite3RollbackAll(db, SQLITE_OK); p->nChange = 0; } db->nStatement = 0; }else if( eStatementOp==0 ){ if( p->rc==SQLITE_OK || p->errorAction==OE_Fail ){ eStatementOp = SAVEPOINT_RELEASE; }else if( p->errorAction==OE_Abort ){ eStatementOp = SAVEPOINT_ROLLBACK; }else{ sqlite3RollbackAll(db, SQLITE_ABORT_ROLLBACK); sqlite3CloseSavepoints(db); db->autoCommit = 1; p->nChange = 0; } } /* If eStatementOp is non-zero, then a statement transaction needs to ** be committed or rolled back. Call sqlite3VdbeCloseStatement() to ** do so. If this operation returns an error, and the current statement ** error code is SQLITE_OK or SQLITE_CONSTRAINT, then promote the ** current statement error code. */ if( eStatementOp ){ rc = sqlite3VdbeCloseStatement(p, eStatementOp); if( rc ){ if( p->rc==SQLITE_OK || (p->rc&0xff)==SQLITE_CONSTRAINT ){ p->rc = rc; sqlite3DbFree(db, p->zErrMsg); p->zErrMsg = 0; } sqlite3RollbackAll(db, SQLITE_ABORT_ROLLBACK); sqlite3CloseSavepoints(db); db->autoCommit = 1; p->nChange = 0; } } /* If this was an INSERT, UPDATE or DELETE and no statement transaction ** has been rolled back, update the database connection change-counter. */ if( p->changeCntOn ){ if( eStatementOp!=SAVEPOINT_ROLLBACK ){ sqlite3VdbeSetChanges(db, p->nChange); }else{ sqlite3VdbeSetChanges(db, 0); } p->nChange = 0; } /* Release the locks */ sqlite3VdbeLeave(p); } /* We have successfully halted and closed the VM. Record this fact. */ if( p->pc>=0 ){ db->nVdbeActive--; if( !p->readOnly ) db->nVdbeWrite--; if( p->bIsReader ) db->nVdbeRead--; assert( db->nVdbeActive>=db->nVdbeRead ); assert( db->nVdbeRead>=db->nVdbeWrite ); assert( db->nVdbeWrite>=0 ); } p->magic = VDBE_MAGIC_HALT; checkActiveVdbeCnt(db); if( p->db->mallocFailed ){ p->rc = SQLITE_NOMEM; } /* If the auto-commit flag is set to true, then any locks that were held ** by connection db have now been released. 
Call sqlite3ConnectionUnlocked() ** to invoke any required unlock-notify callbacks. */ if( db->autoCommit ){ sqlite3ConnectionUnlocked(db); } assert( db->nVdbeActive>0 || db->autoCommit==0 || db->nStatement==0 ); return (p->rc==SQLITE_BUSY ? SQLITE_BUSY : SQLITE_OK); } /* ** Each VDBE holds the result of the most recent sqlite3_step() call ** in p->rc. This routine sets that result back to SQLITE_OK. */ SQLITE_PRIVATE void sqlite3VdbeResetStepResult(Vdbe *p){ p->rc = SQLITE_OK; } /* ** Copy the error code and error message belonging to the VDBE passed ** as the first argument to its database handle (so that they will be ** returned by calls to sqlite3_errcode() and sqlite3_errmsg()). ** ** This function does not clear the VDBE error code or message, just ** copies them to the database handle. */ SQLITE_PRIVATE int sqlite3VdbeTransferError(Vdbe *p){ sqlite3 *db = p->db; int rc = p->rc; if( p->zErrMsg ){ u8 mallocFailed = db->mallocFailed; sqlite3BeginBenignMalloc(); if( db->pErr==0 ) db->pErr = sqlite3ValueNew(db); sqlite3ValueSetStr(db->pErr, -1, p->zErrMsg, SQLITE_UTF8, SQLITE_TRANSIENT); sqlite3EndBenignMalloc(); db->mallocFailed = mallocFailed; db->errCode = rc; }else{ sqlite3Error(db, rc); } return rc; } #ifdef SQLITE_ENABLE_SQLLOG /* ** If an SQLITE_CONFIG_SQLLOG hook is registered and the VM has been run, ** invoke it. */ static void vdbeInvokeSqllog(Vdbe *v){ if( sqlite3GlobalConfig.xSqllog && v->rc==SQLITE_OK && v->zSql && v->pc>=0 ){ char *zExpanded = sqlite3VdbeExpandSql(v, v->zSql); assert( v->db->init.busy==0 ); if( zExpanded ){ sqlite3GlobalConfig.xSqllog( sqlite3GlobalConfig.pSqllogArg, v->db, zExpanded, 1 ); sqlite3DbFree(v->db, zExpanded); } } } #else # define vdbeInvokeSqllog(x) #endif /* ** Clean up a VDBE after execution but do not delete the VDBE just yet. ** Write any error messages into *pzErrMsg. Return the result code. ** ** After this routine is run, the VDBE should be ready to be executed ** again. ** ** To look at it another way, this routine resets the state of the ** virtual machine from VDBE_MAGIC_RUN or VDBE_MAGIC_HALT back to ** VDBE_MAGIC_INIT. */ SQLITE_PRIVATE int sqlite3VdbeReset(Vdbe *p){ sqlite3 *db; db = p->db; /* If the VM did not run to completion or if it encountered an ** error, then it might not have been halted properly. So halt ** it now. */ sqlite3VdbeHalt(p); /* If the VDBE has be run even partially, then transfer the error code ** and error message from the VDBE into the main database structure. But ** if the VDBE has just been set to run but has not actually executed any ** instructions yet, leave the main database error information unchanged. */ if( p->pc>=0 ){ vdbeInvokeSqllog(p); sqlite3VdbeTransferError(p); sqlite3DbFree(db, p->zErrMsg); p->zErrMsg = 0; if( p->runOnlyOnce ) p->expired = 1; }else if( p->rc && p->expired ){ /* The expired flag was set on the VDBE before the first call ** to sqlite3_step(). For consistency (since sqlite3_step() was ** called), set the database error in this case as well. */ sqlite3ErrorWithMsg(db, p->rc, p->zErrMsg ? "%s" : 0, p->zErrMsg); sqlite3DbFree(db, p->zErrMsg); p->zErrMsg = 0; } /* Reclaim all memory used by the VDBE */ Cleanup(p); /* Save profiling information from this VDBE run. 
*/ #ifdef VDBE_PROFILE { FILE *out = fopen("vdbe_profile.out", "a"); if( out ){ int i; fprintf(out, "---- "); for(i=0; i<p->nOp; i++){ fprintf(out, "%02x", p->aOp[i].opcode); } fprintf(out, "\n"); if( p->zSql ){ char c, pc = 0; fprintf(out, "-- "); for(i=0; (c = p->zSql[i])!=0; i++){ if( pc=='\n' ) fprintf(out, "-- "); putc(c, out); pc = c; } if( pc!='\n' ) fprintf(out, "\n"); } for(i=0; i<p->nOp; i++){ char zHdr[100]; sqlite3_snprintf(sizeof(zHdr), zHdr, "%6u %12llu %8llu ", p->aOp[i].cnt, p->aOp[i].cycles, p->aOp[i].cnt>0 ? p->aOp[i].cycles/p->aOp[i].cnt : 0 ); fprintf(out, "%s", zHdr); sqlite3VdbePrintOp(out, i, &p->aOp[i]); } fclose(out); } } #endif p->iCurrentTime = 0; p->magic = VDBE_MAGIC_INIT; return p->rc & db->errMask; } /* ** Clean up and delete a VDBE after execution. Return an integer which is ** the result code. Write any error message text into *pzErrMsg. */ SQLITE_PRIVATE int sqlite3VdbeFinalize(Vdbe *p){ int rc = SQLITE_OK; if( p->magic==VDBE_MAGIC_RUN || p->magic==VDBE_MAGIC_HALT ){ rc = sqlite3VdbeReset(p); assert( (rc & p->db->errMask)==rc ); } sqlite3VdbeDelete(p); return rc; } /* ** If parameter iOp is less than zero, then invoke the destructor for ** all auxiliary data pointers currently cached by the VM passed as ** the first argument. ** ** Or, if iOp is greater than or equal to zero, then the destructor is ** only invoked for those auxiliary data pointers created by the user ** function invoked by the OP_Function opcode at instruction iOp of ** VM pVdbe, and only then if: ** ** * the associated function parameter is the 32nd or later (counting ** from left to right), or ** ** * the corresponding bit in argument mask is clear (where the first ** function parameter corresponds to bit 0 etc.). */ SQLITE_PRIVATE void sqlite3VdbeDeleteAuxData(Vdbe *pVdbe, int iOp, int mask){ AuxData **pp = &pVdbe->pAuxData; while( *pp ){ AuxData *pAux = *pp; if( (iOp<0) || (pAux->iOp==iOp && (pAux->iArg>31 || !(mask & MASKBIT32(pAux->iArg)))) ){ testcase( pAux->iArg==31 ); if( pAux->xDelete ){ pAux->xDelete(pAux->pAux); } *pp = pAux->pNext; sqlite3DbFree(pVdbe->db, pAux); }else{ pp= &pAux->pNext; } } } /* ** Free all memory associated with the Vdbe passed as the second argument, ** except for the object itself, which is preserved. ** ** The difference between this function and sqlite3VdbeDelete() is that ** VdbeDelete() also unlinks the Vdbe from the list of VMs associated with ** the database connection and frees the object itself. */ SQLITE_PRIVATE void sqlite3VdbeClearObject(sqlite3 *db, Vdbe *p){ SubProgram *pSub, *pNext; int i; assert( p->db==0 || p->db==db ); releaseMemArray(p->aVar, p->nVar); releaseMemArray(p->aColName, p->nResColumn*COLNAME_N); for(pSub=p->pProgram; pSub; pSub=pNext){ pNext = pSub->pNext; vdbeFreeOpArray(db, pSub->aOp, pSub->nOp); sqlite3DbFree(db, pSub); } for(i=p->nzVar-1; i>=0; i--) sqlite3DbFree(db, p->azVar[i]); vdbeFreeOpArray(db, p->aOp, p->nOp); sqlite3DbFree(db, p->aColName); sqlite3DbFree(db, p->zSql); sqlite3DbFree(db, p->pFree); #ifdef SQLITE_ENABLE_STMT_SCANSTATUS for(i=0; i<p->nScan; i++){ sqlite3DbFree(db, p->aScan[i].zName); } sqlite3DbFree(db, p->aScan); #endif } /* ** Delete an entire VDBE. 
*/ SQLITE_PRIVATE void sqlite3VdbeDelete(Vdbe *p){ sqlite3 *db; if( NEVER(p==0) ) return; db = p->db; assert( sqlite3_mutex_held(db->mutex) ); sqlite3VdbeClearObject(db, p); if( p->pPrev ){ p->pPrev->pNext = p->pNext; }else{ assert( db->pVdbe==p ); db->pVdbe = p->pNext; } if( p->pNext ){ p->pNext->pPrev = p->pPrev; } p->magic = VDBE_MAGIC_DEAD; p->db = 0; sqlite3DbFree(db, p); } /* ** The cursor "p" has a pending seek operation that has not yet been ** carried out. Seek the cursor now. If an error occurs, return ** the appropriate error code. */ static int SQLITE_NOINLINE handleDeferredMoveto(VdbeCursor *p){ int res, rc; #ifdef SQLITE_TEST extern int sqlite3_search_count; #endif assert( p->deferredMoveto ); assert( p->isTable ); rc = sqlite3BtreeMovetoUnpacked(p->pCursor, 0, p->movetoTarget, 0, &res); if( rc ) return rc; if( res!=0 ) return SQLITE_CORRUPT_BKPT; #ifdef SQLITE_TEST sqlite3_search_count++; #endif p->deferredMoveto = 0; p->cacheStatus = CACHE_STALE; return SQLITE_OK; } /* ** Something has moved cursor "p" out of place. Maybe the row it was ** pointed to was deleted out from under it. Or maybe the btree was ** rebalanced. Whatever the cause, try to restore "p" to the place it ** is supposed to be pointing. If the row was deleted out from under the ** cursor, set the cursor to point to a NULL row. */ static int SQLITE_NOINLINE handleMovedCursor(VdbeCursor *p){ int isDifferentRow, rc; assert( p->pCursor!=0 ); assert( sqlite3BtreeCursorHasMoved(p->pCursor) ); rc = sqlite3BtreeCursorRestore(p->pCursor, &isDifferentRow); p->cacheStatus = CACHE_STALE; if( isDifferentRow ) p->nullRow = 1; return rc; } /* ** Check to ensure that the cursor is valid. Restore the cursor ** if need be. Return any I/O error from the restore operation. */ SQLITE_PRIVATE int sqlite3VdbeCursorRestore(VdbeCursor *p){ if( sqlite3BtreeCursorHasMoved(p->pCursor) ){ return handleMovedCursor(p); } return SQLITE_OK; } /* ** Make sure the cursor p is ready to read or write the row to which it ** was last positioned. Return an error code if an OOM fault or I/O error ** prevents us from positioning the cursor to its correct position. ** ** If a MoveTo operation is pending on the given cursor, then do that ** MoveTo now. If no move is pending, check to see if the row has been ** deleted out from under the cursor and if it has, mark the row as ** a NULL row. ** ** If the cursor is already pointing to the correct row and that row has ** not been deleted out from under the cursor, then this routine is a no-op. */ SQLITE_PRIVATE int sqlite3VdbeCursorMoveto(VdbeCursor *p){ if( p->deferredMoveto ){ return handleDeferredMoveto(p); } if( p->pCursor && sqlite3BtreeCursorHasMoved(p->pCursor) ){ return handleMovedCursor(p); } return SQLITE_OK; } /* ** The following functions: ** ** sqlite3VdbeSerialType() ** sqlite3VdbeSerialTypeLen() ** sqlite3VdbeSerialLen() ** sqlite3VdbeSerialPut() ** sqlite3VdbeSerialGet() ** ** encapsulate the code that serializes values for storage in SQLite ** data and index records. Each serialized value consists of a ** 'serial-type' and a blob of data. The serial type is an 8-byte unsigned ** integer, stored as a varint. ** ** In an SQLite index record, the serial type is stored directly before ** the blob of data that it corresponds to. In a table record, all serial ** types are stored at the start of the record, and the blobs of data at ** the end. Hence these functions allow the caller to handle the ** serial-type and data blob separately. 
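**
** An illustrative example (values chosen arbitrarily, not drawn from any
** particular database): a table record holding the three values
** (NULL, 42, 'hi') consists of a header followed by a body.  The header
** is a varint giving the total header size followed by one serial-type
** varint per value; the body holds the data blobs in the same order:
**
**     header:  0x04 0x00 0x01 0x11   (size=4, NULL, 1-byte int, 2-byte text)
**     body:    0x2a 0x68 0x69        (42, 'h', 'i')
**
** The serial types used here (0, 1 and 17) are defined by the table below.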
** ** The following table describes the various storage classes for data: ** ** serial type bytes of data type ** -------------- --------------- --------------- ** 0 0 NULL ** 1 1 signed integer ** 2 2 signed integer ** 3 3 signed integer ** 4 4 signed integer ** 5 6 signed integer ** 6 8 signed integer ** 7 8 IEEE float ** 8 0 Integer constant 0 ** 9 0 Integer constant 1 ** 10,11 reserved for expansion ** N>=12 and even (N-12)/2 BLOB ** N>=13 and odd (N-13)/2 text ** ** The 8 and 9 types were added in 3.3.0, file format 4. Prior versions ** of SQLite will not understand those serial types. */ /* ** Return the serial-type for the value stored in pMem. */ SQLITE_PRIVATE u32 sqlite3VdbeSerialType(Mem *pMem, int file_format){ int flags = pMem->flags; u32 n; if( flags&MEM_Null ){ return 0; } if( flags&MEM_Int ){ /* Figure out whether to use 1, 2, 4, 6 or 8 bytes. */ # define MAX_6BYTE ((((i64)0x00008000)<<32)-1) i64 i = pMem->u.i; u64 u; if( i<0 ){ u = ~i; }else{ u = i; } if( u<=127 ){ return ((i&1)==i && file_format>=4) ? 8+(u32)u : 1; } if( u<=32767 ) return 2; if( u<=8388607 ) return 3; if( u<=2147483647 ) return 4; if( u<=MAX_6BYTE ) return 5; return 6; } if( flags&MEM_Real ){ return 7; } assert( pMem->db->mallocFailed || flags&(MEM_Str|MEM_Blob) ); assert( pMem->n>=0 ); n = (u32)pMem->n; if( flags & MEM_Zero ){ n += pMem->u.nZero; } return ((n*2) + 12 + ((flags&MEM_Str)!=0)); } /* ** The sizes for serial types less than 12 */ static const u8 sqlite3SmallTypeSizes[] = { 0, 1, 2, 3, 4, 6, 8, 8, 0, 0, 0, 0 }; /* ** Return the length of the data corresponding to the supplied serial-type. */ SQLITE_PRIVATE u32 sqlite3VdbeSerialTypeLen(u32 serial_type){ if( serial_type>=12 ){ return (serial_type-12)/2; }else{ return sqlite3SmallTypeSizes[serial_type]; } } /* ** If we are on an architecture with mixed-endian floating ** points (ex: ARM7) then swap the lower 4 bytes with the ** upper 4 bytes. Return the result. ** ** For most architectures, this is a no-op. ** ** (later): It is reported to me that the mixed-endian problem ** on ARM7 is an issue with GCC, not with the ARM7 chip. It seems ** that early versions of GCC stored the two words of a 64-bit ** float in the wrong order. And that error has been propagated ** ever since. The blame is not necessarily with GCC, though. ** GCC might have just copying the problem from a prior compiler. ** I am also told that newer versions of GCC that follow a different ** ABI get the byte order right. ** ** Developers using SQLite on an ARM7 should compile and run their ** application using -DSQLITE_DEBUG=1 at least once. With DEBUG ** enabled, some asserts below will ensure that the byte order of ** floating point values is correct. ** ** (2007-08-30) Frank van Vugt has studied this problem closely ** and has send his findings to the SQLite developers. Frank ** writes that some Linux kernels offer floating point hardware ** emulation that uses only 32-bit mantissas instead of a full ** 48-bits as required by the IEEE standard. (This is the ** CONFIG_FPE_FASTFPE option.) On such systems, floating point ** byte swapping becomes very complicated. To avoid problems, ** the necessary byte swapping is carried out using a 64-bit integer ** rather than a 64-bit float. Frank assures us that the code here ** works for him. We, the developers, have no way to independently ** verify this, but Frank seems to know what he is talking about ** so we trust him. 
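**
** A minimal illustration of what the swap does: the double 1.0 has the
** 64-bit IEEE pattern 0x3FF0000000000000, and floatSwap() below simply
** exchanges the upper and lower 32-bit halves, turning that pattern into
** 0x000000003FF00000.  The assert() in serialGet() uses exactly this
** value (t1) to verify, in debug builds, that the assumed word order
** matches the in-memory representation of 1.0.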
*/ #ifdef SQLITE_MIXED_ENDIAN_64BIT_FLOAT static u64 floatSwap(u64 in){ union { u64 r; u32 i[2]; } u; u32 t; u.r = in; t = u.i[0]; u.i[0] = u.i[1]; u.i[1] = t; return u.r; } # define swapMixedEndianFloat(X) X = floatSwap(X) #else # define swapMixedEndianFloat(X) #endif /* ** Write the serialized data blob for the value stored in pMem into ** buf. It is assumed that the caller has allocated sufficient space. ** Return the number of bytes written. ** ** nBuf is the amount of space left in buf[]. The caller is responsible ** for allocating enough space to buf[] to hold the entire field, exclusive ** of the pMem->u.nZero bytes for a MEM_Zero value. ** ** Return the number of bytes actually written into buf[]. The number ** of bytes in the zero-filled tail is included in the return value only ** if those bytes were zeroed in buf[]. */ SQLITE_PRIVATE u32 sqlite3VdbeSerialPut(u8 *buf, Mem *pMem, u32 serial_type){ u32 len; /* Integer and Real */ if( serial_type<=7 && serial_type>0 ){ u64 v; u32 i; if( serial_type==7 ){ assert( sizeof(v)==sizeof(pMem->u.r) ); memcpy(&v, &pMem->u.r, sizeof(v)); swapMixedEndianFloat(v); }else{ v = pMem->u.i; } len = i = sqlite3SmallTypeSizes[serial_type]; assert( i>0 ); do{ buf[--i] = (u8)(v&0xFF); v >>= 8; }while( i ); return len; } /* String or blob */ if( serial_type>=12 ){ assert( pMem->n + ((pMem->flags & MEM_Zero)?pMem->u.nZero:0) == (int)sqlite3VdbeSerialTypeLen(serial_type) ); len = pMem->n; memcpy(buf, pMem->z, len); return len; } /* NULL or constants 0 or 1 */ return 0; } /* Input "x" is a sequence of unsigned characters that represent a ** big-endian integer. Return the equivalent native integer */ #define ONE_BYTE_INT(x) ((i8)(x)[0]) #define TWO_BYTE_INT(x) (256*(i8)((x)[0])|(x)[1]) #define THREE_BYTE_INT(x) (65536*(i8)((x)[0])|((x)[1]<<8)|(x)[2]) #define FOUR_BYTE_UINT(x) (((u32)(x)[0]<<24)|((x)[1]<<16)|((x)[2]<<8)|(x)[3]) #define FOUR_BYTE_INT(x) (16777216*(i8)((x)[0])|((x)[1]<<16)|((x)[2]<<8)|(x)[3]) /* ** Deserialize the data blob pointed to by buf as serial type serial_type ** and store the result in pMem. Return the number of bytes read. ** ** This function is implemented as two separate routines for performance. ** The few cases that require local variables are broken out into a separate ** routine so that in most cases the overhead of moving the stack pointer ** is avoided. */ static u32 SQLITE_NOINLINE serialGet( const unsigned char *buf, /* Buffer to deserialize from */ u32 serial_type, /* Serial type to deserialize */ Mem *pMem /* Memory cell to write value into */ ){ u64 x = FOUR_BYTE_UINT(buf); u32 y = FOUR_BYTE_UINT(buf+4); x = (x<<32) + y; if( serial_type==6 ){ /* EVIDENCE-OF: R-29851-52272 Value is a big-endian 64-bit ** twos-complement integer. */ pMem->u.i = *(i64*)&x; pMem->flags = MEM_Int; testcase( pMem->u.i<0 ); }else{ /* EVIDENCE-OF: R-57343-49114 Value is a big-endian IEEE 754-2008 64-bit ** floating point number. */ #if !defined(NDEBUG) && !defined(SQLITE_OMIT_FLOATING_POINT) /* Verify that integers and floating point values use the same ** byte order. Or, that if SQLITE_MIXED_ENDIAN_64BIT_FLOAT is ** defined that 64-bit floating point values really are mixed ** endian. */ static const u64 t1 = ((u64)0x3ff00000)<<32; static const double r1 = 1.0; u64 t2 = t1; swapMixedEndianFloat(t2); assert( sizeof(r1)==sizeof(t2) && memcmp(&r1, &t2, sizeof(r1))==0 ); #endif assert( sizeof(x)==8 && sizeof(pMem->u.r)==8 ); swapMixedEndianFloat(x); memcpy(&pMem->u.r, &x, sizeof(x)); pMem->flags = sqlite3IsNaN(pMem->u.r) ? 
MEM_Null : MEM_Real; } return 8; } SQLITE_PRIVATE u32 sqlite3VdbeSerialGet( const unsigned char *buf, /* Buffer to deserialize from */ u32 serial_type, /* Serial type to deserialize */ Mem *pMem /* Memory cell to write value into */ ){ switch( serial_type ){ case 10: /* Reserved for future use */ case 11: /* Reserved for future use */ case 0: { /* Null */ /* EVIDENCE-OF: R-24078-09375 Value is a NULL. */ pMem->flags = MEM_Null; break; } case 1: { /* EVIDENCE-OF: R-44885-25196 Value is an 8-bit twos-complement ** integer. */ pMem->u.i = ONE_BYTE_INT(buf); pMem->flags = MEM_Int; testcase( pMem->u.i<0 ); return 1; } case 2: { /* 2-byte signed integer */ /* EVIDENCE-OF: R-49794-35026 Value is a big-endian 16-bit ** twos-complement integer. */ pMem->u.i = TWO_BYTE_INT(buf); pMem->flags = MEM_Int; testcase( pMem->u.i<0 ); return 2; } case 3: { /* 3-byte signed integer */ /* EVIDENCE-OF: R-37839-54301 Value is a big-endian 24-bit ** twos-complement integer. */ pMem->u.i = THREE_BYTE_INT(buf); pMem->flags = MEM_Int; testcase( pMem->u.i<0 ); return 3; } case 4: { /* 4-byte signed integer */ /* EVIDENCE-OF: R-01849-26079 Value is a big-endian 32-bit ** twos-complement integer. */ pMem->u.i = FOUR_BYTE_INT(buf); pMem->flags = MEM_Int; testcase( pMem->u.i<0 ); return 4; } case 5: { /* 6-byte signed integer */ /* EVIDENCE-OF: R-50385-09674 Value is a big-endian 48-bit ** twos-complement integer. */ pMem->u.i = FOUR_BYTE_UINT(buf+2) + (((i64)1)<<32)*TWO_BYTE_INT(buf); pMem->flags = MEM_Int; testcase( pMem->u.i<0 ); return 6; } case 6: /* 8-byte signed integer */ case 7: { /* IEEE floating point */ /* These use local variables, so do them in a separate routine ** to avoid having to move the frame pointer in the common case */ return serialGet(buf,serial_type,pMem); } case 8: /* Integer 0 */ case 9: { /* Integer 1 */ /* EVIDENCE-OF: R-12976-22893 Value is the integer 0. */ /* EVIDENCE-OF: R-18143-12121 Value is the integer 1. */ pMem->u.i = serial_type-8; pMem->flags = MEM_Int; return 0; } default: { /* EVIDENCE-OF: R-14606-31564 Value is a BLOB that is (N-12)/2 bytes in ** length. ** EVIDENCE-OF: R-28401-00140 Value is a string in the text encoding and ** (N-13)/2 bytes in length. */ static const u16 aFlag[] = { MEM_Blob|MEM_Ephem, MEM_Str|MEM_Ephem }; pMem->z = (char *)buf; pMem->n = (serial_type-12)/2; pMem->flags = aFlag[serial_type&1]; return pMem->n; } } return 0; } /* ** This routine is used to allocate sufficient space for an UnpackedRecord ** structure large enough to be used with sqlite3VdbeRecordUnpack() if ** the first argument is a pointer to KeyInfo structure pKeyInfo. ** ** The space is either allocated using sqlite3DbMallocRaw() or from within ** the unaligned buffer passed via the second and third arguments (presumably ** stack space). If the former, then *ppFree is set to a pointer that should ** be eventually freed by the caller using sqlite3DbFree(). Or, if the ** allocation comes from the pSpace/szSpace buffer, *ppFree is set to NULL ** before returning. ** ** If an OOM error occurs, NULL is returned. 
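**
** A worked illustration of the alignment logic used below: if pSpace sits
** at an address of the form 8n+5, then SQLITE_PTR_TO_INT(pSpace)&7 is 5
** and nOff = (8-5)&7 = 3, so the record is laid out starting at pSpace+3,
** the next 8-byte boundary.  If pSpace is already 8-byte aligned the
** expression yields 0 and no adjustment is made.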
*/ SQLITE_PRIVATE UnpackedRecord *sqlite3VdbeAllocUnpackedRecord( KeyInfo *pKeyInfo, /* Description of the record */ char *pSpace, /* Unaligned space available */ int szSpace, /* Size of pSpace[] in bytes */ char **ppFree /* OUT: Caller should free this pointer */ ){ UnpackedRecord *p; /* Unpacked record to return */ int nOff; /* Increment pSpace by nOff to align it */ int nByte; /* Number of bytes required for *p */ /* We want to shift the pointer pSpace up such that it is 8-byte aligned. ** Thus, we need to calculate a value, nOff, between 0 and 7, to shift ** it by. If pSpace is already 8-byte aligned, nOff should be zero. */ nOff = (8 - (SQLITE_PTR_TO_INT(pSpace) & 7)) & 7; nByte = ROUND8(sizeof(UnpackedRecord)) + sizeof(Mem)*(pKeyInfo->nField+1); if( nByte>szSpace+nOff ){ p = (UnpackedRecord *)sqlite3DbMallocRaw(pKeyInfo->db, nByte); *ppFree = (char *)p; if( !p ) return 0; }else{ p = (UnpackedRecord*)&pSpace[nOff]; *ppFree = 0; } p->aMem = (Mem*)&((char*)p)[ROUND8(sizeof(UnpackedRecord))]; assert( pKeyInfo->aSortOrder!=0 ); p->pKeyInfo = pKeyInfo; p->nField = pKeyInfo->nField + 1; return p; } /* ** Given the nKey-byte encoding of a record in pKey[], populate the ** UnpackedRecord structure indicated by the fourth argument with the ** contents of the decoded record. */ SQLITE_PRIVATE void sqlite3VdbeRecordUnpack( KeyInfo *pKeyInfo, /* Information about the record format */ int nKey, /* Size of the binary record */ const void *pKey, /* The binary record */ UnpackedRecord *p /* Populate this structure before returning. */ ){ const unsigned char *aKey = (const unsigned char *)pKey; int d; u32 idx; /* Offset in aKey[] to read from */ u16 u; /* Unsigned loop counter */ u32 szHdr; Mem *pMem = p->aMem; p->default_rc = 0; assert( EIGHT_BYTE_ALIGNMENT(pMem) ); idx = getVarint32(aKey, szHdr); d = szHdr; u = 0; while( idx<szHdr && d<=nKey ){ u32 serial_type; idx += getVarint32(&aKey[idx], serial_type); pMem->enc = pKeyInfo->enc; pMem->db = pKeyInfo->db; /* pMem->flags = 0; // sqlite3VdbeSerialGet() will set this for us */ pMem->szMalloc = 0; d += sqlite3VdbeSerialGet(&aKey[d], serial_type, pMem); pMem++; if( (++u)>=p->nField ) break; } assert( u<=pKeyInfo->nField + 1 ); p->nField = u; } #if SQLITE_DEBUG /* ** This function compares two index or table record keys in the same way ** as the sqlite3VdbeRecordCompare() routine. Unlike VdbeRecordCompare(), ** this function deserializes and compares values using the ** sqlite3VdbeSerialGet() and sqlite3MemCompare() functions. It is used ** in assert() statements to ensure that the optimized code in ** sqlite3VdbeRecordCompare() returns results that agree with these two primitives. ** ** Return true if the result of comparison is equivalent to desiredResult. ** Return false if there is a disagreement. */ static int vdbeRecordCompareDebug( int nKey1, const void *pKey1, /* Left key */ const UnpackedRecord *pPKey2, /* Right key */ int desiredResult /* Correct answer */ ){ u32 d1; /* Offset into aKey[] of next data element */ u32 idx1; /* Offset into aKey[] of next header element */ u32 szHdr1; /* Number of bytes in header */ int i = 0; int rc = 0; const unsigned char *aKey1 = (const unsigned char *)pKey1; KeyInfo *pKeyInfo; Mem mem1; pKeyInfo = pPKey2->pKeyInfo; if( pKeyInfo->db==0 ) return 1; mem1.enc = pKeyInfo->enc; mem1.db = pKeyInfo->db; /* mem1.flags = 0; // Will be initialized by sqlite3VdbeSerialGet() */ VVA_ONLY( mem1.szMalloc = 0; ) /* Only needed by assert() statements */ /* Compilers may complain that mem1.u.i is potentially uninitialized. ** We could initialize it, as shown here, to silence those complaints. 
** But in fact, mem1.u.i will never actually be used uninitialized, and doing ** the unnecessary initialization has a measurable negative performance ** impact, since this routine is a very high runner. And so, we choose ** to ignore the compiler warnings and leave this variable uninitialized. */ /* mem1.u.i = 0; // not needed, here to silence compiler warning */ idx1 = getVarint32(aKey1, szHdr1); if( szHdr1>98307 ) return SQLITE_CORRUPT; d1 = szHdr1; assert( pKeyInfo->nField+pKeyInfo->nXField>=pPKey2->nField || CORRUPT_DB ); assert( pKeyInfo->aSortOrder!=0 ); assert( pKeyInfo->nField>0 ); assert( idx1<=szHdr1 || CORRUPT_DB ); do{ u32 serial_type1; /* Read the serial types for the next element in each key. */ idx1 += getVarint32( aKey1+idx1, serial_type1 ); /* Verify that there is enough key space remaining to avoid ** a buffer overread. The "d1+serial_type1+2" subexpression will ** always be greater than or equal to the amount of required key space. ** Use that approximation to avoid the more expensive call to ** sqlite3VdbeSerialTypeLen() in the common case. */ if( d1+serial_type1+2>(u32)nKey1 && d1+sqlite3VdbeSerialTypeLen(serial_type1)>(u32)nKey1 ){ break; } /* Extract the values to be compared. */ d1 += sqlite3VdbeSerialGet(&aKey1[d1], serial_type1, &mem1); /* Do the comparison */ rc = sqlite3MemCompare(&mem1, &pPKey2->aMem[i], pKeyInfo->aColl[i]); if( rc!=0 ){ assert( mem1.szMalloc==0 ); /* See comment below */ if( pKeyInfo->aSortOrder[i] ){ rc = -rc; /* Invert the result for DESC sort order. */ } goto debugCompareEnd; } i++; }while( idx1<szHdr1 && i<pPKey2->nField ); /* No memory allocation is ever used on mem1. Prove this using ** the following assert(). If the assert() fails, it indicates a ** memory leak and a need to call sqlite3VdbeMemRelease(&mem1). */ assert( mem1.szMalloc==0 ); /* rc==0 here means that one of the keys ran out of fields and ** all the fields up to that point were equal. Return the default_rc ** value. */ rc = pPKey2->default_rc; debugCompareEnd: if( desiredResult==0 && rc==0 ) return 1; if( desiredResult<0 && rc<0 ) return 1; if( desiredResult>0 && rc>0 ) return 1; if( CORRUPT_DB ) return 1; if( pKeyInfo->db->mallocFailed ) return 1; return 0; } #endif #if SQLITE_DEBUG /* ** Count the number of fields (a.k.a. columns) in the record given by ** pKey,nKey. Then verify that this count is less than or equal to the ** limit given by pKeyInfo->nField + pKeyInfo->nXField. ** ** If this constraint is not satisfied, it means that the high-speed ** vdbeRecordCompareInt() and vdbeRecordCompareString() routines will ** not work correctly. If this assert() ever fires, it probably means ** that the KeyInfo.nField or KeyInfo.nXField values were computed ** incorrectly. */ static void vdbeAssertFieldCountWithinLimits( int nKey, const void *pKey, /* The record to verify */ const KeyInfo *pKeyInfo /* Compare size with this KeyInfo */ ){ int nField = 0; u32 szHdr; u32 idx; u32 notUsed; const unsigned char *aKey = (const unsigned char*)pKey; if( CORRUPT_DB ) return; idx = getVarint32(aKey, szHdr); assert( nKey>=0 ); assert( szHdr<=(u32)nKey ); while( idx<szHdr ){ idx += getVarint32(aKey+idx, notUsed); nField++; } assert( nField<=pKeyInfo->nField+pKeyInfo->nXField ); } #else # define vdbeAssertFieldCountWithinLimits(A,B,C) #endif /* ** Both *pMem1 and *pMem2 contain string values. Compare the two values ** using the collation sequence pColl. As usual, return a negative, zero, ** or positive value if *pMem1 is less than, equal to or greater than ** *pMem2, respectively. Similar in spirit to "rc = (*pMem1) - (*pMem2);". 
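**
** For example (illustrative): under the built-in NOCASE collation,
** comparing 'ABC' with 'abd' yields a negative value because the third
** characters differ once case is folded ('c' < 'd').  Under BINARY the
** same pair also compares negative, but only because the raw byte 'A'
** (0x41) is less than 'a' (0x61).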
*/ static int vdbeCompareMemString( const Mem *pMem1, const Mem *pMem2, const CollSeq *pColl, u8 *prcErr /* If an OOM occurs, set to SQLITE_NOMEM */ ){ if( pMem1->enc==pColl->enc ){ /* The strings are already in the correct encoding. Call the ** comparison function directly */ return pColl->xCmp(pColl->pUser,pMem1->n,pMem1->z,pMem2->n,pMem2->z); }else{ int rc; const void *v1, *v2; int n1, n2; Mem c1; Mem c2; sqlite3VdbeMemInit(&c1, pMem1->db, MEM_Null); sqlite3VdbeMemInit(&c2, pMem1->db, MEM_Null); sqlite3VdbeMemShallowCopy(&c1, pMem1, MEM_Ephem); sqlite3VdbeMemShallowCopy(&c2, pMem2, MEM_Ephem); v1 = sqlite3ValueText((sqlite3_value*)&c1, pColl->enc); n1 = v1==0 ? 0 : c1.n; v2 = sqlite3ValueText((sqlite3_value*)&c2, pColl->enc); n2 = v2==0 ? 0 : c2.n; rc = pColl->xCmp(pColl->pUser, n1, v1, n2, v2); sqlite3VdbeMemRelease(&c1); sqlite3VdbeMemRelease(&c2); if( (v1==0 || v2==0) && prcErr ) *prcErr = SQLITE_NOMEM; return rc; } } /* ** Compare two blobs. Return negative, zero, or positive if the first ** is less than, equal to, or greater than the second, respectively. ** If one blob is a prefix of the other, then the shorter is the lesser. */ static SQLITE_NOINLINE int sqlite3BlobCompare(const Mem *pB1, const Mem *pB2){ int c = memcmp(pB1->z, pB2->z, pB1->n>pB2->n ? pB2->n : pB1->n); if( c ) return c; return pB1->n - pB2->n; } /* ** Compare the values contained by the two memory cells, returning ** negative, zero or positive if pMem1 is less than, equal to, or greater ** than pMem2. Sorting order is NULLs first, followed by numbers (integers ** and reals) sorted numerically, followed by text ordered by the collating ** sequence pColl and finally blobs ordered by memcmp(). ** ** Two NULL values are considered equal by this function. */ SQLITE_PRIVATE int sqlite3MemCompare(const Mem *pMem1, const Mem *pMem2, const CollSeq *pColl){ int f1, f2; int combined_flags; f1 = pMem1->flags; f2 = pMem2->flags; combined_flags = f1|f2; assert( (combined_flags & MEM_RowSet)==0 ); /* If one value is NULL, it is less than the other. If both values ** are NULL, return 0. */ if( combined_flags&MEM_Null ){ return (f2&MEM_Null) - (f1&MEM_Null); } /* If one value is a number and the other is not, the number is less. ** If both are numbers, compare as reals if one is a real, or as integers ** if both values are integers. */ if( combined_flags&(MEM_Int|MEM_Real) ){ double r1, r2; if( (f1 & f2 & MEM_Int)!=0 ){ if( pMem1->u.i < pMem2->u.i ) return -1; if( pMem1->u.i > pMem2->u.i ) return 1; return 0; } if( (f1&MEM_Real)!=0 ){ r1 = pMem1->u.r; }else if( (f1&MEM_Int)!=0 ){ r1 = (double)pMem1->u.i; }else{ return 1; } if( (f2&MEM_Real)!=0 ){ r2 = pMem2->u.r; }else if( (f2&MEM_Int)!=0 ){ r2 = (double)pMem2->u.i; }else{ return -1; } if( r1<r2 ) return -1; if( r1>r2 ) return 1; return 0; } /* If one value is a string and the other is a blob, the string is less. ** If both are strings, compare using the collating functions. */ if( combined_flags&MEM_Str ){ if( (f1 & MEM_Str)==0 ){ return 1; } if( (f2 & MEM_Str)==0 ){ return -1; } assert( pMem1->enc==pMem2->enc ); assert( pMem1->enc==SQLITE_UTF8 || pMem1->enc==SQLITE_UTF16LE || pMem1->enc==SQLITE_UTF16BE ); /* The collation sequence must be defined at this point, even if ** the user deletes the collation sequence after the vdbe program is ** compiled (this was not always the case). */ assert( !pColl || pColl->xCmp ); if( pColl ){ return vdbeCompareMemString(pMem1, pMem2, pColl, 0); } /* If a NULL pointer was passed as the collate function, fall through ** to the blob case and use memcmp(). 
*/ } /* Both values must be blobs. Compare using memcmp(). */ return sqlite3BlobCompare(pMem1, pMem2); } /* ** The first argument passed to this function is a serial-type that ** corresponds to an integer - all values between 1 and 9 inclusive ** except 7. The second points to a buffer containing an integer value ** serialized according to serial_type. This function deserializes ** and returns the value. */ static i64 vdbeRecordDecodeInt(u32 serial_type, const u8 *aKey){ u32 y; assert( CORRUPT_DB || (serial_type>=1 && serial_type<=9 && serial_type!=7) ); switch( serial_type ){ case 0: case 1: testcase( aKey[0]&0x80 ); return ONE_BYTE_INT(aKey); case 2: testcase( aKey[0]&0x80 ); return TWO_BYTE_INT(aKey); case 3: testcase( aKey[0]&0x80 ); return THREE_BYTE_INT(aKey); case 4: { testcase( aKey[0]&0x80 ); y = FOUR_BYTE_UINT(aKey); return (i64)*(int*)&y; } case 5: { testcase( aKey[0]&0x80 ); return FOUR_BYTE_UINT(aKey+2) + (((i64)1)<<32)*TWO_BYTE_INT(aKey); } case 6: { u64 x = FOUR_BYTE_UINT(aKey); testcase( aKey[0]&0x80 ); x = (x<<32) | FOUR_BYTE_UINT(aKey+4); return (i64)*(i64*)&x; } } return (serial_type - 8); } /* ** This function compares the two table rows or index records ** specified by {nKey1, pKey1} and pPKey2. It returns a negative, zero ** or positive integer if key1 is less than, equal to or ** greater than key2. The {nKey1, pKey1} key must be a blob ** created by the OP_MakeRecord opcode of the VDBE. The pPKey2 ** key must be a parsed key such as obtained from ** sqlite3VdbeParseRecord. ** ** If argument bSkip is non-zero, it is assumed that the caller has already ** determined that the first fields of the keys are equal. ** ** Key1 and Key2 do not have to contain the same number of fields. If all ** fields that appear in both keys are equal, then pPKey2->default_rc is ** returned. ** ** If database corruption is discovered, set pPKey2->errCode to ** SQLITE_CORRUPT and return 0. If an OOM error is encountered, ** pPKey2->errCode is set to SQLITE_NOMEM and, if it is not NULL, the ** malloc-failed flag set on database handle (pPKey2->pKeyInfo->db). */ SQLITE_PRIVATE int sqlite3VdbeRecordCompareWithSkip( int nKey1, const void *pKey1, /* Left key */ UnpackedRecord *pPKey2, /* Right key */ int bSkip /* If true, skip the first field */ ){ u32 d1; /* Offset into aKey[] of next data element */ int i; /* Index of next field to compare */ u32 szHdr1; /* Size of record header in bytes */ u32 idx1; /* Offset of first type in header */ int rc = 0; /* Return value */ Mem *pRhs = pPKey2->aMem; /* Next field of pPKey2 to compare */ KeyInfo *pKeyInfo = pPKey2->pKeyInfo; const unsigned char *aKey1 = (const unsigned char *)pKey1; Mem mem1; /* If bSkip is true, then the caller has already determined that the first ** two elements in the keys are equal. Fix the various stack variables so ** that this routine begins comparing at the second field. 
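**
** An illustrative example: if pKey1 encodes the values (1, 'abc') and
** pPKey2 holds the unpacked values (1, 'abd'), a caller that has already
** proven the first fields equal may pass bSkip=1, in which case the
** comparison starts at the string field and a negative value is returned.
** If pPKey2 instead held only the single value (1), every common field
** would compare equal and pPKey2->default_rc would be returned.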
*/ if( bSkip ){ u32 s1; idx1 = 1 + getVarint32(&aKey1[1], s1); szHdr1 = aKey1[0]; d1 = szHdr1 + sqlite3VdbeSerialTypeLen(s1); i = 1; pRhs++; }else{ idx1 = getVarint32(aKey1, szHdr1); d1 = szHdr1; if( d1>(unsigned)nKey1 ){ pPKey2->errCode = (u8)SQLITE_CORRUPT_BKPT; return 0; /* Corruption */ } i = 0; } VVA_ONLY( mem1.szMalloc = 0; ) /* Only needed by assert() statements */ assert( pPKey2->pKeyInfo->nField+pPKey2->pKeyInfo->nXField>=pPKey2->nField || CORRUPT_DB ); assert( pPKey2->pKeyInfo->aSortOrder!=0 ); assert( pPKey2->pKeyInfo->nField>0 ); assert( idx1<=szHdr1 || CORRUPT_DB ); do{ u32 serial_type; /* RHS is an integer */ if( pRhs->flags & MEM_Int ){ serial_type = aKey1[idx1]; testcase( serial_type==12 ); if( serial_type>=10 ){ rc = +1; }else if( serial_type==0 ){ rc = -1; }else if( serial_type==7 ){ double rhs = (double)pRhs->u.i; sqlite3VdbeSerialGet(&aKey1[d1], serial_type, &mem1); if( mem1.u.r<rhs ){ rc = -1; }else if( mem1.u.r>rhs ){ rc = +1; } }else{ i64 lhs = vdbeRecordDecodeInt(serial_type, &aKey1[d1]); i64 rhs = pRhs->u.i; if( lhs<rhs ){ rc = -1; }else if( lhs>rhs ){ rc = +1; } } } /* RHS is real */ else if( pRhs->flags & MEM_Real ){ serial_type = aKey1[idx1]; if( serial_type>=10 ){ /* Serial types 12 or greater are strings and blobs (greater than ** numbers). Types 10 and 11 are currently "reserved for future ** use", so it doesn't really matter what the results of comparing ** them to numeric values are. */ rc = +1; }else if( serial_type==0 ){ rc = -1; }else{ double rhs = pRhs->u.r; double lhs; sqlite3VdbeSerialGet(&aKey1[d1], serial_type, &mem1); if( serial_type==7 ){ lhs = mem1.u.r; }else{ lhs = (double)mem1.u.i; } if( lhs<rhs ){ rc = -1; }else if( lhs>rhs ){ rc = +1; } } } /* RHS is a string */ else if( pRhs->flags & MEM_Str ){ getVarint32(&aKey1[idx1], serial_type); testcase( serial_type==12 ); if( serial_type<12 ){ rc = -1; }else if( !(serial_type & 0x01) ){ rc = +1; }else{ mem1.n = (serial_type - 12) / 2; testcase( (d1+mem1.n)==(unsigned)nKey1 ); testcase( (d1+mem1.n+1)==(unsigned)nKey1 ); if( (d1+mem1.n) > (unsigned)nKey1 ){ pPKey2->errCode = (u8)SQLITE_CORRUPT_BKPT; return 0; /* Corruption */ }else if( pKeyInfo->aColl[i] ){ mem1.enc = pKeyInfo->enc; mem1.db = pKeyInfo->db; mem1.flags = MEM_Str; mem1.z = (char*)&aKey1[d1]; rc = vdbeCompareMemString( &mem1, pRhs, pKeyInfo->aColl[i], &pPKey2->errCode ); }else{ int nCmp = MIN(mem1.n, pRhs->n); rc = memcmp(&aKey1[d1], pRhs->z, nCmp); if( rc==0 ) rc = mem1.n - pRhs->n; } } } /* RHS is a blob */ else if( pRhs->flags & MEM_Blob ){ getVarint32(&aKey1[idx1], serial_type); testcase( serial_type==12 ); if( serial_type<12 || (serial_type & 0x01) ){ rc = -1; }else{ int nStr = (serial_type - 12) / 2; testcase( (d1+nStr)==(unsigned)nKey1 ); testcase( (d1+nStr+1)==(unsigned)nKey1 ); if( (d1+nStr) > (unsigned)nKey1 ){ pPKey2->errCode = (u8)SQLITE_CORRUPT_BKPT; return 0; /* Corruption */ }else{ int nCmp = MIN(nStr, pRhs->n); rc = memcmp(&aKey1[d1], pRhs->z, nCmp); if( rc==0 ) rc = nStr - pRhs->n; } } } /* RHS is null */ else{ serial_type = aKey1[idx1]; rc = (serial_type!=0); } if( rc!=0 ){ if( pKeyInfo->aSortOrder[i] ){ rc = -rc; } assert( vdbeRecordCompareDebug(nKey1, pKey1, pPKey2, rc) ); assert( mem1.szMalloc==0 ); /* See comment below */ return rc; } i++; pRhs++; d1 += sqlite3VdbeSerialTypeLen(serial_type); idx1 += sqlite3VarintLen(serial_type); }while( idx1<(unsigned)szHdr1 && i<pPKey2->nField && d1<=(unsigned)nKey1 ); /* No memory allocation is ever used on mem1. Prove this using ** the following assert(). If the assert() fails, it indicates a ** memory leak and a need to call sqlite3VdbeMemRelease(&mem1). 
*/ assert( mem1.szMalloc==0 ); /* rc==0 here means that one or both of the keys ran out of fields and ** all the fields up to that point were equal. Return the default_rc ** value. */ assert( CORRUPT_DB || vdbeRecordCompareDebug(nKey1, pKey1, pPKey2, pPKey2->default_rc) || pKeyInfo->db->mallocFailed ); return pPKey2->default_rc; } SQLITE_PRIVATE int sqlite3VdbeRecordCompare( int nKey1, const void *pKey1, /* Left key */ UnpackedRecord *pPKey2 /* Right key */ ){ return sqlite3VdbeRecordCompareWithSkip(nKey1, pKey1, pPKey2, 0); } /* ** This function is an optimized version of sqlite3VdbeRecordCompare() ** that assumes (a) the first field of pPKey2 is an integer, and (b) the ** size-of-header varint at the start of (pKey1/nKey1) fits in a single ** byte (i.e. is less than 128). ** ** To avoid concerns about buffer overreads, this routine is only used ** on schemas where the maximum valid header size is 63 bytes or less. */ static int vdbeRecordCompareInt( int nKey1, const void *pKey1, /* Left key */ UnpackedRecord *pPKey2 /* Right key */ ){ const u8 *aKey = &((const u8*)pKey1)[*(const u8*)pKey1 & 0x3F]; int serial_type = ((const u8*)pKey1)[1]; int res; u32 y; u64 x; i64 v = pPKey2->aMem[0].u.i; i64 lhs; vdbeAssertFieldCountWithinLimits(nKey1, pKey1, pPKey2->pKeyInfo); assert( (*(u8*)pKey1)<=0x3F || CORRUPT_DB ); switch( serial_type ){ case 1: { /* 1-byte signed integer */ lhs = ONE_BYTE_INT(aKey); testcase( lhs<0 ); break; } case 2: { /* 2-byte signed integer */ lhs = TWO_BYTE_INT(aKey); testcase( lhs<0 ); break; } case 3: { /* 3-byte signed integer */ lhs = THREE_BYTE_INT(aKey); testcase( lhs<0 ); break; } case 4: { /* 4-byte signed integer */ y = FOUR_BYTE_UINT(aKey); lhs = (i64)*(int*)&y; testcase( lhs<0 ); break; } case 5: { /* 6-byte signed integer */ lhs = FOUR_BYTE_UINT(aKey+2) + (((i64)1)<<32)*TWO_BYTE_INT(aKey); testcase( lhs<0 ); break; } case 6: { /* 8-byte signed integer */ x = FOUR_BYTE_UINT(aKey); x = (x<<32) | FOUR_BYTE_UINT(aKey+4); lhs = *(i64*)&x; testcase( lhs<0 ); break; } case 8: lhs = 0; break; case 9: lhs = 1; break; /* This case could be removed without changing the results of running ** this code. Including it causes gcc to generate a faster switch ** statement (since the range of switch targets now starts at zero and ** is contiguous) but does not cause any duplicate code to be generated ** (as gcc is clever enough to combine the two like cases). Other ** compilers might be similar. */ case 0: case 7: return sqlite3VdbeRecordCompare(nKey1, pKey1, pPKey2); default: return sqlite3VdbeRecordCompare(nKey1, pKey1, pPKey2); } if( v>lhs ){ res = pPKey2->r1; }else if( v<lhs ){ res = pPKey2->r2; }else if( pPKey2->nField>1 ){ /* The first fields of the two keys are equal. Compare the trailing ** fields. */ res = sqlite3VdbeRecordCompareWithSkip(nKey1, pKey1, pPKey2, 1); }else{ /* The first fields of the two keys are equal and there are no trailing ** fields. Return pPKey2->default_rc in this case. */ res = pPKey2->default_rc; } assert( vdbeRecordCompareDebug(nKey1, pKey1, pPKey2, res) ); return res; } /* ** This function is an optimized version of sqlite3VdbeRecordCompare() ** that assumes (a) the first field of pPKey2 is a string, (b) the first field ** uses the collation sequence BINARY, and (c) the size-of-header varint ** at the start of (pKey1/nKey1) fits in a single byte. 
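**
** Illustrative consequence of the single-byte header assumption: aKey1[0]
** is the complete header size and aKey1[1] begins the serial type of the
** first field.  For a one-field record containing the text 'abc' (serial
** type 19, header size 2), the routine below therefore starts its
** memcmp() at aKey1[2], the first byte of the string data.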
*/ static int vdbeRecordCompareString( int nKey1, const void *pKey1, /* Left key */ UnpackedRecord *pPKey2 /* Right key */ ){ const u8 *aKey1 = (const u8*)pKey1; int serial_type; int res; vdbeAssertFieldCountWithinLimits(nKey1, pKey1, pPKey2->pKeyInfo); getVarint32(&aKey1[1], serial_type); if( serial_type<12 ){ res = pPKey2->r1; /* (pKey1/nKey1) is a number or a null */ }else if( !(serial_type & 0x01) ){ res = pPKey2->r2; /* (pKey1/nKey1) is a blob */ }else{ int nCmp; int nStr; int szHdr = aKey1[0]; nStr = (serial_type-12) / 2; if( (szHdr + nStr) > nKey1 ){ pPKey2->errCode = (u8)SQLITE_CORRUPT_BKPT; return 0; /* Corruption */ } nCmp = MIN( pPKey2->aMem[0].n, nStr ); res = memcmp(&aKey1[szHdr], pPKey2->aMem[0].z, nCmp); if( res==0 ){ res = nStr - pPKey2->aMem[0].n; if( res==0 ){ if( pPKey2->nField>1 ){ res = sqlite3VdbeRecordCompareWithSkip(nKey1, pKey1, pPKey2, 1); }else{ res = pPKey2->default_rc; } }else if( res>0 ){ res = pPKey2->r2; }else{ res = pPKey2->r1; } }else if( res>0 ){ res = pPKey2->r2; }else{ res = pPKey2->r1; } } assert( vdbeRecordCompareDebug(nKey1, pKey1, pPKey2, res) || CORRUPT_DB || pPKey2->pKeyInfo->db->mallocFailed ); return res; } /* ** Return a pointer to an sqlite3VdbeRecordCompare() compatible function ** suitable for comparing serialized records to the unpacked record passed ** as the only argument. */ SQLITE_PRIVATE RecordCompare sqlite3VdbeFindCompare(UnpackedRecord *p){ /* varintRecordCompareInt() and varintRecordCompareString() both assume ** that the size-of-header varint that occurs at the start of each record ** fits in a single byte (i.e. is 127 or less). varintRecordCompareInt() ** also assumes that it is safe to overread a buffer by at least the ** maximum possible legal header size plus 8 bytes. Because there is ** guaranteed to be at least 74 (but not 136) bytes of padding following each ** buffer passed to varintRecordCompareInt() this makes it convenient to ** limit the size of the header to 64 bytes in cases where the first field ** is an integer. ** ** The easiest way to enforce this limit is to consider only records with ** 13 fields or less. If the first field is an integer, the maximum legal ** header size is (12*5 + 1 + 1) bytes. */ if( (p->pKeyInfo->nField + p->pKeyInfo->nXField)<=13 ){ int flags = p->aMem[0].flags; if( p->pKeyInfo->aSortOrder[0] ){ p->r1 = 1; p->r2 = -1; }else{ p->r1 = -1; p->r2 = 1; } if( (flags & MEM_Int) ){ return vdbeRecordCompareInt; } testcase( flags & MEM_Real ); testcase( flags & MEM_Null ); testcase( flags & MEM_Blob ); if( (flags & (MEM_Real|MEM_Null|MEM_Blob))==0 && p->pKeyInfo->aColl[0]==0 ){ assert( flags & MEM_Str ); return vdbeRecordCompareString; } } return sqlite3VdbeRecordCompare; } /* ** pCur points at an index entry created using the OP_MakeRecord opcode. ** Read the rowid (the last field in the record) and store it in *rowid. ** Return SQLITE_OK if everything works, or an error code otherwise. ** ** pCur might be pointing to text obtained from a corrupt database file. ** So the content cannot be trusted. Do appropriate checks on the content. */ SQLITE_PRIVATE int sqlite3VdbeIdxRowid(sqlite3 *db, BtCursor *pCur, i64 *rowid){ i64 nCellKey = 0; int rc; u32 szHdr; /* Size of the header */ u32 typeRowid; /* Serial type of the rowid */ u32 lenRowid; /* Size of the rowid */ Mem m, v; /* Get the size of the index entry. Only indices entries of less ** than 2GiB are support - anything large must be database corruption. 
** Any corruption is detected in sqlite3BtreeParseCellPtr(), though, so ** this code can safely assume that nCellKey is 32-bits */ assert( sqlite3BtreeCursorIsValid(pCur) ); VVA_ONLY(rc =) sqlite3BtreeKeySize(pCur, &nCellKey); assert( rc==SQLITE_OK ); /* pCur is always valid so KeySize cannot fail */ assert( (nCellKey & SQLITE_MAX_U32)==(u64)nCellKey ); /* Read in the complete content of the index entry */ sqlite3VdbeMemInit(&m, db, 0); rc = sqlite3VdbeMemFromBtree(pCur, 0, (u32)nCellKey, 1, &m); if( rc ){ return rc; } /* The index entry must begin with a header size */ (void)getVarint32((u8*)m.z, szHdr); testcase( szHdr==3 ); testcase( szHdr==m.n ); if( unlikely(szHdr<3 || (int)szHdr>m.n) ){ goto idx_rowid_corruption; } /* The last field of the index should be an integer - the ROWID. ** Verify that the last entry really is an integer. */ (void)getVarint32((u8*)&m.z[szHdr-1], typeRowid); testcase( typeRowid==1 ); testcase( typeRowid==2 ); testcase( typeRowid==3 ); testcase( typeRowid==4 ); testcase( typeRowid==5 ); testcase( typeRowid==6 ); testcase( typeRowid==8 ); testcase( typeRowid==9 ); if( unlikely(typeRowid<1 || typeRowid>9 || typeRowid==7) ){ goto idx_rowid_corruption; } lenRowid = sqlite3SmallTypeSizes[typeRowid]; testcase( (u32)m.n==szHdr+lenRowid ); if( unlikely((u32)m.n<szHdr+lenRowid) ){ goto idx_rowid_corruption; } /* Fetch the integer off the end of the index record */ sqlite3VdbeSerialGet((u8*)&m.z[m.n-lenRowid], typeRowid, &v); *rowid = v.u.i; sqlite3VdbeMemRelease(&m); return SQLITE_OK; /* Jump here if database corruption is detected after m has been ** allocated. Free the m object and return SQLITE_CORRUPT. */ idx_rowid_corruption: testcase( m.szMalloc!=0 ); sqlite3VdbeMemRelease(&m); return SQLITE_CORRUPT_BKPT; } /* ** Compare the key of the index entry that cursor pC is pointing to against ** the key string in pUnpacked. Write into *res a number that is negative, ** zero, or positive if pC is less than, equal to, or greater than pUnpacked. ** Return SQLITE_OK on success. ** ** pUnpacked is either created without a rowid or is truncated so that it ** omits the rowid at the end. The rowid at the end of the index entry ** is ignored as well. Hence, this routine only compares the prefixes ** of the keys prior to the final rowid, not the entire key. */ SQLITE_PRIVATE int sqlite3VdbeIdxKeyCompare( sqlite3 *db, /* Database connection */ VdbeCursor *pC, /* The cursor to compare against */ UnpackedRecord *pUnpacked, /* Unpacked version of key */ int *res /* Write the comparison result here */ ){ i64 nCellKey = 0; int rc; BtCursor *pCur = pC->pCursor; Mem m; assert( sqlite3BtreeCursorIsValid(pCur) ); VVA_ONLY(rc =) sqlite3BtreeKeySize(pCur, &nCellKey); assert( rc==SQLITE_OK ); /* pCur is always valid so KeySize cannot fail */ /* nCellKey will always be between 0 and 0xffffffff because of the way ** that btreeParseCellPtr() and sqlite3GetVarint32() are implemented */ if( nCellKey<=0 || nCellKey>0x7fffffff ){ *res = 0; return SQLITE_CORRUPT_BKPT; } sqlite3VdbeMemInit(&m, db, 0); rc = sqlite3VdbeMemFromBtree(pC->pCursor, 0, (u32)nCellKey, 1, &m); if( rc ){ return rc; } *res = sqlite3VdbeRecordCompare(m.n, m.z, pUnpacked); sqlite3VdbeMemRelease(&m); return SQLITE_OK; } /* ** This routine sets the value to be returned by subsequent calls to ** sqlite3_changes() on the database handle 'db'. */ SQLITE_PRIVATE void sqlite3VdbeSetChanges(sqlite3 *db, int nChange){ assert( sqlite3_mutex_held(db->mutex) ); db->nChange = nChange; db->nTotalChange += nChange; } /* ** Set a flag in the vdbe to update the change counter when it is finalised ** or reset. */ SQLITE_PRIVATE void sqlite3VdbeCountChanges(Vdbe *v){ v->changeCntOn = 1; } /* ** Mark every prepared statement associated with a database connection ** as expired. ** ** An expired statement means that recompilation of the statement is ** recommended. Statements expire when things happen that make their ** programs obsolete. Removing user-defined functions or collating ** sequences, or changing an authorization function are the types of ** things that make prepared statements obsolete. */ SQLITE_PRIVATE void sqlite3ExpirePreparedStatements(sqlite3 *db){ Vdbe *p; for(p = db->pVdbe; p; p=p->pNext){ p->expired = 1; } } /* ** Return the database associated with the Vdbe. */ SQLITE_PRIVATE sqlite3 *sqlite3VdbeDb(Vdbe *v){ return v->db; } /* ** Return a pointer to an sqlite3_value structure containing the value bound ** to parameter iVar of VM v. Except, if the value is an SQL NULL, return ** 0 instead. Unless it is NULL, apply affinity aff (one of the SQLITE_AFF_* ** constants) to the value before returning it. ** ** The returned value must be freed by the caller using sqlite3ValueFree(). 
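**
** Typical usage (a hypothetical caller, shown for illustration only):
**
**     sqlite3_value *pVal = sqlite3VdbeGetBoundValue(v, 1, SQLITE_AFF_TEXT);
**     if( pVal ){
**       ... use the value, then release it ...
**       sqlite3ValueFree(pVal);
**     }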
*/ SQLITE_PRIVATE sqlite3_value *sqlite3VdbeGetBoundValue(Vdbe *v, int iVar, u8 aff){ assert( iVar>0 ); if( v ){ Mem *pMem = &v->aVar[iVar-1]; if( 0==(pMem->flags & MEM_Null) ){ sqlite3_value *pRet = sqlite3ValueNew(v->db); if( pRet ){ sqlite3VdbeMemCopy((Mem *)pRet, pMem); sqlite3ValueApplyAffinity(pRet, aff, SQLITE_UTF8); } return pRet; } } return 0; } /* ** Configure SQL variable iVar so that binding a new value to it signals ** to sqlite3_reoptimize() that re-preparing the statement may result ** in a better query plan. */ SQLITE_PRIVATE void sqlite3VdbeSetVarmask(Vdbe *v, int iVar){ assert( iVar>0 ); if( iVar>32 ){ v->expmask = 0xffffffff; }else{ v->expmask |= ((u32)1 << (iVar-1)); } } #ifndef SQLITE_OMIT_VIRTUALTABLE /* ** Transfer error message text from an sqlite3_vtab.zErrMsg (text stored ** in memory obtained from sqlite3_malloc) into a Vdbe.zErrMsg (text stored ** in memory obtained from sqlite3DbMalloc). */ SQLITE_PRIVATE void sqlite3VtabImportErrmsg(Vdbe *p, sqlite3_vtab *pVtab){ sqlite3 *db = p->db; sqlite3DbFree(db, p->zErrMsg); p->zErrMsg = sqlite3DbStrDup(db, pVtab->zErrMsg); sqlite3_free(pVtab->zErrMsg); pVtab->zErrMsg = 0; } #endif /* SQLITE_OMIT_VIRTUALTABLE */ /************** End of vdbeaux.c *********************************************/ /************** Begin file vdbeapi.c *****************************************/ /* ** 2004 May 26 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** ** This file contains code use to implement APIs that are part of the ** VDBE. */ /* #include "sqliteInt.h" */ /* #include "vdbeInt.h" */ #ifndef SQLITE_OMIT_DEPRECATED /* ** Return TRUE (non-zero) of the statement supplied as an argument needs ** to be recompiled. A statement needs to be recompiled whenever the ** execution environment changes in a way that would alter the program ** that sqlite3_prepare() generates. For example, if new functions or ** collating sequences are registered or if an authorizer function is ** added or changed. */ SQLITE_API int SQLITE_STDCALL sqlite3_expired(sqlite3_stmt *pStmt){ Vdbe *p = (Vdbe*)pStmt; return p==0 || p->expired; } #endif /* ** Check on a Vdbe to make sure it has not been finalized. Log ** an error and return true if it has been finalized (or is otherwise ** invalid). Return false if it is ok. */ static int vdbeSafety(Vdbe *p){ if( p->db==0 ){ sqlite3_log(SQLITE_MISUSE, "API called with finalized prepared statement"); return 1; }else{ return 0; } } static int vdbeSafetyNotNull(Vdbe *p){ if( p==0 ){ sqlite3_log(SQLITE_MISUSE, "API called with NULL prepared statement"); return 1; }else{ return vdbeSafety(p); } } #ifndef SQLITE_OMIT_TRACE /* ** Invoke the profile callback. This routine is only called if we already ** know that the profile callback is defined and needs to be invoked. */ static SQLITE_NOINLINE void invokeProfileCallback(sqlite3 *db, Vdbe *p){ sqlite3_int64 iNow; assert( p->startTime>0 ); assert( db->xProfile!=0 ); assert( db->init.busy==0 ); assert( p->zSql!=0 ); sqlite3OsCurrentTimeInt64(db->pVfs, &iNow); db->xProfile(db->pProfileArg, p->zSql, (iNow - p->startTime)*1000000); p->startTime = 0; } /* ** The checkProfileCallback(DB,P) macro checks to see if a profile callback ** is needed, and it invokes the callback if it is needed. 
*/ # define checkProfileCallback(DB,P) \ if( ((P)->startTime)>0 ){ invokeProfileCallback(DB,P); } #else # define checkProfileCallback(DB,P) /*no-op*/ #endif /* ** The following routine destroys a virtual machine that is created by ** the sqlite3_compile() routine. The integer returned is an SQLITE_ ** success/failure code that describes the result of executing the virtual ** machine. ** ** This routine sets the error code and string returned by ** sqlite3_errcode(), sqlite3_errmsg() and sqlite3_errmsg16(). */ SQLITE_API int SQLITE_STDCALL sqlite3_finalize(sqlite3_stmt *pStmt){ int rc; if( pStmt==0 ){ /* IMPLEMENTATION-OF: R-57228-12904 Invoking sqlite3_finalize() on a NULL ** pointer is a harmless no-op. */ rc = SQLITE_OK; }else{ Vdbe *v = (Vdbe*)pStmt; sqlite3 *db = v->db; if( vdbeSafety(v) ) return SQLITE_MISUSE_BKPT; sqlite3_mutex_enter(db->mutex); checkProfileCallback(db, v); rc = sqlite3VdbeFinalize(v); rc = sqlite3ApiExit(db, rc); sqlite3LeaveMutexAndCloseZombie(db); } return rc; } /* ** Terminate the current execution of an SQL statement and reset it ** back to its starting state so that it can be reused. A success code from ** the prior execution is returned. ** ** This routine sets the error code and string returned by ** sqlite3_errcode(), sqlite3_errmsg() and sqlite3_errmsg16(). */ SQLITE_API int SQLITE_STDCALL sqlite3_reset(sqlite3_stmt *pStmt){ int rc; if( pStmt==0 ){ rc = SQLITE_OK; }else{ Vdbe *v = (Vdbe*)pStmt; sqlite3 *db = v->db; sqlite3_mutex_enter(db->mutex); checkProfileCallback(db, v); rc = sqlite3VdbeReset(v); sqlite3VdbeRewind(v); assert( (rc & (db->errMask))==rc ); rc = sqlite3ApiExit(db, rc); sqlite3_mutex_leave(db->mutex); } return rc; } /* ** Set all the parameters in the compiled SQL statement to NULL. */ SQLITE_API int SQLITE_STDCALL sqlite3_clear_bindings(sqlite3_stmt *pStmt){ int i; int rc = SQLITE_OK; Vdbe *p = (Vdbe*)pStmt; #if SQLITE_THREADSAFE sqlite3_mutex *mutex = ((Vdbe*)pStmt)->db->mutex; #endif sqlite3_mutex_enter(mutex); for(i=0; i<p->nVar; i++){ sqlite3VdbeMemRelease(&p->aVar[i]); p->aVar[i].flags = MEM_Null; } if( p->isPrepareV2 && p->expmask ){ p->expired = 1; } sqlite3_mutex_leave(mutex); return rc; } /**************************** sqlite3_value_ ******************************* ** The following routines extract information from a Mem or sqlite3_value ** structure. */ SQLITE_API const void *SQLITE_STDCALL sqlite3_value_blob(sqlite3_value *pVal){ Mem *p = (Mem*)pVal; if( p->flags & (MEM_Blob|MEM_Str) ){ if( sqlite3VdbeMemExpandBlob(p)!=SQLITE_OK ){ assert( p->flags==MEM_Null && p->z==0 ); return 0; } p->flags |= MEM_Blob; return p->n ? 
p->z : 0; }else{ return sqlite3_value_text(pVal); } } SQLITE_API int SQLITE_STDCALL sqlite3_value_bytes(sqlite3_value *pVal){ return sqlite3ValueBytes(pVal, SQLITE_UTF8); } SQLITE_API int SQLITE_STDCALL sqlite3_value_bytes16(sqlite3_value *pVal){ return sqlite3ValueBytes(pVal, SQLITE_UTF16NATIVE); } SQLITE_API double SQLITE_STDCALL sqlite3_value_double(sqlite3_value *pVal){ return sqlite3VdbeRealValue((Mem*)pVal); } SQLITE_API int SQLITE_STDCALL sqlite3_value_int(sqlite3_value *pVal){ return (int)sqlite3VdbeIntValue((Mem*)pVal); } SQLITE_API sqlite_int64 SQLITE_STDCALL sqlite3_value_int64(sqlite3_value *pVal){ return sqlite3VdbeIntValue((Mem*)pVal); } SQLITE_API const unsigned char *SQLITE_STDCALL sqlite3_value_text(sqlite3_value *pVal){ return (const unsigned char *)sqlite3ValueText(pVal, SQLITE_UTF8); } #ifndef SQLITE_OMIT_UTF16 SQLITE_API const void *SQLITE_STDCALL sqlite3_value_text16(sqlite3_value* pVal){ return sqlite3ValueText(pVal, SQLITE_UTF16NATIVE); } SQLITE_API const void *SQLITE_STDCALL sqlite3_value_text16be(sqlite3_value *pVal){ return sqlite3ValueText(pVal, SQLITE_UTF16BE); } SQLITE_API const void *SQLITE_STDCALL sqlite3_value_text16le(sqlite3_value *pVal){ return sqlite3ValueText(pVal, SQLITE_UTF16LE); } #endif /* SQLITE_OMIT_UTF16 */ /* EVIDENCE-OF: R-12793-43283 Every value in SQLite has one of five ** fundamental datatypes: 64-bit signed integer 64-bit IEEE floating ** point number string BLOB NULL */ SQLITE_API int SQLITE_STDCALL sqlite3_value_type(sqlite3_value* pVal){ static const u8 aType[] = { SQLITE_BLOB, /* 0x00 */ SQLITE_NULL, /* 0x01 */ SQLITE_TEXT, /* 0x02 */ SQLITE_NULL, /* 0x03 */ SQLITE_INTEGER, /* 0x04 */ SQLITE_NULL, /* 0x05 */ SQLITE_INTEGER, /* 0x06 */ SQLITE_NULL, /* 0x07 */ SQLITE_FLOAT, /* 0x08 */ SQLITE_NULL, /* 0x09 */ SQLITE_FLOAT, /* 0x0a */ SQLITE_NULL, /* 0x0b */ SQLITE_INTEGER, /* 0x0c */ SQLITE_NULL, /* 0x0d */ SQLITE_INTEGER, /* 0x0e */ SQLITE_NULL, /* 0x0f */ SQLITE_BLOB, /* 0x10 */ SQLITE_NULL, /* 0x11 */ SQLITE_TEXT, /* 0x12 */ SQLITE_NULL, /* 0x13 */ SQLITE_INTEGER, /* 0x14 */ SQLITE_NULL, /* 0x15 */ SQLITE_INTEGER, /* 0x16 */ SQLITE_NULL, /* 0x17 */ SQLITE_FLOAT, /* 0x18 */ SQLITE_NULL, /* 0x19 */ SQLITE_FLOAT, /* 0x1a */ SQLITE_NULL, /* 0x1b */ SQLITE_INTEGER, /* 0x1c */ SQLITE_NULL, /* 0x1d */ SQLITE_INTEGER, /* 0x1e */ SQLITE_NULL, /* 0x1f */ }; return aType[pVal->flags&MEM_AffMask]; } /* Make a copy of an sqlite3_value object */ SQLITE_API sqlite3_value *SQLITE_STDCALL sqlite3_value_dup(const sqlite3_value *pOrig){ sqlite3_value *pNew; if( pOrig==0 ) return 0; pNew = sqlite3_malloc( sizeof(*pNew) ); if( pNew==0 ) return 0; memset(pNew, 0, sizeof(*pNew)); memcpy(pNew, pOrig, MEMCELLSIZE); pNew->flags &= ~MEM_Dyn; pNew->db = 0; if( pNew->flags&(MEM_Str|MEM_Blob) ){ pNew->flags &= ~(MEM_Static|MEM_Dyn); pNew->flags |= MEM_Ephem; if( sqlite3VdbeMemMakeWriteable(pNew)!=SQLITE_OK ){ sqlite3ValueFree(pNew); pNew = 0; } } return pNew; } /* Destroy an sqlite3_value object previously obtained from ** sqlite3_value_dup(). */ SQLITE_API void SQLITE_STDCALL sqlite3_value_free(sqlite3_value *pOld){ sqlite3ValueFree(pOld); } /**************************** sqlite3_result_ ******************************* ** The following routines are used by user-defined functions to specify ** the function result. 
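**
** A minimal sketch of a user-defined function built on these routines
** (hypothetical example, not part of the library):
**
**     static void halveFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
**       if( sqlite3_value_type(argv[0])==SQLITE_NULL ){
**         sqlite3_result_null(ctx);
**       }else{
**         sqlite3_result_double(ctx, sqlite3_value_double(argv[0])/2.0);
**       }
**     }
**
** Such a function would be registered with sqlite3_create_function().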
** ** The setStrOrError() function calls sqlite3VdbeMemSetStr() to store the ** result as a string or blob but if the string or blob is too large, it ** then sets the error code to SQLITE_TOOBIG ** ** The invokeValueDestructor(P,X) routine invokes destructor function X() ** on value P is not going to be used and need to be destroyed. */ static void setResultStrOrError( sqlite3_context *pCtx, /* Function context */ const char *z, /* String pointer */ int n, /* Bytes in string, or negative */ u8 enc, /* Encoding of z. 0 for BLOBs */ void (*xDel)(void*) /* Destructor function */ ){ if( sqlite3VdbeMemSetStr(pCtx->pOut, z, n, enc, xDel)==SQLITE_TOOBIG ){ sqlite3_result_error_toobig(pCtx); } } static int invokeValueDestructor( const void *p, /* Value to destroy */ void (*xDel)(void*), /* The destructor */ sqlite3_context *pCtx /* Set a SQLITE_TOOBIG error if no NULL */ ){ assert( xDel!=SQLITE_DYNAMIC ); if( xDel==0 ){ /* noop */ }else if( xDel==SQLITE_TRANSIENT ){ /* noop */ }else{ xDel((void*)p); } if( pCtx ) sqlite3_result_error_toobig(pCtx); return SQLITE_TOOBIG; } SQLITE_API void SQLITE_STDCALL sqlite3_result_blob( sqlite3_context *pCtx, const void *z, int n, void (*xDel)(void *) ){ assert( n>=0 ); assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); setResultStrOrError(pCtx, z, n, 0, xDel); } SQLITE_API void SQLITE_STDCALL sqlite3_result_blob64( sqlite3_context *pCtx, const void *z, sqlite3_uint64 n, void (*xDel)(void *) ){ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); assert( xDel!=SQLITE_DYNAMIC ); if( n>0x7fffffff ){ (void)invokeValueDestructor(z, xDel, pCtx); }else{ setResultStrOrError(pCtx, z, (int)n, 0, xDel); } } SQLITE_API void SQLITE_STDCALL sqlite3_result_double(sqlite3_context *pCtx, double rVal){ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); sqlite3VdbeMemSetDouble(pCtx->pOut, rVal); } SQLITE_API void SQLITE_STDCALL sqlite3_result_error(sqlite3_context *pCtx, const char *z, int n){ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); pCtx->isError = SQLITE_ERROR; pCtx->fErrorOrAux = 1; sqlite3VdbeMemSetStr(pCtx->pOut, z, n, SQLITE_UTF8, SQLITE_TRANSIENT); } #ifndef SQLITE_OMIT_UTF16 SQLITE_API void SQLITE_STDCALL sqlite3_result_error16(sqlite3_context *pCtx, const void *z, int n){ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); pCtx->isError = SQLITE_ERROR; pCtx->fErrorOrAux = 1; sqlite3VdbeMemSetStr(pCtx->pOut, z, n, SQLITE_UTF16NATIVE, SQLITE_TRANSIENT); } #endif SQLITE_API void SQLITE_STDCALL sqlite3_result_int(sqlite3_context *pCtx, int iVal){ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); sqlite3VdbeMemSetInt64(pCtx->pOut, (i64)iVal); } SQLITE_API void SQLITE_STDCALL sqlite3_result_int64(sqlite3_context *pCtx, i64 iVal){ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); sqlite3VdbeMemSetInt64(pCtx->pOut, iVal); } SQLITE_API void SQLITE_STDCALL sqlite3_result_null(sqlite3_context *pCtx){ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); sqlite3VdbeMemSetNull(pCtx->pOut); } SQLITE_API void SQLITE_STDCALL sqlite3_result_text( sqlite3_context *pCtx, const char *z, int n, void (*xDel)(void *) ){ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); setResultStrOrError(pCtx, z, n, SQLITE_UTF8, xDel); } SQLITE_API void SQLITE_STDCALL sqlite3_result_text64( sqlite3_context *pCtx, const char *z, sqlite3_uint64 n, void (*xDel)(void *), unsigned char enc ){ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); assert( xDel!=SQLITE_DYNAMIC ); if( enc==SQLITE_UTF16 ) enc = SQLITE_UTF16NATIVE; if( n>0x7fffffff ){ (void)invokeValueDestructor(z, xDel, pCtx); 
}else{ setResultStrOrError(pCtx, z, (int)n, enc, xDel); } } #ifndef SQLITE_OMIT_UTF16 SQLITE_API void SQLITE_STDCALL sqlite3_result_text16( sqlite3_context *pCtx, const void *z, int n, void (*xDel)(void *) ){ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); setResultStrOrError(pCtx, z, n, SQLITE_UTF16NATIVE, xDel); } SQLITE_API void SQLITE_STDCALL sqlite3_result_text16be( sqlite3_context *pCtx, const void *z, int n, void (*xDel)(void *) ){ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); setResultStrOrError(pCtx, z, n, SQLITE_UTF16BE, xDel); } SQLITE_API void SQLITE_STDCALL sqlite3_result_text16le( sqlite3_context *pCtx, const void *z, int n, void (*xDel)(void *) ){ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); setResultStrOrError(pCtx, z, n, SQLITE_UTF16LE, xDel); } #endif /* SQLITE_OMIT_UTF16 */ SQLITE_API void SQLITE_STDCALL sqlite3_result_value(sqlite3_context *pCtx, sqlite3_value *pValue){ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); sqlite3VdbeMemCopy(pCtx->pOut, pValue); } SQLITE_API void SQLITE_STDCALL sqlite3_result_zeroblob(sqlite3_context *pCtx, int n){ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); sqlite3VdbeMemSetZeroBlob(pCtx->pOut, n); } SQLITE_API int SQLITE_STDCALL sqlite3_result_zeroblob64(sqlite3_context *pCtx, u64 n){ Mem *pOut = pCtx->pOut; assert( sqlite3_mutex_held(pOut->db->mutex) ); if( n>(u64)pOut->db->aLimit[SQLITE_LIMIT_LENGTH] ){ return SQLITE_TOOBIG; } sqlite3VdbeMemSetZeroBlob(pCtx->pOut, (int)n); return SQLITE_OK; } SQLITE_API void SQLITE_STDCALL sqlite3_result_error_code(sqlite3_context *pCtx, int errCode){ pCtx->isError = errCode; pCtx->fErrorOrAux = 1; #ifdef SQLITE_DEBUG if( pCtx->pVdbe ) pCtx->pVdbe->rcApp = errCode; #endif if( pCtx->pOut->flags & MEM_Null ){ sqlite3VdbeMemSetStr(pCtx->pOut, sqlite3ErrStr(errCode), -1, SQLITE_UTF8, SQLITE_STATIC); } } /* Force an SQLITE_TOOBIG error. */ SQLITE_API void SQLITE_STDCALL sqlite3_result_error_toobig(sqlite3_context *pCtx){ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); pCtx->isError = SQLITE_TOOBIG; pCtx->fErrorOrAux = 1; sqlite3VdbeMemSetStr(pCtx->pOut, "string or blob too big", -1, SQLITE_UTF8, SQLITE_STATIC); } /* An SQLITE_NOMEM error. */ SQLITE_API void SQLITE_STDCALL sqlite3_result_error_nomem(sqlite3_context *pCtx){ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); sqlite3VdbeMemSetNull(pCtx->pOut); pCtx->isError = SQLITE_NOMEM; pCtx->fErrorOrAux = 1; pCtx->pOut->db->mallocFailed = 1; } /* ** This function is called after a transaction has been committed. It ** invokes callbacks registered with sqlite3_wal_hook() as required. */ static int doWalCallbacks(sqlite3 *db){ int rc = SQLITE_OK; #ifndef SQLITE_OMIT_WAL int i; for(i=0; inDb; i++){ Btree *pBt = db->aDb[i].pBt; if( pBt ){ int nEntry; sqlite3BtreeEnter(pBt); nEntry = sqlite3PagerWalCallback(sqlite3BtreePager(pBt)); sqlite3BtreeLeave(pBt); if( db->xWalCallback && nEntry>0 && rc==SQLITE_OK ){ rc = db->xWalCallback(db->pWalArg, db, db->aDb[i].zName, nEntry); } } } #endif return rc; } /* ** Execute the statement pStmt, either until a row of data is ready, the ** statement is completely executed or an error occurs. ** ** This routine implements the bulk of the logic behind the sqlite_step() ** API. The only thing omitted is the automatic recompile if a ** schema change has occurred. That detail is handled by the ** outer sqlite3_step() wrapper procedure. 
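**
** (Editor's illustrative sketch, not part of the original SQLite sources:
** seen from the application, the code path below is driven by the familiar
** prepare/step/finalize pattern.  The SQL text and column index here are
** hypothetical.
**
**   sqlite3_stmt *pStmt;
**   int rc = sqlite3_prepare_v2(db, "SELECT name FROM tbl", -1, &pStmt, 0);
**   if( rc==SQLITE_OK ){
**     while( (rc = sqlite3_step(pStmt))==SQLITE_ROW ){
**       printf("%s\n", (const char*)sqlite3_column_text(pStmt, 0));
**     }
**   }
**   sqlite3_finalize(pStmt);
**
** SQLITE_DONE ends the loop normally; any other code indicates an error.)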
*/ static int sqlite3Step(Vdbe *p){ sqlite3 *db; int rc; assert(p); if( p->magic!=VDBE_MAGIC_RUN ){ /* We used to require that sqlite3_reset() be called before retrying ** sqlite3_step() after any error or after SQLITE_DONE. But beginning ** with version 3.7.0, we changed this so that sqlite3_reset() would ** be called automatically instead of throwing the SQLITE_MISUSE error. ** This "automatic-reset" change is not technically an incompatibility, ** since any application that receives an SQLITE_MISUSE is broken by ** definition. ** ** Nevertheless, some published applications that were originally written ** for version 3.6.23 or earlier do in fact depend on SQLITE_MISUSE ** returns, and those were broken by the automatic-reset change. As a ** a work-around, the SQLITE_OMIT_AUTORESET compile-time restores the ** legacy behavior of returning SQLITE_MISUSE for cases where the ** previous sqlite3_step() returned something other than a SQLITE_LOCKED ** or SQLITE_BUSY error. */ #ifdef SQLITE_OMIT_AUTORESET if( (rc = p->rc&0xff)==SQLITE_BUSY || rc==SQLITE_LOCKED ){ sqlite3_reset((sqlite3_stmt*)p); }else{ return SQLITE_MISUSE_BKPT; } #else sqlite3_reset((sqlite3_stmt*)p); #endif } /* Check that malloc() has not failed. If it has, return early. */ db = p->db; if( db->mallocFailed ){ p->rc = SQLITE_NOMEM; return SQLITE_NOMEM; } if( p->pc<=0 && p->expired ){ p->rc = SQLITE_SCHEMA; rc = SQLITE_ERROR; goto end_of_step; } if( p->pc<0 ){ /* If there are no other statements currently running, then ** reset the interrupt flag. This prevents a call to sqlite3_interrupt ** from interrupting a statement that has not yet started. */ if( db->nVdbeActive==0 ){ db->u1.isInterrupted = 0; } assert( db->nVdbeWrite>0 || db->autoCommit==0 || (db->nDeferredCons==0 && db->nDeferredImmCons==0) ); #ifndef SQLITE_OMIT_TRACE if( db->xProfile && !db->init.busy && p->zSql ){ sqlite3OsCurrentTimeInt64(db->pVfs, &p->startTime); }else{ assert( p->startTime==0 ); } #endif db->nVdbeActive++; if( p->readOnly==0 ) db->nVdbeWrite++; if( p->bIsReader ) db->nVdbeRead++; p->pc = 0; } #ifdef SQLITE_DEBUG p->rcApp = SQLITE_OK; #endif #ifndef SQLITE_OMIT_EXPLAIN if( p->explain ){ rc = sqlite3VdbeList(p); }else #endif /* SQLITE_OMIT_EXPLAIN */ { db->nVdbeExec++; rc = sqlite3VdbeExec(p); db->nVdbeExec--; } #ifndef SQLITE_OMIT_TRACE /* If the statement completed successfully, invoke the profile callback */ if( rc!=SQLITE_ROW ) checkProfileCallback(db, p); #endif if( rc==SQLITE_DONE ){ assert( p->rc==SQLITE_OK ); p->rc = doWalCallbacks(db); if( p->rc!=SQLITE_OK ){ rc = SQLITE_ERROR; } } db->errCode = rc; if( SQLITE_NOMEM==sqlite3ApiExit(p->db, p->rc) ){ p->rc = SQLITE_NOMEM; } end_of_step: /* At this point local variable rc holds the value that should be ** returned if this statement was compiled using the legacy ** sqlite3_prepare() interface. According to the docs, this can only ** be one of the values in the first assert() below. Variable p->rc ** contains the value that would be returned if sqlite3_finalize() ** were called on statement p. */ assert( rc==SQLITE_ROW || rc==SQLITE_DONE || rc==SQLITE_ERROR || rc==SQLITE_BUSY || rc==SQLITE_MISUSE ); assert( (p->rc!=SQLITE_ROW && p->rc!=SQLITE_DONE) || p->rc==p->rcApp ); if( p->isPrepareV2 && rc!=SQLITE_ROW && rc!=SQLITE_DONE ){ /* If this statement was prepared using sqlite3_prepare_v2(), and an ** error has occurred, then return the error code in p->rc to the ** caller. Set the error code in the database handle to the same value. 
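**
** (Editor's note, added for clarity and not original SQLite commentary: the
** practical consequence is that a statement from sqlite3_prepare_v2() sees
** the detailed result code, e.g. SQLITE_CONSTRAINT, returned directly from
** sqlite3_step(), whereas a statement from the legacy sqlite3_prepare()
** receives SQLITE_ERROR here and learns the detailed code only from a later
** sqlite3_reset() or sqlite3_finalize().)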
*/ rc = sqlite3VdbeTransferError(p); } return (rc&db->errMask); } /* ** This is the top-level implementation of sqlite3_step(). Call ** sqlite3Step() to do most of the work. If a schema error occurs, ** call sqlite3Reprepare() and try again. */ SQLITE_API int SQLITE_STDCALL sqlite3_step(sqlite3_stmt *pStmt){ int rc = SQLITE_OK; /* Result from sqlite3Step() */ int rc2 = SQLITE_OK; /* Result from sqlite3Reprepare() */ Vdbe *v = (Vdbe*)pStmt; /* the prepared statement */ int cnt = 0; /* Counter to prevent infinite loop of reprepares */ sqlite3 *db; /* The database connection */ if( vdbeSafetyNotNull(v) ){ return SQLITE_MISUSE_BKPT; } db = v->db; sqlite3_mutex_enter(db->mutex); v->doingRerun = 0; while( (rc = sqlite3Step(v))==SQLITE_SCHEMA && cnt++ < SQLITE_MAX_SCHEMA_RETRY ){ int savedPc = v->pc; rc2 = rc = sqlite3Reprepare(v); if( rc!=SQLITE_OK) break; sqlite3_reset(pStmt); if( savedPc>=0 ) v->doingRerun = 1; assert( v->expired==0 ); } if( rc2!=SQLITE_OK ){ /* This case occurs after failing to recompile an sql statement. ** The error message from the SQL compiler has already been loaded ** into the database handle. This block copies the error message ** from the database handle into the statement and sets the statement ** program counter to 0 to ensure that when the statement is ** finalized or reset the parser error message is available via ** sqlite3_errmsg() and sqlite3_errcode(). */ const char *zErr = (const char *)sqlite3_value_text(db->pErr); sqlite3DbFree(db, v->zErrMsg); if( !db->mallocFailed ){ v->zErrMsg = sqlite3DbStrDup(db, zErr); v->rc = rc2; } else { v->zErrMsg = 0; v->rc = rc = SQLITE_NOMEM; } } rc = sqlite3ApiExit(db, rc); sqlite3_mutex_leave(db->mutex); return rc; } /* ** Extract the user data from a sqlite3_context structure and return a ** pointer to it. */ SQLITE_API void *SQLITE_STDCALL sqlite3_user_data(sqlite3_context *p){ assert( p && p->pFunc ); return p->pFunc->pUserData; } /* ** Extract the user data from a sqlite3_context structure and return a ** pointer to it. ** ** IMPLEMENTATION-OF: R-46798-50301 The sqlite3_context_db_handle() interface ** returns a copy of the pointer to the database connection (the 1st ** parameter) of the sqlite3_create_function() and ** sqlite3_create_function16() routines that originally registered the ** application defined function. */ SQLITE_API sqlite3 *SQLITE_STDCALL sqlite3_context_db_handle(sqlite3_context *p){ assert( p && p->pFunc ); return p->pOut->db; } /* ** Return the current time for a statement. If the current time ** is requested more than once within the same run of a single prepared ** statement, the exact same time is returned for each invocation regardless ** of the amount of time that elapses between invocations. In other words, ** the time returned is always the time of the first call. */ SQLITE_PRIVATE sqlite3_int64 sqlite3StmtCurrentTime(sqlite3_context *p){ int rc; #ifndef SQLITE_ENABLE_STAT3_OR_STAT4 sqlite3_int64 *piTime = &p->pVdbe->iCurrentTime; assert( p->pVdbe!=0 ); #else sqlite3_int64 iTime = 0; sqlite3_int64 *piTime = p->pVdbe!=0 ? &p->pVdbe->iCurrentTime : &iTime; #endif if( *piTime==0 ){ rc = sqlite3OsCurrentTimeInt64(p->pOut->db->pVfs, piTime); if( rc ) *piTime = 0; } return *piTime; } /* ** The following is the implementation of an SQL function that always ** fails with an error message stating that the function is used in the ** wrong context. 
The sqlite3_overload_function() API might construct ** SQL function that use this routine so that the functions will exist ** for name resolution but are actually overloaded by the xFindFunction ** method of virtual tables. */ SQLITE_PRIVATE void sqlite3InvalidFunction( sqlite3_context *context, /* The function calling context */ int NotUsed, /* Number of arguments to the function */ sqlite3_value **NotUsed2 /* Value of each argument */ ){ const char *zName = context->pFunc->zName; char *zErr; UNUSED_PARAMETER2(NotUsed, NotUsed2); zErr = sqlite3_mprintf( "unable to use function %s in the requested context", zName); sqlite3_result_error(context, zErr, -1); sqlite3_free(zErr); } /* ** Create a new aggregate context for p and return a pointer to ** its pMem->z element. */ static SQLITE_NOINLINE void *createAggContext(sqlite3_context *p, int nByte){ Mem *pMem = p->pMem; assert( (pMem->flags & MEM_Agg)==0 ); if( nByte<=0 ){ sqlite3VdbeMemSetNull(pMem); pMem->z = 0; }else{ sqlite3VdbeMemClearAndResize(pMem, nByte); pMem->flags = MEM_Agg; pMem->u.pDef = p->pFunc; if( pMem->z ){ memset(pMem->z, 0, nByte); } } return (void*)pMem->z; } /* ** Allocate or return the aggregate context for a user function. A new ** context is allocated on the first call. Subsequent calls return the ** same context that was returned on prior calls. */ SQLITE_API void *SQLITE_STDCALL sqlite3_aggregate_context(sqlite3_context *p, int nByte){ assert( p && p->pFunc && p->pFunc->xStep ); assert( sqlite3_mutex_held(p->pOut->db->mutex) ); testcase( nByte<0 ); if( (p->pMem->flags & MEM_Agg)==0 ){ return createAggContext(p, nByte); }else{ return (void*)p->pMem->z; } } /* ** Return the auxiliary data pointer, if any, for the iArg'th argument to ** the user-function defined by pCtx. */ SQLITE_API void *SQLITE_STDCALL sqlite3_get_auxdata(sqlite3_context *pCtx, int iArg){ AuxData *pAuxData; assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); #if SQLITE_ENABLE_STAT3_OR_STAT4 if( pCtx->pVdbe==0 ) return 0; #else assert( pCtx->pVdbe!=0 ); #endif for(pAuxData=pCtx->pVdbe->pAuxData; pAuxData; pAuxData=pAuxData->pNext){ if( pAuxData->iOp==pCtx->iOp && pAuxData->iArg==iArg ) break; } return (pAuxData ? pAuxData->pAux : 0); } /* ** Set the auxiliary data pointer and delete function, for the iArg'th ** argument to the user-function defined by pCtx. Any previous value is ** deleted by calling the delete function specified when it was set. */ SQLITE_API void SQLITE_STDCALL sqlite3_set_auxdata( sqlite3_context *pCtx, int iArg, void *pAux, void (*xDelete)(void*) ){ AuxData *pAuxData; Vdbe *pVdbe = pCtx->pVdbe; assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); if( iArg<0 ) goto failed; #ifdef SQLITE_ENABLE_STAT3_OR_STAT4 if( pVdbe==0 ) goto failed; #else assert( pVdbe!=0 ); #endif for(pAuxData=pVdbe->pAuxData; pAuxData; pAuxData=pAuxData->pNext){ if( pAuxData->iOp==pCtx->iOp && pAuxData->iArg==iArg ) break; } if( pAuxData==0 ){ pAuxData = sqlite3DbMallocZero(pVdbe->db, sizeof(AuxData)); if( !pAuxData ) goto failed; pAuxData->iOp = pCtx->iOp; pAuxData->iArg = iArg; pAuxData->pNext = pVdbe->pAuxData; pVdbe->pAuxData = pAuxData; if( pCtx->fErrorOrAux==0 ){ pCtx->isError = 0; pCtx->fErrorOrAux = 1; } }else if( pAuxData->xDelete ){ pAuxData->xDelete(pAuxData->pAux); } pAuxData->pAux = pAux; pAuxData->xDelete = xDelete; return; failed: if( xDelete ){ xDelete(pAux); } } #ifndef SQLITE_OMIT_DEPRECATED /* ** Return the number of times the Step function of an aggregate has been ** called. ** ** This function is deprecated. Do not use it for new code. 
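**
** (Editor's illustrative sketch of the recommended alternative, not part of
** the original SQLite sources: an aggregate's xStep callback can keep its
** own counter in the zeroed space handed out by sqlite3_aggregate_context(),
** for example
**
**   int *pCnt = (int*)sqlite3_aggregate_context(ctx, sizeof(int));
**   if( pCnt ) (*pCnt)++;
**
** which makes any call to sqlite3_aggregate_count() unnecessary.)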
It is ** provide only to avoid breaking legacy code. New aggregate function ** implementations should keep their own counts within their aggregate ** context. */ SQLITE_API int SQLITE_STDCALL sqlite3_aggregate_count(sqlite3_context *p){ assert( p && p->pMem && p->pFunc && p->pFunc->xStep ); return p->pMem->n; } #endif /* ** Return the number of columns in the result set for the statement pStmt. */ SQLITE_API int SQLITE_STDCALL sqlite3_column_count(sqlite3_stmt *pStmt){ Vdbe *pVm = (Vdbe *)pStmt; return pVm ? pVm->nResColumn : 0; } /* ** Return the number of values available from the current row of the ** currently executing statement pStmt. */ SQLITE_API int SQLITE_STDCALL sqlite3_data_count(sqlite3_stmt *pStmt){ Vdbe *pVm = (Vdbe *)pStmt; if( pVm==0 || pVm->pResultSet==0 ) return 0; return pVm->nResColumn; } /* ** Return a pointer to static memory containing an SQL NULL value. */ static const Mem *columnNullValue(void){ /* Even though the Mem structure contains an element ** of type i64, on certain architectures (x86) with certain compiler ** switches (-Os), gcc may align this Mem object on a 4-byte boundary ** instead of an 8-byte one. This all works fine, except that when ** running with SQLITE_DEBUG defined the SQLite code sometimes assert()s ** that a Mem structure is located on an 8-byte boundary. To prevent ** these assert()s from failing, when building with SQLITE_DEBUG defined ** using gcc, we force nullMem to be 8-byte aligned using the magical ** __attribute__((aligned(8))) macro. */ static const Mem nullMem #if defined(SQLITE_DEBUG) && defined(__GNUC__) __attribute__((aligned(8))) #endif = { /* .u = */ {0}, /* .flags = */ MEM_Null, /* .enc = */ 0, /* .n = */ 0, /* .z = */ 0, /* .zMalloc = */ 0, /* .szMalloc = */ 0, /* .iPadding1 = */ 0, /* .db = */ 0, /* .xDel = */ 0, #ifdef SQLITE_DEBUG /* .pScopyFrom = */ 0, /* .pFiller = */ 0, #endif }; return &nullMem; } /* ** Check to see if column iCol of the given statement is valid. If ** it is, return a pointer to the Mem for the value of that column. ** If iCol is not valid, return a pointer to a Mem which has a value ** of NULL. */ static Mem *columnMem(sqlite3_stmt *pStmt, int i){ Vdbe *pVm; Mem *pOut; pVm = (Vdbe *)pStmt; if( pVm && pVm->pResultSet!=0 && inResColumn && i>=0 ){ sqlite3_mutex_enter(pVm->db->mutex); pOut = &pVm->pResultSet[i]; }else{ if( pVm && ALWAYS(pVm->db) ){ sqlite3_mutex_enter(pVm->db->mutex); sqlite3Error(pVm->db, SQLITE_RANGE); } pOut = (Mem*)columnNullValue(); } return pOut; } /* ** This function is called after invoking an sqlite3_value_XXX function on a ** column value (i.e. a value returned by evaluating an SQL expression in the ** select list of a SELECT statement) that may cause a malloc() failure. If ** malloc() has failed, the threads mallocFailed flag is cleared and the result ** code of statement pStmt set to SQLITE_NOMEM. ** ** Specifically, this is called from within: ** ** sqlite3_column_int() ** sqlite3_column_int64() ** sqlite3_column_text() ** sqlite3_column_text16() ** sqlite3_column_real() ** sqlite3_column_bytes() ** sqlite3_column_bytes16() ** sqiite3_column_blob() */ static void columnMallocFailure(sqlite3_stmt *pStmt) { /* If malloc() failed during an encoding conversion within an ** sqlite3_column_XXX API, then set the return code of the statement to ** SQLITE_NOMEM. The next call to _step() (if any) will return SQLITE_ERROR ** and _finalize() will return NOMEM. 
*/ Vdbe *p = (Vdbe *)pStmt; if( p ){ p->rc = sqlite3ApiExit(p->db, p->rc); sqlite3_mutex_leave(p->db->mutex); } } /**************************** sqlite3_column_ ******************************* ** The following routines are used to access elements of the current row ** in the result set. */ SQLITE_API const void *SQLITE_STDCALL sqlite3_column_blob(sqlite3_stmt *pStmt, int i){ const void *val; val = sqlite3_value_blob( columnMem(pStmt,i) ); /* Even though there is no encoding conversion, value_blob() might ** need to call malloc() to expand the result of a zeroblob() ** expression. */ columnMallocFailure(pStmt); return val; } SQLITE_API int SQLITE_STDCALL sqlite3_column_bytes(sqlite3_stmt *pStmt, int i){ int val = sqlite3_value_bytes( columnMem(pStmt,i) ); columnMallocFailure(pStmt); return val; } SQLITE_API int SQLITE_STDCALL sqlite3_column_bytes16(sqlite3_stmt *pStmt, int i){ int val = sqlite3_value_bytes16( columnMem(pStmt,i) ); columnMallocFailure(pStmt); return val; } SQLITE_API double SQLITE_STDCALL sqlite3_column_double(sqlite3_stmt *pStmt, int i){ double val = sqlite3_value_double( columnMem(pStmt,i) ); columnMallocFailure(pStmt); return val; } SQLITE_API int SQLITE_STDCALL sqlite3_column_int(sqlite3_stmt *pStmt, int i){ int val = sqlite3_value_int( columnMem(pStmt,i) ); columnMallocFailure(pStmt); return val; } SQLITE_API sqlite_int64 SQLITE_STDCALL sqlite3_column_int64(sqlite3_stmt *pStmt, int i){ sqlite_int64 val = sqlite3_value_int64( columnMem(pStmt,i) ); columnMallocFailure(pStmt); return val; } SQLITE_API const unsigned char *SQLITE_STDCALL sqlite3_column_text(sqlite3_stmt *pStmt, int i){ const unsigned char *val = sqlite3_value_text( columnMem(pStmt,i) ); columnMallocFailure(pStmt); return val; } SQLITE_API sqlite3_value *SQLITE_STDCALL sqlite3_column_value(sqlite3_stmt *pStmt, int i){ Mem *pOut = columnMem(pStmt, i); if( pOut->flags&MEM_Static ){ pOut->flags &= ~MEM_Static; pOut->flags |= MEM_Ephem; } columnMallocFailure(pStmt); return (sqlite3_value *)pOut; } #ifndef SQLITE_OMIT_UTF16 SQLITE_API const void *SQLITE_STDCALL sqlite3_column_text16(sqlite3_stmt *pStmt, int i){ const void *val = sqlite3_value_text16( columnMem(pStmt,i) ); columnMallocFailure(pStmt); return val; } #endif /* SQLITE_OMIT_UTF16 */ SQLITE_API int SQLITE_STDCALL sqlite3_column_type(sqlite3_stmt *pStmt, int i){ int iType = sqlite3_value_type( columnMem(pStmt,i) ); columnMallocFailure(pStmt); return iType; } /* ** Convert the N-th element of pStmt->pColName[] into a string using ** xFunc() then return that string. If N is out of range, return 0. ** ** There are up to 5 names for each column. useType determines which ** name is returned. Here are the names: ** ** 0 The column name as it should be displayed for output ** 1 The datatype name for the column ** 2 The name of the database that the column derives from ** 3 The name of the table that the column derives from ** 4 The name of the table column that the result column derives from ** ** If the result is not a simple column reference (if it is an expression ** or a constant) then useTypes 2, 3, and 4 return NULL. 
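**
** (Editor's illustrative example, not part of the original SQLite sources:
** for a hypothetical statement "SELECT t1.a AS x FROM main.t1" the public
** wrappers below, which all funnel into this routine, would report
**
**   sqlite3_column_name(pStmt, 0)            ->  "x"     (useType 0)
**   sqlite3_column_decltype(pStmt, 0)        ->  declared type of t1.a (useType 1)
**   sqlite3_column_database_name(pStmt, 0)   ->  "main"  (useType 2)
**   sqlite3_column_table_name(pStmt, 0)      ->  "t1"    (useType 3)
**   sqlite3_column_origin_name(pStmt, 0)     ->  "a"     (useType 4)
**
** with the last three available only under SQLITE_ENABLE_COLUMN_METADATA.)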
*/ static const void *columnName( sqlite3_stmt *pStmt, int N, const void *(*xFunc)(Mem*), int useType ){ const void *ret; Vdbe *p; int n; sqlite3 *db; #ifdef SQLITE_ENABLE_API_ARMOR if( pStmt==0 ){ (void)SQLITE_MISUSE_BKPT; return 0; } #endif ret = 0; p = (Vdbe *)pStmt; db = p->db; assert( db!=0 ); n = sqlite3_column_count(pStmt); if( N=0 ){ N += useType*n; sqlite3_mutex_enter(db->mutex); assert( db->mallocFailed==0 ); ret = xFunc(&p->aColName[N]); /* A malloc may have failed inside of the xFunc() call. If this ** is the case, clear the mallocFailed flag and return NULL. */ if( db->mallocFailed ){ db->mallocFailed = 0; ret = 0; } sqlite3_mutex_leave(db->mutex); } return ret; } /* ** Return the name of the Nth column of the result set returned by SQL ** statement pStmt. */ SQLITE_API const char *SQLITE_STDCALL sqlite3_column_name(sqlite3_stmt *pStmt, int N){ return columnName( pStmt, N, (const void*(*)(Mem*))sqlite3_value_text, COLNAME_NAME); } #ifndef SQLITE_OMIT_UTF16 SQLITE_API const void *SQLITE_STDCALL sqlite3_column_name16(sqlite3_stmt *pStmt, int N){ return columnName( pStmt, N, (const void*(*)(Mem*))sqlite3_value_text16, COLNAME_NAME); } #endif /* ** Constraint: If you have ENABLE_COLUMN_METADATA then you must ** not define OMIT_DECLTYPE. */ #if defined(SQLITE_OMIT_DECLTYPE) && defined(SQLITE_ENABLE_COLUMN_METADATA) # error "Must not define both SQLITE_OMIT_DECLTYPE \ and SQLITE_ENABLE_COLUMN_METADATA" #endif #ifndef SQLITE_OMIT_DECLTYPE /* ** Return the column declaration type (if applicable) of the 'i'th column ** of the result set of SQL statement pStmt. */ SQLITE_API const char *SQLITE_STDCALL sqlite3_column_decltype(sqlite3_stmt *pStmt, int N){ return columnName( pStmt, N, (const void*(*)(Mem*))sqlite3_value_text, COLNAME_DECLTYPE); } #ifndef SQLITE_OMIT_UTF16 SQLITE_API const void *SQLITE_STDCALL sqlite3_column_decltype16(sqlite3_stmt *pStmt, int N){ return columnName( pStmt, N, (const void*(*)(Mem*))sqlite3_value_text16, COLNAME_DECLTYPE); } #endif /* SQLITE_OMIT_UTF16 */ #endif /* SQLITE_OMIT_DECLTYPE */ #ifdef SQLITE_ENABLE_COLUMN_METADATA /* ** Return the name of the database from which a result column derives. ** NULL is returned if the result column is an expression or constant or ** anything else which is not an unambiguous reference to a database column. */ SQLITE_API const char *SQLITE_STDCALL sqlite3_column_database_name(sqlite3_stmt *pStmt, int N){ return columnName( pStmt, N, (const void*(*)(Mem*))sqlite3_value_text, COLNAME_DATABASE); } #ifndef SQLITE_OMIT_UTF16 SQLITE_API const void *SQLITE_STDCALL sqlite3_column_database_name16(sqlite3_stmt *pStmt, int N){ return columnName( pStmt, N, (const void*(*)(Mem*))sqlite3_value_text16, COLNAME_DATABASE); } #endif /* SQLITE_OMIT_UTF16 */ /* ** Return the name of the table from which a result column derives. ** NULL is returned if the result column is an expression or constant or ** anything else which is not an unambiguous reference to a database column. */ SQLITE_API const char *SQLITE_STDCALL sqlite3_column_table_name(sqlite3_stmt *pStmt, int N){ return columnName( pStmt, N, (const void*(*)(Mem*))sqlite3_value_text, COLNAME_TABLE); } #ifndef SQLITE_OMIT_UTF16 SQLITE_API const void *SQLITE_STDCALL sqlite3_column_table_name16(sqlite3_stmt *pStmt, int N){ return columnName( pStmt, N, (const void*(*)(Mem*))sqlite3_value_text16, COLNAME_TABLE); } #endif /* SQLITE_OMIT_UTF16 */ /* ** Return the name of the table column from which a result column derives. 
** NULL is returned if the result column is an expression or constant or ** anything else which is not an unambiguous reference to a database column. */ SQLITE_API const char *SQLITE_STDCALL sqlite3_column_origin_name(sqlite3_stmt *pStmt, int N){ return columnName( pStmt, N, (const void*(*)(Mem*))sqlite3_value_text, COLNAME_COLUMN); } #ifndef SQLITE_OMIT_UTF16 SQLITE_API const void *SQLITE_STDCALL sqlite3_column_origin_name16(sqlite3_stmt *pStmt, int N){ return columnName( pStmt, N, (const void*(*)(Mem*))sqlite3_value_text16, COLNAME_COLUMN); } #endif /* SQLITE_OMIT_UTF16 */ #endif /* SQLITE_ENABLE_COLUMN_METADATA */ /******************************* sqlite3_bind_ *************************** ** ** Routines used to attach values to wildcards in a compiled SQL statement. */ /* ** Unbind the value bound to variable i in virtual machine p. This is the ** the same as binding a NULL value to the column. If the "i" parameter is ** out of range, then SQLITE_RANGE is returned. Othewise SQLITE_OK. ** ** A successful evaluation of this routine acquires the mutex on p. ** the mutex is released if any kind of error occurs. ** ** The error code stored in database p->db is overwritten with the return ** value in any case. */ static int vdbeUnbind(Vdbe *p, int i){ Mem *pVar; if( vdbeSafetyNotNull(p) ){ return SQLITE_MISUSE_BKPT; } sqlite3_mutex_enter(p->db->mutex); if( p->magic!=VDBE_MAGIC_RUN || p->pc>=0 ){ sqlite3Error(p->db, SQLITE_MISUSE); sqlite3_mutex_leave(p->db->mutex); sqlite3_log(SQLITE_MISUSE, "bind on a busy prepared statement: [%s]", p->zSql); return SQLITE_MISUSE_BKPT; } if( i<1 || i>p->nVar ){ sqlite3Error(p->db, SQLITE_RANGE); sqlite3_mutex_leave(p->db->mutex); return SQLITE_RANGE; } i--; pVar = &p->aVar[i]; sqlite3VdbeMemRelease(pVar); pVar->flags = MEM_Null; sqlite3Error(p->db, SQLITE_OK); /* If the bit corresponding to this variable in Vdbe.expmask is set, then ** binding a new value to this variable invalidates the current query plan. ** ** IMPLEMENTATION-OF: R-48440-37595 If the specific value bound to host ** parameter in the WHERE clause might influence the choice of query plan ** for a statement, then the statement will be automatically recompiled, ** as if there had been a schema change, on the first sqlite3_step() call ** following any change to the bindings of that parameter. */ if( p->isPrepareV2 && ((i<32 && p->expmask & ((u32)1 << i)) || p->expmask==0xffffffff) ){ p->expired = 1; } return SQLITE_OK; } /* ** Bind a text or BLOB value. */ static int bindText( sqlite3_stmt *pStmt, /* The statement to bind against */ int i, /* Index of the parameter to bind */ const void *zData, /* Pointer to the data to be bound */ int nData, /* Number of bytes of data to be bound */ void (*xDel)(void*), /* Destructor for the data */ u8 encoding /* Encoding for the data */ ){ Vdbe *p = (Vdbe *)pStmt; Mem *pVar; int rc; rc = vdbeUnbind(p, i); if( rc==SQLITE_OK ){ if( zData!=0 ){ pVar = &p->aVar[i-1]; rc = sqlite3VdbeMemSetStr(pVar, zData, nData, encoding, xDel); if( rc==SQLITE_OK && encoding!=0 ){ rc = sqlite3VdbeChangeEncoding(pVar, ENC(p->db)); } sqlite3Error(p->db, rc); rc = sqlite3ApiExit(p->db, rc); } sqlite3_mutex_leave(p->db->mutex); }else if( xDel!=SQLITE_STATIC && xDel!=SQLITE_TRANSIENT ){ xDel((void*)zData); } return rc; } /* ** Bind a blob value to an SQL statement variable. 
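**
** (Editor's illustrative sketch, not original SQLite commentary: for a
** hypothetical statement "INSERT INTO t(img) VALUES(?1)" an application
** would typically call
**
**   sqlite3_bind_blob(pStmt, 1, pData, nData, SQLITE_TRANSIENT);
**
** where pData/nData describe an application buffer.  SQLITE_TRANSIENT asks
** SQLite to make a private copy of the bytes, while SQLITE_STATIC promises
** the buffer remains valid until the binding is replaced or the statement
** is finalized.)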
*/ SQLITE_API int SQLITE_STDCALL sqlite3_bind_blob( sqlite3_stmt *pStmt, int i, const void *zData, int nData, void (*xDel)(void*) ){ return bindText(pStmt, i, zData, nData, xDel, 0); } SQLITE_API int SQLITE_STDCALL sqlite3_bind_blob64( sqlite3_stmt *pStmt, int i, const void *zData, sqlite3_uint64 nData, void (*xDel)(void*) ){ assert( xDel!=SQLITE_DYNAMIC ); if( nData>0x7fffffff ){ return invokeValueDestructor(zData, xDel, 0); }else{ return bindText(pStmt, i, zData, (int)nData, xDel, 0); } } SQLITE_API int SQLITE_STDCALL sqlite3_bind_double(sqlite3_stmt *pStmt, int i, double rValue){ int rc; Vdbe *p = (Vdbe *)pStmt; rc = vdbeUnbind(p, i); if( rc==SQLITE_OK ){ sqlite3VdbeMemSetDouble(&p->aVar[i-1], rValue); sqlite3_mutex_leave(p->db->mutex); } return rc; } SQLITE_API int SQLITE_STDCALL sqlite3_bind_int(sqlite3_stmt *p, int i, int iValue){ return sqlite3_bind_int64(p, i, (i64)iValue); } SQLITE_API int SQLITE_STDCALL sqlite3_bind_int64(sqlite3_stmt *pStmt, int i, sqlite_int64 iValue){ int rc; Vdbe *p = (Vdbe *)pStmt; rc = vdbeUnbind(p, i); if( rc==SQLITE_OK ){ sqlite3VdbeMemSetInt64(&p->aVar[i-1], iValue); sqlite3_mutex_leave(p->db->mutex); } return rc; } SQLITE_API int SQLITE_STDCALL sqlite3_bind_null(sqlite3_stmt *pStmt, int i){ int rc; Vdbe *p = (Vdbe*)pStmt; rc = vdbeUnbind(p, i); if( rc==SQLITE_OK ){ sqlite3_mutex_leave(p->db->mutex); } return rc; } SQLITE_API int SQLITE_STDCALL sqlite3_bind_text( sqlite3_stmt *pStmt, int i, const char *zData, int nData, void (*xDel)(void*) ){ return bindText(pStmt, i, zData, nData, xDel, SQLITE_UTF8); } SQLITE_API int SQLITE_STDCALL sqlite3_bind_text64( sqlite3_stmt *pStmt, int i, const char *zData, sqlite3_uint64 nData, void (*xDel)(void*), unsigned char enc ){ assert( xDel!=SQLITE_DYNAMIC ); if( nData>0x7fffffff ){ return invokeValueDestructor(zData, xDel, 0); }else{ if( enc==SQLITE_UTF16 ) enc = SQLITE_UTF16NATIVE; return bindText(pStmt, i, zData, (int)nData, xDel, enc); } } #ifndef SQLITE_OMIT_UTF16 SQLITE_API int SQLITE_STDCALL sqlite3_bind_text16( sqlite3_stmt *pStmt, int i, const void *zData, int nData, void (*xDel)(void*) ){ return bindText(pStmt, i, zData, nData, xDel, SQLITE_UTF16NATIVE); } #endif /* SQLITE_OMIT_UTF16 */ SQLITE_API int SQLITE_STDCALL sqlite3_bind_value(sqlite3_stmt *pStmt, int i, const sqlite3_value *pValue){ int rc; switch( sqlite3_value_type((sqlite3_value*)pValue) ){ case SQLITE_INTEGER: { rc = sqlite3_bind_int64(pStmt, i, pValue->u.i); break; } case SQLITE_FLOAT: { rc = sqlite3_bind_double(pStmt, i, pValue->u.r); break; } case SQLITE_BLOB: { if( pValue->flags & MEM_Zero ){ rc = sqlite3_bind_zeroblob(pStmt, i, pValue->u.nZero); }else{ rc = sqlite3_bind_blob(pStmt, i, pValue->z, pValue->n,SQLITE_TRANSIENT); } break; } case SQLITE_TEXT: { rc = bindText(pStmt,i, pValue->z, pValue->n, SQLITE_TRANSIENT, pValue->enc); break; } default: { rc = sqlite3_bind_null(pStmt, i); break; } } return rc; } SQLITE_API int SQLITE_STDCALL sqlite3_bind_zeroblob(sqlite3_stmt *pStmt, int i, int n){ int rc; Vdbe *p = (Vdbe *)pStmt; rc = vdbeUnbind(p, i); if( rc==SQLITE_OK ){ sqlite3VdbeMemSetZeroBlob(&p->aVar[i-1], n); sqlite3_mutex_leave(p->db->mutex); } return rc; } SQLITE_API int SQLITE_STDCALL sqlite3_bind_zeroblob64(sqlite3_stmt *pStmt, int i, sqlite3_uint64 n){ int rc; Vdbe *p = (Vdbe *)pStmt; sqlite3_mutex_enter(p->db->mutex); if( n>(u64)p->db->aLimit[SQLITE_LIMIT_LENGTH] ){ rc = SQLITE_TOOBIG; }else{ assert( (n & 0x7FFFFFFF)==n ); rc = sqlite3_bind_zeroblob(pStmt, i, n); } rc = sqlite3ApiExit(p->db, rc); sqlite3_mutex_leave(p->db->mutex); 
return rc; } /* ** Return the number of wildcards that can be potentially bound to. ** This routine is added to support DBD::SQLite. */ SQLITE_API int SQLITE_STDCALL sqlite3_bind_parameter_count(sqlite3_stmt *pStmt){ Vdbe *p = (Vdbe*)pStmt; return p ? p->nVar : 0; } /* ** Return the name of a wildcard parameter. Return NULL if the index ** is out of range or if the wildcard is unnamed. ** ** The result is always UTF-8. */ SQLITE_API const char *SQLITE_STDCALL sqlite3_bind_parameter_name(sqlite3_stmt *pStmt, int i){ Vdbe *p = (Vdbe*)pStmt; if( p==0 || i<1 || i>p->nzVar ){ return 0; } return p->azVar[i-1]; } /* ** Given a wildcard parameter name, return the index of the variable ** with that name. If there is no variable with the given name, ** return 0. */ SQLITE_PRIVATE int sqlite3VdbeParameterIndex(Vdbe *p, const char *zName, int nName){ int i; if( p==0 ){ return 0; } if( zName ){ for(i=0; inzVar; i++){ const char *z = p->azVar[i]; if( z && strncmp(z,zName,nName)==0 && z[nName]==0 ){ return i+1; } } } return 0; } SQLITE_API int SQLITE_STDCALL sqlite3_bind_parameter_index(sqlite3_stmt *pStmt, const char *zName){ return sqlite3VdbeParameterIndex((Vdbe*)pStmt, zName, sqlite3Strlen30(zName)); } /* ** Transfer all bindings from the first statement over to the second. */ SQLITE_PRIVATE int sqlite3TransferBindings(sqlite3_stmt *pFromStmt, sqlite3_stmt *pToStmt){ Vdbe *pFrom = (Vdbe*)pFromStmt; Vdbe *pTo = (Vdbe*)pToStmt; int i; assert( pTo->db==pFrom->db ); assert( pTo->nVar==pFrom->nVar ); sqlite3_mutex_enter(pTo->db->mutex); for(i=0; inVar; i++){ sqlite3VdbeMemMove(&pTo->aVar[i], &pFrom->aVar[i]); } sqlite3_mutex_leave(pTo->db->mutex); return SQLITE_OK; } #ifndef SQLITE_OMIT_DEPRECATED /* ** Deprecated external interface. Internal/core SQLite code ** should call sqlite3TransferBindings. ** ** It is misuse to call this routine with statements from different ** database connections. But as this is a deprecated interface, we ** will not bother to check for that condition. ** ** If the two statements contain a different number of bindings, then ** an SQLITE_ERROR is returned. Nothing else can go wrong, so otherwise ** SQLITE_OK is returned. */ SQLITE_API int SQLITE_STDCALL sqlite3_transfer_bindings(sqlite3_stmt *pFromStmt, sqlite3_stmt *pToStmt){ Vdbe *pFrom = (Vdbe*)pFromStmt; Vdbe *pTo = (Vdbe*)pToStmt; if( pFrom->nVar!=pTo->nVar ){ return SQLITE_ERROR; } if( pTo->isPrepareV2 && pTo->expmask ){ pTo->expired = 1; } if( pFrom->isPrepareV2 && pFrom->expmask ){ pFrom->expired = 1; } return sqlite3TransferBindings(pFromStmt, pToStmt); } #endif /* ** Return the sqlite3* database handle to which the prepared statement given ** in the argument belongs. This is the same database handle that was ** the first argument to the sqlite3_prepare() that was used to create ** the statement in the first place. */ SQLITE_API sqlite3 *SQLITE_STDCALL sqlite3_db_handle(sqlite3_stmt *pStmt){ return pStmt ? ((Vdbe*)pStmt)->db : 0; } /* ** Return true if the prepared statement is guaranteed to not modify the ** database. */ SQLITE_API int SQLITE_STDCALL sqlite3_stmt_readonly(sqlite3_stmt *pStmt){ return pStmt ? ((Vdbe*)pStmt)->readOnly : 1; } /* ** Return true if the prepared statement is in need of being reset. */ SQLITE_API int SQLITE_STDCALL sqlite3_stmt_busy(sqlite3_stmt *pStmt){ Vdbe *v = (Vdbe*)pStmt; return v!=0 && v->pc>=0 && v->magic==VDBE_MAGIC_RUN; } /* ** Return a pointer to the next prepared statement after pStmt associated ** with database connection pDb. 
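**
** (Editor's illustrative sketch, not original SQLite commentary: the usual
** way to visit every prepared statement of a connection is
**
**   int nStmt = 0;
**   sqlite3_stmt *p;
**   for(p=sqlite3_next_stmt(db, 0); p; p=sqlite3_next_stmt(db, p)){ nStmt++; }
**
** where db is the connection of interest and nStmt ends up holding the
** number of outstanding statements.)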
If pStmt is NULL, return the first ** prepared statement for the database connection. Return NULL if there ** are no more. */ SQLITE_API sqlite3_stmt *SQLITE_STDCALL sqlite3_next_stmt(sqlite3 *pDb, sqlite3_stmt *pStmt){ sqlite3_stmt *pNext; #ifdef SQLITE_ENABLE_API_ARMOR if( !sqlite3SafetyCheckOk(pDb) ){ (void)SQLITE_MISUSE_BKPT; return 0; } #endif sqlite3_mutex_enter(pDb->mutex); if( pStmt==0 ){ pNext = (sqlite3_stmt*)pDb->pVdbe; }else{ pNext = (sqlite3_stmt*)((Vdbe*)pStmt)->pNext; } sqlite3_mutex_leave(pDb->mutex); return pNext; } /* ** Return the value of a status counter for a prepared statement */ SQLITE_API int SQLITE_STDCALL sqlite3_stmt_status(sqlite3_stmt *pStmt, int op, int resetFlag){ Vdbe *pVdbe = (Vdbe*)pStmt; u32 v; #ifdef SQLITE_ENABLE_API_ARMOR if( !pStmt ){ (void)SQLITE_MISUSE_BKPT; return 0; } #endif v = pVdbe->aCounter[op]; if( resetFlag ) pVdbe->aCounter[op] = 0; return (int)v; } #ifdef SQLITE_ENABLE_STMT_SCANSTATUS /* ** Return status data for a single loop within query pStmt. */ SQLITE_API int SQLITE_STDCALL sqlite3_stmt_scanstatus( sqlite3_stmt *pStmt, /* Prepared statement being queried */ int idx, /* Index of loop to report on */ int iScanStatusOp, /* Which metric to return */ void *pOut /* OUT: Write the answer here */ ){ Vdbe *p = (Vdbe*)pStmt; ScanStatus *pScan; if( idx<0 || idx>=p->nScan ) return 1; pScan = &p->aScan[idx]; switch( iScanStatusOp ){ case SQLITE_SCANSTAT_NLOOP: { *(sqlite3_int64*)pOut = p->anExec[pScan->addrLoop]; break; } case SQLITE_SCANSTAT_NVISIT: { *(sqlite3_int64*)pOut = p->anExec[pScan->addrVisit]; break; } case SQLITE_SCANSTAT_EST: { double r = 1.0; LogEst x = pScan->nEst; while( x<100 ){ x += 10; r *= 0.5; } *(double*)pOut = r*sqlite3LogEstToInt(x); break; } case SQLITE_SCANSTAT_NAME: { *(const char**)pOut = pScan->zName; break; } case SQLITE_SCANSTAT_EXPLAIN: { if( pScan->addrExplain ){ *(const char**)pOut = p->aOp[ pScan->addrExplain ].p4.z; }else{ *(const char**)pOut = 0; } break; } case SQLITE_SCANSTAT_SELECTID: { if( pScan->addrExplain ){ *(int*)pOut = p->aOp[ pScan->addrExplain ].p1; }else{ *(int*)pOut = -1; } break; } default: { return 1; } } return 0; } /* ** Zero all counters associated with the sqlite3_stmt_scanstatus() data. */ SQLITE_API void SQLITE_STDCALL sqlite3_stmt_scanstatus_reset(sqlite3_stmt *pStmt){ Vdbe *p = (Vdbe*)pStmt; memset(p->anExec, 0, p->nOp * sizeof(i64)); } #endif /* SQLITE_ENABLE_STMT_SCANSTATUS */ /************** End of vdbeapi.c *********************************************/ /************** Begin file vdbetrace.c ***************************************/ /* ** 2009 November 25 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** ** This file contains code used to insert the values of host parameters ** (aka "wildcards") into the SQL text output by sqlite3_trace(). ** ** The Vdbe parse-tree explainer is also found here. */ /* #include "sqliteInt.h" */ /* #include "vdbeInt.h" */ #ifndef SQLITE_OMIT_TRACE /* ** zSql is a zero-terminated string of UTF-8 SQL text. Return the number of ** bytes in this text up to but excluding the first character in ** a host parameter. If the text contains no host parameters, return ** the total number of bytes in the text. 
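**
** (Editor's worked example, added for illustration: for the hypothetical
** input zSql = "SELECT * FROM t WHERE a=?1" this routine returns 24, the
** number of bytes preceding the '?', and sets *pnToken to 2, the length of
** the "?1" token.  For "SELECT 1", which contains no host parameter, it
** returns 8 and leaves *pnToken set to 0.)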
*/ static int findNextHostParameter(const char *zSql, int *pnToken){ int tokenType; int nTotal = 0; int n; *pnToken = 0; while( zSql[0] ){ n = sqlite3GetToken((u8*)zSql, &tokenType); assert( n>0 && tokenType!=TK_ILLEGAL ); if( tokenType==TK_VARIABLE ){ *pnToken = n; break; } nTotal += n; zSql += n; } return nTotal; } /* ** This function returns a pointer to a nul-terminated string in memory ** obtained from sqlite3DbMalloc(). If sqlite3.nVdbeExec is 1, then the ** string contains a copy of zRawSql but with host parameters expanded to ** their current bindings. Or, if sqlite3.nVdbeExec is greater than 1, ** then the returned string holds a copy of zRawSql with "-- " prepended ** to each line of text. ** ** If the SQLITE_TRACE_SIZE_LIMIT macro is defined to an integer, then ** then long strings and blobs are truncated to that many bytes. This ** can be used to prevent unreasonably large trace strings when dealing ** with large (multi-megabyte) strings and blobs. ** ** The calling function is responsible for making sure the memory returned ** is eventually freed. ** ** ALGORITHM: Scan the input string looking for host parameters in any of ** these forms: ?, ?N, $A, @A, :A. Take care to avoid text within ** string literals, quoted identifier names, and comments. For text forms, ** the host parameter index is found by scanning the prepared ** statement for the corresponding OP_Variable opcode. Once the host ** parameter index is known, locate the value in p->aVar[]. Then render ** the value as a literal in place of the host parameter name. */ SQLITE_PRIVATE char *sqlite3VdbeExpandSql( Vdbe *p, /* The prepared statement being evaluated */ const char *zRawSql /* Raw text of the SQL statement */ ){ sqlite3 *db; /* The database connection */ int idx = 0; /* Index of a host parameter */ int nextIndex = 1; /* Index of next ? host parameter */ int n; /* Length of a token prefix */ int nToken; /* Length of the parameter token */ int i; /* Loop counter */ Mem *pVar; /* Value of a host parameter */ StrAccum out; /* Accumulate the output here */ char zBase[100]; /* Initial working space */ db = p->db; sqlite3StrAccumInit(&out, db, zBase, sizeof(zBase), db->aLimit[SQLITE_LIMIT_LENGTH]); if( db->nVdbeExec>1 ){ while( *zRawSql ){ const char *zStart = zRawSql; while( *(zRawSql++)!='\n' && *zRawSql ); sqlite3StrAccumAppend(&out, "-- ", 3); assert( (zRawSql - zStart) > 0 ); sqlite3StrAccumAppend(&out, zStart, (int)(zRawSql-zStart)); } }else if( p->nVar==0 ){ sqlite3StrAccumAppend(&out, zRawSql, sqlite3Strlen30(zRawSql)); }else{ while( zRawSql[0] ){ n = findNextHostParameter(zRawSql, &nToken); assert( n>0 ); sqlite3StrAccumAppend(&out, zRawSql, n); zRawSql += n; assert( zRawSql[0] || nToken==0 ); if( nToken==0 ) break; if( zRawSql[0]=='?' 
){ if( nToken>1 ){ assert( sqlite3Isdigit(zRawSql[1]) ); sqlite3GetInt32(&zRawSql[1], &idx); }else{ idx = nextIndex; } }else{ assert( zRawSql[0]==':' || zRawSql[0]=='$' || zRawSql[0]=='@' || zRawSql[0]=='#' ); testcase( zRawSql[0]==':' ); testcase( zRawSql[0]=='$' ); testcase( zRawSql[0]=='@' ); testcase( zRawSql[0]=='#' ); idx = sqlite3VdbeParameterIndex(p, zRawSql, nToken); assert( idx>0 ); } zRawSql += nToken; nextIndex = idx + 1; assert( idx>0 && idx<=p->nVar ); pVar = &p->aVar[idx-1]; if( pVar->flags & MEM_Null ){ sqlite3StrAccumAppend(&out, "NULL", 4); }else if( pVar->flags & MEM_Int ){ sqlite3XPrintf(&out, 0, "%lld", pVar->u.i); }else if( pVar->flags & MEM_Real ){ sqlite3XPrintf(&out, 0, "%!.15g", pVar->u.r); }else if( pVar->flags & MEM_Str ){ int nOut; /* Number of bytes of the string text to include in output */ #ifndef SQLITE_OMIT_UTF16 u8 enc = ENC(db); Mem utf8; if( enc!=SQLITE_UTF8 ){ memset(&utf8, 0, sizeof(utf8)); utf8.db = db; sqlite3VdbeMemSetStr(&utf8, pVar->z, pVar->n, enc, SQLITE_STATIC); sqlite3VdbeChangeEncoding(&utf8, SQLITE_UTF8); pVar = &utf8; } #endif nOut = pVar->n; #ifdef SQLITE_TRACE_SIZE_LIMIT if( nOut>SQLITE_TRACE_SIZE_LIMIT ){ nOut = SQLITE_TRACE_SIZE_LIMIT; while( nOutn && (pVar->z[nOut]&0xc0)==0x80 ){ nOut++; } } #endif sqlite3XPrintf(&out, 0, "'%.*q'", nOut, pVar->z); #ifdef SQLITE_TRACE_SIZE_LIMIT if( nOutn ){ sqlite3XPrintf(&out, 0, "/*+%d bytes*/", pVar->n-nOut); } #endif #ifndef SQLITE_OMIT_UTF16 if( enc!=SQLITE_UTF8 ) sqlite3VdbeMemRelease(&utf8); #endif }else if( pVar->flags & MEM_Zero ){ sqlite3XPrintf(&out, 0, "zeroblob(%d)", pVar->u.nZero); }else{ int nOut; /* Number of bytes of the blob to include in output */ assert( pVar->flags & MEM_Blob ); sqlite3StrAccumAppend(&out, "x'", 2); nOut = pVar->n; #ifdef SQLITE_TRACE_SIZE_LIMIT if( nOut>SQLITE_TRACE_SIZE_LIMIT ) nOut = SQLITE_TRACE_SIZE_LIMIT; #endif for(i=0; iz[i]&0xff); } sqlite3StrAccumAppend(&out, "'", 1); #ifdef SQLITE_TRACE_SIZE_LIMIT if( nOutn ){ sqlite3XPrintf(&out, 0, "/*+%d bytes*/", pVar->n-nOut); } #endif } } } return sqlite3StrAccumFinish(&out); } #endif /* #ifndef SQLITE_OMIT_TRACE */ /************** End of vdbetrace.c *******************************************/ /************** Begin file vdbe.c ********************************************/ /* ** 2001 September 15 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** The code in this file implements the function that runs the ** bytecode of a prepared statement. ** ** Various scripts scan this source file in order to generate HTML ** documentation, headers files, or other derived files. The formatting ** of the code in this file is, therefore, important. See other comments ** in this file for details. If in doubt, do not deviate from existing ** commenting and indentation practices when changing or adding code. */ /* #include "sqliteInt.h" */ /* #include "vdbeInt.h" */ /* ** Invoke this macro on memory cells just prior to changing the ** value of the cell. This macro verifies that shallow copies are ** not misused. A shallow copy of a string or blob just copies a ** pointer to the string or blob, not the content. If the original ** is changed while the copy is still in use, the string or blob might ** be changed out from under the copy. 
This macro verifies that nothing ** like that ever happens. */ #ifdef SQLITE_DEBUG # define memAboutToChange(P,M) sqlite3VdbeMemAboutToChange(P,M) #else # define memAboutToChange(P,M) #endif /* ** The following global variable is incremented every time a cursor ** moves, either by the OP_SeekXX, OP_Next, or OP_Prev opcodes. The test ** procedures use this information to make sure that indices are ** working correctly. This variable has no function other than to ** help verify the correct operation of the library. */ #ifdef SQLITE_TEST SQLITE_API int sqlite3_search_count = 0; #endif /* ** When this global variable is positive, it gets decremented once before ** each instruction in the VDBE. When it reaches zero, the u1.isInterrupted ** field of the sqlite3 structure is set in order to simulate an interrupt. ** ** This facility is used for testing purposes only. It does not function ** in an ordinary build. */ #ifdef SQLITE_TEST SQLITE_API int sqlite3_interrupt_count = 0; #endif /* ** The next global variable is incremented each type the OP_Sort opcode ** is executed. The test procedures use this information to make sure that ** sorting is occurring or not occurring at appropriate times. This variable ** has no function other than to help verify the correct operation of the ** library. */ #ifdef SQLITE_TEST SQLITE_API int sqlite3_sort_count = 0; #endif /* ** The next global variable records the size of the largest MEM_Blob ** or MEM_Str that has been used by a VDBE opcode. The test procedures ** use this information to make sure that the zero-blob functionality ** is working correctly. This variable has no function other than to ** help verify the correct operation of the library. */ #ifdef SQLITE_TEST SQLITE_API int sqlite3_max_blobsize = 0; static void updateMaxBlobsize(Mem *p){ if( (p->flags & (MEM_Str|MEM_Blob))!=0 && p->n>sqlite3_max_blobsize ){ sqlite3_max_blobsize = p->n; } } #endif /* ** The next global variable is incremented each time the OP_Found opcode ** is executed. This is used to test whether or not the foreign key ** operation implemented using OP_FkIsZero is working. This variable ** has no function other than to help verify the correct operation of the ** library. */ #ifdef SQLITE_TEST SQLITE_API int sqlite3_found_count = 0; #endif /* ** Test a register to see if it exceeds the current maximum blob size. ** If it does, record the new maximum blob size. */ #if defined(SQLITE_TEST) && !defined(SQLITE_OMIT_BUILTIN_TEST) # define UPDATE_MAX_BLOBSIZE(P) updateMaxBlobsize(P) #else # define UPDATE_MAX_BLOBSIZE(P) #endif /* ** Invoke the VDBE coverage callback, if that callback is defined. This ** feature is used for test suite validation only and does not appear an ** production builds. ** ** M is an integer, 2 or 3, that indices how many different ways the ** branch can go. It is usually 2. "I" is the direction the branch ** goes. 0 means falls through. 1 means branch is taken. 2 means the ** second alternative branch is taken. ** ** iSrcLine is the source code line (from the __LINE__ macro) that ** generated the VDBE instruction. This instrumentation assumes that all ** source code is in a single file (the amalgamation). Special values 1 ** and 2 for the iSrcLine parameter mean that this particular branch is ** always taken or never taken, respectively. 
*/ #if !defined(SQLITE_VDBE_COVERAGE) # define VdbeBranchTaken(I,M) #else # define VdbeBranchTaken(I,M) vdbeTakeBranch(pOp->iSrcLine,I,M) static void vdbeTakeBranch(int iSrcLine, u8 I, u8 M){ if( iSrcLine<=2 && ALWAYS(iSrcLine>0) ){ M = iSrcLine; /* Assert the truth of VdbeCoverageAlwaysTaken() and ** VdbeCoverageNeverTaken() */ assert( (M & I)==I ); }else{ if( sqlite3GlobalConfig.xVdbeBranch==0 ) return; /*NO_TEST*/ sqlite3GlobalConfig.xVdbeBranch(sqlite3GlobalConfig.pVdbeBranchArg, iSrcLine,I,M); } } #endif /* ** Convert the given register into a string if it isn't one ** already. Return non-zero if a malloc() fails. */ #define Stringify(P, enc) \ if(((P)->flags&(MEM_Str|MEM_Blob))==0 && sqlite3VdbeMemStringify(P,enc,0)) \ { goto no_mem; } /* ** An ephemeral string value (signified by the MEM_Ephem flag) contains ** a pointer to a dynamically allocated string where some other entity ** is responsible for deallocating that string. Because the register ** does not control the string, it might be deleted without the register ** knowing it. ** ** This routine converts an ephemeral string into a dynamically allocated ** string that the register itself controls. In other words, it ** converts an MEM_Ephem string into a string with P.z==P.zMalloc. */ #define Deephemeralize(P) \ if( ((P)->flags&MEM_Ephem)!=0 \ && sqlite3VdbeMemMakeWriteable(P) ){ goto no_mem;} /* Return true if the cursor was opened using the OP_OpenSorter opcode. */ #define isSorter(x) ((x)->pSorter!=0) /* ** Allocate VdbeCursor number iCur. Return a pointer to it. Return NULL ** if we run out of memory. */ static VdbeCursor *allocateCursor( Vdbe *p, /* The virtual machine */ int iCur, /* Index of the new VdbeCursor */ int nField, /* Number of fields in the table or index */ int iDb, /* Database the cursor belongs to, or -1 */ int isBtreeCursor /* True for B-Tree. False for pseudo-table or vtab */ ){ /* Find the memory cell that will be used to store the blob of memory ** required for this VdbeCursor structure. It is convenient to use a ** vdbe memory cell to manage the memory allocation required for a ** VdbeCursor structure for the following reasons: ** ** * Sometimes cursor numbers are used for a couple of different ** purposes in a vdbe program. The different uses might require ** different sized allocations. Memory cells provide growable ** allocations. ** ** * When using ENABLE_MEMORY_MANAGEMENT, memory cell buffers can ** be freed lazily via the sqlite3_release_memory() API. This ** minimizes the number of malloc calls made by the system. ** ** Memory cells for cursors are allocated at the top of the address ** space. Memory cell (p->nMem) corresponds to cursor 0. Space for ** cursor 1 is managed by memory cell (p->nMem-1), etc. */ Mem *pMem = &p->aMem[p->nMem-iCur]; int nByte; VdbeCursor *pCx = 0; nByte = ROUND8(sizeof(VdbeCursor)) + 2*sizeof(u32)*nField + (isBtreeCursor?sqlite3BtreeCursorSize():0); assert( iCurnCursor ); if( p->apCsr[iCur] ){ sqlite3VdbeFreeCursor(p, p->apCsr[iCur]); p->apCsr[iCur] = 0; } if( SQLITE_OK==sqlite3VdbeMemClearAndResize(pMem, nByte) ){ p->apCsr[iCur] = pCx = (VdbeCursor*)pMem->z; memset(pCx, 0, sizeof(VdbeCursor)); pCx->iDb = iDb; pCx->nField = nField; pCx->aOffset = &pCx->aType[nField]; if( isBtreeCursor ){ pCx->pCursor = (BtCursor*) &pMem->z[ROUND8(sizeof(VdbeCursor))+2*sizeof(u32)*nField]; sqlite3BtreeCursorZero(pCx->pCursor); } } return pCx; } /* ** Try to convert a value into a numeric representation if we can ** do so without loss of information. 
In other words, if the string ** looks like a number, convert it into a number. If it does not ** look like a number, leave it alone. ** ** If the bTryForInt flag is true, then extra effort is made to give ** an integer representation. Strings that look like floating point ** values but which have no fractional component (example: '48.00') ** will have a MEM_Int representation when bTryForInt is true. ** ** If bTryForInt is false, then if the input string contains a decimal ** point or exponential notation, the result is only MEM_Real, even ** if there is an exact integer representation of the quantity. */ static void applyNumericAffinity(Mem *pRec, int bTryForInt){ double rValue; i64 iValue; u8 enc = pRec->enc; assert( (pRec->flags & (MEM_Str|MEM_Int|MEM_Real))==MEM_Str ); if( sqlite3AtoF(pRec->z, &rValue, pRec->n, enc)==0 ) return; if( 0==sqlite3Atoi64(pRec->z, &iValue, pRec->n, enc) ){ pRec->u.i = iValue; pRec->flags |= MEM_Int; }else{ pRec->u.r = rValue; pRec->flags |= MEM_Real; if( bTryForInt ) sqlite3VdbeIntegerAffinity(pRec); } } /* ** Processing is determine by the affinity parameter: ** ** SQLITE_AFF_INTEGER: ** SQLITE_AFF_REAL: ** SQLITE_AFF_NUMERIC: ** Try to convert pRec to an integer representation or a ** floating-point representation if an integer representation ** is not possible. Note that the integer representation is ** always preferred, even if the affinity is REAL, because ** an integer representation is more space efficient on disk. ** ** SQLITE_AFF_TEXT: ** Convert pRec to a text representation. ** ** SQLITE_AFF_BLOB: ** No-op. pRec is unchanged. */ static void applyAffinity( Mem *pRec, /* The value to apply affinity to */ char affinity, /* The affinity to be applied */ u8 enc /* Use this text encoding */ ){ if( affinity>=SQLITE_AFF_NUMERIC ){ assert( affinity==SQLITE_AFF_INTEGER || affinity==SQLITE_AFF_REAL || affinity==SQLITE_AFF_NUMERIC ); if( (pRec->flags & MEM_Int)==0 ){ if( (pRec->flags & MEM_Real)==0 ){ if( pRec->flags & MEM_Str ) applyNumericAffinity(pRec,1); }else{ sqlite3VdbeIntegerAffinity(pRec); } } }else if( affinity==SQLITE_AFF_TEXT ){ /* Only attempt the conversion to TEXT if there is an integer or real ** representation (blob and NULL do not get converted) but no string ** representation. */ if( 0==(pRec->flags&MEM_Str) && (pRec->flags&(MEM_Real|MEM_Int)) ){ sqlite3VdbeMemStringify(pRec, enc, 1); } pRec->flags &= ~(MEM_Real|MEM_Int); } } /* ** Try to convert the type of a function argument or a result column ** into a numeric representation. Use either INTEGER or REAL whichever ** is appropriate. But only do the conversion if it is possible without ** loss of information and return the revised type of the argument. */ SQLITE_API int SQLITE_STDCALL sqlite3_value_numeric_type(sqlite3_value *pVal){ int eType = sqlite3_value_type(pVal); if( eType==SQLITE_TEXT ){ Mem *pMem = (Mem*)pVal; applyNumericAffinity(pMem, 0); eType = sqlite3_value_type(pVal); } return eType; } /* ** Exported version of applyAffinity(). This one works on sqlite3_value*, ** not the internal Mem* type. */ SQLITE_PRIVATE void sqlite3ValueApplyAffinity( sqlite3_value *pVal, u8 affinity, u8 enc ){ applyAffinity((Mem *)pVal, affinity, enc); } /* ** pMem currently only holds a string type (or maybe a BLOB that we can ** interpret as a string if we want to). Compute its corresponding ** numeric type, if has one. Set the pMem->u.r and pMem->u.i fields ** accordingly. 
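**
** (Editor's worked example, added for illustration: a Mem holding the text
** "1234" yields MEM_Int with u.i==1234, the text "12.5" yields MEM_Real with
** u.r==12.5, and text such as "xyz" that does not look numeric at all yields
** 0, leaving the caller to treat the value as non-numeric.)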
*/ static u16 SQLITE_NOINLINE computeNumericType(Mem *pMem){ assert( (pMem->flags & (MEM_Int|MEM_Real))==0 ); assert( (pMem->flags & (MEM_Str|MEM_Blob))!=0 ); if( sqlite3AtoF(pMem->z, &pMem->u.r, pMem->n, pMem->enc)==0 ){ return 0; } if( sqlite3Atoi64(pMem->z, &pMem->u.i, pMem->n, pMem->enc)==SQLITE_OK ){ return MEM_Int; } return MEM_Real; } /* ** Return the numeric type for pMem, either MEM_Int or MEM_Real or both or ** none. ** ** Unlike applyNumericAffinity(), this routine does not modify pMem->flags. ** But it does set pMem->u.r and pMem->u.i appropriately. */ static u16 numericType(Mem *pMem){ if( pMem->flags & (MEM_Int|MEM_Real) ){ return pMem->flags & (MEM_Int|MEM_Real); } if( pMem->flags & (MEM_Str|MEM_Blob) ){ return computeNumericType(pMem); } return 0; } #ifdef SQLITE_DEBUG /* ** Write a nice string representation of the contents of cell pMem ** into buffer zBuf, length nBuf. */ SQLITE_PRIVATE void sqlite3VdbeMemPrettyPrint(Mem *pMem, char *zBuf){ char *zCsr = zBuf; int f = pMem->flags; static const char *const encnames[] = {"(X)", "(8)", "(16LE)", "(16BE)"}; if( f&MEM_Blob ){ int i; char c; if( f & MEM_Dyn ){ c = 'z'; assert( (f & (MEM_Static|MEM_Ephem))==0 ); }else if( f & MEM_Static ){ c = 't'; assert( (f & (MEM_Dyn|MEM_Ephem))==0 ); }else if( f & MEM_Ephem ){ c = 'e'; assert( (f & (MEM_Static|MEM_Dyn))==0 ); }else{ c = 's'; } sqlite3_snprintf(100, zCsr, "%c", c); zCsr += sqlite3Strlen30(zCsr); sqlite3_snprintf(100, zCsr, "%d[", pMem->n); zCsr += sqlite3Strlen30(zCsr); for(i=0; i<16 && in; i++){ sqlite3_snprintf(100, zCsr, "%02X", ((int)pMem->z[i] & 0xFF)); zCsr += sqlite3Strlen30(zCsr); } for(i=0; i<16 && in; i++){ char z = pMem->z[i]; if( z<32 || z>126 ) *zCsr++ = '.'; else *zCsr++ = z; } sqlite3_snprintf(100, zCsr, "]%s", encnames[pMem->enc]); zCsr += sqlite3Strlen30(zCsr); if( f & MEM_Zero ){ sqlite3_snprintf(100, zCsr,"+%dz",pMem->u.nZero); zCsr += sqlite3Strlen30(zCsr); } *zCsr = '\0'; }else if( f & MEM_Str ){ int j, k; zBuf[0] = ' '; if( f & MEM_Dyn ){ zBuf[1] = 'z'; assert( (f & (MEM_Static|MEM_Ephem))==0 ); }else if( f & MEM_Static ){ zBuf[1] = 't'; assert( (f & (MEM_Dyn|MEM_Ephem))==0 ); }else if( f & MEM_Ephem ){ zBuf[1] = 'e'; assert( (f & (MEM_Static|MEM_Dyn))==0 ); }else{ zBuf[1] = 's'; } k = 2; sqlite3_snprintf(100, &zBuf[k], "%d", pMem->n); k += sqlite3Strlen30(&zBuf[k]); zBuf[k++] = '['; for(j=0; j<15 && jn; j++){ u8 c = pMem->z[j]; if( c>=0x20 && c<0x7f ){ zBuf[k++] = c; }else{ zBuf[k++] = '.'; } } zBuf[k++] = ']'; sqlite3_snprintf(100,&zBuf[k], encnames[pMem->enc]); k += sqlite3Strlen30(&zBuf[k]); zBuf[k++] = 0; } } #endif #ifdef SQLITE_DEBUG /* ** Print the value of a register for tracing purposes: */ static void memTracePrint(Mem *p){ if( p->flags & MEM_Undefined ){ printf(" undefined"); }else if( p->flags & MEM_Null ){ printf(" NULL"); }else if( (p->flags & (MEM_Int|MEM_Str))==(MEM_Int|MEM_Str) ){ printf(" si:%lld", p->u.i); }else if( p->flags & MEM_Int ){ printf(" i:%lld", p->u.i); #ifndef SQLITE_OMIT_FLOATING_POINT }else if( p->flags & MEM_Real ){ printf(" r:%g", p->u.r); #endif }else if( p->flags & MEM_RowSet ){ printf(" (rowset)"); }else{ char zBuf[200]; sqlite3VdbeMemPrettyPrint(p, zBuf); printf(" %s", zBuf); } } static void registerTrace(int iReg, Mem *p){ printf("REG[%d] = ", iReg); memTracePrint(p); printf("\n"); } #endif #ifdef SQLITE_DEBUG # define REGISTER_TRACE(R,M) if(db->flags&SQLITE_VdbeTrace)registerTrace(R,M) #else # define REGISTER_TRACE(R,M) #endif #ifdef VDBE_PROFILE /* ** hwtime.h contains inline assembler code for implementing ** 
high-performance timing routines. */ /************** Include hwtime.h in the middle of vdbe.c *********************/ /************** Begin file hwtime.h ******************************************/ /* ** 2008 May 27 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ****************************************************************************** ** ** This file contains inline asm code for retrieving "high-performance" ** counters for x86 class CPUs. */ #ifndef _HWTIME_H_ #define _HWTIME_H_ /* ** The following routine only works on pentium-class (or newer) processors. ** It uses the RDTSC opcode to read the cycle count value out of the ** processor and returns that value. This can be used for high-res ** profiling. */ #if (defined(__GNUC__) || defined(_MSC_VER)) && \ (defined(i386) || defined(__i386__) || defined(_M_IX86)) #if defined(__GNUC__) __inline__ sqlite_uint64 sqlite3Hwtime(void){ unsigned int lo, hi; __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi)); return (sqlite_uint64)hi << 32 | lo; } #elif defined(_MSC_VER) __declspec(naked) __inline sqlite_uint64 __cdecl sqlite3Hwtime(void){ __asm { rdtsc ret ; return value at EDX:EAX } } #endif #elif (defined(__GNUC__) && defined(__x86_64__)) __inline__ sqlite_uint64 sqlite3Hwtime(void){ unsigned long val; __asm__ __volatile__ ("rdtsc" : "=A" (val)); return val; } #elif (defined(__GNUC__) && defined(__ppc__)) __inline__ sqlite_uint64 sqlite3Hwtime(void){ unsigned long long retval; unsigned long junk; __asm__ __volatile__ ("\n\ 1: mftbu %1\n\ mftb %L0\n\ mftbu %0\n\ cmpw %0,%1\n\ bne 1b" : "=r" (retval), "=r" (junk)); return retval; } #else #error Need implementation of sqlite3Hwtime() for your platform. /* ** To compile without implementing sqlite3Hwtime() for your platform, ** you can remove the above #error and use the following ** stub function. You will lose timing support for many ** of the debugging and testing utilities, but it should at ** least compile and run. */ SQLITE_PRIVATE sqlite_uint64 sqlite3Hwtime(void){ return ((sqlite_uint64)0); } #endif #endif /* !defined(_HWTIME_H_) */ /************** End of hwtime.h **********************************************/ /************** Continuing where we left off in vdbe.c ***********************/ #endif #ifndef NDEBUG /* ** This function is only called from within an assert() expression. It ** checks that the sqlite3.nTransaction variable is correctly set to ** the number of non-transaction savepoints currently in the ** linked list starting at sqlite3.pSavepoint. ** ** Usage: ** ** assert( checkSavepointCount(db) ); */ static int checkSavepointCount(sqlite3 *db){ int n = 0; Savepoint *p; for(p=db->pSavepoint; p; p=p->pNext) n++; assert( n==(db->nSavepoint + db->isTransactionSavepoint) ); return 1; } #endif /* ** Return the register of pOp->p2 after first preparing it to be ** overwritten with an integer value. */ static Mem *out2Prerelease(Vdbe *p, VdbeOp *pOp){ Mem *pOut; assert( pOp->p2>0 ); assert( pOp->p2<=(p->nMem-p->nCursor) ); pOut = &p->aMem[pOp->p2]; memAboutToChange(p, pOut); if( VdbeMemDynamic(pOut) ) sqlite3VdbeMemSetNull(pOut); pOut->flags = MEM_Int; return pOut; } /* ** Execute as much of a VDBE program as we can. ** This is the core of sqlite3_step(). 
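**
** A minimal sketch of the documented public-API path that reaches this
** routine (sqlite3_prepare_v2(), sqlite3_step() and sqlite3_finalize()
** are the published SQLite interfaces; the SQL text and variable names
** are only illustrative):
**
**     sqlite3_stmt *pStmt = 0;
**     sqlite3_prepare_v2(db, "SELECT a FROM t1", -1, &pStmt, 0);
**     while( sqlite3_step(pStmt)==SQLITE_ROW ){
**       ... read values with the sqlite3_column_xxx() interfaces ...
**     }
**     sqlite3_finalize(pStmt);
**
** Each sqlite3_step() call re-enters this interpreter loop and runs
** opcodes until a row is ready (SQLITE_ROW), the program finishes
** (SQLITE_DONE), or an error occurs.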
*/ SQLITE_PRIVATE int sqlite3VdbeExec( Vdbe *p /* The VDBE */ ){ Op *aOp = p->aOp; /* Copy of p->aOp */ Op *pOp = aOp; /* Current operation */ #if defined(SQLITE_DEBUG) || defined(VDBE_PROFILE) Op *pOrigOp; /* Value of pOp at the top of the loop */ #endif int rc = SQLITE_OK; /* Value to return */ sqlite3 *db = p->db; /* The database */ u8 resetSchemaOnFault = 0; /* Reset schema after an error if positive */ u8 encoding = ENC(db); /* The database encoding */ int iCompare = 0; /* Result of last OP_Compare operation */ unsigned nVmStep = 0; /* Number of virtual machine steps */ #ifndef SQLITE_OMIT_PROGRESS_CALLBACK unsigned nProgressLimit = 0;/* Invoke xProgress() when nVmStep reaches this */ #endif Mem *aMem = p->aMem; /* Copy of p->aMem */ Mem *pIn1 = 0; /* 1st input operand */ Mem *pIn2 = 0; /* 2nd input operand */ Mem *pIn3 = 0; /* 3rd input operand */ Mem *pOut = 0; /* Output operand */ int *aPermute = 0; /* Permutation of columns for OP_Compare */ i64 lastRowid = db->lastRowid; /* Saved value of the last insert ROWID */ #ifdef VDBE_PROFILE u64 start; /* CPU clock count at start of opcode */ #endif /*** INSERT STACK UNION HERE ***/ assert( p->magic==VDBE_MAGIC_RUN ); /* sqlite3_step() verifies this */ sqlite3VdbeEnter(p); if( p->rc==SQLITE_NOMEM ){ /* This happens if a malloc() inside a call to sqlite3_column_text() or ** sqlite3_column_text16() failed. */ goto no_mem; } assert( p->rc==SQLITE_OK || p->rc==SQLITE_BUSY ); assert( p->bIsReader || p->readOnly!=0 ); p->rc = SQLITE_OK; p->iCurrentTime = 0; assert( p->explain==0 ); p->pResultSet = 0; db->busyHandler.nBusy = 0; if( db->u1.isInterrupted ) goto abort_due_to_interrupt; sqlite3VdbeIOTraceSql(p); #ifndef SQLITE_OMIT_PROGRESS_CALLBACK if( db->xProgress ){ u32 iPrior = p->aCounter[SQLITE_STMTSTATUS_VM_STEP]; assert( 0 < db->nProgressOps ); nProgressLimit = db->nProgressOps - (iPrior % db->nProgressOps); } #endif #ifdef SQLITE_DEBUG sqlite3BeginBenignMalloc(); if( p->pc==0 && (p->db->flags & (SQLITE_VdbeListing|SQLITE_VdbeEQP|SQLITE_VdbeTrace))!=0 ){ int i; int once = 1; sqlite3VdbePrintSql(p); if( p->db->flags & SQLITE_VdbeListing ){ printf("VDBE Program Listing:\n"); for(i=0; inOp; i++){ sqlite3VdbePrintOp(stdout, i, &aOp[i]); } } if( p->db->flags & SQLITE_VdbeEQP ){ for(i=0; inOp; i++){ if( aOp[i].opcode==OP_Explain ){ if( once ) printf("VDBE Query Plan:\n"); printf("%s\n", aOp[i].p4.z); once = 0; } } } if( p->db->flags & SQLITE_VdbeTrace ) printf("VDBE Trace:\n"); } sqlite3EndBenignMalloc(); #endif for(pOp=&aOp[p->pc]; rc==SQLITE_OK; pOp++){ assert( pOp>=aOp && pOp<&aOp[p->nOp]); if( db->mallocFailed ) goto no_mem; #ifdef VDBE_PROFILE start = sqlite3Hwtime(); #endif nVmStep++; #ifdef SQLITE_ENABLE_STMT_SCANSTATUS if( p->anExec ) p->anExec[(int)(pOp-aOp)]++; #endif /* Only allow tracing if SQLITE_DEBUG is defined. */ #ifdef SQLITE_DEBUG if( db->flags & SQLITE_VdbeTrace ){ sqlite3VdbePrintOp(stdout, (int)(pOp - aOp), pOp); } #endif /* Check to see if we need to simulate an interrupt. This only happens ** if we have a special test build. 
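**
** (In a non-test build the interrupt flag is only ever set by the public
** sqlite3_interrupt(db) call, typically issued from another thread; the
** test-only countdown below simply forces that same code path
** deterministically.)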
*/ #ifdef SQLITE_TEST if( sqlite3_interrupt_count>0 ){ sqlite3_interrupt_count--; if( sqlite3_interrupt_count==0 ){ sqlite3_interrupt(db); } } #endif /* Sanity checking on other operands */ #ifdef SQLITE_DEBUG assert( pOp->opflags==sqlite3OpcodeProperty[pOp->opcode] ); if( (pOp->opflags & OPFLG_IN1)!=0 ){ assert( pOp->p1>0 ); assert( pOp->p1<=(p->nMem-p->nCursor) ); assert( memIsValid(&aMem[pOp->p1]) ); assert( sqlite3VdbeCheckMemInvariants(&aMem[pOp->p1]) ); REGISTER_TRACE(pOp->p1, &aMem[pOp->p1]); } if( (pOp->opflags & OPFLG_IN2)!=0 ){ assert( pOp->p2>0 ); assert( pOp->p2<=(p->nMem-p->nCursor) ); assert( memIsValid(&aMem[pOp->p2]) ); assert( sqlite3VdbeCheckMemInvariants(&aMem[pOp->p2]) ); REGISTER_TRACE(pOp->p2, &aMem[pOp->p2]); } if( (pOp->opflags & OPFLG_IN3)!=0 ){ assert( pOp->p3>0 ); assert( pOp->p3<=(p->nMem-p->nCursor) ); assert( memIsValid(&aMem[pOp->p3]) ); assert( sqlite3VdbeCheckMemInvariants(&aMem[pOp->p3]) ); REGISTER_TRACE(pOp->p3, &aMem[pOp->p3]); } if( (pOp->opflags & OPFLG_OUT2)!=0 ){ assert( pOp->p2>0 ); assert( pOp->p2<=(p->nMem-p->nCursor) ); memAboutToChange(p, &aMem[pOp->p2]); } if( (pOp->opflags & OPFLG_OUT3)!=0 ){ assert( pOp->p3>0 ); assert( pOp->p3<=(p->nMem-p->nCursor) ); memAboutToChange(p, &aMem[pOp->p3]); } #endif #if defined(SQLITE_DEBUG) || defined(VDBE_PROFILE) pOrigOp = pOp; #endif switch( pOp->opcode ){ /***************************************************************************** ** What follows is a massive switch statement where each case implements a ** separate instruction in the virtual machine. If we follow the usual ** indentation conventions, each case should be indented by 6 spaces. But ** that is a lot of wasted space on the left margin. So the code within ** the switch statement will break with convention and be flush-left. Another ** big comment (similar to this one) will mark the point in the code where ** we transition back to normal indentation. ** ** The formatting of each case is important. The makefile for SQLite ** generates two C files "opcodes.h" and "opcodes.c" by scanning this ** file looking for lines that begin with "case OP_". The opcodes.h files ** will be filled with #defines that give unique integer values to each ** opcode and the opcodes.c file is filled with an array of strings where ** each string is the symbolic name for the corresponding opcode. If the ** case statement is followed by a comment of the form "/# same as ... #/" ** that comment is used to determine the particular value of the opcode. ** ** Other keywords in the comment that follows each case are used to ** construct the OPFLG_INITIALIZER value that initializes opcodeProperty[]. ** Keywords include: in1, in2, in3, out2, out3. See ** the mkopcodeh.awk script for additional information. ** ** Documentation about VDBE opcodes is generated by scanning this file ** for lines of that contain "Opcode:". That line and all subsequent ** comment lines are used in the generation of the opcode.html documentation ** file. ** ** SUMMARY: ** ** Formatting is important to scripts that scan this file. ** Do not deviate from the formatting style currently in use. ** *****************************************************************************/ /* Opcode: Goto * P2 * * * ** ** An unconditional jump to address P2. ** The next instruction executed will be ** the one at index P2 from the beginning of ** the program. ** ** The P1 parameter is not actually used by this opcode. 
However, it ** is sometimes set to 1 instead of 0 as a hint to the command-line shell ** that this Goto is the bottom of a loop and that the lines from P2 down ** to the current line should be indented for EXPLAIN output. */ case OP_Goto: { /* jump */ jump_to_p2_and_check_for_interrupt: pOp = &aOp[pOp->p2 - 1]; /* Opcodes that are used as the bottom of a loop (OP_Next, OP_Prev, ** OP_VNext, OP_RowSetNext, or OP_SorterNext) all jump here upon ** completion. Check to see if sqlite3_interrupt() has been called ** or if the progress callback needs to be invoked. ** ** This code uses unstructured "goto" statements and does not look clean. ** But that is not due to sloppy coding habits. The code is written this ** way for performance, to avoid having to run the interrupt and progress ** checks on every opcode. This helps sqlite3_step() to run about 1.5% ** faster according to "valgrind --tool=cachegrind" */ check_for_interrupt: if( db->u1.isInterrupted ) goto abort_due_to_interrupt; #ifndef SQLITE_OMIT_PROGRESS_CALLBACK /* Call the progress callback if it is configured and the required number ** of VDBE ops have been executed (either since this invocation of ** sqlite3VdbeExec() or since last time the progress callback was called). ** If the progress callback returns non-zero, exit the virtual machine with ** a return code SQLITE_ABORT. */ if( db->xProgress!=0 && nVmStep>=nProgressLimit ){ assert( db->nProgressOps!=0 ); nProgressLimit = nVmStep + db->nProgressOps - (nVmStep%db->nProgressOps); if( db->xProgress(db->pProgressArg) ){ rc = SQLITE_INTERRUPT; goto vdbe_error_halt; } } #endif break; } /* Opcode: Gosub P1 P2 * * * ** ** Write the current address onto register P1 ** and then jump to address P2. */ case OP_Gosub: { /* jump */ assert( pOp->p1>0 && pOp->p1<=(p->nMem-p->nCursor) ); pIn1 = &aMem[pOp->p1]; assert( VdbeMemDynamic(pIn1)==0 ); memAboutToChange(p, pIn1); pIn1->flags = MEM_Int; pIn1->u.i = (int)(pOp-aOp); REGISTER_TRACE(pOp->p1, pIn1); /* Most jump operations do a goto to this spot in order to update ** the pOp pointer. */ jump_to_p2: pOp = &aOp[pOp->p2 - 1]; break; } /* Opcode: Return P1 * * * * ** ** Jump to the next instruction after the address in register P1. After ** the jump, register P1 becomes undefined. */ case OP_Return: { /* in1 */ pIn1 = &aMem[pOp->p1]; assert( pIn1->flags==MEM_Int ); pOp = &aOp[pIn1->u.i]; pIn1->flags = MEM_Undefined; break; } /* Opcode: InitCoroutine P1 P2 P3 * * ** ** Set up register P1 so that it will Yield to the coroutine ** located at address P3. ** ** If P2!=0 then the coroutine implementation immediately follows ** this opcode. So jump over the coroutine implementation to ** address P2. ** ** See also: EndCoroutine */ case OP_InitCoroutine: { /* jump */ assert( pOp->p1>0 && pOp->p1<=(p->nMem-p->nCursor) ); assert( pOp->p2>=0 && pOp->p2nOp ); assert( pOp->p3>=0 && pOp->p3nOp ); pOut = &aMem[pOp->p1]; assert( !VdbeMemDynamic(pOut) ); pOut->u.i = pOp->p3 - 1; pOut->flags = MEM_Int; if( pOp->p2 ) goto jump_to_p2; break; } /* Opcode: EndCoroutine P1 * * * * ** ** The instruction at the address in register P1 is a Yield. ** Jump to the P2 parameter of that Yield. ** After the jump, register P1 becomes undefined. 
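**
** Illustrative flow (a sketch, not tied to any particular SQL statement):
** OP_InitCoroutine stores the co-routine's entry address in a register,
** each OP_Yield on that register swaps the program counter with the
** saved address to pass control back and forth, and when the co-routine
** finally executes OP_EndCoroutine, execution resumes at the P2 operand
** of the OP_Yield whose address is currently saved in that register.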
** ** See also: InitCoroutine */ case OP_EndCoroutine: { /* in1 */ VdbeOp *pCaller; pIn1 = &aMem[pOp->p1]; assert( pIn1->flags==MEM_Int ); assert( pIn1->u.i>=0 && pIn1->u.inOp ); pCaller = &aOp[pIn1->u.i]; assert( pCaller->opcode==OP_Yield ); assert( pCaller->p2>=0 && pCaller->p2nOp ); pOp = &aOp[pCaller->p2 - 1]; pIn1->flags = MEM_Undefined; break; } /* Opcode: Yield P1 P2 * * * ** ** Swap the program counter with the value in register P1. This ** has the effect of yielding to a coroutine. ** ** If the coroutine that is launched by this instruction ends with ** Yield or Return then continue to the next instruction. But if ** the coroutine launched by this instruction ends with ** EndCoroutine, then jump to P2 rather than continuing with the ** next instruction. ** ** See also: InitCoroutine */ case OP_Yield: { /* in1, jump */ int pcDest; pIn1 = &aMem[pOp->p1]; assert( VdbeMemDynamic(pIn1)==0 ); pIn1->flags = MEM_Int; pcDest = (int)pIn1->u.i; pIn1->u.i = (int)(pOp - aOp); REGISTER_TRACE(pOp->p1, pIn1); pOp = &aOp[pcDest]; break; } /* Opcode: HaltIfNull P1 P2 P3 P4 P5 ** Synopsis: if r[P3]=null halt ** ** Check the value in register P3. If it is NULL then Halt using ** parameter P1, P2, and P4 as if this were a Halt instruction. If the ** value in register P3 is not NULL, then this routine is a no-op. ** The P5 parameter should be 1. */ case OP_HaltIfNull: { /* in3 */ pIn3 = &aMem[pOp->p3]; if( (pIn3->flags & MEM_Null)==0 ) break; /* Fall through into OP_Halt */ } /* Opcode: Halt P1 P2 * P4 P5 ** ** Exit immediately. All open cursors, etc are closed ** automatically. ** ** P1 is the result code returned by sqlite3_exec(), sqlite3_reset(), ** or sqlite3_finalize(). For a normal halt, this should be SQLITE_OK (0). ** For errors, it can be some other value. If P1!=0 then P2 will determine ** whether or not to rollback the current transaction. Do not rollback ** if P2==OE_Fail. Do the rollback if P2==OE_Rollback. If P2==OE_Abort, ** then back out all changes that have occurred during this execution of the ** VDBE, but do not rollback the transaction. ** ** If P4 is not null then it is an error message string. ** ** P5 is a value between 0 and 4, inclusive, that modifies the P4 string. ** ** 0: (no change) ** 1: NOT NULL contraint failed: P4 ** 2: UNIQUE constraint failed: P4 ** 3: CHECK constraint failed: P4 ** 4: FOREIGN KEY constraint failed: P4 ** ** If P5 is not zero and P4 is NULL, then everything after the ":" is ** omitted. ** ** There is an implied "Halt 0 0 0" instruction inserted at the very end of ** every program. So a jump past the last instruction of the program ** is the same as executing Halt. */ case OP_Halt: { const char *zType; const char *zLogFmt; VdbeFrame *pFrame; int pcx; pcx = (int)(pOp - aOp); if( pOp->p1==SQLITE_OK && p->pFrame ){ /* Halt the sub-program. Return control to the parent frame. */ pFrame = p->pFrame; p->pFrame = pFrame->pParent; p->nFrame--; sqlite3VdbeSetChanges(db, p->nChange); pcx = sqlite3VdbeFrameRestore(pFrame); lastRowid = db->lastRowid; if( pOp->p2==OE_Ignore ){ /* Instruction pcx is the OP_Program that invoked the sub-program ** currently being halted. If the p2 instruction of this OP_Halt ** instruction is set to OE_Ignore, then the sub-program is throwing ** an IGNORE exception. In this case jump to the address specified ** as the p2 of the calling OP_Program. 
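** (This is how a RAISE(IGNORE) raised inside a trigger body reaches this
** code: the trigger sub-program halts with P2==OE_Ignore and the caller
** resumes at the P2 operand of the calling OP_Program instruction.)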
*/ pcx = p->aOp[pcx].p2-1; } aOp = p->aOp; aMem = p->aMem; pOp = &aOp[pcx]; break; } p->rc = pOp->p1; p->errorAction = (u8)pOp->p2; p->pc = pcx; if( p->rc ){ if( pOp->p5 ){ static const char * const azType[] = { "NOT NULL", "UNIQUE", "CHECK", "FOREIGN KEY" }; assert( pOp->p5>=1 && pOp->p5<=4 ); testcase( pOp->p5==1 ); testcase( pOp->p5==2 ); testcase( pOp->p5==3 ); testcase( pOp->p5==4 ); zType = azType[pOp->p5-1]; }else{ zType = 0; } assert( zType!=0 || pOp->p4.z!=0 ); zLogFmt = "abort at %d in [%s]: %s"; if( zType && pOp->p4.z ){ sqlite3VdbeError(p, "%s constraint failed: %s", zType, pOp->p4.z); }else if( pOp->p4.z ){ sqlite3VdbeError(p, "%s", pOp->p4.z); }else{ sqlite3VdbeError(p, "%s constraint failed", zType); } sqlite3_log(pOp->p1, zLogFmt, pcx, p->zSql, p->zErrMsg); } rc = sqlite3VdbeHalt(p); assert( rc==SQLITE_BUSY || rc==SQLITE_OK || rc==SQLITE_ERROR ); if( rc==SQLITE_BUSY ){ p->rc = rc = SQLITE_BUSY; }else{ assert( rc==SQLITE_OK || (p->rc&0xff)==SQLITE_CONSTRAINT ); assert( rc==SQLITE_OK || db->nDeferredCons>0 || db->nDeferredImmCons>0 ); rc = p->rc ? SQLITE_ERROR : SQLITE_DONE; } goto vdbe_return; } /* Opcode: Integer P1 P2 * * * ** Synopsis: r[P2]=P1 ** ** The 32-bit integer value P1 is written into register P2. */ case OP_Integer: { /* out2 */ pOut = out2Prerelease(p, pOp); pOut->u.i = pOp->p1; break; } /* Opcode: Int64 * P2 * P4 * ** Synopsis: r[P2]=P4 ** ** P4 is a pointer to a 64-bit integer value. ** Write that value into register P2. */ case OP_Int64: { /* out2 */ pOut = out2Prerelease(p, pOp); assert( pOp->p4.pI64!=0 ); pOut->u.i = *pOp->p4.pI64; break; } #ifndef SQLITE_OMIT_FLOATING_POINT /* Opcode: Real * P2 * P4 * ** Synopsis: r[P2]=P4 ** ** P4 is a pointer to a 64-bit floating point value. ** Write that value into register P2. */ case OP_Real: { /* same as TK_FLOAT, out2 */ pOut = out2Prerelease(p, pOp); pOut->flags = MEM_Real; assert( !sqlite3IsNaN(*pOp->p4.pReal) ); pOut->u.r = *pOp->p4.pReal; break; } #endif /* Opcode: String8 * P2 * P4 * ** Synopsis: r[P2]='P4' ** ** P4 points to a nul terminated UTF-8 string. This opcode is transformed ** into a String opcode before it is executed for the first time. During ** this transformation, the length of string P4 is computed and stored ** as the P1 parameter. */ case OP_String8: { /* same as TK_STRING, out2 */ assert( pOp->p4.z!=0 ); pOut = out2Prerelease(p, pOp); pOp->opcode = OP_String; pOp->p1 = sqlite3Strlen30(pOp->p4.z); #ifndef SQLITE_OMIT_UTF16 if( encoding!=SQLITE_UTF8 ){ rc = sqlite3VdbeMemSetStr(pOut, pOp->p4.z, -1, SQLITE_UTF8, SQLITE_STATIC); if( rc==SQLITE_TOOBIG ) goto too_big; if( SQLITE_OK!=sqlite3VdbeChangeEncoding(pOut, encoding) ) goto no_mem; assert( pOut->szMalloc>0 && pOut->zMalloc==pOut->z ); assert( VdbeMemDynamic(pOut)==0 ); pOut->szMalloc = 0; pOut->flags |= MEM_Static; if( pOp->p4type==P4_DYNAMIC ){ sqlite3DbFree(db, pOp->p4.z); } pOp->p4type = P4_DYNAMIC; pOp->p4.z = pOut->z; pOp->p1 = pOut->n; } #endif if( pOp->p1>db->aLimit[SQLITE_LIMIT_LENGTH] ){ goto too_big; } /* Fall through to the next case, OP_String */ } /* Opcode: String P1 P2 P3 P4 P5 ** Synopsis: r[P2]='P4' (len=P1) ** ** The string value P4 of length P1 (bytes) is stored in register P2. ** ** If P5!=0 and the content of register P3 is greater than zero, then ** the datatype of the register P2 is converted to BLOB. The content is ** the same sequence of bytes, it is merely interpreted as a BLOB instead ** of a string, as if it had been CAST. 
*/ case OP_String: { /* out2 */ assert( pOp->p4.z!=0 ); pOut = out2Prerelease(p, pOp); pOut->flags = MEM_Str|MEM_Static|MEM_Term; pOut->z = pOp->p4.z; pOut->n = pOp->p1; pOut->enc = encoding; UPDATE_MAX_BLOBSIZE(pOut); if( pOp->p5 ){ assert( pOp->p3>0 ); assert( pOp->p3<=(p->nMem-p->nCursor) ); pIn3 = &aMem[pOp->p3]; assert( pIn3->flags & MEM_Int ); if( pIn3->u.i ) pOut->flags = MEM_Blob|MEM_Static|MEM_Term; } break; } /* Opcode: Null P1 P2 P3 * * ** Synopsis: r[P2..P3]=NULL ** ** Write a NULL into registers P2. If P3 greater than P2, then also write ** NULL into register P3 and every register in between P2 and P3. If P3 ** is less than P2 (typically P3 is zero) then only register P2 is ** set to NULL. ** ** If the P1 value is non-zero, then also set the MEM_Cleared flag so that ** NULL values will not compare equal even if SQLITE_NULLEQ is set on ** OP_Ne or OP_Eq. */ case OP_Null: { /* out2 */ int cnt; u16 nullFlag; pOut = out2Prerelease(p, pOp); cnt = pOp->p3-pOp->p2; assert( pOp->p3<=(p->nMem-p->nCursor) ); pOut->flags = nullFlag = pOp->p1 ? (MEM_Null|MEM_Cleared) : MEM_Null; while( cnt>0 ){ pOut++; memAboutToChange(p, pOut); sqlite3VdbeMemSetNull(pOut); pOut->flags = nullFlag; cnt--; } break; } /* Opcode: SoftNull P1 * * * * ** Synopsis: r[P1]=NULL ** ** Set register P1 to have the value NULL as seen by the OP_MakeRecord ** instruction, but do not free any string or blob memory associated with ** the register, so that if the value was a string or blob that was ** previously copied using OP_SCopy, the copies will continue to be valid. */ case OP_SoftNull: { assert( pOp->p1>0 && pOp->p1<=(p->nMem-p->nCursor) ); pOut = &aMem[pOp->p1]; pOut->flags = (pOut->flags|MEM_Null)&~MEM_Undefined; break; } /* Opcode: Blob P1 P2 * P4 * ** Synopsis: r[P2]=P4 (len=P1) ** ** P4 points to a blob of data P1 bytes long. Store this ** blob in register P2. */ case OP_Blob: { /* out2 */ assert( pOp->p1 <= SQLITE_MAX_LENGTH ); pOut = out2Prerelease(p, pOp); sqlite3VdbeMemSetStr(pOut, pOp->p4.z, pOp->p1, 0, 0); pOut->enc = encoding; UPDATE_MAX_BLOBSIZE(pOut); break; } /* Opcode: Variable P1 P2 * P4 * ** Synopsis: r[P2]=parameter(P1,P4) ** ** Transfer the values of bound parameter P1 into register P2 ** ** If the parameter is named, then its name appears in P4. ** The P4 value is used by sqlite3_bind_parameter_name(). */ case OP_Variable: { /* out2 */ Mem *pVar; /* Value being transferred */ assert( pOp->p1>0 && pOp->p1<=p->nVar ); assert( pOp->p4.z==0 || pOp->p4.z==p->azVar[pOp->p1-1] ); pVar = &p->aVar[pOp->p1 - 1]; if( sqlite3VdbeMemTooBig(pVar) ){ goto too_big; } pOut = out2Prerelease(p, pOp); sqlite3VdbeMemShallowCopy(pOut, pVar, MEM_Static); UPDATE_MAX_BLOBSIZE(pOut); break; } /* Opcode: Move P1 P2 P3 * * ** Synopsis: r[P2@P3]=r[P1@P3] ** ** Move the P3 values in register P1..P1+P3-1 over into ** registers P2..P2+P3-1. Registers P1..P1+P3-1 are ** left holding a NULL. It is an error for register ranges ** P1..P1+P3-1 and P2..P2+P3-1 to overlap. It is an error ** for P3 to be less than 1. 
*/ case OP_Move: { int n; /* Number of registers left to copy */ int p1; /* Register to copy from */ int p2; /* Register to copy to */ n = pOp->p3; p1 = pOp->p1; p2 = pOp->p2; assert( n>0 && p1>0 && p2>0 ); assert( p1+n<=p2 || p2+n<=p1 ); pIn1 = &aMem[p1]; pOut = &aMem[p2]; do{ assert( pOut<=&aMem[(p->nMem-p->nCursor)] ); assert( pIn1<=&aMem[(p->nMem-p->nCursor)] ); assert( memIsValid(pIn1) ); memAboutToChange(p, pOut); sqlite3VdbeMemMove(pOut, pIn1); #ifdef SQLITE_DEBUG if( pOut->pScopyFrom>=&aMem[p1] && pOut->pScopyFrompScopyFrom += pOp->p2 - p1; } #endif Deephemeralize(pOut); REGISTER_TRACE(p2++, pOut); pIn1++; pOut++; }while( --n ); break; } /* Opcode: Copy P1 P2 P3 * * ** Synopsis: r[P2@P3+1]=r[P1@P3+1] ** ** Make a copy of registers P1..P1+P3 into registers P2..P2+P3. ** ** This instruction makes a deep copy of the value. A duplicate ** is made of any string or blob constant. See also OP_SCopy. */ case OP_Copy: { int n; n = pOp->p3; pIn1 = &aMem[pOp->p1]; pOut = &aMem[pOp->p2]; assert( pOut!=pIn1 ); while( 1 ){ sqlite3VdbeMemShallowCopy(pOut, pIn1, MEM_Ephem); Deephemeralize(pOut); #ifdef SQLITE_DEBUG pOut->pScopyFrom = 0; #endif REGISTER_TRACE(pOp->p2+pOp->p3-n, pOut); if( (n--)==0 ) break; pOut++; pIn1++; } break; } /* Opcode: SCopy P1 P2 * * * ** Synopsis: r[P2]=r[P1] ** ** Make a shallow copy of register P1 into register P2. ** ** This instruction makes a shallow copy of the value. If the value ** is a string or blob, then the copy is only a pointer to the ** original and hence if the original changes so will the copy. ** Worse, if the original is deallocated, the copy becomes invalid. ** Thus the program must guarantee that the original will not change ** during the lifetime of the copy. Use OP_Copy to make a complete ** copy. */ case OP_SCopy: { /* out2 */ pIn1 = &aMem[pOp->p1]; pOut = &aMem[pOp->p2]; assert( pOut!=pIn1 ); sqlite3VdbeMemShallowCopy(pOut, pIn1, MEM_Ephem); #ifdef SQLITE_DEBUG if( pOut->pScopyFrom==0 ) pOut->pScopyFrom = pIn1; #endif break; } /* Opcode: ResultRow P1 P2 * * * ** Synopsis: output=r[P1@P2] ** ** The registers P1 through P1+P2-1 contain a single row of ** results. This opcode causes the sqlite3_step() call to terminate ** with an SQLITE_ROW return code and it sets up the sqlite3_stmt ** structure to provide access to the r(P1)..r(P1+P2-1) values as ** the result row. */ case OP_ResultRow: { Mem *pMem; int i; assert( p->nResColumn==pOp->p2 ); assert( pOp->p1>0 ); assert( pOp->p1+pOp->p2<=(p->nMem-p->nCursor)+1 ); #ifndef SQLITE_OMIT_PROGRESS_CALLBACK /* Run the progress counter just before returning. */ if( db->xProgress!=0 && nVmStep>=nProgressLimit && db->xProgress(db->pProgressArg)!=0 ){ rc = SQLITE_INTERRUPT; goto vdbe_error_halt; } #endif /* If this statement has violated immediate foreign key constraints, do ** not return the number of rows modified. And do not RELEASE the statement ** transaction. It needs to be rolled back. */ if( SQLITE_OK!=(rc = sqlite3VdbeCheckFk(p, 0)) ){ assert( db->flags&SQLITE_CountRows ); assert( p->usesStmtJournal ); break; } /* If the SQLITE_CountRows flag is set in sqlite3.flags mask, then ** DML statements invoke this opcode to return the number of rows ** modified to the user. This is the only way that a VM that ** opens a statement transaction may invoke this opcode. ** ** In case this is such a statement, close any statement transaction ** opened by this VM before returning control to the user. This is to ** ensure that statement-transactions are always nested, not overlapping. 
** If the open statement-transaction is not closed here, then the user ** may step another VM that opens its own statement transaction. This ** may lead to overlapping statement transactions. ** ** The statement transaction is never a top-level transaction. Hence ** the RELEASE call below can never fail. */ assert( p->iStatement==0 || db->flags&SQLITE_CountRows ); rc = sqlite3VdbeCloseStatement(p, SAVEPOINT_RELEASE); if( NEVER(rc!=SQLITE_OK) ){ break; } /* Invalidate all ephemeral cursor row caches */ p->cacheCtr = (p->cacheCtr + 2)|1; /* Make sure the results of the current row are \000 terminated ** and have an assigned type. The results are de-ephemeralized as ** a side effect. */ pMem = p->pResultSet = &aMem[pOp->p1]; for(i=0; ip2; i++){ assert( memIsValid(&pMem[i]) ); Deephemeralize(&pMem[i]); assert( (pMem[i].flags & MEM_Ephem)==0 || (pMem[i].flags & (MEM_Str|MEM_Blob))==0 ); sqlite3VdbeMemNulTerminate(&pMem[i]); REGISTER_TRACE(pOp->p1+i, &pMem[i]); } if( db->mallocFailed ) goto no_mem; /* Return SQLITE_ROW */ p->pc = (int)(pOp - aOp) + 1; rc = SQLITE_ROW; goto vdbe_return; } /* Opcode: Concat P1 P2 P3 * * ** Synopsis: r[P3]=r[P2]+r[P1] ** ** Add the text in register P1 onto the end of the text in ** register P2 and store the result in register P3. ** If either the P1 or P2 text are NULL then store NULL in P3. ** ** P3 = P2 || P1 ** ** It is illegal for P1 and P3 to be the same register. Sometimes, ** if P3 is the same register as P2, the implementation is able ** to avoid a memcpy(). */ case OP_Concat: { /* same as TK_CONCAT, in1, in2, out3 */ i64 nByte; pIn1 = &aMem[pOp->p1]; pIn2 = &aMem[pOp->p2]; pOut = &aMem[pOp->p3]; assert( pIn1!=pOut ); if( (pIn1->flags | pIn2->flags) & MEM_Null ){ sqlite3VdbeMemSetNull(pOut); break; } if( ExpandBlob(pIn1) || ExpandBlob(pIn2) ) goto no_mem; Stringify(pIn1, encoding); Stringify(pIn2, encoding); nByte = pIn1->n + pIn2->n; if( nByte>db->aLimit[SQLITE_LIMIT_LENGTH] ){ goto too_big; } if( sqlite3VdbeMemGrow(pOut, (int)nByte+2, pOut==pIn2) ){ goto no_mem; } MemSetTypeFlag(pOut, MEM_Str); if( pOut!=pIn2 ){ memcpy(pOut->z, pIn2->z, pIn2->n); } memcpy(&pOut->z[pIn2->n], pIn1->z, pIn1->n); pOut->z[nByte]=0; pOut->z[nByte+1] = 0; pOut->flags |= MEM_Term; pOut->n = (int)nByte; pOut->enc = encoding; UPDATE_MAX_BLOBSIZE(pOut); break; } /* Opcode: Add P1 P2 P3 * * ** Synopsis: r[P3]=r[P1]+r[P2] ** ** Add the value in register P1 to the value in register P2 ** and store the result in register P3. ** If either input is NULL, the result is NULL. */ /* Opcode: Multiply P1 P2 P3 * * ** Synopsis: r[P3]=r[P1]*r[P2] ** ** ** Multiply the value in register P1 by the value in register P2 ** and store the result in register P3. ** If either input is NULL, the result is NULL. */ /* Opcode: Subtract P1 P2 P3 * * ** Synopsis: r[P3]=r[P2]-r[P1] ** ** Subtract the value in register P1 from the value in register P2 ** and store the result in register P3. ** If either input is NULL, the result is NULL. */ /* Opcode: Divide P1 P2 P3 * * ** Synopsis: r[P3]=r[P2]/r[P1] ** ** Divide the value in register P1 by the value in register P2 ** and store the result in register P3 (P3=P2/P1). If the value in ** register P1 is zero, then the result is NULL. If either input is ** NULL, the result is NULL. */ /* Opcode: Remainder P1 P2 P3 * * ** Synopsis: r[P3]=r[P2]%r[P1] ** ** Compute the remainder after integer register P2 is divided by ** register P1 and store the result in register P3. ** If the value in register P1 is zero the result is NULL. 
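** Worked integer examples for the code below: 7 % 3 yields 1, any
** remainder by zero yields NULL, and a divisor of -1 is replaced by 1
** so that the INT64_MIN % -1 corner case cannot overflow or trap.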
** If either operand is NULL, the result is NULL. */ case OP_Add: /* same as TK_PLUS, in1, in2, out3 */ case OP_Subtract: /* same as TK_MINUS, in1, in2, out3 */ case OP_Multiply: /* same as TK_STAR, in1, in2, out3 */ case OP_Divide: /* same as TK_SLASH, in1, in2, out3 */ case OP_Remainder: { /* same as TK_REM, in1, in2, out3 */ char bIntint; /* Started out as two integer operands */ u16 flags; /* Combined MEM_* flags from both inputs */ u16 type1; /* Numeric type of left operand */ u16 type2; /* Numeric type of right operand */ i64 iA; /* Integer value of left operand */ i64 iB; /* Integer value of right operand */ double rA; /* Real value of left operand */ double rB; /* Real value of right operand */ pIn1 = &aMem[pOp->p1]; type1 = numericType(pIn1); pIn2 = &aMem[pOp->p2]; type2 = numericType(pIn2); pOut = &aMem[pOp->p3]; flags = pIn1->flags | pIn2->flags; if( (flags & MEM_Null)!=0 ) goto arithmetic_result_is_null; if( (type1 & type2 & MEM_Int)!=0 ){ iA = pIn1->u.i; iB = pIn2->u.i; bIntint = 1; switch( pOp->opcode ){ case OP_Add: if( sqlite3AddInt64(&iB,iA) ) goto fp_math; break; case OP_Subtract: if( sqlite3SubInt64(&iB,iA) ) goto fp_math; break; case OP_Multiply: if( sqlite3MulInt64(&iB,iA) ) goto fp_math; break; case OP_Divide: { if( iA==0 ) goto arithmetic_result_is_null; if( iA==-1 && iB==SMALLEST_INT64 ) goto fp_math; iB /= iA; break; } default: { if( iA==0 ) goto arithmetic_result_is_null; if( iA==-1 ) iA = 1; iB %= iA; break; } } pOut->u.i = iB; MemSetTypeFlag(pOut, MEM_Int); }else{ bIntint = 0; fp_math: rA = sqlite3VdbeRealValue(pIn1); rB = sqlite3VdbeRealValue(pIn2); switch( pOp->opcode ){ case OP_Add: rB += rA; break; case OP_Subtract: rB -= rA; break; case OP_Multiply: rB *= rA; break; case OP_Divide: { /* (double)0 In case of SQLITE_OMIT_FLOATING_POINT... */ if( rA==(double)0 ) goto arithmetic_result_is_null; rB /= rA; break; } default: { iA = (i64)rA; iB = (i64)rB; if( iA==0 ) goto arithmetic_result_is_null; if( iA==-1 ) iA = 1; rB = (double)(iB % iA); break; } } #ifdef SQLITE_OMIT_FLOATING_POINT pOut->u.i = rB; MemSetTypeFlag(pOut, MEM_Int); #else if( sqlite3IsNaN(rB) ){ goto arithmetic_result_is_null; } pOut->u.r = rB; MemSetTypeFlag(pOut, MEM_Real); if( ((type1|type2)&MEM_Real)==0 && !bIntint ){ sqlite3VdbeIntegerAffinity(pOut); } #endif } break; arithmetic_result_is_null: sqlite3VdbeMemSetNull(pOut); break; } /* Opcode: CollSeq P1 * * P4 ** ** P4 is a pointer to a CollSeq struct. If the next call to a user function ** or aggregate calls sqlite3GetFuncCollSeq(), this collation sequence will ** be returned. This is used by the built-in min(), max() and nullif() ** functions. ** ** If P1 is not zero, then it is a register that a subsequent min() or ** max() aggregate will set to 1 if the current row is not the minimum or ** maximum. The P1 register is initialized to 0 by this instruction. ** ** The interface used by the implementation of the aforementioned functions ** to retrieve the collation sequence set by this opcode is not available ** publicly. Only built-in functions have access to this feature. */ case OP_CollSeq: { assert( pOp->p4type==P4_COLLSEQ ); if( pOp->p1 ){ sqlite3VdbeMemSetInt64(&aMem[pOp->p1], 0); } break; } /* Opcode: Function0 P1 P2 P3 P4 P5 ** Synopsis: r[P3]=func(r[P2@P5]) ** ** Invoke a user function (P4 is a pointer to a FuncDef object that ** defines the function) with P5 arguments taken from register P2 and ** successors. The result of the function is stored in register P3. ** Register P3 must not be one of the function inputs. 
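**
** Background, as a sketch: user functions reach this opcode after being
** registered through the documented sqlite3_create_function() interface.
** The function name "half" and the helper halfFunc below are illustrative
** only and do not appear elsewhere in this file:
**
**     static void halfFunc(sqlite3_context *ctx, int nArg, sqlite3_value **apArg){
**       sqlite3_result_double(ctx, 0.5*sqlite3_value_double(apArg[0]));
**     }
**     sqlite3_create_function(db, "half", 1, SQLITE_UTF8, 0, halfFunc, 0, 0);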
** ** P1 is a 32-bit bitmask indicating whether or not each argument to the ** function was determined to be constant at compile time. If the first ** argument was constant then bit 0 of P1 is set. This is used to determine ** whether meta data associated with a user function argument using the ** sqlite3_set_auxdata() API may be safely retained until the next ** invocation of this opcode. ** ** See also: Function, AggStep, AggFinal */ /* Opcode: Function P1 P2 P3 P4 P5 ** Synopsis: r[P3]=func(r[P2@P5]) ** ** Invoke a user function (P4 is a pointer to an sqlite3_context object that ** contains a pointer to the function to be run) with P5 arguments taken ** from register P2 and successors. The result of the function is stored ** in register P3. Register P3 must not be one of the function inputs. ** ** P1 is a 32-bit bitmask indicating whether or not each argument to the ** function was determined to be constant at compile time. If the first ** argument was constant then bit 0 of P1 is set. This is used to determine ** whether meta data associated with a user function argument using the ** sqlite3_set_auxdata() API may be safely retained until the next ** invocation of this opcode. ** ** SQL functions are initially coded as OP_Function0 with P4 pointing ** to a FuncDef object. But on first evaluation, the P4 operand is ** automatically converted into an sqlite3_context object and the operation ** changed to this OP_Function opcode. In this way, the initialization of ** the sqlite3_context object occurs only once, rather than once for each ** evaluation of the function. ** ** See also: Function0, AggStep, AggFinal */ case OP_Function0: { int n; sqlite3_context *pCtx; assert( pOp->p4type==P4_FUNCDEF ); n = pOp->p5; assert( pOp->p3>0 && pOp->p3<=(p->nMem-p->nCursor) ); assert( n==0 || (pOp->p2>0 && pOp->p2+n<=(p->nMem-p->nCursor)+1) ); assert( pOp->p3p2 || pOp->p3>=pOp->p2+n ); pCtx = sqlite3DbMallocRaw(db, sizeof(*pCtx) + (n-1)*sizeof(sqlite3_value*)); if( pCtx==0 ) goto no_mem; pCtx->pOut = 0; pCtx->pFunc = pOp->p4.pFunc; pCtx->iOp = (int)(pOp - aOp); pCtx->pVdbe = p; pCtx->argc = n; pOp->p4type = P4_FUNCCTX; pOp->p4.pCtx = pCtx; pOp->opcode = OP_Function; /* Fall through into OP_Function */ } case OP_Function: { int i; sqlite3_context *pCtx; assert( pOp->p4type==P4_FUNCCTX ); pCtx = pOp->p4.pCtx; /* If this function is inside of a trigger, the register array in aMem[] ** might change from one evaluation to the next. 
The next block of code ** checks to see if the register array has changed, and if so it ** reinitializes the relavant parts of the sqlite3_context object */ pOut = &aMem[pOp->p3]; if( pCtx->pOut != pOut ){ pCtx->pOut = pOut; for(i=pCtx->argc-1; i>=0; i--) pCtx->argv[i] = &aMem[pOp->p2+i]; } memAboutToChange(p, pCtx->pOut); #ifdef SQLITE_DEBUG for(i=0; iargc; i++){ assert( memIsValid(pCtx->argv[i]) ); REGISTER_TRACE(pOp->p2+i, pCtx->argv[i]); } #endif MemSetTypeFlag(pCtx->pOut, MEM_Null); pCtx->fErrorOrAux = 0; db->lastRowid = lastRowid; (*pCtx->pFunc->xFunc)(pCtx, pCtx->argc, pCtx->argv); /* IMP: R-24505-23230 */ lastRowid = db->lastRowid; /* Remember rowid changes made by xFunc */ /* If the function returned an error, throw an exception */ if( pCtx->fErrorOrAux ){ if( pCtx->isError ){ sqlite3VdbeError(p, "%s", sqlite3_value_text(pCtx->pOut)); rc = pCtx->isError; } sqlite3VdbeDeleteAuxData(p, pCtx->iOp, pOp->p1); } /* Copy the result of the function into register P3 */ if( pOut->flags & (MEM_Str|MEM_Blob) ){ sqlite3VdbeChangeEncoding(pCtx->pOut, encoding); if( sqlite3VdbeMemTooBig(pCtx->pOut) ) goto too_big; } REGISTER_TRACE(pOp->p3, pCtx->pOut); UPDATE_MAX_BLOBSIZE(pCtx->pOut); break; } /* Opcode: BitAnd P1 P2 P3 * * ** Synopsis: r[P3]=r[P1]&r[P2] ** ** Take the bit-wise AND of the values in register P1 and P2 and ** store the result in register P3. ** If either input is NULL, the result is NULL. */ /* Opcode: BitOr P1 P2 P3 * * ** Synopsis: r[P3]=r[P1]|r[P2] ** ** Take the bit-wise OR of the values in register P1 and P2 and ** store the result in register P3. ** If either input is NULL, the result is NULL. */ /* Opcode: ShiftLeft P1 P2 P3 * * ** Synopsis: r[P3]=r[P2]<>r[P1] ** ** Shift the integer value in register P2 to the right by the ** number of bits specified by the integer in register P1. ** Store the result in register P3. ** If either input is NULL, the result is NULL. */ case OP_BitAnd: /* same as TK_BITAND, in1, in2, out3 */ case OP_BitOr: /* same as TK_BITOR, in1, in2, out3 */ case OP_ShiftLeft: /* same as TK_LSHIFT, in1, in2, out3 */ case OP_ShiftRight: { /* same as TK_RSHIFT, in1, in2, out3 */ i64 iA; u64 uA; i64 iB; u8 op; pIn1 = &aMem[pOp->p1]; pIn2 = &aMem[pOp->p2]; pOut = &aMem[pOp->p3]; if( (pIn1->flags | pIn2->flags) & MEM_Null ){ sqlite3VdbeMemSetNull(pOut); break; } iA = sqlite3VdbeIntValue(pIn2); iB = sqlite3VdbeIntValue(pIn1); op = pOp->opcode; if( op==OP_BitAnd ){ iA &= iB; }else if( op==OP_BitOr ){ iA |= iB; }else if( iB!=0 ){ assert( op==OP_ShiftRight || op==OP_ShiftLeft ); /* If shifting by a negative amount, shift in the other direction */ if( iB<0 ){ assert( OP_ShiftRight==OP_ShiftLeft+1 ); op = 2*OP_ShiftLeft + 1 - op; iB = iB>(-64) ? -iB : 64; } if( iB>=64 ){ iA = (iA>=0 || op==OP_ShiftLeft) ? 0 : -1; }else{ memcpy(&uA, &iA, sizeof(uA)); if( op==OP_ShiftLeft ){ uA <<= iB; }else{ uA >>= iB; /* Sign-extend on a right shift of a negative number */ if( iA<0 ) uA |= ((((u64)0xffffffff)<<32)|0xffffffff) << (64-iB); } memcpy(&iA, &uA, sizeof(iA)); } } pOut->u.i = iA; MemSetTypeFlag(pOut, MEM_Int); break; } /* Opcode: AddImm P1 P2 * * * ** Synopsis: r[P1]=r[P1]+P2 ** ** Add the constant P2 to the value in register P1. ** The result is always an integer. ** ** To force any register to be an integer, just add 0. */ case OP_AddImm: { /* in1 */ pIn1 = &aMem[pOp->p1]; memAboutToChange(p, pIn1); sqlite3VdbeMemIntegerify(pIn1); pIn1->u.i += pOp->p2; break; } /* Opcode: MustBeInt P1 P2 * * * ** ** Force the value in register P1 to be an integer. 
If the value ** in P1 is not an integer and cannot be converted into an integer ** without data loss, then jump immediately to P2, or if P2==0 ** raise an SQLITE_MISMATCH exception. */ case OP_MustBeInt: { /* jump, in1 */ pIn1 = &aMem[pOp->p1]; if( (pIn1->flags & MEM_Int)==0 ){ applyAffinity(pIn1, SQLITE_AFF_NUMERIC, encoding); VdbeBranchTaken((pIn1->flags&MEM_Int)==0, 2); if( (pIn1->flags & MEM_Int)==0 ){ if( pOp->p2==0 ){ rc = SQLITE_MISMATCH; goto abort_due_to_error; }else{ goto jump_to_p2; } } } MemSetTypeFlag(pIn1, MEM_Int); break; } #ifndef SQLITE_OMIT_FLOATING_POINT /* Opcode: RealAffinity P1 * * * * ** ** If register P1 holds an integer convert it to a real value. ** ** This opcode is used when extracting information from a column that ** has REAL affinity. Such column values may still be stored as ** integers, for space efficiency, but after extraction we want them ** to have only a real value. */ case OP_RealAffinity: { /* in1 */ pIn1 = &aMem[pOp->p1]; if( pIn1->flags & MEM_Int ){ sqlite3VdbeMemRealify(pIn1); } break; } #endif #ifndef SQLITE_OMIT_CAST /* Opcode: Cast P1 P2 * * * ** Synopsis: affinity(r[P1]) ** ** Force the value in register P1 to be the type defined by P2. ** **
**
**     - TEXT
**     - BLOB
**     - NUMERIC
**     - INTEGER
**     - REAL
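**
** Illustrative results of standard CAST conversions: CAST('123xyz' AS
** INTEGER) yields 123, CAST(4.9 AS INTEGER) yields 4, and casting a BLOB
** to TEXT reinterprets the same bytes as text in the database encoding.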
    ** ** A NULL value is not changed by this routine. It remains NULL. */ case OP_Cast: { /* in1 */ assert( pOp->p2>=SQLITE_AFF_BLOB && pOp->p2<=SQLITE_AFF_REAL ); testcase( pOp->p2==SQLITE_AFF_TEXT ); testcase( pOp->p2==SQLITE_AFF_BLOB ); testcase( pOp->p2==SQLITE_AFF_NUMERIC ); testcase( pOp->p2==SQLITE_AFF_INTEGER ); testcase( pOp->p2==SQLITE_AFF_REAL ); pIn1 = &aMem[pOp->p1]; memAboutToChange(p, pIn1); rc = ExpandBlob(pIn1); sqlite3VdbeMemCast(pIn1, pOp->p2, encoding); UPDATE_MAX_BLOBSIZE(pIn1); break; } #endif /* SQLITE_OMIT_CAST */ /* Opcode: Lt P1 P2 P3 P4 P5 ** Synopsis: if r[P1]r[P3] goto P2 ** ** This works just like the Lt opcode except that the jump is taken if ** the content of register P3 is greater than the content of ** register P1. See the Lt opcode for additional information. */ /* Opcode: Ge P1 P2 P3 P4 P5 ** Synopsis: if r[P1]>=r[P3] goto P2 ** ** This works just like the Lt opcode except that the jump is taken if ** the content of register P3 is greater than or equal to the content of ** register P1. See the Lt opcode for additional information. */ case OP_Eq: /* same as TK_EQ, jump, in1, in3 */ case OP_Ne: /* same as TK_NE, jump, in1, in3 */ case OP_Lt: /* same as TK_LT, jump, in1, in3 */ case OP_Le: /* same as TK_LE, jump, in1, in3 */ case OP_Gt: /* same as TK_GT, jump, in1, in3 */ case OP_Ge: { /* same as TK_GE, jump, in1, in3 */ int res; /* Result of the comparison of pIn1 against pIn3 */ char affinity; /* Affinity to use for comparison */ u16 flags1; /* Copy of initial value of pIn1->flags */ u16 flags3; /* Copy of initial value of pIn3->flags */ pIn1 = &aMem[pOp->p1]; pIn3 = &aMem[pOp->p3]; flags1 = pIn1->flags; flags3 = pIn3->flags; if( (flags1 | flags3)&MEM_Null ){ /* One or both operands are NULL */ if( pOp->p5 & SQLITE_NULLEQ ){ /* If SQLITE_NULLEQ is set (which will only happen if the operator is ** OP_Eq or OP_Ne) then take the jump or not depending on whether ** or not both operands are null. */ assert( pOp->opcode==OP_Eq || pOp->opcode==OP_Ne ); assert( (flags1 & MEM_Cleared)==0 ); assert( (pOp->p5 & SQLITE_JUMPIFNULL)==0 ); if( (flags1&MEM_Null)!=0 && (flags3&MEM_Null)!=0 && (flags3&MEM_Cleared)==0 ){ res = 0; /* Results are equal */ }else{ res = 1; /* Results are not equal */ } }else{ /* SQLITE_NULLEQ is clear and at least one operand is NULL, ** then the result is always NULL. ** The jump is taken if the SQLITE_JUMPIFNULL bit is set. */ if( pOp->p5 & SQLITE_STOREP2 ){ pOut = &aMem[pOp->p2]; MemSetTypeFlag(pOut, MEM_Null); REGISTER_TRACE(pOp->p2, pOut); }else{ VdbeBranchTaken(2,3); if( pOp->p5 & SQLITE_JUMPIFNULL ){ goto jump_to_p2; } } break; } }else{ /* Neither operand is NULL. Do a comparison. 
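** For example, under NUMERIC affinity a text operand such as '10' is
** converted to the number 10 before the comparison, so '10' compares
** greater than 9; under TEXT affinity a purely numeric operand is
** stringified instead and an ordinary text comparison is performed.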
*/ affinity = pOp->p5 & SQLITE_AFF_MASK; if( affinity>=SQLITE_AFF_NUMERIC ){ if( (pIn1->flags & (MEM_Int|MEM_Real|MEM_Str))==MEM_Str ){ applyNumericAffinity(pIn1,0); } if( (pIn3->flags & (MEM_Int|MEM_Real|MEM_Str))==MEM_Str ){ applyNumericAffinity(pIn3,0); } }else if( affinity==SQLITE_AFF_TEXT ){ if( (pIn1->flags & MEM_Str)==0 && (pIn1->flags & (MEM_Int|MEM_Real))!=0 ){ testcase( pIn1->flags & MEM_Int ); testcase( pIn1->flags & MEM_Real ); sqlite3VdbeMemStringify(pIn1, encoding, 1); testcase( (flags1&MEM_Dyn) != (pIn1->flags&MEM_Dyn) ); flags1 = (pIn1->flags & ~MEM_TypeMask) | (flags1 & MEM_TypeMask); } if( (pIn3->flags & MEM_Str)==0 && (pIn3->flags & (MEM_Int|MEM_Real))!=0 ){ testcase( pIn3->flags & MEM_Int ); testcase( pIn3->flags & MEM_Real ); sqlite3VdbeMemStringify(pIn3, encoding, 1); testcase( (flags3&MEM_Dyn) != (pIn3->flags&MEM_Dyn) ); flags3 = (pIn3->flags & ~MEM_TypeMask) | (flags3 & MEM_TypeMask); } } assert( pOp->p4type==P4_COLLSEQ || pOp->p4.pColl==0 ); if( pIn1->flags & MEM_Zero ){ sqlite3VdbeMemExpandBlob(pIn1); flags1 &= ~MEM_Zero; } if( pIn3->flags & MEM_Zero ){ sqlite3VdbeMemExpandBlob(pIn3); flags3 &= ~MEM_Zero; } if( db->mallocFailed ) goto no_mem; res = sqlite3MemCompare(pIn3, pIn1, pOp->p4.pColl); } switch( pOp->opcode ){ case OP_Eq: res = res==0; break; case OP_Ne: res = res!=0; break; case OP_Lt: res = res<0; break; case OP_Le: res = res<=0; break; case OP_Gt: res = res>0; break; default: res = res>=0; break; } /* Undo any changes made by applyAffinity() to the input registers. */ assert( (pIn1->flags & MEM_Dyn) == (flags1 & MEM_Dyn) ); pIn1->flags = flags1; assert( (pIn3->flags & MEM_Dyn) == (flags3 & MEM_Dyn) ); pIn3->flags = flags3; if( pOp->p5 & SQLITE_STOREP2 ){ pOut = &aMem[pOp->p2]; memAboutToChange(p, pOut); MemSetTypeFlag(pOut, MEM_Int); pOut->u.i = res; REGISTER_TRACE(pOp->p2, pOut); }else{ VdbeBranchTaken(res!=0, (pOp->p5 & SQLITE_NULLEQ)?2:3); if( res ){ goto jump_to_p2; } } break; } /* Opcode: Permutation * * * P4 * ** ** Set the permutation used by the OP_Compare operator to be the array ** of integers in P4. ** ** The permutation is only valid until the next OP_Compare that has ** the OPFLAG_PERMUTE bit set in P5. Typically the OP_Permutation should ** occur immediately prior to the OP_Compare. */ case OP_Permutation: { assert( pOp->p4type==P4_INTARRAY ); assert( pOp->p4.ai ); aPermute = pOp->p4.ai; break; } /* Opcode: Compare P1 P2 P3 P4 P5 ** Synopsis: r[P1@P3] <-> r[P2@P3] ** ** Compare two vectors of registers in reg(P1)..reg(P1+P3-1) (call this ** vector "A") and in reg(P2)..reg(P2+P3-1) ("B"). Save the result of ** the comparison for use by the next OP_Jump instruct. ** ** If P5 has the OPFLAG_PERMUTE bit set, then the order of comparison is ** determined by the most recent OP_Permutation operator. If the ** OPFLAG_PERMUTE bit is clear, then register are compared in sequential ** order. ** ** P4 is a KeyInfo structure that defines collating sequences and sort ** orders for the comparison. The permutation applies to registers ** only. The KeyInfo elements are used sequentially. ** ** The comparison is a sort comparison, so NULLs compare equal, ** NULLs are less than numbers, numbers are less than strings, ** and strings are less than blobs. 
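**
** For example, under this ordering NULL sorts before the integer 1,
** which sorts before the text 'abc', which in turn sorts before the
** blob X'00'.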
*/ case OP_Compare: { int n; int i; int p1; int p2; const KeyInfo *pKeyInfo; int idx; CollSeq *pColl; /* Collating sequence to use on this term */ int bRev; /* True for DESCENDING sort order */ if( (pOp->p5 & OPFLAG_PERMUTE)==0 ) aPermute = 0; n = pOp->p3; pKeyInfo = pOp->p4.pKeyInfo; assert( n>0 ); assert( pKeyInfo!=0 ); p1 = pOp->p1; p2 = pOp->p2; #if SQLITE_DEBUG if( aPermute ){ int k, mx = 0; for(k=0; kmx ) mx = aPermute[k]; assert( p1>0 && p1+mx<=(p->nMem-p->nCursor)+1 ); assert( p2>0 && p2+mx<=(p->nMem-p->nCursor)+1 ); }else{ assert( p1>0 && p1+n<=(p->nMem-p->nCursor)+1 ); assert( p2>0 && p2+n<=(p->nMem-p->nCursor)+1 ); } #endif /* SQLITE_DEBUG */ for(i=0; inField ); pColl = pKeyInfo->aColl[i]; bRev = pKeyInfo->aSortOrder[i]; iCompare = sqlite3MemCompare(&aMem[p1+idx], &aMem[p2+idx], pColl); if( iCompare ){ if( bRev ) iCompare = -iCompare; break; } } aPermute = 0; break; } /* Opcode: Jump P1 P2 P3 * * ** ** Jump to the instruction at address P1, P2, or P3 depending on whether ** in the most recent OP_Compare instruction the P1 vector was less than ** equal to, or greater than the P2 vector, respectively. */ case OP_Jump: { /* jump */ if( iCompare<0 ){ VdbeBranchTaken(0,3); pOp = &aOp[pOp->p1 - 1]; }else if( iCompare==0 ){ VdbeBranchTaken(1,3); pOp = &aOp[pOp->p2 - 1]; }else{ VdbeBranchTaken(2,3); pOp = &aOp[pOp->p3 - 1]; } break; } /* Opcode: And P1 P2 P3 * * ** Synopsis: r[P3]=(r[P1] && r[P2]) ** ** Take the logical AND of the values in registers P1 and P2 and ** write the result into register P3. ** ** If either P1 or P2 is 0 (false) then the result is 0 even if ** the other input is NULL. A NULL and true or two NULLs give ** a NULL output. */ /* Opcode: Or P1 P2 P3 * * ** Synopsis: r[P3]=(r[P1] || r[P2]) ** ** Take the logical OR of the values in register P1 and P2 and ** store the answer in register P3. ** ** If either P1 or P2 is nonzero (true) then the result is 1 (true) ** even if the other input is NULL. A NULL and false or two NULLs ** give a NULL output. */ case OP_And: /* same as TK_AND, in1, in2, out3 */ case OP_Or: { /* same as TK_OR, in1, in2, out3 */ int v1; /* Left operand: 0==FALSE, 1==TRUE, 2==UNKNOWN or NULL */ int v2; /* Right operand: 0==FALSE, 1==TRUE, 2==UNKNOWN or NULL */ pIn1 = &aMem[pOp->p1]; if( pIn1->flags & MEM_Null ){ v1 = 2; }else{ v1 = sqlite3VdbeIntValue(pIn1)!=0; } pIn2 = &aMem[pOp->p2]; if( pIn2->flags & MEM_Null ){ v2 = 2; }else{ v2 = sqlite3VdbeIntValue(pIn2)!=0; } if( pOp->opcode==OP_And ){ static const unsigned char and_logic[] = { 0, 0, 0, 0, 1, 2, 0, 2, 2 }; v1 = and_logic[v1*3+v2]; }else{ static const unsigned char or_logic[] = { 0, 1, 2, 1, 1, 1, 2, 1, 2 }; v1 = or_logic[v1*3+v2]; } pOut = &aMem[pOp->p3]; if( v1==2 ){ MemSetTypeFlag(pOut, MEM_Null); }else{ pOut->u.i = v1; MemSetTypeFlag(pOut, MEM_Int); } break; } /* Opcode: Not P1 P2 * * * ** Synopsis: r[P2]= !r[P1] ** ** Interpret the value in register P1 as a boolean value. Store the ** boolean complement in register P2. If the value in register P1 is ** NULL, then a NULL is stored in P2. */ case OP_Not: { /* same as TK_NOT, in1, out2 */ pIn1 = &aMem[pOp->p1]; pOut = &aMem[pOp->p2]; sqlite3VdbeMemSetNull(pOut); if( (pIn1->flags & MEM_Null)==0 ){ pOut->flags = MEM_Int; pOut->u.i = !sqlite3VdbeIntValue(pIn1); } break; } /* Opcode: BitNot P1 P2 * * * ** Synopsis: r[P1]= ~r[P1] ** ** Interpret the content of register P1 as an integer. Store the ** ones-complement of the P1 value into register P2. If P1 holds ** a NULL then store a NULL in P2. 
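** (Two's-complement examples for the code below: an input of 5 produces
** -6, and an input of -1 produces 0.)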
*/ case OP_BitNot: { /* same as TK_BITNOT, in1, out2 */ pIn1 = &aMem[pOp->p1]; pOut = &aMem[pOp->p2]; sqlite3VdbeMemSetNull(pOut); if( (pIn1->flags & MEM_Null)==0 ){ pOut->flags = MEM_Int; pOut->u.i = ~sqlite3VdbeIntValue(pIn1); } break; } /* Opcode: Once P1 P2 * * * ** ** Check the "once" flag number P1. If it is set, jump to instruction P2. ** Otherwise, set the flag and fall through to the next instruction. ** In other words, this opcode causes all following opcodes up through P2 ** (but not including P2) to run just once and to be skipped on subsequent ** times through the loop. ** ** All "once" flags are initially cleared whenever a prepared statement ** first begins to run. */ case OP_Once: { /* jump */ assert( pOp->p1nOnceFlag ); VdbeBranchTaken(p->aOnceFlag[pOp->p1]!=0, 2); if( p->aOnceFlag[pOp->p1] ){ goto jump_to_p2; }else{ p->aOnceFlag[pOp->p1] = 1; } break; } /* Opcode: If P1 P2 P3 * * ** ** Jump to P2 if the value in register P1 is true. The value ** is considered true if it is numeric and non-zero. If the value ** in P1 is NULL then take the jump if and only if P3 is non-zero. */ /* Opcode: IfNot P1 P2 P3 * * ** ** Jump to P2 if the value in register P1 is False. The value ** is considered false if it has a numeric value of zero. If the value ** in P1 is NULL then take the jump if and only if P3 is non-zero. */ case OP_If: /* jump, in1 */ case OP_IfNot: { /* jump, in1 */ int c; pIn1 = &aMem[pOp->p1]; if( pIn1->flags & MEM_Null ){ c = pOp->p3; }else{ #ifdef SQLITE_OMIT_FLOATING_POINT c = sqlite3VdbeIntValue(pIn1)!=0; #else c = sqlite3VdbeRealValue(pIn1)!=0.0; #endif if( pOp->opcode==OP_IfNot ) c = !c; } VdbeBranchTaken(c!=0, 2); if( c ){ goto jump_to_p2; } break; } /* Opcode: IsNull P1 P2 * * * ** Synopsis: if r[P1]==NULL goto P2 ** ** Jump to P2 if the value in register P1 is NULL. */ case OP_IsNull: { /* same as TK_ISNULL, jump, in1 */ pIn1 = &aMem[pOp->p1]; VdbeBranchTaken( (pIn1->flags & MEM_Null)!=0, 2); if( (pIn1->flags & MEM_Null)!=0 ){ goto jump_to_p2; } break; } /* Opcode: NotNull P1 P2 * * * ** Synopsis: if r[P1]!=NULL goto P2 ** ** Jump to P2 if the value in register P1 is not NULL. */ case OP_NotNull: { /* same as TK_NOTNULL, jump, in1 */ pIn1 = &aMem[pOp->p1]; VdbeBranchTaken( (pIn1->flags & MEM_Null)==0, 2); if( (pIn1->flags & MEM_Null)==0 ){ goto jump_to_p2; } break; } /* Opcode: Column P1 P2 P3 P4 P5 ** Synopsis: r[P3]=PX ** ** Interpret the data that cursor P1 points to as a structure built using ** the MakeRecord instruction. (See the MakeRecord opcode for additional ** information about the format of the data.) Extract the P2-th column ** from this record. If there are less that (P2+1) ** values in the record, extract a NULL. ** ** The value extracted is stored in register P3. ** ** If the column contains fewer than P2 fields, then extract a NULL. Or, ** if the P4 argument is a P4_MEM use the value of the P4 argument as ** the result. ** ** If the OPFLAG_CLEARCACHE bit is set on P5 and P1 is a pseudo-table cursor, ** then the cache of the cursor is reset prior to extracting the column. ** The first OP_Column against a pseudo-table after the value of the content ** register has changed should have this bit set. ** ** If the OPFLAG_LENGTHARG and OPFLAG_TYPEOFARG bits are set on P5 when ** the result is guaranteed to only be used as the argument of a length() ** or typeof() function, respectively. The loading of large blobs can be ** skipped for length() and all content loading can be skipped for typeof(). 
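**
** For example, for a statement like "SELECT typeof(x) FROM t1" (the
** table and column names are only illustrative) the code generator can
** set OPFLAG_TYPEOFARG on the OP_Column that loads x, and the possibly
** large content of x is then never read from disk at all.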
*/ case OP_Column: { i64 payloadSize64; /* Number of bytes in the record */ int p2; /* column number to retrieve */ VdbeCursor *pC; /* The VDBE cursor */ BtCursor *pCrsr; /* The BTree cursor */ u32 *aOffset; /* aOffset[i] is offset to start of data for i-th column */ int len; /* The length of the serialized data for the column */ int i; /* Loop counter */ Mem *pDest; /* Where to write the extracted value */ Mem sMem; /* For storing the record being decoded */ const u8 *zData; /* Part of the record being decoded */ const u8 *zHdr; /* Next unparsed byte of the header */ const u8 *zEndHdr; /* Pointer to first byte after the header */ u32 offset; /* Offset into the data */ u32 szField; /* Number of bytes in the content of a field */ u32 avail; /* Number of bytes of available data */ u32 t; /* A type code from the record header */ u16 fx; /* pDest->flags value */ Mem *pReg; /* PseudoTable input register */ p2 = pOp->p2; assert( pOp->p3>0 && pOp->p3<=(p->nMem-p->nCursor) ); pDest = &aMem[pOp->p3]; memAboutToChange(p, pDest); assert( pOp->p1>=0 && pOp->p1nCursor ); pC = p->apCsr[pOp->p1]; assert( pC!=0 ); assert( p2nField ); aOffset = pC->aOffset; #ifndef SQLITE_OMIT_VIRTUALTABLE assert( pC->pVtabCursor==0 ); /* OP_Column never called on virtual table */ #endif pCrsr = pC->pCursor; assert( pCrsr!=0 || pC->pseudoTableReg>0 ); /* pCrsr NULL on PseudoTables */ assert( pCrsr!=0 || pC->nullRow ); /* pC->nullRow on PseudoTables */ /* If the cursor cache is stale, bring it up-to-date */ rc = sqlite3VdbeCursorMoveto(pC); if( rc ) goto abort_due_to_error; if( pC->cacheStatus!=p->cacheCtr ){ if( pC->nullRow ){ if( pCrsr==0 ){ assert( pC->pseudoTableReg>0 ); pReg = &aMem[pC->pseudoTableReg]; assert( pReg->flags & MEM_Blob ); assert( memIsValid(pReg) ); pC->payloadSize = pC->szRow = avail = pReg->n; pC->aRow = (u8*)pReg->z; }else{ sqlite3VdbeMemSetNull(pDest); goto op_column_out; } }else{ assert( pCrsr ); if( pC->isTable==0 ){ assert( sqlite3BtreeCursorIsValid(pCrsr) ); VVA_ONLY(rc =) sqlite3BtreeKeySize(pCrsr, &payloadSize64); assert( rc==SQLITE_OK ); /* True because of CursorMoveto() call above */ /* sqlite3BtreeParseCellPtr() uses getVarint32() to extract the ** payload size, so it is impossible for payloadSize64 to be ** larger than 32 bits. */ assert( (payloadSize64 & SQLITE_MAX_U32)==(u64)payloadSize64 ); pC->aRow = sqlite3BtreeKeyFetch(pCrsr, &avail); pC->payloadSize = (u32)payloadSize64; }else{ assert( sqlite3BtreeCursorIsValid(pCrsr) ); VVA_ONLY(rc =) sqlite3BtreeDataSize(pCrsr, &pC->payloadSize); assert( rc==SQLITE_OK ); /* DataSize() cannot fail */ pC->aRow = sqlite3BtreeDataFetch(pCrsr, &avail); } assert( avail<=65536 ); /* Maximum page size is 64KiB */ if( pC->payloadSize <= (u32)avail ){ pC->szRow = pC->payloadSize; }else{ pC->szRow = avail; } if( pC->payloadSize > (u32)db->aLimit[SQLITE_LIMIT_LENGTH] ){ goto too_big; } } pC->cacheStatus = p->cacheCtr; pC->iHdrOffset = getVarint32(pC->aRow, offset); pC->nHdrParsed = 0; aOffset[0] = offset; /* Make sure a corrupt database has not given us an oversize header. ** Do this now to avoid an oversize memory allocation. ** ** Type entries can be between 1 and 5 bytes each. But 4 and 5 byte ** types use so much data space that there can only be 4096 and 32 of ** them, respectively. So the maximum header length results from a ** 3-byte type for each of the maximum of 32768 columns plus three ** extra bytes for the header length itself. 32768*3 + 3 = 98307. 
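**
** For reference while reading the header-parsing loop below, a minimal
** sketch of the serial-type-to-length mapping, assuming the standard
** record encoding (illustrative only; the real work is done by
** sqlite3VdbeSerialTypeLen(), and the function name here is hypothetical):
**
**     static u32 illustrativeSerialTypeLen(u32 t){
**       static const u8 aSize[] = {0,1,2,3,4,6,8,8,0,0,0,0};
**       if( t>=12 ) return (t-12)/2;    /* blob (even) or text (odd) */
**       return aSize[t];                /* NULL, ints, float, 0, 1   */
**     }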
*/ if( offset > 98307 || offset > pC->payloadSize ){ rc = SQLITE_CORRUPT_BKPT; goto op_column_error; } if( availaRow does not have to hold the entire row, but it does at least ** need to cover the header of the record. If pC->aRow does not contain ** the complete header, then set it to zero, forcing the header to be ** dynamically allocated. */ pC->aRow = 0; pC->szRow = 0; } /* The following goto is an optimization. It can be omitted and ** everything will still work. But OP_Column is measurably faster ** by skipping the subsequent conditional, which is always true. */ assert( pC->nHdrParsed<=p2 ); /* Conditional skipped */ goto op_column_read_header; } /* Make sure at least the first p2+1 entries of the header have been ** parsed and valid information is in aOffset[] and pC->aType[]. */ if( pC->nHdrParsed<=p2 ){ /* If there is more header available for parsing in the record, try ** to extract additional fields up through the p2+1-th field */ op_column_read_header: if( pC->iHdrOffsetaRow==0 ){ memset(&sMem, 0, sizeof(sMem)); rc = sqlite3VdbeMemFromBtree(pCrsr, 0, aOffset[0], !pC->isTable, &sMem); if( rc!=SQLITE_OK ){ goto op_column_error; } zData = (u8*)sMem.z; }else{ zData = pC->aRow; } /* Fill in pC->aType[i] and aOffset[i] values through the p2-th field. */ i = pC->nHdrParsed; offset = aOffset[i]; zHdr = zData + pC->iHdrOffset; zEndHdr = zData + aOffset[0]; assert( i<=p2 && zHdraType[i] = t; szField = sqlite3VdbeSerialTypeLen(t); offset += szField; if( offsetnHdrParsed = i; pC->iHdrOffset = (u32)(zHdr - zData); if( pC->aRow==0 ){ sqlite3VdbeMemRelease(&sMem); sMem.flags = MEM_Null; } /* The record is corrupt if any of the following are true: ** (1) the bytes of the header extend past the declared header size ** (zHdr>zEndHdr) ** (2) the entire header was used but not all data was used ** (zHdr==zEndHdr && offset!=pC->payloadSize) ** (3) the end of the data extends beyond the end of the record. ** (offset > pC->payloadSize) */ if( (zHdr>=zEndHdr && (zHdr>zEndHdr || offset!=pC->payloadSize)) || (offset > pC->payloadSize) ){ rc = SQLITE_CORRUPT_BKPT; goto op_column_error; } } /* If after trying to extract new entries from the header, nHdrParsed is ** still not up to p2, that means that the record has fewer than p2 ** columns. So the result will be either the default value or a NULL. */ if( pC->nHdrParsed<=p2 ){ if( pOp->p4type==P4_MEM ){ sqlite3VdbeMemShallowCopy(pDest, pOp->p4.pMem, MEM_Static); }else{ sqlite3VdbeMemSetNull(pDest); } goto op_column_out; } } /* Extract the content for the p2+1-th column. Control can only ** reach this point if aOffset[p2], aOffset[p2+1], and pC->aType[p2] are ** all valid. */ assert( p2nHdrParsed ); assert( rc==SQLITE_OK ); assert( sqlite3VdbeCheckMemInvariants(pDest) ); if( VdbeMemDynamic(pDest) ) sqlite3VdbeMemSetNull(pDest); t = pC->aType[p2]; if( pC->szRow>=aOffset[p2+1] ){ /* This is the common case where the desired content fits on the original ** page - where the content is not on an overflow page */ sqlite3VdbeSerialGet(pC->aRow+aOffset[p2], t, pDest); }else{ /* This branch happens only when content is on overflow pages */ if( ((pOp->p5 & (OPFLAG_LENGTHARG|OPFLAG_TYPEOFARG))!=0 && ((t>=12 && (t&1)==0) || (pOp->p5 & OPFLAG_TYPEOFARG)!=0)) || (len = sqlite3VdbeSerialTypeLen(t))==0 ){ /* Content is irrelevant for ** 1. the typeof() function, ** 2. the length(X) function if X is a blob, and ** 3. if the content length is zero. ** So we might as well use bogus content rather than reading ** content from disk. 
NULL will work for the value for strings ** and blobs and whatever is in the payloadSize64 variable ** will work for everything else. */ sqlite3VdbeSerialGet(t<=13 ? (u8*)&payloadSize64 : 0, t, pDest); }else{ rc = sqlite3VdbeMemFromBtree(pCrsr, aOffset[p2], len, !pC->isTable, pDest); if( rc!=SQLITE_OK ){ goto op_column_error; } sqlite3VdbeSerialGet((const u8*)pDest->z, t, pDest); pDest->flags &= ~MEM_Ephem; } } pDest->enc = encoding; op_column_out: /* If the column value is an ephemeral string, go ahead and persist ** that string in case the cursor moves before the column value is ** used. The following code does the equivalent of Deephemeralize() ** but does it faster. */ if( (pDest->flags & MEM_Ephem)!=0 && pDest->z ){ fx = pDest->flags & (MEM_Str|MEM_Blob); assert( fx!=0 ); zData = (const u8*)pDest->z; len = pDest->n; if( sqlite3VdbeMemClearAndResize(pDest, len+2) ) goto no_mem; memcpy(pDest->z, zData, len); pDest->z[len] = 0; pDest->z[len+1] = 0; pDest->flags = fx|MEM_Term; } op_column_error: UPDATE_MAX_BLOBSIZE(pDest); REGISTER_TRACE(pOp->p3, pDest); break; } /* Opcode: Affinity P1 P2 * P4 * ** Synopsis: affinity(r[P1@P2]) ** ** Apply affinities to a range of P2 registers starting with P1. ** ** P4 is a string that is P2 characters long. The nth character of the ** string indicates the column affinity that should be used for the nth ** memory cell in the range. */ case OP_Affinity: { const char *zAffinity; /* The affinity to be applied */ char cAff; /* A single character of affinity */ zAffinity = pOp->p4.z; assert( zAffinity!=0 ); assert( zAffinity[pOp->p2]==0 ); pIn1 = &aMem[pOp->p1]; while( (cAff = *(zAffinity++))!=0 ){ assert( pIn1 <= &p->aMem[(p->nMem-p->nCursor)] ); assert( memIsValid(pIn1) ); applyAffinity(pIn1, cAff, encoding); pIn1++; } break; } /* Opcode: MakeRecord P1 P2 P3 P4 * ** Synopsis: r[P3]=mkrec(r[P1@P2]) ** ** Convert P2 registers beginning with P1 into the [record format] ** use as a data record in a database table or as a key ** in an index. The OP_Column opcode can decode the record later. ** ** P4 may be a string that is P2 characters long. The nth character of the ** string indicates the column affinity that should be used for the nth ** field of the index key. ** ** The mapping from character to affinity is given by the SQLITE_AFF_ ** macros defined in sqliteInt.h. ** ** If P4 is NULL then all index fields have the affinity BLOB. */ case OP_MakeRecord: { u8 *zNewRecord; /* A buffer to hold the data for the new record */ Mem *pRec; /* The new record */ u64 nData; /* Number of bytes of data space */ int nHdr; /* Number of bytes of header space */ i64 nByte; /* Data space required for this record */ i64 nZero; /* Number of zero bytes at the end of the record */ int nVarint; /* Number of bytes in a varint */ u32 serial_type; /* Type field */ Mem *pData0; /* First field to be combined into the record */ Mem *pLast; /* Last field of the record */ int nField; /* Number of fields in the record */ char *zAffinity; /* The affinity string for the record */ int file_format; /* File format to use for encoding */ int i; /* Space used in zNewRecord[] header */ int j; /* Space used in zNewRecord[] content */ int len; /* Length of a field */ /* Assuming the record contains N fields, the record format looks ** like this: ** ** ------------------------------------------------------------------------ ** | hdr-size | type 0 | type 1 | ... | type N-1 | data0 | ... 
| data N-1 | ** ------------------------------------------------------------------------ ** ** Data(0) is taken from register P1. Data(1) comes from register P1+1 ** and so forth. ** ** Each type field is a varint representing the serial type of the ** corresponding data element (see sqlite3VdbeSerialType()). The ** hdr-size field is also a varint which is the offset from the beginning ** of the record to data0. */ nData = 0; /* Number of bytes of data space */ nHdr = 0; /* Number of bytes of header space */ nZero = 0; /* Number of zero bytes at the end of the record */ nField = pOp->p1; zAffinity = pOp->p4.z; assert( nField>0 && pOp->p2>0 && pOp->p2+nField<=(p->nMem-p->nCursor)+1 ); pData0 = &aMem[nField]; nField = pOp->p2; pLast = &pData0[nField-1]; file_format = p->minWriteFileFormat; /* Identify the output register */ assert( pOp->p3p1 || pOp->p3>=pOp->p1+pOp->p2 ); pOut = &aMem[pOp->p3]; memAboutToChange(p, pOut); /* Apply the requested affinity to all inputs */ assert( pData0<=pLast ); if( zAffinity ){ pRec = pData0; do{ applyAffinity(pRec++, *(zAffinity++), encoding); assert( zAffinity[0]==0 || pRec<=pLast ); }while( zAffinity[0] ); } /* Loop through the elements that will make up the record to figure ** out how much space is required for the new record. */ pRec = pLast; do{ assert( memIsValid(pRec) ); pRec->uTemp = serial_type = sqlite3VdbeSerialType(pRec, file_format); len = sqlite3VdbeSerialTypeLen(serial_type); if( pRec->flags & MEM_Zero ){ if( nData ){ if( sqlite3VdbeMemExpandBlob(pRec) ) goto no_mem; }else{ nZero += pRec->u.nZero; len -= pRec->u.nZero; } } nData += len; testcase( serial_type==127 ); testcase( serial_type==128 ); nHdr += serial_type<=127 ? 1 : sqlite3VarintLen(serial_type); }while( (--pRec)>=pData0 ); /* EVIDENCE-OF: R-22564-11647 The header begins with a single varint ** which determines the total number of bytes in the header. The varint ** value is the size of the header in bytes including the size varint ** itself. */ testcase( nHdr==126 ); testcase( nHdr==127 ); if( nHdr<=126 ){ /* The common case */ nHdr += 1; }else{ /* Rare case of a really large header */ nVarint = sqlite3VarintLen(nHdr); nHdr += nVarint; if( nVarintdb->aLimit[SQLITE_LIMIT_LENGTH] ){ goto too_big; } /* Make sure the output register has a buffer large enough to store ** the new record. The output register (pOp->p3) is not allowed to ** be one of the input registers (because the following call to ** sqlite3VdbeMemClearAndResize() could clobber the value before it is used). */ if( sqlite3VdbeMemClearAndResize(pOut, (int)nByte) ){ goto no_mem; } zNewRecord = (u8 *)pOut->z; /* Write the record */ i = putVarint32(zNewRecord, nHdr); j = nHdr; assert( pData0<=pLast ); pRec = pData0; do{ serial_type = pRec->uTemp; /* EVIDENCE-OF: R-06529-47362 Following the size varint are one or more ** additional varints, one per column. */ i += putVarint32(&zNewRecord[i], serial_type); /* serial type */ /* EVIDENCE-OF: R-64536-51728 The values for each column in the record ** immediately follow the header. 
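**
** As a worked example (illustrative): the record for the three values
** (NULL, 7, 'hi') is the seven bytes
**
**     0x04 0x00 0x01 0x11   0x07 0x68 0x69
**      hdr  t0   t1   t2     d1   --d2--
**
** where the header size 4 counts itself plus the three one-byte serial
** types, type 0x00 is NULL (no data), type 0x01 is a one-byte integer,
** and type 0x11 (17 = 13 + 2*2) is a two-byte text value.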
*/ j += sqlite3VdbeSerialPut(&zNewRecord[j], pRec, serial_type); /* content */ }while( (++pRec)<=pLast ); assert( i==nHdr ); assert( j==nByte ); assert( pOp->p3>0 && pOp->p3<=(p->nMem-p->nCursor) ); pOut->n = (int)nByte; pOut->flags = MEM_Blob; if( nZero ){ pOut->u.nZero = nZero; pOut->flags |= MEM_Zero; } pOut->enc = SQLITE_UTF8; /* In case the blob is ever converted to text */ REGISTER_TRACE(pOp->p3, pOut); UPDATE_MAX_BLOBSIZE(pOut); break; } /* Opcode: Count P1 P2 * * * ** Synopsis: r[P2]=count() ** ** Store the number of entries (an integer value) in the table or index ** opened by cursor P1 in register P2 */ #ifndef SQLITE_OMIT_BTREECOUNT case OP_Count: { /* out2 */ i64 nEntry; BtCursor *pCrsr; pCrsr = p->apCsr[pOp->p1]->pCursor; assert( pCrsr ); nEntry = 0; /* Not needed. Only used to silence a warning. */ rc = sqlite3BtreeCount(pCrsr, &nEntry); pOut = out2Prerelease(p, pOp); pOut->u.i = nEntry; break; } #endif /* Opcode: Savepoint P1 * * P4 * ** ** Open, release or rollback the savepoint named by parameter P4, depending ** on the value of P1. To open a new savepoint, P1==0. To release (commit) an ** existing savepoint, P1==1, or to rollback an existing savepoint P1==2. */ case OP_Savepoint: { int p1; /* Value of P1 operand */ char *zName; /* Name of savepoint */ int nName; Savepoint *pNew; Savepoint *pSavepoint; Savepoint *pTmp; int iSavepoint; int ii; p1 = pOp->p1; zName = pOp->p4.z; /* Assert that the p1 parameter is valid. Also that if there is no open ** transaction, then there cannot be any savepoints. */ assert( db->pSavepoint==0 || db->autoCommit==0 ); assert( p1==SAVEPOINT_BEGIN||p1==SAVEPOINT_RELEASE||p1==SAVEPOINT_ROLLBACK ); assert( db->pSavepoint || db->isTransactionSavepoint==0 ); assert( checkSavepointCount(db) ); assert( p->bIsReader ); if( p1==SAVEPOINT_BEGIN ){ if( db->nVdbeWrite>0 ){ /* A new savepoint cannot be created if there are active write ** statements (i.e. open read/write incremental blob handles). */ sqlite3VdbeError(p, "cannot open savepoint - SQL statements in progress"); rc = SQLITE_BUSY; }else{ nName = sqlite3Strlen30(zName); #ifndef SQLITE_OMIT_VIRTUALTABLE /* This call is Ok even if this savepoint is actually a transaction ** savepoint (and therefore should not prompt xSavepoint()) callbacks. ** If this is a transaction savepoint being opened, it is guaranteed ** that the db->aVTrans[] array is empty. */ assert( db->autoCommit==0 || db->nVTrans==0 ); rc = sqlite3VtabSavepoint(db, SAVEPOINT_BEGIN, db->nStatement+db->nSavepoint); if( rc!=SQLITE_OK ) goto abort_due_to_error; #endif /* Create a new savepoint structure. */ pNew = sqlite3DbMallocRaw(db, sizeof(Savepoint)+nName+1); if( pNew ){ pNew->zName = (char *)&pNew[1]; memcpy(pNew->zName, zName, nName+1); /* If there is no open transaction, then mark this as a special ** "transaction savepoint". */ if( db->autoCommit ){ db->autoCommit = 0; db->isTransactionSavepoint = 1; }else{ db->nSavepoint++; } /* Link the new savepoint into the database handle's list. */ pNew->pNext = db->pSavepoint; db->pSavepoint = pNew; pNew->nDeferredCons = db->nDeferredCons; pNew->nDeferredImmCons = db->nDeferredImmCons; } } }else{ iSavepoint = 0; /* Find the named savepoint. If there is no such savepoint, then an ** an error is returned to the user. 
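**
** The db->pSavepoint list is ordered most-recently-opened first, which is
** why the loop below counts outward from the innermost savepoint. One
** consequence worth keeping in mind (illustrative): after "SAVEPOINT a"
** followed by "SAVEPOINT b", a "RELEASE a" also destroys the savepoint b,
** because every savepoint nested inside the named one is discarded along
** with it (the changes themselves are kept, not rolled back).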
*/ for( pSavepoint = db->pSavepoint; pSavepoint && sqlite3StrICmp(pSavepoint->zName, zName); pSavepoint = pSavepoint->pNext ){ iSavepoint++; } if( !pSavepoint ){ sqlite3VdbeError(p, "no such savepoint: %s", zName); rc = SQLITE_ERROR; }else if( db->nVdbeWrite>0 && p1==SAVEPOINT_RELEASE ){ /* It is not possible to release (commit) a savepoint if there are ** active write statements. */ sqlite3VdbeError(p, "cannot release savepoint - " "SQL statements in progress"); rc = SQLITE_BUSY; }else{ /* Determine whether or not this is a transaction savepoint. If so, ** and this is a RELEASE command, then the current transaction ** is committed. */ int isTransaction = pSavepoint->pNext==0 && db->isTransactionSavepoint; if( isTransaction && p1==SAVEPOINT_RELEASE ){ if( (rc = sqlite3VdbeCheckFk(p, 1))!=SQLITE_OK ){ goto vdbe_return; } db->autoCommit = 1; if( sqlite3VdbeHalt(p)==SQLITE_BUSY ){ p->pc = (int)(pOp - aOp); db->autoCommit = 0; p->rc = rc = SQLITE_BUSY; goto vdbe_return; } db->isTransactionSavepoint = 0; rc = p->rc; }else{ int isSchemaChange; iSavepoint = db->nSavepoint - iSavepoint - 1; if( p1==SAVEPOINT_ROLLBACK ){ isSchemaChange = (db->flags & SQLITE_InternChanges)!=0; for(ii=0; iinDb; ii++){ rc = sqlite3BtreeTripAllCursors(db->aDb[ii].pBt, SQLITE_ABORT_ROLLBACK, isSchemaChange==0); if( rc!=SQLITE_OK ) goto abort_due_to_error; } }else{ isSchemaChange = 0; } for(ii=0; iinDb; ii++){ rc = sqlite3BtreeSavepoint(db->aDb[ii].pBt, p1, iSavepoint); if( rc!=SQLITE_OK ){ goto abort_due_to_error; } } if( isSchemaChange ){ sqlite3ExpirePreparedStatements(db); sqlite3ResetAllSchemasOfConnection(db); db->flags = (db->flags | SQLITE_InternChanges); } } /* Regardless of whether this is a RELEASE or ROLLBACK, destroy all ** savepoints nested inside of the savepoint being operated on. */ while( db->pSavepoint!=pSavepoint ){ pTmp = db->pSavepoint; db->pSavepoint = pTmp->pNext; sqlite3DbFree(db, pTmp); db->nSavepoint--; } /* If it is a RELEASE, then destroy the savepoint being operated on ** too. If it is a ROLLBACK TO, then set the number of deferred ** constraint violations present in the database to the value stored ** when the savepoint was created. */ if( p1==SAVEPOINT_RELEASE ){ assert( pSavepoint==db->pSavepoint ); db->pSavepoint = pSavepoint->pNext; sqlite3DbFree(db, pSavepoint); if( !isTransaction ){ db->nSavepoint--; } }else{ db->nDeferredCons = pSavepoint->nDeferredCons; db->nDeferredImmCons = pSavepoint->nDeferredImmCons; } if( !isTransaction || p1==SAVEPOINT_ROLLBACK ){ rc = sqlite3VtabSavepoint(db, p1, iSavepoint); if( rc!=SQLITE_OK ) goto abort_due_to_error; } } } break; } /* Opcode: AutoCommit P1 P2 * * * ** ** Set the database auto-commit flag to P1 (1 or 0). If P2 is true, roll ** back any currently active btree transactions. If there are any active ** VMs (apart from this one), then a ROLLBACK fails. A COMMIT fails if ** there are active writing VMs or active VMs that use shared cache. ** ** This instruction causes the VM to halt. */ case OP_AutoCommit: { int desiredAutoCommit; int iRollback; int turnOnAC; desiredAutoCommit = pOp->p1; iRollback = pOp->p2; turnOnAC = desiredAutoCommit && !db->autoCommit; assert( desiredAutoCommit==1 || desiredAutoCommit==0 ); assert( desiredAutoCommit==1 || iRollback==0 ); assert( db->nVdbeActive>0 ); /* At least this one VM is active */ assert( p->bIsReader ); if( turnOnAC && !iRollback && db->nVdbeWrite>0 ){ /* If this instruction implements a COMMIT and other VMs are writing ** return an error indicating that the other VMs must complete first. 
*/ sqlite3VdbeError(p, "cannot commit transaction - " "SQL statements in progress"); rc = SQLITE_BUSY; }else if( desiredAutoCommit!=db->autoCommit ){ if( iRollback ){ assert( desiredAutoCommit==1 ); sqlite3RollbackAll(db, SQLITE_ABORT_ROLLBACK); db->autoCommit = 1; }else if( (rc = sqlite3VdbeCheckFk(p, 1))!=SQLITE_OK ){ goto vdbe_return; }else{ db->autoCommit = (u8)desiredAutoCommit; if( sqlite3VdbeHalt(p)==SQLITE_BUSY ){ p->pc = (int)(pOp - aOp); db->autoCommit = (u8)(1-desiredAutoCommit); p->rc = rc = SQLITE_BUSY; goto vdbe_return; } } assert( db->nStatement==0 ); sqlite3CloseSavepoints(db); if( p->rc==SQLITE_OK ){ rc = SQLITE_DONE; }else{ rc = SQLITE_ERROR; } goto vdbe_return; }else{ sqlite3VdbeError(p, (!desiredAutoCommit)?"cannot start a transaction within a transaction":( (iRollback)?"cannot rollback - no transaction is active": "cannot commit - no transaction is active")); rc = SQLITE_ERROR; } break; } /* Opcode: Transaction P1 P2 P3 P4 P5 ** ** Begin a transaction on database P1 if a transaction is not already ** active. ** If P2 is non-zero, then a write-transaction is started, or if a ** read-transaction is already active, it is upgraded to a write-transaction. ** If P2 is zero, then a read-transaction is started. ** ** P1 is the index of the database file on which the transaction is ** started. Index 0 is the main database file and index 1 is the ** file used for temporary tables. Indices of 2 or more are used for ** attached databases. ** ** If a write-transaction is started and the Vdbe.usesStmtJournal flag is ** true (this flag is set if the Vdbe may modify more than one row and may ** throw an ABORT exception), a statement transaction may also be opened. ** More specifically, a statement transaction is opened iff the database ** connection is currently not in autocommit mode, or if there are other ** active statements. A statement transaction allows the changes made by this ** VDBE to be rolled back after an error without having to roll back the ** entire transaction. If no error is encountered, the statement transaction ** will automatically commit when the VDBE halts. ** ** If P5!=0 then this opcode also checks the schema cookie against P3 ** and the schema generation counter against P4. ** The cookie changes its value whenever the database schema changes. ** This operation is used to detect when that the cookie has changed ** and that the current process needs to reread the schema. If the schema ** cookie in P3 differs from the schema cookie in the database header or ** if the schema generation counter in P4 differs from the current ** generation counter, then an SQLITE_SCHEMA error is raised and execution ** halts. The sqlite3_step() wrapper function might then reprepare the ** statement and rerun it from the beginning. 
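**
** From the application side (a minimal sketch, illustrative only; the
** statement text is hypothetical): with the sqlite3_prepare_v2()
** interface the retry is handled internally, so this check is normally
** invisible to the caller:
**
**     sqlite3_stmt *pStmt;
**     sqlite3_prepare_v2(db, "SELECT * FROM t1", -1, &pStmt, 0);
**     /* ... the schema changes underneath us ... */
**     rc = sqlite3_step(pStmt);   /* re-prepared automatically; a legacy */
**                                 /* sqlite3_prepare() statement would   */
**                                 /* report SQLITE_SCHEMA instead        */
**     sqlite3_finalize(pStmt);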
*/ case OP_Transaction: { Btree *pBt; int iMeta; int iGen; assert( p->bIsReader ); assert( p->readOnly==0 || pOp->p2==0 ); assert( pOp->p1>=0 && pOp->p1nDb ); assert( DbMaskTest(p->btreeMask, pOp->p1) ); if( pOp->p2 && (db->flags & SQLITE_QueryOnly)!=0 ){ rc = SQLITE_READONLY; goto abort_due_to_error; } pBt = db->aDb[pOp->p1].pBt; if( pBt ){ rc = sqlite3BtreeBeginTrans(pBt, pOp->p2); if( rc==SQLITE_BUSY ){ p->pc = (int)(pOp - aOp); p->rc = rc = SQLITE_BUSY; goto vdbe_return; } if( rc!=SQLITE_OK ){ goto abort_due_to_error; } if( pOp->p2 && p->usesStmtJournal && (db->autoCommit==0 || db->nVdbeRead>1) ){ assert( sqlite3BtreeIsInTrans(pBt) ); if( p->iStatement==0 ){ assert( db->nStatement>=0 && db->nSavepoint>=0 ); db->nStatement++; p->iStatement = db->nSavepoint + db->nStatement; } rc = sqlite3VtabSavepoint(db, SAVEPOINT_BEGIN, p->iStatement-1); if( rc==SQLITE_OK ){ rc = sqlite3BtreeBeginStmt(pBt, p->iStatement); } /* Store the current value of the database handles deferred constraint ** counter. If the statement transaction needs to be rolled back, ** the value of this counter needs to be restored too. */ p->nStmtDefCons = db->nDeferredCons; p->nStmtDefImmCons = db->nDeferredImmCons; } /* Gather the schema version number for checking: ** IMPLEMENTATION-OF: R-32195-19465 The schema version is used by SQLite ** each time a query is executed to ensure that the internal cache of the ** schema used when compiling the SQL query matches the schema of the ** database against which the compiled query is actually executed. */ sqlite3BtreeGetMeta(pBt, BTREE_SCHEMA_VERSION, (u32 *)&iMeta); iGen = db->aDb[pOp->p1].pSchema->iGeneration; }else{ iGen = iMeta = 0; } assert( pOp->p5==0 || pOp->p4type==P4_INT32 ); if( pOp->p5 && (iMeta!=pOp->p3 || iGen!=pOp->p4.i) ){ sqlite3DbFree(db, p->zErrMsg); p->zErrMsg = sqlite3DbStrDup(db, "database schema has changed"); /* If the schema-cookie from the database file matches the cookie ** stored with the in-memory representation of the schema, do ** not reload the schema from the database file. ** ** If virtual-tables are in use, this is not just an optimization. ** Often, v-tables store their data in other SQLite tables, which ** are queried from within xNext() and other v-table methods using ** prepared queries. If such a query is out-of-date, we do not want to ** discard the database schema, as the user code implementing the ** v-table would have to be ready for the sqlite3_vtab structure itself ** to be invalidated whenever sqlite3_step() is called from within ** a v-table method. */ if( db->aDb[pOp->p1].pSchema->schema_cookie!=iMeta ){ sqlite3ResetOneSchema(db, pOp->p1); } p->expired = 1; rc = SQLITE_SCHEMA; } break; } /* Opcode: ReadCookie P1 P2 P3 * * ** ** Read cookie number P3 from database P1 and write it into register P2. ** P3==1 is the schema version. P3==2 is the database format. ** P3==3 is the recommended pager cache size, and so forth. P1==0 is ** the main database file and P1==1 is the database file used to store ** temporary tables. ** ** There must be a read-lock on the database (either a transaction ** must be started or there must be an open cursor) before ** executing this instruction. 
*/ case OP_ReadCookie: { /* out2 */ int iMeta; int iDb; int iCookie; assert( p->bIsReader ); iDb = pOp->p1; iCookie = pOp->p3; assert( pOp->p3=0 && iDbnDb ); assert( db->aDb[iDb].pBt!=0 ); assert( DbMaskTest(p->btreeMask, iDb) ); sqlite3BtreeGetMeta(db->aDb[iDb].pBt, iCookie, (u32 *)&iMeta); pOut = out2Prerelease(p, pOp); pOut->u.i = iMeta; break; } /* Opcode: SetCookie P1 P2 P3 * * ** ** Write the content of register P3 (interpreted as an integer) ** into cookie number P2 of database P1. P2==1 is the schema version. ** P2==2 is the database format. P2==3 is the recommended pager cache ** size, and so forth. P1==0 is the main database file and P1==1 is the ** database file used to store temporary tables. ** ** A transaction must be started before executing this opcode. */ case OP_SetCookie: { /* in3 */ Db *pDb; assert( pOp->p2p1>=0 && pOp->p1nDb ); assert( DbMaskTest(p->btreeMask, pOp->p1) ); assert( p->readOnly==0 ); pDb = &db->aDb[pOp->p1]; assert( pDb->pBt!=0 ); assert( sqlite3SchemaMutexHeld(db, pOp->p1, 0) ); pIn3 = &aMem[pOp->p3]; sqlite3VdbeMemIntegerify(pIn3); /* See note about index shifting on OP_ReadCookie */ rc = sqlite3BtreeUpdateMeta(pDb->pBt, pOp->p2, (int)pIn3->u.i); if( pOp->p2==BTREE_SCHEMA_VERSION ){ /* When the schema cookie changes, record the new cookie internally */ pDb->pSchema->schema_cookie = (int)pIn3->u.i; db->flags |= SQLITE_InternChanges; }else if( pOp->p2==BTREE_FILE_FORMAT ){ /* Record changes in the file format */ pDb->pSchema->file_format = (u8)pIn3->u.i; } if( pOp->p1==1 ){ /* Invalidate all prepared statements whenever the TEMP database ** schema is changed. Ticket #1644 */ sqlite3ExpirePreparedStatements(db); p->expired = 0; } break; } /* Opcode: OpenRead P1 P2 P3 P4 P5 ** Synopsis: root=P2 iDb=P3 ** ** Open a read-only cursor for the database table whose root page is ** P2 in a database file. The database file is determined by P3. ** P3==0 means the main database, P3==1 means the database used for ** temporary tables, and P3>1 means used the corresponding attached ** database. Give the new cursor an identifier of P1. The P1 ** values need not be contiguous but all P1 values should be small integers. ** It is an error for P1 to be negative. ** ** If P5!=0 then use the content of register P2 as the root page, not ** the value of P2 itself. ** ** There will be a read lock on the database whenever there is an ** open cursor. If the database was unlocked prior to this instruction ** then a read lock is acquired as part of this instruction. A read ** lock allows other processes to read the database but prohibits ** any other process from modifying the database. The read lock is ** released when all cursors are closed. If this instruction attempts ** to get a read lock but fails, the script terminates with an ** SQLITE_BUSY error code. ** ** The P4 value may be either an integer (P4_INT32) or a pointer to ** a KeyInfo structure (P4_KEYINFO). If it is a pointer to a KeyInfo ** structure, then said structure defines the content and collating ** sequence of the index being opened. Otherwise, if P4 is an integer ** value, it is set to the number of columns in the table. ** ** See also: OpenWrite, ReopenIdx */ /* Opcode: ReopenIdx P1 P2 P3 P4 P5 ** Synopsis: root=P2 iDb=P3 ** ** The ReopenIdx opcode works exactly like ReadOpen except that it first ** checks to see if the cursor on P1 is already open with a root page ** number of P2 and if it is this opcode becomes a no-op. In other words, ** if the cursor is already open, do not reopen it. 
** ** The ReopenIdx opcode may only be used with P5==0 and with P4 being ** a P4_KEYINFO object. Furthermore, the P3 value must be the same as ** every other ReopenIdx or OpenRead for the same cursor number. ** ** See the OpenRead opcode documentation for additional information. */ /* Opcode: OpenWrite P1 P2 P3 P4 P5 ** Synopsis: root=P2 iDb=P3 ** ** Open a read/write cursor named P1 on the table or index whose root ** page is P2. Or if P5!=0 use the content of register P2 to find the ** root page. ** ** The P4 value may be either an integer (P4_INT32) or a pointer to ** a KeyInfo structure (P4_KEYINFO). If it is a pointer to a KeyInfo ** structure, then said structure defines the content and collating ** sequence of the index being opened. Otherwise, if P4 is an integer ** value, it is set to the number of columns in the table, or to the ** largest index of any column of the table that is actually used. ** ** This instruction works just like OpenRead except that it opens the cursor ** in read/write mode. For a given table, there can be one or more read-only ** cursors or a single read/write cursor but not both. ** ** See also OpenRead. */ case OP_ReopenIdx: { int nField; KeyInfo *pKeyInfo; int p2; int iDb; int wrFlag; Btree *pX; VdbeCursor *pCur; Db *pDb; assert( pOp->p5==0 || pOp->p5==OPFLAG_SEEKEQ ); assert( pOp->p4type==P4_KEYINFO ); pCur = p->apCsr[pOp->p1]; if( pCur && pCur->pgnoRoot==(u32)pOp->p2 ){ assert( pCur->iDb==pOp->p3 ); /* Guaranteed by the code generator */ goto open_cursor_set_hints; } /* If the cursor is not currently open or is open on a different ** index, then fall through into OP_OpenRead to force a reopen */ case OP_OpenRead: case OP_OpenWrite: assert( (pOp->p5&(OPFLAG_P2ISREG|OPFLAG_BULKCSR|OPFLAG_SEEKEQ))==pOp->p5 ); assert( pOp->opcode==OP_OpenWrite || pOp->p5==0 || pOp->p5==OPFLAG_SEEKEQ ); assert( p->bIsReader ); assert( pOp->opcode==OP_OpenRead || pOp->opcode==OP_ReopenIdx || p->readOnly==0 ); if( p->expired ){ rc = SQLITE_ABORT_ROLLBACK; break; } nField = 0; pKeyInfo = 0; p2 = pOp->p2; iDb = pOp->p3; assert( iDb>=0 && iDbnDb ); assert( DbMaskTest(p->btreeMask, iDb) ); pDb = &db->aDb[iDb]; pX = pDb->pBt; assert( pX!=0 ); if( pOp->opcode==OP_OpenWrite ){ wrFlag = 1; assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); if( pDb->pSchema->file_format < p->minWriteFileFormat ){ p->minWriteFileFormat = pDb->pSchema->file_format; } }else{ wrFlag = 0; } if( pOp->p5 & OPFLAG_P2ISREG ){ assert( p2>0 ); assert( p2<=(p->nMem-p->nCursor) ); pIn2 = &aMem[p2]; assert( memIsValid(pIn2) ); assert( (pIn2->flags & MEM_Int)!=0 ); sqlite3VdbeMemIntegerify(pIn2); p2 = (int)pIn2->u.i; /* The p2 value always comes from a prior OP_CreateTable opcode and ** that opcode will always set the p2 value to 2 or more or else fail. ** If there were a failure, the prepared statement would have halted ** before reaching this instruction. 
*/ if( NEVER(p2<2) ) { rc = SQLITE_CORRUPT_BKPT; goto abort_due_to_error; } } if( pOp->p4type==P4_KEYINFO ){ pKeyInfo = pOp->p4.pKeyInfo; assert( pKeyInfo->enc==ENC(db) ); assert( pKeyInfo->db==db ); nField = pKeyInfo->nField+pKeyInfo->nXField; }else if( pOp->p4type==P4_INT32 ){ nField = pOp->p4.i; } assert( pOp->p1>=0 ); assert( nField>=0 ); testcase( nField==0 ); /* Table with INTEGER PRIMARY KEY and nothing else */ pCur = allocateCursor(p, pOp->p1, nField, iDb, 1); if( pCur==0 ) goto no_mem; pCur->nullRow = 1; pCur->isOrdered = 1; pCur->pgnoRoot = p2; rc = sqlite3BtreeCursor(pX, p2, wrFlag, pKeyInfo, pCur->pCursor); pCur->pKeyInfo = pKeyInfo; /* Set the VdbeCursor.isTable variable. Previous versions of ** SQLite used to check if the root-page flags were sane at this point ** and report database corruption if they were not, but this check has ** since moved into the btree layer. */ pCur->isTable = pOp->p4type!=P4_KEYINFO; open_cursor_set_hints: assert( OPFLAG_BULKCSR==BTREE_BULKLOAD ); assert( OPFLAG_SEEKEQ==BTREE_SEEK_EQ ); sqlite3BtreeCursorHints(pCur->pCursor, (pOp->p5 & (OPFLAG_BULKCSR|OPFLAG_SEEKEQ))); break; } /* Opcode: OpenEphemeral P1 P2 * P4 P5 ** Synopsis: nColumn=P2 ** ** Open a new cursor P1 to a transient table. ** The cursor is always opened read/write even if ** the main database is read-only. The ephemeral ** table is deleted automatically when the cursor is closed. ** ** P2 is the number of columns in the ephemeral table. ** The cursor points to a BTree table if P4==0 and to a BTree index ** if P4 is not 0. If P4 is not NULL, it points to a KeyInfo structure ** that defines the format of keys in the index. ** ** The P5 parameter can be a mask of the BTREE_* flags defined ** in btree.h. These flags control aspects of the operation of ** the btree. The BTREE_OMIT_JOURNAL and BTREE_SINGLE flags are ** added automatically. */ /* Opcode: OpenAutoindex P1 P2 * P4 * ** Synopsis: nColumn=P2 ** ** This opcode works the same as OP_OpenEphemeral. It has a ** different name to distinguish its use. Tables created using ** by this opcode will be used for automatically created transient ** indices in joins. */ case OP_OpenAutoindex: case OP_OpenEphemeral: { VdbeCursor *pCx; KeyInfo *pKeyInfo; static const int vfsFlags = SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | SQLITE_OPEN_EXCLUSIVE | SQLITE_OPEN_DELETEONCLOSE | SQLITE_OPEN_TRANSIENT_DB; assert( pOp->p1>=0 ); assert( pOp->p2>=0 ); pCx = allocateCursor(p, pOp->p1, pOp->p2, -1, 1); if( pCx==0 ) goto no_mem; pCx->nullRow = 1; pCx->isEphemeral = 1; rc = sqlite3BtreeOpen(db->pVfs, 0, db, &pCx->pBt, BTREE_OMIT_JOURNAL | BTREE_SINGLE | pOp->p5, vfsFlags); if( rc==SQLITE_OK ){ rc = sqlite3BtreeBeginTrans(pCx->pBt, 1); } if( rc==SQLITE_OK ){ /* If a transient index is required, create it by calling ** sqlite3BtreeCreateTable() with the BTREE_BLOBKEY flag before ** opening it. If a transient table is required, just use the ** automatically created table with root-page 1 (an BLOB_INTKEY table). 
*/ if( (pKeyInfo = pOp->p4.pKeyInfo)!=0 ){ int pgno; assert( pOp->p4type==P4_KEYINFO ); rc = sqlite3BtreeCreateTable(pCx->pBt, &pgno, BTREE_BLOBKEY | pOp->p5); if( rc==SQLITE_OK ){ assert( pgno==MASTER_ROOT+1 ); assert( pKeyInfo->db==db ); assert( pKeyInfo->enc==ENC(db) ); pCx->pKeyInfo = pKeyInfo; rc = sqlite3BtreeCursor(pCx->pBt, pgno, 1, pKeyInfo, pCx->pCursor); } pCx->isTable = 0; }else{ rc = sqlite3BtreeCursor(pCx->pBt, MASTER_ROOT, 1, 0, pCx->pCursor); pCx->isTable = 1; } } pCx->isOrdered = (pOp->p5!=BTREE_UNORDERED); break; } /* Opcode: SorterOpen P1 P2 P3 P4 * ** ** This opcode works like OP_OpenEphemeral except that it opens ** a transient index that is specifically designed to sort large ** tables using an external merge-sort algorithm. ** ** If argument P3 is non-zero, then it indicates that the sorter may ** assume that a stable sort considering the first P3 fields of each ** key is sufficient to produce the required results. */ case OP_SorterOpen: { VdbeCursor *pCx; assert( pOp->p1>=0 ); assert( pOp->p2>=0 ); pCx = allocateCursor(p, pOp->p1, pOp->p2, -1, 1); if( pCx==0 ) goto no_mem; pCx->pKeyInfo = pOp->p4.pKeyInfo; assert( pCx->pKeyInfo->db==db ); assert( pCx->pKeyInfo->enc==ENC(db) ); rc = sqlite3VdbeSorterInit(db, pOp->p3, pCx); break; } /* Opcode: SequenceTest P1 P2 * * * ** Synopsis: if( cursor[P1].ctr++ ) pc = P2 ** ** P1 is a sorter cursor. If the sequence counter is currently zero, jump ** to P2. Regardless of whether or not the jump is taken, increment the ** the sequence value. */ case OP_SequenceTest: { VdbeCursor *pC; assert( pOp->p1>=0 && pOp->p1nCursor ); pC = p->apCsr[pOp->p1]; assert( pC->pSorter ); if( (pC->seqCount++)==0 ){ goto jump_to_p2; } break; } /* Opcode: OpenPseudo P1 P2 P3 * * ** Synopsis: P3 columns in r[P2] ** ** Open a new cursor that points to a fake table that contains a single ** row of data. The content of that one row is the content of memory ** register P2. In other words, cursor P1 becomes an alias for the ** MEM_Blob content contained in register P2. ** ** A pseudo-table created by this opcode is used to hold a single ** row output from the sorter so that the row can be decomposed into ** individual columns using the OP_Column opcode. The OP_Column opcode ** is the only cursor opcode that works with a pseudo-table. ** ** P3 is the number of fields in the records that will be stored by ** the pseudo-table. */ case OP_OpenPseudo: { VdbeCursor *pCx; assert( pOp->p1>=0 ); assert( pOp->p3>=0 ); pCx = allocateCursor(p, pOp->p1, pOp->p3, -1, 0); if( pCx==0 ) goto no_mem; pCx->nullRow = 1; pCx->pseudoTableReg = pOp->p2; pCx->isTable = 1; assert( pOp->p5==0 ); break; } /* Opcode: Close P1 * * * * ** ** Close a cursor previously opened as P1. If P1 is not ** currently open, this instruction is a no-op. */ case OP_Close: { assert( pOp->p1>=0 && pOp->p1nCursor ); sqlite3VdbeFreeCursor(p, p->apCsr[pOp->p1]); p->apCsr[pOp->p1] = 0; break; } #ifdef SQLITE_ENABLE_COLUMN_USED_MASK /* Opcode: ColumnsUsed P1 * * P4 * ** ** This opcode (which only exists if SQLite was compiled with ** SQLITE_ENABLE_COLUMN_USED_MASK) identifies which columns of the ** table or index for cursor P1 are used. P4 is a 64-bit integer ** (P4_INT64) in which the first 63 bits are one for each of the ** first 63 columns of the table or index that are actually used ** by the cursor. The high-order bit is set if any column after ** the 64th is used. 
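**
** A minimal sketch of how such a mask is shaped (illustrative; the real
** mask is computed by the code generator, and aUsedCols/nUsed here are
** hypothetical):
**
**     u64 m = 0;
**     for(i=0; i<nUsed; i++){
**       int iCol = aUsedCols[i];
**       m |= ((u64)1) << (iCol<63 ? iCol : 63);  /* bit 63 = "any later" */
**     }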
*/ case OP_ColumnsUsed: { VdbeCursor *pC; pC = p->apCsr[pOp->p1]; assert( pC->pCursor ); pC->maskUsed = *(u64*)pOp->p4.pI64; break; } #endif /* Opcode: SeekGE P1 P2 P3 P4 * ** Synopsis: key=r[P3@P4] ** ** If cursor P1 refers to an SQL table (B-Tree that uses integer keys), ** use the value in register P3 as the key. If cursor P1 refers ** to an SQL index, then P3 is the first in an array of P4 registers ** that are used as an unpacked index key. ** ** Reposition cursor P1 so that it points to the smallest entry that ** is greater than or equal to the key value. If there are no records ** greater than or equal to the key and P2 is not zero, then jump to P2. ** ** This opcode leaves the cursor configured to move in forward order, ** from the beginning toward the end. In other words, the cursor is ** configured to use Next, not Prev. ** ** See also: Found, NotFound, SeekLt, SeekGt, SeekLe */ /* Opcode: SeekGT P1 P2 P3 P4 * ** Synopsis: key=r[P3@P4] ** ** If cursor P1 refers to an SQL table (B-Tree that uses integer keys), ** use the value in register P3 as a key. If cursor P1 refers ** to an SQL index, then P3 is the first in an array of P4 registers ** that are used as an unpacked index key. ** ** Reposition cursor P1 so that it points to the smallest entry that ** is greater than the key value. If there are no records greater than ** the key and P2 is not zero, then jump to P2. ** ** This opcode leaves the cursor configured to move in forward order, ** from the beginning toward the end. In other words, the cursor is ** configured to use Next, not Prev. ** ** See also: Found, NotFound, SeekLt, SeekGe, SeekLe */ /* Opcode: SeekLT P1 P2 P3 P4 * ** Synopsis: key=r[P3@P4] ** ** If cursor P1 refers to an SQL table (B-Tree that uses integer keys), ** use the value in register P3 as a key. If cursor P1 refers ** to an SQL index, then P3 is the first in an array of P4 registers ** that are used as an unpacked index key. ** ** Reposition cursor P1 so that it points to the largest entry that ** is less than the key value. If there are no records less than ** the key and P2 is not zero, then jump to P2. ** ** This opcode leaves the cursor configured to move in reverse order, ** from the end toward the beginning. In other words, the cursor is ** configured to use Prev, not Next. ** ** See also: Found, NotFound, SeekGt, SeekGe, SeekLe */ /* Opcode: SeekLE P1 P2 P3 P4 * ** Synopsis: key=r[P3@P4] ** ** If cursor P1 refers to an SQL table (B-Tree that uses integer keys), ** use the value in register P3 as a key. If cursor P1 refers ** to an SQL index, then P3 is the first in an array of P4 registers ** that are used as an unpacked index key. ** ** Reposition cursor P1 so that it points to the largest entry that ** is less than or equal to the key value. If there are no records ** less than or equal to the key and P2 is not zero, then jump to P2. ** ** This opcode leaves the cursor configured to move in reverse order, ** from the end toward the beginning. In other words, the cursor is ** configured to use Prev, not Next. 
** ** See also: Found, NotFound, SeekGt, SeekGe, SeekLt */ case OP_SeekLT: /* jump, in3 */ case OP_SeekLE: /* jump, in3 */ case OP_SeekGE: /* jump, in3 */ case OP_SeekGT: { /* jump, in3 */ int res; int oc; VdbeCursor *pC; UnpackedRecord r; int nField; i64 iKey; /* The rowid we are to seek to */ assert( pOp->p1>=0 && pOp->p1nCursor ); assert( pOp->p2!=0 ); pC = p->apCsr[pOp->p1]; assert( pC!=0 ); assert( pC->pseudoTableReg==0 ); assert( OP_SeekLE == OP_SeekLT+1 ); assert( OP_SeekGE == OP_SeekLT+2 ); assert( OP_SeekGT == OP_SeekLT+3 ); assert( pC->isOrdered ); assert( pC->pCursor!=0 ); oc = pOp->opcode; pC->nullRow = 0; #ifdef SQLITE_DEBUG pC->seekOp = pOp->opcode; #endif /* For a cursor with the BTREE_SEEK_EQ hint, only the OP_SeekGE and ** OP_SeekLE opcodes are allowed, and these must be immediately followed ** by an OP_IdxGT or OP_IdxLT opcode, respectively, with the same key. */ #ifdef SQLITE_DEBUG if( sqlite3BtreeCursorHasHint(pC->pCursor, BTREE_SEEK_EQ) ){ assert( pOp->opcode==OP_SeekGE || pOp->opcode==OP_SeekLE ); assert( pOp[1].opcode==OP_IdxLT || pOp[1].opcode==OP_IdxGT ); assert( pOp[1].p1==pOp[0].p1 ); assert( pOp[1].p2==pOp[0].p2 ); assert( pOp[1].p3==pOp[0].p3 ); assert( pOp[1].p4.i==pOp[0].p4.i ); } #endif if( pC->isTable ){ /* The input value in P3 might be of any type: integer, real, string, ** blob, or NULL. But it needs to be an integer before we can do ** the seek, so convert it. */ pIn3 = &aMem[pOp->p3]; if( (pIn3->flags & (MEM_Int|MEM_Real|MEM_Str))==MEM_Str ){ applyNumericAffinity(pIn3, 0); } iKey = sqlite3VdbeIntValue(pIn3); /* If the P3 value could not be converted into an integer without ** loss of information, then special processing is required... */ if( (pIn3->flags & MEM_Int)==0 ){ if( (pIn3->flags & MEM_Real)==0 ){ /* If the P3 value cannot be converted into any kind of a number, ** then the seek is not possible, so jump to P2 */ VdbeBranchTaken(1,2); goto jump_to_p2; break; } /* If the approximation iKey is larger than the actual real search ** term, substitute >= for > and < for <=. e.g. if the search term ** is 4.9 and the integer approximation 5: ** ** (x > 4.9) -> (x >= 5) ** (x <= 4.9) -> (x < 5) */ if( pIn3->u.r<(double)iKey ){ assert( OP_SeekGE==(OP_SeekGT-1) ); assert( OP_SeekLT==(OP_SeekLE-1) ); assert( (OP_SeekLE & 0x0001)==(OP_SeekGT & 0x0001) ); if( (oc & 0x0001)==(OP_SeekGT & 0x0001) ) oc--; } /* If the approximation iKey is smaller than the actual real search ** term, substitute <= for < and > for >=. */ else if( pIn3->u.r>(double)iKey ){ assert( OP_SeekLE==(OP_SeekLT+1) ); assert( OP_SeekGT==(OP_SeekGE+1) ); assert( (OP_SeekLT & 0x0001)==(OP_SeekGE & 0x0001) ); if( (oc & 0x0001)==(OP_SeekLT & 0x0001) ) oc++; } } rc = sqlite3BtreeMovetoUnpacked(pC->pCursor, 0, (u64)iKey, 0, &res); pC->movetoTarget = iKey; /* Used by OP_Delete */ if( rc!=SQLITE_OK ){ goto abort_due_to_error; } }else{ nField = pOp->p4.i; assert( pOp->p4type==P4_INT32 ); assert( nField>0 ); r.pKeyInfo = pC->pKeyInfo; r.nField = (u16)nField; /* The next line of code computes as follows, only faster: ** if( oc==OP_SeekGT || oc==OP_SeekLE ){ ** r.default_rc = -1; ** }else{ ** r.default_rc = +1; ** } */ r.default_rc = ((1 & (oc - OP_SeekLT)) ? 
-1 : +1); assert( oc!=OP_SeekGT || r.default_rc==-1 ); assert( oc!=OP_SeekLE || r.default_rc==-1 ); assert( oc!=OP_SeekGE || r.default_rc==+1 ); assert( oc!=OP_SeekLT || r.default_rc==+1 ); r.aMem = &aMem[pOp->p3]; #ifdef SQLITE_DEBUG { int i; for(i=0; ipCursor, &r, 0, 0, &res); if( rc!=SQLITE_OK ){ goto abort_due_to_error; } } pC->deferredMoveto = 0; pC->cacheStatus = CACHE_STALE; #ifdef SQLITE_TEST sqlite3_search_count++; #endif if( oc>=OP_SeekGE ){ assert( oc==OP_SeekGE || oc==OP_SeekGT ); if( res<0 || (res==0 && oc==OP_SeekGT) ){ res = 0; rc = sqlite3BtreeNext(pC->pCursor, &res); if( rc!=SQLITE_OK ) goto abort_due_to_error; }else{ res = 0; } }else{ assert( oc==OP_SeekLT || oc==OP_SeekLE ); if( res>0 || (res==0 && oc==OP_SeekLT) ){ res = 0; rc = sqlite3BtreePrevious(pC->pCursor, &res); if( rc!=SQLITE_OK ) goto abort_due_to_error; }else{ /* res might be negative because the table is empty. Check to ** see if this is the case. */ res = sqlite3BtreeEof(pC->pCursor); } } assert( pOp->p2>0 ); VdbeBranchTaken(res!=0,2); if( res ){ goto jump_to_p2; } break; } /* Opcode: Seek P1 P2 * * * ** Synopsis: intkey=r[P2] ** ** P1 is an open table cursor and P2 is a rowid integer. Arrange ** for P1 to move so that it points to the rowid given by P2. ** ** This is actually a deferred seek. Nothing actually happens until ** the cursor is used to read a record. That way, if no reads ** occur, no unnecessary I/O happens. */ case OP_Seek: { /* in2 */ VdbeCursor *pC; assert( pOp->p1>=0 && pOp->p1nCursor ); pC = p->apCsr[pOp->p1]; assert( pC!=0 ); assert( pC->pCursor!=0 ); assert( pC->isTable ); pC->nullRow = 0; pIn2 = &aMem[pOp->p2]; pC->movetoTarget = sqlite3VdbeIntValue(pIn2); pC->deferredMoveto = 1; break; } /* Opcode: Found P1 P2 P3 P4 * ** Synopsis: key=r[P3@P4] ** ** If P4==0 then register P3 holds a blob constructed by MakeRecord. If ** P4>0 then register P3 is the first of P4 registers that form an unpacked ** record. ** ** Cursor P1 is on an index btree. If the record identified by P3 and P4 ** is a prefix of any entry in P1 then a jump is made to P2 and ** P1 is left pointing at the matching entry. ** ** This operation leaves the cursor in a state where it can be ** advanced in the forward direction. The Next instruction will work, ** but not the Prev instruction. ** ** See also: NotFound, NoConflict, NotExists. SeekGe */ /* Opcode: NotFound P1 P2 P3 P4 * ** Synopsis: key=r[P3@P4] ** ** If P4==0 then register P3 holds a blob constructed by MakeRecord. If ** P4>0 then register P3 is the first of P4 registers that form an unpacked ** record. ** ** Cursor P1 is on an index btree. If the record identified by P3 and P4 ** is not the prefix of any entry in P1 then a jump is made to P2. If P1 ** does contain an entry whose prefix matches the P3/P4 record then control ** falls through to the next instruction and P1 is left pointing at the ** matching entry. ** ** This operation leaves the cursor in a state where it cannot be ** advanced in either direction. In other words, the Next and Prev ** opcodes do not work after this operation. ** ** See also: Found, NotExists, NoConflict */ /* Opcode: NoConflict P1 P2 P3 P4 * ** Synopsis: key=r[P3@P4] ** ** If P4==0 then register P3 holds a blob constructed by MakeRecord. If ** P4>0 then register P3 is the first of P4 registers that form an unpacked ** record. ** ** Cursor P1 is on an index btree. If the record identified by P3 and P4 ** contains any NULL value, jump immediately to P2. 
If all terms of the ** record are not-NULL then a check is done to determine if any row in the ** P1 index btree has a matching key prefix. If there are no matches, jump ** immediately to P2. If there is a match, fall through and leave the P1 ** cursor pointing to the matching row. ** ** This opcode is similar to OP_NotFound with the exceptions that the ** branch is always taken if any part of the search key input is NULL. ** ** This operation leaves the cursor in a state where it cannot be ** advanced in either direction. In other words, the Next and Prev ** opcodes do not work after this operation. ** ** See also: NotFound, Found, NotExists */ case OP_NoConflict: /* jump, in3 */ case OP_NotFound: /* jump, in3 */ case OP_Found: { /* jump, in3 */ int alreadyExists; int takeJump; int ii; VdbeCursor *pC; int res; char *pFree; UnpackedRecord *pIdxKey; UnpackedRecord r; char aTempRec[ROUND8(sizeof(UnpackedRecord)) + sizeof(Mem)*4 + 7]; #ifdef SQLITE_TEST if( pOp->opcode!=OP_NoConflict ) sqlite3_found_count++; #endif assert( pOp->p1>=0 && pOp->p1nCursor ); assert( pOp->p4type==P4_INT32 ); pC = p->apCsr[pOp->p1]; assert( pC!=0 ); #ifdef SQLITE_DEBUG pC->seekOp = pOp->opcode; #endif pIn3 = &aMem[pOp->p3]; assert( pC->pCursor!=0 ); assert( pC->isTable==0 ); pFree = 0; if( pOp->p4.i>0 ){ r.pKeyInfo = pC->pKeyInfo; r.nField = (u16)pOp->p4.i; r.aMem = pIn3; for(ii=0; iip3+ii, &r.aMem[ii]); #endif } pIdxKey = &r; }else{ pIdxKey = sqlite3VdbeAllocUnpackedRecord( pC->pKeyInfo, aTempRec, sizeof(aTempRec), &pFree ); if( pIdxKey==0 ) goto no_mem; assert( pIn3->flags & MEM_Blob ); ExpandBlob(pIn3); sqlite3VdbeRecordUnpack(pC->pKeyInfo, pIn3->n, pIn3->z, pIdxKey); } pIdxKey->default_rc = 0; takeJump = 0; if( pOp->opcode==OP_NoConflict ){ /* For the OP_NoConflict opcode, take the jump if any of the ** input fields are NULL, since any key with a NULL will not ** conflict */ for(ii=0; iinField; ii++){ if( pIdxKey->aMem[ii].flags & MEM_Null ){ takeJump = 1; break; } } } rc = sqlite3BtreeMovetoUnpacked(pC->pCursor, pIdxKey, 0, 0, &res); sqlite3DbFree(db, pFree); if( rc!=SQLITE_OK ){ break; } pC->seekResult = res; alreadyExists = (res==0); pC->nullRow = 1-alreadyExists; pC->deferredMoveto = 0; pC->cacheStatus = CACHE_STALE; if( pOp->opcode==OP_Found ){ VdbeBranchTaken(alreadyExists!=0,2); if( alreadyExists ) goto jump_to_p2; }else{ VdbeBranchTaken(takeJump||alreadyExists==0,2); if( takeJump || !alreadyExists ) goto jump_to_p2; } break; } /* Opcode: NotExists P1 P2 P3 * * ** Synopsis: intkey=r[P3] ** ** P1 is the index of a cursor open on an SQL table btree (with integer ** keys). P3 is an integer rowid. If P1 does not contain a record with ** rowid P3 then jump immediately to P2. If P1 does contain a record ** with rowid P3 then leave the cursor pointing at that record and fall ** through to the next instruction. ** ** The OP_NotFound opcode performs the same operation on index btrees ** (with arbitrary multi-value keys). ** ** This opcode leaves the cursor in a state where it cannot be advanced ** in either direction. In other words, the Next and Prev opcodes will ** not work following this opcode. 
** ** See also: Found, NotFound, NoConflict */ case OP_NotExists: { /* jump, in3 */ VdbeCursor *pC; BtCursor *pCrsr; int res; u64 iKey; pIn3 = &aMem[pOp->p3]; assert( pIn3->flags & MEM_Int ); assert( pOp->p1>=0 && pOp->p1nCursor ); pC = p->apCsr[pOp->p1]; assert( pC!=0 ); #ifdef SQLITE_DEBUG pC->seekOp = 0; #endif assert( pC->isTable ); assert( pC->pseudoTableReg==0 ); pCrsr = pC->pCursor; assert( pCrsr!=0 ); res = 0; iKey = pIn3->u.i; rc = sqlite3BtreeMovetoUnpacked(pCrsr, 0, iKey, 0, &res); pC->movetoTarget = iKey; /* Used by OP_Delete */ pC->nullRow = 0; pC->cacheStatus = CACHE_STALE; pC->deferredMoveto = 0; VdbeBranchTaken(res!=0,2); pC->seekResult = res; if( res!=0 ) goto jump_to_p2; break; } /* Opcode: Sequence P1 P2 * * * ** Synopsis: r[P2]=cursor[P1].ctr++ ** ** Find the next available sequence number for cursor P1. ** Write the sequence number into register P2. ** The sequence number on the cursor is incremented after this ** instruction. */ case OP_Sequence: { /* out2 */ assert( pOp->p1>=0 && pOp->p1nCursor ); assert( p->apCsr[pOp->p1]!=0 ); pOut = out2Prerelease(p, pOp); pOut->u.i = p->apCsr[pOp->p1]->seqCount++; break; } /* Opcode: NewRowid P1 P2 P3 * * ** Synopsis: r[P2]=rowid ** ** Get a new integer record number (a.k.a "rowid") used as the key to a table. ** The record number is not previously used as a key in the database ** table that cursor P1 points to. The new record number is written ** written to register P2. ** ** If P3>0 then P3 is a register in the root frame of this VDBE that holds ** the largest previously generated record number. No new record numbers are ** allowed to be less than this value. When this value reaches its maximum, ** an SQLITE_FULL error is generated. The P3 register is updated with the ' ** generated record number. This P3 mechanism is used to help implement the ** AUTOINCREMENT feature. */ case OP_NewRowid: { /* out2 */ i64 v; /* The new rowid */ VdbeCursor *pC; /* Cursor of table to get the new rowid */ int res; /* Result of an sqlite3BtreeLast() */ int cnt; /* Counter to limit the number of searches */ Mem *pMem; /* Register holding largest rowid for AUTOINCREMENT */ VdbeFrame *pFrame; /* Root frame of VDBE */ v = 0; res = 0; pOut = out2Prerelease(p, pOp); assert( pOp->p1>=0 && pOp->p1nCursor ); pC = p->apCsr[pOp->p1]; assert( pC!=0 ); assert( pC->pCursor!=0 ); { /* The next rowid or record number (different terms for the same ** thing) is obtained in a two-step algorithm. ** ** First we attempt to find the largest existing rowid and add one ** to that. But if the largest existing rowid is already the maximum ** positive integer, we have to fall through to the second ** probabilistic algorithm ** ** The second algorithm is to select a rowid at random and see if ** it already exists in the table. If it does not exist, we have ** succeeded. If the random rowid does exist, we select a new one ** and try again, up to 100 times. */ assert( pC->isTable ); #ifdef SQLITE_32BIT_ROWID # define MAX_ROWID 0x7fffffff #else /* Some compilers complain about constants of the form 0x7fffffffffffffff. ** Others complain about 0x7ffffffffffffffffLL. The following macro seems ** to provide the constant while making all compilers happy. 
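** Numerically the macro below evaluates to 0x7fffffffffffffff, i.e.
** 9223372036854775807, the largest signed 64-bit integer.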
*/ # define MAX_ROWID (i64)( (((u64)0x7fffffff)<<32) | (u64)0xffffffff ) #endif if( !pC->useRandomRowid ){ rc = sqlite3BtreeLast(pC->pCursor, &res); if( rc!=SQLITE_OK ){ goto abort_due_to_error; } if( res ){ v = 1; /* IMP: R-61914-48074 */ }else{ assert( sqlite3BtreeCursorIsValid(pC->pCursor) ); rc = sqlite3BtreeKeySize(pC->pCursor, &v); assert( rc==SQLITE_OK ); /* Cannot fail following BtreeLast() */ if( v>=MAX_ROWID ){ pC->useRandomRowid = 1; }else{ v++; /* IMP: R-29538-34987 */ } } } #ifndef SQLITE_OMIT_AUTOINCREMENT if( pOp->p3 ){ /* Assert that P3 is a valid memory cell. */ assert( pOp->p3>0 ); if( p->pFrame ){ for(pFrame=p->pFrame; pFrame->pParent; pFrame=pFrame->pParent); /* Assert that P3 is a valid memory cell. */ assert( pOp->p3<=pFrame->nMem ); pMem = &pFrame->aMem[pOp->p3]; }else{ /* Assert that P3 is a valid memory cell. */ assert( pOp->p3<=(p->nMem-p->nCursor) ); pMem = &aMem[pOp->p3]; memAboutToChange(p, pMem); } assert( memIsValid(pMem) ); REGISTER_TRACE(pOp->p3, pMem); sqlite3VdbeMemIntegerify(pMem); assert( (pMem->flags & MEM_Int)!=0 ); /* mem(P3) holds an integer */ if( pMem->u.i==MAX_ROWID || pC->useRandomRowid ){ rc = SQLITE_FULL; /* IMP: R-12275-61338 */ goto abort_due_to_error; } if( vu.i+1 ){ v = pMem->u.i + 1; } pMem->u.i = v; } #endif if( pC->useRandomRowid ){ /* IMPLEMENTATION-OF: R-07677-41881 If the largest ROWID is equal to the ** largest possible integer (9223372036854775807) then the database ** engine starts picking positive candidate ROWIDs at random until ** it finds one that is not previously used. */ assert( pOp->p3==0 ); /* We cannot be in random rowid mode if this is ** an AUTOINCREMENT table. */ cnt = 0; do{ sqlite3_randomness(sizeof(v), &v); v &= (MAX_ROWID>>1); v++; /* Ensure that v is greater than zero */ }while( ((rc = sqlite3BtreeMovetoUnpacked(pC->pCursor, 0, (u64)v, 0, &res))==SQLITE_OK) && (res==0) && (++cnt<100)); if( rc==SQLITE_OK && res==0 ){ rc = SQLITE_FULL; /* IMP: R-38219-53002 */ goto abort_due_to_error; } assert( v>0 ); /* EV: R-40812-03570 */ } pC->deferredMoveto = 0; pC->cacheStatus = CACHE_STALE; } pOut->u.i = v; break; } /* Opcode: Insert P1 P2 P3 P4 P5 ** Synopsis: intkey=r[P3] data=r[P2] ** ** Write an entry into the table of cursor P1. A new entry is ** created if it doesn't already exist or the data for an existing ** entry is overwritten. The data is the value MEM_Blob stored in register ** number P2. The key is stored in register P3. The key must ** be a MEM_Int. ** ** If the OPFLAG_NCHANGE flag of P5 is set, then the row change count is ** incremented (otherwise not). If the OPFLAG_LASTROWID flag of P5 is set, ** then rowid is stored for subsequent return by the ** sqlite3_last_insert_rowid() function (otherwise it is unmodified). ** ** If the OPFLAG_USESEEKRESULT flag of P5 is set and if the result of ** the last seek operation (OP_NotExists) was a success, then this ** operation will not attempt to find the appropriate row before doing ** the insert but will instead overwrite the row that the cursor is ** currently pointing to. Presumably, the prior OP_NotExists opcode ** has already positioned the cursor correctly. This is an optimization ** that boosts performance by avoiding redundant seeks. ** ** If the OPFLAG_ISUPDATE flag is set, then this opcode is part of an ** UPDATE operation. Otherwise (if the flag is clear) then this opcode ** is part of an INSERT operation. The difference is only important to ** the update hook. ** ** Parameter P4 may point to a string containing the table-name, or ** may be NULL. 
If it is not NULL, then the update-hook ** (sqlite3.xUpdateCallback) is invoked following a successful insert. ** ** (WARNING/TODO: If P1 is a pseudo-cursor and P2 is dynamically ** allocated, then ownership of P2 is transferred to the pseudo-cursor ** and register P2 becomes ephemeral. If the cursor is changed, the ** value of register P2 will then change. Make sure this does not ** cause any problems.) ** ** This instruction only works on tables. The equivalent instruction ** for indices is OP_IdxInsert. */ /* Opcode: InsertInt P1 P2 P3 P4 P5 ** Synopsis: intkey=P3 data=r[P2] ** ** This works exactly like OP_Insert except that the key is the ** integer value P3, not the value of the integer stored in register P3. */ case OP_Insert: case OP_InsertInt: { Mem *pData; /* MEM cell holding data for the record to be inserted */ Mem *pKey; /* MEM cell holding key for the record */ i64 iKey; /* The integer ROWID or key for the record to be inserted */ VdbeCursor *pC; /* Cursor to table into which insert is written */ int nZero; /* Number of zero-bytes to append */ int seekResult; /* Result of prior seek or 0 if no USESEEKRESULT flag */ const char *zDb; /* database name - used by the update hook */ const char *zTbl; /* Table name - used by the opdate hook */ int op; /* Opcode for update hook: SQLITE_UPDATE or SQLITE_INSERT */ pData = &aMem[pOp->p2]; assert( pOp->p1>=0 && pOp->p1nCursor ); assert( memIsValid(pData) ); pC = p->apCsr[pOp->p1]; assert( pC!=0 ); assert( pC->pCursor!=0 ); assert( pC->pseudoTableReg==0 ); assert( pC->isTable ); REGISTER_TRACE(pOp->p2, pData); if( pOp->opcode==OP_Insert ){ pKey = &aMem[pOp->p3]; assert( pKey->flags & MEM_Int ); assert( memIsValid(pKey) ); REGISTER_TRACE(pOp->p3, pKey); iKey = pKey->u.i; }else{ assert( pOp->opcode==OP_InsertInt ); iKey = pOp->p3; } if( pOp->p5 & OPFLAG_NCHANGE ) p->nChange++; if( pOp->p5 & OPFLAG_LASTROWID ) db->lastRowid = lastRowid = iKey; if( pData->flags & MEM_Null ){ pData->z = 0; pData->n = 0; }else{ assert( pData->flags & (MEM_Blob|MEM_Str) ); } seekResult = ((pOp->p5 & OPFLAG_USESEEKRESULT) ? pC->seekResult : 0); if( pData->flags & MEM_Zero ){ nZero = pData->u.nZero; }else{ nZero = 0; } rc = sqlite3BtreeInsert(pC->pCursor, 0, iKey, pData->z, pData->n, nZero, (pOp->p5 & OPFLAG_APPEND)!=0, seekResult ); pC->deferredMoveto = 0; pC->cacheStatus = CACHE_STALE; /* Invoke the update-hook if required. */ if( rc==SQLITE_OK && db->xUpdateCallback && pOp->p4.z ){ zDb = db->aDb[pC->iDb].zName; zTbl = pOp->p4.z; op = ((pOp->p5 & OPFLAG_ISUPDATE) ? SQLITE_UPDATE : SQLITE_INSERT); assert( pC->isTable ); db->xUpdateCallback(db->pUpdateArg, op, zDb, zTbl, iKey); assert( pC->iDb>=0 ); } break; } /* Opcode: Delete P1 P2 * P4 * ** ** Delete the record at which the P1 cursor is currently pointing. ** ** The cursor will be left pointing at either the next or the previous ** record in the table. If it is left pointing at the next record, then ** the next Next instruction will be a no-op. Hence it is OK to delete ** a record from within a Next loop. ** ** If the OPFLAG_NCHANGE flag of P2 is set, then the row change count is ** incremented (otherwise not). ** ** P1 must not be pseudo-table. It has to be a real table with ** multiple rows. ** ** If P4 is not NULL, then it is the name of the table that P1 is ** pointing to. The update hook will be invoked, if it exists. ** If P4 is not NULL then the P1 cursor must have been positioned ** using OP_NotFound prior to invoking this opcode. 
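**
** For reference, the hook that this opcode (and OP_Insert above) may invoke
** is the one an application registers through the public
** sqlite3_update_hook() interface.  A minimal, purely illustrative
** registration (the callback name is invented for this sketch, printf assumes
** <stdio.h>, and op is one of SQLITE_INSERT, SQLITE_UPDATE, or SQLITE_DELETE):
**
**     static void change_logger(void *pArg, int op, const char *zDb,
**                               const char *zTbl, sqlite3_int64 rowid){
**       printf("op=%d on %s.%s rowid=%lld\n", op, zDb, zTbl, (long long)rowid);
**     }
**     sqlite3_update_hook(db, change_logger, 0);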
*/ case OP_Delete: { VdbeCursor *pC; assert( pOp->p1>=0 && pOp->p1nCursor ); pC = p->apCsr[pOp->p1]; assert( pC!=0 ); assert( pC->pCursor!=0 ); /* Only valid for real tables, no pseudotables */ assert( pC->deferredMoveto==0 ); #ifdef SQLITE_DEBUG /* The seek operation that positioned the cursor prior to OP_Delete will ** have also set the pC->movetoTarget field to the rowid of the row that ** is being deleted */ if( pOp->p4.z && pC->isTable ){ i64 iKey = 0; sqlite3BtreeKeySize(pC->pCursor, &iKey); assert( pC->movetoTarget==iKey ); } #endif rc = sqlite3BtreeDelete(pC->pCursor); pC->cacheStatus = CACHE_STALE; /* Invoke the update-hook if required. */ if( rc==SQLITE_OK && db->xUpdateCallback && pOp->p4.z && pC->isTable ){ db->xUpdateCallback(db->pUpdateArg, SQLITE_DELETE, db->aDb[pC->iDb].zName, pOp->p4.z, pC->movetoTarget); assert( pC->iDb>=0 ); } if( pOp->p2 & OPFLAG_NCHANGE ) p->nChange++; break; } /* Opcode: ResetCount * * * * * ** ** The value of the change counter is copied to the database handle ** change counter (returned by subsequent calls to sqlite3_changes()). ** Then the VMs internal change counter resets to 0. ** This is used by trigger programs. */ case OP_ResetCount: { sqlite3VdbeSetChanges(db, p->nChange); p->nChange = 0; break; } /* Opcode: SorterCompare P1 P2 P3 P4 ** Synopsis: if key(P1)!=trim(r[P3],P4) goto P2 ** ** P1 is a sorter cursor. This instruction compares a prefix of the ** record blob in register P3 against a prefix of the entry that ** the sorter cursor currently points to. Only the first P4 fields ** of r[P3] and the sorter record are compared. ** ** If either P3 or the sorter contains a NULL in one of their significant ** fields (not counting the P4 fields at the end which are ignored) then ** the comparison is assumed to be equal. ** ** Fall through to next instruction if the two records compare equal to ** each other. Jump to P2 if they are different. */ case OP_SorterCompare: { VdbeCursor *pC; int res; int nKeyCol; pC = p->apCsr[pOp->p1]; assert( isSorter(pC) ); assert( pOp->p4type==P4_INT32 ); pIn3 = &aMem[pOp->p3]; nKeyCol = pOp->p4.i; res = 0; rc = sqlite3VdbeSorterCompare(pC, pIn3, nKeyCol, &res); VdbeBranchTaken(res!=0,2); if( res ) goto jump_to_p2; break; }; /* Opcode: SorterData P1 P2 P3 * * ** Synopsis: r[P2]=data ** ** Write into register P2 the current sorter data for sorter cursor P1. ** Then clear the column header cache on cursor P3. ** ** This opcode is normally use to move a record out of the sorter and into ** a register that is the source for a pseudo-table cursor created using ** OpenPseudo. That pseudo-table cursor is the one that is identified by ** parameter P3. Clearing the P3 column cache as part of this opcode saves ** us from having to issue a separate NullRow instruction to clear that cache. */ case OP_SorterData: { VdbeCursor *pC; pOut = &aMem[pOp->p2]; pC = p->apCsr[pOp->p1]; assert( isSorter(pC) ); rc = sqlite3VdbeSorterRowkey(pC, pOut); assert( rc!=SQLITE_OK || (pOut->flags & MEM_Blob) ); assert( pOp->p1>=0 && pOp->p1nCursor ); p->apCsr[pOp->p3]->cacheStatus = CACHE_STALE; break; } /* Opcode: RowData P1 P2 * * * ** Synopsis: r[P2]=data ** ** Write into register P2 the complete row data for cursor P1. ** There is no interpretation of the data. ** It is just copied onto the P2 register exactly as ** it is found in the database file. ** ** If the P1 cursor must be pointing to a valid row (not a NULL row) ** of a real table, not a pseudo-table. 
*/ /* Opcode: RowKey P1 P2 * * * ** Synopsis: r[P2]=key ** ** Write into register P2 the complete row key for cursor P1. ** There is no interpretation of the data. ** The key is copied onto the P2 register exactly as ** it is found in the database file. ** ** If the P1 cursor must be pointing to a valid row (not a NULL row) ** of a real table, not a pseudo-table. */ case OP_RowKey: case OP_RowData: { VdbeCursor *pC; BtCursor *pCrsr; u32 n; i64 n64; pOut = &aMem[pOp->p2]; memAboutToChange(p, pOut); /* Note that RowKey and RowData are really exactly the same instruction */ assert( pOp->p1>=0 && pOp->p1nCursor ); pC = p->apCsr[pOp->p1]; assert( isSorter(pC)==0 ); assert( pC->isTable || pOp->opcode!=OP_RowData ); assert( pC->isTable==0 || pOp->opcode==OP_RowData ); assert( pC!=0 ); assert( pC->nullRow==0 ); assert( pC->pseudoTableReg==0 ); assert( pC->pCursor!=0 ); pCrsr = pC->pCursor; /* The OP_RowKey and OP_RowData opcodes always follow OP_NotExists or ** OP_Rewind/Op_Next with no intervening instructions that might invalidate ** the cursor. If this where not the case, on of the following assert()s ** would fail. Should this ever change (because of changes in the code ** generator) then the fix would be to insert a call to ** sqlite3VdbeCursorMoveto(). */ assert( pC->deferredMoveto==0 ); assert( sqlite3BtreeCursorIsValid(pCrsr) ); #if 0 /* Not required due to the previous to assert() statements */ rc = sqlite3VdbeCursorMoveto(pC); if( rc!=SQLITE_OK ) goto abort_due_to_error; #endif if( pC->isTable==0 ){ assert( !pC->isTable ); VVA_ONLY(rc =) sqlite3BtreeKeySize(pCrsr, &n64); assert( rc==SQLITE_OK ); /* True because of CursorMoveto() call above */ if( n64>db->aLimit[SQLITE_LIMIT_LENGTH] ){ goto too_big; } n = (u32)n64; }else{ VVA_ONLY(rc =) sqlite3BtreeDataSize(pCrsr, &n); assert( rc==SQLITE_OK ); /* DataSize() cannot fail */ if( n>(u32)db->aLimit[SQLITE_LIMIT_LENGTH] ){ goto too_big; } } testcase( n==0 ); if( sqlite3VdbeMemClearAndResize(pOut, MAX(n,32)) ){ goto no_mem; } pOut->n = n; MemSetTypeFlag(pOut, MEM_Blob); if( pC->isTable==0 ){ rc = sqlite3BtreeKey(pCrsr, 0, n, pOut->z); }else{ rc = sqlite3BtreeData(pCrsr, 0, n, pOut->z); } pOut->enc = SQLITE_UTF8; /* In case the blob is ever cast to text */ UPDATE_MAX_BLOBSIZE(pOut); REGISTER_TRACE(pOp->p2, pOut); break; } /* Opcode: Rowid P1 P2 * * * ** Synopsis: r[P2]=rowid ** ** Store in register P2 an integer which is the key of the table entry that ** P1 is currently point to. ** ** P1 can be either an ordinary table or a virtual table. There used to ** be a separate OP_VRowid opcode for use with virtual tables, but this ** one opcode now works for both table types. 
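**
** For the virtual table case the work is delegated to the module's xRowid
** method.  A typical implementation is tiny; in this illustrative sketch
** demoRowid and demo_cursor are invented names for a module whose cursor
** remembers its current position in an iRow field:
**
**     static int demoRowid(sqlite3_vtab_cursor *pCur, sqlite3_int64 *pRowid){
**       demo_cursor *pC = (demo_cursor*)pCur;
**       *pRowid = pC->iRow;
**       return SQLITE_OK;
**     }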
*/ case OP_Rowid: { /* out2 */ VdbeCursor *pC; i64 v; sqlite3_vtab *pVtab; const sqlite3_module *pModule; pOut = out2Prerelease(p, pOp); assert( pOp->p1>=0 && pOp->p1nCursor ); pC = p->apCsr[pOp->p1]; assert( pC!=0 ); assert( pC->pseudoTableReg==0 || pC->nullRow ); if( pC->nullRow ){ pOut->flags = MEM_Null; break; }else if( pC->deferredMoveto ){ v = pC->movetoTarget; #ifndef SQLITE_OMIT_VIRTUALTABLE }else if( pC->pVtabCursor ){ pVtab = pC->pVtabCursor->pVtab; pModule = pVtab->pModule; assert( pModule->xRowid ); rc = pModule->xRowid(pC->pVtabCursor, &v); sqlite3VtabImportErrmsg(p, pVtab); #endif /* SQLITE_OMIT_VIRTUALTABLE */ }else{ assert( pC->pCursor!=0 ); rc = sqlite3VdbeCursorRestore(pC); if( rc ) goto abort_due_to_error; if( pC->nullRow ){ pOut->flags = MEM_Null; break; } rc = sqlite3BtreeKeySize(pC->pCursor, &v); assert( rc==SQLITE_OK ); /* Always so because of CursorRestore() above */ } pOut->u.i = v; break; } /* Opcode: NullRow P1 * * * * ** ** Move the cursor P1 to a null row. Any OP_Column operations ** that occur while the cursor is on the null row will always ** write a NULL. */ case OP_NullRow: { VdbeCursor *pC; assert( pOp->p1>=0 && pOp->p1nCursor ); pC = p->apCsr[pOp->p1]; assert( pC!=0 ); pC->nullRow = 1; pC->cacheStatus = CACHE_STALE; if( pC->pCursor ){ sqlite3BtreeClearCursor(pC->pCursor); } break; } /* Opcode: Last P1 P2 P3 * * ** ** The next use of the Rowid or Column or Prev instruction for P1 ** will refer to the last entry in the database table or index. ** If the table or index is empty and P2>0, then jump immediately to P2. ** If P2 is 0 or if the table or index is not empty, fall through ** to the following instruction. ** ** This opcode leaves the cursor configured to move in reverse order, ** from the end toward the beginning. In other words, the cursor is ** configured to use Prev, not Next. */ case OP_Last: { /* jump */ VdbeCursor *pC; BtCursor *pCrsr; int res; assert( pOp->p1>=0 && pOp->p1nCursor ); pC = p->apCsr[pOp->p1]; assert( pC!=0 ); pCrsr = pC->pCursor; res = 0; assert( pCrsr!=0 ); rc = sqlite3BtreeLast(pCrsr, &res); pC->nullRow = (u8)res; pC->deferredMoveto = 0; pC->cacheStatus = CACHE_STALE; pC->seekResult = pOp->p3; #ifdef SQLITE_DEBUG pC->seekOp = OP_Last; #endif if( pOp->p2>0 ){ VdbeBranchTaken(res!=0,2); if( res ) goto jump_to_p2; } break; } /* Opcode: Sort P1 P2 * * * ** ** This opcode does exactly the same thing as OP_Rewind except that ** it increments an undocumented global variable used for testing. ** ** Sorting is accomplished by writing records into a sorting index, ** then rewinding that index and playing it back from beginning to ** end. We use the OP_Sort opcode instead of OP_Rewind to do the ** rewinding so that the global variable will be incremented and ** regression tests can determine whether or not the optimizer is ** correctly optimizing out sorts. */ case OP_SorterSort: /* jump */ case OP_Sort: { /* jump */ #ifdef SQLITE_TEST sqlite3_sort_count++; sqlite3_search_count--; #endif p->aCounter[SQLITE_STMTSTATUS_SORT]++; /* Fall through into OP_Rewind */ } /* Opcode: Rewind P1 P2 * * * ** ** The next use of the Rowid or Column or Next instruction for P1 ** will refer to the first entry in the database table or index. ** If the table or index is empty, jump immediately to P2. ** If the table or index is not empty, fall through to the following ** instruction. ** ** This opcode leaves the cursor configured to move in forward order, ** from the beginning toward the end. In other words, the cursor is ** configured to use Next, not Prev. 
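**
** Schematically, a full-table scan is a Rewind/Next loop.  The listing
** below is only an illustration (labels and register names are made up),
** but it shows how the two opcodes cooperate: Rewind jumps past the loop
** when the table is empty, and Next jumps back to the top of the loop body
** for as long as rows remain.
**
**       Rewind    C, END      (jump to END if the table is empty)
**     TOP:
**       Column    C, 0, r1    (read column 0 of the current row)
**       ResultRow r1, 1       (emit one row of output)
**       Next      C, TOP      (advance; loop while rows remain)
**     END: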
*/ case OP_Rewind: { /* jump */ VdbeCursor *pC; BtCursor *pCrsr; int res; assert( pOp->p1>=0 && pOp->p1nCursor ); pC = p->apCsr[pOp->p1]; assert( pC!=0 ); assert( isSorter(pC)==(pOp->opcode==OP_SorterSort) ); res = 1; #ifdef SQLITE_DEBUG pC->seekOp = OP_Rewind; #endif if( isSorter(pC) ){ rc = sqlite3VdbeSorterRewind(pC, &res); }else{ pCrsr = pC->pCursor; assert( pCrsr ); rc = sqlite3BtreeFirst(pCrsr, &res); pC->deferredMoveto = 0; pC->cacheStatus = CACHE_STALE; } pC->nullRow = (u8)res; assert( pOp->p2>0 && pOp->p2nOp ); VdbeBranchTaken(res!=0,2); if( res ) goto jump_to_p2; break; } /* Opcode: Next P1 P2 P3 P4 P5 ** ** Advance cursor P1 so that it points to the next key/data pair in its ** table or index. If there are no more key/value pairs then fall through ** to the following instruction. But if the cursor advance was successful, ** jump immediately to P2. ** ** The Next opcode is only valid following an SeekGT, SeekGE, or ** OP_Rewind opcode used to position the cursor. Next is not allowed ** to follow SeekLT, SeekLE, or OP_Last. ** ** The P1 cursor must be for a real table, not a pseudo-table. P1 must have ** been opened prior to this opcode or the program will segfault. ** ** The P3 value is a hint to the btree implementation. If P3==1, that ** means P1 is an SQL index and that this instruction could have been ** omitted if that index had been unique. P3 is usually 0. P3 is ** always either 0 or 1. ** ** P4 is always of type P4_ADVANCE. The function pointer points to ** sqlite3BtreeNext(). ** ** If P5 is positive and the jump is taken, then event counter ** number P5-1 in the prepared statement is incremented. ** ** See also: Prev, NextIfOpen */ /* Opcode: NextIfOpen P1 P2 P3 P4 P5 ** ** This opcode works just like Next except that if cursor P1 is not ** open it behaves a no-op. */ /* Opcode: Prev P1 P2 P3 P4 P5 ** ** Back up cursor P1 so that it points to the previous key/data pair in its ** table or index. If there is no previous key/value pairs then fall through ** to the following instruction. But if the cursor backup was successful, ** jump immediately to P2. ** ** ** The Prev opcode is only valid following an SeekLT, SeekLE, or ** OP_Last opcode used to position the cursor. Prev is not allowed ** to follow SeekGT, SeekGE, or OP_Rewind. ** ** The P1 cursor must be for a real table, not a pseudo-table. If P1 is ** not open then the behavior is undefined. ** ** The P3 value is a hint to the btree implementation. If P3==1, that ** means P1 is an SQL index and that this instruction could have been ** omitted if that index had been unique. P3 is usually 0. P3 is ** always either 0 or 1. ** ** P4 is always of type P4_ADVANCE. The function pointer points to ** sqlite3BtreePrevious(). ** ** If P5 is positive and the jump is taken, then event counter ** number P5-1 in the prepared statement is incremented. */ /* Opcode: PrevIfOpen P1 P2 P3 P4 P5 ** ** This opcode works just like Prev except that if cursor P1 is not ** open it behaves a no-op. 
*/ case OP_SorterNext: { /* jump */ VdbeCursor *pC; int res; pC = p->apCsr[pOp->p1]; assert( isSorter(pC) ); res = 0; rc = sqlite3VdbeSorterNext(db, pC, &res); goto next_tail; case OP_PrevIfOpen: /* jump */ case OP_NextIfOpen: /* jump */ if( p->apCsr[pOp->p1]==0 ) break; /* Fall through */ case OP_Prev: /* jump */ case OP_Next: /* jump */ assert( pOp->p1>=0 && pOp->p1nCursor ); assert( pOp->p5aCounter) ); pC = p->apCsr[pOp->p1]; res = pOp->p3; assert( pC!=0 ); assert( pC->deferredMoveto==0 ); assert( pC->pCursor ); assert( res==0 || (res==1 && pC->isTable==0) ); testcase( res==1 ); assert( pOp->opcode!=OP_Next || pOp->p4.xAdvance==sqlite3BtreeNext ); assert( pOp->opcode!=OP_Prev || pOp->p4.xAdvance==sqlite3BtreePrevious ); assert( pOp->opcode!=OP_NextIfOpen || pOp->p4.xAdvance==sqlite3BtreeNext ); assert( pOp->opcode!=OP_PrevIfOpen || pOp->p4.xAdvance==sqlite3BtreePrevious); /* The Next opcode is only used after SeekGT, SeekGE, and Rewind. ** The Prev opcode is only used after SeekLT, SeekLE, and Last. */ assert( pOp->opcode!=OP_Next || pOp->opcode!=OP_NextIfOpen || pC->seekOp==OP_SeekGT || pC->seekOp==OP_SeekGE || pC->seekOp==OP_Rewind || pC->seekOp==OP_Found); assert( pOp->opcode!=OP_Prev || pOp->opcode!=OP_PrevIfOpen || pC->seekOp==OP_SeekLT || pC->seekOp==OP_SeekLE || pC->seekOp==OP_Last ); rc = pOp->p4.xAdvance(pC->pCursor, &res); next_tail: pC->cacheStatus = CACHE_STALE; VdbeBranchTaken(res==0,2); if( res==0 ){ pC->nullRow = 0; p->aCounter[pOp->p5]++; #ifdef SQLITE_TEST sqlite3_search_count++; #endif goto jump_to_p2_and_check_for_interrupt; }else{ pC->nullRow = 1; } goto check_for_interrupt; } /* Opcode: IdxInsert P1 P2 P3 * P5 ** Synopsis: key=r[P2] ** ** Register P2 holds an SQL index key made using the ** MakeRecord instructions. This opcode writes that key ** into the index P1. Data for the entry is nil. ** ** P3 is a flag that provides a hint to the b-tree layer that this ** insert is likely to be an append. ** ** If P5 has the OPFLAG_NCHANGE bit set, then the change counter is ** incremented by this instruction. If the OPFLAG_NCHANGE bit is clear, ** then the change counter is unchanged. ** ** If P5 has the OPFLAG_USESEEKRESULT bit set, then the cursor must have ** just done a seek to the spot where the new entry is to be inserted. ** This flag avoids doing an extra seek. ** ** This instruction only works for indices. The equivalent instruction ** for tables is OP_Insert. */ case OP_SorterInsert: /* in2 */ case OP_IdxInsert: { /* in2 */ VdbeCursor *pC; int nKey; const char *zKey; assert( pOp->p1>=0 && pOp->p1nCursor ); pC = p->apCsr[pOp->p1]; assert( pC!=0 ); assert( isSorter(pC)==(pOp->opcode==OP_SorterInsert) ); pIn2 = &aMem[pOp->p2]; assert( pIn2->flags & MEM_Blob ); if( pOp->p5 & OPFLAG_NCHANGE ) p->nChange++; assert( pC->pCursor!=0 ); assert( pC->isTable==0 ); rc = ExpandBlob(pIn2); if( rc==SQLITE_OK ){ if( pOp->opcode==OP_SorterInsert ){ rc = sqlite3VdbeSorterWrite(pC, pIn2); }else{ nKey = pIn2->n; zKey = pIn2->z; rc = sqlite3BtreeInsert(pC->pCursor, zKey, nKey, "", 0, 0, pOp->p3, ((pOp->p5 & OPFLAG_USESEEKRESULT) ? pC->seekResult : 0) ); assert( pC->deferredMoveto==0 ); pC->cacheStatus = CACHE_STALE; } } break; } /* Opcode: IdxDelete P1 P2 P3 * * ** Synopsis: key=r[P2@P3] ** ** The content of P3 registers starting at register P2 form ** an unpacked index key. This opcode removes that entry from the ** index opened by cursor P1. 
*/ case OP_IdxDelete: { VdbeCursor *pC; BtCursor *pCrsr; int res; UnpackedRecord r; assert( pOp->p3>0 ); assert( pOp->p2>0 && pOp->p2+pOp->p3<=(p->nMem-p->nCursor)+1 ); assert( pOp->p1>=0 && pOp->p1nCursor ); pC = p->apCsr[pOp->p1]; assert( pC!=0 ); pCrsr = pC->pCursor; assert( pCrsr!=0 ); assert( pOp->p5==0 ); r.pKeyInfo = pC->pKeyInfo; r.nField = (u16)pOp->p3; r.default_rc = 0; r.aMem = &aMem[pOp->p2]; #ifdef SQLITE_DEBUG { int i; for(i=0; ideferredMoveto==0 ); pC->cacheStatus = CACHE_STALE; break; } /* Opcode: IdxRowid P1 P2 * * * ** Synopsis: r[P2]=rowid ** ** Write into register P2 an integer which is the last entry in the record at ** the end of the index key pointed to by cursor P1. This integer should be ** the rowid of the table entry to which this index entry points. ** ** See also: Rowid, MakeRecord. */ case OP_IdxRowid: { /* out2 */ BtCursor *pCrsr; VdbeCursor *pC; i64 rowid; pOut = out2Prerelease(p, pOp); assert( pOp->p1>=0 && pOp->p1nCursor ); pC = p->apCsr[pOp->p1]; assert( pC!=0 ); pCrsr = pC->pCursor; assert( pCrsr!=0 ); pOut->flags = MEM_Null; assert( pC->isTable==0 ); assert( pC->deferredMoveto==0 ); /* sqlite3VbeCursorRestore() can only fail if the record has been deleted ** out from under the cursor. That will never happend for an IdxRowid ** opcode, hence the NEVER() arround the check of the return value. */ rc = sqlite3VdbeCursorRestore(pC); if( NEVER(rc!=SQLITE_OK) ) goto abort_due_to_error; if( !pC->nullRow ){ rowid = 0; /* Not needed. Only used to silence a warning. */ rc = sqlite3VdbeIdxRowid(db, pCrsr, &rowid); if( rc!=SQLITE_OK ){ goto abort_due_to_error; } pOut->u.i = rowid; pOut->flags = MEM_Int; } break; } /* Opcode: IdxGE P1 P2 P3 P4 P5 ** Synopsis: key=r[P3@P4] ** ** The P4 register values beginning with P3 form an unpacked index ** key that omits the PRIMARY KEY. Compare this key value against the index ** that P1 is currently pointing to, ignoring the PRIMARY KEY or ROWID ** fields at the end. ** ** If the P1 index entry is greater than or equal to the key value ** then jump to P2. Otherwise fall through to the next instruction. */ /* Opcode: IdxGT P1 P2 P3 P4 P5 ** Synopsis: key=r[P3@P4] ** ** The P4 register values beginning with P3 form an unpacked index ** key that omits the PRIMARY KEY. Compare this key value against the index ** that P1 is currently pointing to, ignoring the PRIMARY KEY or ROWID ** fields at the end. ** ** If the P1 index entry is greater than the key value ** then jump to P2. Otherwise fall through to the next instruction. */ /* Opcode: IdxLT P1 P2 P3 P4 P5 ** Synopsis: key=r[P3@P4] ** ** The P4 register values beginning with P3 form an unpacked index ** key that omits the PRIMARY KEY or ROWID. Compare this key value against ** the index that P1 is currently pointing to, ignoring the PRIMARY KEY or ** ROWID on the P1 index. ** ** If the P1 index entry is less than the key value then jump to P2. ** Otherwise fall through to the next instruction. */ /* Opcode: IdxLE P1 P2 P3 P4 P5 ** Synopsis: key=r[P3@P4] ** ** The P4 register values beginning with P3 form an unpacked index ** key that omits the PRIMARY KEY or ROWID. Compare this key value against ** the index that P1 is currently pointing to, ignoring the PRIMARY KEY or ** ROWID on the P1 index. ** ** If the P1 index entry is less than or equal to the key value then jump ** to P2. Otherwise fall through to the next instruction. 
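**
** As a summary of how the code below maps the four variants onto a single
** key comparison (default_rc is the value the comparison yields when all of
** the compared fields are equal):
**
**     opcode    default_rc for ties    jump to P2 when the P1 entry is
**     IdxGE            0                 >= the key
**     IdxGT           -1                 >  the key
**     IdxLT            0                 <  the key
**     IdxLE           -1                 <= the key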
*/ case OP_IdxLE: /* jump */ case OP_IdxGT: /* jump */ case OP_IdxLT: /* jump */ case OP_IdxGE: { /* jump */ VdbeCursor *pC; int res; UnpackedRecord r; assert( pOp->p1>=0 && pOp->p1nCursor ); pC = p->apCsr[pOp->p1]; assert( pC!=0 ); assert( pC->isOrdered ); assert( pC->pCursor!=0); assert( pC->deferredMoveto==0 ); assert( pOp->p5==0 || pOp->p5==1 ); assert( pOp->p4type==P4_INT32 ); r.pKeyInfo = pC->pKeyInfo; r.nField = (u16)pOp->p4.i; if( pOp->opcodeopcode==OP_IdxLE || pOp->opcode==OP_IdxGT ); r.default_rc = -1; }else{ assert( pOp->opcode==OP_IdxGE || pOp->opcode==OP_IdxLT ); r.default_rc = 0; } r.aMem = &aMem[pOp->p3]; #ifdef SQLITE_DEBUG { int i; for(i=0; iopcode&1)==(OP_IdxLT&1) ){ assert( pOp->opcode==OP_IdxLE || pOp->opcode==OP_IdxLT ); res = -res; }else{ assert( pOp->opcode==OP_IdxGE || pOp->opcode==OP_IdxGT ); res++; } VdbeBranchTaken(res>0,2); if( res>0 ) goto jump_to_p2; break; } /* Opcode: Destroy P1 P2 P3 * * ** ** Delete an entire database table or index whose root page in the database ** file is given by P1. ** ** The table being destroyed is in the main database file if P3==0. If ** P3==1 then the table to be clear is in the auxiliary database file ** that is used to store tables create using CREATE TEMPORARY TABLE. ** ** If AUTOVACUUM is enabled then it is possible that another root page ** might be moved into the newly deleted root page in order to keep all ** root pages contiguous at the beginning of the database. The former ** value of the root page that moved - its value before the move occurred - ** is stored in register P2. If no page ** movement was required (because the table being dropped was already ** the last one in the database) then a zero is stored in register P2. ** If AUTOVACUUM is disabled then a zero is stored in register P2. ** ** See also: Clear */ case OP_Destroy: { /* out2 */ int iMoved; int iDb; assert( p->readOnly==0 ); pOut = out2Prerelease(p, pOp); pOut->flags = MEM_Null; if( db->nVdbeRead > db->nVDestroy+1 ){ rc = SQLITE_LOCKED; p->errorAction = OE_Abort; }else{ iDb = pOp->p3; assert( DbMaskTest(p->btreeMask, iDb) ); iMoved = 0; /* Not needed. Only to silence a warning. */ rc = sqlite3BtreeDropTable(db->aDb[iDb].pBt, pOp->p1, &iMoved); pOut->flags = MEM_Int; pOut->u.i = iMoved; #ifndef SQLITE_OMIT_AUTOVACUUM if( rc==SQLITE_OK && iMoved!=0 ){ sqlite3RootPageMoved(db, iDb, iMoved, pOp->p1); /* All OP_Destroy operations occur on the same btree */ assert( resetSchemaOnFault==0 || resetSchemaOnFault==iDb+1 ); resetSchemaOnFault = iDb+1; } #endif } break; } /* Opcode: Clear P1 P2 P3 ** ** Delete all contents of the database table or index whose root page ** in the database file is given by P1. But, unlike Destroy, do not ** remove the table or index from the database file. ** ** The table being clear is in the main database file if P2==0. If ** P2==1 then the table to be clear is in the auxiliary database file ** that is used to store tables create using CREATE TEMPORARY TABLE. ** ** If the P3 value is non-zero, then the table referred to must be an ** intkey table (an SQL table, not an index). In this case the row change ** count is incremented by the number of rows in the table being cleared. ** If P3 is greater than zero, then the value stored in register P3 is ** also incremented by the number of rows in the table being cleared. ** ** See also: Destroy */ case OP_Clear: { int nChange; nChange = 0; assert( p->readOnly==0 ); assert( DbMaskTest(p->btreeMask, pOp->p2) ); rc = sqlite3BtreeClearTable( db->aDb[pOp->p2].pBt, pOp->p1, (pOp->p3 ? 
&nChange : 0) ); if( pOp->p3 ){ p->nChange += nChange; if( pOp->p3>0 ){ assert( memIsValid(&aMem[pOp->p3]) ); memAboutToChange(p, &aMem[pOp->p3]); aMem[pOp->p3].u.i += nChange; } } break; } /* Opcode: ResetSorter P1 * * * * ** ** Delete all contents from the ephemeral table or sorter ** that is open on cursor P1. ** ** This opcode only works for cursors used for sorting and ** opened with OP_OpenEphemeral or OP_SorterOpen. */ case OP_ResetSorter: { VdbeCursor *pC; assert( pOp->p1>=0 && pOp->p1nCursor ); pC = p->apCsr[pOp->p1]; assert( pC!=0 ); if( pC->pSorter ){ sqlite3VdbeSorterReset(db, pC->pSorter); }else{ assert( pC->isEphemeral ); rc = sqlite3BtreeClearTableOfCursor(pC->pCursor); } break; } /* Opcode: CreateTable P1 P2 * * * ** Synopsis: r[P2]=root iDb=P1 ** ** Allocate a new table in the main database file if P1==0 or in the ** auxiliary database file if P1==1 or in an attached database if ** P1>1. Write the root page number of the new table into ** register P2 ** ** The difference between a table and an index is this: A table must ** have a 4-byte integer key and can have arbitrary data. An index ** has an arbitrary key but no data. ** ** See also: CreateIndex */ /* Opcode: CreateIndex P1 P2 * * * ** Synopsis: r[P2]=root iDb=P1 ** ** Allocate a new index in the main database file if P1==0 or in the ** auxiliary database file if P1==1 or in an attached database if ** P1>1. Write the root page number of the new table into ** register P2. ** ** See documentation on OP_CreateTable for additional information. */ case OP_CreateIndex: /* out2 */ case OP_CreateTable: { /* out2 */ int pgno; int flags; Db *pDb; pOut = out2Prerelease(p, pOp); pgno = 0; assert( pOp->p1>=0 && pOp->p1nDb ); assert( DbMaskTest(p->btreeMask, pOp->p1) ); assert( p->readOnly==0 ); pDb = &db->aDb[pOp->p1]; assert( pDb->pBt!=0 ); if( pOp->opcode==OP_CreateTable ){ /* flags = BTREE_INTKEY; */ flags = BTREE_INTKEY; }else{ flags = BTREE_BLOBKEY; } rc = sqlite3BtreeCreateTable(pDb->pBt, &pgno, flags); pOut->u.i = pgno; break; } /* Opcode: ParseSchema P1 * * P4 * ** ** Read and parse all entries from the SQLITE_MASTER table of database P1 ** that match the WHERE clause P4. ** ** This opcode invokes the parser to create a new virtual machine, ** then runs the new virtual machine. It is thus a re-entrant opcode. */ case OP_ParseSchema: { int iDb; const char *zMaster; char *zSql; InitData initData; /* Any prepared statement that invokes this opcode will hold mutexes ** on every btree. This is a prerequisite for invoking ** sqlite3InitCallback(). 
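**
** The statement built and run below always has the same shape.  For the
** main database, and with an illustrative WHERE clause supplied in P4, it
** comes out as something like:
**
**     SELECT name, rootpage, sql FROM 'main'.sqlite_master
**      WHERE tbl_name='t1' ORDER BY rowid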
*/ #ifdef SQLITE_DEBUG for(iDb=0; iDbnDb; iDb++){ assert( iDb==1 || sqlite3BtreeHoldsMutex(db->aDb[iDb].pBt) ); } #endif iDb = pOp->p1; assert( iDb>=0 && iDbnDb ); assert( DbHasProperty(db, iDb, DB_SchemaLoaded) ); /* Used to be a conditional */ { zMaster = SCHEMA_TABLE(iDb); initData.db = db; initData.iDb = pOp->p1; initData.pzErrMsg = &p->zErrMsg; zSql = sqlite3MPrintf(db, "SELECT name, rootpage, sql FROM '%q'.%s WHERE %s ORDER BY rowid", db->aDb[iDb].zName, zMaster, pOp->p4.z); if( zSql==0 ){ rc = SQLITE_NOMEM; }else{ assert( db->init.busy==0 ); db->init.busy = 1; initData.rc = SQLITE_OK; assert( !db->mallocFailed ); rc = sqlite3_exec(db, zSql, sqlite3InitCallback, &initData, 0); if( rc==SQLITE_OK ) rc = initData.rc; sqlite3DbFree(db, zSql); db->init.busy = 0; } } if( rc ) sqlite3ResetAllSchemasOfConnection(db); if( rc==SQLITE_NOMEM ){ goto no_mem; } break; } #if !defined(SQLITE_OMIT_ANALYZE) /* Opcode: LoadAnalysis P1 * * * * ** ** Read the sqlite_stat1 table for database P1 and load the content ** of that table into the internal index hash table. This will cause ** the analysis to be used when preparing all subsequent queries. */ case OP_LoadAnalysis: { assert( pOp->p1>=0 && pOp->p1nDb ); rc = sqlite3AnalysisLoad(db, pOp->p1); break; } #endif /* !defined(SQLITE_OMIT_ANALYZE) */ /* Opcode: DropTable P1 * * P4 * ** ** Remove the internal (in-memory) data structures that describe ** the table named P4 in database P1. This is called after a table ** is dropped from disk (using the Destroy opcode) in order to keep ** the internal representation of the ** schema consistent with what is on disk. */ case OP_DropTable: { sqlite3UnlinkAndDeleteTable(db, pOp->p1, pOp->p4.z); break; } /* Opcode: DropIndex P1 * * P4 * ** ** Remove the internal (in-memory) data structures that describe ** the index named P4 in database P1. This is called after an index ** is dropped from disk (using the Destroy opcode) ** in order to keep the internal representation of the ** schema consistent with what is on disk. */ case OP_DropIndex: { sqlite3UnlinkAndDeleteIndex(db, pOp->p1, pOp->p4.z); break; } /* Opcode: DropTrigger P1 * * P4 * ** ** Remove the internal (in-memory) data structures that describe ** the trigger named P4 in database P1. This is called after a trigger ** is dropped from disk (using the Destroy opcode) in order to keep ** the internal representation of the ** schema consistent with what is on disk. */ case OP_DropTrigger: { sqlite3UnlinkAndDeleteTrigger(db, pOp->p1, pOp->p4.z); break; } #ifndef SQLITE_OMIT_INTEGRITY_CHECK /* Opcode: IntegrityCk P1 P2 P3 * P5 ** ** Do an analysis of the currently open database. Store in ** register P1 the text of an error message describing any problems. ** If no problems are found, store a NULL in register P1. ** ** The register P3 contains the maximum number of allowed errors. ** At most reg(P3) errors will be reported. ** In other words, the analysis stops as soon as reg(P1) errors are ** seen. Reg(P1) is updated with the number of errors remaining. ** ** The root page numbers of all tables in the database are integer ** stored in reg(P1), reg(P1+1), reg(P1+2), .... There are P2 tables ** total. ** ** If P5 is not zero, the check is done on the auxiliary database ** file, not the main database file. ** ** This opcode is used to implement the integrity_check pragma. */ case OP_IntegrityCk: { int nRoot; /* Number of tables to check. (Number of root pages.) 
*/ int *aRoot; /* Array of rootpage numbers for tables to be checked */ int j; /* Loop counter */ int nErr; /* Number of errors reported */ char *z; /* Text of the error report */ Mem *pnErr; /* Register keeping track of errors remaining */ assert( p->bIsReader ); nRoot = pOp->p2; assert( nRoot>0 ); aRoot = sqlite3DbMallocRaw(db, sizeof(int)*(nRoot+1) ); if( aRoot==0 ) goto no_mem; assert( pOp->p3>0 && pOp->p3<=(p->nMem-p->nCursor) ); pnErr = &aMem[pOp->p3]; assert( (pnErr->flags & MEM_Int)!=0 ); assert( (pnErr->flags & (MEM_Str|MEM_Blob))==0 ); pIn1 = &aMem[pOp->p1]; for(j=0; jp5nDb ); assert( DbMaskTest(p->btreeMask, pOp->p5) ); z = sqlite3BtreeIntegrityCheck(db->aDb[pOp->p5].pBt, aRoot, nRoot, (int)pnErr->u.i, &nErr); sqlite3DbFree(db, aRoot); pnErr->u.i -= nErr; sqlite3VdbeMemSetNull(pIn1); if( nErr==0 ){ assert( z==0 ); }else if( z==0 ){ goto no_mem; }else{ sqlite3VdbeMemSetStr(pIn1, z, -1, SQLITE_UTF8, sqlite3_free); } UPDATE_MAX_BLOBSIZE(pIn1); sqlite3VdbeChangeEncoding(pIn1, encoding); break; } #endif /* SQLITE_OMIT_INTEGRITY_CHECK */ /* Opcode: RowSetAdd P1 P2 * * * ** Synopsis: rowset(P1)=r[P2] ** ** Insert the integer value held by register P2 into a boolean index ** held in register P1. ** ** An assertion fails if P2 is not an integer. */ case OP_RowSetAdd: { /* in1, in2 */ pIn1 = &aMem[pOp->p1]; pIn2 = &aMem[pOp->p2]; assert( (pIn2->flags & MEM_Int)!=0 ); if( (pIn1->flags & MEM_RowSet)==0 ){ sqlite3VdbeMemSetRowSet(pIn1); if( (pIn1->flags & MEM_RowSet)==0 ) goto no_mem; } sqlite3RowSetInsert(pIn1->u.pRowSet, pIn2->u.i); break; } /* Opcode: RowSetRead P1 P2 P3 * * ** Synopsis: r[P3]=rowset(P1) ** ** Extract the smallest value from boolean index P1 and put that value into ** register P3. Or, if boolean index P1 is initially empty, leave P3 ** unchanged and jump to instruction P2. */ case OP_RowSetRead: { /* jump, in1, out3 */ i64 val; pIn1 = &aMem[pOp->p1]; if( (pIn1->flags & MEM_RowSet)==0 || sqlite3RowSetNext(pIn1->u.pRowSet, &val)==0 ){ /* The boolean index is empty */ sqlite3VdbeMemSetNull(pIn1); VdbeBranchTaken(1,2); goto jump_to_p2_and_check_for_interrupt; }else{ /* A value was pulled from the index */ VdbeBranchTaken(0,2); sqlite3VdbeMemSetInt64(&aMem[pOp->p3], val); } goto check_for_interrupt; } /* Opcode: RowSetTest P1 P2 P3 P4 ** Synopsis: if r[P3] in rowset(P1) goto P2 ** ** Register P3 is assumed to hold a 64-bit integer value. If register P1 ** contains a RowSet object and that RowSet object contains ** the value held in P3, jump to register P2. Otherwise, insert the ** integer in P3 into the RowSet and continue on to the ** next opcode. ** ** The RowSet object is optimized for the case where successive sets ** of integers, where each set contains no duplicates. Each set ** of values is identified by a unique P4 value. The first set ** must have P4==0, the final set P4=-1. P4 must be either -1 or ** non-negative. For non-negative values of P4 only the lower 4 ** bits are significant. ** ** This allows optimizations: (a) when P4==0 there is no need to test ** the rowset object for P3, as it is guaranteed not to contain it, ** (b) when P4==-1 there is no need to insert the value, as it will ** never be tested for, and (c) when a value that is part of set X is ** inserted, there is no need to search to see if the same value was ** previously inserted as part of set X (only if it was previously ** inserted as part of some other set). 
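**
** One way to picture the set numbers: for a query along the lines of
**
**     SELECT * FROM t WHERE a=5 OR b=7;
**
** answered using separate indexes on a and on b, the rowids found through
** the first index can be inserted as set 0 and the rowids found through the
** second index tested (and inserted) as set 1, so that a row matching both
** terms is returned only once.  This is only an illustration of the batching
** described above, not a description of any particular code path.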
*/ case OP_RowSetTest: { /* jump, in1, in3 */ int iSet; int exists; pIn1 = &aMem[pOp->p1]; pIn3 = &aMem[pOp->p3]; iSet = pOp->p4.i; assert( pIn3->flags&MEM_Int ); /* If there is anything other than a rowset object in memory cell P1, ** delete it now and initialize P1 with an empty rowset */ if( (pIn1->flags & MEM_RowSet)==0 ){ sqlite3VdbeMemSetRowSet(pIn1); if( (pIn1->flags & MEM_RowSet)==0 ) goto no_mem; } assert( pOp->p4type==P4_INT32 ); assert( iSet==-1 || iSet>=0 ); if( iSet ){ exists = sqlite3RowSetTest(pIn1->u.pRowSet, iSet, pIn3->u.i); VdbeBranchTaken(exists!=0,2); if( exists ) goto jump_to_p2; } if( iSet>=0 ){ sqlite3RowSetInsert(pIn1->u.pRowSet, pIn3->u.i); } break; } #ifndef SQLITE_OMIT_TRIGGER /* Opcode: Program P1 P2 P3 P4 P5 ** ** Execute the trigger program passed as P4 (type P4_SUBPROGRAM). ** ** P1 contains the address of the memory cell that contains the first memory ** cell in an array of values used as arguments to the sub-program. P2 ** contains the address to jump to if the sub-program throws an IGNORE ** exception using the RAISE() function. Register P3 contains the address ** of a memory cell in this (the parent) VM that is used to allocate the ** memory required by the sub-vdbe at runtime. ** ** P4 is a pointer to the VM containing the trigger program. ** ** If P5 is non-zero, then recursive program invocation is enabled. */ case OP_Program: { /* jump */ int nMem; /* Number of memory registers for sub-program */ int nByte; /* Bytes of runtime space required for sub-program */ Mem *pRt; /* Register to allocate runtime space */ Mem *pMem; /* Used to iterate through memory cells */ Mem *pEnd; /* Last memory cell in new array */ VdbeFrame *pFrame; /* New vdbe frame to execute in */ SubProgram *pProgram; /* Sub-program to execute */ void *t; /* Token identifying trigger */ pProgram = pOp->p4.pProgram; pRt = &aMem[pOp->p3]; assert( pProgram->nOp>0 ); /* If the p5 flag is clear, then recursive invocation of triggers is ** disabled for backwards compatibility (p5 is set if this sub-program ** is really a trigger, not a foreign key action, and the flag set ** and cleared by the "PRAGMA recursive_triggers" command is clear). ** ** It is recursive invocation of triggers, at the SQL level, that is ** disabled. In some cases a single trigger may generate more than one ** SubProgram (if the trigger may be executed with more than one different ** ON CONFLICT algorithm). SubProgram structures associated with a ** single trigger all have the same value for the SubProgram.token ** variable. */ if( pOp->p5 ){ t = pProgram->token; for(pFrame=p->pFrame; pFrame && pFrame->token!=t; pFrame=pFrame->pParent); if( pFrame ) break; } if( p->nFrame>=db->aLimit[SQLITE_LIMIT_TRIGGER_DEPTH] ){ rc = SQLITE_ERROR; sqlite3VdbeError(p, "too many levels of trigger recursion"); break; } /* Register pRt is used to store the memory required to save the state ** of the current program, and the memory required at runtime to execute ** the trigger program. If this trigger has been fired before, then pRt ** is already allocated. Otherwise, it must be initialized. */ if( (pRt->flags&MEM_Frame)==0 ){ /* SubProgram.nMem is set to the number of memory cells used by the ** program stored in SubProgram.aOp. As well as these, one memory ** cell is required for each cursor used by the program. Set local ** variable nMem (and later, VdbeFrame.nChildMem) to this value. 
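**
** The single allocation made just below is laid out, in order, as:
**
**     ROUND8(sizeof(VdbeFrame))      the frame object itself
**     nMem  * sizeof(Mem)            registers for the sub-program
**     nCsr  * sizeof(VdbeCursor*)    the sub-program's cursor array
**     nOnce * sizeof(u8)             its aOnceFlag array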
*/ nMem = pProgram->nMem + pProgram->nCsr; nByte = ROUND8(sizeof(VdbeFrame)) + nMem * sizeof(Mem) + pProgram->nCsr * sizeof(VdbeCursor *) + pProgram->nOnce * sizeof(u8); pFrame = sqlite3DbMallocZero(db, nByte); if( !pFrame ){ goto no_mem; } sqlite3VdbeMemRelease(pRt); pRt->flags = MEM_Frame; pRt->u.pFrame = pFrame; pFrame->v = p; pFrame->nChildMem = nMem; pFrame->nChildCsr = pProgram->nCsr; pFrame->pc = (int)(pOp - aOp); pFrame->aMem = p->aMem; pFrame->nMem = p->nMem; pFrame->apCsr = p->apCsr; pFrame->nCursor = p->nCursor; pFrame->aOp = p->aOp; pFrame->nOp = p->nOp; pFrame->token = pProgram->token; pFrame->aOnceFlag = p->aOnceFlag; pFrame->nOnceFlag = p->nOnceFlag; #ifdef SQLITE_ENABLE_STMT_SCANSTATUS pFrame->anExec = p->anExec; #endif pEnd = &VdbeFrameMem(pFrame)[pFrame->nChildMem]; for(pMem=VdbeFrameMem(pFrame); pMem!=pEnd; pMem++){ pMem->flags = MEM_Undefined; pMem->db = db; } }else{ pFrame = pRt->u.pFrame; assert( pProgram->nMem+pProgram->nCsr==pFrame->nChildMem ); assert( pProgram->nCsr==pFrame->nChildCsr ); assert( (int)(pOp - aOp)==pFrame->pc ); } p->nFrame++; pFrame->pParent = p->pFrame; pFrame->lastRowid = lastRowid; pFrame->nChange = p->nChange; pFrame->nDbChange = p->db->nChange; p->nChange = 0; p->pFrame = pFrame; p->aMem = aMem = &VdbeFrameMem(pFrame)[-1]; p->nMem = pFrame->nChildMem; p->nCursor = (u16)pFrame->nChildCsr; p->apCsr = (VdbeCursor **)&aMem[p->nMem+1]; p->aOp = aOp = pProgram->aOp; p->nOp = pProgram->nOp; p->aOnceFlag = (u8 *)&p->apCsr[p->nCursor]; p->nOnceFlag = pProgram->nOnce; #ifdef SQLITE_ENABLE_STMT_SCANSTATUS p->anExec = 0; #endif pOp = &aOp[-1]; memset(p->aOnceFlag, 0, p->nOnceFlag); break; } /* Opcode: Param P1 P2 * * * ** ** This opcode is only ever present in sub-programs called via the ** OP_Program instruction. Copy a value currently stored in a memory ** cell of the calling (parent) frame to cell P2 in the current frames ** address space. This is used by trigger programs to access the new.* ** and old.* values. ** ** The address of the cell in the parent frame is determined by adding ** the value of the P1 argument to the value of the P1 argument to the ** calling OP_Program instruction. */ case OP_Param: { /* out2 */ VdbeFrame *pFrame; Mem *pIn; pOut = out2Prerelease(p, pOp); pFrame = p->pFrame; pIn = &pFrame->aMem[pOp->p1 + pFrame->aOp[pFrame->pc].p1]; sqlite3VdbeMemShallowCopy(pOut, pIn, MEM_Ephem); break; } #endif /* #ifndef SQLITE_OMIT_TRIGGER */ #ifndef SQLITE_OMIT_FOREIGN_KEY /* Opcode: FkCounter P1 P2 * * * ** Synopsis: fkctr[P1]+=P2 ** ** Increment a "constraint counter" by P2 (P2 may be negative or positive). ** If P1 is non-zero, the database constraint counter is incremented ** (deferred foreign key constraints). Otherwise, if P1 is zero, the ** statement counter is incremented (immediate foreign key constraints). */ case OP_FkCounter: { if( db->flags & SQLITE_DeferFKs ){ db->nDeferredImmCons += pOp->p2; }else if( pOp->p1 ){ db->nDeferredCons += pOp->p2; }else{ p->nFkConstraint += pOp->p2; } break; } /* Opcode: FkIfZero P1 P2 * * * ** Synopsis: if fkctr[P1]==0 goto P2 ** ** This opcode tests if a foreign key constraint-counter is currently zero. ** If so, jump to instruction P2. Otherwise, fall through to the next ** instruction. ** ** If P1 is non-zero, then the jump is taken if the database constraint-counter ** is zero (the one that counts deferred constraint violations). If P1 is ** zero, the jump is taken if the statement constraint-counter is zero ** (immediate foreign key constraint violations). 
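**
** An illustrative SQL sequence that exercises the deferred counter checked
** here (assuming PRAGMA foreign_keys=ON):
**
**     CREATE TABLE parent(x PRIMARY KEY);
**     CREATE TABLE child(y REFERENCES parent(x) DEFERRABLE INITIALLY DEFERRED);
**     BEGIN;
**       INSERT INTO child VALUES(1);    -- deferred counter becomes 1
**       INSERT INTO parent VALUES(1);   -- counter drops back to 0
**     COMMIT;                           -- succeeds because the counter is 0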
*/ case OP_FkIfZero: { /* jump */ if( pOp->p1 ){ VdbeBranchTaken(db->nDeferredCons==0 && db->nDeferredImmCons==0, 2); if( db->nDeferredCons==0 && db->nDeferredImmCons==0 ) goto jump_to_p2; }else{ VdbeBranchTaken(p->nFkConstraint==0 && db->nDeferredImmCons==0, 2); if( p->nFkConstraint==0 && db->nDeferredImmCons==0 ) goto jump_to_p2; } break; } #endif /* #ifndef SQLITE_OMIT_FOREIGN_KEY */ #ifndef SQLITE_OMIT_AUTOINCREMENT /* Opcode: MemMax P1 P2 * * * ** Synopsis: r[P1]=max(r[P1],r[P2]) ** ** P1 is a register in the root frame of this VM (the root frame is ** different from the current frame if this instruction is being executed ** within a sub-program). Set the value of register P1 to the maximum of ** its current value and the value in register P2. ** ** This instruction throws an error if the memory cell is not initially ** an integer. */ case OP_MemMax: { /* in2 */ VdbeFrame *pFrame; if( p->pFrame ){ for(pFrame=p->pFrame; pFrame->pParent; pFrame=pFrame->pParent); pIn1 = &pFrame->aMem[pOp->p1]; }else{ pIn1 = &aMem[pOp->p1]; } assert( memIsValid(pIn1) ); sqlite3VdbeMemIntegerify(pIn1); pIn2 = &aMem[pOp->p2]; sqlite3VdbeMemIntegerify(pIn2); if( pIn1->u.iu.i){ pIn1->u.i = pIn2->u.i; } break; } #endif /* SQLITE_OMIT_AUTOINCREMENT */ /* Opcode: IfPos P1 P2 * * * ** Synopsis: if r[P1]>0 goto P2 ** ** Register P1 must contain an integer. ** If the value of register P1 is 1 or greater, jump to P2 and ** add the literal value P3 to register P1. ** ** If the initial value of register P1 is less than 1, then the ** value is unchanged and control passes through to the next instruction. */ case OP_IfPos: { /* jump, in1 */ pIn1 = &aMem[pOp->p1]; assert( pIn1->flags&MEM_Int ); VdbeBranchTaken( pIn1->u.i>0, 2); if( pIn1->u.i>0 ) goto jump_to_p2; break; } /* Opcode: IfNeg P1 P2 P3 * * ** Synopsis: r[P1]+=P3, if r[P1]<0 goto P2 ** ** Register P1 must contain an integer. Add literal P3 to the value in ** register P1 then if the value of register P1 is less than zero, jump to P2. */ case OP_IfNeg: { /* jump, in1 */ pIn1 = &aMem[pOp->p1]; assert( pIn1->flags&MEM_Int ); pIn1->u.i += pOp->p3; VdbeBranchTaken(pIn1->u.i<0, 2); if( pIn1->u.i<0 ) goto jump_to_p2; break; } /* Opcode: IfNotZero P1 P2 P3 * * ** Synopsis: if r[P1]!=0 then r[P1]+=P3, goto P2 ** ** Register P1 must contain an integer. If the content of register P1 is ** initially nonzero, then add P3 to P1 and jump to P2. If register P1 is ** initially zero, leave it unchanged and fall through. */ case OP_IfNotZero: { /* jump, in1 */ pIn1 = &aMem[pOp->p1]; assert( pIn1->flags&MEM_Int ); VdbeBranchTaken(pIn1->u.i<0, 2); if( pIn1->u.i ){ pIn1->u.i += pOp->p3; goto jump_to_p2; } break; } /* Opcode: DecrJumpZero P1 P2 * * * ** Synopsis: if (--r[P1])==0 goto P2 ** ** Register P1 must hold an integer. Decrement the value in register P1 ** then jump to P2 if the new value is exactly zero. */ case OP_DecrJumpZero: { /* jump, in1 */ pIn1 = &aMem[pOp->p1]; assert( pIn1->flags&MEM_Int ); pIn1->u.i--; VdbeBranchTaken(pIn1->u.i==0, 2); if( pIn1->u.i==0 ) goto jump_to_p2; break; } /* Opcode: JumpZeroIncr P1 P2 * * * ** Synopsis: if (r[P1]++)==0 ) goto P2 ** ** The register P1 must contain an integer. If register P1 is initially ** zero, then jump to P2. Increment register P1 regardless of whether or ** not the jump is taken. 
*/ case OP_JumpZeroIncr: { /* jump, in1 */ pIn1 = &aMem[pOp->p1]; assert( pIn1->flags&MEM_Int ); VdbeBranchTaken(pIn1->u.i==0, 2); if( (pIn1->u.i++)==0 ) goto jump_to_p2; break; } /* Opcode: AggStep0 * P2 P3 P4 P5 ** Synopsis: accum=r[P3] step(r[P2@P5]) ** ** Execute the step function for an aggregate. The ** function has P5 arguments. P4 is a pointer to the FuncDef ** structure that specifies the function. Register P3 is the ** accumulator. ** ** The P5 arguments are taken from register P2 and its ** successors. */ /* Opcode: AggStep * P2 P3 P4 P5 ** Synopsis: accum=r[P3] step(r[P2@P5]) ** ** Execute the step function for an aggregate. The ** function has P5 arguments. P4 is a pointer to an sqlite3_context ** object that is used to run the function. Register P3 is ** as the accumulator. ** ** The P5 arguments are taken from register P2 and its ** successors. ** ** This opcode is initially coded as OP_AggStep0. On first evaluation, ** the FuncDef stored in P4 is converted into an sqlite3_context and ** the opcode is changed. In this way, the initialization of the ** sqlite3_context only happens once, instead of on each call to the ** step function. */ case OP_AggStep0: { int n; sqlite3_context *pCtx; assert( pOp->p4type==P4_FUNCDEF ); n = pOp->p5; assert( pOp->p3>0 && pOp->p3<=(p->nMem-p->nCursor) ); assert( n==0 || (pOp->p2>0 && pOp->p2+n<=(p->nMem-p->nCursor)+1) ); assert( pOp->p3p2 || pOp->p3>=pOp->p2+n ); pCtx = sqlite3DbMallocRaw(db, sizeof(*pCtx) + (n-1)*sizeof(sqlite3_value*)); if( pCtx==0 ) goto no_mem; pCtx->pMem = 0; pCtx->pFunc = pOp->p4.pFunc; pCtx->iOp = (int)(pOp - aOp); pCtx->pVdbe = p; pCtx->argc = n; pOp->p4type = P4_FUNCCTX; pOp->p4.pCtx = pCtx; pOp->opcode = OP_AggStep; /* Fall through into OP_AggStep */ } case OP_AggStep: { int i; sqlite3_context *pCtx; Mem *pMem; Mem t; assert( pOp->p4type==P4_FUNCCTX ); pCtx = pOp->p4.pCtx; pMem = &aMem[pOp->p3]; /* If this function is inside of a trigger, the register array in aMem[] ** might change from one evaluation to the next. The next block of code ** checks to see if the register array has changed, and if so it ** reinitializes the relavant parts of the sqlite3_context object */ if( pCtx->pMem != pMem ){ pCtx->pMem = pMem; for(i=pCtx->argc-1; i>=0; i--) pCtx->argv[i] = &aMem[pOp->p2+i]; } #ifdef SQLITE_DEBUG for(i=0; iargc; i++){ assert( memIsValid(pCtx->argv[i]) ); REGISTER_TRACE(pOp->p2+i, pCtx->argv[i]); } #endif pMem->n++; sqlite3VdbeMemInit(&t, db, MEM_Null); pCtx->pOut = &t; pCtx->fErrorOrAux = 0; pCtx->skipFlag = 0; (pCtx->pFunc->xStep)(pCtx,pCtx->argc,pCtx->argv); /* IMP: R-24505-23230 */ if( pCtx->fErrorOrAux ){ if( pCtx->isError ){ sqlite3VdbeError(p, "%s", sqlite3_value_text(&t)); rc = pCtx->isError; } sqlite3VdbeMemRelease(&t); }else{ assert( t.flags==MEM_Null ); } if( pCtx->skipFlag ){ assert( pOp[-1].opcode==OP_CollSeq ); i = pOp[-1].p1; if( i ) sqlite3VdbeMemSetInt64(&aMem[i], 1); } break; } /* Opcode: AggFinal P1 P2 * P4 * ** Synopsis: accum=r[P1] N=P2 ** ** Execute the finalizer function for an aggregate. P1 is ** the memory location that is the accumulator for the aggregate. ** ** P2 is the number of arguments that the step function takes and ** P4 is a pointer to the FuncDef for this function. The P2 ** argument is not used by this opcode. It is only there to disambiguate ** functions that can take varying numbers of arguments. The ** P4 argument is only needed for the degenerate case where ** the step function was not previously called. 
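**
** The step/finalizer pair driven by OP_AggStep and this opcode is the same
** pair an application supplies through sqlite3_create_function().  A
** minimal, purely illustrative aggregate that counts non-NULL arguments
** (every name other than the SQLite APIs is invented for the sketch):
**
**     typedef struct CountCtx { sqlite3_int64 n; } CountCtx;
**     static void cntStep(sqlite3_context *ctx, int argc, sqlite3_value **argv){
**       CountCtx *p = sqlite3_aggregate_context(ctx, sizeof(*p));
**       if( p && argc>0 && sqlite3_value_type(argv[0])!=SQLITE_NULL ) p->n++;
**     }
**     static void cntFinal(sqlite3_context *ctx){
**       CountCtx *p = sqlite3_aggregate_context(ctx, 0);
**       sqlite3_result_int64(ctx, p ? p->n : 0);
**     }
**     sqlite3_create_function(db, "mycount", 1, SQLITE_UTF8, 0, 0, cntStep, cntFinal);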
*/ case OP_AggFinal: { Mem *pMem; assert( pOp->p1>0 && pOp->p1<=(p->nMem-p->nCursor) ); pMem = &aMem[pOp->p1]; assert( (pMem->flags & ~(MEM_Null|MEM_Agg))==0 ); rc = sqlite3VdbeMemFinalize(pMem, pOp->p4.pFunc); if( rc ){ sqlite3VdbeError(p, "%s", sqlite3_value_text(pMem)); } sqlite3VdbeChangeEncoding(pMem, encoding); UPDATE_MAX_BLOBSIZE(pMem); if( sqlite3VdbeMemTooBig(pMem) ){ goto too_big; } break; } #ifndef SQLITE_OMIT_WAL /* Opcode: Checkpoint P1 P2 P3 * * ** ** Checkpoint database P1. This is a no-op if P1 is not currently in ** WAL mode. Parameter P2 is one of SQLITE_CHECKPOINT_PASSIVE, FULL, ** RESTART, or TRUNCATE. Write 1 or 0 into mem[P3] if the checkpoint returns ** SQLITE_BUSY or not, respectively. Write the number of pages in the ** WAL after the checkpoint into mem[P3+1] and the number of pages ** in the WAL that have been checkpointed after the checkpoint ** completes into mem[P3+2]. However on an error, mem[P3+1] and ** mem[P3+2] are initialized to -1. */ case OP_Checkpoint: { int i; /* Loop counter */ int aRes[3]; /* Results */ Mem *pMem; /* Write results here */ assert( p->readOnly==0 ); aRes[0] = 0; aRes[1] = aRes[2] = -1; assert( pOp->p2==SQLITE_CHECKPOINT_PASSIVE || pOp->p2==SQLITE_CHECKPOINT_FULL || pOp->p2==SQLITE_CHECKPOINT_RESTART || pOp->p2==SQLITE_CHECKPOINT_TRUNCATE ); rc = sqlite3Checkpoint(db, pOp->p1, pOp->p2, &aRes[1], &aRes[2]); if( rc==SQLITE_BUSY ){ rc = SQLITE_OK; aRes[0] = 1; } for(i=0, pMem = &aMem[pOp->p3]; i<3; i++, pMem++){ sqlite3VdbeMemSetInt64(pMem, (i64)aRes[i]); } break; }; #endif #ifndef SQLITE_OMIT_PRAGMA /* Opcode: JournalMode P1 P2 P3 * * ** ** Change the journal mode of database P1 to P3. P3 must be one of the ** PAGER_JOURNALMODE_XXX values. If changing between the various rollback ** modes (delete, truncate, persist, off and memory), this is a simple ** operation. No IO is required. ** ** If changing into or out of WAL mode the procedure is more complicated. ** ** Write a string containing the final journal-mode to register P2. */ case OP_JournalMode: { /* out2 */ Btree *pBt; /* Btree to change journal mode of */ Pager *pPager; /* Pager associated with pBt */ int eNew; /* New journal mode */ int eOld; /* The old journal mode */ #ifndef SQLITE_OMIT_WAL const char *zFilename; /* Name of database file for pPager */ #endif pOut = out2Prerelease(p, pOp); eNew = pOp->p3; assert( eNew==PAGER_JOURNALMODE_DELETE || eNew==PAGER_JOURNALMODE_TRUNCATE || eNew==PAGER_JOURNALMODE_PERSIST || eNew==PAGER_JOURNALMODE_OFF || eNew==PAGER_JOURNALMODE_MEMORY || eNew==PAGER_JOURNALMODE_WAL || eNew==PAGER_JOURNALMODE_QUERY ); assert( pOp->p1>=0 && pOp->p1nDb ); assert( p->readOnly==0 ); pBt = db->aDb[pOp->p1].pBt; pPager = sqlite3BtreePager(pBt); eOld = sqlite3PagerGetJournalMode(pPager); if( eNew==PAGER_JOURNALMODE_QUERY ) eNew = eOld; if( !sqlite3PagerOkToChangeJournalMode(pPager) ) eNew = eOld; #ifndef SQLITE_OMIT_WAL zFilename = sqlite3PagerFilename(pPager, 1); /* Do not allow a transition to journal_mode=WAL for a database ** in temporary storage or if the VFS does not support shared memory */ if( eNew==PAGER_JOURNALMODE_WAL && (sqlite3Strlen30(zFilename)==0 /* Temp file */ || !sqlite3PagerWalSupported(pPager)) /* No shared-memory support */ ){ eNew = eOld; } if( (eNew!=eOld) && (eOld==PAGER_JOURNALMODE_WAL || eNew==PAGER_JOURNALMODE_WAL) ){ if( !db->autoCommit || db->nVdbeRead>1 ){ rc = SQLITE_ERROR; sqlite3VdbeError(p, "cannot change %s wal mode from within a transaction", (eNew==PAGER_JOURNALMODE_WAL ? 
"into" : "out of") ); break; }else{ if( eOld==PAGER_JOURNALMODE_WAL ){ /* If leaving WAL mode, close the log file. If successful, the call ** to PagerCloseWal() checkpoints and deletes the write-ahead-log ** file. An EXCLUSIVE lock may still be held on the database file ** after a successful return. */ rc = sqlite3PagerCloseWal(pPager); if( rc==SQLITE_OK ){ sqlite3PagerSetJournalMode(pPager, eNew); } }else if( eOld==PAGER_JOURNALMODE_MEMORY ){ /* Cannot transition directly from MEMORY to WAL. Use mode OFF ** as an intermediate */ sqlite3PagerSetJournalMode(pPager, PAGER_JOURNALMODE_OFF); } /* Open a transaction on the database file. Regardless of the journal ** mode, this transaction always uses a rollback journal. */ assert( sqlite3BtreeIsInTrans(pBt)==0 ); if( rc==SQLITE_OK ){ rc = sqlite3BtreeSetVersion(pBt, (eNew==PAGER_JOURNALMODE_WAL ? 2 : 1)); } } } #endif /* ifndef SQLITE_OMIT_WAL */ if( rc ){ eNew = eOld; } eNew = sqlite3PagerSetJournalMode(pPager, eNew); pOut->flags = MEM_Str|MEM_Static|MEM_Term; pOut->z = (char *)sqlite3JournalModename(eNew); pOut->n = sqlite3Strlen30(pOut->z); pOut->enc = SQLITE_UTF8; sqlite3VdbeChangeEncoding(pOut, encoding); break; }; #endif /* SQLITE_OMIT_PRAGMA */ #if !defined(SQLITE_OMIT_VACUUM) && !defined(SQLITE_OMIT_ATTACH) /* Opcode: Vacuum * * * * * ** ** Vacuum the entire database. This opcode will cause other virtual ** machines to be created and run. It may not be called from within ** a transaction. */ case OP_Vacuum: { assert( p->readOnly==0 ); rc = sqlite3RunVacuum(&p->zErrMsg, db); break; } #endif #if !defined(SQLITE_OMIT_AUTOVACUUM) /* Opcode: IncrVacuum P1 P2 * * * ** ** Perform a single step of the incremental vacuum procedure on ** the P1 database. If the vacuum has finished, jump to instruction ** P2. Otherwise, fall through to the next instruction. */ case OP_IncrVacuum: { /* jump */ Btree *pBt; assert( pOp->p1>=0 && pOp->p1nDb ); assert( DbMaskTest(p->btreeMask, pOp->p1) ); assert( p->readOnly==0 ); pBt = db->aDb[pOp->p1].pBt; rc = sqlite3BtreeIncrVacuum(pBt); VdbeBranchTaken(rc==SQLITE_DONE,2); if( rc==SQLITE_DONE ){ rc = SQLITE_OK; goto jump_to_p2; } break; } #endif /* Opcode: Expire P1 * * * * ** ** Cause precompiled statements to expire. When an expired statement ** is executed using sqlite3_step() it will either automatically ** reprepare itself (if it was originally created using sqlite3_prepare_v2()) ** or it will fail with SQLITE_SCHEMA. ** ** If P1 is 0, then all SQL statements become expired. If P1 is non-zero, ** then only the currently executing statement is expired. */ case OP_Expire: { if( !pOp->p1 ){ sqlite3ExpirePreparedStatements(db); }else{ p->expired = 1; } break; } #ifndef SQLITE_OMIT_SHARED_CACHE /* Opcode: TableLock P1 P2 P3 P4 * ** Synopsis: iDb=P1 root=P2 write=P3 ** ** Obtain a lock on a particular table. This instruction is only used when ** the shared-cache feature is enabled. ** ** P1 is the index of the database in sqlite3.aDb[] of the database ** on which the lock is acquired. A readlock is obtained if P3==0 or ** a write lock if P3==1. ** ** P2 contains the root-page of the table to lock. ** ** P4 contains a pointer to the name of the table being locked. This is only ** used to generate an error message if the lock cannot be obtained. 
*/ case OP_TableLock: { u8 isWriteLock = (u8)pOp->p3; if( isWriteLock || 0==(db->flags&SQLITE_ReadUncommitted) ){ int p1 = pOp->p1; assert( p1>=0 && p1nDb ); assert( DbMaskTest(p->btreeMask, p1) ); assert( isWriteLock==0 || isWriteLock==1 ); rc = sqlite3BtreeLockTable(db->aDb[p1].pBt, pOp->p2, isWriteLock); if( (rc&0xFF)==SQLITE_LOCKED ){ const char *z = pOp->p4.z; sqlite3VdbeError(p, "database table is locked: %s", z); } } break; } #endif /* SQLITE_OMIT_SHARED_CACHE */ #ifndef SQLITE_OMIT_VIRTUALTABLE /* Opcode: VBegin * * * P4 * ** ** P4 may be a pointer to an sqlite3_vtab structure. If so, call the ** xBegin method for that table. ** ** Also, whether or not P4 is set, check that this is not being called from ** within a callback to a virtual table xSync() method. If it is, the error ** code will be set to SQLITE_LOCKED. */ case OP_VBegin: { VTable *pVTab; pVTab = pOp->p4.pVtab; rc = sqlite3VtabBegin(db, pVTab); if( pVTab ) sqlite3VtabImportErrmsg(p, pVTab->pVtab); break; } #endif /* SQLITE_OMIT_VIRTUALTABLE */ #ifndef SQLITE_OMIT_VIRTUALTABLE /* Opcode: VCreate P1 P2 * * * ** ** P2 is a register that holds the name of a virtual table in database ** P1. Call the xCreate method for that table. */ case OP_VCreate: { Mem sMem; /* For storing the record being decoded */ const char *zTab; /* Name of the virtual table */ memset(&sMem, 0, sizeof(sMem)); sMem.db = db; /* Because P2 is always a static string, it is impossible for the ** sqlite3VdbeMemCopy() to fail */ assert( (aMem[pOp->p2].flags & MEM_Str)!=0 ); assert( (aMem[pOp->p2].flags & MEM_Static)!=0 ); rc = sqlite3VdbeMemCopy(&sMem, &aMem[pOp->p2]); assert( rc==SQLITE_OK ); zTab = (const char*)sqlite3_value_text(&sMem); assert( zTab || db->mallocFailed ); if( zTab ){ rc = sqlite3VtabCallCreate(db, pOp->p1, zTab, &p->zErrMsg); } sqlite3VdbeMemRelease(&sMem); break; } #endif /* SQLITE_OMIT_VIRTUALTABLE */ #ifndef SQLITE_OMIT_VIRTUALTABLE /* Opcode: VDestroy P1 * * P4 * ** ** P4 is the name of a virtual table in database P1. Call the xDestroy method ** of that table. */ case OP_VDestroy: { db->nVDestroy++; rc = sqlite3VtabCallDestroy(db, pOp->p1, pOp->p4.z); db->nVDestroy--; break; } #endif /* SQLITE_OMIT_VIRTUALTABLE */ #ifndef SQLITE_OMIT_VIRTUALTABLE /* Opcode: VOpen P1 * * P4 * ** ** P4 is a pointer to a virtual table object, an sqlite3_vtab structure. ** P1 is a cursor number. This opcode opens a cursor to the virtual ** table and stores that cursor in P1. */ case OP_VOpen: { VdbeCursor *pCur; sqlite3_vtab_cursor *pVtabCursor; sqlite3_vtab *pVtab; const sqlite3_module *pModule; assert( p->bIsReader ); pCur = 0; pVtabCursor = 0; pVtab = pOp->p4.pVtab->pVtab; if( pVtab==0 || NEVER(pVtab->pModule==0) ){ rc = SQLITE_LOCKED; break; } pModule = pVtab->pModule; rc = pModule->xOpen(pVtab, &pVtabCursor); sqlite3VtabImportErrmsg(p, pVtab); if( SQLITE_OK==rc ){ /* Initialize sqlite3_vtab_cursor base class */ pVtabCursor->pVtab = pVtab; /* Initialize vdbe cursor object */ pCur = allocateCursor(p, pOp->p1, 0, -1, 0); if( pCur ){ pCur->pVtabCursor = pVtabCursor; pVtab->nRef++; }else{ assert( db->mallocFailed ); pModule->xClose(pVtabCursor); goto no_mem; } } break; } #endif /* SQLITE_OMIT_VIRTUALTABLE */ #ifndef SQLITE_OMIT_VIRTUALTABLE /* Opcode: VFilter P1 P2 P3 P4 * ** Synopsis: iplan=r[P3] zplan='P4' ** ** P1 is a cursor opened using VOpen. P2 is an address to jump to if ** the filtered result set is empty. ** ** P4 is either NULL or a string that was generated by the xBestIndex ** method of the module. 
The interpretation of the P4 string is left ** to the module implementation. ** ** This opcode invokes the xFilter method on the virtual table specified ** by P1. The integer query plan parameter to xFilter is stored in register ** P3. Register P3+1 stores the argc parameter to be passed to the ** xFilter method. Registers P3+2..P3+1+argc are the argc ** additional parameters which are passed to ** xFilter as argv. Register P3+2 becomes argv[0] when passed to xFilter. ** ** A jump is made to P2 if the result set after filtering would be empty. */ case OP_VFilter: { /* jump */ int nArg; int iQuery; const sqlite3_module *pModule; Mem *pQuery; Mem *pArgc; sqlite3_vtab_cursor *pVtabCursor; sqlite3_vtab *pVtab; VdbeCursor *pCur; int res; int i; Mem **apArg; pQuery = &aMem[pOp->p3]; pArgc = &pQuery[1]; pCur = p->apCsr[pOp->p1]; assert( memIsValid(pQuery) ); REGISTER_TRACE(pOp->p3, pQuery); assert( pCur->pVtabCursor ); pVtabCursor = pCur->pVtabCursor; pVtab = pVtabCursor->pVtab; pModule = pVtab->pModule; /* Grab the index number and argc parameters */ assert( (pQuery->flags&MEM_Int)!=0 && pArgc->flags==MEM_Int ); nArg = (int)pArgc->u.i; iQuery = (int)pQuery->u.i; /* Invoke the xFilter method */ res = 0; apArg = p->apArg; for(i = 0; ixFilter(pVtabCursor, iQuery, pOp->p4.z, nArg, apArg); sqlite3VtabImportErrmsg(p, pVtab); if( rc==SQLITE_OK ){ res = pModule->xEof(pVtabCursor); } pCur->nullRow = 0; VdbeBranchTaken(res!=0,2); if( res ) goto jump_to_p2; break; } #endif /* SQLITE_OMIT_VIRTUALTABLE */ #ifndef SQLITE_OMIT_VIRTUALTABLE /* Opcode: VColumn P1 P2 P3 * * ** Synopsis: r[P3]=vcolumn(P2) ** ** Store the value of the P2-th column of ** the row of the virtual-table that the ** P1 cursor is pointing to into register P3. */ case OP_VColumn: { sqlite3_vtab *pVtab; const sqlite3_module *pModule; Mem *pDest; sqlite3_context sContext; VdbeCursor *pCur = p->apCsr[pOp->p1]; assert( pCur->pVtabCursor ); assert( pOp->p3>0 && pOp->p3<=(p->nMem-p->nCursor) ); pDest = &aMem[pOp->p3]; memAboutToChange(p, pDest); if( pCur->nullRow ){ sqlite3VdbeMemSetNull(pDest); break; } pVtab = pCur->pVtabCursor->pVtab; pModule = pVtab->pModule; assert( pModule->xColumn ); memset(&sContext, 0, sizeof(sContext)); sContext.pOut = pDest; MemSetTypeFlag(pDest, MEM_Null); rc = pModule->xColumn(pCur->pVtabCursor, &sContext, pOp->p2); sqlite3VtabImportErrmsg(p, pVtab); if( sContext.isError ){ rc = sContext.isError; } sqlite3VdbeChangeEncoding(pDest, encoding); REGISTER_TRACE(pOp->p3, pDest); UPDATE_MAX_BLOBSIZE(pDest); if( sqlite3VdbeMemTooBig(pDest) ){ goto too_big; } break; } #endif /* SQLITE_OMIT_VIRTUALTABLE */ #ifndef SQLITE_OMIT_VIRTUALTABLE /* Opcode: VNext P1 P2 * * * ** ** Advance virtual table P1 to the next row in its result set and ** jump to instruction P2. Or, if the virtual table has reached ** the end of its result set, then fall through to the next instruction. */ case OP_VNext: { /* jump */ sqlite3_vtab *pVtab; const sqlite3_module *pModule; int res; VdbeCursor *pCur; res = 0; pCur = p->apCsr[pOp->p1]; assert( pCur->pVtabCursor ); if( pCur->nullRow ){ break; } pVtab = pCur->pVtabCursor->pVtab; pModule = pVtab->pModule; assert( pModule->xNext ); /* Invoke the xNext() method of the module. There is no way for the ** underlying implementation to return an error if one occurs during ** xNext(). Instead, if an error occurs, true is returned (indicating that ** data is available) and the error code returned when xColumn or ** some other method is next invoked on the save virtual table cursor. 
*/ rc = pModule->xNext(pCur->pVtabCursor); sqlite3VtabImportErrmsg(p, pVtab); if( rc==SQLITE_OK ){ res = pModule->xEof(pCur->pVtabCursor); } VdbeBranchTaken(!res,2); if( !res ){ /* If there is data, jump to P2 */ goto jump_to_p2_and_check_for_interrupt; } goto check_for_interrupt; } #endif /* SQLITE_OMIT_VIRTUALTABLE */ #ifndef SQLITE_OMIT_VIRTUALTABLE /* Opcode: VRename P1 * * P4 * ** ** P4 is a pointer to a virtual table object, an sqlite3_vtab structure. ** This opcode invokes the corresponding xRename method. The value ** in register P1 is passed as the zName argument to the xRename method. */ case OP_VRename: { sqlite3_vtab *pVtab; Mem *pName; pVtab = pOp->p4.pVtab->pVtab; pName = &aMem[pOp->p1]; assert( pVtab->pModule->xRename ); assert( memIsValid(pName) ); assert( p->readOnly==0 ); REGISTER_TRACE(pOp->p1, pName); assert( pName->flags & MEM_Str ); testcase( pName->enc==SQLITE_UTF8 ); testcase( pName->enc==SQLITE_UTF16BE ); testcase( pName->enc==SQLITE_UTF16LE ); rc = sqlite3VdbeChangeEncoding(pName, SQLITE_UTF8); if( rc==SQLITE_OK ){ rc = pVtab->pModule->xRename(pVtab, pName->z); sqlite3VtabImportErrmsg(p, pVtab); p->expired = 0; } break; } #endif #ifndef SQLITE_OMIT_VIRTUALTABLE /* Opcode: VUpdate P1 P2 P3 P4 P5 ** Synopsis: data=r[P3@P2] ** ** P4 is a pointer to a virtual table object, an sqlite3_vtab structure. ** This opcode invokes the corresponding xUpdate method. P2 values ** are contiguous memory cells starting at P3 to pass to the xUpdate ** invocation. The value in register (P3+P2-1) corresponds to the ** p2th element of the argv array passed to xUpdate. ** ** The xUpdate method will do a DELETE or an INSERT or both. ** The argv[0] element (which corresponds to memory cell P3) ** is the rowid of a row to delete. If argv[0] is NULL then no ** deletion occurs. The argv[1] element is the rowid of the new ** row. This can be NULL to have the virtual table select the new ** rowid for itself. The subsequent elements in the array are ** the values of columns in the new row. ** ** If P2==1 then no insert is performed. argv[0] is the rowid of ** a row to delete. ** ** P1 is a boolean flag. If it is set to true and the xUpdate call ** is successful, then the value returned by sqlite3_last_insert_rowid() ** is set to the value of the rowid for the row just inserted. ** ** P5 is the error actions (OE_Replace, OE_Fail, OE_Ignore, etc) to ** apply in the case of a constraint failure on an insert or update. */ case OP_VUpdate: { sqlite3_vtab *pVtab; const sqlite3_module *pModule; int nArg; int i; sqlite_int64 rowid; Mem **apArg; Mem *pX; assert( pOp->p2==1 || pOp->p5==OE_Fail || pOp->p5==OE_Rollback || pOp->p5==OE_Abort || pOp->p5==OE_Ignore || pOp->p5==OE_Replace ); assert( p->readOnly==0 ); pVtab = pOp->p4.pVtab->pVtab; if( pVtab==0 || NEVER(pVtab->pModule==0) ){ rc = SQLITE_LOCKED; break; } pModule = pVtab->pModule; nArg = pOp->p2; assert( pOp->p4type==P4_VTAB ); if( ALWAYS(pModule->xUpdate) ){ u8 vtabOnConflict = db->vtabOnConflict; apArg = p->apArg; pX = &aMem[pOp->p3]; for(i=0; ivtabOnConflict = pOp->p5; rc = pModule->xUpdate(pVtab, nArg, apArg, &rowid); db->vtabOnConflict = vtabOnConflict; sqlite3VtabImportErrmsg(p, pVtab); if( rc==SQLITE_OK && pOp->p1 ){ assert( nArg>1 && apArg[0] && (apArg[0]->flags&MEM_Null) ); db->lastRowid = lastRowid = rowid; } if( (rc&0xff)==SQLITE_CONSTRAINT && pOp->p4.pVtab->bConstraint ){ if( pOp->p5==OE_Ignore ){ rc = SQLITE_OK; }else{ p->errorAction = ((pOp->p5==OE_Replace) ? 
OE_Abort : pOp->p5); } }else{ p->nChange++; } } break; } #endif /* SQLITE_OMIT_VIRTUALTABLE */ #ifndef SQLITE_OMIT_PAGER_PRAGMAS /* Opcode: Pagecount P1 P2 * * * ** ** Write the current number of pages in database P1 to memory cell P2. */ case OP_Pagecount: { /* out2 */ pOut = out2Prerelease(p, pOp); pOut->u.i = sqlite3BtreeLastPage(db->aDb[pOp->p1].pBt); break; } #endif #ifndef SQLITE_OMIT_PAGER_PRAGMAS /* Opcode: MaxPgcnt P1 P2 P3 * * ** ** Try to set the maximum page count for database P1 to the value in P3. ** Do not let the maximum page count fall below the current page count and ** do not change the maximum page count value if P3==0. ** ** Store the maximum page count after the change in register P2. */ case OP_MaxPgcnt: { /* out2 */ unsigned int newMax; Btree *pBt; pOut = out2Prerelease(p, pOp); pBt = db->aDb[pOp->p1].pBt; newMax = 0; if( pOp->p3 ){ newMax = sqlite3BtreeLastPage(pBt); if( newMax < (unsigned)pOp->p3 ) newMax = (unsigned)pOp->p3; } pOut->u.i = sqlite3BtreeMaxPageCount(pBt, newMax); break; } #endif /* Opcode: Init * P2 * P4 * ** Synopsis: Start at P2 ** ** Programs contain a single instance of this opcode as the very first ** opcode. ** ** If tracing is enabled (by the sqlite3_trace()) interface, then ** the UTF-8 string contained in P4 is emitted on the trace callback. ** Or if P4 is blank, use the string returned by sqlite3_sql(). ** ** If P2 is not zero, jump to instruction P2. */ case OP_Init: { /* jump */ char *zTrace; char *z; #ifndef SQLITE_OMIT_TRACE if( db->xTrace && !p->doingRerun && (zTrace = (pOp->p4.z ? pOp->p4.z : p->zSql))!=0 ){ z = sqlite3VdbeExpandSql(p, zTrace); db->xTrace(db->pTraceArg, z); sqlite3DbFree(db, z); } #ifdef SQLITE_USE_FCNTL_TRACE zTrace = (pOp->p4.z ? pOp->p4.z : p->zSql); if( zTrace ){ int i; for(i=0; inDb; i++){ if( DbMaskTest(p->btreeMask, i)==0 ) continue; sqlite3_file_control(db, db->aDb[i].zName, SQLITE_FCNTL_TRACE, zTrace); } } #endif /* SQLITE_USE_FCNTL_TRACE */ #ifdef SQLITE_DEBUG if( (db->flags & SQLITE_SqlTrace)!=0 && (zTrace = (pOp->p4.z ? pOp->p4.z : p->zSql))!=0 ){ sqlite3DebugPrintf("SQL-trace: %s\n", zTrace); } #endif /* SQLITE_DEBUG */ #endif /* SQLITE_OMIT_TRACE */ if( pOp->p2 ) goto jump_to_p2; break; } /* Opcode: Noop * * * * * ** ** Do nothing. This instruction is often useful as a jump ** destination. */ /* ** The magic Explain opcode are only inserted when explain==2 (which ** is to say when the EXPLAIN QUERY PLAN syntax is used.) ** This opcode records information from the optimizer. It is the ** the same as a no-op. This opcodesnever appears in a real VM program. */ default: { /* This is really OP_Noop and OP_Explain */ assert( pOp->opcode==OP_Noop || pOp->opcode==OP_Explain ); break; } /***************************************************************************** ** The cases of the switch statement above this line should all be indented ** by 6 spaces. But the left-most 6 spaces have been removed to improve the ** readability. From this point on down, the normal indentation rules are ** restored. *****************************************************************************/ } #ifdef VDBE_PROFILE { u64 endTime = sqlite3Hwtime(); if( endTime>start ) pOrigOp->cycles += endTime - start; pOrigOp->cnt++; } #endif /* The following code adds nothing to the actual functionality ** of the program. It is only here for testing and debugging. ** On the other hand, it does burn CPU cycles every time through ** the evaluator loop. So we can leave it out when NDEBUG is defined. 
*/ #ifndef NDEBUG assert( pOp>=&aOp[-1] && pOp<&aOp[p->nOp-1] ); #ifdef SQLITE_DEBUG if( db->flags & SQLITE_VdbeTrace ){ if( rc!=0 ) printf("rc=%d\n",rc); if( pOrigOp->opflags & (OPFLG_OUT2) ){ registerTrace(pOrigOp->p2, &aMem[pOrigOp->p2]); } if( pOrigOp->opflags & OPFLG_OUT3 ){ registerTrace(pOrigOp->p3, &aMem[pOrigOp->p3]); } } #endif /* SQLITE_DEBUG */ #endif /* NDEBUG */ } /* The end of the for(;;) loop the loops through opcodes */ /* If we reach this point, it means that execution is finished with ** an error of some kind. */ vdbe_error_halt: assert( rc ); p->rc = rc; testcase( sqlite3GlobalConfig.xLog!=0 ); sqlite3_log(rc, "statement aborts at %d: [%s] %s", (int)(pOp - aOp), p->zSql, p->zErrMsg); sqlite3VdbeHalt(p); if( rc==SQLITE_IOERR_NOMEM ) db->mallocFailed = 1; rc = SQLITE_ERROR; if( resetSchemaOnFault>0 ){ sqlite3ResetOneSchema(db, resetSchemaOnFault-1); } /* This is the only way out of this procedure. We have to ** release the mutexes on btrees that were acquired at the ** top. */ vdbe_return: db->lastRowid = lastRowid; testcase( nVmStep>0 ); p->aCounter[SQLITE_STMTSTATUS_VM_STEP] += (int)nVmStep; sqlite3VdbeLeave(p); return rc; /* Jump to here if a string or blob larger than SQLITE_MAX_LENGTH ** is encountered. */ too_big: sqlite3VdbeError(p, "string or blob too big"); rc = SQLITE_TOOBIG; goto vdbe_error_halt; /* Jump to here if a malloc() fails. */ no_mem: db->mallocFailed = 1; sqlite3VdbeError(p, "out of memory"); rc = SQLITE_NOMEM; goto vdbe_error_halt; /* Jump to here for any other kind of fatal error. The "rc" variable ** should hold the error number. */ abort_due_to_error: assert( p->zErrMsg==0 ); if( db->mallocFailed ) rc = SQLITE_NOMEM; if( rc!=SQLITE_IOERR_NOMEM ){ sqlite3VdbeError(p, "%s", sqlite3ErrStr(rc)); } goto vdbe_error_halt; /* Jump to here if the sqlite3_interrupt() API sets the interrupt ** flag. */ abort_due_to_interrupt: assert( db->u1.isInterrupted ); rc = SQLITE_INTERRUPT; p->rc = rc; sqlite3VdbeError(p, "%s", sqlite3ErrStr(rc)); goto vdbe_error_halt; } /************** End of vdbe.c ************************************************/ /************** Begin file vdbeblob.c ****************************************/ /* ** 2007 May 1 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** ** This file contains code used to implement incremental BLOB I/O. */ /* #include "sqliteInt.h" */ /* #include "vdbeInt.h" */ #ifndef SQLITE_OMIT_INCRBLOB /* ** Valid sqlite3_blob* handles point to Incrblob structures. */ typedef struct Incrblob Incrblob; struct Incrblob { int flags; /* Copy of "flags" passed to sqlite3_blob_open() */ int nByte; /* Size of open blob, in bytes */ int iOffset; /* Byte offset of blob in cursor data */ int iCol; /* Table column this handle is open on */ BtCursor *pCsr; /* Cursor pointing at blob row */ sqlite3_stmt *pStmt; /* Statement holding cursor open */ sqlite3 *db; /* The associated database */ }; /* ** This function is used by both blob_open() and blob_reopen(). It seeks ** the b-tree cursor associated with blob handle p to point to row iRow. ** If successful, SQLITE_OK is returned and subsequent calls to ** sqlite3_blob_read() or sqlite3_blob_write() access the specified row. 
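**
** As a rough caller-side illustration of the public API that this routine
** ultimately supports (the database, table and column names below are
** hypothetical, and aBuf is a caller supplied buffer):
**
**    sqlite3_blob *pBlob;
**    rc = sqlite3_blob_open(db, "main", "t1", "content", iRow, 0, &pBlob);
**    if( rc==SQLITE_OK ){
**      rc = sqlite3_blob_read(pBlob, aBuf, sizeof(aBuf), 0);
**      sqlite3_blob_close(pBlob);
**    }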
** ** If an error occurs, or if the specified row does not exist or does not ** contain a value of type TEXT or BLOB in the column nominated when the ** blob handle was opened, then an error code is returned and *pzErr may ** be set to point to a buffer containing an error message. It is the ** responsibility of the caller to free the error message buffer using ** sqlite3DbFree(). ** ** If an error does occur, then the b-tree cursor is closed. All subsequent ** calls to sqlite3_blob_read(), blob_write() or blob_reopen() will ** immediately return SQLITE_ABORT. */ static int blobSeekToRow(Incrblob *p, sqlite3_int64 iRow, char **pzErr){ int rc; /* Error code */ char *zErr = 0; /* Error message */ Vdbe *v = (Vdbe *)p->pStmt; /* Set the value of the SQL statements only variable to integer iRow. ** This is done directly instead of using sqlite3_bind_int64() to avoid ** triggering asserts related to mutexes. */ assert( v->aVar[0].flags&MEM_Int ); v->aVar[0].u.i = iRow; rc = sqlite3_step(p->pStmt); if( rc==SQLITE_ROW ){ VdbeCursor *pC = v->apCsr[0]; u32 type = pC->aType[p->iCol]; if( type<12 ){ zErr = sqlite3MPrintf(p->db, "cannot open value of type %s", type==0?"null": type==7?"real": "integer" ); rc = SQLITE_ERROR; sqlite3_finalize(p->pStmt); p->pStmt = 0; }else{ p->iOffset = pC->aType[p->iCol + pC->nField]; p->nByte = sqlite3VdbeSerialTypeLen(type); p->pCsr = pC->pCursor; sqlite3BtreeIncrblobCursor(p->pCsr); } } if( rc==SQLITE_ROW ){ rc = SQLITE_OK; }else if( p->pStmt ){ rc = sqlite3_finalize(p->pStmt); p->pStmt = 0; if( rc==SQLITE_OK ){ zErr = sqlite3MPrintf(p->db, "no such rowid: %lld", iRow); rc = SQLITE_ERROR; }else{ zErr = sqlite3MPrintf(p->db, "%s", sqlite3_errmsg(p->db)); } } assert( rc!=SQLITE_OK || zErr==0 ); assert( rc!=SQLITE_ROW && rc!=SQLITE_DONE ); *pzErr = zErr; return rc; } /* ** Open a blob handle. */ SQLITE_API int SQLITE_STDCALL sqlite3_blob_open( sqlite3* db, /* The database connection */ const char *zDb, /* The attached database containing the blob */ const char *zTable, /* The table containing the blob */ const char *zColumn, /* The column containing the blob */ sqlite_int64 iRow, /* The row containing the glob */ int flags, /* True -> read/write access, false -> read-only */ sqlite3_blob **ppBlob /* Handle for accessing the blob returned here */ ){ int nAttempt = 0; int iCol; /* Index of zColumn in row-record */ /* This VDBE program seeks a btree cursor to the identified ** db/table/row entry. The reason for using a vdbe program instead ** of writing code to use the b-tree layer directly is that the ** vdbe program will take advantage of the various transaction, ** locking and error handling infrastructure built into the vdbe. ** ** After seeking the cursor, the vdbe executes an OP_ResultRow. ** Code external to the Vdbe then "borrows" the b-tree cursor and ** uses it to implement the blob_read(), blob_write() and ** blob_bytes() functions. ** ** The sqlite3_blob_close() function finalizes the vdbe program, ** which closes the b-tree cursor and (possibly) commits the ** transaction. */ static const int iLn = VDBE_OFFSET_LINENO(4); static const VdbeOpList openBlob[] = { /* {OP_Transaction, 0, 0, 0}, // 0: Inserted separately */ {OP_TableLock, 0, 0, 0}, /* 1: Acquire a read or write lock */ /* One of the following two instructions is replaced by an OP_Noop. 
*/ {OP_OpenRead, 0, 0, 0}, /* 2: Open cursor 0 for reading */ {OP_OpenWrite, 0, 0, 0}, /* 3: Open cursor 0 for read/write */ {OP_Variable, 1, 1, 1}, /* 4: Push the rowid to the stack */ {OP_NotExists, 0, 10, 1}, /* 5: Seek the cursor */ {OP_Column, 0, 0, 1}, /* 6 */ {OP_ResultRow, 1, 0, 0}, /* 7 */ {OP_Goto, 0, 4, 0}, /* 8 */ {OP_Close, 0, 0, 0}, /* 9 */ {OP_Halt, 0, 0, 0}, /* 10 */ }; int rc = SQLITE_OK; char *zErr = 0; Table *pTab; Parse *pParse = 0; Incrblob *pBlob = 0; #ifdef SQLITE_ENABLE_API_ARMOR if( ppBlob==0 ){ return SQLITE_MISUSE_BKPT; } #endif *ppBlob = 0; #ifdef SQLITE_ENABLE_API_ARMOR if( !sqlite3SafetyCheckOk(db) || zTable==0 ){ return SQLITE_MISUSE_BKPT; } #endif flags = !!flags; /* flags = (flags ? 1 : 0); */ sqlite3_mutex_enter(db->mutex); pBlob = (Incrblob *)sqlite3DbMallocZero(db, sizeof(Incrblob)); if( !pBlob ) goto blob_open_out; pParse = sqlite3StackAllocRaw(db, sizeof(*pParse)); if( !pParse ) goto blob_open_out; do { memset(pParse, 0, sizeof(Parse)); pParse->db = db; sqlite3DbFree(db, zErr); zErr = 0; sqlite3BtreeEnterAll(db); pTab = sqlite3LocateTable(pParse, 0, zTable, zDb); if( pTab && IsVirtual(pTab) ){ pTab = 0; sqlite3ErrorMsg(pParse, "cannot open virtual table: %s", zTable); } if( pTab && !HasRowid(pTab) ){ pTab = 0; sqlite3ErrorMsg(pParse, "cannot open table without rowid: %s", zTable); } #ifndef SQLITE_OMIT_VIEW if( pTab && pTab->pSelect ){ pTab = 0; sqlite3ErrorMsg(pParse, "cannot open view: %s", zTable); } #endif if( !pTab ){ if( pParse->zErrMsg ){ sqlite3DbFree(db, zErr); zErr = pParse->zErrMsg; pParse->zErrMsg = 0; } rc = SQLITE_ERROR; sqlite3BtreeLeaveAll(db); goto blob_open_out; } /* Now search pTab for the exact column. */ for(iCol=0; iColnCol; iCol++) { if( sqlite3StrICmp(pTab->aCol[iCol].zName, zColumn)==0 ){ break; } } if( iCol==pTab->nCol ){ sqlite3DbFree(db, zErr); zErr = sqlite3MPrintf(db, "no such column: \"%s\"", zColumn); rc = SQLITE_ERROR; sqlite3BtreeLeaveAll(db); goto blob_open_out; } /* If the value is being opened for writing, check that the ** column is not indexed, and that it is not part of a foreign key. ** It is against the rules to open a column to which either of these ** descriptions applies for writing. */ if( flags ){ const char *zFault = 0; Index *pIdx; #ifndef SQLITE_OMIT_FOREIGN_KEY if( db->flags&SQLITE_ForeignKeys ){ /* Check that the column is not part of an FK child key definition. It ** is not necessary to check if it is part of a parent key, as parent ** key columns must be indexed. The check below will pick up this ** case. 
*/ FKey *pFKey; for(pFKey=pTab->pFKey; pFKey; pFKey=pFKey->pNextFrom){ int j; for(j=0; jnCol; j++){ if( pFKey->aCol[j].iFrom==iCol ){ zFault = "foreign key"; } } } } #endif for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ int j; for(j=0; jnKeyCol; j++){ if( pIdx->aiColumn[j]==iCol ){ zFault = "indexed"; } } } if( zFault ){ sqlite3DbFree(db, zErr); zErr = sqlite3MPrintf(db, "cannot open %s column for writing", zFault); rc = SQLITE_ERROR; sqlite3BtreeLeaveAll(db); goto blob_open_out; } } pBlob->pStmt = (sqlite3_stmt *)sqlite3VdbeCreate(pParse); assert( pBlob->pStmt || db->mallocFailed ); if( pBlob->pStmt ){ Vdbe *v = (Vdbe *)pBlob->pStmt; int iDb = sqlite3SchemaToIndex(db, pTab->pSchema); sqlite3VdbeAddOp4Int(v, OP_Transaction, iDb, flags, pTab->pSchema->schema_cookie, pTab->pSchema->iGeneration); sqlite3VdbeChangeP5(v, 1); sqlite3VdbeAddOpList(v, ArraySize(openBlob), openBlob, iLn); /* Make sure a mutex is held on the table to be accessed */ sqlite3VdbeUsesBtree(v, iDb); /* Configure the OP_TableLock instruction */ #ifdef SQLITE_OMIT_SHARED_CACHE sqlite3VdbeChangeToNoop(v, 1); #else sqlite3VdbeChangeP1(v, 1, iDb); sqlite3VdbeChangeP2(v, 1, pTab->tnum); sqlite3VdbeChangeP3(v, 1, flags); sqlite3VdbeChangeP4(v, 1, pTab->zName, P4_TRANSIENT); #endif /* Remove either the OP_OpenWrite or OpenRead. Set the P2 ** parameter of the other to pTab->tnum. */ sqlite3VdbeChangeToNoop(v, 3 - flags); sqlite3VdbeChangeP2(v, 2 + flags, pTab->tnum); sqlite3VdbeChangeP3(v, 2 + flags, iDb); /* Configure the number of columns. Configure the cursor to ** think that the table has one more column than it really ** does. An OP_Column to retrieve this imaginary column will ** always return an SQL NULL. This is useful because it means ** we can invoke OP_Column to fill in the vdbe cursors type ** and offset cache without causing any IO. */ sqlite3VdbeChangeP4(v, 2+flags, SQLITE_INT_TO_PTR(pTab->nCol+1),P4_INT32); sqlite3VdbeChangeP2(v, 6, pTab->nCol); if( !db->mallocFailed ){ pParse->nVar = 1; pParse->nMem = 1; pParse->nTab = 1; sqlite3VdbeMakeReady(v, pParse); } } pBlob->flags = flags; pBlob->iCol = iCol; pBlob->db = db; sqlite3BtreeLeaveAll(db); if( db->mallocFailed ){ goto blob_open_out; } sqlite3_bind_int64(pBlob->pStmt, 1, iRow); rc = blobSeekToRow(pBlob, iRow, &zErr); } while( (++nAttempt)mallocFailed==0 ){ *ppBlob = (sqlite3_blob *)pBlob; }else{ if( pBlob && pBlob->pStmt ) sqlite3VdbeFinalize((Vdbe *)pBlob->pStmt); sqlite3DbFree(db, pBlob); } sqlite3ErrorWithMsg(db, rc, (zErr ? "%s" : 0), zErr); sqlite3DbFree(db, zErr); sqlite3ParserReset(pParse); sqlite3StackFree(db, pParse); rc = sqlite3ApiExit(db, rc); sqlite3_mutex_leave(db->mutex); return rc; } /* ** Close a blob handle that was previously created using ** sqlite3_blob_open(). */ SQLITE_API int SQLITE_STDCALL sqlite3_blob_close(sqlite3_blob *pBlob){ Incrblob *p = (Incrblob *)pBlob; int rc; sqlite3 *db; if( p ){ db = p->db; sqlite3_mutex_enter(db->mutex); rc = sqlite3_finalize(p->pStmt); sqlite3DbFree(db, p); sqlite3_mutex_leave(db->mutex); }else{ rc = SQLITE_OK; } return rc; } /* ** Perform a read or write operation on a blob */ static int blobReadWrite( sqlite3_blob *pBlob, void *z, int n, int iOffset, int (*xCall)(BtCursor*, u32, u32, void*) ){ int rc; Incrblob *p = (Incrblob *)pBlob; Vdbe *v; sqlite3 *db; if( p==0 ) return SQLITE_MISUSE_BKPT; db = p->db; sqlite3_mutex_enter(db->mutex); v = (Vdbe*)p->pStmt; if( n<0 || iOffset<0 || ((sqlite3_int64)iOffset+n)>p->nByte ){ /* Request is out of range. Return a transient error. 
*/ rc = SQLITE_ERROR; }else if( v==0 ){ /* If there is no statement handle, then the blob-handle has ** already been invalidated. Return SQLITE_ABORT in this case. */ rc = SQLITE_ABORT; }else{ /* Call either BtreeData() or BtreePutData(). If SQLITE_ABORT is ** returned, clean-up the statement handle. */ assert( db == v->db ); sqlite3BtreeEnterCursor(p->pCsr); rc = xCall(p->pCsr, iOffset+p->iOffset, n, z); sqlite3BtreeLeaveCursor(p->pCsr); if( rc==SQLITE_ABORT ){ sqlite3VdbeFinalize(v); p->pStmt = 0; }else{ v->rc = rc; } } sqlite3Error(db, rc); rc = sqlite3ApiExit(db, rc); sqlite3_mutex_leave(db->mutex); return rc; } /* ** Read data from a blob handle. */ SQLITE_API int SQLITE_STDCALL sqlite3_blob_read(sqlite3_blob *pBlob, void *z, int n, int iOffset){ return blobReadWrite(pBlob, z, n, iOffset, sqlite3BtreeData); } /* ** Write data to a blob handle. */ SQLITE_API int SQLITE_STDCALL sqlite3_blob_write(sqlite3_blob *pBlob, const void *z, int n, int iOffset){ return blobReadWrite(pBlob, (void *)z, n, iOffset, sqlite3BtreePutData); } /* ** Query a blob handle for the size of the data. ** ** The Incrblob.nByte field is fixed for the lifetime of the Incrblob ** so no mutex is required for access. */ SQLITE_API int SQLITE_STDCALL sqlite3_blob_bytes(sqlite3_blob *pBlob){ Incrblob *p = (Incrblob *)pBlob; return (p && p->pStmt) ? p->nByte : 0; } /* ** Move an existing blob handle to point to a different row of the same ** database table. ** ** If an error occurs, or if the specified row does not exist or does not ** contain a blob or text value, then an error code is returned and the ** database handle error code and message set. If this happens, then all ** subsequent calls to sqlite3_blob_xxx() functions (except blob_close()) ** immediately return SQLITE_ABORT. */ SQLITE_API int SQLITE_STDCALL sqlite3_blob_reopen(sqlite3_blob *pBlob, sqlite3_int64 iRow){ int rc; Incrblob *p = (Incrblob *)pBlob; sqlite3 *db; if( p==0 ) return SQLITE_MISUSE_BKPT; db = p->db; sqlite3_mutex_enter(db->mutex); if( p->pStmt==0 ){ /* If there is no statement handle, then the blob-handle has ** already been invalidated. Return SQLITE_ABORT in this case. */ rc = SQLITE_ABORT; }else{ char *zErr; rc = blobSeekToRow(p, iRow, &zErr); if( rc!=SQLITE_OK ){ sqlite3ErrorWithMsg(db, rc, (zErr ? "%s" : 0), zErr); sqlite3DbFree(db, zErr); } assert( rc!=SQLITE_SCHEMA ); } rc = sqlite3ApiExit(db, rc); assert( rc==SQLITE_OK || p->pStmt==0 ); sqlite3_mutex_leave(db->mutex); return rc; } #endif /* #ifndef SQLITE_OMIT_INCRBLOB */ /************** End of vdbeblob.c ********************************************/ /************** Begin file vdbesort.c ****************************************/ /* ** 2011-07-09 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** This file contains code for the VdbeSorter object, used in concert with ** a VdbeCursor to sort large numbers of keys for CREATE INDEX statements ** or by SELECT statements with ORDER BY clauses that cannot be satisfied ** using indexes and without LIMIT clauses. ** ** The VdbeSorter object implements a multi-threaded external merge sort ** algorithm that is efficient even if the number of elements being sorted ** exceeds the available memory. 
** ** Here is the (internal, non-API) interface between this module and the ** rest of the SQLite system: ** ** sqlite3VdbeSorterInit() Create a new VdbeSorter object. ** ** sqlite3VdbeSorterWrite() Add a single new row to the VdbeSorter ** object. The row is a binary blob in the ** OP_MakeRecord format that contains both ** the ORDER BY key columns and result columns ** in the case of a SELECT w/ ORDER BY, or ** the complete record for an index entry ** in the case of a CREATE INDEX. ** ** sqlite3VdbeSorterRewind() Sort all content previously added. ** Position the read cursor on the ** first sorted element. ** ** sqlite3VdbeSorterNext() Advance the read cursor to the next sorted ** element. ** ** sqlite3VdbeSorterRowkey() Return the complete binary blob for the ** row currently under the read cursor. ** ** sqlite3VdbeSorterCompare() Compare the binary blob for the row ** currently under the read cursor against ** another binary blob X and report if ** X is strictly less than the read cursor. ** Used to enforce uniqueness in a ** CREATE UNIQUE INDEX statement. ** ** sqlite3VdbeSorterClose() Close the VdbeSorter object and reclaim ** all resources. ** ** sqlite3VdbeSorterReset() Refurbish the VdbeSorter for reuse. This ** is like Close() followed by Init() only ** much faster. ** ** The interfaces above must be called in a particular order. Write() can ** only occur in between Init()/Reset() and Rewind(). Next(), Rowkey(), and ** Compare() can only occur in between Rewind() and Close()/Reset(). i.e. ** ** Init() ** for each record: Write() ** Rewind() ** Rowkey()/Compare() ** Next() ** Close() ** ** Algorithm: ** ** Records passed to the sorter via calls to Write() are initially held ** unsorted in main memory. Assuming the amount of memory used never exceeds ** a threshold, when Rewind() is called the set of records is sorted using ** an in-memory merge sort. In this case, no temporary files are required ** and subsequent calls to Rowkey(), Next() and Compare() read records ** directly from main memory. ** ** If the amount of space used to store records in main memory exceeds the ** threshold, then the set of records currently in memory are sorted and ** written to a temporary file in "Packed Memory Array" (PMA) format. ** A PMA created at this point is known as a "level-0 PMA". Higher levels ** of PMAs may be created by merging existing PMAs together - for example ** merging two or more level-0 PMAs together creates a level-1 PMA. ** ** The threshold for the amount of main memory to use before flushing ** records to a PMA is roughly the same as the limit configured for the ** page-cache of the main database. Specifically, the threshold is set to ** the value returned by "PRAGMA main.page_size" multipled by ** that returned by "PRAGMA main.cache_size", in bytes. ** ** If the sorter is running in single-threaded mode, then all PMAs generated ** are appended to a single temporary file. Or, if the sorter is running in ** multi-threaded mode then up to (N+1) temporary files may be opened, where ** N is the configured number of worker threads. In this case, instead of ** sorting the records and writing the PMA to a temporary file itself, the ** calling thread usually launches a worker thread to do so. Except, if ** there are already N worker threads running, the main thread does the work ** itself. 
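**
** For example, if the library is built with SQLITE_MAX_WORKER_THREADS=8 and
** the application has run "PRAGMA threads=4", then N is 4 and, per the
** paragraph above, up to five temporary files may be in use at once.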
** ** The sorter is running in multi-threaded mode if (a) the library was built ** with pre-processor symbol SQLITE_MAX_WORKER_THREADS set to a value greater ** than zero, and (b) worker threads have been enabled at runtime by calling ** "PRAGMA threads=N" with some value of N greater than 0. ** ** When Rewind() is called, any data remaining in memory is flushed to a ** final PMA. So at this point the data is stored in some number of sorted ** PMAs within temporary files on disk. ** ** If there are fewer than SORTER_MAX_MERGE_COUNT PMAs in total and the ** sorter is running in single-threaded mode, then these PMAs are merged ** incrementally as keys are retreived from the sorter by the VDBE. The ** MergeEngine object, described in further detail below, performs this ** merge. ** ** Or, if running in multi-threaded mode, then a background thread is ** launched to merge the existing PMAs. Once the background thread has ** merged T bytes of data into a single sorted PMA, the main thread ** begins reading keys from that PMA while the background thread proceeds ** with merging the next T bytes of data. And so on. ** ** Parameter T is set to half the value of the memory threshold used ** by Write() above to determine when to create a new PMA. ** ** If there are more than SORTER_MAX_MERGE_COUNT PMAs in total when ** Rewind() is called, then a hierarchy of incremental-merges is used. ** First, T bytes of data from the first SORTER_MAX_MERGE_COUNT PMAs on ** disk are merged together. Then T bytes of data from the second set, and ** so on, such that no operation ever merges more than SORTER_MAX_MERGE_COUNT ** PMAs at a time. This done is to improve locality. ** ** If running in multi-threaded mode and there are more than ** SORTER_MAX_MERGE_COUNT PMAs on disk when Rewind() is called, then more ** than one background thread may be created. Specifically, there may be ** one background thread for each temporary file on disk, and one background ** thread to merge the output of each of the others to a single PMA for ** the main thread to read from. */ /* #include "sqliteInt.h" */ /* #include "vdbeInt.h" */ /* ** If SQLITE_DEBUG_SORTER_THREADS is defined, this module outputs various ** messages to stderr that may be helpful in understanding the performance ** characteristics of the sorter in multi-threaded mode. */ #if 0 # define SQLITE_DEBUG_SORTER_THREADS 1 #endif /* ** Hard-coded maximum amount of data to accumulate in memory before flushing ** to a level 0 PMA. The purpose of this limit is to prevent various integer ** overflows. 512MiB. */ #define SQLITE_MAX_PMASZ (1<<29) /* ** Private objects used by the sorter */ typedef struct MergeEngine MergeEngine; /* Merge PMAs together */ typedef struct PmaReader PmaReader; /* Incrementally read one PMA */ typedef struct PmaWriter PmaWriter; /* Incrementally write one PMA */ typedef struct SorterRecord SorterRecord; /* A record being sorted */ typedef struct SortSubtask SortSubtask; /* A sub-task in the sort process */ typedef struct SorterFile SorterFile; /* Temporary file object wrapper */ typedef struct SorterList SorterList; /* In-memory list of records */ typedef struct IncrMerger IncrMerger; /* Read & merge multiple PMAs */ /* ** A container for a temp file handle and the current amount of data ** stored in the file. */ struct SorterFile { sqlite3_file *pFd; /* File handle */ i64 iEof; /* Bytes of data stored in pFd */ }; /* ** An in-memory list of objects to be sorted. 
** ** If aMemory==0 then each object is allocated separately and the objects ** are connected using SorterRecord.u.pNext. If aMemory!=0 then all objects ** are stored in the aMemory[] bulk memory, one right after the other, and ** are connected using SorterRecord.u.iNext. */ struct SorterList { SorterRecord *pList; /* Linked list of records */ u8 *aMemory; /* If non-NULL, bulk memory to hold pList */ int szPMA; /* Size of pList as PMA in bytes */ }; /* ** The MergeEngine object is used to combine two or more smaller PMAs into ** one big PMA using a merge operation. Separate PMAs all need to be ** combined into one big PMA in order to be able to step through the sorted ** records in order. ** ** The aReadr[] array contains a PmaReader object for each of the PMAs being ** merged. An aReadr[] object either points to a valid key or else is at EOF. ** ("EOF" means "End Of File". When aReadr[] is at EOF there is no more data.) ** For the purposes of the paragraphs below, we assume that the array is ** actually N elements in size, where N is the smallest power of 2 greater ** to or equal to the number of PMAs being merged. The extra aReadr[] elements ** are treated as if they are empty (always at EOF). ** ** The aTree[] array is also N elements in size. The value of N is stored in ** the MergeEngine.nTree variable. ** ** The final (N/2) elements of aTree[] contain the results of comparing ** pairs of PMA keys together. Element i contains the result of ** comparing aReadr[2*i-N] and aReadr[2*i-N+1]. Whichever key is smaller, the ** aTree element is set to the index of it. ** ** For the purposes of this comparison, EOF is considered greater than any ** other key value. If the keys are equal (only possible with two EOF ** values), it doesn't matter which index is stored. ** ** The (N/4) elements of aTree[] that precede the final (N/2) described ** above contains the index of the smallest of each block of 4 PmaReaders ** And so on. So that aTree[1] contains the index of the PmaReader that ** currently points to the smallest key value. aTree[0] is unused. ** ** Example: ** ** aReadr[0] -> Banana ** aReadr[1] -> Feijoa ** aReadr[2] -> Elderberry ** aReadr[3] -> Currant ** aReadr[4] -> Grapefruit ** aReadr[5] -> Apple ** aReadr[6] -> Durian ** aReadr[7] -> EOF ** ** aTree[] = { X, 5 0, 5 0, 3, 5, 6 } ** ** The current element is "Apple" (the value of the key indicated by ** PmaReader 5). When the Next() operation is invoked, PmaReader 5 will ** be advanced to the next key in its segment. Say the next key is ** "Eggplant": ** ** aReadr[5] -> Eggplant ** ** The contents of aTree[] are updated first by comparing the new PmaReader ** 5 key to the current key of PmaReader 4 (still "Grapefruit"). The PmaReader ** 5 value is still smaller, so aTree[6] is set to 5. And so on up the tree. ** The value of PmaReader 6 - "Durian" - is now smaller than that of PmaReader ** 5, so aTree[3] is set to 6. Key 0 is smaller than key 6 (Bananafile2. And instead of using a ** background thread to prepare data for the PmaReader, with a single ** threaded IncrMerger the allocate part of pTask->file2 is "refilled" with ** keys from pMerger by the calling thread whenever the PmaReader runs out ** of data. 
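**
** Roughly: aFile[0] always holds the keys currently being read by the
** parent PmaReader, while the next batch of (at most mxSz bytes of) merged
** output is prepared in aFile[1] - by a background thread if bUseThread is
** true. vdbeIncrSwap() exchanges the two once the readable data has been
** consumed.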
*/ struct IncrMerger { SortSubtask *pTask; /* Task that owns this merger */ MergeEngine *pMerger; /* Merge engine thread reads data from */ i64 iStartOff; /* Offset to start writing file at */ int mxSz; /* Maximum bytes of data to store */ int bEof; /* Set to true when merge is finished */ int bUseThread; /* True to use a bg thread for this object */ SorterFile aFile[2]; /* aFile[0] for reading, [1] for writing */ }; /* ** An instance of this object is used for writing a PMA. ** ** The PMA is written one record at a time. Each record is of an arbitrary ** size. But I/O is more efficient if it occurs in page-sized blocks where ** each block is aligned on a page boundary. This object caches writes to ** the PMA so that aligned, page-size blocks are written. */ struct PmaWriter { int eFWErr; /* Non-zero if in an error state */ u8 *aBuffer; /* Pointer to write buffer */ int nBuffer; /* Size of write buffer in bytes */ int iBufStart; /* First byte of buffer to write */ int iBufEnd; /* Last byte of buffer to write */ i64 iWriteOff; /* Offset of start of buffer in file */ sqlite3_file *pFd; /* File handle to write to */ }; /* ** This object is the header on a single record while that record is being ** held in memory and prior to being written out as part of a PMA. ** ** How the linked list is connected depends on how memory is being managed ** by this module. If using a separate allocation for each in-memory record ** (VdbeSorter.list.aMemory==0), then the list is always connected using the ** SorterRecord.u.pNext pointers. ** ** Or, if using the single large allocation method (VdbeSorter.list.aMemory!=0), ** then while records are being accumulated the list is linked using the ** SorterRecord.u.iNext offset. This is because the aMemory[] array may ** be sqlite3Realloc()ed while records are being accumulated. Once the VM ** has finished passing records to the sorter, or when the in-memory buffer ** is full, the list is sorted. As part of the sorting process, it is ** converted to use the SorterRecord.u.pNext pointers. See function ** vdbeSorterSort() for details. */ struct SorterRecord { int nVal; /* Size of the record in bytes */ union { SorterRecord *pNext; /* Pointer to next record in list */ int iNext; /* Offset within aMemory of next record */ } u; /* The data for the record immediately follows this header */ }; /* Return a pointer to the buffer containing the record data for SorterRecord ** object p. Should be used as if: ** ** void *SRVAL(SorterRecord *p) { return (void*)&p[1]; } */ #define SRVAL(p) ((void*)((SorterRecord*)(p) + 1)) /* Maximum number of PMAs that a single MergeEngine can merge */ #define SORTER_MAX_MERGE_COUNT 16 static int vdbeIncrSwap(IncrMerger*); static void vdbeIncrFree(IncrMerger *); /* ** Free all memory belonging to the PmaReader object passed as the ** argument. All structure fields are set to zero before returning. */ static void vdbePmaReaderClear(PmaReader *pReadr){ sqlite3_free(pReadr->aAlloc); sqlite3_free(pReadr->aBuffer); if( pReadr->aMap ) sqlite3OsUnfetch(pReadr->pFd, 0, pReadr->aMap); vdbeIncrFree(pReadr->pIncr); memset(pReadr, 0, sizeof(PmaReader)); } /* ** Read the next nByte bytes of data from the PMA p. ** If successful, set *ppOut to point to a buffer containing the data ** and return SQLITE_OK. Otherwise, if an error occurs, return an SQLite ** error code. ** ** The buffer returned in *ppOut is only valid until the ** next call to this function. 
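**
** Two cases are handled below: if the PMA file is memory mapped (p->aMap is
** not NULL), a pointer directly into the mapping is returned. Otherwise the
** data is read through the page-sized p->aBuffer, and a request that spans
** a buffer boundary is assembled in the separately grown p->aAlloc[] array.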
*/ static int vdbePmaReadBlob( PmaReader *p, /* PmaReader from which to take the blob */ int nByte, /* Bytes of data to read */ u8 **ppOut /* OUT: Pointer to buffer containing data */ ){ int iBuf; /* Offset within buffer to read from */ int nAvail; /* Bytes of data available in buffer */ if( p->aMap ){ *ppOut = &p->aMap[p->iReadOff]; p->iReadOff += nByte; return SQLITE_OK; } assert( p->aBuffer ); /* If there is no more data to be read from the buffer, read the next ** p->nBuffer bytes of data from the file into it. Or, if there are less ** than p->nBuffer bytes remaining in the PMA, read all remaining data. */ iBuf = p->iReadOff % p->nBuffer; if( iBuf==0 ){ int nRead; /* Bytes to read from disk */ int rc; /* sqlite3OsRead() return code */ /* Determine how many bytes of data to read. */ if( (p->iEof - p->iReadOff) > (i64)p->nBuffer ){ nRead = p->nBuffer; }else{ nRead = (int)(p->iEof - p->iReadOff); } assert( nRead>0 ); /* Readr data from the file. Return early if an error occurs. */ rc = sqlite3OsRead(p->pFd, p->aBuffer, nRead, p->iReadOff); assert( rc!=SQLITE_IOERR_SHORT_READ ); if( rc!=SQLITE_OK ) return rc; } nAvail = p->nBuffer - iBuf; if( nByte<=nAvail ){ /* The requested data is available in the in-memory buffer. In this ** case there is no need to make a copy of the data, just return a ** pointer into the buffer to the caller. */ *ppOut = &p->aBuffer[iBuf]; p->iReadOff += nByte; }else{ /* The requested data is not all available in the in-memory buffer. ** In this case, allocate space at p->aAlloc[] to copy the requested ** range into. Then return a copy of pointer p->aAlloc to the caller. */ int nRem; /* Bytes remaining to copy */ /* Extend the p->aAlloc[] allocation if required. */ if( p->nAllocnAlloc*2); while( nByte>nNew ) nNew = nNew*2; aNew = sqlite3Realloc(p->aAlloc, nNew); if( !aNew ) return SQLITE_NOMEM; p->nAlloc = nNew; p->aAlloc = aNew; } /* Copy as much data as is available in the buffer into the start of ** p->aAlloc[]. */ memcpy(p->aAlloc, &p->aBuffer[iBuf], nAvail); p->iReadOff += nAvail; nRem = nByte - nAvail; /* The following loop copies up to p->nBuffer bytes per iteration into ** the p->aAlloc[] buffer. */ while( nRem>0 ){ int rc; /* vdbePmaReadBlob() return code */ int nCopy; /* Number of bytes to copy */ u8 *aNext; /* Pointer to buffer to copy data from */ nCopy = nRem; if( nRem>p->nBuffer ) nCopy = p->nBuffer; rc = vdbePmaReadBlob(p, nCopy, &aNext); if( rc!=SQLITE_OK ) return rc; assert( aNext!=p->aAlloc ); memcpy(&p->aAlloc[nByte - nRem], aNext, nCopy); nRem -= nCopy; } *ppOut = p->aAlloc; } return SQLITE_OK; } /* ** Read a varint from the stream of data accessed by p. Set *pnOut to ** the value read. */ static int vdbePmaReadVarint(PmaReader *p, u64 *pnOut){ int iBuf; if( p->aMap ){ p->iReadOff += sqlite3GetVarint(&p->aMap[p->iReadOff], pnOut); }else{ iBuf = p->iReadOff % p->nBuffer; if( iBuf && (p->nBuffer-iBuf)>=9 ){ p->iReadOff += sqlite3GetVarint(&p->aBuffer[iBuf], pnOut); }else{ u8 aVarint[16], *a; int i = 0, rc; do{ rc = vdbePmaReadBlob(p, 1, &a); if( rc ) return rc; aVarint[(i++)&0xf] = a[0]; }while( (a[0]&0x80)!=0 ); sqlite3GetVarint(aVarint, pnOut); } } return SQLITE_OK; } /* ** Attempt to memory map file pFile. If successful, set *pp to point to the ** new mapping and return SQLITE_OK. If the mapping is not attempted ** (because the file is too large or the VFS layer is configured not to use ** mmap), return SQLITE_OK and set *pp to NULL. ** ** Or, if an error occurs, return an SQLite error code. The final value of ** *pp is undefined in this case. 
*/ static int vdbeSorterMapFile(SortSubtask *pTask, SorterFile *pFile, u8 **pp){ int rc = SQLITE_OK; if( pFile->iEof<=(i64)(pTask->pSorter->db->nMaxSorterMmap) ){ sqlite3_file *pFd = pFile->pFd; if( pFd->pMethods->iVersion>=3 ){ rc = sqlite3OsFetch(pFd, 0, (int)pFile->iEof, (void**)pp); testcase( rc!=SQLITE_OK ); } } return rc; } /* ** Attach PmaReader pReadr to file pFile (if it is not already attached to ** that file) and seek it to offset iOff within the file. Return SQLITE_OK ** if successful, or an SQLite error code if an error occurs. */ static int vdbePmaReaderSeek( SortSubtask *pTask, /* Task context */ PmaReader *pReadr, /* Reader whose cursor is to be moved */ SorterFile *pFile, /* Sorter file to read from */ i64 iOff /* Offset in pFile */ ){ int rc = SQLITE_OK; assert( pReadr->pIncr==0 || pReadr->pIncr->bEof==0 ); if( sqlite3FaultSim(201) ) return SQLITE_IOERR_READ; if( pReadr->aMap ){ sqlite3OsUnfetch(pReadr->pFd, 0, pReadr->aMap); pReadr->aMap = 0; } pReadr->iReadOff = iOff; pReadr->iEof = pFile->iEof; pReadr->pFd = pFile->pFd; rc = vdbeSorterMapFile(pTask, pFile, &pReadr->aMap); if( rc==SQLITE_OK && pReadr->aMap==0 ){ int pgsz = pTask->pSorter->pgsz; int iBuf = pReadr->iReadOff % pgsz; if( pReadr->aBuffer==0 ){ pReadr->aBuffer = (u8*)sqlite3Malloc(pgsz); if( pReadr->aBuffer==0 ) rc = SQLITE_NOMEM; pReadr->nBuffer = pgsz; } if( rc==SQLITE_OK && iBuf ){ int nRead = pgsz - iBuf; if( (pReadr->iReadOff + nRead) > pReadr->iEof ){ nRead = (int)(pReadr->iEof - pReadr->iReadOff); } rc = sqlite3OsRead( pReadr->pFd, &pReadr->aBuffer[iBuf], nRead, pReadr->iReadOff ); testcase( rc!=SQLITE_OK ); } } return rc; } /* ** Advance PmaReader pReadr to the next key in its PMA. Return SQLITE_OK if ** no error occurs, or an SQLite error code if one does. */ static int vdbePmaReaderNext(PmaReader *pReadr){ int rc = SQLITE_OK; /* Return Code */ u64 nRec = 0; /* Size of record in bytes */ if( pReadr->iReadOff>=pReadr->iEof ){ IncrMerger *pIncr = pReadr->pIncr; int bEof = 1; if( pIncr ){ rc = vdbeIncrSwap(pIncr); if( rc==SQLITE_OK && pIncr->bEof==0 ){ rc = vdbePmaReaderSeek( pIncr->pTask, pReadr, &pIncr->aFile[0], pIncr->iStartOff ); bEof = 0; } } if( bEof ){ /* This is an EOF condition */ vdbePmaReaderClear(pReadr); testcase( rc!=SQLITE_OK ); return rc; } } if( rc==SQLITE_OK ){ rc = vdbePmaReadVarint(pReadr, &nRec); } if( rc==SQLITE_OK ){ pReadr->nKey = (int)nRec; rc = vdbePmaReadBlob(pReadr, (int)nRec, &pReadr->aKey); testcase( rc!=SQLITE_OK ); } return rc; } /* ** Initialize PmaReader pReadr to scan through the PMA stored in file pFile ** starting at offset iStart and ending at offset iEof-1. This function ** leaves the PmaReader pointing to the first key in the PMA (or EOF if the ** PMA is empty). ** ** If the pnByte parameter is NULL, then it is assumed that the file ** contains a single PMA, and that that PMA omits the initial length varint. 
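**
** For reference, a PMA as written by this module is laid out approximately
** as follows, where every size is a varint:
**
**     <total-content-size>
**     <key-size> <key bytes>        (repeated once per record)
**
** This is why the code below first reads a varint to establish pReadr->iEof
** and then calls vdbePmaReaderNext() to load the first key.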
*/ static int vdbePmaReaderInit( SortSubtask *pTask, /* Task context */ SorterFile *pFile, /* Sorter file to read from */ i64 iStart, /* Start offset in pFile */ PmaReader *pReadr, /* PmaReader to populate */ i64 *pnByte /* IN/OUT: Increment this value by PMA size */ ){ int rc; assert( pFile->iEof>iStart ); assert( pReadr->aAlloc==0 && pReadr->nAlloc==0 ); assert( pReadr->aBuffer==0 ); assert( pReadr->aMap==0 ); rc = vdbePmaReaderSeek(pTask, pReadr, pFile, iStart); if( rc==SQLITE_OK ){ u64 nByte; /* Size of PMA in bytes */ rc = vdbePmaReadVarint(pReadr, &nByte); pReadr->iEof = pReadr->iReadOff + nByte; *pnByte += nByte; } if( rc==SQLITE_OK ){ rc = vdbePmaReaderNext(pReadr); } return rc; } /* ** A version of vdbeSorterCompare() that assumes that it has already been ** determined that the first field of key1 is equal to the first field of ** key2. */ static int vdbeSorterCompareTail( SortSubtask *pTask, /* Subtask context (for pKeyInfo) */ int *pbKey2Cached, /* True if pTask->pUnpacked is pKey2 */ const void *pKey1, int nKey1, /* Left side of comparison */ const void *pKey2, int nKey2 /* Right side of comparison */ ){ UnpackedRecord *r2 = pTask->pUnpacked; if( *pbKey2Cached==0 ){ sqlite3VdbeRecordUnpack(pTask->pSorter->pKeyInfo, nKey2, pKey2, r2); *pbKey2Cached = 1; } return sqlite3VdbeRecordCompareWithSkip(nKey1, pKey1, r2, 1); } /* ** Compare key1 (buffer pKey1, size nKey1 bytes) with key2 (buffer pKey2, ** size nKey2 bytes). Use (pTask->pKeyInfo) for the collation sequences ** used by the comparison. Return the result of the comparison. ** ** If IN/OUT parameter *pbKey2Cached is true when this function is called, ** it is assumed that (pTask->pUnpacked) contains the unpacked version ** of key2. If it is false, (pTask->pUnpacked) is populated with the unpacked ** version of key2 and *pbKey2Cached set to true before returning. ** ** If an OOM error is encountered, (pTask->pUnpacked->error_rc) is set ** to SQLITE_NOMEM. */ static int vdbeSorterCompare( SortSubtask *pTask, /* Subtask context (for pKeyInfo) */ int *pbKey2Cached, /* True if pTask->pUnpacked is pKey2 */ const void *pKey1, int nKey1, /* Left side of comparison */ const void *pKey2, int nKey2 /* Right side of comparison */ ){ UnpackedRecord *r2 = pTask->pUnpacked; if( !*pbKey2Cached ){ sqlite3VdbeRecordUnpack(pTask->pSorter->pKeyInfo, nKey2, pKey2, r2); *pbKey2Cached = 1; } return sqlite3VdbeRecordCompare(nKey1, pKey1, r2); } /* ** A specially optimized version of vdbeSorterCompare() that assumes that ** the first field of each key is a TEXT value and that the collation ** sequence to compare them with is BINARY. 
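**
** The optimization relies on the record format: p[0] holds the size of the
** record header (assumed to fit in a single byte), p[1] holds the serial
** type of the first field, and a TEXT value of n bytes has serial type
** (13 + 2*n). Hence the code below recovers the text length as
** (serial-type - 13)/2; e.g. a serial type of 23 denotes a 5 byte string.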
*/ static int vdbeSorterCompareText( SortSubtask *pTask, /* Subtask context (for pKeyInfo) */ int *pbKey2Cached, /* True if pTask->pUnpacked is pKey2 */ const void *pKey1, int nKey1, /* Left side of comparison */ const void *pKey2, int nKey2 /* Right side of comparison */ ){ const u8 * const p1 = (const u8 * const)pKey1; const u8 * const p2 = (const u8 * const)pKey2; const u8 * const v1 = &p1[ p1[0] ]; /* Pointer to value 1 */ const u8 * const v2 = &p2[ p2[0] ]; /* Pointer to value 2 */ int n1; int n2; int res; getVarint32(&p1[1], n1); n1 = (n1 - 13) / 2; getVarint32(&p2[1], n2); n2 = (n2 - 13) / 2; res = memcmp(v1, v2, MIN(n1, n2)); if( res==0 ){ res = n1 - n2; } if( res==0 ){ if( pTask->pSorter->pKeyInfo->nField>1 ){ res = vdbeSorterCompareTail( pTask, pbKey2Cached, pKey1, nKey1, pKey2, nKey2 ); } }else{ if( pTask->pSorter->pKeyInfo->aSortOrder[0] ){ res = res * -1; } } return res; } /* ** A specially optimized version of vdbeSorterCompare() that assumes that ** the first field of each key is an INTEGER value. */ static int vdbeSorterCompareInt( SortSubtask *pTask, /* Subtask context (for pKeyInfo) */ int *pbKey2Cached, /* True if pTask->pUnpacked is pKey2 */ const void *pKey1, int nKey1, /* Left side of comparison */ const void *pKey2, int nKey2 /* Right side of comparison */ ){ const u8 * const p1 = (const u8 * const)pKey1; const u8 * const p2 = (const u8 * const)pKey2; const int s1 = p1[1]; /* Left hand serial type */ const int s2 = p2[1]; /* Right hand serial type */ const u8 * const v1 = &p1[ p1[0] ]; /* Pointer to value 1 */ const u8 * const v2 = &p2[ p2[0] ]; /* Pointer to value 2 */ int res; /* Return value */ assert( (s1>0 && s1<7) || s1==8 || s1==9 ); assert( (s2>0 && s2<7) || s2==8 || s2==9 ); if( s1>7 && s2>7 ){ res = s1 - s2; }else{ if( s1==s2 ){ if( (*v1 ^ *v2) & 0x80 ){ /* The two values have different signs */ res = (*v1 & 0x80) ? -1 : +1; }else{ /* The two values have the same sign. Compare using memcmp(). */ static const u8 aLen[] = {0, 1, 2, 3, 4, 6, 8 }; int i; res = 0; for(i=0; i7 ){ res = +1; }else if( s1>7 ){ res = -1; }else{ res = s1 - s2; } assert( res!=0 ); if( res>0 ){ if( *v1 & 0x80 ) res = -1; }else{ if( *v2 & 0x80 ) res = +1; } } } if( res==0 ){ if( pTask->pSorter->pKeyInfo->nField>1 ){ res = vdbeSorterCompareTail( pTask, pbKey2Cached, pKey1, nKey1, pKey2, nKey2 ); } }else if( pTask->pSorter->pKeyInfo->aSortOrder[0] ){ res = res * -1; } return res; } /* ** Initialize the temporary index cursor just opened as a sorter cursor. ** ** Usually, the sorter module uses the value of (pCsr->pKeyInfo->nField) ** to determine the number of fields that should be compared from the ** records being sorted. However, if the value passed as argument nField ** is non-zero and the sorter is able to guarantee a stable sort, nField ** is used instead. This is used when sorting records for a CREATE INDEX ** statement. In this case, keys are always delivered to the sorter in ** order of the primary key, which happens to be make up the final part ** of the records being sorted. So if the sort is stable, there is never ** any reason to compare PK fields and they can be ignored for a small ** performance boost. ** ** The sorter can guarantee a stable sort when running in single-threaded ** mode, but not in multi-threaded mode. ** ** SQLITE_OK is returned if successful, or an SQLite error code otherwise. 
*/ SQLITE_PRIVATE int sqlite3VdbeSorterInit( sqlite3 *db, /* Database connection (for malloc()) */ int nField, /* Number of key fields in each record */ VdbeCursor *pCsr /* Cursor that holds the new sorter */ ){ int pgsz; /* Page size of main database */ int i; /* Used to iterate through aTask[] */ int mxCache; /* Cache size */ VdbeSorter *pSorter; /* The new sorter */ KeyInfo *pKeyInfo; /* Copy of pCsr->pKeyInfo with db==0 */ int szKeyInfo; /* Size of pCsr->pKeyInfo in bytes */ int sz; /* Size of pSorter in bytes */ int rc = SQLITE_OK; #if SQLITE_MAX_WORKER_THREADS==0 # define nWorker 0 #else int nWorker; #endif /* Initialize the upper limit on the number of worker threads */ #if SQLITE_MAX_WORKER_THREADS>0 if( sqlite3TempInMemory(db) || sqlite3GlobalConfig.bCoreMutex==0 ){ nWorker = 0; }else{ nWorker = db->aLimit[SQLITE_LIMIT_WORKER_THREADS]; } #endif /* Do not allow the total number of threads (main thread + all workers) ** to exceed the maximum merge count */ #if SQLITE_MAX_WORKER_THREADS>=SORTER_MAX_MERGE_COUNT if( nWorker>=SORTER_MAX_MERGE_COUNT ){ nWorker = SORTER_MAX_MERGE_COUNT-1; } #endif assert( pCsr->pKeyInfo && pCsr->pBt==0 ); szKeyInfo = sizeof(KeyInfo) + (pCsr->pKeyInfo->nField-1)*sizeof(CollSeq*); sz = sizeof(VdbeSorter) + nWorker * sizeof(SortSubtask); pSorter = (VdbeSorter*)sqlite3DbMallocZero(db, sz + szKeyInfo); pCsr->pSorter = pSorter; if( pSorter==0 ){ rc = SQLITE_NOMEM; }else{ pSorter->pKeyInfo = pKeyInfo = (KeyInfo*)((u8*)pSorter + sz); memcpy(pKeyInfo, pCsr->pKeyInfo, szKeyInfo); pKeyInfo->db = 0; if( nField && nWorker==0 ){ pKeyInfo->nXField += (pKeyInfo->nField - nField); pKeyInfo->nField = nField; } pSorter->pgsz = pgsz = sqlite3BtreeGetPageSize(db->aDb[0].pBt); pSorter->nTask = nWorker + 1; pSorter->iPrev = nWorker-1; pSorter->bUseThreads = (pSorter->nTask>1); pSorter->db = db; for(i=0; inTask; i++){ SortSubtask *pTask = &pSorter->aTask[i]; pTask->pSorter = pSorter; } if( !sqlite3TempInMemory(db) ){ u32 szPma = sqlite3GlobalConfig.szPma; pSorter->mnPmaSize = szPma * pgsz; mxCache = db->aDb[0].pSchema->cache_size; if( mxCache<(int)szPma ) mxCache = (int)szPma; pSorter->mxPmaSize = MIN((i64)mxCache*pgsz, SQLITE_MAX_PMASZ); /* EVIDENCE-OF: R-26747-61719 When the application provides any amount of ** scratch memory using SQLITE_CONFIG_SCRATCH, SQLite avoids unnecessary ** large heap allocations. */ if( sqlite3GlobalConfig.pScratch==0 ){ assert( pSorter->iMemory==0 ); pSorter->nMemory = pgsz; pSorter->list.aMemory = (u8*)sqlite3Malloc(pgsz); if( !pSorter->list.aMemory ) rc = SQLITE_NOMEM; } } if( (pKeyInfo->nField+pKeyInfo->nXField)<13 && (pKeyInfo->aColl[0]==0 || pKeyInfo->aColl[0]==db->pDfltColl) ){ pSorter->typeMask = SORTER_TYPE_INTEGER | SORTER_TYPE_TEXT; } } return rc; } #undef nWorker /* Defined at the top of this function */ /* ** Free the list of sorted records starting at pRecord. */ static void vdbeSorterRecordFree(sqlite3 *db, SorterRecord *pRecord){ SorterRecord *p; SorterRecord *pNext; for(p=pRecord; p; p=pNext){ pNext = p->u.pNext; sqlite3DbFree(db, p); } } /* ** Free all resources owned by the object indicated by argument pTask. All ** fields of *pTask are zeroed before returning. */ static void vdbeSortSubtaskCleanup(sqlite3 *db, SortSubtask *pTask){ sqlite3DbFree(db, pTask->pUnpacked); #if SQLITE_MAX_WORKER_THREADS>0 /* pTask->list.aMemory can only be non-zero if it was handed memory ** from the main thread. 
That only occurs SQLITE_MAX_WORKER_THREADS>0 */ if( pTask->list.aMemory ){ sqlite3_free(pTask->list.aMemory); }else #endif { assert( pTask->list.aMemory==0 ); vdbeSorterRecordFree(0, pTask->list.pList); } if( pTask->file.pFd ){ sqlite3OsCloseFree(pTask->file.pFd); } if( pTask->file2.pFd ){ sqlite3OsCloseFree(pTask->file2.pFd); } memset(pTask, 0, sizeof(SortSubtask)); } #ifdef SQLITE_DEBUG_SORTER_THREADS static void vdbeSorterWorkDebug(SortSubtask *pTask, const char *zEvent){ i64 t; int iTask = (pTask - pTask->pSorter->aTask); sqlite3OsCurrentTimeInt64(pTask->pSorter->db->pVfs, &t); fprintf(stderr, "%lld:%d %s\n", t, iTask, zEvent); } static void vdbeSorterRewindDebug(const char *zEvent){ i64 t; sqlite3OsCurrentTimeInt64(sqlite3_vfs_find(0), &t); fprintf(stderr, "%lld:X %s\n", t, zEvent); } static void vdbeSorterPopulateDebug( SortSubtask *pTask, const char *zEvent ){ i64 t; int iTask = (pTask - pTask->pSorter->aTask); sqlite3OsCurrentTimeInt64(pTask->pSorter->db->pVfs, &t); fprintf(stderr, "%lld:bg%d %s\n", t, iTask, zEvent); } static void vdbeSorterBlockDebug( SortSubtask *pTask, int bBlocked, const char *zEvent ){ if( bBlocked ){ i64 t; sqlite3OsCurrentTimeInt64(pTask->pSorter->db->pVfs, &t); fprintf(stderr, "%lld:main %s\n", t, zEvent); } } #else # define vdbeSorterWorkDebug(x,y) # define vdbeSorterRewindDebug(y) # define vdbeSorterPopulateDebug(x,y) # define vdbeSorterBlockDebug(x,y,z) #endif #if SQLITE_MAX_WORKER_THREADS>0 /* ** Join thread pTask->thread. */ static int vdbeSorterJoinThread(SortSubtask *pTask){ int rc = SQLITE_OK; if( pTask->pThread ){ #ifdef SQLITE_DEBUG_SORTER_THREADS int bDone = pTask->bDone; #endif void *pRet = SQLITE_INT_TO_PTR(SQLITE_ERROR); vdbeSorterBlockDebug(pTask, !bDone, "enter"); (void)sqlite3ThreadJoin(pTask->pThread, &pRet); vdbeSorterBlockDebug(pTask, !bDone, "exit"); rc = SQLITE_PTR_TO_INT(pRet); assert( pTask->bDone==1 ); pTask->bDone = 0; pTask->pThread = 0; } return rc; } /* ** Launch a background thread to run xTask(pIn). */ static int vdbeSorterCreateThread( SortSubtask *pTask, /* Thread will use this task object */ void *(*xTask)(void*), /* Routine to run in a separate thread */ void *pIn /* Argument passed into xTask() */ ){ assert( pTask->pThread==0 && pTask->bDone==0 ); return sqlite3ThreadCreate(&pTask->pThread, xTask, pIn); } /* ** Join all outstanding threads launched by SorterWrite() to create ** level-0 PMAs. */ static int vdbeSorterJoinAll(VdbeSorter *pSorter, int rcin){ int rc = rcin; int i; /* This function is always called by the main user thread. ** ** If this function is being called after SorterRewind() has been called, ** it is possible that thread pSorter->aTask[pSorter->nTask-1].pThread ** is currently attempt to join one of the other threads. To avoid a race ** condition where this thread also attempts to join the same object, join ** thread pSorter->aTask[pSorter->nTask-1].pThread first. */ for(i=pSorter->nTask-1; i>=0; i--){ SortSubtask *pTask = &pSorter->aTask[i]; int rc2 = vdbeSorterJoinThread(pTask); if( rc==SQLITE_OK ) rc = rc2; } return rc; } #else # define vdbeSorterJoinAll(x,rcin) (rcin) # define vdbeSorterJoinThread(pTask) SQLITE_OK #endif /* ** Allocate a new MergeEngine object capable of handling up to ** nReader PmaReader inputs. ** ** nReader is automatically rounded up to the next power of two. ** nReader may not exceed SORTER_MAX_MERGE_COUNT even after rounding up. 
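**
** For example, a request for nReader==5 results in N==8: the returned
** object has room for 8 PmaReaders (the three unused ones simply remain at
** EOF) and an 8 entry aTree[].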
*/ static MergeEngine *vdbeMergeEngineNew(int nReader){ int N = 2; /* Smallest power of two >= nReader */ int nByte; /* Total bytes of space to allocate */ MergeEngine *pNew; /* Pointer to allocated object to return */ assert( nReader<=SORTER_MAX_MERGE_COUNT ); while( N<nReader ) N += N; nByte = sizeof(MergeEngine) + N * (sizeof(int) + sizeof(PmaReader)); pNew = sqlite3FaultSim(100) ? 0 : (MergeEngine*)sqlite3MallocZero(nByte); if( pNew ){ pNew->nTree = N; pNew->pTask = 0; pNew->aReadr = (PmaReader*)&pNew[1]; pNew->aTree = (int*)&pNew->aReadr[N]; } return pNew; } /* ** Free the MergeEngine object passed as the only argument. */ static void vdbeMergeEngineFree(MergeEngine *pMerger){ int i; if( pMerger ){ for(i=0; i<pMerger->nTree; i++){ vdbePmaReaderClear(&pMerger->aReadr[i]); } } sqlite3_free(pMerger); } /* ** Free all resources associated with the IncrMerger object indicated by ** the first argument. */ static void vdbeIncrFree(IncrMerger *pIncr){ if( pIncr ){ #if SQLITE_MAX_WORKER_THREADS>0 if( pIncr->bUseThread ){ vdbeSorterJoinThread(pIncr->pTask); if( pIncr->aFile[0].pFd ) sqlite3OsCloseFree(pIncr->aFile[0].pFd); if( pIncr->aFile[1].pFd ) sqlite3OsCloseFree(pIncr->aFile[1].pFd); } #endif vdbeMergeEngineFree(pIncr->pMerger); sqlite3_free(pIncr); } } /* ** Reset a sorting cursor back to its original empty state. */ SQLITE_PRIVATE void sqlite3VdbeSorterReset(sqlite3 *db, VdbeSorter *pSorter){ int i; (void)vdbeSorterJoinAll(pSorter, SQLITE_OK); assert( pSorter->bUseThreads || pSorter->pReader==0 ); #if SQLITE_MAX_WORKER_THREADS>0 if( pSorter->pReader ){ vdbePmaReaderClear(pSorter->pReader); sqlite3DbFree(db, pSorter->pReader); pSorter->pReader = 0; } #endif vdbeMergeEngineFree(pSorter->pMerger); pSorter->pMerger = 0; for(i=0; i<pSorter->nTask; i++){ SortSubtask *pTask = &pSorter->aTask[i]; vdbeSortSubtaskCleanup(db, pTask); pTask->pSorter = pSorter; } if( pSorter->list.aMemory==0 ){ vdbeSorterRecordFree(0, pSorter->list.pList); } pSorter->list.pList = 0; pSorter->list.szPMA = 0; pSorter->bUsePMA = 0; pSorter->iMemory = 0; pSorter->mxKeysize = 0; sqlite3DbFree(db, pSorter->pUnpacked); pSorter->pUnpacked = 0; } /* ** Free any cursor components allocated by sqlite3VdbeSorterXXX routines. */ SQLITE_PRIVATE void sqlite3VdbeSorterClose(sqlite3 *db, VdbeCursor *pCsr){ VdbeSorter *pSorter = pCsr->pSorter; if( pSorter ){ sqlite3VdbeSorterReset(db, pSorter); sqlite3_free(pSorter->list.aMemory); sqlite3DbFree(db, pSorter); pCsr->pSorter = 0; } } #if SQLITE_MAX_MMAP_SIZE>0 /* ** The first argument is a file-handle open on a temporary file. The file ** is guaranteed to be nByte bytes or smaller in size. This function ** attempts to extend the file to nByte bytes in size and to ensure that ** the VFS has memory mapped it. ** ** Whether or not the file does end up memory mapped of course depends on ** the specific VFS implementation. */ static void vdbeSorterExtendFile(sqlite3 *db, sqlite3_file *pFd, i64 nByte){ if( nByte<=(i64)(db->nMaxSorterMmap) && pFd->pMethods->iVersion>=3 ){ void *p = 0; int chunksize = 4*1024; sqlite3OsFileControlHint(pFd, SQLITE_FCNTL_CHUNK_SIZE, &chunksize); sqlite3OsFileControlHint(pFd, SQLITE_FCNTL_SIZE_HINT, &nByte); sqlite3OsFetch(pFd, 0, (int)nByte, &p); sqlite3OsUnfetch(pFd, 0, p); } } #else # define vdbeSorterExtendFile(x,y,z) #endif /* ** Allocate space for a file-handle and open a temporary file. If successful, ** set *ppFd to point to the malloc'd file-handle and return SQLITE_OK. ** Otherwise, set *ppFd to 0 and return an SQLite error code.
*/ static int vdbeSorterOpenTempFile( sqlite3 *db, /* Database handle doing sort */ i64 nExtend, /* Attempt to extend file to this size */ sqlite3_file **ppFd ){ int rc; if( sqlite3FaultSim(202) ) return SQLITE_IOERR_ACCESS; rc = sqlite3OsOpenMalloc(db->pVfs, 0, ppFd, SQLITE_OPEN_TEMP_JOURNAL | SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | SQLITE_OPEN_EXCLUSIVE | SQLITE_OPEN_DELETEONCLOSE, &rc ); if( rc==SQLITE_OK ){ i64 max = SQLITE_MAX_MMAP_SIZE; sqlite3OsFileControlHint(*ppFd, SQLITE_FCNTL_MMAP_SIZE, (void*)&max); if( nExtend>0 ){ vdbeSorterExtendFile(db, *ppFd, nExtend); } } return rc; } /* ** If it has not already been allocated, allocate the UnpackedRecord ** structure at pTask->pUnpacked. Return SQLITE_OK if successful (or ** if no allocation was required), or SQLITE_NOMEM otherwise. */ static int vdbeSortAllocUnpacked(SortSubtask *pTask){ if( pTask->pUnpacked==0 ){ char *pFree; pTask->pUnpacked = sqlite3VdbeAllocUnpackedRecord( pTask->pSorter->pKeyInfo, 0, 0, &pFree ); assert( pTask->pUnpacked==(UnpackedRecord*)pFree ); if( pFree==0 ) return SQLITE_NOMEM; pTask->pUnpacked->nField = pTask->pSorter->pKeyInfo->nField; pTask->pUnpacked->errCode = 0; } return SQLITE_OK; } /* ** Merge the two sorted lists p1 and p2 into a single list. ** Set *ppOut to the head of the new list. */ static void vdbeSorterMerge( SortSubtask *pTask, /* Calling thread context */ SorterRecord *p1, /* First list to merge */ SorterRecord *p2, /* Second list to merge */ SorterRecord **ppOut /* OUT: Head of merged list */ ){ SorterRecord *pFinal = 0; SorterRecord **pp = &pFinal; int bCached = 0; while( p1 && p2 ){ int res; res = pTask->xCompare( pTask, &bCached, SRVAL(p1), p1->nVal, SRVAL(p2), p2->nVal ); if( res<=0 ){ *pp = p1; pp = &p1->u.pNext; p1 = p1->u.pNext; }else{ *pp = p2; pp = &p2->u.pNext; p2 = p2->u.pNext; bCached = 0; } } *pp = p1 ? p1 : p2; *ppOut = pFinal; } /* ** Return the SorterCompare function to compare values collected by the ** sorter object passed as the only argument. */ static SorterCompare vdbeSorterGetCompare(VdbeSorter *p){ if( p->typeMask==SORTER_TYPE_INTEGER ){ return vdbeSorterCompareInt; }else if( p->typeMask==SORTER_TYPE_TEXT ){ return vdbeSorterCompareText; } return vdbeSorterCompare; } /* ** Sort the linked list of records headed at pTask->pList. Return ** SQLITE_OK if successful, or an SQLite error code (i.e. SQLITE_NOMEM) if ** an error occurs. */ static int vdbeSorterSort(SortSubtask *pTask, SorterList *pList){ int i; SorterRecord **aSlot; SorterRecord *p; int rc; rc = vdbeSortAllocUnpacked(pTask); if( rc!=SQLITE_OK ) return rc; p = pList->pList; pTask->xCompare = vdbeSorterGetCompare(pTask->pSorter); aSlot = (SorterRecord **)sqlite3MallocZero(64 * sizeof(SorterRecord *)); if( !aSlot ){ return SQLITE_NOMEM; } while( p ){ SorterRecord *pNext; if( pList->aMemory ){ if( (u8*)p==pList->aMemory ){ pNext = 0; }else{ assert( p->u.iNextaMemory) ); pNext = (SorterRecord*)&pList->aMemory[p->u.iNext]; } }else{ pNext = p->u.pNext; } p->u.pNext = 0; for(i=0; aSlot[i]; i++){ vdbeSorterMerge(pTask, p, aSlot[i], &p); aSlot[i] = 0; } aSlot[i] = p; p = pNext; } p = 0; for(i=0; i<64; i++){ vdbeSorterMerge(pTask, p, aSlot[i], &p); } pList->pList = p; sqlite3_free(aSlot); assert( pTask->pUnpacked->errCode==SQLITE_OK || pTask->pUnpacked->errCode==SQLITE_NOMEM ); return pTask->pUnpacked->errCode; } /* ** Initialize a PMA-writer object. 
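** The writer buffers output in nBuf-byte chunks so that, after the first
** (possibly partial) flush, all writes land on nBuf-aligned file offsets.
** As an illustration with made-up numbers: for iStart==10000 and nBuf==4096,
** the initializer below sets iBufStart==iBufEnd==1808 and iWriteOff==8192,
** so the first flush begins at file offset 10000 as expected.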
*/ static void vdbePmaWriterInit( sqlite3_file *pFd, /* File handle to write to */ PmaWriter *p, /* Object to populate */ int nBuf, /* Buffer size */ i64 iStart /* Offset of pFd to begin writing at */ ){ memset(p, 0, sizeof(PmaWriter)); p->aBuffer = (u8*)sqlite3Malloc(nBuf); if( !p->aBuffer ){ p->eFWErr = SQLITE_NOMEM; }else{ p->iBufEnd = p->iBufStart = (iStart % nBuf); p->iWriteOff = iStart - p->iBufStart; p->nBuffer = nBuf; p->pFd = pFd; } } /* ** Write nData bytes of data to the PMA. Return SQLITE_OK ** if successful, or an SQLite error code if an error occurs. */ static void vdbePmaWriteBlob(PmaWriter *p, u8 *pData, int nData){ int nRem = nData; while( nRem>0 && p->eFWErr==0 ){ int nCopy = nRem; if( nCopy>(p->nBuffer - p->iBufEnd) ){ nCopy = p->nBuffer - p->iBufEnd; } memcpy(&p->aBuffer[p->iBufEnd], &pData[nData-nRem], nCopy); p->iBufEnd += nCopy; if( p->iBufEnd==p->nBuffer ){ p->eFWErr = sqlite3OsWrite(p->pFd, &p->aBuffer[p->iBufStart], p->iBufEnd - p->iBufStart, p->iWriteOff + p->iBufStart ); p->iBufStart = p->iBufEnd = 0; p->iWriteOff += p->nBuffer; } assert( p->iBufEndnBuffer ); nRem -= nCopy; } } /* ** Flush any buffered data to disk and clean up the PMA-writer object. ** The results of using the PMA-writer after this call are undefined. ** Return SQLITE_OK if flushing the buffered data succeeds or is not ** required. Otherwise, return an SQLite error code. ** ** Before returning, set *piEof to the offset immediately following the ** last byte written to the file. */ static int vdbePmaWriterFinish(PmaWriter *p, i64 *piEof){ int rc; if( p->eFWErr==0 && ALWAYS(p->aBuffer) && p->iBufEnd>p->iBufStart ){ p->eFWErr = sqlite3OsWrite(p->pFd, &p->aBuffer[p->iBufStart], p->iBufEnd - p->iBufStart, p->iWriteOff + p->iBufStart ); } *piEof = (p->iWriteOff + p->iBufEnd); sqlite3_free(p->aBuffer); rc = p->eFWErr; memset(p, 0, sizeof(PmaWriter)); return rc; } /* ** Write value iVal encoded as a varint to the PMA. Return ** SQLITE_OK if successful, or an SQLite error code if an error occurs. */ static void vdbePmaWriteVarint(PmaWriter *p, u64 iVal){ int nByte; u8 aByte[10]; nByte = sqlite3PutVarint(aByte, iVal); vdbePmaWriteBlob(p, aByte, nByte); } /* ** Write the current contents of in-memory linked-list pList to a level-0 ** PMA in the temp file belonging to sub-task pTask. Return SQLITE_OK if ** successful, or an SQLite error code otherwise. ** ** The format of a PMA is: ** ** * A varint. This varint contains the total number of bytes of content ** in the PMA (not including the varint itself). ** ** * One or more records packed end-to-end in order of ascending keys. ** Each record consists of a varint followed by a blob of data (the ** key). The varint is the number of bytes in the blob of data. */ static int vdbeSorterListToPMA(SortSubtask *pTask, SorterList *pList){ sqlite3 *db = pTask->pSorter->db; int rc = SQLITE_OK; /* Return code */ PmaWriter writer; /* Object used to write to the file */ #ifdef SQLITE_DEBUG /* Set iSz to the expected size of file pTask->file after writing the PMA. ** This is used by an assert() statement at the end of this function. */ i64 iSz = pList->szPMA + sqlite3VarintLen(pList->szPMA) + pTask->file.iEof; #endif vdbeSorterWorkDebug(pTask, "enter"); memset(&writer, 0, sizeof(PmaWriter)); assert( pList->szPMA>0 ); /* If the first temporary PMA file has not been opened, open it now. 
*/ if( pTask->file.pFd==0 ){ rc = vdbeSorterOpenTempFile(db, 0, &pTask->file.pFd); assert( rc!=SQLITE_OK || pTask->file.pFd ); assert( pTask->file.iEof==0 ); assert( pTask->nPMA==0 ); } /* Try to get the file to memory map */ if( rc==SQLITE_OK ){ vdbeSorterExtendFile(db, pTask->file.pFd, pTask->file.iEof+pList->szPMA+9); } /* Sort the list */ if( rc==SQLITE_OK ){ rc = vdbeSorterSort(pTask, pList); } if( rc==SQLITE_OK ){ SorterRecord *p; SorterRecord *pNext = 0; vdbePmaWriterInit(pTask->file.pFd, &writer, pTask->pSorter->pgsz, pTask->file.iEof); pTask->nPMA++; vdbePmaWriteVarint(&writer, pList->szPMA); for(p=pList->pList; p; p=pNext){ pNext = p->u.pNext; vdbePmaWriteVarint(&writer, p->nVal); vdbePmaWriteBlob(&writer, SRVAL(p), p->nVal); if( pList->aMemory==0 ) sqlite3_free(p); } pList->pList = p; rc = vdbePmaWriterFinish(&writer, &pTask->file.iEof); } vdbeSorterWorkDebug(pTask, "exit"); assert( rc!=SQLITE_OK || pList->pList==0 ); assert( rc!=SQLITE_OK || pTask->file.iEof==iSz ); return rc; } /* ** Advance the MergeEngine to its next entry. ** Set *pbEof to true if there is no next entry because ** the MergeEngine has reached the end of all its inputs. ** ** Return SQLITE_OK if successful or an error code if an error occurs. */ static int vdbeMergeEngineStep( MergeEngine *pMerger, /* The merge engine to advance to the next row */ int *pbEof /* Set TRUE at EOF. Set false for more content */ ){ int rc; int iPrev = pMerger->aTree[1];/* Index of PmaReader to advance */ SortSubtask *pTask = pMerger->pTask; /* Advance the current PmaReader */ rc = vdbePmaReaderNext(&pMerger->aReadr[iPrev]); /* Update contents of aTree[] */ if( rc==SQLITE_OK ){ int i; /* Index of aTree[] to recalculate */ PmaReader *pReadr1; /* First PmaReader to compare */ PmaReader *pReadr2; /* Second PmaReader to compare */ int bCached = 0; /* Find the first two PmaReaders to compare. The one that was just ** advanced (iPrev) and the one next to it in the array. */ pReadr1 = &pMerger->aReadr[(iPrev & 0xFFFE)]; pReadr2 = &pMerger->aReadr[(iPrev | 0x0001)]; for(i=(pMerger->nTree+iPrev)/2; i>0; i=i/2){ /* Compare pReadr1 and pReadr2. Store the result in variable iRes. */ int iRes; if( pReadr1->pFd==0 ){ iRes = +1; }else if( pReadr2->pFd==0 ){ iRes = -1; }else{ iRes = pTask->xCompare(pTask, &bCached, pReadr1->aKey, pReadr1->nKey, pReadr2->aKey, pReadr2->nKey ); } /* If pReadr1 contained the smaller value, set aTree[i] to its index. ** Then set pReadr2 to the next PmaReader to compare to pReadr1. In this ** case there is no cache of pReadr2 in pTask->pUnpacked, so set ** pKey2 to point to the record belonging to pReadr2. ** ** Alternatively, if pReadr2 contains the smaller of the two values, ** set aTree[i] to its index and update pReadr1. If vdbeSorterCompare() ** was actually called above, then pTask->pUnpacked now contains ** a value equivalent to pReadr2. So set pKey2 to NULL to prevent ** vdbeSorterCompare() from decoding pReadr2 again. ** ** If the two values were equal, then the value from the oldest ** PMA should be considered smaller. The VdbeSorter.aReadr[] array ** is sorted from oldest to newest, so pReadr1 contains older values ** than pReadr2 iff (pReadr1<pReadr2). */ if( iRes<0 || (iRes==0 && pReadr1<pReadr2) ){ pMerger->aTree[i] = (int)(pReadr1 - pMerger->aReadr); pReadr2 = &pMerger->aReadr[ pMerger->aTree[i ^ 0x0001] ]; bCached = 0; }else{ if( pReadr1->pFd ) bCached = 0; pMerger->aTree[i] = (int)(pReadr2 - pMerger->aReadr); pReadr1 = &pMerger->aReadr[ pMerger->aTree[i ^ 0x0001] ]; } } *pbEof = (pMerger->aReadr[pMerger->aTree[1]].pFd==0); } return (rc==SQLITE_OK ?
pTask->pUnpacked->errCode : rc); } #if SQLITE_MAX_WORKER_THREADS>0 /* ** The main routine for background threads that write level-0 PMAs. */ static void *vdbeSorterFlushThread(void *pCtx){ SortSubtask *pTask = (SortSubtask*)pCtx; int rc; /* Return code */ assert( pTask->bDone==0 ); rc = vdbeSorterListToPMA(pTask, &pTask->list); pTask->bDone = 1; return SQLITE_INT_TO_PTR(rc); } #endif /* SQLITE_MAX_WORKER_THREADS>0 */ /* ** Flush the current contents of VdbeSorter.list to a new PMA, possibly ** using a background thread. */ static int vdbeSorterFlushPMA(VdbeSorter *pSorter){ #if SQLITE_MAX_WORKER_THREADS==0 pSorter->bUsePMA = 1; return vdbeSorterListToPMA(&pSorter->aTask[0], &pSorter->list); #else int rc = SQLITE_OK; int i; SortSubtask *pTask = 0; /* Thread context used to create new PMA */ int nWorker = (pSorter->nTask-1); /* Set the flag to indicate that at least one PMA has been written. ** Or will be, anyhow. */ pSorter->bUsePMA = 1; /* Select a sub-task to sort and flush the current list of in-memory ** records to disk. If the sorter is running in multi-threaded mode, ** round-robin between the first (pSorter->nTask-1) tasks. Except, if ** the background thread from a sub-tasks previous turn is still running, ** skip it. If the first (pSorter->nTask-1) sub-tasks are all still busy, ** fall back to using the final sub-task. The first (pSorter->nTask-1) ** sub-tasks are prefered as they use background threads - the final ** sub-task uses the main thread. */ for(i=0; iiPrev + i + 1) % nWorker; pTask = &pSorter->aTask[iTest]; if( pTask->bDone ){ rc = vdbeSorterJoinThread(pTask); } if( rc!=SQLITE_OK || pTask->pThread==0 ) break; } if( rc==SQLITE_OK ){ if( i==nWorker ){ /* Use the foreground thread for this operation */ rc = vdbeSorterListToPMA(&pSorter->aTask[nWorker], &pSorter->list); }else{ /* Launch a background thread for this operation */ u8 *aMem = pTask->list.aMemory; void *pCtx = (void*)pTask; assert( pTask->pThread==0 && pTask->bDone==0 ); assert( pTask->list.pList==0 ); assert( pTask->list.aMemory==0 || pSorter->list.aMemory!=0 ); pSorter->iPrev = (u8)(pTask - pSorter->aTask); pTask->list = pSorter->list; pSorter->list.pList = 0; pSorter->list.szPMA = 0; if( aMem ){ pSorter->list.aMemory = aMem; pSorter->nMemory = sqlite3MallocSize(aMem); }else if( pSorter->list.aMemory ){ pSorter->list.aMemory = sqlite3Malloc(pSorter->nMemory); if( !pSorter->list.aMemory ) return SQLITE_NOMEM; } rc = vdbeSorterCreateThread(pTask, vdbeSorterFlushThread, pCtx); } } return rc; #endif /* SQLITE_MAX_WORKER_THREADS!=0 */ } /* ** Add a record to the sorter. */ SQLITE_PRIVATE int sqlite3VdbeSorterWrite( const VdbeCursor *pCsr, /* Sorter cursor */ Mem *pVal /* Memory cell containing record */ ){ VdbeSorter *pSorter = pCsr->pSorter; int rc = SQLITE_OK; /* Return Code */ SorterRecord *pNew; /* New list element */ int bFlush; /* True to flush contents of memory to PMA */ int nReq; /* Bytes of memory required */ int nPMA; /* Bytes of PMA space required */ int t; /* serial type of first record field */ getVarint32((const u8*)&pVal->z[1], t); if( t>0 && t<10 && t!=7 ){ pSorter->typeMask &= SORTER_TYPE_INTEGER; }else if( t>10 && (t & 0x01) ){ pSorter->typeMask &= SORTER_TYPE_TEXT; }else{ pSorter->typeMask = 0; } assert( pSorter ); /* Figure out whether or not the current contents of memory should be ** flushed to a PMA before continuing. If so, do so. 
** ** If using the single large allocation mode (pSorter->aMemory!=0), then ** flush the contents of memory to a new PMA if (a) at least one value is ** already in memory and (b) the new value will not fit in memory. ** ** Or, if using separate allocations for each record, flush the contents ** of memory to a PMA if either of the following are true: ** ** * The total memory allocated for the in-memory list is greater ** than (page-size * cache-size), or ** ** * The total memory allocated for the in-memory list is greater ** than (page-size * 10) and sqlite3HeapNearlyFull() returns true. */ nReq = pVal->n + sizeof(SorterRecord); nPMA = pVal->n + sqlite3VarintLen(pVal->n); if( pSorter->mxPmaSize ){ if( pSorter->list.aMemory ){ bFlush = pSorter->iMemory && (pSorter->iMemory+nReq) > pSorter->mxPmaSize; }else{ bFlush = ( (pSorter->list.szPMA > pSorter->mxPmaSize) || (pSorter->list.szPMA > pSorter->mnPmaSize && sqlite3HeapNearlyFull()) ); } if( bFlush ){ rc = vdbeSorterFlushPMA(pSorter); pSorter->list.szPMA = 0; pSorter->iMemory = 0; assert( rc!=SQLITE_OK || pSorter->list.pList==0 ); } } pSorter->list.szPMA += nPMA; if( nPMA>pSorter->mxKeysize ){ pSorter->mxKeysize = nPMA; } if( pSorter->list.aMemory ){ int nMin = pSorter->iMemory + nReq; if( nMin>pSorter->nMemory ){ u8 *aNew; int nNew = pSorter->nMemory * 2; while( nNew < nMin ) nNew = nNew*2; if( nNew > pSorter->mxPmaSize ) nNew = pSorter->mxPmaSize; if( nNew < nMin ) nNew = nMin; aNew = sqlite3Realloc(pSorter->list.aMemory, nNew); if( !aNew ) return SQLITE_NOMEM; pSorter->list.pList = (SorterRecord*)( aNew + ((u8*)pSorter->list.pList - pSorter->list.aMemory) ); pSorter->list.aMemory = aNew; pSorter->nMemory = nNew; } pNew = (SorterRecord*)&pSorter->list.aMemory[pSorter->iMemory]; pSorter->iMemory += ROUND8(nReq); pNew->u.iNext = (int)((u8*)(pSorter->list.pList) - pSorter->list.aMemory); }else{ pNew = (SorterRecord *)sqlite3Malloc(nReq); if( pNew==0 ){ return SQLITE_NOMEM; } pNew->u.pNext = pSorter->list.pList; } memcpy(SRVAL(pNew), pVal->z, pVal->n); pNew->nVal = pVal->n; pSorter->list.pList = pNew; return rc; } /* ** Read keys from pIncr->pMerger and populate pIncr->aFile[1]. The format ** of the data stored in aFile[1] is the same as that used by regular PMAs, ** except that the number-of-bytes varint is omitted from the start. */ static int vdbeIncrPopulate(IncrMerger *pIncr){ int rc = SQLITE_OK; int rc2; i64 iStart = pIncr->iStartOff; SorterFile *pOut = &pIncr->aFile[1]; SortSubtask *pTask = pIncr->pTask; MergeEngine *pMerger = pIncr->pMerger; PmaWriter writer; assert( pIncr->bEof==0 ); vdbeSorterPopulateDebug(pTask, "enter"); vdbePmaWriterInit(pOut->pFd, &writer, pTask->pSorter->pgsz, iStart); while( rc==SQLITE_OK ){ int dummy; PmaReader *pReader = &pMerger->aReadr[ pMerger->aTree[1] ]; int nKey = pReader->nKey; i64 iEof = writer.iWriteOff + writer.iBufEnd; /* Check if the output file is full or if the input has been exhausted. ** In either case exit the loop. */ if( pReader->pFd==0 ) break; if( (iEof + nKey + sqlite3VarintLen(nKey))>(iStart + pIncr->mxSz) ) break; /* Write the next key to the output. 
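** Each record in the output is a varint giving the key size followed by the
** key bytes themselves. For example, a 3-byte key is emitted as the single
** varint byte 0x03 and then the three key bytes.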
*/ vdbePmaWriteVarint(&writer, nKey); vdbePmaWriteBlob(&writer, pReader->aKey, nKey); assert( pIncr->pMerger->pTask==pTask ); rc = vdbeMergeEngineStep(pIncr->pMerger, &dummy); } rc2 = vdbePmaWriterFinish(&writer, &pOut->iEof); if( rc==SQLITE_OK ) rc = rc2; vdbeSorterPopulateDebug(pTask, "exit"); return rc; } #if SQLITE_MAX_WORKER_THREADS>0 /* ** The main routine for background threads that populate aFile[1] of ** multi-threaded IncrMerger objects. */ static void *vdbeIncrPopulateThread(void *pCtx){ IncrMerger *pIncr = (IncrMerger*)pCtx; void *pRet = SQLITE_INT_TO_PTR( vdbeIncrPopulate(pIncr) ); pIncr->pTask->bDone = 1; return pRet; } /* ** Launch a background thread to populate aFile[1] of pIncr. */ static int vdbeIncrBgPopulate(IncrMerger *pIncr){ void *p = (void*)pIncr; assert( pIncr->bUseThread ); return vdbeSorterCreateThread(pIncr->pTask, vdbeIncrPopulateThread, p); } #endif /* ** This function is called when the PmaReader corresponding to pIncr has ** finished reading the contents of aFile[0]. Its purpose is to "refill" ** aFile[0] such that the PmaReader should start rereading it from the ** beginning. ** ** For single-threaded objects, this is accomplished by literally reading ** keys from pIncr->pMerger and repopulating aFile[0]. ** ** For multi-threaded objects, all that is required is to wait until the ** background thread is finished (if it is not already) and then swap ** aFile[0] and aFile[1] in place. If the contents of pMerger have not ** been exhausted, this function also launches a new background thread ** to populate the new aFile[1]. ** ** SQLITE_OK is returned on success, or an SQLite error code otherwise. */ static int vdbeIncrSwap(IncrMerger *pIncr){ int rc = SQLITE_OK; #if SQLITE_MAX_WORKER_THREADS>0 if( pIncr->bUseThread ){ rc = vdbeSorterJoinThread(pIncr->pTask); if( rc==SQLITE_OK ){ SorterFile f0 = pIncr->aFile[0]; pIncr->aFile[0] = pIncr->aFile[1]; pIncr->aFile[1] = f0; } if( rc==SQLITE_OK ){ if( pIncr->aFile[0].iEof==pIncr->iStartOff ){ pIncr->bEof = 1; }else{ rc = vdbeIncrBgPopulate(pIncr); } } }else #endif { rc = vdbeIncrPopulate(pIncr); pIncr->aFile[0] = pIncr->aFile[1]; if( pIncr->aFile[0].iEof==pIncr->iStartOff ){ pIncr->bEof = 1; } } return rc; } /* ** Allocate and return a new IncrMerger object to read data from pMerger. ** ** If an OOM condition is encountered, return NULL. In this case free the ** pMerger argument before returning. */ static int vdbeIncrMergerNew( SortSubtask *pTask, /* The thread that will be using the new IncrMerger */ MergeEngine *pMerger, /* The MergeEngine that the IncrMerger will control */ IncrMerger **ppOut /* Write the new IncrMerger here */ ){ int rc = SQLITE_OK; IncrMerger *pIncr = *ppOut = (IncrMerger*) (sqlite3FaultSim(100) ? 0 : sqlite3MallocZero(sizeof(*pIncr))); if( pIncr ){ pIncr->pMerger = pMerger; pIncr->pTask = pTask; pIncr->mxSz = MAX(pTask->pSorter->mxKeysize+9,pTask->pSorter->mxPmaSize/2); pTask->file2.iEof += pIncr->mxSz; }else{ vdbeMergeEngineFree(pMerger); rc = SQLITE_NOMEM; } return rc; } #if SQLITE_MAX_WORKER_THREADS>0 /* ** Set the "use-threads" flag on object pIncr. */ static void vdbeIncrMergerSetThreads(IncrMerger *pIncr){ pIncr->bUseThread = 1; pIncr->pTask->file2.iEof -= pIncr->mxSz; } #endif /* SQLITE_MAX_WORKER_THREADS>0 */ /* ** Recompute pMerger->aTree[iOut] by comparing the next keys on the ** two PmaReaders that feed that entry. Neither of the PmaReaders ** are advanced. This routine merely does the comparison. 
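**
** For example, in a merger with nTree==8 the bottom row of aTree[] holds
** entries 4..7: entry iOut==5 compares leaf readers aReadr[2] and aReadr[3]
** directly, while an upper entry such as iOut==2 compares the winners
** already recorded in aTree[4] and aTree[5].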
*/ static void vdbeMergeEngineCompare( MergeEngine *pMerger, /* Merge engine containing PmaReaders to compare */ int iOut /* Store the result in pMerger->aTree[iOut] */ ){ int i1; int i2; int iRes; PmaReader *p1; PmaReader *p2; assert( iOut<pMerger->nTree && iOut>0 ); if( iOut>=(pMerger->nTree/2) ){ i1 = (iOut - pMerger->nTree/2) * 2; i2 = i1 + 1; }else{ i1 = pMerger->aTree[iOut*2]; i2 = pMerger->aTree[iOut*2+1]; } p1 = &pMerger->aReadr[i1]; p2 = &pMerger->aReadr[i2]; if( p1->pFd==0 ){ iRes = i2; }else if( p2->pFd==0 ){ iRes = i1; }else{ SortSubtask *pTask = pMerger->pTask; int bCached = 0; int res; assert( pTask->pUnpacked!=0 ); /* from vdbeSortSubtaskMain() */ res = pTask->xCompare( pTask, &bCached, p1->aKey, p1->nKey, p2->aKey, p2->nKey ); if( res<=0 ){ iRes = i1; }else{ iRes = i2; } } pMerger->aTree[iOut] = iRes; } /* ** Allowed values for the eMode parameter to vdbeMergeEngineInit() ** and vdbePmaReaderIncrMergeInit(). ** ** Only INCRINIT_NORMAL is valid in single-threaded builds (when ** SQLITE_MAX_WORKER_THREADS==0). The other values are only used ** when there exists one or more separate worker threads. */ #define INCRINIT_NORMAL 0 #define INCRINIT_TASK 1 #define INCRINIT_ROOT 2 /* ** Forward reference required as the vdbeIncrMergeInit() and ** vdbePmaReaderIncrInit() routines are called mutually recursively when ** building a merge tree. */ static int vdbePmaReaderIncrInit(PmaReader *pReadr, int eMode); /* ** Initialize the MergeEngine object passed as the second argument. Once this ** function returns, the first key of merged data may be read from the ** MergeEngine object in the usual fashion. ** ** If argument eMode is INCRINIT_ROOT, then it is assumed that any IncrMerge ** objects attached to the PmaReader objects that the merger reads from have ** already been populated, but that they have not yet populated aFile[0] and ** set the PmaReader objects up to read from it. In this case all that is ** required is to call vdbePmaReaderNext() on each PmaReader to point it at ** its first key. ** ** Otherwise, if eMode is any value other than INCRINIT_ROOT, then use ** vdbePmaReaderIncrMergeInit() to initialize each PmaReader that feeds data ** to pMerger. ** ** SQLITE_OK is returned if successful, or an SQLite error code otherwise. */ static int vdbeMergeEngineInit( SortSubtask *pTask, /* Thread that will run pMerger */ MergeEngine *pMerger, /* MergeEngine to initialize */ int eMode /* One of the INCRINIT_XXX constants */ ){ int rc = SQLITE_OK; /* Return code */ int i; /* For looping over PmaReader objects */ int nTree = pMerger->nTree; /* eMode is always INCRINIT_NORMAL in single-threaded mode */ assert( SQLITE_MAX_WORKER_THREADS>0 || eMode==INCRINIT_NORMAL ); /* Verify that the MergeEngine is assigned to a single thread */ assert( pMerger->pTask==0 ); pMerger->pTask = pTask; for(i=0; i<nTree; i++){ if( SQLITE_MAX_WORKER_THREADS>0 && eMode==INCRINIT_ROOT ){ /* PmaReaders should be normally initialized in order, as if they are ** reading from the same temp file this makes for more linear file IO. ** However, in the INCRINIT_ROOT case, if PmaReader aReadr[nTask-1] is ** in use it will block the vdbePmaReaderNext() call while it uses ** the main thread to fill its buffer. So calling PmaReaderNext() ** on this PmaReader before any of the multi-threaded PmaReaders takes ** better advantage of multi-processor hardware.
*/ rc = vdbePmaReaderNext(&pMerger->aReadr[nTree-i-1]); }else{ rc = vdbePmaReaderIncrInit(&pMerger->aReadr[i], INCRINIT_NORMAL); } if( rc!=SQLITE_OK ) return rc; } for(i=pMerger->nTree-1; i>0; i--){ vdbeMergeEngineCompare(pMerger, i); } return pTask->pUnpacked->errCode; } /* ** The PmaReader passed as the first argument is guaranteed to be an ** incremental-reader (pReadr->pIncr!=0). This function serves to open ** and/or initialize the temp file related fields of the IncrMerge ** object at (pReadr->pIncr). ** ** If argument eMode is set to INCRINIT_NORMAL, then all PmaReaders ** in the sub-tree headed by pReadr are also initialized. Data is then ** loaded into the buffers belonging to pReadr and it is set to point to ** the first key in its range. ** ** If argument eMode is set to INCRINIT_TASK, then pReadr is guaranteed ** to be a multi-threaded PmaReader and this function is being called in a ** background thread. In this case all PmaReaders in the sub-tree are ** initialized as for INCRINIT_NORMAL and the aFile[1] buffer belonging to ** pReadr is populated. However, pReadr itself is not set up to point ** to its first key. A call to vdbePmaReaderNext() is still required to do ** that. ** ** The reason this function does not call vdbePmaReaderNext() immediately ** in the INCRINIT_TASK case is that vdbePmaReaderNext() assumes that it has ** to block on thread (pTask->thread) before accessing aFile[1]. But, since ** this entire function is being run by thread (pTask->thread), that will ** lead to the current background thread attempting to join itself. ** ** Finally, if argument eMode is set to INCRINIT_ROOT, it may be assumed ** that pReadr->pIncr is a multi-threaded IncrMerge objects, and that all ** child-trees have already been initialized using IncrInit(INCRINIT_TASK). ** In this case vdbePmaReaderNext() is called on all child PmaReaders and ** the current PmaReader set to point to the first key in its range. ** ** SQLITE_OK is returned if successful, or an SQLite error code otherwise. */ static int vdbePmaReaderIncrMergeInit(PmaReader *pReadr, int eMode){ int rc = SQLITE_OK; IncrMerger *pIncr = pReadr->pIncr; SortSubtask *pTask = pIncr->pTask; sqlite3 *db = pTask->pSorter->db; /* eMode is always INCRINIT_NORMAL in single-threaded mode */ assert( SQLITE_MAX_WORKER_THREADS>0 || eMode==INCRINIT_NORMAL ); rc = vdbeMergeEngineInit(pTask, pIncr->pMerger, eMode); /* Set up the required files for pIncr. A multi-theaded IncrMerge object ** requires two temp files to itself, whereas a single-threaded object ** only requires a region of pTask->file2. */ if( rc==SQLITE_OK ){ int mxSz = pIncr->mxSz; #if SQLITE_MAX_WORKER_THREADS>0 if( pIncr->bUseThread ){ rc = vdbeSorterOpenTempFile(db, mxSz, &pIncr->aFile[0].pFd); if( rc==SQLITE_OK ){ rc = vdbeSorterOpenTempFile(db, mxSz, &pIncr->aFile[1].pFd); } }else #endif /*if( !pIncr->bUseThread )*/{ if( pTask->file2.pFd==0 ){ assert( pTask->file2.iEof>0 ); rc = vdbeSorterOpenTempFile(db, pTask->file2.iEof, &pTask->file2.pFd); pTask->file2.iEof = 0; } if( rc==SQLITE_OK ){ pIncr->aFile[1].pFd = pTask->file2.pFd; pIncr->iStartOff = pTask->file2.iEof; pTask->file2.iEof += mxSz; } } } #if SQLITE_MAX_WORKER_THREADS>0 if( rc==SQLITE_OK && pIncr->bUseThread ){ /* Use the current thread to populate aFile[1], even though this ** PmaReader is multi-threaded. If this is an INCRINIT_TASK object, ** then this function is already running in background thread ** pIncr->pTask->thread. 
** ** If this is the INCRINIT_ROOT object, then it is running in the ** main VDBE thread. But that is Ok, as that thread cannot return ** control to the VDBE or proceed with anything useful until the ** first results are ready from this merger object anyway. */ assert( eMode==INCRINIT_ROOT || eMode==INCRINIT_TASK ); rc = vdbeIncrPopulate(pIncr); } #endif if( rc==SQLITE_OK && (SQLITE_MAX_WORKER_THREADS==0 || eMode!=INCRINIT_TASK) ){ rc = vdbePmaReaderNext(pReadr); } return rc; } #if SQLITE_MAX_WORKER_THREADS>0 /* ** The main routine for vdbePmaReaderIncrMergeInit() operations run in ** background threads. */ static void *vdbePmaReaderBgIncrInit(void *pCtx){ PmaReader *pReader = (PmaReader*)pCtx; void *pRet = SQLITE_INT_TO_PTR( vdbePmaReaderIncrMergeInit(pReader,INCRINIT_TASK) ); pReader->pIncr->pTask->bDone = 1; return pRet; } #endif /* ** If the PmaReader passed as the first argument is not an incremental-reader ** (if pReadr->pIncr==0), then this function is a no-op. Otherwise, it invokes ** the vdbePmaReaderIncrMergeInit() function with the parameters passed to ** this routine to initialize the incremental merge. ** ** If the IncrMerger object is multi-threaded (IncrMerger.bUseThread==1), ** then a background thread is launched to call vdbePmaReaderIncrMergeInit(). ** Or, if the IncrMerger is single threaded, the same function is called ** using the current thread. */ static int vdbePmaReaderIncrInit(PmaReader *pReadr, int eMode){ IncrMerger *pIncr = pReadr->pIncr; /* Incremental merger */ int rc = SQLITE_OK; /* Return code */ if( pIncr ){ #if SQLITE_MAX_WORKER_THREADS>0 assert( pIncr->bUseThread==0 || eMode==INCRINIT_TASK ); if( pIncr->bUseThread ){ void *pCtx = (void*)pReadr; rc = vdbeSorterCreateThread(pIncr->pTask, vdbePmaReaderBgIncrInit, pCtx); }else #endif { rc = vdbePmaReaderIncrMergeInit(pReadr, eMode); } } return rc; } /* ** Allocate a new MergeEngine object to merge the contents of nPMA level-0 ** PMAs from pTask->file. If no error occurs, set *ppOut to point to ** the new object and return SQLITE_OK. Or, if an error does occur, set *ppOut ** to NULL and return an SQLite error code. ** ** When this function is called, *piOffset is set to the offset of the ** first PMA to read from pTask->file. Assuming no error occurs, it is ** set to the offset immediately following the last byte of the last ** PMA before returning. If an error does occur, then the final value of ** *piOffset is undefined. */ static int vdbeMergeEngineLevel0( SortSubtask *pTask, /* Sorter task to read from */ int nPMA, /* Number of PMAs to read */ i64 *piOffset, /* IN/OUT: Readr offset in pTask->file */ MergeEngine **ppOut /* OUT: New merge-engine */ ){ MergeEngine *pNew; /* Merge engine to return */ i64 iOff = *piOffset; int i; int rc = SQLITE_OK; *ppOut = pNew = vdbeMergeEngineNew(nPMA); if( pNew==0 ) rc = SQLITE_NOMEM; for(i=0; i<nPMA && rc==SQLITE_OK; i++){ i64 nDummy = 0; PmaReader *pReadr = &pNew->aReadr[i]; rc = vdbePmaReaderInit(pTask, &pTask->file, iOff, pReadr, &nDummy); iOff = pReadr->iEof; } if( rc!=SQLITE_OK ){ vdbeMergeEngineFree(pNew); *ppOut = 0; } *piOffset = iOff; return rc; } /* ** Return the depth of a tree comprising nPMA PMAs, assuming a fanout of ** SORTER_MAX_MERGE_COUNT. The returned value does not include leaf nodes. ** ** i.e.
** ** nPMA<=16 -> TreeDepth() == 0 ** nPMA<=256 -> TreeDepth() == 1 ** nPMA<=4096 -> TreeDepth() == 2 */ static int vdbeSorterTreeDepth(int nPMA){ int nDepth = 0; i64 nDiv = SORTER_MAX_MERGE_COUNT; while( nDiv < (i64)nPMA ){ nDiv = nDiv * SORTER_MAX_MERGE_COUNT; nDepth++; } return nDepth; } /* ** pRoot is the root of an incremental merge-tree with depth nDepth (according ** to vdbeSorterTreeDepth()). pLeaf is the iSeq'th leaf to be added to the ** tree, counting from zero. This function adds pLeaf to the tree. ** ** If successful, SQLITE_OK is returned. If an error occurs, an SQLite error ** code is returned and pLeaf is freed. */ static int vdbeSorterAddToTree( SortSubtask *pTask, /* Task context */ int nDepth, /* Depth of tree according to TreeDepth() */ int iSeq, /* Sequence number of leaf within tree */ MergeEngine *pRoot, /* Root of tree */ MergeEngine *pLeaf /* Leaf to add to tree */ ){ int rc = SQLITE_OK; int nDiv = 1; int i; MergeEngine *p = pRoot; IncrMerger *pIncr; rc = vdbeIncrMergerNew(pTask, pLeaf, &pIncr); for(i=1; i<nDepth; i++){ nDiv = nDiv * SORTER_MAX_MERGE_COUNT; } for(i=1; i<nDepth && rc==SQLITE_OK; i++){ int iIter = (iSeq / nDiv) % SORTER_MAX_MERGE_COUNT; PmaReader *pReadr = &p->aReadr[iIter]; if( pReadr->pIncr==0 ){ MergeEngine *pNew = vdbeMergeEngineNew(SORTER_MAX_MERGE_COUNT); if( pNew==0 ){ rc = SQLITE_NOMEM; }else{ rc = vdbeIncrMergerNew(pTask, pNew, &pReadr->pIncr); } } if( rc==SQLITE_OK ){ p = pReadr->pIncr->pMerger; nDiv = nDiv / SORTER_MAX_MERGE_COUNT; } } if( rc==SQLITE_OK ){ p->aReadr[iSeq % SORTER_MAX_MERGE_COUNT].pIncr = pIncr; }else{ vdbeIncrFree(pIncr); } return rc; } /* ** This function is called as part of a SorterRewind() operation on a sorter ** that has already written two or more level-0 PMAs to one or more temp ** files. It builds a tree of MergeEngine/IncrMerger/PmaReader objects that ** can be used to incrementally merge all PMAs on disk. ** ** If successful, SQLITE_OK is returned and *ppOut set to point to the ** MergeEngine object at the root of the tree before returning. Or, if an ** error occurs, an SQLite error code is returned and the final value ** of *ppOut is undefined. */ static int vdbeSorterMergeTreeBuild( VdbeSorter *pSorter, /* The VDBE cursor that implements the sort */ MergeEngine **ppOut /* Write the MergeEngine here */ ){ MergeEngine *pMain = 0; int rc = SQLITE_OK; int iTask; #if SQLITE_MAX_WORKER_THREADS>0 /* If the sorter uses more than one task, then create the top-level ** MergeEngine here. This MergeEngine will read data from exactly ** one PmaReader per sub-task.
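** For example, a sorter running with three sub-tasks gets a pMain created
** with three inputs (rounded up to four aReadr slots by vdbeMergeEngineNew()),
** where input iTask is an IncrMerger wrapped around the merge tree built
** below for pSorter->aTask[iTask].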
*/ assert( pSorter->bUseThreads || pSorter->nTask==1 ); if( pSorter->nTask>1 ){ pMain = vdbeMergeEngineNew(pSorter->nTask); if( pMain==0 ) rc = SQLITE_NOMEM; } #endif for(iTask=0; rc==SQLITE_OK && iTasknTask; iTask++){ SortSubtask *pTask = &pSorter->aTask[iTask]; assert( pTask->nPMA>0 || SQLITE_MAX_WORKER_THREADS>0 ); if( SQLITE_MAX_WORKER_THREADS==0 || pTask->nPMA ){ MergeEngine *pRoot = 0; /* Root node of tree for this task */ int nDepth = vdbeSorterTreeDepth(pTask->nPMA); i64 iReadOff = 0; if( pTask->nPMA<=SORTER_MAX_MERGE_COUNT ){ rc = vdbeMergeEngineLevel0(pTask, pTask->nPMA, &iReadOff, &pRoot); }else{ int i; int iSeq = 0; pRoot = vdbeMergeEngineNew(SORTER_MAX_MERGE_COUNT); if( pRoot==0 ) rc = SQLITE_NOMEM; for(i=0; inPMA && rc==SQLITE_OK; i += SORTER_MAX_MERGE_COUNT){ MergeEngine *pMerger = 0; /* New level-0 PMA merger */ int nReader; /* Number of level-0 PMAs to merge */ nReader = MIN(pTask->nPMA - i, SORTER_MAX_MERGE_COUNT); rc = vdbeMergeEngineLevel0(pTask, nReader, &iReadOff, &pMerger); if( rc==SQLITE_OK ){ rc = vdbeSorterAddToTree(pTask, nDepth, iSeq++, pRoot, pMerger); } } } if( rc==SQLITE_OK ){ #if SQLITE_MAX_WORKER_THREADS>0 if( pMain!=0 ){ rc = vdbeIncrMergerNew(pTask, pRoot, &pMain->aReadr[iTask].pIncr); }else #endif { assert( pMain==0 ); pMain = pRoot; } }else{ vdbeMergeEngineFree(pRoot); } } } if( rc!=SQLITE_OK ){ vdbeMergeEngineFree(pMain); pMain = 0; } *ppOut = pMain; return rc; } /* ** This function is called as part of an sqlite3VdbeSorterRewind() operation ** on a sorter that has written two or more PMAs to temporary files. It sets ** up either VdbeSorter.pMerger (for single threaded sorters) or pReader ** (for multi-threaded sorters) so that it can be used to iterate through ** all records stored in the sorter. ** ** SQLITE_OK is returned if successful, or an SQLite error code otherwise. */ static int vdbeSorterSetupMerge(VdbeSorter *pSorter){ int rc; /* Return code */ SortSubtask *pTask0 = &pSorter->aTask[0]; MergeEngine *pMain = 0; #if SQLITE_MAX_WORKER_THREADS sqlite3 *db = pTask0->pSorter->db; int i; SorterCompare xCompare = vdbeSorterGetCompare(pSorter); for(i=0; inTask; i++){ pSorter->aTask[i].xCompare = xCompare; } #endif rc = vdbeSorterMergeTreeBuild(pSorter, &pMain); if( rc==SQLITE_OK ){ #if SQLITE_MAX_WORKER_THREADS assert( pSorter->bUseThreads==0 || pSorter->nTask>1 ); if( pSorter->bUseThreads ){ int iTask; PmaReader *pReadr = 0; SortSubtask *pLast = &pSorter->aTask[pSorter->nTask-1]; rc = vdbeSortAllocUnpacked(pLast); if( rc==SQLITE_OK ){ pReadr = (PmaReader*)sqlite3DbMallocZero(db, sizeof(PmaReader)); pSorter->pReader = pReadr; if( pReadr==0 ) rc = SQLITE_NOMEM; } if( rc==SQLITE_OK ){ rc = vdbeIncrMergerNew(pLast, pMain, &pReadr->pIncr); if( rc==SQLITE_OK ){ vdbeIncrMergerSetThreads(pReadr->pIncr); for(iTask=0; iTask<(pSorter->nTask-1); iTask++){ IncrMerger *pIncr; if( (pIncr = pMain->aReadr[iTask].pIncr) ){ vdbeIncrMergerSetThreads(pIncr); assert( pIncr->pTask!=pLast ); } } for(iTask=0; rc==SQLITE_OK && iTasknTask; iTask++){ /* Check that: ** ** a) The incremental merge object is configured to use the ** right task, and ** b) If it is using task (nTask-1), it is configured to run ** in single-threaded mode. This is important, as the ** root merge (INCRINIT_ROOT) will be using the same task ** object. 
*/ PmaReader *p = &pMain->aReadr[iTask]; assert( p->pIncr==0 || ( (p->pIncr->pTask==&pSorter->aTask[iTask]) /* a */ && (iTask!=pSorter->nTask-1 || p->pIncr->bUseThread==0) /* b */ )); rc = vdbePmaReaderIncrInit(p, INCRINIT_TASK); } } pMain = 0; } if( rc==SQLITE_OK ){ rc = vdbePmaReaderIncrMergeInit(pReadr, INCRINIT_ROOT); } }else #endif { rc = vdbeMergeEngineInit(pTask0, pMain, INCRINIT_NORMAL); pSorter->pMerger = pMain; pMain = 0; } } if( rc!=SQLITE_OK ){ vdbeMergeEngineFree(pMain); } return rc; } /* ** Once the sorter has been populated by calls to sqlite3VdbeSorterWrite, ** this function is called to prepare for iterating through the records ** in sorted order. */ SQLITE_PRIVATE int sqlite3VdbeSorterRewind(const VdbeCursor *pCsr, int *pbEof){ VdbeSorter *pSorter = pCsr->pSorter; int rc = SQLITE_OK; /* Return code */ assert( pSorter ); /* If no data has been written to disk, then do not do so now. Instead, ** sort the VdbeSorter.pRecord list. The vdbe layer will read data directly ** from the in-memory list. */ if( pSorter->bUsePMA==0 ){ if( pSorter->list.pList ){ *pbEof = 0; rc = vdbeSorterSort(&pSorter->aTask[0], &pSorter->list); }else{ *pbEof = 1; } return rc; } /* Write the current in-memory list to a PMA. When the VdbeSorterWrite() ** function flushes the contents of memory to disk, it immediately always ** creates a new list consisting of a single key immediately afterwards. ** So the list is never empty at this point. */ assert( pSorter->list.pList ); rc = vdbeSorterFlushPMA(pSorter); /* Join all threads */ rc = vdbeSorterJoinAll(pSorter, rc); vdbeSorterRewindDebug("rewind"); /* Assuming no errors have occurred, set up a merger structure to ** incrementally read and merge all remaining PMAs. */ assert( pSorter->pReader==0 ); if( rc==SQLITE_OK ){ rc = vdbeSorterSetupMerge(pSorter); *pbEof = 0; } vdbeSorterRewindDebug("rewinddone"); return rc; } /* ** Advance to the next element in the sorter. */ SQLITE_PRIVATE int sqlite3VdbeSorterNext(sqlite3 *db, const VdbeCursor *pCsr, int *pbEof){ VdbeSorter *pSorter = pCsr->pSorter; int rc; /* Return code */ assert( pSorter->bUsePMA || (pSorter->pReader==0 && pSorter->pMerger==0) ); if( pSorter->bUsePMA ){ assert( pSorter->pReader==0 || pSorter->pMerger==0 ); assert( pSorter->bUseThreads==0 || pSorter->pReader ); assert( pSorter->bUseThreads==1 || pSorter->pMerger ); #if SQLITE_MAX_WORKER_THREADS>0 if( pSorter->bUseThreads ){ rc = vdbePmaReaderNext(pSorter->pReader); *pbEof = (pSorter->pReader->pFd==0); }else #endif /*if( !pSorter->bUseThreads )*/ { assert( pSorter->pMerger!=0 ); assert( pSorter->pMerger->pTask==(&pSorter->aTask[0]) ); rc = vdbeMergeEngineStep(pSorter->pMerger, pbEof); } }else{ SorterRecord *pFree = pSorter->list.pList; pSorter->list.pList = pFree->u.pNext; pFree->u.pNext = 0; if( pSorter->list.aMemory==0 ) vdbeSorterRecordFree(db, pFree); *pbEof = !pSorter->list.pList; rc = SQLITE_OK; } return rc; } /* ** Return a pointer to a buffer owned by the sorter that contains the ** current key. 
*/ static void *vdbeSorterRowkey( const VdbeSorter *pSorter, /* Sorter object */ int *pnKey /* OUT: Size of current key in bytes */ ){ void *pKey; if( pSorter->bUsePMA ){ PmaReader *pReader; #if SQLITE_MAX_WORKER_THREADS>0 if( pSorter->bUseThreads ){ pReader = pSorter->pReader; }else #endif /*if( !pSorter->bUseThreads )*/{ pReader = &pSorter->pMerger->aReadr[pSorter->pMerger->aTree[1]]; } *pnKey = pReader->nKey; pKey = pReader->aKey; }else{ *pnKey = pSorter->list.pList->nVal; pKey = SRVAL(pSorter->list.pList); } return pKey; } /* ** Copy the current sorter key into the memory cell pOut. */ SQLITE_PRIVATE int sqlite3VdbeSorterRowkey(const VdbeCursor *pCsr, Mem *pOut){ VdbeSorter *pSorter = pCsr->pSorter; void *pKey; int nKey; /* Sorter key to copy into pOut */ pKey = vdbeSorterRowkey(pSorter, &nKey); if( sqlite3VdbeMemClearAndResize(pOut, nKey) ){ return SQLITE_NOMEM; } pOut->n = nKey; MemSetTypeFlag(pOut, MEM_Blob); memcpy(pOut->z, pKey, nKey); return SQLITE_OK; } /* ** Compare the key in memory cell pVal with the key that the sorter cursor ** passed as the first argument currently points to. For the purposes of ** the comparison, ignore the rowid field at the end of each record. ** ** If the sorter cursor key contains any NULL values, consider it to be ** less than pVal. Even if pVal also contains NULL values. ** ** If an error occurs, return an SQLite error code (i.e. SQLITE_NOMEM). ** Otherwise, set *pRes to a negative, zero or positive value if the ** key in pVal is smaller than, equal to or larger than the current sorter ** key. ** ** This routine forms the core of the OP_SorterCompare opcode, which in ** turn is used to verify uniqueness when constructing a UNIQUE INDEX. */ SQLITE_PRIVATE int sqlite3VdbeSorterCompare( const VdbeCursor *pCsr, /* Sorter cursor */ Mem *pVal, /* Value to compare to current sorter key */ int nKeyCol, /* Compare this many columns */ int *pRes /* OUT: Result of comparison */ ){ VdbeSorter *pSorter = pCsr->pSorter; UnpackedRecord *r2 = pSorter->pUnpacked; KeyInfo *pKeyInfo = pCsr->pKeyInfo; int i; void *pKey; int nKey; /* Sorter key to compare pVal with */ if( r2==0 ){ char *p; r2 = pSorter->pUnpacked = sqlite3VdbeAllocUnpackedRecord(pKeyInfo,0,0,&p); assert( pSorter->pUnpacked==(UnpackedRecord*)p ); if( r2==0 ) return SQLITE_NOMEM; r2->nField = nKeyCol; } assert( r2->nField==nKeyCol ); pKey = vdbeSorterRowkey(pSorter, &nKey); sqlite3VdbeRecordUnpack(pKeyInfo, nKey, pKey, r2); for(i=0; iaMem[i].flags & MEM_Null ){ *pRes = -1; return SQLITE_OK; } } *pRes = sqlite3VdbeRecordCompare(pVal->n, pVal->z, r2); return SQLITE_OK; } /************** End of vdbesort.c ********************************************/ /************** Begin file journal.c *****************************************/ /* ** 2007 August 22 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** ** This file implements a special kind of sqlite3_file object used ** by SQLite to create journal files if the atomic-write optimization ** is enabled. ** ** The distinctive characteristic of this sqlite3_file is that the ** actual on disk file is created lazily. When the file is created, ** the caller specifies a buffer size for an in-memory buffer to ** be used to service read() and write() requests. 
The actual file ** on disk is not created or populated until either: ** ** 1) The in-memory representation grows too large for the allocated ** buffer, or ** 2) The sqlite3JournalCreate() function is called. */ #ifdef SQLITE_ENABLE_ATOMIC_WRITE /* #include "sqliteInt.h" */ /* ** A JournalFile object is a subclass of sqlite3_file used by ** as an open file handle for journal files. */ struct JournalFile { sqlite3_io_methods *pMethod; /* I/O methods on journal files */ int nBuf; /* Size of zBuf[] in bytes */ char *zBuf; /* Space to buffer journal writes */ int iSize; /* Amount of zBuf[] currently used */ int flags; /* xOpen flags */ sqlite3_vfs *pVfs; /* The "real" underlying VFS */ sqlite3_file *pReal; /* The "real" underlying file descriptor */ const char *zJournal; /* Name of the journal file */ }; typedef struct JournalFile JournalFile; /* ** If it does not already exists, create and populate the on-disk file ** for JournalFile p. */ static int createFile(JournalFile *p){ int rc = SQLITE_OK; if( !p->pReal ){ sqlite3_file *pReal = (sqlite3_file *)&p[1]; rc = sqlite3OsOpen(p->pVfs, p->zJournal, pReal, p->flags, 0); if( rc==SQLITE_OK ){ p->pReal = pReal; if( p->iSize>0 ){ assert(p->iSize<=p->nBuf); rc = sqlite3OsWrite(p->pReal, p->zBuf, p->iSize, 0); } if( rc!=SQLITE_OK ){ /* If an error occurred while writing to the file, close it before ** returning. This way, SQLite uses the in-memory journal data to ** roll back changes made to the internal page-cache before this ** function was called. */ sqlite3OsClose(pReal); p->pReal = 0; } } } return rc; } /* ** Close the file. */ static int jrnlClose(sqlite3_file *pJfd){ JournalFile *p = (JournalFile *)pJfd; if( p->pReal ){ sqlite3OsClose(p->pReal); } sqlite3_free(p->zBuf); return SQLITE_OK; } /* ** Read data from the file. */ static int jrnlRead( sqlite3_file *pJfd, /* The journal file from which to read */ void *zBuf, /* Put the results here */ int iAmt, /* Number of bytes to read */ sqlite_int64 iOfst /* Begin reading at this offset */ ){ int rc = SQLITE_OK; JournalFile *p = (JournalFile *)pJfd; if( p->pReal ){ rc = sqlite3OsRead(p->pReal, zBuf, iAmt, iOfst); }else if( (iAmt+iOfst)>p->iSize ){ rc = SQLITE_IOERR_SHORT_READ; }else{ memcpy(zBuf, &p->zBuf[iOfst], iAmt); } return rc; } /* ** Write data to the file. */ static int jrnlWrite( sqlite3_file *pJfd, /* The journal file into which to write */ const void *zBuf, /* Take data to be written from here */ int iAmt, /* Number of bytes to write */ sqlite_int64 iOfst /* Begin writing at this offset into the file */ ){ int rc = SQLITE_OK; JournalFile *p = (JournalFile *)pJfd; if( !p->pReal && (iOfst+iAmt)>p->nBuf ){ rc = createFile(p); } if( rc==SQLITE_OK ){ if( p->pReal ){ rc = sqlite3OsWrite(p->pReal, zBuf, iAmt, iOfst); }else{ memcpy(&p->zBuf[iOfst], zBuf, iAmt); if( p->iSize<(iOfst+iAmt) ){ p->iSize = (iOfst+iAmt); } } } return rc; } /* ** Truncate the file. */ static int jrnlTruncate(sqlite3_file *pJfd, sqlite_int64 size){ int rc = SQLITE_OK; JournalFile *p = (JournalFile *)pJfd; if( p->pReal ){ rc = sqlite3OsTruncate(p->pReal, size); }else if( sizeiSize ){ p->iSize = size; } return rc; } /* ** Sync the file. */ static int jrnlSync(sqlite3_file *pJfd, int flags){ int rc; JournalFile *p = (JournalFile *)pJfd; if( p->pReal ){ rc = sqlite3OsSync(p->pReal, flags); }else{ rc = SQLITE_OK; } return rc; } /* ** Query the size of the file in bytes. 
*/ static int jrnlFileSize(sqlite3_file *pJfd, sqlite_int64 *pSize){ int rc = SQLITE_OK; JournalFile *p = (JournalFile *)pJfd; if( p->pReal ){ rc = sqlite3OsFileSize(p->pReal, pSize); }else{ *pSize = (sqlite_int64) p->iSize; } return rc; } /* ** Table of methods for JournalFile sqlite3_file object. */ static struct sqlite3_io_methods JournalFileMethods = { 1, /* iVersion */ jrnlClose, /* xClose */ jrnlRead, /* xRead */ jrnlWrite, /* xWrite */ jrnlTruncate, /* xTruncate */ jrnlSync, /* xSync */ jrnlFileSize, /* xFileSize */ 0, /* xLock */ 0, /* xUnlock */ 0, /* xCheckReservedLock */ 0, /* xFileControl */ 0, /* xSectorSize */ 0, /* xDeviceCharacteristics */ 0, /* xShmMap */ 0, /* xShmLock */ 0, /* xShmBarrier */ 0 /* xShmUnmap */ }; /* ** Open a journal file. */ SQLITE_PRIVATE int sqlite3JournalOpen( sqlite3_vfs *pVfs, /* The VFS to use for actual file I/O */ const char *zName, /* Name of the journal file */ sqlite3_file *pJfd, /* Preallocated, blank file handle */ int flags, /* Opening flags */ int nBuf /* Bytes buffered before opening the file */ ){ JournalFile *p = (JournalFile *)pJfd; memset(p, 0, sqlite3JournalSize(pVfs)); if( nBuf>0 ){ p->zBuf = sqlite3MallocZero(nBuf); if( !p->zBuf ){ return SQLITE_NOMEM; } }else{ return sqlite3OsOpen(pVfs, zName, pJfd, flags, 0); } p->pMethod = &JournalFileMethods; p->nBuf = nBuf; p->flags = flags; p->zJournal = zName; p->pVfs = pVfs; return SQLITE_OK; } /* ** If the argument p points to a JournalFile structure, and the underlying ** file has not yet been created, create it now. */ SQLITE_PRIVATE int sqlite3JournalCreate(sqlite3_file *p){ if( p->pMethods!=&JournalFileMethods ){ return SQLITE_OK; } return createFile((JournalFile *)p); } /* ** The file-handle passed as the only argument is guaranteed to be an open ** file. It may or may not be of class JournalFile. If the file is a ** JournalFile, and the underlying file on disk has not yet been opened, ** return 0. Otherwise, return 1. */ SQLITE_PRIVATE int sqlite3JournalExists(sqlite3_file *p){ return (p->pMethods!=&JournalFileMethods || ((JournalFile *)p)->pReal!=0); } /* ** Return the number of bytes required to store a JournalFile that uses vfs ** pVfs to create the underlying on-disk files. */ SQLITE_PRIVATE int sqlite3JournalSize(sqlite3_vfs *pVfs){ return (pVfs->szOsFile+sizeof(JournalFile)); } #endif /************** End of journal.c *********************************************/ /************** Begin file memjournal.c **************************************/ /* ** 2008 October 7 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** ** This file contains code use to implement an in-memory rollback journal. ** The in-memory rollback journal is used to journal transactions for ** ":memory:" databases and when the journal_mode=MEMORY pragma is used. */ /* #include "sqliteInt.h" */ /* Forward references to internal structures */ typedef struct MemJournal MemJournal; typedef struct FilePoint FilePoint; typedef struct FileChunk FileChunk; /* Space to hold the rollback journal is allocated in increments of ** this many bytes. ** ** The size chosen is a little less than a power of two. That way, ** the FileChunk object will have a size that almost exactly fills ** a power-of-two allocation. 
This minimizes wasted space in power-of-two ** memory allocators. */ #define JOURNAL_CHUNKSIZE ((int)(1024-sizeof(FileChunk*))) /* ** The rollback journal is composed of a linked list of these structures. */ struct FileChunk { FileChunk *pNext; /* Next chunk in the journal */ u8 zChunk[JOURNAL_CHUNKSIZE]; /* Content of this chunk */ }; /* ** An instance of this object serves as a cursor into the rollback journal. ** The cursor can be either for reading or writing. */ struct FilePoint { sqlite3_int64 iOffset; /* Offset from the beginning of the file */ FileChunk *pChunk; /* Specific chunk into which cursor points */ }; /* ** This subclass is a subclass of sqlite3_file. Each open memory-journal ** is an instance of this class. */ struct MemJournal { sqlite3_io_methods *pMethod; /* Parent class. MUST BE FIRST */ FileChunk *pFirst; /* Head of in-memory chunk-list */ FilePoint endpoint; /* Pointer to the end of the file */ FilePoint readpoint; /* Pointer to the end of the last xRead() */ }; /* ** Read data from the in-memory journal file. This is the implementation ** of the sqlite3_vfs.xRead method. */ static int memjrnlRead( sqlite3_file *pJfd, /* The journal file from which to read */ void *zBuf, /* Put the results here */ int iAmt, /* Number of bytes to read */ sqlite_int64 iOfst /* Begin reading at this offset */ ){ MemJournal *p = (MemJournal *)pJfd; u8 *zOut = zBuf; int nRead = iAmt; int iChunkOffset; FileChunk *pChunk; /* SQLite never tries to read past the end of a rollback journal file */ assert( iOfst+iAmt<=p->endpoint.iOffset ); if( p->readpoint.iOffset!=iOfst || iOfst==0 ){ sqlite3_int64 iOff = 0; for(pChunk=p->pFirst; ALWAYS(pChunk) && (iOff+JOURNAL_CHUNKSIZE)<=iOfst; pChunk=pChunk->pNext ){ iOff += JOURNAL_CHUNKSIZE; } }else{ pChunk = p->readpoint.pChunk; } iChunkOffset = (int)(iOfst%JOURNAL_CHUNKSIZE); do { int iSpace = JOURNAL_CHUNKSIZE - iChunkOffset; int nCopy = MIN(nRead, (JOURNAL_CHUNKSIZE - iChunkOffset)); memcpy(zOut, &pChunk->zChunk[iChunkOffset], nCopy); zOut += nCopy; nRead -= iSpace; iChunkOffset = 0; } while( nRead>=0 && (pChunk=pChunk->pNext)!=0 && nRead>0 ); p->readpoint.iOffset = iOfst+iAmt; p->readpoint.pChunk = pChunk; return SQLITE_OK; } /* ** Write data to the file. */ static int memjrnlWrite( sqlite3_file *pJfd, /* The journal file into which to write */ const void *zBuf, /* Take data to be written from here */ int iAmt, /* Number of bytes to write */ sqlite_int64 iOfst /* Begin writing at this offset into the file */ ){ MemJournal *p = (MemJournal *)pJfd; int nWrite = iAmt; u8 *zWrite = (u8 *)zBuf; /* An in-memory journal file should only ever be appended to. Random ** access writes are not required by sqlite. */ assert( iOfst==p->endpoint.iOffset ); UNUSED_PARAMETER(iOfst); while( nWrite>0 ){ FileChunk *pChunk = p->endpoint.pChunk; int iChunkOffset = (int)(p->endpoint.iOffset%JOURNAL_CHUNKSIZE); int iSpace = MIN(nWrite, JOURNAL_CHUNKSIZE - iChunkOffset); if( iChunkOffset==0 ){ /* New chunk is required to extend the file. */ FileChunk *pNew = sqlite3_malloc(sizeof(FileChunk)); if( !pNew ){ return SQLITE_IOERR_NOMEM; } pNew->pNext = 0; if( pChunk ){ assert( p->pFirst ); pChunk->pNext = pNew; }else{ assert( !p->pFirst ); p->pFirst = pNew; } p->endpoint.pChunk = pNew; } memcpy(&p->endpoint.pChunk->zChunk[iChunkOffset], zWrite, iSpace); zWrite += iSpace; nWrite -= iSpace; p->endpoint.iOffset += iSpace; } return SQLITE_OK; } /* ** Truncate the file. 
*/ static int memjrnlTruncate(sqlite3_file *pJfd, sqlite_int64 size){ MemJournal *p = (MemJournal *)pJfd; FileChunk *pChunk; assert(size==0); UNUSED_PARAMETER(size); pChunk = p->pFirst; while( pChunk ){ FileChunk *pTmp = pChunk; pChunk = pChunk->pNext; sqlite3_free(pTmp); } sqlite3MemJournalOpen(pJfd); return SQLITE_OK; } /* ** Close the file. */ static int memjrnlClose(sqlite3_file *pJfd){ memjrnlTruncate(pJfd, 0); return SQLITE_OK; } /* ** Sync the file. ** ** Syncing an in-memory journal is a no-op. And, in fact, this routine ** is never called in a working implementation. This implementation ** exists purely as a contingency, in case some malfunction in some other ** part of SQLite causes Sync to be called by mistake. */ static int memjrnlSync(sqlite3_file *NotUsed, int NotUsed2){ UNUSED_PARAMETER2(NotUsed, NotUsed2); return SQLITE_OK; } /* ** Query the size of the file in bytes. */ static int memjrnlFileSize(sqlite3_file *pJfd, sqlite_int64 *pSize){ MemJournal *p = (MemJournal *)pJfd; *pSize = (sqlite_int64) p->endpoint.iOffset; return SQLITE_OK; } /* ** Table of methods for MemJournal sqlite3_file object. */ static const struct sqlite3_io_methods MemJournalMethods = { 1, /* iVersion */ memjrnlClose, /* xClose */ memjrnlRead, /* xRead */ memjrnlWrite, /* xWrite */ memjrnlTruncate, /* xTruncate */ memjrnlSync, /* xSync */ memjrnlFileSize, /* xFileSize */ 0, /* xLock */ 0, /* xUnlock */ 0, /* xCheckReservedLock */ 0, /* xFileControl */ 0, /* xSectorSize */ 0, /* xDeviceCharacteristics */ 0, /* xShmMap */ 0, /* xShmLock */ 0, /* xShmBarrier */ 0, /* xShmUnmap */ 0, /* xFetch */ 0 /* xUnfetch */ }; /* ** Open a journal file. */ SQLITE_PRIVATE void sqlite3MemJournalOpen(sqlite3_file *pJfd){ MemJournal *p = (MemJournal *)pJfd; assert( EIGHT_BYTE_ALIGNMENT(p) ); memset(p, 0, sqlite3MemJournalSize()); p->pMethod = (sqlite3_io_methods*)&MemJournalMethods; } /* ** Return true if the file-handle passed as an argument is ** an in-memory journal */ SQLITE_PRIVATE int sqlite3IsMemJournal(sqlite3_file *pJfd){ return pJfd->pMethods==&MemJournalMethods; } /* ** Return the number of bytes required to store a MemJournal file descriptor. */ SQLITE_PRIVATE int sqlite3MemJournalSize(void){ return sizeof(MemJournal); } /************** End of memjournal.c ******************************************/ /************** Begin file walker.c ******************************************/ /* ** 2008 August 16 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** This file contains routines used for walking the parser tree for ** an SQL statement. */ /* #include "sqliteInt.h" */ /* #include */ /* #include */ /* ** Walk an expression tree. Invoke the callback once for each node ** of the expression, while descending. (In other words, the callback ** is invoked before visiting children.) ** ** The return value from the callback should be one of the WRC_* ** constants to specify how to proceed with the walk. ** ** WRC_Continue Continue descending down the tree. ** ** WRC_Prune Do not descend into child nodes. But allow ** the walk to continue with sibling nodes. ** ** WRC_Abort Do no more callbacks. Unwind the stack and ** return the top-level walk call. 
** ** The return value from this routine is WRC_Abort to abandon the tree walk ** and WRC_Continue to continue. */ SQLITE_PRIVATE int sqlite3WalkExpr(Walker *pWalker, Expr *pExpr){ int rc; if( pExpr==0 ) return WRC_Continue; testcase( ExprHasProperty(pExpr, EP_TokenOnly) ); testcase( ExprHasProperty(pExpr, EP_Reduced) ); rc = pWalker->xExprCallback(pWalker, pExpr); if( rc==WRC_Continue && !ExprHasProperty(pExpr,EP_TokenOnly) ){ if( sqlite3WalkExpr(pWalker, pExpr->pLeft) ) return WRC_Abort; if( sqlite3WalkExpr(pWalker, pExpr->pRight) ) return WRC_Abort; if( ExprHasProperty(pExpr, EP_xIsSelect) ){ if( sqlite3WalkSelect(pWalker, pExpr->x.pSelect) ) return WRC_Abort; }else{ if( sqlite3WalkExprList(pWalker, pExpr->x.pList) ) return WRC_Abort; } } return rc & WRC_Abort; } /* ** Call sqlite3WalkExpr() for every expression in list p or until ** an abort request is seen. */ SQLITE_PRIVATE int sqlite3WalkExprList(Walker *pWalker, ExprList *p){ int i; struct ExprList_item *pItem; if( p ){ for(i=p->nExpr, pItem=p->a; i>0; i--, pItem++){ if( sqlite3WalkExpr(pWalker, pItem->pExpr) ) return WRC_Abort; } } return WRC_Continue; } /* ** Walk all expressions associated with SELECT statement p. Do ** not invoke the SELECT callback on p, but do (of course) invoke ** any expr callbacks and SELECT callbacks that come from subqueries. ** Return WRC_Abort or WRC_Continue. */ SQLITE_PRIVATE int sqlite3WalkSelectExpr(Walker *pWalker, Select *p){ if( sqlite3WalkExprList(pWalker, p->pEList) ) return WRC_Abort; if( sqlite3WalkExpr(pWalker, p->pWhere) ) return WRC_Abort; if( sqlite3WalkExprList(pWalker, p->pGroupBy) ) return WRC_Abort; if( sqlite3WalkExpr(pWalker, p->pHaving) ) return WRC_Abort; if( sqlite3WalkExprList(pWalker, p->pOrderBy) ) return WRC_Abort; if( sqlite3WalkExpr(pWalker, p->pLimit) ) return WRC_Abort; if( sqlite3WalkExpr(pWalker, p->pOffset) ) return WRC_Abort; return WRC_Continue; } /* ** Walk the parse trees associated with all subqueries in the ** FROM clause of SELECT statement p. Do not invoke the select ** callback on p, but do invoke it on each FROM clause subquery ** and on any subqueries further down in the tree. Return ** WRC_Abort or WRC_Continue; */ SQLITE_PRIVATE int sqlite3WalkSelectFrom(Walker *pWalker, Select *p){ SrcList *pSrc; int i; struct SrcList_item *pItem; pSrc = p->pSrc; if( ALWAYS(pSrc) ){ for(i=pSrc->nSrc, pItem=pSrc->a; i>0; i--, pItem++){ if( sqlite3WalkSelect(pWalker, pItem->pSelect) ){ return WRC_Abort; } } } return WRC_Continue; } /* ** Call sqlite3WalkExpr() for every expression in Select statement p. ** Invoke sqlite3WalkSelect() for subqueries in the FROM clause and ** on the compound select chain, p->pPrior. ** ** If it is not NULL, the xSelectCallback() callback is invoked before ** the walk of the expressions and FROM clause. The xSelectCallback2() ** method, if it is not NULL, is invoked following the walk of the ** expressions and FROM clause. ** ** Return WRC_Continue under normal conditions. Return WRC_Abort if ** there is an abort request. ** ** If the Walker does not have an xSelectCallback() then this routine ** is a no-op returning WRC_Continue. 
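**
** A typical walk is launched the way this file itself does it: zero the
** Walker, fill in the callbacks of interest, then hand it the root Select.
** A minimal sketch (the callback names are illustrative only):
**
**     Walker w;
**     memset(&w, 0, sizeof(w));
**     w.pParse = pParse;
**     w.xExprCallback = myExprStep;       /* called for every Expr node */
**     w.xSelectCallback = mySelectStep;   /* called for every Select */
**     sqlite3WalkSelect(&w, p);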
*/ SQLITE_PRIVATE int sqlite3WalkSelect(Walker *pWalker, Select *p){ int rc; if( p==0 || (pWalker->xSelectCallback==0 && pWalker->xSelectCallback2==0) ){ return WRC_Continue; } rc = WRC_Continue; pWalker->walkerDepth++; while( p ){ if( pWalker->xSelectCallback ){ rc = pWalker->xSelectCallback(pWalker, p); if( rc ) break; } if( sqlite3WalkSelectExpr(pWalker, p) || sqlite3WalkSelectFrom(pWalker, p) ){ pWalker->walkerDepth--; return WRC_Abort; } if( pWalker->xSelectCallback2 ){ pWalker->xSelectCallback2(pWalker, p); } p = p->pPrior; } pWalker->walkerDepth--; return rc & WRC_Abort; } /************** End of walker.c **********************************************/ /************** Begin file resolve.c *****************************************/ /* ** 2008 August 18 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** ** This file contains routines used for walking the parser tree and ** resolve all identifiers by associating them with a particular ** table and column. */ /* #include "sqliteInt.h" */ /* #include */ /* #include */ /* ** Walk the expression tree pExpr and increase the aggregate function ** depth (the Expr.op2 field) by N on every TK_AGG_FUNCTION node. ** This needs to occur when copying a TK_AGG_FUNCTION node from an ** outer query into an inner subquery. ** ** incrAggFunctionDepth(pExpr,n) is the main routine. incrAggDepth(..) ** is a helper function - a callback for the tree walker. */ static int incrAggDepth(Walker *pWalker, Expr *pExpr){ if( pExpr->op==TK_AGG_FUNCTION ) pExpr->op2 += pWalker->u.n; return WRC_Continue; } static void incrAggFunctionDepth(Expr *pExpr, int N){ if( N>0 ){ Walker w; memset(&w, 0, sizeof(w)); w.xExprCallback = incrAggDepth; w.u.n = N; sqlite3WalkExpr(&w, pExpr); } } /* ** Turn the pExpr expression into an alias for the iCol-th column of the ** result set in pEList. ** ** If the result set column is a simple column reference, then this routine ** makes an exact copy. But for any other kind of expression, this ** routine make a copy of the result set column as the argument to the ** TK_AS operator. The TK_AS operator causes the expression to be ** evaluated just once and then reused for each alias. ** ** The reason for suppressing the TK_AS term when the expression is a simple ** column reference is so that the column reference will be recognized as ** usable by indices within the WHERE clause processing logic. ** ** The TK_AS operator is inhibited if zType[0]=='G'. This means ** that in a GROUP BY clause, the expression is evaluated twice. Hence: ** ** SELECT random()%5 AS x, count(*) FROM tab GROUP BY x ** ** Is equivalent to: ** ** SELECT random()%5 AS x, count(*) FROM tab GROUP BY random()%5 ** ** The result of random()%5 in the GROUP BY clause is probably different ** from the result in the result-set. On the other hand Standard SQL does ** not allow the GROUP BY clause to contain references to result-set columns. ** So this should never come up in well-formed queries. ** ** If the reference is followed by a COLLATE operator, then make sure ** the COLLATE operator is preserved. 
For example: ** ** SELECT a+b, c+d FROM t1 ORDER BY 1 COLLATE nocase; ** ** Should be transformed into: ** ** SELECT a+b, c+d FROM t1 ORDER BY (a+b) COLLATE nocase; ** ** The nSubquery parameter specifies how many levels of subquery the ** alias is removed from the original expression. The usual value is ** zero but it might be more if the alias is contained within a subquery ** of the original expression. The Expr.op2 field of TK_AGG_FUNCTION ** structures must be increased by the nSubquery amount. */ static void resolveAlias( Parse *pParse, /* Parsing context */ ExprList *pEList, /* A result set */ int iCol, /* A column in the result set. 0..pEList->nExpr-1 */ Expr *pExpr, /* Transform this into an alias to the result set */ const char *zType, /* "GROUP" or "ORDER" or "" */ int nSubquery /* Number of subqueries that the label is moving */ ){ Expr *pOrig; /* The iCol-th column of the result set */ Expr *pDup; /* Copy of pOrig */ sqlite3 *db; /* The database connection */ assert( iCol>=0 && iColnExpr ); pOrig = pEList->a[iCol].pExpr; assert( pOrig!=0 ); db = pParse->db; pDup = sqlite3ExprDup(db, pOrig, 0); if( pDup==0 ) return; if( pOrig->op!=TK_COLUMN && zType[0]!='G' ){ incrAggFunctionDepth(pDup, nSubquery); pDup = sqlite3PExpr(pParse, TK_AS, pDup, 0, 0); if( pDup==0 ) return; ExprSetProperty(pDup, EP_Skip); if( pEList->a[iCol].u.x.iAlias==0 ){ pEList->a[iCol].u.x.iAlias = (u16)(++pParse->nAlias); } pDup->iTable = pEList->a[iCol].u.x.iAlias; } if( pExpr->op==TK_COLLATE ){ pDup = sqlite3ExprAddCollateString(pParse, pDup, pExpr->u.zToken); } /* Before calling sqlite3ExprDelete(), set the EP_Static flag. This ** prevents ExprDelete() from deleting the Expr structure itself, ** allowing it to be repopulated by the memcpy() on the following line. ** The pExpr->u.zToken might point into memory that will be freed by the ** sqlite3DbFree(db, pDup) on the last line of this block, so be sure to ** make a copy of the token before doing the sqlite3DbFree(). */ ExprSetProperty(pExpr, EP_Static); sqlite3ExprDelete(db, pExpr); memcpy(pExpr, pDup, sizeof(*pExpr)); if( !ExprHasProperty(pExpr, EP_IntValue) && pExpr->u.zToken!=0 ){ assert( (pExpr->flags & (EP_Reduced|EP_TokenOnly))==0 ); pExpr->u.zToken = sqlite3DbStrDup(db, pExpr->u.zToken); pExpr->flags |= EP_MemToken; } sqlite3DbFree(db, pDup); } /* ** Return TRUE if the name zCol occurs anywhere in the USING clause. ** ** Return FALSE if the USING clause is NULL or if it does not contain ** zCol. */ static int nameInUsingClause(IdList *pUsing, const char *zCol){ if( pUsing ){ int k; for(k=0; knId; k++){ if( sqlite3StrICmp(pUsing->a[k].zName, zCol)==0 ) return 1; } } return 0; } /* ** Subqueries stores the original database, table and column names for their ** result sets in ExprList.a[].zSpan, in the form "DATABASE.TABLE.COLUMN". ** Check to see if the zSpan given to this routine matches the zDb, zTab, ** and zCol. If any of zDb, zTab, and zCol are NULL then those fields will ** match anything. 
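**
** For illustration (the span value is hypothetical), a zSpan of
** "main.t1.a" gives:
**
**     sqlite3MatchSpanName("main.t1.a", "a", "t1", "main")   -> 1
**     sqlite3MatchSpanName("main.t1.a", "a", 0,    0)        -> 1  (NULLs match anything)
**     sqlite3MatchSpanName("main.t1.a", "b", "t1", "main")   -> 0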
*/ SQLITE_PRIVATE int sqlite3MatchSpanName( const char *zSpan, const char *zCol, const char *zTab, const char *zDb ){ int n; for(n=0; ALWAYS(zSpan[n]) && zSpan[n]!='.'; n++){} if( zDb && (sqlite3StrNICmp(zSpan, zDb, n)!=0 || zDb[n]!=0) ){ return 0; } zSpan += n+1; for(n=0; ALWAYS(zSpan[n]) && zSpan[n]!='.'; n++){} if( zTab && (sqlite3StrNICmp(zSpan, zTab, n)!=0 || zTab[n]!=0) ){ return 0; } zSpan += n+1; if( zCol && sqlite3StrICmp(zSpan, zCol)!=0 ){ return 0; } return 1; } /* ** Given the name of a column of the form X.Y.Z or Y.Z or just Z, look up ** that name in the set of source tables in pSrcList and make the pExpr ** expression node refer back to that source column. The following changes ** are made to pExpr: ** ** pExpr->iDb Set the index in db->aDb[] of the database X ** (even if X is implied). ** pExpr->iTable Set to the cursor number for the table obtained ** from pSrcList. ** pExpr->pTab Points to the Table structure of X.Y (even if ** X and/or Y are implied.) ** pExpr->iColumn Set to the column number within the table. ** pExpr->op Set to TK_COLUMN. ** pExpr->pLeft Any expression this points to is deleted ** pExpr->pRight Any expression this points to is deleted. ** ** The zDb variable is the name of the database (the "X"). This value may be ** NULL meaning that name is of the form Y.Z or Z. Any available database ** can be used. The zTable variable is the name of the table (the "Y"). This ** value can be NULL if zDb is also NULL. If zTable is NULL it ** means that the form of the name is Z and that columns from any table ** can be used. ** ** If the name cannot be resolved unambiguously, leave an error message ** in pParse and return WRC_Abort. Return WRC_Prune on success. */ static int lookupName( Parse *pParse, /* The parsing context */ const char *zDb, /* Name of the database containing table, or NULL */ const char *zTab, /* Name of table containing column, or NULL */ const char *zCol, /* Name of the column. */ NameContext *pNC, /* The name context used to resolve the name */ Expr *pExpr /* Make this EXPR node point to the selected column */ ){ int i, j; /* Loop counters */ int cnt = 0; /* Number of matching column names */ int cntTab = 0; /* Number of matching table names */ int nSubquery = 0; /* How many levels of subquery */ sqlite3 *db = pParse->db; /* The database connection */ struct SrcList_item *pItem; /* Use for looping over pSrcList items */ struct SrcList_item *pMatch = 0; /* The matching pSrcList item */ NameContext *pTopNC = pNC; /* First namecontext in the list */ Schema *pSchema = 0; /* Schema of the expression */ int isTrigger = 0; /* True if resolved to a trigger column */ Table *pTab = 0; /* Table hold the row */ Column *pCol; /* A column of pTab */ assert( pNC ); /* the name context cannot be NULL. */ assert( zCol ); /* The Z in X.Y.Z cannot be NULL */ assert( !ExprHasProperty(pExpr, EP_TokenOnly|EP_Reduced) ); /* Initialize the node to no-match */ pExpr->iTable = -1; pExpr->pTab = 0; ExprSetVVAProperty(pExpr, EP_NoReduce); /* Translate the schema name in zDb into a pointer to the corresponding ** schema. If not found, pSchema will remain NULL and nothing will match ** resulting in an appropriate error message toward the end of this routine */ if( zDb ){ testcase( pNC->ncFlags & NC_PartIdx ); testcase( pNC->ncFlags & NC_IsCheck ); if( (pNC->ncFlags & (NC_PartIdx|NC_IsCheck))!=0 ){ /* Silently ignore database qualifiers inside CHECK constraints and ** partial indices. 
Do not raise errors because that might break ** legacy and because it does not hurt anything to just ignore the ** database name. */ zDb = 0; }else{ for(i=0; inDb; i++){ assert( db->aDb[i].zName ); if( sqlite3StrICmp(db->aDb[i].zName,zDb)==0 ){ pSchema = db->aDb[i].pSchema; break; } } } } /* Start at the inner-most context and move outward until a match is found */ while( pNC && cnt==0 ){ ExprList *pEList; SrcList *pSrcList = pNC->pSrcList; if( pSrcList ){ for(i=0, pItem=pSrcList->a; inSrc; i++, pItem++){ pTab = pItem->pTab; assert( pTab!=0 && pTab->zName!=0 ); assert( pTab->nCol>0 ); if( pItem->pSelect && (pItem->pSelect->selFlags & SF_NestedFrom)!=0 ){ int hit = 0; pEList = pItem->pSelect->pEList; for(j=0; jnExpr; j++){ if( sqlite3MatchSpanName(pEList->a[j].zSpan, zCol, zTab, zDb) ){ cnt++; cntTab = 2; pMatch = pItem; pExpr->iColumn = j; hit = 1; } } if( hit || zTab==0 ) continue; } if( zDb && pTab->pSchema!=pSchema ){ continue; } if( zTab ){ const char *zTabName = pItem->zAlias ? pItem->zAlias : pTab->zName; assert( zTabName!=0 ); if( sqlite3StrICmp(zTabName, zTab)!=0 ){ continue; } } if( 0==(cntTab++) ){ pMatch = pItem; } for(j=0, pCol=pTab->aCol; jnCol; j++, pCol++){ if( sqlite3StrICmp(pCol->zName, zCol)==0 ){ /* If there has been exactly one prior match and this match ** is for the right-hand table of a NATURAL JOIN or is in a ** USING clause, then skip this match. */ if( cnt==1 ){ if( pItem->jointype & JT_NATURAL ) continue; if( nameInUsingClause(pItem->pUsing, zCol) ) continue; } cnt++; pMatch = pItem; /* Substitute the rowid (column -1) for the INTEGER PRIMARY KEY */ pExpr->iColumn = j==pTab->iPKey ? -1 : (i16)j; break; } } } if( pMatch ){ pExpr->iTable = pMatch->iCursor; pExpr->pTab = pMatch->pTab; /* RIGHT JOIN not (yet) supported */ assert( (pMatch->jointype & JT_RIGHT)==0 ); if( (pMatch->jointype & JT_LEFT)!=0 ){ ExprSetProperty(pExpr, EP_CanBeNull); } pSchema = pExpr->pTab->pSchema; } } /* if( pSrcList ) */ #ifndef SQLITE_OMIT_TRIGGER /* If we have not already resolved the name, then maybe ** it is a new.* or old.* trigger argument reference */ if( zDb==0 && zTab!=0 && cntTab==0 && pParse->pTriggerTab!=0 ){ int op = pParse->eTriggerOp; assert( op==TK_DELETE || op==TK_UPDATE || op==TK_INSERT ); if( op!=TK_DELETE && sqlite3StrICmp("new",zTab) == 0 ){ pExpr->iTable = 1; pTab = pParse->pTriggerTab; }else if( op!=TK_INSERT && sqlite3StrICmp("old",zTab)==0 ){ pExpr->iTable = 0; pTab = pParse->pTriggerTab; }else{ pTab = 0; } if( pTab ){ int iCol; pSchema = pTab->pSchema; cntTab++; for(iCol=0, pCol=pTab->aCol; iColnCol; iCol++, pCol++){ if( sqlite3StrICmp(pCol->zName, zCol)==0 ){ if( iCol==pTab->iPKey ){ iCol = -1; } break; } } if( iCol>=pTab->nCol && sqlite3IsRowid(zCol) && VisibleRowid(pTab) ){ /* IMP: R-51414-32910 */ /* IMP: R-44911-55124 */ iCol = -1; } if( iColnCol ){ cnt++; if( iCol<0 ){ pExpr->affinity = SQLITE_AFF_INTEGER; }else if( pExpr->iTable==0 ){ testcase( iCol==31 ); testcase( iCol==32 ); pParse->oldmask |= (iCol>=32 ? 0xffffffff : (((u32)1)<newmask |= (iCol>=32 ? 0xffffffff : (((u32)1)<iColumn = (i16)iCol; pExpr->pTab = pTab; isTrigger = 1; } } } #endif /* !defined(SQLITE_OMIT_TRIGGER) */ /* ** Perhaps the name is a reference to the ROWID */ if( cnt==0 && cntTab==1 && pMatch && sqlite3IsRowid(zCol) && VisibleRowid(pMatch->pTab) ){ cnt = 1; pExpr->iColumn = -1; /* IMP: R-44911-55124 */ pExpr->affinity = SQLITE_AFF_INTEGER; } /* ** If the input is of the form Z (not Y.Z or X.Y.Z) then the name Z ** might refer to an result-set alias. 
This happens, for example, when ** we are resolving names in the WHERE clause of the following command: ** ** SELECT a+b AS x FROM table WHERE x<10; ** ** In cases like this, replace pExpr with a copy of the expression that ** forms the result set entry ("a+b" in the example) and return immediately. ** Note that the expression in the result set should have already been ** resolved by the time the WHERE clause is resolved. ** ** The ability to use an output result-set column in the WHERE, GROUP BY, ** or HAVING clauses, or as part of a larger expression in the ORDRE BY ** clause is not standard SQL. This is a (goofy) SQLite extension, that ** is supported for backwards compatibility only. TO DO: Issue a warning ** on sqlite3_log() whenever the capability is used. */ if( (pEList = pNC->pEList)!=0 && zTab==0 && cnt==0 ){ for(j=0; jnExpr; j++){ char *zAs = pEList->a[j].zName; if( zAs!=0 && sqlite3StrICmp(zAs, zCol)==0 ){ Expr *pOrig; assert( pExpr->pLeft==0 && pExpr->pRight==0 ); assert( pExpr->x.pList==0 ); assert( pExpr->x.pSelect==0 ); pOrig = pEList->a[j].pExpr; if( (pNC->ncFlags&NC_AllowAgg)==0 && ExprHasProperty(pOrig, EP_Agg) ){ sqlite3ErrorMsg(pParse, "misuse of aliased aggregate %s", zAs); return WRC_Abort; } resolveAlias(pParse, pEList, j, pExpr, "", nSubquery); cnt = 1; pMatch = 0; assert( zTab==0 && zDb==0 ); goto lookupname_end; } } } /* Advance to the next name context. The loop will exit when either ** we have a match (cnt>0) or when we run out of name contexts. */ if( cnt==0 ){ pNC = pNC->pNext; nSubquery++; } } /* ** If X and Y are NULL (in other words if only the column name Z is ** supplied) and the value of Z is enclosed in double-quotes, then ** Z is a string literal if it doesn't match any column names. In that ** case, we need to return right away and not make any changes to ** pExpr. ** ** Because no reference was made to outer contexts, the pNC->nRef ** fields are not changed in any context. */ if( cnt==0 && zTab==0 && ExprHasProperty(pExpr,EP_DblQuoted) ){ pExpr->op = TK_STRING; pExpr->pTab = 0; return WRC_Prune; } /* ** cnt==0 means there was not match. cnt>1 means there were two or ** more matches. Either way, we have an error. */ if( cnt!=1 ){ const char *zErr; zErr = cnt==0 ? "no such column" : "ambiguous column name"; if( zDb ){ sqlite3ErrorMsg(pParse, "%s: %s.%s.%s", zErr, zDb, zTab, zCol); }else if( zTab ){ sqlite3ErrorMsg(pParse, "%s: %s.%s", zErr, zTab, zCol); }else{ sqlite3ErrorMsg(pParse, "%s: %s", zErr, zCol); } pParse->checkSchema = 1; pTopNC->nErr++; } /* If a column from a table in pSrcList is referenced, then record ** this fact in the pSrcList.a[].colUsed bitmask. Column 0 causes ** bit 0 to be set. Column 1 sets bit 1. And so forth. If the ** column number is greater than the number of bits in the bitmask ** then set the high-order bit of the bitmask. */ if( pExpr->iColumn>=0 && pMatch!=0 ){ int n = pExpr->iColumn; testcase( n==BMS-1 ); if( n>=BMS ){ n = BMS-1; } assert( pMatch->iCursor==pExpr->iTable ); pMatch->colUsed |= ((Bitmask)1)<pLeft); pExpr->pLeft = 0; sqlite3ExprDelete(db, pExpr->pRight); pExpr->pRight = 0; pExpr->op = (isTrigger ? TK_TRIGGER : TK_COLUMN); lookupname_end: if( cnt==1 ){ assert( pNC!=0 ); if( pExpr->op!=TK_AS ){ sqlite3AuthRead(pParse, pExpr, pSchema, pNC->pSrcList); } /* Increment the nRef value on all name contexts from TopNC up to ** the point where the name matched. 
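** These reference counts are what later allows resolveSelectStep() to detect
** that a subquery is correlated: it sums pNC->nRef over the outer contexts
** before and after resolving the subquery and sets SrcList_item.isCorrelated
** if the total changed.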
*/ for(;;){ assert( pTopNC!=0 ); pTopNC->nRef++; if( pTopNC==pNC ) break; pTopNC = pTopNC->pNext; } return WRC_Prune; } else { return WRC_Abort; } } /* ** Allocate and return a pointer to an expression to load the column iCol ** from datasource iSrc in SrcList pSrc. */ SQLITE_PRIVATE Expr *sqlite3CreateColumnExpr(sqlite3 *db, SrcList *pSrc, int iSrc, int iCol){ Expr *p = sqlite3ExprAlloc(db, TK_COLUMN, 0, 0); if( p ){ struct SrcList_item *pItem = &pSrc->a[iSrc]; p->pTab = pItem->pTab; p->iTable = pItem->iCursor; if( p->pTab->iPKey==iCol ){ p->iColumn = -1; }else{ p->iColumn = (ynVar)iCol; testcase( iCol==BMS ); testcase( iCol==BMS-1 ); pItem->colUsed |= ((Bitmask)1)<<(iCol>=BMS ? BMS-1 : iCol); } ExprSetProperty(p, EP_Resolved); } return p; } /* ** Report an error that an expression is not valid for a partial index WHERE ** clause. */ static void notValidPartIdxWhere( Parse *pParse, /* Leave error message here */ NameContext *pNC, /* The name context */ const char *zMsg /* Type of error */ ){ if( (pNC->ncFlags & NC_PartIdx)!=0 ){ sqlite3ErrorMsg(pParse, "%s prohibited in partial index WHERE clauses", zMsg); } } #ifndef SQLITE_OMIT_CHECK /* ** Report an error that an expression is not valid for a CHECK constraint. */ static void notValidCheckConstraint( Parse *pParse, /* Leave error message here */ NameContext *pNC, /* The name context */ const char *zMsg /* Type of error */ ){ if( (pNC->ncFlags & NC_IsCheck)!=0 ){ sqlite3ErrorMsg(pParse,"%s prohibited in CHECK constraints", zMsg); } } #else # define notValidCheckConstraint(P,N,M) #endif /* ** Expression p should encode a floating point value between 1.0 and 0.0. ** Return 1024 times this value. Or return -1 if p is not a floating point ** value between 1.0 and 0.0. */ static int exprProbability(Expr *p){ double r = -1.0; if( p->op!=TK_FLOAT ) return -1; sqlite3AtoF(p->u.zToken, &r, sqlite3Strlen30(p->u.zToken), SQLITE_UTF8); assert( r>=0.0 ); if( r>1.0 ) return -1; return (int)(r*134217728.0); } /* ** This routine is callback for sqlite3WalkExpr(). ** ** Resolve symbolic names into TK_COLUMN operators for the current ** node in the expression tree. Return 0 to continue the search down ** the tree or 2 to abort the tree walk. ** ** This routine also does error checking and name resolution for ** function names. The operator for aggregate functions is changed ** to TK_AGG_FUNCTION. */ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ NameContext *pNC; Parse *pParse; pNC = pWalker->u.pNC; assert( pNC!=0 ); pParse = pNC->pParse; assert( pParse==pWalker->pParse ); if( ExprHasProperty(pExpr, EP_Resolved) ) return WRC_Prune; ExprSetProperty(pExpr, EP_Resolved); #ifndef NDEBUG if( pNC->pSrcList && pNC->pSrcList->nAlloc>0 ){ SrcList *pSrcList = pNC->pSrcList; int i; for(i=0; ipSrcList->nSrc; i++){ assert( pSrcList->a[i].iCursor>=0 && pSrcList->a[i].iCursornTab); } } #endif switch( pExpr->op ){ #if defined(SQLITE_ENABLE_UPDATE_DELETE_LIMIT) && !defined(SQLITE_OMIT_SUBQUERY) /* The special operator TK_ROW means use the rowid for the first ** column in the FROM clause. This is used by the LIMIT and ORDER BY ** clause processing on UPDATE and DELETE statements. 
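** For example (illustrative statement), when SQLite is built with
** SQLITE_ENABLE_UPDATE_DELETE_LIMIT a statement such as
**
**     DELETE FROM t1 ORDER BY rowid LIMIT 10;
**
** is implemented using an internally generated TK_ROW expression, which is
** resolved here into a reference to the rowid of t1.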
*/ case TK_ROW: { SrcList *pSrcList = pNC->pSrcList; struct SrcList_item *pItem; assert( pSrcList && pSrcList->nSrc==1 ); pItem = pSrcList->a; pExpr->op = TK_COLUMN; pExpr->pTab = pItem->pTab; pExpr->iTable = pItem->iCursor; pExpr->iColumn = -1; pExpr->affinity = SQLITE_AFF_INTEGER; break; } #endif /* defined(SQLITE_ENABLE_UPDATE_DELETE_LIMIT) && !defined(SQLITE_OMIT_SUBQUERY) */ /* A lone identifier is the name of a column. */ case TK_ID: { return lookupName(pParse, 0, 0, pExpr->u.zToken, pNC, pExpr); } /* A table name and column name: ID.ID ** Or a database, table and column: ID.ID.ID */ case TK_DOT: { const char *zColumn; const char *zTable; const char *zDb; Expr *pRight; /* if( pSrcList==0 ) break; */ pRight = pExpr->pRight; if( pRight->op==TK_ID ){ zDb = 0; zTable = pExpr->pLeft->u.zToken; zColumn = pRight->u.zToken; }else{ assert( pRight->op==TK_DOT ); zDb = pExpr->pLeft->u.zToken; zTable = pRight->pLeft->u.zToken; zColumn = pRight->pRight->u.zToken; } return lookupName(pParse, zDb, zTable, zColumn, pNC, pExpr); } /* Resolve function names */ case TK_FUNCTION: { ExprList *pList = pExpr->x.pList; /* The argument list */ int n = pList ? pList->nExpr : 0; /* Number of arguments */ int no_such_func = 0; /* True if no such function exists */ int wrong_num_args = 0; /* True if wrong number of arguments */ int is_agg = 0; /* True if is an aggregate function */ int auth; /* Authorization to use the function */ int nId; /* Number of characters in function name */ const char *zId; /* The function name. */ FuncDef *pDef; /* Information about the function */ u8 enc = ENC(pParse->db); /* The database encoding */ assert( !ExprHasProperty(pExpr, EP_xIsSelect) ); notValidPartIdxWhere(pParse, pNC, "functions"); zId = pExpr->u.zToken; nId = sqlite3Strlen30(zId); pDef = sqlite3FindFunction(pParse->db, zId, nId, n, enc, 0); if( pDef==0 ){ pDef = sqlite3FindFunction(pParse->db, zId, nId, -2, enc, 0); if( pDef==0 ){ no_such_func = 1; }else{ wrong_num_args = 1; } }else{ is_agg = pDef->xFunc==0; if( pDef->funcFlags & SQLITE_FUNC_UNLIKELY ){ ExprSetProperty(pExpr, EP_Unlikely|EP_Skip); if( n==2 ){ pExpr->iTable = exprProbability(pList->a[1].pExpr); if( pExpr->iTable<0 ){ sqlite3ErrorMsg(pParse, "second argument to likelihood() must be a " "constant between 0.0 and 1.0"); pNC->nErr++; } }else{ /* EVIDENCE-OF: R-61304-29449 The unlikely(X) function is ** equivalent to likelihood(X, 0.0625). ** EVIDENCE-OF: R-01283-11636 The unlikely(X) function is ** short-hand for likelihood(X,0.0625). ** EVIDENCE-OF: R-36850-34127 The likely(X) function is short-hand ** for likelihood(X,0.9375). ** EVIDENCE-OF: R-53436-40973 The likely(X) function is equivalent ** to likelihood(X,0.9375). */ /* TUNING: unlikely() probability is 0.0625. likely() is 0.9375 */ pExpr->iTable = pDef->zName[0]=='u' ? 
8388608 : 125829120; } } #ifndef SQLITE_OMIT_AUTHORIZATION auth = sqlite3AuthCheck(pParse, SQLITE_FUNCTION, 0, pDef->zName, 0); if( auth!=SQLITE_OK ){ if( auth==SQLITE_DENY ){ sqlite3ErrorMsg(pParse, "not authorized to use function: %s", pDef->zName); pNC->nErr++; } pExpr->op = TK_NULL; return WRC_Prune; } #endif if( pDef->funcFlags & SQLITE_FUNC_CONSTANT ){ ExprSetProperty(pExpr,EP_ConstFunc); } } if( is_agg && (pNC->ncFlags & NC_AllowAgg)==0 ){ sqlite3ErrorMsg(pParse, "misuse of aggregate function %.*s()", nId,zId); pNC->nErr++; is_agg = 0; }else if( no_such_func && pParse->db->init.busy==0 ){ sqlite3ErrorMsg(pParse, "no such function: %.*s", nId, zId); pNC->nErr++; }else if( wrong_num_args ){ sqlite3ErrorMsg(pParse,"wrong number of arguments to function %.*s()", nId, zId); pNC->nErr++; } if( is_agg ) pNC->ncFlags &= ~NC_AllowAgg; sqlite3WalkExprList(pWalker, pList); if( is_agg ){ NameContext *pNC2 = pNC; pExpr->op = TK_AGG_FUNCTION; pExpr->op2 = 0; while( pNC2 && !sqlite3FunctionUsesThisSrc(pExpr, pNC2->pSrcList) ){ pExpr->op2++; pNC2 = pNC2->pNext; } assert( pDef!=0 ); if( pNC2 ){ assert( SQLITE_FUNC_MINMAX==NC_MinMaxAgg ); testcase( (pDef->funcFlags & SQLITE_FUNC_MINMAX)!=0 ); pNC2->ncFlags |= NC_HasAgg | (pDef->funcFlags & SQLITE_FUNC_MINMAX); } pNC->ncFlags |= NC_AllowAgg; } /* FIX ME: Compute pExpr->affinity based on the expected return ** type of the function */ return WRC_Prune; } #ifndef SQLITE_OMIT_SUBQUERY case TK_SELECT: case TK_EXISTS: testcase( pExpr->op==TK_EXISTS ); #endif case TK_IN: { testcase( pExpr->op==TK_IN ); if( ExprHasProperty(pExpr, EP_xIsSelect) ){ int nRef = pNC->nRef; notValidCheckConstraint(pParse, pNC, "subqueries"); notValidPartIdxWhere(pParse, pNC, "subqueries"); sqlite3WalkSelect(pWalker, pExpr->x.pSelect); assert( pNC->nRef>=nRef ); if( nRef!=pNC->nRef ){ ExprSetProperty(pExpr, EP_VarSelect); } } break; } case TK_VARIABLE: { notValidCheckConstraint(pParse, pNC, "parameters"); notValidPartIdxWhere(pParse, pNC, "parameters"); break; } } return (pParse->nErr || pParse->db->mallocFailed) ? WRC_Abort : WRC_Continue; } /* ** pEList is a list of expressions which are really the result set of the ** a SELECT statement. pE is a term in an ORDER BY or GROUP BY clause. ** This routine checks to see if pE is a simple identifier which corresponds ** to the AS-name of one of the terms of the expression list. If it is, ** this routine return an integer between 1 and N where N is the number of ** elements in pEList, corresponding to the matching entry. If there is ** no match, or if pE is not a simple identifier, then this routine ** return 0. ** ** pEList has been resolved. pE has not. */ static int resolveAsName( Parse *pParse, /* Parsing context for error messages */ ExprList *pEList, /* List of expressions to scan */ Expr *pE /* Expression we are trying to match */ ){ int i; /* Loop counter */ UNUSED_PARAMETER(pParse); if( pE->op==TK_ID ){ char *zCol = pE->u.zToken; for(i=0; inExpr; i++){ char *zAs = pEList->a[i].zName; if( zAs!=0 && sqlite3StrICmp(zAs, zCol)==0 ){ return i+1; } } } return 0; } /* ** pE is a pointer to an expression which is a single term in the ** ORDER BY of a compound SELECT. The expression has not been ** name resolved. ** ** At the point this routine is called, we already know that the ** ORDER BY term is not an integer index into the result set. That ** case is handled by the calling routine. ** ** Attempt to match pE against result set columns in the left-most ** SELECT statement. 
Return the index i of the matching column, ** as an indication to the caller that it should sort by the i-th column. ** The left-most column is 1. In other words, the value returned is the ** same integer value that would be used in the SQL statement to indicate ** the column. ** ** If there is no match, return 0. Return -1 if an error occurs. */ static int resolveOrderByTermToExprList( Parse *pParse, /* Parsing context for error messages */ Select *pSelect, /* The SELECT statement with the ORDER BY clause */ Expr *pE /* The specific ORDER BY term */ ){ int i; /* Loop counter */ ExprList *pEList; /* The columns of the result set */ NameContext nc; /* Name context for resolving pE */ sqlite3 *db; /* Database connection */ int rc; /* Return code from subprocedures */ u8 savedSuppErr; /* Saved value of db->suppressErr */ assert( sqlite3ExprIsInteger(pE, &i)==0 ); pEList = pSelect->pEList; /* Resolve all names in the ORDER BY term expression */ memset(&nc, 0, sizeof(nc)); nc.pParse = pParse; nc.pSrcList = pSelect->pSrc; nc.pEList = pEList; nc.ncFlags = NC_AllowAgg; nc.nErr = 0; db = pParse->db; savedSuppErr = db->suppressErr; db->suppressErr = 1; rc = sqlite3ResolveExprNames(&nc, pE); db->suppressErr = savedSuppErr; if( rc ) return 0; /* Try to match the ORDER BY expression against an expression ** in the result set. Return an 1-based index of the matching ** result-set entry. */ for(i=0; inExpr; i++){ if( sqlite3ExprCompare(pEList->a[i].pExpr, pE, -1)<2 ){ return i+1; } } /* If no match, return 0. */ return 0; } /* ** Generate an ORDER BY or GROUP BY term out-of-range error. */ static void resolveOutOfRangeError( Parse *pParse, /* The error context into which to write the error */ const char *zType, /* "ORDER" or "GROUP" */ int i, /* The index (1-based) of the term out of range */ int mx /* Largest permissible value of i */ ){ sqlite3ErrorMsg(pParse, "%r %s BY term out of range - should be " "between 1 and %d", i, zType, mx); } /* ** Analyze the ORDER BY clause in a compound SELECT statement. Modify ** each term of the ORDER BY clause is a constant integer between 1 ** and N where N is the number of columns in the compound SELECT. ** ** ORDER BY terms that are already an integer between 1 and N are ** unmodified. ORDER BY terms that are integers outside the range of ** 1 through N generate an error. ORDER BY terms that are expressions ** are matched against result set expressions of compound SELECT ** beginning with the left-most SELECT and working toward the right. ** At the first match, the ORDER BY expression is transformed into ** the integer column number. ** ** Return the number of errors seen. */ static int resolveCompoundOrderBy( Parse *pParse, /* Parsing context. 
Leave error messages here */ Select *pSelect /* The SELECT statement containing the ORDER BY */ ){ int i; ExprList *pOrderBy; ExprList *pEList; sqlite3 *db; int moreToDo = 1; pOrderBy = pSelect->pOrderBy; if( pOrderBy==0 ) return 0; db = pParse->db; #if SQLITE_MAX_COLUMN if( pOrderBy->nExpr>db->aLimit[SQLITE_LIMIT_COLUMN] ){ sqlite3ErrorMsg(pParse, "too many terms in ORDER BY clause"); return 1; } #endif for(i=0; inExpr; i++){ pOrderBy->a[i].done = 0; } pSelect->pNext = 0; while( pSelect->pPrior ){ pSelect->pPrior->pNext = pSelect; pSelect = pSelect->pPrior; } while( pSelect && moreToDo ){ struct ExprList_item *pItem; moreToDo = 0; pEList = pSelect->pEList; assert( pEList!=0 ); for(i=0, pItem=pOrderBy->a; inExpr; i++, pItem++){ int iCol = -1; Expr *pE, *pDup; if( pItem->done ) continue; pE = sqlite3ExprSkipCollate(pItem->pExpr); if( sqlite3ExprIsInteger(pE, &iCol) ){ if( iCol<=0 || iCol>pEList->nExpr ){ resolveOutOfRangeError(pParse, "ORDER", i+1, pEList->nExpr); return 1; } }else{ iCol = resolveAsName(pParse, pEList, pE); if( iCol==0 ){ pDup = sqlite3ExprDup(db, pE, 0); if( !db->mallocFailed ){ assert(pDup); iCol = resolveOrderByTermToExprList(pParse, pSelect, pDup); } sqlite3ExprDelete(db, pDup); } } if( iCol>0 ){ /* Convert the ORDER BY term into an integer column number iCol, ** taking care to preserve the COLLATE clause if it exists */ Expr *pNew = sqlite3Expr(db, TK_INTEGER, 0); if( pNew==0 ) return 1; pNew->flags |= EP_IntValue; pNew->u.iValue = iCol; if( pItem->pExpr==pE ){ pItem->pExpr = pNew; }else{ Expr *pParent = pItem->pExpr; assert( pParent->op==TK_COLLATE ); while( pParent->pLeft->op==TK_COLLATE ) pParent = pParent->pLeft; assert( pParent->pLeft==pE ); pParent->pLeft = pNew; } sqlite3ExprDelete(db, pE); pItem->u.x.iOrderByCol = (u16)iCol; pItem->done = 1; }else{ moreToDo = 1; } } pSelect = pSelect->pNext; } for(i=0; inExpr; i++){ if( pOrderBy->a[i].done==0 ){ sqlite3ErrorMsg(pParse, "%r ORDER BY term does not match any " "column in the result set", i+1); return 1; } } return 0; } /* ** Check every term in the ORDER BY or GROUP BY clause pOrderBy of ** the SELECT statement pSelect. If any term is reference to a ** result set expression (as determined by the ExprList.a.u.x.iOrderByCol ** field) then convert that term into a copy of the corresponding result set ** column. ** ** If any errors are detected, add an error message to pParse and ** return non-zero. Return zero if no errors are seen. */ SQLITE_PRIVATE int sqlite3ResolveOrderGroupBy( Parse *pParse, /* Parsing context. Leave error messages here */ Select *pSelect, /* The SELECT statement containing the clause */ ExprList *pOrderBy, /* The ORDER BY or GROUP BY clause to be processed */ const char *zType /* "ORDER" or "GROUP" */ ){ int i; sqlite3 *db = pParse->db; ExprList *pEList; struct ExprList_item *pItem; if( pOrderBy==0 || pParse->db->mallocFailed ) return 0; #if SQLITE_MAX_COLUMN if( pOrderBy->nExpr>db->aLimit[SQLITE_LIMIT_COLUMN] ){ sqlite3ErrorMsg(pParse, "too many terms in %s BY clause", zType); return 1; } #endif pEList = pSelect->pEList; assert( pEList!=0 ); /* sqlite3SelectNew() guarantees this */ for(i=0, pItem=pOrderBy->a; inExpr; i++, pItem++){ if( pItem->u.x.iOrderByCol ){ if( pItem->u.x.iOrderByCol>pEList->nExpr ){ resolveOutOfRangeError(pParse, zType, i+1, pEList->nExpr); return 1; } resolveAlias(pParse, pEList, pItem->u.x.iOrderByCol-1, pItem->pExpr, zType,0); } } return 0; } /* ** pOrderBy is an ORDER BY or GROUP BY clause in SELECT statement pSelect. 
** The Name context of the SELECT statement is pNC. zType is either ** "ORDER" or "GROUP" depending on which type of clause pOrderBy is. ** ** This routine resolves each term of the clause into an expression. ** If the order-by term is an integer I between 1 and N (where N is the ** number of columns in the result set of the SELECT) then the expression ** in the resolution is a copy of the I-th result-set expression. If ** the order-by term is an identifier that corresponds to the AS-name of ** a result-set expression, then the term resolves to a copy of the ** result-set expression. Otherwise, the expression is resolved in ** the usual way - using sqlite3ResolveExprNames(). ** ** This routine returns the number of errors. If errors occur, then ** an appropriate error message might be left in pParse. (OOM errors ** excepted.) */ static int resolveOrderGroupBy( NameContext *pNC, /* The name context of the SELECT statement */ Select *pSelect, /* The SELECT statement holding pOrderBy */ ExprList *pOrderBy, /* An ORDER BY or GROUP BY clause to resolve */ const char *zType /* Either "ORDER" or "GROUP", as appropriate */ ){ int i, j; /* Loop counters */ int iCol; /* Column number */ struct ExprList_item *pItem; /* A term of the ORDER BY clause */ Parse *pParse; /* Parsing context */ int nResult; /* Number of terms in the result set */ if( pOrderBy==0 ) return 0; nResult = pSelect->pEList->nExpr; pParse = pNC->pParse; for(i=0, pItem=pOrderBy->a; inExpr; i++, pItem++){ Expr *pE = pItem->pExpr; Expr *pE2 = sqlite3ExprSkipCollate(pE); if( zType[0]!='G' ){ iCol = resolveAsName(pParse, pSelect->pEList, pE2); if( iCol>0 ){ /* If an AS-name match is found, mark this ORDER BY column as being ** a copy of the iCol-th result-set column. The subsequent call to ** sqlite3ResolveOrderGroupBy() will convert the expression to a ** copy of the iCol-th result-set expression. */ pItem->u.x.iOrderByCol = (u16)iCol; continue; } } if( sqlite3ExprIsInteger(pE2, &iCol) ){ /* The ORDER BY term is an integer constant. Again, set the column ** number so that sqlite3ResolveOrderGroupBy() will convert the ** order-by term to a copy of the result-set expression */ if( iCol<1 || iCol>0xffff ){ resolveOutOfRangeError(pParse, zType, i+1, nResult); return 1; } pItem->u.x.iOrderByCol = (u16)iCol; continue; } /* Otherwise, treat the ORDER BY term as an ordinary expression */ pItem->u.x.iOrderByCol = 0; if( sqlite3ResolveExprNames(pNC, pE) ){ return 1; } for(j=0; jpEList->nExpr; j++){ if( sqlite3ExprCompare(pE, pSelect->pEList->a[j].pExpr, -1)==0 ){ pItem->u.x.iOrderByCol = j+1; } } } return sqlite3ResolveOrderGroupBy(pParse, pSelect, pOrderBy, zType); } /* ** Resolve names in the SELECT statement p and all of its descendants. */ static int resolveSelectStep(Walker *pWalker, Select *p){ NameContext *pOuterNC; /* Context that contains this SELECT */ NameContext sNC; /* Name context of this SELECT */ int isCompound; /* True if p is a compound select */ int nCompound; /* Number of compound terms processed so far */ Parse *pParse; /* Parsing context */ ExprList *pEList; /* Result set expression list */ int i; /* Loop counter */ ExprList *pGroupBy; /* The GROUP BY clause */ Select *pLeftmost; /* Left-most of SELECT of a compound */ sqlite3 *db; /* Database connection */ assert( p!=0 ); if( p->selFlags & SF_Resolved ){ return WRC_Prune; } pOuterNC = pWalker->u.pNC; pParse = pWalker->pParse; db = pParse->db; /* Normally sqlite3SelectExpand() will be called first and will have ** already expanded this SELECT. 
However, if this is a subquery within ** an expression, sqlite3ResolveExprNames() will be called without a ** prior call to sqlite3SelectExpand(). When that happens, let ** sqlite3SelectPrep() do all of the processing for this SELECT. ** sqlite3SelectPrep() will invoke both sqlite3SelectExpand() and ** this routine in the correct order. */ if( (p->selFlags & SF_Expanded)==0 ){ sqlite3SelectPrep(pParse, p, pOuterNC); return (pParse->nErr || db->mallocFailed) ? WRC_Abort : WRC_Prune; } isCompound = p->pPrior!=0; nCompound = 0; pLeftmost = p; while( p ){ assert( (p->selFlags & SF_Expanded)!=0 ); assert( (p->selFlags & SF_Resolved)==0 ); p->selFlags |= SF_Resolved; /* Resolve the expressions in the LIMIT and OFFSET clauses. These ** are not allowed to refer to any names, so pass an empty NameContext. */ memset(&sNC, 0, sizeof(sNC)); sNC.pParse = pParse; if( sqlite3ResolveExprNames(&sNC, p->pLimit) || sqlite3ResolveExprNames(&sNC, p->pOffset) ){ return WRC_Abort; } /* If the SF_Converted flags is set, then this Select object was ** was created by the convertCompoundSelectToSubquery() function. ** In this case the ORDER BY clause (p->pOrderBy) should be resolved ** as if it were part of the sub-query, not the parent. This block ** moves the pOrderBy down to the sub-query. It will be moved back ** after the names have been resolved. */ if( p->selFlags & SF_Converted ){ Select *pSub = p->pSrc->a[0].pSelect; assert( p->pSrc->nSrc==1 && p->pOrderBy ); assert( pSub->pPrior && pSub->pOrderBy==0 ); pSub->pOrderBy = p->pOrderBy; p->pOrderBy = 0; } /* Recursively resolve names in all subqueries */ for(i=0; ipSrc->nSrc; i++){ struct SrcList_item *pItem = &p->pSrc->a[i]; if( pItem->pSelect ){ NameContext *pNC; /* Used to iterate name contexts */ int nRef = 0; /* Refcount for pOuterNC and outer contexts */ const char *zSavedContext = pParse->zAuthContext; /* Count the total number of references to pOuterNC and all of its ** parent contexts. After resolving references to expressions in ** pItem->pSelect, check if this value has changed. If so, then ** SELECT statement pItem->pSelect must be correlated. Set the ** pItem->isCorrelated flag if this is the case. */ for(pNC=pOuterNC; pNC; pNC=pNC->pNext) nRef += pNC->nRef; if( pItem->zName ) pParse->zAuthContext = pItem->zName; sqlite3ResolveSelectNames(pParse, pItem->pSelect, pOuterNC); pParse->zAuthContext = zSavedContext; if( pParse->nErr || db->mallocFailed ) return WRC_Abort; for(pNC=pOuterNC; pNC; pNC=pNC->pNext) nRef -= pNC->nRef; assert( pItem->isCorrelated==0 && nRef<=0 ); pItem->isCorrelated = (nRef!=0); } } /* Set up the local name-context to pass to sqlite3ResolveExprNames() to ** resolve the result-set expression list. */ sNC.ncFlags = NC_AllowAgg; sNC.pSrcList = p->pSrc; sNC.pNext = pOuterNC; /* Resolve names in the result set. */ pEList = p->pEList; assert( pEList!=0 ); for(i=0; inExpr; i++){ Expr *pX = pEList->a[i].pExpr; if( sqlite3ResolveExprNames(&sNC, pX) ){ return WRC_Abort; } } /* If there are no aggregate functions in the result-set, and no GROUP BY ** expression, do not allow aggregates in any of the other expressions. */ assert( (p->selFlags & SF_Aggregate)==0 ); pGroupBy = p->pGroupBy; if( pGroupBy || (sNC.ncFlags & NC_HasAgg)!=0 ){ assert( NC_MinMaxAgg==SF_MinMaxAgg ); p->selFlags |= SF_Aggregate | (sNC.ncFlags&NC_MinMaxAgg); }else{ sNC.ncFlags &= ~NC_AllowAgg; } /* If a HAVING clause is present, then there must be a GROUP BY clause. 
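** For example (illustrative query), "SELECT x FROM t1 HAVING x>0" has a
** HAVING clause but no GROUP BY, so the test below reports an error.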
*/ if( p->pHaving && !pGroupBy ){ sqlite3ErrorMsg(pParse, "a GROUP BY clause is required before HAVING"); return WRC_Abort; } /* Add the output column list to the name-context before parsing the ** other expressions in the SELECT statement. This is so that ** expressions in the WHERE clause (etc.) can refer to expressions by ** aliases in the result set. ** ** Minor point: If this is the case, then the expression will be ** re-evaluated for each reference to it. */ sNC.pEList = p->pEList; if( sqlite3ResolveExprNames(&sNC, p->pHaving) ) return WRC_Abort; if( sqlite3ResolveExprNames(&sNC, p->pWhere) ) return WRC_Abort; /* The ORDER BY and GROUP BY clauses may not refer to terms in ** outer queries */ sNC.pNext = 0; sNC.ncFlags |= NC_AllowAgg; /* If this is a converted compound query, move the ORDER BY clause from ** the sub-query back to the parent query. At this point each term ** within the ORDER BY clause has been transformed to an integer value. ** These integers will be replaced by copies of the corresponding result ** set expressions by the call to resolveOrderGroupBy() below. */ if( p->selFlags & SF_Converted ){ Select *pSub = p->pSrc->a[0].pSelect; p->pOrderBy = pSub->pOrderBy; pSub->pOrderBy = 0; } /* Process the ORDER BY clause for singleton SELECT statements. ** The ORDER BY clause for compounds SELECT statements is handled ** below, after all of the result-sets for all of the elements of ** the compound have been resolved. ** ** If there is an ORDER BY clause on a term of a compound-select other ** than the right-most term, then that is a syntax error. But the error ** is not detected until much later, and so we need to go ahead and ** resolve those symbols on the incorrect ORDER BY for consistency. */ if( isCompound<=nCompound /* Defer right-most ORDER BY of a compound */ && resolveOrderGroupBy(&sNC, p, p->pOrderBy, "ORDER") ){ return WRC_Abort; } if( db->mallocFailed ){ return WRC_Abort; } /* Resolve the GROUP BY clause. At the same time, make sure ** the GROUP BY clause does not contain aggregate functions. */ if( pGroupBy ){ struct ExprList_item *pItem; if( resolveOrderGroupBy(&sNC, p, pGroupBy, "GROUP") || db->mallocFailed ){ return WRC_Abort; } for(i=0, pItem=pGroupBy->a; inExpr; i++, pItem++){ if( ExprHasProperty(pItem->pExpr, EP_Agg) ){ sqlite3ErrorMsg(pParse, "aggregate functions are not allowed in " "the GROUP BY clause"); return WRC_Abort; } } } /* If this is part of a compound SELECT, check that it has the right ** number of expressions in the select list. */ if( p->pNext && p->pEList->nExpr!=p->pNext->pEList->nExpr ){ sqlite3SelectWrongNumTermsError(pParse, p->pNext); return WRC_Abort; } /* Advance to the next term of the compound */ p = p->pPrior; nCompound++; } /* Resolve the ORDER BY on a compound SELECT after all terms of ** the compound have been resolved. */ if( isCompound && resolveCompoundOrderBy(pParse, pLeftmost) ){ return WRC_Abort; } return WRC_Prune; } /* ** This routine walks an expression tree and resolves references to ** table columns and result-set columns. At the same time, do error ** checking on function usage and set a flag if any aggregate functions ** are seen. ** ** To resolve table columns references we look for nodes (or subtrees) of the ** form X.Y.Z or Y.Z or just Z where ** ** X: The name of a database. Ex: "main" or "temp" or ** the symbolic name assigned to an ATTACH-ed database. ** ** Y: The name of a table in a FROM clause. Or in a trigger ** one of the special names "old" or "new". ** ** Z: The name of a column in table Y. 
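**
** For example (illustrative query), in "SELECT main.t1.c FROM t1" the
** reference "main.t1.c" has X=="main", Y=="t1", and Z=="c".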
** ** The node at the root of the subtree is modified as follows: ** ** Expr.op Changed to TK_COLUMN ** Expr.pTab Points to the Table object for X.Y ** Expr.iColumn The column index in X.Y. -1 for the rowid. ** Expr.iTable The VDBE cursor number for X.Y ** ** ** To resolve result-set references, look for expression nodes of the ** form Z (with no X and Y prefix) where the Z matches the right-hand ** size of an AS clause in the result-set of a SELECT. The Z expression ** is replaced by a copy of the left-hand side of the result-set expression. ** Table-name and function resolution occurs on the substituted expression ** tree. For example, in: ** ** SELECT a+b AS x, c+d AS y FROM t1 ORDER BY x; ** ** The "x" term of the order by is replaced by "a+b" to render: ** ** SELECT a+b AS x, c+d AS y FROM t1 ORDER BY a+b; ** ** Function calls are checked to make sure that the function is ** defined and that the correct number of arguments are specified. ** If the function is an aggregate function, then the NC_HasAgg flag is ** set and the opcode is changed from TK_FUNCTION to TK_AGG_FUNCTION. ** If an expression contains aggregate functions then the EP_Agg ** property on the expression is set. ** ** An error message is left in pParse if anything is amiss. The number ** if errors is returned. */ SQLITE_PRIVATE int sqlite3ResolveExprNames( NameContext *pNC, /* Namespace to resolve expressions in. */ Expr *pExpr /* The expression to be analyzed. */ ){ u16 savedHasAgg; Walker w; if( pExpr==0 ) return 0; #if SQLITE_MAX_EXPR_DEPTH>0 { Parse *pParse = pNC->pParse; if( sqlite3ExprCheckHeight(pParse, pExpr->nHeight+pNC->pParse->nHeight) ){ return 1; } pParse->nHeight += pExpr->nHeight; } #endif savedHasAgg = pNC->ncFlags & (NC_HasAgg|NC_MinMaxAgg); pNC->ncFlags &= ~(NC_HasAgg|NC_MinMaxAgg); memset(&w, 0, sizeof(w)); w.xExprCallback = resolveExprStep; w.xSelectCallback = resolveSelectStep; w.pParse = pNC->pParse; w.u.pNC = pNC; sqlite3WalkExpr(&w, pExpr); #if SQLITE_MAX_EXPR_DEPTH>0 pNC->pParse->nHeight -= pExpr->nHeight; #endif if( pNC->nErr>0 || w.pParse->nErr>0 ){ ExprSetProperty(pExpr, EP_Error); } if( pNC->ncFlags & NC_HasAgg ){ ExprSetProperty(pExpr, EP_Agg); } pNC->ncFlags |= savedHasAgg; return ExprHasProperty(pExpr, EP_Error); } /* ** Resolve all names in all expressions of a SELECT and in all ** decendents of the SELECT, including compounds off of p->pPrior, ** subqueries in expressions, and subqueries used as FROM clause ** terms. ** ** See sqlite3ResolveExprNames() for a description of the kinds of ** transformations that occur. ** ** All SELECT statements should have been expanded using ** sqlite3SelectExpand() prior to invoking this routine. */ SQLITE_PRIVATE void sqlite3ResolveSelectNames( Parse *pParse, /* The parser context */ Select *p, /* The SELECT statement being coded. */ NameContext *pOuterNC /* Name context for parent SELECT statement */ ){ Walker w; assert( p!=0 ); memset(&w, 0, sizeof(w)); w.xExprCallback = resolveExprStep; w.xSelectCallback = resolveSelectStep; w.pParse = pParse; w.u.pNC = pOuterNC; sqlite3WalkSelect(&w, p); } /* ** Resolve names in expressions that can only reference a single table: ** ** * CHECK constraints ** * WHERE clauses on partial indices ** ** The Expr.iTable value for Expr.op==TK_COLUMN nodes of the expression ** is set to -1 and the Expr.iColumn value is set to the column number. ** ** Any errors cause an error message to be set in pParse. 
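**
** For example (illustrative schema), when resolving the CHECK constraint in
**
**     CREATE TABLE t1(a INTEGER, b INTEGER, CHECK(a<b));
**
** the names "a" and "b" may only refer to columns of t1 itself, which is
** why the routine below builds a one-entry fake SrcList for pTab.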
*/ SQLITE_PRIVATE void sqlite3ResolveSelfReference( Parse *pParse, /* Parsing context */ Table *pTab, /* The table being referenced */ int type, /* NC_IsCheck or NC_PartIdx */ Expr *pExpr, /* Expression to resolve. May be NULL. */ ExprList *pList /* Expression list to resolve. May be NUL. */ ){ SrcList sSrc; /* Fake SrcList for pParse->pNewTable */ NameContext sNC; /* Name context for pParse->pNewTable */ int i; /* Loop counter */ assert( type==NC_IsCheck || type==NC_PartIdx ); memset(&sNC, 0, sizeof(sNC)); memset(&sSrc, 0, sizeof(sSrc)); sSrc.nSrc = 1; sSrc.a[0].zName = pTab->zName; sSrc.a[0].pTab = pTab; sSrc.a[0].iCursor = -1; sNC.pParse = pParse; sNC.pSrcList = &sSrc; sNC.ncFlags = type; if( sqlite3ResolveExprNames(&sNC, pExpr) ) return; if( pList ){ for(i=0; inExpr; i++){ if( sqlite3ResolveExprNames(&sNC, pList->a[i].pExpr) ){ return; } } } } /************** End of resolve.c *********************************************/ /************** Begin file expr.c ********************************************/ /* ** 2001 September 15 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** This file contains routines used for analyzing expressions and ** for generating VDBE code that evaluates expressions in SQLite. */ /* #include "sqliteInt.h" */ /* ** Return the 'affinity' of the expression pExpr if any. ** ** If pExpr is a column, a reference to a column via an 'AS' alias, ** or a sub-select with a column as the return value, then the ** affinity of that column is returned. Otherwise, 0x00 is returned, ** indicating no affinity for the expression. ** ** i.e. the WHERE clause expressions in the following statements all ** have an affinity: ** ** CREATE TABLE t1(a); ** SELECT * FROM t1 WHERE a; ** SELECT a AS b FROM t1 WHERE b; ** SELECT * FROM t1 WHERE (select a from t1); */ SQLITE_PRIVATE char sqlite3ExprAffinity(Expr *pExpr){ int op; pExpr = sqlite3ExprSkipCollate(pExpr); if( pExpr->flags & EP_Generic ) return 0; op = pExpr->op; if( op==TK_SELECT ){ assert( pExpr->flags&EP_xIsSelect ); return sqlite3ExprAffinity(pExpr->x.pSelect->pEList->a[0].pExpr); } #ifndef SQLITE_OMIT_CAST if( op==TK_CAST ){ assert( !ExprHasProperty(pExpr, EP_IntValue) ); return sqlite3AffinityType(pExpr->u.zToken, 0); } #endif if( (op==TK_AGG_COLUMN || op==TK_COLUMN || op==TK_REGISTER) && pExpr->pTab!=0 ){ /* op==TK_REGISTER && pExpr->pTab!=0 happens when pExpr was originally ** a TK_COLUMN but was previously evaluated and cached in a register */ int j = pExpr->iColumn; if( j<0 ) return SQLITE_AFF_INTEGER; assert( pExpr->pTab && jpTab->nCol ); return pExpr->pTab->aCol[j].affinity; } return pExpr->affinity; } /* ** Set the collating sequence for expression pExpr to be the collating ** sequence named by pToken. Return a pointer to a new Expr node that ** implements the COLLATE operator. ** ** If a memory allocation error occurs, that fact is recorded in pParse->db ** and the pExpr parameter is returned unchanged. 
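**
** For example (illustrative expression), parsing "x COLLATE nocase" wraps
** the node for "x" as the left child of a new TK_COLLATE node:
**
**     TK_COLLATE ("nocase")
**       `-- pLeft --> (expression node for "x")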
*/ SQLITE_PRIVATE Expr *sqlite3ExprAddCollateToken( Parse *pParse, /* Parsing context */ Expr *pExpr, /* Add the "COLLATE" clause to this expression */ const Token *pCollName, /* Name of collating sequence */ int dequote /* True to dequote pCollName */ ){ if( pCollName->n>0 ){ Expr *pNew = sqlite3ExprAlloc(pParse->db, TK_COLLATE, pCollName, dequote); if( pNew ){ pNew->pLeft = pExpr; pNew->flags |= EP_Collate|EP_Skip; pExpr = pNew; } } return pExpr; } SQLITE_PRIVATE Expr *sqlite3ExprAddCollateString(Parse *pParse, Expr *pExpr, const char *zC){ Token s; assert( zC!=0 ); s.z = zC; s.n = sqlite3Strlen30(s.z); return sqlite3ExprAddCollateToken(pParse, pExpr, &s, 0); } /* ** Skip over any TK_COLLATE or TK_AS operators and any unlikely() ** or likelihood() function at the root of an expression. */ SQLITE_PRIVATE Expr *sqlite3ExprSkipCollate(Expr *pExpr){ while( pExpr && ExprHasProperty(pExpr, EP_Skip) ){ if( ExprHasProperty(pExpr, EP_Unlikely) ){ assert( !ExprHasProperty(pExpr, EP_xIsSelect) ); assert( pExpr->x.pList->nExpr>0 ); assert( pExpr->op==TK_FUNCTION ); pExpr = pExpr->x.pList->a[0].pExpr; }else{ assert( pExpr->op==TK_COLLATE || pExpr->op==TK_AS ); pExpr = pExpr->pLeft; } } return pExpr; } /* ** Return the collation sequence for the expression pExpr. If ** there is no defined collating sequence, return NULL. ** ** The collating sequence might be determined by a COLLATE operator ** or by the presence of a column with a defined collating sequence. ** COLLATE operators take first precedence. Left operands take ** precedence over right operands. */ SQLITE_PRIVATE CollSeq *sqlite3ExprCollSeq(Parse *pParse, Expr *pExpr){ sqlite3 *db = pParse->db; CollSeq *pColl = 0; Expr *p = pExpr; while( p ){ int op = p->op; if( p->flags & EP_Generic ) break; if( op==TK_CAST || op==TK_UPLUS ){ p = p->pLeft; continue; } if( op==TK_COLLATE || (op==TK_REGISTER && p->op2==TK_COLLATE) ){ pColl = sqlite3GetCollSeq(pParse, ENC(db), 0, p->u.zToken); break; } if( (op==TK_AGG_COLUMN || op==TK_COLUMN || op==TK_REGISTER || op==TK_TRIGGER) && p->pTab!=0 ){ /* op==TK_REGISTER && p->pTab!=0 happens when pExpr was originally ** a TK_COLUMN but was previously evaluated and cached in a register */ int j = p->iColumn; if( j>=0 ){ const char *zColl = p->pTab->aCol[j].zColl; pColl = sqlite3FindCollSeq(db, ENC(db), zColl, 0); } break; } if( p->flags & EP_Collate ){ if( p->pLeft && (p->pLeft->flags & EP_Collate)!=0 ){ p = p->pLeft; }else{ Expr *pNext = p->pRight; /* The Expr.x union is never used at the same time as Expr.pRight */ assert( p->x.pList==0 || p->pRight==0 ); /* p->flags holds EP_Collate and p->pLeft->flags does not. And ** p->x.pSelect cannot. So if p->x.pLeft exists, it must hold at ** least one EP_Collate. Thus the following two ALWAYS. */ if( p->x.pList!=0 && ALWAYS(!ExprHasProperty(p, EP_xIsSelect)) ){ int i; for(i=0; ALWAYS(ix.pList->nExpr); i++){ if( ExprHasProperty(p->x.pList->a[i].pExpr, EP_Collate) ){ pNext = p->x.pList->a[i].pExpr; break; } } } p = pNext; } }else{ break; } } if( sqlite3CheckCollSeq(pParse, pColl) ){ pColl = 0; } return pColl; } /* ** pExpr is an operand of a comparison operator. aff2 is the ** type affinity of the other operand. This routine returns the ** type affinity that should be used for the comparison operator. */ SQLITE_PRIVATE char sqlite3CompareAffinity(Expr *pExpr, char aff2){ char aff1 = sqlite3ExprAffinity(pExpr); if( aff1 && aff2 ){ /* Both sides of the comparison are columns. If one has numeric ** affinity, use that. Otherwise use no affinity. 
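** For example (illustrative schema), comparing an INTEGER column with a
** TEXT column yields NUMERIC affinity, while comparing two TEXT columns
** falls through to BLOB (no) affinity as computed by this routine.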
*/ if( sqlite3IsNumericAffinity(aff1) || sqlite3IsNumericAffinity(aff2) ){ return SQLITE_AFF_NUMERIC; }else{ return SQLITE_AFF_BLOB; } }else if( !aff1 && !aff2 ){ /* Neither side of the comparison is a column. Compare the ** results directly. */ return SQLITE_AFF_BLOB; }else{ /* One side is a column, the other is not. Use the columns affinity. */ assert( aff1==0 || aff2==0 ); return (aff1 + aff2); } } /* ** pExpr is a comparison operator. Return the type affinity that should ** be applied to both operands prior to doing the comparison. */ static char comparisonAffinity(Expr *pExpr){ char aff; assert( pExpr->op==TK_EQ || pExpr->op==TK_IN || pExpr->op==TK_LT || pExpr->op==TK_GT || pExpr->op==TK_GE || pExpr->op==TK_LE || pExpr->op==TK_NE || pExpr->op==TK_IS || pExpr->op==TK_ISNOT ); assert( pExpr->pLeft ); aff = sqlite3ExprAffinity(pExpr->pLeft); if( pExpr->pRight ){ aff = sqlite3CompareAffinity(pExpr->pRight, aff); }else if( ExprHasProperty(pExpr, EP_xIsSelect) ){ aff = sqlite3CompareAffinity(pExpr->x.pSelect->pEList->a[0].pExpr, aff); }else if( !aff ){ aff = SQLITE_AFF_BLOB; } return aff; } /* ** pExpr is a comparison expression, eg. '=', '<', IN(...) etc. ** idx_affinity is the affinity of an indexed column. Return true ** if the index with affinity idx_affinity may be used to implement ** the comparison in pExpr. */ SQLITE_PRIVATE int sqlite3IndexAffinityOk(Expr *pExpr, char idx_affinity){ char aff = comparisonAffinity(pExpr); switch( aff ){ case SQLITE_AFF_BLOB: return 1; case SQLITE_AFF_TEXT: return idx_affinity==SQLITE_AFF_TEXT; default: return sqlite3IsNumericAffinity(idx_affinity); } } /* ** Return the P5 value that should be used for a binary comparison ** opcode (OP_Eq, OP_Ge etc.) used to compare pExpr1 and pExpr2. */ static u8 binaryCompareP5(Expr *pExpr1, Expr *pExpr2, int jumpIfNull){ u8 aff = (char)sqlite3ExprAffinity(pExpr2); aff = (u8)sqlite3CompareAffinity(pExpr1, aff) | (u8)jumpIfNull; return aff; } /* ** Return a pointer to the collation sequence that should be used by ** a binary comparison operator comparing pLeft and pRight. ** ** If the left hand expression has a collating sequence type, then it is ** used. Otherwise the collation sequence for the right hand expression ** is used, or the default (BINARY) if neither expression has a collating ** type. ** ** Argument pRight (but not pLeft) may be a null pointer. In this case, ** it is not considered. */ SQLITE_PRIVATE CollSeq *sqlite3BinaryCompareCollSeq( Parse *pParse, Expr *pLeft, Expr *pRight ){ CollSeq *pColl; assert( pLeft ); if( pLeft->flags & EP_Collate ){ pColl = sqlite3ExprCollSeq(pParse, pLeft); }else if( pRight && (pRight->flags & EP_Collate)!=0 ){ pColl = sqlite3ExprCollSeq(pParse, pRight); }else{ pColl = sqlite3ExprCollSeq(pParse, pLeft); if( !pColl ){ pColl = sqlite3ExprCollSeq(pParse, pRight); } } return pColl; } /* ** Generate code for a comparison operator. */ static int codeCompare( Parse *pParse, /* The parsing (and code generating) context */ Expr *pLeft, /* The left operand */ Expr *pRight, /* The right operand */ int opcode, /* The comparison opcode */ int in1, int in2, /* Register holding operands */ int dest, /* Jump here if true. 
*/ int jumpIfNull /* If true, jump if either operand is NULL */ ){ int p5; int addr; CollSeq *p4; p4 = sqlite3BinaryCompareCollSeq(pParse, pLeft, pRight); p5 = binaryCompareP5(pLeft, pRight, jumpIfNull); addr = sqlite3VdbeAddOp4(pParse->pVdbe, opcode, in2, dest, in1, (void*)p4, P4_COLLSEQ); sqlite3VdbeChangeP5(pParse->pVdbe, (u8)p5); return addr; } #if SQLITE_MAX_EXPR_DEPTH>0 /* ** Check that argument nHeight is less than or equal to the maximum ** expression depth allowed. If it is not, leave an error message in ** pParse. */ SQLITE_PRIVATE int sqlite3ExprCheckHeight(Parse *pParse, int nHeight){ int rc = SQLITE_OK; int mxHeight = pParse->db->aLimit[SQLITE_LIMIT_EXPR_DEPTH]; if( nHeight>mxHeight ){ sqlite3ErrorMsg(pParse, "Expression tree is too large (maximum depth %d)", mxHeight ); rc = SQLITE_ERROR; } return rc; } /* The following three functions, heightOfExpr(), heightOfExprList() ** and heightOfSelect(), are used to determine the maximum height ** of any expression tree referenced by the structure passed as the ** first argument. ** ** If this maximum height is greater than the current value pointed ** to by pnHeight, the second parameter, then set *pnHeight to that ** value. */ static void heightOfExpr(Expr *p, int *pnHeight){ if( p ){ if( p->nHeight>*pnHeight ){ *pnHeight = p->nHeight; } } } static void heightOfExprList(ExprList *p, int *pnHeight){ if( p ){ int i; for(i=0; inExpr; i++){ heightOfExpr(p->a[i].pExpr, pnHeight); } } } static void heightOfSelect(Select *p, int *pnHeight){ if( p ){ heightOfExpr(p->pWhere, pnHeight); heightOfExpr(p->pHaving, pnHeight); heightOfExpr(p->pLimit, pnHeight); heightOfExpr(p->pOffset, pnHeight); heightOfExprList(p->pEList, pnHeight); heightOfExprList(p->pGroupBy, pnHeight); heightOfExprList(p->pOrderBy, pnHeight); heightOfSelect(p->pPrior, pnHeight); } } /* ** Set the Expr.nHeight variable in the structure passed as an ** argument. An expression with no children, Expr.pList or ** Expr.pSelect member has a height of 1. Any other expression ** has a height equal to the maximum height of any other ** referenced Expr plus one. ** ** Also propagate EP_Propagate flags up from Expr.x.pList to Expr.flags, ** if appropriate. */ static void exprSetHeight(Expr *p){ int nHeight = 0; heightOfExpr(p->pLeft, &nHeight); heightOfExpr(p->pRight, &nHeight); if( ExprHasProperty(p, EP_xIsSelect) ){ heightOfSelect(p->x.pSelect, &nHeight); }else if( p->x.pList ){ heightOfExprList(p->x.pList, &nHeight); p->flags |= EP_Propagate & sqlite3ExprListFlags(p->x.pList); } p->nHeight = nHeight + 1; } /* ** Set the Expr.nHeight variable using the exprSetHeight() function. If ** the height is greater than the maximum allowed expression depth, ** leave an error in pParse. ** ** Also propagate all EP_Propagate flags from the Expr.x.pList into ** Expr.flags. */ SQLITE_PRIVATE void sqlite3ExprSetHeightAndFlags(Parse *pParse, Expr *p){ if( pParse->nErr ) return; exprSetHeight(p); sqlite3ExprCheckHeight(pParse, p->nHeight); } /* ** Return the maximum height of any expression tree referenced ** by the select statement passed as an argument. */ SQLITE_PRIVATE int sqlite3SelectExprHeight(Select *p){ int nHeight = 0; heightOfSelect(p, &nHeight); return nHeight; } #else /* ABOVE: Height enforcement enabled. BELOW: Height enforcement off */ /* ** Propagate all EP_Propagate flags from the Expr.x.pList into ** Expr.flags. 
*/ SQLITE_PRIVATE void sqlite3ExprSetHeightAndFlags(Parse *pParse, Expr *p){ if( p && p->x.pList && !ExprHasProperty(p, EP_xIsSelect) ){ p->flags |= EP_Propagate & sqlite3ExprListFlags(p->x.pList); } } #define exprSetHeight(y) #endif /* SQLITE_MAX_EXPR_DEPTH>0 */ /* ** This routine is the core allocator for Expr nodes. ** ** Construct a new expression node and return a pointer to it. Memory ** for this node and for the pToken argument is a single allocation ** obtained from sqlite3DbMalloc(). The calling function ** is responsible for making sure the node eventually gets freed. ** ** If dequote is true, then the token (if it exists) is dequoted. ** If dequote is false, no dequoting is performance. The deQuote ** parameter is ignored if pToken is NULL or if the token does not ** appear to be quoted. If the quotes were of the form "..." (double-quotes) ** then the EP_DblQuoted flag is set on the expression node. ** ** Special case: If op==TK_INTEGER and pToken points to a string that ** can be translated into a 32-bit integer, then the token is not ** stored in u.zToken. Instead, the integer values is written ** into u.iValue and the EP_IntValue flag is set. No extra storage ** is allocated to hold the integer text and the dequote flag is ignored. */ SQLITE_PRIVATE Expr *sqlite3ExprAlloc( sqlite3 *db, /* Handle for sqlite3DbMallocZero() (may be null) */ int op, /* Expression opcode */ const Token *pToken, /* Token argument. Might be NULL */ int dequote /* True to dequote */ ){ Expr *pNew; int nExtra = 0; int iValue = 0; if( pToken ){ if( op!=TK_INTEGER || pToken->z==0 || sqlite3GetInt32(pToken->z, &iValue)==0 ){ nExtra = pToken->n+1; assert( iValue>=0 ); } } pNew = sqlite3DbMallocZero(db, sizeof(Expr)+nExtra); if( pNew ){ pNew->op = (u8)op; pNew->iAgg = -1; if( pToken ){ if( nExtra==0 ){ pNew->flags |= EP_IntValue; pNew->u.iValue = iValue; }else{ int c; pNew->u.zToken = (char*)&pNew[1]; assert( pToken->z!=0 || pToken->n==0 ); if( pToken->n ) memcpy(pNew->u.zToken, pToken->z, pToken->n); pNew->u.zToken[pToken->n] = 0; if( dequote && nExtra>=3 && ((c = pToken->z[0])=='\'' || c=='"' || c=='[' || c=='`') ){ sqlite3Dequote(pNew->u.zToken); if( c=='"' ) pNew->flags |= EP_DblQuoted; } } } #if SQLITE_MAX_EXPR_DEPTH>0 pNew->nHeight = 1; #endif } return pNew; } /* ** Allocate a new expression node from a zero-terminated token that has ** already been dequoted. */ SQLITE_PRIVATE Expr *sqlite3Expr( sqlite3 *db, /* Handle for sqlite3DbMallocZero() (may be null) */ int op, /* Expression opcode */ const char *zToken /* Token argument. Might be NULL */ ){ Token x; x.z = zToken; x.n = zToken ? sqlite3Strlen30(zToken) : 0; return sqlite3ExprAlloc(db, op, &x, 0); } /* ** Attach subtrees pLeft and pRight to the Expr node pRoot. ** ** If pRoot==NULL that means that a memory allocation error has occurred. ** In that case, delete the subtrees pLeft and pRight. */ SQLITE_PRIVATE void sqlite3ExprAttachSubtrees( sqlite3 *db, Expr *pRoot, Expr *pLeft, Expr *pRight ){ if( pRoot==0 ){ assert( db->mallocFailed ); sqlite3ExprDelete(db, pLeft); sqlite3ExprDelete(db, pRight); }else{ if( pRight ){ pRoot->pRight = pRight; pRoot->flags |= EP_Propagate & pRight->flags; } if( pLeft ){ pRoot->pLeft = pLeft; pRoot->flags |= EP_Propagate & pLeft->flags; } exprSetHeight(pRoot); } } /* ** Allocate an Expr node which joins as many as two subtrees. ** ** One or both of the subtrees can be NULL. Return a pointer to the new ** Expr node. Or, if an OOM error occurs, set pParse->db->mallocFailed, ** free the subtrees and return NULL. 
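**
** For instance, a binary expression such as "x=5 AND y=11" is typically built
** by invoking this routine with op==TK_AND and the two TK_EQ subexpressions
** passed as pLeft and pRight.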
*/ SQLITE_PRIVATE Expr *sqlite3PExpr( Parse *pParse, /* Parsing context */ int op, /* Expression opcode */ Expr *pLeft, /* Left operand */ Expr *pRight, /* Right operand */ const Token *pToken /* Argument token */ ){ Expr *p; if( op==TK_AND && pLeft && pRight && pParse->nErr==0 ){ /* Take advantage of short-circuit false optimization for AND */ p = sqlite3ExprAnd(pParse->db, pLeft, pRight); }else{ p = sqlite3ExprAlloc(pParse->db, op, pToken, 1); sqlite3ExprAttachSubtrees(pParse->db, p, pLeft, pRight); } if( p ) { sqlite3ExprCheckHeight(pParse, p->nHeight); } return p; } /* ** If the expression is always either TRUE or FALSE (respectively), ** then return 1. If one cannot determine the truth value of the ** expression at compile-time return 0. ** ** This is an optimization. If is OK to return 0 here even if ** the expression really is always false or false (a false negative). ** But it is a bug to return 1 if the expression might have different ** boolean values in different circumstances (a false positive.) ** ** Note that if the expression is part of conditional for a ** LEFT JOIN, then we cannot determine at compile-time whether or not ** is it true or false, so always return 0. */ static int exprAlwaysTrue(Expr *p){ int v = 0; if( ExprHasProperty(p, EP_FromJoin) ) return 0; if( !sqlite3ExprIsInteger(p, &v) ) return 0; return v!=0; } static int exprAlwaysFalse(Expr *p){ int v = 0; if( ExprHasProperty(p, EP_FromJoin) ) return 0; if( !sqlite3ExprIsInteger(p, &v) ) return 0; return v==0; } /* ** Join two expressions using an AND operator. If either expression is ** NULL, then just return the other expression. ** ** If one side or the other of the AND is known to be false, then instead ** of returning an AND expression, just return a constant expression with ** a value of false. */ SQLITE_PRIVATE Expr *sqlite3ExprAnd(sqlite3 *db, Expr *pLeft, Expr *pRight){ if( pLeft==0 ){ return pRight; }else if( pRight==0 ){ return pLeft; }else if( exprAlwaysFalse(pLeft) || exprAlwaysFalse(pRight) ){ sqlite3ExprDelete(db, pLeft); sqlite3ExprDelete(db, pRight); return sqlite3ExprAlloc(db, TK_INTEGER, &sqlite3IntTokens[0], 0); }else{ Expr *pNew = sqlite3ExprAlloc(db, TK_AND, 0, 0); sqlite3ExprAttachSubtrees(db, pNew, pLeft, pRight); return pNew; } } /* ** Construct a new expression node for a function with multiple ** arguments. */ SQLITE_PRIVATE Expr *sqlite3ExprFunction(Parse *pParse, ExprList *pList, Token *pToken){ Expr *pNew; sqlite3 *db = pParse->db; assert( pToken ); pNew = sqlite3ExprAlloc(db, TK_FUNCTION, pToken, 1); if( pNew==0 ){ sqlite3ExprListDelete(db, pList); /* Avoid memory leak when malloc fails */ return 0; } pNew->x.pList = pList; assert( !ExprHasProperty(pNew, EP_xIsSelect) ); sqlite3ExprSetHeightAndFlags(pParse, pNew); return pNew; } /* ** Assign a variable number to an expression that encodes a wildcard ** in the original SQL statement. ** ** Wildcards consisting of a single "?" are assigned the next sequential ** variable number. ** ** Wildcards of the form "?nnn" are assigned the number "nnn". We make ** sure "nnn" is not too be to avoid a denial of service attack when ** the SQL statement comes from an external source. ** ** Wildcards of the form ":aaa", "@aaa", or "$aaa" are assigned the same number ** as the previous instance of the same wildcard. Or if this is the first ** instance of the wildcard, the next sequential variable number is ** assigned. 
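**
** For example, in a statement containing the parameters "?", "?7", ":abc"
** and then another "?", in that order, the four wildcards are assigned the
** variable numbers 1, 7, 8, and 9 respectively.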
*/ SQLITE_PRIVATE void sqlite3ExprAssignVarNumber(Parse *pParse, Expr *pExpr){ sqlite3 *db = pParse->db; const char *z; if( pExpr==0 ) return; assert( !ExprHasProperty(pExpr, EP_IntValue|EP_Reduced|EP_TokenOnly) ); z = pExpr->u.zToken; assert( z!=0 ); assert( z[0]!=0 ); if( z[1]==0 ){ /* Wildcard of the form "?". Assign the next variable number */ assert( z[0]=='?' ); pExpr->iColumn = (ynVar)(++pParse->nVar); }else{ ynVar x = 0; u32 n = sqlite3Strlen30(z); if( z[0]=='?' ){ /* Wildcard of the form "?nnn". Convert "nnn" to an integer and ** use it as the variable number */ i64 i; int bOk = 0==sqlite3Atoi64(&z[1], &i, n-1, SQLITE_UTF8); pExpr->iColumn = x = (ynVar)i; testcase( i==0 ); testcase( i==1 ); testcase( i==db->aLimit[SQLITE_LIMIT_VARIABLE_NUMBER]-1 ); testcase( i==db->aLimit[SQLITE_LIMIT_VARIABLE_NUMBER] ); if( bOk==0 || i<1 || i>db->aLimit[SQLITE_LIMIT_VARIABLE_NUMBER] ){ sqlite3ErrorMsg(pParse, "variable number must be between ?1 and ?%d", db->aLimit[SQLITE_LIMIT_VARIABLE_NUMBER]); x = 0; } if( i>pParse->nVar ){ pParse->nVar = (int)i; } }else{ /* Wildcards like ":aaa", "$aaa" or "@aaa". Reuse the same variable ** number as the prior appearance of the same name, or if the name ** has never appeared before, reuse the same variable number */ ynVar i; for(i=0; inzVar; i++){ if( pParse->azVar[i] && strcmp(pParse->azVar[i],z)==0 ){ pExpr->iColumn = x = (ynVar)i+1; break; } } if( x==0 ) x = pExpr->iColumn = (ynVar)(++pParse->nVar); } if( x>0 ){ if( x>pParse->nzVar ){ char **a; a = sqlite3DbRealloc(db, pParse->azVar, x*sizeof(a[0])); if( a==0 ) return; /* Error reported through db->mallocFailed */ pParse->azVar = a; memset(&a[pParse->nzVar], 0, (x-pParse->nzVar)*sizeof(a[0])); pParse->nzVar = x; } if( z[0]!='?' || pParse->azVar[x-1]==0 ){ sqlite3DbFree(db, pParse->azVar[x-1]); pParse->azVar[x-1] = sqlite3DbStrNDup(db, z, n); } } } if( !pParse->nErr && pParse->nVar>db->aLimit[SQLITE_LIMIT_VARIABLE_NUMBER] ){ sqlite3ErrorMsg(pParse, "too many SQL variables"); } } /* ** Recursively delete an expression tree. */ SQLITE_PRIVATE void sqlite3ExprDelete(sqlite3 *db, Expr *p){ if( p==0 ) return; /* Sanity check: Assert that the IntValue is non-negative if it exists */ assert( !ExprHasProperty(p, EP_IntValue) || p->u.iValue>=0 ); if( !ExprHasProperty(p, EP_TokenOnly) ){ /* The Expr.x union is never used at the same time as Expr.pRight */ assert( p->x.pList==0 || p->pRight==0 ); sqlite3ExprDelete(db, p->pLeft); sqlite3ExprDelete(db, p->pRight); if( ExprHasProperty(p, EP_MemToken) ) sqlite3DbFree(db, p->u.zToken); if( ExprHasProperty(p, EP_xIsSelect) ){ sqlite3SelectDelete(db, p->x.pSelect); }else{ sqlite3ExprListDelete(db, p->x.pList); } } if( !ExprHasProperty(p, EP_Static) ){ sqlite3DbFree(db, p); } } /* ** Return the number of bytes allocated for the expression structure ** passed as the first argument. This is always one of EXPR_FULLSIZE, ** EXPR_REDUCEDSIZE or EXPR_TOKENONLYSIZE. */ static int exprStructSize(Expr *p){ if( ExprHasProperty(p, EP_TokenOnly) ) return EXPR_TOKENONLYSIZE; if( ExprHasProperty(p, EP_Reduced) ) return EXPR_REDUCEDSIZE; return EXPR_FULLSIZE; } /* ** The dupedExpr*Size() routines each return the number of bytes required ** to store a copy of an expression or expression tree. They differ in ** how much of the tree is measured. 
** ** dupedExprStructSize() Size of only the Expr structure ** dupedExprNodeSize() Size of Expr + space for token ** dupedExprSize() Expr + token + subtree components ** *************************************************************************** ** ** The dupedExprStructSize() function returns two values OR-ed together: ** (1) the space required for a copy of the Expr structure only and ** (2) the EP_xxx flags that indicate what the structure size should be. ** The return values is always one of: ** ** EXPR_FULLSIZE ** EXPR_REDUCEDSIZE | EP_Reduced ** EXPR_TOKENONLYSIZE | EP_TokenOnly ** ** The size of the structure can be found by masking the return value ** of this routine with 0xfff. The flags can be found by masking the ** return value with EP_Reduced|EP_TokenOnly. ** ** Note that with flags==EXPRDUP_REDUCE, this routines works on full-size ** (unreduced) Expr objects as they or originally constructed by the parser. ** During expression analysis, extra information is computed and moved into ** later parts of teh Expr object and that extra information might get chopped ** off if the expression is reduced. Note also that it does not work to ** make an EXPRDUP_REDUCE copy of a reduced expression. It is only legal ** to reduce a pristine expression tree from the parser. The implementation ** of dupedExprStructSize() contain multiple assert() statements that attempt ** to enforce this constraint. */ static int dupedExprStructSize(Expr *p, int flags){ int nSize; assert( flags==EXPRDUP_REDUCE || flags==0 ); /* Only one flag value allowed */ assert( EXPR_FULLSIZE<=0xfff ); assert( (0xfff & (EP_Reduced|EP_TokenOnly))==0 ); if( 0==(flags&EXPRDUP_REDUCE) ){ nSize = EXPR_FULLSIZE; }else{ assert( !ExprHasProperty(p, EP_TokenOnly|EP_Reduced) ); assert( !ExprHasProperty(p, EP_FromJoin) ); assert( !ExprHasProperty(p, EP_MemToken) ); assert( !ExprHasProperty(p, EP_NoReduce) ); if( p->pLeft || p->x.pList ){ nSize = EXPR_REDUCEDSIZE | EP_Reduced; }else{ assert( p->pRight==0 ); nSize = EXPR_TOKENONLYSIZE | EP_TokenOnly; } } return nSize; } /* ** This function returns the space in bytes required to store the copy ** of the Expr structure and a copy of the Expr.u.zToken string (if that ** string is defined.) */ static int dupedExprNodeSize(Expr *p, int flags){ int nByte = dupedExprStructSize(p, flags) & 0xfff; if( !ExprHasProperty(p, EP_IntValue) && p->u.zToken ){ nByte += sqlite3Strlen30(p->u.zToken)+1; } return ROUND8(nByte); } /* ** Return the number of bytes required to create a duplicate of the ** expression passed as the first argument. The second argument is a ** mask containing EXPRDUP_XXX flags. ** ** The value returned includes space to create a copy of the Expr struct ** itself and the buffer referred to by Expr.u.zToken, if any. ** ** If the EXPRDUP_REDUCE flag is set, then the return value includes ** space to duplicate all Expr nodes in the tree formed by Expr.pLeft ** and Expr.pRight variables (but not for any structures pointed to or ** descended from the Expr.x.pList or Expr.x.pSelect variables). 
*/ static int dupedExprSize(Expr *p, int flags){ int nByte = 0; if( p ){ nByte = dupedExprNodeSize(p, flags); if( flags&EXPRDUP_REDUCE ){ nByte += dupedExprSize(p->pLeft, flags) + dupedExprSize(p->pRight, flags); } } return nByte; } /* ** This function is similar to sqlite3ExprDup(), except that if pzBuffer ** is not NULL then *pzBuffer is assumed to point to a buffer large enough ** to store the copy of expression p, the copies of p->u.zToken ** (if applicable), and the copies of the p->pLeft and p->pRight expressions, ** if any. Before returning, *pzBuffer is set to the first byte past the ** portion of the buffer copied into by this function. */ static Expr *exprDup(sqlite3 *db, Expr *p, int flags, u8 **pzBuffer){ Expr *pNew = 0; /* Value to return */ if( p ){ const int isReduced = (flags&EXPRDUP_REDUCE); u8 *zAlloc; u32 staticFlag = 0; assert( pzBuffer==0 || isReduced ); /* Figure out where to write the new Expr structure. */ if( pzBuffer ){ zAlloc = *pzBuffer; staticFlag = EP_Static; }else{ zAlloc = sqlite3DbMallocRaw(db, dupedExprSize(p, flags)); } pNew = (Expr *)zAlloc; if( pNew ){ /* Set nNewSize to the size allocated for the structure pointed to ** by pNew. This is either EXPR_FULLSIZE, EXPR_REDUCEDSIZE or ** EXPR_TOKENONLYSIZE. nToken is set to the number of bytes consumed ** by the copy of the p->u.zToken string (if any). */ const unsigned nStructSize = dupedExprStructSize(p, flags); const int nNewSize = nStructSize & 0xfff; int nToken; if( !ExprHasProperty(p, EP_IntValue) && p->u.zToken ){ nToken = sqlite3Strlen30(p->u.zToken) + 1; }else{ nToken = 0; } if( isReduced ){ assert( ExprHasProperty(p, EP_Reduced)==0 ); memcpy(zAlloc, p, nNewSize); }else{ int nSize = exprStructSize(p); memcpy(zAlloc, p, nSize); memset(&zAlloc[nSize], 0, EXPR_FULLSIZE-nSize); } /* Set the EP_Reduced, EP_TokenOnly, and EP_Static flags appropriately. */ pNew->flags &= ~(EP_Reduced|EP_TokenOnly|EP_Static|EP_MemToken); pNew->flags |= nStructSize & (EP_Reduced|EP_TokenOnly); pNew->flags |= staticFlag; /* Copy the p->u.zToken string, if any. */ if( nToken ){ char *zToken = pNew->u.zToken = (char*)&zAlloc[nNewSize]; memcpy(zToken, p->u.zToken, nToken); } if( 0==((p->flags|pNew->flags) & EP_TokenOnly) ){ /* Fill in the pNew->x.pSelect or pNew->x.pList member. */ if( ExprHasProperty(p, EP_xIsSelect) ){ pNew->x.pSelect = sqlite3SelectDup(db, p->x.pSelect, isReduced); }else{ pNew->x.pList = sqlite3ExprListDup(db, p->x.pList, isReduced); } } /* Fill in pNew->pLeft and pNew->pRight. */ if( ExprHasProperty(pNew, EP_Reduced|EP_TokenOnly) ){ zAlloc += dupedExprNodeSize(p, flags); if( ExprHasProperty(pNew, EP_Reduced) ){ pNew->pLeft = exprDup(db, p->pLeft, EXPRDUP_REDUCE, &zAlloc); pNew->pRight = exprDup(db, p->pRight, EXPRDUP_REDUCE, &zAlloc); } if( pzBuffer ){ *pzBuffer = zAlloc; } }else{ if( !ExprHasProperty(p, EP_TokenOnly) ){ pNew->pLeft = sqlite3ExprDup(db, p->pLeft, 0); pNew->pRight = sqlite3ExprDup(db, p->pRight, 0); } } } } return pNew; } /* ** Create and return a deep copy of the object passed as the second ** argument. If an OOM condition is encountered, NULL is returned ** and the db->mallocFailed flag set. 
*/ #ifndef SQLITE_OMIT_CTE static With *withDup(sqlite3 *db, With *p){ With *pRet = 0; if( p ){ int nByte = sizeof(*p) + sizeof(p->a[0]) * (p->nCte-1); pRet = sqlite3DbMallocZero(db, nByte); if( pRet ){ int i; pRet->nCte = p->nCte; for(i=0; inCte; i++){ pRet->a[i].pSelect = sqlite3SelectDup(db, p->a[i].pSelect, 0); pRet->a[i].pCols = sqlite3ExprListDup(db, p->a[i].pCols, 0); pRet->a[i].zName = sqlite3DbStrDup(db, p->a[i].zName); } } } return pRet; } #else # define withDup(x,y) 0 #endif /* ** The following group of routines make deep copies of expressions, ** expression lists, ID lists, and select statements. The copies can ** be deleted (by being passed to their respective ...Delete() routines) ** without effecting the originals. ** ** The expression list, ID, and source lists return by sqlite3ExprListDup(), ** sqlite3IdListDup(), and sqlite3SrcListDup() can not be further expanded ** by subsequent calls to sqlite*ListAppend() routines. ** ** Any tables that the SrcList might point to are not duplicated. ** ** The flags parameter contains a combination of the EXPRDUP_XXX flags. ** If the EXPRDUP_REDUCE flag is set, then the structure returned is a ** truncated version of the usual Expr structure that will be stored as ** part of the in-memory representation of the database schema. */ SQLITE_PRIVATE Expr *sqlite3ExprDup(sqlite3 *db, Expr *p, int flags){ return exprDup(db, p, flags, 0); } SQLITE_PRIVATE ExprList *sqlite3ExprListDup(sqlite3 *db, ExprList *p, int flags){ ExprList *pNew; struct ExprList_item *pItem, *pOldItem; int i; if( p==0 ) return 0; pNew = sqlite3DbMallocRaw(db, sizeof(*pNew) ); if( pNew==0 ) return 0; pNew->nExpr = i = p->nExpr; if( (flags & EXPRDUP_REDUCE)==0 ) for(i=1; inExpr; i+=i){} pNew->a = pItem = sqlite3DbMallocRaw(db, i*sizeof(p->a[0]) ); if( pItem==0 ){ sqlite3DbFree(db, pNew); return 0; } pOldItem = p->a; for(i=0; inExpr; i++, pItem++, pOldItem++){ Expr *pOldExpr = pOldItem->pExpr; pItem->pExpr = sqlite3ExprDup(db, pOldExpr, flags); pItem->zName = sqlite3DbStrDup(db, pOldItem->zName); pItem->zSpan = sqlite3DbStrDup(db, pOldItem->zSpan); pItem->sortOrder = pOldItem->sortOrder; pItem->done = 0; pItem->bSpanIsTab = pOldItem->bSpanIsTab; pItem->u = pOldItem->u; } return pNew; } /* ** If cursors, triggers, views and subqueries are all omitted from ** the build, then none of the following routines, except for ** sqlite3SelectDup(), can be called. sqlite3SelectDup() is sometimes ** called with a NULL argument. */ #if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_TRIGGER) \ || !defined(SQLITE_OMIT_SUBQUERY) SQLITE_PRIVATE SrcList *sqlite3SrcListDup(sqlite3 *db, SrcList *p, int flags){ SrcList *pNew; int i; int nByte; if( p==0 ) return 0; nByte = sizeof(*p) + (p->nSrc>0 ? 
sizeof(p->a[0]) * (p->nSrc-1) : 0); pNew = sqlite3DbMallocRaw(db, nByte ); if( pNew==0 ) return 0; pNew->nSrc = pNew->nAlloc = p->nSrc; for(i=0; inSrc; i++){ struct SrcList_item *pNewItem = &pNew->a[i]; struct SrcList_item *pOldItem = &p->a[i]; Table *pTab; pNewItem->pSchema = pOldItem->pSchema; pNewItem->zDatabase = sqlite3DbStrDup(db, pOldItem->zDatabase); pNewItem->zName = sqlite3DbStrDup(db, pOldItem->zName); pNewItem->zAlias = sqlite3DbStrDup(db, pOldItem->zAlias); pNewItem->jointype = pOldItem->jointype; pNewItem->iCursor = pOldItem->iCursor; pNewItem->addrFillSub = pOldItem->addrFillSub; pNewItem->regReturn = pOldItem->regReturn; pNewItem->isCorrelated = pOldItem->isCorrelated; pNewItem->viaCoroutine = pOldItem->viaCoroutine; pNewItem->isRecursive = pOldItem->isRecursive; pNewItem->zIndexedBy = sqlite3DbStrDup(db, pOldItem->zIndexedBy); pNewItem->notIndexed = pOldItem->notIndexed; pNewItem->pIndex = pOldItem->pIndex; pTab = pNewItem->pTab = pOldItem->pTab; if( pTab ){ pTab->nRef++; } pNewItem->pSelect = sqlite3SelectDup(db, pOldItem->pSelect, flags); pNewItem->pOn = sqlite3ExprDup(db, pOldItem->pOn, flags); pNewItem->pUsing = sqlite3IdListDup(db, pOldItem->pUsing); pNewItem->colUsed = pOldItem->colUsed; } return pNew; } SQLITE_PRIVATE IdList *sqlite3IdListDup(sqlite3 *db, IdList *p){ IdList *pNew; int i; if( p==0 ) return 0; pNew = sqlite3DbMallocRaw(db, sizeof(*pNew) ); if( pNew==0 ) return 0; pNew->nId = p->nId; pNew->a = sqlite3DbMallocRaw(db, p->nId*sizeof(p->a[0]) ); if( pNew->a==0 ){ sqlite3DbFree(db, pNew); return 0; } /* Note that because the size of the allocation for p->a[] is not ** necessarily a power of two, sqlite3IdListAppend() may not be called ** on the duplicate created by this function. */ for(i=0; inId; i++){ struct IdList_item *pNewItem = &pNew->a[i]; struct IdList_item *pOldItem = &p->a[i]; pNewItem->zName = sqlite3DbStrDup(db, pOldItem->zName); pNewItem->idx = pOldItem->idx; } return pNew; } SQLITE_PRIVATE Select *sqlite3SelectDup(sqlite3 *db, Select *p, int flags){ Select *pNew, *pPrior; if( p==0 ) return 0; pNew = sqlite3DbMallocRaw(db, sizeof(*p) ); if( pNew==0 ) return 0; pNew->pEList = sqlite3ExprListDup(db, p->pEList, flags); pNew->pSrc = sqlite3SrcListDup(db, p->pSrc, flags); pNew->pWhere = sqlite3ExprDup(db, p->pWhere, flags); pNew->pGroupBy = sqlite3ExprListDup(db, p->pGroupBy, flags); pNew->pHaving = sqlite3ExprDup(db, p->pHaving, flags); pNew->pOrderBy = sqlite3ExprListDup(db, p->pOrderBy, flags); pNew->op = p->op; pNew->pPrior = pPrior = sqlite3SelectDup(db, p->pPrior, flags); if( pPrior ) pPrior->pNext = pNew; pNew->pNext = 0; pNew->pLimit = sqlite3ExprDup(db, p->pLimit, flags); pNew->pOffset = sqlite3ExprDup(db, p->pOffset, flags); pNew->iLimit = 0; pNew->iOffset = 0; pNew->selFlags = p->selFlags & ~SF_UsesEphemeral; pNew->addrOpenEphm[0] = -1; pNew->addrOpenEphm[1] = -1; pNew->nSelectRow = p->nSelectRow; pNew->pWith = withDup(db, p->pWith); sqlite3SelectSetName(pNew, p->zSelName); return pNew; } #else SQLITE_PRIVATE Select *sqlite3SelectDup(sqlite3 *db, Select *p, int flags){ assert( p==0 ); return 0; } #endif /* ** Add a new element to the end of an expression list. If pList is ** initially NULL, then create a new expression list. ** ** If a memory allocation error occurs, the entire list is freed and ** NULL is returned. If non-NULL is returned, then it is guaranteed ** that the new entry was successfully appended. 
*/ SQLITE_PRIVATE ExprList *sqlite3ExprListAppend( Parse *pParse, /* Parsing context */ ExprList *pList, /* List to which to append. Might be NULL */ Expr *pExpr /* Expression to be appended. Might be NULL */ ){ sqlite3 *db = pParse->db; if( pList==0 ){ pList = sqlite3DbMallocZero(db, sizeof(ExprList) ); if( pList==0 ){ goto no_mem; } pList->a = sqlite3DbMallocRaw(db, sizeof(pList->a[0])); if( pList->a==0 ) goto no_mem; }else if( (pList->nExpr & (pList->nExpr-1))==0 ){ struct ExprList_item *a; assert( pList->nExpr>0 ); a = sqlite3DbRealloc(db, pList->a, pList->nExpr*2*sizeof(pList->a[0])); if( a==0 ){ goto no_mem; } pList->a = a; } assert( pList->a!=0 ); if( 1 ){ struct ExprList_item *pItem = &pList->a[pList->nExpr++]; memset(pItem, 0, sizeof(*pItem)); pItem->pExpr = pExpr; } return pList; no_mem: /* Avoid leaking memory if malloc has failed. */ sqlite3ExprDelete(db, pExpr); sqlite3ExprListDelete(db, pList); return 0; } /* ** Set the ExprList.a[].zName element of the most recently added item ** on the expression list. ** ** pList might be NULL following an OOM error. But pName should never be ** NULL. If a memory allocation fails, the pParse->db->mallocFailed flag ** is set. */ SQLITE_PRIVATE void sqlite3ExprListSetName( Parse *pParse, /* Parsing context */ ExprList *pList, /* List to which to add the span. */ Token *pName, /* Name to be added */ int dequote /* True to cause the name to be dequoted */ ){ assert( pList!=0 || pParse->db->mallocFailed!=0 ); if( pList ){ struct ExprList_item *pItem; assert( pList->nExpr>0 ); pItem = &pList->a[pList->nExpr-1]; assert( pItem->zName==0 ); pItem->zName = sqlite3DbStrNDup(pParse->db, pName->z, pName->n); if( dequote && pItem->zName ) sqlite3Dequote(pItem->zName); } } /* ** Set the ExprList.a[].zSpan element of the most recently added item ** on the expression list. ** ** pList might be NULL following an OOM error. But pSpan should never be ** NULL. If a memory allocation fails, the pParse->db->mallocFailed flag ** is set. */ SQLITE_PRIVATE void sqlite3ExprListSetSpan( Parse *pParse, /* Parsing context */ ExprList *pList, /* List to which to add the span. */ ExprSpan *pSpan /* The span to be added */ ){ sqlite3 *db = pParse->db; assert( pList!=0 || db->mallocFailed!=0 ); if( pList ){ struct ExprList_item *pItem = &pList->a[pList->nExpr-1]; assert( pList->nExpr>0 ); assert( db->mallocFailed || pItem->pExpr==pSpan->pExpr ); sqlite3DbFree(db, pItem->zSpan); pItem->zSpan = sqlite3DbStrNDup(db, (char*)pSpan->zStart, (int)(pSpan->zEnd - pSpan->zStart)); } } /* ** If the expression list pEList contains more than iLimit elements, ** leave an error message in pParse. */ SQLITE_PRIVATE void sqlite3ExprListCheckLength( Parse *pParse, ExprList *pEList, const char *zObject ){ int mx = pParse->db->aLimit[SQLITE_LIMIT_COLUMN]; testcase( pEList && pEList->nExpr==mx ); testcase( pEList && pEList->nExpr==mx+1 ); if( pEList && pEList->nExpr>mx ){ sqlite3ErrorMsg(pParse, "too many columns in %s", zObject); } } /* ** Delete an entire expression list. */ SQLITE_PRIVATE void sqlite3ExprListDelete(sqlite3 *db, ExprList *pList){ int i; struct ExprList_item *pItem; if( pList==0 ) return; assert( pList->a!=0 || pList->nExpr==0 ); for(pItem=pList->a, i=0; inExpr; i++, pItem++){ sqlite3ExprDelete(db, pItem->pExpr); sqlite3DbFree(db, pItem->zName); sqlite3DbFree(db, pItem->zSpan); } sqlite3DbFree(db, pList->a); sqlite3DbFree(db, pList); } /* ** Return the bitwise-OR of all Expr.flags fields in the given ** ExprList. 
*/ SQLITE_PRIVATE u32 sqlite3ExprListFlags(const ExprList *pList){ int i; u32 m = 0; if( pList ){ for(i=0; inExpr; i++){ Expr *pExpr = pList->a[i].pExpr; if( ALWAYS(pExpr) ) m |= pExpr->flags; } } return m; } /* ** These routines are Walker callbacks used to check expressions to ** see if they are "constant" for some definition of constant. The ** Walker.eCode value determines the type of "constant" we are looking ** for. ** ** These callback routines are used to implement the following: ** ** sqlite3ExprIsConstant() pWalker->eCode==1 ** sqlite3ExprIsConstantNotJoin() pWalker->eCode==2 ** sqlite3ExprIsTableConstant() pWalker->eCode==3 ** sqlite3ExprIsConstantOrFunction() pWalker->eCode==4 or 5 ** ** In all cases, the callbacks set Walker.eCode=0 and abort if the expression ** is found to not be a constant. ** ** The sqlite3ExprIsConstantOrFunction() is used for evaluating expressions ** in a CREATE TABLE statement. The Walker.eCode value is 5 when parsing ** an existing schema and 4 when processing a new statement. A bound ** parameter raises an error for new statements, but is silently converted ** to NULL for existing schemas. This allows sqlite_master tables that ** contain a bound parameter because they were generated by older versions ** of SQLite to be parsed by newer versions of SQLite without raising a ** malformed schema error. */ static int exprNodeIsConstant(Walker *pWalker, Expr *pExpr){ /* If pWalker->eCode is 2 then any term of the expression that comes from ** the ON or USING clauses of a left join disqualifies the expression ** from being considered constant. */ if( pWalker->eCode==2 && ExprHasProperty(pExpr, EP_FromJoin) ){ pWalker->eCode = 0; return WRC_Abort; } switch( pExpr->op ){ /* Consider functions to be constant if all their arguments are constant ** and either pWalker->eCode==4 or 5 or the function has the ** SQLITE_FUNC_CONST flag. */ case TK_FUNCTION: if( pWalker->eCode>=4 || ExprHasProperty(pExpr,EP_ConstFunc) ){ return WRC_Continue; }else{ pWalker->eCode = 0; return WRC_Abort; } case TK_ID: case TK_COLUMN: case TK_AGG_FUNCTION: case TK_AGG_COLUMN: testcase( pExpr->op==TK_ID ); testcase( pExpr->op==TK_COLUMN ); testcase( pExpr->op==TK_AGG_FUNCTION ); testcase( pExpr->op==TK_AGG_COLUMN ); if( pWalker->eCode==3 && pExpr->iTable==pWalker->u.iCur ){ return WRC_Continue; }else{ pWalker->eCode = 0; return WRC_Abort; } case TK_VARIABLE: if( pWalker->eCode==5 ){ /* Silently convert bound parameters that appear inside of CREATE ** statements into a NULL when parsing the CREATE statement text out ** of the sqlite_master table */ pExpr->op = TK_NULL; }else if( pWalker->eCode==4 ){ /* A bound parameter in a CREATE statement that originates from ** sqlite3_prepare() causes an error */ pWalker->eCode = 0; return WRC_Abort; } /* Fall through */ default: testcase( pExpr->op==TK_SELECT ); /* selectNodeIsConstant will disallow */ testcase( pExpr->op==TK_EXISTS ); /* selectNodeIsConstant will disallow */ return WRC_Continue; } } static int selectNodeIsConstant(Walker *pWalker, Select *NotUsed){ UNUSED_PARAMETER(NotUsed); pWalker->eCode = 0; return WRC_Abort; } static int exprIsConst(Expr *p, int initFlag, int iCur){ Walker w; memset(&w, 0, sizeof(w)); w.eCode = initFlag; w.xExprCallback = exprNodeIsConstant; w.xSelectCallback = selectNodeIsConstant; w.u.iCur = iCur; sqlite3WalkExpr(&w, p); return w.eCode; } /* ** Walk an expression tree. Return non-zero if the expression is constant ** and 0 if it involves variables or function calls. 
** ** For the purposes of this function, a double-quoted string (ex: "abc") ** is considered a variable but a single-quoted string (ex: 'abc') is ** a constant. */ SQLITE_PRIVATE int sqlite3ExprIsConstant(Expr *p){ return exprIsConst(p, 1, 0); } /* ** Walk an expression tree. Return non-zero if the expression is constant ** that does no originate from the ON or USING clauses of a join. ** Return 0 if it involves variables or function calls or terms from ** an ON or USING clause. */ SQLITE_PRIVATE int sqlite3ExprIsConstantNotJoin(Expr *p){ return exprIsConst(p, 2, 0); } /* ** Walk an expression tree. Return non-zero if the expression is constant ** for any single row of the table with cursor iCur. In other words, the ** expression must not refer to any non-deterministic function nor any ** table other than iCur. */ SQLITE_PRIVATE int sqlite3ExprIsTableConstant(Expr *p, int iCur){ return exprIsConst(p, 3, iCur); } /* ** Walk an expression tree. Return non-zero if the expression is constant ** or a function call with constant arguments. Return and 0 if there ** are any variables. ** ** For the purposes of this function, a double-quoted string (ex: "abc") ** is considered a variable but a single-quoted string (ex: 'abc') is ** a constant. */ SQLITE_PRIVATE int sqlite3ExprIsConstantOrFunction(Expr *p, u8 isInit){ assert( isInit==0 || isInit==1 ); return exprIsConst(p, 4+isInit, 0); } /* ** If the expression p codes a constant integer that is small enough ** to fit in a 32-bit integer, return 1 and put the value of the integer ** in *pValue. If the expression is not an integer or if it is too big ** to fit in a signed 32-bit integer, return 0 and leave *pValue unchanged. */ SQLITE_PRIVATE int sqlite3ExprIsInteger(Expr *p, int *pValue){ int rc = 0; /* If an expression is an integer literal that fits in a signed 32-bit ** integer, then the EP_IntValue flag will have already been set */ assert( p->op!=TK_INTEGER || (p->flags & EP_IntValue)!=0 || sqlite3GetInt32(p->u.zToken, &rc)==0 ); if( p->flags & EP_IntValue ){ *pValue = p->u.iValue; return 1; } switch( p->op ){ case TK_UPLUS: { rc = sqlite3ExprIsInteger(p->pLeft, pValue); break; } case TK_UMINUS: { int v; if( sqlite3ExprIsInteger(p->pLeft, &v) ){ assert( v!=(-2147483647-1) ); *pValue = -v; rc = 1; } break; } default: break; } return rc; } /* ** Return FALSE if there is no chance that the expression can be NULL. ** ** If the expression might be NULL or if the expression is too complex ** to tell return TRUE. ** ** This routine is used as an optimization, to skip OP_IsNull opcodes ** when we know that a value cannot be NULL. Hence, a false positive ** (returning TRUE when in fact the expression can never be NULL) might ** be a small performance hit but is otherwise harmless. On the other ** hand, a false negative (returning FALSE when the result could be NULL) ** will likely result in an incorrect answer. So when in doubt, return ** TRUE. */ SQLITE_PRIVATE int sqlite3ExprCanBeNull(const Expr *p){ u8 op; while( p->op==TK_UPLUS || p->op==TK_UMINUS ){ p = p->pLeft; } op = p->op; if( op==TK_REGISTER ) op = p->op2; switch( op ){ case TK_INTEGER: case TK_STRING: case TK_FLOAT: case TK_BLOB: return 0; case TK_COLUMN: assert( p->pTab!=0 ); return ExprHasProperty(p, EP_CanBeNull) || (p->iColumn>=0 && p->pTab->aCol[p->iColumn].notNull==0); default: return 1; } } /* ** Return TRUE if the given expression is a constant which would be ** unchanged by OP_Affinity with the affinity given in the second ** argument. 
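**
** For example, an integer literal is unchanged by NUMERIC affinity, so TRUE
** can be returned in that case; a text literal under NUMERIC affinity might
** be converted, so FALSE is the safe answer there.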
** ** This routine is used to determine if the OP_Affinity operation ** can be omitted. When in doubt return FALSE. A false negative ** is harmless. A false positive, however, can result in the wrong ** answer. */ SQLITE_PRIVATE int sqlite3ExprNeedsNoAffinityChange(const Expr *p, char aff){ u8 op; if( aff==SQLITE_AFF_BLOB ) return 1; while( p->op==TK_UPLUS || p->op==TK_UMINUS ){ p = p->pLeft; } op = p->op; if( op==TK_REGISTER ) op = p->op2; switch( op ){ case TK_INTEGER: { return aff==SQLITE_AFF_INTEGER || aff==SQLITE_AFF_NUMERIC; } case TK_FLOAT: { return aff==SQLITE_AFF_REAL || aff==SQLITE_AFF_NUMERIC; } case TK_STRING: { return aff==SQLITE_AFF_TEXT; } case TK_BLOB: { return 1; } case TK_COLUMN: { assert( p->iTable>=0 ); /* p cannot be part of a CHECK constraint */ return p->iColumn<0 && (aff==SQLITE_AFF_INTEGER || aff==SQLITE_AFF_NUMERIC); } default: { return 0; } } } /* ** Return TRUE if the given string is a row-id column name. */ SQLITE_PRIVATE int sqlite3IsRowid(const char *z){ if( sqlite3StrICmp(z, "_ROWID_")==0 ) return 1; if( sqlite3StrICmp(z, "ROWID")==0 ) return 1; if( sqlite3StrICmp(z, "OID")==0 ) return 1; return 0; } /* ** Return true if we are able to the IN operator optimization on a ** query of the form ** ** x IN (SELECT ...) ** ** Where the SELECT... clause is as specified by the parameter to this ** routine. ** ** The Select object passed in has already been preprocessed and no ** errors have been found. */ #ifndef SQLITE_OMIT_SUBQUERY static int isCandidateForInOpt(Select *p){ SrcList *pSrc; ExprList *pEList; Table *pTab; if( p==0 ) return 0; /* right-hand side of IN is SELECT */ if( p->pPrior ) return 0; /* Not a compound SELECT */ if( p->selFlags & (SF_Distinct|SF_Aggregate) ){ testcase( (p->selFlags & (SF_Distinct|SF_Aggregate))==SF_Distinct ); testcase( (p->selFlags & (SF_Distinct|SF_Aggregate))==SF_Aggregate ); return 0; /* No DISTINCT keyword and no aggregate functions */ } assert( p->pGroupBy==0 ); /* Has no GROUP BY clause */ if( p->pLimit ) return 0; /* Has no LIMIT clause */ assert( p->pOffset==0 ); /* No LIMIT means no OFFSET */ if( p->pWhere ) return 0; /* Has no WHERE clause */ pSrc = p->pSrc; assert( pSrc!=0 ); if( pSrc->nSrc!=1 ) return 0; /* Single term in FROM clause */ if( pSrc->a[0].pSelect ) return 0; /* FROM is not a subquery or view */ pTab = pSrc->a[0].pTab; if( NEVER(pTab==0) ) return 0; assert( pTab->pSelect==0 ); /* FROM clause is not a view */ if( IsVirtual(pTab) ) return 0; /* FROM clause not a virtual table */ pEList = p->pEList; if( pEList->nExpr!=1 ) return 0; /* One column in the result set */ if( pEList->a[0].pExpr->op!=TK_COLUMN ) return 0; /* Result is a column */ return 1; } #endif /* SQLITE_OMIT_SUBQUERY */ /* ** Code an OP_Once instruction and allocate space for its flag. Return the ** address of the new instruction. */ SQLITE_PRIVATE int sqlite3CodeOnce(Parse *pParse){ Vdbe *v = sqlite3GetVdbe(pParse); /* Virtual machine being coded */ return sqlite3VdbeAddOp1(v, OP_Once, pParse->nOnce++); } /* ** Generate code that checks the left-most column of index table iCur to see if ** it contains any NULL entries. Cause the register at regHasNull to be set ** to a non-NULL value if iCur contains no NULLs. Cause register regHasNull ** to be set to NULL if iCur contains one or more NULL values. 
*/ static void sqlite3SetHasNullFlag(Vdbe *v, int iCur, int regHasNull){ int j1; sqlite3VdbeAddOp2(v, OP_Integer, 0, regHasNull); j1 = sqlite3VdbeAddOp1(v, OP_Rewind, iCur); VdbeCoverage(v); sqlite3VdbeAddOp3(v, OP_Column, iCur, 0, regHasNull); sqlite3VdbeChangeP5(v, OPFLAG_TYPEOFARG); VdbeComment((v, "first_entry_in(%d)", iCur)); sqlite3VdbeJumpHere(v, j1); } #ifndef SQLITE_OMIT_SUBQUERY /* ** The argument is an IN operator with a list (not a subquery) on the ** right-hand side. Return TRUE if that list is constant. */ static int sqlite3InRhsIsConstant(Expr *pIn){ Expr *pLHS; int res; assert( !ExprHasProperty(pIn, EP_xIsSelect) ); pLHS = pIn->pLeft; pIn->pLeft = 0; res = sqlite3ExprIsConstant(pIn); pIn->pLeft = pLHS; return res; } #endif /* ** This function is used by the implementation of the IN (...) operator. ** The pX parameter is the expression on the RHS of the IN operator, which ** might be either a list of expressions or a subquery. ** ** The job of this routine is to find or create a b-tree object that can ** be used either to test for membership in the RHS set or to iterate through ** all members of the RHS set, skipping duplicates. ** ** A cursor is opened on the b-tree object that is the RHS of the IN operator ** and pX->iTable is set to the index of that cursor. ** ** The returned value of this function indicates the b-tree type, as follows: ** ** IN_INDEX_ROWID - The cursor was opened on a database table. ** IN_INDEX_INDEX_ASC - The cursor was opened on an ascending index. ** IN_INDEX_INDEX_DESC - The cursor was opened on a descending index. ** IN_INDEX_EPH - The cursor was opened on a specially created and ** populated epheremal table. ** IN_INDEX_NOOP - No cursor was allocated. The IN operator must be ** implemented as a sequence of comparisons. ** ** An existing b-tree might be used if the RHS expression pX is a simple ** subquery such as: ** ** SELECT FROM ** ** If the RHS of the IN operator is a list or a more complex subquery, then ** an ephemeral table might need to be generated from the RHS and then ** pX->iTable made to point to the ephemeral table instead of an ** existing table. ** ** The inFlags parameter must contain exactly one of the bits ** IN_INDEX_MEMBERSHIP or IN_INDEX_LOOP. If inFlags contains ** IN_INDEX_MEMBERSHIP, then the generated table will be used for a ** fast membership test. When the IN_INDEX_LOOP bit is set, the ** IN index will be used to loop over all values of the RHS of the ** IN operator. ** ** When IN_INDEX_LOOP is used (and the b-tree will be used to iterate ** through the set members) then the b-tree must not contain duplicates. ** An epheremal table must be used unless the selected is guaranteed ** to be unique - either because it is an INTEGER PRIMARY KEY or it ** has a UNIQUE constraint or UNIQUE index. ** ** When IN_INDEX_MEMBERSHIP is used (and the b-tree will be used ** for fast set membership tests) then an epheremal table must ** be used unless is an INTEGER PRIMARY KEY or an index can ** be found with as its left-most column. ** ** If the IN_INDEX_NOOP_OK and IN_INDEX_MEMBERSHIP are both set and ** if the RHS of the IN operator is a list (not a subquery) then this ** routine might decide that creating an ephemeral b-tree for membership ** testing is too expensive and return IN_INDEX_NOOP. In that case, the ** calling routine should implement the IN operator using a sequence ** of Eq or Ne comparison operations. 
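**
** For instance, when no usable index exists on the column, a term like
** "x IN (1,2)" has only two RHS values, so it may be coded as two direct
** comparisons instead of building an ephemeral b-tree.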
** ** When the b-tree is being used for membership tests, the calling function ** might need to know whether or not the RHS side of the IN operator ** contains a NULL. If prRhsHasNull is not a NULL pointer and ** if there is any chance that the (...) might contain a NULL value at ** runtime, then a register is allocated and the register number written ** to *prRhsHasNull. If there is no chance that the (...) contains a ** NULL value, then *prRhsHasNull is left unchanged. ** ** If a register is allocated and its location stored in *prRhsHasNull, then ** the value in that register will be NULL if the b-tree contains one or more ** NULL values, and it will be some non-NULL value if the b-tree contains no ** NULL values. */ #ifndef SQLITE_OMIT_SUBQUERY SQLITE_PRIVATE int sqlite3FindInIndex(Parse *pParse, Expr *pX, u32 inFlags, int *prRhsHasNull){ Select *p; /* SELECT to the right of IN operator */ int eType = 0; /* Type of RHS table. IN_INDEX_* */ int iTab = pParse->nTab++; /* Cursor of the RHS table */ int mustBeUnique; /* True if RHS must be unique */ Vdbe *v = sqlite3GetVdbe(pParse); /* Virtual machine being coded */ assert( pX->op==TK_IN ); mustBeUnique = (inFlags & IN_INDEX_LOOP)!=0; /* Check to see if an existing table or index can be used to ** satisfy the query. This is preferable to generating a new ** ephemeral table. */ p = (ExprHasProperty(pX, EP_xIsSelect) ? pX->x.pSelect : 0); if( pParse->nErr==0 && isCandidateForInOpt(p) ){ sqlite3 *db = pParse->db; /* Database connection */ Table *pTab; /* Table
    . */ Expr *pExpr; /* Expression */ i16 iCol; /* Index of column */ i16 iDb; /* Database idx for pTab */ assert( p ); /* Because of isCandidateForInOpt(p) */ assert( p->pEList!=0 ); /* Because of isCandidateForInOpt(p) */ assert( p->pEList->a[0].pExpr!=0 ); /* Because of isCandidateForInOpt(p) */ assert( p->pSrc!=0 ); /* Because of isCandidateForInOpt(p) */ pTab = p->pSrc->a[0].pTab; pExpr = p->pEList->a[0].pExpr; iCol = (i16)pExpr->iColumn; /* Code an OP_Transaction and OP_TableLock for
    . */ iDb = sqlite3SchemaToIndex(db, pTab->pSchema); sqlite3CodeVerifySchema(pParse, iDb); sqlite3TableLock(pParse, iDb, pTab->tnum, 0, pTab->zName); /* This function is only called from two places. In both cases the vdbe ** has already been allocated. So assume sqlite3GetVdbe() is always ** successful here. */ assert(v); if( iCol<0 ){ int iAddr = sqlite3CodeOnce(pParse); VdbeCoverage(v); sqlite3OpenTable(pParse, iTab, iDb, pTab, OP_OpenRead); eType = IN_INDEX_ROWID; sqlite3VdbeJumpHere(v, iAddr); }else{ Index *pIdx; /* Iterator variable */ /* The collation sequence used by the comparison. If an index is to ** be used in place of a temp-table, it must be ordered according ** to this collation sequence. */ CollSeq *pReq = sqlite3BinaryCompareCollSeq(pParse, pX->pLeft, pExpr); /* Check that the affinity that will be used to perform the ** comparison is the same as the affinity of the column. If ** it is not, it is not possible to use any index. */ int affinity_ok = sqlite3IndexAffinityOk(pX, pTab->aCol[iCol].affinity); for(pIdx=pTab->pIndex; pIdx && eType==0 && affinity_ok; pIdx=pIdx->pNext){ if( (pIdx->aiColumn[0]==iCol) && sqlite3FindCollSeq(db, ENC(db), pIdx->azColl[0], 0)==pReq && (!mustBeUnique || (pIdx->nKeyCol==1 && IsUniqueIndex(pIdx))) ){ int iAddr = sqlite3CodeOnce(pParse); VdbeCoverage(v); sqlite3VdbeAddOp3(v, OP_OpenRead, iTab, pIdx->tnum, iDb); sqlite3VdbeSetP4KeyInfo(pParse, pIdx); VdbeComment((v, "%s", pIdx->zName)); assert( IN_INDEX_INDEX_DESC == IN_INDEX_INDEX_ASC+1 ); eType = IN_INDEX_INDEX_ASC + pIdx->aSortOrder[0]; if( prRhsHasNull && !pTab->aCol[iCol].notNull ){ *prRhsHasNull = ++pParse->nMem; sqlite3SetHasNullFlag(v, iTab, *prRhsHasNull); } sqlite3VdbeJumpHere(v, iAddr); } } } } /* If no preexisting index is available for the IN clause ** and IN_INDEX_NOOP is an allowed reply ** and the RHS of the IN operator is a list, not a subquery ** and the RHS is not contant or has two or fewer terms, ** then it is not worth creating an ephemeral table to evaluate ** the IN operator so return IN_INDEX_NOOP. */ if( eType==0 && (inFlags & IN_INDEX_NOOP_OK) && !ExprHasProperty(pX, EP_xIsSelect) && (!sqlite3InRhsIsConstant(pX) || pX->x.pList->nExpr<=2) ){ eType = IN_INDEX_NOOP; } if( eType==0 ){ /* Could not find an existing table or index to use as the RHS b-tree. ** We will have to generate an ephemeral table to do the job. */ u32 savedNQueryLoop = pParse->nQueryLoop; int rMayHaveNull = 0; eType = IN_INDEX_EPH; if( inFlags & IN_INDEX_LOOP ){ pParse->nQueryLoop = 0; if( pX->pLeft->iColumn<0 && !ExprHasProperty(pX, EP_xIsSelect) ){ eType = IN_INDEX_ROWID; } }else if( prRhsHasNull ){ *prRhsHasNull = rMayHaveNull = ++pParse->nMem; } sqlite3CodeSubselect(pParse, pX, rMayHaveNull, eType==IN_INDEX_ROWID); pParse->nQueryLoop = savedNQueryLoop; }else{ pX->iTable = iTab; } return eType; } #endif /* ** Generate code for scalar subqueries used as a subquery expression, EXISTS, ** or IN operators. Examples: ** ** (SELECT a FROM b) -- subquery ** EXISTS (SELECT a FROM b) -- EXISTS subquery ** x IN (4,5,11) -- IN operator with list on right-hand side ** x IN (SELECT a FROM b) -- IN operator with subquery on the right ** ** The pExpr parameter describes the expression that contains the IN ** operator or subquery. ** ** If parameter isRowid is non-zero, then expression pExpr is guaranteed ** to be of the form " IN (?, ?, ?)", where is a reference ** to some integer key column of a table B-Tree. In this case, use an ** intkey B-Tree to store the set of IN(...) 
values instead of the usual ** (slower) variable length keys B-Tree. ** ** If rMayHaveNull is non-zero, that means that the operation is an IN ** (not a SELECT or EXISTS) and that the RHS might contains NULLs. ** All this routine does is initialize the register given by rMayHaveNull ** to NULL. Calling routines will take care of changing this register ** value to non-NULL if the RHS is NULL-free. ** ** For a SELECT or EXISTS operator, return the register that holds the ** result. For IN operators or if an error occurs, the return value is 0. */ #ifndef SQLITE_OMIT_SUBQUERY SQLITE_PRIVATE int sqlite3CodeSubselect( Parse *pParse, /* Parsing context */ Expr *pExpr, /* The IN, SELECT, or EXISTS operator */ int rHasNullFlag, /* Register that records whether NULLs exist in RHS */ int isRowid /* If true, LHS of IN operator is a rowid */ ){ int jmpIfDynamic = -1; /* One-time test address */ int rReg = 0; /* Register storing resulting */ Vdbe *v = sqlite3GetVdbe(pParse); if( NEVER(v==0) ) return 0; sqlite3ExprCachePush(pParse); /* This code must be run in its entirety every time it is encountered ** if any of the following is true: ** ** * The right-hand side is a correlated subquery ** * The right-hand side is an expression list containing variables ** * We are inside a trigger ** ** If all of the above are false, then we can run this code just once ** save the results, and reuse the same result on subsequent invocations. */ if( !ExprHasProperty(pExpr, EP_VarSelect) ){ jmpIfDynamic = sqlite3CodeOnce(pParse); VdbeCoverage(v); } #ifndef SQLITE_OMIT_EXPLAIN if( pParse->explain==2 ){ char *zMsg = sqlite3MPrintf( pParse->db, "EXECUTE %s%s SUBQUERY %d", jmpIfDynamic>=0?"":"CORRELATED ", pExpr->op==TK_IN?"LIST":"SCALAR", pParse->iNextSelectId ); sqlite3VdbeAddOp4(v, OP_Explain, pParse->iSelectId, 0, 0, zMsg, P4_DYNAMIC); } #endif switch( pExpr->op ){ case TK_IN: { char affinity; /* Affinity of the LHS of the IN */ int addr; /* Address of OP_OpenEphemeral instruction */ Expr *pLeft = pExpr->pLeft; /* the LHS of the IN operator */ KeyInfo *pKeyInfo = 0; /* Key information */ affinity = sqlite3ExprAffinity(pLeft); /* Whether this is an 'x IN(SELECT...)' or an 'x IN()' ** expression it is handled the same way. An ephemeral table is ** filled with single-field index keys representing the results ** from the SELECT or the . ** ** If the 'x' expression is a column value, or the SELECT... ** statement returns a column value, then the affinity of that ** column is used to build the index keys. If both 'x' and the ** SELECT... statement are columns, then numeric affinity is used ** if either column has NUMERIC or INTEGER affinity. If neither ** 'x' nor the SELECT... statement are columns, then numeric affinity ** is used. */ pExpr->iTable = pParse->nTab++; addr = sqlite3VdbeAddOp2(v, OP_OpenEphemeral, pExpr->iTable, !isRowid); pKeyInfo = isRowid ? 0 : sqlite3KeyInfoAlloc(pParse->db, 1, 1); if( ExprHasProperty(pExpr, EP_xIsSelect) ){ /* Case 1: expr IN (SELECT ...) ** ** Generate code to write the results of the select into the temporary ** table allocated and opened above. 
*/ Select *pSelect = pExpr->x.pSelect; SelectDest dest; ExprList *pEList; assert( !isRowid ); sqlite3SelectDestInit(&dest, SRT_Set, pExpr->iTable); dest.affSdst = (u8)affinity; assert( (pExpr->iTable&0x0000FFFF)==pExpr->iTable ); pSelect->iLimit = 0; testcase( pSelect->selFlags & SF_Distinct ); testcase( pKeyInfo==0 ); /* Caused by OOM in sqlite3KeyInfoAlloc() */ if( sqlite3Select(pParse, pSelect, &dest) ){ sqlite3KeyInfoUnref(pKeyInfo); return 0; } pEList = pSelect->pEList; assert( pKeyInfo!=0 ); /* OOM will cause exit after sqlite3Select() */ assert( pEList!=0 ); assert( pEList->nExpr>0 ); assert( sqlite3KeyInfoIsWriteable(pKeyInfo) ); pKeyInfo->aColl[0] = sqlite3BinaryCompareCollSeq(pParse, pExpr->pLeft, pEList->a[0].pExpr); }else if( ALWAYS(pExpr->x.pList!=0) ){ /* Case 2: expr IN (exprlist) ** ** For each expression, build an index key from the evaluation and ** store it in the temporary table. If is a column, then use ** that columns affinity when building index keys. If is not ** a column, use numeric affinity. */ int i; ExprList *pList = pExpr->x.pList; struct ExprList_item *pItem; int r1, r2, r3; if( !affinity ){ affinity = SQLITE_AFF_BLOB; } if( pKeyInfo ){ assert( sqlite3KeyInfoIsWriteable(pKeyInfo) ); pKeyInfo->aColl[0] = sqlite3ExprCollSeq(pParse, pExpr->pLeft); } /* Loop through each expression in . */ r1 = sqlite3GetTempReg(pParse); r2 = sqlite3GetTempReg(pParse); if( isRowid ) sqlite3VdbeAddOp2(v, OP_Null, 0, r2); for(i=pList->nExpr, pItem=pList->a; i>0; i--, pItem++){ Expr *pE2 = pItem->pExpr; int iValToIns; /* If the expression is not constant then we will need to ** disable the test that was generated above that makes sure ** this code only executes once. Because for a non-constant ** expression we need to rerun this code each time. */ if( jmpIfDynamic>=0 && !sqlite3ExprIsConstant(pE2) ){ sqlite3VdbeChangeToNoop(v, jmpIfDynamic); jmpIfDynamic = -1; } /* Evaluate the expression and insert it into the temp table */ if( isRowid && sqlite3ExprIsInteger(pE2, &iValToIns) ){ sqlite3VdbeAddOp3(v, OP_InsertInt, pExpr->iTable, r2, iValToIns); }else{ r3 = sqlite3ExprCodeTarget(pParse, pE2, r1); if( isRowid ){ sqlite3VdbeAddOp2(v, OP_MustBeInt, r3, sqlite3VdbeCurrentAddr(v)+2); VdbeCoverage(v); sqlite3VdbeAddOp3(v, OP_Insert, pExpr->iTable, r2, r3); }else{ sqlite3VdbeAddOp4(v, OP_MakeRecord, r3, 1, r2, &affinity, 1); sqlite3ExprCacheAffinityChange(pParse, r3, 1); sqlite3VdbeAddOp2(v, OP_IdxInsert, pExpr->iTable, r2); } } } sqlite3ReleaseTempReg(pParse, r1); sqlite3ReleaseTempReg(pParse, r2); } if( pKeyInfo ){ sqlite3VdbeChangeP4(v, addr, (void *)pKeyInfo, P4_KEYINFO); } break; } case TK_EXISTS: case TK_SELECT: default: { /* If this has to be a scalar SELECT. Generate code to put the ** value of this select in a memory cell and record the number ** of the memory cell in iColumn. If this is an EXISTS, write ** an integer 0 (not exists) or 1 (exists) into a memory cell ** and record that memory cell in iColumn. 
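** For example, "(SELECT a FROM b)" yields the value of column a from the
** first row returned (or NULL if the SELECT returns no rows), while
** "EXISTS (SELECT a FROM b)" always yields either 0 or 1.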
*/ Select *pSel; /* SELECT statement to encode */ SelectDest dest; /* How to deal with SELECt result */ testcase( pExpr->op==TK_EXISTS ); testcase( pExpr->op==TK_SELECT ); assert( pExpr->op==TK_EXISTS || pExpr->op==TK_SELECT ); assert( ExprHasProperty(pExpr, EP_xIsSelect) ); pSel = pExpr->x.pSelect; sqlite3SelectDestInit(&dest, 0, ++pParse->nMem); if( pExpr->op==TK_SELECT ){ dest.eDest = SRT_Mem; dest.iSdst = dest.iSDParm; sqlite3VdbeAddOp2(v, OP_Null, 0, dest.iSDParm); VdbeComment((v, "Init subquery result")); }else{ dest.eDest = SRT_Exists; sqlite3VdbeAddOp2(v, OP_Integer, 0, dest.iSDParm); VdbeComment((v, "Init EXISTS result")); } sqlite3ExprDelete(pParse->db, pSel->pLimit); pSel->pLimit = sqlite3PExpr(pParse, TK_INTEGER, 0, 0, &sqlite3IntTokens[1]); pSel->iLimit = 0; pSel->selFlags &= ~SF_MultiValue; if( sqlite3Select(pParse, pSel, &dest) ){ return 0; } rReg = dest.iSDParm; ExprSetVVAProperty(pExpr, EP_NoReduce); break; } } if( rHasNullFlag ){ sqlite3SetHasNullFlag(v, pExpr->iTable, rHasNullFlag); } if( jmpIfDynamic>=0 ){ sqlite3VdbeJumpHere(v, jmpIfDynamic); } sqlite3ExprCachePop(pParse); return rReg; } #endif /* SQLITE_OMIT_SUBQUERY */ #ifndef SQLITE_OMIT_SUBQUERY /* ** Generate code for an IN expression. ** ** x IN (SELECT ...) ** x IN (value, value, ...) ** ** The left-hand side (LHS) is a scalar expression. The right-hand side (RHS) ** is an array of zero or more values. The expression is true if the LHS is ** contained within the RHS. The value of the expression is unknown (NULL) ** if the LHS is NULL or if the LHS is not contained within the RHS and the ** RHS contains one or more NULL values. ** ** This routine generates code that jumps to destIfFalse if the LHS is not ** contained within the RHS. If due to NULLs we cannot determine if the LHS ** is contained in the RHS then jump to destIfNull. If the LHS is contained ** within the RHS then fall through. */ static void sqlite3ExprCodeIN( Parse *pParse, /* Parsing and code generating context */ Expr *pExpr, /* The IN expression */ int destIfFalse, /* Jump here if LHS is not contained in the RHS */ int destIfNull /* Jump here if the results are unknown due to NULLs */ ){ int rRhsHasNull = 0; /* Register that is true if RHS contains NULL values */ char affinity; /* Comparison affinity to use */ int eType; /* Type of the RHS */ int r1; /* Temporary use register */ Vdbe *v; /* Statement under construction */ /* Compute the RHS. After this step, the table with cursor ** pExpr->iTable will contains the values that make up the RHS. */ v = pParse->pVdbe; assert( v!=0 ); /* OOM detected prior to this routine */ VdbeNoopComment((v, "begin IN expr")); eType = sqlite3FindInIndex(pParse, pExpr, IN_INDEX_MEMBERSHIP | IN_INDEX_NOOP_OK, destIfFalse==destIfNull ? 0 : &rRhsHasNull); /* Figure out the affinity to use to create a key from the results ** of the expression. affinityStr stores a static string suitable for ** P4 of OP_MakeRecord. */ affinity = comparisonAffinity(pExpr); /* Code the LHS, the from " IN (...)". */ sqlite3ExprCachePush(pParse); r1 = sqlite3GetTempReg(pParse); sqlite3ExprCode(pParse, pExpr->pLeft, r1); /* If sqlite3FindInIndex() did not find or create an index that is ** suitable for evaluating the IN operator, then evaluate using a ** sequence of comparisons. 
  */
  if( eType==IN_INDEX_NOOP ){
    ExprList *pList = pExpr->x.pList;
    CollSeq *pColl = sqlite3ExprCollSeq(pParse, pExpr->pLeft);
    int labelOk = sqlite3VdbeMakeLabel(v);
    int r2, regToFree;
    int regCkNull = 0;
    int ii;
    assert( !ExprHasProperty(pExpr, EP_xIsSelect) );
    if( destIfNull!=destIfFalse ){
      regCkNull = sqlite3GetTempReg(pParse);
      sqlite3VdbeAddOp3(v, OP_BitAnd, r1, r1, regCkNull);
    }
    for(ii=0; ii<pList->nExpr; ii++){
      r2 = sqlite3ExprCodeTemp(pParse, pList->a[ii].pExpr, &regToFree);
      if( regCkNull && sqlite3ExprCanBeNull(pList->a[ii].pExpr) ){
        sqlite3VdbeAddOp3(v, OP_BitAnd, regCkNull, r2, regCkNull);
      }
      if( ii<pList->nExpr-1 || destIfNull!=destIfFalse ){
        sqlite3VdbeAddOp4(v, OP_Eq, r1, labelOk, r2,
                          (void*)pColl, P4_COLLSEQ);
        VdbeCoverageIf(v, ii<pList->nExpr-1);
        VdbeCoverageIf(v, ii==pList->nExpr-1);
        sqlite3VdbeChangeP5(v, affinity);
      }else{
        assert( destIfNull==destIfFalse );
        sqlite3VdbeAddOp4(v, OP_Ne, r1, destIfFalse, r2,
                          (void*)pColl, P4_COLLSEQ);
        VdbeCoverage(v);
        sqlite3VdbeChangeP5(v, affinity | SQLITE_JUMPIFNULL);
      }
      sqlite3ReleaseTempReg(pParse, regToFree);
    }
    if( regCkNull ){
      sqlite3VdbeAddOp2(v, OP_IsNull, regCkNull, destIfNull); VdbeCoverage(v);
      sqlite3VdbeAddOp2(v, OP_Goto, 0, destIfFalse);
    }
    sqlite3VdbeResolveLabel(v, labelOk);
    sqlite3ReleaseTempReg(pParse, regCkNull);
  }else{
    /* If the LHS is NULL, then the result is either false or NULL depending
    ** on whether the RHS is empty or not, respectively.
    */
    if( sqlite3ExprCanBeNull(pExpr->pLeft) ){
      if( destIfNull==destIfFalse ){
        /* Shortcut for the common case where the false and NULL outcomes are
        ** the same. */
        sqlite3VdbeAddOp2(v, OP_IsNull, r1, destIfNull); VdbeCoverage(v);
      }else{
        int addr1 = sqlite3VdbeAddOp1(v, OP_NotNull, r1); VdbeCoverage(v);
        sqlite3VdbeAddOp2(v, OP_Rewind, pExpr->iTable, destIfFalse);
        VdbeCoverage(v);
        sqlite3VdbeAddOp2(v, OP_Goto, 0, destIfNull);
        sqlite3VdbeJumpHere(v, addr1);
      }
    }
    if( eType==IN_INDEX_ROWID ){
      /* In this case, the RHS is the ROWID of a table b-tree */
      sqlite3VdbeAddOp2(v, OP_MustBeInt, r1, destIfFalse); VdbeCoverage(v);
      sqlite3VdbeAddOp3(v, OP_NotExists, pExpr->iTable, destIfFalse, r1);
      VdbeCoverage(v);
    }else{
      /* In this case, the RHS is an index b-tree. */
      sqlite3VdbeAddOp4(v, OP_Affinity, r1, 1, 0, &affinity, 1);

      /* If the set membership test fails, then the result of the
      ** "x IN (...)" expression must be either 0 or NULL.  If the set
      ** contains no NULL values, then the result is 0.  If the set
      ** contains one or more NULL values, then the result of the
      ** expression is also NULL.
      */
      assert( destIfFalse!=destIfNull || rRhsHasNull==0 );
      if( rRhsHasNull==0 ){
        /* This branch runs if it is known at compile time that the RHS
        ** cannot contain NULL values.  This happens as the result
        ** of a "NOT NULL" constraint in the database schema.
        **
        ** Also run this branch if NULL is equivalent to FALSE
        ** for this particular IN operator.
        */
        sqlite3VdbeAddOp4Int(v, OP_NotFound, pExpr->iTable, destIfFalse, r1, 1);
        VdbeCoverage(v);
      }else{
        /* In this branch, the RHS of the IN might contain a NULL and
        ** the presence of a NULL on the RHS makes a difference in the
        ** outcome.
        */
        int j1;

        /* First check to see if the LHS is contained in the RHS.  If so,
        ** then the answer is TRUE and the presence of NULLs in the RHS does
        ** not matter.  If the LHS is not contained in the RHS, then the
        ** answer is NULL if the RHS contains NULLs and the answer is
        ** FALSE if the RHS is NULL-free.
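        **
        ** For example, if the RHS is (SELECT b FROM t2) for a hypothetical
        ** table t2 whose b column holds {1, 2, NULL}, then "7 IN (SELECT b
        ** FROM t2)" jumps to destIfNull, "2 IN (SELECT b FROM t2)" falls
        ** through (true), and the same test of 7 against a NULL-free
        ** column {1, 2, 3} jumps to destIfFalse.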
        */
        j1 = sqlite3VdbeAddOp4Int(v, OP_Found, pExpr->iTable, 0, r1, 1);
        VdbeCoverage(v);
        sqlite3VdbeAddOp2(v, OP_IsNull, rRhsHasNull, destIfNull); VdbeCoverage(v);
        sqlite3VdbeAddOp2(v, OP_Goto, 0, destIfFalse);
        sqlite3VdbeJumpHere(v, j1);
      }
    }
  }
  sqlite3ReleaseTempReg(pParse, r1);
  sqlite3ExprCachePop(pParse);
  VdbeComment((v, "end IN expr"));
}
#endif /* SQLITE_OMIT_SUBQUERY */

#ifndef SQLITE_OMIT_FLOATING_POINT
/*
** Generate an instruction that will put the floating point
** value described by z[0..n-1] into register iMem.
**
** The z[] string will probably not be zero-terminated.  But the
** z[n] character is guaranteed to be something that does not look
** like the continuation of the number.
*/
static void codeReal(Vdbe *v, const char *z, int negateFlag, int iMem){
  if( ALWAYS(z!=0) ){
    double value;
    sqlite3AtoF(z, &value, sqlite3Strlen30(z), SQLITE_UTF8);
    assert( !sqlite3IsNaN(value) ); /* The new AtoF never returns NaN */
    if( negateFlag ) value = -value;
    sqlite3VdbeAddOp4Dup8(v, OP_Real, 0, iMem, 0, (u8*)&value, P4_REAL);
  }
}
#endif

/*
** Generate an instruction that will put the integer described by
** text z[0..n-1] into register iMem.
**
** Expr.u.zToken is always UTF8 and zero-terminated.
*/
static void codeInteger(Parse *pParse, Expr *pExpr, int negFlag, int iMem){
  Vdbe *v = pParse->pVdbe;
  if( pExpr->flags & EP_IntValue ){
    int i = pExpr->u.iValue;
    assert( i>=0 );
    if( negFlag ) i = -i;
    sqlite3VdbeAddOp2(v, OP_Integer, i, iMem);
  }else{
    int c;
    i64 value;
    const char *z = pExpr->u.zToken;
    assert( z!=0 );
    c = sqlite3DecOrHexToI64(z, &value);
    if( c==0 || (c==2 && negFlag) ){
      if( negFlag ){ value = c==2 ? SMALLEST_INT64 : -value; }
      sqlite3VdbeAddOp4Dup8(v, OP_Int64, 0, iMem, 0, (u8*)&value, P4_INT64);
    }else{
#ifdef SQLITE_OMIT_FLOATING_POINT
      sqlite3ErrorMsg(pParse, "oversized integer: %s%s", negFlag ? "-" : "", z);
#else
#ifndef SQLITE_OMIT_HEX_INTEGER
      if( sqlite3_strnicmp(z,"0x",2)==0 ){
        sqlite3ErrorMsg(pParse, "hex literal too big: %s", z);
      }else
#endif
      {
        codeReal(v, z, negFlag, iMem);
      }
#endif
    }
  }
}

/*
** Clear a cache entry.
*/
static void cacheEntryClear(Parse *pParse, struct yColCache *p){
  if( p->tempReg ){
    if( pParse->nTempReg<ArraySize(pParse->aTempReg) ){
      pParse->aTempReg[pParse->nTempReg++] = p->iReg;
    }
    p->tempReg = 0;
  }
}

/*
** Record in the column cache that a particular column from a
** particular table is stored in a particular register.
*/
SQLITE_PRIVATE void sqlite3ExprCacheStore(Parse *pParse, int iTab, int iCol, int iReg){
  int i;
  int minLru;
  int idxLru;
  struct yColCache *p;

  /* Unless an error has occurred, register numbers are always positive. */
  assert( iReg>0 || pParse->nErr || pParse->db->mallocFailed );
  assert( iCol>=-1 && iCol<32768 );  /* Finite column numbers */

  /* The SQLITE_ColumnCache flag disables the column cache.  This is used
  ** for testing only - to verify that SQLite always gets the same answer
  ** with and without the column cache.
  */
  if( OptimizationDisabled(pParse->db, SQLITE_ColumnCache) ) return;

  /* First replace any existing entry.
  **
  ** Actually, the way the column cache is currently used, we are guaranteed
  ** that the object will never already be in cache.  Verify this guarantee.
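  **
  ** As an illustration of what an entry means:  once the value of column
  ** t1.a (say cursor 3, column 0) has been loaded into register 7, the
  ** entry {iTable=3, iColumn=0, iReg=7} allows a later reference to t1.a
  ** in the same statement to reuse register 7 rather than emitting a
  ** second OP_Column.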
  */
#ifndef NDEBUG
  for(i=0, p=pParse->aColCache; i<SQLITE_N_COLCACHE; i++, p++){
    assert( p->iReg==0 || p->iTable!=iTab || p->iColumn!=iCol );
  }
#endif

  /* Find an empty slot and replace it */
  for(i=0, p=pParse->aColCache; i<SQLITE_N_COLCACHE; i++, p++){
    if( p->iReg==0 ){
      p->iLevel = pParse->iCacheLevel;
      p->iTable = iTab;
      p->iColumn = iCol;
      p->iReg = iReg;
      p->tempReg = 0;
      p->lru = pParse->iCacheCnt++;
      return;
    }
  }

  /* Replace the least recently used */
  minLru = 0x7fffffff;
  idxLru = -1;
  for(i=0, p=pParse->aColCache; i<SQLITE_N_COLCACHE; i++, p++){
    if( p->lru<minLru ){
      idxLru = i;
      minLru = p->lru;
    }
  }
  if( ALWAYS(idxLru>=0) ){
    p = &pParse->aColCache[idxLru];
    p->iLevel = pParse->iCacheLevel;
    p->iTable = iTab;
    p->iColumn = iCol;
    p->iReg = iReg;
    p->tempReg = 0;
    p->lru = pParse->iCacheCnt++;
    return;
  }
}

/*
** Indicate that registers between iReg..iReg+nReg-1 are being overwritten.
** Purge the range of registers from the column cache.
*/
SQLITE_PRIVATE void sqlite3ExprCacheRemove(Parse *pParse, int iReg, int nReg){
  int i;
  int iLast = iReg + nReg - 1;
  struct yColCache *p;
  for(i=0, p=pParse->aColCache; i<SQLITE_N_COLCACHE; i++, p++){
    int r = p->iReg;
    if( r>=iReg && r<=iLast ){
      cacheEntryClear(pParse, p);
      p->iReg = 0;
    }
  }
}

/*
** Remember the current column cache context.  Any new entries added to
** the column cache after this call are removed when the corresponding
** pop occurs.
*/
SQLITE_PRIVATE void sqlite3ExprCachePush(Parse *pParse){
  pParse->iCacheLevel++;
#ifdef SQLITE_DEBUG
  if( pParse->db->flags & SQLITE_VdbeAddopTrace ){
    printf("PUSH to %d\n", pParse->iCacheLevel);
  }
#endif
}

/*
** Remove from the column cache any entries that were added since the
** previous sqlite3ExprCachePush operation.  In other words, restore
** the cache to the state it was in prior to the most recent Push.
*/
SQLITE_PRIVATE void sqlite3ExprCachePop(Parse *pParse){
  int i;
  struct yColCache *p;
  assert( pParse->iCacheLevel>=1 );
  pParse->iCacheLevel--;
#ifdef SQLITE_DEBUG
  if( pParse->db->flags & SQLITE_VdbeAddopTrace ){
    printf("POP to %d\n", pParse->iCacheLevel);
  }
#endif
  for(i=0, p=pParse->aColCache; i<SQLITE_N_COLCACHE; i++, p++){
    if( p->iReg && p->iLevel>pParse->iCacheLevel ){
      cacheEntryClear(pParse, p);
      p->iReg = 0;
    }
  }
}

/*
** When a cached column is reused, make sure that its register is
** no longer available as a temp register.  ticket #3879:  that same
** register might be in the cache in multiple places, so be sure to
** get them all.
*/
static void sqlite3ExprCachePinRegister(Parse *pParse, int iReg){
  int i;
  struct yColCache *p;
  for(i=0, p=pParse->aColCache; i<SQLITE_N_COLCACHE; i++, p++){
    if( p->iReg==iReg ){
      p->tempReg = 0;
    }
  }
}

/*
** Generate code to extract the value of the iCol-th column of a table.
*/
SQLITE_PRIVATE void sqlite3ExprCodeGetColumnOfTable(
  Vdbe *v,        /* The VDBE under construction */
  Table *pTab,    /* The table containing the value */
  int iTabCur,    /* The table cursor.  Or the PK cursor for WITHOUT ROWID */
  int iCol,       /* Index of the column to extract */
  int regOut      /* Extract the value into this register */
){
  if( iCol<0 || iCol==pTab->iPKey ){
    sqlite3VdbeAddOp2(v, OP_Rowid, iTabCur, regOut);
  }else{
    int op = IsVirtual(pTab) ? OP_VColumn : OP_Column;
    int x = iCol;
    if( !HasRowid(pTab) ){
      x = sqlite3ColumnOfIndex(sqlite3PrimaryKeyIndex(pTab), iCol);
    }
    sqlite3VdbeAddOp3(v, op, iTabCur, x, regOut);
  }
  if( iCol>=0 ){
    sqlite3ColumnDefault(v, pTab, iCol, regOut);
  }
}

/*
** Generate code that will extract the iColumn-th column from
** table pTab and store the column value in a register.  An effort
** is made to store the column value in register iReg, but this is
** not guaranteed.  The location of the column value is returned.
**
** There must be an open cursor to pTab in iTable when this routine
** is called.  If iColumn<0 then code is generated that extracts the rowid.
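**
** For example, a request for column 2 of an ordinary rowid table opened
** on cursor 3 emits OP_Column with P1==3 and P2==2, while a request with
** iColumn<0, or for the INTEGER PRIMARY KEY column, emits OP_Rowid
** instead, as coded by sqlite3ExprCodeGetColumnOfTable() above.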
*/ SQLITE_PRIVATE int sqlite3ExprCodeGetColumn( Parse *pParse, /* Parsing and code generating context */ Table *pTab, /* Description of the table we are reading from */ int iColumn, /* Index of the table column */ int iTable, /* The cursor pointing to the table */ int iReg, /* Store results here */ u8 p5 /* P5 value for OP_Column */ ){ Vdbe *v = pParse->pVdbe; int i; struct yColCache *p; for(i=0, p=pParse->aColCache; iiReg>0 && p->iTable==iTable && p->iColumn==iColumn ){ p->lru = pParse->iCacheCnt++; sqlite3ExprCachePinRegister(pParse, p->iReg); return p->iReg; } } assert( v!=0 ); sqlite3ExprCodeGetColumnOfTable(v, pTab, iTable, iColumn, iReg); if( p5 ){ sqlite3VdbeChangeP5(v, p5); }else{ sqlite3ExprCacheStore(pParse, iTable, iColumn, iReg); } return iReg; } /* ** Clear all column cache entries. */ SQLITE_PRIVATE void sqlite3ExprCacheClear(Parse *pParse){ int i; struct yColCache *p; #if SQLITE_DEBUG if( pParse->db->flags & SQLITE_VdbeAddopTrace ){ printf("CLEAR\n"); } #endif for(i=0, p=pParse->aColCache; iiReg ){ cacheEntryClear(pParse, p); p->iReg = 0; } } } /* ** Record the fact that an affinity change has occurred on iCount ** registers starting with iStart. */ SQLITE_PRIVATE void sqlite3ExprCacheAffinityChange(Parse *pParse, int iStart, int iCount){ sqlite3ExprCacheRemove(pParse, iStart, iCount); } /* ** Generate code to move content from registers iFrom...iFrom+nReg-1 ** over to iTo..iTo+nReg-1. Keep the column cache up-to-date. */ SQLITE_PRIVATE void sqlite3ExprCodeMove(Parse *pParse, int iFrom, int iTo, int nReg){ assert( iFrom>=iTo+nReg || iFrom+nReg<=iTo ); sqlite3VdbeAddOp3(pParse->pVdbe, OP_Move, iFrom, iTo, nReg); sqlite3ExprCacheRemove(pParse, iFrom, nReg); } #if defined(SQLITE_DEBUG) || defined(SQLITE_COVERAGE_TEST) /* ** Return true if any register in the range iFrom..iTo (inclusive) ** is used as part of the column cache. ** ** This routine is used within assert() and testcase() macros only ** and does not appear in a normal build. */ static int usedAsColumnCache(Parse *pParse, int iFrom, int iTo){ int i; struct yColCache *p; for(i=0, p=pParse->aColCache; iiReg; if( r>=iFrom && r<=iTo ) return 1; /*NO_TEST*/ } return 0; } #endif /* SQLITE_DEBUG || SQLITE_COVERAGE_TEST */ /* ** Convert an expression node to a TK_REGISTER */ static void exprToRegister(Expr *p, int iReg){ p->op2 = p->op; p->op = TK_REGISTER; p->iTable = iReg; ExprClearProperty(p, EP_Skip); } /* ** Generate code into the current Vdbe to evaluate the given ** expression. Attempt to store the results in register "target". ** Return the register where results are stored. ** ** With this routine, there is no guarantee that results will ** be stored in target. The result might be stored in some other ** register if it is convenient to do so. The calling function ** must check the return code and move the results to the desired ** register. 
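**
** A typical caller therefore follows the same pattern that
** sqlite3ExprCode() uses below:
**
**     inReg = sqlite3ExprCodeTarget(pParse, pExpr, target);
**     if( inReg!=target && pParse->pVdbe ){
**       sqlite3VdbeAddOp2(pParse->pVdbe, OP_SCopy, inReg, target);
**     }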
*/ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target){ Vdbe *v = pParse->pVdbe; /* The VM under construction */ int op; /* The opcode being coded */ int inReg = target; /* Results stored in register inReg */ int regFree1 = 0; /* If non-zero free this temporary register */ int regFree2 = 0; /* If non-zero free this temporary register */ int r1, r2, r3, r4; /* Various register numbers */ sqlite3 *db = pParse->db; /* The database connection */ Expr tempX; /* Temporary expression node */ assert( target>0 && target<=pParse->nMem ); if( v==0 ){ assert( pParse->db->mallocFailed ); return 0; } if( pExpr==0 ){ op = TK_NULL; }else{ op = pExpr->op; } switch( op ){ case TK_AGG_COLUMN: { AggInfo *pAggInfo = pExpr->pAggInfo; struct AggInfo_col *pCol = &pAggInfo->aCol[pExpr->iAgg]; if( !pAggInfo->directMode ){ assert( pCol->iMem>0 ); inReg = pCol->iMem; break; }else if( pAggInfo->useSortingIdx ){ sqlite3VdbeAddOp3(v, OP_Column, pAggInfo->sortingIdxPTab, pCol->iSorterColumn, target); break; } /* Otherwise, fall thru into the TK_COLUMN case */ } case TK_COLUMN: { int iTab = pExpr->iTable; if( iTab<0 ){ if( pParse->ckBase>0 ){ /* Generating CHECK constraints or inserting into partial index */ inReg = pExpr->iColumn + pParse->ckBase; break; }else{ /* Deleting from a partial index */ iTab = pParse->iPartIdxTab; } } inReg = sqlite3ExprCodeGetColumn(pParse, pExpr->pTab, pExpr->iColumn, iTab, target, pExpr->op2); break; } case TK_INTEGER: { codeInteger(pParse, pExpr, 0, target); break; } #ifndef SQLITE_OMIT_FLOATING_POINT case TK_FLOAT: { assert( !ExprHasProperty(pExpr, EP_IntValue) ); codeReal(v, pExpr->u.zToken, 0, target); break; } #endif case TK_STRING: { assert( !ExprHasProperty(pExpr, EP_IntValue) ); sqlite3VdbeAddOp4(v, OP_String8, 0, target, 0, pExpr->u.zToken, 0); break; } case TK_NULL: { sqlite3VdbeAddOp2(v, OP_Null, 0, target); break; } #ifndef SQLITE_OMIT_BLOB_LITERAL case TK_BLOB: { int n; const char *z; char *zBlob; assert( !ExprHasProperty(pExpr, EP_IntValue) ); assert( pExpr->u.zToken[0]=='x' || pExpr->u.zToken[0]=='X' ); assert( pExpr->u.zToken[1]=='\'' ); z = &pExpr->u.zToken[2]; n = sqlite3Strlen30(z) - 1; assert( z[n]=='\'' ); zBlob = sqlite3HexToBlob(sqlite3VdbeDb(v), z, n); sqlite3VdbeAddOp4(v, OP_Blob, n/2, target, 0, zBlob, P4_DYNAMIC); break; } #endif case TK_VARIABLE: { assert( !ExprHasProperty(pExpr, EP_IntValue) ); assert( pExpr->u.zToken!=0 ); assert( pExpr->u.zToken[0]!=0 ); sqlite3VdbeAddOp2(v, OP_Variable, pExpr->iColumn, target); if( pExpr->u.zToken[1]!=0 ){ assert( pExpr->u.zToken[0]=='?' 
|| strcmp(pExpr->u.zToken, pParse->azVar[pExpr->iColumn-1])==0 ); sqlite3VdbeChangeP4(v, -1, pParse->azVar[pExpr->iColumn-1], P4_STATIC); } break; } case TK_REGISTER: { inReg = pExpr->iTable; break; } case TK_AS: { inReg = sqlite3ExprCodeTarget(pParse, pExpr->pLeft, target); break; } #ifndef SQLITE_OMIT_CAST case TK_CAST: { /* Expressions of the form: CAST(pLeft AS token) */ inReg = sqlite3ExprCodeTarget(pParse, pExpr->pLeft, target); if( inReg!=target ){ sqlite3VdbeAddOp2(v, OP_SCopy, inReg, target); inReg = target; } sqlite3VdbeAddOp2(v, OP_Cast, target, sqlite3AffinityType(pExpr->u.zToken, 0)); testcase( usedAsColumnCache(pParse, inReg, inReg) ); sqlite3ExprCacheAffinityChange(pParse, inReg, 1); break; } #endif /* SQLITE_OMIT_CAST */ case TK_LT: case TK_LE: case TK_GT: case TK_GE: case TK_NE: case TK_EQ: { r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, ®Free1); r2 = sqlite3ExprCodeTemp(pParse, pExpr->pRight, ®Free2); codeCompare(pParse, pExpr->pLeft, pExpr->pRight, op, r1, r2, inReg, SQLITE_STOREP2); assert(TK_LT==OP_Lt); testcase(op==OP_Lt); VdbeCoverageIf(v,op==OP_Lt); assert(TK_LE==OP_Le); testcase(op==OP_Le); VdbeCoverageIf(v,op==OP_Le); assert(TK_GT==OP_Gt); testcase(op==OP_Gt); VdbeCoverageIf(v,op==OP_Gt); assert(TK_GE==OP_Ge); testcase(op==OP_Ge); VdbeCoverageIf(v,op==OP_Ge); assert(TK_EQ==OP_Eq); testcase(op==OP_Eq); VdbeCoverageIf(v,op==OP_Eq); assert(TK_NE==OP_Ne); testcase(op==OP_Ne); VdbeCoverageIf(v,op==OP_Ne); testcase( regFree1==0 ); testcase( regFree2==0 ); break; } case TK_IS: case TK_ISNOT: { testcase( op==TK_IS ); testcase( op==TK_ISNOT ); r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, ®Free1); r2 = sqlite3ExprCodeTemp(pParse, pExpr->pRight, ®Free2); op = (op==TK_IS) ? TK_EQ : TK_NE; codeCompare(pParse, pExpr->pLeft, pExpr->pRight, op, r1, r2, inReg, SQLITE_STOREP2 | SQLITE_NULLEQ); VdbeCoverageIf(v, op==TK_EQ); VdbeCoverageIf(v, op==TK_NE); testcase( regFree1==0 ); testcase( regFree2==0 ); break; } case TK_AND: case TK_OR: case TK_PLUS: case TK_STAR: case TK_MINUS: case TK_REM: case TK_BITAND: case TK_BITOR: case TK_SLASH: case TK_LSHIFT: case TK_RSHIFT: case TK_CONCAT: { assert( TK_AND==OP_And ); testcase( op==TK_AND ); assert( TK_OR==OP_Or ); testcase( op==TK_OR ); assert( TK_PLUS==OP_Add ); testcase( op==TK_PLUS ); assert( TK_MINUS==OP_Subtract ); testcase( op==TK_MINUS ); assert( TK_REM==OP_Remainder ); testcase( op==TK_REM ); assert( TK_BITAND==OP_BitAnd ); testcase( op==TK_BITAND ); assert( TK_BITOR==OP_BitOr ); testcase( op==TK_BITOR ); assert( TK_SLASH==OP_Divide ); testcase( op==TK_SLASH ); assert( TK_LSHIFT==OP_ShiftLeft ); testcase( op==TK_LSHIFT ); assert( TK_RSHIFT==OP_ShiftRight ); testcase( op==TK_RSHIFT ); assert( TK_CONCAT==OP_Concat ); testcase( op==TK_CONCAT ); r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, ®Free1); r2 = sqlite3ExprCodeTemp(pParse, pExpr->pRight, ®Free2); sqlite3VdbeAddOp3(v, op, r2, r1, target); testcase( regFree1==0 ); testcase( regFree2==0 ); break; } case TK_UMINUS: { Expr *pLeft = pExpr->pLeft; assert( pLeft ); if( pLeft->op==TK_INTEGER ){ codeInteger(pParse, pLeft, 1, target); #ifndef SQLITE_OMIT_FLOATING_POINT }else if( pLeft->op==TK_FLOAT ){ assert( !ExprHasProperty(pExpr, EP_IntValue) ); codeReal(v, pLeft->u.zToken, 1, target); #endif }else{ tempX.op = TK_INTEGER; tempX.flags = EP_IntValue|EP_TokenOnly; tempX.u.iValue = 0; r1 = sqlite3ExprCodeTemp(pParse, &tempX, ®Free1); r2 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, ®Free2); sqlite3VdbeAddOp3(v, OP_Subtract, r2, r1, target); testcase( regFree2==0 ); } inReg = target; 
break; } case TK_BITNOT: case TK_NOT: { assert( TK_BITNOT==OP_BitNot ); testcase( op==TK_BITNOT ); assert( TK_NOT==OP_Not ); testcase( op==TK_NOT ); r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, ®Free1); testcase( regFree1==0 ); inReg = target; sqlite3VdbeAddOp2(v, op, r1, inReg); break; } case TK_ISNULL: case TK_NOTNULL: { int addr; assert( TK_ISNULL==OP_IsNull ); testcase( op==TK_ISNULL ); assert( TK_NOTNULL==OP_NotNull ); testcase( op==TK_NOTNULL ); sqlite3VdbeAddOp2(v, OP_Integer, 1, target); r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, ®Free1); testcase( regFree1==0 ); addr = sqlite3VdbeAddOp1(v, op, r1); VdbeCoverageIf(v, op==TK_ISNULL); VdbeCoverageIf(v, op==TK_NOTNULL); sqlite3VdbeAddOp2(v, OP_Integer, 0, target); sqlite3VdbeJumpHere(v, addr); break; } case TK_AGG_FUNCTION: { AggInfo *pInfo = pExpr->pAggInfo; if( pInfo==0 ){ assert( !ExprHasProperty(pExpr, EP_IntValue) ); sqlite3ErrorMsg(pParse, "misuse of aggregate: %s()", pExpr->u.zToken); }else{ inReg = pInfo->aFunc[pExpr->iAgg].iMem; } break; } case TK_FUNCTION: { ExprList *pFarg; /* List of function arguments */ int nFarg; /* Number of function arguments */ FuncDef *pDef; /* The function definition object */ int nId; /* Length of the function name in bytes */ const char *zId; /* The function name */ u32 constMask = 0; /* Mask of function arguments that are constant */ int i; /* Loop counter */ u8 enc = ENC(db); /* The text encoding used by this database */ CollSeq *pColl = 0; /* A collating sequence */ assert( !ExprHasProperty(pExpr, EP_xIsSelect) ); if( ExprHasProperty(pExpr, EP_TokenOnly) ){ pFarg = 0; }else{ pFarg = pExpr->x.pList; } nFarg = pFarg ? pFarg->nExpr : 0; assert( !ExprHasProperty(pExpr, EP_IntValue) ); zId = pExpr->u.zToken; nId = sqlite3Strlen30(zId); pDef = sqlite3FindFunction(db, zId, nId, nFarg, enc, 0); if( pDef==0 || pDef->xFunc==0 ){ sqlite3ErrorMsg(pParse, "unknown function: %.*s()", nId, zId); break; } /* Attempt a direct implementation of the built-in COALESCE() and ** IFNULL() functions. This avoids unnecessary evaluation of ** arguments past the first non-NULL argument. */ if( pDef->funcFlags & SQLITE_FUNC_COALESCE ){ int endCoalesce = sqlite3VdbeMakeLabel(v); assert( nFarg>=2 ); sqlite3ExprCode(pParse, pFarg->a[0].pExpr, target); for(i=1; ia[i].pExpr, target); sqlite3ExprCachePop(pParse); } sqlite3VdbeResolveLabel(v, endCoalesce); break; } /* The UNLIKELY() function is a no-op. The result is the value ** of the first argument. */ if( pDef->funcFlags & SQLITE_FUNC_UNLIKELY ){ assert( nFarg>=1 ); inReg = sqlite3ExprCodeTarget(pParse, pFarg->a[0].pExpr, target); break; } for(i=0; ia[i].pExpr) ){ testcase( i==31 ); constMask |= MASKBIT32(i); } if( (pDef->funcFlags & SQLITE_FUNC_NEEDCOLL)!=0 && !pColl ){ pColl = sqlite3ExprCollSeq(pParse, pFarg->a[i].pExpr); } } if( pFarg ){ if( constMask ){ r1 = pParse->nMem+1; pParse->nMem += nFarg; }else{ r1 = sqlite3GetTempRange(pParse, nFarg); } /* For length() and typeof() functions with a column argument, ** set the P5 parameter to the OP_Column opcode to OPFLAG_LENGTHARG ** or OPFLAG_TYPEOFARG respectively, to avoid unnecessary data ** loading. 
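      **
      ** For example, in "SELECT typeof(x) FROM t1" the OP_Column that loads
      ** x only needs the datatype from the record header, and in
      ** "SELECT length(x) FROM t1" the full text or blob content need not
      ** be loaded just to measure its length.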
*/ if( (pDef->funcFlags & (SQLITE_FUNC_LENGTH|SQLITE_FUNC_TYPEOF))!=0 ){ u8 exprOp; assert( nFarg==1 ); assert( pFarg->a[0].pExpr!=0 ); exprOp = pFarg->a[0].pExpr->op; if( exprOp==TK_COLUMN || exprOp==TK_AGG_COLUMN ){ assert( SQLITE_FUNC_LENGTH==OPFLAG_LENGTHARG ); assert( SQLITE_FUNC_TYPEOF==OPFLAG_TYPEOFARG ); testcase( pDef->funcFlags & OPFLAG_LENGTHARG ); pFarg->a[0].pExpr->op2 = pDef->funcFlags & (OPFLAG_LENGTHARG|OPFLAG_TYPEOFARG); } } sqlite3ExprCachePush(pParse); /* Ticket 2ea2425d34be */ sqlite3ExprCodeExprList(pParse, pFarg, r1, SQLITE_ECEL_DUP|SQLITE_ECEL_FACTOR); sqlite3ExprCachePop(pParse); /* Ticket 2ea2425d34be */ }else{ r1 = 0; } #ifndef SQLITE_OMIT_VIRTUALTABLE /* Possibly overload the function if the first argument is ** a virtual table column. ** ** For infix functions (LIKE, GLOB, REGEXP, and MATCH) use the ** second argument, not the first, as the argument to test to ** see if it is a column in a virtual table. This is done because ** the left operand of infix functions (the operand we want to ** control overloading) ends up as the second argument to the ** function. The expression "A glob B" is equivalent to ** "glob(B,A). We want to use the A in "A glob B" to test ** for function overloading. But we use the B term in "glob(B,A)". */ if( nFarg>=2 && (pExpr->flags & EP_InfixFunc) ){ pDef = sqlite3VtabOverloadFunction(db, pDef, nFarg, pFarg->a[1].pExpr); }else if( nFarg>0 ){ pDef = sqlite3VtabOverloadFunction(db, pDef, nFarg, pFarg->a[0].pExpr); } #endif if( pDef->funcFlags & SQLITE_FUNC_NEEDCOLL ){ if( !pColl ) pColl = db->pDfltColl; sqlite3VdbeAddOp4(v, OP_CollSeq, 0, 0, 0, (char *)pColl, P4_COLLSEQ); } sqlite3VdbeAddOp4(v, OP_Function0, constMask, r1, target, (char*)pDef, P4_FUNCDEF); sqlite3VdbeChangeP5(v, (u8)nFarg); if( nFarg && constMask==0 ){ sqlite3ReleaseTempRange(pParse, r1, nFarg); } break; } #ifndef SQLITE_OMIT_SUBQUERY case TK_EXISTS: case TK_SELECT: { testcase( op==TK_EXISTS ); testcase( op==TK_SELECT ); inReg = sqlite3CodeSubselect(pParse, pExpr, 0, 0); break; } case TK_IN: { int destIfFalse = sqlite3VdbeMakeLabel(v); int destIfNull = sqlite3VdbeMakeLabel(v); sqlite3VdbeAddOp2(v, OP_Null, 0, target); sqlite3ExprCodeIN(pParse, pExpr, destIfFalse, destIfNull); sqlite3VdbeAddOp2(v, OP_Integer, 1, target); sqlite3VdbeResolveLabel(v, destIfFalse); sqlite3VdbeAddOp2(v, OP_AddImm, target, 0); sqlite3VdbeResolveLabel(v, destIfNull); break; } #endif /* SQLITE_OMIT_SUBQUERY */ /* ** x BETWEEN y AND z ** ** This is equivalent to ** ** x>=y AND x<=z ** ** X is stored in pExpr->pLeft. ** Y is stored in pExpr->pList->a[0].pExpr. ** Z is stored in pExpr->pList->a[1].pExpr. 
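    **
    ** For example, "age BETWEEN 18 AND 65" (for a hypothetical column age)
    ** is coded exactly like "age>=18 AND age<=65", with pLeft holding the
    ** age expression, x.pList->a[0].pExpr holding 18, and
    ** x.pList->a[1].pExpr holding 65.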
*/ case TK_BETWEEN: { Expr *pLeft = pExpr->pLeft; struct ExprList_item *pLItem = pExpr->x.pList->a; Expr *pRight = pLItem->pExpr; r1 = sqlite3ExprCodeTemp(pParse, pLeft, ®Free1); r2 = sqlite3ExprCodeTemp(pParse, pRight, ®Free2); testcase( regFree1==0 ); testcase( regFree2==0 ); r3 = sqlite3GetTempReg(pParse); r4 = sqlite3GetTempReg(pParse); codeCompare(pParse, pLeft, pRight, OP_Ge, r1, r2, r3, SQLITE_STOREP2); VdbeCoverage(v); pLItem++; pRight = pLItem->pExpr; sqlite3ReleaseTempReg(pParse, regFree2); r2 = sqlite3ExprCodeTemp(pParse, pRight, ®Free2); testcase( regFree2==0 ); codeCompare(pParse, pLeft, pRight, OP_Le, r1, r2, r4, SQLITE_STOREP2); VdbeCoverage(v); sqlite3VdbeAddOp3(v, OP_And, r3, r4, target); sqlite3ReleaseTempReg(pParse, r3); sqlite3ReleaseTempReg(pParse, r4); break; } case TK_COLLATE: case TK_UPLUS: { inReg = sqlite3ExprCodeTarget(pParse, pExpr->pLeft, target); break; } case TK_TRIGGER: { /* If the opcode is TK_TRIGGER, then the expression is a reference ** to a column in the new.* or old.* pseudo-tables available to ** trigger programs. In this case Expr.iTable is set to 1 for the ** new.* pseudo-table, or 0 for the old.* pseudo-table. Expr.iColumn ** is set to the column of the pseudo-table to read, or to -1 to ** read the rowid field. ** ** The expression is implemented using an OP_Param opcode. The p1 ** parameter is set to 0 for an old.rowid reference, or to (i+1) ** to reference another column of the old.* pseudo-table, where ** i is the index of the column. For a new.rowid reference, p1 is ** set to (n+1), where n is the number of columns in each pseudo-table. ** For a reference to any other column in the new.* pseudo-table, p1 ** is set to (n+2+i), where n and i are as defined previously. For ** example, if the table on which triggers are being fired is ** declared as: ** ** CREATE TABLE t1(a, b); ** ** Then p1 is interpreted as follows: ** ** p1==0 -> old.rowid p1==3 -> new.rowid ** p1==1 -> old.a p1==4 -> new.a ** p1==2 -> old.b p1==5 -> new.b */ Table *pTab = pExpr->pTab; int p1 = pExpr->iTable * (pTab->nCol+1) + 1 + pExpr->iColumn; assert( pExpr->iTable==0 || pExpr->iTable==1 ); assert( pExpr->iColumn>=-1 && pExpr->iColumnnCol ); assert( pTab->iPKey<0 || pExpr->iColumn!=pTab->iPKey ); assert( p1>=0 && p1<(pTab->nCol*2+2) ); sqlite3VdbeAddOp2(v, OP_Param, p1, target); VdbeComment((v, "%s.%s -> $%d", (pExpr->iTable ? "new" : "old"), (pExpr->iColumn<0 ? "rowid" : pExpr->pTab->aCol[pExpr->iColumn].zName), target )); #ifndef SQLITE_OMIT_FLOATING_POINT /* If the column has REAL affinity, it may currently be stored as an ** integer. Use OP_RealAffinity to make sure it is really real. ** ** EVIDENCE-OF: R-60985-57662 SQLite will convert the value back to ** floating point when extracting it from the record. */ if( pExpr->iColumn>=0 && pTab->aCol[pExpr->iColumn].affinity==SQLITE_AFF_REAL ){ sqlite3VdbeAddOp1(v, OP_RealAffinity, target); } #endif break; } /* ** Form A: ** CASE x WHEN e1 THEN r1 WHEN e2 THEN r2 ... WHEN eN THEN rN ELSE y END ** ** Form B: ** CASE WHEN e1 THEN r1 WHEN e2 THEN r2 ... WHEN eN THEN rN ELSE y END ** ** Form A is can be transformed into the equivalent form B as follows: ** CASE WHEN x=e1 THEN r1 WHEN x=e2 THEN r2 ... ** WHEN x=eN THEN rN ELSE y END ** ** X (if it exists) is in pExpr->pLeft. ** Y is in the last element of pExpr->x.pList if pExpr->x.pList->nExpr is ** odd. The Y is also optional. If the number of elements in x.pList ** is even, then Y is omitted and the "otherwise" result is NULL. 
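    **
    ** For example, the form A expression
    **
    **     CASE x WHEN 1 THEN 'one' WHEN 2 THEN 'two' ELSE 'many' END
    **
    ** is treated as if it had been written in form B as
    **
    **     CASE WHEN x=1 THEN 'one' WHEN x=2 THEN 'two' ELSE 'many' END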
** Ei is in pExpr->pList->a[i*2] and Ri is pExpr->pList->a[i*2+1]. ** ** The result of the expression is the Ri for the first matching Ei, ** or if there is no matching Ei, the ELSE term Y, or if there is ** no ELSE term, NULL. */ default: assert( op==TK_CASE ); { int endLabel; /* GOTO label for end of CASE stmt */ int nextCase; /* GOTO label for next WHEN clause */ int nExpr; /* 2x number of WHEN terms */ int i; /* Loop counter */ ExprList *pEList; /* List of WHEN terms */ struct ExprList_item *aListelem; /* Array of WHEN terms */ Expr opCompare; /* The X==Ei expression */ Expr *pX; /* The X expression */ Expr *pTest = 0; /* X==Ei (form A) or just Ei (form B) */ VVA_ONLY( int iCacheLevel = pParse->iCacheLevel; ) assert( !ExprHasProperty(pExpr, EP_xIsSelect) && pExpr->x.pList ); assert(pExpr->x.pList->nExpr > 0); pEList = pExpr->x.pList; aListelem = pEList->a; nExpr = pEList->nExpr; endLabel = sqlite3VdbeMakeLabel(v); if( (pX = pExpr->pLeft)!=0 ){ tempX = *pX; testcase( pX->op==TK_COLUMN ); exprToRegister(&tempX, sqlite3ExprCodeTemp(pParse, pX, ®Free1)); testcase( regFree1==0 ); opCompare.op = TK_EQ; opCompare.pLeft = &tempX; pTest = &opCompare; /* Ticket b351d95f9cd5ef17e9d9dbae18f5ca8611190001: ** The value in regFree1 might get SCopy-ed into the file result. ** So make sure that the regFree1 register is not reused for other ** purposes and possibly overwritten. */ regFree1 = 0; } for(i=0; iop==TK_COLUMN ); sqlite3ExprIfFalse(pParse, pTest, nextCase, SQLITE_JUMPIFNULL); testcase( aListelem[i+1].pExpr->op==TK_COLUMN ); sqlite3ExprCode(pParse, aListelem[i+1].pExpr, target); sqlite3VdbeAddOp2(v, OP_Goto, 0, endLabel); sqlite3ExprCachePop(pParse); sqlite3VdbeResolveLabel(v, nextCase); } if( (nExpr&1)!=0 ){ sqlite3ExprCachePush(pParse); sqlite3ExprCode(pParse, pEList->a[nExpr-1].pExpr, target); sqlite3ExprCachePop(pParse); }else{ sqlite3VdbeAddOp2(v, OP_Null, 0, target); } assert( db->mallocFailed || pParse->nErr>0 || pParse->iCacheLevel==iCacheLevel ); sqlite3VdbeResolveLabel(v, endLabel); break; } #ifndef SQLITE_OMIT_TRIGGER case TK_RAISE: { assert( pExpr->affinity==OE_Rollback || pExpr->affinity==OE_Abort || pExpr->affinity==OE_Fail || pExpr->affinity==OE_Ignore ); if( !pParse->pTriggerTab ){ sqlite3ErrorMsg(pParse, "RAISE() may only be used within a trigger-program"); return 0; } if( pExpr->affinity==OE_Abort ){ sqlite3MayAbort(pParse); } assert( !ExprHasProperty(pExpr, EP_IntValue) ); if( pExpr->affinity==OE_Ignore ){ sqlite3VdbeAddOp4( v, OP_Halt, SQLITE_OK, OE_Ignore, 0, pExpr->u.zToken,0); VdbeCoverage(v); }else{ sqlite3HaltConstraint(pParse, SQLITE_CONSTRAINT_TRIGGER, pExpr->affinity, pExpr->u.zToken, 0, 0); } break; } #endif } sqlite3ReleaseTempReg(pParse, regFree1); sqlite3ReleaseTempReg(pParse, regFree2); return inReg; } /* ** Factor out the code of the given expression to initialization time. */ SQLITE_PRIVATE void sqlite3ExprCodeAtInit( Parse *pParse, /* Parsing context */ Expr *pExpr, /* The expression to code when the VDBE initializes */ int regDest, /* Store the value in this register */ u8 reusable /* True if this expression is reusable */ ){ ExprList *p; assert( ConstFactorOk(pParse) ); p = pParse->pConstExpr; pExpr = sqlite3ExprDup(pParse->db, pExpr, 0); p = sqlite3ExprListAppend(pParse, p, pExpr); if( p ){ struct ExprList_item *pItem = &p->a[p->nExpr-1]; pItem->u.iConstExprReg = regDest; pItem->reusable = reusable; } pParse->pConstExpr = p; } /* ** Generate code to evaluate an expression and store the results ** into a register. 
Return the register number where the results ** are stored. ** ** If the register is a temporary register that can be deallocated, ** then write its number into *pReg. If the result register is not ** a temporary, then set *pReg to zero. ** ** If pExpr is a constant, then this routine might generate this ** code to fill the register in the initialization section of the ** VDBE program, in order to factor it out of the evaluation loop. */ SQLITE_PRIVATE int sqlite3ExprCodeTemp(Parse *pParse, Expr *pExpr, int *pReg){ int r2; pExpr = sqlite3ExprSkipCollate(pExpr); if( ConstFactorOk(pParse) && pExpr->op!=TK_REGISTER && sqlite3ExprIsConstantNotJoin(pExpr) ){ ExprList *p = pParse->pConstExpr; int i; *pReg = 0; if( p ){ struct ExprList_item *pItem; for(pItem=p->a, i=p->nExpr; i>0; pItem++, i--){ if( pItem->reusable && sqlite3ExprCompare(pItem->pExpr,pExpr,-1)==0 ){ return pItem->u.iConstExprReg; } } } r2 = ++pParse->nMem; sqlite3ExprCodeAtInit(pParse, pExpr, r2, 1); }else{ int r1 = sqlite3GetTempReg(pParse); r2 = sqlite3ExprCodeTarget(pParse, pExpr, r1); if( r2==r1 ){ *pReg = r1; }else{ sqlite3ReleaseTempReg(pParse, r1); *pReg = 0; } } return r2; } /* ** Generate code that will evaluate expression pExpr and store the ** results in register target. The results are guaranteed to appear ** in register target. */ SQLITE_PRIVATE void sqlite3ExprCode(Parse *pParse, Expr *pExpr, int target){ int inReg; assert( target>0 && target<=pParse->nMem ); if( pExpr && pExpr->op==TK_REGISTER ){ sqlite3VdbeAddOp2(pParse->pVdbe, OP_Copy, pExpr->iTable, target); }else{ inReg = sqlite3ExprCodeTarget(pParse, pExpr, target); assert( pParse->pVdbe || pParse->db->mallocFailed ); if( inReg!=target && pParse->pVdbe ){ sqlite3VdbeAddOp2(pParse->pVdbe, OP_SCopy, inReg, target); } } } /* ** Generate code that will evaluate expression pExpr and store the ** results in register target. The results are guaranteed to appear ** in register target. If the expression is constant, then this routine ** might choose to code the expression at initialization time. */ SQLITE_PRIVATE void sqlite3ExprCodeFactorable(Parse *pParse, Expr *pExpr, int target){ if( pParse->okConstFactor && sqlite3ExprIsConstant(pExpr) ){ sqlite3ExprCodeAtInit(pParse, pExpr, target, 0); }else{ sqlite3ExprCode(pParse, pExpr, target); } } /* ** Generate code that evaluates the given expression and puts the result ** in register target. ** ** Also make a copy of the expression results into another "cache" register ** and modify the expression so that the next time it is evaluated, ** the result is a copy of the cache register. ** ** This routine is used for expressions that are used multiple ** times. They are evaluated once and the results of the expression ** are reused. */ SQLITE_PRIVATE void sqlite3ExprCodeAndCache(Parse *pParse, Expr *pExpr, int target){ Vdbe *v = pParse->pVdbe; int iMem; assert( target>0 ); assert( pExpr->op!=TK_REGISTER ); sqlite3ExprCode(pParse, pExpr, target); iMem = ++pParse->nMem; sqlite3VdbeAddOp2(v, OP_Copy, target, iMem); exprToRegister(pExpr, iMem); } /* ** Generate code that pushes the value of every element of the given ** expression list into a sequence of registers beginning at target. ** ** Return the number of elements evaluated. ** ** The SQLITE_ECEL_DUP flag prevents the arguments from being ** filled using OP_SCopy. OP_Copy must be used instead. ** ** The SQLITE_ECEL_FACTOR argument allows constant arguments to be ** factored out into initialization code. 
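**
** For example, when the argument list (a, 5, b) is coded with
** SQLITE_ECEL_FACTOR set, the constant 5 may be coded once by
** sqlite3ExprCodeAtInit() in the initialization section rather than on
** every pass through the evaluation loop, while the column references
** a and b are still coded in place.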
*/ SQLITE_PRIVATE int sqlite3ExprCodeExprList( Parse *pParse, /* Parsing context */ ExprList *pList, /* The expression list to be coded */ int target, /* Where to write results */ u8 flags /* SQLITE_ECEL_* flags */ ){ struct ExprList_item *pItem; int i, n; u8 copyOp = (flags & SQLITE_ECEL_DUP) ? OP_Copy : OP_SCopy; assert( pList!=0 ); assert( target>0 ); assert( pParse->pVdbe!=0 ); /* Never gets this far otherwise */ n = pList->nExpr; if( !ConstFactorOk(pParse) ) flags &= ~SQLITE_ECEL_FACTOR; for(pItem=pList->a, i=0; ipExpr; if( (flags & SQLITE_ECEL_FACTOR)!=0 && sqlite3ExprIsConstant(pExpr) ){ sqlite3ExprCodeAtInit(pParse, pExpr, target+i, 0); }else{ int inReg = sqlite3ExprCodeTarget(pParse, pExpr, target+i); if( inReg!=target+i ){ VdbeOp *pOp; Vdbe *v = pParse->pVdbe; if( copyOp==OP_Copy && (pOp=sqlite3VdbeGetOp(v, -1))->opcode==OP_Copy && pOp->p1+pOp->p3+1==inReg && pOp->p2+pOp->p3+1==target+i ){ pOp->p3++; }else{ sqlite3VdbeAddOp2(v, copyOp, inReg, target+i); } } } } return n; } /* ** Generate code for a BETWEEN operator. ** ** x BETWEEN y AND z ** ** The above is equivalent to ** ** x>=y AND x<=z ** ** Code it as such, taking care to do the common subexpression ** elimination of x. */ static void exprCodeBetween( Parse *pParse, /* Parsing and code generating context */ Expr *pExpr, /* The BETWEEN expression */ int dest, /* Jump here if the jump is taken */ int jumpIfTrue, /* Take the jump if the BETWEEN is true */ int jumpIfNull /* Take the jump if the BETWEEN is NULL */ ){ Expr exprAnd; /* The AND operator in x>=y AND x<=z */ Expr compLeft; /* The x>=y term */ Expr compRight; /* The x<=z term */ Expr exprX; /* The x subexpression */ int regFree1 = 0; /* Temporary use register */ assert( !ExprHasProperty(pExpr, EP_xIsSelect) ); exprX = *pExpr->pLeft; exprAnd.op = TK_AND; exprAnd.pLeft = &compLeft; exprAnd.pRight = &compRight; compLeft.op = TK_GE; compLeft.pLeft = &exprX; compLeft.pRight = pExpr->x.pList->a[0].pExpr; compRight.op = TK_LE; compRight.pLeft = &exprX; compRight.pRight = pExpr->x.pList->a[1].pExpr; exprToRegister(&exprX, sqlite3ExprCodeTemp(pParse, &exprX, ®Free1)); if( jumpIfTrue ){ sqlite3ExprIfTrue(pParse, &exprAnd, dest, jumpIfNull); }else{ sqlite3ExprIfFalse(pParse, &exprAnd, dest, jumpIfNull); } sqlite3ReleaseTempReg(pParse, regFree1); /* Ensure adequate test coverage */ testcase( jumpIfTrue==0 && jumpIfNull==0 && regFree1==0 ); testcase( jumpIfTrue==0 && jumpIfNull==0 && regFree1!=0 ); testcase( jumpIfTrue==0 && jumpIfNull!=0 && regFree1==0 ); testcase( jumpIfTrue==0 && jumpIfNull!=0 && regFree1!=0 ); testcase( jumpIfTrue!=0 && jumpIfNull==0 && regFree1==0 ); testcase( jumpIfTrue!=0 && jumpIfNull==0 && regFree1!=0 ); testcase( jumpIfTrue!=0 && jumpIfNull!=0 && regFree1==0 ); testcase( jumpIfTrue!=0 && jumpIfNull!=0 && regFree1!=0 ); } /* ** Generate code for a boolean expression such that a jump is made ** to the label "dest" if the expression is true but execution ** continues straight thru if the expression is false. ** ** If the expression evaluates to NULL (neither true nor false), then ** take the jump if the jumpIfNull flag is SQLITE_JUMPIFNULL. ** ** This code depends on the fact that certain token values (ex: TK_EQ) ** are the same as opcode values (ex: OP_Eq) that implement the corresponding ** operation. Special comments in vdbe.c and the mkopcodeh.awk script in ** the make process cause these values to align. Assert()s in the code ** below verify that the numbers are aligned correctly. 
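**
** For example, when this routine codes "a>5" with
** jumpIfNull==SQLITE_JUMPIFNULL, a row in which a is NULL takes the jump
** to "dest" just as a row with a==7 does; with jumpIfNull==0, the NULL
** case falls through instead.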
*/ SQLITE_PRIVATE void sqlite3ExprIfTrue(Parse *pParse, Expr *pExpr, int dest, int jumpIfNull){ Vdbe *v = pParse->pVdbe; int op = 0; int regFree1 = 0; int regFree2 = 0; int r1, r2; assert( jumpIfNull==SQLITE_JUMPIFNULL || jumpIfNull==0 ); if( NEVER(v==0) ) return; /* Existence of VDBE checked by caller */ if( NEVER(pExpr==0) ) return; /* No way this can happen */ op = pExpr->op; switch( op ){ case TK_AND: { int d2 = sqlite3VdbeMakeLabel(v); testcase( jumpIfNull==0 ); sqlite3ExprIfFalse(pParse, pExpr->pLeft, d2,jumpIfNull^SQLITE_JUMPIFNULL); sqlite3ExprCachePush(pParse); sqlite3ExprIfTrue(pParse, pExpr->pRight, dest, jumpIfNull); sqlite3VdbeResolveLabel(v, d2); sqlite3ExprCachePop(pParse); break; } case TK_OR: { testcase( jumpIfNull==0 ); sqlite3ExprIfTrue(pParse, pExpr->pLeft, dest, jumpIfNull); sqlite3ExprCachePush(pParse); sqlite3ExprIfTrue(pParse, pExpr->pRight, dest, jumpIfNull); sqlite3ExprCachePop(pParse); break; } case TK_NOT: { testcase( jumpIfNull==0 ); sqlite3ExprIfFalse(pParse, pExpr->pLeft, dest, jumpIfNull); break; } case TK_LT: case TK_LE: case TK_GT: case TK_GE: case TK_NE: case TK_EQ: { testcase( jumpIfNull==0 ); r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, ®Free1); r2 = sqlite3ExprCodeTemp(pParse, pExpr->pRight, ®Free2); codeCompare(pParse, pExpr->pLeft, pExpr->pRight, op, r1, r2, dest, jumpIfNull); assert(TK_LT==OP_Lt); testcase(op==OP_Lt); VdbeCoverageIf(v,op==OP_Lt); assert(TK_LE==OP_Le); testcase(op==OP_Le); VdbeCoverageIf(v,op==OP_Le); assert(TK_GT==OP_Gt); testcase(op==OP_Gt); VdbeCoverageIf(v,op==OP_Gt); assert(TK_GE==OP_Ge); testcase(op==OP_Ge); VdbeCoverageIf(v,op==OP_Ge); assert(TK_EQ==OP_Eq); testcase(op==OP_Eq); VdbeCoverageIf(v,op==OP_Eq); assert(TK_NE==OP_Ne); testcase(op==OP_Ne); VdbeCoverageIf(v,op==OP_Ne); testcase( regFree1==0 ); testcase( regFree2==0 ); break; } case TK_IS: case TK_ISNOT: { testcase( op==TK_IS ); testcase( op==TK_ISNOT ); r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, ®Free1); r2 = sqlite3ExprCodeTemp(pParse, pExpr->pRight, ®Free2); op = (op==TK_IS) ? TK_EQ : TK_NE; codeCompare(pParse, pExpr->pLeft, pExpr->pRight, op, r1, r2, dest, SQLITE_NULLEQ); VdbeCoverageIf(v, op==TK_EQ); VdbeCoverageIf(v, op==TK_NE); testcase( regFree1==0 ); testcase( regFree2==0 ); break; } case TK_ISNULL: case TK_NOTNULL: { assert( TK_ISNULL==OP_IsNull ); testcase( op==TK_ISNULL ); assert( TK_NOTNULL==OP_NotNull ); testcase( op==TK_NOTNULL ); r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, ®Free1); sqlite3VdbeAddOp2(v, op, r1, dest); VdbeCoverageIf(v, op==TK_ISNULL); VdbeCoverageIf(v, op==TK_NOTNULL); testcase( regFree1==0 ); break; } case TK_BETWEEN: { testcase( jumpIfNull==0 ); exprCodeBetween(pParse, pExpr, dest, 1, jumpIfNull); break; } #ifndef SQLITE_OMIT_SUBQUERY case TK_IN: { int destIfFalse = sqlite3VdbeMakeLabel(v); int destIfNull = jumpIfNull ? 
dest : destIfFalse; sqlite3ExprCodeIN(pParse, pExpr, destIfFalse, destIfNull); sqlite3VdbeAddOp2(v, OP_Goto, 0, dest); sqlite3VdbeResolveLabel(v, destIfFalse); break; } #endif default: { if( exprAlwaysTrue(pExpr) ){ sqlite3VdbeAddOp2(v, OP_Goto, 0, dest); }else if( exprAlwaysFalse(pExpr) ){ /* No-op */ }else{ r1 = sqlite3ExprCodeTemp(pParse, pExpr, ®Free1); sqlite3VdbeAddOp3(v, OP_If, r1, dest, jumpIfNull!=0); VdbeCoverage(v); testcase( regFree1==0 ); testcase( jumpIfNull==0 ); } break; } } sqlite3ReleaseTempReg(pParse, regFree1); sqlite3ReleaseTempReg(pParse, regFree2); } /* ** Generate code for a boolean expression such that a jump is made ** to the label "dest" if the expression is false but execution ** continues straight thru if the expression is true. ** ** If the expression evaluates to NULL (neither true nor false) then ** jump if jumpIfNull is SQLITE_JUMPIFNULL or fall through if jumpIfNull ** is 0. */ SQLITE_PRIVATE void sqlite3ExprIfFalse(Parse *pParse, Expr *pExpr, int dest, int jumpIfNull){ Vdbe *v = pParse->pVdbe; int op = 0; int regFree1 = 0; int regFree2 = 0; int r1, r2; assert( jumpIfNull==SQLITE_JUMPIFNULL || jumpIfNull==0 ); if( NEVER(v==0) ) return; /* Existence of VDBE checked by caller */ if( pExpr==0 ) return; /* The value of pExpr->op and op are related as follows: ** ** pExpr->op op ** --------- ---------- ** TK_ISNULL OP_NotNull ** TK_NOTNULL OP_IsNull ** TK_NE OP_Eq ** TK_EQ OP_Ne ** TK_GT OP_Le ** TK_LE OP_Gt ** TK_GE OP_Lt ** TK_LT OP_Ge ** ** For other values of pExpr->op, op is undefined and unused. ** The value of TK_ and OP_ constants are arranged such that we ** can compute the mapping above using the following expression. ** Assert()s verify that the computation is correct. */ op = ((pExpr->op+(TK_ISNULL&1))^1)-(TK_ISNULL&1); /* Verify correct alignment of TK_ and OP_ constants */ assert( pExpr->op!=TK_ISNULL || op==OP_NotNull ); assert( pExpr->op!=TK_NOTNULL || op==OP_IsNull ); assert( pExpr->op!=TK_NE || op==OP_Eq ); assert( pExpr->op!=TK_EQ || op==OP_Ne ); assert( pExpr->op!=TK_LT || op==OP_Ge ); assert( pExpr->op!=TK_LE || op==OP_Gt ); assert( pExpr->op!=TK_GT || op==OP_Le ); assert( pExpr->op!=TK_GE || op==OP_Lt ); switch( pExpr->op ){ case TK_AND: { testcase( jumpIfNull==0 ); sqlite3ExprIfFalse(pParse, pExpr->pLeft, dest, jumpIfNull); sqlite3ExprCachePush(pParse); sqlite3ExprIfFalse(pParse, pExpr->pRight, dest, jumpIfNull); sqlite3ExprCachePop(pParse); break; } case TK_OR: { int d2 = sqlite3VdbeMakeLabel(v); testcase( jumpIfNull==0 ); sqlite3ExprIfTrue(pParse, pExpr->pLeft, d2, jumpIfNull^SQLITE_JUMPIFNULL); sqlite3ExprCachePush(pParse); sqlite3ExprIfFalse(pParse, pExpr->pRight, dest, jumpIfNull); sqlite3VdbeResolveLabel(v, d2); sqlite3ExprCachePop(pParse); break; } case TK_NOT: { testcase( jumpIfNull==0 ); sqlite3ExprIfTrue(pParse, pExpr->pLeft, dest, jumpIfNull); break; } case TK_LT: case TK_LE: case TK_GT: case TK_GE: case TK_NE: case TK_EQ: { testcase( jumpIfNull==0 ); r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, ®Free1); r2 = sqlite3ExprCodeTemp(pParse, pExpr->pRight, ®Free2); codeCompare(pParse, pExpr->pLeft, pExpr->pRight, op, r1, r2, dest, jumpIfNull); assert(TK_LT==OP_Lt); testcase(op==OP_Lt); VdbeCoverageIf(v,op==OP_Lt); assert(TK_LE==OP_Le); testcase(op==OP_Le); VdbeCoverageIf(v,op==OP_Le); assert(TK_GT==OP_Gt); testcase(op==OP_Gt); VdbeCoverageIf(v,op==OP_Gt); assert(TK_GE==OP_Ge); testcase(op==OP_Ge); VdbeCoverageIf(v,op==OP_Ge); assert(TK_EQ==OP_Eq); testcase(op==OP_Eq); VdbeCoverageIf(v,op==OP_Eq); assert(TK_NE==OP_Ne); 
testcase(op==OP_Ne); VdbeCoverageIf(v,op==OP_Ne); testcase( regFree1==0 ); testcase( regFree2==0 ); break; } case TK_IS: case TK_ISNOT: { testcase( pExpr->op==TK_IS ); testcase( pExpr->op==TK_ISNOT ); r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, ®Free1); r2 = sqlite3ExprCodeTemp(pParse, pExpr->pRight, ®Free2); op = (pExpr->op==TK_IS) ? TK_NE : TK_EQ; codeCompare(pParse, pExpr->pLeft, pExpr->pRight, op, r1, r2, dest, SQLITE_NULLEQ); VdbeCoverageIf(v, op==TK_EQ); VdbeCoverageIf(v, op==TK_NE); testcase( regFree1==0 ); testcase( regFree2==0 ); break; } case TK_ISNULL: case TK_NOTNULL: { r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, ®Free1); sqlite3VdbeAddOp2(v, op, r1, dest); testcase( op==TK_ISNULL ); VdbeCoverageIf(v, op==TK_ISNULL); testcase( op==TK_NOTNULL ); VdbeCoverageIf(v, op==TK_NOTNULL); testcase( regFree1==0 ); break; } case TK_BETWEEN: { testcase( jumpIfNull==0 ); exprCodeBetween(pParse, pExpr, dest, 0, jumpIfNull); break; } #ifndef SQLITE_OMIT_SUBQUERY case TK_IN: { if( jumpIfNull ){ sqlite3ExprCodeIN(pParse, pExpr, dest, dest); }else{ int destIfNull = sqlite3VdbeMakeLabel(v); sqlite3ExprCodeIN(pParse, pExpr, dest, destIfNull); sqlite3VdbeResolveLabel(v, destIfNull); } break; } #endif default: { if( exprAlwaysFalse(pExpr) ){ sqlite3VdbeAddOp2(v, OP_Goto, 0, dest); }else if( exprAlwaysTrue(pExpr) ){ /* no-op */ }else{ r1 = sqlite3ExprCodeTemp(pParse, pExpr, ®Free1); sqlite3VdbeAddOp3(v, OP_IfNot, r1, dest, jumpIfNull!=0); VdbeCoverage(v); testcase( regFree1==0 ); testcase( jumpIfNull==0 ); } break; } } sqlite3ReleaseTempReg(pParse, regFree1); sqlite3ReleaseTempReg(pParse, regFree2); } /* ** Like sqlite3ExprIfFalse() except that a copy is made of pExpr before ** code generation, and that copy is deleted after code generation. This ** ensures that the original pExpr is unchanged. */ SQLITE_PRIVATE void sqlite3ExprIfFalseDup(Parse *pParse, Expr *pExpr, int dest,int jumpIfNull){ sqlite3 *db = pParse->db; Expr *pCopy = sqlite3ExprDup(db, pExpr, 0); if( db->mallocFailed==0 ){ sqlite3ExprIfFalse(pParse, pCopy, dest, jumpIfNull); } sqlite3ExprDelete(db, pCopy); } /* ** Do a deep comparison of two expression trees. Return 0 if the two ** expressions are completely identical. Return 1 if they differ only ** by a COLLATE operator at the top level. Return 2 if there are differences ** other than the top-level COLLATE operator. ** ** If any subelement of pB has Expr.iTable==(-1) then it is allowed ** to compare equal to an equivalent element in pA with Expr.iTable==iTab. ** ** The pA side might be using TK_REGISTER. If that is the case and pB is ** not using TK_REGISTER but is otherwise equivalent, then still return 0. ** ** Sometimes this routine will return 2 even if the two expressions ** really are equivalent. If we cannot prove that the expressions are ** identical, we return 2 just to be safe. So if this routine ** returns 2, then you do not really know for certain if the two ** expressions are the same. But if you get a 0 or 1 return, then you ** can be sure the expressions are the same. In the places where ** this routine is used, it does not hurt to get an extra 2 - that ** just might result in some slightly slower code. But returning ** an incorrect 0 or 1 could lead to a malfunction. */ SQLITE_PRIVATE int sqlite3ExprCompare(Expr *pA, Expr *pB, int iTab){ u32 combinedFlags; if( pA==0 || pB==0 ){ return pB==pA ? 
0 : 2; } combinedFlags = pA->flags | pB->flags; if( combinedFlags & EP_IntValue ){ if( (pA->flags&pB->flags&EP_IntValue)!=0 && pA->u.iValue==pB->u.iValue ){ return 0; } return 2; } if( pA->op!=pB->op ){ if( pA->op==TK_COLLATE && sqlite3ExprCompare(pA->pLeft, pB, iTab)<2 ){ return 1; } if( pB->op==TK_COLLATE && sqlite3ExprCompare(pA, pB->pLeft, iTab)<2 ){ return 1; } return 2; } if( pA->op!=TK_COLUMN && ALWAYS(pA->op!=TK_AGG_COLUMN) && pA->u.zToken ){ if( strcmp(pA->u.zToken,pB->u.zToken)!=0 ){ return pA->op==TK_COLLATE ? 1 : 2; } } if( (pA->flags & EP_Distinct)!=(pB->flags & EP_Distinct) ) return 2; if( ALWAYS((combinedFlags & EP_TokenOnly)==0) ){ if( combinedFlags & EP_xIsSelect ) return 2; if( sqlite3ExprCompare(pA->pLeft, pB->pLeft, iTab) ) return 2; if( sqlite3ExprCompare(pA->pRight, pB->pRight, iTab) ) return 2; if( sqlite3ExprListCompare(pA->x.pList, pB->x.pList, iTab) ) return 2; if( ALWAYS((combinedFlags & EP_Reduced)==0) && pA->op!=TK_STRING ){ if( pA->iColumn!=pB->iColumn ) return 2; if( pA->iTable!=pB->iTable && (pA->iTable!=iTab || NEVER(pB->iTable>=0)) ) return 2; } } return 0; } /* ** Compare two ExprList objects. Return 0 if they are identical and ** non-zero if they differ in any way. ** ** If any subelement of pB has Expr.iTable==(-1) then it is allowed ** to compare equal to an equivalent element in pA with Expr.iTable==iTab. ** ** This routine might return non-zero for equivalent ExprLists. The ** only consequence will be disabled optimizations. But this routine ** must never return 0 if the two ExprList objects are different, or ** a malfunction will result. ** ** Two NULL pointers are considered to be the same. But a NULL pointer ** always differs from a non-NULL pointer. */ SQLITE_PRIVATE int sqlite3ExprListCompare(ExprList *pA, ExprList *pB, int iTab){ int i; if( pA==0 && pB==0 ) return 0; if( pA==0 || pB==0 ) return 1; if( pA->nExpr!=pB->nExpr ) return 1; for(i=0; inExpr; i++){ Expr *pExprA = pA->a[i].pExpr; Expr *pExprB = pB->a[i].pExpr; if( pA->a[i].sortOrder!=pB->a[i].sortOrder ) return 1; if( sqlite3ExprCompare(pExprA, pExprB, iTab) ) return 1; } return 0; } /* ** Return true if we can prove the pE2 will always be true if pE1 is ** true. Return false if we cannot complete the proof or if pE2 might ** be false. Examples: ** ** pE1: x==5 pE2: x==5 Result: true ** pE1: x>0 pE2: x==5 Result: false ** pE1: x=21 pE2: x=21 OR y=43 Result: true ** pE1: x!=123 pE2: x IS NOT NULL Result: true ** pE1: x!=?1 pE2: x IS NOT NULL Result: true ** pE1: x IS NULL pE2: x IS NOT NULL Result: false ** pE1: x IS ?2 pE2: x IS NOT NULL Reuslt: false ** ** When comparing TK_COLUMN nodes between pE1 and pE2, if pE2 has ** Expr.iTable<0 then assume a table number given by iTab. ** ** When in doubt, return false. Returning true might give a performance ** improvement. Returning false might cause a performance reduction, but ** it will always give the correct answer and is hence always safe. 
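**
** By the same reasoning as the x!=123 example above, an ordinary
** comparison also implies NOT NULL:
**
**     pE1: x>5              pE2: x IS NOT NULL       Result: true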
*/ SQLITE_PRIVATE int sqlite3ExprImpliesExpr(Expr *pE1, Expr *pE2, int iTab){ if( sqlite3ExprCompare(pE1, pE2, iTab)==0 ){ return 1; } if( pE2->op==TK_OR && (sqlite3ExprImpliesExpr(pE1, pE2->pLeft, iTab) || sqlite3ExprImpliesExpr(pE1, pE2->pRight, iTab) ) ){ return 1; } if( pE2->op==TK_NOTNULL && sqlite3ExprCompare(pE1->pLeft, pE2->pLeft, iTab)==0 && (pE1->op!=TK_ISNULL && pE1->op!=TK_IS) ){ return 1; } return 0; } /* ** An instance of the following structure is used by the tree walker ** to count references to table columns in the arguments of an ** aggregate function, in order to implement the ** sqlite3FunctionThisSrc() routine. */ struct SrcCount { SrcList *pSrc; /* One particular FROM clause in a nested query */ int nThis; /* Number of references to columns in pSrcList */ int nOther; /* Number of references to columns in other FROM clauses */ }; /* ** Count the number of references to columns. */ static int exprSrcCount(Walker *pWalker, Expr *pExpr){ /* The NEVER() on the second term is because sqlite3FunctionUsesThisSrc() ** is always called before sqlite3ExprAnalyzeAggregates() and so the ** TK_COLUMNs have not yet been converted into TK_AGG_COLUMN. If ** sqlite3FunctionUsesThisSrc() is used differently in the future, the ** NEVER() will need to be removed. */ if( pExpr->op==TK_COLUMN || NEVER(pExpr->op==TK_AGG_COLUMN) ){ int i; struct SrcCount *p = pWalker->u.pSrcCount; SrcList *pSrc = p->pSrc; int nSrc = pSrc ? pSrc->nSrc : 0; for(i=0; iiTable==pSrc->a[i].iCursor ) break; } if( inThis++; }else{ p->nOther++; } } return WRC_Continue; } /* ** Determine if any of the arguments to the pExpr Function reference ** pSrcList. Return true if they do. Also return true if the function ** has no arguments or has only constant arguments. Return false if pExpr ** references columns but not columns of tables found in pSrcList. */ SQLITE_PRIVATE int sqlite3FunctionUsesThisSrc(Expr *pExpr, SrcList *pSrcList){ Walker w; struct SrcCount cnt; assert( pExpr->op==TK_AGG_FUNCTION ); memset(&w, 0, sizeof(w)); w.xExprCallback = exprSrcCount; w.u.pSrcCount = &cnt; cnt.pSrc = pSrcList; cnt.nThis = 0; cnt.nOther = 0; sqlite3WalkExprList(&w, pExpr->x.pList); return cnt.nThis>0 || cnt.nOther==0; } /* ** Add a new element to the pAggInfo->aCol[] array. Return the index of ** the new element. Return a negative number if malloc fails. */ static int addAggInfoColumn(sqlite3 *db, AggInfo *pInfo){ int i; pInfo->aCol = sqlite3ArrayAllocate( db, pInfo->aCol, sizeof(pInfo->aCol[0]), &pInfo->nColumn, &i ); return i; } /* ** Add a new element to the pAggInfo->aFunc[] array. Return the index of ** the new element. Return a negative number if malloc fails. */ static int addAggInfoFunc(sqlite3 *db, AggInfo *pInfo){ int i; pInfo->aFunc = sqlite3ArrayAllocate( db, pInfo->aFunc, sizeof(pInfo->aFunc[0]), &pInfo->nFunc, &i ); return i; } /* ** This is the xExprCallback for a tree walker. It is used to ** implement sqlite3ExprAnalyzeAggregates(). See sqlite3ExprAnalyzeAggregates ** for additional information. 
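**
** For example, while analyzing the hypothetical query
** "SELECT max(a), b FROM t1 GROUP BY b", this callback adds an
** AggInfo.aFunc[] entry for max(a) and AggInfo.aCol[] entries for the
** column references a and b, and rewrites those nodes as TK_AGG_FUNCTION
** and TK_AGG_COLUMN nodes that refer back to the corresponding AggInfo
** entries.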
*/ static int analyzeAggregate(Walker *pWalker, Expr *pExpr){ int i; NameContext *pNC = pWalker->u.pNC; Parse *pParse = pNC->pParse; SrcList *pSrcList = pNC->pSrcList; AggInfo *pAggInfo = pNC->pAggInfo; switch( pExpr->op ){ case TK_AGG_COLUMN: case TK_COLUMN: { testcase( pExpr->op==TK_AGG_COLUMN ); testcase( pExpr->op==TK_COLUMN ); /* Check to see if the column is in one of the tables in the FROM ** clause of the aggregate query */ if( ALWAYS(pSrcList!=0) ){ struct SrcList_item *pItem = pSrcList->a; for(i=0; inSrc; i++, pItem++){ struct AggInfo_col *pCol; assert( !ExprHasProperty(pExpr, EP_TokenOnly|EP_Reduced) ); if( pExpr->iTable==pItem->iCursor ){ /* If we reach this point, it means that pExpr refers to a table ** that is in the FROM clause of the aggregate query. ** ** Make an entry for the column in pAggInfo->aCol[] if there ** is not an entry there already. */ int k; pCol = pAggInfo->aCol; for(k=0; knColumn; k++, pCol++){ if( pCol->iTable==pExpr->iTable && pCol->iColumn==pExpr->iColumn ){ break; } } if( (k>=pAggInfo->nColumn) && (k = addAggInfoColumn(pParse->db, pAggInfo))>=0 ){ pCol = &pAggInfo->aCol[k]; pCol->pTab = pExpr->pTab; pCol->iTable = pExpr->iTable; pCol->iColumn = pExpr->iColumn; pCol->iMem = ++pParse->nMem; pCol->iSorterColumn = -1; pCol->pExpr = pExpr; if( pAggInfo->pGroupBy ){ int j, n; ExprList *pGB = pAggInfo->pGroupBy; struct ExprList_item *pTerm = pGB->a; n = pGB->nExpr; for(j=0; jpExpr; if( pE->op==TK_COLUMN && pE->iTable==pExpr->iTable && pE->iColumn==pExpr->iColumn ){ pCol->iSorterColumn = j; break; } } } if( pCol->iSorterColumn<0 ){ pCol->iSorterColumn = pAggInfo->nSortingColumn++; } } /* There is now an entry for pExpr in pAggInfo->aCol[] (either ** because it was there before or because we just created it). ** Convert the pExpr to be a TK_AGG_COLUMN referring to that ** pAggInfo->aCol[] entry. */ ExprSetVVAProperty(pExpr, EP_NoReduce); pExpr->pAggInfo = pAggInfo; pExpr->op = TK_AGG_COLUMN; pExpr->iAgg = (i16)k; break; } /* endif pExpr->iTable==pItem->iCursor */ } /* end loop over pSrcList */ } return WRC_Prune; } case TK_AGG_FUNCTION: { if( (pNC->ncFlags & NC_InAggFunc)==0 && pWalker->walkerDepth==pExpr->op2 ){ /* Check to see if pExpr is a duplicate of another aggregate ** function that is already in the pAggInfo structure */ struct AggInfo_func *pItem = pAggInfo->aFunc; for(i=0; inFunc; i++, pItem++){ if( sqlite3ExprCompare(pItem->pExpr, pExpr, -1)==0 ){ break; } } if( i>=pAggInfo->nFunc ){ /* pExpr is original. Make a new entry in pAggInfo->aFunc[] */ u8 enc = ENC(pParse->db); i = addAggInfoFunc(pParse->db, pAggInfo); if( i>=0 ){ assert( !ExprHasProperty(pExpr, EP_xIsSelect) ); pItem = &pAggInfo->aFunc[i]; pItem->pExpr = pExpr; pItem->iMem = ++pParse->nMem; assert( !ExprHasProperty(pExpr, EP_IntValue) ); pItem->pFunc = sqlite3FindFunction(pParse->db, pExpr->u.zToken, sqlite3Strlen30(pExpr->u.zToken), pExpr->x.pList ? 
pExpr->x.pList->nExpr : 0, enc, 0); if( pExpr->flags & EP_Distinct ){ pItem->iDistinct = pParse->nTab++; }else{ pItem->iDistinct = -1; } } } /* Make pExpr point to the appropriate pAggInfo->aFunc[] entry */ assert( !ExprHasProperty(pExpr, EP_TokenOnly|EP_Reduced) ); ExprSetVVAProperty(pExpr, EP_NoReduce); pExpr->iAgg = (i16)i; pExpr->pAggInfo = pAggInfo; return WRC_Prune; }else{ return WRC_Continue; } } } return WRC_Continue; } static int analyzeAggregatesInSelect(Walker *pWalker, Select *pSelect){ UNUSED_PARAMETER(pWalker); UNUSED_PARAMETER(pSelect); return WRC_Continue; } /* ** Analyze the pExpr expression looking for aggregate functions and ** for variables that need to be added to AggInfo object that pNC->pAggInfo ** points to. Additional entries are made on the AggInfo object as ** necessary. ** ** This routine should only be called after the expression has been ** analyzed by sqlite3ResolveExprNames(). */ SQLITE_PRIVATE void sqlite3ExprAnalyzeAggregates(NameContext *pNC, Expr *pExpr){ Walker w; memset(&w, 0, sizeof(w)); w.xExprCallback = analyzeAggregate; w.xSelectCallback = analyzeAggregatesInSelect; w.u.pNC = pNC; assert( pNC->pSrcList!=0 ); sqlite3WalkExpr(&w, pExpr); } /* ** Call sqlite3ExprAnalyzeAggregates() for every expression in an ** expression list. Return the number of errors. ** ** If an error is found, the analysis is cut short. */ SQLITE_PRIVATE void sqlite3ExprAnalyzeAggList(NameContext *pNC, ExprList *pList){ struct ExprList_item *pItem; int i; if( pList ){ for(pItem=pList->a, i=0; i<pList->nExpr; i++, pItem++){ sqlite3ExprAnalyzeAggregates(pNC, pItem->pExpr); } } } /* ** Allocate a single new register for use to hold some intermediate result. */ SQLITE_PRIVATE int sqlite3GetTempReg(Parse *pParse){ if( pParse->nTempReg==0 ){ return ++pParse->nMem; } return pParse->aTempReg[--pParse->nTempReg]; } /* ** Deallocate a register, making it available for reuse for some other ** purpose. ** ** If a register is currently being used by the column cache, then ** the deallocation is deferred until the column cache line that uses ** the register becomes stale. */ SQLITE_PRIVATE void sqlite3ReleaseTempReg(Parse *pParse, int iReg){ if( iReg && pParse->nTempReg<ArraySize(pParse->aTempReg) ){ int i; struct yColCache *p; for(i=0, p=pParse->aColCache; i<SQLITE_N_COLCACHE; i++, p++){ if( p->iReg==iReg ){ p->tempReg = 1; return; } } pParse->aTempReg[pParse->nTempReg++] = iReg; } } /* ** Allocate or deallocate a block of nReg consecutive registers */ SQLITE_PRIVATE int sqlite3GetTempRange(Parse *pParse, int nReg){ int i, n; i = pParse->iRangeReg; n = pParse->nRangeReg; if( nReg<=n ){ assert( !usedAsColumnCache(pParse, i, i+n-1) ); pParse->iRangeReg += nReg; pParse->nRangeReg -= nReg; }else{ i = pParse->nMem+1; pParse->nMem += nReg; } return i; } SQLITE_PRIVATE void sqlite3ReleaseTempRange(Parse *pParse, int iReg, int nReg){ sqlite3ExprCacheRemove(pParse, iReg, nReg); if( nReg>pParse->nRangeReg ){ pParse->nRangeReg = nReg; pParse->iRangeReg = iReg; } } /* ** Mark all temporary registers as being unavailable for reuse. */ SQLITE_PRIVATE void sqlite3ClearTempRegCache(Parse *pParse){ pParse->nTempReg = 0; pParse->nRangeReg = 0; } /************** End of expr.c ************************************************/ /************** Begin file alter.c *******************************************/ /* ** 2005 February 15 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give. ** ************************************************************************* ** This file contains C code routines that used to generate VDBE code ** that implements the ALTER TABLE command. */ /* #include "sqliteInt.h" */ /* ** The code in this file only exists if we are not omitting the ** ALTER TABLE logic from the build. */ #ifndef SQLITE_OMIT_ALTERTABLE /* ** This function is used by SQL generated to implement the ** ALTER TABLE command. The first argument is the text of a CREATE TABLE or ** CREATE INDEX command. The second is a table name. The table name in ** the CREATE TABLE or CREATE INDEX statement is replaced with the third ** argument and the result returned. Examples: ** ** sqlite_rename_table('CREATE TABLE abc(a, b, c)', 'def') ** -> 'CREATE TABLE def(a, b, c)' ** ** sqlite_rename_table('CREATE INDEX i ON abc(a)', 'def') ** -> 'CREATE INDEX i ON def(a, b, c)' */ static void renameTableFunc( sqlite3_context *context, int NotUsed, sqlite3_value **argv ){ unsigned char const *zSql = sqlite3_value_text(argv[0]); unsigned char const *zTableName = sqlite3_value_text(argv[1]); int token; Token tname; unsigned char const *zCsr = zSql; int len = 0; char *zRet; sqlite3 *db = sqlite3_context_db_handle(context); UNUSED_PARAMETER(NotUsed); /* The principle used to locate the table name in the CREATE TABLE ** statement is that the table name is the first non-space token that ** is immediately followed by a TK_LP or TK_USING token. */ if( zSql ){ do { if( !*zCsr ){ /* Ran out of input before finding an opening bracket. Return NULL. */ return; } /* Store the token that zCsr points to in tname. */ tname.z = (char*)zCsr; tname.n = len; /* Advance zCsr to the next token. Store that token type in 'token', ** and its length in 'len' (to be used next iteration of this loop). */ do { zCsr += len; len = sqlite3GetToken(zCsr, &token); } while( token==TK_SPACE ); assert( len>0 ); } while( token!=TK_LP && token!=TK_USING ); zRet = sqlite3MPrintf(db, "%.*s\"%w\"%s", (int)(((u8*)tname.z) - zSql), zSql, zTableName, tname.z+tname.n); sqlite3_result_text(context, zRet, -1, SQLITE_DYNAMIC); } } /* ** This C function implements an SQL user function that is used by SQL code ** generated by the ALTER TABLE ... RENAME command to modify the definition ** of any foreign key constraints that use the table being renamed as the ** parent table. It is passed three arguments: ** ** 1) The complete text of the CREATE TABLE statement being modified, ** 2) The old name of the table being renamed, and ** 3) The new name of the table being renamed. ** ** It returns the new CREATE TABLE statement. 
For example: ** ** sqlite_rename_parent('CREATE TABLE t1(a REFERENCES t2)', 't2', 't3') ** -> 'CREATE TABLE t1(a REFERENCES t3)' */ #ifndef SQLITE_OMIT_FOREIGN_KEY static void renameParentFunc( sqlite3_context *context, int NotUsed, sqlite3_value **argv ){ sqlite3 *db = sqlite3_context_db_handle(context); char *zOutput = 0; char *zResult; unsigned char const *zInput = sqlite3_value_text(argv[0]); unsigned char const *zOld = sqlite3_value_text(argv[1]); unsigned char const *zNew = sqlite3_value_text(argv[2]); unsigned const char *z; /* Pointer to token */ int n; /* Length of token z */ int token; /* Type of token */ UNUSED_PARAMETER(NotUsed); if( zInput==0 || zOld==0 ) return; for(z=zInput; *z; z=z+n){ n = sqlite3GetToken(z, &token); if( token==TK_REFERENCES ){ char *zParent; do { z += n; n = sqlite3GetToken(z, &token); }while( token==TK_SPACE ); if( token==TK_ILLEGAL ) break; zParent = sqlite3DbStrNDup(db, (const char *)z, n); if( zParent==0 ) break; sqlite3Dequote(zParent); if( 0==sqlite3StrICmp((const char *)zOld, zParent) ){ char *zOut = sqlite3MPrintf(db, "%s%.*s\"%w\"", (zOutput?zOutput:""), (int)(z-zInput), zInput, (const char *)zNew ); sqlite3DbFree(db, zOutput); zOutput = zOut; zInput = &z[n]; } sqlite3DbFree(db, zParent); } } zResult = sqlite3MPrintf(db, "%s%s", (zOutput?zOutput:""), zInput), sqlite3_result_text(context, zResult, -1, SQLITE_DYNAMIC); sqlite3DbFree(db, zOutput); } #endif #ifndef SQLITE_OMIT_TRIGGER /* This function is used by SQL generated to implement the ** ALTER TABLE command. The first argument is the text of a CREATE TRIGGER ** statement. The second is a table name. The table name in the CREATE ** TRIGGER statement is replaced with the third argument and the result ** returned. This is analagous to renameTableFunc() above, except for CREATE ** TRIGGER, not CREATE INDEX and CREATE TABLE. */ static void renameTriggerFunc( sqlite3_context *context, int NotUsed, sqlite3_value **argv ){ unsigned char const *zSql = sqlite3_value_text(argv[0]); unsigned char const *zTableName = sqlite3_value_text(argv[1]); int token; Token tname; int dist = 3; unsigned char const *zCsr = zSql; int len = 0; char *zRet; sqlite3 *db = sqlite3_context_db_handle(context); UNUSED_PARAMETER(NotUsed); /* The principle used to locate the table name in the CREATE TRIGGER ** statement is that the table name is the first token that is immediately ** preceded by either TK_ON or TK_DOT and immediately followed by one ** of TK_WHEN, TK_BEGIN or TK_FOR. */ if( zSql ){ do { if( !*zCsr ){ /* Ran out of input before finding the table name. Return NULL. */ return; } /* Store the token that zCsr points to in tname. */ tname.z = (char*)zCsr; tname.n = len; /* Advance zCsr to the next token. Store that token type in 'token', ** and its length in 'len' (to be used next iteration of this loop). */ do { zCsr += len; len = sqlite3GetToken(zCsr, &token); }while( token==TK_SPACE ); assert( len>0 ); /* Variable 'dist' stores the number of tokens read since the most ** recent TK_DOT or TK_ON. This means that when a WHEN, FOR or BEGIN ** token is read and 'dist' equals 2, the condition stated above ** to be met. ** ** Note that ON cannot be a database, table or column name, so ** there is no need to worry about syntax like ** "CREATE TRIGGER ... ON ON.ON BEGIN ..." etc. */ dist++; if( token==TK_DOT || token==TK_ON ){ dist = 0; } } while( dist!=2 || (token!=TK_WHEN && token!=TK_FOR && token!=TK_BEGIN) ); /* Variable tname now contains the token that is the old table-name ** in the CREATE TRIGGER statement. 
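**
** As an illustration (a hypothetical call, not part of the original
** comments), the overall effect of this SQL function is:
**
**   sqlite_rename_trigger(
**       'CREATE TRIGGER tr1 BEFORE UPDATE ON abc BEGIN SELECT 1; END', 'def')
**     -> 'CREATE TRIGGER tr1 BEFORE UPDATE ON "def" BEGIN SELECT 1; END'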
*/ zRet = sqlite3MPrintf(db, "%.*s\"%w\"%s", (int)(((u8*)tname.z) - zSql), zSql, zTableName, tname.z+tname.n); sqlite3_result_text(context, zRet, -1, SQLITE_DYNAMIC); } } #endif /* !SQLITE_OMIT_TRIGGER */ /* ** Register built-in functions used to help implement ALTER TABLE */ SQLITE_PRIVATE void sqlite3AlterFunctions(void){ static SQLITE_WSD FuncDef aAlterTableFuncs[] = { FUNCTION(sqlite_rename_table, 2, 0, 0, renameTableFunc), #ifndef SQLITE_OMIT_TRIGGER FUNCTION(sqlite_rename_trigger, 2, 0, 0, renameTriggerFunc), #endif #ifndef SQLITE_OMIT_FOREIGN_KEY FUNCTION(sqlite_rename_parent, 3, 0, 0, renameParentFunc), #endif }; int i; FuncDefHash *pHash = &GLOBAL(FuncDefHash, sqlite3GlobalFunctions); FuncDef *aFunc = (FuncDef*)&GLOBAL(FuncDef, aAlterTableFuncs); for(i=0; i OR name= OR ... ** ** If argument zWhere is NULL, then a pointer string containing the text ** "name=" is returned, where is the quoted version ** of the string passed as argument zConstant. The returned buffer is ** allocated using sqlite3DbMalloc(). It is the responsibility of the ** caller to ensure that it is eventually freed. ** ** If argument zWhere is not NULL, then the string returned is ** " OR name=", where is the contents of zWhere. ** In this case zWhere is passed to sqlite3DbFree() before returning. ** */ static char *whereOrName(sqlite3 *db, char *zWhere, char *zConstant){ char *zNew; if( !zWhere ){ zNew = sqlite3MPrintf(db, "name=%Q", zConstant); }else{ zNew = sqlite3MPrintf(db, "%s OR name=%Q", zWhere, zConstant); sqlite3DbFree(db, zWhere); } return zNew; } #if !defined(SQLITE_OMIT_FOREIGN_KEY) && !defined(SQLITE_OMIT_TRIGGER) /* ** Generate the text of a WHERE expression which can be used to select all ** tables that have foreign key constraints that refer to table pTab (i.e. ** constraints for which pTab is the parent table) from the sqlite_master ** table. */ static char *whereForeignKeys(Parse *pParse, Table *pTab){ FKey *p; char *zWhere = 0; for(p=sqlite3FkReferences(pTab); p; p=p->pNextTo){ zWhere = whereOrName(pParse->db, zWhere, p->pFrom->zName); } return zWhere; } #endif /* ** Generate the text of a WHERE expression which can be used to select all ** temporary triggers on table pTab from the sqlite_temp_master table. If ** table pTab has no temporary triggers, or is itself stored in the ** temporary database, NULL is returned. */ static char *whereTempTriggers(Parse *pParse, Table *pTab){ Trigger *pTrig; char *zWhere = 0; const Schema *pTempSchema = pParse->db->aDb[1].pSchema; /* Temp db schema */ /* If the table is not located in the temp-db (in which case NULL is ** returned, loop through the tables list of triggers. For each trigger ** that is not part of the temp-db schema, add a clause to the WHERE ** expression being built up in zWhere. */ if( pTab->pSchema!=pTempSchema ){ sqlite3 *db = pParse->db; for(pTrig=sqlite3TriggerList(pParse, pTab); pTrig; pTrig=pTrig->pNext){ if( pTrig->pSchema==pTempSchema ){ zWhere = whereOrName(db, zWhere, pTrig->zName); } } } if( zWhere ){ char *zNew = sqlite3MPrintf(pParse->db, "type='trigger' AND (%s)", zWhere); sqlite3DbFree(pParse->db, zWhere); zWhere = zNew; } return zWhere; } /* ** Generate code to drop and reload the internal representation of table ** pTab from the database, including triggers and temporary triggers. ** Argument zName is the name of the table in the database schema at ** the time the generated code is executed. This can be different from ** pTab->zName if this function is being called to code part of an ** "ALTER TABLE RENAME TO" statement. 
*/ static void reloadTableSchema(Parse *pParse, Table *pTab, const char *zName){ Vdbe *v; char *zWhere; int iDb; /* Index of database containing pTab */ #ifndef SQLITE_OMIT_TRIGGER Trigger *pTrig; #endif v = sqlite3GetVdbe(pParse); if( NEVER(v==0) ) return; assert( sqlite3BtreeHoldsAllMutexes(pParse->db) ); iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema); assert( iDb>=0 ); #ifndef SQLITE_OMIT_TRIGGER /* Drop any table triggers from the internal schema. */ for(pTrig=sqlite3TriggerList(pParse, pTab); pTrig; pTrig=pTrig->pNext){ int iTrigDb = sqlite3SchemaToIndex(pParse->db, pTrig->pSchema); assert( iTrigDb==iDb || iTrigDb==1 ); sqlite3VdbeAddOp4(v, OP_DropTrigger, iTrigDb, 0, 0, pTrig->zName, 0); } #endif /* Drop the table and index from the internal schema. */ sqlite3VdbeAddOp4(v, OP_DropTable, iDb, 0, 0, pTab->zName, 0); /* Reload the table, index and permanent trigger schemas. */ zWhere = sqlite3MPrintf(pParse->db, "tbl_name=%Q", zName); if( !zWhere ) return; sqlite3VdbeAddParseSchemaOp(v, iDb, zWhere); #ifndef SQLITE_OMIT_TRIGGER /* Now, if the table is not stored in the temp database, reload any temp ** triggers. Don't use IN(...) in case SQLITE_OMIT_SUBQUERY is defined. */ if( (zWhere=whereTempTriggers(pParse, pTab))!=0 ){ sqlite3VdbeAddParseSchemaOp(v, 1, zWhere); } #endif } /* ** Parameter zName is the name of a table that is about to be altered ** (either with ALTER TABLE ... RENAME TO or ALTER TABLE ... ADD COLUMN). ** If the table is a system table, this function leaves an error message ** in pParse->zErr (system tables may not be altered) and returns non-zero. ** ** Or, if zName is not a system table, zero is returned. */ static int isSystemTable(Parse *pParse, const char *zName){ if( sqlite3Strlen30(zName)>6 && 0==sqlite3StrNICmp(zName, "sqlite_", 7) ){ sqlite3ErrorMsg(pParse, "table %s may not be altered", zName); return 1; } return 0; } /* ** Generate code to implement the "ALTER TABLE xxx RENAME TO yyy" ** command. */ SQLITE_PRIVATE void sqlite3AlterRenameTable( Parse *pParse, /* Parser context. */ SrcList *pSrc, /* The table to rename. */ Token *pName /* The new table name. */ ){ int iDb; /* Database that contains the table */ char *zDb; /* Name of database iDb */ Table *pTab; /* Table being renamed */ char *zName = 0; /* NULL-terminated version of pName */ sqlite3 *db = pParse->db; /* Database connection */ int nTabName; /* Number of UTF-8 characters in zTabName */ const char *zTabName; /* Original name of the table */ Vdbe *v; #ifndef SQLITE_OMIT_TRIGGER char *zWhere = 0; /* Where clause to locate temp triggers */ #endif VTable *pVTab = 0; /* Non-zero if this is a v-tab with an xRename() */ int savedDbFlags; /* Saved value of db->flags */ savedDbFlags = db->flags; if( NEVER(db->mallocFailed) ) goto exit_rename_table; assert( pSrc->nSrc==1 ); assert( sqlite3BtreeHoldsAllMutexes(pParse->db) ); pTab = sqlite3LocateTableItem(pParse, 0, &pSrc->a[0]); if( !pTab ) goto exit_rename_table; iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema); zDb = db->aDb[iDb].zName; db->flags |= SQLITE_PreferBuiltin; /* Get a NULL terminated version of the new table name. */ zName = sqlite3NameFromToken(db, pName); if( !zName ) goto exit_rename_table; /* Check that a table or index named 'zName' does not already exist ** in database iDb. If so, this is an error. 
*/ if( sqlite3FindTable(db, zName, zDb) || sqlite3FindIndex(db, zName, zDb) ){ sqlite3ErrorMsg(pParse, "there is already another table or index with this name: %s", zName); goto exit_rename_table; } /* Make sure it is not a system table being altered, or a reserved name ** that the table is being renamed to. */ if( SQLITE_OK!=isSystemTable(pParse, pTab->zName) ){ goto exit_rename_table; } if( SQLITE_OK!=sqlite3CheckObjectName(pParse, zName) ){ goto exit_rename_table; } #ifndef SQLITE_OMIT_VIEW if( pTab->pSelect ){ sqlite3ErrorMsg(pParse, "view %s may not be altered", pTab->zName); goto exit_rename_table; } #endif #ifndef SQLITE_OMIT_AUTHORIZATION /* Invoke the authorization callback. */ if( sqlite3AuthCheck(pParse, SQLITE_ALTER_TABLE, zDb, pTab->zName, 0) ){ goto exit_rename_table; } #endif #ifndef SQLITE_OMIT_VIRTUALTABLE if( sqlite3ViewGetColumnNames(pParse, pTab) ){ goto exit_rename_table; } if( IsVirtual(pTab) ){ pVTab = sqlite3GetVTable(db, pTab); if( pVTab->pVtab->pModule->xRename==0 ){ pVTab = 0; } } #endif /* Begin a transaction for database iDb. ** Then modify the schema cookie (since the ALTER TABLE modifies the ** schema). Open a statement transaction if the table is a virtual ** table. */ v = sqlite3GetVdbe(pParse); if( v==0 ){ goto exit_rename_table; } sqlite3BeginWriteOperation(pParse, pVTab!=0, iDb); sqlite3ChangeCookie(pParse, iDb); /* If this is a virtual table, invoke the xRename() function if ** one is defined. The xRename() callback will modify the names ** of any resources used by the v-table implementation (including other ** SQLite tables) that are identified by the name of the virtual table. */ #ifndef SQLITE_OMIT_VIRTUALTABLE if( pVTab ){ int i = ++pParse->nMem; sqlite3VdbeAddOp4(v, OP_String8, 0, i, 0, zName, 0); sqlite3VdbeAddOp4(v, OP_VRename, i, 0, 0,(const char*)pVTab, P4_VTAB); sqlite3MayAbort(pParse); } #endif /* figure out how many UTF-8 characters are in zName */ zTabName = pTab->zName; nTabName = sqlite3Utf8CharLen(zTabName, -1); #if !defined(SQLITE_OMIT_FOREIGN_KEY) && !defined(SQLITE_OMIT_TRIGGER) if( db->flags&SQLITE_ForeignKeys ){ /* If foreign-key support is enabled, rewrite the CREATE TABLE ** statements corresponding to all child tables of foreign key constraints ** for which the renamed table is the parent table. */ if( (zWhere=whereForeignKeys(pParse, pTab))!=0 ){ sqlite3NestedParse(pParse, "UPDATE \"%w\".%s SET " "sql = sqlite_rename_parent(sql, %Q, %Q) " "WHERE %s;", zDb, SCHEMA_TABLE(iDb), zTabName, zName, zWhere); sqlite3DbFree(db, zWhere); } } #endif /* Modify the sqlite_master table to use the new table name. */ sqlite3NestedParse(pParse, "UPDATE %Q.%s SET " #ifdef SQLITE_OMIT_TRIGGER "sql = sqlite_rename_table(sql, %Q), " #else "sql = CASE " "WHEN type = 'trigger' THEN sqlite_rename_trigger(sql, %Q)" "ELSE sqlite_rename_table(sql, %Q) END, " #endif "tbl_name = %Q, " "name = CASE " "WHEN type='table' THEN %Q " "WHEN name LIKE 'sqlite_autoindex%%' AND type='index' THEN " "'sqlite_autoindex_' || %Q || substr(name,%d+18) " "ELSE name END " "WHERE tbl_name=%Q COLLATE nocase AND " "(type='table' OR type='index' OR type='trigger');", zDb, SCHEMA_TABLE(iDb), zName, zName, zName, #ifndef SQLITE_OMIT_TRIGGER zName, #endif zName, nTabName, zTabName ); #ifndef SQLITE_OMIT_AUTOINCREMENT /* If the sqlite_sequence table exists in this database, then update ** it with the new table name. 
*/ if( sqlite3FindTable(db, "sqlite_sequence", zDb) ){ sqlite3NestedParse(pParse, "UPDATE \"%w\".sqlite_sequence set name = %Q WHERE name = %Q", zDb, zName, pTab->zName); } #endif #ifndef SQLITE_OMIT_TRIGGER /* If there are TEMP triggers on this table, modify the sqlite_temp_master ** table. Don't do this if the table being ALTERed is itself located in ** the temp database. */ if( (zWhere=whereTempTriggers(pParse, pTab))!=0 ){ sqlite3NestedParse(pParse, "UPDATE sqlite_temp_master SET " "sql = sqlite_rename_trigger(sql, %Q), " "tbl_name = %Q " "WHERE %s;", zName, zName, zWhere); sqlite3DbFree(db, zWhere); } #endif #if !defined(SQLITE_OMIT_FOREIGN_KEY) && !defined(SQLITE_OMIT_TRIGGER) if( db->flags&SQLITE_ForeignKeys ){ FKey *p; for(p=sqlite3FkReferences(pTab); p; p=p->pNextTo){ Table *pFrom = p->pFrom; if( pFrom!=pTab ){ reloadTableSchema(pParse, p->pFrom, pFrom->zName); } } } #endif /* Drop and reload the internal table schema. */ reloadTableSchema(pParse, pTab, zName); exit_rename_table: sqlite3SrcListDelete(db, pSrc); sqlite3DbFree(db, zName); db->flags = savedDbFlags; } /* ** Generate code to make sure the file format number is at least minFormat. ** The generated code will increase the file format number if necessary. */ SQLITE_PRIVATE void sqlite3MinimumFileFormat(Parse *pParse, int iDb, int minFormat){ Vdbe *v; v = sqlite3GetVdbe(pParse); /* The VDBE should have been allocated before this routine is called. ** If that allocation failed, we would have quit before reaching this ** point */ if( ALWAYS(v) ){ int r1 = sqlite3GetTempReg(pParse); int r2 = sqlite3GetTempReg(pParse); int j1; sqlite3VdbeAddOp3(v, OP_ReadCookie, iDb, r1, BTREE_FILE_FORMAT); sqlite3VdbeUsesBtree(v, iDb); sqlite3VdbeAddOp2(v, OP_Integer, minFormat, r2); j1 = sqlite3VdbeAddOp3(v, OP_Ge, r2, 0, r1); sqlite3VdbeChangeP5(v, SQLITE_NOTNULL); VdbeCoverage(v); sqlite3VdbeAddOp3(v, OP_SetCookie, iDb, BTREE_FILE_FORMAT, r2); sqlite3VdbeJumpHere(v, j1); sqlite3ReleaseTempReg(pParse, r1); sqlite3ReleaseTempReg(pParse, r2); } } /* ** This function is called after an "ALTER TABLE ... ADD" statement ** has been parsed. Argument pColDef contains the text of the new ** column definition. ** ** The Table structure pParse->pNewTable was extended to include ** the new column during parsing. */ SQLITE_PRIVATE void sqlite3AlterFinishAddColumn(Parse *pParse, Token *pColDef){ Table *pNew; /* Copy of pParse->pNewTable */ Table *pTab; /* Table being altered */ int iDb; /* Database number */ const char *zDb; /* Database name */ const char *zTab; /* Table name */ char *zCol; /* Null-terminated column definition */ Column *pCol; /* The new column */ Expr *pDflt; /* Default value for the new column */ sqlite3 *db; /* The database connection; */ db = pParse->db; if( pParse->nErr || db->mallocFailed ) return; pNew = pParse->pNewTable; assert( pNew ); assert( sqlite3BtreeHoldsAllMutexes(db) ); iDb = sqlite3SchemaToIndex(db, pNew->pSchema); zDb = db->aDb[iDb].zName; zTab = &pNew->zName[16]; /* Skip the "sqlite_altertab_" prefix on the name */ pCol = &pNew->aCol[pNew->nCol-1]; pDflt = pCol->pDflt; pTab = sqlite3FindTable(db, zTab, zDb); assert( pTab ); #ifndef SQLITE_OMIT_AUTHORIZATION /* Invoke the authorization callback. */ if( sqlite3AuthCheck(pParse, SQLITE_ALTER_TABLE, zDb, pTab->zName, 0) ){ return; } #endif /* If the default value for the new column was specified with a ** literal NULL, then set pDflt to 0. This simplifies checking ** for an SQL NULL default below. 
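**
** For example (an illustrative statement, not part of the original
** comments): with "ALTER TABLE t1 ADD COLUMN c DEFAULT NULL" the checks
** below behave as if no default value had been supplied for column c.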
*/ if( pDflt && pDflt->op==TK_NULL ){ pDflt = 0; } /* Check that the new column is not specified as PRIMARY KEY or UNIQUE. ** If there is a NOT NULL constraint, then the default value for the ** column must not be NULL. */ if( pCol->colFlags & COLFLAG_PRIMKEY ){ sqlite3ErrorMsg(pParse, "Cannot add a PRIMARY KEY column"); return; } if( pNew->pIndex ){ sqlite3ErrorMsg(pParse, "Cannot add a UNIQUE column"); return; } if( (db->flags&SQLITE_ForeignKeys) && pNew->pFKey && pDflt ){ sqlite3ErrorMsg(pParse, "Cannot add a REFERENCES column with non-NULL default value"); return; } if( pCol->notNull && !pDflt ){ sqlite3ErrorMsg(pParse, "Cannot add a NOT NULL column with default value NULL"); return; } /* Ensure the default expression is something that sqlite3ValueFromExpr() ** can handle (i.e. not CURRENT_TIME etc.) */ if( pDflt ){ sqlite3_value *pVal = 0; int rc; rc = sqlite3ValueFromExpr(db, pDflt, SQLITE_UTF8, SQLITE_AFF_BLOB, &pVal); assert( rc==SQLITE_OK || rc==SQLITE_NOMEM ); if( rc!=SQLITE_OK ){ db->mallocFailed = 1; return; } if( !pVal ){ sqlite3ErrorMsg(pParse, "Cannot add a column with non-constant default"); return; } sqlite3ValueFree(pVal); } /* Modify the CREATE TABLE statement. */ zCol = sqlite3DbStrNDup(db, (char*)pColDef->z, pColDef->n); if( zCol ){ char *zEnd = &zCol[pColDef->n-1]; int savedDbFlags = db->flags; while( zEnd>zCol && (*zEnd==';' || sqlite3Isspace(*zEnd)) ){ *zEnd-- = '\0'; } db->flags |= SQLITE_PreferBuiltin; sqlite3NestedParse(pParse, "UPDATE \"%w\".%s SET " "sql = substr(sql,1,%d) || ', ' || %Q || substr(sql,%d) " "WHERE type = 'table' AND name = %Q", zDb, SCHEMA_TABLE(iDb), pNew->addColOffset, zCol, pNew->addColOffset+1, zTab ); sqlite3DbFree(db, zCol); db->flags = savedDbFlags; } /* If the default value of the new column is NULL, then set the file ** format to 2. If the default value of the new column is not NULL, ** the file format becomes 3. */ sqlite3MinimumFileFormat(pParse, iDb, pDflt ? 3 : 2); /* Reload the schema of the modified table. */ reloadTableSchema(pParse, pTab, pTab->zName); } /* ** This function is called by the parser after the table-name in ** an "ALTER TABLE ADD" statement is parsed. Argument ** pSrc is the full-name of the table being altered. ** ** This routine makes a (partial) copy of the Table structure ** for the table being altered and sets Parse.pNewTable to point ** to it. Routines called by the parser as the column definition ** is parsed (i.e. sqlite3AddColumn()) add the new Column data to ** the copy. The copy of the Table structure is deleted by tokenize.c ** after parsing is finished. ** ** Routine sqlite3AlterFinishAddColumn() will be called to complete ** coding the "ALTER TABLE ... ADD" statement. */ SQLITE_PRIVATE void sqlite3AlterBeginAddColumn(Parse *pParse, SrcList *pSrc){ Table *pNew; Table *pTab; Vdbe *v; int iDb; int i; int nAlloc; sqlite3 *db = pParse->db; /* Look up the table being altered. */ assert( pParse->pNewTable==0 ); assert( sqlite3BtreeHoldsAllMutexes(db) ); if( db->mallocFailed ) goto exit_begin_add_column; pTab = sqlite3LocateTableItem(pParse, 0, &pSrc->a[0]); if( !pTab ) goto exit_begin_add_column; #ifndef SQLITE_OMIT_VIRTUALTABLE if( IsVirtual(pTab) ){ sqlite3ErrorMsg(pParse, "virtual tables may not be altered"); goto exit_begin_add_column; } #endif /* Make sure this is not an attempt to ALTER a view. 
*/ if( pTab->pSelect ){ sqlite3ErrorMsg(pParse, "Cannot add a column to a view"); goto exit_begin_add_column; } if( SQLITE_OK!=isSystemTable(pParse, pTab->zName) ){ goto exit_begin_add_column; } assert( pTab->addColOffset>0 ); iDb = sqlite3SchemaToIndex(db, pTab->pSchema); /* Put a copy of the Table struct in Parse.pNewTable for the ** sqlite3AddColumn() function and friends to modify. But modify ** the name by adding an "sqlite_altertab_" prefix. By adding this ** prefix, we insure that the name will not collide with an existing ** table because user table are not allowed to have the "sqlite_" ** prefix on their name. */ pNew = (Table*)sqlite3DbMallocZero(db, sizeof(Table)); if( !pNew ) goto exit_begin_add_column; pParse->pNewTable = pNew; pNew->nRef = 1; pNew->nCol = pTab->nCol; assert( pNew->nCol>0 ); nAlloc = (((pNew->nCol-1)/8)*8)+8; assert( nAlloc>=pNew->nCol && nAlloc%8==0 && nAlloc-pNew->nCol<8 ); pNew->aCol = (Column*)sqlite3DbMallocZero(db, sizeof(Column)*nAlloc); pNew->zName = sqlite3MPrintf(db, "sqlite_altertab_%s", pTab->zName); if( !pNew->aCol || !pNew->zName ){ db->mallocFailed = 1; goto exit_begin_add_column; } memcpy(pNew->aCol, pTab->aCol, sizeof(Column)*pNew->nCol); for(i=0; inCol; i++){ Column *pCol = &pNew->aCol[i]; pCol->zName = sqlite3DbStrDup(db, pCol->zName); pCol->zColl = 0; pCol->zType = 0; pCol->pDflt = 0; pCol->zDflt = 0; } pNew->pSchema = db->aDb[iDb].pSchema; pNew->addColOffset = pTab->addColOffset; pNew->nRef = 1; /* Begin a transaction and increment the schema cookie. */ sqlite3BeginWriteOperation(pParse, 0, iDb); v = sqlite3GetVdbe(pParse); if( !v ) goto exit_begin_add_column; sqlite3ChangeCookie(pParse, iDb); exit_begin_add_column: sqlite3SrcListDelete(db, pSrc); return; } #endif /* SQLITE_ALTER_TABLE */ /************** End of alter.c ***********************************************/ /************** Begin file analyze.c *****************************************/ /* ** 2005-07-08 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** This file contains code associated with the ANALYZE command. ** ** The ANALYZE command gather statistics about the content of tables ** and indices. These statistics are made available to the query planner ** to help it make better decisions about how to perform queries. ** ** The following system tables are or have been supported: ** ** CREATE TABLE sqlite_stat1(tbl, idx, stat); ** CREATE TABLE sqlite_stat2(tbl, idx, sampleno, sample); ** CREATE TABLE sqlite_stat3(tbl, idx, nEq, nLt, nDLt, sample); ** CREATE TABLE sqlite_stat4(tbl, idx, nEq, nLt, nDLt, sample); ** ** Additional tables might be added in future releases of SQLite. ** The sqlite_stat2 table is not created or used unless the SQLite version ** is between 3.6.18 and 3.7.8, inclusive, and unless SQLite is compiled ** with SQLITE_ENABLE_STAT2. The sqlite_stat2 table is deprecated. ** The sqlite_stat2 table is superseded by sqlite_stat3, which is only ** created and used by SQLite versions 3.7.9 and later and with ** SQLITE_ENABLE_STAT3 defined. The functionality of sqlite_stat3 ** is a superset of sqlite_stat2. The sqlite_stat4 is an enhanced ** version of sqlite_stat3 and is only available when compiled with ** SQLITE_ENABLE_STAT4 and in SQLite versions 3.8.1 and later. 
It is ** not possible to enable both STAT3 and STAT4 at the same time. If they ** are both enabled, then STAT4 takes precedence. ** ** For most applications, sqlite_stat1 provides all the statistics required ** for the query planner to make good choices. ** ** Format of sqlite_stat1: ** ** There is normally one row per index, with the index identified by the ** name in the idx column. The tbl column is the name of the table to ** which the index belongs. In each such row, the stat column will be ** a string consisting of a list of integers. The first integer in this ** list is the number of rows in the index. (This is the same as the ** number of rows in the table, except for partial indices.) The second ** integer is the average number of rows in the index that have the same ** value in the first column of the index. The third integer is the average ** number of rows in the index that have the same value for the first two ** columns. The N-th integer (for N>1) is the average number of rows in ** the index which have the same value for the first N-1 columns. For ** a K-column index, there will be K+1 integers in the stat column. If ** the index is unique, then the last integer will be 1. ** ** The list of integers in the stat column can optionally be followed ** by the keyword "unordered". The "unordered" keyword, if it is present, ** must be separated from the last integer by a single space. If the ** "unordered" keyword is present, then the query planner assumes that ** the index is unordered and will not use the index for a range query. ** ** If the sqlite_stat1.idx column is NULL, then the sqlite_stat1.stat ** column contains a single integer which is the (estimated) number of ** rows in the table identified by sqlite_stat1.tbl. ** ** Format of sqlite_stat2: ** ** The sqlite_stat2 is only created and is only used if SQLite is compiled ** with SQLITE_ENABLE_STAT2 and if the SQLite version number is between ** 3.6.18 and 3.7.8. The "stat2" table contains additional information ** about the distribution of keys within an index. The index is identified by ** the "idx" column and the "tbl" column is the name of the table to which ** the index belongs. There are usually 10 rows in the sqlite_stat2 ** table for each index. ** ** The sqlite_stat2 entries for an index that have sampleno between 0 and 9 ** inclusive are samples of the left-most key value in the index taken at ** evenly spaced points along the index. Let the number of samples be S ** (10 in the standard build) and let C be the number of rows in the index. ** Then the sampled rows are given by: ** ** rownumber = (i*C*2 + C)/(S*2) ** ** For i between 0 and S-1. Conceptually, the index space is divided into ** S uniform buckets and the samples are the middle row from each bucket. ** ** The format for sqlite_stat2 is recorded here for legacy reference. This ** version of SQLite does not support sqlite_stat2. It neither reads nor ** writes the sqlite_stat2 table. This version of SQLite only supports ** sqlite_stat3. ** ** Format for sqlite_stat3: ** ** The sqlite_stat3 format is a subset of sqlite_stat4. Hence, the ** sqlite_stat4 format will be described first. Further information ** about sqlite_stat3 follows the sqlite_stat4 description. ** ** Format for sqlite_stat4: ** ** As with sqlite_stat2, the sqlite_stat4 table contains histogram data ** to aid the query planner in choosing good indices based on the values ** that indexed columns are compared against in the WHERE clauses of ** queries. 
** ** The sqlite_stat4 table contains multiple entries for each index. ** The idx column names the index and the tbl column is the table of the ** index. If the idx and tbl columns are the same, then the sample is ** of the INTEGER PRIMARY KEY. The sample column is a blob which is the ** binary encoding of a key from the index. The nEq column is a ** list of integers. The first integer is the approximate number ** of entries in the index whose left-most column exactly matches ** the left-most column of the sample. The second integer in nEq ** is the approximate number of entries in the index where the ** first two columns match the first two columns of the sample. ** And so forth. nLt is another list of integers that show the approximate ** number of entries that are strictly less than the sample. The first ** integer in nLt contains the number of entries in the index where the ** left-most column is less than the left-most column of the sample. ** The K-th integer in the nLt entry is the number of index entries ** where the first K columns are less than the first K columns of the ** sample. The nDLt column is like nLt except that it contains the ** number of distinct entries in the index that are less than the ** sample. ** ** There can be an arbitrary number of sqlite_stat4 entries per index. ** The ANALYZE command will typically generate sqlite_stat4 tables ** that contain between 10 and 40 samples which are distributed across ** the key space, though not uniformly, and which include samples with ** large nEq values. ** ** Format for sqlite_stat3 redux: ** ** The sqlite_stat3 table is like sqlite_stat4 except that it only ** looks at the left-most column of the index. The sqlite_stat3.sample ** column contains the actual value of the left-most column instead ** of a blob encoding of the complete index key as is found in ** sqlite_stat4.sample. The nEq, nLt, and nDLt entries of sqlite_stat3 ** all contain just a single integer which is the same as the first ** integer in the equivalent columns in sqlite_stat4. */ #ifndef SQLITE_OMIT_ANALYZE /* #include "sqliteInt.h" */ #if defined(SQLITE_ENABLE_STAT4) # define IsStat4 1 # define IsStat3 0 #elif defined(SQLITE_ENABLE_STAT3) # define IsStat4 0 # define IsStat3 1 #else # define IsStat4 0 # define IsStat3 0 # undef SQLITE_STAT4_SAMPLES # define SQLITE_STAT4_SAMPLES 1 #endif #define IsStat34 (IsStat3+IsStat4) /* 1 for STAT3 or STAT4. 0 otherwise */ /* ** This routine generates code that opens the sqlite_statN tables. ** The sqlite_stat1 table is always relevant. sqlite_stat2 is now ** obsolete. sqlite_stat3 and sqlite_stat4 are only opened when ** appropriate compile-time options are provided. ** ** If the sqlite_statN tables do not previously exist, it is created. ** ** Argument zWhere may be a pointer to a buffer containing a table name, ** or it may be a NULL pointer. If it is not NULL, then all entries in ** the sqlite_statN tables associated with the named table are deleted. ** If zWhere==0, then code is generated to delete all stat table entries. 
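**
** As an illustration (hypothetical names, not part of the original
** comments): analyzing only table "t1" in database "main" makes this
** routine emit the equivalent of
**
**   DELETE FROM 'main'.sqlite_stat1 WHERE tbl='t1';
**
** for each sqlite_statN table that already exists, while a whole-database
** ANALYZE (zWhere==0) clears each existing statistics table with OP_Clear.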
*/ static void openStatTable( Parse *pParse, /* Parsing context */ int iDb, /* The database we are looking in */ int iStatCur, /* Open the sqlite_stat1 table on this cursor */ const char *zWhere, /* Delete entries for this table or index */ const char *zWhereType /* Either "tbl" or "idx" */ ){ static const struct { const char *zName; const char *zCols; } aTable[] = { { "sqlite_stat1", "tbl,idx,stat" }, #if defined(SQLITE_ENABLE_STAT4) { "sqlite_stat4", "tbl,idx,neq,nlt,ndlt,sample" }, { "sqlite_stat3", 0 }, #elif defined(SQLITE_ENABLE_STAT3) { "sqlite_stat3", "tbl,idx,neq,nlt,ndlt,sample" }, { "sqlite_stat4", 0 }, #else { "sqlite_stat3", 0 }, { "sqlite_stat4", 0 }, #endif }; int i; sqlite3 *db = pParse->db; Db *pDb; Vdbe *v = sqlite3GetVdbe(pParse); int aRoot[ArraySize(aTable)]; u8 aCreateTbl[ArraySize(aTable)]; if( v==0 ) return; assert( sqlite3BtreeHoldsAllMutexes(db) ); assert( sqlite3VdbeDb(v)==db ); pDb = &db->aDb[iDb]; /* Create new statistic tables if they do not exist, or clear them ** if they do already exist. */ for(i=0; izName))==0 ){ if( aTable[i].zCols ){ /* The sqlite_statN table does not exist. Create it. Note that a ** side-effect of the CREATE TABLE statement is to leave the rootpage ** of the new table in register pParse->regRoot. This is important ** because the OpenWrite opcode below will be needing it. */ sqlite3NestedParse(pParse, "CREATE TABLE %Q.%s(%s)", pDb->zName, zTab, aTable[i].zCols ); aRoot[i] = pParse->regRoot; aCreateTbl[i] = OPFLAG_P2ISREG; } }else{ /* The table already exists. If zWhere is not NULL, delete all entries ** associated with the table zWhere. If zWhere is NULL, delete the ** entire contents of the table. */ aRoot[i] = pStat->tnum; aCreateTbl[i] = 0; sqlite3TableLock(pParse, iDb, aRoot[i], 1, zTab); if( zWhere ){ sqlite3NestedParse(pParse, "DELETE FROM %Q.%s WHERE %s=%Q", pDb->zName, zTab, zWhereType, zWhere ); }else{ /* The sqlite_stat[134] table already exists. Delete all rows. */ sqlite3VdbeAddOp2(v, OP_Clear, aRoot[i], iDb); } } } /* Open the sqlite_stat[134] tables for writing. */ for(i=0; aTable[i].zCols; i++){ assert( inRowid ){ sqlite3DbFree(db, p->u.aRowid); p->nRowid = 0; } } #endif /* Initialize the BLOB value of a ROWID */ #ifdef SQLITE_ENABLE_STAT3_OR_STAT4 static void sampleSetRowid(sqlite3 *db, Stat4Sample *p, int n, const u8 *pData){ assert( db!=0 ); if( p->nRowid ) sqlite3DbFree(db, p->u.aRowid); p->u.aRowid = sqlite3DbMallocRaw(db, n); if( p->u.aRowid ){ p->nRowid = n; memcpy(p->u.aRowid, pData, n); }else{ p->nRowid = 0; } } #endif /* Initialize the INTEGER value of a ROWID. */ #ifdef SQLITE_ENABLE_STAT3_OR_STAT4 static void sampleSetRowidInt64(sqlite3 *db, Stat4Sample *p, i64 iRowid){ assert( db!=0 ); if( p->nRowid ) sqlite3DbFree(db, p->u.aRowid); p->nRowid = 0; p->u.iRowid = iRowid; } #endif /* ** Copy the contents of object (*pFrom) into (*pTo). */ #ifdef SQLITE_ENABLE_STAT3_OR_STAT4 static void sampleCopy(Stat4Accum *p, Stat4Sample *pTo, Stat4Sample *pFrom){ pTo->isPSample = pFrom->isPSample; pTo->iCol = pFrom->iCol; pTo->iHash = pFrom->iHash; memcpy(pTo->anEq, pFrom->anEq, sizeof(tRowcnt)*p->nCol); memcpy(pTo->anLt, pFrom->anLt, sizeof(tRowcnt)*p->nCol); memcpy(pTo->anDLt, pFrom->anDLt, sizeof(tRowcnt)*p->nCol); if( pFrom->nRowid ){ sampleSetRowid(p->db, pTo, pFrom->nRowid, pFrom->u.aRowid); }else{ sampleSetRowidInt64(p->db, pTo, pFrom->u.iRowid); } } #endif /* ** Reclaim all memory of a Stat4Accum structure. 
*/ static void stat4Destructor(void *pOld){ Stat4Accum *p = (Stat4Accum*)pOld; #ifdef SQLITE_ENABLE_STAT3_OR_STAT4 int i; for(i=0; inCol; i++) sampleClear(p->db, p->aBest+i); for(i=0; imxSample; i++) sampleClear(p->db, p->a+i); sampleClear(p->db, &p->current); #endif sqlite3DbFree(p->db, p); } /* ** Implementation of the stat_init(N,K,C) SQL function. The three parameters ** are: ** N: The number of columns in the index including the rowid/pk (note 1) ** K: The number of columns in the index excluding the rowid/pk. ** C: The number of rows in the index (note 2) ** ** Note 1: In the special case of the covering index that implements a ** WITHOUT ROWID table, N is the number of PRIMARY KEY columns, not the ** total number of columns in the table. ** ** Note 2: C is only used for STAT3 and STAT4. ** ** For indexes on ordinary rowid tables, N==K+1. But for indexes on ** WITHOUT ROWID tables, N=K+P where P is the number of columns in the ** PRIMARY KEY of the table. The covering index that implements the ** original WITHOUT ROWID table as N==K as a special case. ** ** This routine allocates the Stat4Accum object in heap memory. The return ** value is a pointer to the Stat4Accum object. The datatype of the ** return value is BLOB, but it is really just a pointer to the Stat4Accum ** object. */ static void statInit( sqlite3_context *context, int argc, sqlite3_value **argv ){ Stat4Accum *p; int nCol; /* Number of columns in index being sampled */ int nKeyCol; /* Number of key columns */ int nColUp; /* nCol rounded up for alignment */ int n; /* Bytes of space to allocate */ sqlite3 *db; /* Database connection */ #ifdef SQLITE_ENABLE_STAT3_OR_STAT4 int mxSample = SQLITE_STAT4_SAMPLES; #endif /* Decode the three function arguments */ UNUSED_PARAMETER(argc); nCol = sqlite3_value_int(argv[0]); assert( nCol>0 ); nColUp = sizeof(tRowcnt)<8 ? (nCol+1)&~1 : nCol; nKeyCol = sqlite3_value_int(argv[1]); assert( nKeyCol<=nCol ); assert( nKeyCol>0 ); /* Allocate the space required for the Stat4Accum object */ n = sizeof(*p) + sizeof(tRowcnt)*nColUp /* Stat4Accum.anEq */ + sizeof(tRowcnt)*nColUp /* Stat4Accum.anDLt */ #ifdef SQLITE_ENABLE_STAT3_OR_STAT4 + sizeof(tRowcnt)*nColUp /* Stat4Accum.anLt */ + sizeof(Stat4Sample)*(nCol+mxSample) /* Stat4Accum.aBest[], a[] */ + sizeof(tRowcnt)*3*nColUp*(nCol+mxSample) #endif ; db = sqlite3_context_db_handle(context); p = sqlite3DbMallocZero(db, n); if( p==0 ){ sqlite3_result_error_nomem(context); return; } p->db = db; p->nRow = 0; p->nCol = nCol; p->nKeyCol = nKeyCol; p->current.anDLt = (tRowcnt*)&p[1]; p->current.anEq = &p->current.anDLt[nColUp]; #ifdef SQLITE_ENABLE_STAT3_OR_STAT4 { u8 *pSpace; /* Allocated space not yet assigned */ int i; /* Used to iterate through p->aSample[] */ p->iGet = -1; p->mxSample = mxSample; p->nPSample = (tRowcnt)(sqlite3_value_int64(argv[2])/(mxSample/3+1) + 1); p->current.anLt = &p->current.anEq[nColUp]; p->iPrn = 0x689e962d*(u32)nCol ^ 0xd0944565*(u32)sqlite3_value_int(argv[2]); /* Set up the Stat4Accum.a[] and aBest[] arrays */ p->a = (struct Stat4Sample*)&p->current.anLt[nColUp]; p->aBest = &p->a[mxSample]; pSpace = (u8*)(&p->a[mxSample+nCol]); for(i=0; i<(mxSample+nCol); i++){ p->a[i].anEq = (tRowcnt *)pSpace; pSpace += (sizeof(tRowcnt) * nColUp); p->a[i].anLt = (tRowcnt *)pSpace; pSpace += (sizeof(tRowcnt) * nColUp); p->a[i].anDLt = (tRowcnt *)pSpace; pSpace += (sizeof(tRowcnt) * nColUp); } assert( (pSpace - (u8*)p)==n ); for(i=0; iaBest[i].iCol = i; } } #endif /* Return a pointer to the allocated object to the caller. 
Note that ** only the pointer (the 2nd parameter) matters. The size of the object ** (given by the 3rd parameter) is never used and can be any positive ** value. */ sqlite3_result_blob(context, p, sizeof(*p), stat4Destructor); } static const FuncDef statInitFuncdef = { 2+IsStat34, /* nArg */ SQLITE_UTF8, /* funcFlags */ 0, /* pUserData */ 0, /* pNext */ statInit, /* xFunc */ 0, /* xStep */ 0, /* xFinalize */ "stat_init", /* zName */ 0, /* pHash */ 0 /* pDestructor */ }; #ifdef SQLITE_ENABLE_STAT4 /* ** pNew and pOld are both candidate non-periodic samples selected for ** the same column (pNew->iCol==pOld->iCol). Ignoring this column and ** considering only any trailing columns and the sample hash value, this ** function returns true if sample pNew is to be preferred over pOld. ** In other words, if we assume that the cardinalities of the selected ** column for pNew and pOld are equal, is pNew to be preferred over pOld. ** ** This function assumes that for each argument sample, the contents of ** the anEq[] array from pSample->anEq[pSample->iCol+1] onwards are valid. */ static int sampleIsBetterPost( Stat4Accum *pAccum, Stat4Sample *pNew, Stat4Sample *pOld ){ int nCol = pAccum->nCol; int i; assert( pNew->iCol==pOld->iCol ); for(i=pNew->iCol+1; i<nCol; i++){ if( pNew->anEq[i]>pOld->anEq[i] ) return 1; if( pNew->anEq[i]<pOld->anEq[i] ) return 0; } if( pNew->iHash>pOld->iHash ) return 1; return 0; } #endif #ifdef SQLITE_ENABLE_STAT3_OR_STAT4 /* ** Return true if pNew is to be preferred over pOld. ** ** This function assumes that for each argument sample, the contents of ** the anEq[] array from pSample->anEq[pSample->iCol] onwards are valid. */ static int sampleIsBetter( Stat4Accum *pAccum, Stat4Sample *pNew, Stat4Sample *pOld ){ tRowcnt nEqNew = pNew->anEq[pNew->iCol]; tRowcnt nEqOld = pOld->anEq[pOld->iCol]; assert( pOld->isPSample==0 && pNew->isPSample==0 ); assert( IsStat4 || (pNew->iCol==0 && pOld->iCol==0) ); if( (nEqNew>nEqOld) ) return 1; #ifdef SQLITE_ENABLE_STAT4 if( nEqNew==nEqOld ){ if( pNew->iCol<pOld->iCol ) return 1; return (pNew->iCol==pOld->iCol && sampleIsBetterPost(pAccum, pNew, pOld)); } return 0; #else return (nEqNew==nEqOld && pNew->iHash>pOld->iHash); #endif } /* ** Copy the contents of sample *pNew into the p->a[] array. If necessary, ** remove the least desirable sample from p->a[] to make room. */ static void sampleInsert(Stat4Accum *p, Stat4Sample *pNew, int nEqZero){ Stat4Sample *pSample = 0; int i; assert( IsStat4 || nEqZero==0 ); #ifdef SQLITE_ENABLE_STAT4 if( pNew->isPSample==0 ){ Stat4Sample *pUpgrade = 0; assert( pNew->anEq[pNew->iCol]>0 ); /* This sample is being added because the prefix that ends in column ** iCol occurs many times in the table. However, if we have already ** added a sample that shares this prefix, there is no need to add ** this one. Instead, upgrade the priority of the highest priority ** existing sample that shares this prefix. */ for(i=p->nSample-1; i>=0; i--){ Stat4Sample *pOld = &p->a[i]; if( pOld->anEq[pNew->iCol]==0 ){ if( pOld->isPSample ) return; assert( pOld->iCol>pNew->iCol ); assert( sampleIsBetter(p, pNew, pOld) ); if( pUpgrade==0 || sampleIsBetter(p, pOld, pUpgrade) ){ pUpgrade = pOld; } } } if( pUpgrade ){ pUpgrade->iCol = pNew->iCol; pUpgrade->anEq[pUpgrade->iCol] = pNew->anEq[pUpgrade->iCol]; goto find_new_min; } } #endif /* If necessary, remove sample iMin to make room for the new sample.
*/ if( p->nSample>=p->mxSample ){ Stat4Sample *pMin = &p->a[p->iMin]; tRowcnt *anEq = pMin->anEq; tRowcnt *anLt = pMin->anLt; tRowcnt *anDLt = pMin->anDLt; sampleClear(p->db, pMin); memmove(pMin, &pMin[1], sizeof(p->a[0])*(p->nSample-p->iMin-1)); pSample = &p->a[p->nSample-1]; pSample->nRowid = 0; pSample->anEq = anEq; pSample->anDLt = anDLt; pSample->anLt = anLt; p->nSample = p->mxSample-1; } /* The "rows less-than" for the rowid column must be greater than that ** for the last sample in the p->a[] array. Otherwise, the samples would ** be out of order. */ #ifdef SQLITE_ENABLE_STAT4 assert( p->nSample==0 || pNew->anLt[p->nCol-1] > p->a[p->nSample-1].anLt[p->nCol-1] ); #endif /* Insert the new sample */ pSample = &p->a[p->nSample]; sampleCopy(p, pSample, pNew); p->nSample++; /* Zero the first nEqZero entries in the anEq[] array. */ memset(pSample->anEq, 0, sizeof(tRowcnt)*nEqZero); #ifdef SQLITE_ENABLE_STAT4 find_new_min: #endif if( p->nSample>=p->mxSample ){ int iMin = -1; for(i=0; imxSample; i++){ if( p->a[i].isPSample ) continue; if( iMin<0 || sampleIsBetter(p, &p->a[iMin], &p->a[i]) ){ iMin = i; } } assert( iMin>=0 ); p->iMin = iMin; } } #endif /* SQLITE_ENABLE_STAT3_OR_STAT4 */ /* ** Field iChng of the index being scanned has changed. So at this point ** p->current contains a sample that reflects the previous row of the ** index. The value of anEq[iChng] and subsequent anEq[] elements are ** correct at this point. */ static void samplePushPrevious(Stat4Accum *p, int iChng){ #ifdef SQLITE_ENABLE_STAT4 int i; /* Check if any samples from the aBest[] array should be pushed ** into IndexSample.a[] at this point. */ for(i=(p->nCol-2); i>=iChng; i--){ Stat4Sample *pBest = &p->aBest[i]; pBest->anEq[i] = p->current.anEq[i]; if( p->nSamplemxSample || sampleIsBetter(p, pBest, &p->a[p->iMin]) ){ sampleInsert(p, pBest, i); } } /* Update the anEq[] fields of any samples already collected. */ for(i=p->nSample-1; i>=0; i--){ int j; for(j=iChng; jnCol; j++){ if( p->a[i].anEq[j]==0 ) p->a[i].anEq[j] = p->current.anEq[j]; } } #endif #if defined(SQLITE_ENABLE_STAT3) && !defined(SQLITE_ENABLE_STAT4) if( iChng==0 ){ tRowcnt nLt = p->current.anLt[0]; tRowcnt nEq = p->current.anEq[0]; /* Check if this is to be a periodic sample. If so, add it. */ if( (nLt/p->nPSample)!=(nLt+nEq)/p->nPSample ){ p->current.isPSample = 1; sampleInsert(p, &p->current, 0); p->current.isPSample = 0; }else /* Or if it is a non-periodic sample. Add it in this case too. */ if( p->nSamplemxSample || sampleIsBetter(p, &p->current, &p->a[p->iMin]) ){ sampleInsert(p, &p->current, 0); } } #endif #ifndef SQLITE_ENABLE_STAT3_OR_STAT4 UNUSED_PARAMETER( p ); UNUSED_PARAMETER( iChng ); #endif } /* ** Implementation of the stat_push SQL function: stat_push(P,C,R) ** Arguments: ** ** P Pointer to the Stat4Accum object created by stat_init() ** C Index of left-most column to differ from previous row ** R Rowid for the current row. Might be a key record for ** WITHOUT ROWID tables. ** ** This SQL function always returns NULL. It's purpose it to accumulate ** statistical data and/or samples in the Stat4Accum object about the ** index being analyzed. The stat_get() SQL function will later be used to ** extract relevant information for constructing the sqlite_statN tables. 
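**
** As a worked illustration (hypothetical data, not part of the original
** comments): while scanning an index on (a,b) whose consecutive entries are
** (1,5), (1,7), (2,1), the second entry differs from the first in column b
** only, so stat_push() is called with C==1; the third entry differs in
** column a, so stat_push() is called with C==0.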
** ** The R parameter is only used for STAT3 and STAT4 */ static void statPush( sqlite3_context *context, int argc, sqlite3_value **argv ){ int i; /* The three function arguments */ Stat4Accum *p = (Stat4Accum*)sqlite3_value_blob(argv[0]); int iChng = sqlite3_value_int(argv[1]); UNUSED_PARAMETER( argc ); UNUSED_PARAMETER( context ); assert( p->nCol>0 ); assert( iChngnCol ); if( p->nRow==0 ){ /* This is the first call to this function. Do initialization. */ for(i=0; inCol; i++) p->current.anEq[i] = 1; }else{ /* Second and subsequent calls get processed here */ samplePushPrevious(p, iChng); /* Update anDLt[], anLt[] and anEq[] to reflect the values that apply ** to the current row of the index. */ for(i=0; icurrent.anEq[i]++; } for(i=iChng; inCol; i++){ p->current.anDLt[i]++; #ifdef SQLITE_ENABLE_STAT3_OR_STAT4 p->current.anLt[i] += p->current.anEq[i]; #endif p->current.anEq[i] = 1; } } p->nRow++; #ifdef SQLITE_ENABLE_STAT3_OR_STAT4 if( sqlite3_value_type(argv[2])==SQLITE_INTEGER ){ sampleSetRowidInt64(p->db, &p->current, sqlite3_value_int64(argv[2])); }else{ sampleSetRowid(p->db, &p->current, sqlite3_value_bytes(argv[2]), sqlite3_value_blob(argv[2])); } p->current.iHash = p->iPrn = p->iPrn*1103515245 + 12345; #endif #ifdef SQLITE_ENABLE_STAT4 { tRowcnt nLt = p->current.anLt[p->nCol-1]; /* Check if this is to be a periodic sample. If so, add it. */ if( (nLt/p->nPSample)!=(nLt+1)/p->nPSample ){ p->current.isPSample = 1; p->current.iCol = 0; sampleInsert(p, &p->current, p->nCol-1); p->current.isPSample = 0; } /* Update the aBest[] array. */ for(i=0; i<(p->nCol-1); i++){ p->current.iCol = i; if( i>=iChng || sampleIsBetterPost(p, &p->current, &p->aBest[i]) ){ sampleCopy(p, &p->aBest[i], &p->current); } } } #endif } static const FuncDef statPushFuncdef = { 2+IsStat34, /* nArg */ SQLITE_UTF8, /* funcFlags */ 0, /* pUserData */ 0, /* pNext */ statPush, /* xFunc */ 0, /* xStep */ 0, /* xFinalize */ "stat_push", /* zName */ 0, /* pHash */ 0 /* pDestructor */ }; #define STAT_GET_STAT1 0 /* "stat" column of stat1 table */ #define STAT_GET_ROWID 1 /* "rowid" column of stat[34] entry */ #define STAT_GET_NEQ 2 /* "neq" column of stat[34] entry */ #define STAT_GET_NLT 3 /* "nlt" column of stat[34] entry */ #define STAT_GET_NDLT 4 /* "ndlt" column of stat[34] entry */ /* ** Implementation of the stat_get(P,J) SQL function. This routine is ** used to query statistical information that has been gathered into ** the Stat4Accum object by prior calls to stat_push(). The P parameter ** has type BLOB but it is really just a pointer to the Stat4Accum object. ** The content to returned is determined by the parameter J ** which is one of the STAT_GET_xxxx values defined above. ** ** If neither STAT3 nor STAT4 are enabled, then J is always ** STAT_GET_STAT1 and is hence omitted and this routine becomes ** a one-parameter function, stat_get(P), that always returns the ** stat1 table entry information. */ static void statGet( sqlite3_context *context, int argc, sqlite3_value **argv ){ Stat4Accum *p = (Stat4Accum*)sqlite3_value_blob(argv[0]); #ifdef SQLITE_ENABLE_STAT3_OR_STAT4 /* STAT3 and STAT4 have a parameter on this routine. */ int eCall = sqlite3_value_int(argv[1]); assert( argc==2 ); assert( eCall==STAT_GET_STAT1 || eCall==STAT_GET_NEQ || eCall==STAT_GET_ROWID || eCall==STAT_GET_NLT || eCall==STAT_GET_NDLT ); if( eCall==STAT_GET_STAT1 ) #else assert( argc==1 ); #endif { /* Return the value to store in the "stat" column of the sqlite_stat1 ** table for this index. 
** ** The value is a string composed of a list of integers describing ** the index. The first integer in the list is the total number of ** entries in the index. There is one additional integer in the list ** for each indexed column. This additional integer is an estimate of ** the number of rows matched by a stabbing query on the index using ** a key with the corresponding number of fields. In other words, ** if the index is on columns (a,b) and the sqlite_stat1 value is ** "100 10 2", then SQLite estimates that: ** ** * the index contains 100 rows, ** * "WHERE a=?" matches 10 rows, and ** * "WHERE a=? AND b=?" matches 2 rows. ** ** If D is the count of distinct values and K is the total number of ** rows, then each estimate is computed as: ** ** I = (K+D-1)/D */ char *z; int i; char *zRet = sqlite3MallocZero( (p->nKeyCol+1)*25 ); if( zRet==0 ){ sqlite3_result_error_nomem(context); return; } sqlite3_snprintf(24, zRet, "%llu", (u64)p->nRow); z = zRet + sqlite3Strlen30(zRet); for(i=0; inKeyCol; i++){ u64 nDistinct = p->current.anDLt[i] + 1; u64 iVal = (p->nRow + nDistinct - 1) / nDistinct; sqlite3_snprintf(24, z, " %llu", iVal); z += sqlite3Strlen30(z); assert( p->current.anEq[i] ); } assert( z[0]=='\0' && z>zRet ); sqlite3_result_text(context, zRet, -1, sqlite3_free); } #ifdef SQLITE_ENABLE_STAT3_OR_STAT4 else if( eCall==STAT_GET_ROWID ){ if( p->iGet<0 ){ samplePushPrevious(p, 0); p->iGet = 0; } if( p->iGetnSample ){ Stat4Sample *pS = p->a + p->iGet; if( pS->nRowid==0 ){ sqlite3_result_int64(context, pS->u.iRowid); }else{ sqlite3_result_blob(context, pS->u.aRowid, pS->nRowid, SQLITE_TRANSIENT); } } }else{ tRowcnt *aCnt = 0; assert( p->iGetnSample ); switch( eCall ){ case STAT_GET_NEQ: aCnt = p->a[p->iGet].anEq; break; case STAT_GET_NLT: aCnt = p->a[p->iGet].anLt; break; default: { aCnt = p->a[p->iGet].anDLt; p->iGet++; break; } } if( IsStat3 ){ sqlite3_result_int64(context, (i64)aCnt[0]); }else{ char *zRet = sqlite3MallocZero(p->nCol * 25); if( zRet==0 ){ sqlite3_result_error_nomem(context); }else{ int i; char *z = zRet; for(i=0; inCol; i++){ sqlite3_snprintf(24, z, "%llu ", (u64)aCnt[i]); z += sqlite3Strlen30(z); } assert( z[0]=='\0' && z>zRet ); z[-1] = '\0'; sqlite3_result_text(context, zRet, -1, sqlite3_free); } } } #endif /* SQLITE_ENABLE_STAT3_OR_STAT4 */ #ifndef SQLITE_DEBUG UNUSED_PARAMETER( argc ); #endif } static const FuncDef statGetFuncdef = { 1+IsStat34, /* nArg */ SQLITE_UTF8, /* funcFlags */ 0, /* pUserData */ 0, /* pNext */ statGet, /* xFunc */ 0, /* xStep */ 0, /* xFinalize */ "stat_get", /* zName */ 0, /* pHash */ 0 /* pDestructor */ }; static void callStatGet(Vdbe *v, int regStat4, int iParam, int regOut){ assert( regOut!=regStat4 && regOut!=regStat4+1 ); #ifdef SQLITE_ENABLE_STAT3_OR_STAT4 sqlite3VdbeAddOp2(v, OP_Integer, iParam, regStat4+1); #elif SQLITE_DEBUG assert( iParam==STAT_GET_STAT1 ); #else UNUSED_PARAMETER( iParam ); #endif sqlite3VdbeAddOp3(v, OP_Function0, 0, regStat4, regOut); sqlite3VdbeChangeP4(v, -1, (char*)&statGetFuncdef, P4_FUNCDEF); sqlite3VdbeChangeP5(v, 1 + IsStat34); } /* ** Generate code to do an analysis of all indices associated with ** a single table. 
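**
** The code generated here drives the stat_init(), stat_push() and stat_get()
** SQL functions defined above; the pseudo-code in the body of this routine
** shows the structure of the scan. As a quick arithmetic check of the
** estimate formula used by stat_get() (hypothetical numbers, not part of
** the original comments): with K=100 rows and D=7 distinct values for a
** column prefix, the stored estimate is (100+7-1)/7 = 15, i.e. ceil(K/D).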
*/ static void analyzeOneTable( Parse *pParse, /* Parser context */ Table *pTab, /* Table whose indices are to be analyzed */ Index *pOnlyIdx, /* If not NULL, only analyze this one index */ int iStatCur, /* Index of VdbeCursor that writes the sqlite_stat1 table */ int iMem, /* Available memory locations begin here */ int iTab /* Next available cursor */ ){ sqlite3 *db = pParse->db; /* Database handle */ Index *pIdx; /* An index to being analyzed */ int iIdxCur; /* Cursor open on index being analyzed */ int iTabCur; /* Table cursor */ Vdbe *v; /* The virtual machine being built up */ int i; /* Loop counter */ int jZeroRows = -1; /* Jump from here if number of rows is zero */ int iDb; /* Index of database containing pTab */ u8 needTableCnt = 1; /* True to count the table */ int regNewRowid = iMem++; /* Rowid for the inserted record */ int regStat4 = iMem++; /* Register to hold Stat4Accum object */ int regChng = iMem++; /* Index of changed index field */ #ifdef SQLITE_ENABLE_STAT3_OR_STAT4 int regRowid = iMem++; /* Rowid argument passed to stat_push() */ #endif int regTemp = iMem++; /* Temporary use register */ int regTabname = iMem++; /* Register containing table name */ int regIdxname = iMem++; /* Register containing index name */ int regStat1 = iMem++; /* Value for the stat column of sqlite_stat1 */ int regPrev = iMem; /* MUST BE LAST (see below) */ pParse->nMem = MAX(pParse->nMem, iMem); v = sqlite3GetVdbe(pParse); if( v==0 || NEVER(pTab==0) ){ return; } if( pTab->tnum==0 ){ /* Do not gather statistics on views or virtual tables */ return; } if( sqlite3_strnicmp(pTab->zName, "sqlite_", 7)==0 ){ /* Do not gather statistics on system tables */ return; } assert( sqlite3BtreeHoldsAllMutexes(db) ); iDb = sqlite3SchemaToIndex(db, pTab->pSchema); assert( iDb>=0 ); assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); #ifndef SQLITE_OMIT_AUTHORIZATION if( sqlite3AuthCheck(pParse, SQLITE_ANALYZE, pTab->zName, 0, db->aDb[iDb].zName ) ){ return; } #endif /* Establish a read-lock on the table at the shared-cache level. ** Open a read-only cursor on the table. Also allocate a cursor number ** to use for scanning indexes (iIdxCur). No index cursor is opened at ** this time though. */ sqlite3TableLock(pParse, iDb, pTab->tnum, 0, pTab->zName); iTabCur = iTab++; iIdxCur = iTab++; pParse->nTab = MAX(pParse->nTab, iTab); sqlite3OpenTable(pParse, iTabCur, iDb, pTab, OP_OpenRead); sqlite3VdbeAddOp4(v, OP_String8, 0, regTabname, 0, pTab->zName, 0); for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ int nCol; /* Number of columns in pIdx. "N" */ int addrRewind; /* Address of "OP_Rewind iIdxCur" */ int addrNextRow; /* Address of "next_row:" */ const char *zIdxName; /* Name of the index */ int nColTest; /* Number of columns to test for changes */ if( pOnlyIdx && pOnlyIdx!=pIdx ) continue; if( pIdx->pPartIdxWhere==0 ) needTableCnt = 0; if( !HasRowid(pTab) && IsPrimaryKeyIndex(pIdx) ){ nCol = pIdx->nKeyCol; zIdxName = pTab->zName; nColTest = nCol - 1; }else{ nCol = pIdx->nColumn; zIdxName = pIdx->zName; nColTest = pIdx->uniqNotNull ? pIdx->nKeyCol-1 : nCol-1; } /* Populate the register containing the index name. */ sqlite3VdbeAddOp4(v, OP_String8, 0, regIdxname, 0, zIdxName, 0); VdbeComment((v, "Analysis for %s.%s", pTab->zName, zIdxName)); /* ** Pseudo-code for loop that calls stat_push(): ** ** Rewind csr ** if eof(csr) goto end_of_scan; ** regChng = 0 ** goto chng_addr_0; ** ** next_row: ** regChng = 0 ** if( idx(0) != regPrev(0) ) goto chng_addr_0 ** regChng = 1 ** if( idx(1) != regPrev(1) ) goto chng_addr_1 ** ... 
** regChng = N ** goto chng_addr_N ** ** chng_addr_0: ** regPrev(0) = idx(0) ** chng_addr_1: ** regPrev(1) = idx(1) ** ... ** ** endDistinctTest: ** regRowid = idx(rowid) ** stat_push(P, regChng, regRowid) ** Next csr ** if !eof(csr) goto next_row; ** ** end_of_scan: */ /* Make sure there are enough memory cells allocated to accommodate ** the regPrev array and a trailing rowid (the rowid slot is required ** when building a record to insert into the sample column of ** the sqlite_stat4 table. */ pParse->nMem = MAX(pParse->nMem, regPrev+nColTest); /* Open a read-only cursor on the index being analyzed. */ assert( iDb==sqlite3SchemaToIndex(db, pIdx->pSchema) ); sqlite3VdbeAddOp3(v, OP_OpenRead, iIdxCur, pIdx->tnum, iDb); sqlite3VdbeSetP4KeyInfo(pParse, pIdx); VdbeComment((v, "%s", pIdx->zName)); /* Invoke the stat_init() function. The arguments are: ** ** (1) the number of columns in the index including the rowid ** (or for a WITHOUT ROWID table, the number of PK columns), ** (2) the number of columns in the key without the rowid/pk ** (3) the number of rows in the index, ** ** ** The third argument is only used for STAT3 and STAT4 */ #ifdef SQLITE_ENABLE_STAT3_OR_STAT4 sqlite3VdbeAddOp2(v, OP_Count, iIdxCur, regStat4+3); #endif sqlite3VdbeAddOp2(v, OP_Integer, nCol, regStat4+1); sqlite3VdbeAddOp2(v, OP_Integer, pIdx->nKeyCol, regStat4+2); sqlite3VdbeAddOp3(v, OP_Function0, 0, regStat4+1, regStat4); sqlite3VdbeChangeP4(v, -1, (char*)&statInitFuncdef, P4_FUNCDEF); sqlite3VdbeChangeP5(v, 2+IsStat34); /* Implementation of the following: ** ** Rewind csr ** if eof(csr) goto end_of_scan; ** regChng = 0 ** goto next_push_0; ** */ addrRewind = sqlite3VdbeAddOp1(v, OP_Rewind, iIdxCur); VdbeCoverage(v); sqlite3VdbeAddOp2(v, OP_Integer, 0, regChng); addrNextRow = sqlite3VdbeCurrentAddr(v); if( nColTest>0 ){ int endDistinctTest = sqlite3VdbeMakeLabel(v); int *aGotoChng; /* Array of jump instruction addresses */ aGotoChng = sqlite3DbMallocRaw(db, sizeof(int)*nColTest); if( aGotoChng==0 ) continue; /* ** next_row: ** regChng = 0 ** if( idx(0) != regPrev(0) ) goto chng_addr_0 ** regChng = 1 ** if( idx(1) != regPrev(1) ) goto chng_addr_1 ** ... ** regChng = N ** goto endDistinctTest */ sqlite3VdbeAddOp0(v, OP_Goto); addrNextRow = sqlite3VdbeCurrentAddr(v); if( nColTest==1 && pIdx->nKeyCol==1 && IsUniqueIndex(pIdx) ){ /* For a single-column UNIQUE index, once we have found a non-NULL ** row, we know that all the rest will be distinct, so skip ** subsequent distinctness tests. */ sqlite3VdbeAddOp2(v, OP_NotNull, regPrev, endDistinctTest); VdbeCoverage(v); } for(i=0; iazColl[i]); sqlite3VdbeAddOp2(v, OP_Integer, i, regChng); sqlite3VdbeAddOp3(v, OP_Column, iIdxCur, i, regTemp); aGotoChng[i] = sqlite3VdbeAddOp4(v, OP_Ne, regTemp, 0, regPrev+i, pColl, P4_COLLSEQ); sqlite3VdbeChangeP5(v, SQLITE_NULLEQ); VdbeCoverage(v); } sqlite3VdbeAddOp2(v, OP_Integer, nColTest, regChng); sqlite3VdbeAddOp2(v, OP_Goto, 0, endDistinctTest); /* ** chng_addr_0: ** regPrev(0) = idx(0) ** chng_addr_1: ** regPrev(1) = idx(1) ** ... 
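**
** As a small worked example (hypothetical data): scanning an index on (a,b)
** holding the entries (1,1), (1,2), (2,1) produces regChng values 0, 1, 0
** for the three stat_push() calls, leaving anDLt[0]==1 and anDLt[1]==2 at
** the end of the scan, so stat_get() reports "3 2 1" for sqlite_stat1
** (3 rows, (3+2-1)/2 = 2, (3+3-1)/3 = 1).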
*/ sqlite3VdbeJumpHere(v, addrNextRow-1); for(i=0; ipTable); int j, k, regKey; regKey = sqlite3GetTempRange(pParse, pPk->nKeyCol); for(j=0; jnKeyCol; j++){ k = sqlite3ColumnOfIndex(pIdx, pPk->aiColumn[j]); sqlite3VdbeAddOp3(v, OP_Column, iIdxCur, k, regKey+j); VdbeComment((v, "%s", pTab->aCol[pPk->aiColumn[j]].zName)); } sqlite3VdbeAddOp3(v, OP_MakeRecord, regKey, pPk->nKeyCol, regRowid); sqlite3ReleaseTempRange(pParse, regKey, pPk->nKeyCol); } #endif assert( regChng==(regStat4+1) ); sqlite3VdbeAddOp3(v, OP_Function0, 1, regStat4, regTemp); sqlite3VdbeChangeP4(v, -1, (char*)&statPushFuncdef, P4_FUNCDEF); sqlite3VdbeChangeP5(v, 2+IsStat34); sqlite3VdbeAddOp2(v, OP_Next, iIdxCur, addrNextRow); VdbeCoverage(v); /* Add the entry to the stat1 table. */ callStatGet(v, regStat4, STAT_GET_STAT1, regStat1); assert( "BBB"[0]==SQLITE_AFF_TEXT ); sqlite3VdbeAddOp4(v, OP_MakeRecord, regTabname, 3, regTemp, "BBB", 0); sqlite3VdbeAddOp2(v, OP_NewRowid, iStatCur, regNewRowid); sqlite3VdbeAddOp3(v, OP_Insert, iStatCur, regTemp, regNewRowid); sqlite3VdbeChangeP5(v, OPFLAG_APPEND); /* Add the entries to the stat3 or stat4 table. */ #ifdef SQLITE_ENABLE_STAT3_OR_STAT4 { int regEq = regStat1; int regLt = regStat1+1; int regDLt = regStat1+2; int regSample = regStat1+3; int regCol = regStat1+4; int regSampleRowid = regCol + nCol; int addrNext; int addrIsNull; u8 seekOp = HasRowid(pTab) ? OP_NotExists : OP_NotFound; pParse->nMem = MAX(pParse->nMem, regCol+nCol); addrNext = sqlite3VdbeCurrentAddr(v); callStatGet(v, regStat4, STAT_GET_ROWID, regSampleRowid); addrIsNull = sqlite3VdbeAddOp1(v, OP_IsNull, regSampleRowid); VdbeCoverage(v); callStatGet(v, regStat4, STAT_GET_NEQ, regEq); callStatGet(v, regStat4, STAT_GET_NLT, regLt); callStatGet(v, regStat4, STAT_GET_NDLT, regDLt); sqlite3VdbeAddOp4Int(v, seekOp, iTabCur, addrNext, regSampleRowid, 0); /* We know that the regSampleRowid row exists because it was read by ** the previous loop. Thus the not-found jump of seekOp will never ** be taken */ VdbeCoverageNeverTaken(v); #ifdef SQLITE_ENABLE_STAT3 sqlite3ExprCodeGetColumnOfTable(v, pTab, iTabCur, pIdx->aiColumn[0], regSample); #else for(i=0; iaiColumn[i]; sqlite3ExprCodeGetColumnOfTable(v, pTab, iTabCur, iCol, regCol+i); } sqlite3VdbeAddOp3(v, OP_MakeRecord, regCol, nCol, regSample); #endif sqlite3VdbeAddOp3(v, OP_MakeRecord, regTabname, 6, regTemp); sqlite3VdbeAddOp2(v, OP_NewRowid, iStatCur+1, regNewRowid); sqlite3VdbeAddOp3(v, OP_Insert, iStatCur+1, regTemp, regNewRowid); sqlite3VdbeAddOp2(v, OP_Goto, 1, addrNext); /* P1==1 for end-of-loop */ sqlite3VdbeJumpHere(v, addrIsNull); } #endif /* SQLITE_ENABLE_STAT3_OR_STAT4 */ /* End of analysis */ sqlite3VdbeJumpHere(v, addrRewind); } /* Create a single sqlite_stat1 entry containing NULL as the index ** name and the row count as the content. */ if( pOnlyIdx==0 && needTableCnt ){ VdbeComment((v, "%s", pTab->zName)); sqlite3VdbeAddOp2(v, OP_Count, iTabCur, regStat1); jZeroRows = sqlite3VdbeAddOp1(v, OP_IfNot, regStat1); VdbeCoverage(v); sqlite3VdbeAddOp2(v, OP_Null, 0, regIdxname); assert( "BBB"[0]==SQLITE_AFF_TEXT ); sqlite3VdbeAddOp4(v, OP_MakeRecord, regTabname, 3, regTemp, "BBB", 0); sqlite3VdbeAddOp2(v, OP_NewRowid, iStatCur, regNewRowid); sqlite3VdbeAddOp3(v, OP_Insert, iStatCur, regTemp, regNewRowid); sqlite3VdbeChangeP5(v, OPFLAG_APPEND); sqlite3VdbeJumpHere(v, jZeroRows); } } /* ** Generate code that will cause the most recent index analysis to ** be loaded into internal hash tables where is can be used. 
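**
** For example (a minimal sketch using only the public API), after an
** application runs
**
**     sqlite3_exec(db, "ANALYZE", 0, 0, 0);
**
** the statistics just written are re-read at once, so statements prepared
** afterwards on the same connection can use them without reopening the
** database.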
*/
static void loadAnalysis(Parse *pParse, int iDb){
  Vdbe *v = sqlite3GetVdbe(pParse);
  if( v ){
    sqlite3VdbeAddOp1(v, OP_LoadAnalysis, iDb);
  }
}

/*
** Generate code that will do an analysis of an entire database
*/
static void analyzeDatabase(Parse *pParse, int iDb){
  sqlite3 *db = pParse->db;
  Schema *pSchema = db->aDb[iDb].pSchema;    /* Schema of database iDb */
  HashElem *k;
  int iStatCur;
  int iMem;
  int iTab;

  sqlite3BeginWriteOperation(pParse, 0, iDb);
  iStatCur = pParse->nTab;
  pParse->nTab += 3;
  openStatTable(pParse, iDb, iStatCur, 0, 0);
  iMem = pParse->nMem+1;
  iTab = pParse->nTab;
  assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
  for(k=sqliteHashFirst(&pSchema->tblHash); k; k=sqliteHashNext(k)){
    Table *pTab = (Table*)sqliteHashData(k);
    analyzeOneTable(pParse, pTab, 0, iStatCur, iMem, iTab);
  }
  loadAnalysis(pParse, iDb);
}

/*
** Generate code that will do an analysis of a single table in
** a database.  If pOnlyIdx is not NULL then it is a single index
** in pTab that should be analyzed.
*/
static void analyzeTable(Parse *pParse, Table *pTab, Index *pOnlyIdx){
  int iDb;
  int iStatCur;

  assert( pTab!=0 );
  assert( sqlite3BtreeHoldsAllMutexes(pParse->db) );
  iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema);
  sqlite3BeginWriteOperation(pParse, 0, iDb);
  iStatCur = pParse->nTab;
  pParse->nTab += 3;
  if( pOnlyIdx ){
    openStatTable(pParse, iDb, iStatCur, pOnlyIdx->zName, "idx");
  }else{
    openStatTable(pParse, iDb, iStatCur, pTab->zName, "tbl");
  }
  analyzeOneTable(pParse, pTab, pOnlyIdx, iStatCur, pParse->nMem+1, pParse->nTab);
  loadAnalysis(pParse, iDb);
}

/*
** Generate code for the ANALYZE command.  The parser calls this routine
** when it recognizes an ANALYZE command.
**
**        ANALYZE                            -- 1
**        ANALYZE  <database>                -- 2
**        ANALYZE  ?<database>.?<tablename>  -- 3
**
** Form 1 causes all indices in all attached databases to be analyzed.
** Form 2 analyzes all indices in the single database named.
** Form 3 analyzes all indices associated with the named table.
*/
SQLITE_PRIVATE void sqlite3Analyze(Parse *pParse, Token *pName1, Token *pName2){
  sqlite3 *db = pParse->db;
  int iDb;
  int i;
  char *z, *zDb;
  Table *pTab;
  Index *pIdx;
  Token *pTableName;
  Vdbe *v;

  /* Read the database schema. If an error occurs, leave an error message
  ** and code in pParse and return without generating any code. */
  assert( sqlite3BtreeHoldsAllMutexes(pParse->db) );
  if( SQLITE_OK!=sqlite3ReadSchema(pParse) ){
    return;
  }

  assert( pName2!=0 || pName1==0 );
  if( pName1==0 ){
    /* Form 1:  Analyze everything */
    for(i=0; i<db->nDb; i++){
      if( i==1 ) continue;  /* Do not analyze the TEMP database */
      analyzeDatabase(pParse, i);
    }
  }else if( pName2->n==0 ){
    /* Form 2:  Analyze the database or table named */
    iDb = sqlite3FindDb(db, pName1);
    if( iDb>=0 ){
      analyzeDatabase(pParse, iDb);
    }else{
      z = sqlite3NameFromToken(db, pName1);
      if( z ){
        if( (pIdx = sqlite3FindIndex(db, z, 0))!=0 ){
          analyzeTable(pParse, pIdx->pTable, pIdx);
        }else if( (pTab = sqlite3LocateTable(pParse, 0, z, 0))!=0 ){
          analyzeTable(pParse, pTab, 0);
        }
        sqlite3DbFree(db, z);
      }
    }
  }else{
    /* Form 3: Analyze the fully qualified table name */
    iDb = sqlite3TwoPartName(pParse, pName1, pName2, &pTableName);
    if( iDb>=0 ){
      zDb = db->aDb[iDb].zName;
      z = sqlite3NameFromToken(db, pTableName);
      if( z ){
        if( (pIdx = sqlite3FindIndex(db, z, zDb))!=0 ){
          analyzeTable(pParse, pIdx->pTable, pIdx);
        }else if( (pTab = sqlite3LocateTable(pParse, 0, z, zDb))!=0 ){
          analyzeTable(pParse, pTab, 0);
        }
        sqlite3DbFree(db, z);
      }
    }
  }
  v = sqlite3GetVdbe(pParse);
  if( v ) sqlite3VdbeAddOp0(v, OP_Expire);
}

/*
** Used to pass information from the analyzer reader through to the
** callback routine.
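**
** For example (hypothetical row), while loading database "main" the callback
** below might be handed the sqlite_stat1 entry
**
**     tbl='t1'  idx='i1'  stat='10000 250 1 sz=48 unordered'
**
** The leading integers become the row estimates for the index and the
** trailing "sz=48" and "unordered" tokens are interpreted as flags by
** decodeIntArray().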
*/ typedef struct analysisInfo analysisInfo; struct analysisInfo { sqlite3 *db; const char *zDatabase; }; /* ** The first argument points to a nul-terminated string containing a ** list of space separated integers. Read the first nOut of these into ** the array aOut[]. */ static void decodeIntArray( char *zIntArray, /* String containing int array to decode */ int nOut, /* Number of slots in aOut[] */ tRowcnt *aOut, /* Store integers here */ LogEst *aLog, /* Or, if aOut==0, here */ Index *pIndex /* Handle extra flags for this index, if not NULL */ ){ char *z = zIntArray; int c; int i; tRowcnt v; #ifdef SQLITE_ENABLE_STAT3_OR_STAT4 if( z==0 ) z = ""; #else assert( z!=0 ); #endif for(i=0; *z && i='0' && c<='9' ){ v = v*10 + c - '0'; z++; } #ifdef SQLITE_ENABLE_STAT3_OR_STAT4 if( aOut ) aOut[i] = v; if( aLog ) aLog[i] = sqlite3LogEst(v); #else assert( aOut==0 ); UNUSED_PARAMETER(aOut); assert( aLog!=0 ); aLog[i] = sqlite3LogEst(v); #endif if( *z==' ' ) z++; } #ifndef SQLITE_ENABLE_STAT3_OR_STAT4 assert( pIndex!=0 ); { #else if( pIndex ){ #endif pIndex->bUnordered = 0; pIndex->noSkipScan = 0; while( z[0] ){ if( sqlite3_strglob("unordered*", z)==0 ){ pIndex->bUnordered = 1; }else if( sqlite3_strglob("sz=[0-9]*", z)==0 ){ pIndex->szIdxRow = sqlite3LogEst(sqlite3Atoi(z+3)); }else if( sqlite3_strglob("noskipscan*", z)==0 ){ pIndex->noSkipScan = 1; } #ifdef SQLITE_ENABLE_COSTMULT else if( sqlite3_strglob("costmult=[0-9]*",z)==0 ){ pIndex->pTable->costMult = sqlite3LogEst(sqlite3Atoi(z+9)); } #endif while( z[0]!=0 && z[0]!=' ' ) z++; while( z[0]==' ' ) z++; } } } /* ** This callback is invoked once for each index when reading the ** sqlite_stat1 table. ** ** argv[0] = name of the table ** argv[1] = name of the index (might be NULL) ** argv[2] = results of analysis - on integer for each column ** ** Entries for which argv[1]==NULL simply record the number of rows in ** the table. */ static int analysisLoader(void *pData, int argc, char **argv, char **NotUsed){ analysisInfo *pInfo = (analysisInfo*)pData; Index *pIndex; Table *pTable; const char *z; assert( argc==3 ); UNUSED_PARAMETER2(NotUsed, argc); if( argv==0 || argv[0]==0 || argv[2]==0 ){ return 0; } pTable = sqlite3FindTable(pInfo->db, argv[0], pInfo->zDatabase); if( pTable==0 ){ return 0; } if( argv[1]==0 ){ pIndex = 0; }else if( sqlite3_stricmp(argv[0],argv[1])==0 ){ pIndex = sqlite3PrimaryKeyIndex(pTable); }else{ pIndex = sqlite3FindIndex(pInfo->db, argv[1], pInfo->zDatabase); } z = argv[2]; if( pIndex ){ tRowcnt *aiRowEst = 0; int nCol = pIndex->nKeyCol+1; #ifdef SQLITE_ENABLE_STAT3_OR_STAT4 /* Index.aiRowEst may already be set here if there are duplicate ** sqlite_stat1 entries for this index. In that case just clobber ** the old data with the new instead of allocating a new array. */ if( pIndex->aiRowEst==0 ){ pIndex->aiRowEst = (tRowcnt*)sqlite3MallocZero(sizeof(tRowcnt) * nCol); if( pIndex->aiRowEst==0 ) pInfo->db->mallocFailed = 1; } aiRowEst = pIndex->aiRowEst; #endif pIndex->bUnordered = 0; decodeIntArray((char*)z, nCol, aiRowEst, pIndex->aiRowLogEst, pIndex); if( pIndex->pPartIdxWhere==0 ) pTable->nRowLogEst = pIndex->aiRowLogEst[0]; }else{ Index fakeIdx; fakeIdx.szIdxRow = pTable->szTabRow; #ifdef SQLITE_ENABLE_COSTMULT fakeIdx.pTable = pTable; #endif decodeIntArray((char*)z, 1, 0, &pTable->nRowLogEst, &fakeIdx); pTable->szTabRow = fakeIdx.szIdxRow; } return 0; } /* ** If the Index.aSample variable is not NULL, delete the aSample[] array ** and its contents. 
*/ SQLITE_PRIVATE void sqlite3DeleteIndexSamples(sqlite3 *db, Index *pIdx){ #ifdef SQLITE_ENABLE_STAT3_OR_STAT4 if( pIdx->aSample ){ int j; for(j=0; jnSample; j++){ IndexSample *p = &pIdx->aSample[j]; sqlite3DbFree(db, p->p); } sqlite3DbFree(db, pIdx->aSample); } if( db && db->pnBytesFreed==0 ){ pIdx->nSample = 0; pIdx->aSample = 0; } #else UNUSED_PARAMETER(db); UNUSED_PARAMETER(pIdx); #endif /* SQLITE_ENABLE_STAT3_OR_STAT4 */ } #ifdef SQLITE_ENABLE_STAT3_OR_STAT4 /* ** Populate the pIdx->aAvgEq[] array based on the samples currently ** stored in pIdx->aSample[]. */ static void initAvgEq(Index *pIdx){ if( pIdx ){ IndexSample *aSample = pIdx->aSample; IndexSample *pFinal = &aSample[pIdx->nSample-1]; int iCol; int nCol = 1; if( pIdx->nSampleCol>1 ){ /* If this is stat4 data, then calculate aAvgEq[] values for all ** sample columns except the last. The last is always set to 1, as ** once the trailing PK fields are considered all index keys are ** unique. */ nCol = pIdx->nSampleCol-1; pIdx->aAvgEq[nCol] = 1; } for(iCol=0; iColnSample; int i; /* Used to iterate through samples */ tRowcnt sumEq = 0; /* Sum of the nEq values */ tRowcnt avgEq = 0; tRowcnt nRow; /* Number of rows in index */ i64 nSum100 = 0; /* Number of terms contributing to sumEq */ i64 nDist100; /* Number of distinct values in index */ if( !pIdx->aiRowEst || iCol>=pIdx->nKeyCol || pIdx->aiRowEst[iCol+1]==0 ){ nRow = pFinal->anLt[iCol]; nDist100 = (i64)100 * pFinal->anDLt[iCol]; nSample--; }else{ nRow = pIdx->aiRowEst[0]; nDist100 = ((i64)100 * pIdx->aiRowEst[0]) / pIdx->aiRowEst[iCol+1]; } pIdx->nRowEst0 = nRow; /* Set nSum to the number of distinct (iCol+1) field prefixes that ** occur in the stat4 table for this index. Set sumEq to the sum of ** the nEq values for column iCol for the same set (adding the value ** only once where there exist duplicate prefixes). */ for(i=0; inSample-1) || aSample[i].anDLt[iCol]!=aSample[i+1].anDLt[iCol] ){ sumEq += aSample[i].anEq[iCol]; nSum100 += 100; } } if( nDist100>nSum100 ){ avgEq = ((i64)100 * (nRow - sumEq))/(nDist100 - nSum100); } if( avgEq==0 ) avgEq = 1; pIdx->aAvgEq[iCol] = avgEq; } } } /* ** Look up an index by name. Or, if the name of a WITHOUT ROWID table ** is supplied instead, find the PRIMARY KEY index for that table. */ static Index *findIndexOrPrimaryKey( sqlite3 *db, const char *zName, const char *zDb ){ Index *pIdx = sqlite3FindIndex(db, zName, zDb); if( pIdx==0 ){ Table *pTab = sqlite3FindTable(db, zName, zDb); if( pTab && !HasRowid(pTab) ) pIdx = sqlite3PrimaryKeyIndex(pTab); } return pIdx; } /* ** Load the content from either the sqlite_stat4 or sqlite_stat3 table ** into the relevant Index.aSample[] arrays. ** ** Arguments zSql1 and zSql2 must point to SQL statements that return ** data equivalent to the following (statements are different for stat3, ** see the caller of this function for details): ** ** zSql1: SELECT idx,count(*) FROM %Q.sqlite_stat4 GROUP BY idx ** zSql2: SELECT idx,neq,nlt,ndlt,sample FROM %Q.sqlite_stat4 ** ** where %Q is replaced with the database name before the SQL is executed. */ static int loadStatTbl( sqlite3 *db, /* Database handle */ int bStat3, /* Assume single column records only */ const char *zSql1, /* SQL statement 1 (see above) */ const char *zSql2, /* SQL statement 2 (see above) */ const char *zDb /* Database name (e.g. 
"main") */ ){ int rc; /* Result codes from subroutines */ sqlite3_stmt *pStmt = 0; /* An SQL statement being run */ char *zSql; /* Text of the SQL statement */ Index *pPrevIdx = 0; /* Previous index in the loop */ IndexSample *pSample; /* A slot in pIdx->aSample[] */ assert( db->lookaside.bEnabled==0 ); zSql = sqlite3MPrintf(db, zSql1, zDb); if( !zSql ){ return SQLITE_NOMEM; } rc = sqlite3_prepare(db, zSql, -1, &pStmt, 0); sqlite3DbFree(db, zSql); if( rc ) return rc; while( sqlite3_step(pStmt)==SQLITE_ROW ){ int nIdxCol = 1; /* Number of columns in stat4 records */ char *zIndex; /* Index name */ Index *pIdx; /* Pointer to the index object */ int nSample; /* Number of samples */ int nByte; /* Bytes of space required */ int i; /* Bytes of space required */ tRowcnt *pSpace; zIndex = (char *)sqlite3_column_text(pStmt, 0); if( zIndex==0 ) continue; nSample = sqlite3_column_int(pStmt, 1); pIdx = findIndexOrPrimaryKey(db, zIndex, zDb); assert( pIdx==0 || bStat3 || pIdx->nSample==0 ); /* Index.nSample is non-zero at this point if data has already been ** loaded from the stat4 table. In this case ignore stat3 data. */ if( pIdx==0 || pIdx->nSample ) continue; if( bStat3==0 ){ assert( !HasRowid(pIdx->pTable) || pIdx->nColumn==pIdx->nKeyCol+1 ); if( !HasRowid(pIdx->pTable) && IsPrimaryKeyIndex(pIdx) ){ nIdxCol = pIdx->nKeyCol; }else{ nIdxCol = pIdx->nColumn; } } pIdx->nSampleCol = nIdxCol; nByte = sizeof(IndexSample) * nSample; nByte += sizeof(tRowcnt) * nIdxCol * 3 * nSample; nByte += nIdxCol * sizeof(tRowcnt); /* Space for Index.aAvgEq[] */ pIdx->aSample = sqlite3DbMallocZero(db, nByte); if( pIdx->aSample==0 ){ sqlite3_finalize(pStmt); return SQLITE_NOMEM; } pSpace = (tRowcnt*)&pIdx->aSample[nSample]; pIdx->aAvgEq = pSpace; pSpace += nIdxCol; for(i=0; iaSample[i].anEq = pSpace; pSpace += nIdxCol; pIdx->aSample[i].anLt = pSpace; pSpace += nIdxCol; pIdx->aSample[i].anDLt = pSpace; pSpace += nIdxCol; } assert( ((u8*)pSpace)-nByte==(u8*)(pIdx->aSample) ); } rc = sqlite3_finalize(pStmt); if( rc ) return rc; zSql = sqlite3MPrintf(db, zSql2, zDb); if( !zSql ){ return SQLITE_NOMEM; } rc = sqlite3_prepare(db, zSql, -1, &pStmt, 0); sqlite3DbFree(db, zSql); if( rc ) return rc; while( sqlite3_step(pStmt)==SQLITE_ROW ){ char *zIndex; /* Index name */ Index *pIdx; /* Pointer to the index object */ int nCol = 1; /* Number of columns in index */ zIndex = (char *)sqlite3_column_text(pStmt, 0); if( zIndex==0 ) continue; pIdx = findIndexOrPrimaryKey(db, zIndex, zDb); if( pIdx==0 ) continue; /* This next condition is true if data has already been loaded from ** the sqlite_stat4 table. In this case ignore stat3 data. */ nCol = pIdx->nSampleCol; if( bStat3 && nCol>1 ) continue; if( pIdx!=pPrevIdx ){ initAvgEq(pPrevIdx); pPrevIdx = pIdx; } pSample = &pIdx->aSample[pIdx->nSample]; decodeIntArray((char*)sqlite3_column_text(pStmt,1),nCol,pSample->anEq,0,0); decodeIntArray((char*)sqlite3_column_text(pStmt,2),nCol,pSample->anLt,0,0); decodeIntArray((char*)sqlite3_column_text(pStmt,3),nCol,pSample->anDLt,0,0); /* Take a copy of the sample. Add two 0x00 bytes the end of the buffer. ** This is in case the sample record is corrupted. In that case, the ** sqlite3VdbeRecordCompare() may read up to two varints past the ** end of the allocated buffer before it realizes it is dealing with ** a corrupt record. Adding the two 0x00 bytes prevents this from causing ** a buffer overread. 
*/ pSample->n = sqlite3_column_bytes(pStmt, 4); pSample->p = sqlite3DbMallocZero(db, pSample->n + 2); if( pSample->p==0 ){ sqlite3_finalize(pStmt); return SQLITE_NOMEM; } memcpy(pSample->p, sqlite3_column_blob(pStmt, 4), pSample->n); pIdx->nSample++; } rc = sqlite3_finalize(pStmt); if( rc==SQLITE_OK ) initAvgEq(pPrevIdx); return rc; } /* ** Load content from the sqlite_stat4 and sqlite_stat3 tables into ** the Index.aSample[] arrays of all indices. */ static int loadStat4(sqlite3 *db, const char *zDb){ int rc = SQLITE_OK; /* Result codes from subroutines */ assert( db->lookaside.bEnabled==0 ); if( sqlite3FindTable(db, "sqlite_stat4", zDb) ){ rc = loadStatTbl(db, 0, "SELECT idx,count(*) FROM %Q.sqlite_stat4 GROUP BY idx", "SELECT idx,neq,nlt,ndlt,sample FROM %Q.sqlite_stat4", zDb ); } if( rc==SQLITE_OK && sqlite3FindTable(db, "sqlite_stat3", zDb) ){ rc = loadStatTbl(db, 1, "SELECT idx,count(*) FROM %Q.sqlite_stat3 GROUP BY idx", "SELECT idx,neq,nlt,ndlt,sqlite_record(sample) FROM %Q.sqlite_stat3", zDb ); } return rc; } #endif /* SQLITE_ENABLE_STAT3_OR_STAT4 */ /* ** Load the content of the sqlite_stat1 and sqlite_stat3/4 tables. The ** contents of sqlite_stat1 are used to populate the Index.aiRowEst[] ** arrays. The contents of sqlite_stat3/4 are used to populate the ** Index.aSample[] arrays. ** ** If the sqlite_stat1 table is not present in the database, SQLITE_ERROR ** is returned. In this case, even if SQLITE_ENABLE_STAT3/4 was defined ** during compilation and the sqlite_stat3/4 table is present, no data is ** read from it. ** ** If SQLITE_ENABLE_STAT3/4 was defined during compilation and the ** sqlite_stat4 table is not present in the database, SQLITE_ERROR is ** returned. However, in this case, data is read from the sqlite_stat1 ** table (if it is present) before returning. ** ** If an OOM error occurs, this function always sets db->mallocFailed. ** This means if the caller does not care about other errors, the return ** code may be ignored. */ SQLITE_PRIVATE int sqlite3AnalysisLoad(sqlite3 *db, int iDb){ analysisInfo sInfo; HashElem *i; char *zSql; int rc; assert( iDb>=0 && iDbnDb ); assert( db->aDb[iDb].pBt!=0 ); /* Clear any prior statistics */ assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); for(i=sqliteHashFirst(&db->aDb[iDb].pSchema->idxHash);i;i=sqliteHashNext(i)){ Index *pIdx = sqliteHashData(i); sqlite3DefaultRowEst(pIdx); #ifdef SQLITE_ENABLE_STAT3_OR_STAT4 sqlite3DeleteIndexSamples(db, pIdx); pIdx->aSample = 0; #endif } /* Check to make sure the sqlite_stat1 table exists */ sInfo.db = db; sInfo.zDatabase = db->aDb[iDb].zName; if( sqlite3FindTable(db, "sqlite_stat1", sInfo.zDatabase)==0 ){ return SQLITE_ERROR; } /* Load new statistics out of the sqlite_stat1 table */ zSql = sqlite3MPrintf(db, "SELECT tbl,idx,stat FROM %Q.sqlite_stat1", sInfo.zDatabase); if( zSql==0 ){ rc = SQLITE_NOMEM; }else{ rc = sqlite3_exec(db, zSql, analysisLoader, &sInfo, 0); sqlite3DbFree(db, zSql); } /* Load the statistics from the sqlite_stat4 table. 
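**
** A sqlite_stat4 row for a two-column index might look like the following
** (purely hypothetical values):
**
**     idx='i1', neq='3 1', nlt='120 122', ndlt='40 118', sample=(key blob)
**
** meaning 3 rows equal the sample on its first field and 1 row on both
** fields, 120 rows sort before the sample on the first field, 40 distinct
** first-field values sort before it, and so on for the two-field prefix.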
*/ #ifdef SQLITE_ENABLE_STAT3_OR_STAT4 if( rc==SQLITE_OK && OptimizationEnabled(db, SQLITE_Stat34) ){ int lookasideEnabled = db->lookaside.bEnabled; db->lookaside.bEnabled = 0; rc = loadStat4(db, sInfo.zDatabase); db->lookaside.bEnabled = lookasideEnabled; } for(i=sqliteHashFirst(&db->aDb[iDb].pSchema->idxHash);i;i=sqliteHashNext(i)){ Index *pIdx = sqliteHashData(i); sqlite3_free(pIdx->aiRowEst); pIdx->aiRowEst = 0; } #endif if( rc==SQLITE_NOMEM ){ db->mallocFailed = 1; } return rc; } #endif /* SQLITE_OMIT_ANALYZE */ /************** End of analyze.c *********************************************/ /************** Begin file attach.c ******************************************/ /* ** 2003 April 6 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** This file contains code used to implement the ATTACH and DETACH commands. */ /* #include "sqliteInt.h" */ #ifndef SQLITE_OMIT_ATTACH /* ** Resolve an expression that was part of an ATTACH or DETACH statement. This ** is slightly different from resolving a normal SQL expression, because simple ** identifiers are treated as strings, not possible column names or aliases. ** ** i.e. if the parser sees: ** ** ATTACH DATABASE abc AS def ** ** it treats the two expressions as literal strings 'abc' and 'def' instead of ** looking for columns of the same name. ** ** This only applies to the root node of pExpr, so the statement: ** ** ATTACH DATABASE abc||def AS 'db2' ** ** will fail because neither abc or def can be resolved. */ static int resolveAttachExpr(NameContext *pName, Expr *pExpr) { int rc = SQLITE_OK; if( pExpr ){ if( pExpr->op!=TK_ID ){ rc = sqlite3ResolveExprNames(pName, pExpr); }else{ pExpr->op = TK_STRING; } } return rc; } /* ** An SQL user-function registered to do the work of an ATTACH statement. The ** three arguments to the function come directly from an attach statement: ** ** ATTACH DATABASE x AS y KEY z ** ** SELECT sqlite_attach(x, y, z) ** ** If the optional "KEY z" syntax is omitted, an SQL NULL is passed as the ** third argument. */ static void attachFunc( sqlite3_context *context, int NotUsed, sqlite3_value **argv ){ int i; int rc = 0; sqlite3 *db = sqlite3_context_db_handle(context); const char *zName; const char *zFile; char *zPath = 0; char *zErr = 0; unsigned int flags; Db *aNew; char *zErrDyn = 0; sqlite3_vfs *pVfs; UNUSED_PARAMETER(NotUsed); zFile = (const char *)sqlite3_value_text(argv[0]); zName = (const char *)sqlite3_value_text(argv[1]); if( zFile==0 ) zFile = ""; if( zName==0 ) zName = ""; /* Check for the following errors: ** ** * Too many attached databases, ** * Transaction currently open ** * Specified database name already being used. */ if( db->nDb>=db->aLimit[SQLITE_LIMIT_ATTACHED]+2 ){ zErrDyn = sqlite3MPrintf(db, "too many attached databases - max %d", db->aLimit[SQLITE_LIMIT_ATTACHED] ); goto attach_error; } if( !db->autoCommit ){ zErrDyn = sqlite3MPrintf(db, "cannot ATTACH database within transaction"); goto attach_error; } for(i=0; inDb; i++){ char *z = db->aDb[i].zName; assert( z && zName ); if( sqlite3StrICmp(z, zName)==0 ){ zErrDyn = sqlite3MPrintf(db, "database %s is already in use", zName); goto attach_error; } } /* Allocate the new entry in the db->aDb[] array and initialize the schema ** hash tables. 
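**
** For example, on the first ATTACH for a connection (hypothetical name):
**
**     ATTACH DATABASE 'aux1.db' AS aux1;
**
** the two static slots holding "main" and "temp" are copied into a freshly
** allocated three-slot array and the new database becomes db->aDb[2].
** Each later ATTACH reallocates the array to hold one more slot.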
*/ if( db->aDb==db->aDbStatic ){ aNew = sqlite3DbMallocRaw(db, sizeof(db->aDb[0])*3 ); if( aNew==0 ) return; memcpy(aNew, db->aDb, sizeof(db->aDb[0])*2); }else{ aNew = sqlite3DbRealloc(db, db->aDb, sizeof(db->aDb[0])*(db->nDb+1) ); if( aNew==0 ) return; } db->aDb = aNew; aNew = &db->aDb[db->nDb]; memset(aNew, 0, sizeof(*aNew)); /* Open the database file. If the btree is successfully opened, use ** it to obtain the database schema. At this point the schema may ** or may not be initialized. */ flags = db->openFlags; rc = sqlite3ParseUri(db->pVfs->zName, zFile, &flags, &pVfs, &zPath, &zErr); if( rc!=SQLITE_OK ){ if( rc==SQLITE_NOMEM ) db->mallocFailed = 1; sqlite3_result_error(context, zErr, -1); sqlite3_free(zErr); return; } assert( pVfs ); flags |= SQLITE_OPEN_MAIN_DB; rc = sqlite3BtreeOpen(pVfs, zPath, db, &aNew->pBt, 0, flags); sqlite3_free( zPath ); db->nDb++; if( rc==SQLITE_CONSTRAINT ){ rc = SQLITE_ERROR; zErrDyn = sqlite3MPrintf(db, "database is already attached"); }else if( rc==SQLITE_OK ){ Pager *pPager; aNew->pSchema = sqlite3SchemaGet(db, aNew->pBt); if( !aNew->pSchema ){ rc = SQLITE_NOMEM; }else if( aNew->pSchema->file_format && aNew->pSchema->enc!=ENC(db) ){ zErrDyn = sqlite3MPrintf(db, "attached databases must use the same text encoding as main database"); rc = SQLITE_ERROR; } sqlite3BtreeEnter(aNew->pBt); pPager = sqlite3BtreePager(aNew->pBt); sqlite3PagerLockingMode(pPager, db->dfltLockMode); sqlite3BtreeSecureDelete(aNew->pBt, sqlite3BtreeSecureDelete(db->aDb[0].pBt,-1) ); #ifndef SQLITE_OMIT_PAGER_PRAGMAS sqlite3BtreeSetPagerFlags(aNew->pBt, 3 | (db->flags & PAGER_FLAGS_MASK)); #endif sqlite3BtreeLeave(aNew->pBt); } aNew->safety_level = 3; aNew->zName = sqlite3DbStrDup(db, zName); if( rc==SQLITE_OK && aNew->zName==0 ){ rc = SQLITE_NOMEM; } #ifdef SQLITE_HAS_CODEC if( rc==SQLITE_OK ){ extern int sqlite3CodecAttach(sqlite3*, int, const void*, int); extern void sqlite3CodecGetKey(sqlite3*, int, void**, int*); int nKey; char *zKey; int t = sqlite3_value_type(argv[2]); switch( t ){ case SQLITE_INTEGER: case SQLITE_FLOAT: zErrDyn = sqlite3DbStrDup(db, "Invalid key value"); rc = SQLITE_ERROR; break; case SQLITE_TEXT: case SQLITE_BLOB: nKey = sqlite3_value_bytes(argv[2]); zKey = (char *)sqlite3_value_blob(argv[2]); rc = sqlite3CodecAttach(db, db->nDb-1, zKey, nKey); break; case SQLITE_NULL: /* No key specified. Use the key from the main database */ sqlite3CodecGetKey(db, 0, (void**)&zKey, &nKey); if( nKey>0 || sqlite3BtreeGetOptimalReserve(db->aDb[0].pBt)>0 ){ rc = sqlite3CodecAttach(db, db->nDb-1, zKey, nKey); } break; } } #endif /* If the file was opened successfully, read the schema for the new database. ** If this fails, or if opening the file failed, then close the file and ** remove the entry from the db->aDb[] array. i.e. put everything back the way ** we found it. 
*/ if( rc==SQLITE_OK ){ sqlite3BtreeEnterAll(db); rc = sqlite3Init(db, &zErrDyn); sqlite3BtreeLeaveAll(db); } #ifdef SQLITE_USER_AUTHENTICATION if( rc==SQLITE_OK ){ u8 newAuth = 0; rc = sqlite3UserAuthCheckLogin(db, zName, &newAuth); if( newAuthauth.authLevel ){ rc = SQLITE_AUTH_USER; } } #endif if( rc ){ int iDb = db->nDb - 1; assert( iDb>=2 ); if( db->aDb[iDb].pBt ){ sqlite3BtreeClose(db->aDb[iDb].pBt); db->aDb[iDb].pBt = 0; db->aDb[iDb].pSchema = 0; } sqlite3ResetAllSchemasOfConnection(db); db->nDb = iDb; if( rc==SQLITE_NOMEM || rc==SQLITE_IOERR_NOMEM ){ db->mallocFailed = 1; sqlite3DbFree(db, zErrDyn); zErrDyn = sqlite3MPrintf(db, "out of memory"); }else if( zErrDyn==0 ){ zErrDyn = sqlite3MPrintf(db, "unable to open database: %s", zFile); } goto attach_error; } return; attach_error: /* Return an error if we get here */ if( zErrDyn ){ sqlite3_result_error(context, zErrDyn, -1); sqlite3DbFree(db, zErrDyn); } if( rc ) sqlite3_result_error_code(context, rc); } /* ** An SQL user-function registered to do the work of an DETACH statement. The ** three arguments to the function come directly from a detach statement: ** ** DETACH DATABASE x ** ** SELECT sqlite_detach(x) */ static void detachFunc( sqlite3_context *context, int NotUsed, sqlite3_value **argv ){ const char *zName = (const char *)sqlite3_value_text(argv[0]); sqlite3 *db = sqlite3_context_db_handle(context); int i; Db *pDb = 0; char zErr[128]; UNUSED_PARAMETER(NotUsed); if( zName==0 ) zName = ""; for(i=0; inDb; i++){ pDb = &db->aDb[i]; if( pDb->pBt==0 ) continue; if( sqlite3StrICmp(pDb->zName, zName)==0 ) break; } if( i>=db->nDb ){ sqlite3_snprintf(sizeof(zErr),zErr, "no such database: %s", zName); goto detach_error; } if( i<2 ){ sqlite3_snprintf(sizeof(zErr),zErr, "cannot detach database %s", zName); goto detach_error; } if( !db->autoCommit ){ sqlite3_snprintf(sizeof(zErr), zErr, "cannot DETACH database within transaction"); goto detach_error; } if( sqlite3BtreeIsInReadTrans(pDb->pBt) || sqlite3BtreeIsInBackup(pDb->pBt) ){ sqlite3_snprintf(sizeof(zErr),zErr, "database %s is locked", zName); goto detach_error; } sqlite3BtreeClose(pDb->pBt); pDb->pBt = 0; pDb->pSchema = 0; sqlite3CollapseDatabaseArray(db); return; detach_error: sqlite3_result_error(context, zErr, -1); } /* ** This procedure generates VDBE code for a single invocation of either the ** sqlite_detach() or sqlite_attach() SQL user functions. 
*/ static void codeAttach( Parse *pParse, /* The parser context */ int type, /* Either SQLITE_ATTACH or SQLITE_DETACH */ FuncDef const *pFunc,/* FuncDef wrapper for detachFunc() or attachFunc() */ Expr *pAuthArg, /* Expression to pass to authorization callback */ Expr *pFilename, /* Name of database file */ Expr *pDbname, /* Name of the database to use internally */ Expr *pKey /* Database key for encryption extension */ ){ int rc; NameContext sName; Vdbe *v; sqlite3* db = pParse->db; int regArgs; memset(&sName, 0, sizeof(NameContext)); sName.pParse = pParse; if( SQLITE_OK!=(rc = resolveAttachExpr(&sName, pFilename)) || SQLITE_OK!=(rc = resolveAttachExpr(&sName, pDbname)) || SQLITE_OK!=(rc = resolveAttachExpr(&sName, pKey)) ){ goto attach_end; } #ifndef SQLITE_OMIT_AUTHORIZATION if( pAuthArg ){ char *zAuthArg; if( pAuthArg->op==TK_STRING ){ zAuthArg = pAuthArg->u.zToken; }else{ zAuthArg = 0; } rc = sqlite3AuthCheck(pParse, type, zAuthArg, 0, 0); if(rc!=SQLITE_OK ){ goto attach_end; } } #endif /* SQLITE_OMIT_AUTHORIZATION */ v = sqlite3GetVdbe(pParse); regArgs = sqlite3GetTempRange(pParse, 4); sqlite3ExprCode(pParse, pFilename, regArgs); sqlite3ExprCode(pParse, pDbname, regArgs+1); sqlite3ExprCode(pParse, pKey, regArgs+2); assert( v || db->mallocFailed ); if( v ){ sqlite3VdbeAddOp3(v, OP_Function0, 0, regArgs+3-pFunc->nArg, regArgs+3); assert( pFunc->nArg==-1 || (pFunc->nArg&0xff)==pFunc->nArg ); sqlite3VdbeChangeP5(v, (u8)(pFunc->nArg)); sqlite3VdbeChangeP4(v, -1, (char *)pFunc, P4_FUNCDEF); /* Code an OP_Expire. For an ATTACH statement, set P1 to true (expire this ** statement only). For DETACH, set it to false (expire all existing ** statements). */ sqlite3VdbeAddOp1(v, OP_Expire, (type==SQLITE_ATTACH)); } attach_end: sqlite3ExprDelete(db, pFilename); sqlite3ExprDelete(db, pDbname); sqlite3ExprDelete(db, pKey); } /* ** Called by the parser to compile a DETACH statement. ** ** DETACH pDbname */ SQLITE_PRIVATE void sqlite3Detach(Parse *pParse, Expr *pDbname){ static const FuncDef detach_func = { 1, /* nArg */ SQLITE_UTF8, /* funcFlags */ 0, /* pUserData */ 0, /* pNext */ detachFunc, /* xFunc */ 0, /* xStep */ 0, /* xFinalize */ "sqlite_detach", /* zName */ 0, /* pHash */ 0 /* pDestructor */ }; codeAttach(pParse, SQLITE_DETACH, &detach_func, pDbname, 0, 0, pDbname); } /* ** Called by the parser to compile an ATTACH statement. ** ** ATTACH p AS pDbname KEY pKey */ SQLITE_PRIVATE void sqlite3Attach(Parse *pParse, Expr *p, Expr *pDbname, Expr *pKey){ static const FuncDef attach_func = { 3, /* nArg */ SQLITE_UTF8, /* funcFlags */ 0, /* pUserData */ 0, /* pNext */ attachFunc, /* xFunc */ 0, /* xStep */ 0, /* xFinalize */ "sqlite_attach", /* zName */ 0, /* pHash */ 0 /* pDestructor */ }; codeAttach(pParse, SQLITE_ATTACH, &attach_func, p, p, pDbname, pKey); } #endif /* SQLITE_OMIT_ATTACH */ /* ** Initialize a DbFixer structure. This routine must be called prior ** to passing the structure to one of the sqliteFixAAAA() routines below. 
*/ SQLITE_PRIVATE void sqlite3FixInit( DbFixer *pFix, /* The fixer to be initialized */ Parse *pParse, /* Error messages will be written here */ int iDb, /* This is the database that must be used */ const char *zType, /* "view", "trigger", or "index" */ const Token *pName /* Name of the view, trigger, or index */ ){ sqlite3 *db; db = pParse->db; assert( db->nDb>iDb ); pFix->pParse = pParse; pFix->zDb = db->aDb[iDb].zName; pFix->pSchema = db->aDb[iDb].pSchema; pFix->zType = zType; pFix->pName = pName; pFix->bVarOnly = (iDb==1); } /* ** The following set of routines walk through the parse tree and assign ** a specific database to all table references where the database name ** was left unspecified in the original SQL statement. The pFix structure ** must have been initialized by a prior call to sqlite3FixInit(). ** ** These routines are used to make sure that an index, trigger, or ** view in one database does not refer to objects in a different database. ** (Exception: indices, triggers, and views in the TEMP database are ** allowed to refer to anything.) If a reference is explicitly made ** to an object in a different database, an error message is added to ** pParse->zErrMsg and these routines return non-zero. If everything ** checks out, these routines return 0. */ SQLITE_PRIVATE int sqlite3FixSrcList( DbFixer *pFix, /* Context of the fixation */ SrcList *pList /* The Source list to check and modify */ ){ int i; const char *zDb; struct SrcList_item *pItem; if( NEVER(pList==0) ) return 0; zDb = pFix->zDb; for(i=0, pItem=pList->a; inSrc; i++, pItem++){ if( pFix->bVarOnly==0 ){ if( pItem->zDatabase && sqlite3StrICmp(pItem->zDatabase, zDb) ){ sqlite3ErrorMsg(pFix->pParse, "%s %T cannot reference objects in database %s", pFix->zType, pFix->pName, pItem->zDatabase); return 1; } sqlite3DbFree(pFix->pParse->db, pItem->zDatabase); pItem->zDatabase = 0; pItem->pSchema = pFix->pSchema; } #if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_TRIGGER) if( sqlite3FixSelect(pFix, pItem->pSelect) ) return 1; if( sqlite3FixExpr(pFix, pItem->pOn) ) return 1; #endif } return 0; } #if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_TRIGGER) SQLITE_PRIVATE int sqlite3FixSelect( DbFixer *pFix, /* Context of the fixation */ Select *pSelect /* The SELECT statement to be fixed to one database */ ){ while( pSelect ){ if( sqlite3FixExprList(pFix, pSelect->pEList) ){ return 1; } if( sqlite3FixSrcList(pFix, pSelect->pSrc) ){ return 1; } if( sqlite3FixExpr(pFix, pSelect->pWhere) ){ return 1; } if( sqlite3FixExprList(pFix, pSelect->pGroupBy) ){ return 1; } if( sqlite3FixExpr(pFix, pSelect->pHaving) ){ return 1; } if( sqlite3FixExprList(pFix, pSelect->pOrderBy) ){ return 1; } if( sqlite3FixExpr(pFix, pSelect->pLimit) ){ return 1; } if( sqlite3FixExpr(pFix, pSelect->pOffset) ){ return 1; } pSelect = pSelect->pPrior; } return 0; } SQLITE_PRIVATE int sqlite3FixExpr( DbFixer *pFix, /* Context of the fixation */ Expr *pExpr /* The expression to be fixed to one database */ ){ while( pExpr ){ if( pExpr->op==TK_VARIABLE ){ if( pFix->pParse->db->init.busy ){ pExpr->op = TK_NULL; }else{ sqlite3ErrorMsg(pFix->pParse, "%s cannot use variables", pFix->zType); return 1; } } if( ExprHasProperty(pExpr, EP_TokenOnly) ) break; if( ExprHasProperty(pExpr, EP_xIsSelect) ){ if( sqlite3FixSelect(pFix, pExpr->x.pSelect) ) return 1; }else{ if( sqlite3FixExprList(pFix, pExpr->x.pList) ) return 1; } if( sqlite3FixExpr(pFix, pExpr->pRight) ){ return 1; } pExpr = pExpr->pLeft; } return 0; } SQLITE_PRIVATE int sqlite3FixExprList( DbFixer *pFix, 
/* Context of the fixation */ ExprList *pList /* The expression to be fixed to one database */ ){ int i; struct ExprList_item *pItem; if( pList==0 ) return 0; for(i=0, pItem=pList->a; inExpr; i++, pItem++){ if( sqlite3FixExpr(pFix, pItem->pExpr) ){ return 1; } } return 0; } #endif #ifndef SQLITE_OMIT_TRIGGER SQLITE_PRIVATE int sqlite3FixTriggerStep( DbFixer *pFix, /* Context of the fixation */ TriggerStep *pStep /* The trigger step be fixed to one database */ ){ while( pStep ){ if( sqlite3FixSelect(pFix, pStep->pSelect) ){ return 1; } if( sqlite3FixExpr(pFix, pStep->pWhere) ){ return 1; } if( sqlite3FixExprList(pFix, pStep->pExprList) ){ return 1; } pStep = pStep->pNext; } return 0; } #endif /************** End of attach.c **********************************************/ /************** Begin file auth.c ********************************************/ /* ** 2003 January 11 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** This file contains code used to implement the sqlite3_set_authorizer() ** API. This facility is an optional feature of the library. Embedded ** systems that do not need this facility may omit it by recompiling ** the library with -DSQLITE_OMIT_AUTHORIZATION=1 */ /* #include "sqliteInt.h" */ /* ** All of the code in this file may be omitted by defining a single ** macro. */ #ifndef SQLITE_OMIT_AUTHORIZATION /* ** Set or clear the access authorization function. ** ** The access authorization function is be called during the compilation ** phase to verify that the user has read and/or write access permission on ** various fields of the database. The first argument to the auth function ** is a copy of the 3rd argument to this routine. The second argument ** to the auth function is one of these constants: ** ** SQLITE_CREATE_INDEX ** SQLITE_CREATE_TABLE ** SQLITE_CREATE_TEMP_INDEX ** SQLITE_CREATE_TEMP_TABLE ** SQLITE_CREATE_TEMP_TRIGGER ** SQLITE_CREATE_TEMP_VIEW ** SQLITE_CREATE_TRIGGER ** SQLITE_CREATE_VIEW ** SQLITE_DELETE ** SQLITE_DROP_INDEX ** SQLITE_DROP_TABLE ** SQLITE_DROP_TEMP_INDEX ** SQLITE_DROP_TEMP_TABLE ** SQLITE_DROP_TEMP_TRIGGER ** SQLITE_DROP_TEMP_VIEW ** SQLITE_DROP_TRIGGER ** SQLITE_DROP_VIEW ** SQLITE_INSERT ** SQLITE_PRAGMA ** SQLITE_READ ** SQLITE_SELECT ** SQLITE_TRANSACTION ** SQLITE_UPDATE ** ** The third and fourth arguments to the auth function are the name of ** the table and the column that are being accessed. The auth function ** should return either SQLITE_OK, SQLITE_DENY, or SQLITE_IGNORE. If ** SQLITE_OK is returned, it means that access is allowed. SQLITE_DENY ** means that the SQL statement will never-run - the sqlite3_exec() call ** will return with an error. SQLITE_IGNORE means that the SQL statement ** should run but attempts to read the specified column will return NULL ** and attempts to write the column will be ignored. ** ** Setting the auth function to NULL disables this hook. The default ** setting of the auth function is NULL. 
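**
** A minimal usage sketch (application code; the callback name xAuthorizer
** is hypothetical):
**
**     static int xAuthorizer(void *pArg, int op, const char *z1,
**                            const char *z2, const char *z3, const char *z4){
**       return op==SQLITE_DELETE ? SQLITE_DENY : SQLITE_OK;
**     }
**
**     sqlite3_set_authorizer(db, xAuthorizer, 0);
**
** With this authorizer installed, any attempt to compile a DELETE statement
** on the connection fails with an authorization error, while all other
** operations are allowed.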
*/ SQLITE_API int SQLITE_STDCALL sqlite3_set_authorizer( sqlite3 *db, int (*xAuth)(void*,int,const char*,const char*,const char*,const char*), void *pArg ){ #ifdef SQLITE_ENABLE_API_ARMOR if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT; #endif sqlite3_mutex_enter(db->mutex); db->xAuth = (sqlite3_xauth)xAuth; db->pAuthArg = pArg; sqlite3ExpirePreparedStatements(db); sqlite3_mutex_leave(db->mutex); return SQLITE_OK; } /* ** Write an error message into pParse->zErrMsg that explains that the ** user-supplied authorization function returned an illegal value. */ static void sqliteAuthBadReturnCode(Parse *pParse){ sqlite3ErrorMsg(pParse, "authorizer malfunction"); pParse->rc = SQLITE_ERROR; } /* ** Invoke the authorization callback for permission to read column zCol from ** table zTab in database zDb. This function assumes that an authorization ** callback has been registered (i.e. that sqlite3.xAuth is not NULL). ** ** If SQLITE_IGNORE is returned and pExpr is not NULL, then pExpr is changed ** to an SQL NULL expression. Otherwise, if pExpr is NULL, then SQLITE_IGNORE ** is treated as SQLITE_DENY. In this case an error is left in pParse. */ SQLITE_PRIVATE int sqlite3AuthReadCol( Parse *pParse, /* The parser context */ const char *zTab, /* Table name */ const char *zCol, /* Column name */ int iDb /* Index of containing database. */ ){ sqlite3 *db = pParse->db; /* Database handle */ char *zDb = db->aDb[iDb].zName; /* Name of attached database */ int rc; /* Auth callback return code */ rc = db->xAuth(db->pAuthArg, SQLITE_READ, zTab,zCol,zDb,pParse->zAuthContext #ifdef SQLITE_USER_AUTHENTICATION ,db->auth.zAuthUser #endif ); if( rc==SQLITE_DENY ){ if( db->nDb>2 || iDb!=0 ){ sqlite3ErrorMsg(pParse, "access to %s.%s.%s is prohibited",zDb,zTab,zCol); }else{ sqlite3ErrorMsg(pParse, "access to %s.%s is prohibited", zTab, zCol); } pParse->rc = SQLITE_AUTH; }else if( rc!=SQLITE_IGNORE && rc!=SQLITE_OK ){ sqliteAuthBadReturnCode(pParse); } return rc; } /* ** The pExpr should be a TK_COLUMN expression. The table referred to ** is in pTabList or else it is the NEW or OLD table of a trigger. ** Check to see if it is OK to read this particular column. ** ** If the auth function returns SQLITE_IGNORE, change the TK_COLUMN ** instruction into a TK_NULL. If the auth function returns SQLITE_DENY, ** then generate an error. */ SQLITE_PRIVATE void sqlite3AuthRead( Parse *pParse, /* The parser context */ Expr *pExpr, /* The expression to check authorization on */ Schema *pSchema, /* The schema of the expression */ SrcList *pTabList /* All table that pExpr might refer to */ ){ sqlite3 *db = pParse->db; Table *pTab = 0; /* The table being read */ const char *zCol; /* Name of the column of the table */ int iSrc; /* Index in pTabList->a[] of table being read */ int iDb; /* The index of the database the expression refers to */ int iCol; /* Index of column in table */ if( db->xAuth==0 ) return; iDb = sqlite3SchemaToIndex(pParse->db, pSchema); if( iDb<0 ){ /* An attempt to read a column out of a subquery or other ** temporary table. 
*/ return; } assert( pExpr->op==TK_COLUMN || pExpr->op==TK_TRIGGER ); if( pExpr->op==TK_TRIGGER ){ pTab = pParse->pTriggerTab; }else{ assert( pTabList ); for(iSrc=0; ALWAYS(iSrcnSrc); iSrc++){ if( pExpr->iTable==pTabList->a[iSrc].iCursor ){ pTab = pTabList->a[iSrc].pTab; break; } } } iCol = pExpr->iColumn; if( NEVER(pTab==0) ) return; if( iCol>=0 ){ assert( iColnCol ); zCol = pTab->aCol[iCol].zName; }else if( pTab->iPKey>=0 ){ assert( pTab->iPKeynCol ); zCol = pTab->aCol[pTab->iPKey].zName; }else{ zCol = "ROWID"; } assert( iDb>=0 && iDbnDb ); if( SQLITE_IGNORE==sqlite3AuthReadCol(pParse, pTab->zName, zCol, iDb) ){ pExpr->op = TK_NULL; } } /* ** Do an authorization check using the code and arguments given. Return ** either SQLITE_OK (zero) or SQLITE_IGNORE or SQLITE_DENY. If SQLITE_DENY ** is returned, then the error count and error message in pParse are ** modified appropriately. */ SQLITE_PRIVATE int sqlite3AuthCheck( Parse *pParse, int code, const char *zArg1, const char *zArg2, const char *zArg3 ){ sqlite3 *db = pParse->db; int rc; /* Don't do any authorization checks if the database is initialising ** or if the parser is being invoked from within sqlite3_declare_vtab. */ if( db->init.busy || IN_DECLARE_VTAB ){ return SQLITE_OK; } if( db->xAuth==0 ){ return SQLITE_OK; } rc = db->xAuth(db->pAuthArg, code, zArg1, zArg2, zArg3, pParse->zAuthContext #ifdef SQLITE_USER_AUTHENTICATION ,db->auth.zAuthUser #endif ); if( rc==SQLITE_DENY ){ sqlite3ErrorMsg(pParse, "not authorized"); pParse->rc = SQLITE_AUTH; }else if( rc!=SQLITE_OK && rc!=SQLITE_IGNORE ){ rc = SQLITE_DENY; sqliteAuthBadReturnCode(pParse); } return rc; } /* ** Push an authorization context. After this routine is called, the ** zArg3 argument to authorization callbacks will be zContext until ** popped. Or if pParse==0, this routine is a no-op. */ SQLITE_PRIVATE void sqlite3AuthContextPush( Parse *pParse, AuthContext *pContext, const char *zContext ){ assert( pParse ); pContext->pParse = pParse; pContext->zAuthContext = pParse->zAuthContext; pParse->zAuthContext = zContext; } /* ** Pop an authorization context that was previously pushed ** by sqlite3AuthContextPush */ SQLITE_PRIVATE void sqlite3AuthContextPop(AuthContext *pContext){ if( pContext->pParse ){ pContext->pParse->zAuthContext = pContext->zAuthContext; pContext->pParse = 0; } } #endif /* SQLITE_OMIT_AUTHORIZATION */ /************** End of auth.c ************************************************/ /************** Begin file build.c *******************************************/ /* ** 2001 September 15 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** This file contains C code routines that are called by the SQLite parser ** when syntax rules are reduced. The routines in this file handle the ** following kinds of SQL syntax: ** ** CREATE TABLE ** DROP TABLE ** CREATE INDEX ** DROP INDEX ** creating ID lists ** BEGIN TRANSACTION ** COMMIT ** ROLLBACK */ /* #include "sqliteInt.h" */ /* ** This routine is called when a new SQL statement is beginning to ** be parsed. Initialize the pParse structure as needed. 
*/ SQLITE_PRIVATE void sqlite3BeginParse(Parse *pParse, int explainFlag){ pParse->explain = (u8)explainFlag; pParse->nVar = 0; } #ifndef SQLITE_OMIT_SHARED_CACHE /* ** The TableLock structure is only used by the sqlite3TableLock() and ** codeTableLocks() functions. */ struct TableLock { int iDb; /* The database containing the table to be locked */ int iTab; /* The root page of the table to be locked */ u8 isWriteLock; /* True for write lock. False for a read lock */ const char *zName; /* Name of the table */ }; /* ** Record the fact that we want to lock a table at run-time. ** ** The table to be locked has root page iTab and is found in database iDb. ** A read or a write lock can be taken depending on isWritelock. ** ** This routine just records the fact that the lock is desired. The ** code to make the lock occur is generated by a later call to ** codeTableLocks() which occurs during sqlite3FinishCoding(). */ SQLITE_PRIVATE void sqlite3TableLock( Parse *pParse, /* Parsing context */ int iDb, /* Index of the database containing the table to lock */ int iTab, /* Root page number of the table to be locked */ u8 isWriteLock, /* True for a write lock */ const char *zName /* Name of the table to be locked */ ){ Parse *pToplevel = sqlite3ParseToplevel(pParse); int i; int nBytes; TableLock *p; assert( iDb>=0 ); for(i=0; inTableLock; i++){ p = &pToplevel->aTableLock[i]; if( p->iDb==iDb && p->iTab==iTab ){ p->isWriteLock = (p->isWriteLock || isWriteLock); return; } } nBytes = sizeof(TableLock) * (pToplevel->nTableLock+1); pToplevel->aTableLock = sqlite3DbReallocOrFree(pToplevel->db, pToplevel->aTableLock, nBytes); if( pToplevel->aTableLock ){ p = &pToplevel->aTableLock[pToplevel->nTableLock++]; p->iDb = iDb; p->iTab = iTab; p->isWriteLock = isWriteLock; p->zName = zName; }else{ pToplevel->nTableLock = 0; pToplevel->db->mallocFailed = 1; } } /* ** Code an OP_TableLock instruction for each table locked by the ** statement (configured by calls to sqlite3TableLock()). */ static void codeTableLocks(Parse *pParse){ int i; Vdbe *pVdbe; pVdbe = sqlite3GetVdbe(pParse); assert( pVdbe!=0 ); /* sqlite3GetVdbe cannot fail: VDBE already allocated */ for(i=0; inTableLock; i++){ TableLock *p = &pParse->aTableLock[i]; int p1 = p->iDb; sqlite3VdbeAddOp4(pVdbe, OP_TableLock, p1, p->iTab, p->isWriteLock, p->zName, P4_STATIC); } } #else #define codeTableLocks(x) #endif /* ** Return TRUE if the given yDbMask object is empty - if it contains no ** 1 bits. This routine is used by the DbMaskAllZero() and DbMaskNotZero() ** macros when SQLITE_MAX_ATTACHED is greater than 30. */ #if SQLITE_MAX_ATTACHED>30 SQLITE_PRIVATE int sqlite3DbMaskAllZero(yDbMask m){ int i; for(i=0; ipToplevel==0 ); db = pParse->db; if( pParse->nested ) return; if( db->mallocFailed || pParse->nErr ){ if( pParse->rc==SQLITE_OK ) pParse->rc = SQLITE_ERROR; return; } /* Begin by generating some termination code at the end of the ** vdbe program */ v = sqlite3GetVdbe(pParse); assert( !pParse->isMultiWrite || sqlite3VdbeAssertMayAbort(v, pParse->mayAbort)); if( v ){ while( sqlite3VdbeDeletePriorOpcode(v, OP_Close) ){} sqlite3VdbeAddOp0(v, OP_Halt); #if SQLITE_USER_AUTHENTICATION if( pParse->nTableLock>0 && db->init.busy==0 ){ sqlite3UserAuthInit(db); if( db->auth.authLevelrc = SQLITE_AUTH_USER; sqlite3ErrorMsg(pParse, "user not authenticated"); return; } } #endif /* The cookie mask contains one bit for each database file open. ** (Bit 0 is for main, bit 1 is for temp, and so forth.) Bits are ** set for each database that is used. 
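** For example, a statement that reads a table in "main" and writes a TEMP
** table ends up with bits 0 and 1 set in cookieMask and bit 1 set in
** writeMask (hypothetical case).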
Generate code to start a ** transaction on each used database and to verify the schema cookie ** on each used database. */ if( db->mallocFailed==0 && (DbMaskNonZero(pParse->cookieMask) || pParse->pConstExpr) ){ int iDb, i; assert( sqlite3VdbeGetOp(v, 0)->opcode==OP_Init ); sqlite3VdbeJumpHere(v, 0); for(iDb=0; iDbnDb; iDb++){ if( DbMaskTest(pParse->cookieMask, iDb)==0 ) continue; sqlite3VdbeUsesBtree(v, iDb); sqlite3VdbeAddOp4Int(v, OP_Transaction, /* Opcode */ iDb, /* P1 */ DbMaskTest(pParse->writeMask,iDb), /* P2 */ pParse->cookieValue[iDb], /* P3 */ db->aDb[iDb].pSchema->iGeneration /* P4 */ ); if( db->init.busy==0 ) sqlite3VdbeChangeP5(v, 1); } #ifndef SQLITE_OMIT_VIRTUALTABLE for(i=0; inVtabLock; i++){ char *vtab = (char *)sqlite3GetVTable(db, pParse->apVtabLock[i]); sqlite3VdbeAddOp4(v, OP_VBegin, 0, 0, 0, vtab, P4_VTAB); } pParse->nVtabLock = 0; #endif /* Once all the cookies have been verified and transactions opened, ** obtain the required table-locks. This is a no-op unless the ** shared-cache feature is enabled. */ codeTableLocks(pParse); /* Initialize any AUTOINCREMENT data structures required. */ sqlite3AutoincrementBegin(pParse); /* Code constant expressions that where factored out of inner loops */ if( pParse->pConstExpr ){ ExprList *pEL = pParse->pConstExpr; pParse->okConstFactor = 0; for(i=0; inExpr; i++){ sqlite3ExprCode(pParse, pEL->a[i].pExpr, pEL->a[i].u.iConstExprReg); } } /* Finally, jump back to the beginning of the executable code. */ sqlite3VdbeAddOp2(v, OP_Goto, 0, 1); } } /* Get the VDBE program ready for execution */ if( v && pParse->nErr==0 && !db->mallocFailed ){ assert( pParse->iCacheLevel==0 ); /* Disables and re-enables match */ /* A minimum of one cursor is required if autoincrement is used * See ticket [a696379c1f08866] */ if( pParse->pAinc!=0 && pParse->nTab==0 ) pParse->nTab = 1; sqlite3VdbeMakeReady(v, pParse); pParse->rc = SQLITE_DONE; pParse->colNamesSet = 0; }else{ pParse->rc = SQLITE_ERROR; } pParse->nTab = 0; pParse->nMem = 0; pParse->nSet = 0; pParse->nVar = 0; DbMaskZero(pParse->cookieMask); } /* ** Run the parser and code generator recursively in order to generate ** code for the SQL statement given onto the end of the pParse context ** currently under construction. When the parser is run recursively ** this way, the final OP_Halt is not appended and other initialization ** and finalization steps are omitted because those are handling by the ** outermost parser. ** ** Not everything is nestable. This facility is designed to permit ** INSERT, UPDATE, and DELETE operations against SQLITE_MASTER. Use ** care if you decide to try to use this routine for some other purposes. */ SQLITE_PRIVATE void sqlite3NestedParse(Parse *pParse, const char *zFormat, ...){ va_list ap; char *zSql; char *zErrMsg = 0; sqlite3 *db = pParse->db; # define SAVE_SZ (sizeof(Parse) - offsetof(Parse,nVar)) char saveBuf[SAVE_SZ]; if( pParse->nErr ) return; assert( pParse->nested<10 ); /* Nesting should only be of limited depth */ va_start(ap, zFormat); zSql = sqlite3VMPrintf(db, zFormat, ap); va_end(ap); if( zSql==0 ){ return; /* A malloc must have failed */ } pParse->nested++; memcpy(saveBuf, &pParse->nVar, SAVE_SZ); memset(&pParse->nVar, 0, SAVE_SZ); sqlite3RunParser(pParse, zSql, &zErrMsg); sqlite3DbFree(db, zErrMsg); sqlite3DbFree(db, zSql); memcpy(&pParse->nVar, saveBuf, SAVE_SZ); pParse->nested--; } #if SQLITE_USER_AUTHENTICATION /* ** Return TRUE if zTable is the name of the system table that stores the ** list of users and their access credentials. 
*/ SQLITE_PRIVATE int sqlite3UserAuthTable(const char *zTable){ return sqlite3_stricmp(zTable, "sqlite_user")==0; } #endif /* ** Locate the in-memory structure that describes a particular database ** table given the name of that table and (optionally) the name of the ** database containing the table. Return NULL if not found. ** ** If zDatabase is 0, all databases are searched for the table and the ** first matching table is returned. (No checking for duplicate table ** names is done.) The search order is TEMP first, then MAIN, then any ** auxiliary databases added using the ATTACH command. ** ** See also sqlite3LocateTable(). */ SQLITE_PRIVATE Table *sqlite3FindTable(sqlite3 *db, const char *zName, const char *zDatabase){ Table *p = 0; int i; /* All mutexes are required for schema access. Make sure we hold them. */ assert( zDatabase!=0 || sqlite3BtreeHoldsAllMutexes(db) ); #if SQLITE_USER_AUTHENTICATION /* Only the admin user is allowed to know that the sqlite_user table ** exists */ if( db->auth.authLevelnDb; i++){ int j = (i<2) ? i^1 : i; /* Search TEMP before MAIN */ if( zDatabase!=0 && sqlite3StrICmp(zDatabase, db->aDb[j].zName) ) continue; assert( sqlite3SchemaMutexHeld(db, j, 0) ); p = sqlite3HashFind(&db->aDb[j].pSchema->tblHash, zName); if( p ) break; } return p; } /* ** Locate the in-memory structure that describes a particular database ** table given the name of that table and (optionally) the name of the ** database containing the table. Return NULL if not found. Also leave an ** error message in pParse->zErrMsg. ** ** The difference between this routine and sqlite3FindTable() is that this ** routine leaves an error message in pParse->zErrMsg where ** sqlite3FindTable() does not. */ SQLITE_PRIVATE Table *sqlite3LocateTable( Parse *pParse, /* context in which to report errors */ int isView, /* True if looking for a VIEW rather than a TABLE */ const char *zName, /* Name of the table we are looking for */ const char *zDbase /* Name of the database. Might be NULL */ ){ Table *p; /* Read the database schema. If an error occurs, leave an error message ** and code in pParse and return NULL. */ if( SQLITE_OK!=sqlite3ReadSchema(pParse) ){ return 0; } p = sqlite3FindTable(pParse->db, zName, zDbase); if( p==0 ){ const char *zMsg = isView ? "no such view" : "no such table"; if( zDbase ){ sqlite3ErrorMsg(pParse, "%s: %s.%s", zMsg, zDbase, zName); }else{ sqlite3ErrorMsg(pParse, "%s: %s", zMsg, zName); } pParse->checkSchema = 1; } #if SQLITE_USER_AUTHENICATION else if( pParse->db->auth.authLevelpSchema) if it is not NULL. p->pSchema may be ** non-NULL if it is part of a view or trigger program definition. See ** sqlite3FixSrcList() for details. */ SQLITE_PRIVATE Table *sqlite3LocateTableItem( Parse *pParse, int isView, struct SrcList_item *p ){ const char *zDb; assert( p->pSchema==0 || p->zDatabase==0 ); if( p->pSchema ){ int iDb = sqlite3SchemaToIndex(pParse->db, p->pSchema); zDb = pParse->db->aDb[iDb].zName; }else{ zDb = p->zDatabase; } return sqlite3LocateTable(pParse, isView, p->zName, zDb); } /* ** Locate the in-memory structure that describes ** a particular index given the name of that index ** and the name of the database that contains the index. ** Return NULL if not found. ** ** If zDatabase is 0, all databases are searched for the ** table and the first matching index is returned. (No checking ** for duplicate index names is done.) The search order is ** TEMP first, then MAIN, then any auxiliary databases added ** using the ATTACH command. 
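** For example (illustrative): with the loop index i running 0, 1, 2, ...,
** the mapping j = (i<2) ? i^1 : i used below visits db->aDb[] in the
** order 1 (TEMP), 0 (MAIN), 2, 3, ..., which produces exactly the search
** order described above.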
*/ SQLITE_PRIVATE Index *sqlite3FindIndex(sqlite3 *db, const char *zName, const char *zDb){ Index *p = 0; int i; /* All mutexes are required for schema access. Make sure we hold them. */ assert( zDb!=0 || sqlite3BtreeHoldsAllMutexes(db) ); for(i=OMIT_TEMPDB; inDb; i++){ int j = (i<2) ? i^1 : i; /* Search TEMP before MAIN */ Schema *pSchema = db->aDb[j].pSchema; assert( pSchema ); if( zDb && sqlite3StrICmp(zDb, db->aDb[j].zName) ) continue; assert( sqlite3SchemaMutexHeld(db, j, 0) ); p = sqlite3HashFind(&pSchema->idxHash, zName); if( p ) break; } return p; } /* ** Reclaim the memory used by an index */ static void freeIndex(sqlite3 *db, Index *p){ #ifndef SQLITE_OMIT_ANALYZE sqlite3DeleteIndexSamples(db, p); #endif sqlite3ExprDelete(db, p->pPartIdxWhere); sqlite3DbFree(db, p->zColAff); if( p->isResized ) sqlite3DbFree(db, p->azColl); #ifdef SQLITE_ENABLE_STAT3_OR_STAT4 sqlite3_free(p->aiRowEst); #endif sqlite3DbFree(db, p); } /* ** For the index called zIdxName which is found in the database iDb, ** unlike that index from its Table then remove the index from ** the index hash table and free all memory structures associated ** with the index. */ SQLITE_PRIVATE void sqlite3UnlinkAndDeleteIndex(sqlite3 *db, int iDb, const char *zIdxName){ Index *pIndex; Hash *pHash; assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); pHash = &db->aDb[iDb].pSchema->idxHash; pIndex = sqlite3HashInsert(pHash, zIdxName, 0); if( ALWAYS(pIndex) ){ if( pIndex->pTable->pIndex==pIndex ){ pIndex->pTable->pIndex = pIndex->pNext; }else{ Index *p; /* Justification of ALWAYS(); The index must be on the list of ** indices. */ p = pIndex->pTable->pIndex; while( ALWAYS(p) && p->pNext!=pIndex ){ p = p->pNext; } if( ALWAYS(p && p->pNext==pIndex) ){ p->pNext = pIndex->pNext; } } freeIndex(db, pIndex); } db->flags |= SQLITE_InternChanges; } /* ** Look through the list of open database files in db->aDb[] and if ** any have been closed, remove them from the list. Reallocate the ** db->aDb[] structure to a smaller size, if possible. ** ** Entry 0 (the "main" database) and entry 1 (the "temp" database) ** are never candidates for being collapsed. */ SQLITE_PRIVATE void sqlite3CollapseDatabaseArray(sqlite3 *db){ int i, j; for(i=j=2; inDb; i++){ struct Db *pDb = &db->aDb[i]; if( pDb->pBt==0 ){ sqlite3DbFree(db, pDb->zName); pDb->zName = 0; continue; } if( jaDb[j] = db->aDb[i]; } j++; } memset(&db->aDb[j], 0, (db->nDb-j)*sizeof(db->aDb[j])); db->nDb = j; if( db->nDb<=2 && db->aDb!=db->aDbStatic ){ memcpy(db->aDbStatic, db->aDb, 2*sizeof(db->aDb[0])); sqlite3DbFree(db, db->aDb); db->aDb = db->aDbStatic; } } /* ** Reset the schema for the database at index iDb. Also reset the ** TEMP schema. */ SQLITE_PRIVATE void sqlite3ResetOneSchema(sqlite3 *db, int iDb){ Db *pDb; assert( iDbnDb ); /* Case 1: Reset the single schema identified by iDb */ pDb = &db->aDb[iDb]; assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); assert( pDb->pSchema!=0 ); sqlite3SchemaClear(pDb->pSchema); /* If any database other than TEMP is reset, then also reset TEMP ** since TEMP might be holding triggers that reference tables in the ** other database. */ if( iDb!=1 ){ pDb = &db->aDb[1]; assert( pDb->pSchema!=0 ); sqlite3SchemaClear(pDb->pSchema); } return; } /* ** Erase all schema information from all attached databases (including ** "main" and "temp") for a single database connection. 
*/ SQLITE_PRIVATE void sqlite3ResetAllSchemasOfConnection(sqlite3 *db){ int i; sqlite3BtreeEnterAll(db); for(i=0; inDb; i++){ Db *pDb = &db->aDb[i]; if( pDb->pSchema ){ sqlite3SchemaClear(pDb->pSchema); } } db->flags &= ~SQLITE_InternChanges; sqlite3VtabUnlockList(db); sqlite3BtreeLeaveAll(db); sqlite3CollapseDatabaseArray(db); } /* ** This routine is called when a commit occurs. */ SQLITE_PRIVATE void sqlite3CommitInternalChanges(sqlite3 *db){ db->flags &= ~SQLITE_InternChanges; } /* ** Delete memory allocated for the column names of a table or view (the ** Table.aCol[] array). */ static void sqliteDeleteColumnNames(sqlite3 *db, Table *pTable){ int i; Column *pCol; assert( pTable!=0 ); if( (pCol = pTable->aCol)!=0 ){ for(i=0; inCol; i++, pCol++){ sqlite3DbFree(db, pCol->zName); sqlite3ExprDelete(db, pCol->pDflt); sqlite3DbFree(db, pCol->zDflt); sqlite3DbFree(db, pCol->zType); sqlite3DbFree(db, pCol->zColl); } sqlite3DbFree(db, pTable->aCol); } } /* ** Remove the memory data structures associated with the given ** Table. No changes are made to disk by this routine. ** ** This routine just deletes the data structure. It does not unlink ** the table data structure from the hash table. But it does destroy ** memory structures of the indices and foreign keys associated with ** the table. ** ** The db parameter is optional. It is needed if the Table object ** contains lookaside memory. (Table objects in the schema do not use ** lookaside memory, but some ephemeral Table objects do.) Or the ** db parameter can be used with db->pnBytesFreed to measure the memory ** used by the Table object. */ SQLITE_PRIVATE void sqlite3DeleteTable(sqlite3 *db, Table *pTable){ Index *pIndex, *pNext; TESTONLY( int nLookaside; ) /* Used to verify lookaside not used for schema */ assert( !pTable || pTable->nRef>0 ); /* Do not delete the table until the reference count reaches zero. */ if( !pTable ) return; if( ((!db || db->pnBytesFreed==0) && (--pTable->nRef)>0) ) return; /* Record the number of outstanding lookaside allocations in schema Tables ** prior to doing any free() operations. Since schema Tables do not use ** lookaside, this number should not change. */ TESTONLY( nLookaside = (db && (pTable->tabFlags & TF_Ephemeral)==0) ? db->lookaside.nOut : 0 ); /* Delete all indices associated with this table. */ for(pIndex = pTable->pIndex; pIndex; pIndex=pNext){ pNext = pIndex->pNext; assert( pIndex->pSchema==pTable->pSchema ); if( !db || db->pnBytesFreed==0 ){ char *zName = pIndex->zName; TESTONLY ( Index *pOld = ) sqlite3HashInsert( &pIndex->pSchema->idxHash, zName, 0 ); assert( db==0 || sqlite3SchemaMutexHeld(db, 0, pIndex->pSchema) ); assert( pOld==pIndex || pOld==0 ); } freeIndex(db, pIndex); } /* Delete any foreign keys attached to this table. */ sqlite3FkDelete(db, pTable); /* Delete the Table structure itself. */ sqliteDeleteColumnNames(db, pTable); sqlite3DbFree(db, pTable->zName); sqlite3DbFree(db, pTable->zColAff); sqlite3SelectDelete(db, pTable->pSelect); #ifndef SQLITE_OMIT_CHECK sqlite3ExprListDelete(db, pTable->pCheck); #endif #ifndef SQLITE_OMIT_VIRTUALTABLE sqlite3VtabClear(db, pTable); #endif sqlite3DbFree(db, pTable); /* Verify that no lookaside memory was used by schema tables */ assert( nLookaside==0 || nLookaside==db->lookaside.nOut ); } /* ** Unlink the given table from the hash tables and the delete the ** table structure with all its indices and foreign keys. 
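** As coded below, passing a NULL data pointer to sqlite3HashInsert()
** removes the named entry from the schema's table hash and returns the
** previous Table pointer, which is then released via sqlite3DeleteTable().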
*/ SQLITE_PRIVATE void sqlite3UnlinkAndDeleteTable(sqlite3 *db, int iDb, const char *zTabName){ Table *p; Db *pDb; assert( db!=0 ); assert( iDb>=0 && iDbnDb ); assert( zTabName ); assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); testcase( zTabName[0]==0 ); /* Zero-length table names are allowed */ pDb = &db->aDb[iDb]; p = sqlite3HashInsert(&pDb->pSchema->tblHash, zTabName, 0); sqlite3DeleteTable(db, p); db->flags |= SQLITE_InternChanges; } /* ** Given a token, return a string that consists of the text of that ** token. Space to hold the returned string ** is obtained from sqliteMalloc() and must be freed by the calling ** function. ** ** Any quotation marks (ex: "name", 'name', [name], or `name`) that ** surround the body of the token are removed. ** ** Tokens are often just pointers into the original SQL text and so ** are not \000 terminated and are not persistent. The returned string ** is \000 terminated and is persistent. */ SQLITE_PRIVATE char *sqlite3NameFromToken(sqlite3 *db, Token *pName){ char *zName; if( pName ){ zName = sqlite3DbStrNDup(db, (char*)pName->z, pName->n); sqlite3Dequote(zName); }else{ zName = 0; } return zName; } /* ** Open the sqlite_master table stored in database number iDb for ** writing. The table is opened using cursor 0. */ SQLITE_PRIVATE void sqlite3OpenMasterTable(Parse *p, int iDb){ Vdbe *v = sqlite3GetVdbe(p); sqlite3TableLock(p, iDb, MASTER_ROOT, 1, SCHEMA_TABLE(iDb)); sqlite3VdbeAddOp4Int(v, OP_OpenWrite, 0, MASTER_ROOT, iDb, 5); if( p->nTab==0 ){ p->nTab = 1; } } /* ** Parameter zName points to a nul-terminated buffer containing the name ** of a database ("main", "temp" or the name of an attached db). This ** function returns the index of the named database in db->aDb[], or ** -1 if the named db cannot be found. */ SQLITE_PRIVATE int sqlite3FindDbName(sqlite3 *db, const char *zName){ int i = -1; /* Database number */ if( zName ){ Db *pDb; int n = sqlite3Strlen30(zName); for(i=(db->nDb-1), pDb=&db->aDb[i]; i>=0; i--, pDb--){ if( (!OMIT_TEMPDB || i!=1 ) && n==sqlite3Strlen30(pDb->zName) && 0==sqlite3StrICmp(pDb->zName, zName) ){ break; } } } return i; } /* ** The token *pName contains the name of a database (either "main" or ** "temp" or the name of an attached db). This routine returns the ** index of the named database in db->aDb[], or -1 if the named db ** does not exist. */ SQLITE_PRIVATE int sqlite3FindDb(sqlite3 *db, Token *pName){ int i; /* Database number */ char *zName; /* Name we are searching for */ zName = sqlite3NameFromToken(db, pName); i = sqlite3FindDbName(db, zName); sqlite3DbFree(db, zName); return i; } /* The table or view or trigger name is passed to this routine via tokens ** pName1 and pName2. If the table name was fully qualified, for example: ** ** CREATE TABLE xxx.yyy (...); ** ** Then pName1 is set to "xxx" and pName2 "yyy". On the other hand if ** the table name is not fully qualified, i.e.: ** ** CREATE TABLE yyy(...); ** ** Then pName1 is set to "yyy" and pName2 is "". ** ** This routine sets the *ppUnqual pointer to point at the token (pName1 or ** pName2) that stores the unqualified table name. The index of the ** database "xxx" is returned. 
*/ SQLITE_PRIVATE int sqlite3TwoPartName( Parse *pParse, /* Parsing and code generating context */ Token *pName1, /* The "xxx" in the name "xxx.yyy" or "xxx" */ Token *pName2, /* The "yyy" in the name "xxx.yyy" */ Token **pUnqual /* Write the unqualified object name here */ ){ int iDb; /* Database holding the object */ sqlite3 *db = pParse->db; if( ALWAYS(pName2!=0) && pName2->n>0 ){ if( db->init.busy ) { sqlite3ErrorMsg(pParse, "corrupt database"); return -1; } *pUnqual = pName2; iDb = sqlite3FindDb(db, pName1); if( iDb<0 ){ sqlite3ErrorMsg(pParse, "unknown database %T", pName1); return -1; } }else{ assert( db->init.iDb==0 || db->init.busy ); iDb = db->init.iDb; *pUnqual = pName1; } return iDb; } /* ** This routine is used to check if the UTF-8 string zName is a legal ** unqualified name for a new schema object (table, index, view or ** trigger). All names are legal except those that begin with the string ** "sqlite_" (in upper, lower or mixed case). This portion of the namespace ** is reserved for internal use. */ SQLITE_PRIVATE int sqlite3CheckObjectName(Parse *pParse, const char *zName){ if( !pParse->db->init.busy && pParse->nested==0 && (pParse->db->flags & SQLITE_WriteSchema)==0 && 0==sqlite3StrNICmp(zName, "sqlite_", 7) ){ sqlite3ErrorMsg(pParse, "object name reserved for internal use: %s", zName); return SQLITE_ERROR; } return SQLITE_OK; } /* ** Return the PRIMARY KEY index of a table */ SQLITE_PRIVATE Index *sqlite3PrimaryKeyIndex(Table *pTab){ Index *p; for(p=pTab->pIndex; p && !IsPrimaryKeyIndex(p); p=p->pNext){} return p; } /* ** Return the column of index pIdx that corresponds to table ** column iCol. Return -1 if not found. */ SQLITE_PRIVATE i16 sqlite3ColumnOfIndex(Index *pIdx, i16 iCol){ int i; for(i=0; inColumn; i++){ if( iCol==pIdx->aiColumn[i] ) return i; } return -1; } /* ** Begin constructing a new table representation in memory. This is ** the first of several action routines that get called in response ** to a CREATE TABLE statement. In particular, this routine is called ** after seeing tokens "CREATE" and "TABLE" and the table name. The isTemp ** flag is true if the table should be stored in the auxiliary database ** file instead of in the main database file. This is normally the case ** when the "TEMP" or "TEMPORARY" keyword occurs in between ** CREATE and TABLE. ** ** The new table record is initialized and put in pParse->pNewTable. ** As more of the CREATE TABLE statement is parsed, additional action ** routines will be called to add more information to this record. ** At the end of the CREATE TABLE statement, the sqlite3EndTable() routine ** is called to complete the construction of the new table record. */ SQLITE_PRIVATE void sqlite3StartTable( Parse *pParse, /* Parser context */ Token *pName1, /* First part of the name of the table or view */ Token *pName2, /* Second part of the name of the table or view */ int isTemp, /* True if this is a TEMP table */ int isView, /* True if this is a VIEW */ int isVirtual, /* True if this is a VIRTUAL table */ int noErr /* Do nothing if table already exists */ ){ Table *pTable; char *zName = 0; /* The name of the new table */ sqlite3 *db = pParse->db; Vdbe *v; int iDb; /* Database number to create the table in */ Token *pName; /* Unqualified name of the table to create */ /* The table or view name to create is passed to this routine via tokens ** pName1 and pName2. If the table name was fully qualified, for example: ** ** CREATE TABLE xxx.yyy (...); ** ** Then pName1 is set to "xxx" and pName2 "yyy". 
On the other hand if ** the table name is not fully qualified, i.e.: ** ** CREATE TABLE yyy(...); ** ** Then pName1 is set to "yyy" and pName2 is "". ** ** The call below sets the pName pointer to point at the token (pName1 or ** pName2) that stores the unqualified table name. The variable iDb is ** set to the index of the database that the table or view is to be ** created in. */ iDb = sqlite3TwoPartName(pParse, pName1, pName2, &pName); if( iDb<0 ) return; if( !OMIT_TEMPDB && isTemp && pName2->n>0 && iDb!=1 ){ /* If creating a temp table, the name may not be qualified. Unless ** the database name is "temp" anyway. */ sqlite3ErrorMsg(pParse, "temporary table name must be unqualified"); return; } if( !OMIT_TEMPDB && isTemp ) iDb = 1; pParse->sNameToken = *pName; zName = sqlite3NameFromToken(db, pName); if( zName==0 ) return; if( SQLITE_OK!=sqlite3CheckObjectName(pParse, zName) ){ goto begin_table_error; } if( db->init.iDb==1 ) isTemp = 1; #ifndef SQLITE_OMIT_AUTHORIZATION assert( (isTemp & 1)==isTemp ); { int code; char *zDb = db->aDb[iDb].zName; if( sqlite3AuthCheck(pParse, SQLITE_INSERT, SCHEMA_TABLE(isTemp), 0, zDb) ){ goto begin_table_error; } if( isView ){ if( !OMIT_TEMPDB && isTemp ){ code = SQLITE_CREATE_TEMP_VIEW; }else{ code = SQLITE_CREATE_VIEW; } }else{ if( !OMIT_TEMPDB && isTemp ){ code = SQLITE_CREATE_TEMP_TABLE; }else{ code = SQLITE_CREATE_TABLE; } } if( !isVirtual && sqlite3AuthCheck(pParse, code, zName, 0, zDb) ){ goto begin_table_error; } } #endif /* Make sure the new table name does not collide with an existing ** index or table name in the same database. Issue an error message if ** it does. The exception is if the statement being parsed was passed ** to an sqlite3_declare_vtab() call. In that case only the column names ** and types will be used, so there is no need to test for namespace ** collisions. */ if( !IN_DECLARE_VTAB ){ char *zDb = db->aDb[iDb].zName; if( SQLITE_OK!=sqlite3ReadSchema(pParse) ){ goto begin_table_error; } pTable = sqlite3FindTable(db, zName, zDb); if( pTable ){ if( !noErr ){ sqlite3ErrorMsg(pParse, "table %T already exists", pName); }else{ assert( !db->init.busy || CORRUPT_DB ); sqlite3CodeVerifySchema(pParse, iDb); } goto begin_table_error; } if( sqlite3FindIndex(db, zName, zDb)!=0 ){ sqlite3ErrorMsg(pParse, "there is already an index named %s", zName); goto begin_table_error; } } pTable = sqlite3DbMallocZero(db, sizeof(Table)); if( pTable==0 ){ db->mallocFailed = 1; pParse->rc = SQLITE_NOMEM; pParse->nErr++; goto begin_table_error; } pTable->zName = zName; pTable->iPKey = -1; pTable->pSchema = db->aDb[iDb].pSchema; pTable->nRef = 1; pTable->nRowLogEst = 200; assert( 200==sqlite3LogEst(1048576) ); assert( pParse->pNewTable==0 ); pParse->pNewTable = pTable; /* If this is the magic sqlite_sequence table used by autoincrement, ** then record a pointer to this table in the main database structure ** so that INSERT can find the table easily. */ #ifndef SQLITE_OMIT_AUTOINCREMENT if( !pParse->nested && strcmp(zName, "sqlite_sequence")==0 ){ assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); pTable->pSchema->pSeqTab = pTable; } #endif /* Begin generating the code that will insert the table record into ** the SQLITE_MASTER table. Note in particular that we must go ahead ** and allocate the record number for the table entry now. Before any ** PRIMARY KEY or UNIQUE keywords are parsed. Those keywords will cause ** indices to be created and the table record must come before the ** indices. Hence, the record number for the table must be allocated ** now. 
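** In outline (an illustrative summary of the code below): if the file
** format and text encoding cookies are still unset they are written now;
** an OP_CreateTable allocates the new root page (views and virtual tables
** store 0 instead); sqlite_master is opened and a place-holder row is
** inserted, with its rowid left in pParse->regRowid and the root page in
** pParse->regRoot for sqlite3EndTable() to fill in later.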
*/ if( !db->init.busy && (v = sqlite3GetVdbe(pParse))!=0 ){ int j1; int fileFormat; int reg1, reg2, reg3; sqlite3BeginWriteOperation(pParse, 1, iDb); #ifndef SQLITE_OMIT_VIRTUALTABLE if( isVirtual ){ sqlite3VdbeAddOp0(v, OP_VBegin); } #endif /* If the file format and encoding in the database have not been set, ** set them now. */ reg1 = pParse->regRowid = ++pParse->nMem; reg2 = pParse->regRoot = ++pParse->nMem; reg3 = ++pParse->nMem; sqlite3VdbeAddOp3(v, OP_ReadCookie, iDb, reg3, BTREE_FILE_FORMAT); sqlite3VdbeUsesBtree(v, iDb); j1 = sqlite3VdbeAddOp1(v, OP_If, reg3); VdbeCoverage(v); fileFormat = (db->flags & SQLITE_LegacyFileFmt)!=0 ? 1 : SQLITE_MAX_FILE_FORMAT; sqlite3VdbeAddOp2(v, OP_Integer, fileFormat, reg3); sqlite3VdbeAddOp3(v, OP_SetCookie, iDb, BTREE_FILE_FORMAT, reg3); sqlite3VdbeAddOp2(v, OP_Integer, ENC(db), reg3); sqlite3VdbeAddOp3(v, OP_SetCookie, iDb, BTREE_TEXT_ENCODING, reg3); sqlite3VdbeJumpHere(v, j1); /* This just creates a place-holder record in the sqlite_master table. ** The record created does not contain anything yet. It will be replaced ** by the real entry in code generated at sqlite3EndTable(). ** ** The rowid for the new entry is left in register pParse->regRowid. ** The root page number of the new table is left in reg pParse->regRoot. ** The rowid and root page number values are needed by the code that ** sqlite3EndTable will generate. */ #if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_VIRTUALTABLE) if( isView || isVirtual ){ sqlite3VdbeAddOp2(v, OP_Integer, 0, reg2); }else #endif { pParse->addrCrTab = sqlite3VdbeAddOp2(v, OP_CreateTable, iDb, reg2); } sqlite3OpenMasterTable(pParse, iDb); sqlite3VdbeAddOp2(v, OP_NewRowid, 0, reg1); sqlite3VdbeAddOp2(v, OP_Null, 0, reg3); sqlite3VdbeAddOp3(v, OP_Insert, 0, reg3, reg1); sqlite3VdbeChangeP5(v, OPFLAG_APPEND); sqlite3VdbeAddOp0(v, OP_Close); } /* Normal (non-error) return. */ return; /* If an error occurs, we jump here */ begin_table_error: sqlite3DbFree(db, zName); return; } /* ** This macro is used to compare two strings in a case-insensitive manner. ** It is slightly faster than calling sqlite3StrICmp() directly, but ** produces larger code. ** ** WARNING: This macro is not compatible with the strcmp() family. It ** returns true if the two strings are equal, otherwise false. */ #define STRICMP(x, y) (\ sqlite3UpperToLower[*(unsigned char *)(x)]== \ sqlite3UpperToLower[*(unsigned char *)(y)] \ && sqlite3StrICmp((x)+1,(y)+1)==0 ) /* ** Add a new column to the table currently being constructed. ** ** The parser calls this routine once for each column declaration ** in a CREATE TABLE statement. sqlite3StartTable() gets called ** first to get things going. Then this routine is called for each ** column. 
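** For example (illustrative): parsing "CREATE TABLE t1(a, b, c)" results
** in three calls to this routine, one each for the column names "a", "b"
** and "c".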
*/ SQLITE_PRIVATE void sqlite3AddColumn(Parse *pParse, Token *pName){ Table *p; int i; char *z; Column *pCol; sqlite3 *db = pParse->db; if( (p = pParse->pNewTable)==0 ) return; #if SQLITE_MAX_COLUMN if( p->nCol+1>db->aLimit[SQLITE_LIMIT_COLUMN] ){ sqlite3ErrorMsg(pParse, "too many columns on %s", p->zName); return; } #endif z = sqlite3NameFromToken(db, pName); if( z==0 ) return; for(i=0; inCol; i++){ if( STRICMP(z, p->aCol[i].zName) ){ sqlite3ErrorMsg(pParse, "duplicate column name: %s", z); sqlite3DbFree(db, z); return; } } if( (p->nCol & 0x7)==0 ){ Column *aNew; aNew = sqlite3DbRealloc(db,p->aCol,(p->nCol+8)*sizeof(p->aCol[0])); if( aNew==0 ){ sqlite3DbFree(db, z); return; } p->aCol = aNew; } pCol = &p->aCol[p->nCol]; memset(pCol, 0, sizeof(p->aCol[0])); pCol->zName = z; /* If there is no type specified, columns have the default affinity ** 'BLOB'. If there is a type specified, then sqlite3AddColumnType() will ** be called next to set pCol->affinity correctly. */ pCol->affinity = SQLITE_AFF_BLOB; pCol->szEst = 1; p->nCol++; } /* ** This routine is called by the parser while in the middle of ** parsing a CREATE TABLE statement. A "NOT NULL" constraint has ** been seen on a column. This routine sets the notNull flag on ** the column currently under construction. */ SQLITE_PRIVATE void sqlite3AddNotNull(Parse *pParse, int onError){ Table *p; p = pParse->pNewTable; if( p==0 || NEVER(p->nCol<1) ) return; p->aCol[p->nCol-1].notNull = (u8)onError; } /* ** Scan the column type name zType (length nType) and return the ** associated affinity type. ** ** This routine does a case-independent search of zType for the ** substrings in the following table. If one of the substrings is ** found, the corresponding affinity is returned. If zType contains ** more than one of the substrings, entries toward the top of ** the table take priority. For example, if zType is 'BLOBINT', ** SQLITE_AFF_INTEGER is returned. ** ** Substring | Affinity ** -------------------------------- ** 'INT' | SQLITE_AFF_INTEGER ** 'CHAR' | SQLITE_AFF_TEXT ** 'CLOB' | SQLITE_AFF_TEXT ** 'TEXT' | SQLITE_AFF_TEXT ** 'BLOB' | SQLITE_AFF_BLOB ** 'REAL' | SQLITE_AFF_REAL ** 'FLOA' | SQLITE_AFF_REAL ** 'DOUB' | SQLITE_AFF_REAL ** ** If none of the substrings in the above table are found, ** SQLITE_AFF_NUMERIC is returned. */ SQLITE_PRIVATE char sqlite3AffinityType(const char *zIn, u8 *pszEst){ u32 h = 0; char aff = SQLITE_AFF_NUMERIC; const char *zChar = 0; if( zIn==0 ) return aff; while( zIn[0] ){ h = (h<<8) + sqlite3UpperToLower[(*zIn)&0xff]; zIn++; if( h==(('c'<<24)+('h'<<16)+('a'<<8)+'r') ){ /* CHAR */ aff = SQLITE_AFF_TEXT; zChar = zIn; }else if( h==(('c'<<24)+('l'<<16)+('o'<<8)+'b') ){ /* CLOB */ aff = SQLITE_AFF_TEXT; }else if( h==(('t'<<24)+('e'<<16)+('x'<<8)+'t') ){ /* TEXT */ aff = SQLITE_AFF_TEXT; }else if( h==(('b'<<24)+('l'<<16)+('o'<<8)+'b') /* BLOB */ && (aff==SQLITE_AFF_NUMERIC || aff==SQLITE_AFF_REAL) ){ aff = SQLITE_AFF_BLOB; if( zIn[0]=='(' ) zChar = zIn; #ifndef SQLITE_OMIT_FLOATING_POINT }else if( h==(('r'<<24)+('e'<<16)+('a'<<8)+'l') /* REAL */ && aff==SQLITE_AFF_NUMERIC ){ aff = SQLITE_AFF_REAL; }else if( h==(('f'<<24)+('l'<<16)+('o'<<8)+'a') /* FLOA */ && aff==SQLITE_AFF_NUMERIC ){ aff = SQLITE_AFF_REAL; }else if( h==(('d'<<24)+('o'<<16)+('u'<<8)+'b') /* DOUB */ && aff==SQLITE_AFF_NUMERIC ){ aff = SQLITE_AFF_REAL; #endif }else if( (h&0x00FFFFFF)==(('i'<<16)+('n'<<8)+'t') ){ /* INT */ aff = SQLITE_AFF_INTEGER; break; } } /* If pszEst is not NULL, store an estimate of the field size. 
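** (Illustrative examples of the estimate computed below: VARCHAR(100)
** yields 100/4+1 = 26, an unsized TEXT, BLOB or CLOB column yields 5,
** and any other declaration keeps the default of 1.)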
The ** estimate is scaled so that the size of an integer is 1. */ if( pszEst ){ *pszEst = 1; /* default size is approx 4 bytes */ if( aff255 ) v = 255; *pszEst = v; /* BLOB(k), VARCHAR(k), CHAR(k) -> r=(k/4+1) */ break; } zChar++; } }else{ *pszEst = 5; /* BLOB, TEXT, CLOB -> r=5 (approx 20 bytes)*/ } } } return aff; } /* ** This routine is called by the parser while in the middle of ** parsing a CREATE TABLE statement. The pFirst token is the first ** token in the sequence of tokens that describe the type of the ** column currently under construction. pLast is the last token ** in the sequence. Use this information to construct a string ** that contains the typename of the column and store that string ** in zType. */ SQLITE_PRIVATE void sqlite3AddColumnType(Parse *pParse, Token *pType){ Table *p; Column *pCol; p = pParse->pNewTable; if( p==0 || NEVER(p->nCol<1) ) return; pCol = &p->aCol[p->nCol-1]; assert( pCol->zType==0 || CORRUPT_DB ); sqlite3DbFree(pParse->db, pCol->zType); pCol->zType = sqlite3NameFromToken(pParse->db, pType); pCol->affinity = sqlite3AffinityType(pCol->zType, &pCol->szEst); } /* ** The expression is the default value for the most recently added column ** of the table currently under construction. ** ** Default value expressions must be constant. Raise an exception if this ** is not the case. ** ** This routine is called by the parser while in the middle of ** parsing a CREATE TABLE statement. */ SQLITE_PRIVATE void sqlite3AddDefaultValue(Parse *pParse, ExprSpan *pSpan){ Table *p; Column *pCol; sqlite3 *db = pParse->db; p = pParse->pNewTable; if( p!=0 ){ pCol = &(p->aCol[p->nCol-1]); if( !sqlite3ExprIsConstantOrFunction(pSpan->pExpr, db->init.busy) ){ sqlite3ErrorMsg(pParse, "default value of column [%s] is not constant", pCol->zName); }else{ /* A copy of pExpr is used instead of the original, as pExpr contains ** tokens that point to volatile memory. The 'span' of the expression ** is required by pragma table_info. */ sqlite3ExprDelete(db, pCol->pDflt); pCol->pDflt = sqlite3ExprDup(db, pSpan->pExpr, EXPRDUP_REDUCE); sqlite3DbFree(db, pCol->zDflt); pCol->zDflt = sqlite3DbStrNDup(db, (char*)pSpan->zStart, (int)(pSpan->zEnd - pSpan->zStart)); } } sqlite3ExprDelete(db, pSpan->pExpr); } /* ** Designate the PRIMARY KEY for the table. pList is a list of names ** of columns that form the primary key. If pList is NULL, then the ** most recently added column of the table is the primary key. ** ** A table can have at most one primary key. If the table already has ** a primary key (and this is the second primary key) then create an ** error. ** ** If the PRIMARY KEY is on a single column whose datatype is INTEGER, ** then we will try to use that column as the rowid. Set the Table.iPKey ** field of the table under construction to be the index of the ** INTEGER PRIMARY KEY column. Table.iPKey is set to -1 if there is ** no INTEGER PRIMARY KEY. ** ** If the key is not an INTEGER PRIMARY KEY, then create a unique ** index for the key. No index is created for INTEGER PRIMARY KEYs. 
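** For example (illustrative): "CREATE TABLE t(a INTEGER PRIMARY KEY)"
** uses column "a" as the rowid and creates no separate index, whereas
** "CREATE TABLE t(a TEXT PRIMARY KEY)" creates an automatic unique
** index on "a".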
*/ SQLITE_PRIVATE void sqlite3AddPrimaryKey( Parse *pParse, /* Parsing context */ ExprList *pList, /* List of field names to be indexed */ int onError, /* What to do with a uniqueness conflict */ int autoInc, /* True if the AUTOINCREMENT keyword is present */ int sortOrder /* SQLITE_SO_ASC or SQLITE_SO_DESC */ ){ Table *pTab = pParse->pNewTable; char *zType = 0; int iCol = -1, i; int nTerm; if( pTab==0 || IN_DECLARE_VTAB ) goto primary_key_exit; if( pTab->tabFlags & TF_HasPrimaryKey ){ sqlite3ErrorMsg(pParse, "table \"%s\" has more than one primary key", pTab->zName); goto primary_key_exit; } pTab->tabFlags |= TF_HasPrimaryKey; if( pList==0 ){ iCol = pTab->nCol - 1; pTab->aCol[iCol].colFlags |= COLFLAG_PRIMKEY; zType = pTab->aCol[iCol].zType; nTerm = 1; }else{ nTerm = pList->nExpr; for(i=0; inCol; iCol++){ if( sqlite3StrICmp(pList->a[i].zName, pTab->aCol[iCol].zName)==0 ){ pTab->aCol[iCol].colFlags |= COLFLAG_PRIMKEY; zType = pTab->aCol[iCol].zType; break; } } } } if( nTerm==1 && zType && sqlite3StrICmp(zType, "INTEGER")==0 && sortOrder==SQLITE_SO_ASC ){ pTab->iPKey = iCol; pTab->keyConf = (u8)onError; assert( autoInc==0 || autoInc==1 ); pTab->tabFlags |= autoInc*TF_Autoincrement; if( pList ) pParse->iPkSortOrder = pList->a[0].sortOrder; }else if( autoInc ){ #ifndef SQLITE_OMIT_AUTOINCREMENT sqlite3ErrorMsg(pParse, "AUTOINCREMENT is only allowed on an " "INTEGER PRIMARY KEY"); #endif }else{ Index *p; p = sqlite3CreateIndex(pParse, 0, 0, 0, pList, onError, 0, 0, sortOrder, 0); if( p ){ p->idxType = SQLITE_IDXTYPE_PRIMARYKEY; } pList = 0; } primary_key_exit: sqlite3ExprListDelete(pParse->db, pList); return; } /* ** Add a new CHECK constraint to the table currently under construction. */ SQLITE_PRIVATE void sqlite3AddCheckConstraint( Parse *pParse, /* Parsing context */ Expr *pCheckExpr /* The check expression */ ){ #ifndef SQLITE_OMIT_CHECK Table *pTab = pParse->pNewTable; sqlite3 *db = pParse->db; if( pTab && !IN_DECLARE_VTAB && !sqlite3BtreeIsReadonly(db->aDb[db->init.iDb].pBt) ){ pTab->pCheck = sqlite3ExprListAppend(pParse, pTab->pCheck, pCheckExpr); if( pParse->constraintName.n ){ sqlite3ExprListSetName(pParse, pTab->pCheck, &pParse->constraintName, 1); } }else #endif { sqlite3ExprDelete(pParse->db, pCheckExpr); } } /* ** Set the collation function of the most recently parsed table column ** to the CollSeq given. */ SQLITE_PRIVATE void sqlite3AddCollateType(Parse *pParse, Token *pToken){ Table *p; int i; char *zColl; /* Dequoted name of collation sequence */ sqlite3 *db; if( (p = pParse->pNewTable)==0 ) return; i = p->nCol-1; db = pParse->db; zColl = sqlite3NameFromToken(db, pToken); if( !zColl ) return; if( sqlite3LocateCollSeq(pParse, zColl) ){ Index *pIdx; sqlite3DbFree(db, p->aCol[i].zColl); p->aCol[i].zColl = zColl; /* If the column is declared as " PRIMARY KEY COLLATE ", ** then an index may have been created on this column before the ** collation type was added. Correct this if it is the case. */ for(pIdx=p->pIndex; pIdx; pIdx=pIdx->pNext){ assert( pIdx->nKeyCol==1 ); if( pIdx->aiColumn[0]==i ){ pIdx->azColl[0] = p->aCol[i].zColl; } } }else{ sqlite3DbFree(db, zColl); } } /* ** This function returns the collation sequence for database native text ** encoding identified by the string zName, length nName. ** ** If the requested collation sequence is not available, or not available ** in the database native encoding, the collation factory is invoked to ** request it. 
If the collation factory does not supply such a sequence, ** and the sequence is available in another text encoding, then that is ** returned instead. ** ** If no versions of the requested collations sequence are available, or ** another error occurs, NULL is returned and an error message written into ** pParse. ** ** This routine is a wrapper around sqlite3FindCollSeq(). This routine ** invokes the collation factory if the named collation cannot be found ** and generates an error message. ** ** See also: sqlite3FindCollSeq(), sqlite3GetCollSeq() */ SQLITE_PRIVATE CollSeq *sqlite3LocateCollSeq(Parse *pParse, const char *zName){ sqlite3 *db = pParse->db; u8 enc = ENC(db); u8 initbusy = db->init.busy; CollSeq *pColl; pColl = sqlite3FindCollSeq(db, enc, zName, initbusy); if( !initbusy && (!pColl || !pColl->xCmp) ){ pColl = sqlite3GetCollSeq(pParse, enc, pColl, zName); } return pColl; } /* ** Generate code that will increment the schema cookie. ** ** The schema cookie is used to determine when the schema for the ** database changes. After each schema change, the cookie value ** changes. When a process first reads the schema it records the ** cookie. Thereafter, whenever it goes to access the database, ** it checks the cookie to make sure the schema has not changed ** since it was last read. ** ** This plan is not completely bullet-proof. It is possible for ** the schema to change multiple times and for the cookie to be ** set back to prior value. But schema changes are infrequent ** and the probability of hitting the same cookie value is only ** 1 chance in 2^32. So we're safe enough. */ SQLITE_PRIVATE void sqlite3ChangeCookie(Parse *pParse, int iDb){ int r1 = sqlite3GetTempReg(pParse); sqlite3 *db = pParse->db; Vdbe *v = pParse->pVdbe; assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); sqlite3VdbeAddOp2(v, OP_Integer, db->aDb[iDb].pSchema->schema_cookie+1, r1); sqlite3VdbeAddOp3(v, OP_SetCookie, iDb, BTREE_SCHEMA_VERSION, r1); sqlite3ReleaseTempReg(pParse, r1); } /* ** Measure the number of characters needed to output the given ** identifier. The number returned includes any quotes used ** but does not include the null terminator. ** ** The estimate is conservative. It might be larger that what is ** really needed. */ static int identLength(const char *z){ int n; for(n=0; *z; n++, z++){ if( *z=='"' ){ n++; } } return n + 2; } /* ** The first parameter is a pointer to an output buffer. The second ** parameter is a pointer to an integer that contains the offset at ** which to write into the output buffer. This function copies the ** nul-terminated string pointed to by the third parameter, zSignedIdent, ** to the specified offset in the buffer and updates *pIdx to refer ** to the first byte after the last byte written before returning. ** ** If the string zSignedIdent consists entirely of alpha-numeric ** characters, does not begin with a digit and is not an SQL keyword, ** then it is copied to the output buffer exactly as it is. Otherwise, ** it is quoted using double-quotes. 
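** For example (illustrative): an identifier such as abc1 is copied
** verbatim, while "select" (an SQL keyword), "1st" (leading digit) and
** "two words" (non-alphanumeric character) are written inside
** double-quotes, and any embedded double-quote character is doubled.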
*/ static void identPut(char *z, int *pIdx, char *zSignedIdent){ unsigned char *zIdent = (unsigned char*)zSignedIdent; int i, j, needQuote; i = *pIdx; for(j=0; zIdent[j]; j++){ if( !sqlite3Isalnum(zIdent[j]) && zIdent[j]!='_' ) break; } needQuote = sqlite3Isdigit(zIdent[0]) || sqlite3KeywordCode(zIdent, j)!=TK_ID || zIdent[j]!=0 || j==0; if( needQuote ) z[i++] = '"'; for(j=0; zIdent[j]; j++){ z[i++] = zIdent[j]; if( zIdent[j]=='"' ) z[i++] = '"'; } if( needQuote ) z[i++] = '"'; z[i] = 0; *pIdx = i; } /* ** Generate a CREATE TABLE statement appropriate for the given ** table. Memory to hold the text of the statement is obtained ** from sqliteMalloc() and must be freed by the calling function. */ static char *createTableStmt(sqlite3 *db, Table *p){ int i, k, n; char *zStmt; char *zSep, *zSep2, *zEnd; Column *pCol; n = 0; for(pCol = p->aCol, i=0; inCol; i++, pCol++){ n += identLength(pCol->zName) + 5; } n += identLength(p->zName); if( n<50 ){ zSep = ""; zSep2 = ","; zEnd = ")"; }else{ zSep = "\n "; zSep2 = ",\n "; zEnd = "\n)"; } n += 35 + 6*p->nCol; zStmt = sqlite3DbMallocRaw(0, n); if( zStmt==0 ){ db->mallocFailed = 1; return 0; } sqlite3_snprintf(n, zStmt, "CREATE TABLE "); k = sqlite3Strlen30(zStmt); identPut(zStmt, &k, p->zName); zStmt[k++] = '('; for(pCol=p->aCol, i=0; inCol; i++, pCol++){ static const char * const azType[] = { /* SQLITE_AFF_BLOB */ "", /* SQLITE_AFF_TEXT */ " TEXT", /* SQLITE_AFF_NUMERIC */ " NUM", /* SQLITE_AFF_INTEGER */ " INT", /* SQLITE_AFF_REAL */ " REAL" }; int len; const char *zType; sqlite3_snprintf(n-k, &zStmt[k], zSep); k += sqlite3Strlen30(&zStmt[k]); zSep = zSep2; identPut(zStmt, &k, pCol->zName); assert( pCol->affinity-SQLITE_AFF_BLOB >= 0 ); assert( pCol->affinity-SQLITE_AFF_BLOB < ArraySize(azType) ); testcase( pCol->affinity==SQLITE_AFF_BLOB ); testcase( pCol->affinity==SQLITE_AFF_TEXT ); testcase( pCol->affinity==SQLITE_AFF_NUMERIC ); testcase( pCol->affinity==SQLITE_AFF_INTEGER ); testcase( pCol->affinity==SQLITE_AFF_REAL ); zType = azType[pCol->affinity - SQLITE_AFF_BLOB]; len = sqlite3Strlen30(zType); assert( pCol->affinity==SQLITE_AFF_BLOB || pCol->affinity==sqlite3AffinityType(zType, 0) ); memcpy(&zStmt[k], zType, len); k += len; assert( k<=n ); } sqlite3_snprintf(n-k, &zStmt[k], "%s", zEnd); return zStmt; } /* ** Resize an Index object to hold N columns total. Return SQLITE_OK ** on success and SQLITE_NOMEM on an OOM error. */ static int resizeIndexObject(sqlite3 *db, Index *pIdx, int N){ char *zExtra; int nByte; if( pIdx->nColumn>=N ) return SQLITE_OK; assert( pIdx->isResized==0 ); nByte = (sizeof(char*) + sizeof(i16) + 1)*N; zExtra = sqlite3DbMallocZero(db, nByte); if( zExtra==0 ) return SQLITE_NOMEM; memcpy(zExtra, pIdx->azColl, sizeof(char*)*pIdx->nColumn); pIdx->azColl = (char**)zExtra; zExtra += sizeof(char*)*N; memcpy(zExtra, pIdx->aiColumn, sizeof(i16)*pIdx->nColumn); pIdx->aiColumn = (i16*)zExtra; zExtra += sizeof(i16)*N; memcpy(zExtra, pIdx->aSortOrder, pIdx->nColumn); pIdx->aSortOrder = (u8*)zExtra; pIdx->nColumn = N; pIdx->isResized = 1; return SQLITE_OK; } /* ** Estimate the total row width for a table. */ static void estimateTableWidth(Table *pTab){ unsigned wTable = 0; const Column *pTabCol; int i; for(i=pTab->nCol, pTabCol=pTab->aCol; i>0; i--, pTabCol++){ wTable += pTabCol->szEst; } if( pTab->iPKey<0 ) wTable++; pTab->szTabRow = sqlite3LogEst(wTable*4); } /* ** Estimate the average size of a row for an index. 
*/ static void estimateIndexWidth(Index *pIdx){ unsigned wIndex = 0; int i; const Column *aCol = pIdx->pTable->aCol; for(i=0; inColumn; i++){ i16 x = pIdx->aiColumn[i]; assert( xpTable->nCol ); wIndex += x<0 ? 1 : aCol[pIdx->aiColumn[i]].szEst; } pIdx->szIdxRow = sqlite3LogEst(wIndex*4); } /* Return true if value x is found any of the first nCol entries of aiCol[] */ static int hasColumn(const i16 *aiCol, int nCol, int x){ while( nCol-- > 0 ) if( x==*(aiCol++) ) return 1; return 0; } /* ** This routine runs at the end of parsing a CREATE TABLE statement that ** has a WITHOUT ROWID clause. The job of this routine is to convert both ** internal schema data structures and the generated VDBE code so that they ** are appropriate for a WITHOUT ROWID table instead of a rowid table. ** Changes include: ** ** (1) Convert the OP_CreateTable into an OP_CreateIndex. There is ** no rowid btree for a WITHOUT ROWID. Instead, the canonical ** data storage is a covering index btree. ** (2) Bypass the creation of the sqlite_master table entry ** for the PRIMARY KEY as the primary key index is now ** identified by the sqlite_master table entry of the table itself. ** (3) Set the Index.tnum of the PRIMARY KEY Index object in the ** schema to the rootpage from the main table. ** (4) Set all columns of the PRIMARY KEY schema object to be NOT NULL. ** (5) Add all table columns to the PRIMARY KEY Index object ** so that the PRIMARY KEY is a covering index. The surplus ** columns are part of KeyInfo.nXField and are not used for ** sorting or lookup or uniqueness checks. ** (6) Replace the rowid tail on all automatically generated UNIQUE ** indices with the PRIMARY KEY columns. */ static void convertToWithoutRowidTable(Parse *pParse, Table *pTab){ Index *pIdx; Index *pPk; int nPk; int i, j; sqlite3 *db = pParse->db; Vdbe *v = pParse->pVdbe; /* Convert the OP_CreateTable opcode that would normally create the ** root-page for the table into an OP_CreateIndex opcode. The index ** created will become the PRIMARY KEY index. */ if( pParse->addrCrTab ){ assert( v ); sqlite3VdbeGetOp(v, pParse->addrCrTab)->opcode = OP_CreateIndex; } /* Locate the PRIMARY KEY index. Or, if this table was originally ** an INTEGER PRIMARY KEY table, create a new PRIMARY KEY index. */ if( pTab->iPKey>=0 ){ ExprList *pList; pList = sqlite3ExprListAppend(pParse, 0, 0); if( pList==0 ) return; pList->a[0].zName = sqlite3DbStrDup(pParse->db, pTab->aCol[pTab->iPKey].zName); pList->a[0].sortOrder = pParse->iPkSortOrder; assert( pParse->pNewTable==pTab ); pPk = sqlite3CreateIndex(pParse, 0, 0, 0, pList, pTab->keyConf, 0, 0, 0, 0); if( pPk==0 ) return; pPk->idxType = SQLITE_IDXTYPE_PRIMARYKEY; pTab->iPKey = -1; }else{ pPk = sqlite3PrimaryKeyIndex(pTab); /* Bypass the creation of the PRIMARY KEY btree and the sqlite_master ** table entry. This is only required if currently generating VDBE ** code for a CREATE TABLE (not when parsing one as part of reading ** a database schema). */ if( v ){ assert( db->init.busy==0 ); sqlite3VdbeGetOp(v, pPk->tnum)->opcode = OP_Goto; } /* ** Remove all redundant columns from the PRIMARY KEY. For example, change ** "PRIMARY KEY(a,b,a,b,c,b,c,d)" into just "PRIMARY KEY(a,b,c,d)". Later ** code assumes the PRIMARY KEY contains no repeated columns. 
*/ for(i=j=1; inKeyCol; i++){ if( hasColumn(pPk->aiColumn, j, pPk->aiColumn[i]) ){ pPk->nColumn--; }else{ pPk->aiColumn[j++] = pPk->aiColumn[i]; } } pPk->nKeyCol = j; } pPk->isCovering = 1; assert( pPk!=0 ); nPk = pPk->nKeyCol; /* Make sure every column of the PRIMARY KEY is NOT NULL. (Except, ** do not enforce this for imposter tables.) */ if( !db->init.imposterTable ){ for(i=0; iaCol[pPk->aiColumn[i]].notNull = 1; } pPk->uniqNotNull = 1; } /* The root page of the PRIMARY KEY is the table root page */ pPk->tnum = pTab->tnum; /* Update the in-memory representation of all UNIQUE indices by converting ** the final rowid column into one or more columns of the PRIMARY KEY. */ for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ int n; if( IsPrimaryKeyIndex(pIdx) ) continue; for(i=n=0; iaiColumn, pIdx->nKeyCol, pPk->aiColumn[i]) ) n++; } if( n==0 ){ /* This index is a superset of the primary key */ pIdx->nColumn = pIdx->nKeyCol; continue; } if( resizeIndexObject(db, pIdx, pIdx->nKeyCol+n) ) return; for(i=0, j=pIdx->nKeyCol; iaiColumn, pIdx->nKeyCol, pPk->aiColumn[i]) ){ pIdx->aiColumn[j] = pPk->aiColumn[i]; pIdx->azColl[j] = pPk->azColl[i]; j++; } } assert( pIdx->nColumn>=pIdx->nKeyCol+n ); assert( pIdx->nColumn>=j ); } /* Add all table columns to the PRIMARY KEY index */ if( nPknCol ){ if( resizeIndexObject(db, pPk, pTab->nCol) ) return; for(i=0, j=nPk; inCol; i++){ if( !hasColumn(pPk->aiColumn, j, i) ){ assert( jnColumn ); pPk->aiColumn[j] = i; pPk->azColl[j] = "BINARY"; j++; } } assert( pPk->nColumn==j ); assert( pTab->nCol==j ); }else{ pPk->nColumn = pTab->nCol; } } /* ** This routine is called to report the final ")" that terminates ** a CREATE TABLE statement. ** ** The table structure that other action routines have been building ** is added to the internal hash tables, assuming no errors have ** occurred. ** ** An entry for the table is made in the master table on disk, unless ** this is a temporary table or db->init.busy==1. When db->init.busy==1 ** it means we are reading the sqlite_master table because we just ** connected to the database or because the sqlite_master table has ** recently changed, so the entry for this table already exists in ** the sqlite_master table. We do not want to create it again. ** ** If the pSelect argument is not NULL, it means that this routine ** was called to create a table generated from a ** "CREATE TABLE ... AS SELECT ..." statement. The column names of ** the new table will match the result set of the SELECT. */ SQLITE_PRIVATE void sqlite3EndTable( Parse *pParse, /* Parse context */ Token *pCons, /* The ',' token after the last column defn. */ Token *pEnd, /* The ')' before options in the CREATE TABLE */ u8 tabOpts, /* Extra table options. Usually 0. */ Select *pSelect /* Select from a "CREATE ... AS SELECT" */ ){ Table *p; /* The new table */ sqlite3 *db = pParse->db; /* The database connection */ int iDb; /* Database in which the table lives */ Index *pIdx; /* An implied index of the table */ if( (pEnd==0 && pSelect==0) || db->mallocFailed ){ return; } p = pParse->pNewTable; if( p==0 ) return; assert( !db->init.busy || !pSelect ); /* If the db->init.busy is 1 it means we are reading the SQL off the ** "sqlite_master" or "sqlite_temp_master" table on the disk. ** So do not write to the disk again. Extract the root page number ** for the table from the db->init.newTnum field. (The page number ** should have been put there by the sqliteOpenCb routine.) 
*/ if( db->init.busy ){ p->tnum = db->init.newTnum; } /* Special processing for WITHOUT ROWID Tables */ if( tabOpts & TF_WithoutRowid ){ if( (p->tabFlags & TF_Autoincrement) ){ sqlite3ErrorMsg(pParse, "AUTOINCREMENT not allowed on WITHOUT ROWID tables"); return; } if( (p->tabFlags & TF_HasPrimaryKey)==0 ){ sqlite3ErrorMsg(pParse, "PRIMARY KEY missing on table %s", p->zName); }else{ p->tabFlags |= TF_WithoutRowid | TF_NoVisibleRowid; convertToWithoutRowidTable(pParse, p); } } iDb = sqlite3SchemaToIndex(db, p->pSchema); #ifndef SQLITE_OMIT_CHECK /* Resolve names in all CHECK constraint expressions. */ if( p->pCheck ){ sqlite3ResolveSelfReference(pParse, p, NC_IsCheck, 0, p->pCheck); } #endif /* !defined(SQLITE_OMIT_CHECK) */ /* Estimate the average row size for the table and for all implied indices */ estimateTableWidth(p); for(pIdx=p->pIndex; pIdx; pIdx=pIdx->pNext){ estimateIndexWidth(pIdx); } /* If not initializing, then create a record for the new table ** in the SQLITE_MASTER table of the database. ** ** If this is a TEMPORARY table, write the entry into the auxiliary ** file instead of into the main database file. */ if( !db->init.busy ){ int n; Vdbe *v; char *zType; /* "view" or "table" */ char *zType2; /* "VIEW" or "TABLE" */ char *zStmt; /* Text of the CREATE TABLE or CREATE VIEW statement */ v = sqlite3GetVdbe(pParse); if( NEVER(v==0) ) return; sqlite3VdbeAddOp1(v, OP_Close, 0); /* ** Initialize zType for the new view or table. */ if( p->pSelect==0 ){ /* A regular table */ zType = "table"; zType2 = "TABLE"; #ifndef SQLITE_OMIT_VIEW }else{ /* A view */ zType = "view"; zType2 = "VIEW"; #endif } /* If this is a CREATE TABLE xx AS SELECT ..., execute the SELECT ** statement to populate the new table. The root-page number for the ** new table is in register pParse->regRoot. ** ** Once the SELECT has been coded by sqlite3Select(), it is in a ** suitable state to query for the column names and types to be used ** by the new table. ** ** A shared-cache write-lock is not required to write to the new table, ** as a schema-lock must have already been obtained to create it. Since ** a schema-lock excludes all other database users, the write-lock would ** be redundant. 
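** In outline (an illustrative summary of the code below): the SELECT is
** coded as a co-routine; an insert loop then repeatedly yields to it,
** builds a record from the returned result registers, allocates a new
** rowid, and writes the row into the just-created table, closing the
** write cursor once the co-routine has finished.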
*/ if( pSelect ){ SelectDest dest; /* Where the SELECT should store results */ int regYield; /* Register holding co-routine entry-point */ int addrTop; /* Top of the co-routine */ int regRec; /* A record to be insert into the new table */ int regRowid; /* Rowid of the next row to insert */ int addrInsLoop; /* Top of the loop for inserting rows */ Table *pSelTab; /* A table that describes the SELECT results */ regYield = ++pParse->nMem; regRec = ++pParse->nMem; regRowid = ++pParse->nMem; assert(pParse->nTab==1); sqlite3MayAbort(pParse); sqlite3VdbeAddOp3(v, OP_OpenWrite, 1, pParse->regRoot, iDb); sqlite3VdbeChangeP5(v, OPFLAG_P2ISREG); pParse->nTab = 2; addrTop = sqlite3VdbeCurrentAddr(v) + 1; sqlite3VdbeAddOp3(v, OP_InitCoroutine, regYield, 0, addrTop); sqlite3SelectDestInit(&dest, SRT_Coroutine, regYield); sqlite3Select(pParse, pSelect, &dest); sqlite3VdbeAddOp1(v, OP_EndCoroutine, regYield); sqlite3VdbeJumpHere(v, addrTop - 1); if( pParse->nErr ) return; pSelTab = sqlite3ResultSetOfSelect(pParse, pSelect); if( pSelTab==0 ) return; assert( p->aCol==0 ); p->nCol = pSelTab->nCol; p->aCol = pSelTab->aCol; pSelTab->nCol = 0; pSelTab->aCol = 0; sqlite3DeleteTable(db, pSelTab); addrInsLoop = sqlite3VdbeAddOp1(v, OP_Yield, dest.iSDParm); VdbeCoverage(v); sqlite3VdbeAddOp3(v, OP_MakeRecord, dest.iSdst, dest.nSdst, regRec); sqlite3TableAffinity(v, p, 0); sqlite3VdbeAddOp2(v, OP_NewRowid, 1, regRowid); sqlite3VdbeAddOp3(v, OP_Insert, 1, regRec, regRowid); sqlite3VdbeAddOp2(v, OP_Goto, 0, addrInsLoop); sqlite3VdbeJumpHere(v, addrInsLoop); sqlite3VdbeAddOp1(v, OP_Close, 1); } /* Compute the complete text of the CREATE statement */ if( pSelect ){ zStmt = createTableStmt(db, p); }else{ Token *pEnd2 = tabOpts ? &pParse->sLastToken : pEnd; n = (int)(pEnd2->z - pParse->sNameToken.z); if( pEnd2->z[0]!=';' ) n += pEnd2->n; zStmt = sqlite3MPrintf(db, "CREATE %s %.*s", zType2, n, pParse->sNameToken.z ); } /* A slot for the record has already been allocated in the ** SQLITE_MASTER table. We just need to update that slot with all ** the information we've collected. */ sqlite3NestedParse(pParse, "UPDATE %Q.%s " "SET type='%s', name=%Q, tbl_name=%Q, rootpage=#%d, sql=%Q " "WHERE rowid=#%d", db->aDb[iDb].zName, SCHEMA_TABLE(iDb), zType, p->zName, p->zName, pParse->regRoot, zStmt, pParse->regRowid ); sqlite3DbFree(db, zStmt); sqlite3ChangeCookie(pParse, iDb); #ifndef SQLITE_OMIT_AUTOINCREMENT /* Check to see if we need to create an sqlite_sequence table for ** keeping track of autoincrement keys. */ if( p->tabFlags & TF_Autoincrement ){ Db *pDb = &db->aDb[iDb]; assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); if( pDb->pSchema->pSeqTab==0 ){ sqlite3NestedParse(pParse, "CREATE TABLE %Q.sqlite_sequence(name,seq)", pDb->zName ); } } #endif /* Reparse everything to update our internal data structures */ sqlite3VdbeAddParseSchemaOp(v, iDb, sqlite3MPrintf(db, "tbl_name='%q' AND type!='trigger'", p->zName)); } /* Add the table to the in-memory representation of the database. 
*/ if( db->init.busy ){ Table *pOld; Schema *pSchema = p->pSchema; assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); pOld = sqlite3HashInsert(&pSchema->tblHash, p->zName, p); if( pOld ){ assert( p==pOld ); /* Malloc must have failed inside HashInsert() */ db->mallocFailed = 1; return; } pParse->pNewTable = 0; db->flags |= SQLITE_InternChanges; #ifndef SQLITE_OMIT_ALTERTABLE if( !p->pSelect ){ const char *zName = (const char *)pParse->sNameToken.z; int nName; assert( !pSelect && pCons && pEnd ); if( pCons->z==0 ){ pCons = pEnd; } nName = (int)((const char *)pCons->z - zName); p->addColOffset = 13 + sqlite3Utf8CharLen(zName, nName); } #endif } } #ifndef SQLITE_OMIT_VIEW /* ** The parser calls this routine in order to create a new VIEW */ SQLITE_PRIVATE void sqlite3CreateView( Parse *pParse, /* The parsing context */ Token *pBegin, /* The CREATE token that begins the statement */ Token *pName1, /* The token that holds the name of the view */ Token *pName2, /* The token that holds the name of the view */ Select *pSelect, /* A SELECT statement that will become the new view */ int isTemp, /* TRUE for a TEMPORARY view */ int noErr /* Suppress error messages if VIEW already exists */ ){ Table *p; int n; const char *z; Token sEnd; DbFixer sFix; Token *pName = 0; int iDb; sqlite3 *db = pParse->db; if( pParse->nVar>0 ){ sqlite3ErrorMsg(pParse, "parameters are not allowed in views"); sqlite3SelectDelete(db, pSelect); return; } sqlite3StartTable(pParse, pName1, pName2, isTemp, 1, 0, noErr); p = pParse->pNewTable; if( p==0 || pParse->nErr ){ sqlite3SelectDelete(db, pSelect); return; } sqlite3TwoPartName(pParse, pName1, pName2, &pName); iDb = sqlite3SchemaToIndex(db, p->pSchema); sqlite3FixInit(&sFix, pParse, iDb, "view", pName); if( sqlite3FixSelect(&sFix, pSelect) ){ sqlite3SelectDelete(db, pSelect); return; } /* Make a copy of the entire SELECT statement that defines the view. ** This will force all the Expr.token.z values to be dynamically ** allocated rather than point to the input string - which means that ** they will persist after the current sqlite3_exec() call returns. */ p->pSelect = sqlite3SelectDup(db, pSelect, EXPRDUP_REDUCE); sqlite3SelectDelete(db, pSelect); if( db->mallocFailed ){ return; } if( !db->init.busy ){ sqlite3ViewGetColumnNames(pParse, p); } /* Locate the end of the CREATE VIEW statement. Make sEnd point to ** the end. */ sEnd = pParse->sLastToken; if( ALWAYS(sEnd.z[0]!=0) && sEnd.z[0]!=';' ){ sEnd.z += sEnd.n; } sEnd.n = 0; n = (int)(sEnd.z - pBegin->z); z = pBegin->z; while( ALWAYS(n>0) && sqlite3Isspace(z[n-1]) ){ n--; } sEnd.z = &z[n-1]; sEnd.n = 1; /* Use sqlite3EndTable() to add the view to the SQLITE_MASTER table */ sqlite3EndTable(pParse, 0, &sEnd, 0, 0); return; } #endif /* SQLITE_OMIT_VIEW */ #if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_VIRTUALTABLE) /* ** The Table structure pTable is really a VIEW. Fill in the names of ** the columns of the view in the pTable structure. Return the number ** of errors. If an error is seen leave an error message in pParse->zErrMsg. 
*/ SQLITE_PRIVATE int sqlite3ViewGetColumnNames(Parse *pParse, Table *pTable){ Table *pSelTab; /* A fake table from which we get the result set */ Select *pSel; /* Copy of the SELECT that implements the view */ int nErr = 0; /* Number of errors encountered */ int n; /* Temporarily holds the number of cursors assigned */ sqlite3 *db = pParse->db; /* Database connection for malloc errors */ sqlite3_xauth xAuth; /* Saved xAuth pointer */ assert( pTable ); #ifndef SQLITE_OMIT_VIRTUALTABLE if( sqlite3VtabCallConnect(pParse, pTable) ){ return SQLITE_ERROR; } if( IsVirtual(pTable) ) return 0; #endif #ifndef SQLITE_OMIT_VIEW /* A positive nCol means the columns names for this view are ** already known. */ if( pTable->nCol>0 ) return 0; /* A negative nCol is a special marker meaning that we are currently ** trying to compute the column names. If we enter this routine with ** a negative nCol, it means two or more views form a loop, like this: ** ** CREATE VIEW one AS SELECT * FROM two; ** CREATE VIEW two AS SELECT * FROM one; ** ** Actually, the error above is now caught prior to reaching this point. ** But the following test is still important as it does come up ** in the following: ** ** CREATE TABLE main.ex1(a); ** CREATE TEMP VIEW ex1 AS SELECT a FROM ex1; ** SELECT * FROM temp.ex1; */ if( pTable->nCol<0 ){ sqlite3ErrorMsg(pParse, "view %s is circularly defined", pTable->zName); return 1; } assert( pTable->nCol>=0 ); /* If we get this far, it means we need to compute the table names. ** Note that the call to sqlite3ResultSetOfSelect() will expand any ** "*" elements in the results set of the view and will assign cursors ** to the elements of the FROM clause. But we do not want these changes ** to be permanent. So the computation is done on a copy of the SELECT ** statement that defines the view. */ assert( pTable->pSelect ); pSel = sqlite3SelectDup(db, pTable->pSelect, 0); if( pSel ){ u8 enableLookaside = db->lookaside.bEnabled; n = pParse->nTab; sqlite3SrcListAssignCursors(pParse, pSel->pSrc); pTable->nCol = -1; db->lookaside.bEnabled = 0; #ifndef SQLITE_OMIT_AUTHORIZATION xAuth = db->xAuth; db->xAuth = 0; pSelTab = sqlite3ResultSetOfSelect(pParse, pSel); db->xAuth = xAuth; #else pSelTab = sqlite3ResultSetOfSelect(pParse, pSel); #endif db->lookaside.bEnabled = enableLookaside; pParse->nTab = n; if( pSelTab ){ assert( pTable->aCol==0 ); pTable->nCol = pSelTab->nCol; pTable->aCol = pSelTab->aCol; pSelTab->nCol = 0; pSelTab->aCol = 0; sqlite3DeleteTable(db, pSelTab); assert( sqlite3SchemaMutexHeld(db, 0, pTable->pSchema) ); pTable->pSchema->schemaFlags |= DB_UnresetViews; }else{ pTable->nCol = 0; nErr++; } sqlite3SelectDelete(db, pSel); } else { nErr++; } #endif /* SQLITE_OMIT_VIEW */ return nErr; } #endif /* !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_VIRTUALTABLE) */ #ifndef SQLITE_OMIT_VIEW /* ** Clear the column names from every VIEW in database idx. */ static void sqliteViewResetAll(sqlite3 *db, int idx){ HashElem *i; assert( sqlite3SchemaMutexHeld(db, idx, 0) ); if( !DbHasProperty(db, idx, DB_UnresetViews) ) return; for(i=sqliteHashFirst(&db->aDb[idx].pSchema->tblHash); i;i=sqliteHashNext(i)){ Table *pTab = sqliteHashData(i); if( pTab->pSelect ){ sqliteDeleteColumnNames(db, pTab); pTab->aCol = 0; pTab->nCol = 0; } } DbClearProperty(db, idx, DB_UnresetViews); } #else # define sqliteViewResetAll(A,B) #endif /* SQLITE_OMIT_VIEW */ /* ** This function is called by the VDBE to adjust the internal schema ** used by SQLite when the btree layer moves a table root page. 
The ** root-page of a table or index in database iDb has changed from iFrom ** to iTo. ** ** Ticket #1728: The symbol table might still contain information ** on tables and/or indices that are the process of being deleted. ** If you are unlucky, one of those deleted indices or tables might ** have the same rootpage number as the real table or index that is ** being moved. So we cannot stop searching after the first match ** because the first match might be for one of the deleted indices ** or tables and not the table/index that is actually being moved. ** We must continue looping until all tables and indices with ** rootpage==iFrom have been converted to have a rootpage of iTo ** in order to be certain that we got the right one. */ #ifndef SQLITE_OMIT_AUTOVACUUM SQLITE_PRIVATE void sqlite3RootPageMoved(sqlite3 *db, int iDb, int iFrom, int iTo){ HashElem *pElem; Hash *pHash; Db *pDb; assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); pDb = &db->aDb[iDb]; pHash = &pDb->pSchema->tblHash; for(pElem=sqliteHashFirst(pHash); pElem; pElem=sqliteHashNext(pElem)){ Table *pTab = sqliteHashData(pElem); if( pTab->tnum==iFrom ){ pTab->tnum = iTo; } } pHash = &pDb->pSchema->idxHash; for(pElem=sqliteHashFirst(pHash); pElem; pElem=sqliteHashNext(pElem)){ Index *pIdx = sqliteHashData(pElem); if( pIdx->tnum==iFrom ){ pIdx->tnum = iTo; } } } #endif /* ** Write code to erase the table with root-page iTable from database iDb. ** Also write code to modify the sqlite_master table and internal schema ** if a root-page of another table is moved by the btree-layer whilst ** erasing iTable (this can happen with an auto-vacuum database). */ static void destroyRootPage(Parse *pParse, int iTable, int iDb){ Vdbe *v = sqlite3GetVdbe(pParse); int r1 = sqlite3GetTempReg(pParse); sqlite3VdbeAddOp3(v, OP_Destroy, iTable, r1, iDb); sqlite3MayAbort(pParse); #ifndef SQLITE_OMIT_AUTOVACUUM /* OP_Destroy stores an in integer r1. If this integer ** is non-zero, then it is the root page number of a table moved to ** location iTable. The following code modifies the sqlite_master table to ** reflect this. ** ** The "#NNN" in the SQL is a special constant that means whatever value ** is in register NNN. See grammar rules associated with the TK_REGISTER ** token for additional information. */ sqlite3NestedParse(pParse, "UPDATE %Q.%s SET rootpage=%d WHERE #%d AND rootpage=#%d", pParse->db->aDb[iDb].zName, SCHEMA_TABLE(iDb), iTable, r1, r1); #endif sqlite3ReleaseTempReg(pParse, r1); } /* ** Write VDBE code to erase table pTab and all associated indices on disk. ** Code to update the sqlite_master tables and internal schema definitions ** in case a root-page belonging to another table is moved by the btree layer ** is also added (this can happen with an auto-vacuum database). */ static void destroyTable(Parse *pParse, Table *pTab){ #ifdef SQLITE_OMIT_AUTOVACUUM Index *pIdx; int iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema); destroyRootPage(pParse, pTab->tnum, iDb); for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ destroyRootPage(pParse, pIdx->tnum, iDb); } #else /* If the database may be auto-vacuum capable (if SQLITE_OMIT_AUTOVACUUM ** is not defined), then it is important to call OP_Destroy on the ** table and index root-pages in order, starting with the numerically ** largest root-page number. This guarantees that none of the root-pages ** to be destroyed is relocated by an earlier OP_Destroy. i.e. if the ** following were coded: ** ** OP_Destroy 4 0 ** ... 
** OP_Destroy 5 0 ** ** and root page 5 happened to be the largest root-page number in the ** database, then root page 5 would be moved to page 4 by the ** "OP_Destroy 4 0" opcode. The subsequent "OP_Destroy 5 0" would hit ** a free-list page. */ int iTab = pTab->tnum; int iDestroyed = 0; while( 1 ){ Index *pIdx; int iLargest = 0; if( iDestroyed==0 || iTabpIndex; pIdx; pIdx=pIdx->pNext){ int iIdx = pIdx->tnum; assert( pIdx->pSchema==pTab->pSchema ); if( (iDestroyed==0 || (iIdxiLargest ){ iLargest = iIdx; } } if( iLargest==0 ){ return; }else{ int iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema); assert( iDb>=0 && iDbdb->nDb ); destroyRootPage(pParse, iLargest, iDb); iDestroyed = iLargest; } } #endif } /* ** Remove entries from the sqlite_statN tables (for N in (1,2,3)) ** after a DROP INDEX or DROP TABLE command. */ static void sqlite3ClearStatTables( Parse *pParse, /* The parsing context */ int iDb, /* The database number */ const char *zType, /* "idx" or "tbl" */ const char *zName /* Name of index or table */ ){ int i; const char *zDbName = pParse->db->aDb[iDb].zName; for(i=1; i<=4; i++){ char zTab[24]; sqlite3_snprintf(sizeof(zTab),zTab,"sqlite_stat%d",i); if( sqlite3FindTable(pParse->db, zTab, zDbName) ){ sqlite3NestedParse(pParse, "DELETE FROM %Q.%s WHERE %s=%Q", zDbName, zTab, zType, zName ); } } } /* ** Generate code to drop a table. */ SQLITE_PRIVATE void sqlite3CodeDropTable(Parse *pParse, Table *pTab, int iDb, int isView){ Vdbe *v; sqlite3 *db = pParse->db; Trigger *pTrigger; Db *pDb = &db->aDb[iDb]; v = sqlite3GetVdbe(pParse); assert( v!=0 ); sqlite3BeginWriteOperation(pParse, 1, iDb); #ifndef SQLITE_OMIT_VIRTUALTABLE if( IsVirtual(pTab) ){ sqlite3VdbeAddOp0(v, OP_VBegin); } #endif /* Drop all triggers associated with the table being dropped. Code ** is generated to remove entries from sqlite_master and/or ** sqlite_temp_master if required. */ pTrigger = sqlite3TriggerList(pParse, pTab); while( pTrigger ){ assert( pTrigger->pSchema==pTab->pSchema || pTrigger->pSchema==db->aDb[1].pSchema ); sqlite3DropTriggerPtr(pParse, pTrigger); pTrigger = pTrigger->pNext; } #ifndef SQLITE_OMIT_AUTOINCREMENT /* Remove any entries of the sqlite_sequence table associated with ** the table being dropped. This is done before the table is dropped ** at the btree level, in case the sqlite_sequence table needs to ** move as a result of the drop (can happen in auto-vacuum mode). */ if( pTab->tabFlags & TF_Autoincrement ){ sqlite3NestedParse(pParse, "DELETE FROM %Q.sqlite_sequence WHERE name=%Q", pDb->zName, pTab->zName ); } #endif /* Drop all SQLITE_MASTER table and index entries that refer to the ** table. The program name loops through the master table and deletes ** every row that refers to a table of the same name as the one being ** dropped. Triggers are handled separately because a trigger can be ** created in the temp database that refers to a table in another ** database. */ sqlite3NestedParse(pParse, "DELETE FROM %Q.%s WHERE tbl_name=%Q and type!='trigger'", pDb->zName, SCHEMA_TABLE(iDb), pTab->zName); if( !isView && !IsVirtual(pTab) ){ destroyTable(pParse, pTab); } /* Remove the table entry from SQLite's internal schema and modify ** the schema cookie. */ if( IsVirtual(pTab) ){ sqlite3VdbeAddOp4(v, OP_VDestroy, iDb, 0, 0, pTab->zName, 0); } sqlite3VdbeAddOp4(v, OP_DropTable, iDb, 0, 0, pTab->zName, 0); sqlite3ChangeCookie(pParse, iDb); sqliteViewResetAll(db, iDb); } /* ** This routine is called to do the work of a DROP TABLE statement. ** pName is the name of the table to be dropped. 
*/ SQLITE_PRIVATE void sqlite3DropTable(Parse *pParse, SrcList *pName, int isView, int noErr){ Table *pTab; Vdbe *v; sqlite3 *db = pParse->db; int iDb; if( db->mallocFailed ){ goto exit_drop_table; } assert( pParse->nErr==0 ); assert( pName->nSrc==1 ); if( sqlite3ReadSchema(pParse) ) goto exit_drop_table; if( noErr ) db->suppressErr++; pTab = sqlite3LocateTableItem(pParse, isView, &pName->a[0]); if( noErr ) db->suppressErr--; if( pTab==0 ){ if( noErr ) sqlite3CodeVerifyNamedSchema(pParse, pName->a[0].zDatabase); goto exit_drop_table; } iDb = sqlite3SchemaToIndex(db, pTab->pSchema); assert( iDb>=0 && iDbnDb ); /* If pTab is a virtual table, call ViewGetColumnNames() to ensure ** it is initialized. */ if( IsVirtual(pTab) && sqlite3ViewGetColumnNames(pParse, pTab) ){ goto exit_drop_table; } #ifndef SQLITE_OMIT_AUTHORIZATION { int code; const char *zTab = SCHEMA_TABLE(iDb); const char *zDb = db->aDb[iDb].zName; const char *zArg2 = 0; if( sqlite3AuthCheck(pParse, SQLITE_DELETE, zTab, 0, zDb)){ goto exit_drop_table; } if( isView ){ if( !OMIT_TEMPDB && iDb==1 ){ code = SQLITE_DROP_TEMP_VIEW; }else{ code = SQLITE_DROP_VIEW; } #ifndef SQLITE_OMIT_VIRTUALTABLE }else if( IsVirtual(pTab) ){ code = SQLITE_DROP_VTABLE; zArg2 = sqlite3GetVTable(db, pTab)->pMod->zName; #endif }else{ if( !OMIT_TEMPDB && iDb==1 ){ code = SQLITE_DROP_TEMP_TABLE; }else{ code = SQLITE_DROP_TABLE; } } if( sqlite3AuthCheck(pParse, code, pTab->zName, zArg2, zDb) ){ goto exit_drop_table; } if( sqlite3AuthCheck(pParse, SQLITE_DELETE, pTab->zName, 0, zDb) ){ goto exit_drop_table; } } #endif if( sqlite3StrNICmp(pTab->zName, "sqlite_", 7)==0 && sqlite3StrNICmp(pTab->zName, "sqlite_stat", 11)!=0 ){ sqlite3ErrorMsg(pParse, "table %s may not be dropped", pTab->zName); goto exit_drop_table; } #ifndef SQLITE_OMIT_VIEW /* Ensure DROP TABLE is not used on a view, and DROP VIEW is not used ** on a table. */ if( isView && pTab->pSelect==0 ){ sqlite3ErrorMsg(pParse, "use DROP TABLE to delete table %s", pTab->zName); goto exit_drop_table; } if( !isView && pTab->pSelect ){ sqlite3ErrorMsg(pParse, "use DROP VIEW to delete view %s", pTab->zName); goto exit_drop_table; } #endif /* Generate code to remove the table from the master table ** on disk. */ v = sqlite3GetVdbe(pParse); if( v ){ sqlite3BeginWriteOperation(pParse, 1, iDb); sqlite3ClearStatTables(pParse, iDb, "tbl", pTab->zName); sqlite3FkDropTable(pParse, pName, pTab); sqlite3CodeDropTable(pParse, pTab, iDb, isView); } exit_drop_table: sqlite3SrcListDelete(db, pName); } /* ** This routine is called to create a new foreign key on the table ** currently under construction. pFromCol determines which columns ** in the current table point to the foreign key. If pFromCol==0 then ** connect the key to the last column inserted. pTo is the name of ** the table referred to (a.k.a the "parent" table). pToCol is a list ** of tables in the parent pTo table. flags contains all ** information about the conflict resolution algorithms specified ** in the ON DELETE, ON UPDATE and ON INSERT clauses. ** ** An FKey structure is created and added to the table currently ** under construction in the pParse->pNewTable field. ** ** The foreign key is set for IMMEDIATE processing. A subsequent call ** to sqlite3DeferForeignKey() might change this to DEFERRED. 
*/ SQLITE_PRIVATE void sqlite3CreateForeignKey( Parse *pParse, /* Parsing context */ ExprList *pFromCol, /* Columns in this table that point to other table */ Token *pTo, /* Name of the other table */ ExprList *pToCol, /* Columns in the other table */ int flags /* Conflict resolution algorithms. */ ){ sqlite3 *db = pParse->db; #ifndef SQLITE_OMIT_FOREIGN_KEY FKey *pFKey = 0; FKey *pNextTo; Table *p = pParse->pNewTable; int nByte; int i; int nCol; char *z; assert( pTo!=0 ); if( p==0 || IN_DECLARE_VTAB ) goto fk_end; if( pFromCol==0 ){ int iCol = p->nCol-1; if( NEVER(iCol<0) ) goto fk_end; if( pToCol && pToCol->nExpr!=1 ){ sqlite3ErrorMsg(pParse, "foreign key on %s" " should reference only one column of table %T", p->aCol[iCol].zName, pTo); goto fk_end; } nCol = 1; }else if( pToCol && pToCol->nExpr!=pFromCol->nExpr ){ sqlite3ErrorMsg(pParse, "number of columns in foreign key does not match the number of " "columns in the referenced table"); goto fk_end; }else{ nCol = pFromCol->nExpr; } nByte = sizeof(*pFKey) + (nCol-1)*sizeof(pFKey->aCol[0]) + pTo->n + 1; if( pToCol ){ for(i=0; inExpr; i++){ nByte += sqlite3Strlen30(pToCol->a[i].zName) + 1; } } pFKey = sqlite3DbMallocZero(db, nByte ); if( pFKey==0 ){ goto fk_end; } pFKey->pFrom = p; pFKey->pNextFrom = p->pFKey; z = (char*)&pFKey->aCol[nCol]; pFKey->zTo = z; memcpy(z, pTo->z, pTo->n); z[pTo->n] = 0; sqlite3Dequote(z); z += pTo->n+1; pFKey->nCol = nCol; if( pFromCol==0 ){ pFKey->aCol[0].iFrom = p->nCol-1; }else{ for(i=0; inCol; j++){ if( sqlite3StrICmp(p->aCol[j].zName, pFromCol->a[i].zName)==0 ){ pFKey->aCol[i].iFrom = j; break; } } if( j>=p->nCol ){ sqlite3ErrorMsg(pParse, "unknown column \"%s\" in foreign key definition", pFromCol->a[i].zName); goto fk_end; } } } if( pToCol ){ for(i=0; ia[i].zName); pFKey->aCol[i].zCol = z; memcpy(z, pToCol->a[i].zName, n); z[n] = 0; z += n+1; } } pFKey->isDeferred = 0; pFKey->aAction[0] = (u8)(flags & 0xff); /* ON DELETE action */ pFKey->aAction[1] = (u8)((flags >> 8 ) & 0xff); /* ON UPDATE action */ assert( sqlite3SchemaMutexHeld(db, 0, p->pSchema) ); pNextTo = (FKey *)sqlite3HashInsert(&p->pSchema->fkeyHash, pFKey->zTo, (void *)pFKey ); if( pNextTo==pFKey ){ db->mallocFailed = 1; goto fk_end; } if( pNextTo ){ assert( pNextTo->pPrevTo==0 ); pFKey->pNextTo = pNextTo; pNextTo->pPrevTo = pFKey; } /* Link the foreign key to the table as the last step. */ p->pFKey = pFKey; pFKey = 0; fk_end: sqlite3DbFree(db, pFKey); #endif /* !defined(SQLITE_OMIT_FOREIGN_KEY) */ sqlite3ExprListDelete(db, pFromCol); sqlite3ExprListDelete(db, pToCol); } /* ** This routine is called when an INITIALLY IMMEDIATE or INITIALLY DEFERRED ** clause is seen as part of a foreign key definition. The isDeferred ** parameter is 1 for INITIALLY DEFERRED and 0 for INITIALLY IMMEDIATE. ** The behavior of the most recently created foreign key is adjusted ** accordingly. */ SQLITE_PRIVATE void sqlite3DeferForeignKey(Parse *pParse, int isDeferred){ #ifndef SQLITE_OMIT_FOREIGN_KEY Table *pTab; FKey *pFKey; if( (pTab = pParse->pNewTable)==0 || (pFKey = pTab->pFKey)==0 ) return; assert( isDeferred==0 || isDeferred==1 ); /* EV: R-30323-21917 */ pFKey->isDeferred = (u8)isDeferred; #endif } /* ** Generate code that will erase and refill index *pIdx. This is ** used to initialize a newly created index or to recompute the ** content of an index in response to a REINDEX command. ** ** if memRootPage is not negative, it means that the index is newly ** created. The register specified by memRootPage contains the ** root page number of the index. 
If memRootPage is negative, then ** the index already exists and must be cleared before being refilled and ** the root page number of the index is taken from pIndex->tnum. */ static void sqlite3RefillIndex(Parse *pParse, Index *pIndex, int memRootPage){ Table *pTab = pIndex->pTable; /* The table that is indexed */ int iTab = pParse->nTab++; /* Btree cursor used for pTab */ int iIdx = pParse->nTab++; /* Btree cursor used for pIndex */ int iSorter; /* Cursor opened by OpenSorter (if in use) */ int addr1; /* Address of top of loop */ int addr2; /* Address to jump to for next iteration */ int tnum; /* Root page of index */ int iPartIdxLabel; /* Jump to this label to skip a row */ Vdbe *v; /* Generate code into this virtual machine */ KeyInfo *pKey; /* KeyInfo for index */ int regRecord; /* Register holding assembled index record */ sqlite3 *db = pParse->db; /* The database connection */ int iDb = sqlite3SchemaToIndex(db, pIndex->pSchema); #ifndef SQLITE_OMIT_AUTHORIZATION if( sqlite3AuthCheck(pParse, SQLITE_REINDEX, pIndex->zName, 0, db->aDb[iDb].zName ) ){ return; } #endif /* Require a write-lock on the table to perform this operation */ sqlite3TableLock(pParse, iDb, pTab->tnum, 1, pTab->zName); v = sqlite3GetVdbe(pParse); if( v==0 ) return; if( memRootPage>=0 ){ tnum = memRootPage; }else{ tnum = pIndex->tnum; } pKey = sqlite3KeyInfoOfIndex(pParse, pIndex); /* Open the sorter cursor if we are to use one. */ iSorter = pParse->nTab++; sqlite3VdbeAddOp4(v, OP_SorterOpen, iSorter, 0, pIndex->nKeyCol, (char*) sqlite3KeyInfoRef(pKey), P4_KEYINFO); /* Open the table. Loop through all rows of the table, inserting index ** records into the sorter. */ sqlite3OpenTable(pParse, iTab, iDb, pTab, OP_OpenRead); addr1 = sqlite3VdbeAddOp2(v, OP_Rewind, iTab, 0); VdbeCoverage(v); regRecord = sqlite3GetTempReg(pParse); sqlite3GenerateIndexKey(pParse,pIndex,iTab,regRecord,0,&iPartIdxLabel,0,0); sqlite3VdbeAddOp2(v, OP_SorterInsert, iSorter, regRecord); sqlite3ResolvePartIdxLabel(pParse, iPartIdxLabel); sqlite3VdbeAddOp2(v, OP_Next, iTab, addr1+1); VdbeCoverage(v); sqlite3VdbeJumpHere(v, addr1); if( memRootPage<0 ) sqlite3VdbeAddOp2(v, OP_Clear, tnum, iDb); sqlite3VdbeAddOp4(v, OP_OpenWrite, iIdx, tnum, iDb, (char *)pKey, P4_KEYINFO); sqlite3VdbeChangeP5(v, OPFLAG_BULKCSR|((memRootPage>=0)?OPFLAG_P2ISREG:0)); addr1 = sqlite3VdbeAddOp2(v, OP_SorterSort, iSorter, 0); VdbeCoverage(v); assert( pKey!=0 || db->mallocFailed || pParse->nErr ); if( IsUniqueIndex(pIndex) && pKey!=0 ){ int j2 = sqlite3VdbeCurrentAddr(v) + 3; sqlite3VdbeAddOp2(v, OP_Goto, 0, j2); addr2 = sqlite3VdbeCurrentAddr(v); sqlite3VdbeAddOp4Int(v, OP_SorterCompare, iSorter, j2, regRecord, pIndex->nKeyCol); VdbeCoverage(v); sqlite3UniqueConstraint(pParse, OE_Abort, pIndex); }else{ addr2 = sqlite3VdbeCurrentAddr(v); } sqlite3VdbeAddOp3(v, OP_SorterData, iSorter, regRecord, iIdx); sqlite3VdbeAddOp3(v, OP_Last, iIdx, 0, -1); sqlite3VdbeAddOp3(v, OP_IdxInsert, iIdx, regRecord, 0); sqlite3VdbeChangeP5(v, OPFLAG_USESEEKRESULT); sqlite3ReleaseTempReg(pParse, regRecord); sqlite3VdbeAddOp2(v, OP_SorterNext, iSorter, addr2); VdbeCoverage(v); sqlite3VdbeJumpHere(v, addr1); sqlite3VdbeAddOp1(v, OP_Close, iTab); sqlite3VdbeAddOp1(v, OP_Close, iIdx); sqlite3VdbeAddOp1(v, OP_Close, iSorter); } /* ** Allocate heap space to hold an Index object with nCol columns. ** ** Increase the allocation size to provide an extra nExtra bytes ** of 8-byte aligned space after the Index object and return a ** pointer to this extra space in *ppExtra. 
*/ SQLITE_PRIVATE Index *sqlite3AllocateIndexObject( sqlite3 *db, /* Database connection */ i16 nCol, /* Total number of columns in the index */ int nExtra, /* Number of bytes of extra space to alloc */ char **ppExtra /* Pointer to the "extra" space */ ){ Index *p; /* Allocated index object */ int nByte; /* Bytes of space for Index object + arrays */ nByte = ROUND8(sizeof(Index)) + /* Index structure */ ROUND8(sizeof(char*)*nCol) + /* Index.azColl */ ROUND8(sizeof(LogEst)*(nCol+1) + /* Index.aiRowLogEst */ sizeof(i16)*nCol + /* Index.aiColumn */ sizeof(u8)*nCol); /* Index.aSortOrder */ p = sqlite3DbMallocZero(db, nByte + nExtra); if( p ){ char *pExtra = ((char*)p)+ROUND8(sizeof(Index)); p->azColl = (char**)pExtra; pExtra += ROUND8(sizeof(char*)*nCol); p->aiRowLogEst = (LogEst*)pExtra; pExtra += sizeof(LogEst)*(nCol+1); p->aiColumn = (i16*)pExtra; pExtra += sizeof(i16)*nCol; p->aSortOrder = (u8*)pExtra; p->nColumn = nCol; p->nKeyCol = nCol - 1; *ppExtra = ((char*)p) + nByte; } return p; } /* ** Create a new index for an SQL table. pName1.pName2 is the name of the index ** and pTblList is the name of the table that is to be indexed. Both will ** be NULL for a primary key or an index that is created to satisfy a ** UNIQUE constraint. If pTable and pIndex are NULL, use pParse->pNewTable ** as the table to be indexed. pParse->pNewTable is a table that is ** currently being constructed by a CREATE TABLE statement. ** ** pList is a list of columns to be indexed. pList will be NULL if this ** is a primary key or unique-constraint on the most recent column added ** to the table currently under construction. ** ** If the index is created successfully, return a pointer to the new Index ** structure. This is used by sqlite3AddPrimaryKey() to mark the index ** as the tables primary key (Index.idxType==SQLITE_IDXTYPE_PRIMARYKEY) */ SQLITE_PRIVATE Index *sqlite3CreateIndex( Parse *pParse, /* All information about this parse */ Token *pName1, /* First part of index name. May be NULL */ Token *pName2, /* Second part of index name. May be NULL */ SrcList *pTblName, /* Table to index. Use pParse->pNewTable if 0 */ ExprList *pList, /* A list of columns to be indexed */ int onError, /* OE_Abort, OE_Ignore, OE_Replace, or OE_None */ Token *pStart, /* The CREATE token that begins this statement */ Expr *pPIWhere, /* WHERE clause for partial indices */ int sortOrder, /* Sort order of primary key when pList==NULL */ int ifNotExist /* Omit error if index already exists */ ){ Index *pRet = 0; /* Pointer to return */ Table *pTab = 0; /* Table to be indexed */ Index *pIndex = 0; /* The index to be created */ char *zName = 0; /* Name of the index */ int nName; /* Number of characters in zName */ int i, j; DbFixer sFix; /* For assigning database names to pTable */ int sortOrderMask; /* 1 to honor DESC in index. 0 to ignore. 
*/ sqlite3 *db = pParse->db; Db *pDb; /* The specific table containing the indexed database */ int iDb; /* Index of the database that is being written */ Token *pName = 0; /* Unqualified name of the index to create */ struct ExprList_item *pListItem; /* For looping over pList */ const Column *pTabCol; /* A column in the table */ int nExtra = 0; /* Space allocated for zExtra[] */ int nExtraCol; /* Number of extra columns needed */ char *zExtra = 0; /* Extra space after the Index object */ Index *pPk = 0; /* PRIMARY KEY index for WITHOUT ROWID tables */ if( db->mallocFailed || IN_DECLARE_VTAB || pParse->nErr>0 ){ goto exit_create_index; } if( SQLITE_OK!=sqlite3ReadSchema(pParse) ){ goto exit_create_index; } /* ** Find the table that is to be indexed. Return early if not found. */ if( pTblName!=0 ){ /* Use the two-part index name to determine the database ** to search for the table. 'Fix' the table name to this db ** before looking up the table. */ assert( pName1 && pName2 ); iDb = sqlite3TwoPartName(pParse, pName1, pName2, &pName); if( iDb<0 ) goto exit_create_index; assert( pName && pName->z ); #ifndef SQLITE_OMIT_TEMPDB /* If the index name was unqualified, check if the table ** is a temp table. If so, set the database to 1. Do not do this ** if initialising a database schema. */ if( !db->init.busy ){ pTab = sqlite3SrcListLookup(pParse, pTblName); if( pName2->n==0 && pTab && pTab->pSchema==db->aDb[1].pSchema ){ iDb = 1; } } #endif sqlite3FixInit(&sFix, pParse, iDb, "index", pName); if( sqlite3FixSrcList(&sFix, pTblName) ){ /* Because the parser constructs pTblName from a single identifier, ** sqlite3FixSrcList can never fail. */ assert(0); } pTab = sqlite3LocateTableItem(pParse, 0, &pTblName->a[0]); assert( db->mallocFailed==0 || pTab==0 ); if( pTab==0 ) goto exit_create_index; if( iDb==1 && db->aDb[iDb].pSchema!=pTab->pSchema ){ sqlite3ErrorMsg(pParse, "cannot create a TEMP index on non-TEMP table \"%s\"", pTab->zName); goto exit_create_index; } if( !HasRowid(pTab) ) pPk = sqlite3PrimaryKeyIndex(pTab); }else{ assert( pName==0 ); assert( pStart==0 ); pTab = pParse->pNewTable; if( !pTab ) goto exit_create_index; iDb = sqlite3SchemaToIndex(db, pTab->pSchema); } pDb = &db->aDb[iDb]; assert( pTab!=0 ); assert( pParse->nErr==0 ); if( sqlite3StrNICmp(pTab->zName, "sqlite_", 7)==0 && db->init.busy==0 #if SQLITE_USER_AUTHENTICATION && sqlite3UserAuthTable(pTab->zName)==0 #endif && sqlite3StrNICmp(&pTab->zName[7],"altertab_",9)!=0 ){ sqlite3ErrorMsg(pParse, "table %s may not be indexed", pTab->zName); goto exit_create_index; } #ifndef SQLITE_OMIT_VIEW if( pTab->pSelect ){ sqlite3ErrorMsg(pParse, "views may not be indexed"); goto exit_create_index; } #endif #ifndef SQLITE_OMIT_VIRTUALTABLE if( IsVirtual(pTab) ){ sqlite3ErrorMsg(pParse, "virtual tables may not be indexed"); goto exit_create_index; } #endif /* ** Find the name of the index. Make sure there is not already another ** index or table with the same name. ** ** Exception: If we are reading the names of permanent indices from the ** sqlite_master table (because some other process changed the schema) and ** one of the index names collides with the name of a temporary table or ** index, then we will continue to process this index. ** ** If pName==0 it means that we are ** dealing with a primary key or UNIQUE constraint. We have to invent our ** own name. 
*/ if( pName ){ zName = sqlite3NameFromToken(db, pName); if( zName==0 ) goto exit_create_index; assert( pName->z!=0 ); if( SQLITE_OK!=sqlite3CheckObjectName(pParse, zName) ){ goto exit_create_index; } if( !db->init.busy ){ if( sqlite3FindTable(db, zName, 0)!=0 ){ sqlite3ErrorMsg(pParse, "there is already a table named %s", zName); goto exit_create_index; } } if( sqlite3FindIndex(db, zName, pDb->zName)!=0 ){ if( !ifNotExist ){ sqlite3ErrorMsg(pParse, "index %s already exists", zName); }else{ assert( !db->init.busy ); sqlite3CodeVerifySchema(pParse, iDb); } goto exit_create_index; } }else{ int n; Index *pLoop; for(pLoop=pTab->pIndex, n=1; pLoop; pLoop=pLoop->pNext, n++){} zName = sqlite3MPrintf(db, "sqlite_autoindex_%s_%d", pTab->zName, n); if( zName==0 ){ goto exit_create_index; } } /* Check for authorization to create an index. */ #ifndef SQLITE_OMIT_AUTHORIZATION { const char *zDb = pDb->zName; if( sqlite3AuthCheck(pParse, SQLITE_INSERT, SCHEMA_TABLE(iDb), 0, zDb) ){ goto exit_create_index; } i = SQLITE_CREATE_INDEX; if( !OMIT_TEMPDB && iDb==1 ) i = SQLITE_CREATE_TEMP_INDEX; if( sqlite3AuthCheck(pParse, i, zName, pTab->zName, zDb) ){ goto exit_create_index; } } #endif /* If pList==0, it means this routine was called to make a primary ** key out of the last column added to the table under construction. ** So create a fake list to simulate this. */ if( pList==0 ){ pList = sqlite3ExprListAppend(pParse, 0, 0); if( pList==0 ) goto exit_create_index; pList->a[0].zName = sqlite3DbStrDup(pParse->db, pTab->aCol[pTab->nCol-1].zName); pList->a[0].sortOrder = (u8)sortOrder; } /* Figure out how many bytes of space are required to store explicitly ** specified collation sequence names. */ for(i=0; inExpr; i++){ Expr *pExpr = pList->a[i].pExpr; if( pExpr ){ assert( pExpr->op==TK_COLLATE ); nExtra += (1 + sqlite3Strlen30(pExpr->u.zToken)); } } /* ** Allocate the index structure. */ nName = sqlite3Strlen30(zName); nExtraCol = pPk ? pPk->nKeyCol : 1; pIndex = sqlite3AllocateIndexObject(db, pList->nExpr + nExtraCol, nName + nExtra + 1, &zExtra); if( db->mallocFailed ){ goto exit_create_index; } assert( EIGHT_BYTE_ALIGNMENT(pIndex->aiRowLogEst) ); assert( EIGHT_BYTE_ALIGNMENT(pIndex->azColl) ); pIndex->zName = zExtra; zExtra += nName + 1; memcpy(pIndex->zName, zName, nName+1); pIndex->pTable = pTab; pIndex->onError = (u8)onError; pIndex->uniqNotNull = onError!=OE_None; pIndex->idxType = pName ? SQLITE_IDXTYPE_APPDEF : SQLITE_IDXTYPE_UNIQUE; pIndex->pSchema = db->aDb[iDb].pSchema; pIndex->nKeyCol = pList->nExpr; if( pPIWhere ){ sqlite3ResolveSelfReference(pParse, pTab, NC_PartIdx, pPIWhere, 0); pIndex->pPartIdxWhere = pPIWhere; pPIWhere = 0; } assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); /* Check to see if we should honor DESC requests on index columns */ if( pDb->pSchema->file_format>=4 ){ sortOrderMask = -1; /* Honor DESC */ }else{ sortOrderMask = 0; /* Ignore DESC */ } /* Scan the names of the columns of the table to be indexed and ** load the column indices into the Index structure. Report an error ** if any column is not found. ** ** TODO: Add a test to make sure that the same column is not named ** more than once within the same index. Only the first instance of ** the column will ever be used by the optimizer. Note that using the ** same column more than once cannot be an error because that would ** break backwards compatibility - it needs to be a warning. 
*/ for(i=0, pListItem=pList->a; inExpr; i++, pListItem++){ const char *zColName = pListItem->zName; int requestedSortOrder; char *zColl; /* Collation sequence name */ for(j=0, pTabCol=pTab->aCol; jnCol; j++, pTabCol++){ if( sqlite3StrICmp(zColName, pTabCol->zName)==0 ) break; } if( j>=pTab->nCol ){ sqlite3ErrorMsg(pParse, "table %s has no column named %s", pTab->zName, zColName); pParse->checkSchema = 1; goto exit_create_index; } assert( j<=0x7fff ); pIndex->aiColumn[i] = (i16)j; if( pListItem->pExpr ){ int nColl; assert( pListItem->pExpr->op==TK_COLLATE ); zColl = pListItem->pExpr->u.zToken; nColl = sqlite3Strlen30(zColl) + 1; assert( nExtra>=nColl ); memcpy(zExtra, zColl, nColl); zColl = zExtra; zExtra += nColl; nExtra -= nColl; }else{ zColl = pTab->aCol[j].zColl; if( !zColl ) zColl = "BINARY"; } if( !db->init.busy && !sqlite3LocateCollSeq(pParse, zColl) ){ goto exit_create_index; } pIndex->azColl[i] = zColl; requestedSortOrder = pListItem->sortOrder & sortOrderMask; pIndex->aSortOrder[i] = (u8)requestedSortOrder; if( pTab->aCol[j].notNull==0 ) pIndex->uniqNotNull = 0; } if( pPk ){ for(j=0; jnKeyCol; j++){ int x = pPk->aiColumn[j]; if( hasColumn(pIndex->aiColumn, pIndex->nKeyCol, x) ){ pIndex->nColumn--; }else{ pIndex->aiColumn[i] = x; pIndex->azColl[i] = pPk->azColl[j]; pIndex->aSortOrder[i] = pPk->aSortOrder[j]; i++; } } assert( i==pIndex->nColumn ); }else{ pIndex->aiColumn[i] = -1; pIndex->azColl[i] = "BINARY"; } sqlite3DefaultRowEst(pIndex); if( pParse->pNewTable==0 ) estimateIndexWidth(pIndex); if( pTab==pParse->pNewTable ){ /* This routine has been called to create an automatic index as a ** result of a PRIMARY KEY or UNIQUE clause on a column definition, or ** a PRIMARY KEY or UNIQUE clause following the column definitions. ** i.e. one of: ** ** CREATE TABLE t(x PRIMARY KEY, y); ** CREATE TABLE t(x, y, UNIQUE(x, y)); ** ** Either way, check to see if the table already has such an index. If ** so, don't bother creating this one. This only applies to ** automatically created indices. Users can do as they wish with ** explicit indices. ** ** Two UNIQUE or PRIMARY KEY constraints are considered equivalent ** (and thus suppressing the second one) even if they have different ** sort orders. ** ** If there are different collating sequences or if the columns of ** the constraint occur in different orders, then the constraints are ** considered distinct and both result in separate indices. */ Index *pIdx; for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ int k; assert( IsUniqueIndex(pIdx) ); assert( pIdx->idxType!=SQLITE_IDXTYPE_APPDEF ); assert( IsUniqueIndex(pIndex) ); if( pIdx->nKeyCol!=pIndex->nKeyCol ) continue; for(k=0; knKeyCol; k++){ const char *z1; const char *z2; if( pIdx->aiColumn[k]!=pIndex->aiColumn[k] ) break; z1 = pIdx->azColl[k]; z2 = pIndex->azColl[k]; if( z1!=z2 && sqlite3StrICmp(z1, z2) ) break; } if( k==pIdx->nKeyCol ){ if( pIdx->onError!=pIndex->onError ){ /* This constraint creates the same index as a previous ** constraint specified somewhere in the CREATE TABLE statement. ** However the ON CONFLICT clauses are different. If both this ** constraint and the previous equivalent constraint have explicit ** ON CONFLICT clauses this is an error. Otherwise, use the ** explicitly specified behavior for the index. 
*/ if( !(pIdx->onError==OE_Default || pIndex->onError==OE_Default) ){ sqlite3ErrorMsg(pParse, "conflicting ON CONFLICT clauses specified", 0); } if( pIdx->onError==OE_Default ){ pIdx->onError = pIndex->onError; } } pRet = pIdx; goto exit_create_index; } } } /* Link the new Index structure to its table and to the other ** in-memory database structures. */ if( db->init.busy ){ Index *p; assert( sqlite3SchemaMutexHeld(db, 0, pIndex->pSchema) ); p = sqlite3HashInsert(&pIndex->pSchema->idxHash, pIndex->zName, pIndex); if( p ){ assert( p==pIndex ); /* Malloc must have failed */ db->mallocFailed = 1; goto exit_create_index; } db->flags |= SQLITE_InternChanges; if( pTblName!=0 ){ pIndex->tnum = db->init.newTnum; } } /* If this is the initial CREATE INDEX statement (or CREATE TABLE if the ** index is an implied index for a UNIQUE or PRIMARY KEY constraint) then ** emit code to allocate the index rootpage on disk and make an entry for ** the index in the sqlite_master table and populate the index with ** content. But, do not do this if we are simply reading the sqlite_master ** table to parse the schema, or if this index is the PRIMARY KEY index ** of a WITHOUT ROWID table. ** ** If pTblName==0 it means this index is generated as an implied PRIMARY KEY ** or UNIQUE index in a CREATE TABLE statement. Since the table ** has just been created, it contains no data and the index initialization ** step can be skipped. */ else if( pParse->nErr==0 && (HasRowid(pTab) || pTblName!=0) ){ Vdbe *v; char *zStmt; int iMem = ++pParse->nMem; v = sqlite3GetVdbe(pParse); if( v==0 ) goto exit_create_index; sqlite3BeginWriteOperation(pParse, 1, iDb); /* Create the rootpage for the index using CreateIndex. But before ** doing so, code a Noop instruction and store its address in ** Index.tnum. This is required in case this index is actually a ** PRIMARY KEY and the table is actually a WITHOUT ROWID table. In ** that case the convertToWithoutRowidTable() routine will replace ** the Noop with a Goto to jump over the VDBE code generated below. */ pIndex->tnum = sqlite3VdbeAddOp0(v, OP_Noop); sqlite3VdbeAddOp2(v, OP_CreateIndex, iDb, iMem); /* Gather the complete text of the CREATE INDEX statement into ** the zStmt variable */ if( pStart ){ int n = (int)(pParse->sLastToken.z - pName->z) + pParse->sLastToken.n; if( pName->z[n-1]==';' ) n--; /* A named index with an explicit CREATE INDEX statement */ zStmt = sqlite3MPrintf(db, "CREATE%s INDEX %.*s", onError==OE_None ? "" : " UNIQUE", n, pName->z); }else{ /* An automatic index created by a PRIMARY KEY or UNIQUE constraint */ /* zStmt = sqlite3MPrintf(""); */ zStmt = 0; } /* Add an entry in sqlite_master for this index */ sqlite3NestedParse(pParse, "INSERT INTO %Q.%s VALUES('index',%Q,%Q,#%d,%Q);", db->aDb[iDb].zName, SCHEMA_TABLE(iDb), pIndex->zName, pTab->zName, iMem, zStmt ); sqlite3DbFree(db, zStmt); /* Fill the index with data and reparse the schema. Code an OP_Expire ** to invalidate all pre-compiled statements. */ if( pTblName ){ sqlite3RefillIndex(pParse, pIndex, iMem); sqlite3ChangeCookie(pParse, iDb); sqlite3VdbeAddParseSchemaOp(v, iDb, sqlite3MPrintf(db, "name='%q' AND type='index'", pIndex->zName)); sqlite3VdbeAddOp1(v, OP_Expire, 0); } sqlite3VdbeJumpHere(v, pIndex->tnum); } /* When adding an index to the list of indices for a table, make ** sure all indices labeled OE_Replace come after all those labeled ** OE_Ignore. This is necessary for the correct constraint check ** processing (in sqlite3GenerateConstraintChecks()) as part of ** UPDATE and INSERT statements. 
*/ if( db->init.busy || pTblName==0 ){ if( onError!=OE_Replace || pTab->pIndex==0 || pTab->pIndex->onError==OE_Replace){ pIndex->pNext = pTab->pIndex; pTab->pIndex = pIndex; }else{ Index *pOther = pTab->pIndex; while( pOther->pNext && pOther->pNext->onError!=OE_Replace ){ pOther = pOther->pNext; } pIndex->pNext = pOther->pNext; pOther->pNext = pIndex; } pRet = pIndex; pIndex = 0; } /* Clean up before exiting */ exit_create_index: if( pIndex ) freeIndex(db, pIndex); sqlite3ExprDelete(db, pPIWhere); sqlite3ExprListDelete(db, pList); sqlite3SrcListDelete(db, pTblName); sqlite3DbFree(db, zName); return pRet; } /* ** Fill the Index.aiRowEst[] array with default information - information ** to be used when we have not run the ANALYZE command. ** ** aiRowEst[0] is supposed to contain the number of elements in the index. ** Since we do not know, guess 1 million. aiRowEst[1] is an estimate of the ** number of rows in the table that match any particular value of the ** first column of the index. aiRowEst[2] is an estimate of the number ** of rows that match any particular combination of the first 2 columns ** of the index. And so forth. It must always be the case that * ** aiRowEst[N]<=aiRowEst[N-1] ** aiRowEst[N]>=1 ** ** Apart from that, we have little to go on besides intuition as to ** how aiRowEst[] should be initialized. The numbers generated here ** are based on typical values found in actual indices. */ SQLITE_PRIVATE void sqlite3DefaultRowEst(Index *pIdx){ /* 10, 9, 8, 7, 6 */ LogEst aVal[] = { 33, 32, 30, 28, 26 }; LogEst *a = pIdx->aiRowLogEst; int nCopy = MIN(ArraySize(aVal), pIdx->nKeyCol); int i; /* Set the first entry (number of rows in the index) to the estimated ** number of rows in the table. Or 10, if the estimated number of rows ** in the table is less than that. */ a[0] = pIdx->pTable->nRowLogEst; if( a[0]<33 ) a[0] = 33; assert( 33==sqlite3LogEst(10) ); /* Estimate that a[1] is 10, a[2] is 9, a[3] is 8, a[4] is 7, a[5] is ** 6 and each subsequent value (if any) is 5. */ memcpy(&a[1], aVal, nCopy*sizeof(LogEst)); for(i=nCopy+1; i<=pIdx->nKeyCol; i++){ a[i] = 23; assert( 23==sqlite3LogEst(5) ); } assert( 0==sqlite3LogEst(1) ); if( IsUniqueIndex(pIdx) ) a[pIdx->nKeyCol] = 0; } /* ** This routine will drop an existing named index. This routine ** implements the DROP INDEX statement. 
*/ SQLITE_PRIVATE void sqlite3DropIndex(Parse *pParse, SrcList *pName, int ifExists){ Index *pIndex; Vdbe *v; sqlite3 *db = pParse->db; int iDb; assert( pParse->nErr==0 ); /* Never called with prior errors */ if( db->mallocFailed ){ goto exit_drop_index; } assert( pName->nSrc==1 ); if( SQLITE_OK!=sqlite3ReadSchema(pParse) ){ goto exit_drop_index; } pIndex = sqlite3FindIndex(db, pName->a[0].zName, pName->a[0].zDatabase); if( pIndex==0 ){ if( !ifExists ){ sqlite3ErrorMsg(pParse, "no such index: %S", pName, 0); }else{ sqlite3CodeVerifyNamedSchema(pParse, pName->a[0].zDatabase); } pParse->checkSchema = 1; goto exit_drop_index; } if( pIndex->idxType!=SQLITE_IDXTYPE_APPDEF ){ sqlite3ErrorMsg(pParse, "index associated with UNIQUE " "or PRIMARY KEY constraint cannot be dropped", 0); goto exit_drop_index; } iDb = sqlite3SchemaToIndex(db, pIndex->pSchema); #ifndef SQLITE_OMIT_AUTHORIZATION { int code = SQLITE_DROP_INDEX; Table *pTab = pIndex->pTable; const char *zDb = db->aDb[iDb].zName; const char *zTab = SCHEMA_TABLE(iDb); if( sqlite3AuthCheck(pParse, SQLITE_DELETE, zTab, 0, zDb) ){ goto exit_drop_index; } if( !OMIT_TEMPDB && iDb ) code = SQLITE_DROP_TEMP_INDEX; if( sqlite3AuthCheck(pParse, code, pIndex->zName, pTab->zName, zDb) ){ goto exit_drop_index; } } #endif /* Generate code to remove the index and from the master table */ v = sqlite3GetVdbe(pParse); if( v ){ sqlite3BeginWriteOperation(pParse, 1, iDb); sqlite3NestedParse(pParse, "DELETE FROM %Q.%s WHERE name=%Q AND type='index'", db->aDb[iDb].zName, SCHEMA_TABLE(iDb), pIndex->zName ); sqlite3ClearStatTables(pParse, iDb, "idx", pIndex->zName); sqlite3ChangeCookie(pParse, iDb); destroyRootPage(pParse, pIndex->tnum, iDb); sqlite3VdbeAddOp4(v, OP_DropIndex, iDb, 0, 0, pIndex->zName, 0); } exit_drop_index: sqlite3SrcListDelete(db, pName); } /* ** pArray is a pointer to an array of objects. Each object in the ** array is szEntry bytes in size. This routine uses sqlite3DbRealloc() ** to extend the array so that there is space for a new object at the end. ** ** When this function is called, *pnEntry contains the current size of ** the array (in entries - so the allocation is ((*pnEntry) * szEntry) bytes ** in total). ** ** If the realloc() is successful (i.e. if no OOM condition occurs), the ** space allocated for the new object is zeroed, *pnEntry updated to ** reflect the new size of the array and a pointer to the new allocation ** returned. *pIdx is set to the index of the new array entry in this case. ** ** Otherwise, if the realloc() fails, *pIdx is set to -1, *pnEntry remains ** unchanged and a copy of pArray returned. */ SQLITE_PRIVATE void *sqlite3ArrayAllocate( sqlite3 *db, /* Connection to notify of malloc failures */ void *pArray, /* Array of objects. Might be reallocated */ int szEntry, /* Size of each object in the array */ int *pnEntry, /* Number of objects currently in use */ int *pIdx /* Write the index of a new slot here */ ){ char *z; int n = *pnEntry; if( (n & (n-1))==0 ){ int sz = (n==0) ? 1 : 2*n; void *pNew = sqlite3DbRealloc(db, pArray, sz*szEntry); if( pNew==0 ){ *pIdx = -1; return pArray; } pArray = pNew; } z = (char*)pArray; memset(&z[n * szEntry], 0, szEntry); *pIdx = n; ++*pnEntry; return pArray; } /* ** Append a new element to the given IdList. Create a new IdList if ** need be. ** ** A new IdList is returned, or NULL if malloc() fails. 
*/ SQLITE_PRIVATE IdList *sqlite3IdListAppend(sqlite3 *db, IdList *pList, Token *pToken){ int i; if( pList==0 ){ pList = sqlite3DbMallocZero(db, sizeof(IdList) ); if( pList==0 ) return 0; } pList->a = sqlite3ArrayAllocate( db, pList->a, sizeof(pList->a[0]), &pList->nId, &i ); if( i<0 ){ sqlite3IdListDelete(db, pList); return 0; } pList->a[i].zName = sqlite3NameFromToken(db, pToken); return pList; } /* ** Delete an IdList. */ SQLITE_PRIVATE void sqlite3IdListDelete(sqlite3 *db, IdList *pList){ int i; if( pList==0 ) return; for(i=0; inId; i++){ sqlite3DbFree(db, pList->a[i].zName); } sqlite3DbFree(db, pList->a); sqlite3DbFree(db, pList); } /* ** Return the index in pList of the identifier named zId. Return -1 ** if not found. */ SQLITE_PRIVATE int sqlite3IdListIndex(IdList *pList, const char *zName){ int i; if( pList==0 ) return -1; for(i=0; inId; i++){ if( sqlite3StrICmp(pList->a[i].zName, zName)==0 ) return i; } return -1; } /* ** Expand the space allocated for the given SrcList object by ** creating nExtra new slots beginning at iStart. iStart is zero based. ** New slots are zeroed. ** ** For example, suppose a SrcList initially contains two entries: A,B. ** To append 3 new entries onto the end, do this: ** ** sqlite3SrcListEnlarge(db, pSrclist, 3, 2); ** ** After the call above it would contain: A, B, nil, nil, nil. ** If the iStart argument had been 1 instead of 2, then the result ** would have been: A, nil, nil, nil, B. To prepend the new slots, ** the iStart value would be 0. The result then would ** be: nil, nil, nil, A, B. ** ** If a memory allocation fails the SrcList is unchanged. The ** db->mallocFailed flag will be set to true. */ SQLITE_PRIVATE SrcList *sqlite3SrcListEnlarge( sqlite3 *db, /* Database connection to notify of OOM errors */ SrcList *pSrc, /* The SrcList to be enlarged */ int nExtra, /* Number of new slots to add to pSrc->a[] */ int iStart /* Index in pSrc->a[] of first new slot */ ){ int i; /* Sanity checking on calling parameters */ assert( iStart>=0 ); assert( nExtra>=1 ); assert( pSrc!=0 ); assert( iStart<=pSrc->nSrc ); /* Allocate additional space if needed */ if( (u32)pSrc->nSrc+nExtra>pSrc->nAlloc ){ SrcList *pNew; int nAlloc = pSrc->nSrc+nExtra; int nGot; pNew = sqlite3DbRealloc(db, pSrc, sizeof(*pSrc) + (nAlloc-1)*sizeof(pSrc->a[0]) ); if( pNew==0 ){ assert( db->mallocFailed ); return pSrc; } pSrc = pNew; nGot = (sqlite3DbMallocSize(db, pNew) - sizeof(*pSrc))/sizeof(pSrc->a[0])+1; pSrc->nAlloc = nGot; } /* Move existing slots that come after the newly inserted slots ** out of the way */ for(i=pSrc->nSrc-1; i>=iStart; i--){ pSrc->a[i+nExtra] = pSrc->a[i]; } pSrc->nSrc += nExtra; /* Zero the newly allocated slots */ memset(&pSrc->a[iStart], 0, sizeof(pSrc->a[0])*nExtra); for(i=iStart; ia[i].iCursor = -1; } /* Return a pointer to the enlarged SrcList */ return pSrc; } /* ** Append a new table name to the given SrcList. Create a new SrcList if ** need be. A new entry is created in the SrcList even if pTable is NULL. ** ** A SrcList is returned, or NULL if there is an OOM error. The returned ** SrcList might be the same as the SrcList that was input or it might be ** a new one. If an OOM error does occurs, then the prior value of pList ** that is input to this routine is automatically freed. ** ** If pDatabase is not null, it means that the table has an optional ** database name prefix. Like this: "database.table". The pDatabase ** points to the table name and the pTable points to the database name. 
** The SrcList.a[].zName field is filled with the table name which might ** come from pTable (if pDatabase is NULL) or from pDatabase. ** SrcList.a[].zDatabase is filled with the database name from pTable, ** or with NULL if no database is specified. ** ** In other words, if call like this: ** ** sqlite3SrcListAppend(D,A,B,0); ** ** Then B is a table name and the database name is unspecified. If called ** like this: ** ** sqlite3SrcListAppend(D,A,B,C); ** ** Then C is the table name and B is the database name. If C is defined ** then so is B. In other words, we never have a case where: ** ** sqlite3SrcListAppend(D,A,0,C); ** ** Both pTable and pDatabase are assumed to be quoted. They are dequoted ** before being added to the SrcList. */ SQLITE_PRIVATE SrcList *sqlite3SrcListAppend( sqlite3 *db, /* Connection to notify of malloc failures */ SrcList *pList, /* Append to this SrcList. NULL creates a new SrcList */ Token *pTable, /* Table to append */ Token *pDatabase /* Database of the table */ ){ struct SrcList_item *pItem; assert( pDatabase==0 || pTable!=0 ); /* Cannot have C without B */ if( pList==0 ){ pList = sqlite3DbMallocZero(db, sizeof(SrcList) ); if( pList==0 ) return 0; pList->nAlloc = 1; } pList = sqlite3SrcListEnlarge(db, pList, 1, pList->nSrc); if( db->mallocFailed ){ sqlite3SrcListDelete(db, pList); return 0; } pItem = &pList->a[pList->nSrc-1]; if( pDatabase && pDatabase->z==0 ){ pDatabase = 0; } if( pDatabase ){ Token *pTemp = pDatabase; pDatabase = pTable; pTable = pTemp; } pItem->zName = sqlite3NameFromToken(db, pTable); pItem->zDatabase = sqlite3NameFromToken(db, pDatabase); return pList; } /* ** Assign VdbeCursor index numbers to all tables in a SrcList */ SQLITE_PRIVATE void sqlite3SrcListAssignCursors(Parse *pParse, SrcList *pList){ int i; struct SrcList_item *pItem; assert(pList || pParse->db->mallocFailed ); if( pList ){ for(i=0, pItem=pList->a; inSrc; i++, pItem++){ if( pItem->iCursor>=0 ) break; pItem->iCursor = pParse->nTab++; if( pItem->pSelect ){ sqlite3SrcListAssignCursors(pParse, pItem->pSelect->pSrc); } } } } /* ** Delete an entire SrcList including all its substructure. */ SQLITE_PRIVATE void sqlite3SrcListDelete(sqlite3 *db, SrcList *pList){ int i; struct SrcList_item *pItem; if( pList==0 ) return; for(pItem=pList->a, i=0; inSrc; i++, pItem++){ sqlite3DbFree(db, pItem->zDatabase); sqlite3DbFree(db, pItem->zName); sqlite3DbFree(db, pItem->zAlias); sqlite3DbFree(db, pItem->zIndexedBy); sqlite3DeleteTable(db, pItem->pTab); sqlite3SelectDelete(db, pItem->pSelect); sqlite3ExprDelete(db, pItem->pOn); sqlite3IdListDelete(db, pItem->pUsing); } sqlite3DbFree(db, pList); } /* ** This routine is called by the parser to add a new term to the ** end of a growing FROM clause. The "p" parameter is the part of ** the FROM clause that has already been constructed. "p" is NULL ** if this is the first term of the FROM clause. pTable and pDatabase ** are the name of the table and database named in the FROM clause term. ** pDatabase is NULL if the database name qualifier is missing - the ** usual case. If the term has an alias, then pAlias points to the ** alias token. If the term is a subquery, then pSubquery is the ** SELECT statement that the subquery encodes. The pTable and ** pDatabase parameters are NULL for subqueries. The pOn and pUsing ** parameters are the content of the ON and USING clauses. ** ** Return a new SrcList which encodes is the FROM with the new ** term added. 
*/ SQLITE_PRIVATE SrcList *sqlite3SrcListAppendFromTerm( Parse *pParse, /* Parsing context */ SrcList *p, /* The left part of the FROM clause already seen */ Token *pTable, /* Name of the table to add to the FROM clause */ Token *pDatabase, /* Name of the database containing pTable */ Token *pAlias, /* The right-hand side of the AS subexpression */ Select *pSubquery, /* A subquery used in place of a table name */ Expr *pOn, /* The ON clause of a join */ IdList *pUsing /* The USING clause of a join */ ){ struct SrcList_item *pItem; sqlite3 *db = pParse->db; if( !p && (pOn || pUsing) ){ sqlite3ErrorMsg(pParse, "a JOIN clause is required before %s", (pOn ? "ON" : "USING") ); goto append_from_error; } p = sqlite3SrcListAppend(db, p, pTable, pDatabase); if( p==0 || NEVER(p->nSrc==0) ){ goto append_from_error; } pItem = &p->a[p->nSrc-1]; assert( pAlias!=0 ); if( pAlias->n ){ pItem->zAlias = sqlite3NameFromToken(db, pAlias); } pItem->pSelect = pSubquery; pItem->pOn = pOn; pItem->pUsing = pUsing; return p; append_from_error: assert( p==0 ); sqlite3ExprDelete(db, pOn); sqlite3IdListDelete(db, pUsing); sqlite3SelectDelete(db, pSubquery); return 0; } /* ** Add an INDEXED BY or NOT INDEXED clause to the most recently added ** element of the source-list passed as the second argument. */ SQLITE_PRIVATE void sqlite3SrcListIndexedBy(Parse *pParse, SrcList *p, Token *pIndexedBy){ assert( pIndexedBy!=0 ); if( p && ALWAYS(p->nSrc>0) ){ struct SrcList_item *pItem = &p->a[p->nSrc-1]; assert( pItem->notIndexed==0 && pItem->zIndexedBy==0 ); if( pIndexedBy->n==1 && !pIndexedBy->z ){ /* A "NOT INDEXED" clause was supplied. See parse.y ** construct "indexed_opt" for details. */ pItem->notIndexed = 1; }else{ pItem->zIndexedBy = sqlite3NameFromToken(pParse->db, pIndexedBy); } } } /* ** When building up a FROM clause in the parser, the join operator ** is initially attached to the left operand. But the code generator ** expects the join operator to be on the right operand. This routine ** Shifts all join operators from left to right for an entire FROM ** clause. ** ** Example: Suppose the join is like this: ** ** A natural cross join B ** ** The operator is "natural cross join". The A and B operands are stored ** in p->a[0] and p->a[1], respectively. The parser initially stores the ** operator with A. This routine shifts that operator over to B. 
*/ SQLITE_PRIVATE void sqlite3SrcListShiftJoinType(SrcList *p){ if( p ){ int i; for(i=p->nSrc-1; i>0; i--){ p->a[i].jointype = p->a[i-1].jointype; } p->a[0].jointype = 0; } } /* ** Begin a transaction */ SQLITE_PRIVATE void sqlite3BeginTransaction(Parse *pParse, int type){ sqlite3 *db; Vdbe *v; int i; assert( pParse!=0 ); db = pParse->db; assert( db!=0 ); /* if( db->aDb[0].pBt==0 ) return; */ if( sqlite3AuthCheck(pParse, SQLITE_TRANSACTION, "BEGIN", 0, 0) ){ return; } v = sqlite3GetVdbe(pParse); if( !v ) return; if( type!=TK_DEFERRED ){ for(i=0; inDb; i++){ sqlite3VdbeAddOp2(v, OP_Transaction, i, (type==TK_EXCLUSIVE)+1); sqlite3VdbeUsesBtree(v, i); } } sqlite3VdbeAddOp2(v, OP_AutoCommit, 0, 0); } /* ** Commit a transaction */ SQLITE_PRIVATE void sqlite3CommitTransaction(Parse *pParse){ Vdbe *v; assert( pParse!=0 ); assert( pParse->db!=0 ); if( sqlite3AuthCheck(pParse, SQLITE_TRANSACTION, "COMMIT", 0, 0) ){ return; } v = sqlite3GetVdbe(pParse); if( v ){ sqlite3VdbeAddOp2(v, OP_AutoCommit, 1, 0); } } /* ** Rollback a transaction */ SQLITE_PRIVATE void sqlite3RollbackTransaction(Parse *pParse){ Vdbe *v; assert( pParse!=0 ); assert( pParse->db!=0 ); if( sqlite3AuthCheck(pParse, SQLITE_TRANSACTION, "ROLLBACK", 0, 0) ){ return; } v = sqlite3GetVdbe(pParse); if( v ){ sqlite3VdbeAddOp2(v, OP_AutoCommit, 1, 1); } } /* ** This function is called by the parser when it parses a command to create, ** release or rollback an SQL savepoint. */ SQLITE_PRIVATE void sqlite3Savepoint(Parse *pParse, int op, Token *pName){ char *zName = sqlite3NameFromToken(pParse->db, pName); if( zName ){ Vdbe *v = sqlite3GetVdbe(pParse); #ifndef SQLITE_OMIT_AUTHORIZATION static const char * const az[] = { "BEGIN", "RELEASE", "ROLLBACK" }; assert( !SAVEPOINT_BEGIN && SAVEPOINT_RELEASE==1 && SAVEPOINT_ROLLBACK==2 ); #endif if( !v || sqlite3AuthCheck(pParse, SQLITE_SAVEPOINT, az[op], zName, 0) ){ sqlite3DbFree(pParse->db, zName); return; } sqlite3VdbeAddOp4(v, OP_Savepoint, op, 0, 0, zName, P4_DYNAMIC); } } /* ** Make sure the TEMP database is open and available for use. Return ** the number of errors. Leave any error messages in the pParse structure. */ SQLITE_PRIVATE int sqlite3OpenTempDatabase(Parse *pParse){ sqlite3 *db = pParse->db; if( db->aDb[1].pBt==0 && !pParse->explain ){ int rc; Btree *pBt; static const int flags = SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | SQLITE_OPEN_EXCLUSIVE | SQLITE_OPEN_DELETEONCLOSE | SQLITE_OPEN_TEMP_DB; rc = sqlite3BtreeOpen(db->pVfs, 0, db, &pBt, 0, flags); if( rc!=SQLITE_OK ){ sqlite3ErrorMsg(pParse, "unable to open a temporary database " "file for storing temporary tables"); pParse->rc = rc; return 1; } db->aDb[1].pBt = pBt; assert( db->aDb[1].pSchema ); if( SQLITE_NOMEM==sqlite3BtreeSetPageSize(pBt, db->nextPagesize, -1, 0) ){ db->mallocFailed = 1; return 1; } } return 0; } /* ** Record the fact that the schema cookie will need to be verified ** for database iDb. The code to actually verify the schema cookie ** will occur at the end of the top-level VDBE and will be generated ** later, by sqlite3FinishCoding(). 
*/ SQLITE_PRIVATE void sqlite3CodeVerifySchema(Parse *pParse, int iDb){ Parse *pToplevel = sqlite3ParseToplevel(pParse); sqlite3 *db = pToplevel->db; assert( iDb>=0 && iDbnDb ); assert( db->aDb[iDb].pBt!=0 || iDb==1 ); assert( iDbcookieMask, iDb)==0 ){ DbMaskSet(pToplevel->cookieMask, iDb); pToplevel->cookieValue[iDb] = db->aDb[iDb].pSchema->schema_cookie; if( !OMIT_TEMPDB && iDb==1 ){ sqlite3OpenTempDatabase(pToplevel); } } } /* ** If argument zDb is NULL, then call sqlite3CodeVerifySchema() for each ** attached database. Otherwise, invoke it for the database named zDb only. */ SQLITE_PRIVATE void sqlite3CodeVerifyNamedSchema(Parse *pParse, const char *zDb){ sqlite3 *db = pParse->db; int i; for(i=0; inDb; i++){ Db *pDb = &db->aDb[i]; if( pDb->pBt && (!zDb || 0==sqlite3StrICmp(zDb, pDb->zName)) ){ sqlite3CodeVerifySchema(pParse, i); } } } /* ** Generate VDBE code that prepares for doing an operation that ** might change the database. ** ** This routine starts a new transaction if we are not already within ** a transaction. If we are already within a transaction, then a checkpoint ** is set if the setStatement parameter is true. A checkpoint should ** be set for operations that might fail (due to a constraint) part of ** the way through and which will need to undo some writes without having to ** rollback the whole transaction. For operations where all constraints ** can be checked before any changes are made to the database, it is never ** necessary to undo a write and the checkpoint should not be set. */ SQLITE_PRIVATE void sqlite3BeginWriteOperation(Parse *pParse, int setStatement, int iDb){ Parse *pToplevel = sqlite3ParseToplevel(pParse); sqlite3CodeVerifySchema(pParse, iDb); DbMaskSet(pToplevel->writeMask, iDb); pToplevel->isMultiWrite |= setStatement; } /* ** Indicate that the statement currently under construction might write ** more than one entry (example: deleting one row then inserting another, ** inserting multiple rows in a table, or inserting a row and index entries.) ** If an abort occurs after some of these writes have completed, then it will ** be necessary to undo the completed writes. */ SQLITE_PRIVATE void sqlite3MultiWrite(Parse *pParse){ Parse *pToplevel = sqlite3ParseToplevel(pParse); pToplevel->isMultiWrite = 1; } /* ** The code generator calls this routine if is discovers that it is ** possible to abort a statement prior to completion. In order to ** perform this abort without corrupting the database, we need to make ** sure that the statement is protected by a statement transaction. ** ** Technically, we only need to set the mayAbort flag if the ** isMultiWrite flag was previously set. There is a time dependency ** such that the abort must occur after the multiwrite. This makes ** some statements involving the REPLACE conflict resolution algorithm ** go a little faster. But taking advantage of this time dependency ** makes it more difficult to prove that the code is correct (in ** particular, it prevents us from writing an effective ** implementation of sqlite3AssertMayAbort()) and so we have chosen ** to take the safe route and skip the optimization. */ SQLITE_PRIVATE void sqlite3MayAbort(Parse *pParse){ Parse *pToplevel = sqlite3ParseToplevel(pParse); pToplevel->mayAbort = 1; } /* ** Code an OP_Halt that causes the vdbe to return an SQLITE_CONSTRAINT ** error. The onError parameter determines which (if any) of the statement ** and/or current transaction is rolled back. 
*/ SQLITE_PRIVATE void sqlite3HaltConstraint( Parse *pParse, /* Parsing context */ int errCode, /* extended error code */ int onError, /* Constraint type */ char *p4, /* Error message */ i8 p4type, /* P4_STATIC or P4_TRANSIENT */ u8 p5Errmsg /* P5_ErrMsg type */ ){ Vdbe *v = sqlite3GetVdbe(pParse); assert( (errCode&0xff)==SQLITE_CONSTRAINT ); if( onError==OE_Abort ){ sqlite3MayAbort(pParse); } sqlite3VdbeAddOp4(v, OP_Halt, errCode, onError, 0, p4, p4type); if( p5Errmsg ) sqlite3VdbeChangeP5(v, p5Errmsg); } /* ** Code an OP_Halt due to UNIQUE or PRIMARY KEY constraint violation. */ SQLITE_PRIVATE void sqlite3UniqueConstraint( Parse *pParse, /* Parsing context */ int onError, /* Constraint type */ Index *pIdx /* The index that triggers the constraint */ ){ char *zErr; int j; StrAccum errMsg; Table *pTab = pIdx->pTable; sqlite3StrAccumInit(&errMsg, pParse->db, 0, 0, 200); for(j=0; jnKeyCol; j++){ char *zCol = pTab->aCol[pIdx->aiColumn[j]].zName; if( j ) sqlite3StrAccumAppend(&errMsg, ", ", 2); sqlite3StrAccumAppendAll(&errMsg, pTab->zName); sqlite3StrAccumAppend(&errMsg, ".", 1); sqlite3StrAccumAppendAll(&errMsg, zCol); } zErr = sqlite3StrAccumFinish(&errMsg); sqlite3HaltConstraint(pParse, IsPrimaryKeyIndex(pIdx) ? SQLITE_CONSTRAINT_PRIMARYKEY : SQLITE_CONSTRAINT_UNIQUE, onError, zErr, P4_DYNAMIC, P5_ConstraintUnique); } /* ** Code an OP_Halt due to non-unique rowid. */ SQLITE_PRIVATE void sqlite3RowidConstraint( Parse *pParse, /* Parsing context */ int onError, /* Conflict resolution algorithm */ Table *pTab /* The table with the non-unique rowid */ ){ char *zMsg; int rc; if( pTab->iPKey>=0 ){ zMsg = sqlite3MPrintf(pParse->db, "%s.%s", pTab->zName, pTab->aCol[pTab->iPKey].zName); rc = SQLITE_CONSTRAINT_PRIMARYKEY; }else{ zMsg = sqlite3MPrintf(pParse->db, "%s.rowid", pTab->zName); rc = SQLITE_CONSTRAINT_ROWID; } sqlite3HaltConstraint(pParse, rc, onError, zMsg, P4_DYNAMIC, P5_ConstraintUnique); } /* ** Check to see if pIndex uses the collating sequence pColl. Return ** true if it does and false if it does not. */ #ifndef SQLITE_OMIT_REINDEX static int collationMatch(const char *zColl, Index *pIndex){ int i; assert( zColl!=0 ); for(i=0; inColumn; i++){ const char *z = pIndex->azColl[i]; assert( z!=0 || pIndex->aiColumn[i]<0 ); if( pIndex->aiColumn[i]>=0 && 0==sqlite3StrICmp(z, zColl) ){ return 1; } } return 0; } #endif /* ** Recompute all indices of pTab that use the collating sequence pColl. ** If pColl==0 then recompute all indices of pTab. */ #ifndef SQLITE_OMIT_REINDEX static void reindexTable(Parse *pParse, Table *pTab, char const *zColl){ Index *pIndex; /* An index associated with pTab */ for(pIndex=pTab->pIndex; pIndex; pIndex=pIndex->pNext){ if( zColl==0 || collationMatch(zColl, pIndex) ){ int iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema); sqlite3BeginWriteOperation(pParse, 0, iDb); sqlite3RefillIndex(pParse, pIndex, -1); } } } #endif /* ** Recompute all indices of all tables in all databases where the ** indices use the collating sequence pColl. If pColl==0 then recompute ** all indices everywhere. 
*/ #ifndef SQLITE_OMIT_REINDEX static void reindexDatabases(Parse *pParse, char const *zColl){ Db *pDb; /* A single database */ int iDb; /* The database index number */ sqlite3 *db = pParse->db; /* The database connection */ HashElem *k; /* For looping over tables in pDb */ Table *pTab; /* A table in the database */ assert( sqlite3BtreeHoldsAllMutexes(db) ); /* Needed for schema access */ for(iDb=0, pDb=db->aDb; iDbnDb; iDb++, pDb++){ assert( pDb!=0 ); for(k=sqliteHashFirst(&pDb->pSchema->tblHash); k; k=sqliteHashNext(k)){ pTab = (Table*)sqliteHashData(k); reindexTable(pParse, pTab, zColl); } } } #endif /* ** Generate code for the REINDEX command. ** ** REINDEX -- 1 ** REINDEX -- 2 ** REINDEX ?.? -- 3 ** REINDEX ?.? -- 4 ** ** Form 1 causes all indices in all attached databases to be rebuilt. ** Form 2 rebuilds all indices in all databases that use the named ** collating function. Forms 3 and 4 rebuild the named index or all ** indices associated with the named table. */ #ifndef SQLITE_OMIT_REINDEX SQLITE_PRIVATE void sqlite3Reindex(Parse *pParse, Token *pName1, Token *pName2){ CollSeq *pColl; /* Collating sequence to be reindexed, or NULL */ char *z; /* Name of a table or index */ const char *zDb; /* Name of the database */ Table *pTab; /* A table in the database */ Index *pIndex; /* An index associated with pTab */ int iDb; /* The database index number */ sqlite3 *db = pParse->db; /* The database connection */ Token *pObjName; /* Name of the table or index to be reindexed */ /* Read the database schema. If an error occurs, leave an error message ** and code in pParse and return NULL. */ if( SQLITE_OK!=sqlite3ReadSchema(pParse) ){ return; } if( pName1==0 ){ reindexDatabases(pParse, 0); return; }else if( NEVER(pName2==0) || pName2->z==0 ){ char *zColl; assert( pName1->z ); zColl = sqlite3NameFromToken(pParse->db, pName1); if( !zColl ) return; pColl = sqlite3FindCollSeq(db, ENC(db), zColl, 0); if( pColl ){ reindexDatabases(pParse, zColl); sqlite3DbFree(db, zColl); return; } sqlite3DbFree(db, zColl); } iDb = sqlite3TwoPartName(pParse, pName1, pName2, &pObjName); if( iDb<0 ) return; z = sqlite3NameFromToken(db, pObjName); if( z==0 ) return; zDb = db->aDb[iDb].zName; pTab = sqlite3FindTable(db, z, zDb); if( pTab ){ reindexTable(pParse, pTab, 0); sqlite3DbFree(db, z); return; } pIndex = sqlite3FindIndex(db, z, zDb); sqlite3DbFree(db, z); if( pIndex ){ sqlite3BeginWriteOperation(pParse, 0, iDb); sqlite3RefillIndex(pParse, pIndex, -1); return; } sqlite3ErrorMsg(pParse, "unable to identify the object to be reindexed"); } #endif /* ** Return a KeyInfo structure that is appropriate for the given Index. ** ** The KeyInfo structure for an index is cached in the Index object. ** So there might be multiple references to the returned pointer. The ** caller should not try to modify the KeyInfo object. ** ** The caller should invoke sqlite3KeyInfoUnref() on the returned object ** when it has finished using it. */ SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoOfIndex(Parse *pParse, Index *pIdx){ int i; int nCol = pIdx->nColumn; int nKey = pIdx->nKeyCol; KeyInfo *pKey; if( pParse->nErr ) return 0; if( pIdx->uniqNotNull ){ pKey = sqlite3KeyInfoAlloc(pParse->db, nKey, nCol-nKey); }else{ pKey = sqlite3KeyInfoAlloc(pParse->db, nCol, 0); } if( pKey ){ assert( sqlite3KeyInfoIsWriteable(pKey) ); for(i=0; iazColl[i]; assert( zColl!=0 ); pKey->aColl[i] = strcmp(zColl,"BINARY")==0 ? 
0 : sqlite3LocateCollSeq(pParse, zColl); pKey->aSortOrder[i] = pIdx->aSortOrder[i]; } if( pParse->nErr ){ sqlite3KeyInfoUnref(pKey); pKey = 0; } } return pKey; } #ifndef SQLITE_OMIT_CTE /* ** This routine is invoked once per CTE by the parser while parsing a ** WITH clause. */ SQLITE_PRIVATE With *sqlite3WithAdd( Parse *pParse, /* Parsing context */ With *pWith, /* Existing WITH clause, or NULL */ Token *pName, /* Name of the common-table */ ExprList *pArglist, /* Optional column name list for the table */ Select *pQuery /* Query used to initialize the table */ ){ sqlite3 *db = pParse->db; With *pNew; char *zName; /* Check that the CTE name is unique within this WITH clause. If ** not, store an error in the Parse structure. */ zName = sqlite3NameFromToken(pParse->db, pName); if( zName && pWith ){ int i; for(i=0; inCte; i++){ if( sqlite3StrICmp(zName, pWith->a[i].zName)==0 ){ sqlite3ErrorMsg(pParse, "duplicate WITH table name: %s", zName); } } } if( pWith ){ int nByte = sizeof(*pWith) + (sizeof(pWith->a[1]) * pWith->nCte); pNew = sqlite3DbRealloc(db, pWith, nByte); }else{ pNew = sqlite3DbMallocZero(db, sizeof(*pWith)); } assert( zName!=0 || pNew==0 ); assert( db->mallocFailed==0 || pNew==0 ); if( pNew==0 ){ sqlite3ExprListDelete(db, pArglist); sqlite3SelectDelete(db, pQuery); sqlite3DbFree(db, zName); pNew = pWith; }else{ pNew->a[pNew->nCte].pSelect = pQuery; pNew->a[pNew->nCte].pCols = pArglist; pNew->a[pNew->nCte].zName = zName; pNew->a[pNew->nCte].zErr = 0; pNew->nCte++; } return pNew; } /* ** Free the contents of the With object passed as the second argument. */ SQLITE_PRIVATE void sqlite3WithDelete(sqlite3 *db, With *pWith){ if( pWith ){ int i; for(i=0; inCte; i++){ struct Cte *pCte = &pWith->a[i]; sqlite3ExprListDelete(db, pCte->pCols); sqlite3SelectDelete(db, pCte->pSelect); sqlite3DbFree(db, pCte->zName); } sqlite3DbFree(db, pWith); } } #endif /* !defined(SQLITE_OMIT_CTE) */ /************** End of build.c ***********************************************/ /************** Begin file callback.c ****************************************/ /* ** 2005 May 23 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** ** This file contains functions used to access the internal hash tables ** of user defined functions and collation sequences. */ /* #include "sqliteInt.h" */ /* ** Invoke the 'collation needed' callback to request a collation sequence ** in the encoding enc of name zName, length nName. 
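**
** The callback invoked from here is the one an application registers through
** the public sqlite3_collation_needed() interface.  As an illustrative
** sketch only (collNeeded and myCompare below are hypothetical application
** functions, not part of SQLite):
**
**     static void collNeeded(void *pNotUsed, sqlite3 *db, int eTextRep,
**                            const char *zName){
**       sqlite3_create_collation(db, zName, eTextRep, 0, myCompare);
**     }
**     ...
**     sqlite3_collation_needed(db, 0, collNeeded);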
*/ static void callCollNeeded(sqlite3 *db, int enc, const char *zName){ assert( !db->xCollNeeded || !db->xCollNeeded16 ); if( db->xCollNeeded ){ char *zExternal = sqlite3DbStrDup(db, zName); if( !zExternal ) return; db->xCollNeeded(db->pCollNeededArg, db, enc, zExternal); sqlite3DbFree(db, zExternal); } #ifndef SQLITE_OMIT_UTF16 if( db->xCollNeeded16 ){ char const *zExternal; sqlite3_value *pTmp = sqlite3ValueNew(db); sqlite3ValueSetStr(pTmp, -1, zName, SQLITE_UTF8, SQLITE_STATIC); zExternal = sqlite3ValueText(pTmp, SQLITE_UTF16NATIVE); if( zExternal ){ db->xCollNeeded16(db->pCollNeededArg, db, (int)ENC(db), zExternal); } sqlite3ValueFree(pTmp); } #endif } /* ** This routine is called if the collation factory fails to deliver a ** collation function in the best encoding but there may be other versions ** of this collation function (for other text encodings) available. Use one ** of these instead if they exist. Avoid a UTF-8 <-> UTF-16 conversion if ** possible. */ static int synthCollSeq(sqlite3 *db, CollSeq *pColl){ CollSeq *pColl2; char *z = pColl->zName; int i; static const u8 aEnc[] = { SQLITE_UTF16BE, SQLITE_UTF16LE, SQLITE_UTF8 }; for(i=0; i<3; i++){ pColl2 = sqlite3FindCollSeq(db, aEnc[i], z, 0); if( pColl2->xCmp!=0 ){ memcpy(pColl, pColl2, sizeof(CollSeq)); pColl->xDel = 0; /* Do not copy the destructor */ return SQLITE_OK; } } return SQLITE_ERROR; } /* ** This function is responsible for invoking the collation factory callback ** or substituting a collation sequence of a different encoding when the ** requested collation sequence is not available in the desired encoding. ** ** If it is not NULL, then pColl must point to the database native encoding ** collation sequence with name zName, length nName. ** ** The return value is either the collation sequence to be used in database ** db for collation type name zName, length nName, or NULL, if no collation ** sequence can be found. If no collation is found, leave an error message. ** ** See also: sqlite3LocateCollSeq(), sqlite3FindCollSeq() */ SQLITE_PRIVATE CollSeq *sqlite3GetCollSeq( Parse *pParse, /* Parsing context */ u8 enc, /* The desired encoding for the collating sequence */ CollSeq *pColl, /* Collating sequence with native encoding, or NULL */ const char *zName /* Collating sequence name */ ){ CollSeq *p; sqlite3 *db = pParse->db; p = pColl; if( !p ){ p = sqlite3FindCollSeq(db, enc, zName, 0); } if( !p || !p->xCmp ){ /* No collation sequence of this type for this encoding is registered. ** Call the collation factory to see if it can supply us with one. */ callCollNeeded(db, enc, zName); p = sqlite3FindCollSeq(db, enc, zName, 0); } if( p && !p->xCmp && synthCollSeq(db, p) ){ p = 0; } assert( !p || p->xCmp ); if( p==0 ){ sqlite3ErrorMsg(pParse, "no such collation sequence: %s", zName); } return p; } /* ** This routine is called on a collation sequence before it is used to ** check that it is defined. An undefined collation sequence exists when ** a database is loaded that contains references to collation sequences ** that have not been defined by sqlite3_create_collation() etc. ** ** If required, this routine calls the 'collation needed' callback to ** request a definition of the collating sequence. If this doesn't work, ** an equivalent collating sequence that uses a text encoding different ** from the main database is substituted, if one is available. 
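**
** For example (the schema names here are purely illustrative), if a database
** contains CREATE INDEX i1 ON t1(a COLLATE mycoll) but no collation named
** "mycoll" has been registered, either directly or via the collation-needed
** callback, then preparing a statement that needs index i1 fails with the
** error "no such collation sequence: mycoll".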
*/ SQLITE_PRIVATE int sqlite3CheckCollSeq(Parse *pParse, CollSeq *pColl){ if( pColl ){ const char *zName = pColl->zName; sqlite3 *db = pParse->db; CollSeq *p = sqlite3GetCollSeq(pParse, ENC(db), pColl, zName); if( !p ){ return SQLITE_ERROR; } assert( p==pColl ); } return SQLITE_OK; } /* ** Locate and return an entry from the db.aCollSeq hash table. If the entry ** specified by zName and nName is not found and parameter 'create' is ** true, then create a new entry. Otherwise return NULL. ** ** Each pointer stored in the sqlite3.aCollSeq hash table contains an ** array of three CollSeq structures. The first is the collation sequence ** preferred for UTF-8, the second UTF-16le, and the third UTF-16be. ** ** Stored immediately after the three collation sequences is a copy of ** the collation sequence name. A pointer to this string is stored in ** each collation sequence structure. */ static CollSeq *findCollSeqEntry( sqlite3 *db, /* Database connection */ const char *zName, /* Name of the collating sequence */ int create /* Create a new entry if true */ ){ CollSeq *pColl; pColl = sqlite3HashFind(&db->aCollSeq, zName); if( 0==pColl && create ){ int nName = sqlite3Strlen30(zName); pColl = sqlite3DbMallocZero(db, 3*sizeof(*pColl) + nName + 1); if( pColl ){ CollSeq *pDel = 0; pColl[0].zName = (char*)&pColl[3]; pColl[0].enc = SQLITE_UTF8; pColl[1].zName = (char*)&pColl[3]; pColl[1].enc = SQLITE_UTF16LE; pColl[2].zName = (char*)&pColl[3]; pColl[2].enc = SQLITE_UTF16BE; memcpy(pColl[0].zName, zName, nName); pColl[0].zName[nName] = 0; pDel = sqlite3HashInsert(&db->aCollSeq, pColl[0].zName, pColl); /* If a malloc() failure occurred in sqlite3HashInsert(), it will ** return the pColl pointer to be deleted (because it wasn't added ** to the hash table). */ assert( pDel==0 || pDel==pColl ); if( pDel!=0 ){ db->mallocFailed = 1; sqlite3DbFree(db, pDel); pColl = 0; } } } return pColl; } /* ** Parameter zName points to a UTF-8 encoded string nName bytes long. ** Return the CollSeq* pointer for the collation sequence named zName ** for the encoding 'enc' from the database 'db'. ** ** If the entry specified is not found and 'create' is true, then create a ** new entry. Otherwise return NULL. ** ** A separate function sqlite3LocateCollSeq() is a wrapper around ** this routine. sqlite3LocateCollSeq() invokes the collation factory ** if necessary and generates an error message if the collating sequence ** cannot be found. ** ** See also: sqlite3LocateCollSeq(), sqlite3GetCollSeq() */ SQLITE_PRIVATE CollSeq *sqlite3FindCollSeq( sqlite3 *db, u8 enc, const char *zName, int create ){ CollSeq *pColl; if( zName ){ pColl = findCollSeqEntry(db, zName, create); }else{ pColl = db->pDfltColl; } assert( SQLITE_UTF8==1 && SQLITE_UTF16LE==2 && SQLITE_UTF16BE==3 ); assert( enc>=SQLITE_UTF8 && enc<=SQLITE_UTF16BE ); if( pColl ) pColl += enc-1; return pColl; } /* During the search for the best function definition, this procedure ** is called to test how well the function passed as the first argument ** matches the request for a function with nArg arguments in a system ** that uses encoding enc. The value returned indicates how well the ** request is matched. A higher value indicates a better match. ** ** If nArg is -1 that means to only return a match (non-zero) if p->nArg ** is also -1. In other words, we are searching for a function that ** takes a variable number of arguments. 
** ** If nArg is -2 that means that we are searching for any function ** regardless of the number of arguments it uses, so return a positive ** match score for any ** ** The returned value is always between 0 and 6, as follows: ** ** 0: Not a match. ** 1: UTF8/16 conversion required and function takes any number of arguments. ** 2: UTF16 byte order change required and function takes any number of args. ** 3: encoding matches and function takes any number of arguments ** 4: UTF8/16 conversion required - argument count matches exactly ** 5: UTF16 byte order conversion required - argument count matches exactly ** 6: Perfect match: encoding and argument count match exactly. ** ** If nArg==(-2) then any function with a non-null xStep or xFunc is ** a perfect match and any function with both xStep and xFunc NULL is ** a non-match. */ #define FUNC_PERFECT_MATCH 6 /* The score for a perfect match */ static int matchQuality( FuncDef *p, /* The function we are evaluating for match quality */ int nArg, /* Desired number of arguments. (-1)==any */ u8 enc /* Desired text encoding */ ){ int match; /* nArg of -2 is a special case */ if( nArg==(-2) ) return (p->xFunc==0 && p->xStep==0) ? 0 : FUNC_PERFECT_MATCH; /* Wrong number of arguments means "no match" */ if( p->nArg!=nArg && p->nArg>=0 ) return 0; /* Give a better score to a function with a specific number of arguments ** than to function that accepts any number of arguments. */ if( p->nArg==nArg ){ match = 4; }else{ match = 1; } /* Bonus points if the text encoding matches */ if( enc==(p->funcFlags & SQLITE_FUNC_ENCMASK) ){ match += 2; /* Exact encoding match */ }else if( (enc & p->funcFlags & 2)!=0 ){ match += 1; /* Both are UTF16, but with different byte orders */ } return match; } /* ** Search a FuncDefHash for a function with the given name. Return ** a pointer to the matching FuncDef if found, or 0 if there is no match. */ static FuncDef *functionSearch( FuncDefHash *pHash, /* Hash table to search */ int h, /* Hash of the name */ const char *zFunc, /* Name of function */ int nFunc /* Number of bytes in zFunc */ ){ FuncDef *p; for(p=pHash->a[h]; p; p=p->pHash){ if( sqlite3StrNICmp(p->zName, zFunc, nFunc)==0 && p->zName[nFunc]==0 ){ return p; } } return 0; } /* ** Insert a new FuncDef into a FuncDefHash hash table. */ SQLITE_PRIVATE void sqlite3FuncDefInsert( FuncDefHash *pHash, /* The hash table into which to insert */ FuncDef *pDef /* The function definition to insert */ ){ FuncDef *pOther; int nName = sqlite3Strlen30(pDef->zName); u8 c1 = (u8)pDef->zName[0]; int h = (sqlite3UpperToLower[c1] + nName) % ArraySize(pHash->a); pOther = functionSearch(pHash, h, pDef->zName, nName); if( pOther ){ assert( pOther!=pDef && pOther->pNext!=pDef ); pDef->pNext = pOther->pNext; pOther->pNext = pDef; }else{ pDef->pNext = 0; pDef->pHash = pHash->a[h]; pHash->a[h] = pDef; } } /* ** Locate a user function given a name, a number of arguments and a flag ** indicating whether the function prefers UTF-16 over UTF-8. Return a ** pointer to the FuncDef structure that defines that function, or return ** NULL if the function does not exist. ** ** If the createFlag argument is true, then a new (blank) FuncDef ** structure is created and liked into the "db" structure if a ** no matching function previously existed. ** ** If nArg is -2, then the first valid function found is returned. A ** function is valid if either xFunc or xStep is non-zero. The nArg==(-2) ** case is used to see if zName is a valid function name for some number ** of arguments. 
If nArg is -2, then createFlag must be 0. ** ** If createFlag is false, then a function with the required name and ** number of arguments may be returned even if the eTextRep flag does not ** match that requested. */ SQLITE_PRIVATE FuncDef *sqlite3FindFunction( sqlite3 *db, /* An open database */ const char *zName, /* Name of the function. Not null-terminated */ int nName, /* Number of characters in the name */ int nArg, /* Number of arguments. -1 means any number */ u8 enc, /* Preferred text encoding */ u8 createFlag /* Create new entry if true and does not otherwise exist */ ){ FuncDef *p; /* Iterator variable */ FuncDef *pBest = 0; /* Best match found so far */ int bestScore = 0; /* Score of best match */ int h; /* Hash value */ assert( nArg>=(-2) ); assert( nArg>=(-1) || createFlag==0 ); h = (sqlite3UpperToLower[(u8)zName[0]] + nName) % ArraySize(db->aFunc.a); /* First search for a match amongst the application-defined functions. */ p = functionSearch(&db->aFunc, h, zName, nName); while( p ){ int score = matchQuality(p, nArg, enc); if( score>bestScore ){ pBest = p; bestScore = score; } p = p->pNext; } /* If no match is found, search the built-in functions. ** ** If the SQLITE_PreferBuiltin flag is set, then search the built-in ** functions even if a prior app-defined function was found. And give ** priority to built-in functions. ** ** Except, if createFlag is true, that means that we are trying to ** install a new function. Whatever FuncDef structure is returned it will ** have fields overwritten with new information appropriate for the ** new function. But the FuncDefs for built-in functions are read-only. ** So we must not search for built-ins when creating a new function. */ if( !createFlag && (pBest==0 || (db->flags & SQLITE_PreferBuiltin)!=0) ){ FuncDefHash *pHash = &GLOBAL(FuncDefHash, sqlite3GlobalFunctions); bestScore = 0; p = functionSearch(pHash, h, zName, nName); while( p ){ int score = matchQuality(p, nArg, enc); if( score>bestScore ){ pBest = p; bestScore = score; } p = p->pNext; } } /* If the createFlag parameter is true and the search did not reveal an ** exact match for the name, number of arguments and encoding, then add a ** new entry to the hash table and return it. */ if( createFlag && bestScorezName = (char *)&pBest[1]; pBest->nArg = (u16)nArg; pBest->funcFlags = enc; memcpy(pBest->zName, zName, nName); pBest->zName[nName] = 0; sqlite3FuncDefInsert(&db->aFunc, pBest); } if( pBest && (pBest->xStep || pBest->xFunc || createFlag) ){ return pBest; } return 0; } /* ** Free all resources held by the schema structure. The void* argument points ** at a Schema struct. This function does not call sqlite3DbFree(db, ) on the ** pointer itself, it just cleans up subsidiary resources (i.e. the contents ** of the schema hash tables). ** ** The Schema.cache_size variable is not cleared. 
*/ SQLITE_PRIVATE void sqlite3SchemaClear(void *p){ Hash temp1; Hash temp2; HashElem *pElem; Schema *pSchema = (Schema *)p; temp1 = pSchema->tblHash; temp2 = pSchema->trigHash; sqlite3HashInit(&pSchema->trigHash); sqlite3HashClear(&pSchema->idxHash); for(pElem=sqliteHashFirst(&temp2); pElem; pElem=sqliteHashNext(pElem)){ sqlite3DeleteTrigger(0, (Trigger*)sqliteHashData(pElem)); } sqlite3HashClear(&temp2); sqlite3HashInit(&pSchema->tblHash); for(pElem=sqliteHashFirst(&temp1); pElem; pElem=sqliteHashNext(pElem)){ Table *pTab = sqliteHashData(pElem); sqlite3DeleteTable(0, pTab); } sqlite3HashClear(&temp1); sqlite3HashClear(&pSchema->fkeyHash); pSchema->pSeqTab = 0; if( pSchema->schemaFlags & DB_SchemaLoaded ){ pSchema->iGeneration++; pSchema->schemaFlags &= ~DB_SchemaLoaded; } } /* ** Find and return the schema associated with a BTree. Create ** a new one if necessary. */ SQLITE_PRIVATE Schema *sqlite3SchemaGet(sqlite3 *db, Btree *pBt){ Schema * p; if( pBt ){ p = (Schema *)sqlite3BtreeSchema(pBt, sizeof(Schema), sqlite3SchemaClear); }else{ p = (Schema *)sqlite3DbMallocZero(0, sizeof(Schema)); } if( !p ){ db->mallocFailed = 1; }else if ( 0==p->file_format ){ sqlite3HashInit(&p->tblHash); sqlite3HashInit(&p->idxHash); sqlite3HashInit(&p->trigHash); sqlite3HashInit(&p->fkeyHash); p->enc = SQLITE_UTF8; } return p; } /************** End of callback.c ********************************************/ /************** Begin file delete.c ******************************************/ /* ** 2001 September 15 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** This file contains C code routines that are called by the parser ** in order to generate code for DELETE FROM statements. */ /* #include "sqliteInt.h" */ /* ** While a SrcList can in general represent multiple tables and subqueries ** (as in the FROM clause of a SELECT statement) in this case it contains ** the name of a single table, as one might find in an INSERT, DELETE, ** or UPDATE statement. Look up that table in the symbol table and ** return a pointer. Set an error message and return NULL if the table ** name is not found or if any other error occurs. ** ** The following fields are initialized appropriate in pSrc: ** ** pSrc->a[0].pTab Pointer to the Table object ** pSrc->a[0].pIndex Pointer to the INDEXED BY index, if there is one ** */ SQLITE_PRIVATE Table *sqlite3SrcListLookup(Parse *pParse, SrcList *pSrc){ struct SrcList_item *pItem = pSrc->a; Table *pTab; assert( pItem && pSrc->nSrc==1 ); pTab = sqlite3LocateTableItem(pParse, 0, pItem); sqlite3DeleteTable(pParse->db, pItem->pTab); pItem->pTab = pTab; if( pTab ){ pTab->nRef++; } if( sqlite3IndexedByLookup(pParse, pItem) ){ pTab = 0; } return pTab; } /* ** Check to make sure the given table is writable. If it is not ** writable, generate an error message and return 1. If it is ** writable return 0; */ SQLITE_PRIVATE int sqlite3IsReadOnly(Parse *pParse, Table *pTab, int viewOk){ /* A table is not writable under the following circumstances: ** ** 1) It is a virtual table and no implementation of the xUpdate method ** has been provided, or ** 2) It is a system table (i.e. sqlite_master), this call is not ** part of a nested parse and writable_schema pragma has not ** been specified. 
** ** In either case leave an error message in pParse and return non-zero. */ if( ( IsVirtual(pTab) && sqlite3GetVTable(pParse->db, pTab)->pMod->pModule->xUpdate==0 ) || ( (pTab->tabFlags & TF_Readonly)!=0 && (pParse->db->flags & SQLITE_WriteSchema)==0 && pParse->nested==0 ) ){ sqlite3ErrorMsg(pParse, "table %s may not be modified", pTab->zName); return 1; } #ifndef SQLITE_OMIT_VIEW if( !viewOk && pTab->pSelect ){ sqlite3ErrorMsg(pParse,"cannot modify %s because it is a view",pTab->zName); return 1; } #endif return 0; } #if !defined(SQLITE_OMIT_VIEW) && !defined(SQLITE_OMIT_TRIGGER) /* ** Evaluate a view and store its result in an ephemeral table. The ** pWhere argument is an optional WHERE clause that restricts the ** set of rows in the view that are to be added to the ephemeral table. */ SQLITE_PRIVATE void sqlite3MaterializeView( Parse *pParse, /* Parsing context */ Table *pView, /* View definition */ Expr *pWhere, /* Optional WHERE clause to be added */ int iCur /* Cursor number for ephemeral table */ ){ SelectDest dest; Select *pSel; SrcList *pFrom; sqlite3 *db = pParse->db; int iDb = sqlite3SchemaToIndex(db, pView->pSchema); pWhere = sqlite3ExprDup(db, pWhere, 0); pFrom = sqlite3SrcListAppend(db, 0, 0, 0); if( pFrom ){ assert( pFrom->nSrc==1 ); pFrom->a[0].zName = sqlite3DbStrDup(db, pView->zName); pFrom->a[0].zDatabase = sqlite3DbStrDup(db, db->aDb[iDb].zName); assert( pFrom->a[0].pOn==0 ); assert( pFrom->a[0].pUsing==0 ); } pSel = sqlite3SelectNew(pParse, 0, pFrom, pWhere, 0, 0, 0, 0, 0, 0); sqlite3SelectDestInit(&dest, SRT_EphemTab, iCur); sqlite3Select(pParse, pSel, &dest); sqlite3SelectDelete(db, pSel); } #endif /* !defined(SQLITE_OMIT_VIEW) && !defined(SQLITE_OMIT_TRIGGER) */ #if defined(SQLITE_ENABLE_UPDATE_DELETE_LIMIT) && !defined(SQLITE_OMIT_SUBQUERY) /* ** Generate an expression tree to implement the WHERE, ORDER BY, ** and LIMIT/OFFSET portion of DELETE and UPDATE statements. ** ** DELETE FROM table_wxyz WHERE a<5 ORDER BY a LIMIT 1; ** \__________________________/ ** pLimitWhere (pInClause) */ SQLITE_PRIVATE Expr *sqlite3LimitWhere( Parse *pParse, /* The parser context */ SrcList *pSrc, /* the FROM clause -- which tables to scan */ Expr *pWhere, /* The WHERE clause. May be null */ ExprList *pOrderBy, /* The ORDER BY clause. May be null */ Expr *pLimit, /* The LIMIT clause. May be null */ Expr *pOffset, /* The OFFSET clause. May be null */ char *zStmtType /* Either DELETE or UPDATE. For err msgs. */ ){ Expr *pWhereRowid = NULL; /* WHERE rowid .. */ Expr *pInClause = NULL; /* WHERE rowid IN ( select ) */ Expr *pSelectRowid = NULL; /* SELECT rowid ... */ ExprList *pEList = NULL; /* Expression list contaning only pSelectRowid */ SrcList *pSelectSrc = NULL; /* SELECT rowid FROM x ... (dup of pSrc) */ Select *pSelect = NULL; /* Complete SELECT tree */ /* Check that there isn't an ORDER BY without a LIMIT clause. */ if( pOrderBy && (pLimit == 0) ) { sqlite3ErrorMsg(pParse, "ORDER BY without LIMIT on %s", zStmtType); goto limit_where_cleanup_2; } /* We only need to generate a select expression if there ** is a limit/offset term to enforce. */ if( pLimit == 0 ) { /* if pLimit is null, pOffset will always be null as well. */ assert( pOffset == 0 ); return pWhere; } /* Generate a select expression tree to enforce the limit/offset ** term for the DELETE or UPDATE statement. 
For example: ** DELETE FROM table_a WHERE col1=1 ORDER BY col2 LIMIT 1 OFFSET 1 ** becomes: ** DELETE FROM table_a WHERE rowid IN ( ** SELECT rowid FROM table_a WHERE col1=1 ORDER BY col2 LIMIT 1 OFFSET 1 ** ); */ pSelectRowid = sqlite3PExpr(pParse, TK_ROW, 0, 0, 0); if( pSelectRowid == 0 ) goto limit_where_cleanup_2; pEList = sqlite3ExprListAppend(pParse, 0, pSelectRowid); if( pEList == 0 ) goto limit_where_cleanup_2; /* duplicate the FROM clause as it is needed by both the DELETE/UPDATE tree ** and the SELECT subtree. */ pSelectSrc = sqlite3SrcListDup(pParse->db, pSrc, 0); if( pSelectSrc == 0 ) { sqlite3ExprListDelete(pParse->db, pEList); goto limit_where_cleanup_2; } /* generate the SELECT expression tree. */ pSelect = sqlite3SelectNew(pParse,pEList,pSelectSrc,pWhere,0,0, pOrderBy,0,pLimit,pOffset); if( pSelect == 0 ) return 0; /* now generate the new WHERE rowid IN clause for the DELETE/UDPATE */ pWhereRowid = sqlite3PExpr(pParse, TK_ROW, 0, 0, 0); if( pWhereRowid == 0 ) goto limit_where_cleanup_1; pInClause = sqlite3PExpr(pParse, TK_IN, pWhereRowid, 0, 0); if( pInClause == 0 ) goto limit_where_cleanup_1; pInClause->x.pSelect = pSelect; pInClause->flags |= EP_xIsSelect; sqlite3ExprSetHeightAndFlags(pParse, pInClause); return pInClause; /* something went wrong. clean up anything allocated. */ limit_where_cleanup_1: sqlite3SelectDelete(pParse->db, pSelect); return 0; limit_where_cleanup_2: sqlite3ExprDelete(pParse->db, pWhere); sqlite3ExprListDelete(pParse->db, pOrderBy); sqlite3ExprDelete(pParse->db, pLimit); sqlite3ExprDelete(pParse->db, pOffset); return 0; } #endif /* defined(SQLITE_ENABLE_UPDATE_DELETE_LIMIT) */ /* && !defined(SQLITE_OMIT_SUBQUERY) */ /* ** Generate code for a DELETE FROM statement. ** ** DELETE FROM table_wxyz WHERE a<5 AND b NOT NULL; ** \________/ \________________/ ** pTabList pWhere */ SQLITE_PRIVATE void sqlite3DeleteFrom( Parse *pParse, /* The parser context */ SrcList *pTabList, /* The table from which we should delete things */ Expr *pWhere /* The WHERE clause. 
May be null */ ){ Vdbe *v; /* The virtual database engine */ Table *pTab; /* The table from which records will be deleted */ const char *zDb; /* Name of database holding pTab */ int i; /* Loop counter */ WhereInfo *pWInfo; /* Information about the WHERE clause */ Index *pIdx; /* For looping over indices of the table */ int iTabCur; /* Cursor number for the table */ int iDataCur = 0; /* VDBE cursor for the canonical data source */ int iIdxCur = 0; /* Cursor number of the first index */ int nIdx; /* Number of indices */ sqlite3 *db; /* Main database structure */ AuthContext sContext; /* Authorization context */ NameContext sNC; /* Name context to resolve expressions in */ int iDb; /* Database number */ int memCnt = -1; /* Memory cell used for change counting */ int rcauth; /* Value returned by authorization callback */ int okOnePass; /* True for one-pass algorithm without the FIFO */ int aiCurOnePass[2]; /* The write cursors opened by WHERE_ONEPASS */ u8 *aToOpen = 0; /* Open cursor iTabCur+j if aToOpen[j] is true */ Index *pPk; /* The PRIMARY KEY index on the table */ int iPk = 0; /* First of nPk registers holding PRIMARY KEY value */ i16 nPk = 1; /* Number of columns in the PRIMARY KEY */ int iKey; /* Memory cell holding key of row to be deleted */ i16 nKey; /* Number of memory cells in the row key */ int iEphCur = 0; /* Ephemeral table holding all primary key values */ int iRowSet = 0; /* Register for rowset of rows to delete */ int addrBypass = 0; /* Address of jump over the delete logic */ int addrLoop = 0; /* Top of the delete loop */ int addrDelete = 0; /* Jump directly to the delete logic */ int addrEphOpen = 0; /* Instruction to open the Ephemeral table */ #ifndef SQLITE_OMIT_TRIGGER int isView; /* True if attempting to delete from a view */ Trigger *pTrigger; /* List of table triggers, if required */ #endif memset(&sContext, 0, sizeof(sContext)); db = pParse->db; if( pParse->nErr || db->mallocFailed ){ goto delete_from_cleanup; } assert( pTabList->nSrc==1 ); /* Locate the table which we want to delete. This table has to be ** put in an SrcList structure because some of the subroutines we ** will be calling are designed to work with multiple tables and expect ** an SrcList* parameter instead of just a Table* parameter. */ pTab = sqlite3SrcListLookup(pParse, pTabList); if( pTab==0 ) goto delete_from_cleanup; /* Figure out if we have any triggers and if the table being ** deleted from is a view */ #ifndef SQLITE_OMIT_TRIGGER pTrigger = sqlite3TriggersExist(pParse, pTab, TK_DELETE, 0, 0); isView = pTab->pSelect!=0; #else # define pTrigger 0 # define isView 0 #endif #ifdef SQLITE_OMIT_VIEW # undef isView # define isView 0 #endif /* If pTab is really a view, make sure it has been initialized. */ if( sqlite3ViewGetColumnNames(pParse, pTab) ){ goto delete_from_cleanup; } if( sqlite3IsReadOnly(pParse, pTab, (pTrigger?1:0)) ){ goto delete_from_cleanup; } iDb = sqlite3SchemaToIndex(db, pTab->pSchema); assert( iDbnDb ); zDb = db->aDb[iDb].zName; rcauth = sqlite3AuthCheck(pParse, SQLITE_DELETE, pTab->zName, 0, zDb); assert( rcauth==SQLITE_OK || rcauth==SQLITE_DENY || rcauth==SQLITE_IGNORE ); if( rcauth==SQLITE_DENY ){ goto delete_from_cleanup; } assert(!isView || pTrigger); /* Assign cursor numbers to the table and all its indices. 
*/ assert( pTabList->nSrc==1 ); iTabCur = pTabList->a[0].iCursor = pParse->nTab++; for(nIdx=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, nIdx++){ pParse->nTab++; } /* Start the view context */ if( isView ){ sqlite3AuthContextPush(pParse, &sContext, pTab->zName); } /* Begin generating code. */ v = sqlite3GetVdbe(pParse); if( v==0 ){ goto delete_from_cleanup; } if( pParse->nested==0 ) sqlite3VdbeCountChanges(v); sqlite3BeginWriteOperation(pParse, 1, iDb); /* If we are trying to delete from a view, realize that view into ** an ephemeral table. */ #if !defined(SQLITE_OMIT_VIEW) && !defined(SQLITE_OMIT_TRIGGER) if( isView ){ sqlite3MaterializeView(pParse, pTab, pWhere, iTabCur); iDataCur = iIdxCur = iTabCur; } #endif /* Resolve the column names in the WHERE clause. */ memset(&sNC, 0, sizeof(sNC)); sNC.pParse = pParse; sNC.pSrcList = pTabList; if( sqlite3ResolveExprNames(&sNC, pWhere) ){ goto delete_from_cleanup; } /* Initialize the counter of the number of rows deleted, if ** we are counting rows. */ if( db->flags & SQLITE_CountRows ){ memCnt = ++pParse->nMem; sqlite3VdbeAddOp2(v, OP_Integer, 0, memCnt); } #ifndef SQLITE_OMIT_TRUNCATE_OPTIMIZATION /* Special case: A DELETE without a WHERE clause deletes everything. ** It is easier just to erase the whole table. Prior to version 3.6.5, ** this optimization caused the row change count (the value returned by ** API function sqlite3_count_changes) to be set incorrectly. */ if( rcauth==SQLITE_OK && pWhere==0 && !pTrigger && !IsVirtual(pTab) && 0==sqlite3FkRequired(pParse, pTab, 0, 0) ){ assert( !isView ); sqlite3TableLock(pParse, iDb, pTab->tnum, 1, pTab->zName); if( HasRowid(pTab) ){ sqlite3VdbeAddOp4(v, OP_Clear, pTab->tnum, iDb, memCnt, pTab->zName, P4_STATIC); } for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ assert( pIdx->pSchema==pTab->pSchema ); sqlite3VdbeAddOp2(v, OP_Clear, pIdx->tnum, iDb); } }else #endif /* SQLITE_OMIT_TRUNCATE_OPTIMIZATION */ { if( HasRowid(pTab) ){ /* For a rowid table, initialize the RowSet to an empty set */ pPk = 0; nPk = 1; iRowSet = ++pParse->nMem; sqlite3VdbeAddOp2(v, OP_Null, 0, iRowSet); }else{ /* For a WITHOUT ROWID table, create an ephemeral table used to ** hold all primary keys for rows to be deleted. */ pPk = sqlite3PrimaryKeyIndex(pTab); assert( pPk!=0 ); nPk = pPk->nKeyCol; iPk = pParse->nMem+1; pParse->nMem += nPk; iEphCur = pParse->nTab++; addrEphOpen = sqlite3VdbeAddOp2(v, OP_OpenEphemeral, iEphCur, nPk); sqlite3VdbeSetP4KeyInfo(pParse, pPk); } /* Construct a query to find the rowid or primary key for every row ** to be deleted, based on the WHERE clause. */ pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, 0, 0, WHERE_ONEPASS_DESIRED|WHERE_DUPLICATES_OK, iTabCur+1); if( pWInfo==0 ) goto delete_from_cleanup; okOnePass = sqlite3WhereOkOnePass(pWInfo, aiCurOnePass); /* Keep track of the number of rows to be deleted */ if( db->flags & SQLITE_CountRows ){ sqlite3VdbeAddOp2(v, OP_AddImm, memCnt, 1); } /* Extract the rowid or primary key for the current row */ if( pPk ){ for(i=0; iaiColumn[i], iPk+i); } iKey = iPk; }else{ iKey = pParse->nMem + 1; iKey = sqlite3ExprCodeGetColumn(pParse, pTab, -1, iTabCur, iKey, 0); if( iKey>pParse->nMem ) pParse->nMem = iKey; } if( okOnePass ){ /* For ONEPASS, no need to store the rowid/primary-key. There is only ** one, so just keep it in its register(s) and fall through to the ** delete code. 
*/ nKey = nPk; /* OP_Found will use an unpacked key */ aToOpen = sqlite3DbMallocRaw(db, nIdx+2); if( aToOpen==0 ){ sqlite3WhereEnd(pWInfo); goto delete_from_cleanup; } memset(aToOpen, 1, nIdx+1); aToOpen[nIdx+1] = 0; if( aiCurOnePass[0]>=0 ) aToOpen[aiCurOnePass[0]-iTabCur] = 0; if( aiCurOnePass[1]>=0 ) aToOpen[aiCurOnePass[1]-iTabCur] = 0; if( addrEphOpen ) sqlite3VdbeChangeToNoop(v, addrEphOpen); addrDelete = sqlite3VdbeAddOp0(v, OP_Goto); /* Jump to DELETE logic */ }else if( pPk ){ /* Construct a composite key for the row to be deleted and remember it */ iKey = ++pParse->nMem; nKey = 0; /* Zero tells OP_Found to use a composite key */ sqlite3VdbeAddOp4(v, OP_MakeRecord, iPk, nPk, iKey, sqlite3IndexAffinityStr(v, pPk), nPk); sqlite3VdbeAddOp2(v, OP_IdxInsert, iEphCur, iKey); }else{ /* Get the rowid of the row to be deleted and remember it in the RowSet */ nKey = 1; /* OP_Seek always uses a single rowid */ sqlite3VdbeAddOp2(v, OP_RowSetAdd, iRowSet, iKey); } /* End of the WHERE loop */ sqlite3WhereEnd(pWInfo); if( okOnePass ){ /* Bypass the delete logic below if the WHERE loop found zero rows */ addrBypass = sqlite3VdbeMakeLabel(v); sqlite3VdbeAddOp2(v, OP_Goto, 0, addrBypass); sqlite3VdbeJumpHere(v, addrDelete); } /* Unless this is a view, open cursors for the table we are ** deleting from and all its indices. If this is a view, then the ** only effect this statement has is to fire the INSTEAD OF ** triggers. */ if( !isView ){ testcase( IsVirtual(pTab) ); sqlite3OpenTableAndIndices(pParse, pTab, OP_OpenWrite, iTabCur, aToOpen, &iDataCur, &iIdxCur); assert( pPk || IsVirtual(pTab) || iDataCur==iTabCur ); assert( pPk || IsVirtual(pTab) || iIdxCur==iDataCur+1 ); } /* Set up a loop over the rowids/primary-keys that were found in the ** where-clause loop above. */ if( okOnePass ){ /* Just one row. Hence the top-of-loop is a no-op */ assert( nKey==nPk ); /* OP_Found will use an unpacked key */ assert( !IsVirtual(pTab) ); if( aToOpen[iDataCur-iTabCur] ){ assert( pPk!=0 || pTab->pSelect!=0 ); sqlite3VdbeAddOp4Int(v, OP_NotFound, iDataCur, addrBypass, iKey, nKey); VdbeCoverage(v); } }else if( pPk ){ addrLoop = sqlite3VdbeAddOp1(v, OP_Rewind, iEphCur); VdbeCoverage(v); sqlite3VdbeAddOp2(v, OP_RowKey, iEphCur, iKey); assert( nKey==0 ); /* OP_Found will use a composite key */ }else{ addrLoop = sqlite3VdbeAddOp3(v, OP_RowSetRead, iRowSet, 0, iKey); VdbeCoverage(v); assert( nKey==1 ); } /* Delete the row */ #ifndef SQLITE_OMIT_VIRTUALTABLE if( IsVirtual(pTab) ){ const char *pVTab = (const char *)sqlite3GetVTable(db, pTab); sqlite3VtabMakeWritable(pParse, pTab); sqlite3VdbeAddOp4(v, OP_VUpdate, 0, 1, iKey, pVTab, P4_VTAB); sqlite3VdbeChangeP5(v, OE_Abort); sqlite3MayAbort(pParse); }else #endif { int count = (pParse->nested==0); /* True to count changes */ sqlite3GenerateRowDelete(pParse, pTab, pTrigger, iDataCur, iIdxCur, iKey, nKey, count, OE_Default, okOnePass); } /* End of the loop over all rowids/primary-keys. */ if( okOnePass ){ sqlite3VdbeResolveLabel(v, addrBypass); }else if( pPk ){ sqlite3VdbeAddOp2(v, OP_Next, iEphCur, addrLoop+1); VdbeCoverage(v); sqlite3VdbeJumpHere(v, addrLoop); }else{ sqlite3VdbeAddOp2(v, OP_Goto, 0, addrLoop); sqlite3VdbeJumpHere(v, addrLoop); } /* Close the cursors open on the table and its indexes. 
*/ if( !isView && !IsVirtual(pTab) ){ if( !pPk ) sqlite3VdbeAddOp1(v, OP_Close, iDataCur); for(i=0, pIdx=pTab->pIndex; pIdx; i++, pIdx=pIdx->pNext){ sqlite3VdbeAddOp1(v, OP_Close, iIdxCur + i); } } } /* End non-truncate path */ /* Update the sqlite_sequence table by storing the content of the ** maximum rowid counter values recorded while inserting into ** autoincrement tables. */ if( pParse->nested==0 && pParse->pTriggerTab==0 ){ sqlite3AutoincrementEnd(pParse); } /* Return the number of rows that were deleted. If this routine is ** generating code because of a call to sqlite3NestedParse(), do not ** invoke the callback function. */ if( (db->flags&SQLITE_CountRows) && !pParse->nested && !pParse->pTriggerTab ){ sqlite3VdbeAddOp2(v, OP_ResultRow, memCnt, 1); sqlite3VdbeSetNumCols(v, 1); sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "rows deleted", SQLITE_STATIC); } delete_from_cleanup: sqlite3AuthContextPop(&sContext); sqlite3SrcListDelete(db, pTabList); sqlite3ExprDelete(db, pWhere); sqlite3DbFree(db, aToOpen); return; } /* Make sure "isView" and other macros defined above are undefined. Otherwise ** they may interfere with compilation of other functions in this file ** (or in another file, if this file becomes part of the amalgamation). */ #ifdef isView #undef isView #endif #ifdef pTrigger #undef pTrigger #endif /* ** This routine generates VDBE code that causes a single row of a ** single table to be deleted. Both the original table entry and ** all indices are removed. ** ** Preconditions: ** ** 1. iDataCur is an open cursor on the btree that is the canonical data ** store for the table. (This will be either the table itself, ** in the case of a rowid table, or the PRIMARY KEY index in the case ** of a WITHOUT ROWID table.) ** ** 2. Read/write cursors for all indices of pTab must be open as ** cursor number iIdxCur+i for the i-th index. ** ** 3. The primary key for the row to be deleted must be stored in a ** sequence of nPk memory cells starting at iPk. If nPk==0 that means ** that a search record formed from OP_MakeRecord is contained in the ** single memory location iPk. */ SQLITE_PRIVATE void sqlite3GenerateRowDelete( Parse *pParse, /* Parsing context */ Table *pTab, /* Table containing the row to be deleted */ Trigger *pTrigger, /* List of triggers to (potentially) fire */ int iDataCur, /* Cursor from which column data is extracted */ int iIdxCur, /* First index cursor */ int iPk, /* First memory cell containing the PRIMARY KEY */ i16 nPk, /* Number of PRIMARY KEY memory cells */ u8 count, /* If non-zero, increment the row change counter */ u8 onconf, /* Default ON CONFLICT policy for triggers */ u8 bNoSeek /* iDataCur is already pointing to the row to delete */ ){ Vdbe *v = pParse->pVdbe; /* Vdbe */ int iOld = 0; /* First register in OLD.* array */ int iLabel; /* Label resolved to end of generated code */ u8 opSeek; /* Seek opcode */ /* Vdbe is guaranteed to have been allocated by this stage. */ assert( v ); VdbeModuleComment((v, "BEGIN: GenRowDel(%d,%d,%d,%d)", iDataCur, iIdxCur, iPk, (int)nPk)); /* Seek cursor iCur to the row to delete. If this row no longer exists ** (this can happen if a trigger program has already deleted it), do ** not attempt to delete it or fire any DELETE triggers. */ iLabel = sqlite3VdbeMakeLabel(v); opSeek = HasRowid(pTab) ? 
OP_NotExists : OP_NotFound; if( !bNoSeek ){ sqlite3VdbeAddOp4Int(v, opSeek, iDataCur, iLabel, iPk, nPk); VdbeCoverageIf(v, opSeek==OP_NotExists); VdbeCoverageIf(v, opSeek==OP_NotFound); } /* If there are any triggers to fire, allocate a range of registers to ** use for the old.* references in the triggers. */ if( sqlite3FkRequired(pParse, pTab, 0, 0) || pTrigger ){ u32 mask; /* Mask of OLD.* columns in use */ int iCol; /* Iterator used while populating OLD.* */ int addrStart; /* Start of BEFORE trigger programs */ /* TODO: Could use temporary registers here. Also could attempt to ** avoid copying the contents of the rowid register. */ mask = sqlite3TriggerColmask( pParse, pTrigger, 0, 0, TRIGGER_BEFORE|TRIGGER_AFTER, pTab, onconf ); mask |= sqlite3FkOldmask(pParse, pTab); iOld = pParse->nMem+1; pParse->nMem += (1 + pTab->nCol); /* Populate the OLD.* pseudo-table register array. These values will be ** used by any BEFORE and AFTER triggers that exist. */ sqlite3VdbeAddOp2(v, OP_Copy, iPk, iOld); for(iCol=0; iColnCol; iCol++){ testcase( mask!=0xffffffff && iCol==31 ); testcase( mask!=0xffffffff && iCol==32 ); if( mask==0xffffffff || (iCol<=31 && (mask & MASKBIT32(iCol))!=0) ){ sqlite3ExprCodeGetColumnOfTable(v, pTab, iDataCur, iCol, iOld+iCol+1); } } /* Invoke BEFORE DELETE trigger programs. */ addrStart = sqlite3VdbeCurrentAddr(v); sqlite3CodeRowTrigger(pParse, pTrigger, TK_DELETE, 0, TRIGGER_BEFORE, pTab, iOld, onconf, iLabel ); /* If any BEFORE triggers were coded, then seek the cursor to the ** row to be deleted again. It may be that the BEFORE triggers moved ** the cursor or of already deleted the row that the cursor was ** pointing to. */ if( addrStartpSelect==0 ){ sqlite3GenerateRowIndexDelete(pParse, pTab, iDataCur, iIdxCur, 0); sqlite3VdbeAddOp2(v, OP_Delete, iDataCur, (count?OPFLAG_NCHANGE:0)); if( count ){ sqlite3VdbeChangeP4(v, -1, pTab->zName, P4_TRANSIENT); } } /* Do any ON CASCADE, SET NULL or SET DEFAULT operations required to ** handle rows (possibly in other tables) that refer via a foreign key ** to the row just deleted. */ sqlite3FkActions(pParse, pTab, 0, iOld, 0, 0); /* Invoke AFTER DELETE trigger programs. */ sqlite3CodeRowTrigger(pParse, pTrigger, TK_DELETE, 0, TRIGGER_AFTER, pTab, iOld, onconf, iLabel ); /* Jump here if the row had already been deleted before any BEFORE ** trigger programs were invoked. Or if a trigger program throws a ** RAISE(IGNORE) exception. */ sqlite3VdbeResolveLabel(v, iLabel); VdbeModuleComment((v, "END: GenRowDel()")); } /* ** This routine generates VDBE code that causes the deletion of all ** index entries associated with a single row of a single table, pTab ** ** Preconditions: ** ** 1. A read/write cursor "iDataCur" must be open on the canonical storage ** btree for the table pTab. (This will be either the table itself ** for rowid tables or to the primary key index for WITHOUT ROWID ** tables.) ** ** 2. Read/write cursors for all indices of pTab must be open as ** cursor number iIdxCur+i for the i-th index. (The pTab->pIndex ** index is the 0-th index.) ** ** 3. The "iDataCur" cursor must be already be positioned on the row ** that is to be deleted. */ SQLITE_PRIVATE void sqlite3GenerateRowIndexDelete( Parse *pParse, /* Parsing and code generating context */ Table *pTab, /* Table containing the row to be deleted */ int iDataCur, /* Cursor of table holding data. 
*/ int iIdxCur, /* First index cursor */ int *aRegIdx /* Only delete if aRegIdx!=0 && aRegIdx[i]>0 */ ){ int i; /* Index loop counter */ int r1 = -1; /* Register holding an index key */ int iPartIdxLabel; /* Jump destination for skipping partial index entries */ Index *pIdx; /* Current index */ Index *pPrior = 0; /* Prior index */ Vdbe *v; /* The prepared statement under construction */ Index *pPk; /* PRIMARY KEY index, or NULL for rowid tables */ v = pParse->pVdbe; pPk = HasRowid(pTab) ? 0 : sqlite3PrimaryKeyIndex(pTab); for(i=0, pIdx=pTab->pIndex; pIdx; i++, pIdx=pIdx->pNext){ assert( iIdxCur+i!=iDataCur || pPk==pIdx ); if( aRegIdx!=0 && aRegIdx[i]==0 ) continue; if( pIdx==pPk ) continue; VdbeModuleComment((v, "GenRowIdxDel for %s", pIdx->zName)); r1 = sqlite3GenerateIndexKey(pParse, pIdx, iDataCur, 0, 1, &iPartIdxLabel, pPrior, r1); sqlite3VdbeAddOp3(v, OP_IdxDelete, iIdxCur+i, r1, pIdx->uniqNotNull ? pIdx->nKeyCol : pIdx->nColumn); sqlite3ResolvePartIdxLabel(pParse, iPartIdxLabel); pPrior = pIdx; } } /* ** Generate code that will assemble an index key and stores it in register ** regOut. The key with be for index pIdx which is an index on pTab. ** iCur is the index of a cursor open on the pTab table and pointing to ** the entry that needs indexing. If pTab is a WITHOUT ROWID table, then ** iCur must be the cursor of the PRIMARY KEY index. ** ** Return a register number which is the first in a block of ** registers that holds the elements of the index key. The ** block of registers has already been deallocated by the time ** this routine returns. ** ** If *piPartIdxLabel is not NULL, fill it in with a label and jump ** to that label if pIdx is a partial index that should be skipped. ** The label should be resolved using sqlite3ResolvePartIdxLabel(). ** A partial index should be skipped if its WHERE clause evaluates ** to false or null. If pIdx is not a partial index, *piPartIdxLabel ** will be set to zero which is an empty label that is ignored by ** sqlite3ResolvePartIdxLabel(). ** ** The pPrior and regPrior parameters are used to implement a cache to ** avoid unnecessary register loads. If pPrior is not NULL, then it is ** a pointer to a different index for which an index key has just been ** computed into register regPrior. If the current pIdx index is generating ** its key into the same sequence of registers and if pPrior and pIdx share ** a column in common, then the register corresponding to that column already ** holds the correct value and the loading of that register is skipped. ** This optimization is helpful when doing a DELETE or an INTEGRITY_CHECK ** on a table with multiple indices, and especially with the ROWID or ** PRIMARY KEY columns of the index. 
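**
** For example (an illustrative schema, not taken from the sources): if a
** table has two indices i1(a,b) and i2(a,c), and the key for i1 has just
** been coded into the same register block now being used for i2, then the
** load of column "a" can be skipped while coding the key for i2, since both
** indices use column "a" in position 0.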
*/ SQLITE_PRIVATE int sqlite3GenerateIndexKey( Parse *pParse, /* Parsing context */ Index *pIdx, /* The index for which to generate a key */ int iDataCur, /* Cursor number from which to take column data */ int regOut, /* Put the new key into this register if not 0 */ int prefixOnly, /* Compute only a unique prefix of the key */ int *piPartIdxLabel, /* OUT: Jump to this label to skip partial index */ Index *pPrior, /* Previously generated index key */ int regPrior /* Register holding previous generated key */ ){ Vdbe *v = pParse->pVdbe; int j; Table *pTab = pIdx->pTable; int regBase; int nCol; if( piPartIdxLabel ){ if( pIdx->pPartIdxWhere ){ *piPartIdxLabel = sqlite3VdbeMakeLabel(v); pParse->iPartIdxTab = iDataCur; sqlite3ExprCachePush(pParse); sqlite3ExprIfFalseDup(pParse, pIdx->pPartIdxWhere, *piPartIdxLabel, SQLITE_JUMPIFNULL); }else{ *piPartIdxLabel = 0; } } nCol = (prefixOnly && pIdx->uniqNotNull) ? pIdx->nKeyCol : pIdx->nColumn; regBase = sqlite3GetTempRange(pParse, nCol); if( pPrior && (regBase!=regPrior || pPrior->pPartIdxWhere) ) pPrior = 0; for(j=0; jaiColumn[j]==pIdx->aiColumn[j] ) continue; sqlite3ExprCodeGetColumnOfTable(v, pTab, iDataCur, pIdx->aiColumn[j], regBase+j); /* If the column affinity is REAL but the number is an integer, then it ** might be stored in the table as an integer (using a compact ** representation) then converted to REAL by an OP_RealAffinity opcode. ** But we are getting ready to store this value back into an index, where ** it should be converted by to INTEGER again. So omit the OP_RealAffinity ** opcode if it is present */ sqlite3VdbeDeletePriorOpcode(v, OP_RealAffinity); } if( regOut ){ sqlite3VdbeAddOp3(v, OP_MakeRecord, regBase, nCol, regOut); } sqlite3ReleaseTempRange(pParse, regBase, nCol); return regBase; } /* ** If a prior call to sqlite3GenerateIndexKey() generated a jump-over label ** because it was a partial index, then this routine should be called to ** resolve that label. */ SQLITE_PRIVATE void sqlite3ResolvePartIdxLabel(Parse *pParse, int iLabel){ if( iLabel ){ sqlite3VdbeResolveLabel(pParse->pVdbe, iLabel); sqlite3ExprCachePop(pParse); } } /************** End of delete.c **********************************************/ /************** Begin file func.c ********************************************/ /* ** 2002 February 23 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** This file contains the C-language implementations for many of the SQL ** functions of SQLite. (Some function, and in particular the date and ** time functions, are implemented separately.) */ /* #include "sqliteInt.h" */ /* #include */ /* #include */ /* #include "vdbeInt.h" */ /* ** Return the collating function associated with a function. */ static CollSeq *sqlite3GetFuncCollSeq(sqlite3_context *context){ VdbeOp *pOp; assert( context->pVdbe!=0 ); pOp = &context->pVdbe->aOp[context->iOp-1]; assert( pOp->opcode==OP_CollSeq ); assert( pOp->p4type==P4_COLLSEQ ); return pOp->p4.pColl; } /* ** Indicate that the accumulator load should be skipped on this ** iteration of the aggregate loop. 
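**
** The built-in min() and max() aggregates use this flag: their step function
** calls it when the current row's value does not replace the value already
** held, signalling that the accumulator load can be skipped for that row.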
*/ static void sqlite3SkipAccumulatorLoad(sqlite3_context *context){ context->skipFlag = 1; } /* ** Implementation of the non-aggregate min() and max() functions */ static void minmaxFunc( sqlite3_context *context, int argc, sqlite3_value **argv ){ int i; int mask; /* 0 for min() or 0xffffffff for max() */ int iBest; CollSeq *pColl; assert( argc>1 ); mask = sqlite3_user_data(context)==0 ? 0 : -1; pColl = sqlite3GetFuncCollSeq(context); assert( pColl ); assert( mask==-1 || mask==0 ); iBest = 0; if( sqlite3_value_type(argv[0])==SQLITE_NULL ) return; for(i=1; i=0 ){ testcase( mask==0 ); iBest = i; } } sqlite3_result_value(context, argv[iBest]); } /* ** Return the type of the argument. */ static void typeofFunc( sqlite3_context *context, int NotUsed, sqlite3_value **argv ){ const char *z = 0; UNUSED_PARAMETER(NotUsed); switch( sqlite3_value_type(argv[0]) ){ case SQLITE_INTEGER: z = "integer"; break; case SQLITE_TEXT: z = "text"; break; case SQLITE_FLOAT: z = "real"; break; case SQLITE_BLOB: z = "blob"; break; default: z = "null"; break; } sqlite3_result_text(context, z, -1, SQLITE_STATIC); } /* ** Implementation of the length() function */ static void lengthFunc( sqlite3_context *context, int argc, sqlite3_value **argv ){ int len; assert( argc==1 ); UNUSED_PARAMETER(argc); switch( sqlite3_value_type(argv[0]) ){ case SQLITE_BLOB: case SQLITE_INTEGER: case SQLITE_FLOAT: { sqlite3_result_int(context, sqlite3_value_bytes(argv[0])); break; } case SQLITE_TEXT: { const unsigned char *z = sqlite3_value_text(argv[0]); if( z==0 ) return; len = 0; while( *z ){ len++; SQLITE_SKIP_UTF8(z); } sqlite3_result_int(context, len); break; } default: { sqlite3_result_null(context); break; } } } /* ** Implementation of the abs() function. ** ** IMP: R-23979-26855 The abs(X) function returns the absolute value of ** the numeric argument X. */ static void absFunc(sqlite3_context *context, int argc, sqlite3_value **argv){ assert( argc==1 ); UNUSED_PARAMETER(argc); switch( sqlite3_value_type(argv[0]) ){ case SQLITE_INTEGER: { i64 iVal = sqlite3_value_int64(argv[0]); if( iVal<0 ){ if( iVal==SMALLEST_INT64 ){ /* IMP: R-31676-45509 If X is the integer -9223372036854775808 ** then abs(X) throws an integer overflow error since there is no ** equivalent positive 64-bit two complement value. */ sqlite3_result_error(context, "integer overflow", -1); return; } iVal = -iVal; } sqlite3_result_int64(context, iVal); break; } case SQLITE_NULL: { /* IMP: R-37434-19929 Abs(X) returns NULL if X is NULL. */ sqlite3_result_null(context); break; } default: { /* Because sqlite3_value_double() returns 0.0 if the argument is not ** something that can be converted into a number, we have: ** IMP: R-01992-00519 Abs(X) returns 0.0 if X is a string or blob ** that cannot be converted to a numeric value. */ double rVal = sqlite3_value_double(argv[0]); if( rVal<0 ) rVal = -rVal; sqlite3_result_double(context, rVal); break; } } } /* ** Implementation of the instr() function. ** ** instr(haystack,needle) finds the first occurrence of needle ** in haystack and returns the number of previous characters plus 1, ** or 0 if needle does not occur within haystack. ** ** If both haystack and needle are BLOBs, then the result is one more than ** the number of bytes in haystack prior to the first occurrence of needle, ** or 0 if needle never occurs in haystack. 
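**
** For example, instr('abcabc','b') returns 2 and instr('abcabc','x')
** returns 0.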
*/ static void instrFunc( sqlite3_context *context, int argc, sqlite3_value **argv ){ const unsigned char *zHaystack; const unsigned char *zNeedle; int nHaystack; int nNeedle; int typeHaystack, typeNeedle; int N = 1; int isText; UNUSED_PARAMETER(argc); typeHaystack = sqlite3_value_type(argv[0]); typeNeedle = sqlite3_value_type(argv[1]); if( typeHaystack==SQLITE_NULL || typeNeedle==SQLITE_NULL ) return; nHaystack = sqlite3_value_bytes(argv[0]); nNeedle = sqlite3_value_bytes(argv[1]); if( typeHaystack==SQLITE_BLOB && typeNeedle==SQLITE_BLOB ){ zHaystack = sqlite3_value_blob(argv[0]); zNeedle = sqlite3_value_blob(argv[1]); isText = 0; }else{ zHaystack = sqlite3_value_text(argv[0]); zNeedle = sqlite3_value_text(argv[1]); isText = 1; } while( nNeedle<=nHaystack && memcmp(zHaystack, zNeedle, nNeedle)!=0 ){ N++; do{ nHaystack--; zHaystack++; }while( isText && (zHaystack[0]&0xc0)==0x80 ); } if( nNeedle>nHaystack ) N = 0; sqlite3_result_int(context, N); } /* ** Implementation of the printf() function. */ static void printfFunc( sqlite3_context *context, int argc, sqlite3_value **argv ){ PrintfArguments x; StrAccum str; const char *zFormat; int n; sqlite3 *db = sqlite3_context_db_handle(context); if( argc>=1 && (zFormat = (const char*)sqlite3_value_text(argv[0]))!=0 ){ x.nArg = argc-1; x.nUsed = 0; x.apArg = argv+1; sqlite3StrAccumInit(&str, db, 0, 0, db->aLimit[SQLITE_LIMIT_LENGTH]); sqlite3XPrintf(&str, SQLITE_PRINTF_SQLFUNC, zFormat, &x); n = str.nChar; sqlite3_result_text(context, sqlite3StrAccumFinish(&str), n, SQLITE_DYNAMIC); } } /* ** Implementation of the substr() function. ** ** substr(x,p1,p2) returns p2 characters of x[] beginning with p1. ** p1 is 1-indexed. So substr(x,1,1) returns the first character ** of x. If x is text, then we actually count UTF-8 characters. ** If x is a blob, then we count bytes. ** ** If p1 is negative, then we begin abs(p1) from the end of x[]. ** ** If p2 is negative, return the p2 characters preceding p1. */ static void substrFunc( sqlite3_context *context, int argc, sqlite3_value **argv ){ const unsigned char *z; const unsigned char *z2; int len; int p0type; i64 p1, p2; int negP2 = 0; assert( argc==3 || argc==2 ); if( sqlite3_value_type(argv[1])==SQLITE_NULL || (argc==3 && sqlite3_value_type(argv[2])==SQLITE_NULL) ){ return; } p0type = sqlite3_value_type(argv[0]); p1 = sqlite3_value_int(argv[1]); if( p0type==SQLITE_BLOB ){ len = sqlite3_value_bytes(argv[0]); z = sqlite3_value_blob(argv[0]); if( z==0 ) return; assert( len==sqlite3_value_bytes(argv[0]) ); }else{ z = sqlite3_value_text(argv[0]); if( z==0 ) return; len = 0; if( p1<0 ){ for(z2=z; *z2; len++){ SQLITE_SKIP_UTF8(z2); } } } #ifdef SQLITE_SUBSTR_COMPATIBILITY /* If SUBSTR_COMPATIBILITY is defined then substr(X,0,N) work the same as ** as substr(X,1,N) - it returns the first N characters of X. This ** is essentially a back-out of the bug-fix in check-in [5fc125d362df4b8] ** from 2009-02-02 for compatibility of applications that exploited the ** old buggy behavior. 
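**
** Under this option, for example, substr('abcdef',0,3) is treated exactly
** like substr('abcdef',1,3) and returns 'abc'.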
*/ if( p1==0 ) p1 = 1; /* */ #endif if( argc==3 ){ p2 = sqlite3_value_int(argv[2]); if( p2<0 ){ p2 = -p2; negP2 = 1; } }else{ p2 = sqlite3_context_db_handle(context)->aLimit[SQLITE_LIMIT_LENGTH]; } if( p1<0 ){ p1 += len; if( p1<0 ){ p2 += p1; if( p2<0 ) p2 = 0; p1 = 0; } }else if( p1>0 ){ p1--; }else if( p2>0 ){ p2--; } if( negP2 ){ p1 -= p2; if( p1<0 ){ p2 += p1; p1 = 0; } } assert( p1>=0 && p2>=0 ); if( p0type!=SQLITE_BLOB ){ while( *z && p1 ){ SQLITE_SKIP_UTF8(z); p1--; } for(z2=z; *z2 && p2; p2--){ SQLITE_SKIP_UTF8(z2); } sqlite3_result_text64(context, (char*)z, z2-z, SQLITE_TRANSIENT, SQLITE_UTF8); }else{ if( p1+p2>len ){ p2 = len-p1; if( p2<0 ) p2 = 0; } sqlite3_result_blob64(context, (char*)&z[p1], (u64)p2, SQLITE_TRANSIENT); } } /* ** Implementation of the round() function */ #ifndef SQLITE_OMIT_FLOATING_POINT static void roundFunc(sqlite3_context *context, int argc, sqlite3_value **argv){ int n = 0; double r; char *zBuf; assert( argc==1 || argc==2 ); if( argc==2 ){ if( SQLITE_NULL==sqlite3_value_type(argv[1]) ) return; n = sqlite3_value_int(argv[1]); if( n>30 ) n = 30; if( n<0 ) n = 0; } if( sqlite3_value_type(argv[0])==SQLITE_NULL ) return; r = sqlite3_value_double(argv[0]); /* If Y==0 and X will fit in a 64-bit int, ** handle the rounding directly, ** otherwise use printf. */ if( n==0 && r>=0 && r0 ); testcase( nByte==db->aLimit[SQLITE_LIMIT_LENGTH] ); testcase( nByte==db->aLimit[SQLITE_LIMIT_LENGTH]+1 ); if( nByte>db->aLimit[SQLITE_LIMIT_LENGTH] ){ sqlite3_result_error_toobig(context); z = 0; }else{ z = sqlite3Malloc(nByte); if( !z ){ sqlite3_result_error_nomem(context); } } return z; } /* ** Implementation of the upper() and lower() SQL functions. */ static void upperFunc(sqlite3_context *context, int argc, sqlite3_value **argv){ char *z1; const char *z2; int i, n; UNUSED_PARAMETER(argc); z2 = (char*)sqlite3_value_text(argv[0]); n = sqlite3_value_bytes(argv[0]); /* Verify that the call to _bytes() does not invalidate the _text() pointer */ assert( z2==(char*)sqlite3_value_text(argv[0]) ); if( z2 ){ z1 = contextMalloc(context, ((i64)n)+1); if( z1 ){ for(i=0; imatchOne; /* "?" or "_" */ u32 matchAll = pInfo->matchAll; /* "*" or "%" */ u32 matchOther; /* "[" or the escape character */ u8 noCase = pInfo->noCase; /* True if uppercase==lowercase */ const u8 *zEscaped = 0; /* One past the last escaped input char */ /* The GLOB operator does not have an ESCAPE clause. And LIKE does not ** have the matchSet operator. So we either have to look for one or ** the other, never both. Hence the single variable matchOther is used ** to store the one we have to look for. */ matchOther = esc ? esc : pInfo->matchSet; while( (c = Utf8Read(zPattern))!=0 ){ if( c==matchAll ){ /* Match "*" */ /* Skip over multiple "*" characters in the pattern. If there ** are also "?" characters, skip those as well, but consume a ** single character of the input string for each "?" skipped */ while( (c=Utf8Read(zPattern)) == matchAll || c == matchOne ){ if( c==matchOne && sqlite3Utf8Read(&zString)==0 ){ return 0; } } if( c==0 ){ return 1; /* "*" at the end of the pattern matches */ }else if( c==matchOther ){ if( esc ){ c = sqlite3Utf8Read(&zPattern); if( c==0 ) return 0; }else{ /* "[...]" immediately follows the "*". We have to do a slow ** recursive search in this case, but it is an unusual case. 
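**
** Illustrative example (sample pattern only): a GLOB pattern such as
** 'log*[0-9]' takes this branch, because the character class follows
** immediately after the '*' wildcard.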
*/ assert( matchOther<0x80 ); /* '[' is a single-byte character */ while( *zString && patternCompare(&zPattern[-1],zString,pInfo,esc)==0 ){ SQLITE_SKIP_UTF8(zString); } return *zString!=0; } } /* At this point variable c contains the first character of the ** pattern string past the "*". Search in the input string for the ** first matching character and recursively contine the match from ** that point. ** ** For a case-insensitive search, set variable cx to be the same as ** c but in the other case and search the input string for either ** c or cx. */ if( c<=0x80 ){ u32 cx; if( noCase ){ cx = sqlite3Toupper(c); c = sqlite3Tolower(c); }else{ cx = c; } while( (c2 = *(zString++))!=0 ){ if( c2!=c && c2!=cx ) continue; if( patternCompare(zPattern,zString,pInfo,esc) ) return 1; } }else{ while( (c2 = Utf8Read(zString))!=0 ){ if( c2!=c ) continue; if( patternCompare(zPattern,zString,pInfo,esc) ) return 1; } } return 0; } if( c==matchOther ){ if( esc ){ c = sqlite3Utf8Read(&zPattern); if( c==0 ) return 0; zEscaped = zPattern; }else{ u32 prior_c = 0; int seen = 0; int invert = 0; c = sqlite3Utf8Read(&zString); if( c==0 ) return 0; c2 = sqlite3Utf8Read(&zPattern); if( c2=='^' ){ invert = 1; c2 = sqlite3Utf8Read(&zPattern); } if( c2==']' ){ if( c==']' ) seen = 1; c2 = sqlite3Utf8Read(&zPattern); } while( c2 && c2!=']' ){ if( c2=='-' && zPattern[0]!=']' && zPattern[0]!=0 && prior_c>0 ){ c2 = sqlite3Utf8Read(&zPattern); if( c>=prior_c && c<=c2 ) seen = 1; prior_c = 0; }else{ if( c==c2 ){ seen = 1; } prior_c = c2; } c2 = sqlite3Utf8Read(&zPattern); } if( c2==0 || (seen ^ invert)==0 ){ return 0; } continue; } } c2 = Utf8Read(zString); if( c==c2 ) continue; if( noCase && c<0x80 && c2<0x80 && sqlite3Tolower(c)==sqlite3Tolower(c2) ){ continue; } if( c==matchOne && zPattern!=zEscaped && c2!=0 ) continue; return 0; } return *zString==0; } /* ** The sqlite3_strglob() interface. */ SQLITE_API int SQLITE_STDCALL sqlite3_strglob(const char *zGlobPattern, const char *zString){ return patternCompare((u8*)zGlobPattern, (u8*)zString, &globInfo, 0)==0; } /* ** Count the number of times that the LIKE operator (or GLOB which is ** just a variation of LIKE) gets called. This is used for testing ** only. */ #ifdef SQLITE_TEST SQLITE_API int sqlite3_like_count = 0; #endif /* ** Implementation of the like() SQL function. This function implements ** the build-in LIKE operator. The first argument to the function is the ** pattern and the second argument is the string. So, the SQL statements: ** ** A LIKE B ** ** is implemented as like(B,A). ** ** This same function (with a different compareInfo structure) computes ** the GLOB operator. */ static void likeFunc( sqlite3_context *context, int argc, sqlite3_value **argv ){ const unsigned char *zA, *zB; u32 escape = 0; int nPat; sqlite3 *db = sqlite3_context_db_handle(context); zB = sqlite3_value_text(argv[0]); zA = sqlite3_value_text(argv[1]); /* Limit the length of the LIKE or GLOB pattern to avoid problems ** of deep recursion and N*N behavior in patternCompare(). */ nPat = sqlite3_value_bytes(argv[0]); testcase( nPat==db->aLimit[SQLITE_LIMIT_LIKE_PATTERN_LENGTH] ); testcase( nPat==db->aLimit[SQLITE_LIMIT_LIKE_PATTERN_LENGTH]+1 ); if( nPat > db->aLimit[SQLITE_LIMIT_LIKE_PATTERN_LENGTH] ){ sqlite3_result_error(context, "LIKE or GLOB pattern too complex", -1); return; } assert( zB==sqlite3_value_text(argv[0]) ); /* Encoding did not change */ if( argc==3 ){ /* The escape character string must consist of a single UTF-8 character. ** Otherwise, return an error. 
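**
** Illustrative example (sample query only):
**
**     SELECT 'a_c' LIKE 'a!_c' ESCAPE '!';
**
** Here '!' arrives as argv[2] and must be exactly one UTF-8 character.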
*/ const unsigned char *zEsc = sqlite3_value_text(argv[2]); if( zEsc==0 ) return; if( sqlite3Utf8CharLen((char*)zEsc, -1)!=1 ){ sqlite3_result_error(context, "ESCAPE expression must be a single character", -1); return; } escape = sqlite3Utf8Read(&zEsc); } if( zA && zB ){ struct compareInfo *pInfo = sqlite3_user_data(context); #ifdef SQLITE_TEST sqlite3_like_count++; #endif sqlite3_result_int(context, patternCompare(zB, zA, pInfo, escape)); } } /* ** Implementation of the NULLIF(x,y) function. The result is the first ** argument if the arguments are different. The result is NULL if the ** arguments are equal to each other. */ static void nullifFunc( sqlite3_context *context, int NotUsed, sqlite3_value **argv ){ CollSeq *pColl = sqlite3GetFuncCollSeq(context); UNUSED_PARAMETER(NotUsed); if( sqlite3MemCompare(argv[0], argv[1], pColl)!=0 ){ sqlite3_result_value(context, argv[0]); } } /* ** Implementation of the sqlite_version() function. The result is the version ** of the SQLite library that is running. */ static void versionFunc( sqlite3_context *context, int NotUsed, sqlite3_value **NotUsed2 ){ UNUSED_PARAMETER2(NotUsed, NotUsed2); /* IMP: R-48699-48617 This function is an SQL wrapper around the ** sqlite3_libversion() C-interface. */ sqlite3_result_text(context, sqlite3_libversion(), -1, SQLITE_STATIC); } /* ** Implementation of the sqlite_source_id() function. The result is a string ** that identifies the particular version of the source code used to build ** SQLite. */ static void sourceidFunc( sqlite3_context *context, int NotUsed, sqlite3_value **NotUsed2 ){ UNUSED_PARAMETER2(NotUsed, NotUsed2); /* IMP: R-24470-31136 This function is an SQL wrapper around the ** sqlite3_sourceid() C interface. */ sqlite3_result_text(context, sqlite3_sourceid(), -1, SQLITE_STATIC); } /* ** Implementation of the sqlite_log() function. This is a wrapper around ** sqlite3_log(). The return value is NULL. The function exists purely for ** its side-effects. */ static void errlogFunc( sqlite3_context *context, int argc, sqlite3_value **argv ){ UNUSED_PARAMETER(argc); UNUSED_PARAMETER(context); sqlite3_log(sqlite3_value_int(argv[0]), "%s", sqlite3_value_text(argv[1])); } /* ** Implementation of the sqlite_compileoption_used() function. ** The result is an integer that identifies if the compiler option ** was used to build SQLite. */ #ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS static void compileoptionusedFunc( sqlite3_context *context, int argc, sqlite3_value **argv ){ const char *zOptName; assert( argc==1 ); UNUSED_PARAMETER(argc); /* IMP: R-39564-36305 The sqlite_compileoption_used() SQL ** function is a wrapper around the sqlite3_compileoption_used() C/C++ ** function. */ if( (zOptName = (const char*)sqlite3_value_text(argv[0]))!=0 ){ sqlite3_result_int(context, sqlite3_compileoption_used(zOptName)); } } #endif /* SQLITE_OMIT_COMPILEOPTION_DIAGS */ /* ** Implementation of the sqlite_compileoption_get() function. ** The result is a string that identifies the compiler options ** used to build SQLite. */ #ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS static void compileoptiongetFunc( sqlite3_context *context, int argc, sqlite3_value **argv ){ int n; assert( argc==1 ); UNUSED_PARAMETER(argc); /* IMP: R-04922-24076 The sqlite_compileoption_get() SQL function ** is a wrapper around the sqlite3_compileoption_get() C/C++ function. 
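**
** Illustrative usage (sample query only):
**
**     SELECT sqlite_compileoption_get(0);
**
** returns the first compile-time option used for the build, or NULL when
** the index is out of range.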
*/ n = sqlite3_value_int(argv[0]); sqlite3_result_text(context, sqlite3_compileoption_get(n), -1, SQLITE_STATIC); } #endif /* SQLITE_OMIT_COMPILEOPTION_DIAGS */ /* Array for converting from half-bytes (nybbles) into ASCII hex ** digits. */ static const char hexdigits[] = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F' }; /* ** Implementation of the QUOTE() function. This function takes a single ** argument. If the argument is numeric, the return value is the same as ** the argument. If the argument is NULL, the return value is the string ** "NULL". Otherwise, the argument is enclosed in single quotes with ** single-quote escapes. */ static void quoteFunc(sqlite3_context *context, int argc, sqlite3_value **argv){ assert( argc==1 ); UNUSED_PARAMETER(argc); switch( sqlite3_value_type(argv[0]) ){ case SQLITE_FLOAT: { double r1, r2; char zBuf[50]; r1 = sqlite3_value_double(argv[0]); sqlite3_snprintf(sizeof(zBuf), zBuf, "%!.15g", r1); sqlite3AtoF(zBuf, &r2, 20, SQLITE_UTF8); if( r1!=r2 ){ sqlite3_snprintf(sizeof(zBuf), zBuf, "%!.20e", r1); } sqlite3_result_text(context, zBuf, -1, SQLITE_TRANSIENT); break; } case SQLITE_INTEGER: { sqlite3_result_value(context, argv[0]); break; } case SQLITE_BLOB: { char *zText = 0; char const *zBlob = sqlite3_value_blob(argv[0]); int nBlob = sqlite3_value_bytes(argv[0]); assert( zBlob==sqlite3_value_blob(argv[0]) ); /* No encoding change */ zText = (char *)contextMalloc(context, (2*(i64)nBlob)+4); if( zText ){ int i; for(i=0; i>4)&0x0F]; zText[(i*2)+3] = hexdigits[(zBlob[i])&0x0F]; } zText[(nBlob*2)+2] = '\''; zText[(nBlob*2)+3] = '\0'; zText[0] = 'X'; zText[1] = '\''; sqlite3_result_text(context, zText, -1, SQLITE_TRANSIENT); sqlite3_free(zText); } break; } case SQLITE_TEXT: { int i,j; u64 n; const unsigned char *zArg = sqlite3_value_text(argv[0]); char *z; if( zArg==0 ) return; for(i=0, n=0; zArg[i]; i++){ if( zArg[i]=='\'' ) n++; } z = contextMalloc(context, ((i64)i)+((i64)n)+3); if( z ){ z[0] = '\''; for(i=0, j=1; zArg[i]; i++){ z[j++] = zArg[i]; if( zArg[i]=='\'' ){ z[j++] = '\''; } } z[j++] = '\''; z[j] = 0; sqlite3_result_text(context, z, j, sqlite3_free); } break; } default: { assert( sqlite3_value_type(argv[0])==SQLITE_NULL ); sqlite3_result_text(context, "NULL", 4, SQLITE_STATIC); break; } } } /* ** The unicode() function. Return the integer unicode code-point value ** for the first character of the input string. */ static void unicodeFunc( sqlite3_context *context, int argc, sqlite3_value **argv ){ const unsigned char *z = sqlite3_value_text(argv[0]); (void)argc; if( z && z[0] ) sqlite3_result_int(context, sqlite3Utf8Read(&z)); } /* ** The char() function takes zero or more arguments, each of which is ** an integer. It constructs a string where each character of the string ** is the unicode character for the corresponding integer argument. 
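**
** Illustrative example (sample values only): char(116,115,107) returns
** the three-character string 'tsk'.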
*/ static void charFunc( sqlite3_context *context, int argc, sqlite3_value **argv ){ unsigned char *z, *zOut; int i; zOut = z = sqlite3_malloc64( argc*4+1 ); if( z==0 ){ sqlite3_result_error_nomem(context); return; } for(i=0; i0x10ffff ) x = 0xfffd; c = (unsigned)(x & 0x1fffff); if( c<0x00080 ){ *zOut++ = (u8)(c&0xFF); }else if( c<0x00800 ){ *zOut++ = 0xC0 + (u8)((c>>6)&0x1F); *zOut++ = 0x80 + (u8)(c & 0x3F); }else if( c<0x10000 ){ *zOut++ = 0xE0 + (u8)((c>>12)&0x0F); *zOut++ = 0x80 + (u8)((c>>6) & 0x3F); *zOut++ = 0x80 + (u8)(c & 0x3F); }else{ *zOut++ = 0xF0 + (u8)((c>>18) & 0x07); *zOut++ = 0x80 + (u8)((c>>12) & 0x3F); *zOut++ = 0x80 + (u8)((c>>6) & 0x3F); *zOut++ = 0x80 + (u8)(c & 0x3F); } \ } sqlite3_result_text64(context, (char*)z, zOut-z, sqlite3_free, SQLITE_UTF8); } /* ** The hex() function. Interpret the argument as a blob. Return ** a hexadecimal rendering as text. */ static void hexFunc( sqlite3_context *context, int argc, sqlite3_value **argv ){ int i, n; const unsigned char *pBlob; char *zHex, *z; assert( argc==1 ); UNUSED_PARAMETER(argc); pBlob = sqlite3_value_blob(argv[0]); n = sqlite3_value_bytes(argv[0]); assert( pBlob==sqlite3_value_blob(argv[0]) ); /* No encoding change */ z = zHex = contextMalloc(context, ((i64)n)*2 + 1); if( zHex ){ for(i=0; i>4)&0xf]; *(z++) = hexdigits[c&0xf]; } *z = 0; sqlite3_result_text(context, zHex, n*2, sqlite3_free); } } /* ** The zeroblob(N) function returns a zero-filled blob of size N bytes. */ static void zeroblobFunc( sqlite3_context *context, int argc, sqlite3_value **argv ){ i64 n; int rc; assert( argc==1 ); UNUSED_PARAMETER(argc); n = sqlite3_value_int64(argv[0]); if( n<0 ) n = 0; rc = sqlite3_result_zeroblob64(context, n); /* IMP: R-00293-64994 */ if( rc ){ sqlite3_result_error_code(context, rc); } } /* ** The replace() function. Three arguments are all strings: call ** them A, B, and C. The result is also a string which is derived ** from A by replacing every occurrence of B with C. The match ** must be exact. Collating sequences are not used. 
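**
** Illustrative example (sample values only): replace('a-b-c','-','/')
** returns 'a/b/c'.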
*/ static void replaceFunc( sqlite3_context *context, int argc, sqlite3_value **argv ){ const unsigned char *zStr; /* The input string A */ const unsigned char *zPattern; /* The pattern string B */ const unsigned char *zRep; /* The replacement string C */ unsigned char *zOut; /* The output */ int nStr; /* Size of zStr */ int nPattern; /* Size of zPattern */ int nRep; /* Size of zRep */ i64 nOut; /* Maximum size of zOut */ int loopLimit; /* Last zStr[] that might match zPattern[] */ int i, j; /* Loop counters */ assert( argc==3 ); UNUSED_PARAMETER(argc); zStr = sqlite3_value_text(argv[0]); if( zStr==0 ) return; nStr = sqlite3_value_bytes(argv[0]); assert( zStr==sqlite3_value_text(argv[0]) ); /* No encoding change */ zPattern = sqlite3_value_text(argv[1]); if( zPattern==0 ){ assert( sqlite3_value_type(argv[1])==SQLITE_NULL || sqlite3_context_db_handle(context)->mallocFailed ); return; } if( zPattern[0]==0 ){ assert( sqlite3_value_type(argv[1])!=SQLITE_NULL ); sqlite3_result_value(context, argv[0]); return; } nPattern = sqlite3_value_bytes(argv[1]); assert( zPattern==sqlite3_value_text(argv[1]) ); /* No encoding change */ zRep = sqlite3_value_text(argv[2]); if( zRep==0 ) return; nRep = sqlite3_value_bytes(argv[2]); assert( zRep==sqlite3_value_text(argv[2]) ); nOut = nStr + 1; assert( nOutaLimit[SQLITE_LIMIT_LENGTH] ); testcase( nOut-2==db->aLimit[SQLITE_LIMIT_LENGTH] ); if( nOut-1>db->aLimit[SQLITE_LIMIT_LENGTH] ){ sqlite3_result_error_toobig(context); sqlite3_free(zOut); return; } zOld = zOut; zOut = sqlite3_realloc64(zOut, (int)nOut); if( zOut==0 ){ sqlite3_result_error_nomem(context); sqlite3_free(zOld); return; } memcpy(&zOut[j], zRep, nRep); j += nRep; i += nPattern-1; } } assert( j+nStr-i+1==nOut ); memcpy(&zOut[j], &zStr[i], nStr-i); j += nStr - i; assert( j<=nOut ); zOut[j] = 0; sqlite3_result_text(context, (char*)zOut, j, sqlite3_free); } /* ** Implementation of the TRIM(), LTRIM(), and RTRIM() functions. ** The userdata is 0x1 for left trim, 0x2 for right trim, 0x3 for both. 
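**
** Illustrative examples (sample values only): trim('  abc  ') returns
** 'abc', and ltrim('xxabcxx','x') returns 'abcxx'.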
*/ static void trimFunc( sqlite3_context *context, int argc, sqlite3_value **argv ){ const unsigned char *zIn; /* Input string */ const unsigned char *zCharSet; /* Set of characters to trim */ int nIn; /* Number of bytes in input */ int flags; /* 1: trimleft 2: trimright 3: trim */ int i; /* Loop counter */ unsigned char *aLen = 0; /* Length of each character in zCharSet */ unsigned char **azChar = 0; /* Individual characters in zCharSet */ int nChar; /* Number of characters in zCharSet */ if( sqlite3_value_type(argv[0])==SQLITE_NULL ){ return; } zIn = sqlite3_value_text(argv[0]); if( zIn==0 ) return; nIn = sqlite3_value_bytes(argv[0]); assert( zIn==sqlite3_value_text(argv[0]) ); if( argc==1 ){ static const unsigned char lenOne[] = { 1 }; static unsigned char * const azOne[] = { (u8*)" " }; nChar = 1; aLen = (u8*)lenOne; azChar = (unsigned char **)azOne; zCharSet = 0; }else if( (zCharSet = sqlite3_value_text(argv[1]))==0 ){ return; }else{ const unsigned char *z; for(z=zCharSet, nChar=0; *z; nChar++){ SQLITE_SKIP_UTF8(z); } if( nChar>0 ){ azChar = contextMalloc(context, ((i64)nChar)*(sizeof(char*)+1)); if( azChar==0 ){ return; } aLen = (unsigned char*)&azChar[nChar]; for(z=zCharSet, nChar=0; *z; nChar++){ azChar[nChar] = (unsigned char *)z; SQLITE_SKIP_UTF8(z); aLen[nChar] = (u8)(z - azChar[nChar]); } } } if( nChar>0 ){ flags = SQLITE_PTR_TO_INT(sqlite3_user_data(context)); if( flags & 1 ){ while( nIn>0 ){ int len = 0; for(i=0; i=nChar ) break; zIn += len; nIn -= len; } } if( flags & 2 ){ while( nIn>0 ){ int len = 0; for(i=0; i=nChar ) break; nIn -= len; } } if( zCharSet ){ sqlite3_free(azChar); } } sqlite3_result_text(context, (char*)zIn, nIn, SQLITE_TRANSIENT); } /* IMP: R-25361-16150 This function is omitted from SQLite by default. It ** is only available if the SQLITE_SOUNDEX compile-time option is used ** when SQLite is built. */ #ifdef SQLITE_SOUNDEX /* ** Compute the soundex encoding of a word. ** ** IMP: R-59782-00072 The soundex(X) function returns a string that is the ** soundex encoding of the string X. */ static void soundexFunc( sqlite3_context *context, int argc, sqlite3_value **argv ){ char zResult[8]; const u8 *zIn; int i, j; static const unsigned char iCode[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 0, 1, 2, 0, 0, 2, 2, 4, 5, 5, 0, 1, 2, 6, 2, 3, 0, 1, 0, 2, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 0, 1, 2, 0, 0, 2, 2, 4, 5, 5, 0, 1, 2, 6, 2, 3, 0, 1, 0, 2, 0, 2, 0, 0, 0, 0, 0, }; assert( argc==1 ); zIn = (u8*)sqlite3_value_text(argv[0]); if( zIn==0 ) zIn = (u8*)""; for(i=0; zIn[i] && !sqlite3Isalpha(zIn[i]); i++){} if( zIn[i] ){ u8 prevcode = iCode[zIn[i]&0x7f]; zResult[0] = sqlite3Toupper(zIn[i]); for(j=1; j<4 && zIn[i]; i++){ int code = iCode[zIn[i]&0x7f]; if( code>0 ){ if( code!=prevcode ){ prevcode = code; zResult[j++] = code + '0'; } }else{ prevcode = 0; } } while( j<4 ){ zResult[j++] = '0'; } zResult[j] = 0; sqlite3_result_text(context, zResult, 4, SQLITE_TRANSIENT); }else{ /* IMP: R-64894-50321 The string "?000" is returned if the argument ** is NULL or contains no ASCII alphabetic characters. */ sqlite3_result_text(context, "?000", 4, SQLITE_STATIC); } } #endif /* SQLITE_SOUNDEX */ #ifndef SQLITE_OMIT_LOAD_EXTENSION /* ** A function that loads a shared-library extension then returns NULL. 
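**
** Illustrative usage (the file and entry-point names are placeholders,
** not taken from the surrounding code):
**
**     SELECT load_extension('./myext.so', 'sqlite3_myext_init');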
*/ static void loadExt(sqlite3_context *context, int argc, sqlite3_value **argv){ const char *zFile = (const char *)sqlite3_value_text(argv[0]); const char *zProc; sqlite3 *db = sqlite3_context_db_handle(context); char *zErrMsg = 0; if( argc==2 ){ zProc = (const char *)sqlite3_value_text(argv[1]); }else{ zProc = 0; } if( zFile && sqlite3_load_extension(db, zFile, zProc, &zErrMsg) ){ sqlite3_result_error(context, zErrMsg, -1); sqlite3_free(zErrMsg); } } #endif /* ** An instance of the following structure holds the context of a ** sum() or avg() aggregate computation. */ typedef struct SumCtx SumCtx; struct SumCtx { double rSum; /* Floating point sum */ i64 iSum; /* Integer sum */ i64 cnt; /* Number of elements summed */ u8 overflow; /* True if integer overflow seen */ u8 approx; /* True if non-integer value was input to the sum */ }; /* ** Routines used to compute the sum, average, and total. ** ** The SUM() function follows the (broken) SQL standard which means ** that it returns NULL if it sums over no inputs. TOTAL returns ** 0.0 in that case. In addition, TOTAL always returns a float where ** SUM might return an integer if it never encounters a floating point ** value. TOTAL never fails, but SUM might through an exception if ** it overflows an integer. */ static void sumStep(sqlite3_context *context, int argc, sqlite3_value **argv){ SumCtx *p; int type; assert( argc==1 ); UNUSED_PARAMETER(argc); p = sqlite3_aggregate_context(context, sizeof(*p)); type = sqlite3_value_numeric_type(argv[0]); if( p && type!=SQLITE_NULL ){ p->cnt++; if( type==SQLITE_INTEGER ){ i64 v = sqlite3_value_int64(argv[0]); p->rSum += v; if( (p->approx|p->overflow)==0 && sqlite3AddInt64(&p->iSum, v) ){ p->overflow = 1; } }else{ p->rSum += sqlite3_value_double(argv[0]); p->approx = 1; } } } static void sumFinalize(sqlite3_context *context){ SumCtx *p; p = sqlite3_aggregate_context(context, 0); if( p && p->cnt>0 ){ if( p->overflow ){ sqlite3_result_error(context,"integer overflow",-1); }else if( p->approx ){ sqlite3_result_double(context, p->rSum); }else{ sqlite3_result_int64(context, p->iSum); } } } static void avgFinalize(sqlite3_context *context){ SumCtx *p; p = sqlite3_aggregate_context(context, 0); if( p && p->cnt>0 ){ sqlite3_result_double(context, p->rSum/(double)p->cnt); } } static void totalFinalize(sqlite3_context *context){ SumCtx *p; p = sqlite3_aggregate_context(context, 0); /* (double)0 In case of SQLITE_OMIT_FLOATING_POINT... */ sqlite3_result_double(context, p ? p->rSum : (double)0); } /* ** The following structure keeps track of state information for the ** count() aggregate function. */ typedef struct CountCtx CountCtx; struct CountCtx { i64 n; }; /* ** Routines to implement the count() aggregate function. */ static void countStep(sqlite3_context *context, int argc, sqlite3_value **argv){ CountCtx *p; p = sqlite3_aggregate_context(context, sizeof(*p)); if( (argc==0 || SQLITE_NULL!=sqlite3_value_type(argv[0])) && p ){ p->n++; } #ifndef SQLITE_OMIT_DEPRECATED /* The sqlite3_aggregate_count() function is deprecated. But just to make ** sure it still operates correctly, verify that its count agrees with our ** internal count when using count(*) and when the total count can be ** expressed as a 32-bit integer. */ assert( argc==1 || p==0 || p->n>0x7fffffff || p->n==sqlite3_aggregate_count(context) ); #endif } static void countFinalize(sqlite3_context *context){ CountCtx *p; p = sqlite3_aggregate_context(context, 0); sqlite3_result_int64(context, p ? 
p->n : 0); } /* ** Routines to implement min() and max() aggregate functions. */ static void minmaxStep( sqlite3_context *context, int NotUsed, sqlite3_value **argv ){ Mem *pArg = (Mem *)argv[0]; Mem *pBest; UNUSED_PARAMETER(NotUsed); pBest = (Mem *)sqlite3_aggregate_context(context, sizeof(*pBest)); if( !pBest ) return; if( sqlite3_value_type(argv[0])==SQLITE_NULL ){ if( pBest->flags ) sqlite3SkipAccumulatorLoad(context); }else if( pBest->flags ){ int max; int cmp; CollSeq *pColl = sqlite3GetFuncCollSeq(context); /* This step function is used for both the min() and max() aggregates, ** the only difference between the two being that the sense of the ** comparison is inverted. For the max() aggregate, the ** sqlite3_user_data() function returns (void *)-1. For min() it ** returns (void *)db, where db is the sqlite3* database pointer. ** Therefore the next statement sets variable 'max' to 1 for the max() ** aggregate, or 0 for min(). */ max = sqlite3_user_data(context)!=0; cmp = sqlite3MemCompare(pBest, pArg, pColl); if( (max && cmp<0) || (!max && cmp>0) ){ sqlite3VdbeMemCopy(pBest, pArg); }else{ sqlite3SkipAccumulatorLoad(context); } }else{ pBest->db = sqlite3_context_db_handle(context); sqlite3VdbeMemCopy(pBest, pArg); } } static void minMaxFinalize(sqlite3_context *context){ sqlite3_value *pRes; pRes = (sqlite3_value *)sqlite3_aggregate_context(context, 0); if( pRes ){ if( pRes->flags ){ sqlite3_result_value(context, pRes); } sqlite3VdbeMemRelease(pRes); } } /* ** group_concat(EXPR, ?SEPARATOR?) */ static void groupConcatStep( sqlite3_context *context, int argc, sqlite3_value **argv ){ const char *zVal; StrAccum *pAccum; const char *zSep; int nVal, nSep; assert( argc==1 || argc==2 ); if( sqlite3_value_type(argv[0])==SQLITE_NULL ) return; pAccum = (StrAccum*)sqlite3_aggregate_context(context, sizeof(*pAccum)); if( pAccum ){ sqlite3 *db = sqlite3_context_db_handle(context); int firstTerm = pAccum->mxAlloc==0; pAccum->mxAlloc = db->aLimit[SQLITE_LIMIT_LENGTH]; if( !firstTerm ){ if( argc==2 ){ zSep = (char*)sqlite3_value_text(argv[1]); nSep = sqlite3_value_bytes(argv[1]); }else{ zSep = ","; nSep = 1; } if( nSep ) sqlite3StrAccumAppend(pAccum, zSep, nSep); } zVal = (char*)sqlite3_value_text(argv[0]); nVal = sqlite3_value_bytes(argv[0]); if( zVal ) sqlite3StrAccumAppend(pAccum, zVal, nVal); } } static void groupConcatFinalize(sqlite3_context *context){ StrAccum *pAccum; pAccum = sqlite3_aggregate_context(context, 0); if( pAccum ){ if( pAccum->accError==STRACCUM_TOOBIG ){ sqlite3_result_error_toobig(context); }else if( pAccum->accError==STRACCUM_NOMEM ){ sqlite3_result_error_nomem(context); }else{ sqlite3_result_text(context, sqlite3StrAccumFinish(pAccum), -1, sqlite3_free); } } } /* ** This routine does per-connection function registration. Most ** of the built-in functions above are part of the global function set. ** This routine only deals with those that are not global. */ SQLITE_PRIVATE void sqlite3RegisterBuiltinFunctions(sqlite3 *db){ int rc = sqlite3_overload_function(db, "MATCH", 2); assert( rc==SQLITE_NOMEM || rc==SQLITE_OK ); if( rc==SQLITE_NOMEM ){ db->mallocFailed = 1; } } /* ** Set the LIKEOPT flag on the 2-argument function with the given name. */ static void setLikeOptFlag(sqlite3 *db, const char *zName, u8 flagVal){ FuncDef *pDef; pDef = sqlite3FindFunction(db, zName, sqlite3Strlen30(zName), 2, SQLITE_UTF8, 0); if( ALWAYS(pDef) ){ pDef->funcFlags |= flagVal; } } /* ** Register the built-in LIKE and GLOB functions. 
The caseSensitive ** parameter determines whether or not the LIKE operator is case ** sensitive. GLOB is always case sensitive. */ SQLITE_PRIVATE void sqlite3RegisterLikeFunctions(sqlite3 *db, int caseSensitive){ struct compareInfo *pInfo; if( caseSensitive ){ pInfo = (struct compareInfo*)&likeInfoAlt; }else{ pInfo = (struct compareInfo*)&likeInfoNorm; } sqlite3CreateFunc(db, "like", 2, SQLITE_UTF8, pInfo, likeFunc, 0, 0, 0); sqlite3CreateFunc(db, "like", 3, SQLITE_UTF8, pInfo, likeFunc, 0, 0, 0); sqlite3CreateFunc(db, "glob", 2, SQLITE_UTF8, (struct compareInfo*)&globInfo, likeFunc, 0, 0, 0); setLikeOptFlag(db, "glob", SQLITE_FUNC_LIKE | SQLITE_FUNC_CASE); setLikeOptFlag(db, "like", caseSensitive ? (SQLITE_FUNC_LIKE | SQLITE_FUNC_CASE) : SQLITE_FUNC_LIKE); } /* ** pExpr points to an expression which implements a function. If ** it is appropriate to apply the LIKE optimization to that function ** then set aWc[0] through aWc[2] to the wildcard characters and ** return TRUE. If the function is not a LIKE-style function then ** return FALSE. ** ** *pIsNocase is set to true if uppercase and lowercase are equivalent for ** the function (default for LIKE). If the function makes the distinction ** between uppercase and lowercase (as does GLOB) then *pIsNocase is set to ** false. */ SQLITE_PRIVATE int sqlite3IsLikeFunction(sqlite3 *db, Expr *pExpr, int *pIsNocase, char *aWc){ FuncDef *pDef; if( pExpr->op!=TK_FUNCTION || !pExpr->x.pList || pExpr->x.pList->nExpr!=2 ){ return 0; } assert( !ExprHasProperty(pExpr, EP_xIsSelect) ); pDef = sqlite3FindFunction(db, pExpr->u.zToken, sqlite3Strlen30(pExpr->u.zToken), 2, SQLITE_UTF8, 0); if( NEVER(pDef==0) || (pDef->funcFlags & SQLITE_FUNC_LIKE)==0 ){ return 0; } /* The memcpy() statement assumes that the wildcard characters are ** the first three statements in the compareInfo structure. The ** asserts() that follow verify that assumption */ memcpy(aWc, pDef->pUserData, 3); assert( (char*)&likeInfoAlt == (char*)&likeInfoAlt.matchAll ); assert( &((char*)&likeInfoAlt)[1] == (char*)&likeInfoAlt.matchOne ); assert( &((char*)&likeInfoAlt)[2] == (char*)&likeInfoAlt.matchSet ); *pIsNocase = (pDef->funcFlags & SQLITE_FUNC_CASE)==0; return 1; } /* ** All of the FuncDef structures in the aBuiltinFunc[] array above ** to the global function hash table. This occurs at start-time (as ** a consequence of calling sqlite3_initialize()). ** ** After this routine runs */ SQLITE_PRIVATE void sqlite3RegisterGlobalFunctions(void){ /* ** The following array holds FuncDef structures for all of the functions ** defined in this file. ** ** The array cannot be constant since changes are made to the ** FuncDef.pHash elements at start-time. The elements of this array ** are read-only after initialization is complete. 
*/ static SQLITE_WSD FuncDef aBuiltinFunc[] = { FUNCTION(ltrim, 1, 1, 0, trimFunc ), FUNCTION(ltrim, 2, 1, 0, trimFunc ), FUNCTION(rtrim, 1, 2, 0, trimFunc ), FUNCTION(rtrim, 2, 2, 0, trimFunc ), FUNCTION(trim, 1, 3, 0, trimFunc ), FUNCTION(trim, 2, 3, 0, trimFunc ), FUNCTION(min, -1, 0, 1, minmaxFunc ), FUNCTION(min, 0, 0, 1, 0 ), AGGREGATE2(min, 1, 0, 1, minmaxStep, minMaxFinalize, SQLITE_FUNC_MINMAX ), FUNCTION(max, -1, 1, 1, minmaxFunc ), FUNCTION(max, 0, 1, 1, 0 ), AGGREGATE2(max, 1, 1, 1, minmaxStep, minMaxFinalize, SQLITE_FUNC_MINMAX ), FUNCTION2(typeof, 1, 0, 0, typeofFunc, SQLITE_FUNC_TYPEOF), FUNCTION2(length, 1, 0, 0, lengthFunc, SQLITE_FUNC_LENGTH), FUNCTION(instr, 2, 0, 0, instrFunc ), FUNCTION(substr, 2, 0, 0, substrFunc ), FUNCTION(substr, 3, 0, 0, substrFunc ), FUNCTION(printf, -1, 0, 0, printfFunc ), FUNCTION(unicode, 1, 0, 0, unicodeFunc ), FUNCTION(char, -1, 0, 0, charFunc ), FUNCTION(abs, 1, 0, 0, absFunc ), #ifndef SQLITE_OMIT_FLOATING_POINT FUNCTION(round, 1, 0, 0, roundFunc ), FUNCTION(round, 2, 0, 0, roundFunc ), #endif FUNCTION(upper, 1, 0, 0, upperFunc ), FUNCTION(lower, 1, 0, 0, lowerFunc ), FUNCTION(coalesce, 1, 0, 0, 0 ), FUNCTION(coalesce, 0, 0, 0, 0 ), FUNCTION2(coalesce, -1, 0, 0, noopFunc, SQLITE_FUNC_COALESCE), FUNCTION(hex, 1, 0, 0, hexFunc ), FUNCTION2(ifnull, 2, 0, 0, noopFunc, SQLITE_FUNC_COALESCE), FUNCTION2(unlikely, 1, 0, 0, noopFunc, SQLITE_FUNC_UNLIKELY), FUNCTION2(likelihood, 2, 0, 0, noopFunc, SQLITE_FUNC_UNLIKELY), FUNCTION2(likely, 1, 0, 0, noopFunc, SQLITE_FUNC_UNLIKELY), VFUNCTION(random, 0, 0, 0, randomFunc ), VFUNCTION(randomblob, 1, 0, 0, randomBlob ), FUNCTION(nullif, 2, 0, 1, nullifFunc ), FUNCTION(sqlite_version, 0, 0, 0, versionFunc ), FUNCTION(sqlite_source_id, 0, 0, 0, sourceidFunc ), FUNCTION(sqlite_log, 2, 0, 0, errlogFunc ), #if SQLITE_USER_AUTHENTICATION FUNCTION(sqlite_crypt, 2, 0, 0, sqlite3CryptFunc ), #endif #ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS FUNCTION(sqlite_compileoption_used,1, 0, 0, compileoptionusedFunc ), FUNCTION(sqlite_compileoption_get, 1, 0, 0, compileoptiongetFunc ), #endif /* SQLITE_OMIT_COMPILEOPTION_DIAGS */ FUNCTION(quote, 1, 0, 0, quoteFunc ), VFUNCTION(last_insert_rowid, 0, 0, 0, last_insert_rowid), VFUNCTION(changes, 0, 0, 0, changes ), VFUNCTION(total_changes, 0, 0, 0, total_changes ), FUNCTION(replace, 3, 0, 0, replaceFunc ), FUNCTION(zeroblob, 1, 0, 0, zeroblobFunc ), #ifdef SQLITE_SOUNDEX FUNCTION(soundex, 1, 0, 0, soundexFunc ), #endif #ifndef SQLITE_OMIT_LOAD_EXTENSION FUNCTION(load_extension, 1, 0, 0, loadExt ), FUNCTION(load_extension, 2, 0, 0, loadExt ), #endif AGGREGATE(sum, 1, 0, 0, sumStep, sumFinalize ), AGGREGATE(total, 1, 0, 0, sumStep, totalFinalize ), AGGREGATE(avg, 1, 0, 0, sumStep, avgFinalize ), AGGREGATE2(count, 0, 0, 0, countStep, countFinalize, SQLITE_FUNC_COUNT ), AGGREGATE(count, 1, 0, 0, countStep, countFinalize ), AGGREGATE(group_concat, 1, 0, 0, groupConcatStep, groupConcatFinalize), AGGREGATE(group_concat, 2, 0, 0, groupConcatStep, groupConcatFinalize), LIKEFUNC(glob, 2, &globInfo, SQLITE_FUNC_LIKE|SQLITE_FUNC_CASE), #ifdef SQLITE_CASE_SENSITIVE_LIKE LIKEFUNC(like, 2, &likeInfoAlt, SQLITE_FUNC_LIKE|SQLITE_FUNC_CASE), LIKEFUNC(like, 3, &likeInfoAlt, SQLITE_FUNC_LIKE|SQLITE_FUNC_CASE), #else LIKEFUNC(like, 2, &likeInfoNorm, SQLITE_FUNC_LIKE), LIKEFUNC(like, 3, &likeInfoNorm, SQLITE_FUNC_LIKE), #endif }; int i; FuncDefHash *pHash = &GLOBAL(FuncDefHash, sqlite3GlobalFunctions); FuncDef *aFunc = (FuncDef*)&GLOBAL(FuncDef, aBuiltinFunc); for(i=0; idb->mallocFailed flag is set. 
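**
** Illustrative schema (table and column names are placeholders): given
**
**     CREATE TABLE parent(a PRIMARY KEY, b UNIQUE);
**     CREATE TABLE child(x REFERENCES parent(b));
**
** this routine locates the automatic UNIQUE index on parent(b). If the
** referenced column carried no UNIQUE index or PRIMARY KEY, the
** "foreign key mismatch" error below would be reported instead.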
*/ SQLITE_PRIVATE int sqlite3FkLocateIndex( Parse *pParse, /* Parse context to store any error in */ Table *pParent, /* Parent table of FK constraint pFKey */ FKey *pFKey, /* Foreign key to find index for */ Index **ppIdx, /* OUT: Unique index on parent table */ int **paiCol /* OUT: Map of index columns in pFKey */ ){ Index *pIdx = 0; /* Value to return via *ppIdx */ int *aiCol = 0; /* Value to return via *paiCol */ int nCol = pFKey->nCol; /* Number of columns in parent key */ char *zKey = pFKey->aCol[0].zCol; /* Name of left-most parent key column */ /* The caller is responsible for zeroing output parameters. */ assert( ppIdx && *ppIdx==0 ); assert( !paiCol || *paiCol==0 ); assert( pParse ); /* If this is a non-composite (single column) foreign key, check if it ** maps to the INTEGER PRIMARY KEY of table pParent. If so, leave *ppIdx ** and *paiCol set to zero and return early. ** ** Otherwise, for a composite foreign key (more than one column), allocate ** space for the aiCol array (returned via output parameter *paiCol). ** Non-composite foreign keys do not require the aiCol array. */ if( nCol==1 ){ /* The FK maps to the IPK if any of the following are true: ** ** 1) There is an INTEGER PRIMARY KEY column and the FK is implicitly ** mapped to the primary key of table pParent, or ** 2) The FK is explicitly mapped to a column declared as INTEGER ** PRIMARY KEY. */ if( pParent->iPKey>=0 ){ if( !zKey ) return 0; if( !sqlite3StrICmp(pParent->aCol[pParent->iPKey].zName, zKey) ) return 0; } }else if( paiCol ){ assert( nCol>1 ); aiCol = (int *)sqlite3DbMallocRaw(pParse->db, nCol*sizeof(int)); if( !aiCol ) return 1; *paiCol = aiCol; } for(pIdx=pParent->pIndex; pIdx; pIdx=pIdx->pNext){ if( pIdx->nKeyCol==nCol && IsUniqueIndex(pIdx) ){ /* pIdx is a UNIQUE index (or a PRIMARY KEY) and has the right number ** of columns. If each indexed column corresponds to a foreign key ** column of pFKey, then this index is a winner. */ if( zKey==0 ){ /* If zKey is NULL, then this foreign key is implicitly mapped to ** the PRIMARY KEY of table pParent. The PRIMARY KEY index may be ** identified by the test. */ if( IsPrimaryKeyIndex(pIdx) ){ if( aiCol ){ int i; for(i=0; iaCol[i].iFrom; } break; } }else{ /* If zKey is non-NULL, then this foreign key was declared to ** map to an explicit list of columns in table pParent. Check if this ** index matches those columns. Also, check that the index uses ** the default collation sequences for each column. */ int i, j; for(i=0; iaiColumn[i]; /* Index of column in parent tbl */ char *zDfltColl; /* Def. collation for column */ char *zIdxCol; /* Name of indexed column */ /* If the index uses a collation sequence that is different from ** the default collation sequence for the column, this index is ** unusable. Bail out early in this case. */ zDfltColl = pParent->aCol[iCol].zColl; if( !zDfltColl ){ zDfltColl = "BINARY"; } if( sqlite3StrICmp(pIdx->azColl[i], zDfltColl) ) break; zIdxCol = pParent->aCol[iCol].zName; for(j=0; jaCol[j].zCol, zIdxCol)==0 ){ if( aiCol ) aiCol[i] = pFKey->aCol[j].iFrom; break; } } if( j==nCol ) break; } if( i==nCol ) break; /* pIdx is usable */ } } } if( !pIdx ){ if( !pParse->disableTriggers ){ sqlite3ErrorMsg(pParse, "foreign key mismatch - \"%w\" referencing \"%w\"", pFKey->pFrom->zName, pFKey->zTo); } sqlite3DbFree(pParse->db, aiCol); return 1; } *ppIdx = pIdx; return 0; } /* ** This function is called when a row is inserted into or deleted from the ** child table of foreign key constraint pFKey. 
If an SQL UPDATE is executed ** on the child table of pFKey, this function is invoked twice for each row ** affected - once to "delete" the old row, and then again to "insert" the ** new row. ** ** Each time it is called, this function generates VDBE code to locate the ** row in the parent table that corresponds to the row being inserted into ** or deleted from the child table. If the parent row can be found, no ** special action is taken. Otherwise, if the parent row can *not* be ** found in the parent table: ** ** Operation | FK type | Action taken ** -------------------------------------------------------------------------- ** INSERT immediate Increment the "immediate constraint counter". ** ** DELETE immediate Decrement the "immediate constraint counter". ** ** INSERT deferred Increment the "deferred constraint counter". ** ** DELETE deferred Decrement the "deferred constraint counter". ** ** These operations are identified in the comment at the top of this file ** (fkey.c) as "I.1" and "D.1". */ static void fkLookupParent( Parse *pParse, /* Parse context */ int iDb, /* Index of database housing pTab */ Table *pTab, /* Parent table of FK pFKey */ Index *pIdx, /* Unique index on parent key columns in pTab */ FKey *pFKey, /* Foreign key constraint */ int *aiCol, /* Map from parent key columns to child table columns */ int regData, /* Address of array containing child table row */ int nIncr, /* Increment constraint counter by this */ int isIgnore /* If true, pretend pTab contains all NULL values */ ){ int i; /* Iterator variable */ Vdbe *v = sqlite3GetVdbe(pParse); /* Vdbe to add code to */ int iCur = pParse->nTab - 1; /* Cursor number to use */ int iOk = sqlite3VdbeMakeLabel(v); /* jump here if parent key found */ /* If nIncr is less than zero, then check at runtime if there are any ** outstanding constraints to resolve. If there are not, there is no need ** to check if deleting this row resolves any outstanding violations. ** ** Check if any of the key columns in the child table row are NULL. If ** any are, then the constraint is considered satisfied. No need to ** search for a matching row in the parent table. */ if( nIncr<0 ){ sqlite3VdbeAddOp2(v, OP_FkIfZero, pFKey->isDeferred, iOk); VdbeCoverage(v); } for(i=0; inCol; i++){ int iReg = aiCol[i] + regData + 1; sqlite3VdbeAddOp2(v, OP_IsNull, iReg, iOk); VdbeCoverage(v); } if( isIgnore==0 ){ if( pIdx==0 ){ /* If pIdx is NULL, then the parent key is the INTEGER PRIMARY KEY ** column of the parent table (table pTab). */ int iMustBeInt; /* Address of MustBeInt instruction */ int regTemp = sqlite3GetTempReg(pParse); /* Invoke MustBeInt to coerce the child key value to an integer (i.e. ** apply the affinity of the parent key). If this fails, then there ** is no matching parent key. Before using MustBeInt, make a copy of ** the value. Otherwise, the value inserted into the child key column ** will have INTEGER affinity applied to it, which may not be correct. */ sqlite3VdbeAddOp2(v, OP_SCopy, aiCol[0]+1+regData, regTemp); iMustBeInt = sqlite3VdbeAddOp2(v, OP_MustBeInt, regTemp, 0); VdbeCoverage(v); /* If the parent table is the same as the child table, and we are about ** to increment the constraint-counter (i.e. this is an INSERT operation), ** then check if the row being inserted matches itself. If so, do not ** increment the constraint-counter. 
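**
** Illustrative case (table and column names are placeholders): for a
** self-referential key such as
**
**     CREATE TABLE t(id INTEGER PRIMARY KEY, pid REFERENCES t(id));
**     INSERT INTO t VALUES(1, 1);
**
** the inserted row satisfies its own constraint, so the counter is left
** unchanged.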
*/ if( pTab==pFKey->pFrom && nIncr==1 ){ sqlite3VdbeAddOp3(v, OP_Eq, regData, iOk, regTemp); VdbeCoverage(v); sqlite3VdbeChangeP5(v, SQLITE_NOTNULL); } sqlite3OpenTable(pParse, iCur, iDb, pTab, OP_OpenRead); sqlite3VdbeAddOp3(v, OP_NotExists, iCur, 0, regTemp); VdbeCoverage(v); sqlite3VdbeAddOp2(v, OP_Goto, 0, iOk); sqlite3VdbeJumpHere(v, sqlite3VdbeCurrentAddr(v)-2); sqlite3VdbeJumpHere(v, iMustBeInt); sqlite3ReleaseTempReg(pParse, regTemp); }else{ int nCol = pFKey->nCol; int regTemp = sqlite3GetTempRange(pParse, nCol); int regRec = sqlite3GetTempReg(pParse); sqlite3VdbeAddOp3(v, OP_OpenRead, iCur, pIdx->tnum, iDb); sqlite3VdbeSetP4KeyInfo(pParse, pIdx); for(i=0; ipFrom && nIncr==1 ){ int iJump = sqlite3VdbeCurrentAddr(v) + nCol + 1; for(i=0; iaiColumn[i]+1+regData; assert( aiCol[i]!=pTab->iPKey ); if( pIdx->aiColumn[i]==pTab->iPKey ){ /* The parent key is a composite key that includes the IPK column */ iParent = regData; } sqlite3VdbeAddOp3(v, OP_Ne, iChild, iJump, iParent); VdbeCoverage(v); sqlite3VdbeChangeP5(v, SQLITE_JUMPIFNULL); } sqlite3VdbeAddOp2(v, OP_Goto, 0, iOk); } sqlite3VdbeAddOp4(v, OP_MakeRecord, regTemp, nCol, regRec, sqlite3IndexAffinityStr(v,pIdx), nCol); sqlite3VdbeAddOp4Int(v, OP_Found, iCur, iOk, regRec, 0); VdbeCoverage(v); sqlite3ReleaseTempReg(pParse, regRec); sqlite3ReleaseTempRange(pParse, regTemp, nCol); } } if( !pFKey->isDeferred && !(pParse->db->flags & SQLITE_DeferFKs) && !pParse->pToplevel && !pParse->isMultiWrite ){ /* Special case: If this is an INSERT statement that will insert exactly ** one row into the table, raise a constraint immediately instead of ** incrementing a counter. This is necessary as the VM code is being ** generated for will not open a statement transaction. */ assert( nIncr==1 ); sqlite3HaltConstraint(pParse, SQLITE_CONSTRAINT_FOREIGNKEY, OE_Abort, 0, P4_STATIC, P5_ConstraintFK); }else{ if( nIncr>0 && pFKey->isDeferred==0 ){ sqlite3MayAbort(pParse); } sqlite3VdbeAddOp2(v, OP_FkCounter, pFKey->isDeferred, nIncr); } sqlite3VdbeResolveLabel(v, iOk); sqlite3VdbeAddOp1(v, OP_Close, iCur); } /* ** Return an Expr object that refers to a memory register corresponding ** to column iCol of table pTab. ** ** regBase is the first of an array of register that contains the data ** for pTab. regBase itself holds the rowid. regBase+1 holds the first ** column. regBase+2 holds the second column, and so forth. */ static Expr *exprTableRegister( Parse *pParse, /* Parsing and code generating context */ Table *pTab, /* The table whose content is at r[regBase]... */ int regBase, /* Contents of table pTab */ i16 iCol /* Which column of pTab is desired */ ){ Expr *pExpr; Column *pCol; const char *zColl; sqlite3 *db = pParse->db; pExpr = sqlite3Expr(db, TK_REGISTER, 0); if( pExpr ){ if( iCol>=0 && iCol!=pTab->iPKey ){ pCol = &pTab->aCol[iCol]; pExpr->iTable = regBase + iCol + 1; pExpr->affinity = pCol->affinity; zColl = pCol->zColl; if( zColl==0 ) zColl = db->pDfltColl->zName; pExpr = sqlite3ExprAddCollateString(pParse, pExpr, zColl); }else{ pExpr->iTable = regBase; pExpr->affinity = SQLITE_AFF_INTEGER; } } return pExpr; } /* ** Return an Expr object that refers to column iCol of table pTab which ** has cursor iCur. 
*/ static Expr *exprTableColumn( sqlite3 *db, /* The database connection */ Table *pTab, /* The table whose column is desired */ int iCursor, /* The open cursor on the table */ i16 iCol /* The column that is wanted */ ){ Expr *pExpr = sqlite3Expr(db, TK_COLUMN, 0); if( pExpr ){ pExpr->pTab = pTab; pExpr->iTable = iCursor; pExpr->iColumn = iCol; } return pExpr; } /* ** This function is called to generate code executed when a row is deleted ** from the parent table of foreign key constraint pFKey and, if pFKey is ** deferred, when a row is inserted into the same table. When generating ** code for an SQL UPDATE operation, this function may be called twice - ** once to "delete" the old row and once to "insert" the new row. ** ** Parameter nIncr is passed -1 when inserting a row (as this may decrease ** the number of FK violations in the db) or +1 when deleting one (as this ** may increase the number of FK constraint problems). ** ** The code generated by this function scans through the rows in the child ** table that correspond to the parent table row being deleted or inserted. ** For each child row found, one of the following actions is taken: ** ** Operation | FK type | Action taken ** -------------------------------------------------------------------------- ** DELETE immediate Increment the "immediate constraint counter". ** Or, if the ON (UPDATE|DELETE) action is RESTRICT, ** throw a "FOREIGN KEY constraint failed" exception. ** ** INSERT immediate Decrement the "immediate constraint counter". ** ** DELETE deferred Increment the "deferred constraint counter". ** Or, if the ON (UPDATE|DELETE) action is RESTRICT, ** throw a "FOREIGN KEY constraint failed" exception. ** ** INSERT deferred Decrement the "deferred constraint counter". ** ** These operations are identified in the comment at the top of this file ** (fkey.c) as "I.2" and "D.2". */ static void fkScanChildren( Parse *pParse, /* Parse context */ SrcList *pSrc, /* The child table to be scanned */ Table *pTab, /* The parent table */ Index *pIdx, /* Index on parent covering the foreign key */ FKey *pFKey, /* The foreign key linking pSrc to pTab */ int *aiCol, /* Map from pIdx cols to child table cols */ int regData, /* Parent row data starts here */ int nIncr /* Amount to increment deferred counter by */ ){ sqlite3 *db = pParse->db; /* Database handle */ int i; /* Iterator variable */ Expr *pWhere = 0; /* WHERE clause to scan with */ NameContext sNameContext; /* Context used to resolve WHERE clause */ WhereInfo *pWInfo; /* Context used by sqlite3WhereXXX() */ int iFkIfZero = 0; /* Address of OP_FkIfZero */ Vdbe *v = sqlite3GetVdbe(pParse); assert( pIdx==0 || pIdx->pTable==pTab ); assert( pIdx==0 || pIdx->nKeyCol==pFKey->nCol ); assert( pIdx!=0 || pFKey->nCol==1 ); assert( pIdx!=0 || HasRowid(pTab) ); if( nIncr<0 ){ iFkIfZero = sqlite3VdbeAddOp2(v, OP_FkIfZero, pFKey->isDeferred, 0); VdbeCoverage(v); } /* Create an Expr object representing an SQL expression like: ** ** = AND = ... ** ** The collation sequence used for the comparison should be that of ** the parent key columns. The affinity of the parent key column should ** be applied to each child key value before the comparison takes place. */ for(i=0; inCol; i++){ Expr *pLeft; /* Value from parent table row */ Expr *pRight; /* Column ref to child table */ Expr *pEq; /* Expression (pLeft = pRight) */ i16 iCol; /* Index of column in child table */ const char *zCol; /* Name of column in child table */ iCol = pIdx ? 
pIdx->aiColumn[i] : -1; pLeft = exprTableRegister(pParse, pTab, regData, iCol); iCol = aiCol ? aiCol[i] : pFKey->aCol[0].iFrom; assert( iCol>=0 ); zCol = pFKey->pFrom->aCol[iCol].zName; pRight = sqlite3Expr(db, TK_ID, zCol); pEq = sqlite3PExpr(pParse, TK_EQ, pLeft, pRight, 0); pWhere = sqlite3ExprAnd(db, pWhere, pEq); } /* If the child table is the same as the parent table, then add terms ** to the WHERE clause that prevent this entry from being scanned. ** The added WHERE clause terms are like this: ** ** $current_rowid!=rowid ** NOT( $current_a==a AND $current_b==b AND ... ) ** ** The first form is used for rowid tables. The second form is used ** for WITHOUT ROWID tables. In the second form, the primary key is ** (a,b,...) */ if( pTab==pFKey->pFrom && nIncr>0 ){ Expr *pNe; /* Expression (pLeft != pRight) */ Expr *pLeft; /* Value from parent table row */ Expr *pRight; /* Column ref to child table */ if( HasRowid(pTab) ){ pLeft = exprTableRegister(pParse, pTab, regData, -1); pRight = exprTableColumn(db, pTab, pSrc->a[0].iCursor, -1); pNe = sqlite3PExpr(pParse, TK_NE, pLeft, pRight, 0); }else{ Expr *pEq, *pAll = 0; Index *pPk = sqlite3PrimaryKeyIndex(pTab); assert( pIdx!=0 ); for(i=0; inKeyCol; i++){ i16 iCol = pIdx->aiColumn[i]; pLeft = exprTableRegister(pParse, pTab, regData, iCol); pRight = exprTableColumn(db, pTab, pSrc->a[0].iCursor, iCol); pEq = sqlite3PExpr(pParse, TK_EQ, pLeft, pRight, 0); pAll = sqlite3ExprAnd(db, pAll, pEq); } pNe = sqlite3PExpr(pParse, TK_NOT, pAll, 0, 0); } pWhere = sqlite3ExprAnd(db, pWhere, pNe); } /* Resolve the references in the WHERE clause. */ memset(&sNameContext, 0, sizeof(NameContext)); sNameContext.pSrcList = pSrc; sNameContext.pParse = pParse; sqlite3ResolveExprNames(&sNameContext, pWhere); /* Create VDBE to loop through the entries in pSrc that match the WHERE ** clause. For each row found, increment either the deferred or immediate ** foreign key constraint counter. */ pWInfo = sqlite3WhereBegin(pParse, pSrc, pWhere, 0, 0, 0, 0); sqlite3VdbeAddOp2(v, OP_FkCounter, pFKey->isDeferred, nIncr); if( pWInfo ){ sqlite3WhereEnd(pWInfo); } /* Clean up the WHERE clause constructed above. */ sqlite3ExprDelete(db, pWhere); if( iFkIfZero ){ sqlite3VdbeJumpHere(v, iFkIfZero); } } /* ** This function returns a linked list of FKey objects (connected by ** FKey.pNextTo) holding all children of table pTab. For example, ** given the following schema: ** ** CREATE TABLE t1(a PRIMARY KEY); ** CREATE TABLE t2(b REFERENCES t1(a); ** ** Calling this function with table "t1" as an argument returns a pointer ** to the FKey structure representing the foreign key constraint on table ** "t2". Calling this function with "t2" as the argument would return a ** NULL pointer (as there are no FK constraints for which t2 is the parent ** table). */ SQLITE_PRIVATE FKey *sqlite3FkReferences(Table *pTab){ return (FKey *)sqlite3HashFind(&pTab->pSchema->fkeyHash, pTab->zName); } /* ** The second argument is a Trigger structure allocated by the ** fkActionTrigger() routine. This function deletes the Trigger structure ** and all of its sub-components. ** ** The Trigger structure or any of its sub-components may be allocated from ** the lookaside buffer belonging to database handle dbMem. 
*/ static void fkTriggerDelete(sqlite3 *dbMem, Trigger *p){ if( p ){ TriggerStep *pStep = p->step_list; sqlite3ExprDelete(dbMem, pStep->pWhere); sqlite3ExprListDelete(dbMem, pStep->pExprList); sqlite3SelectDelete(dbMem, pStep->pSelect); sqlite3ExprDelete(dbMem, p->pWhen); sqlite3DbFree(dbMem, p); } } /* ** This function is called to generate code that runs when table pTab is ** being dropped from the database. The SrcList passed as the second argument ** to this function contains a single entry guaranteed to resolve to ** table pTab. ** ** Normally, no code is required. However, if either ** ** (a) The table is the parent table of a FK constraint, or ** (b) The table is the child table of a deferred FK constraint and it is ** determined at runtime that there are outstanding deferred FK ** constraint violations in the database, ** ** then the equivalent of "DELETE FROM " is executed before dropping ** the table from the database. Triggers are disabled while running this ** DELETE, but foreign key actions are not. */ SQLITE_PRIVATE void sqlite3FkDropTable(Parse *pParse, SrcList *pName, Table *pTab){ sqlite3 *db = pParse->db; if( (db->flags&SQLITE_ForeignKeys) && !IsVirtual(pTab) && !pTab->pSelect ){ int iSkip = 0; Vdbe *v = sqlite3GetVdbe(pParse); assert( v ); /* VDBE has already been allocated */ if( sqlite3FkReferences(pTab)==0 ){ /* Search for a deferred foreign key constraint for which this table ** is the child table. If one cannot be found, return without ** generating any VDBE code. If one can be found, then jump over ** the entire DELETE if there are no outstanding deferred constraints ** when this statement is run. */ FKey *p; for(p=pTab->pFKey; p; p=p->pNextFrom){ if( p->isDeferred || (db->flags & SQLITE_DeferFKs) ) break; } if( !p ) return; iSkip = sqlite3VdbeMakeLabel(v); sqlite3VdbeAddOp2(v, OP_FkIfZero, 1, iSkip); VdbeCoverage(v); } pParse->disableTriggers = 1; sqlite3DeleteFrom(pParse, sqlite3SrcListDup(db, pName, 0), 0); pParse->disableTriggers = 0; /* If the DELETE has generated immediate foreign key constraint ** violations, halt the VDBE and return an error at this point, before ** any modifications to the schema are made. This is because statement ** transactions are not able to rollback schema changes. ** ** If the SQLITE_DeferFKs flag is set, then this is not required, as ** the statement transaction will not be rolled back even if FK ** constraints are violated. */ if( (db->flags & SQLITE_DeferFKs)==0 ){ sqlite3VdbeAddOp2(v, OP_FkIfZero, 0, sqlite3VdbeCurrentAddr(v)+2); VdbeCoverage(v); sqlite3HaltConstraint(pParse, SQLITE_CONSTRAINT_FOREIGNKEY, OE_Abort, 0, P4_STATIC, P5_ConstraintFK); } if( iSkip ){ sqlite3VdbeResolveLabel(v, iSkip); } } } /* ** The second argument points to an FKey object representing a foreign key ** for which pTab is the child table. An UPDATE statement against pTab ** is currently being processed. For each column of the table that is ** actually updated, the corresponding element in the aChange[] array ** is zero or greater (if a column is unmodified the corresponding element ** is set to -1). If the rowid column is modified by the UPDATE statement ** the bChngRowid argument is non-zero. ** ** This function returns true if any of the columns that are part of the ** child key for FK constraint *p are modified. 
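**
** Illustrative case (table and column names are placeholders): with
**
**     CREATE TABLE c(x REFERENCES p(a), y);
**
** an "UPDATE c SET y=1" leaves the child key untouched and this routine
** returns false, whereas "UPDATE c SET x=2" makes it return true.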
*/ static int fkChildIsModified( Table *pTab, /* Table being updated */ FKey *p, /* Foreign key for which pTab is the child */ int *aChange, /* Array indicating modified columns */ int bChngRowid /* True if rowid is modified by this update */ ){ int i; for(i=0; inCol; i++){ int iChildKey = p->aCol[i].iFrom; if( aChange[iChildKey]>=0 ) return 1; if( iChildKey==pTab->iPKey && bChngRowid ) return 1; } return 0; } /* ** The second argument points to an FKey object representing a foreign key ** for which pTab is the parent table. An UPDATE statement against pTab ** is currently being processed. For each column of the table that is ** actually updated, the corresponding element in the aChange[] array ** is zero or greater (if a column is unmodified the corresponding element ** is set to -1). If the rowid column is modified by the UPDATE statement ** the bChngRowid argument is non-zero. ** ** This function returns true if any of the columns that are part of the ** parent key for FK constraint *p are modified. */ static int fkParentIsModified( Table *pTab, FKey *p, int *aChange, int bChngRowid ){ int i; for(i=0; inCol; i++){ char *zKey = p->aCol[i].zCol; int iKey; for(iKey=0; iKeynCol; iKey++){ if( aChange[iKey]>=0 || (iKey==pTab->iPKey && bChngRowid) ){ Column *pCol = &pTab->aCol[iKey]; if( zKey ){ if( 0==sqlite3StrICmp(pCol->zName, zKey) ) return 1; }else if( pCol->colFlags & COLFLAG_PRIMKEY ){ return 1; } } } } return 0; } /* ** Return true if the parser passed as the first argument is being ** used to code a trigger that is really a "SET NULL" action belonging ** to trigger pFKey. */ static int isSetNullAction(Parse *pParse, FKey *pFKey){ Parse *pTop = sqlite3ParseToplevel(pParse); if( pTop->pTriggerPrg ){ Trigger *p = pTop->pTriggerPrg->pTrigger; if( (p==pFKey->apTrigger[0] && pFKey->aAction[0]==OE_SetNull) || (p==pFKey->apTrigger[1] && pFKey->aAction[1]==OE_SetNull) ){ return 1; } } return 0; } /* ** This function is called when inserting, deleting or updating a row of ** table pTab to generate VDBE code to perform foreign key constraint ** processing for the operation. ** ** For a DELETE operation, parameter regOld is passed the index of the ** first register in an array of (pTab->nCol+1) registers containing the ** rowid of the row being deleted, followed by each of the column values ** of the row being deleted, from left to right. Parameter regNew is passed ** zero in this case. ** ** For an INSERT operation, regOld is passed zero and regNew is passed the ** first register of an array of (pTab->nCol+1) registers containing the new ** row data. ** ** For an UPDATE operation, this function is called twice. Once before ** the original record is deleted from the table using the calling convention ** described for DELETE. Then again after the original record is deleted ** but before the new record is inserted using the INSERT convention. */ SQLITE_PRIVATE void sqlite3FkCheck( Parse *pParse, /* Parse context */ Table *pTab, /* Row is being deleted from this table */ int regOld, /* Previous row data is stored here */ int regNew, /* New row data is stored here */ int *aChange, /* Array indicating UPDATEd columns (or 0) */ int bChngRowid /* True if rowid is UPDATEd */ ){ sqlite3 *db = pParse->db; /* Database handle */ FKey *pFKey; /* Used to iterate through FKs */ int iDb; /* Index of database containing pTab */ const char *zDb; /* Name of database containing pTab */ int isIgnoreErrors = pParse->disableTriggers; /* Exactly one of regOld and regNew should be non-zero. 
*/ assert( (regOld==0)!=(regNew==0) ); /* If foreign-keys are disabled, this function is a no-op. */ if( (db->flags&SQLITE_ForeignKeys)==0 ) return; iDb = sqlite3SchemaToIndex(db, pTab->pSchema); zDb = db->aDb[iDb].zName; /* Loop through all the foreign key constraints for which pTab is the ** child table (the table that the foreign key definition is part of). */ for(pFKey=pTab->pFKey; pFKey; pFKey=pFKey->pNextFrom){ Table *pTo; /* Parent table of foreign key pFKey */ Index *pIdx = 0; /* Index on key columns in pTo */ int *aiFree = 0; int *aiCol; int iCol; int i; int bIgnore = 0; if( aChange && sqlite3_stricmp(pTab->zName, pFKey->zTo)!=0 && fkChildIsModified(pTab, pFKey, aChange, bChngRowid)==0 ){ continue; } /* Find the parent table of this foreign key. Also find a unique index ** on the parent key columns in the parent table. If either of these ** schema items cannot be located, set an error in pParse and return ** early. */ if( pParse->disableTriggers ){ pTo = sqlite3FindTable(db, pFKey->zTo, zDb); }else{ pTo = sqlite3LocateTable(pParse, 0, pFKey->zTo, zDb); } if( !pTo || sqlite3FkLocateIndex(pParse, pTo, pFKey, &pIdx, &aiFree) ){ assert( isIgnoreErrors==0 || (regOld!=0 && regNew==0) ); if( !isIgnoreErrors || db->mallocFailed ) return; if( pTo==0 ){ /* If isIgnoreErrors is true, then a table is being dropped. In this ** case SQLite runs a "DELETE FROM xxx" on the table being dropped ** before actually dropping it in order to check FK constraints. ** If the parent table of an FK constraint on the current table is ** missing, behave as if it is empty. i.e. decrement the relevant ** FK counter for each row of the current table with non-NULL keys. */ Vdbe *v = sqlite3GetVdbe(pParse); int iJump = sqlite3VdbeCurrentAddr(v) + pFKey->nCol + 1; for(i=0; inCol; i++){ int iReg = pFKey->aCol[i].iFrom + regOld + 1; sqlite3VdbeAddOp2(v, OP_IsNull, iReg, iJump); VdbeCoverage(v); } sqlite3VdbeAddOp2(v, OP_FkCounter, pFKey->isDeferred, -1); } continue; } assert( pFKey->nCol==1 || (aiFree && pIdx) ); if( aiFree ){ aiCol = aiFree; }else{ iCol = pFKey->aCol[0].iFrom; aiCol = &iCol; } for(i=0; inCol; i++){ if( aiCol[i]==pTab->iPKey ){ aiCol[i] = -1; } #ifndef SQLITE_OMIT_AUTHORIZATION /* Request permission to read the parent key columns. If the ** authorization callback returns SQLITE_IGNORE, behave as if any ** values read from the parent table are NULL. */ if( db->xAuth ){ int rcauth; char *zCol = pTo->aCol[pIdx ? pIdx->aiColumn[i] : pTo->iPKey].zName; rcauth = sqlite3AuthReadCol(pParse, pTo->zName, zCol, iDb); bIgnore = (rcauth==SQLITE_IGNORE); } #endif } /* Take a shared-cache advisory read-lock on the parent table. Allocate ** a cursor to use to search the unique index on the parent key columns ** in the parent table. */ sqlite3TableLock(pParse, iDb, pTo->tnum, 0, pTo->zName); pParse->nTab++; if( regOld!=0 ){ /* A row is being removed from the child table. Search for the parent. ** If the parent does not exist, removing the child row resolves an ** outstanding foreign key constraint violation. */ fkLookupParent(pParse, iDb, pTo, pIdx, pFKey, aiCol, regOld, -1, bIgnore); } if( regNew!=0 && !isSetNullAction(pParse, pFKey) ){ /* A row is being added to the child table. If a parent row cannot ** be found, adding the child row has violated the FK constraint. ** ** If this operation is being performed as part of a trigger program ** that is actually a "SET NULL" action belonging to this very ** foreign key, then omit this scan altogether. 
As all child key ** values are guaranteed to be NULL, it is not possible for adding ** this row to cause an FK violation. */ fkLookupParent(pParse, iDb, pTo, pIdx, pFKey, aiCol, regNew, +1, bIgnore); } sqlite3DbFree(db, aiFree); } /* Loop through all the foreign key constraints that refer to this table. ** (the "child" constraints) */ for(pFKey = sqlite3FkReferences(pTab); pFKey; pFKey=pFKey->pNextTo){ Index *pIdx = 0; /* Foreign key index for pFKey */ SrcList *pSrc; int *aiCol = 0; if( aChange && fkParentIsModified(pTab, pFKey, aChange, bChngRowid)==0 ){ continue; } if( !pFKey->isDeferred && !(db->flags & SQLITE_DeferFKs) && !pParse->pToplevel && !pParse->isMultiWrite ){ assert( regOld==0 && regNew!=0 ); /* Inserting a single row into a parent table cannot cause (or fix) ** an immediate foreign key violation. So do nothing in this case. */ continue; } if( sqlite3FkLocateIndex(pParse, pTab, pFKey, &pIdx, &aiCol) ){ if( !isIgnoreErrors || db->mallocFailed ) return; continue; } assert( aiCol || pFKey->nCol==1 ); /* Create a SrcList structure containing the child table. We need the ** child table as a SrcList for sqlite3WhereBegin() */ pSrc = sqlite3SrcListAppend(db, 0, 0, 0); if( pSrc ){ struct SrcList_item *pItem = pSrc->a; pItem->pTab = pFKey->pFrom; pItem->zName = pFKey->pFrom->zName; pItem->pTab->nRef++; pItem->iCursor = pParse->nTab++; if( regNew!=0 ){ fkScanChildren(pParse, pSrc, pTab, pIdx, pFKey, aiCol, regNew, -1); } if( regOld!=0 ){ int eAction = pFKey->aAction[aChange!=0]; fkScanChildren(pParse, pSrc, pTab, pIdx, pFKey, aiCol, regOld, 1); /* If this is a deferred FK constraint, or a CASCADE or SET NULL ** action applies, then any foreign key violations caused by ** removing the parent key will be rectified by the action trigger. ** So do not set the "may-abort" flag in this case. ** ** Note 1: If the FK is declared "ON UPDATE CASCADE", then the ** may-abort flag will eventually be set on this statement anyway ** (when this function is called as part of processing the UPDATE ** within the action trigger). ** ** Note 2: At first glance it may seem like SQLite could simply omit ** all OP_FkCounter related scans when either CASCADE or SET NULL ** applies. The trouble starts if the CASCADE or SET NULL action ** trigger causes other triggers or action rules attached to the ** child table to fire. In these cases the fk constraint counters ** might be set incorrectly if any OP_FkCounter related scans are ** omitted. */ if( !pFKey->isDeferred && eAction!=OE_Cascade && eAction!=OE_SetNull ){ sqlite3MayAbort(pParse); } } pItem->zName = 0; sqlite3SrcListDelete(db, pSrc); } sqlite3DbFree(db, aiCol); } } #define COLUMN_MASK(x) (((x)>31) ? 0xffffffff : ((u32)1<<(x))) /* ** This function is called before generating code to update or delete a ** row contained in table pTab. */ SQLITE_PRIVATE u32 sqlite3FkOldmask( Parse *pParse, /* Parse context */ Table *pTab /* Table being modified */ ){ u32 mask = 0; if( pParse->db->flags&SQLITE_ForeignKeys ){ FKey *p; int i; for(p=pTab->pFKey; p; p=p->pNextFrom){ for(i=0; inCol; i++) mask |= COLUMN_MASK(p->aCol[i].iFrom); } for(p=sqlite3FkReferences(pTab); p; p=p->pNextTo){ Index *pIdx = 0; sqlite3FkLocateIndex(pParse, pTab, p, &pIdx, 0); if( pIdx ){ for(i=0; inKeyCol; i++) mask |= COLUMN_MASK(pIdx->aiColumn[i]); } } } return mask; } /* ** This function is called before generating code to update or delete a ** row contained in table pTab. If the operation is a DELETE, then ** parameter aChange is passed a NULL value. 
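** (For illustration: a plain "DELETE FROM child" reaches this routine
** with aChange==0.)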
For an UPDATE, aChange points ** to an array of size N, where N is the number of columns in table pTab. ** If the i'th column is not modified by the UPDATE, then the corresponding ** entry in the aChange[] array is set to -1. If the column is modified, ** the value is 0 or greater. Parameter chngRowid is set to true if the ** UPDATE statement modifies the rowid fields of the table. ** ** If any foreign key processing will be required, this function returns ** true. If there is no foreign key related processing, this function ** returns false. */ SQLITE_PRIVATE int sqlite3FkRequired( Parse *pParse, /* Parse context */ Table *pTab, /* Table being modified */ int *aChange, /* Non-NULL for UPDATE operations */ int chngRowid /* True for UPDATE that affects rowid */ ){ if( pParse->db->flags&SQLITE_ForeignKeys ){ if( !aChange ){ /* A DELETE operation. Foreign key processing is required if the ** table in question is either the child or parent table for any ** foreign key constraint. */ return (sqlite3FkReferences(pTab) || pTab->pFKey); }else{ /* This is an UPDATE. Foreign key processing is only required if the ** operation modifies one or more child or parent key columns. */ FKey *p; /* Check if any child key columns are being modified. */ for(p=pTab->pFKey; p; p=p->pNextFrom){ if( fkChildIsModified(pTab, p, aChange, chngRowid) ) return 1; } /* Check if any parent key columns are being modified. */ for(p=sqlite3FkReferences(pTab); p; p=p->pNextTo){ if( fkParentIsModified(pTab, p, aChange, chngRowid) ) return 1; } } } return 0; } /* ** This function is called when an UPDATE or DELETE operation is being ** compiled on table pTab, which is the parent table of foreign-key pFKey. ** If the current operation is an UPDATE, then the pChanges parameter is ** passed a pointer to the list of columns being modified. If it is a ** DELETE, pChanges is passed a NULL pointer. ** ** It returns a pointer to a Trigger structure containing a trigger ** equivalent to the ON UPDATE or ON DELETE action specified by pFKey. ** If the action is "NO ACTION" or "RESTRICT", then a NULL pointer is ** returned (these actions require no special handling by the triggers ** sub-system, code for them is created by fkScanChildren()). ** ** For example, if pFKey is the foreign key and pTab is table "p" in ** the following schema: ** ** CREATE TABLE p(pk PRIMARY KEY); ** CREATE TABLE c(ck REFERENCES p ON DELETE CASCADE); ** ** then the returned trigger structure is equivalent to: ** ** CREATE TRIGGER ... DELETE ON p BEGIN ** DELETE FROM c WHERE ck = old.pk; ** END; ** ** The returned pointer is cached as part of the foreign key object. It ** is eventually freed along with the rest of the foreign key object by ** sqlite3FkDelete(). */ static Trigger *fkActionTrigger( Parse *pParse, /* Parse context */ Table *pTab, /* Table being updated or deleted from */ FKey *pFKey, /* Foreign key to get action for */ ExprList *pChanges /* Change-list for UPDATE, NULL for DELETE */ ){ sqlite3 *db = pParse->db; /* Database handle */ int action; /* One of OE_None, OE_Cascade etc. 
*/ Trigger *pTrigger; /* Trigger definition to return */ int iAction = (pChanges!=0); /* 1 for UPDATE, 0 for DELETE */ action = pFKey->aAction[iAction]; pTrigger = pFKey->apTrigger[iAction]; if( action!=OE_None && !pTrigger ){ u8 enableLookaside; /* Copy of db->lookaside.bEnabled */ char const *zFrom; /* Name of child table */ int nFrom; /* Length in bytes of zFrom */ Index *pIdx = 0; /* Parent key index for this FK */ int *aiCol = 0; /* child table cols -> parent key cols */ TriggerStep *pStep = 0; /* First (only) step of trigger program */ Expr *pWhere = 0; /* WHERE clause of trigger step */ ExprList *pList = 0; /* Changes list if ON UPDATE CASCADE */ Select *pSelect = 0; /* If RESTRICT, "SELECT RAISE(...)" */ int i; /* Iterator variable */ Expr *pWhen = 0; /* WHEN clause for the trigger */ if( sqlite3FkLocateIndex(pParse, pTab, pFKey, &pIdx, &aiCol) ) return 0; assert( aiCol || pFKey->nCol==1 ); for(i=0; inCol; i++){ Token tOld = { "old", 3 }; /* Literal "old" token */ Token tNew = { "new", 3 }; /* Literal "new" token */ Token tFromCol; /* Name of column in child table */ Token tToCol; /* Name of column in parent table */ int iFromCol; /* Idx of column in child table */ Expr *pEq; /* tFromCol = OLD.tToCol */ iFromCol = aiCol ? aiCol[i] : pFKey->aCol[0].iFrom; assert( iFromCol>=0 ); assert( pIdx!=0 || (pTab->iPKey>=0 && pTab->iPKeynCol) ); tToCol.z = pTab->aCol[pIdx ? pIdx->aiColumn[i] : pTab->iPKey].zName; tFromCol.z = pFKey->pFrom->aCol[iFromCol].zName; tToCol.n = sqlite3Strlen30(tToCol.z); tFromCol.n = sqlite3Strlen30(tFromCol.z); /* Create the expression "OLD.zToCol = zFromCol". It is important ** that the "OLD.zToCol" term is on the LHS of the = operator, so ** that the affinity and collation sequence associated with the ** parent table are used for the comparison. */ pEq = sqlite3PExpr(pParse, TK_EQ, sqlite3PExpr(pParse, TK_DOT, sqlite3ExprAlloc(db, TK_ID, &tOld, 0), sqlite3ExprAlloc(db, TK_ID, &tToCol, 0) , 0), sqlite3ExprAlloc(db, TK_ID, &tFromCol, 0) , 0); pWhere = sqlite3ExprAnd(db, pWhere, pEq); /* For ON UPDATE, construct the next term of the WHEN clause. ** The final WHEN clause will be like this: ** ** WHEN NOT(old.col1 IS new.col1 AND ... 
AND old.colN IS new.colN) */ if( pChanges ){ pEq = sqlite3PExpr(pParse, TK_IS, sqlite3PExpr(pParse, TK_DOT, sqlite3ExprAlloc(db, TK_ID, &tOld, 0), sqlite3ExprAlloc(db, TK_ID, &tToCol, 0), 0), sqlite3PExpr(pParse, TK_DOT, sqlite3ExprAlloc(db, TK_ID, &tNew, 0), sqlite3ExprAlloc(db, TK_ID, &tToCol, 0), 0), 0); pWhen = sqlite3ExprAnd(db, pWhen, pEq); } if( action!=OE_Restrict && (action!=OE_Cascade || pChanges) ){ Expr *pNew; if( action==OE_Cascade ){ pNew = sqlite3PExpr(pParse, TK_DOT, sqlite3ExprAlloc(db, TK_ID, &tNew, 0), sqlite3ExprAlloc(db, TK_ID, &tToCol, 0) , 0); }else if( action==OE_SetDflt ){ Expr *pDflt = pFKey->pFrom->aCol[iFromCol].pDflt; if( pDflt ){ pNew = sqlite3ExprDup(db, pDflt, 0); }else{ pNew = sqlite3PExpr(pParse, TK_NULL, 0, 0, 0); } }else{ pNew = sqlite3PExpr(pParse, TK_NULL, 0, 0, 0); } pList = sqlite3ExprListAppend(pParse, pList, pNew); sqlite3ExprListSetName(pParse, pList, &tFromCol, 0); } } sqlite3DbFree(db, aiCol); zFrom = pFKey->pFrom->zName; nFrom = sqlite3Strlen30(zFrom); if( action==OE_Restrict ){ Token tFrom; Expr *pRaise; tFrom.z = zFrom; tFrom.n = nFrom; pRaise = sqlite3Expr(db, TK_RAISE, "FOREIGN KEY constraint failed"); if( pRaise ){ pRaise->affinity = OE_Abort; } pSelect = sqlite3SelectNew(pParse, sqlite3ExprListAppend(pParse, 0, pRaise), sqlite3SrcListAppend(db, 0, &tFrom, 0), pWhere, 0, 0, 0, 0, 0, 0 ); pWhere = 0; } /* Disable lookaside memory allocation */ enableLookaside = db->lookaside.bEnabled; db->lookaside.bEnabled = 0; pTrigger = (Trigger *)sqlite3DbMallocZero(db, sizeof(Trigger) + /* struct Trigger */ sizeof(TriggerStep) + /* Single step in trigger program */ nFrom + 1 /* Space for pStep->zTarget */ ); if( pTrigger ){ pStep = pTrigger->step_list = (TriggerStep *)&pTrigger[1]; pStep->zTarget = (char *)&pStep[1]; memcpy((char *)pStep->zTarget, zFrom, nFrom); pStep->pWhere = sqlite3ExprDup(db, pWhere, EXPRDUP_REDUCE); pStep->pExprList = sqlite3ExprListDup(db, pList, EXPRDUP_REDUCE); pStep->pSelect = sqlite3SelectDup(db, pSelect, EXPRDUP_REDUCE); if( pWhen ){ pWhen = sqlite3PExpr(pParse, TK_NOT, pWhen, 0, 0); pTrigger->pWhen = sqlite3ExprDup(db, pWhen, EXPRDUP_REDUCE); } } /* Re-enable the lookaside buffer, if it was disabled earlier. */ db->lookaside.bEnabled = enableLookaside; sqlite3ExprDelete(db, pWhere); sqlite3ExprDelete(db, pWhen); sqlite3ExprListDelete(db, pList); sqlite3SelectDelete(db, pSelect); if( db->mallocFailed==1 ){ fkTriggerDelete(db, pTrigger); return 0; } assert( pStep!=0 ); switch( action ){ case OE_Restrict: pStep->op = TK_SELECT; break; case OE_Cascade: if( !pChanges ){ pStep->op = TK_DELETE; break; } default: pStep->op = TK_UPDATE; } pStep->pTrig = pTrigger; pTrigger->pSchema = pTab->pSchema; pTrigger->pTabSchema = pTab->pSchema; pFKey->apTrigger[iAction] = pTrigger; pTrigger->op = (pChanges ? TK_UPDATE : TK_DELETE); } return pTrigger; } /* ** This function is called when deleting or updating a row to implement ** any required CASCADE, SET NULL or SET DEFAULT actions. */ SQLITE_PRIVATE void sqlite3FkActions( Parse *pParse, /* Parse context */ Table *pTab, /* Table being updated or deleted from */ ExprList *pChanges, /* Change-list for UPDATE, NULL for DELETE */ int regOld, /* Address of array containing old row */ int *aChange, /* Array indicating UPDATEd columns (or 0) */ int bChngRowid /* True if rowid is UPDATEd */ ){ /* If foreign-key support is enabled, iterate through all FKs that ** refer to table pTab. 
If there is an action associated with the FK ** for this operation (either update or delete), invoke the associated ** trigger sub-program. */ if( pParse->db->flags&SQLITE_ForeignKeys ){ FKey *pFKey; /* Iterator variable */ for(pFKey = sqlite3FkReferences(pTab); pFKey; pFKey=pFKey->pNextTo){ if( aChange==0 || fkParentIsModified(pTab, pFKey, aChange, bChngRowid) ){ Trigger *pAct = fkActionTrigger(pParse, pTab, pFKey, pChanges); if( pAct ){ sqlite3CodeRowTriggerDirect(pParse, pAct, pTab, regOld, OE_Abort, 0); } } } } } #endif /* ifndef SQLITE_OMIT_TRIGGER */ /* ** Free all memory associated with foreign key definitions attached to ** table pTab. Remove the deleted foreign keys from the Schema.fkeyHash ** hash table. */ SQLITE_PRIVATE void sqlite3FkDelete(sqlite3 *db, Table *pTab){ FKey *pFKey; /* Iterator variable */ FKey *pNext; /* Copy of pFKey->pNextFrom */ assert( db==0 || sqlite3SchemaMutexHeld(db, 0, pTab->pSchema) ); for(pFKey=pTab->pFKey; pFKey; pFKey=pNext){ /* Remove the FK from the fkeyHash hash table. */ if( !db || db->pnBytesFreed==0 ){ if( pFKey->pPrevTo ){ pFKey->pPrevTo->pNextTo = pFKey->pNextTo; }else{ void *p = (void *)pFKey->pNextTo; const char *z = (p ? pFKey->pNextTo->zTo : pFKey->zTo); sqlite3HashInsert(&pTab->pSchema->fkeyHash, z, p); } if( pFKey->pNextTo ){ pFKey->pNextTo->pPrevTo = pFKey->pPrevTo; } } /* EV: R-30323-21917 Each foreign key constraint in SQLite is ** classified as either immediate or deferred. */ assert( pFKey->isDeferred==0 || pFKey->isDeferred==1 ); /* Delete any triggers created to implement actions for this FK. */ #ifndef SQLITE_OMIT_TRIGGER fkTriggerDelete(db, pFKey->apTrigger[0]); fkTriggerDelete(db, pFKey->apTrigger[1]); #endif pNext = pFKey->pNextFrom; sqlite3DbFree(db, pFKey); } } #endif /* ifndef SQLITE_OMIT_FOREIGN_KEY */ /************** End of fkey.c ************************************************/ /************** Begin file insert.c ******************************************/ /* ** 2001 September 15 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** This file contains C code routines that are called by the parser ** to handle INSERT statements in SQLite. */ /* #include "sqliteInt.h" */ /* ** Generate code that will ** ** (1) acquire a lock for table pTab then ** (2) open pTab as cursor iCur. ** ** If pTab is a WITHOUT ROWID table, then it is the PRIMARY KEY index ** for that table that is actually opened. 
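**
** (Illustrative example: for "CREATE TABLE t(a INT PRIMARY KEY, b)
** WITHOUT ROWID" the b-tree opened here is the one that implements
** PRIMARY KEY(a), since a WITHOUT ROWID table has no separate table
** b-tree keyed by rowid.)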
*/ SQLITE_PRIVATE void sqlite3OpenTable( Parse *pParse, /* Generate code into this VDBE */ int iCur, /* The cursor number of the table */ int iDb, /* The database index in sqlite3.aDb[] */ Table *pTab, /* The table to be opened */ int opcode /* OP_OpenRead or OP_OpenWrite */ ){ Vdbe *v; assert( !IsVirtual(pTab) ); v = sqlite3GetVdbe(pParse); assert( opcode==OP_OpenWrite || opcode==OP_OpenRead ); sqlite3TableLock(pParse, iDb, pTab->tnum, (opcode==OP_OpenWrite)?1:0, pTab->zName); if( HasRowid(pTab) ){ sqlite3VdbeAddOp4Int(v, opcode, iCur, pTab->tnum, iDb, pTab->nCol); VdbeComment((v, "%s", pTab->zName)); }else{ Index *pPk = sqlite3PrimaryKeyIndex(pTab); assert( pPk!=0 ); assert( pPk->tnum==pTab->tnum ); sqlite3VdbeAddOp3(v, opcode, iCur, pPk->tnum, iDb); sqlite3VdbeSetP4KeyInfo(pParse, pPk); VdbeComment((v, "%s", pTab->zName)); } } /* ** Return a pointer to the column affinity string associated with index ** pIdx. A column affinity string has one character for each column in ** the table, according to the affinity of the column: ** ** Character Column affinity ** ------------------------------ ** 'A' BLOB ** 'B' TEXT ** 'C' NUMERIC ** 'D' INTEGER ** 'F' REAL ** ** An extra 'D' is appended to the end of the string to cover the ** rowid that appears as the last column in every index. ** ** Memory for the buffer containing the column index affinity string ** is managed along with the rest of the Index structure. It will be ** released when sqlite3DeleteIndex() is called. */ SQLITE_PRIVATE const char *sqlite3IndexAffinityStr(Vdbe *v, Index *pIdx){ if( !pIdx->zColAff ){ /* The first time a column affinity string for a particular index is ** required, it is allocated and populated here. It is then stored as ** a member of the Index structure for subsequent use. ** ** The column affinity string will eventually be deleted by ** sqliteDeleteIndex() when the Index structure itself is cleaned ** up. */ int n; Table *pTab = pIdx->pTable; sqlite3 *db = sqlite3VdbeDb(v); pIdx->zColAff = (char *)sqlite3DbMallocRaw(0, pIdx->nColumn+1); if( !pIdx->zColAff ){ db->mallocFailed = 1; return 0; } for(n=0; nnColumn; n++){ i16 x = pIdx->aiColumn[n]; pIdx->zColAff[n] = x<0 ? SQLITE_AFF_INTEGER : pTab->aCol[x].affinity; } pIdx->zColAff[n] = 0; } return pIdx->zColAff; } /* ** Compute the affinity string for table pTab, if it has not already been ** computed. As an optimization, omit trailing SQLITE_AFF_BLOB affinities. ** ** If the affinity exists (if it is no entirely SQLITE_AFF_BLOB values) and ** if iReg>0 then code an OP_Affinity opcode that will set the affinities ** for register iReg and following. Or if affinities exists and iReg==0, ** then just set the P4 operand of the previous opcode (which should be ** an OP_MakeRecord) to the affinity string. 
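**
** (Worked example, using the affinity codes tabulated below: a table
** declared as "CREATE TABLE t1(a INTEGER, b TEXT, c BLOB)" has the
** per-column affinities 'D', 'B', 'A'; the trailing BLOB affinity is
** trimmed, so the stored string is "DB".)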
** ** A column affinity string has one character per column: ** ** Character Column affinity ** ------------------------------ ** 'A' BLOB ** 'B' TEXT ** 'C' NUMERIC ** 'D' INTEGER ** 'E' REAL */ SQLITE_PRIVATE void sqlite3TableAffinity(Vdbe *v, Table *pTab, int iReg){ int i; char *zColAff = pTab->zColAff; if( zColAff==0 ){ sqlite3 *db = sqlite3VdbeDb(v); zColAff = (char *)sqlite3DbMallocRaw(0, pTab->nCol+1); if( !zColAff ){ db->mallocFailed = 1; return; } for(i=0; inCol; i++){ zColAff[i] = pTab->aCol[i].affinity; } do{ zColAff[i--] = 0; }while( i>=0 && zColAff[i]==SQLITE_AFF_BLOB ); pTab->zColAff = zColAff; } i = sqlite3Strlen30(zColAff); if( i ){ if( iReg ){ sqlite3VdbeAddOp4(v, OP_Affinity, iReg, i, 0, zColAff, i); }else{ sqlite3VdbeChangeP4(v, -1, zColAff, i); } } } /* ** Return non-zero if the table pTab in database iDb or any of its indices ** have been opened at any point in the VDBE program. This is used to see if ** a statement of the form "INSERT INTO SELECT ..." can ** run without using a temporary table for the results of the SELECT. */ static int readsTable(Parse *p, int iDb, Table *pTab){ Vdbe *v = sqlite3GetVdbe(p); int i; int iEnd = sqlite3VdbeCurrentAddr(v); #ifndef SQLITE_OMIT_VIRTUALTABLE VTable *pVTab = IsVirtual(pTab) ? sqlite3GetVTable(p->db, pTab) : 0; #endif for(i=1; iopcode==OP_OpenRead && pOp->p3==iDb ){ Index *pIndex; int tnum = pOp->p2; if( tnum==pTab->tnum ){ return 1; } for(pIndex=pTab->pIndex; pIndex; pIndex=pIndex->pNext){ if( tnum==pIndex->tnum ){ return 1; } } } #ifndef SQLITE_OMIT_VIRTUALTABLE if( pOp->opcode==OP_VOpen && pOp->p4.pVtab==pVTab ){ assert( pOp->p4.pVtab!=0 ); assert( pOp->p4type==P4_VTAB ); return 1; } #endif } return 0; } #ifndef SQLITE_OMIT_AUTOINCREMENT /* ** Locate or create an AutoincInfo structure associated with table pTab ** which is in database iDb. Return the register number for the register ** that holds the maximum rowid. ** ** There is at most one AutoincInfo structure per table even if the ** same table is autoincremented multiple times due to inserts within ** triggers. A new AutoincInfo structure is created if this is the ** first use of table pTab. On 2nd and subsequent uses, the original ** AutoincInfo structure is used. ** ** Three memory locations are allocated: ** ** (1) Register to hold the name of the pTab table. ** (2) Register to hold the maximum ROWID of pTab. ** (3) Register to hold the rowid in sqlite_sequence of pTab ** ** The 2nd register is the one that is returned. That is all the ** insert routine needs to know about. */ static int autoIncBegin( Parse *pParse, /* Parsing context */ int iDb, /* Index of the database holding pTab */ Table *pTab /* The table we are writing to */ ){ int memId = 0; /* Register holding maximum rowid */ if( pTab->tabFlags & TF_Autoincrement ){ Parse *pToplevel = sqlite3ParseToplevel(pParse); AutoincInfo *pInfo; pInfo = pToplevel->pAinc; while( pInfo && pInfo->pTab!=pTab ){ pInfo = pInfo->pNext; } if( pInfo==0 ){ pInfo = sqlite3DbMallocRaw(pParse->db, sizeof(*pInfo)); if( pInfo==0 ) return 0; pInfo->pNext = pToplevel->pAinc; pToplevel->pAinc = pInfo; pInfo->pTab = pTab; pInfo->iDb = iDb; pToplevel->nMem++; /* Register to hold name of table */ pInfo->regCtr = ++pToplevel->nMem; /* Max rowid register */ pToplevel->nMem++; /* Rowid in sqlite_sequence */ } memId = pInfo->regCtr; } return memId; } /* ** This routine generates code that will initialize all of the ** register used by the autoincrement tracker. 
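**
** (Illustrative sketch of the effect: for a table created with
** "CREATE TABLE t(id INTEGER PRIMARY KEY AUTOINCREMENT, v)", the code
** emitted here scans sqlite_sequence for the row whose name column is
** 't' and loads the stored maximum rowid into the register reserved by
** autoIncBegin(), so that later OP_MemMax steps can keep it current.)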
*/ SQLITE_PRIVATE void sqlite3AutoincrementBegin(Parse *pParse){ AutoincInfo *p; /* Information about an AUTOINCREMENT */ sqlite3 *db = pParse->db; /* The database connection */ Db *pDb; /* Database only autoinc table */ int memId; /* Register holding max rowid */ int addr; /* A VDBE address */ Vdbe *v = pParse->pVdbe; /* VDBE under construction */ /* This routine is never called during trigger-generation. It is ** only called from the top-level */ assert( pParse->pTriggerTab==0 ); assert( pParse==sqlite3ParseToplevel(pParse) ); assert( v ); /* We failed long ago if this is not so */ for(p = pParse->pAinc; p; p = p->pNext){ pDb = &db->aDb[p->iDb]; memId = p->regCtr; assert( sqlite3SchemaMutexHeld(db, 0, pDb->pSchema) ); sqlite3OpenTable(pParse, 0, p->iDb, pDb->pSchema->pSeqTab, OP_OpenRead); sqlite3VdbeAddOp3(v, OP_Null, 0, memId, memId+1); addr = sqlite3VdbeCurrentAddr(v); sqlite3VdbeAddOp4(v, OP_String8, 0, memId-1, 0, p->pTab->zName, 0); sqlite3VdbeAddOp2(v, OP_Rewind, 0, addr+9); VdbeCoverage(v); sqlite3VdbeAddOp3(v, OP_Column, 0, 0, memId); sqlite3VdbeAddOp3(v, OP_Ne, memId-1, addr+7, memId); VdbeCoverage(v); sqlite3VdbeChangeP5(v, SQLITE_JUMPIFNULL); sqlite3VdbeAddOp2(v, OP_Rowid, 0, memId+1); sqlite3VdbeAddOp3(v, OP_Column, 0, 1, memId); sqlite3VdbeAddOp2(v, OP_Goto, 0, addr+9); sqlite3VdbeAddOp2(v, OP_Next, 0, addr+2); VdbeCoverage(v); sqlite3VdbeAddOp2(v, OP_Integer, 0, memId); sqlite3VdbeAddOp0(v, OP_Close); } } /* ** Update the maximum rowid for an autoincrement calculation. ** ** This routine should be called when the top of the stack holds a ** new rowid that is about to be inserted. If that new rowid is ** larger than the maximum rowid in the memId memory cell, then the ** memory cell is updated. The stack is unchanged. */ static void autoIncStep(Parse *pParse, int memId, int regRowid){ if( memId>0 ){ sqlite3VdbeAddOp2(pParse->pVdbe, OP_MemMax, memId, regRowid); } } /* ** This routine generates the code needed to write autoincrement ** maximum rowid values back into the sqlite_sequence register. ** Every statement that might do an INSERT into an autoincrement ** table (either directly or through triggers) needs to call this ** routine just before the "exit" code. 
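**
** (Illustrative: after "INSERT INTO t(v) VALUES('x')" on the
** AUTOINCREMENT table sketched above, the code emitted here writes the
** updated maximum rowid back into t's row of the sqlite_sequence table,
** which is what keeps AUTOINCREMENT rowids from being reused.)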
*/ SQLITE_PRIVATE void sqlite3AutoincrementEnd(Parse *pParse){ AutoincInfo *p; Vdbe *v = pParse->pVdbe; sqlite3 *db = pParse->db; assert( v ); for(p = pParse->pAinc; p; p = p->pNext){ Db *pDb = &db->aDb[p->iDb]; int j1; int iRec; int memId = p->regCtr; iRec = sqlite3GetTempReg(pParse); assert( sqlite3SchemaMutexHeld(db, 0, pDb->pSchema) ); sqlite3OpenTable(pParse, 0, p->iDb, pDb->pSchema->pSeqTab, OP_OpenWrite); j1 = sqlite3VdbeAddOp1(v, OP_NotNull, memId+1); VdbeCoverage(v); sqlite3VdbeAddOp2(v, OP_NewRowid, 0, memId+1); sqlite3VdbeJumpHere(v, j1); sqlite3VdbeAddOp3(v, OP_MakeRecord, memId-1, 2, iRec); sqlite3VdbeAddOp3(v, OP_Insert, 0, iRec, memId+1); sqlite3VdbeChangeP5(v, OPFLAG_APPEND); sqlite3VdbeAddOp0(v, OP_Close); sqlite3ReleaseTempReg(pParse, iRec); } } #else /* ** If SQLITE_OMIT_AUTOINCREMENT is defined, then the three routines ** above are all no-ops */ # define autoIncBegin(A,B,C) (0) # define autoIncStep(A,B,C) #endif /* SQLITE_OMIT_AUTOINCREMENT */ /* Forward declaration */ static int xferOptimization( Parse *pParse, /* Parser context */ Table *pDest, /* The table we are inserting into */ Select *pSelect, /* A SELECT statement to use as the data source */ int onError, /* How to handle constraint errors */ int iDbDest /* The database of pDest */ ); /* ** This routine is called to handle SQL of the following forms: ** ** insert into TABLE (IDLIST) values(EXPRLIST),(EXPRLIST),... ** insert into TABLE (IDLIST) select ** insert into TABLE (IDLIST) default values ** ** The IDLIST following the table name is always optional. If omitted, ** then a list of all (non-hidden) columns for the table is substituted. ** The IDLIST appears in the pColumn parameter. pColumn is NULL if IDLIST ** is omitted. ** ** For the pSelect parameter holds the values to be inserted for the ** first two forms shown above. A VALUES clause is really just short-hand ** for a SELECT statement that omits the FROM clause and everything else ** that follows. If the pSelect parameter is NULL, that means that the ** DEFAULT VALUES form of the INSERT statement is intended. ** ** The code generated follows one of four templates. For a simple ** insert with data coming from a single-row VALUES clause, the code executes ** once straight down through. Pseudo-code follows (we call this ** the "1st template"): ** ** open write cursor to
<table> and its indices
**         put VALUES clause expressions into registers
**         write the resulting record into <table>
**         cleanup
**
** The three remaining templates assume the statement is of the form
**
**   INSERT INTO <table> SELECT ...
**
** If the SELECT clause is of the restricted form "SELECT * FROM <table2>" -
** in other words if the SELECT pulls all columns from a single table
** and there is no WHERE or LIMIT or GROUP BY or ORDER BY clauses, and
** if <table2> and <table1> are distinct tables but have identical
** schemas, including all the same indices, then a special optimization
** is invoked that copies raw records from <table2> over to <table1>.
** See the xferOptimization() function for the implementation of this
** template.  This is the 2nd template.
**
**         open a write cursor to <table>
**         open read cursor on <table2>
**         transfer all records in <table2> over to <table>
**         close cursors
**         foreach index on <table>
**           open a write cursor on the <table> index
**           open a read cursor on the corresponding <table2> index
**           transfer all records from the read to the write cursors
**           close cursors
**         end foreach
**
** The 3rd template is for when the second template does not apply
** and the SELECT clause does not read from <table> at any time.
** The generated code follows this template:
**
**         X <- A
**         goto B
**      A: setup for the SELECT
**         loop over the rows in the SELECT
**           load values into registers R..R+n
**           yield X
**         end loop
**         cleanup after the SELECT
**         end-coroutine X
**      B: open write cursor to <table> and its indices
**      C: yield X, at EOF goto D
**         insert the select result into <table> from R..R+n
**         goto C
**      D: cleanup
**
** The 4th template is used if the insert statement takes its
** values from a SELECT but the data is being inserted into a table
** that is also read as part of the SELECT.  In the third form,
** we have to use an intermediate table to store the results of
** the select.  The template is like this:
**
**         X <- A
**         goto B
**      A: setup for the SELECT
**         loop over the rows in the SELECT
**           load values into registers R..R+n
**           yield X
**         end loop
**         cleanup after the SELECT
**         end co-routine R
**      B: open temp table
**      L: yield X, at EOF goto M
**         insert row from R..R+n into temp table
**         goto L
**      M: open write cursor to <table> and its indices
**         rewind temp table
**      C: loop over rows of intermediate table
**           transfer values from intermediate table into <table>
    ** end loop ** D: cleanup */ SQLITE_PRIVATE void sqlite3Insert( Parse *pParse, /* Parser context */ SrcList *pTabList, /* Name of table into which we are inserting */ Select *pSelect, /* A SELECT statement to use as the data source */ IdList *pColumn, /* Column names corresponding to IDLIST. */ int onError /* How to handle constraint errors */ ){ sqlite3 *db; /* The main database structure */ Table *pTab; /* The table to insert into. aka TABLE */ char *zTab; /* Name of the table into which we are inserting */ const char *zDb; /* Name of the database holding this table */ int i, j, idx; /* Loop counters */ Vdbe *v; /* Generate code into this virtual machine */ Index *pIdx; /* For looping over indices of the table */ int nColumn; /* Number of columns in the data */ int nHidden = 0; /* Number of hidden columns if TABLE is virtual */ int iDataCur = 0; /* VDBE cursor that is the main data repository */ int iIdxCur = 0; /* First index cursor */ int ipkColumn = -1; /* Column that is the INTEGER PRIMARY KEY */ int endOfLoop; /* Label for the end of the insertion loop */ int srcTab = 0; /* Data comes from this temporary cursor if >=0 */ int addrInsTop = 0; /* Jump to label "D" */ int addrCont = 0; /* Top of insert loop. Label "C" in templates 3 and 4 */ SelectDest dest; /* Destination for SELECT on rhs of INSERT */ int iDb; /* Index of database holding TABLE */ Db *pDb; /* The database containing table being inserted into */ u8 useTempTable = 0; /* Store SELECT results in intermediate table */ u8 appendFlag = 0; /* True if the insert is likely to be an append */ u8 withoutRowid; /* 0 for normal table. 1 for WITHOUT ROWID table */ u8 bIdListInOrder; /* True if IDLIST is in table order */ ExprList *pList = 0; /* List of VALUES() to be inserted */ /* Register allocations */ int regFromSelect = 0;/* Base register for data coming from SELECT */ int regAutoinc = 0; /* Register holding the AUTOINCREMENT counter */ int regRowCount = 0; /* Memory cell used for the row counter */ int regIns; /* Block of regs holding rowid+data being inserted */ int regRowid; /* registers holding insert rowid */ int regData; /* register holding first column to insert */ int *aRegIdx = 0; /* One register allocated to each index */ #ifndef SQLITE_OMIT_TRIGGER int isView; /* True if attempting to insert into a view */ Trigger *pTrigger; /* List of triggers on pTab, if required */ int tmask; /* Mask of trigger times */ #endif db = pParse->db; memset(&dest, 0, sizeof(dest)); if( pParse->nErr || db->mallocFailed ){ goto insert_cleanup; } /* If the Select object is really just a simple VALUES() list with a ** single row (the common case) then keep that one row of values ** and discard the other (unused) parts of the pSelect object */ if( pSelect && (pSelect->selFlags & SF_Values)!=0 && pSelect->pPrior==0 ){ pList = pSelect->pEList; pSelect->pEList = 0; sqlite3SelectDelete(db, pSelect); pSelect = 0; } /* Locate the table into which we will be inserting new information. 
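** (Illustrative: for a statement written as "INSERT INTO aux.t1
** VALUES(1)" the lookup below resolves t1 inside the attached database
** named "aux", and zDb is set to "aux" so that the authorization check
** a few lines further down names the correct database.)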
*/ assert( pTabList->nSrc==1 ); zTab = pTabList->a[0].zName; if( NEVER(zTab==0) ) goto insert_cleanup; pTab = sqlite3SrcListLookup(pParse, pTabList); if( pTab==0 ){ goto insert_cleanup; } iDb = sqlite3SchemaToIndex(db, pTab->pSchema); assert( iDbnDb ); pDb = &db->aDb[iDb]; zDb = pDb->zName; if( sqlite3AuthCheck(pParse, SQLITE_INSERT, pTab->zName, 0, zDb) ){ goto insert_cleanup; } withoutRowid = !HasRowid(pTab); /* Figure out if we have any triggers and if the table being ** inserted into is a view */ #ifndef SQLITE_OMIT_TRIGGER pTrigger = sqlite3TriggersExist(pParse, pTab, TK_INSERT, 0, &tmask); isView = pTab->pSelect!=0; #else # define pTrigger 0 # define tmask 0 # define isView 0 #endif #ifdef SQLITE_OMIT_VIEW # undef isView # define isView 0 #endif assert( (pTrigger && tmask) || (pTrigger==0 && tmask==0) ); /* If pTab is really a view, make sure it has been initialized. ** ViewGetColumnNames() is a no-op if pTab is not a view. */ if( sqlite3ViewGetColumnNames(pParse, pTab) ){ goto insert_cleanup; } /* Cannot insert into a read-only table. */ if( sqlite3IsReadOnly(pParse, pTab, tmask) ){ goto insert_cleanup; } /* Allocate a VDBE */ v = sqlite3GetVdbe(pParse); if( v==0 ) goto insert_cleanup; if( pParse->nested==0 ) sqlite3VdbeCountChanges(v); sqlite3BeginWriteOperation(pParse, pSelect || pTrigger, iDb); #ifndef SQLITE_OMIT_XFER_OPT /* If the statement is of the form ** ** INSERT INTO SELECT * FROM ; ** ** Then special optimizations can be applied that make the transfer ** very fast and which reduce fragmentation of indices. ** ** This is the 2nd template. */ if( pColumn==0 && xferOptimization(pParse, pTab, pSelect, onError, iDb) ){ assert( !pTrigger ); assert( pList==0 ); goto insert_end; } #endif /* SQLITE_OMIT_XFER_OPT */ /* If this is an AUTOINCREMENT table, look up the sequence number in the ** sqlite_sequence table and store it in memory cell regAutoinc. */ regAutoinc = autoIncBegin(pParse, iDb, pTab); /* Allocate registers for holding the rowid of the new row, ** the content of the new row, and the assembled row record. */ regRowid = regIns = pParse->nMem+1; pParse->nMem += pTab->nCol + 1; if( IsVirtual(pTab) ){ regRowid++; pParse->nMem++; } regData = regRowid+1; /* If the INSERT statement included an IDLIST term, then make sure ** all elements of the IDLIST really are columns of the table and ** remember the column indices. ** ** If the table has an INTEGER PRIMARY KEY column and that column ** is named in the IDLIST, then record in the ipkColumn variable ** the index into IDLIST of the primary key column. ipkColumn is ** the index of the primary key as it appears in IDLIST, not as ** is appears in the original table. (The index of the INTEGER ** PRIMARY KEY in the original table is pTab->iPKey.) */ bIdListInOrder = (pTab->tabFlags & TF_OOOHidden)==0; if( pColumn ){ for(i=0; inId; i++){ pColumn->a[i].idx = -1; } for(i=0; inId; i++){ for(j=0; jnCol; j++){ if( sqlite3StrICmp(pColumn->a[i].zName, pTab->aCol[j].zName)==0 ){ pColumn->a[i].idx = j; if( i!=j ) bIdListInOrder = 0; if( j==pTab->iPKey ){ ipkColumn = i; assert( !withoutRowid ); } break; } } if( j>=pTab->nCol ){ if( sqlite3IsRowid(pColumn->a[i].zName) && !withoutRowid ){ ipkColumn = i; bIdListInOrder = 0; }else{ sqlite3ErrorMsg(pParse, "table %S has no column named %s", pTabList, 0, pColumn->a[i].zName); pParse->checkSchema = 1; goto insert_cleanup; } } } } /* Figure out how many columns of data are supplied. 
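** (For illustration: given a table t(a,b), the statement "INSERT INTO
** t(b,a) VALUES(1,2)" supplies nColumn==2 values, and because the
** IDLIST is not in table order the loop above has already cleared
** bIdListInOrder.)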
If the data ** is coming from a SELECT statement, then generate a co-routine that ** produces a single row of the SELECT on each invocation. The ** co-routine is the common header to the 3rd and 4th templates. */ if( pSelect ){ /* Data is coming from a SELECT or from a multi-row VALUES clause. ** Generate a co-routine to run the SELECT. */ int regYield; /* Register holding co-routine entry-point */ int addrTop; /* Top of the co-routine */ int rc; /* Result code */ regYield = ++pParse->nMem; addrTop = sqlite3VdbeCurrentAddr(v) + 1; sqlite3VdbeAddOp3(v, OP_InitCoroutine, regYield, 0, addrTop); sqlite3SelectDestInit(&dest, SRT_Coroutine, regYield); dest.iSdst = bIdListInOrder ? regData : 0; dest.nSdst = pTab->nCol; rc = sqlite3Select(pParse, pSelect, &dest); regFromSelect = dest.iSdst; if( rc || db->mallocFailed || pParse->nErr ) goto insert_cleanup; sqlite3VdbeAddOp1(v, OP_EndCoroutine, regYield); sqlite3VdbeJumpHere(v, addrTop - 1); /* label B: */ assert( pSelect->pEList ); nColumn = pSelect->pEList->nExpr; /* Set useTempTable to TRUE if the result of the SELECT statement ** should be written into a temporary table (template 4). Set to ** FALSE if each output row of the SELECT can be written directly into ** the destination table (template 3). ** ** A temp table must be used if the table being updated is also one ** of the tables being read by the SELECT statement. Also use a ** temp table in the case of row triggers. */ if( pTrigger || readsTable(pParse, iDb, pTab) ){ useTempTable = 1; } if( useTempTable ){ /* Invoke the coroutine to extract information from the SELECT ** and add it to a transient table srcTab. The code generated ** here is from the 4th template: ** ** B: open temp table ** L: yield X, goto M at EOF ** insert row from R..R+n into temp table ** goto L ** M: ... */ int regRec; /* Register to hold packed record */ int regTempRowid; /* Register to hold temp table ROWID */ int addrL; /* Label "L" */ srcTab = pParse->nTab++; regRec = sqlite3GetTempReg(pParse); regTempRowid = sqlite3GetTempReg(pParse); sqlite3VdbeAddOp2(v, OP_OpenEphemeral, srcTab, nColumn); addrL = sqlite3VdbeAddOp1(v, OP_Yield, dest.iSDParm); VdbeCoverage(v); sqlite3VdbeAddOp3(v, OP_MakeRecord, regFromSelect, nColumn, regRec); sqlite3VdbeAddOp2(v, OP_NewRowid, srcTab, regTempRowid); sqlite3VdbeAddOp3(v, OP_Insert, srcTab, regRec, regTempRowid); sqlite3VdbeAddOp2(v, OP_Goto, 0, addrL); sqlite3VdbeJumpHere(v, addrL); sqlite3ReleaseTempReg(pParse, regRec); sqlite3ReleaseTempReg(pParse, regTempRowid); } }else{ /* This is the case if the data for the INSERT is coming from a ** single-row VALUES clause */ NameContext sNC; memset(&sNC, 0, sizeof(sNC)); sNC.pParse = pParse; srcTab = -1; assert( useTempTable==0 ); nColumn = pList ? pList->nExpr : 0; for(i=0; ia[i].pExpr) ){ goto insert_cleanup; } } } /* If there is no IDLIST term but the table has an integer primary ** key, the set the ipkColumn variable to the integer primary key ** column index in the original table definition. */ if( pColumn==0 && nColumn>0 ){ ipkColumn = pTab->iPKey; } /* Make sure the number of columns in the source data matches the number ** of columns to be inserted into the table. */ if( IsVirtual(pTab) ){ for(i=0; inCol; i++){ nHidden += (IsHiddenColumn(&pTab->aCol[i]) ? 
1 : 0); } } if( pColumn==0 && nColumn && nColumn!=(pTab->nCol-nHidden) ){ sqlite3ErrorMsg(pParse, "table %S has %d columns but %d values were supplied", pTabList, 0, pTab->nCol-nHidden, nColumn); goto insert_cleanup; } if( pColumn!=0 && nColumn!=pColumn->nId ){ sqlite3ErrorMsg(pParse, "%d values for %d columns", nColumn, pColumn->nId); goto insert_cleanup; } /* Initialize the count of rows to be inserted */ if( db->flags & SQLITE_CountRows ){ regRowCount = ++pParse->nMem; sqlite3VdbeAddOp2(v, OP_Integer, 0, regRowCount); } /* If this is not a view, open the table and and all indices */ if( !isView ){ int nIdx; nIdx = sqlite3OpenTableAndIndices(pParse, pTab, OP_OpenWrite, -1, 0, &iDataCur, &iIdxCur); aRegIdx = sqlite3DbMallocRaw(db, sizeof(int)*(nIdx+1)); if( aRegIdx==0 ){ goto insert_cleanup; } for(i=0; inMem; } } /* This is the top of the main insertion loop */ if( useTempTable ){ /* This block codes the top of loop only. The complete loop is the ** following pseudocode (template 4): ** ** rewind temp table, if empty goto D ** C: loop over rows of intermediate table ** transfer values form intermediate table into
<table>
    **         end loop
    **      D: ...
    */
    addrInsTop = sqlite3VdbeAddOp1(v, OP_Rewind, srcTab); VdbeCoverage(v);
    addrCont = sqlite3VdbeCurrentAddr(v);
  }else if( pSelect ){
    /* This block codes the top of loop only.  The complete loop is the
    ** following pseudocode (template 3):
    **
    **      C: yield X, at EOF goto D
    **         insert the select result into <table>
    from R..R+n ** goto C ** D: ... */ addrInsTop = addrCont = sqlite3VdbeAddOp1(v, OP_Yield, dest.iSDParm); VdbeCoverage(v); } /* Run the BEFORE and INSTEAD OF triggers, if there are any */ endOfLoop = sqlite3VdbeMakeLabel(v); if( tmask & TRIGGER_BEFORE ){ int regCols = sqlite3GetTempRange(pParse, pTab->nCol+1); /* build the NEW.* reference row. Note that if there is an INTEGER ** PRIMARY KEY into which a NULL is being inserted, that NULL will be ** translated into a unique ID for the row. But on a BEFORE trigger, ** we do not know what the unique ID will be (because the insert has ** not happened yet) so we substitute a rowid of -1 */ if( ipkColumn<0 ){ sqlite3VdbeAddOp2(v, OP_Integer, -1, regCols); }else{ int j1; assert( !withoutRowid ); if( useTempTable ){ sqlite3VdbeAddOp3(v, OP_Column, srcTab, ipkColumn, regCols); }else{ assert( pSelect==0 ); /* Otherwise useTempTable is true */ sqlite3ExprCode(pParse, pList->a[ipkColumn].pExpr, regCols); } j1 = sqlite3VdbeAddOp1(v, OP_NotNull, regCols); VdbeCoverage(v); sqlite3VdbeAddOp2(v, OP_Integer, -1, regCols); sqlite3VdbeJumpHere(v, j1); sqlite3VdbeAddOp1(v, OP_MustBeInt, regCols); VdbeCoverage(v); } /* Cannot have triggers on a virtual table. If it were possible, ** this block would have to account for hidden column. */ assert( !IsVirtual(pTab) ); /* Create the new column data */ for(i=0; inCol; i++){ if( pColumn==0 ){ j = i; }else{ for(j=0; jnId; j++){ if( pColumn->a[j].idx==i ) break; } } if( (!useTempTable && !pList) || (pColumn && j>=pColumn->nId) ){ sqlite3ExprCode(pParse, pTab->aCol[i].pDflt, regCols+i+1); }else if( useTempTable ){ sqlite3VdbeAddOp3(v, OP_Column, srcTab, j, regCols+i+1); }else{ assert( pSelect==0 ); /* Otherwise useTempTable is true */ sqlite3ExprCodeAndCache(pParse, pList->a[j].pExpr, regCols+i+1); } } /* If this is an INSERT on a view with an INSTEAD OF INSERT trigger, ** do not attempt any conversions before assembling the record. ** If this is a real table, attempt conversions as required by the ** table column affinities. */ if( !isView ){ sqlite3TableAffinity(v, pTab, regCols+1); } /* Fire BEFORE or INSTEAD OF triggers */ sqlite3CodeRowTrigger(pParse, pTrigger, TK_INSERT, 0, TRIGGER_BEFORE, pTab, regCols-pTab->nCol-1, onError, endOfLoop); sqlite3ReleaseTempRange(pParse, regCols, pTab->nCol+1); } /* Compute the content of the next row to insert into a range of ** registers beginning at regIns. */ if( !isView ){ if( IsVirtual(pTab) ){ /* The row that the VUpdate opcode will delete: none */ sqlite3VdbeAddOp2(v, OP_Null, 0, regIns); } if( ipkColumn>=0 ){ if( useTempTable ){ sqlite3VdbeAddOp3(v, OP_Column, srcTab, ipkColumn, regRowid); }else if( pSelect ){ sqlite3VdbeAddOp2(v, OP_Copy, regFromSelect+ipkColumn, regRowid); }else{ VdbeOp *pOp; sqlite3ExprCode(pParse, pList->a[ipkColumn].pExpr, regRowid); pOp = sqlite3VdbeGetOp(v, -1); if( ALWAYS(pOp) && pOp->opcode==OP_Null && !IsVirtual(pTab) ){ appendFlag = 1; pOp->opcode = OP_NewRowid; pOp->p1 = iDataCur; pOp->p2 = regRowid; pOp->p3 = regAutoinc; } } /* If the PRIMARY KEY expression is NULL, then use OP_NewRowid ** to generate a unique primary key value. 
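** (Illustrative: "INSERT INTO t(rowid, v) VALUES(?1, 'x')" can only be
** checked at run time, so when ?1 is bound to NULL the OP_NotNull test
** below falls through to OP_NewRowid, which allocates a fresh rowid.)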
*/ if( !appendFlag ){ int j1; if( !IsVirtual(pTab) ){ j1 = sqlite3VdbeAddOp1(v, OP_NotNull, regRowid); VdbeCoverage(v); sqlite3VdbeAddOp3(v, OP_NewRowid, iDataCur, regRowid, regAutoinc); sqlite3VdbeJumpHere(v, j1); }else{ j1 = sqlite3VdbeCurrentAddr(v); sqlite3VdbeAddOp2(v, OP_IsNull, regRowid, j1+2); VdbeCoverage(v); } sqlite3VdbeAddOp1(v, OP_MustBeInt, regRowid); VdbeCoverage(v); } }else if( IsVirtual(pTab) || withoutRowid ){ sqlite3VdbeAddOp2(v, OP_Null, 0, regRowid); }else{ sqlite3VdbeAddOp3(v, OP_NewRowid, iDataCur, regRowid, regAutoinc); appendFlag = 1; } autoIncStep(pParse, regAutoinc, regRowid); /* Compute data for all columns of the new entry, beginning ** with the first column. */ nHidden = 0; for(i=0; inCol; i++){ int iRegStore = regRowid+1+i; if( i==pTab->iPKey ){ /* The value of the INTEGER PRIMARY KEY column is always a NULL. ** Whenever this column is read, the rowid will be substituted ** in its place. Hence, fill this column with a NULL to avoid ** taking up data space with information that will never be used. ** As there may be shallow copies of this value, make it a soft-NULL */ sqlite3VdbeAddOp1(v, OP_SoftNull, iRegStore); continue; } if( pColumn==0 ){ if( IsHiddenColumn(&pTab->aCol[i]) ){ assert( IsVirtual(pTab) ); j = -1; nHidden++; }else{ j = i - nHidden; } }else{ for(j=0; jnId; j++){ if( pColumn->a[j].idx==i ) break; } } if( j<0 || nColumn==0 || (pColumn && j>=pColumn->nId) ){ sqlite3ExprCodeFactorable(pParse, pTab->aCol[i].pDflt, iRegStore); }else if( useTempTable ){ sqlite3VdbeAddOp3(v, OP_Column, srcTab, j, iRegStore); }else if( pSelect ){ if( regFromSelect!=regData ){ sqlite3VdbeAddOp2(v, OP_SCopy, regFromSelect+j, iRegStore); } }else{ sqlite3ExprCode(pParse, pList->a[j].pExpr, iRegStore); } } /* Generate code to check constraints and generate index keys and ** do the insertion. */ #ifndef SQLITE_OMIT_VIRTUALTABLE if( IsVirtual(pTab) ){ const char *pVTab = (const char *)sqlite3GetVTable(db, pTab); sqlite3VtabMakeWritable(pParse, pTab); sqlite3VdbeAddOp4(v, OP_VUpdate, 1, pTab->nCol+2, regIns, pVTab, P4_VTAB); sqlite3VdbeChangeP5(v, onError==OE_Default ? OE_Abort : onError); sqlite3MayAbort(pParse); }else #endif { int isReplace; /* Set to true if constraints may cause a replace */ sqlite3GenerateConstraintChecks(pParse, pTab, aRegIdx, iDataCur, iIdxCur, regIns, 0, ipkColumn>=0, onError, endOfLoop, &isReplace ); sqlite3FkCheck(pParse, pTab, 0, regIns, 0, 0); sqlite3CompleteInsertion(pParse, pTab, iDataCur, iIdxCur, regIns, aRegIdx, 0, appendFlag, isReplace==0); } } /* Update the count of rows that are inserted */ if( (db->flags & SQLITE_CountRows)!=0 ){ sqlite3VdbeAddOp2(v, OP_AddImm, regRowCount, 1); } if( pTrigger ){ /* Code AFTER triggers */ sqlite3CodeRowTrigger(pParse, pTrigger, TK_INSERT, 0, TRIGGER_AFTER, pTab, regData-2-pTab->nCol, onError, endOfLoop); } /* The bottom of the main insertion loop, if the data source ** is a SELECT statement. 
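** (In terms of the templates above, this is where control jumps back to
** label C; a single-row VALUES insert simply falls through.)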
*/ sqlite3VdbeResolveLabel(v, endOfLoop); if( useTempTable ){ sqlite3VdbeAddOp2(v, OP_Next, srcTab, addrCont); VdbeCoverage(v); sqlite3VdbeJumpHere(v, addrInsTop); sqlite3VdbeAddOp1(v, OP_Close, srcTab); }else if( pSelect ){ sqlite3VdbeAddOp2(v, OP_Goto, 0, addrCont); sqlite3VdbeJumpHere(v, addrInsTop); } if( !IsVirtual(pTab) && !isView ){ /* Close all tables opened */ if( iDataCurpIndex; pIdx; pIdx=pIdx->pNext, idx++){ sqlite3VdbeAddOp1(v, OP_Close, idx+iIdxCur); } } insert_end: /* Update the sqlite_sequence table by storing the content of the ** maximum rowid counter values recorded while inserting into ** autoincrement tables. */ if( pParse->nested==0 && pParse->pTriggerTab==0 ){ sqlite3AutoincrementEnd(pParse); } /* ** Return the number of rows inserted. If this routine is ** generating code because of a call to sqlite3NestedParse(), do not ** invoke the callback function. */ if( (db->flags&SQLITE_CountRows) && !pParse->nested && !pParse->pTriggerTab ){ sqlite3VdbeAddOp2(v, OP_ResultRow, regRowCount, 1); sqlite3VdbeSetNumCols(v, 1); sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "rows inserted", SQLITE_STATIC); } insert_cleanup: sqlite3SrcListDelete(db, pTabList); sqlite3ExprListDelete(db, pList); sqlite3SelectDelete(db, pSelect); sqlite3IdListDelete(db, pColumn); sqlite3DbFree(db, aRegIdx); } /* Make sure "isView" and other macros defined above are undefined. Otherwise ** they may interfere with compilation of other functions in this file ** (or in another file, if this file becomes part of the amalgamation). */ #ifdef isView #undef isView #endif #ifdef pTrigger #undef pTrigger #endif #ifdef tmask #undef tmask #endif /* ** Generate code to do constraint checks prior to an INSERT or an UPDATE ** on table pTab. ** ** The regNewData parameter is the first register in a range that contains ** the data to be inserted or the data after the update. There will be ** pTab->nCol+1 registers in this range. The first register (the one ** that regNewData points to) will contain the new rowid, or NULL in the ** case of a WITHOUT ROWID table. The second register in the range will ** contain the content of the first table column. The third register will ** contain the content of the second table column. And so forth. ** ** The regOldData parameter is similar to regNewData except that it contains ** the data prior to an UPDATE rather than afterwards. regOldData is zero ** for an INSERT. This routine can distinguish between UPDATE and INSERT by ** checking regOldData for zero. ** ** For an UPDATE, the pkChng boolean is true if the true primary key (the ** rowid for a normal table or the PRIMARY KEY for a WITHOUT ROWID table) ** might be modified by the UPDATE. If pkChng is false, then the key of ** the iDataCur content table is guaranteed to be unchanged by the UPDATE. ** ** For an INSERT, the pkChng boolean indicates whether or not the rowid ** was explicitly specified as part of the INSERT statement. If pkChng ** is zero, it means that the either rowid is computed automatically or ** that the table is a WITHOUT ROWID table and has no rowid. On an INSERT, ** pkChng will only be true if the INSERT statement provides an integer ** value for either the rowid column or its INTEGER PRIMARY KEY alias. ** ** The code generated by this routine will store new index entries into ** registers identified by aRegIdx[]. No index entry is created for ** indices where aRegIdx[i]==0. The order of indices in aRegIdx[] is ** the same as the order of indices on the linked list of indices ** at pTab->pIndex. 
** ** The caller must have already opened writeable cursors on the main ** table and all applicable indices (that is to say, all indices for which ** aRegIdx[] is not zero). iDataCur is the cursor for the main table when ** inserting or updating a rowid table, or the cursor for the PRIMARY KEY ** index when operating on a WITHOUT ROWID table. iIdxCur is the cursor ** for the first index in the pTab->pIndex list. Cursors for other indices ** are at iIdxCur+N for the N-th element of the pTab->pIndex list. ** ** This routine also generates code to check constraints. NOT NULL, ** CHECK, and UNIQUE constraints are all checked. If a constraint fails, ** then the appropriate action is performed. There are five possible ** actions: ROLLBACK, ABORT, FAIL, REPLACE, and IGNORE. ** ** Constraint type Action What Happens ** --------------- ---------- ---------------------------------------- ** any ROLLBACK The current transaction is rolled back and ** sqlite3_step() returns immediately with a ** return code of SQLITE_CONSTRAINT. ** ** any ABORT Back out changes from the current command ** only (do not do a complete rollback) then ** cause sqlite3_step() to return immediately ** with SQLITE_CONSTRAINT. ** ** any FAIL Sqlite3_step() returns immediately with a ** return code of SQLITE_CONSTRAINT. The ** transaction is not rolled back and any ** changes to prior rows are retained. ** ** any IGNORE The attempt in insert or update the current ** row is skipped, without throwing an error. ** Processing continues with the next row. ** (There is an immediate jump to ignoreDest.) ** ** NOT NULL REPLACE The NULL value is replace by the default ** value for that column. If the default value ** is NULL, the action is the same as ABORT. ** ** UNIQUE REPLACE The other row that conflicts with the row ** being inserted is removed. ** ** CHECK REPLACE Illegal. The results in an exception. ** ** Which action to take is determined by the overrideError parameter. ** Or if overrideError==OE_Default, then the pParse->onError parameter ** is used. Or if pParse->onError==OE_Default then the onError value ** for the constraint is used. */ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( Parse *pParse, /* The parser context */ Table *pTab, /* The table being inserted or updated */ int *aRegIdx, /* Use register aRegIdx[i] for index i. 0 for unused */ int iDataCur, /* Canonical data cursor (main table or PK index) */ int iIdxCur, /* First index cursor */ int regNewData, /* First register in a range holding values to insert */ int regOldData, /* Previous content. 0 for INSERTs */ u8 pkChng, /* Non-zero if the rowid or PRIMARY KEY changed */ u8 overrideError, /* Override onError to this if not OE_Default */ int ignoreDest, /* Jump to this label on an OE_Ignore resolution */ int *pbMayReplace /* OUT: Set to true if constraint may cause a replace */ ){ Vdbe *v; /* VDBE under constrution */ Index *pIdx; /* Pointer to one of the indices */ Index *pPk = 0; /* The PRIMARY KEY index */ sqlite3 *db; /* Database connection */ int i; /* loop counter */ int ix; /* Index loop counter */ int nCol; /* Number of columns */ int onError; /* Conflict resolution strategy */ int j1; /* Address of jump instruction */ int seenReplace = 0; /* True if REPLACE is used to resolve INT PK conflict */ int nPkField; /* Number of fields in PRIMARY KEY. 
1 for ROWID tables */ int ipkTop = 0; /* Top of the rowid change constraint check */ int ipkBottom = 0; /* Bottom of the rowid change constraint check */ u8 isUpdate; /* True if this is an UPDATE operation */ u8 bAffinityDone = 0; /* True if the OP_Affinity operation has been run */ int regRowid = -1; /* Register holding ROWID value */ isUpdate = regOldData!=0; db = pParse->db; v = sqlite3GetVdbe(pParse); assert( v!=0 ); assert( pTab->pSelect==0 ); /* This table is not a VIEW */ nCol = pTab->nCol; /* pPk is the PRIMARY KEY index for WITHOUT ROWID tables and NULL for ** normal rowid tables. nPkField is the number of key fields in the ** pPk index or 1 for a rowid table. In other words, nPkField is the ** number of fields in the true primary key of the table. */ if( HasRowid(pTab) ){ pPk = 0; nPkField = 1; }else{ pPk = sqlite3PrimaryKeyIndex(pTab); nPkField = pPk->nKeyCol; } /* Record that this module has started */ VdbeModuleComment((v, "BEGIN: GenCnstCks(%d,%d,%d,%d,%d)", iDataCur, iIdxCur, regNewData, regOldData, pkChng)); /* Test all NOT NULL constraints. */ for(i=0; iiPKey ){ continue; } onError = pTab->aCol[i].notNull; if( onError==OE_None ) continue; if( overrideError!=OE_Default ){ onError = overrideError; }else if( onError==OE_Default ){ onError = OE_Abort; } if( onError==OE_Replace && pTab->aCol[i].pDflt==0 ){ onError = OE_Abort; } assert( onError==OE_Rollback || onError==OE_Abort || onError==OE_Fail || onError==OE_Ignore || onError==OE_Replace ); switch( onError ){ case OE_Abort: sqlite3MayAbort(pParse); /* Fall through */ case OE_Rollback: case OE_Fail: { char *zMsg = sqlite3MPrintf(db, "%s.%s", pTab->zName, pTab->aCol[i].zName); sqlite3VdbeAddOp4(v, OP_HaltIfNull, SQLITE_CONSTRAINT_NOTNULL, onError, regNewData+1+i, zMsg, P4_DYNAMIC); sqlite3VdbeChangeP5(v, P5_ConstraintNotNull); VdbeCoverage(v); break; } case OE_Ignore: { sqlite3VdbeAddOp2(v, OP_IsNull, regNewData+1+i, ignoreDest); VdbeCoverage(v); break; } default: { assert( onError==OE_Replace ); j1 = sqlite3VdbeAddOp1(v, OP_NotNull, regNewData+1+i); VdbeCoverage(v); sqlite3ExprCode(pParse, pTab->aCol[i].pDflt, regNewData+1+i); sqlite3VdbeJumpHere(v, j1); break; } } } /* Test all CHECK constraints */ #ifndef SQLITE_OMIT_CHECK if( pTab->pCheck && (db->flags & SQLITE_IgnoreChecks)==0 ){ ExprList *pCheck = pTab->pCheck; pParse->ckBase = regNewData+1; onError = overrideError!=OE_Default ? overrideError : OE_Abort; for(i=0; inExpr; i++){ int allOk = sqlite3VdbeMakeLabel(v); sqlite3ExprIfTrue(pParse, pCheck->a[i].pExpr, allOk, SQLITE_JUMPIFNULL); if( onError==OE_Ignore ){ sqlite3VdbeAddOp2(v, OP_Goto, 0, ignoreDest); }else{ char *zName = pCheck->a[i].zName; if( zName==0 ) zName = pTab->zName; if( onError==OE_Replace ) onError = OE_Abort; /* IMP: R-15569-63625 */ sqlite3HaltConstraint(pParse, SQLITE_CONSTRAINT_CHECK, onError, zName, P4_TRANSIENT, P5_ConstraintCheck); } sqlite3VdbeResolveLabel(v, allOk); } } #endif /* !defined(SQLITE_OMIT_CHECK) */ /* If rowid is changing, make sure the new rowid does not previously ** exist in the table. */ if( pkChng && pPk==0 ){ int addrRowidOk = sqlite3VdbeMakeLabel(v); /* Figure out what action to take in case of a rowid collision */ onError = pTab->keyConf; if( overrideError!=OE_Default ){ onError = overrideError; }else if( onError==OE_Default ){ onError = OE_Abort; } if( isUpdate ){ /* pkChng!=0 does not mean that the rowid has change, only that ** it might have changed. Skip the conflict logic below if the rowid ** is unchanged. 
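** (Illustrative: "UPDATE t SET rowid=rowid+0" marks pkChng at compile
** time even though no row need actually change its rowid, hence the
** run-time OP_Eq test emitted below.)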
*/ sqlite3VdbeAddOp3(v, OP_Eq, regNewData, addrRowidOk, regOldData); sqlite3VdbeChangeP5(v, SQLITE_NOTNULL); VdbeCoverage(v); } /* If the response to a rowid conflict is REPLACE but the response ** to some other UNIQUE constraint is FAIL or IGNORE, then we need ** to defer the running of the rowid conflict checking until after ** the UNIQUE constraints have run. */ if( onError==OE_Replace && overrideError!=OE_Replace ){ for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ if( pIdx->onError==OE_Ignore || pIdx->onError==OE_Fail ){ ipkTop = sqlite3VdbeAddOp0(v, OP_Goto); break; } } } /* Check to see if the new rowid already exists in the table. Skip ** the following conflict logic if it does not. */ sqlite3VdbeAddOp3(v, OP_NotExists, iDataCur, addrRowidOk, regNewData); VdbeCoverage(v); /* Generate code that deals with a rowid collision */ switch( onError ){ default: { onError = OE_Abort; /* Fall thru into the next case */ } case OE_Rollback: case OE_Abort: case OE_Fail: { sqlite3RowidConstraint(pParse, onError, pTab); break; } case OE_Replace: { /* If there are DELETE triggers on this table and the ** recursive-triggers flag is set, call GenerateRowDelete() to ** remove the conflicting row from the table. This will fire ** the triggers and remove both the table and index b-tree entries. ** ** Otherwise, if there are no triggers or the recursive-triggers ** flag is not set, but the table has one or more indexes, call ** GenerateRowIndexDelete(). This removes the index b-tree entries ** only. The table b-tree entry will be replaced by the new entry ** when it is inserted. ** ** If either GenerateRowDelete() or GenerateRowIndexDelete() is called, ** also invoke MultiWrite() to indicate that this VDBE may require ** statement rollback (if the statement is aborted after the delete ** takes place). Earlier versions called sqlite3MultiWrite() regardless, ** but being more selective here allows statements like: ** ** REPLACE INTO t(rowid) VALUES($newrowid) ** ** to run without a statement journal if there are no indexes on the ** table. */ Trigger *pTrigger = 0; if( db->flags&SQLITE_RecTriggers ){ pTrigger = sqlite3TriggersExist(pParse, pTab, TK_DELETE, 0, 0); } if( pTrigger || sqlite3FkRequired(pParse, pTab, 0, 0) ){ sqlite3MultiWrite(pParse); sqlite3GenerateRowDelete(pParse, pTab, pTrigger, iDataCur, iIdxCur, regNewData, 1, 0, OE_Replace, 1); }else if( pTab->pIndex ){ sqlite3MultiWrite(pParse); sqlite3GenerateRowIndexDelete(pParse, pTab, iDataCur, iIdxCur, 0); } seenReplace = 1; break; } case OE_Ignore: { /*assert( seenReplace==0 );*/ sqlite3VdbeAddOp2(v, OP_Goto, 0, ignoreDest); break; } } sqlite3VdbeResolveLabel(v, addrRowidOk); if( ipkTop ){ ipkBottom = sqlite3VdbeAddOp0(v, OP_Goto); sqlite3VdbeJumpHere(v, ipkTop); } } /* Test all UNIQUE constraints by creating entries for each UNIQUE ** index and making sure that duplicate entries do not already exist. ** Compute the revised record entries for indices as we go. ** ** This loop also handles the case of the PRIMARY KEY index for a ** WITHOUT ROWID table. 
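**
** For example, given a hypothetical schema
**
**       CREATE TABLE t1(a INTEGER PRIMARY KEY, b TEXT UNIQUE);
**
** an "INSERT OR REPLACE INTO t1(a,b) VALUES(1,'x')" statement must probe
** both the rowid table (for a duplicate a==1) and the index on column b
** (for a duplicate b=='x'), deleting any conflicting rows before the new
** row is written.  An "INSERT OR IGNORE" would instead jump to ignoreDest
** as soon as the first conflict is detected.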
*/ for(ix=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, ix++){ int regIdx; /* Range of registers hold conent for pIdx */ int regR; /* Range of registers holding conflicting PK */ int iThisCur; /* Cursor for this UNIQUE index */ int addrUniqueOk; /* Jump here if the UNIQUE constraint is satisfied */ if( aRegIdx[ix]==0 ) continue; /* Skip indices that do not change */ if( bAffinityDone==0 ){ sqlite3TableAffinity(v, pTab, regNewData+1); bAffinityDone = 1; } iThisCur = iIdxCur+ix; addrUniqueOk = sqlite3VdbeMakeLabel(v); /* Skip partial indices for which the WHERE clause is not true */ if( pIdx->pPartIdxWhere ){ sqlite3VdbeAddOp2(v, OP_Null, 0, aRegIdx[ix]); pParse->ckBase = regNewData+1; sqlite3ExprIfFalseDup(pParse, pIdx->pPartIdxWhere, addrUniqueOk, SQLITE_JUMPIFNULL); pParse->ckBase = 0; } /* Create a record for this index entry as it should appear after ** the insert or update. Store that record in the aRegIdx[ix] register */ regIdx = sqlite3GetTempRange(pParse, pIdx->nColumn); for(i=0; inColumn; i++){ int iField = pIdx->aiColumn[i]; int x; if( iField<0 || iField==pTab->iPKey ){ if( regRowid==regIdx+i ) continue; /* ROWID already in regIdx+i */ x = regNewData; regRowid = pIdx->pPartIdxWhere ? -1 : regIdx+i; }else{ x = iField + regNewData + 1; } sqlite3VdbeAddOp2(v, OP_SCopy, x, regIdx+i); VdbeComment((v, "%s", iField<0 ? "rowid" : pTab->aCol[iField].zName)); } sqlite3VdbeAddOp3(v, OP_MakeRecord, regIdx, pIdx->nColumn, aRegIdx[ix]); VdbeComment((v, "for %s", pIdx->zName)); sqlite3ExprCacheAffinityChange(pParse, regIdx, pIdx->nColumn); /* In an UPDATE operation, if this index is the PRIMARY KEY index ** of a WITHOUT ROWID table and there has been no change the ** primary key, then no collision is possible. The collision detection ** logic below can all be skipped. */ if( isUpdate && pPk==pIdx && pkChng==0 ){ sqlite3VdbeResolveLabel(v, addrUniqueOk); continue; } /* Find out what action to take in case there is a uniqueness conflict */ onError = pIdx->onError; if( onError==OE_None ){ sqlite3ReleaseTempRange(pParse, regIdx, pIdx->nColumn); sqlite3VdbeResolveLabel(v, addrUniqueOk); continue; /* pIdx is not a UNIQUE index */ } if( overrideError!=OE_Default ){ onError = overrideError; }else if( onError==OE_Default ){ onError = OE_Abort; } /* Check to see if the new index entry will be unique */ sqlite3VdbeAddOp4Int(v, OP_NoConflict, iThisCur, addrUniqueOk, regIdx, pIdx->nKeyCol); VdbeCoverage(v); /* Generate code to handle collisions */ regR = (pIdx==pPk) ? regIdx : sqlite3GetTempRange(pParse, nPkField); if( isUpdate || onError==OE_Replace ){ if( HasRowid(pTab) ){ sqlite3VdbeAddOp2(v, OP_IdxRowid, iThisCur, regR); /* Conflict only if the rowid of the existing index entry ** is different from old-rowid */ if( isUpdate ){ sqlite3VdbeAddOp3(v, OP_Eq, regR, addrUniqueOk, regOldData); sqlite3VdbeChangeP5(v, SQLITE_NOTNULL); VdbeCoverage(v); } }else{ int x; /* Extract the PRIMARY KEY from the end of the index entry and ** store it in registers regR..regR+nPk-1 */ if( pIdx!=pPk ){ for(i=0; inKeyCol; i++){ x = sqlite3ColumnOfIndex(pIdx, pPk->aiColumn[i]); sqlite3VdbeAddOp3(v, OP_Column, iThisCur, x, regR+i); VdbeComment((v, "%s.%s", pTab->zName, pTab->aCol[pPk->aiColumn[i]].zName)); } } if( isUpdate ){ /* If currently processing the PRIMARY KEY of a WITHOUT ROWID ** table, only conflict if the new PRIMARY KEY values are actually ** different from the old. 
** ** For a UNIQUE index, only conflict if the PRIMARY KEY values ** of the matched index row are different from the original PRIMARY ** KEY values of this row before the update. */ int addrJump = sqlite3VdbeCurrentAddr(v)+pPk->nKeyCol; int op = OP_Ne; int regCmp = (IsPrimaryKeyIndex(pIdx) ? regIdx : regR); for(i=0; inKeyCol; i++){ char *p4 = (char*)sqlite3LocateCollSeq(pParse, pPk->azColl[i]); x = pPk->aiColumn[i]; if( i==(pPk->nKeyCol-1) ){ addrJump = addrUniqueOk; op = OP_Eq; } sqlite3VdbeAddOp4(v, op, regOldData+1+x, addrJump, regCmp+i, p4, P4_COLLSEQ ); sqlite3VdbeChangeP5(v, SQLITE_NOTNULL); VdbeCoverageIf(v, op==OP_Eq); VdbeCoverageIf(v, op==OP_Ne); } } } } /* Generate code that executes if the new index entry is not unique */ assert( onError==OE_Rollback || onError==OE_Abort || onError==OE_Fail || onError==OE_Ignore || onError==OE_Replace ); switch( onError ){ case OE_Rollback: case OE_Abort: case OE_Fail: { sqlite3UniqueConstraint(pParse, onError, pIdx); break; } case OE_Ignore: { sqlite3VdbeAddOp2(v, OP_Goto, 0, ignoreDest); break; } default: { Trigger *pTrigger = 0; assert( onError==OE_Replace ); sqlite3MultiWrite(pParse); if( db->flags&SQLITE_RecTriggers ){ pTrigger = sqlite3TriggersExist(pParse, pTab, TK_DELETE, 0, 0); } sqlite3GenerateRowDelete(pParse, pTab, pTrigger, iDataCur, iIdxCur, regR, nPkField, 0, OE_Replace, pIdx==pPk); seenReplace = 1; break; } } sqlite3VdbeResolveLabel(v, addrUniqueOk); sqlite3ReleaseTempRange(pParse, regIdx, pIdx->nColumn); if( regR!=regIdx ) sqlite3ReleaseTempRange(pParse, regR, nPkField); } if( ipkTop ){ sqlite3VdbeAddOp2(v, OP_Goto, 0, ipkTop+1); sqlite3VdbeJumpHere(v, ipkBottom); } *pbMayReplace = seenReplace; VdbeModuleComment((v, "END: GenCnstCks(%d)", seenReplace)); } /* ** This routine generates code to finish the INSERT or UPDATE operation ** that was started by a prior call to sqlite3GenerateConstraintChecks. ** A consecutive range of registers starting at regNewData contains the ** rowid and the content to be inserted. ** ** The arguments to this routine should be the same as the first six ** arguments to sqlite3GenerateConstraintChecks. */ SQLITE_PRIVATE void sqlite3CompleteInsertion( Parse *pParse, /* The parser context */ Table *pTab, /* the table into which we are inserting */ int iDataCur, /* Cursor of the canonical data source */ int iIdxCur, /* First index cursor */ int regNewData, /* Range of content */ int *aRegIdx, /* Register used by each index. 
0 for unused indices */ int isUpdate, /* True for UPDATE, False for INSERT */ int appendBias, /* True if this is likely to be an append */ int useSeekResult /* True to set the USESEEKRESULT flag on OP_[Idx]Insert */ ){ Vdbe *v; /* Prepared statements under construction */ Index *pIdx; /* An index being inserted or updated */ u8 pik_flags; /* flag values passed to the btree insert */ int regData; /* Content registers (after the rowid) */ int regRec; /* Register holding assembled record for the table */ int i; /* Loop counter */ u8 bAffinityDone = 0; /* True if OP_Affinity has been run already */ v = sqlite3GetVdbe(pParse); assert( v!=0 ); assert( pTab->pSelect==0 ); /* This table is not a VIEW */ for(i=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, i++){ if( aRegIdx[i]==0 ) continue; bAffinityDone = 1; if( pIdx->pPartIdxWhere ){ sqlite3VdbeAddOp2(v, OP_IsNull, aRegIdx[i], sqlite3VdbeCurrentAddr(v)+2); VdbeCoverage(v); } sqlite3VdbeAddOp2(v, OP_IdxInsert, iIdxCur+i, aRegIdx[i]); pik_flags = 0; if( useSeekResult ) pik_flags = OPFLAG_USESEEKRESULT; if( IsPrimaryKeyIndex(pIdx) && !HasRowid(pTab) ){ assert( pParse->nested==0 ); pik_flags |= OPFLAG_NCHANGE; } if( pik_flags ) sqlite3VdbeChangeP5(v, pik_flags); } if( !HasRowid(pTab) ) return; regData = regNewData + 1; regRec = sqlite3GetTempReg(pParse); sqlite3VdbeAddOp3(v, OP_MakeRecord, regData, pTab->nCol, regRec); if( !bAffinityDone ) sqlite3TableAffinity(v, pTab, 0); sqlite3ExprCacheAffinityChange(pParse, regData, pTab->nCol); if( pParse->nested ){ pik_flags = 0; }else{ pik_flags = OPFLAG_NCHANGE; pik_flags |= (isUpdate?OPFLAG_ISUPDATE:OPFLAG_LASTROWID); } if( appendBias ){ pik_flags |= OPFLAG_APPEND; } if( useSeekResult ){ pik_flags |= OPFLAG_USESEEKRESULT; } sqlite3VdbeAddOp3(v, OP_Insert, iDataCur, regRec, regNewData); if( !pParse->nested ){ sqlite3VdbeChangeP4(v, -1, pTab->zName, P4_TRANSIENT); } sqlite3VdbeChangeP5(v, pik_flags); } /* ** Allocate cursors for the pTab table and all its indices and generate ** code to open and initialized those cursors. ** ** The cursor for the object that contains the complete data (normally ** the table itself, but the PRIMARY KEY index in the case of a WITHOUT ** ROWID table) is returned in *piDataCur. The first index cursor is ** returned in *piIdxCur. The number of indices is returned. ** ** Use iBase as the first cursor (either the *piDataCur for rowid tables ** or the first index for WITHOUT ROWID tables) if it is non-negative. ** If iBase is negative, then allocate the next available cursor. ** ** For a rowid table, *piDataCur will be exactly one less than *piIdxCur. ** For a WITHOUT ROWID table, *piDataCur will be somewhere in the range ** of *piIdxCurs, depending on where the PRIMARY KEY index appears on the ** pTab->pIndex list. ** ** If pTab is a virtual table, then this routine is a no-op and the ** *piDataCur and *piIdxCur values are left uninitialized. */ SQLITE_PRIVATE int sqlite3OpenTableAndIndices( Parse *pParse, /* Parsing context */ Table *pTab, /* Table to be opened */ int op, /* OP_OpenRead or OP_OpenWrite */ int iBase, /* Use this for the table cursor, if there is one */ u8 *aToOpen, /* If not NULL: boolean for each table and index */ int *piDataCur, /* Write the database source cursor number here */ int *piIdxCur /* Write the first index cursor number here */ ){ int i; int iDb; int iDataCur; Index *pIdx; Vdbe *v; assert( op==OP_OpenRead || op==OP_OpenWrite ); if( IsVirtual(pTab) ){ /* This routine is a no-op for virtual tables. 
Leave the output ** variables *piDataCur and *piIdxCur uninitialized so that valgrind ** can detect if they are used by mistake in the caller. */ return 0; } iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema); v = sqlite3GetVdbe(pParse); assert( v!=0 ); if( iBase<0 ) iBase = pParse->nTab; iDataCur = iBase++; if( piDataCur ) *piDataCur = iDataCur; if( HasRowid(pTab) && (aToOpen==0 || aToOpen[0]) ){ sqlite3OpenTable(pParse, iDataCur, iDb, pTab, op); }else{ sqlite3TableLock(pParse, iDb, pTab->tnum, op==OP_OpenWrite, pTab->zName); } if( piIdxCur ) *piIdxCur = iBase; for(i=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, i++){ int iIdxCur = iBase++; assert( pIdx->pSchema==pTab->pSchema ); if( IsPrimaryKeyIndex(pIdx) && !HasRowid(pTab) && piDataCur ){ *piDataCur = iIdxCur; } if( aToOpen==0 || aToOpen[i+1] ){ sqlite3VdbeAddOp3(v, op, iIdxCur, pIdx->tnum, iDb); sqlite3VdbeSetP4KeyInfo(pParse, pIdx); VdbeComment((v, "%s", pIdx->zName)); } } if( iBase>pParse->nTab ) pParse->nTab = iBase; return i; } #ifdef SQLITE_TEST /* ** The following global variable is incremented whenever the ** transfer optimization is used. This is used for testing ** purposes only - to make sure the transfer optimization really ** is happening when it is supposed to. */ SQLITE_API int sqlite3_xferopt_count; #endif /* SQLITE_TEST */ #ifndef SQLITE_OMIT_XFER_OPT /* ** Check to collation names to see if they are compatible. */ static int xferCompatibleCollation(const char *z1, const char *z2){ if( z1==0 ){ return z2==0; } if( z2==0 ){ return 0; } return sqlite3StrICmp(z1, z2)==0; } /* ** Check to see if index pSrc is compatible as a source of data ** for index pDest in an insert transfer optimization. The rules ** for a compatible index: ** ** * The index is over the same set of columns ** * The same DESC and ASC markings occurs on all columns ** * The same onError processing (OE_Abort, OE_Ignore, etc) ** * The same collating sequence on each column ** * The index has the exact same WHERE clause */ static int xferCompatibleIndex(Index *pDest, Index *pSrc){ int i; assert( pDest && pSrc ); assert( pDest->pTable!=pSrc->pTable ); if( pDest->nKeyCol!=pSrc->nKeyCol ){ return 0; /* Different number of columns */ } if( pDest->onError!=pSrc->onError ){ return 0; /* Different conflict resolution strategies */ } for(i=0; inKeyCol; i++){ if( pSrc->aiColumn[i]!=pDest->aiColumn[i] ){ return 0; /* Different columns indexed */ } if( pSrc->aSortOrder[i]!=pDest->aSortOrder[i] ){ return 0; /* Different sort orders */ } if( !xferCompatibleCollation(pSrc->azColl[i],pDest->azColl[i]) ){ return 0; /* Different collating sequences */ } } if( sqlite3ExprCompare(pSrc->pPartIdxWhere, pDest->pPartIdxWhere, -1) ){ return 0; /* Different WHERE clauses */ } /* If no test above fails then the indices must be compatible */ return 1; } /* ** Attempt the transfer optimization on INSERTs of the form ** ** INSERT INTO tab1 SELECT * FROM tab2; ** ** The xfer optimization transfers raw records from tab2 over to tab1. ** Columns are not decoded and reassembled, which greatly improves ** performance. Raw index records are transferred in the same way. ** ** The xfer optimization is only attempted if tab1 and tab2 are compatible. ** There are lots of rules for determining compatibility - see comments ** embedded in the code for details. ** ** This routine returns TRUE if the optimization is guaranteed to be used. ** Sometimes the xfer optimization will only work if the destination table ** is empty - a factor that can only be determined at run-time. 
In that ** case, this routine generates code for the xfer optimization but also ** does a test to see if the destination table is empty and jumps over the ** xfer optimization code if the test fails. In that case, this routine ** returns FALSE so that the caller will know to go ahead and generate ** an unoptimized transfer. This routine also returns FALSE if there ** is no chance that the xfer optimization can be applied. ** ** This optimization is particularly useful at making VACUUM run faster. */ static int xferOptimization( Parse *pParse, /* Parser context */ Table *pDest, /* The table we are inserting into */ Select *pSelect, /* A SELECT statement to use as the data source */ int onError, /* How to handle constraint errors */ int iDbDest /* The database of pDest */ ){ sqlite3 *db = pParse->db; ExprList *pEList; /* The result set of the SELECT */ Table *pSrc; /* The table in the FROM clause of SELECT */ Index *pSrcIdx, *pDestIdx; /* Source and destination indices */ struct SrcList_item *pItem; /* An element of pSelect->pSrc */ int i; /* Loop counter */ int iDbSrc; /* The database of pSrc */ int iSrc, iDest; /* Cursors from source and destination */ int addr1, addr2; /* Loop addresses */ int emptyDestTest = 0; /* Address of test for empty pDest */ int emptySrcTest = 0; /* Address of test for empty pSrc */ Vdbe *v; /* The VDBE we are building */ int regAutoinc; /* Memory register used by AUTOINC */ int destHasUniqueIdx = 0; /* True if pDest has a UNIQUE index */ int regData, regRowid; /* Registers holding data and rowid */ if( pSelect==0 ){ return 0; /* Must be of the form INSERT INTO ... SELECT ... */ } if( pParse->pWith || pSelect->pWith ){ /* Do not attempt to process this query if there are an WITH clauses ** attached to it. Proceeding may generate a false "no such table: xxx" ** error if pSelect reads from a CTE named "xxx". */ return 0; } if( sqlite3TriggerList(pParse, pDest) ){ return 0; /* tab1 must not have triggers */ } #ifndef SQLITE_OMIT_VIRTUALTABLE if( pDest->tabFlags & TF_Virtual ){ return 0; /* tab1 must not be a virtual table */ } #endif if( onError==OE_Default ){ if( pDest->iPKey>=0 ) onError = pDest->keyConf; if( onError==OE_Default ) onError = OE_Abort; } assert(pSelect->pSrc); /* allocated even if there is no FROM clause */ if( pSelect->pSrc->nSrc!=1 ){ return 0; /* FROM clause must have exactly one term */ } if( pSelect->pSrc->a[0].pSelect ){ return 0; /* FROM clause cannot contain a subquery */ } if( pSelect->pWhere ){ return 0; /* SELECT may not have a WHERE clause */ } if( pSelect->pOrderBy ){ return 0; /* SELECT may not have an ORDER BY clause */ } /* Do not need to test for a HAVING clause. If HAVING is present but ** there is no ORDER BY, we will get an error. */ if( pSelect->pGroupBy ){ return 0; /* SELECT may not have a GROUP BY clause */ } if( pSelect->pLimit ){ return 0; /* SELECT may not have a LIMIT clause */ } assert( pSelect->pOffset==0 ); /* Must be so if pLimit==0 */ if( pSelect->pPrior ){ return 0; /* SELECT may not be a compound query */ } if( pSelect->selFlags & SF_Distinct ){ return 0; /* SELECT may not be DISTINCT */ } pEList = pSelect->pEList; assert( pEList!=0 ); if( pEList->nExpr!=1 ){ return 0; /* The result set must have exactly one column */ } assert( pEList->a[0].pExpr ); if( pEList->a[0].pExpr->op!=TK_ALL ){ return 0; /* The result set must be the special operator "*" */ } /* At this point we have established that the statement is of the ** correct syntactic form to participate in this optimization. Now ** we have to check the semantics. 
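**
** For example, with a hypothetical pair of identically declared tables
**
**       CREATE TABLE t1(a INTEGER PRIMARY KEY, b TEXT DEFAULT 'x');
**       CREATE TABLE t2(a INTEGER PRIMARY KEY, b TEXT DEFAULT 'x');
**
** the statement "INSERT INTO t1 SELECT * FROM t2;" satisfies the syntactic
** tests above.  The checks below then verify that the two tables agree on
** column count, INTEGER PRIMARY KEY position, affinities, collating
** sequences, NOT NULL flags, default values, indexes, and CHECK
** constraints before any raw records are copied.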
*/ pItem = pSelect->pSrc->a; pSrc = sqlite3LocateTableItem(pParse, 0, pItem); if( pSrc==0 ){ return 0; /* FROM clause does not contain a real table */ } if( pSrc==pDest ){ return 0; /* tab1 and tab2 may not be the same table */ } if( HasRowid(pDest)!=HasRowid(pSrc) ){ return 0; /* source and destination must both be WITHOUT ROWID or not */ } #ifndef SQLITE_OMIT_VIRTUALTABLE if( pSrc->tabFlags & TF_Virtual ){ return 0; /* tab2 must not be a virtual table */ } #endif if( pSrc->pSelect ){ return 0; /* tab2 may not be a view */ } if( pDest->nCol!=pSrc->nCol ){ return 0; /* Number of columns must be the same in tab1 and tab2 */ } if( pDest->iPKey!=pSrc->iPKey ){ return 0; /* Both tables must have the same INTEGER PRIMARY KEY */ } for(i=0; inCol; i++){ Column *pDestCol = &pDest->aCol[i]; Column *pSrcCol = &pSrc->aCol[i]; if( pDestCol->affinity!=pSrcCol->affinity ){ return 0; /* Affinity must be the same on all columns */ } if( !xferCompatibleCollation(pDestCol->zColl, pSrcCol->zColl) ){ return 0; /* Collating sequence must be the same on all columns */ } if( pDestCol->notNull && !pSrcCol->notNull ){ return 0; /* tab2 must be NOT NULL if tab1 is */ } /* Default values for second and subsequent columns need to match. */ if( i>0 && ((pDestCol->zDflt==0)!=(pSrcCol->zDflt==0) || (pDestCol->zDflt && strcmp(pDestCol->zDflt, pSrcCol->zDflt)!=0)) ){ return 0; /* Default values must be the same for all columns */ } } for(pDestIdx=pDest->pIndex; pDestIdx; pDestIdx=pDestIdx->pNext){ if( IsUniqueIndex(pDestIdx) ){ destHasUniqueIdx = 1; } for(pSrcIdx=pSrc->pIndex; pSrcIdx; pSrcIdx=pSrcIdx->pNext){ if( xferCompatibleIndex(pDestIdx, pSrcIdx) ) break; } if( pSrcIdx==0 ){ return 0; /* pDestIdx has no corresponding index in pSrc */ } } #ifndef SQLITE_OMIT_CHECK if( pDest->pCheck && sqlite3ExprListCompare(pSrc->pCheck,pDest->pCheck,-1) ){ return 0; /* Tables have different CHECK constraints. Ticket #2252 */ } #endif #ifndef SQLITE_OMIT_FOREIGN_KEY /* Disallow the transfer optimization if the destination table constains ** any foreign key constraints. This is more restrictive than necessary. ** But the main beneficiary of the transfer optimization is the VACUUM ** command, and the VACUUM command disables foreign key constraints. So ** the extra complication to make this rule less restrictive is probably ** not worth the effort. Ticket [6284df89debdfa61db8073e062908af0c9b6118e] */ if( (db->flags & SQLITE_ForeignKeys)!=0 && pDest->pFKey!=0 ){ return 0; } #endif if( (db->flags & SQLITE_CountRows)!=0 ){ return 0; /* xfer opt does not play well with PRAGMA count_changes */ } /* If we get this far, it means that the xfer optimization is at ** least a possibility, though it might only work if the destination ** table (tab1) is initially empty. */ #ifdef SQLITE_TEST sqlite3_xferopt_count++; #endif iDbSrc = sqlite3SchemaToIndex(db, pSrc->pSchema); v = sqlite3GetVdbe(pParse); sqlite3CodeVerifySchema(pParse, iDbSrc); iSrc = pParse->nTab++; iDest = pParse->nTab++; regAutoinc = autoIncBegin(pParse, iDbDest, pDest); regData = sqlite3GetTempReg(pParse); regRowid = sqlite3GetTempReg(pParse); sqlite3OpenTable(pParse, iDest, iDbDest, pDest, OP_OpenWrite); assert( HasRowid(pDest) || destHasUniqueIdx ); if( (db->flags & SQLITE_Vacuum)==0 && ( (pDest->iPKey<0 && pDest->pIndex!=0) /* (1) */ || destHasUniqueIdx /* (2) */ || (onError!=OE_Abort && onError!=OE_Rollback) /* (3) */ )){ /* In some circumstances, we are able to run the xfer optimization ** only if the destination table is initially empty. 
Unless the ** SQLITE_Vacuum flag is set, this block generates code to make ** that determination. If SQLITE_Vacuum is set, then the destination ** table is always empty. ** ** Conditions under which the destination must be empty: ** ** (1) There is no INTEGER PRIMARY KEY but there are indices. ** (If the destination is not initially empty, the rowid fields ** of index entries might need to change.) ** ** (2) The destination has a unique index. (The xfer optimization ** is unable to test uniqueness.) ** ** (3) onError is something other than OE_Abort and OE_Rollback. */ addr1 = sqlite3VdbeAddOp2(v, OP_Rewind, iDest, 0); VdbeCoverage(v); emptyDestTest = sqlite3VdbeAddOp2(v, OP_Goto, 0, 0); sqlite3VdbeJumpHere(v, addr1); } if( HasRowid(pSrc) ){ sqlite3OpenTable(pParse, iSrc, iDbSrc, pSrc, OP_OpenRead); emptySrcTest = sqlite3VdbeAddOp2(v, OP_Rewind, iSrc, 0); VdbeCoverage(v); if( pDest->iPKey>=0 ){ addr1 = sqlite3VdbeAddOp2(v, OP_Rowid, iSrc, regRowid); addr2 = sqlite3VdbeAddOp3(v, OP_NotExists, iDest, 0, regRowid); VdbeCoverage(v); sqlite3RowidConstraint(pParse, onError, pDest); sqlite3VdbeJumpHere(v, addr2); autoIncStep(pParse, regAutoinc, regRowid); }else if( pDest->pIndex==0 ){ addr1 = sqlite3VdbeAddOp2(v, OP_NewRowid, iDest, regRowid); }else{ addr1 = sqlite3VdbeAddOp2(v, OP_Rowid, iSrc, regRowid); assert( (pDest->tabFlags & TF_Autoincrement)==0 ); } sqlite3VdbeAddOp2(v, OP_RowData, iSrc, regData); sqlite3VdbeAddOp3(v, OP_Insert, iDest, regData, regRowid); sqlite3VdbeChangeP5(v, OPFLAG_NCHANGE|OPFLAG_LASTROWID|OPFLAG_APPEND); sqlite3VdbeChangeP4(v, -1, pDest->zName, 0); sqlite3VdbeAddOp2(v, OP_Next, iSrc, addr1); VdbeCoverage(v); sqlite3VdbeAddOp2(v, OP_Close, iSrc, 0); sqlite3VdbeAddOp2(v, OP_Close, iDest, 0); }else{ sqlite3TableLock(pParse, iDbDest, pDest->tnum, 1, pDest->zName); sqlite3TableLock(pParse, iDbSrc, pSrc->tnum, 0, pSrc->zName); } for(pDestIdx=pDest->pIndex; pDestIdx; pDestIdx=pDestIdx->pNext){ u8 idxInsFlags = 0; for(pSrcIdx=pSrc->pIndex; ALWAYS(pSrcIdx); pSrcIdx=pSrcIdx->pNext){ if( xferCompatibleIndex(pDestIdx, pSrcIdx) ) break; } assert( pSrcIdx ); sqlite3VdbeAddOp3(v, OP_OpenRead, iSrc, pSrcIdx->tnum, iDbSrc); sqlite3VdbeSetP4KeyInfo(pParse, pSrcIdx); VdbeComment((v, "%s", pSrcIdx->zName)); sqlite3VdbeAddOp3(v, OP_OpenWrite, iDest, pDestIdx->tnum, iDbDest); sqlite3VdbeSetP4KeyInfo(pParse, pDestIdx); sqlite3VdbeChangeP5(v, OPFLAG_BULKCSR); VdbeComment((v, "%s", pDestIdx->zName)); addr1 = sqlite3VdbeAddOp2(v, OP_Rewind, iSrc, 0); VdbeCoverage(v); sqlite3VdbeAddOp2(v, OP_RowKey, iSrc, regData); if( db->flags & SQLITE_Vacuum ){ /* This INSERT command is part of a VACUUM operation, which guarantees ** that the destination table is empty. If all indexed columns use ** collation sequence BINARY, then it can also be assumed that the ** index will be populated by inserting keys in strictly sorted ** order. In this case, instead of seeking within the b-tree as part ** of every OP_IdxInsert opcode, an OP_Last is added before the ** OP_IdxInsert to seek to the point within the b-tree where each key ** should be inserted. This is faster. ** ** If any of the indexed columns use a collation sequence other than ** BINARY, this optimization is disabled. This is because the user ** might change the definition of a collation sequence and then run ** a VACUUM command. In that case keys may not be written in strictly ** sorted order. 
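**
** For example, if the source table carried a hypothetical index such as
**
**       CREATE INDEX i1 ON t1(b COLLATE NOCASE);
**
** then the NOCASE collation would cause the test below to fail, the
** OP_Last/USESEEKRESULT shortcut would be skipped, and each OP_IdxInsert
** would perform its own b-tree seek as usual.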
*/ for(i=0; inColumn; i++){ char *zColl = pSrcIdx->azColl[i]; assert( zColl!=0 ); if( sqlite3_stricmp("BINARY", zColl) ) break; } if( i==pSrcIdx->nColumn ){ idxInsFlags = OPFLAG_USESEEKRESULT; sqlite3VdbeAddOp3(v, OP_Last, iDest, 0, -1); } } if( !HasRowid(pSrc) && pDestIdx->idxType==2 ){ idxInsFlags |= OPFLAG_NCHANGE; } sqlite3VdbeAddOp3(v, OP_IdxInsert, iDest, regData, 1); sqlite3VdbeChangeP5(v, idxInsFlags); sqlite3VdbeAddOp2(v, OP_Next, iSrc, addr1+1); VdbeCoverage(v); sqlite3VdbeJumpHere(v, addr1); sqlite3VdbeAddOp2(v, OP_Close, iSrc, 0); sqlite3VdbeAddOp2(v, OP_Close, iDest, 0); } if( emptySrcTest ) sqlite3VdbeJumpHere(v, emptySrcTest); sqlite3ReleaseTempReg(pParse, regRowid); sqlite3ReleaseTempReg(pParse, regData); if( emptyDestTest ){ sqlite3VdbeAddOp2(v, OP_Halt, SQLITE_OK, 0); sqlite3VdbeJumpHere(v, emptyDestTest); sqlite3VdbeAddOp2(v, OP_Close, iDest, 0); return 0; }else{ return 1; } } #endif /* SQLITE_OMIT_XFER_OPT */ /************** End of insert.c **********************************************/ /************** Begin file legacy.c ******************************************/ /* ** 2001 September 15 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** Main file for the SQLite library. The routines in this file ** implement the programmer interface to the library. Routines in ** other files are for internal use by SQLite and should not be ** accessed by users of the library. */ /* #include "sqliteInt.h" */ /* ** Execute SQL code. Return one of the SQLITE_ success/failure ** codes. Also write an error message into memory obtained from ** malloc() and make *pzErrMsg point to that message. ** ** If the SQL is a query, then for each row in the query result ** the xCallback() function is called. pArg becomes the first ** argument to xCallback(). If xCallback=NULL then no callback ** is invoked, even for queries. 
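**
** A minimal usage sketch (the table name and the callback are hypothetical,
** not part of the library): the callback returns 0 to continue, and any
** non-zero return makes sqlite3_exec() stop and return SQLITE_ABORT.  An
** error message returned through pzErrMsg should be released with
** sqlite3_free().
**
**       static int cb(void *pArg, int nCol, char **azVal, char **azCol){
**         return 0;
**       }
**       ...
**       char *zErrMsg = 0;
**       int rc = sqlite3_exec(db, "SELECT * FROM t1;", cb, 0, &zErrMsg);
**       if( rc!=SQLITE_OK ) sqlite3_free(zErrMsg);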
*/ SQLITE_API int SQLITE_STDCALL sqlite3_exec( sqlite3 *db, /* The database on which the SQL executes */ const char *zSql, /* The SQL to be executed */ sqlite3_callback xCallback, /* Invoke this callback routine */ void *pArg, /* First argument to xCallback() */ char **pzErrMsg /* Write error messages here */ ){ int rc = SQLITE_OK; /* Return code */ const char *zLeftover; /* Tail of unprocessed SQL */ sqlite3_stmt *pStmt = 0; /* The current SQL statement */ char **azCols = 0; /* Names of result columns */ int callbackIsInit; /* True if callback data is initialized */ if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT; if( zSql==0 ) zSql = ""; sqlite3_mutex_enter(db->mutex); sqlite3Error(db, SQLITE_OK); while( rc==SQLITE_OK && zSql[0] ){ int nCol; char **azVals = 0; pStmt = 0; rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, &zLeftover); assert( rc==SQLITE_OK || pStmt==0 ); if( rc!=SQLITE_OK ){ continue; } if( !pStmt ){ /* this happens for a comment or white-space */ zSql = zLeftover; continue; } callbackIsInit = 0; nCol = sqlite3_column_count(pStmt); while( 1 ){ int i; rc = sqlite3_step(pStmt); /* Invoke the callback function if required */ if( xCallback && (SQLITE_ROW==rc || (SQLITE_DONE==rc && !callbackIsInit && db->flags&SQLITE_NullCallback)) ){ if( !callbackIsInit ){ azCols = sqlite3DbMallocZero(db, 2*nCol*sizeof(const char*) + 1); if( azCols==0 ){ goto exec_out; } for(i=0; imallocFailed = 1; goto exec_out; } } } if( xCallback(pArg, nCol, azVals, azCols) ){ /* EVIDENCE-OF: R-38229-40159 If the callback function to ** sqlite3_exec() returns non-zero, then sqlite3_exec() will ** return SQLITE_ABORT. */ rc = SQLITE_ABORT; sqlite3VdbeFinalize((Vdbe *)pStmt); pStmt = 0; sqlite3Error(db, SQLITE_ABORT); goto exec_out; } } if( rc!=SQLITE_ROW ){ rc = sqlite3VdbeFinalize((Vdbe *)pStmt); pStmt = 0; zSql = zLeftover; while( sqlite3Isspace(zSql[0]) ) zSql++; break; } } sqlite3DbFree(db, azCols); azCols = 0; } exec_out: if( pStmt ) sqlite3VdbeFinalize((Vdbe *)pStmt); sqlite3DbFree(db, azCols); rc = sqlite3ApiExit(db, rc); if( rc!=SQLITE_OK && pzErrMsg ){ int nErrMsg = 1 + sqlite3Strlen30(sqlite3_errmsg(db)); *pzErrMsg = sqlite3Malloc(nErrMsg); if( *pzErrMsg ){ memcpy(*pzErrMsg, sqlite3_errmsg(db), nErrMsg); }else{ rc = SQLITE_NOMEM; sqlite3Error(db, SQLITE_NOMEM); } }else if( pzErrMsg ){ *pzErrMsg = 0; } assert( (rc&db->errMask)==rc ); sqlite3_mutex_leave(db->mutex); return rc; } /************** End of legacy.c **********************************************/ /************** Begin file loadext.c *****************************************/ /* ** 2006 June 7 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** This file contains code used to dynamically load extensions into ** the SQLite library. */ #ifndef SQLITE_CORE #define SQLITE_CORE 1 /* Disable the API redefinition in sqlite3ext.h */ #endif /************** Include sqlite3ext.h in the middle of loadext.c **************/ /************** Begin file sqlite3ext.h **************************************/ /* ** 2006 June 7 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. 
** May you share freely, never taking more than you give. ** ************************************************************************* ** This header file defines the SQLite interface for use by ** shared libraries that want to be imported as extensions into ** an SQLite instance. Shared libraries that intend to be loaded ** as extensions by SQLite should #include this file instead of ** sqlite3.h. */ #ifndef _SQLITE3EXT_H_ #define _SQLITE3EXT_H_ /* #include "sqlite3.h" */ typedef struct sqlite3_api_routines sqlite3_api_routines; /* ** The following structure holds pointers to all of the SQLite API ** routines. ** ** WARNING: In order to maintain backwards compatibility, add new ** interfaces to the end of this structure only. If you insert new ** interfaces in the middle of this structure, then older different ** versions of SQLite will not be able to load each other's shared ** libraries! */ struct sqlite3_api_routines { void * (*aggregate_context)(sqlite3_context*,int nBytes); int (*aggregate_count)(sqlite3_context*); int (*bind_blob)(sqlite3_stmt*,int,const void*,int n,void(*)(void*)); int (*bind_double)(sqlite3_stmt*,int,double); int (*bind_int)(sqlite3_stmt*,int,int); int (*bind_int64)(sqlite3_stmt*,int,sqlite_int64); int (*bind_null)(sqlite3_stmt*,int); int (*bind_parameter_count)(sqlite3_stmt*); int (*bind_parameter_index)(sqlite3_stmt*,const char*zName); const char * (*bind_parameter_name)(sqlite3_stmt*,int); int (*bind_text)(sqlite3_stmt*,int,const char*,int n,void(*)(void*)); int (*bind_text16)(sqlite3_stmt*,int,const void*,int,void(*)(void*)); int (*bind_value)(sqlite3_stmt*,int,const sqlite3_value*); int (*busy_handler)(sqlite3*,int(*)(void*,int),void*); int (*busy_timeout)(sqlite3*,int ms); int (*changes)(sqlite3*); int (*close)(sqlite3*); int (*collation_needed)(sqlite3*,void*,void(*)(void*,sqlite3*, int eTextRep,const char*)); int (*collation_needed16)(sqlite3*,void*,void(*)(void*,sqlite3*, int eTextRep,const void*)); const void * (*column_blob)(sqlite3_stmt*,int iCol); int (*column_bytes)(sqlite3_stmt*,int iCol); int (*column_bytes16)(sqlite3_stmt*,int iCol); int (*column_count)(sqlite3_stmt*pStmt); const char * (*column_database_name)(sqlite3_stmt*,int); const void * (*column_database_name16)(sqlite3_stmt*,int); const char * (*column_decltype)(sqlite3_stmt*,int i); const void * (*column_decltype16)(sqlite3_stmt*,int); double (*column_double)(sqlite3_stmt*,int iCol); int (*column_int)(sqlite3_stmt*,int iCol); sqlite_int64 (*column_int64)(sqlite3_stmt*,int iCol); const char * (*column_name)(sqlite3_stmt*,int); const void * (*column_name16)(sqlite3_stmt*,int); const char * (*column_origin_name)(sqlite3_stmt*,int); const void * (*column_origin_name16)(sqlite3_stmt*,int); const char * (*column_table_name)(sqlite3_stmt*,int); const void * (*column_table_name16)(sqlite3_stmt*,int); const unsigned char * (*column_text)(sqlite3_stmt*,int iCol); const void * (*column_text16)(sqlite3_stmt*,int iCol); int (*column_type)(sqlite3_stmt*,int iCol); sqlite3_value* (*column_value)(sqlite3_stmt*,int iCol); void * (*commit_hook)(sqlite3*,int(*)(void*),void*); int (*complete)(const char*sql); int (*complete16)(const void*sql); int (*create_collation)(sqlite3*,const char*,int,void*, int(*)(void*,int,const void*,int,const void*)); int (*create_collation16)(sqlite3*,const void*,int,void*, int(*)(void*,int,const void*,int,const void*)); int (*create_function)(sqlite3*,const char*,int,int,void*, void (*xFunc)(sqlite3_context*,int,sqlite3_value**), void 
(*xStep)(sqlite3_context*,int,sqlite3_value**), void (*xFinal)(sqlite3_context*)); int (*create_function16)(sqlite3*,const void*,int,int,void*, void (*xFunc)(sqlite3_context*,int,sqlite3_value**), void (*xStep)(sqlite3_context*,int,sqlite3_value**), void (*xFinal)(sqlite3_context*)); int (*create_module)(sqlite3*,const char*,const sqlite3_module*,void*); int (*data_count)(sqlite3_stmt*pStmt); sqlite3 * (*db_handle)(sqlite3_stmt*); int (*declare_vtab)(sqlite3*,const char*); int (*enable_shared_cache)(int); int (*errcode)(sqlite3*db); const char * (*errmsg)(sqlite3*); const void * (*errmsg16)(sqlite3*); int (*exec)(sqlite3*,const char*,sqlite3_callback,void*,char**); int (*expired)(sqlite3_stmt*); int (*finalize)(sqlite3_stmt*pStmt); void (*free)(void*); void (*free_table)(char**result); int (*get_autocommit)(sqlite3*); void * (*get_auxdata)(sqlite3_context*,int); int (*get_table)(sqlite3*,const char*,char***,int*,int*,char**); int (*global_recover)(void); void (*interruptx)(sqlite3*); sqlite_int64 (*last_insert_rowid)(sqlite3*); const char * (*libversion)(void); int (*libversion_number)(void); void *(*malloc)(int); char * (*mprintf)(const char*,...); int (*open)(const char*,sqlite3**); int (*open16)(const void*,sqlite3**); int (*prepare)(sqlite3*,const char*,int,sqlite3_stmt**,const char**); int (*prepare16)(sqlite3*,const void*,int,sqlite3_stmt**,const void**); void * (*profile)(sqlite3*,void(*)(void*,const char*,sqlite_uint64),void*); void (*progress_handler)(sqlite3*,int,int(*)(void*),void*); void *(*realloc)(void*,int); int (*reset)(sqlite3_stmt*pStmt); void (*result_blob)(sqlite3_context*,const void*,int,void(*)(void*)); void (*result_double)(sqlite3_context*,double); void (*result_error)(sqlite3_context*,const char*,int); void (*result_error16)(sqlite3_context*,const void*,int); void (*result_int)(sqlite3_context*,int); void (*result_int64)(sqlite3_context*,sqlite_int64); void (*result_null)(sqlite3_context*); void (*result_text)(sqlite3_context*,const char*,int,void(*)(void*)); void (*result_text16)(sqlite3_context*,const void*,int,void(*)(void*)); void (*result_text16be)(sqlite3_context*,const void*,int,void(*)(void*)); void (*result_text16le)(sqlite3_context*,const void*,int,void(*)(void*)); void (*result_value)(sqlite3_context*,sqlite3_value*); void * (*rollback_hook)(sqlite3*,void(*)(void*),void*); int (*set_authorizer)(sqlite3*,int(*)(void*,int,const char*,const char*, const char*,const char*),void*); void (*set_auxdata)(sqlite3_context*,int,void*,void (*)(void*)); char * (*snprintf)(int,char*,const char*,...); int (*step)(sqlite3_stmt*); int (*table_column_metadata)(sqlite3*,const char*,const char*,const char*, char const**,char const**,int*,int*,int*); void (*thread_cleanup)(void); int (*total_changes)(sqlite3*); void * (*trace)(sqlite3*,void(*xTrace)(void*,const char*),void*); int (*transfer_bindings)(sqlite3_stmt*,sqlite3_stmt*); void * (*update_hook)(sqlite3*,void(*)(void*,int ,char const*,char const*, sqlite_int64),void*); void * (*user_data)(sqlite3_context*); const void * (*value_blob)(sqlite3_value*); int (*value_bytes)(sqlite3_value*); int (*value_bytes16)(sqlite3_value*); double (*value_double)(sqlite3_value*); int (*value_int)(sqlite3_value*); sqlite_int64 (*value_int64)(sqlite3_value*); int (*value_numeric_type)(sqlite3_value*); const unsigned char * (*value_text)(sqlite3_value*); const void * (*value_text16)(sqlite3_value*); const void * (*value_text16be)(sqlite3_value*); const void * (*value_text16le)(sqlite3_value*); int (*value_type)(sqlite3_value*); char 
*(*vmprintf)(const char*,va_list); /* Added ??? */ int (*overload_function)(sqlite3*, const char *zFuncName, int nArg); /* Added by 3.3.13 */ int (*prepare_v2)(sqlite3*,const char*,int,sqlite3_stmt**,const char**); int (*prepare16_v2)(sqlite3*,const void*,int,sqlite3_stmt**,const void**); int (*clear_bindings)(sqlite3_stmt*); /* Added by 3.4.1 */ int (*create_module_v2)(sqlite3*,const char*,const sqlite3_module*,void*, void (*xDestroy)(void *)); /* Added by 3.5.0 */ int (*bind_zeroblob)(sqlite3_stmt*,int,int); int (*blob_bytes)(sqlite3_blob*); int (*blob_close)(sqlite3_blob*); int (*blob_open)(sqlite3*,const char*,const char*,const char*,sqlite3_int64, int,sqlite3_blob**); int (*blob_read)(sqlite3_blob*,void*,int,int); int (*blob_write)(sqlite3_blob*,const void*,int,int); int (*create_collation_v2)(sqlite3*,const char*,int,void*, int(*)(void*,int,const void*,int,const void*), void(*)(void*)); int (*file_control)(sqlite3*,const char*,int,void*); sqlite3_int64 (*memory_highwater)(int); sqlite3_int64 (*memory_used)(void); sqlite3_mutex *(*mutex_alloc)(int); void (*mutex_enter)(sqlite3_mutex*); void (*mutex_free)(sqlite3_mutex*); void (*mutex_leave)(sqlite3_mutex*); int (*mutex_try)(sqlite3_mutex*); int (*open_v2)(const char*,sqlite3**,int,const char*); int (*release_memory)(int); void (*result_error_nomem)(sqlite3_context*); void (*result_error_toobig)(sqlite3_context*); int (*sleep)(int); void (*soft_heap_limit)(int); sqlite3_vfs *(*vfs_find)(const char*); int (*vfs_register)(sqlite3_vfs*,int); int (*vfs_unregister)(sqlite3_vfs*); int (*xthreadsafe)(void); void (*result_zeroblob)(sqlite3_context*,int); void (*result_error_code)(sqlite3_context*,int); int (*test_control)(int, ...); void (*randomness)(int,void*); sqlite3 *(*context_db_handle)(sqlite3_context*); int (*extended_result_codes)(sqlite3*,int); int (*limit)(sqlite3*,int,int); sqlite3_stmt *(*next_stmt)(sqlite3*,sqlite3_stmt*); const char *(*sql)(sqlite3_stmt*); int (*status)(int,int*,int*,int); int (*backup_finish)(sqlite3_backup*); sqlite3_backup *(*backup_init)(sqlite3*,const char*,sqlite3*,const char*); int (*backup_pagecount)(sqlite3_backup*); int (*backup_remaining)(sqlite3_backup*); int (*backup_step)(sqlite3_backup*,int); const char *(*compileoption_get)(int); int (*compileoption_used)(const char*); int (*create_function_v2)(sqlite3*,const char*,int,int,void*, void (*xFunc)(sqlite3_context*,int,sqlite3_value**), void (*xStep)(sqlite3_context*,int,sqlite3_value**), void (*xFinal)(sqlite3_context*), void(*xDestroy)(void*)); int (*db_config)(sqlite3*,int,...); sqlite3_mutex *(*db_mutex)(sqlite3*); int (*db_status)(sqlite3*,int,int*,int*,int); int (*extended_errcode)(sqlite3*); void (*log)(int,const char*,...); sqlite3_int64 (*soft_heap_limit64)(sqlite3_int64); const char *(*sourceid)(void); int (*stmt_status)(sqlite3_stmt*,int,int); int (*strnicmp)(const char*,const char*,int); int (*unlock_notify)(sqlite3*,void(*)(void**,int),void*); int (*wal_autocheckpoint)(sqlite3*,int); int (*wal_checkpoint)(sqlite3*,const char*); void *(*wal_hook)(sqlite3*,int(*)(void*,sqlite3*,const char*,int),void*); int (*blob_reopen)(sqlite3_blob*,sqlite3_int64); int (*vtab_config)(sqlite3*,int op,...); int (*vtab_on_conflict)(sqlite3*); /* Version 3.7.16 and later */ int (*close_v2)(sqlite3*); const char *(*db_filename)(sqlite3*,const char*); int (*db_readonly)(sqlite3*,const char*); int (*db_release_memory)(sqlite3*); const char *(*errstr)(int); int (*stmt_busy)(sqlite3_stmt*); int (*stmt_readonly)(sqlite3_stmt*); int (*stricmp)(const char*,const 
char*); int (*uri_boolean)(const char*,const char*,int); sqlite3_int64 (*uri_int64)(const char*,const char*,sqlite3_int64); const char *(*uri_parameter)(const char*,const char*); char *(*vsnprintf)(int,char*,const char*,va_list); int (*wal_checkpoint_v2)(sqlite3*,const char*,int,int*,int*); /* Version 3.8.7 and later */ int (*auto_extension)(void(*)(void)); int (*bind_blob64)(sqlite3_stmt*,int,const void*,sqlite3_uint64, void(*)(void*)); int (*bind_text64)(sqlite3_stmt*,int,const char*,sqlite3_uint64, void(*)(void*),unsigned char); int (*cancel_auto_extension)(void(*)(void)); int (*load_extension)(sqlite3*,const char*,const char*,char**); void *(*malloc64)(sqlite3_uint64); sqlite3_uint64 (*msize)(void*); void *(*realloc64)(void*,sqlite3_uint64); void (*reset_auto_extension)(void); void (*result_blob64)(sqlite3_context*,const void*,sqlite3_uint64, void(*)(void*)); void (*result_text64)(sqlite3_context*,const char*,sqlite3_uint64, void(*)(void*), unsigned char); int (*strglob)(const char*,const char*); /* Version 3.8.11 and later */ sqlite3_value *(*value_dup)(const sqlite3_value*); void (*value_free)(sqlite3_value*); int (*result_zeroblob64)(sqlite3_context*,sqlite3_uint64); int (*bind_zeroblob64)(sqlite3_stmt*, int, sqlite3_uint64); }; /* ** The following macros redefine the API routines so that they are ** redirected through the global sqlite3_api structure. ** ** This header file is also used by the loadext.c source file ** (part of the main SQLite library - not an extension) so that ** it can get access to the sqlite3_api_routines structure ** definition. But the main library does not want to redefine ** the API. So the redefinition macros are only valid if the ** SQLITE_CORE macros is undefined. */ #ifndef SQLITE_CORE #define sqlite3_aggregate_context sqlite3_api->aggregate_context #ifndef SQLITE_OMIT_DEPRECATED #define sqlite3_aggregate_count sqlite3_api->aggregate_count #endif #define sqlite3_bind_blob sqlite3_api->bind_blob #define sqlite3_bind_double sqlite3_api->bind_double #define sqlite3_bind_int sqlite3_api->bind_int #define sqlite3_bind_int64 sqlite3_api->bind_int64 #define sqlite3_bind_null sqlite3_api->bind_null #define sqlite3_bind_parameter_count sqlite3_api->bind_parameter_count #define sqlite3_bind_parameter_index sqlite3_api->bind_parameter_index #define sqlite3_bind_parameter_name sqlite3_api->bind_parameter_name #define sqlite3_bind_text sqlite3_api->bind_text #define sqlite3_bind_text16 sqlite3_api->bind_text16 #define sqlite3_bind_value sqlite3_api->bind_value #define sqlite3_busy_handler sqlite3_api->busy_handler #define sqlite3_busy_timeout sqlite3_api->busy_timeout #define sqlite3_changes sqlite3_api->changes #define sqlite3_close sqlite3_api->close #define sqlite3_collation_needed sqlite3_api->collation_needed #define sqlite3_collation_needed16 sqlite3_api->collation_needed16 #define sqlite3_column_blob sqlite3_api->column_blob #define sqlite3_column_bytes sqlite3_api->column_bytes #define sqlite3_column_bytes16 sqlite3_api->column_bytes16 #define sqlite3_column_count sqlite3_api->column_count #define sqlite3_column_database_name sqlite3_api->column_database_name #define sqlite3_column_database_name16 sqlite3_api->column_database_name16 #define sqlite3_column_decltype sqlite3_api->column_decltype #define sqlite3_column_decltype16 sqlite3_api->column_decltype16 #define sqlite3_column_double sqlite3_api->column_double #define sqlite3_column_int sqlite3_api->column_int #define sqlite3_column_int64 sqlite3_api->column_int64 #define sqlite3_column_name 
sqlite3_api->column_name #define sqlite3_column_name16 sqlite3_api->column_name16 #define sqlite3_column_origin_name sqlite3_api->column_origin_name #define sqlite3_column_origin_name16 sqlite3_api->column_origin_name16 #define sqlite3_column_table_name sqlite3_api->column_table_name #define sqlite3_column_table_name16 sqlite3_api->column_table_name16 #define sqlite3_column_text sqlite3_api->column_text #define sqlite3_column_text16 sqlite3_api->column_text16 #define sqlite3_column_type sqlite3_api->column_type #define sqlite3_column_value sqlite3_api->column_value #define sqlite3_commit_hook sqlite3_api->commit_hook #define sqlite3_complete sqlite3_api->complete #define sqlite3_complete16 sqlite3_api->complete16 #define sqlite3_create_collation sqlite3_api->create_collation #define sqlite3_create_collation16 sqlite3_api->create_collation16 #define sqlite3_create_function sqlite3_api->create_function #define sqlite3_create_function16 sqlite3_api->create_function16 #define sqlite3_create_module sqlite3_api->create_module #define sqlite3_create_module_v2 sqlite3_api->create_module_v2 #define sqlite3_data_count sqlite3_api->data_count #define sqlite3_db_handle sqlite3_api->db_handle #define sqlite3_declare_vtab sqlite3_api->declare_vtab #define sqlite3_enable_shared_cache sqlite3_api->enable_shared_cache #define sqlite3_errcode sqlite3_api->errcode #define sqlite3_errmsg sqlite3_api->errmsg #define sqlite3_errmsg16 sqlite3_api->errmsg16 #define sqlite3_exec sqlite3_api->exec #ifndef SQLITE_OMIT_DEPRECATED #define sqlite3_expired sqlite3_api->expired #endif #define sqlite3_finalize sqlite3_api->finalize #define sqlite3_free sqlite3_api->free #define sqlite3_free_table sqlite3_api->free_table #define sqlite3_get_autocommit sqlite3_api->get_autocommit #define sqlite3_get_auxdata sqlite3_api->get_auxdata #define sqlite3_get_table sqlite3_api->get_table #ifndef SQLITE_OMIT_DEPRECATED #define sqlite3_global_recover sqlite3_api->global_recover #endif #define sqlite3_interrupt sqlite3_api->interruptx #define sqlite3_last_insert_rowid sqlite3_api->last_insert_rowid #define sqlite3_libversion sqlite3_api->libversion #define sqlite3_libversion_number sqlite3_api->libversion_number #define sqlite3_malloc sqlite3_api->malloc #define sqlite3_mprintf sqlite3_api->mprintf #define sqlite3_open sqlite3_api->open #define sqlite3_open16 sqlite3_api->open16 #define sqlite3_prepare sqlite3_api->prepare #define sqlite3_prepare16 sqlite3_api->prepare16 #define sqlite3_prepare_v2 sqlite3_api->prepare_v2 #define sqlite3_prepare16_v2 sqlite3_api->prepare16_v2 #define sqlite3_profile sqlite3_api->profile #define sqlite3_progress_handler sqlite3_api->progress_handler #define sqlite3_realloc sqlite3_api->realloc #define sqlite3_reset sqlite3_api->reset #define sqlite3_result_blob sqlite3_api->result_blob #define sqlite3_result_double sqlite3_api->result_double #define sqlite3_result_error sqlite3_api->result_error #define sqlite3_result_error16 sqlite3_api->result_error16 #define sqlite3_result_int sqlite3_api->result_int #define sqlite3_result_int64 sqlite3_api->result_int64 #define sqlite3_result_null sqlite3_api->result_null #define sqlite3_result_text sqlite3_api->result_text #define sqlite3_result_text16 sqlite3_api->result_text16 #define sqlite3_result_text16be sqlite3_api->result_text16be #define sqlite3_result_text16le sqlite3_api->result_text16le #define sqlite3_result_value sqlite3_api->result_value #define sqlite3_rollback_hook sqlite3_api->rollback_hook #define sqlite3_set_authorizer 
sqlite3_api->set_authorizer #define sqlite3_set_auxdata sqlite3_api->set_auxdata #define sqlite3_snprintf sqlite3_api->snprintf #define sqlite3_step sqlite3_api->step #define sqlite3_table_column_metadata sqlite3_api->table_column_metadata #define sqlite3_thread_cleanup sqlite3_api->thread_cleanup #define sqlite3_total_changes sqlite3_api->total_changes #define sqlite3_trace sqlite3_api->trace #ifndef SQLITE_OMIT_DEPRECATED #define sqlite3_transfer_bindings sqlite3_api->transfer_bindings #endif #define sqlite3_update_hook sqlite3_api->update_hook #define sqlite3_user_data sqlite3_api->user_data #define sqlite3_value_blob sqlite3_api->value_blob #define sqlite3_value_bytes sqlite3_api->value_bytes #define sqlite3_value_bytes16 sqlite3_api->value_bytes16 #define sqlite3_value_double sqlite3_api->value_double #define sqlite3_value_int sqlite3_api->value_int #define sqlite3_value_int64 sqlite3_api->value_int64 #define sqlite3_value_numeric_type sqlite3_api->value_numeric_type #define sqlite3_value_text sqlite3_api->value_text #define sqlite3_value_text16 sqlite3_api->value_text16 #define sqlite3_value_text16be sqlite3_api->value_text16be #define sqlite3_value_text16le sqlite3_api->value_text16le #define sqlite3_value_type sqlite3_api->value_type #define sqlite3_vmprintf sqlite3_api->vmprintf #define sqlite3_overload_function sqlite3_api->overload_function #define sqlite3_prepare_v2 sqlite3_api->prepare_v2 #define sqlite3_prepare16_v2 sqlite3_api->prepare16_v2 #define sqlite3_clear_bindings sqlite3_api->clear_bindings #define sqlite3_bind_zeroblob sqlite3_api->bind_zeroblob #define sqlite3_blob_bytes sqlite3_api->blob_bytes #define sqlite3_blob_close sqlite3_api->blob_close #define sqlite3_blob_open sqlite3_api->blob_open #define sqlite3_blob_read sqlite3_api->blob_read #define sqlite3_blob_write sqlite3_api->blob_write #define sqlite3_create_collation_v2 sqlite3_api->create_collation_v2 #define sqlite3_file_control sqlite3_api->file_control #define sqlite3_memory_highwater sqlite3_api->memory_highwater #define sqlite3_memory_used sqlite3_api->memory_used #define sqlite3_mutex_alloc sqlite3_api->mutex_alloc #define sqlite3_mutex_enter sqlite3_api->mutex_enter #define sqlite3_mutex_free sqlite3_api->mutex_free #define sqlite3_mutex_leave sqlite3_api->mutex_leave #define sqlite3_mutex_try sqlite3_api->mutex_try #define sqlite3_open_v2 sqlite3_api->open_v2 #define sqlite3_release_memory sqlite3_api->release_memory #define sqlite3_result_error_nomem sqlite3_api->result_error_nomem #define sqlite3_result_error_toobig sqlite3_api->result_error_toobig #define sqlite3_sleep sqlite3_api->sleep #define sqlite3_soft_heap_limit sqlite3_api->soft_heap_limit #define sqlite3_vfs_find sqlite3_api->vfs_find #define sqlite3_vfs_register sqlite3_api->vfs_register #define sqlite3_vfs_unregister sqlite3_api->vfs_unregister #define sqlite3_threadsafe sqlite3_api->xthreadsafe #define sqlite3_result_zeroblob sqlite3_api->result_zeroblob #define sqlite3_result_error_code sqlite3_api->result_error_code #define sqlite3_test_control sqlite3_api->test_control #define sqlite3_randomness sqlite3_api->randomness #define sqlite3_context_db_handle sqlite3_api->context_db_handle #define sqlite3_extended_result_codes sqlite3_api->extended_result_codes #define sqlite3_limit sqlite3_api->limit #define sqlite3_next_stmt sqlite3_api->next_stmt #define sqlite3_sql sqlite3_api->sql #define sqlite3_status sqlite3_api->status #define sqlite3_backup_finish sqlite3_api->backup_finish #define sqlite3_backup_init sqlite3_api->backup_init 
#define sqlite3_backup_pagecount sqlite3_api->backup_pagecount #define sqlite3_backup_remaining sqlite3_api->backup_remaining #define sqlite3_backup_step sqlite3_api->backup_step #define sqlite3_compileoption_get sqlite3_api->compileoption_get #define sqlite3_compileoption_used sqlite3_api->compileoption_used #define sqlite3_create_function_v2 sqlite3_api->create_function_v2 #define sqlite3_db_config sqlite3_api->db_config #define sqlite3_db_mutex sqlite3_api->db_mutex #define sqlite3_db_status sqlite3_api->db_status #define sqlite3_extended_errcode sqlite3_api->extended_errcode #define sqlite3_log sqlite3_api->log #define sqlite3_soft_heap_limit64 sqlite3_api->soft_heap_limit64 #define sqlite3_sourceid sqlite3_api->sourceid #define sqlite3_stmt_status sqlite3_api->stmt_status #define sqlite3_strnicmp sqlite3_api->strnicmp #define sqlite3_unlock_notify sqlite3_api->unlock_notify #define sqlite3_wal_autocheckpoint sqlite3_api->wal_autocheckpoint #define sqlite3_wal_checkpoint sqlite3_api->wal_checkpoint #define sqlite3_wal_hook sqlite3_api->wal_hook #define sqlite3_blob_reopen sqlite3_api->blob_reopen #define sqlite3_vtab_config sqlite3_api->vtab_config #define sqlite3_vtab_on_conflict sqlite3_api->vtab_on_conflict /* Version 3.7.16 and later */ #define sqlite3_close_v2 sqlite3_api->close_v2 #define sqlite3_db_filename sqlite3_api->db_filename #define sqlite3_db_readonly sqlite3_api->db_readonly #define sqlite3_db_release_memory sqlite3_api->db_release_memory #define sqlite3_errstr sqlite3_api->errstr #define sqlite3_stmt_busy sqlite3_api->stmt_busy #define sqlite3_stmt_readonly sqlite3_api->stmt_readonly #define sqlite3_stricmp sqlite3_api->stricmp #define sqlite3_uri_boolean sqlite3_api->uri_boolean #define sqlite3_uri_int64 sqlite3_api->uri_int64 #define sqlite3_uri_parameter sqlite3_api->uri_parameter #define sqlite3_uri_vsnprintf sqlite3_api->vsnprintf #define sqlite3_wal_checkpoint_v2 sqlite3_api->wal_checkpoint_v2 /* Version 3.8.7 and later */ #define sqlite3_auto_extension sqlite3_api->auto_extension #define sqlite3_bind_blob64 sqlite3_api->bind_blob64 #define sqlite3_bind_text64 sqlite3_api->bind_text64 #define sqlite3_cancel_auto_extension sqlite3_api->cancel_auto_extension #define sqlite3_load_extension sqlite3_api->load_extension #define sqlite3_malloc64 sqlite3_api->malloc64 #define sqlite3_msize sqlite3_api->msize #define sqlite3_realloc64 sqlite3_api->realloc64 #define sqlite3_reset_auto_extension sqlite3_api->reset_auto_extension #define sqlite3_result_blob64 sqlite3_api->result_blob64 #define sqlite3_result_text64 sqlite3_api->result_text64 #define sqlite3_strglob sqlite3_api->strglob /* Version 3.8.11 and later */ #define sqlite3_value_dup sqlite3_api->value_dup #define sqlite3_value_free sqlite3_api->value_free #define sqlite3_result_zeroblob64 sqlite3_api->result_zeroblob64 #define sqlite3_bind_zeroblob64 sqlite3_api->bind_zeroblob64 #endif /* SQLITE_CORE */ #ifndef SQLITE_CORE /* This case when the file really is being compiled as a loadable ** extension */ # define SQLITE_EXTENSION_INIT1 const sqlite3_api_routines *sqlite3_api=0; # define SQLITE_EXTENSION_INIT2(v) sqlite3_api=v; # define SQLITE_EXTENSION_INIT3 \ extern const sqlite3_api_routines *sqlite3_api; #else /* This case when the file is being statically linked into the ** application */ # define SQLITE_EXTENSION_INIT1 /*no-op*/ # define SQLITE_EXTENSION_INIT2(v) (void)v; /* unused parameter */ # define SQLITE_EXTENSION_INIT3 /*no-op*/ #endif #endif /* _SQLITE3EXT_H_ */ /************** End of sqlite3ext.h 
******************************************/ /************** Continuing where we left off in loadext.c ********************/ /* #include "sqliteInt.h" */ /* #include */ #ifndef SQLITE_OMIT_LOAD_EXTENSION /* ** Some API routines are omitted when various features are ** excluded from a build of SQLite. Substitute a NULL pointer ** for any missing APIs. */ #ifndef SQLITE_ENABLE_COLUMN_METADATA # define sqlite3_column_database_name 0 # define sqlite3_column_database_name16 0 # define sqlite3_column_table_name 0 # define sqlite3_column_table_name16 0 # define sqlite3_column_origin_name 0 # define sqlite3_column_origin_name16 0 #endif #ifdef SQLITE_OMIT_AUTHORIZATION # define sqlite3_set_authorizer 0 #endif #ifdef SQLITE_OMIT_UTF16 # define sqlite3_bind_text16 0 # define sqlite3_collation_needed16 0 # define sqlite3_column_decltype16 0 # define sqlite3_column_name16 0 # define sqlite3_column_text16 0 # define sqlite3_complete16 0 # define sqlite3_create_collation16 0 # define sqlite3_create_function16 0 # define sqlite3_errmsg16 0 # define sqlite3_open16 0 # define sqlite3_prepare16 0 # define sqlite3_prepare16_v2 0 # define sqlite3_result_error16 0 # define sqlite3_result_text16 0 # define sqlite3_result_text16be 0 # define sqlite3_result_text16le 0 # define sqlite3_value_text16 0 # define sqlite3_value_text16be 0 # define sqlite3_value_text16le 0 # define sqlite3_column_database_name16 0 # define sqlite3_column_table_name16 0 # define sqlite3_column_origin_name16 0 #endif #ifdef SQLITE_OMIT_COMPLETE # define sqlite3_complete 0 # define sqlite3_complete16 0 #endif #ifdef SQLITE_OMIT_DECLTYPE # define sqlite3_column_decltype16 0 # define sqlite3_column_decltype 0 #endif #ifdef SQLITE_OMIT_PROGRESS_CALLBACK # define sqlite3_progress_handler 0 #endif #ifdef SQLITE_OMIT_VIRTUALTABLE # define sqlite3_create_module 0 # define sqlite3_create_module_v2 0 # define sqlite3_declare_vtab 0 # define sqlite3_vtab_config 0 # define sqlite3_vtab_on_conflict 0 #endif #ifdef SQLITE_OMIT_SHARED_CACHE # define sqlite3_enable_shared_cache 0 #endif #ifdef SQLITE_OMIT_TRACE # define sqlite3_profile 0 # define sqlite3_trace 0 #endif #ifdef SQLITE_OMIT_GET_TABLE # define sqlite3_free_table 0 # define sqlite3_get_table 0 #endif #ifdef SQLITE_OMIT_INCRBLOB #define sqlite3_bind_zeroblob 0 #define sqlite3_blob_bytes 0 #define sqlite3_blob_close 0 #define sqlite3_blob_open 0 #define sqlite3_blob_read 0 #define sqlite3_blob_write 0 #define sqlite3_blob_reopen 0 #endif /* ** The following structure contains pointers to all SQLite API routines. ** A pointer to this structure is passed into extensions when they are ** loaded so that the extension can make calls back into the SQLite ** library. ** ** When adding new APIs, add them to the bottom of this structure ** in order to preserve backwards compatibility. ** ** Extensions that use newer APIs should first call the ** sqlite3_libversion_number() to make sure that the API they ** intend to use is supported by the library. Extensions should ** also check to make sure that the pointer to the function is ** not NULL before calling it. 
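**
** For example, an extension wanting to call sqlite3_strglob() (exported
** through this structure in the "Version 3.8.7 and later" group below)
** might guard the call roughly as follows; zPattern and zStr are
** hypothetical locals, not part of this file:
**
**       int match = 0;
**       if( sqlite3_libversion_number()>=3008007 && sqlite3_strglob!=0 ){
**         match = sqlite3_strglob(zPattern, zStr)==0;
**       }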
*/ static const sqlite3_api_routines sqlite3Apis = { sqlite3_aggregate_context, #ifndef SQLITE_OMIT_DEPRECATED sqlite3_aggregate_count, #else 0, #endif sqlite3_bind_blob, sqlite3_bind_double, sqlite3_bind_int, sqlite3_bind_int64, sqlite3_bind_null, sqlite3_bind_parameter_count, sqlite3_bind_parameter_index, sqlite3_bind_parameter_name, sqlite3_bind_text, sqlite3_bind_text16, sqlite3_bind_value, sqlite3_busy_handler, sqlite3_busy_timeout, sqlite3_changes, sqlite3_close, sqlite3_collation_needed, sqlite3_collation_needed16, sqlite3_column_blob, sqlite3_column_bytes, sqlite3_column_bytes16, sqlite3_column_count, sqlite3_column_database_name, sqlite3_column_database_name16, sqlite3_column_decltype, sqlite3_column_decltype16, sqlite3_column_double, sqlite3_column_int, sqlite3_column_int64, sqlite3_column_name, sqlite3_column_name16, sqlite3_column_origin_name, sqlite3_column_origin_name16, sqlite3_column_table_name, sqlite3_column_table_name16, sqlite3_column_text, sqlite3_column_text16, sqlite3_column_type, sqlite3_column_value, sqlite3_commit_hook, sqlite3_complete, sqlite3_complete16, sqlite3_create_collation, sqlite3_create_collation16, sqlite3_create_function, sqlite3_create_function16, sqlite3_create_module, sqlite3_data_count, sqlite3_db_handle, sqlite3_declare_vtab, sqlite3_enable_shared_cache, sqlite3_errcode, sqlite3_errmsg, sqlite3_errmsg16, sqlite3_exec, #ifndef SQLITE_OMIT_DEPRECATED sqlite3_expired, #else 0, #endif sqlite3_finalize, sqlite3_free, sqlite3_free_table, sqlite3_get_autocommit, sqlite3_get_auxdata, sqlite3_get_table, 0, /* Was sqlite3_global_recover(), but that function is deprecated */ sqlite3_interrupt, sqlite3_last_insert_rowid, sqlite3_libversion, sqlite3_libversion_number, sqlite3_malloc, sqlite3_mprintf, sqlite3_open, sqlite3_open16, sqlite3_prepare, sqlite3_prepare16, sqlite3_profile, sqlite3_progress_handler, sqlite3_realloc, sqlite3_reset, sqlite3_result_blob, sqlite3_result_double, sqlite3_result_error, sqlite3_result_error16, sqlite3_result_int, sqlite3_result_int64, sqlite3_result_null, sqlite3_result_text, sqlite3_result_text16, sqlite3_result_text16be, sqlite3_result_text16le, sqlite3_result_value, sqlite3_rollback_hook, sqlite3_set_authorizer, sqlite3_set_auxdata, sqlite3_snprintf, sqlite3_step, sqlite3_table_column_metadata, #ifndef SQLITE_OMIT_DEPRECATED sqlite3_thread_cleanup, #else 0, #endif sqlite3_total_changes, sqlite3_trace, #ifndef SQLITE_OMIT_DEPRECATED sqlite3_transfer_bindings, #else 0, #endif sqlite3_update_hook, sqlite3_user_data, sqlite3_value_blob, sqlite3_value_bytes, sqlite3_value_bytes16, sqlite3_value_double, sqlite3_value_int, sqlite3_value_int64, sqlite3_value_numeric_type, sqlite3_value_text, sqlite3_value_text16, sqlite3_value_text16be, sqlite3_value_text16le, sqlite3_value_type, sqlite3_vmprintf, /* ** The original API set ends here. All extensions can call any ** of the APIs above provided that the pointer is not NULL. But ** before calling APIs that follow, extension should check the ** sqlite3_libversion_number() to make sure they are dealing with ** a library that is new enough to support that API. 
************************************************************************* */ sqlite3_overload_function, /* ** Added after 3.3.13 */ sqlite3_prepare_v2, sqlite3_prepare16_v2, sqlite3_clear_bindings, /* ** Added for 3.4.1 */ sqlite3_create_module_v2, /* ** Added for 3.5.0 */ sqlite3_bind_zeroblob, sqlite3_blob_bytes, sqlite3_blob_close, sqlite3_blob_open, sqlite3_blob_read, sqlite3_blob_write, sqlite3_create_collation_v2, sqlite3_file_control, sqlite3_memory_highwater, sqlite3_memory_used, #ifdef SQLITE_MUTEX_OMIT 0, 0, 0, 0, 0, #else sqlite3_mutex_alloc, sqlite3_mutex_enter, sqlite3_mutex_free, sqlite3_mutex_leave, sqlite3_mutex_try, #endif sqlite3_open_v2, sqlite3_release_memory, sqlite3_result_error_nomem, sqlite3_result_error_toobig, sqlite3_sleep, sqlite3_soft_heap_limit, sqlite3_vfs_find, sqlite3_vfs_register, sqlite3_vfs_unregister, /* ** Added for 3.5.8 */ sqlite3_threadsafe, sqlite3_result_zeroblob, sqlite3_result_error_code, sqlite3_test_control, sqlite3_randomness, sqlite3_context_db_handle, /* ** Added for 3.6.0 */ sqlite3_extended_result_codes, sqlite3_limit, sqlite3_next_stmt, sqlite3_sql, sqlite3_status, /* ** Added for 3.7.4 */ sqlite3_backup_finish, sqlite3_backup_init, sqlite3_backup_pagecount, sqlite3_backup_remaining, sqlite3_backup_step, #ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS sqlite3_compileoption_get, sqlite3_compileoption_used, #else 0, 0, #endif sqlite3_create_function_v2, sqlite3_db_config, sqlite3_db_mutex, sqlite3_db_status, sqlite3_extended_errcode, sqlite3_log, sqlite3_soft_heap_limit64, sqlite3_sourceid, sqlite3_stmt_status, sqlite3_strnicmp, #ifdef SQLITE_ENABLE_UNLOCK_NOTIFY sqlite3_unlock_notify, #else 0, #endif #ifndef SQLITE_OMIT_WAL sqlite3_wal_autocheckpoint, sqlite3_wal_checkpoint, sqlite3_wal_hook, #else 0, 0, 0, #endif sqlite3_blob_reopen, sqlite3_vtab_config, sqlite3_vtab_on_conflict, sqlite3_close_v2, sqlite3_db_filename, sqlite3_db_readonly, sqlite3_db_release_memory, sqlite3_errstr, sqlite3_stmt_busy, sqlite3_stmt_readonly, sqlite3_stricmp, sqlite3_uri_boolean, sqlite3_uri_int64, sqlite3_uri_parameter, sqlite3_vsnprintf, sqlite3_wal_checkpoint_v2, /* Version 3.8.7 and later */ sqlite3_auto_extension, sqlite3_bind_blob64, sqlite3_bind_text64, sqlite3_cancel_auto_extension, sqlite3_load_extension, sqlite3_malloc64, sqlite3_msize, sqlite3_realloc64, sqlite3_reset_auto_extension, sqlite3_result_blob64, sqlite3_result_text64, sqlite3_strglob, /* Version 3.8.11 and later */ (sqlite3_value*(*)(const sqlite3_value*))sqlite3_value_dup, sqlite3_value_free, sqlite3_result_zeroblob64, sqlite3_bind_zeroblob64 }; /* ** Attempt to load an SQLite extension library contained in the file ** zFile. The entry point is zProc. zProc may be 0 in which case a ** default entry point name (sqlite3_extension_init) is used. Use ** of the default name is recommended. ** ** Return SQLITE_OK on success and SQLITE_ERROR if something goes wrong. ** ** If an error occurs and pzErrMsg is not 0, then fill *pzErrMsg with ** error message text. The calling function should free this memory ** by calling sqlite3DbFree(db, ). */ static int sqlite3LoadExtension( sqlite3 *db, /* Load the extension into this database connection */ const char *zFile, /* Name of the shared library containing extension */ const char *zProc, /* Entry point. 
Use "sqlite3_extension_init" if 0 */ char **pzErrMsg /* Put error message here if not 0 */ ){ sqlite3_vfs *pVfs = db->pVfs; void *handle; int (*xInit)(sqlite3*,char**,const sqlite3_api_routines*); char *zErrmsg = 0; const char *zEntry; char *zAltEntry = 0; void **aHandle; u64 nMsg = 300 + sqlite3Strlen30(zFile); int ii; /* Shared library endings to try if zFile cannot be loaded as written */ static const char *azEndings[] = { #if SQLITE_OS_WIN "dll" #elif defined(__APPLE__) "dylib" #else "so" #endif }; if( pzErrMsg ) *pzErrMsg = 0; /* Ticket #1863. To avoid a creating security problems for older ** applications that relink against newer versions of SQLite, the ** ability to run load_extension is turned off by default. One ** must call sqlite3_enable_load_extension() to turn on extension ** loading. Otherwise you get the following error. */ if( (db->flags & SQLITE_LoadExtension)==0 ){ if( pzErrMsg ){ *pzErrMsg = sqlite3_mprintf("not authorized"); } return SQLITE_ERROR; } zEntry = zProc ? zProc : "sqlite3_extension_init"; handle = sqlite3OsDlOpen(pVfs, zFile); #if SQLITE_OS_UNIX || SQLITE_OS_WIN for(ii=0; ii sqlite3_example_init ** C:/lib/mathfuncs.dll ==> sqlite3_mathfuncs_init */ if( xInit==0 && zProc==0 ){ int iFile, iEntry, c; int ncFile = sqlite3Strlen30(zFile); zAltEntry = sqlite3_malloc64(ncFile+30); if( zAltEntry==0 ){ sqlite3OsDlClose(pVfs, handle); return SQLITE_NOMEM; } memcpy(zAltEntry, "sqlite3_", 8); for(iFile=ncFile-1; iFile>=0 && zFile[iFile]!='/'; iFile--){} iFile++; if( sqlite3_strnicmp(zFile+iFile, "lib", 3)==0 ) iFile += 3; for(iEntry=8; (c = zFile[iFile])!=0 && c!='.'; iFile++){ if( sqlite3Isalpha(c) ){ zAltEntry[iEntry++] = (char)sqlite3UpperToLower[(unsigned)c]; } } memcpy(zAltEntry+iEntry, "_init", 6); zEntry = zAltEntry; xInit = (int(*)(sqlite3*,char**,const sqlite3_api_routines*)) sqlite3OsDlSym(pVfs, handle, zEntry); } if( xInit==0 ){ if( pzErrMsg ){ nMsg += sqlite3Strlen30(zEntry); *pzErrMsg = zErrmsg = sqlite3_malloc64(nMsg); if( zErrmsg ){ sqlite3_snprintf(nMsg, zErrmsg, "no entry point [%s] in shared library [%s]", zEntry, zFile); sqlite3OsDlError(pVfs, nMsg-1, zErrmsg); } } sqlite3OsDlClose(pVfs, handle); sqlite3_free(zAltEntry); return SQLITE_ERROR; } sqlite3_free(zAltEntry); if( xInit(db, &zErrmsg, &sqlite3Apis) ){ if( pzErrMsg ){ *pzErrMsg = sqlite3_mprintf("error during initialization: %s", zErrmsg); } sqlite3_free(zErrmsg); sqlite3OsDlClose(pVfs, handle); return SQLITE_ERROR; } /* Append the new shared library handle to the db->aExtension array. */ aHandle = sqlite3DbMallocZero(db, sizeof(handle)*(db->nExtension+1)); if( aHandle==0 ){ return SQLITE_NOMEM; } if( db->nExtension>0 ){ memcpy(aHandle, db->aExtension, sizeof(handle)*db->nExtension); } sqlite3DbFree(db, db->aExtension); db->aExtension = aHandle; db->aExtension[db->nExtension++] = handle; return SQLITE_OK; } SQLITE_API int SQLITE_STDCALL sqlite3_load_extension( sqlite3 *db, /* Load the extension into this database connection */ const char *zFile, /* Name of the shared library containing extension */ const char *zProc, /* Entry point. 
Use "sqlite3_extension_init" if 0 */ char **pzErrMsg /* Put error message here if not 0 */ ){ int rc; sqlite3_mutex_enter(db->mutex); rc = sqlite3LoadExtension(db, zFile, zProc, pzErrMsg); rc = sqlite3ApiExit(db, rc); sqlite3_mutex_leave(db->mutex); return rc; } /* ** Call this routine when the database connection is closing in order ** to clean up loaded extensions */ SQLITE_PRIVATE void sqlite3CloseExtensions(sqlite3 *db){ int i; assert( sqlite3_mutex_held(db->mutex) ); for(i=0; inExtension; i++){ sqlite3OsDlClose(db->pVfs, db->aExtension[i]); } sqlite3DbFree(db, db->aExtension); } /* ** Enable or disable extension loading. Extension loading is disabled by ** default so as not to open security holes in older applications. */ SQLITE_API int SQLITE_STDCALL sqlite3_enable_load_extension(sqlite3 *db, int onoff){ sqlite3_mutex_enter(db->mutex); if( onoff ){ db->flags |= SQLITE_LoadExtension; }else{ db->flags &= ~SQLITE_LoadExtension; } sqlite3_mutex_leave(db->mutex); return SQLITE_OK; } #endif /* SQLITE_OMIT_LOAD_EXTENSION */ /* ** The auto-extension code added regardless of whether or not extension ** loading is supported. We need a dummy sqlite3Apis pointer for that ** code if regular extension loading is not available. This is that ** dummy pointer. */ #ifdef SQLITE_OMIT_LOAD_EXTENSION static const sqlite3_api_routines sqlite3Apis = { 0 }; #endif /* ** The following object holds the list of automatically loaded ** extensions. ** ** This list is shared across threads. The SQLITE_MUTEX_STATIC_MASTER ** mutex must be held while accessing this list. */ typedef struct sqlite3AutoExtList sqlite3AutoExtList; static SQLITE_WSD struct sqlite3AutoExtList { u32 nExt; /* Number of entries in aExt[] */ void (**aExt)(void); /* Pointers to the extension init functions */ } sqlite3Autoext = { 0, 0 }; /* The "wsdAutoext" macro will resolve to the autoextension ** state vector. If writable static data is unsupported on the target, ** we have to locate the state vector at run-time. In the more common ** case where writable static data is supported, wsdStat can refer directly ** to the "sqlite3Autoext" state vector declared above. */ #ifdef SQLITE_OMIT_WSD # define wsdAutoextInit \ sqlite3AutoExtList *x = &GLOBAL(sqlite3AutoExtList,sqlite3Autoext) # define wsdAutoext x[0] #else # define wsdAutoextInit # define wsdAutoext sqlite3Autoext #endif /* ** Register a statically linked extension that is automatically ** loaded by every new database connection. */ SQLITE_API int SQLITE_STDCALL sqlite3_auto_extension(void (*xInit)(void)){ int rc = SQLITE_OK; #ifndef SQLITE_OMIT_AUTOINIT rc = sqlite3_initialize(); if( rc ){ return rc; }else #endif { u32 i; #if SQLITE_THREADSAFE sqlite3_mutex *mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER); #endif wsdAutoextInit; sqlite3_mutex_enter(mutex); for(i=0; i=0; i--){ if( wsdAutoext.aExt[i]==xInit ){ wsdAutoext.nExt--; wsdAutoext.aExt[i] = wsdAutoext.aExt[wsdAutoext.nExt]; n++; break; } } sqlite3_mutex_leave(mutex); return n; } /* ** Reset the automatic extension loading mechanism. */ SQLITE_API void SQLITE_STDCALL sqlite3_reset_auto_extension(void){ #ifndef SQLITE_OMIT_AUTOINIT if( sqlite3_initialize()==SQLITE_OK ) #endif { #if SQLITE_THREADSAFE sqlite3_mutex *mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER); #endif wsdAutoextInit; sqlite3_mutex_enter(mutex); sqlite3_free(wsdAutoext.aExt); wsdAutoext.aExt = 0; wsdAutoext.nExt = 0; sqlite3_mutex_leave(mutex); } } /* ** Load all automatic extensions. 
** ** If anything goes wrong, set an error in the database connection. */ SQLITE_PRIVATE void sqlite3AutoLoadExtensions(sqlite3 *db){ u32 i; int go = 1; int rc; int (*xInit)(sqlite3*,char**,const sqlite3_api_routines*); wsdAutoextInit; if( wsdAutoext.nExt==0 ){ /* Common case: early out without every having to acquire a mutex */ return; } for(i=0; go; i++){ char *zErrmsg; #if SQLITE_THREADSAFE sqlite3_mutex *mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER); #endif sqlite3_mutex_enter(mutex); if( i>=wsdAutoext.nExt ){ xInit = 0; go = 0; }else{ xInit = (int(*)(sqlite3*,char**,const sqlite3_api_routines*)) wsdAutoext.aExt[i]; } sqlite3_mutex_leave(mutex); zErrmsg = 0; if( xInit && (rc = xInit(db, &zErrmsg, &sqlite3Apis))!=0 ){ sqlite3ErrorWithMsg(db, rc, "automatic extension loading failed: %s", zErrmsg); go = 0; } sqlite3_free(zErrmsg); } } /************** End of loadext.c *********************************************/ /************** Begin file pragma.c ******************************************/ /* ** 2003 April 6 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** This file contains code used to implement the PRAGMA command. */ /* #include "sqliteInt.h" */ #if !defined(SQLITE_ENABLE_LOCKING_STYLE) # if defined(__APPLE__) # define SQLITE_ENABLE_LOCKING_STYLE 1 # else # define SQLITE_ENABLE_LOCKING_STYLE 0 # endif #endif /*************************************************************************** ** The "pragma.h" include file is an automatically generated file that ** that includes the PragType_XXXX macro definitions and the aPragmaName[] ** object. This ensures that the aPragmaName[] table is arranged in ** lexicographical order to facility a binary search of the pragma name. ** Do not edit pragma.h directly. Edit and rerun the script in at ** ../tool/mkpragmatab.tcl. */ /************** Include pragma.h in the middle of pragma.c *******************/ /************** Begin file pragma.h ******************************************/ /* DO NOT EDIT! ** This file is automatically generated by the script at ** ../tool/mkpragmatab.tcl. To update the set of pragmas, edit ** that script and rerun it. 
*/ #define PragTyp_HEADER_VALUE 0 #define PragTyp_AUTO_VACUUM 1 #define PragTyp_FLAG 2 #define PragTyp_BUSY_TIMEOUT 3 #define PragTyp_CACHE_SIZE 4 #define PragTyp_CASE_SENSITIVE_LIKE 5 #define PragTyp_COLLATION_LIST 6 #define PragTyp_COMPILE_OPTIONS 7 #define PragTyp_DATA_STORE_DIRECTORY 8 #define PragTyp_DATABASE_LIST 9 #define PragTyp_DEFAULT_CACHE_SIZE 10 #define PragTyp_ENCODING 11 #define PragTyp_FOREIGN_KEY_CHECK 12 #define PragTyp_FOREIGN_KEY_LIST 13 #define PragTyp_INCREMENTAL_VACUUM 14 #define PragTyp_INDEX_INFO 15 #define PragTyp_INDEX_LIST 16 #define PragTyp_INTEGRITY_CHECK 17 #define PragTyp_JOURNAL_MODE 18 #define PragTyp_JOURNAL_SIZE_LIMIT 19 #define PragTyp_LOCK_PROXY_FILE 20 #define PragTyp_LOCKING_MODE 21 #define PragTyp_PAGE_COUNT 22 #define PragTyp_MMAP_SIZE 23 #define PragTyp_PAGE_SIZE 24 #define PragTyp_SECURE_DELETE 25 #define PragTyp_SHRINK_MEMORY 26 #define PragTyp_SOFT_HEAP_LIMIT 27 #define PragTyp_STATS 28 #define PragTyp_SYNCHRONOUS 29 #define PragTyp_TABLE_INFO 30 #define PragTyp_TEMP_STORE 31 #define PragTyp_TEMP_STORE_DIRECTORY 32 #define PragTyp_THREADS 33 #define PragTyp_WAL_AUTOCHECKPOINT 34 #define PragTyp_WAL_CHECKPOINT 35 #define PragTyp_ACTIVATE_EXTENSIONS 36 #define PragTyp_HEXKEY 37 #define PragTyp_KEY 38 #define PragTyp_REKEY 39 #define PragTyp_LOCK_STATUS 40 #define PragTyp_PARSER_TRACE 41 #define PragFlag_NeedSchema 0x01 #define PragFlag_ReadOnly 0x02 static const struct sPragmaNames { const char *const zName; /* Name of pragma */ u8 ePragTyp; /* PragTyp_XXX value */ u8 mPragFlag; /* Zero or more PragFlag_XXX values */ u32 iArg; /* Extra argument */ } aPragmaNames[] = { #if defined(SQLITE_HAS_CODEC) || defined(SQLITE_ENABLE_CEROD) { /* zName: */ "activate_extensions", /* ePragTyp: */ PragTyp_ACTIVATE_EXTENSIONS, /* ePragFlag: */ 0, /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS) { /* zName: */ "application_id", /* ePragTyp: */ PragTyp_HEADER_VALUE, /* ePragFlag: */ 0, /* iArg: */ BTREE_APPLICATION_ID }, #endif #if !defined(SQLITE_OMIT_AUTOVACUUM) { /* zName: */ "auto_vacuum", /* ePragTyp: */ PragTyp_AUTO_VACUUM, /* ePragFlag: */ PragFlag_NeedSchema, /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_FLAG_PRAGMAS) #if !defined(SQLITE_OMIT_AUTOMATIC_INDEX) { /* zName: */ "automatic_index", /* ePragTyp: */ PragTyp_FLAG, /* ePragFlag: */ 0, /* iArg: */ SQLITE_AutoIndex }, #endif #endif { /* zName: */ "busy_timeout", /* ePragTyp: */ PragTyp_BUSY_TIMEOUT, /* ePragFlag: */ 0, /* iArg: */ 0 }, #if !defined(SQLITE_OMIT_PAGER_PRAGMAS) { /* zName: */ "cache_size", /* ePragTyp: */ PragTyp_CACHE_SIZE, /* ePragFlag: */ 0, /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_FLAG_PRAGMAS) { /* zName: */ "cache_spill", /* ePragTyp: */ PragTyp_FLAG, /* ePragFlag: */ 0, /* iArg: */ SQLITE_CacheSpill }, #endif { /* zName: */ "case_sensitive_like", /* ePragTyp: */ PragTyp_CASE_SENSITIVE_LIKE, /* ePragFlag: */ 0, /* iArg: */ 0 }, { /* zName: */ "cell_size_check", /* ePragTyp: */ PragTyp_FLAG, /* ePragFlag: */ 0, /* iArg: */ SQLITE_CellSizeCk }, #if !defined(SQLITE_OMIT_FLAG_PRAGMAS) { /* zName: */ "checkpoint_fullfsync", /* ePragTyp: */ PragTyp_FLAG, /* ePragFlag: */ 0, /* iArg: */ SQLITE_CkptFullFSync }, #endif #if !defined(SQLITE_OMIT_SCHEMA_PRAGMAS) { /* zName: */ "collation_list", /* ePragTyp: */ PragTyp_COLLATION_LIST, /* ePragFlag: */ 0, /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_COMPILEOPTION_DIAGS) { /* zName: */ "compile_options", /* ePragTyp: */ PragTyp_COMPILE_OPTIONS, /* ePragFlag: */ 0, /* iArg: */ 0 }, #endif #if 
!defined(SQLITE_OMIT_FLAG_PRAGMAS) { /* zName: */ "count_changes", /* ePragTyp: */ PragTyp_FLAG, /* ePragFlag: */ 0, /* iArg: */ SQLITE_CountRows }, #endif #if !defined(SQLITE_OMIT_PAGER_PRAGMAS) && SQLITE_OS_WIN { /* zName: */ "data_store_directory", /* ePragTyp: */ PragTyp_DATA_STORE_DIRECTORY, /* ePragFlag: */ 0, /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS) { /* zName: */ "data_version", /* ePragTyp: */ PragTyp_HEADER_VALUE, /* ePragFlag: */ PragFlag_ReadOnly, /* iArg: */ BTREE_DATA_VERSION }, #endif #if !defined(SQLITE_OMIT_SCHEMA_PRAGMAS) { /* zName: */ "database_list", /* ePragTyp: */ PragTyp_DATABASE_LIST, /* ePragFlag: */ PragFlag_NeedSchema, /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_PAGER_PRAGMAS) && !defined(SQLITE_OMIT_DEPRECATED) { /* zName: */ "default_cache_size", /* ePragTyp: */ PragTyp_DEFAULT_CACHE_SIZE, /* ePragFlag: */ PragFlag_NeedSchema, /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_FLAG_PRAGMAS) #if !defined(SQLITE_OMIT_FOREIGN_KEY) && !defined(SQLITE_OMIT_TRIGGER) { /* zName: */ "defer_foreign_keys", /* ePragTyp: */ PragTyp_FLAG, /* ePragFlag: */ 0, /* iArg: */ SQLITE_DeferFKs }, #endif #endif #if !defined(SQLITE_OMIT_FLAG_PRAGMAS) { /* zName: */ "empty_result_callbacks", /* ePragTyp: */ PragTyp_FLAG, /* ePragFlag: */ 0, /* iArg: */ SQLITE_NullCallback }, #endif #if !defined(SQLITE_OMIT_UTF16) { /* zName: */ "encoding", /* ePragTyp: */ PragTyp_ENCODING, /* ePragFlag: */ 0, /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_FOREIGN_KEY) && !defined(SQLITE_OMIT_TRIGGER) { /* zName: */ "foreign_key_check", /* ePragTyp: */ PragTyp_FOREIGN_KEY_CHECK, /* ePragFlag: */ PragFlag_NeedSchema, /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_FOREIGN_KEY) { /* zName: */ "foreign_key_list", /* ePragTyp: */ PragTyp_FOREIGN_KEY_LIST, /* ePragFlag: */ PragFlag_NeedSchema, /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_FLAG_PRAGMAS) #if !defined(SQLITE_OMIT_FOREIGN_KEY) && !defined(SQLITE_OMIT_TRIGGER) { /* zName: */ "foreign_keys", /* ePragTyp: */ PragTyp_FLAG, /* ePragFlag: */ 0, /* iArg: */ SQLITE_ForeignKeys }, #endif #endif #if !defined(SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS) { /* zName: */ "freelist_count", /* ePragTyp: */ PragTyp_HEADER_VALUE, /* ePragFlag: */ PragFlag_ReadOnly, /* iArg: */ BTREE_FREE_PAGE_COUNT }, #endif #if !defined(SQLITE_OMIT_FLAG_PRAGMAS) { /* zName: */ "full_column_names", /* ePragTyp: */ PragTyp_FLAG, /* ePragFlag: */ 0, /* iArg: */ SQLITE_FullColNames }, { /* zName: */ "fullfsync", /* ePragTyp: */ PragTyp_FLAG, /* ePragFlag: */ 0, /* iArg: */ SQLITE_FullFSync }, #endif #if defined(SQLITE_HAS_CODEC) { /* zName: */ "hexkey", /* ePragTyp: */ PragTyp_HEXKEY, /* ePragFlag: */ 0, /* iArg: */ 0 }, { /* zName: */ "hexrekey", /* ePragTyp: */ PragTyp_HEXKEY, /* ePragFlag: */ 0, /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_FLAG_PRAGMAS) #if !defined(SQLITE_OMIT_CHECK) { /* zName: */ "ignore_check_constraints", /* ePragTyp: */ PragTyp_FLAG, /* ePragFlag: */ 0, /* iArg: */ SQLITE_IgnoreChecks }, #endif #endif #if !defined(SQLITE_OMIT_AUTOVACUUM) { /* zName: */ "incremental_vacuum", /* ePragTyp: */ PragTyp_INCREMENTAL_VACUUM, /* ePragFlag: */ PragFlag_NeedSchema, /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_SCHEMA_PRAGMAS) { /* zName: */ "index_info", /* ePragTyp: */ PragTyp_INDEX_INFO, /* ePragFlag: */ PragFlag_NeedSchema, /* iArg: */ 0 }, { /* zName: */ "index_list", /* ePragTyp: */ PragTyp_INDEX_LIST, /* ePragFlag: */ PragFlag_NeedSchema, /* iArg: */ 0 }, { /* zName: */ "index_xinfo", /* ePragTyp: */ PragTyp_INDEX_INFO, 
/* ePragFlag: */ PragFlag_NeedSchema, /* iArg: */ 1 }, #endif #if !defined(SQLITE_OMIT_INTEGRITY_CHECK) { /* zName: */ "integrity_check", /* ePragTyp: */ PragTyp_INTEGRITY_CHECK, /* ePragFlag: */ PragFlag_NeedSchema, /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_PAGER_PRAGMAS) { /* zName: */ "journal_mode", /* ePragTyp: */ PragTyp_JOURNAL_MODE, /* ePragFlag: */ PragFlag_NeedSchema, /* iArg: */ 0 }, { /* zName: */ "journal_size_limit", /* ePragTyp: */ PragTyp_JOURNAL_SIZE_LIMIT, /* ePragFlag: */ 0, /* iArg: */ 0 }, #endif #if defined(SQLITE_HAS_CODEC) { /* zName: */ "key", /* ePragTyp: */ PragTyp_KEY, /* ePragFlag: */ 0, /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_FLAG_PRAGMAS) { /* zName: */ "legacy_file_format", /* ePragTyp: */ PragTyp_FLAG, /* ePragFlag: */ 0, /* iArg: */ SQLITE_LegacyFileFmt }, #endif #if !defined(SQLITE_OMIT_PAGER_PRAGMAS) && SQLITE_ENABLE_LOCKING_STYLE { /* zName: */ "lock_proxy_file", /* ePragTyp: */ PragTyp_LOCK_PROXY_FILE, /* ePragFlag: */ 0, /* iArg: */ 0 }, #endif #if defined(SQLITE_DEBUG) || defined(SQLITE_TEST) { /* zName: */ "lock_status", /* ePragTyp: */ PragTyp_LOCK_STATUS, /* ePragFlag: */ 0, /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_PAGER_PRAGMAS) { /* zName: */ "locking_mode", /* ePragTyp: */ PragTyp_LOCKING_MODE, /* ePragFlag: */ 0, /* iArg: */ 0 }, { /* zName: */ "max_page_count", /* ePragTyp: */ PragTyp_PAGE_COUNT, /* ePragFlag: */ PragFlag_NeedSchema, /* iArg: */ 0 }, { /* zName: */ "mmap_size", /* ePragTyp: */ PragTyp_MMAP_SIZE, /* ePragFlag: */ 0, /* iArg: */ 0 }, { /* zName: */ "page_count", /* ePragTyp: */ PragTyp_PAGE_COUNT, /* ePragFlag: */ PragFlag_NeedSchema, /* iArg: */ 0 }, { /* zName: */ "page_size", /* ePragTyp: */ PragTyp_PAGE_SIZE, /* ePragFlag: */ 0, /* iArg: */ 0 }, #endif #if defined(SQLITE_DEBUG) { /* zName: */ "parser_trace", /* ePragTyp: */ PragTyp_PARSER_TRACE, /* ePragFlag: */ 0, /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_FLAG_PRAGMAS) { /* zName: */ "query_only", /* ePragTyp: */ PragTyp_FLAG, /* ePragFlag: */ 0, /* iArg: */ SQLITE_QueryOnly }, #endif #if !defined(SQLITE_OMIT_INTEGRITY_CHECK) { /* zName: */ "quick_check", /* ePragTyp: */ PragTyp_INTEGRITY_CHECK, /* ePragFlag: */ PragFlag_NeedSchema, /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_FLAG_PRAGMAS) { /* zName: */ "read_uncommitted", /* ePragTyp: */ PragTyp_FLAG, /* ePragFlag: */ 0, /* iArg: */ SQLITE_ReadUncommitted }, { /* zName: */ "recursive_triggers", /* ePragTyp: */ PragTyp_FLAG, /* ePragFlag: */ 0, /* iArg: */ SQLITE_RecTriggers }, #endif #if defined(SQLITE_HAS_CODEC) { /* zName: */ "rekey", /* ePragTyp: */ PragTyp_REKEY, /* ePragFlag: */ 0, /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_FLAG_PRAGMAS) { /* zName: */ "reverse_unordered_selects", /* ePragTyp: */ PragTyp_FLAG, /* ePragFlag: */ 0, /* iArg: */ SQLITE_ReverseOrder }, #endif #if !defined(SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS) { /* zName: */ "schema_version", /* ePragTyp: */ PragTyp_HEADER_VALUE, /* ePragFlag: */ 0, /* iArg: */ BTREE_SCHEMA_VERSION }, #endif #if !defined(SQLITE_OMIT_PAGER_PRAGMAS) { /* zName: */ "secure_delete", /* ePragTyp: */ PragTyp_SECURE_DELETE, /* ePragFlag: */ 0, /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_FLAG_PRAGMAS) { /* zName: */ "short_column_names", /* ePragTyp: */ PragTyp_FLAG, /* ePragFlag: */ 0, /* iArg: */ SQLITE_ShortColNames }, #endif { /* zName: */ "shrink_memory", /* ePragTyp: */ PragTyp_SHRINK_MEMORY, /* ePragFlag: */ 0, /* iArg: */ 0 }, { /* zName: */ "soft_heap_limit", /* ePragTyp: */ PragTyp_SOFT_HEAP_LIMIT, /* ePragFlag: */ 0, /* 
iArg: */ 0 }, #if !defined(SQLITE_OMIT_FLAG_PRAGMAS) #if defined(SQLITE_DEBUG) { /* zName: */ "sql_trace", /* ePragTyp: */ PragTyp_FLAG, /* ePragFlag: */ 0, /* iArg: */ SQLITE_SqlTrace }, #endif #endif #if !defined(SQLITE_OMIT_SCHEMA_PRAGMAS) { /* zName: */ "stats", /* ePragTyp: */ PragTyp_STATS, /* ePragFlag: */ PragFlag_NeedSchema, /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_PAGER_PRAGMAS) { /* zName: */ "synchronous", /* ePragTyp: */ PragTyp_SYNCHRONOUS, /* ePragFlag: */ PragFlag_NeedSchema, /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_SCHEMA_PRAGMAS) { /* zName: */ "table_info", /* ePragTyp: */ PragTyp_TABLE_INFO, /* ePragFlag: */ PragFlag_NeedSchema, /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_PAGER_PRAGMAS) { /* zName: */ "temp_store", /* ePragTyp: */ PragTyp_TEMP_STORE, /* ePragFlag: */ 0, /* iArg: */ 0 }, { /* zName: */ "temp_store_directory", /* ePragTyp: */ PragTyp_TEMP_STORE_DIRECTORY, /* ePragFlag: */ 0, /* iArg: */ 0 }, #endif { /* zName: */ "threads", /* ePragTyp: */ PragTyp_THREADS, /* ePragFlag: */ 0, /* iArg: */ 0 }, #if !defined(SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS) { /* zName: */ "user_version", /* ePragTyp: */ PragTyp_HEADER_VALUE, /* ePragFlag: */ 0, /* iArg: */ BTREE_USER_VERSION }, #endif #if !defined(SQLITE_OMIT_FLAG_PRAGMAS) #if defined(SQLITE_DEBUG) { /* zName: */ "vdbe_addoptrace", /* ePragTyp: */ PragTyp_FLAG, /* ePragFlag: */ 0, /* iArg: */ SQLITE_VdbeAddopTrace }, { /* zName: */ "vdbe_debug", /* ePragTyp: */ PragTyp_FLAG, /* ePragFlag: */ 0, /* iArg: */ SQLITE_SqlTrace|SQLITE_VdbeListing|SQLITE_VdbeTrace }, { /* zName: */ "vdbe_eqp", /* ePragTyp: */ PragTyp_FLAG, /* ePragFlag: */ 0, /* iArg: */ SQLITE_VdbeEQP }, { /* zName: */ "vdbe_listing", /* ePragTyp: */ PragTyp_FLAG, /* ePragFlag: */ 0, /* iArg: */ SQLITE_VdbeListing }, { /* zName: */ "vdbe_trace", /* ePragTyp: */ PragTyp_FLAG, /* ePragFlag: */ 0, /* iArg: */ SQLITE_VdbeTrace }, #endif #endif #if !defined(SQLITE_OMIT_WAL) { /* zName: */ "wal_autocheckpoint", /* ePragTyp: */ PragTyp_WAL_AUTOCHECKPOINT, /* ePragFlag: */ 0, /* iArg: */ 0 }, { /* zName: */ "wal_checkpoint", /* ePragTyp: */ PragTyp_WAL_CHECKPOINT, /* ePragFlag: */ PragFlag_NeedSchema, /* iArg: */ 0 }, #endif #if !defined(SQLITE_OMIT_FLAG_PRAGMAS) { /* zName: */ "writable_schema", /* ePragTyp: */ PragTyp_FLAG, /* ePragFlag: */ 0, /* iArg: */ SQLITE_WriteSchema|SQLITE_RecoveryMode }, #endif }; /* Number of pragmas: 60 on by default, 73 total. */ /************** End of pragma.h **********************************************/ /************** Continuing where we left off in pragma.c *********************/ /* ** Interpret the given string as a safety level. Return 0 for OFF, ** 1 for ON or NORMAL and 2 for FULL. Return 1 for an empty or ** unrecognized string argument. The FULL option is disallowed ** if the omitFull parameter it 1. ** ** Note that the values returned are one less that the values that ** should be passed into sqlite3BtreeSetSafetyLevel(). The is done ** to support legacy SQL code. The safety level used to be boolean ** and older scripts may have used numbers 0 for OFF and 1 for ON. 
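** A few concrete mappings implied by the lookup tables in getSafetyLevel()
** below (values as returned here, i.e. one less than what is passed on to
** sqlite3BtreeSetSafetyLevel()); matching is case-insensitive:
**
**      "off"  -> 0     "no"  -> 0     "false" -> 0     "0" -> 0
**      "on"   -> 1     "yes" -> 1     "true"  -> 1     "1" -> 1
**      "full" -> 2 (disallowed when omitFull is true)  "2" -> 2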
*/ static u8 getSafetyLevel(const char *z, int omitFull, u8 dflt){ /* 123456789 123456789 */ static const char zText[] = "onoffalseyestruefull"; static const u8 iOffset[] = {0, 1, 2, 4, 9, 12, 16}; static const u8 iLength[] = {2, 2, 3, 5, 3, 4, 4}; static const u8 iValue[] = {1, 0, 0, 0, 1, 1, 2}; int i, n; if( sqlite3Isdigit(*z) ){ return (u8)sqlite3Atoi(z); } n = sqlite3Strlen30(z); for(i=0; i=0&&i<=2)?i:0); } #endif /* ifndef SQLITE_OMIT_AUTOVACUUM */ #ifndef SQLITE_OMIT_PAGER_PRAGMAS /* ** Interpret the given string as a temp db location. Return 1 for file ** backed temporary databases, 2 for the Red-Black tree in memory database ** and 0 to use the compile-time default. */ static int getTempStore(const char *z){ if( z[0]>='0' && z[0]<='2' ){ return z[0] - '0'; }else if( sqlite3StrICmp(z, "file")==0 ){ return 1; }else if( sqlite3StrICmp(z, "memory")==0 ){ return 2; }else{ return 0; } } #endif /* SQLITE_PAGER_PRAGMAS */ #ifndef SQLITE_OMIT_PAGER_PRAGMAS /* ** Invalidate temp storage, either when the temp storage is changed ** from default, or when 'file' and the temp_store_directory has changed */ static int invalidateTempStorage(Parse *pParse){ sqlite3 *db = pParse->db; if( db->aDb[1].pBt!=0 ){ if( !db->autoCommit || sqlite3BtreeIsInReadTrans(db->aDb[1].pBt) ){ sqlite3ErrorMsg(pParse, "temporary storage cannot be changed " "from within a transaction"); return SQLITE_ERROR; } sqlite3BtreeClose(db->aDb[1].pBt); db->aDb[1].pBt = 0; sqlite3ResetAllSchemasOfConnection(db); } return SQLITE_OK; } #endif /* SQLITE_PAGER_PRAGMAS */ #ifndef SQLITE_OMIT_PAGER_PRAGMAS /* ** If the TEMP database is open, close it and mark the database schema ** as needing reloading. This must be done when using the SQLITE_TEMP_STORE ** or DEFAULT_TEMP_STORE pragmas. */ static int changeTempStorage(Parse *pParse, const char *zStorageType){ int ts = getTempStore(zStorageType); sqlite3 *db = pParse->db; if( db->temp_store==ts ) return SQLITE_OK; if( invalidateTempStorage( pParse ) != SQLITE_OK ){ return SQLITE_ERROR; } db->temp_store = (u8)ts; return SQLITE_OK; } #endif /* SQLITE_PAGER_PRAGMAS */ /* ** Generate code to return a single integer value. */ static void returnSingleInt(Parse *pParse, const char *zLabel, i64 value){ Vdbe *v = sqlite3GetVdbe(pParse); int nMem = ++pParse->nMem; i64 *pI64 = sqlite3DbMallocRaw(pParse->db, sizeof(value)); if( pI64 ){ memcpy(pI64, &value, sizeof(value)); } sqlite3VdbeAddOp4(v, OP_Int64, 0, nMem, 0, (char*)pI64, P4_INT64); sqlite3VdbeSetNumCols(v, 1); sqlite3VdbeSetColName(v, 0, COLNAME_NAME, zLabel, SQLITE_STATIC); sqlite3VdbeAddOp2(v, OP_ResultRow, nMem, 1); } /* ** Set the safety_level and pager flags for pager iDb. Or if iDb<0 ** set these values for all pagers. */ #ifndef SQLITE_OMIT_PAGER_PRAGMAS static void setAllPagerFlags(sqlite3 *db){ if( db->autoCommit ){ Db *pDb = db->aDb; int n = db->nDb; assert( SQLITE_FullFSync==PAGER_FULLFSYNC ); assert( SQLITE_CkptFullFSync==PAGER_CKPT_FULLFSYNC ); assert( SQLITE_CacheSpill==PAGER_CACHESPILL ); assert( (PAGER_FULLFSYNC | PAGER_CKPT_FULLFSYNC | PAGER_CACHESPILL) == PAGER_FLAGS_MASK ); assert( (pDb->safety_level & PAGER_SYNCHRONOUS_MASK)==pDb->safety_level ); while( (n--) > 0 ){ if( pDb->pBt ){ sqlite3BtreeSetPagerFlags(pDb->pBt, pDb->safety_level | (db->flags & PAGER_FLAGS_MASK) ); } pDb++; } } } #else # define setAllPagerFlags(X) /* no-op */ #endif /* ** Return a human-readable name for a constraint resolution action. 
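** For example, given a schema containing
**
**      CREATE TABLE parent(x PRIMARY KEY);
**      CREATE TABLE child(y REFERENCES parent(x) ON DELETE CASCADE);
**
** the "on_delete" column emitted by PRAGMA foreign_key_list(child) is the
** string "CASCADE" produced by this routine, and "NO ACTION" is reported
** for constraints that do not declare an explicit action.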
*/ #ifndef SQLITE_OMIT_FOREIGN_KEY static const char *actionName(u8 action){ const char *zName; switch( action ){ case OE_SetNull: zName = "SET NULL"; break; case OE_SetDflt: zName = "SET DEFAULT"; break; case OE_Cascade: zName = "CASCADE"; break; case OE_Restrict: zName = "RESTRICT"; break; default: zName = "NO ACTION"; assert( action==OE_None ); break; } return zName; } #endif /* ** Parameter eMode must be one of the PAGER_JOURNALMODE_XXX constants ** defined in pager.h. This function returns the associated lowercase ** journal-mode name. */ SQLITE_PRIVATE const char *sqlite3JournalModename(int eMode){ static char * const azModeName[] = { "delete", "persist", "off", "truncate", "memory" #ifndef SQLITE_OMIT_WAL , "wal" #endif }; assert( PAGER_JOURNALMODE_DELETE==0 ); assert( PAGER_JOURNALMODE_PERSIST==1 ); assert( PAGER_JOURNALMODE_OFF==2 ); assert( PAGER_JOURNALMODE_TRUNCATE==3 ); assert( PAGER_JOURNALMODE_MEMORY==4 ); assert( PAGER_JOURNALMODE_WAL==5 ); assert( eMode>=0 && eMode<=ArraySize(azModeName) ); if( eMode==ArraySize(azModeName) ) return 0; return azModeName[eMode]; } /* ** Process a pragma statement. ** ** Pragmas are of this form: ** ** PRAGMA [database.]id [= value] ** ** The identifier might also be a string. The value is a string, and ** identifier, or a number. If minusFlag is true, then the value is ** a number that was preceded by a minus sign. ** ** If the left side is "database.id" then pId1 is the database name ** and pId2 is the id. If the left side is just "id" then pId1 is the ** id and pId2 is any empty string. */ SQLITE_PRIVATE void sqlite3Pragma( Parse *pParse, Token *pId1, /* First part of [database.]id field */ Token *pId2, /* Second part of [database.]id field, or NULL */ Token *pValue, /* Token for , or NULL */ int minusFlag /* True if a '-' sign preceded */ ){ char *zLeft = 0; /* Nul-terminated UTF-8 string */ char *zRight = 0; /* Nul-terminated UTF-8 string , or NULL */ const char *zDb = 0; /* The database name */ Token *pId; /* Pointer to token */ char *aFcntl[4]; /* Argument to SQLITE_FCNTL_PRAGMA */ int iDb; /* Database index for */ int lwr, upr, mid = 0; /* Binary search bounds */ int rc; /* return value form SQLITE_FCNTL_PRAGMA */ sqlite3 *db = pParse->db; /* The database connection */ Db *pDb; /* The specific database being pragmaed */ Vdbe *v = sqlite3GetVdbe(pParse); /* Prepared statement */ const struct sPragmaNames *pPragma; if( v==0 ) return; sqlite3VdbeRunOnlyOnce(v); pParse->nMem = 2; /* Interpret the [database.] part of the pragma statement. iDb is the ** index of the database this pragma is being applied to in db.aDb[]. */ iDb = sqlite3TwoPartName(pParse, pId1, pId2, &pId); if( iDb<0 ) return; pDb = &db->aDb[iDb]; /* If the temp database has been explicitly named as part of the ** pragma, make sure it is open. */ if( iDb==1 && sqlite3OpenTempDatabase(pParse) ){ return; } zLeft = sqlite3NameFromToken(db, pId); if( !zLeft ) return; if( minusFlag ){ zRight = sqlite3MPrintf(db, "-%T", pValue); }else{ zRight = sqlite3NameFromToken(db, pValue); } assert( pId2 ); zDb = pId2->n>0 ? pDb->zName : 0; if( sqlite3AuthCheck(pParse, SQLITE_PRAGMA, zLeft, zRight, zDb) ){ goto pragma_out; } /* Send an SQLITE_FCNTL_PRAGMA file-control to the underlying VFS ** connection. If it returns SQLITE_OK, then assume that the VFS ** handled the pragma and generate a no-op prepared statement. 
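** A VFS that wants to claim a pragma does so from its xFileControl method.
** A sketch, where the pragma name "myvfs_status" is illustrative: pArg is
** the four-element char** array described below ([1] is the pragma name,
** [2] its argument or NULL), element [0] receives an sqlite3_mprintf()ed
** result string, and returning SQLITE_NOTFOUND hands the pragma back to
** SQLite:
**
**      static int myFileControl(sqlite3_file *pFile, int op, void *pArg){
**        if( op==SQLITE_FCNTL_PRAGMA ){
**          char **azArg = (char**)pArg;
**          if( sqlite3_stricmp(azArg[1], "myvfs_status")==0 ){
**            azArg[0] = sqlite3_mprintf("ok");
**            return SQLITE_OK;
**          }
**        }
**        return SQLITE_NOTFOUND;
**      }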
** ** IMPLEMENTATION-OF: R-12238-55120 Whenever a PRAGMA statement is parsed, ** an SQLITE_FCNTL_PRAGMA file control is sent to the open sqlite3_file ** object corresponding to the database file to which the pragma ** statement refers. ** ** IMPLEMENTATION-OF: R-29875-31678 The argument to the SQLITE_FCNTL_PRAGMA ** file control is an array of pointers to strings (char**) in which the ** second element of the array is the name of the pragma and the third ** element is the argument to the pragma or NULL if the pragma has no ** argument. */ aFcntl[0] = 0; aFcntl[1] = zLeft; aFcntl[2] = zRight; aFcntl[3] = 0; db->busyHandler.nBusy = 0; rc = sqlite3_file_control(db, zDb, SQLITE_FCNTL_PRAGMA, (void*)aFcntl); if( rc==SQLITE_OK ){ if( aFcntl[0] ){ int nMem = ++pParse->nMem; sqlite3VdbeAddOp4(v, OP_String8, 0, nMem, 0, aFcntl[0], 0); sqlite3VdbeSetNumCols(v, 1); sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "result", SQLITE_STATIC); sqlite3VdbeAddOp2(v, OP_ResultRow, nMem, 1); sqlite3_free(aFcntl[0]); } goto pragma_out; } if( rc!=SQLITE_NOTFOUND ){ if( aFcntl[0] ){ sqlite3ErrorMsg(pParse, "%s", aFcntl[0]); sqlite3_free(aFcntl[0]); } pParse->nErr++; pParse->rc = rc; goto pragma_out; } /* Locate the pragma in the lookup table */ lwr = 0; upr = ArraySize(aPragmaNames)-1; while( lwr<=upr ){ mid = (lwr+upr)/2; rc = sqlite3_stricmp(zLeft, aPragmaNames[mid].zName); if( rc==0 ) break; if( rc<0 ){ upr = mid - 1; }else{ lwr = mid + 1; } } if( lwr>upr ) goto pragma_out; pPragma = &aPragmaNames[mid]; /* Make sure the database schema is loaded if the pragma requires that */ if( (pPragma->mPragFlag & PragFlag_NeedSchema)!=0 ){ if( sqlite3ReadSchema(pParse) ) goto pragma_out; } /* Jump to the appropriate pragma handler */ switch( pPragma->ePragTyp ){ #if !defined(SQLITE_OMIT_PAGER_PRAGMAS) && !defined(SQLITE_OMIT_DEPRECATED) /* ** PRAGMA [database.]default_cache_size ** PRAGMA [database.]default_cache_size=N ** ** The first form reports the current persistent setting for the ** page cache size. The value returned is the maximum number of ** pages in the page cache. The second form sets both the current ** page cache size value and the persistent page cache size value ** stored in the database file. ** ** Older versions of SQLite would set the default cache size to a ** negative number to indicate synchronous=OFF. These days, synchronous ** is always on by default regardless of the sign of the default cache ** size. But continue to take the absolute value of the default cache ** size of historical compatibility. 
*/ case PragTyp_DEFAULT_CACHE_SIZE: { static const int iLn = VDBE_OFFSET_LINENO(2); static const VdbeOpList getCacheSize[] = { { OP_Transaction, 0, 0, 0}, /* 0 */ { OP_ReadCookie, 0, 1, BTREE_DEFAULT_CACHE_SIZE}, /* 1 */ { OP_IfPos, 1, 8, 0}, { OP_Integer, 0, 2, 0}, { OP_Subtract, 1, 2, 1}, { OP_IfPos, 1, 8, 0}, { OP_Integer, 0, 1, 0}, /* 6 */ { OP_Noop, 0, 0, 0}, { OP_ResultRow, 1, 1, 0}, }; int addr; sqlite3VdbeUsesBtree(v, iDb); if( !zRight ){ sqlite3VdbeSetNumCols(v, 1); sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "cache_size", SQLITE_STATIC); pParse->nMem += 2; addr = sqlite3VdbeAddOpList(v, ArraySize(getCacheSize), getCacheSize,iLn); sqlite3VdbeChangeP1(v, addr, iDb); sqlite3VdbeChangeP1(v, addr+1, iDb); sqlite3VdbeChangeP1(v, addr+6, SQLITE_DEFAULT_CACHE_SIZE); }else{ int size = sqlite3AbsInt32(sqlite3Atoi(zRight)); sqlite3BeginWriteOperation(pParse, 0, iDb); sqlite3VdbeAddOp2(v, OP_Integer, size, 1); sqlite3VdbeAddOp3(v, OP_SetCookie, iDb, BTREE_DEFAULT_CACHE_SIZE, 1); assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); pDb->pSchema->cache_size = size; sqlite3BtreeSetCacheSize(pDb->pBt, pDb->pSchema->cache_size); } break; } #endif /* !SQLITE_OMIT_PAGER_PRAGMAS && !SQLITE_OMIT_DEPRECATED */ #if !defined(SQLITE_OMIT_PAGER_PRAGMAS) /* ** PRAGMA [database.]page_size ** PRAGMA [database.]page_size=N ** ** The first form reports the current setting for the ** database page size in bytes. The second form sets the ** database page size value. The value can only be set if ** the database has not yet been created. */ case PragTyp_PAGE_SIZE: { Btree *pBt = pDb->pBt; assert( pBt!=0 ); if( !zRight ){ int size = ALWAYS(pBt) ? sqlite3BtreeGetPageSize(pBt) : 0; returnSingleInt(pParse, "page_size", size); }else{ /* Malloc may fail when setting the page-size, as there is an internal ** buffer that the pager module resizes using sqlite3_realloc(). */ db->nextPagesize = sqlite3Atoi(zRight); if( SQLITE_NOMEM==sqlite3BtreeSetPageSize(pBt, db->nextPagesize,-1,0) ){ db->mallocFailed = 1; } } break; } /* ** PRAGMA [database.]secure_delete ** PRAGMA [database.]secure_delete=ON/OFF ** ** The first form reports the current setting for the ** secure_delete flag. The second form changes the secure_delete ** flag setting and reports thenew value. */ case PragTyp_SECURE_DELETE: { Btree *pBt = pDb->pBt; int b = -1; assert( pBt!=0 ); if( zRight ){ b = sqlite3GetBoolean(zRight, 0); } if( pId2->n==0 && b>=0 ){ int ii; for(ii=0; iinDb; ii++){ sqlite3BtreeSecureDelete(db->aDb[ii].pBt, b); } } b = sqlite3BtreeSecureDelete(pBt, b); returnSingleInt(pParse, "secure_delete", b); break; } /* ** PRAGMA [database.]max_page_count ** PRAGMA [database.]max_page_count=N ** ** The first form reports the current setting for the ** maximum number of pages in the database file. The ** second form attempts to change this setting. Both ** forms return the current setting. ** ** The absolute value of N is used. This is undocumented and might ** change. The only purpose is to provide an easy way to test ** the sqlite3AbsInt32() function. ** ** PRAGMA [database.]page_count ** ** Return the number of pages in the specified database. 
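** Like every other pragma handled here, the value comes back as an ordinary
** result row, so a C caller reads it with the normal statement interface.
** A sketch (multiply the result by PRAGMA page_size to get the database
** file size in bytes):
**
**      sqlite3_stmt *pStmt;
**      sqlite3_int64 nPage = 0;
**      if( sqlite3_prepare_v2(db, "PRAGMA main.page_count;", -1,
**                             &pStmt, 0)==SQLITE_OK ){
**        if( sqlite3_step(pStmt)==SQLITE_ROW ){
**          nPage = sqlite3_column_int64(pStmt, 0);
**        }
**        sqlite3_finalize(pStmt);
**      }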
*/ case PragTyp_PAGE_COUNT: { int iReg; sqlite3CodeVerifySchema(pParse, iDb); iReg = ++pParse->nMem; if( sqlite3Tolower(zLeft[0])=='p' ){ sqlite3VdbeAddOp2(v, OP_Pagecount, iDb, iReg); }else{ sqlite3VdbeAddOp3(v, OP_MaxPgcnt, iDb, iReg, sqlite3AbsInt32(sqlite3Atoi(zRight))); } sqlite3VdbeAddOp2(v, OP_ResultRow, iReg, 1); sqlite3VdbeSetNumCols(v, 1); sqlite3VdbeSetColName(v, 0, COLNAME_NAME, zLeft, SQLITE_TRANSIENT); break; } /* ** PRAGMA [database.]locking_mode ** PRAGMA [database.]locking_mode = (normal|exclusive) */ case PragTyp_LOCKING_MODE: { const char *zRet = "normal"; int eMode = getLockingMode(zRight); if( pId2->n==0 && eMode==PAGER_LOCKINGMODE_QUERY ){ /* Simple "PRAGMA locking_mode;" statement. This is a query for ** the current default locking mode (which may be different to ** the locking-mode of the main database). */ eMode = db->dfltLockMode; }else{ Pager *pPager; if( pId2->n==0 ){ /* This indicates that no database name was specified as part ** of the PRAGMA command. In this case the locking-mode must be ** set on all attached databases, as well as the main db file. ** ** Also, the sqlite3.dfltLockMode variable is set so that ** any subsequently attached databases also use the specified ** locking mode. */ int ii; assert(pDb==&db->aDb[0]); for(ii=2; iinDb; ii++){ pPager = sqlite3BtreePager(db->aDb[ii].pBt); sqlite3PagerLockingMode(pPager, eMode); } db->dfltLockMode = (u8)eMode; } pPager = sqlite3BtreePager(pDb->pBt); eMode = sqlite3PagerLockingMode(pPager, eMode); } assert( eMode==PAGER_LOCKINGMODE_NORMAL || eMode==PAGER_LOCKINGMODE_EXCLUSIVE ); if( eMode==PAGER_LOCKINGMODE_EXCLUSIVE ){ zRet = "exclusive"; } sqlite3VdbeSetNumCols(v, 1); sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "locking_mode", SQLITE_STATIC); sqlite3VdbeAddOp4(v, OP_String8, 0, 1, 0, zRet, 0); sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 1); break; } /* ** PRAGMA [database.]journal_mode ** PRAGMA [database.]journal_mode = ** (delete|persist|off|truncate|memory|wal|off) */ case PragTyp_JOURNAL_MODE: { int eMode; /* One of the PAGER_JOURNALMODE_XXX symbols */ int ii; /* Loop counter */ sqlite3VdbeSetNumCols(v, 1); sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "journal_mode", SQLITE_STATIC); if( zRight==0 ){ /* If there is no "=MODE" part of the pragma, do a query for the ** current mode */ eMode = PAGER_JOURNALMODE_QUERY; }else{ const char *zMode; int n = sqlite3Strlen30(zRight); for(eMode=0; (zMode = sqlite3JournalModename(eMode))!=0; eMode++){ if( sqlite3StrNICmp(zRight, zMode, n)==0 ) break; } if( !zMode ){ /* If the "=MODE" part does not match any known journal mode, ** then do a query */ eMode = PAGER_JOURNALMODE_QUERY; } } if( eMode==PAGER_JOURNALMODE_QUERY && pId2->n==0 ){ /* Convert "PRAGMA journal_mode" into "PRAGMA main.journal_mode" */ iDb = 0; pId2->n = 1; } for(ii=db->nDb-1; ii>=0; ii--){ if( db->aDb[ii].pBt && (ii==iDb || pId2->n==0) ){ sqlite3VdbeUsesBtree(v, ii); sqlite3VdbeAddOp3(v, OP_JournalMode, ii, 1, eMode); } } sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 1); break; } /* ** PRAGMA [database.]journal_size_limit ** PRAGMA [database.]journal_size_limit=N ** ** Get or set the size limit on rollback journal files. 
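** N is a byte count and a negative N (the default is -1) means "no limit".
** For example, to keep the write-ahead log of a WAL-mode database from
** staying large after a checkpoint:
**
**      PRAGMA journal_mode=WAL;
**      PRAGMA main.journal_size_limit=1048576;   -- cap at 1 MiB
**      PRAGMA main.journal_size_limit;           -- query: returns 1048576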
*/ case PragTyp_JOURNAL_SIZE_LIMIT: { Pager *pPager = sqlite3BtreePager(pDb->pBt); i64 iLimit = -2; if( zRight ){ sqlite3DecOrHexToI64(zRight, &iLimit); if( iLimit<-1 ) iLimit = -1; } iLimit = sqlite3PagerJournalSizeLimit(pPager, iLimit); returnSingleInt(pParse, "journal_size_limit", iLimit); break; } #endif /* SQLITE_OMIT_PAGER_PRAGMAS */ /* ** PRAGMA [database.]auto_vacuum ** PRAGMA [database.]auto_vacuum=N ** ** Get or set the value of the database 'auto-vacuum' parameter. ** The value is one of: 0 NONE 1 FULL 2 INCREMENTAL */ #ifndef SQLITE_OMIT_AUTOVACUUM case PragTyp_AUTO_VACUUM: { Btree *pBt = pDb->pBt; assert( pBt!=0 ); if( !zRight ){ returnSingleInt(pParse, "auto_vacuum", sqlite3BtreeGetAutoVacuum(pBt)); }else{ int eAuto = getAutoVacuum(zRight); assert( eAuto>=0 && eAuto<=2 ); db->nextAutovac = (u8)eAuto; /* Call SetAutoVacuum() to set initialize the internal auto and ** incr-vacuum flags. This is required in case this connection ** creates the database file. It is important that it is created ** as an auto-vacuum capable db. */ rc = sqlite3BtreeSetAutoVacuum(pBt, eAuto); if( rc==SQLITE_OK && (eAuto==1 || eAuto==2) ){ /* When setting the auto_vacuum mode to either "full" or ** "incremental", write the value of meta[6] in the database ** file. Before writing to meta[6], check that meta[3] indicates ** that this really is an auto-vacuum capable database. */ static const int iLn = VDBE_OFFSET_LINENO(2); static const VdbeOpList setMeta6[] = { { OP_Transaction, 0, 1, 0}, /* 0 */ { OP_ReadCookie, 0, 1, BTREE_LARGEST_ROOT_PAGE}, { OP_If, 1, 0, 0}, /* 2 */ { OP_Halt, SQLITE_OK, OE_Abort, 0}, /* 3 */ { OP_Integer, 0, 1, 0}, /* 4 */ { OP_SetCookie, 0, BTREE_INCR_VACUUM, 1}, /* 5 */ }; int iAddr; iAddr = sqlite3VdbeAddOpList(v, ArraySize(setMeta6), setMeta6, iLn); sqlite3VdbeChangeP1(v, iAddr, iDb); sqlite3VdbeChangeP1(v, iAddr+1, iDb); sqlite3VdbeChangeP2(v, iAddr+2, iAddr+4); sqlite3VdbeChangeP1(v, iAddr+4, eAuto-1); sqlite3VdbeChangeP1(v, iAddr+5, iDb); sqlite3VdbeUsesBtree(v, iDb); } } break; } #endif /* ** PRAGMA [database.]incremental_vacuum(N) ** ** Do N steps of incremental vacuuming on a database. */ #ifndef SQLITE_OMIT_AUTOVACUUM case PragTyp_INCREMENTAL_VACUUM: { int iLimit, addr; if( zRight==0 || !sqlite3GetInt32(zRight, &iLimit) || iLimit<=0 ){ iLimit = 0x7fffffff; } sqlite3BeginWriteOperation(pParse, 0, iDb); sqlite3VdbeAddOp2(v, OP_Integer, iLimit, 1); addr = sqlite3VdbeAddOp1(v, OP_IncrVacuum, iDb); VdbeCoverage(v); sqlite3VdbeAddOp1(v, OP_ResultRow, 1); sqlite3VdbeAddOp2(v, OP_AddImm, 1, -1); sqlite3VdbeAddOp2(v, OP_IfPos, 1, addr); VdbeCoverage(v); sqlite3VdbeJumpHere(v, addr); break; } #endif #ifndef SQLITE_OMIT_PAGER_PRAGMAS /* ** PRAGMA [database.]cache_size ** PRAGMA [database.]cache_size=N ** ** The first form reports the current local setting for the ** page cache size. The second form sets the local ** page cache size value. If N is positive then that is the ** number of pages in the cache. If N is negative, then the ** number of pages is adjusted so that the cache uses -N kibibytes ** of memory. */ case PragTyp_CACHE_SIZE: { assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); if( !zRight ){ if( sqlite3ReadSchema(pParse) ) goto pragma_out; returnSingleInt(pParse, "cache_size", pDb->pSchema->cache_size); }else{ int size = sqlite3Atoi(zRight); pDb->pSchema->cache_size = size; sqlite3BtreeSetCacheSize(pDb->pBt, pDb->pSchema->cache_size); if( sqlite3ReadSchema(pParse) ) goto pragma_out; } break; } /* ** PRAGMA [database.]mmap_size(N) ** ** Used to set mapping size limit. 
The mapping size limit is ** used to limit the aggregate size of all memory mapped regions of the ** database file. If this parameter is set to zero, then memory mapping ** is not used at all. If N is negative, then the default memory map ** limit determined by sqlite3_config(SQLITE_CONFIG_MMAP_SIZE) is set. ** The parameter N is measured in bytes. ** ** This value is advisory. The underlying VFS is free to memory map ** as little or as much as it wants. Except, if N is set to 0 then the ** upper layers will never invoke the xFetch interfaces to the VFS. */ case PragTyp_MMAP_SIZE: { sqlite3_int64 sz; #if SQLITE_MAX_MMAP_SIZE>0 assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); if( zRight ){ int ii; sqlite3DecOrHexToI64(zRight, &sz); if( sz<0 ) sz = sqlite3GlobalConfig.szMmap; if( pId2->n==0 ) db->szMmap = sz; for(ii=db->nDb-1; ii>=0; ii--){ if( db->aDb[ii].pBt && (ii==iDb || pId2->n==0) ){ sqlite3BtreeSetMmapLimit(db->aDb[ii].pBt, sz); } } } sz = -1; rc = sqlite3_file_control(db, zDb, SQLITE_FCNTL_MMAP_SIZE, &sz); #else sz = 0; rc = SQLITE_OK; #endif if( rc==SQLITE_OK ){ returnSingleInt(pParse, "mmap_size", sz); }else if( rc!=SQLITE_NOTFOUND ){ pParse->nErr++; pParse->rc = rc; } break; } /* ** PRAGMA temp_store ** PRAGMA temp_store = "default"|"memory"|"file" ** ** Return or set the local value of the temp_store flag. Changing ** the local value does not make changes to the disk file and the default ** value will be restored the next time the database is opened. ** ** Note that it is possible for the library compile-time options to ** override this setting */ case PragTyp_TEMP_STORE: { if( !zRight ){ returnSingleInt(pParse, "temp_store", db->temp_store); }else{ changeTempStorage(pParse, zRight); } break; } /* ** PRAGMA temp_store_directory ** PRAGMA temp_store_directory = ""|"directory_name" ** ** Return or set the local value of the temp_store_directory flag. Changing ** the value sets a specific directory to be used for temporary files. ** Setting to a null string reverts to the default temporary directory search. ** If temporary directory is changed, then invalidateTempStorage. ** */ case PragTyp_TEMP_STORE_DIRECTORY: { if( !zRight ){ if( sqlite3_temp_directory ){ sqlite3VdbeSetNumCols(v, 1); sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "temp_store_directory", SQLITE_STATIC); sqlite3VdbeAddOp4(v, OP_String8, 0, 1, 0, sqlite3_temp_directory, 0); sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 1); } }else{ #ifndef SQLITE_OMIT_WSD if( zRight[0] ){ int res; rc = sqlite3OsAccess(db->pVfs, zRight, SQLITE_ACCESS_READWRITE, &res); if( rc!=SQLITE_OK || res==0 ){ sqlite3ErrorMsg(pParse, "not a writable directory"); goto pragma_out; } } if( SQLITE_TEMP_STORE==0 || (SQLITE_TEMP_STORE==1 && db->temp_store<=1) || (SQLITE_TEMP_STORE==2 && db->temp_store==1) ){ invalidateTempStorage(pParse); } sqlite3_free(sqlite3_temp_directory); if( zRight[0] ){ sqlite3_temp_directory = sqlite3_mprintf("%s", zRight); }else{ sqlite3_temp_directory = 0; } #endif /* SQLITE_OMIT_WSD */ } break; } #if SQLITE_OS_WIN /* ** PRAGMA data_store_directory ** PRAGMA data_store_directory = ""|"directory_name" ** ** Return or set the local value of the data_store_directory flag. Changing ** the value sets a specific directory to be used for database files that ** were specified with a relative pathname. Setting to a null string reverts ** to the default database directory, which for database files specified with ** a relative path will probably be based on the current directory for the ** process. 
Database file specified with an absolute path are not impacted ** by this setting, regardless of its value. ** */ case PragTyp_DATA_STORE_DIRECTORY: { if( !zRight ){ if( sqlite3_data_directory ){ sqlite3VdbeSetNumCols(v, 1); sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "data_store_directory", SQLITE_STATIC); sqlite3VdbeAddOp4(v, OP_String8, 0, 1, 0, sqlite3_data_directory, 0); sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 1); } }else{ #ifndef SQLITE_OMIT_WSD if( zRight[0] ){ int res; rc = sqlite3OsAccess(db->pVfs, zRight, SQLITE_ACCESS_READWRITE, &res); if( rc!=SQLITE_OK || res==0 ){ sqlite3ErrorMsg(pParse, "not a writable directory"); goto pragma_out; } } sqlite3_free(sqlite3_data_directory); if( zRight[0] ){ sqlite3_data_directory = sqlite3_mprintf("%s", zRight); }else{ sqlite3_data_directory = 0; } #endif /* SQLITE_OMIT_WSD */ } break; } #endif #if SQLITE_ENABLE_LOCKING_STYLE /* ** PRAGMA [database.]lock_proxy_file ** PRAGMA [database.]lock_proxy_file = ":auto:"|"lock_file_path" ** ** Return or set the value of the lock_proxy_file flag. Changing ** the value sets a specific file to be used for database access locks. ** */ case PragTyp_LOCK_PROXY_FILE: { if( !zRight ){ Pager *pPager = sqlite3BtreePager(pDb->pBt); char *proxy_file_path = NULL; sqlite3_file *pFile = sqlite3PagerFile(pPager); sqlite3OsFileControlHint(pFile, SQLITE_GET_LOCKPROXYFILE, &proxy_file_path); if( proxy_file_path ){ sqlite3VdbeSetNumCols(v, 1); sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "lock_proxy_file", SQLITE_STATIC); sqlite3VdbeAddOp4(v, OP_String8, 0, 1, 0, proxy_file_path, 0); sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 1); } }else{ Pager *pPager = sqlite3BtreePager(pDb->pBt); sqlite3_file *pFile = sqlite3PagerFile(pPager); int res; if( zRight[0] ){ res=sqlite3OsFileControl(pFile, SQLITE_SET_LOCKPROXYFILE, zRight); } else { res=sqlite3OsFileControl(pFile, SQLITE_SET_LOCKPROXYFILE, NULL); } if( res!=SQLITE_OK ){ sqlite3ErrorMsg(pParse, "failed to set lock proxy file"); goto pragma_out; } } break; } #endif /* SQLITE_ENABLE_LOCKING_STYLE */ /* ** PRAGMA [database.]synchronous ** PRAGMA [database.]synchronous=OFF|ON|NORMAL|FULL ** ** Return or set the local value of the synchronous flag. Changing ** the local value does not make changes to the disk file and the ** default value will be restored the next time the database is ** opened. */ case PragTyp_SYNCHRONOUS: { if( !zRight ){ returnSingleInt(pParse, "synchronous", pDb->safety_level-1); }else{ if( !db->autoCommit ){ sqlite3ErrorMsg(pParse, "Safety level may not be changed inside a transaction"); }else{ int iLevel = (getSafetyLevel(zRight,0,1)+1) & PAGER_SYNCHRONOUS_MASK; if( iLevel==0 ) iLevel = 1; pDb->safety_level = iLevel; setAllPagerFlags(db); } } break; } #endif /* SQLITE_OMIT_PAGER_PRAGMAS */ #ifndef SQLITE_OMIT_FLAG_PRAGMAS case PragTyp_FLAG: { if( zRight==0 ){ returnSingleInt(pParse, pPragma->zName, (db->flags & pPragma->iArg)!=0 ); }else{ int mask = pPragma->iArg; /* Mask of bits to set or clear. */ if( db->autoCommit==0 ){ /* Foreign key support may not be enabled or disabled while not ** in auto-commit mode. */ mask &= ~(SQLITE_ForeignKeys); } #if SQLITE_USER_AUTHENTICATION if( db->auth.authLevel==UAUTH_User ){ /* Do not allow non-admin users to modify the schema arbitrarily */ mask &= ~(SQLITE_WriteSchema); } #endif if( sqlite3GetBoolean(zRight, 0) ){ db->flags |= mask; }else{ db->flags &= ~mask; if( mask==SQLITE_DeferFKs ) db->nDeferredImmCons = 0; } /* Many of the flag-pragmas modify the code generated by the SQL ** compiler (eg. count_changes). 
So add an opcode to expire all ** compiled SQL statements after modifying a pragma value. */ sqlite3VdbeAddOp2(v, OP_Expire, 0, 0); setAllPagerFlags(db); } break; } #endif /* SQLITE_OMIT_FLAG_PRAGMAS */ #ifndef SQLITE_OMIT_SCHEMA_PRAGMAS /* ** PRAGMA table_info(<table>
    ) ** ** Return a single row for each column of the named table. The columns of ** the returned data set are: ** ** cid: Column id (numbered from left to right, starting at 0) ** name: Column name ** type: Column declaration type. ** notnull: True if 'NOT NULL' is part of column declaration ** dflt_value: The default value for the column, if any. */ case PragTyp_TABLE_INFO: if( zRight ){ Table *pTab; pTab = sqlite3FindTable(db, zRight, zDb); if( pTab ){ int i, k; int nHidden = 0; Column *pCol; Index *pPk = sqlite3PrimaryKeyIndex(pTab); sqlite3VdbeSetNumCols(v, 6); pParse->nMem = 6; sqlite3CodeVerifySchema(pParse, iDb); sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "cid", SQLITE_STATIC); sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "name", SQLITE_STATIC); sqlite3VdbeSetColName(v, 2, COLNAME_NAME, "type", SQLITE_STATIC); sqlite3VdbeSetColName(v, 3, COLNAME_NAME, "notnull", SQLITE_STATIC); sqlite3VdbeSetColName(v, 4, COLNAME_NAME, "dflt_value", SQLITE_STATIC); sqlite3VdbeSetColName(v, 5, COLNAME_NAME, "pk", SQLITE_STATIC); sqlite3ViewGetColumnNames(pParse, pTab); for(i=0, pCol=pTab->aCol; inCol; i++, pCol++){ if( IsHiddenColumn(pCol) ){ nHidden++; continue; } sqlite3VdbeAddOp2(v, OP_Integer, i-nHidden, 1); sqlite3VdbeAddOp4(v, OP_String8, 0, 2, 0, pCol->zName, 0); sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, pCol->zType ? pCol->zType : "", 0); sqlite3VdbeAddOp2(v, OP_Integer, (pCol->notNull ? 1 : 0), 4); if( pCol->zDflt ){ sqlite3VdbeAddOp4(v, OP_String8, 0, 5, 0, (char*)pCol->zDflt, 0); }else{ sqlite3VdbeAddOp2(v, OP_Null, 0, 5); } if( (pCol->colFlags & COLFLAG_PRIMKEY)==0 ){ k = 0; }else if( pPk==0 ){ k = 1; }else{ for(k=1; k<=pTab->nCol && pPk->aiColumn[k-1]!=i; k++){} } sqlite3VdbeAddOp2(v, OP_Integer, k, 6); sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 6); } } } break; case PragTyp_STATS: { Index *pIdx; HashElem *i; v = sqlite3GetVdbe(pParse); sqlite3VdbeSetNumCols(v, 4); pParse->nMem = 4; sqlite3CodeVerifySchema(pParse, iDb); sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "table", SQLITE_STATIC); sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "index", SQLITE_STATIC); sqlite3VdbeSetColName(v, 2, COLNAME_NAME, "width", SQLITE_STATIC); sqlite3VdbeSetColName(v, 3, COLNAME_NAME, "height", SQLITE_STATIC); for(i=sqliteHashFirst(&pDb->pSchema->tblHash); i; i=sqliteHashNext(i)){ Table *pTab = sqliteHashData(i); sqlite3VdbeAddOp4(v, OP_String8, 0, 1, 0, pTab->zName, 0); sqlite3VdbeAddOp2(v, OP_Null, 0, 2); sqlite3VdbeAddOp2(v, OP_Integer, (int)sqlite3LogEstToInt(pTab->szTabRow), 3); sqlite3VdbeAddOp2(v, OP_Integer, (int)sqlite3LogEstToInt(pTab->nRowLogEst), 4); sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 4); for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ sqlite3VdbeAddOp4(v, OP_String8, 0, 2, 0, pIdx->zName, 0); sqlite3VdbeAddOp2(v, OP_Integer, (int)sqlite3LogEstToInt(pIdx->szIdxRow), 3); sqlite3VdbeAddOp2(v, OP_Integer, (int)sqlite3LogEstToInt(pIdx->aiRowLogEst[0]), 4); sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 4); } } } break; case PragTyp_INDEX_INFO: if( zRight ){ Index *pIdx; Table *pTab; pIdx = sqlite3FindIndex(db, zRight, zDb); if( pIdx ){ int i; int mx; if( pPragma->iArg ){ /* PRAGMA index_xinfo (newer version with more rows and columns) */ mx = pIdx->nColumn; pParse->nMem = 6; }else{ /* PRAGMA index_info (legacy version) */ mx = pIdx->nKeyCol; pParse->nMem = 3; } pTab = pIdx->pTable; sqlite3VdbeSetNumCols(v, pParse->nMem); sqlite3CodeVerifySchema(pParse, iDb); sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "seqno", SQLITE_STATIC); sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "cid", SQLITE_STATIC); sqlite3VdbeSetColName(v, 2, 
COLNAME_NAME, "name", SQLITE_STATIC); if( pPragma->iArg ){ sqlite3VdbeSetColName(v, 3, COLNAME_NAME, "desc", SQLITE_STATIC); sqlite3VdbeSetColName(v, 4, COLNAME_NAME, "coll", SQLITE_STATIC); sqlite3VdbeSetColName(v, 5, COLNAME_NAME, "key", SQLITE_STATIC); } for(i=0; iaiColumn[i]; sqlite3VdbeAddOp2(v, OP_Integer, i, 1); sqlite3VdbeAddOp2(v, OP_Integer, cnum, 2); if( cnum<0 ){ sqlite3VdbeAddOp2(v, OP_Null, 0, 3); }else{ sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, pTab->aCol[cnum].zName, 0); } if( pPragma->iArg ){ sqlite3VdbeAddOp2(v, OP_Integer, pIdx->aSortOrder[i], 4); sqlite3VdbeAddOp4(v, OP_String8, 0, 5, 0, pIdx->azColl[i], 0); sqlite3VdbeAddOp2(v, OP_Integer, inKeyCol, 6); } sqlite3VdbeAddOp2(v, OP_ResultRow, 1, pParse->nMem); } } } break; case PragTyp_INDEX_LIST: if( zRight ){ Index *pIdx; Table *pTab; int i; pTab = sqlite3FindTable(db, zRight, zDb); if( pTab ){ v = sqlite3GetVdbe(pParse); sqlite3VdbeSetNumCols(v, 5); pParse->nMem = 5; sqlite3CodeVerifySchema(pParse, iDb); sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "seq", SQLITE_STATIC); sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "name", SQLITE_STATIC); sqlite3VdbeSetColName(v, 2, COLNAME_NAME, "unique", SQLITE_STATIC); sqlite3VdbeSetColName(v, 3, COLNAME_NAME, "origin", SQLITE_STATIC); sqlite3VdbeSetColName(v, 4, COLNAME_NAME, "partial", SQLITE_STATIC); for(pIdx=pTab->pIndex, i=0; pIdx; pIdx=pIdx->pNext, i++){ const char *azOrigin[] = { "c", "u", "pk" }; sqlite3VdbeAddOp2(v, OP_Integer, i, 1); sqlite3VdbeAddOp4(v, OP_String8, 0, 2, 0, pIdx->zName, 0); sqlite3VdbeAddOp2(v, OP_Integer, IsUniqueIndex(pIdx), 3); sqlite3VdbeAddOp4(v, OP_String8, 0, 4, 0, azOrigin[pIdx->idxType], 0); sqlite3VdbeAddOp2(v, OP_Integer, pIdx->pPartIdxWhere!=0, 5); sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 5); } } } break; case PragTyp_DATABASE_LIST: { int i; sqlite3VdbeSetNumCols(v, 3); pParse->nMem = 3; sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "seq", SQLITE_STATIC); sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "name", SQLITE_STATIC); sqlite3VdbeSetColName(v, 2, COLNAME_NAME, "file", SQLITE_STATIC); for(i=0; inDb; i++){ if( db->aDb[i].pBt==0 ) continue; assert( db->aDb[i].zName!=0 ); sqlite3VdbeAddOp2(v, OP_Integer, i, 1); sqlite3VdbeAddOp4(v, OP_String8, 0, 2, 0, db->aDb[i].zName, 0); sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, sqlite3BtreeGetFilename(db->aDb[i].pBt), 0); sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 3); } } break; case PragTyp_COLLATION_LIST: { int i = 0; HashElem *p; sqlite3VdbeSetNumCols(v, 2); pParse->nMem = 2; sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "seq", SQLITE_STATIC); sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "name", SQLITE_STATIC); for(p=sqliteHashFirst(&db->aCollSeq); p; p=sqliteHashNext(p)){ CollSeq *pColl = (CollSeq *)sqliteHashData(p); sqlite3VdbeAddOp2(v, OP_Integer, i++, 1); sqlite3VdbeAddOp4(v, OP_String8, 0, 2, 0, pColl->zName, 0); sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 2); } } break; #endif /* SQLITE_OMIT_SCHEMA_PRAGMAS */ #ifndef SQLITE_OMIT_FOREIGN_KEY case PragTyp_FOREIGN_KEY_LIST: if( zRight ){ FKey *pFK; Table *pTab; pTab = sqlite3FindTable(db, zRight, zDb); if( pTab ){ v = sqlite3GetVdbe(pParse); pFK = pTab->pFKey; if( pFK ){ int i = 0; sqlite3VdbeSetNumCols(v, 8); pParse->nMem = 8; sqlite3CodeVerifySchema(pParse, iDb); sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "id", SQLITE_STATIC); sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "seq", SQLITE_STATIC); sqlite3VdbeSetColName(v, 2, COLNAME_NAME, "table", SQLITE_STATIC); sqlite3VdbeSetColName(v, 3, COLNAME_NAME, "from", SQLITE_STATIC); sqlite3VdbeSetColName(v, 4, COLNAME_NAME, "to", 
SQLITE_STATIC); sqlite3VdbeSetColName(v, 5, COLNAME_NAME, "on_update", SQLITE_STATIC); sqlite3VdbeSetColName(v, 6, COLNAME_NAME, "on_delete", SQLITE_STATIC); sqlite3VdbeSetColName(v, 7, COLNAME_NAME, "match", SQLITE_STATIC); while(pFK){ int j; for(j=0; jnCol; j++){ char *zCol = pFK->aCol[j].zCol; char *zOnDelete = (char *)actionName(pFK->aAction[0]); char *zOnUpdate = (char *)actionName(pFK->aAction[1]); sqlite3VdbeAddOp2(v, OP_Integer, i, 1); sqlite3VdbeAddOp2(v, OP_Integer, j, 2); sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, pFK->zTo, 0); sqlite3VdbeAddOp4(v, OP_String8, 0, 4, 0, pTab->aCol[pFK->aCol[j].iFrom].zName, 0); sqlite3VdbeAddOp4(v, zCol ? OP_String8 : OP_Null, 0, 5, 0, zCol, 0); sqlite3VdbeAddOp4(v, OP_String8, 0, 6, 0, zOnUpdate, 0); sqlite3VdbeAddOp4(v, OP_String8, 0, 7, 0, zOnDelete, 0); sqlite3VdbeAddOp4(v, OP_String8, 0, 8, 0, "NONE", 0); sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 8); } ++i; pFK = pFK->pNextFrom; } } } } break; #endif /* !defined(SQLITE_OMIT_FOREIGN_KEY) */ #ifndef SQLITE_OMIT_FOREIGN_KEY #ifndef SQLITE_OMIT_TRIGGER case PragTyp_FOREIGN_KEY_CHECK: { FKey *pFK; /* A foreign key constraint */ Table *pTab; /* Child table contain "REFERENCES" keyword */ Table *pParent; /* Parent table that child points to */ Index *pIdx; /* Index in the parent table */ int i; /* Loop counter: Foreign key number for pTab */ int j; /* Loop counter: Field of the foreign key */ HashElem *k; /* Loop counter: Next table in schema */ int x; /* result variable */ int regResult; /* 3 registers to hold a result row */ int regKey; /* Register to hold key for checking the FK */ int regRow; /* Registers to hold a row from pTab */ int addrTop; /* Top of a loop checking foreign keys */ int addrOk; /* Jump here if the key is OK */ int *aiCols; /* child to parent column mapping */ regResult = pParse->nMem+1; pParse->nMem += 4; regKey = ++pParse->nMem; regRow = ++pParse->nMem; v = sqlite3GetVdbe(pParse); sqlite3VdbeSetNumCols(v, 4); sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "table", SQLITE_STATIC); sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "rowid", SQLITE_STATIC); sqlite3VdbeSetColName(v, 2, COLNAME_NAME, "parent", SQLITE_STATIC); sqlite3VdbeSetColName(v, 3, COLNAME_NAME, "fkid", SQLITE_STATIC); sqlite3CodeVerifySchema(pParse, iDb); k = sqliteHashFirst(&db->aDb[iDb].pSchema->tblHash); while( k ){ if( zRight ){ pTab = sqlite3LocateTable(pParse, 0, zRight, zDb); k = 0; }else{ pTab = (Table*)sqliteHashData(k); k = sqliteHashNext(k); } if( pTab==0 || pTab->pFKey==0 ) continue; sqlite3TableLock(pParse, iDb, pTab->tnum, 0, pTab->zName); if( pTab->nCol+regRow>pParse->nMem ) pParse->nMem = pTab->nCol + regRow; sqlite3OpenTable(pParse, 0, iDb, pTab, OP_OpenRead); sqlite3VdbeAddOp4(v, OP_String8, 0, regResult, 0, pTab->zName, P4_TRANSIENT); for(i=1, pFK=pTab->pFKey; pFK; i++, pFK=pFK->pNextFrom){ pParent = sqlite3FindTable(db, pFK->zTo, zDb); if( pParent==0 ) continue; pIdx = 0; sqlite3TableLock(pParse, iDb, pParent->tnum, 0, pParent->zName); x = sqlite3FkLocateIndex(pParse, pParent, pFK, &pIdx, 0); if( x==0 ){ if( pIdx==0 ){ sqlite3OpenTable(pParse, i, iDb, pParent, OP_OpenRead); }else{ sqlite3VdbeAddOp3(v, OP_OpenRead, i, pIdx->tnum, iDb); sqlite3VdbeSetP4KeyInfo(pParse, pIdx); } }else{ k = 0; break; } } assert( pParse->nErr>0 || pFK==0 ); if( pFK ) break; if( pParse->nTabnTab = i; addrTop = sqlite3VdbeAddOp1(v, OP_Rewind, 0); VdbeCoverage(v); for(i=1, pFK=pTab->pFKey; pFK; i++, pFK=pFK->pNextFrom){ pParent = sqlite3FindTable(db, pFK->zTo, zDb); pIdx = 0; aiCols = 0; if( pParent ){ x = 
sqlite3FkLocateIndex(pParse, pParent, pFK, &pIdx, &aiCols); assert( x==0 ); } addrOk = sqlite3VdbeMakeLabel(v); if( pParent && pIdx==0 ){ int iKey = pFK->aCol[0].iFrom; assert( iKey>=0 && iKeynCol ); if( iKey!=pTab->iPKey ){ sqlite3VdbeAddOp3(v, OP_Column, 0, iKey, regRow); sqlite3ColumnDefault(v, pTab, iKey, regRow); sqlite3VdbeAddOp2(v, OP_IsNull, regRow, addrOk); VdbeCoverage(v); sqlite3VdbeAddOp2(v, OP_MustBeInt, regRow, sqlite3VdbeCurrentAddr(v)+3); VdbeCoverage(v); }else{ sqlite3VdbeAddOp2(v, OP_Rowid, 0, regRow); } sqlite3VdbeAddOp3(v, OP_NotExists, i, 0, regRow); VdbeCoverage(v); sqlite3VdbeAddOp2(v, OP_Goto, 0, addrOk); sqlite3VdbeJumpHere(v, sqlite3VdbeCurrentAddr(v)-2); }else{ for(j=0; jnCol; j++){ sqlite3ExprCodeGetColumnOfTable(v, pTab, 0, aiCols ? aiCols[j] : pFK->aCol[j].iFrom, regRow+j); sqlite3VdbeAddOp2(v, OP_IsNull, regRow+j, addrOk); VdbeCoverage(v); } if( pParent ){ sqlite3VdbeAddOp4(v, OP_MakeRecord, regRow, pFK->nCol, regKey, sqlite3IndexAffinityStr(v,pIdx), pFK->nCol); sqlite3VdbeAddOp4Int(v, OP_Found, i, addrOk, regKey, 0); VdbeCoverage(v); } } sqlite3VdbeAddOp2(v, OP_Rowid, 0, regResult+1); sqlite3VdbeAddOp4(v, OP_String8, 0, regResult+2, 0, pFK->zTo, P4_TRANSIENT); sqlite3VdbeAddOp2(v, OP_Integer, i-1, regResult+3); sqlite3VdbeAddOp2(v, OP_ResultRow, regResult, 4); sqlite3VdbeResolveLabel(v, addrOk); sqlite3DbFree(db, aiCols); } sqlite3VdbeAddOp2(v, OP_Next, 0, addrTop+1); VdbeCoverage(v); sqlite3VdbeJumpHere(v, addrTop); } } break; #endif /* !defined(SQLITE_OMIT_TRIGGER) */ #endif /* !defined(SQLITE_OMIT_FOREIGN_KEY) */ #ifndef NDEBUG case PragTyp_PARSER_TRACE: { if( zRight ){ if( sqlite3GetBoolean(zRight, 0) ){ sqlite3ParserTrace(stderr, "parser: "); }else{ sqlite3ParserTrace(0, 0); } } } break; #endif /* Reinstall the LIKE and GLOB functions. The variant of LIKE ** used will be case sensitive or not depending on the RHS. */ case PragTyp_CASE_SENSITIVE_LIKE: { if( zRight ){ sqlite3RegisterLikeFunctions(db, sqlite3GetBoolean(zRight, 0)); } } break; #ifndef SQLITE_INTEGRITY_CHECK_ERROR_MAX # define SQLITE_INTEGRITY_CHECK_ERROR_MAX 100 #endif #ifndef SQLITE_OMIT_INTEGRITY_CHECK /* Pragma "quick_check" is reduced version of ** integrity_check designed to detect most database corruption ** without most of the overhead of a full integrity-check. */ case PragTyp_INTEGRITY_CHECK: { int i, j, addr, mxErr; /* Code that appears at the end of the integrity check. If no error ** messages have been generated, output OK. Otherwise output the ** error message */ static const int iLn = VDBE_OFFSET_LINENO(2); static const VdbeOpList endCode[] = { { OP_IfNeg, 1, 0, 0}, /* 0 */ { OP_String8, 0, 3, 0}, /* 1 */ { OP_ResultRow, 3, 1, 0}, }; int isQuick = (sqlite3Tolower(zLeft[0])=='q'); /* If the PRAGMA command was of the form "PRAGMA .integrity_check", ** then iDb is set to the index of the database identified by . ** In this case, the integrity of database iDb only is verified by ** the VDBE created below. ** ** Otherwise, if the command was simply "PRAGMA integrity_check" (or ** "PRAGMA quick_check"), then iDb is set to 0. In this case, set iDb ** to -1 here, to indicate that the VDBE should verify the integrity ** of all attached databases. 
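**
** For illustration only, a minimal sketch of driving this pragma from the
** public C API (the helper name and the error limit of 20 are made up for
** the example, and the usual stdio.h and sqlite3.h declarations are assumed;
** "quick_check" can be substituted for "integrity_check"):
**
**     // Each result row is either the single text value "ok" or a
**     // human-readable description of one problem that was found.
**     static int showIntegrityRow(void *pUnused, int nCol, char **azVal, char **azCol){
**       printf("%s\n", azVal[0]);
**       return 0;
**     }
**     ...
**     rc = sqlite3_exec(db, "PRAGMA integrity_check(20)", showIntegrityRow, 0, 0);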
*/ assert( iDb>=0 ); assert( iDb==0 || pId2->z ); if( pId2->z==0 ) iDb = -1; /* Initialize the VDBE program */ pParse->nMem = 6; sqlite3VdbeSetNumCols(v, 1); sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "integrity_check", SQLITE_STATIC); /* Set the maximum error count */ mxErr = SQLITE_INTEGRITY_CHECK_ERROR_MAX; if( zRight ){ sqlite3GetInt32(zRight, &mxErr); if( mxErr<=0 ){ mxErr = SQLITE_INTEGRITY_CHECK_ERROR_MAX; } } sqlite3VdbeAddOp2(v, OP_Integer, mxErr, 1); /* reg[1] holds errors left */ /* Do an integrity check on each database file */ for(i=0; inDb; i++){ HashElem *x; Hash *pTbls; int cnt = 0; if( OMIT_TEMPDB && i==1 ) continue; if( iDb>=0 && i!=iDb ) continue; sqlite3CodeVerifySchema(pParse, i); addr = sqlite3VdbeAddOp1(v, OP_IfPos, 1); /* Halt if out of errors */ VdbeCoverage(v); sqlite3VdbeAddOp2(v, OP_Halt, 0, 0); sqlite3VdbeJumpHere(v, addr); /* Do an integrity check of the B-Tree ** ** Begin by filling registers 2, 3, ... with the root pages numbers ** for all tables and indices in the database. */ assert( sqlite3SchemaMutexHeld(db, i, 0) ); pTbls = &db->aDb[i].pSchema->tblHash; for(x=sqliteHashFirst(pTbls); x; x=sqliteHashNext(x)){ Table *pTab = sqliteHashData(x); Index *pIdx; if( HasRowid(pTab) ){ sqlite3VdbeAddOp2(v, OP_Integer, pTab->tnum, 2+cnt); VdbeComment((v, "%s", pTab->zName)); cnt++; } for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ sqlite3VdbeAddOp2(v, OP_Integer, pIdx->tnum, 2+cnt); VdbeComment((v, "%s", pIdx->zName)); cnt++; } } /* Make sure sufficient number of registers have been allocated */ pParse->nMem = MAX( pParse->nMem, cnt+8 ); /* Do the b-tree integrity checks */ sqlite3VdbeAddOp3(v, OP_IntegrityCk, 2, cnt, 1); sqlite3VdbeChangeP5(v, (u8)i); addr = sqlite3VdbeAddOp1(v, OP_IsNull, 2); VdbeCoverage(v); sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, sqlite3MPrintf(db, "*** in database %s ***\n", db->aDb[i].zName), P4_DYNAMIC); sqlite3VdbeAddOp3(v, OP_Move, 2, 4, 1); sqlite3VdbeAddOp3(v, OP_Concat, 4, 3, 2); sqlite3VdbeAddOp2(v, OP_ResultRow, 2, 1); sqlite3VdbeJumpHere(v, addr); /* Make sure all the indices are constructed correctly. */ for(x=sqliteHashFirst(pTbls); x && !isQuick; x=sqliteHashNext(x)){ Table *pTab = sqliteHashData(x); Index *pIdx, *pPk; Index *pPrior = 0; int loopTop; int iDataCur, iIdxCur; int r1 = -1; if( pTab->pIndex==0 ) continue; pPk = HasRowid(pTab) ? 
0 : sqlite3PrimaryKeyIndex(pTab); addr = sqlite3VdbeAddOp1(v, OP_IfPos, 1); /* Stop if out of errors */ VdbeCoverage(v); sqlite3VdbeAddOp2(v, OP_Halt, 0, 0); sqlite3VdbeJumpHere(v, addr); sqlite3ExprCacheClear(pParse); sqlite3OpenTableAndIndices(pParse, pTab, OP_OpenRead, 1, 0, &iDataCur, &iIdxCur); sqlite3VdbeAddOp2(v, OP_Integer, 0, 7); for(j=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, j++){ sqlite3VdbeAddOp2(v, OP_Integer, 0, 8+j); /* index entries counter */ } pParse->nMem = MAX(pParse->nMem, 8+j); sqlite3VdbeAddOp2(v, OP_Rewind, iDataCur, 0); VdbeCoverage(v); loopTop = sqlite3VdbeAddOp2(v, OP_AddImm, 7, 1); /* Verify that all NOT NULL columns really are NOT NULL */ for(j=0; jnCol; j++){ char *zErr; int jmp2, jmp3; if( j==pTab->iPKey ) continue; if( pTab->aCol[j].notNull==0 ) continue; sqlite3ExprCodeGetColumnOfTable(v, pTab, iDataCur, j, 3); sqlite3VdbeChangeP5(v, OPFLAG_TYPEOFARG); jmp2 = sqlite3VdbeAddOp1(v, OP_NotNull, 3); VdbeCoverage(v); sqlite3VdbeAddOp2(v, OP_AddImm, 1, -1); /* Decrement error limit */ zErr = sqlite3MPrintf(db, "NULL value in %s.%s", pTab->zName, pTab->aCol[j].zName); sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, zErr, P4_DYNAMIC); sqlite3VdbeAddOp2(v, OP_ResultRow, 3, 1); jmp3 = sqlite3VdbeAddOp1(v, OP_IfPos, 1); VdbeCoverage(v); sqlite3VdbeAddOp0(v, OP_Halt); sqlite3VdbeJumpHere(v, jmp2); sqlite3VdbeJumpHere(v, jmp3); } /* Validate index entries for the current row */ for(j=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, j++){ int jmp2, jmp3, jmp4, jmp5; int ckUniq = sqlite3VdbeMakeLabel(v); if( pPk==pIdx ) continue; r1 = sqlite3GenerateIndexKey(pParse, pIdx, iDataCur, 0, 0, &jmp3, pPrior, r1); pPrior = pIdx; sqlite3VdbeAddOp2(v, OP_AddImm, 8+j, 1); /* increment entry count */ /* Verify that an index entry exists for the current table row */ jmp2 = sqlite3VdbeAddOp4Int(v, OP_Found, iIdxCur+j, ckUniq, r1, pIdx->nColumn); VdbeCoverage(v); sqlite3VdbeAddOp2(v, OP_AddImm, 1, -1); /* Decrement error limit */ sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, "row ", P4_STATIC); sqlite3VdbeAddOp3(v, OP_Concat, 7, 3, 3); sqlite3VdbeAddOp4(v, OP_String8, 0, 4, 0, " missing from index ", P4_STATIC); sqlite3VdbeAddOp3(v, OP_Concat, 4, 3, 3); jmp5 = sqlite3VdbeAddOp4(v, OP_String8, 0, 4, 0, pIdx->zName, P4_TRANSIENT); sqlite3VdbeAddOp3(v, OP_Concat, 4, 3, 3); sqlite3VdbeAddOp2(v, OP_ResultRow, 3, 1); jmp4 = sqlite3VdbeAddOp1(v, OP_IfPos, 1); VdbeCoverage(v); sqlite3VdbeAddOp0(v, OP_Halt); sqlite3VdbeJumpHere(v, jmp2); /* For UNIQUE indexes, verify that only one entry exists with the ** current key. 
The entry is unique if (1) any column is NULL ** or (2) the next entry has a different key */ if( IsUniqueIndex(pIdx) ){ int uniqOk = sqlite3VdbeMakeLabel(v); int jmp6; int kk; for(kk=0; kknKeyCol; kk++){ int iCol = pIdx->aiColumn[kk]; assert( iCol>=0 && iColnCol ); if( pTab->aCol[iCol].notNull ) continue; sqlite3VdbeAddOp2(v, OP_IsNull, r1+kk, uniqOk); VdbeCoverage(v); } jmp6 = sqlite3VdbeAddOp1(v, OP_Next, iIdxCur+j); VdbeCoverage(v); sqlite3VdbeAddOp2(v, OP_Goto, 0, uniqOk); sqlite3VdbeJumpHere(v, jmp6); sqlite3VdbeAddOp4Int(v, OP_IdxGT, iIdxCur+j, uniqOk, r1, pIdx->nKeyCol); VdbeCoverage(v); sqlite3VdbeAddOp2(v, OP_AddImm, 1, -1); /* Decrement error limit */ sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, "non-unique entry in index ", P4_STATIC); sqlite3VdbeAddOp2(v, OP_Goto, 0, jmp5); sqlite3VdbeResolveLabel(v, uniqOk); } sqlite3VdbeJumpHere(v, jmp4); sqlite3ResolvePartIdxLabel(pParse, jmp3); } sqlite3VdbeAddOp2(v, OP_Next, iDataCur, loopTop); VdbeCoverage(v); sqlite3VdbeJumpHere(v, loopTop-1); #ifndef SQLITE_OMIT_BTREECOUNT sqlite3VdbeAddOp4(v, OP_String8, 0, 2, 0, "wrong # of entries in index ", P4_STATIC); for(j=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, j++){ if( pPk==pIdx ) continue; addr = sqlite3VdbeCurrentAddr(v); sqlite3VdbeAddOp2(v, OP_IfPos, 1, addr+2); VdbeCoverage(v); sqlite3VdbeAddOp2(v, OP_Halt, 0, 0); sqlite3VdbeAddOp2(v, OP_Count, iIdxCur+j, 3); sqlite3VdbeAddOp3(v, OP_Eq, 8+j, addr+8, 3); VdbeCoverage(v); sqlite3VdbeChangeP5(v, SQLITE_NOTNULL); sqlite3VdbeAddOp2(v, OP_AddImm, 1, -1); sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, pIdx->zName, P4_TRANSIENT); sqlite3VdbeAddOp3(v, OP_Concat, 3, 2, 7); sqlite3VdbeAddOp2(v, OP_ResultRow, 7, 1); } #endif /* SQLITE_OMIT_BTREECOUNT */ } } addr = sqlite3VdbeAddOpList(v, ArraySize(endCode), endCode, iLn); sqlite3VdbeChangeP3(v, addr, -mxErr); sqlite3VdbeJumpHere(v, addr); sqlite3VdbeChangeP4(v, addr+1, "ok", P4_STATIC); } break; #endif /* SQLITE_OMIT_INTEGRITY_CHECK */ #ifndef SQLITE_OMIT_UTF16 /* ** PRAGMA encoding ** PRAGMA encoding = "utf-8"|"utf-16"|"utf-16le"|"utf-16be" ** ** In its first form, this pragma returns the encoding of the main ** database. If the database is not initialized, it is initialized now. ** ** The second form of this pragma is a no-op if the main database file ** has not already been initialized. In this case it sets the default ** encoding that will be used for the main database file if a new file ** is created. If an existing main database file is opened, then the ** default text encoding for the existing database is used. ** ** In all cases new databases created using the ATTACH command are ** created to use the same default text encoding as the main database. If ** the main database has not been initialized and/or created when ATTACH ** is executed, this is done before the ATTACH operation. ** ** In the second form this pragma sets the text encoding to be used in ** new database files created using this database handle. 
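**
** For illustration, a hedged sketch of the intended call sequence from the
** C API (the file and table names are hypothetical; the pragma only takes
** effect here because it runs before anything forces the schema of the
** brand-new file to be read or written):
**
**     sqlite3 *db;
**     sqlite3_open("new.db", &db);                          // empty, never-written file
**     sqlite3_exec(db, "PRAGMA encoding='UTF-16le'", 0, 0, 0);
**     sqlite3_exec(db, "CREATE TABLE t1(a)", 0, 0, 0);      // file is created as UTF-16le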
It is only ** useful if invoked immediately after the main database i */ case PragTyp_ENCODING: { static const struct EncName { char *zName; u8 enc; } encnames[] = { { "UTF8", SQLITE_UTF8 }, { "UTF-8", SQLITE_UTF8 }, /* Must be element [1] */ { "UTF-16le", SQLITE_UTF16LE }, /* Must be element [2] */ { "UTF-16be", SQLITE_UTF16BE }, /* Must be element [3] */ { "UTF16le", SQLITE_UTF16LE }, { "UTF16be", SQLITE_UTF16BE }, { "UTF-16", 0 }, /* SQLITE_UTF16NATIVE */ { "UTF16", 0 }, /* SQLITE_UTF16NATIVE */ { 0, 0 } }; const struct EncName *pEnc; if( !zRight ){ /* "PRAGMA encoding" */ if( sqlite3ReadSchema(pParse) ) goto pragma_out; sqlite3VdbeSetNumCols(v, 1); sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "encoding", SQLITE_STATIC); sqlite3VdbeAddOp2(v, OP_String8, 0, 1); assert( encnames[SQLITE_UTF8].enc==SQLITE_UTF8 ); assert( encnames[SQLITE_UTF16LE].enc==SQLITE_UTF16LE ); assert( encnames[SQLITE_UTF16BE].enc==SQLITE_UTF16BE ); sqlite3VdbeChangeP4(v, -1, encnames[ENC(pParse->db)].zName, P4_STATIC); sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 1); }else{ /* "PRAGMA encoding = XXX" */ /* Only change the value of sqlite.enc if the database handle is not ** initialized. If the main database exists, the new sqlite.enc value ** will be overwritten when the schema is next loaded. If it does not ** already exists, it will be created to use the new encoding value. */ if( !(DbHasProperty(db, 0, DB_SchemaLoaded)) || DbHasProperty(db, 0, DB_Empty) ){ for(pEnc=&encnames[0]; pEnc->zName; pEnc++){ if( 0==sqlite3StrICmp(zRight, pEnc->zName) ){ SCHEMA_ENC(db) = ENC(db) = pEnc->enc ? pEnc->enc : SQLITE_UTF16NATIVE; break; } } if( !pEnc->zName ){ sqlite3ErrorMsg(pParse, "unsupported encoding: %s", zRight); } } } } break; #endif /* SQLITE_OMIT_UTF16 */ #ifndef SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS /* ** PRAGMA [database.]schema_version ** PRAGMA [database.]schema_version = ** ** PRAGMA [database.]user_version ** PRAGMA [database.]user_version = ** ** PRAGMA [database.]freelist_count = ** ** PRAGMA [database.]application_id ** PRAGMA [database.]application_id = ** ** The pragma's schema_version and user_version are used to set or get ** the value of the schema-version and user-version, respectively. Both ** the schema-version and the user-version are 32-bit signed integers ** stored in the database header. ** ** The schema-cookie is usually only manipulated internally by SQLite. It ** is incremented by SQLite whenever the database schema is modified (by ** creating or dropping a table or index). The schema version is used by ** SQLite each time a query is executed to ensure that the internal cache ** of the schema used when compiling the SQL query matches the schema of ** the database against which the compiled query is actually executed. ** Subverting this mechanism by using "PRAGMA schema_version" to modify ** the schema-version is potentially dangerous and may lead to program ** crashes or database corruption. Use with caution! ** ** The user-version is not used internally by SQLite. It may be used by ** applications for any purpose. 
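**
** As an illustration of typical application use (all names and version
** numbers here are invented), user_version often serves as a hand-rolled
** schema-migration counter:
**
**     sqlite3_stmt *pStmt;
**     int iVer = 0;
**     sqlite3_prepare_v2(db, "PRAGMA user_version", -1, &pStmt, 0);
**     if( sqlite3_step(pStmt)==SQLITE_ROW ) iVer = sqlite3_column_int(pStmt, 0);
**     sqlite3_finalize(pStmt);
**     if( iVer==1 ){
**       sqlite3_exec(db, "ALTER TABLE t1 ADD COLUMN c2 TEXT", 0, 0, 0);
**       sqlite3_exec(db, "PRAGMA user_version = 2", 0, 0, 0);
**     }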
*/ case PragTyp_HEADER_VALUE: { int iCookie = pPragma->iArg; /* Which cookie to read or write */ sqlite3VdbeUsesBtree(v, iDb); if( zRight && (pPragma->mPragFlag & PragFlag_ReadOnly)==0 ){ /* Write the specified cookie value */ static const VdbeOpList setCookie[] = { { OP_Transaction, 0, 1, 0}, /* 0 */ { OP_Integer, 0, 1, 0}, /* 1 */ { OP_SetCookie, 0, 0, 1}, /* 2 */ }; int addr = sqlite3VdbeAddOpList(v, ArraySize(setCookie), setCookie, 0); sqlite3VdbeChangeP1(v, addr, iDb); sqlite3VdbeChangeP1(v, addr+1, sqlite3Atoi(zRight)); sqlite3VdbeChangeP1(v, addr+2, iDb); sqlite3VdbeChangeP2(v, addr+2, iCookie); }else{ /* Read the specified cookie value */ static const VdbeOpList readCookie[] = { { OP_Transaction, 0, 0, 0}, /* 0 */ { OP_ReadCookie, 0, 1, 0}, /* 1 */ { OP_ResultRow, 1, 1, 0} }; int addr = sqlite3VdbeAddOpList(v, ArraySize(readCookie), readCookie, 0); sqlite3VdbeChangeP1(v, addr, iDb); sqlite3VdbeChangeP1(v, addr+1, iDb); sqlite3VdbeChangeP3(v, addr+1, iCookie); sqlite3VdbeSetNumCols(v, 1); sqlite3VdbeSetColName(v, 0, COLNAME_NAME, zLeft, SQLITE_TRANSIENT); } } break; #endif /* SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS */ #ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS /* ** PRAGMA compile_options ** ** Return the names of all compile-time options used in this build, ** one option per row. */ case PragTyp_COMPILE_OPTIONS: { int i = 0; const char *zOpt; sqlite3VdbeSetNumCols(v, 1); pParse->nMem = 1; sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "compile_option", SQLITE_STATIC); while( (zOpt = sqlite3_compileoption_get(i++))!=0 ){ sqlite3VdbeAddOp4(v, OP_String8, 0, 1, 0, zOpt, 0); sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 1); } } break; #endif /* SQLITE_OMIT_COMPILEOPTION_DIAGS */ #ifndef SQLITE_OMIT_WAL /* ** PRAGMA [database.]wal_checkpoint = passive|full|restart|truncate ** ** Checkpoint the database. */ case PragTyp_WAL_CHECKPOINT: { int iBt = (pId2->z?iDb:SQLITE_MAX_ATTACHED); int eMode = SQLITE_CHECKPOINT_PASSIVE; if( zRight ){ if( sqlite3StrICmp(zRight, "full")==0 ){ eMode = SQLITE_CHECKPOINT_FULL; }else if( sqlite3StrICmp(zRight, "restart")==0 ){ eMode = SQLITE_CHECKPOINT_RESTART; }else if( sqlite3StrICmp(zRight, "truncate")==0 ){ eMode = SQLITE_CHECKPOINT_TRUNCATE; } } sqlite3VdbeSetNumCols(v, 3); pParse->nMem = 3; sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "busy", SQLITE_STATIC); sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "log", SQLITE_STATIC); sqlite3VdbeSetColName(v, 2, COLNAME_NAME, "checkpointed", SQLITE_STATIC); sqlite3VdbeAddOp3(v, OP_Checkpoint, iBt, eMode, 1); sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 3); } break; /* ** PRAGMA wal_autocheckpoint ** PRAGMA wal_autocheckpoint = N ** ** Configure a database connection to automatically checkpoint a database ** after accumulating N frames in the log. Or query for the current value ** of N. */ case PragTyp_WAL_AUTOCHECKPOINT: { if( zRight ){ sqlite3_wal_autocheckpoint(db, sqlite3Atoi(zRight)); } returnSingleInt(pParse, "wal_autocheckpoint", db->xWalCallback==sqlite3WalDefaultHook ? SQLITE_PTR_TO_INT(db->pWalArg) : 0); } break; #endif /* ** PRAGMA shrink_memory ** ** IMPLEMENTATION-OF: R-23445-46109 This pragma causes the database ** connection on which it is invoked to free up as much memory as it ** can, by calling sqlite3_db_release_memory(). */ case PragTyp_SHRINK_MEMORY: { sqlite3_db_release_memory(db); break; } /* ** PRAGMA busy_timeout ** PRAGMA busy_timeout = N ** ** Call sqlite3_busy_timeout(db, N). Return the current timeout value ** if one is set. If no busy handler or a different busy handler is set ** then 0 is returned. 
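**
** For illustration, the pragma form is equivalent to calling the C-level
** interface directly (2000 is an arbitrary example value, in milliseconds):
**
**     sqlite3_busy_timeout(db, 2000);
**     // ...has the same effect as:
**     sqlite3_exec(db, "PRAGMA busy_timeout = 2000", 0, 0, 0);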
Setting the busy_timeout to 0 or negative ** disables the timeout. */ /*case PragTyp_BUSY_TIMEOUT*/ default: { assert( pPragma->ePragTyp==PragTyp_BUSY_TIMEOUT ); if( zRight ){ sqlite3_busy_timeout(db, sqlite3Atoi(zRight)); } returnSingleInt(pParse, "timeout", db->busyTimeout); break; } /* ** PRAGMA soft_heap_limit ** PRAGMA soft_heap_limit = N ** ** IMPLEMENTATION-OF: R-26343-45930 This pragma invokes the ** sqlite3_soft_heap_limit64() interface with the argument N, if N is ** specified and is a non-negative integer. ** IMPLEMENTATION-OF: R-64451-07163 The soft_heap_limit pragma always ** returns the same integer that would be returned by the ** sqlite3_soft_heap_limit64(-1) C-language function. */ case PragTyp_SOFT_HEAP_LIMIT: { sqlite3_int64 N; if( zRight && sqlite3DecOrHexToI64(zRight, &N)==SQLITE_OK ){ sqlite3_soft_heap_limit64(N); } returnSingleInt(pParse, "soft_heap_limit", sqlite3_soft_heap_limit64(-1)); break; } /* ** PRAGMA threads ** PRAGMA threads = N ** ** Configure the maximum number of worker threads. Return the new ** maximum, which might be less than requested. */ case PragTyp_THREADS: { sqlite3_int64 N; if( zRight && sqlite3DecOrHexToI64(zRight, &N)==SQLITE_OK && N>=0 ){ sqlite3_limit(db, SQLITE_LIMIT_WORKER_THREADS, (int)(N&0x7fffffff)); } returnSingleInt(pParse, "threads", sqlite3_limit(db, SQLITE_LIMIT_WORKER_THREADS, -1)); break; } #if defined(SQLITE_DEBUG) || defined(SQLITE_TEST) /* ** Report the current state of file logs for all databases */ case PragTyp_LOCK_STATUS: { static const char *const azLockName[] = { "unlocked", "shared", "reserved", "pending", "exclusive" }; int i; sqlite3VdbeSetNumCols(v, 2); pParse->nMem = 2; sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "database", SQLITE_STATIC); sqlite3VdbeSetColName(v, 1, COLNAME_NAME, "status", SQLITE_STATIC); for(i=0; inDb; i++){ Btree *pBt; const char *zState = "unknown"; int j; if( db->aDb[i].zName==0 ) continue; sqlite3VdbeAddOp4(v, OP_String8, 0, 1, 0, db->aDb[i].zName, P4_STATIC); pBt = db->aDb[i].pBt; if( pBt==0 || sqlite3BtreePager(pBt)==0 ){ zState = "closed"; }else if( sqlite3_file_control(db, i ? db->aDb[i].zName : 0, SQLITE_FCNTL_LOCKSTATE, &j)==SQLITE_OK ){ zState = azLockName[j]; } sqlite3VdbeAddOp4(v, OP_String8, 0, 2, 0, zState, P4_STATIC); sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 2); } break; } #endif #ifdef SQLITE_HAS_CODEC case PragTyp_KEY: { if( zRight ) sqlite3_key_v2(db, zDb, zRight, sqlite3Strlen30(zRight)); break; } case PragTyp_REKEY: { if( zRight ) sqlite3_rekey_v2(db, zDb, zRight, sqlite3Strlen30(zRight)); break; } case PragTyp_HEXKEY: { if( zRight ){ u8 iByte; int i; char zKey[40]; for(i=0, iByte=0; idb; if( !db->mallocFailed && (db->flags & SQLITE_RecoveryMode)==0 ){ char *z; if( zObj==0 ) zObj = "?"; z = sqlite3_mprintf("malformed database schema (%s)", zObj); if( z && zExtra ) z = sqlite3_mprintf("%z - %s", z, zExtra); sqlite3DbFree(db, *pData->pzErrMsg); *pData->pzErrMsg = z; if( z==0 ) db->mallocFailed = 1; } pData->rc = db->mallocFailed ? SQLITE_NOMEM : SQLITE_CORRUPT_BKPT; } /* ** This is the callback routine for the code that initializes the ** database. See sqlite3Init() below for additional information. ** This routine is also called from the OP_ParseSchema opcode of the VDBE. ** ** Each callback contains the following information: ** ** argv[0] = name of thing being created ** argv[1] = root page number for table or index. 0 for trigger or view. ** argv[2] = SQL text for the CREATE statement. 
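**
** For a concrete (invented) example, a CREATE TABLE row read back out of
** the sqlite_master table would reach this callback as:
**
**     argv[0] = "t1"
**     argv[1] = "2"
**     argv[2] = "CREATE TABLE t1(a INTEGER PRIMARY KEY, b TEXT)"
**
** whereas an automatic index built for a UNIQUE or PRIMARY KEY constraint
** arrives with a valid root page number but a NULL SQL column.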
** */ SQLITE_PRIVATE int sqlite3InitCallback(void *pInit, int argc, char **argv, char **NotUsed){ InitData *pData = (InitData*)pInit; sqlite3 *db = pData->db; int iDb = pData->iDb; assert( argc==3 ); UNUSED_PARAMETER2(NotUsed, argc); assert( sqlite3_mutex_held(db->mutex) ); DbClearProperty(db, iDb, DB_Empty); if( db->mallocFailed ){ corruptSchema(pData, argv[0], 0); return 1; } assert( iDb>=0 && iDbnDb ); if( argv==0 ) return 0; /* Might happen if EMPTY_RESULT_CALLBACKS are on */ if( argv[1]==0 ){ corruptSchema(pData, argv[0], 0); }else if( sqlite3_strnicmp(argv[2],"create ",7)==0 ){ /* Call the parser to process a CREATE TABLE, INDEX or VIEW. ** But because db->init.busy is set to 1, no VDBE code is generated ** or executed. All the parser does is build the internal data ** structures that describe the table, index, or view. */ int rc; sqlite3_stmt *pStmt; TESTONLY(int rcp); /* Return code from sqlite3_prepare() */ assert( db->init.busy ); db->init.iDb = iDb; db->init.newTnum = sqlite3Atoi(argv[1]); db->init.orphanTrigger = 0; TESTONLY(rcp = ) sqlite3_prepare(db, argv[2], -1, &pStmt, 0); rc = db->errCode; assert( (rc&0xFF)==(rcp&0xFF) ); db->init.iDb = 0; if( SQLITE_OK!=rc ){ if( db->init.orphanTrigger ){ assert( iDb==1 ); }else{ pData->rc = rc; if( rc==SQLITE_NOMEM ){ db->mallocFailed = 1; }else if( rc!=SQLITE_INTERRUPT && (rc&0xFF)!=SQLITE_LOCKED ){ corruptSchema(pData, argv[0], sqlite3_errmsg(db)); } } } sqlite3_finalize(pStmt); }else if( argv[0]==0 || (argv[2]!=0 && argv[2][0]!=0) ){ corruptSchema(pData, argv[0], 0); }else{ /* If the SQL column is blank it means this is an index that ** was created to be the PRIMARY KEY or to fulfill a UNIQUE ** constraint for a CREATE TABLE. The index should have already ** been created when we processed the CREATE TABLE. All we have ** to do here is record the root page number for that index. */ Index *pIndex; pIndex = sqlite3FindIndex(db, argv[0], db->aDb[iDb].zName); if( pIndex==0 ){ /* This can occur if there exists an index on a TEMP table which ** has the same name as another index on a permanent index. Since ** the permanent table is hidden by the TEMP table, we can also ** safely ignore the index on the permanent table. */ /* Do Nothing */; }else if( sqlite3GetInt32(argv[1], &pIndex->tnum)==0 ){ corruptSchema(pData, argv[0], "invalid rootpage"); } } return 0; } /* ** Attempt to read the database schema and initialize internal ** data structures for a single database file. The index of the ** database file is given by iDb. iDb==0 is used for the main ** database. iDb==1 should never be used. iDb>=2 is used for ** auxiliary databases. Return one of the SQLITE_ error codes to ** indicate success or failure. 
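**
** The schema information read here comes from ordinary tables, so the same
** rows can also be inspected from application code.  A hedged sketch (the
** table name 't1' is hypothetical):
**
**     sqlite3_stmt *pStmt;
**     sqlite3_prepare_v2(db,
**         "SELECT type, name, tbl_name, rootpage, sql"
**         " FROM sqlite_master WHERE tbl_name='t1'", -1, &pStmt, 0);
**     while( sqlite3_step(pStmt)==SQLITE_ROW ){
**       const char *zSql = (const char*)sqlite3_column_text(pStmt, 4);
**       printf("%s\n", zSql ? zSql : "(null)");             // the CREATE statement text
**     }
**     sqlite3_finalize(pStmt);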
*/ static int sqlite3InitOne(sqlite3 *db, int iDb, char **pzErrMsg){ int rc; int i; #ifndef SQLITE_OMIT_DEPRECATED int size; #endif Table *pTab; Db *pDb; char const *azArg[4]; int meta[5]; InitData initData; char const *zMasterSchema; char const *zMasterName; int openedTransaction = 0; /* ** The master database table has a structure like this */ static const char master_schema[] = "CREATE TABLE sqlite_master(\n" " type text,\n" " name text,\n" " tbl_name text,\n" " rootpage integer,\n" " sql text\n" ")" ; #ifndef SQLITE_OMIT_TEMPDB static const char temp_master_schema[] = "CREATE TEMP TABLE sqlite_temp_master(\n" " type text,\n" " name text,\n" " tbl_name text,\n" " rootpage integer,\n" " sql text\n" ")" ; #else #define temp_master_schema 0 #endif assert( iDb>=0 && iDbnDb ); assert( db->aDb[iDb].pSchema ); assert( sqlite3_mutex_held(db->mutex) ); assert( iDb==1 || sqlite3BtreeHoldsMutex(db->aDb[iDb].pBt) ); /* zMasterSchema and zInitScript are set to point at the master schema ** and initialisation script appropriate for the database being ** initialized. zMasterName is the name of the master table. */ if( !OMIT_TEMPDB && iDb==1 ){ zMasterSchema = temp_master_schema; }else{ zMasterSchema = master_schema; } zMasterName = SCHEMA_TABLE(iDb); /* Construct the schema tables. */ azArg[0] = zMasterName; azArg[1] = "1"; azArg[2] = zMasterSchema; azArg[3] = 0; initData.db = db; initData.iDb = iDb; initData.rc = SQLITE_OK; initData.pzErrMsg = pzErrMsg; sqlite3InitCallback(&initData, 3, (char **)azArg, 0); if( initData.rc ){ rc = initData.rc; goto error_out; } pTab = sqlite3FindTable(db, zMasterName, db->aDb[iDb].zName); if( ALWAYS(pTab) ){ pTab->tabFlags |= TF_Readonly; } /* Create a cursor to hold the database open */ pDb = &db->aDb[iDb]; if( pDb->pBt==0 ){ if( !OMIT_TEMPDB && ALWAYS(iDb==1) ){ DbSetProperty(db, 1, DB_SchemaLoaded); } return SQLITE_OK; } /* If there is not already a read-only (or read-write) transaction opened ** on the b-tree database, open one now. If a transaction is opened, it ** will be closed before this function returns. */ sqlite3BtreeEnter(pDb->pBt); if( !sqlite3BtreeIsInReadTrans(pDb->pBt) ){ rc = sqlite3BtreeBeginTrans(pDb->pBt, 0); if( rc!=SQLITE_OK ){ sqlite3SetString(pzErrMsg, db, sqlite3ErrStr(rc)); goto initone_error_out; } openedTransaction = 1; } /* Get the database meta information. ** ** Meta values are as follows: ** meta[0] Schema cookie. Changes with each schema change. ** meta[1] File format of schema layer. ** meta[2] Size of the page cache. ** meta[3] Largest rootpage (auto/incr_vacuum mode) ** meta[4] Db text encoding. 1:UTF-8 2:UTF-16LE 3:UTF-16BE ** meta[5] User version ** meta[6] Incremental vacuum mode ** meta[7] unused ** meta[8] unused ** meta[9] unused ** ** Note: The #defined SQLITE_UTF* symbols in sqliteInt.h correspond to ** the possible values of meta[4]. */ for(i=0; ipBt, i+1, (u32 *)&meta[i]); } pDb->pSchema->schema_cookie = meta[BTREE_SCHEMA_VERSION-1]; /* If opening a non-empty database, check the text encoding. For the ** main database, set sqlite3.enc to the encoding of the main database. ** For an attached db, it is an error if the encoding is not the same ** as sqlite3.enc. */ if( meta[BTREE_TEXT_ENCODING-1] ){ /* text encoding */ if( iDb==0 ){ #ifndef SQLITE_OMIT_UTF16 u8 encoding; /* If opening the main database, set ENC(db). 
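** (The value examined here is the text-encoding field stored at byte offset
** 56 of the database header: 1 means UTF-8, 2 means UTF-16le and 3 means
** UTF-16be.  The "&3" mask together with the zero test below defensively
** map any out-of-range value back to UTF-8.)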
*/ encoding = (u8)meta[BTREE_TEXT_ENCODING-1] & 3; if( encoding==0 ) encoding = SQLITE_UTF8; ENC(db) = encoding; #else ENC(db) = SQLITE_UTF8; #endif }else{ /* If opening an attached database, the encoding much match ENC(db) */ if( meta[BTREE_TEXT_ENCODING-1]!=ENC(db) ){ sqlite3SetString(pzErrMsg, db, "attached databases must use the same" " text encoding as main database"); rc = SQLITE_ERROR; goto initone_error_out; } } }else{ DbSetProperty(db, iDb, DB_Empty); } pDb->pSchema->enc = ENC(db); if( pDb->pSchema->cache_size==0 ){ #ifndef SQLITE_OMIT_DEPRECATED size = sqlite3AbsInt32(meta[BTREE_DEFAULT_CACHE_SIZE-1]); if( size==0 ){ size = SQLITE_DEFAULT_CACHE_SIZE; } pDb->pSchema->cache_size = size; #else pDb->pSchema->cache_size = SQLITE_DEFAULT_CACHE_SIZE; #endif sqlite3BtreeSetCacheSize(pDb->pBt, pDb->pSchema->cache_size); } /* ** file_format==1 Version 3.0.0. ** file_format==2 Version 3.1.3. // ALTER TABLE ADD COLUMN ** file_format==3 Version 3.1.4. // ditto but with non-NULL defaults ** file_format==4 Version 3.3.0. // DESC indices. Boolean constants */ pDb->pSchema->file_format = (u8)meta[BTREE_FILE_FORMAT-1]; if( pDb->pSchema->file_format==0 ){ pDb->pSchema->file_format = 1; } if( pDb->pSchema->file_format>SQLITE_MAX_FILE_FORMAT ){ sqlite3SetString(pzErrMsg, db, "unsupported file format"); rc = SQLITE_ERROR; goto initone_error_out; } /* Ticket #2804: When we open a database in the newer file format, ** clear the legacy_file_format pragma flag so that a VACUUM will ** not downgrade the database and thus invalidate any descending ** indices that the user might have created. */ if( iDb==0 && meta[BTREE_FILE_FORMAT-1]>=4 ){ db->flags &= ~SQLITE_LegacyFileFmt; } /* Read the schema information out of the schema tables */ assert( db->init.busy ); { char *zSql; zSql = sqlite3MPrintf(db, "SELECT name, rootpage, sql FROM '%q'.%s ORDER BY rowid", db->aDb[iDb].zName, zMasterName); #ifndef SQLITE_OMIT_AUTHORIZATION { sqlite3_xauth xAuth; xAuth = db->xAuth; db->xAuth = 0; #endif rc = sqlite3_exec(db, zSql, sqlite3InitCallback, &initData, 0); #ifndef SQLITE_OMIT_AUTHORIZATION db->xAuth = xAuth; } #endif if( rc==SQLITE_OK ) rc = initData.rc; sqlite3DbFree(db, zSql); #ifndef SQLITE_OMIT_ANALYZE if( rc==SQLITE_OK ){ sqlite3AnalysisLoad(db, iDb); } #endif } if( db->mallocFailed ){ rc = SQLITE_NOMEM; sqlite3ResetAllSchemasOfConnection(db); } if( rc==SQLITE_OK || (db->flags&SQLITE_RecoveryMode)){ /* Black magic: If the SQLITE_RecoveryMode flag is set, then consider ** the schema loaded, even if errors occurred. In this situation the ** current sqlite3_prepare() operation will fail, but the following one ** will attempt to compile the supplied statement against whatever subset ** of the schema was loaded before the error occurred. The primary ** purpose of this is to allow access to the sqlite_master table ** even when its contents have been corrupted. */ DbSetProperty(db, iDb, DB_SchemaLoaded); rc = SQLITE_OK; } /* Jump here for an error that occurs after successfully allocating ** curMain and calling sqlite3BtreeEnter(). For an error that occurs ** before that point, jump to error_out. */ initone_error_out: if( openedTransaction ){ sqlite3BtreeCommit(pDb->pBt); } sqlite3BtreeLeave(pDb->pBt); error_out: if( rc==SQLITE_NOMEM || rc==SQLITE_IOERR_NOMEM ){ db->mallocFailed = 1; } return rc; } /* ** Initialize all database files - the main database file, the file ** used to store temporary tables, and any additional database files ** created using ATTACH statements. Return a success code. 
If an ** error occurs, write an error message into *pzErrMsg. ** ** After a database is initialized, the DB_SchemaLoaded bit is set ** bit is set in the flags field of the Db structure. If the database ** file was of zero-length, then the DB_Empty flag is also set. */ SQLITE_PRIVATE int sqlite3Init(sqlite3 *db, char **pzErrMsg){ int i, rc; int commit_internal = !(db->flags&SQLITE_InternChanges); assert( sqlite3_mutex_held(db->mutex) ); assert( sqlite3BtreeHoldsMutex(db->aDb[0].pBt) ); assert( db->init.busy==0 ); rc = SQLITE_OK; db->init.busy = 1; ENC(db) = SCHEMA_ENC(db); for(i=0; rc==SQLITE_OK && inDb; i++){ if( DbHasProperty(db, i, DB_SchemaLoaded) || i==1 ) continue; rc = sqlite3InitOne(db, i, pzErrMsg); if( rc ){ sqlite3ResetOneSchema(db, i); } } /* Once all the other databases have been initialized, load the schema ** for the TEMP database. This is loaded last, as the TEMP database ** schema may contain references to objects in other databases. */ #ifndef SQLITE_OMIT_TEMPDB assert( db->nDb>1 ); if( rc==SQLITE_OK && !DbHasProperty(db, 1, DB_SchemaLoaded) ){ rc = sqlite3InitOne(db, 1, pzErrMsg); if( rc ){ sqlite3ResetOneSchema(db, 1); } } #endif db->init.busy = 0; if( rc==SQLITE_OK && commit_internal ){ sqlite3CommitInternalChanges(db); } return rc; } /* ** This routine is a no-op if the database schema is already initialized. ** Otherwise, the schema is loaded. An error code is returned. */ SQLITE_PRIVATE int sqlite3ReadSchema(Parse *pParse){ int rc = SQLITE_OK; sqlite3 *db = pParse->db; assert( sqlite3_mutex_held(db->mutex) ); if( !db->init.busy ){ rc = sqlite3Init(db, &pParse->zErrMsg); } if( rc!=SQLITE_OK ){ pParse->rc = rc; pParse->nErr++; } return rc; } /* ** Check schema cookies in all databases. If any cookie is out ** of date set pParse->rc to SQLITE_SCHEMA. If all schema cookies ** make no changes to pParse->rc. */ static void schemaIsValid(Parse *pParse){ sqlite3 *db = pParse->db; int iDb; int rc; int cookie; assert( pParse->checkSchema ); assert( sqlite3_mutex_held(db->mutex) ); for(iDb=0; iDbnDb; iDb++){ int openedTransaction = 0; /* True if a transaction is opened */ Btree *pBt = db->aDb[iDb].pBt; /* Btree database to read cookie from */ if( pBt==0 ) continue; /* If there is not already a read-only (or read-write) transaction opened ** on the b-tree database, open one now. If a transaction is opened, it ** will be closed immediately after reading the meta-value. */ if( !sqlite3BtreeIsInReadTrans(pBt) ){ rc = sqlite3BtreeBeginTrans(pBt, 0); if( rc==SQLITE_NOMEM || rc==SQLITE_IOERR_NOMEM ){ db->mallocFailed = 1; } if( rc!=SQLITE_OK ) return; openedTransaction = 1; } /* Read the schema cookie from the database. If it does not match the ** value stored as part of the in-memory schema representation, ** set Parse.rc to SQLITE_SCHEMA. */ sqlite3BtreeGetMeta(pBt, BTREE_SCHEMA_VERSION, (u32 *)&cookie); assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); if( cookie!=db->aDb[iDb].pSchema->schema_cookie ){ sqlite3ResetOneSchema(db, iDb); pParse->rc = SQLITE_SCHEMA; } /* Close the transaction, if one was opened. */ if( openedTransaction ){ sqlite3BtreeCommit(pBt); } } } /* ** Convert a schema pointer into the iDb index that indicates ** which database file in db->aDb[] the schema refers to. ** ** If the same database is attached more than once, the first ** attached database is returned. */ SQLITE_PRIVATE int sqlite3SchemaToIndex(sqlite3 *db, Schema *pSchema){ int i = -1000000; /* If pSchema is NULL, then return -1000000. 
This happens when code in ** expr.c is trying to resolve a reference to a transient table (i.e. one ** created by a sub-select). In this case the return value of this ** function should never be used. ** ** We return -1000000 instead of the more usual -1 simply because using ** -1000000 as the incorrect index into db->aDb[] is much ** more likely to cause a segfault than -1 (of course there are assert() ** statements too, but it never hurts to play the odds). */ assert( sqlite3_mutex_held(db->mutex) ); if( pSchema ){ for(i=0; ALWAYS(inDb); i++){ if( db->aDb[i].pSchema==pSchema ){ break; } } assert( i>=0 && inDb ); } return i; } /* ** Free all memory allocations in the pParse object */ SQLITE_PRIVATE void sqlite3ParserReset(Parse *pParse){ if( pParse ){ sqlite3 *db = pParse->db; sqlite3DbFree(db, pParse->aLabel); sqlite3ExprListDelete(db, pParse->pConstExpr); } } /* ** Compile the UTF-8 encoded SQL statement zSql into a statement handle. */ static int sqlite3Prepare( sqlite3 *db, /* Database handle. */ const char *zSql, /* UTF-8 encoded SQL statement. */ int nBytes, /* Length of zSql in bytes. */ int saveSqlFlag, /* True to copy SQL text into the sqlite3_stmt */ Vdbe *pReprepare, /* VM being reprepared */ sqlite3_stmt **ppStmt, /* OUT: A pointer to the prepared statement */ const char **pzTail /* OUT: End of parsed string */ ){ Parse *pParse; /* Parsing context */ char *zErrMsg = 0; /* Error message */ int rc = SQLITE_OK; /* Result code */ int i; /* Loop counter */ /* Allocate the parsing context */ pParse = sqlite3StackAllocZero(db, sizeof(*pParse)); if( pParse==0 ){ rc = SQLITE_NOMEM; goto end_prepare; } pParse->pReprepare = pReprepare; assert( ppStmt && *ppStmt==0 ); assert( !db->mallocFailed ); assert( sqlite3_mutex_held(db->mutex) ); /* Check to verify that it is possible to get a read lock on all ** database schemas. The inability to get a read lock indicates that ** some other database connection is holding a write-lock, which in ** turn means that the other connection has made uncommitted changes ** to the schema. ** ** Were we to proceed and prepare the statement against the uncommitted ** schema changes and if those schema changes are subsequently rolled ** back and different changes are made in their place, then when this ** prepared statement goes to run the schema cookie would fail to detect ** the schema change. Disaster would follow. ** ** This thread is currently holding mutexes on all Btrees (because ** of the sqlite3BtreeEnterAll() in sqlite3LockAndPrepare()) so it ** is not possible for another thread to start a new schema change ** while this routine is running. Hence, we do not need to hold ** locks on the schema, we just need to make sure nobody else is ** holding them. ** ** Note that setting READ_UNCOMMITTED overrides most lock detection, ** but it does *not* override schema lock detection, so this all still ** works even if READ_UNCOMMITTED is set. 
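**
** From the caller's point of view such a conflict surfaces as an
** SQLITE_LOCKED error carrying the message "database schema is locked".
** A minimal, hedged sketch of the usual prepare/step/finalize cycle with
** the _v2 interface (the statement text is hypothetical; because _v2
** retains the SQL, the statement is transparently re-prepared after a
** schema change instead of failing with SQLITE_SCHEMA):
**
**     sqlite3_stmt *pStmt = 0;
**     int rc = sqlite3_prepare_v2(db, "SELECT a, b FROM t1", -1, &pStmt, 0);
**     if( rc==SQLITE_OK ){
**       while( sqlite3_step(pStmt)==SQLITE_ROW ){
**         printf("%d\n", sqlite3_column_int(pStmt, 0));
**       }
**     }
**     sqlite3_finalize(pStmt);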
*/ for(i=0; inDb; i++) { Btree *pBt = db->aDb[i].pBt; if( pBt ){ assert( sqlite3BtreeHoldsMutex(pBt) ); rc = sqlite3BtreeSchemaLocked(pBt); if( rc ){ const char *zDb = db->aDb[i].zName; sqlite3ErrorWithMsg(db, rc, "database schema is locked: %s", zDb); testcase( db->flags & SQLITE_ReadUncommitted ); goto end_prepare; } } } sqlite3VtabUnlockList(db); pParse->db = db; pParse->nQueryLoop = 0; /* Logarithmic, so 0 really means 1 */ if( nBytes>=0 && (nBytes==0 || zSql[nBytes-1]!=0) ){ char *zSqlCopy; int mxLen = db->aLimit[SQLITE_LIMIT_SQL_LENGTH]; testcase( nBytes==mxLen ); testcase( nBytes==mxLen+1 ); if( nBytes>mxLen ){ sqlite3ErrorWithMsg(db, SQLITE_TOOBIG, "statement too long"); rc = sqlite3ApiExit(db, SQLITE_TOOBIG); goto end_prepare; } zSqlCopy = sqlite3DbStrNDup(db, zSql, nBytes); if( zSqlCopy ){ sqlite3RunParser(pParse, zSqlCopy, &zErrMsg); sqlite3DbFree(db, zSqlCopy); pParse->zTail = &zSql[pParse->zTail-zSqlCopy]; }else{ pParse->zTail = &zSql[nBytes]; } }else{ sqlite3RunParser(pParse, zSql, &zErrMsg); } assert( 0==pParse->nQueryLoop ); if( db->mallocFailed ){ pParse->rc = SQLITE_NOMEM; } if( pParse->rc==SQLITE_DONE ) pParse->rc = SQLITE_OK; if( pParse->checkSchema ){ schemaIsValid(pParse); } if( db->mallocFailed ){ pParse->rc = SQLITE_NOMEM; } if( pzTail ){ *pzTail = pParse->zTail; } rc = pParse->rc; #ifndef SQLITE_OMIT_EXPLAIN if( rc==SQLITE_OK && pParse->pVdbe && pParse->explain ){ static const char * const azColName[] = { "addr", "opcode", "p1", "p2", "p3", "p4", "p5", "comment", "selectid", "order", "from", "detail" }; int iFirst, mx; if( pParse->explain==2 ){ sqlite3VdbeSetNumCols(pParse->pVdbe, 4); iFirst = 8; mx = 12; }else{ sqlite3VdbeSetNumCols(pParse->pVdbe, 8); iFirst = 0; mx = 8; } for(i=iFirst; ipVdbe, i-iFirst, COLNAME_NAME, azColName[i], SQLITE_STATIC); } } #endif if( db->init.busy==0 ){ Vdbe *pVdbe = pParse->pVdbe; sqlite3VdbeSetSql(pVdbe, zSql, (int)(pParse->zTail-zSql), saveSqlFlag); } if( pParse->pVdbe && (rc!=SQLITE_OK || db->mallocFailed) ){ sqlite3VdbeFinalize(pParse->pVdbe); assert(!(*ppStmt)); }else{ *ppStmt = (sqlite3_stmt*)pParse->pVdbe; } if( zErrMsg ){ sqlite3ErrorWithMsg(db, rc, "%s", zErrMsg); sqlite3DbFree(db, zErrMsg); }else{ sqlite3Error(db, rc); } /* Delete any TriggerPrg structures allocated while parsing this statement. */ while( pParse->pTriggerPrg ){ TriggerPrg *pT = pParse->pTriggerPrg; pParse->pTriggerPrg = pT->pNext; sqlite3DbFree(db, pT); } end_prepare: sqlite3ParserReset(pParse); sqlite3StackFree(db, pParse); rc = sqlite3ApiExit(db, rc); assert( (rc&db->errMask)==rc ); return rc; } static int sqlite3LockAndPrepare( sqlite3 *db, /* Database handle. */ const char *zSql, /* UTF-8 encoded SQL statement. */ int nBytes, /* Length of zSql in bytes. 
*/ int saveSqlFlag, /* True to copy SQL text into the sqlite3_stmt */ Vdbe *pOld, /* VM being reprepared */ sqlite3_stmt **ppStmt, /* OUT: A pointer to the prepared statement */ const char **pzTail /* OUT: End of parsed string */ ){ int rc; #ifdef SQLITE_ENABLE_API_ARMOR if( ppStmt==0 ) return SQLITE_MISUSE_BKPT; #endif *ppStmt = 0; if( !sqlite3SafetyCheckOk(db)||zSql==0 ){ return SQLITE_MISUSE_BKPT; } sqlite3_mutex_enter(db->mutex); sqlite3BtreeEnterAll(db); rc = sqlite3Prepare(db, zSql, nBytes, saveSqlFlag, pOld, ppStmt, pzTail); if( rc==SQLITE_SCHEMA ){ sqlite3_finalize(*ppStmt); rc = sqlite3Prepare(db, zSql, nBytes, saveSqlFlag, pOld, ppStmt, pzTail); } sqlite3BtreeLeaveAll(db); sqlite3_mutex_leave(db->mutex); assert( rc==SQLITE_OK || *ppStmt==0 ); return rc; } /* ** Rerun the compilation of a statement after a schema change. ** ** If the statement is successfully recompiled, return SQLITE_OK. Otherwise, ** if the statement cannot be recompiled because another connection has ** locked the sqlite3_master table, return SQLITE_LOCKED. If any other error ** occurs, return SQLITE_SCHEMA. */ SQLITE_PRIVATE int sqlite3Reprepare(Vdbe *p){ int rc; sqlite3_stmt *pNew; const char *zSql; sqlite3 *db; assert( sqlite3_mutex_held(sqlite3VdbeDb(p)->mutex) ); zSql = sqlite3_sql((sqlite3_stmt *)p); assert( zSql!=0 ); /* Reprepare only called for prepare_v2() statements */ db = sqlite3VdbeDb(p); assert( sqlite3_mutex_held(db->mutex) ); rc = sqlite3LockAndPrepare(db, zSql, -1, 0, p, &pNew, 0); if( rc ){ if( rc==SQLITE_NOMEM ){ db->mallocFailed = 1; } assert( pNew==0 ); return rc; }else{ assert( pNew!=0 ); } sqlite3VdbeSwap((Vdbe*)pNew, p); sqlite3TransferBindings(pNew, (sqlite3_stmt*)p); sqlite3VdbeResetStepResult((Vdbe*)pNew); sqlite3VdbeFinalize((Vdbe*)pNew); return SQLITE_OK; } /* ** Two versions of the official API. Legacy and new use. In the legacy ** version, the original SQL text is not saved in the prepared statement ** and so if a schema change occurs, SQLITE_SCHEMA is returned by ** sqlite3_step(). In the new version, the original SQL text is retained ** and the statement is automatically recompiled if an schema change ** occurs. */ SQLITE_API int SQLITE_STDCALL sqlite3_prepare( sqlite3 *db, /* Database handle. */ const char *zSql, /* UTF-8 encoded SQL statement. */ int nBytes, /* Length of zSql in bytes. */ sqlite3_stmt **ppStmt, /* OUT: A pointer to the prepared statement */ const char **pzTail /* OUT: End of parsed string */ ){ int rc; rc = sqlite3LockAndPrepare(db,zSql,nBytes,0,0,ppStmt,pzTail); assert( rc==SQLITE_OK || ppStmt==0 || *ppStmt==0 ); /* VERIFY: F13021 */ return rc; } SQLITE_API int SQLITE_STDCALL sqlite3_prepare_v2( sqlite3 *db, /* Database handle. */ const char *zSql, /* UTF-8 encoded SQL statement. */ int nBytes, /* Length of zSql in bytes. */ sqlite3_stmt **ppStmt, /* OUT: A pointer to the prepared statement */ const char **pzTail /* OUT: End of parsed string */ ){ int rc; rc = sqlite3LockAndPrepare(db,zSql,nBytes,1,0,ppStmt,pzTail); assert( rc==SQLITE_OK || ppStmt==0 || *ppStmt==0 ); /* VERIFY: F13021 */ return rc; } #ifndef SQLITE_OMIT_UTF16 /* ** Compile the UTF-16 encoded SQL statement zSql into a statement handle. */ static int sqlite3Prepare16( sqlite3 *db, /* Database handle. */ const void *zSql, /* UTF-16 encoded SQL statement. */ int nBytes, /* Length of zSql in bytes. 
*/ int saveSqlFlag, /* True to save SQL text into the sqlite3_stmt */ sqlite3_stmt **ppStmt, /* OUT: A pointer to the prepared statement */ const void **pzTail /* OUT: End of parsed string */ ){ /* This function currently works by first transforming the UTF-16 ** encoded string to UTF-8, then invoking sqlite3_prepare(). The ** tricky bit is figuring out the pointer to return in *pzTail. */ char *zSql8; const char *zTail8 = 0; int rc = SQLITE_OK; #ifdef SQLITE_ENABLE_API_ARMOR if( ppStmt==0 ) return SQLITE_MISUSE_BKPT; #endif *ppStmt = 0; if( !sqlite3SafetyCheckOk(db)||zSql==0 ){ return SQLITE_MISUSE_BKPT; } if( nBytes>=0 ){ int sz; const char *z = (const char*)zSql; for(sz=0; szmutex); zSql8 = sqlite3Utf16to8(db, zSql, nBytes, SQLITE_UTF16NATIVE); if( zSql8 ){ rc = sqlite3LockAndPrepare(db, zSql8, -1, saveSqlFlag, 0, ppStmt, &zTail8); } if( zTail8 && pzTail ){ /* If sqlite3_prepare returns a tail pointer, we calculate the ** equivalent pointer into the UTF-16 string by counting the unicode ** characters between zSql8 and zTail8, and then returning a pointer ** the same number of characters into the UTF-16 string. */ int chars_parsed = sqlite3Utf8CharLen(zSql8, (int)(zTail8-zSql8)); *pzTail = (u8 *)zSql + sqlite3Utf16ByteLen(zSql, chars_parsed); } sqlite3DbFree(db, zSql8); rc = sqlite3ApiExit(db, rc); sqlite3_mutex_leave(db->mutex); return rc; } /* ** Two versions of the official API. Legacy and new use. In the legacy ** version, the original SQL text is not saved in the prepared statement ** and so if a schema change occurs, SQLITE_SCHEMA is returned by ** sqlite3_step(). In the new version, the original SQL text is retained ** and the statement is automatically recompiled if an schema change ** occurs. */ SQLITE_API int SQLITE_STDCALL sqlite3_prepare16( sqlite3 *db, /* Database handle. */ const void *zSql, /* UTF-16 encoded SQL statement. */ int nBytes, /* Length of zSql in bytes. */ sqlite3_stmt **ppStmt, /* OUT: A pointer to the prepared statement */ const void **pzTail /* OUT: End of parsed string */ ){ int rc; rc = sqlite3Prepare16(db,zSql,nBytes,0,ppStmt,pzTail); assert( rc==SQLITE_OK || ppStmt==0 || *ppStmt==0 ); /* VERIFY: F13021 */ return rc; } SQLITE_API int SQLITE_STDCALL sqlite3_prepare16_v2( sqlite3 *db, /* Database handle. */ const void *zSql, /* UTF-16 encoded SQL statement. */ int nBytes, /* Length of zSql in bytes. */ sqlite3_stmt **ppStmt, /* OUT: A pointer to the prepared statement */ const void **pzTail /* OUT: End of parsed string */ ){ int rc; rc = sqlite3Prepare16(db,zSql,nBytes,1,ppStmt,pzTail); assert( rc==SQLITE_OK || ppStmt==0 || *ppStmt==0 ); /* VERIFY: F13021 */ return rc; } #endif /* SQLITE_OMIT_UTF16 */ /************** End of prepare.c *********************************************/ /************** Begin file select.c ******************************************/ /* ** 2001 September 15 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** This file contains C code routines that are called by the parser ** to handle SELECT statements in SQLite. 
*/ /* #include "sqliteInt.h" */ /* ** Trace output macros */ #if SELECTTRACE_ENABLED /***/ int sqlite3SelectTrace = 0; # define SELECTTRACE(K,P,S,X) \ if(sqlite3SelectTrace&(K)) \ sqlite3DebugPrintf("%*s%s.%p: ",(P)->nSelectIndent*2-2,"",\ (S)->zSelName,(S)),\ sqlite3DebugPrintf X #else # define SELECTTRACE(K,P,S,X) #endif /* ** An instance of the following object is used to record information about ** how to process the DISTINCT keyword, to simplify passing that information ** into the selectInnerLoop() routine. */ typedef struct DistinctCtx DistinctCtx; struct DistinctCtx { u8 isTnct; /* True if the DISTINCT keyword is present */ u8 eTnctType; /* One of the WHERE_DISTINCT_* operators */ int tabTnct; /* Ephemeral table used for DISTINCT processing */ int addrTnct; /* Address of OP_OpenEphemeral opcode for tabTnct */ }; /* ** An instance of the following object is used to record information about ** the ORDER BY (or GROUP BY) clause of query is being coded. */ typedef struct SortCtx SortCtx; struct SortCtx { ExprList *pOrderBy; /* The ORDER BY (or GROUP BY clause) */ int nOBSat; /* Number of ORDER BY terms satisfied by indices */ int iECursor; /* Cursor number for the sorter */ int regReturn; /* Register holding block-output return address */ int labelBkOut; /* Start label for the block-output subroutine */ int addrSortIndex; /* Address of the OP_SorterOpen or OP_OpenEphemeral */ u8 sortFlags; /* Zero or more SORTFLAG_* bits */ }; #define SORTFLAG_UseSorter 0x01 /* Use SorterOpen instead of OpenEphemeral */ /* ** Delete all the content of a Select structure. Deallocate the structure ** itself only if bFree is true. */ static void clearSelect(sqlite3 *db, Select *p, int bFree){ while( p ){ Select *pPrior = p->pPrior; sqlite3ExprListDelete(db, p->pEList); sqlite3SrcListDelete(db, p->pSrc); sqlite3ExprDelete(db, p->pWhere); sqlite3ExprListDelete(db, p->pGroupBy); sqlite3ExprDelete(db, p->pHaving); sqlite3ExprListDelete(db, p->pOrderBy); sqlite3ExprDelete(db, p->pLimit); sqlite3ExprDelete(db, p->pOffset); sqlite3WithDelete(db, p->pWith); if( bFree ) sqlite3DbFree(db, p); p = pPrior; bFree = 1; } } /* ** Initialize a SelectDest structure. */ SQLITE_PRIVATE void sqlite3SelectDestInit(SelectDest *pDest, int eDest, int iParm){ pDest->eDest = (u8)eDest; pDest->iSDParm = iParm; pDest->affSdst = 0; pDest->iSdst = 0; pDest->nSdst = 0; } /* ** Allocate a new Select structure and return a pointer to that ** structure. */ SQLITE_PRIVATE Select *sqlite3SelectNew( Parse *pParse, /* Parsing context */ ExprList *pEList, /* which columns to include in the result */ SrcList *pSrc, /* the FROM clause -- which tables to scan */ Expr *pWhere, /* the WHERE clause */ ExprList *pGroupBy, /* the GROUP BY clause */ Expr *pHaving, /* the HAVING clause */ ExprList *pOrderBy, /* the ORDER BY clause */ u16 selFlags, /* Flag parameters, such as SF_Distinct */ Expr *pLimit, /* LIMIT value. NULL means not used */ Expr *pOffset /* OFFSET value. 
NULL means no offset */ ){ Select *pNew; Select standin; sqlite3 *db = pParse->db; pNew = sqlite3DbMallocZero(db, sizeof(*pNew) ); if( pNew==0 ){ assert( db->mallocFailed ); pNew = &standin; memset(pNew, 0, sizeof(*pNew)); } if( pEList==0 ){ pEList = sqlite3ExprListAppend(pParse, 0, sqlite3Expr(db,TK_ALL,0)); } pNew->pEList = pEList; if( pSrc==0 ) pSrc = sqlite3DbMallocZero(db, sizeof(*pSrc)); pNew->pSrc = pSrc; pNew->pWhere = pWhere; pNew->pGroupBy = pGroupBy; pNew->pHaving = pHaving; pNew->pOrderBy = pOrderBy; pNew->selFlags = selFlags; pNew->op = TK_SELECT; pNew->pLimit = pLimit; pNew->pOffset = pOffset; assert( pOffset==0 || pLimit!=0 || pParse->nErr>0 || db->mallocFailed!=0 ); pNew->addrOpenEphm[0] = -1; pNew->addrOpenEphm[1] = -1; if( db->mallocFailed ) { clearSelect(db, pNew, pNew!=&standin); pNew = 0; }else{ assert( pNew->pSrc!=0 || pParse->nErr>0 ); } assert( pNew!=&standin ); return pNew; } #if SELECTTRACE_ENABLED /* ** Set the name of a Select object */ SQLITE_PRIVATE void sqlite3SelectSetName(Select *p, const char *zName){ if( p && zName ){ sqlite3_snprintf(sizeof(p->zSelName), p->zSelName, "%s", zName); } } #endif /* ** Delete the given Select structure and all of its substructures. */ SQLITE_PRIVATE void sqlite3SelectDelete(sqlite3 *db, Select *p){ clearSelect(db, p, 1); } /* ** Return a pointer to the right-most SELECT statement in a compound. */ static Select *findRightmost(Select *p){ while( p->pNext ) p = p->pNext; return p; } /* ** Given 1 to 3 identifiers preceding the JOIN keyword, determine the ** type of join. Return an integer constant that expresses that type ** in terms of the following bit values: ** ** JT_INNER ** JT_CROSS ** JT_OUTER ** JT_NATURAL ** JT_LEFT ** JT_RIGHT ** ** A full outer join is the combination of JT_LEFT and JT_RIGHT. ** ** If an illegal or unsupported join type is seen, then still return ** a join type, but put an error in the pParse structure. */ SQLITE_PRIVATE int sqlite3JoinType(Parse *pParse, Token *pA, Token *pB, Token *pC){ int jointype = 0; Token *apAll[3]; Token *p; /* 0123456789 123456789 123456789 123 */ static const char zKeyText[] = "naturaleftouterightfullinnercross"; static const struct { u8 i; /* Beginning of keyword text in zKeyText[] */ u8 nChar; /* Length of the keyword in characters */ u8 code; /* Join type mask */ } aKeyword[] = { /* natural */ { 0, 7, JT_NATURAL }, /* left */ { 6, 4, JT_LEFT|JT_OUTER }, /* outer */ { 10, 5, JT_OUTER }, /* right */ { 14, 5, JT_RIGHT|JT_OUTER }, /* full */ { 19, 4, JT_LEFT|JT_RIGHT|JT_OUTER }, /* inner */ { 23, 5, JT_INNER }, /* cross */ { 28, 5, JT_INNER|JT_CROSS }, }; int i, j; apAll[0] = pA; apAll[1] = pB; apAll[2] = pC; for(i=0; i<3 && apAll[i]; i++){ p = apAll[i]; for(j=0; jn==aKeyword[j].nChar && sqlite3StrNICmp((char*)p->z, &zKeyText[aKeyword[j].i], p->n)==0 ){ jointype |= aKeyword[j].code; break; } } testcase( j==0 || j==1 || j==2 || j==3 || j==4 || j==5 || j==6 ); if( j>=ArraySize(aKeyword) ){ jointype |= JT_ERROR; break; } } if( (jointype & (JT_INNER|JT_OUTER))==(JT_INNER|JT_OUTER) || (jointype & JT_ERROR)!=0 ){ const char *zSp = " "; assert( pB!=0 ); if( pC==0 ){ zSp++; } sqlite3ErrorMsg(pParse, "unknown or unsupported join type: " "%T %T%s%T", pA, pB, zSp, pC); jointype = JT_INNER; }else if( (jointype & JT_OUTER)!=0 && (jointype & (JT_LEFT|JT_RIGHT))!=JT_LEFT ){ sqlite3ErrorMsg(pParse, "RIGHT and FULL OUTER JOINs are not currently supported"); jointype = JT_INNER; } return jointype; } /* ** Return the index of a column in a table. 
Return -1 if the column
** is not contained in the table.
*/
static int columnIndex(Table *pTab, const char *zCol){
  int i;
  for(i=0; i<pTab->nCol; i++){
    if( sqlite3StrICmp(pTab->aCol[i].zName, zCol)==0 ) return i;
  }
  return -1;
}

/*
** Search the first N tables in pSrc, from left to right, looking for a
** table that has a column named zCol.
**
** When found, set *piTab and *piCol to the table index and column index
** of the matching column and return TRUE.
**
** If not found, return FALSE.
*/
static int tableAndColumnIndex(
  SrcList *pSrc,       /* Array of tables to search */
  int N,               /* Number of tables in pSrc->a[] to search */
  const char *zCol,    /* Name of the column we are looking for */
  int *piTab,          /* Write index of pSrc->a[] here */
  int *piCol           /* Write index of pSrc->a[*piTab].pTab->aCol[] here */
){
  int i;               /* For looping over tables in pSrc */
  int iCol;            /* Index of column matching zCol */

  assert( (piTab==0)==(piCol==0) );  /* Both or neither are NULL */
  for(i=0; i<N; i++){
    iCol = columnIndex(pSrc->a[i].pTab, zCol);
    if( iCol>=0 ){
      if( piTab ){
        *piTab = i;
        *piCol = iCol;
      }
      return 1;
    }
  }
  return 0;
}

/*
** This function is used to add terms implied by JOIN syntax to the
** WHERE clause expression of a SELECT statement. The new term, which
** is ANDed with the existing WHERE clause, is of the form:
**
**    (tab1.col1 = tab2.col2)
**
** where tab1 is the iSrc'th table in SrcList pSrc and tab2 is the
** (iSrc+1)'th. Column col1 is column iColLeft of tab1, and col2 is
** column iColRight of tab2.
*/
static void addWhereTerm(
  Parse *pParse,                  /* Parsing context */
  SrcList *pSrc,                  /* List of tables in FROM clause */
  int iLeft,                      /* Index of first table to join in pSrc */
  int iColLeft,                   /* Index of column in first table */
  int iRight,                     /* Index of second table in pSrc */
  int iColRight,                  /* Index of column in second table */
  int isOuterJoin,                /* True if this is an OUTER join */
  Expr **ppWhere                  /* IN/OUT: The WHERE clause to add to */
){
  sqlite3 *db = pParse->db;
  Expr *pE1;
  Expr *pE2;
  Expr *pEq;

  assert( iLeft<iRight );
  assert( pSrc->nSrc>iRight );
  assert( pSrc->a[iLeft].pTab );
  assert( pSrc->a[iRight].pTab );

  pE1 = sqlite3CreateColumnExpr(db, pSrc, iLeft, iColLeft);
  pE2 = sqlite3CreateColumnExpr(db, pSrc, iRight, iColRight);

  pEq = sqlite3PExpr(pParse, TK_EQ, pE1, pE2, 0);
  if( pEq && isOuterJoin ){
    ExprSetProperty(pEq, EP_FromJoin);
    assert( !ExprHasProperty(pEq, EP_TokenOnly|EP_Reduced) );
    ExprSetVVAProperty(pEq, EP_NoReduce);
    pEq->iRightJoinTable = (i16)pE2->iTable;
  }
  *ppWhere = sqlite3ExprAnd(db, *ppWhere, pEq);
}

/*
** Set the EP_FromJoin property on all terms of the given expression.
** And set the Expr.iRightJoinTable to iTable for every term in the
** expression.
**
** The EP_FromJoin property is used on terms of an expression to tell
** the LEFT OUTER JOIN processing logic that this term is part of the
** join restriction specified in the ON or USING clause and not a part
** of the more general WHERE clause. These terms are moved over to the
** WHERE clause during join processing but we need to remember that they
** originated in the ON or USING clause.
**
** The Expr.iRightJoinTable tells the WHERE clause processing that the
** expression depends on table iRightJoinTable even if that table is not
** explicitly mentioned in the expression. That information is needed
** for cases like this:
**
**    SELECT * FROM t1 LEFT JOIN t2 ON t1.a=t2.b AND t1.x=5
**
** The where clause needs to defer the handling of the t1.x=5
** term until after the t2 loop of the join. In that way, a
** NULL t2 row will be inserted whenever t1.x!=5.
If we do not ** defer the handling of t1.x=5, it will be processed immediately ** after the t1 loop and rows with t1.x!=5 will never appear in ** the output, which is incorrect. */ static void setJoinExpr(Expr *p, int iTable){ while( p ){ ExprSetProperty(p, EP_FromJoin); assert( !ExprHasProperty(p, EP_TokenOnly|EP_Reduced) ); ExprSetVVAProperty(p, EP_NoReduce); p->iRightJoinTable = (i16)iTable; if( p->op==TK_FUNCTION && p->x.pList ){ int i; for(i=0; ix.pList->nExpr; i++){ setJoinExpr(p->x.pList->a[i].pExpr, iTable); } } setJoinExpr(p->pLeft, iTable); p = p->pRight; } } /* ** This routine processes the join information for a SELECT statement. ** ON and USING clauses are converted into extra terms of the WHERE clause. ** NATURAL joins also create extra WHERE clause terms. ** ** The terms of a FROM clause are contained in the Select.pSrc structure. ** The left most table is the first entry in Select.pSrc. The right-most ** table is the last entry. The join operator is held in the entry to ** the left. Thus entry 0 contains the join operator for the join between ** entries 0 and 1. Any ON or USING clauses associated with the join are ** also attached to the left entry. ** ** This routine returns the number of errors encountered. */ static int sqliteProcessJoin(Parse *pParse, Select *p){ SrcList *pSrc; /* All tables in the FROM clause */ int i, j; /* Loop counters */ struct SrcList_item *pLeft; /* Left table being joined */ struct SrcList_item *pRight; /* Right table being joined */ pSrc = p->pSrc; pLeft = &pSrc->a[0]; pRight = &pLeft[1]; for(i=0; inSrc-1; i++, pRight++, pLeft++){ Table *pLeftTab = pLeft->pTab; Table *pRightTab = pRight->pTab; int isOuter; if( NEVER(pLeftTab==0 || pRightTab==0) ) continue; isOuter = (pRight->jointype & JT_OUTER)!=0; /* When the NATURAL keyword is present, add WHERE clause terms for ** every column that the two tables have in common. */ if( pRight->jointype & JT_NATURAL ){ if( pRight->pOn || pRight->pUsing ){ sqlite3ErrorMsg(pParse, "a NATURAL join may not have " "an ON or USING clause", 0); return 1; } for(j=0; jnCol; j++){ char *zName; /* Name of column in the right table */ int iLeft; /* Matching left table */ int iLeftCol; /* Matching column in the left table */ zName = pRightTab->aCol[j].zName; if( tableAndColumnIndex(pSrc, i+1, zName, &iLeft, &iLeftCol) ){ addWhereTerm(pParse, pSrc, iLeft, iLeftCol, i+1, j, isOuter, &p->pWhere); } } } /* Disallow both ON and USING clauses in the same join */ if( pRight->pOn && pRight->pUsing ){ sqlite3ErrorMsg(pParse, "cannot have both ON and USING " "clauses in the same join"); return 1; } /* Add the ON clause to the end of the WHERE clause, connected by ** an AND operator. */ if( pRight->pOn ){ if( isOuter ) setJoinExpr(pRight->pOn, pRight->iCursor); p->pWhere = sqlite3ExprAnd(pParse->db, p->pWhere, pRight->pOn); pRight->pOn = 0; } /* Create extra terms on the WHERE clause for each column named ** in the USING clause. Example: If the two tables to be joined are ** A and B and the USING clause names X, Y, and Z, then add this ** to the WHERE clause: A.X=B.X AND A.Y=B.Y AND A.Z=B.Z ** Report an error if any column mentioned in the USING clause is ** not contained in both tables to be joined. 
*/ if( pRight->pUsing ){ IdList *pList = pRight->pUsing; for(j=0; jnId; j++){ char *zName; /* Name of the term in the USING clause */ int iLeft; /* Table on the left with matching column name */ int iLeftCol; /* Column number of matching column on the left */ int iRightCol; /* Column number of matching column on the right */ zName = pList->a[j].zName; iRightCol = columnIndex(pRightTab, zName); if( iRightCol<0 || !tableAndColumnIndex(pSrc, i+1, zName, &iLeft, &iLeftCol) ){ sqlite3ErrorMsg(pParse, "cannot join using column %s - column " "not present in both tables", zName); return 1; } addWhereTerm(pParse, pSrc, iLeft, iLeftCol, i+1, iRightCol, isOuter, &p->pWhere); } } } return 0; } /* Forward reference */ static KeyInfo *keyInfoFromExprList( Parse *pParse, /* Parsing context */ ExprList *pList, /* Form the KeyInfo object from this ExprList */ int iStart, /* Begin with this column of pList */ int nExtra /* Add this many extra columns to the end */ ); /* ** Generate code that will push the record in registers regData ** through regData+nData-1 onto the sorter. */ static void pushOntoSorter( Parse *pParse, /* Parser context */ SortCtx *pSort, /* Information about the ORDER BY clause */ Select *pSelect, /* The whole SELECT statement */ int regData, /* First register holding data to be sorted */ int nData, /* Number of elements in the data array */ int nPrefixReg /* No. of reg prior to regData available for use */ ){ Vdbe *v = pParse->pVdbe; /* Stmt under construction */ int bSeq = ((pSort->sortFlags & SORTFLAG_UseSorter)==0); int nExpr = pSort->pOrderBy->nExpr; /* No. of ORDER BY terms */ int nBase = nExpr + bSeq + nData; /* Fields in sorter record */ int regBase; /* Regs for sorter record */ int regRecord = ++pParse->nMem; /* Assembled sorter record */ int nOBSat = pSort->nOBSat; /* ORDER BY terms to skip */ int op; /* Opcode to add sorter record to sorter */ assert( bSeq==0 || bSeq==1 ); if( nPrefixReg ){ assert( nPrefixReg==nExpr+bSeq ); regBase = regData - nExpr - bSeq; }else{ regBase = pParse->nMem + 1; pParse->nMem += nBase; } sqlite3ExprCodeExprList(pParse, pSort->pOrderBy, regBase, SQLITE_ECEL_DUP); if( bSeq ){ sqlite3VdbeAddOp2(v, OP_Sequence, pSort->iECursor, regBase+nExpr); } if( nPrefixReg==0 ){ sqlite3ExprCodeMove(pParse, regData, regBase+nExpr+bSeq, nData); } sqlite3VdbeAddOp3(v, OP_MakeRecord, regBase+nOBSat, nBase-nOBSat, regRecord); if( nOBSat>0 ){ int regPrevKey; /* The first nOBSat columns of the previous row */ int addrFirst; /* Address of the OP_IfNot opcode */ int addrJmp; /* Address of the OP_Jump opcode */ VdbeOp *pOp; /* Opcode that opens the sorter */ int nKey; /* Number of sorting key columns, including OP_Sequence */ KeyInfo *pKI; /* Original KeyInfo on the sorter table */ regPrevKey = pParse->nMem+1; pParse->nMem += pSort->nOBSat; nKey = nExpr - pSort->nOBSat + bSeq; if( bSeq ){ addrFirst = sqlite3VdbeAddOp1(v, OP_IfNot, regBase+nExpr); }else{ addrFirst = sqlite3VdbeAddOp1(v, OP_SequenceTest, pSort->iECursor); } VdbeCoverage(v); sqlite3VdbeAddOp3(v, OP_Compare, regPrevKey, regBase, pSort->nOBSat); pOp = sqlite3VdbeGetOp(v, pSort->addrSortIndex); if( pParse->db->mallocFailed ) return; pOp->p2 = nKey + nData; pKI = pOp->p4.pKeyInfo; memset(pKI->aSortOrder, 0, pKI->nField); /* Makes OP_Jump below testable */ sqlite3VdbeChangeP4(v, -1, (char*)pKI, P4_KEYINFO); testcase( pKI->nXField>2 ); pOp->p4.pKeyInfo = keyInfoFromExprList(pParse, pSort->pOrderBy, nOBSat, pKI->nXField-1); addrJmp = sqlite3VdbeCurrentAddr(v); sqlite3VdbeAddOp3(v, OP_Jump, addrJmp+1, 0, addrJmp+1); 
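    /* Descriptive note: the OP_Jump coded just above acts on the result of the
    ** preceding OP_Compare. Its "less than" and "greater than" targets simply
    ** fall through to the next instruction, while its "equal" target (P2) is
    ** left as 0 here and is back-filled by the sqlite3VdbeJumpHere(v, addrJmp)
    ** call below, so a row whose first nOBSat key columns match the previous
    ** row skips the block-output flush. */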
VdbeCoverage(v); pSort->labelBkOut = sqlite3VdbeMakeLabel(v); pSort->regReturn = ++pParse->nMem; sqlite3VdbeAddOp2(v, OP_Gosub, pSort->regReturn, pSort->labelBkOut); sqlite3VdbeAddOp1(v, OP_ResetSorter, pSort->iECursor); sqlite3VdbeJumpHere(v, addrFirst); sqlite3ExprCodeMove(pParse, regBase, regPrevKey, pSort->nOBSat); sqlite3VdbeJumpHere(v, addrJmp); } if( pSort->sortFlags & SORTFLAG_UseSorter ){ op = OP_SorterInsert; }else{ op = OP_IdxInsert; } sqlite3VdbeAddOp2(v, op, pSort->iECursor, regRecord); if( pSelect->iLimit ){ int addr; int iLimit; if( pSelect->iOffset ){ iLimit = pSelect->iOffset+1; }else{ iLimit = pSelect->iLimit; } addr = sqlite3VdbeAddOp3(v, OP_IfNotZero, iLimit, 0, -1); VdbeCoverage(v); sqlite3VdbeAddOp1(v, OP_Last, pSort->iECursor); sqlite3VdbeAddOp1(v, OP_Delete, pSort->iECursor); sqlite3VdbeJumpHere(v, addr); } } /* ** Add code to implement the OFFSET */ static void codeOffset( Vdbe *v, /* Generate code into this VM */ int iOffset, /* Register holding the offset counter */ int iContinue /* Jump here to skip the current record */ ){ if( iOffset>0 ){ int addr; addr = sqlite3VdbeAddOp3(v, OP_IfNeg, iOffset, 0, -1); VdbeCoverage(v); sqlite3VdbeAddOp2(v, OP_Goto, 0, iContinue); VdbeComment((v, "skip OFFSET records")); sqlite3VdbeJumpHere(v, addr); } } /* ** Add code that will check to make sure the N registers starting at iMem ** form a distinct entry. iTab is a sorting index that holds previously ** seen combinations of the N values. A new entry is made in iTab ** if the current N values are new. ** ** A jump to addrRepeat is made and the N+1 values are popped from the ** stack if the top N elements are not distinct. */ static void codeDistinct( Parse *pParse, /* Parsing and code generating context */ int iTab, /* A sorting index used to test for distinctness */ int addrRepeat, /* Jump to here if not distinct */ int N, /* Number of elements */ int iMem /* First element */ ){ Vdbe *v; int r1; v = pParse->pVdbe; r1 = sqlite3GetTempReg(pParse); sqlite3VdbeAddOp4Int(v, OP_Found, iTab, addrRepeat, iMem, N); VdbeCoverage(v); sqlite3VdbeAddOp3(v, OP_MakeRecord, iMem, N, r1); sqlite3VdbeAddOp2(v, OP_IdxInsert, iTab, r1); sqlite3ReleaseTempReg(pParse, r1); } #ifndef SQLITE_OMIT_SUBQUERY /* ** Generate an error message when a SELECT is used within a subexpression ** (example: "a IN (SELECT * FROM table)") but it has more than 1 result ** column. We do this in a subroutine because the error used to occur ** in multiple places. (The error only occurs in one place now, but we ** retain the subroutine to minimize code disruption.) */ static int checkForMultiColumnSelectError( Parse *pParse, /* Parse context. */ SelectDest *pDest, /* Destination of SELECT results */ int nExpr /* Number of result columns returned by SELECT */ ){ int eDest = pDest->eDest; if( nExpr>1 && (eDest==SRT_Mem || eDest==SRT_Set) ){ sqlite3ErrorMsg(pParse, "only a single result allowed for " "a SELECT that is part of an expression"); return 1; }else{ return 0; } } #endif /* ** This routine generates the code for the inside of the inner loop ** of a SELECT. ** ** If srcTab is negative, then the pEList expressions ** are evaluated in order to get the data for this row. If srcTab is ** zero or more, then data is pulled from srcTab and pEList is used only ** to get number columns and the datatype for each column. 
*/ static void selectInnerLoop( Parse *pParse, /* The parser context */ Select *p, /* The complete select statement being coded */ ExprList *pEList, /* List of values being extracted */ int srcTab, /* Pull data from this table */ SortCtx *pSort, /* If not NULL, info on how to process ORDER BY */ DistinctCtx *pDistinct, /* If not NULL, info on how to process DISTINCT */ SelectDest *pDest, /* How to dispose of the results */ int iContinue, /* Jump here to continue with next row */ int iBreak /* Jump here to break out of the inner loop */ ){ Vdbe *v = pParse->pVdbe; int i; int hasDistinct; /* True if the DISTINCT keyword is present */ int regResult; /* Start of memory holding result set */ int eDest = pDest->eDest; /* How to dispose of results */ int iParm = pDest->iSDParm; /* First argument to disposal method */ int nResultCol; /* Number of result columns */ int nPrefixReg = 0; /* Number of extra registers before regResult */ assert( v ); assert( pEList!=0 ); hasDistinct = pDistinct ? pDistinct->eTnctType : WHERE_DISTINCT_NOOP; if( pSort && pSort->pOrderBy==0 ) pSort = 0; if( pSort==0 && !hasDistinct ){ assert( iContinue!=0 ); codeOffset(v, p->iOffset, iContinue); } /* Pull the requested columns. */ nResultCol = pEList->nExpr; if( pDest->iSdst==0 ){ if( pSort ){ nPrefixReg = pSort->pOrderBy->nExpr; if( !(pSort->sortFlags & SORTFLAG_UseSorter) ) nPrefixReg++; pParse->nMem += nPrefixReg; } pDest->iSdst = pParse->nMem+1; pParse->nMem += nResultCol; }else if( pDest->iSdst+nResultCol > pParse->nMem ){ /* This is an error condition that can result, for example, when a SELECT ** on the right-hand side of an INSERT contains more result columns than ** there are columns in the table on the left. The error will be caught ** and reported later. But we need to make sure enough memory is allocated ** to avoid other spurious errors in the meantime. */ pParse->nMem += nResultCol; } pDest->nSdst = nResultCol; regResult = pDest->iSdst; if( srcTab>=0 ){ for(i=0; ia[i].zName)); } }else if( eDest!=SRT_Exists ){ /* If the destination is an EXISTS(...) expression, the actual ** values returned by the SELECT are not required. */ u8 ecelFlags; if( eDest==SRT_Mem || eDest==SRT_Output || eDest==SRT_Coroutine ){ ecelFlags = SQLITE_ECEL_DUP; }else{ ecelFlags = 0; } sqlite3ExprCodeExprList(pParse, pEList, regResult, ecelFlags); } /* If the DISTINCT keyword was present on the SELECT statement ** and this row has been seen before, then do not make this row ** part of the result. */ if( hasDistinct ){ switch( pDistinct->eTnctType ){ case WHERE_DISTINCT_ORDERED: { VdbeOp *pOp; /* No longer required OpenEphemeral instr. */ int iJump; /* Jump destination */ int regPrev; /* Previous row content */ /* Allocate space for the previous row */ regPrev = pParse->nMem+1; pParse->nMem += nResultCol; /* Change the OP_OpenEphemeral coded earlier to an OP_Null ** sets the MEM_Cleared bit on the first register of the ** previous value. This will cause the OP_Ne below to always ** fail on the first iteration of the loop even if the first ** row is all NULLs. 
*/ sqlite3VdbeChangeToNoop(v, pDistinct->addrTnct); pOp = sqlite3VdbeGetOp(v, pDistinct->addrTnct); pOp->opcode = OP_Null; pOp->p1 = 1; pOp->p2 = regPrev; iJump = sqlite3VdbeCurrentAddr(v) + nResultCol; for(i=0; ia[i].pExpr); if( idb->mallocFailed ); sqlite3VdbeAddOp3(v, OP_Copy, regResult, regPrev, nResultCol-1); break; } case WHERE_DISTINCT_UNIQUE: { sqlite3VdbeChangeToNoop(v, pDistinct->addrTnct); break; } default: { assert( pDistinct->eTnctType==WHERE_DISTINCT_UNORDERED ); codeDistinct(pParse, pDistinct->tabTnct, iContinue, nResultCol, regResult); break; } } if( pSort==0 ){ codeOffset(v, p->iOffset, iContinue); } } switch( eDest ){ /* In this mode, write each query result to the key of the temporary ** table iParm. */ #ifndef SQLITE_OMIT_COMPOUND_SELECT case SRT_Union: { int r1; r1 = sqlite3GetTempReg(pParse); sqlite3VdbeAddOp3(v, OP_MakeRecord, regResult, nResultCol, r1); sqlite3VdbeAddOp2(v, OP_IdxInsert, iParm, r1); sqlite3ReleaseTempReg(pParse, r1); break; } /* Construct a record from the query result, but instead of ** saving that record, use it as a key to delete elements from ** the temporary table iParm. */ case SRT_Except: { sqlite3VdbeAddOp3(v, OP_IdxDelete, iParm, regResult, nResultCol); break; } #endif /* SQLITE_OMIT_COMPOUND_SELECT */ /* Store the result as data using a unique key. */ case SRT_Fifo: case SRT_DistFifo: case SRT_Table: case SRT_EphemTab: { int r1 = sqlite3GetTempRange(pParse, nPrefixReg+1); testcase( eDest==SRT_Table ); testcase( eDest==SRT_EphemTab ); testcase( eDest==SRT_Fifo ); testcase( eDest==SRT_DistFifo ); sqlite3VdbeAddOp3(v, OP_MakeRecord, regResult, nResultCol, r1+nPrefixReg); #ifndef SQLITE_OMIT_CTE if( eDest==SRT_DistFifo ){ /* If the destination is DistFifo, then cursor (iParm+1) is open ** on an ephemeral index. If the current row is already present ** in the index, do not write it to the output. If not, add the ** current row to the index and proceed with writing it to the ** output table as well. */ int addr = sqlite3VdbeCurrentAddr(v) + 4; sqlite3VdbeAddOp4Int(v, OP_Found, iParm+1, addr, r1, 0); VdbeCoverage(v); sqlite3VdbeAddOp2(v, OP_IdxInsert, iParm+1, r1); assert( pSort==0 ); } #endif if( pSort ){ pushOntoSorter(pParse, pSort, p, r1+nPrefixReg, 1, nPrefixReg); }else{ int r2 = sqlite3GetTempReg(pParse); sqlite3VdbeAddOp2(v, OP_NewRowid, iParm, r2); sqlite3VdbeAddOp3(v, OP_Insert, iParm, r1, r2); sqlite3VdbeChangeP5(v, OPFLAG_APPEND); sqlite3ReleaseTempReg(pParse, r2); } sqlite3ReleaseTempRange(pParse, r1, nPrefixReg+1); break; } #ifndef SQLITE_OMIT_SUBQUERY /* If we are creating a set for an "expr IN (SELECT ...)" construct, ** then there should be a single item on the stack. Write this ** item into the set table with bogus data. */ case SRT_Set: { assert( nResultCol==1 ); pDest->affSdst = sqlite3CompareAffinity(pEList->a[0].pExpr, pDest->affSdst); if( pSort ){ /* At first glance you would think we could optimize out the ** ORDER BY in this case since the order of entries in the set ** does not matter. But there might be a LIMIT clause, in which ** case the order does matter */ pushOntoSorter(pParse, pSort, p, regResult, 1, nPrefixReg); }else{ int r1 = sqlite3GetTempReg(pParse); sqlite3VdbeAddOp4(v, OP_MakeRecord, regResult,1,r1, &pDest->affSdst, 1); sqlite3ExprCacheAffinityChange(pParse, regResult, 1); sqlite3VdbeAddOp2(v, OP_IdxInsert, iParm, r1); sqlite3ReleaseTempReg(pParse, r1); } break; } /* If any row exist in the result set, record that fact and abort. 
*/ case SRT_Exists: { sqlite3VdbeAddOp2(v, OP_Integer, 1, iParm); /* The LIMIT clause will terminate the loop for us */ break; } /* If this is a scalar select that is part of an expression, then ** store the results in the appropriate memory cell and break out ** of the scan loop. */ case SRT_Mem: { assert( nResultCol==1 ); if( pSort ){ pushOntoSorter(pParse, pSort, p, regResult, 1, nPrefixReg); }else{ assert( regResult==iParm ); /* The LIMIT clause will jump out of the loop for us */ } break; } #endif /* #ifndef SQLITE_OMIT_SUBQUERY */ case SRT_Coroutine: /* Send data to a co-routine */ case SRT_Output: { /* Return the results */ testcase( eDest==SRT_Coroutine ); testcase( eDest==SRT_Output ); if( pSort ){ pushOntoSorter(pParse, pSort, p, regResult, nResultCol, nPrefixReg); }else if( eDest==SRT_Coroutine ){ sqlite3VdbeAddOp1(v, OP_Yield, pDest->iSDParm); }else{ sqlite3VdbeAddOp2(v, OP_ResultRow, regResult, nResultCol); sqlite3ExprCacheAffinityChange(pParse, regResult, nResultCol); } break; } #ifndef SQLITE_OMIT_CTE /* Write the results into a priority queue that is order according to ** pDest->pOrderBy (in pSO). pDest->iSDParm (in iParm) is the cursor for an ** index with pSO->nExpr+2 columns. Build a key using pSO for the first ** pSO->nExpr columns, then make sure all keys are unique by adding a ** final OP_Sequence column. The last column is the record as a blob. */ case SRT_DistQueue: case SRT_Queue: { int nKey; int r1, r2, r3; int addrTest = 0; ExprList *pSO; pSO = pDest->pOrderBy; assert( pSO ); nKey = pSO->nExpr; r1 = sqlite3GetTempReg(pParse); r2 = sqlite3GetTempRange(pParse, nKey+2); r3 = r2+nKey+1; if( eDest==SRT_DistQueue ){ /* If the destination is DistQueue, then cursor (iParm+1) is open ** on a second ephemeral index that holds all values every previously ** added to the queue. */ addrTest = sqlite3VdbeAddOp4Int(v, OP_Found, iParm+1, 0, regResult, nResultCol); VdbeCoverage(v); } sqlite3VdbeAddOp3(v, OP_MakeRecord, regResult, nResultCol, r3); if( eDest==SRT_DistQueue ){ sqlite3VdbeAddOp2(v, OP_IdxInsert, iParm+1, r3); sqlite3VdbeChangeP5(v, OPFLAG_USESEEKRESULT); } for(i=0; ia[i].u.x.iOrderByCol - 1, r2+i); } sqlite3VdbeAddOp2(v, OP_Sequence, iParm, r2+nKey); sqlite3VdbeAddOp3(v, OP_MakeRecord, r2, nKey+2, r1); sqlite3VdbeAddOp2(v, OP_IdxInsert, iParm, r1); if( addrTest ) sqlite3VdbeJumpHere(v, addrTest); sqlite3ReleaseTempReg(pParse, r1); sqlite3ReleaseTempRange(pParse, r2, nKey+2); break; } #endif /* SQLITE_OMIT_CTE */ #if !defined(SQLITE_OMIT_TRIGGER) /* Discard the results. This is used for SELECT statements inside ** the body of a TRIGGER. The purpose of such selects is to call ** user-defined functions that have side effects. We do not care ** about the actual results of the select. */ default: { assert( eDest==SRT_Discard ); break; } #endif } /* Jump to the end of the loop if the LIMIT is reached. Except, if ** there is a sorter, in which case the sorter has already limited ** the output for us. */ if( pSort==0 && p->iLimit ){ sqlite3VdbeAddOp2(v, OP_DecrJumpZero, p->iLimit, iBreak); VdbeCoverage(v); } } /* ** Allocate a KeyInfo object sufficient for an index of N key columns and ** X extra columns. 
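**
** (A note on the layout, as implemented below: the aColl[] array of N+X
** CollSeq pointers and the aSortOrder[] array of N+X one-byte sort-order
** flags live in a single allocation, with aSortOrder[] starting at
** &aColl[N+X], which is why the size is computed as
** sizeof(KeyInfo) + (N+X)*(sizeof(CollSeq*)+1).)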
*/ SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoAlloc(sqlite3 *db, int N, int X){ KeyInfo *p = sqlite3DbMallocZero(0, sizeof(KeyInfo) + (N+X)*(sizeof(CollSeq*)+1)); if( p ){ p->aSortOrder = (u8*)&p->aColl[N+X]; p->nField = (u16)N; p->nXField = (u16)X; p->enc = ENC(db); p->db = db; p->nRef = 1; }else{ db->mallocFailed = 1; } return p; } /* ** Deallocate a KeyInfo object */ SQLITE_PRIVATE void sqlite3KeyInfoUnref(KeyInfo *p){ if( p ){ assert( p->nRef>0 ); p->nRef--; if( p->nRef==0 ) sqlite3DbFree(0, p); } } /* ** Make a new pointer to a KeyInfo object */ SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoRef(KeyInfo *p){ if( p ){ assert( p->nRef>0 ); p->nRef++; } return p; } #ifdef SQLITE_DEBUG /* ** Return TRUE if a KeyInfo object can be change. The KeyInfo object ** can only be changed if this is just a single reference to the object. ** ** This routine is used only inside of assert() statements. */ SQLITE_PRIVATE int sqlite3KeyInfoIsWriteable(KeyInfo *p){ return p->nRef==1; } #endif /* SQLITE_DEBUG */ /* ** Given an expression list, generate a KeyInfo structure that records ** the collating sequence for each expression in that expression list. ** ** If the ExprList is an ORDER BY or GROUP BY clause then the resulting ** KeyInfo structure is appropriate for initializing a virtual index to ** implement that clause. If the ExprList is the result set of a SELECT ** then the KeyInfo structure is appropriate for initializing a virtual ** index to implement a DISTINCT test. ** ** Space to hold the KeyInfo structure is obtained from malloc. The calling ** function is responsible for seeing that this structure is eventually ** freed. */ static KeyInfo *keyInfoFromExprList( Parse *pParse, /* Parsing context */ ExprList *pList, /* Form the KeyInfo object from this ExprList */ int iStart, /* Begin with this column of pList */ int nExtra /* Add this many extra columns to the end */ ){ int nExpr; KeyInfo *pInfo; struct ExprList_item *pItem; sqlite3 *db = pParse->db; int i; nExpr = pList->nExpr; pInfo = sqlite3KeyInfoAlloc(db, nExpr-iStart, nExtra+1); if( pInfo ){ assert( sqlite3KeyInfoIsWriteable(pInfo) ); for(i=iStart, pItem=pList->a+iStart; ipExpr); if( !pColl ) pColl = db->pDfltColl; pInfo->aColl[i-iStart] = pColl; pInfo->aSortOrder[i-iStart] = pItem->sortOrder; } } return pInfo; } /* ** Name of the connection operator, used for error messages. */ static const char *selectOpName(int id){ char *z; switch( id ){ case TK_ALL: z = "UNION ALL"; break; case TK_INTERSECT: z = "INTERSECT"; break; case TK_EXCEPT: z = "EXCEPT"; break; default: z = "UNION"; break; } return z; } #ifndef SQLITE_OMIT_EXPLAIN /* ** Unless an "EXPLAIN QUERY PLAN" command is being processed, this function ** is a no-op. Otherwise, it adds a single row of output to the EQP result, ** where the caption is of the form: ** ** "USE TEMP B-TREE FOR xxx" ** ** where xxx is one of "DISTINCT", "ORDER BY" or "GROUP BY". Exactly which ** is determined by the zUsage argument. */ static void explainTempTable(Parse *pParse, const char *zUsage){ if( pParse->explain==2 ){ Vdbe *v = pParse->pVdbe; char *zMsg = sqlite3MPrintf(pParse->db, "USE TEMP B-TREE FOR %s", zUsage); sqlite3VdbeAddOp4(v, OP_Explain, pParse->iSelectId, 0, 0, zMsg, P4_DYNAMIC); } } /* ** Assign expression b to lvalue a. A second, no-op, version of this macro ** is provided when SQLITE_OMIT_EXPLAIN is defined. 
This allows the code ** in sqlite3Select() to assign values to structure member variables that ** only exist if SQLITE_OMIT_EXPLAIN is not defined without polluting the ** code with #ifndef directives. */ # define explainSetInteger(a, b) a = b #else /* No-op versions of the explainXXX() functions and macros. */ # define explainTempTable(y,z) # define explainSetInteger(y,z) #endif #if !defined(SQLITE_OMIT_EXPLAIN) && !defined(SQLITE_OMIT_COMPOUND_SELECT) /* ** Unless an "EXPLAIN QUERY PLAN" command is being processed, this function ** is a no-op. Otherwise, it adds a single row of output to the EQP result, ** where the caption is of one of the two forms: ** ** "COMPOSITE SUBQUERIES iSub1 and iSub2 (op)" ** "COMPOSITE SUBQUERIES iSub1 and iSub2 USING TEMP B-TREE (op)" ** ** where iSub1 and iSub2 are the integers passed as the corresponding ** function parameters, and op is the text representation of the parameter ** of the same name. The parameter "op" must be one of TK_UNION, TK_EXCEPT, ** TK_INTERSECT or TK_ALL. The first form is used if argument bUseTmp is ** false, or the second form if it is true. */ static void explainComposite( Parse *pParse, /* Parse context */ int op, /* One of TK_UNION, TK_EXCEPT etc. */ int iSub1, /* Subquery id 1 */ int iSub2, /* Subquery id 2 */ int bUseTmp /* True if a temp table was used */ ){ assert( op==TK_UNION || op==TK_EXCEPT || op==TK_INTERSECT || op==TK_ALL ); if( pParse->explain==2 ){ Vdbe *v = pParse->pVdbe; char *zMsg = sqlite3MPrintf( pParse->db, "COMPOUND SUBQUERIES %d AND %d %s(%s)", iSub1, iSub2, bUseTmp?"USING TEMP B-TREE ":"", selectOpName(op) ); sqlite3VdbeAddOp4(v, OP_Explain, pParse->iSelectId, 0, 0, zMsg, P4_DYNAMIC); } } #else /* No-op versions of the explainXXX() functions and macros. */ # define explainComposite(v,w,x,y,z) #endif /* ** If the inner loop was generated using a non-null pOrderBy argument, ** then the results were placed in a sorter. After the loop is terminated ** we need to run the sorter and output the results. The following ** routine generates the code needed to do that. */ static void generateSortTail( Parse *pParse, /* Parsing context */ Select *p, /* The SELECT statement */ SortCtx *pSort, /* Information on the ORDER BY clause */ int nColumn, /* Number of columns of data */ SelectDest *pDest /* Write the sorted results here */ ){ Vdbe *v = pParse->pVdbe; /* The prepared statement */ int addrBreak = sqlite3VdbeMakeLabel(v); /* Jump here to exit loop */ int addrContinue = sqlite3VdbeMakeLabel(v); /* Jump here for next cycle */ int addr; int addrOnce = 0; int iTab; ExprList *pOrderBy = pSort->pOrderBy; int eDest = pDest->eDest; int iParm = pDest->iSDParm; int regRow; int regRowid; int nKey; int iSortTab; /* Sorter cursor to read from */ int nSortData; /* Trailing values to read from sorter */ int i; int bSeq; /* True if sorter record includes seq. no. 
*/
#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS
  struct ExprList_item *aOutEx = p->pEList->a;
#endif

  if( pSort->labelBkOut ){
    sqlite3VdbeAddOp2(v, OP_Gosub, pSort->regReturn, pSort->labelBkOut);
    sqlite3VdbeAddOp2(v, OP_Goto, 0, addrBreak);
    sqlite3VdbeResolveLabel(v, pSort->labelBkOut);
  }
  iTab = pSort->iECursor;
  if( eDest==SRT_Output || eDest==SRT_Coroutine ){
    regRowid = 0;
    regRow = pDest->iSdst;
    nSortData = nColumn;
  }else{
    regRowid = sqlite3GetTempReg(pParse);
    regRow = sqlite3GetTempReg(pParse);
    nSortData = 1;
  }
  nKey = pOrderBy->nExpr - pSort->nOBSat;
  if( pSort->sortFlags & SORTFLAG_UseSorter ){
    int regSortOut = ++pParse->nMem;
    iSortTab = pParse->nTab++;
    if( pSort->labelBkOut ){
      addrOnce = sqlite3CodeOnce(pParse); VdbeCoverage(v);
    }
    sqlite3VdbeAddOp3(v, OP_OpenPseudo, iSortTab, regSortOut, nKey+1+nSortData);
    if( addrOnce ) sqlite3VdbeJumpHere(v, addrOnce);
    addr = 1 + sqlite3VdbeAddOp2(v, OP_SorterSort, iTab, addrBreak);
    VdbeCoverage(v);
    codeOffset(v, p->iOffset, addrContinue);
    sqlite3VdbeAddOp3(v, OP_SorterData, iTab, regSortOut, iSortTab);
    bSeq = 0;
  }else{
    addr = 1 + sqlite3VdbeAddOp2(v, OP_Sort, iTab, addrBreak); VdbeCoverage(v);
    codeOffset(v, p->iOffset, addrContinue);
    iSortTab = iTab;
    bSeq = 1;
  }
  for(i=0; i<nSortData; i++){
    sqlite3VdbeAddOp3(v, OP_Column, iSortTab, nKey+bSeq+i, regRow+i);
    VdbeComment((v, "%s", aOutEx[i].zName ? aOutEx[i].zName : aOutEx[i].zSpan));
  }
  switch( eDest ){
    case SRT_Table:
    case SRT_EphemTab: {
      testcase( eDest==SRT_Table );
      testcase( eDest==SRT_EphemTab );
      sqlite3VdbeAddOp2(v, OP_NewRowid, iParm, regRowid);
      sqlite3VdbeAddOp3(v, OP_Insert, iParm, regRow, regRowid);
      sqlite3VdbeChangeP5(v, OPFLAG_APPEND);
      break;
    }
#ifndef SQLITE_OMIT_SUBQUERY
    case SRT_Set: {
      assert( nColumn==1 );
      sqlite3VdbeAddOp4(v, OP_MakeRecord, regRow, 1, regRowid,
                        &pDest->affSdst, 1);
      sqlite3ExprCacheAffinityChange(pParse, regRow, 1);
      sqlite3VdbeAddOp2(v, OP_IdxInsert, iParm, regRowid);
      break;
    }
    case SRT_Mem: {
      assert( nColumn==1 );
      sqlite3ExprCodeMove(pParse, regRow, iParm, 1);
      /* The LIMIT clause will terminate the loop for us */
      break;
    }
#endif
    default: {
      assert( eDest==SRT_Output || eDest==SRT_Coroutine );
      testcase( eDest==SRT_Output );
      testcase( eDest==SRT_Coroutine );
      if( eDest==SRT_Output ){
        sqlite3VdbeAddOp2(v, OP_ResultRow, pDest->iSdst, nColumn);
        sqlite3ExprCacheAffinityChange(pParse, pDest->iSdst, nColumn);
      }else{
        sqlite3VdbeAddOp1(v, OP_Yield, pDest->iSDParm);
      }
      break;
    }
  }
  if( regRowid ){
    sqlite3ReleaseTempReg(pParse, regRow);
    sqlite3ReleaseTempReg(pParse, regRowid);
  }
  /* The bottom of the loop
  */
  sqlite3VdbeResolveLabel(v, addrContinue);
  if( pSort->sortFlags & SORTFLAG_UseSorter ){
    sqlite3VdbeAddOp2(v, OP_SorterNext, iTab, addr); VdbeCoverage(v);
  }else{
    sqlite3VdbeAddOp2(v, OP_Next, iTab, addr); VdbeCoverage(v);
  }
  if( pSort->regReturn ) sqlite3VdbeAddOp1(v, OP_Return, pSort->regReturn);
  sqlite3VdbeResolveLabel(v, addrBreak);
}

/*
** Return a pointer to a string containing the 'declaration type' of the
** expression pExpr. The string may be treated as static by the caller.
**
** Also try to estimate the size of the returned value and return that
** result in *pEstWidth.
**
** The declaration type is the exact datatype definition extracted from the
** original CREATE TABLE statement if the expression is a column. The
** declaration type for a ROWID field is INTEGER. Exactly when an expression
** is considered a column can be complex in the presence of subqueries. The
** result-set expression in all of the following SELECT statements is
** considered a column by this function.
**
**   SELECT col FROM tbl;
**   SELECT (SELECT col FROM tbl;
**   SELECT (SELECT col FROM tbl);
**   SELECT abc FROM (SELECT col AS abc FROM tbl);
**
** The declaration type for any expression other than a column is NULL.
**
** This routine has either 3 or 6 parameters depending on whether or not
** the SQLITE_ENABLE_COLUMN_METADATA compile-time option is used.
*/ #ifdef SQLITE_ENABLE_COLUMN_METADATA # define columnType(A,B,C,D,E,F) columnTypeImpl(A,B,C,D,E,F) #else /* if !defined(SQLITE_ENABLE_COLUMN_METADATA) */ # define columnType(A,B,C,D,E,F) columnTypeImpl(A,B,F) #endif static const char *columnTypeImpl( NameContext *pNC, Expr *pExpr, #ifdef SQLITE_ENABLE_COLUMN_METADATA const char **pzOrigDb, const char **pzOrigTab, const char **pzOrigCol, #endif u8 *pEstWidth ){ char const *zType = 0; int j; u8 estWidth = 1; #ifdef SQLITE_ENABLE_COLUMN_METADATA char const *zOrigDb = 0; char const *zOrigTab = 0; char const *zOrigCol = 0; #endif if( NEVER(pExpr==0) || pNC->pSrcList==0 ) return 0; switch( pExpr->op ){ case TK_AGG_COLUMN: case TK_COLUMN: { /* The expression is a column. Locate the table the column is being ** extracted from in NameContext.pSrcList. This table may be real ** database table or a subquery. */ Table *pTab = 0; /* Table structure column is extracted from */ Select *pS = 0; /* Select the column is extracted from */ int iCol = pExpr->iColumn; /* Index of column in pTab */ testcase( pExpr->op==TK_AGG_COLUMN ); testcase( pExpr->op==TK_COLUMN ); while( pNC && !pTab ){ SrcList *pTabList = pNC->pSrcList; for(j=0;jnSrc && pTabList->a[j].iCursor!=pExpr->iTable;j++); if( jnSrc ){ pTab = pTabList->a[j].pTab; pS = pTabList->a[j].pSelect; }else{ pNC = pNC->pNext; } } if( pTab==0 ){ /* At one time, code such as "SELECT new.x" within a trigger would ** cause this condition to run. Since then, we have restructured how ** trigger code is generated and so this condition is no longer ** possible. However, it can still be true for statements like ** the following: ** ** CREATE TABLE t1(col INTEGER); ** SELECT (SELECT t1.col) FROM FROM t1; ** ** when columnType() is called on the expression "t1.col" in the ** sub-select. In this case, set the column type to NULL, even ** though it should really be "INTEGER". ** ** This is not a problem, as the column type of "t1.col" is never ** used. When columnType() is called on the expression ** "(SELECT t1.col)", the correct type is returned (see the TK_SELECT ** branch below. */ break; } assert( pTab && pExpr->pTab==pTab ); if( pS ){ /* The "table" is actually a sub-select or a view in the FROM clause ** of the SELECT statement. Return the declaration type and origin ** data for the result-set column of the sub-select. */ if( iCol>=0 && ALWAYS(iColpEList->nExpr) ){ /* If iCol is less than zero, then the expression requests the ** rowid of the sub-select or view. This expression is legal (see ** test case misc2.2.2) - it always evaluates to NULL. ** ** The ALWAYS() is because iCol>=pS->pEList->nExpr will have been ** caught already by name resolution. 
*/ NameContext sNC; Expr *p = pS->pEList->a[iCol].pExpr; sNC.pSrcList = pS->pSrc; sNC.pNext = pNC; sNC.pParse = pNC->pParse; zType = columnType(&sNC, p,&zOrigDb,&zOrigTab,&zOrigCol, &estWidth); } }else if( pTab->pSchema ){ /* A real table */ assert( !pS ); if( iCol<0 ) iCol = pTab->iPKey; assert( iCol==-1 || (iCol>=0 && iColnCol) ); #ifdef SQLITE_ENABLE_COLUMN_METADATA if( iCol<0 ){ zType = "INTEGER"; zOrigCol = "rowid"; }else{ zType = pTab->aCol[iCol].zType; zOrigCol = pTab->aCol[iCol].zName; estWidth = pTab->aCol[iCol].szEst; } zOrigTab = pTab->zName; if( pNC->pParse ){ int iDb = sqlite3SchemaToIndex(pNC->pParse->db, pTab->pSchema); zOrigDb = pNC->pParse->db->aDb[iDb].zName; } #else if( iCol<0 ){ zType = "INTEGER"; }else{ zType = pTab->aCol[iCol].zType; estWidth = pTab->aCol[iCol].szEst; } #endif } break; } #ifndef SQLITE_OMIT_SUBQUERY case TK_SELECT: { /* The expression is a sub-select. Return the declaration type and ** origin info for the single column in the result set of the SELECT ** statement. */ NameContext sNC; Select *pS = pExpr->x.pSelect; Expr *p = pS->pEList->a[0].pExpr; assert( ExprHasProperty(pExpr, EP_xIsSelect) ); sNC.pSrcList = pS->pSrc; sNC.pNext = pNC; sNC.pParse = pNC->pParse; zType = columnType(&sNC, p, &zOrigDb, &zOrigTab, &zOrigCol, &estWidth); break; } #endif } #ifdef SQLITE_ENABLE_COLUMN_METADATA if( pzOrigDb ){ assert( pzOrigTab && pzOrigCol ); *pzOrigDb = zOrigDb; *pzOrigTab = zOrigTab; *pzOrigCol = zOrigCol; } #endif if( pEstWidth ) *pEstWidth = estWidth; return zType; } /* ** Generate code that will tell the VDBE the declaration types of columns ** in the result set. */ static void generateColumnTypes( Parse *pParse, /* Parser context */ SrcList *pTabList, /* List of tables */ ExprList *pEList /* Expressions defining the result set */ ){ #ifndef SQLITE_OMIT_DECLTYPE Vdbe *v = pParse->pVdbe; int i; NameContext sNC; sNC.pSrcList = pTabList; sNC.pParse = pParse; for(i=0; inExpr; i++){ Expr *p = pEList->a[i].pExpr; const char *zType; #ifdef SQLITE_ENABLE_COLUMN_METADATA const char *zOrigDb = 0; const char *zOrigTab = 0; const char *zOrigCol = 0; zType = columnType(&sNC, p, &zOrigDb, &zOrigTab, &zOrigCol, 0); /* The vdbe must make its own copy of the column-type and other ** column specific strings, in case the schema is reset before this ** virtual machine is deleted. */ sqlite3VdbeSetColName(v, i, COLNAME_DATABASE, zOrigDb, SQLITE_TRANSIENT); sqlite3VdbeSetColName(v, i, COLNAME_TABLE, zOrigTab, SQLITE_TRANSIENT); sqlite3VdbeSetColName(v, i, COLNAME_COLUMN, zOrigCol, SQLITE_TRANSIENT); #else zType = columnType(&sNC, p, 0, 0, 0, 0); #endif sqlite3VdbeSetColName(v, i, COLNAME_DECLTYPE, zType, SQLITE_TRANSIENT); } #endif /* !defined(SQLITE_OMIT_DECLTYPE) */ } /* ** Generate code that will tell the VDBE the names of columns ** in the result set. This information is used to provide the ** azCol[] values in the callback. 
*/ static void generateColumnNames( Parse *pParse, /* Parser context */ SrcList *pTabList, /* List of tables */ ExprList *pEList /* Expressions defining the result set */ ){ Vdbe *v = pParse->pVdbe; int i, j; sqlite3 *db = pParse->db; int fullNames, shortNames; #ifndef SQLITE_OMIT_EXPLAIN /* If this is an EXPLAIN, skip this step */ if( pParse->explain ){ return; } #endif if( pParse->colNamesSet || NEVER(v==0) || db->mallocFailed ) return; pParse->colNamesSet = 1; fullNames = (db->flags & SQLITE_FullColNames)!=0; shortNames = (db->flags & SQLITE_ShortColNames)!=0; sqlite3VdbeSetNumCols(v, pEList->nExpr); for(i=0; inExpr; i++){ Expr *p; p = pEList->a[i].pExpr; if( NEVER(p==0) ) continue; if( pEList->a[i].zName ){ char *zName = pEList->a[i].zName; sqlite3VdbeSetColName(v, i, COLNAME_NAME, zName, SQLITE_TRANSIENT); }else if( (p->op==TK_COLUMN || p->op==TK_AGG_COLUMN) && pTabList ){ Table *pTab; char *zCol; int iCol = p->iColumn; for(j=0; ALWAYS(jnSrc); j++){ if( pTabList->a[j].iCursor==p->iTable ) break; } assert( jnSrc ); pTab = pTabList->a[j].pTab; if( iCol<0 ) iCol = pTab->iPKey; assert( iCol==-1 || (iCol>=0 && iColnCol) ); if( iCol<0 ){ zCol = "rowid"; }else{ zCol = pTab->aCol[iCol].zName; } if( !shortNames && !fullNames ){ sqlite3VdbeSetColName(v, i, COLNAME_NAME, sqlite3DbStrDup(db, pEList->a[i].zSpan), SQLITE_DYNAMIC); }else if( fullNames ){ char *zName = 0; zName = sqlite3MPrintf(db, "%s.%s", pTab->zName, zCol); sqlite3VdbeSetColName(v, i, COLNAME_NAME, zName, SQLITE_DYNAMIC); }else{ sqlite3VdbeSetColName(v, i, COLNAME_NAME, zCol, SQLITE_TRANSIENT); } }else{ const char *z = pEList->a[i].zSpan; z = z==0 ? sqlite3MPrintf(db, "column%d", i+1) : sqlite3DbStrDup(db, z); sqlite3VdbeSetColName(v, i, COLNAME_NAME, z, SQLITE_DYNAMIC); } } generateColumnTypes(pParse, pTabList, pEList); } /* ** Given an expression list (which is really the list of expressions ** that form the result set of a SELECT statement) compute appropriate ** column names for a table that would hold the expression list. ** ** All column names will be unique. ** ** Only the column names are computed. Column.zType, Column.zColl, ** and other fields of Column are zeroed. ** ** Return SQLITE_OK on success. If a memory allocation error occurs, ** store NULL in *paCol and 0 in *pnCol and return SQLITE_NOMEM. 
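**
** For example (an illustrative query, not taken from this file): the result
** set "SELECT a, b+1 AS total, count(*) FROM t" produces the column names
** "a", "total", and "count(*)" under the rules coded below (an AS name wins,
** then the source column's name for a plain column reference, then the
** original expression text).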
*/
static int selectColumnsFromExprList(
  Parse *pParse,          /* Parsing context */
  ExprList *pEList,       /* Expr list from which to derive column names */
  i16 *pnCol,             /* Write the number of columns here */
  Column **paCol          /* Write the new column list here */
){
  sqlite3 *db = pParse->db;   /* Database connection */
  int i, j;                   /* Loop counters */
  int cnt;                    /* Index added to make the name unique */
  Column *aCol, *pCol;        /* For looping over result columns */
  int nCol;                   /* Number of columns in the result set */
  Expr *p;                    /* Expression for a single result column */
  char *zName;                /* Column name */
  int nName;                  /* Size of name in zName[] */

  if( pEList ){
    nCol = pEList->nExpr;
    aCol = sqlite3DbMallocZero(db, sizeof(aCol[0])*nCol);
    testcase( aCol==0 );
  }else{
    nCol = 0;
    aCol = 0;
  }
  *pnCol = nCol;
  *paCol = aCol;

  for(i=0, pCol=aCol; i<nCol; i++, pCol++){
    /* Get an appropriate name for the column
    */
    p = sqlite3ExprSkipCollate(pEList->a[i].pExpr);
    if( (zName = pEList->a[i].zName)!=0 ){
      /* If the column contains an "AS <name>" phrase, use <name> as the name */
      zName = sqlite3DbStrDup(db, zName);
    }else{
      Expr *pColExpr = p;  /* The expression that is the result column name */
      Table *pTab;         /* Table associated with this expression */
      while( pColExpr->op==TK_DOT ){
        pColExpr = pColExpr->pRight;
        assert( pColExpr!=0 );
      }
      if( pColExpr->op==TK_COLUMN && ALWAYS(pColExpr->pTab!=0) ){
        /* For columns use the column name name */
        int iCol = pColExpr->iColumn;
        pTab = pColExpr->pTab;
        if( iCol<0 ) iCol = pTab->iPKey;
        zName = sqlite3MPrintf(db, "%s",
                    iCol>=0 ? pTab->aCol[iCol].zName : "rowid");
      }else if( pColExpr->op==TK_ID ){
        assert( !ExprHasProperty(pColExpr, EP_IntValue) );
        zName = sqlite3MPrintf(db, "%s", pColExpr->u.zToken);
      }else{
        /* Use the original text of the column expression as its name */
        zName = sqlite3MPrintf(db, "%s", pEList->a[i].zSpan);
      }
    }
    if( db->mallocFailed ){
      sqlite3DbFree(db, zName);
      break;
    }

    /* Make sure the column name is unique. If the name is not unique,
    ** append an integer to the name so that it becomes unique.
    */
    nName = sqlite3Strlen30(zName);
    for(j=cnt=0; j<i; j++){
      if( sqlite3StrICmp(aCol[j].zName, zName)==0 ){
        char *zNewName;
        int k;
        for(k=nName-1; k>1 && sqlite3Isdigit(zName[k]); k--){}
        if( k>=0 && zName[k]==':' ) nName = k;
        zName[nName] = 0;
        zNewName = sqlite3MPrintf(db, "%s:%d", zName, ++cnt);
        sqlite3DbFree(db, zName);
        zName = zNewName;
        j = -1;
        if( zName==0 ) break;
      }
    }
    pCol->zName = zName;
  }
  if( db->mallocFailed ){
    for(j=0; j<i; j++){
      sqlite3DbFree(db, aCol[j].zName);
    }
    sqlite3DbFree(db, aCol);
    *paCol = 0;
    *pnCol = 0;
    return SQLITE_NOMEM;
  }
  return SQLITE_OK;
}

/*
** Add type and collation information to a column list based on
** a SELECT statement.
**
** The column list presumably came from selectColumnsFromExprList().
** The column list has only names, not types or collations. This
** routine goes through and adds the types and collations.
**
** This routine requires that all identifiers in the SELECT
** statement be resolved.
*/
static void selectAddColumnTypeAndCollation(
  Parse *pParse,        /* Parsing context */
  Table *pTab,          /* Add column type information to this table */
  Select *pSelect       /* SELECT used to determine types and collations */
){
  sqlite3 *db = pParse->db;
  NameContext sNC;
  Column *pCol;
  CollSeq *pColl;
  int i;
  Expr *p;
  struct ExprList_item *a;
  u64 szAll = 0;

  assert( pSelect!=0 );
  assert( (pSelect->selFlags & SF_Resolved)!=0 );
  assert( pTab->nCol==pSelect->pEList->nExpr || db->mallocFailed );
  if( db->mallocFailed ) return;
  memset(&sNC, 0, sizeof(sNC));
  sNC.pSrcList = pSelect->pSrc;
  a = pSelect->pEList->a;
  for(i=0, pCol=pTab->aCol; i<pTab->nCol; i++, pCol++){
    p = a[i].pExpr;
    if( pCol->zType==0 ){
      pCol->zType = sqlite3DbStrDup(db,
                        columnType(&sNC, p, 0, 0, 0, &pCol->szEst));
    }
    szAll += pCol->szEst;
    pCol->affinity = sqlite3ExprAffinity(p);
    if( pCol->affinity==0 ) pCol->affinity = SQLITE_AFF_BLOB;
    pColl = sqlite3ExprCollSeq(pParse, p);
    if( pColl && pCol->zColl==0 ){
      pCol->zColl = sqlite3DbStrDup(db, pColl->zName);
    }
  }
  pTab->szTabRow = sqlite3LogEst(szAll*4);
}

/*
** Given a SELECT statement, generate a Table structure that describes
** the result set of that SELECT.
*/ SQLITE_PRIVATE Table *sqlite3ResultSetOfSelect(Parse *pParse, Select *pSelect){ Table *pTab; sqlite3 *db = pParse->db; int savedFlags; savedFlags = db->flags; db->flags &= ~SQLITE_FullColNames; db->flags |= SQLITE_ShortColNames; sqlite3SelectPrep(pParse, pSelect, 0); if( pParse->nErr ) return 0; while( pSelect->pPrior ) pSelect = pSelect->pPrior; db->flags = savedFlags; pTab = sqlite3DbMallocZero(db, sizeof(Table) ); if( pTab==0 ){ return 0; } /* The sqlite3ResultSetOfSelect() is only used n contexts where lookaside ** is disabled */ assert( db->lookaside.bEnabled==0 ); pTab->nRef = 1; pTab->zName = 0; pTab->nRowLogEst = 200; assert( 200==sqlite3LogEst(1048576) ); selectColumnsFromExprList(pParse, pSelect->pEList, &pTab->nCol, &pTab->aCol); selectAddColumnTypeAndCollation(pParse, pTab, pSelect); pTab->iPKey = -1; if( db->mallocFailed ){ sqlite3DeleteTable(db, pTab); return 0; } return pTab; } /* ** Get a VDBE for the given parser context. Create a new one if necessary. ** If an error occurs, return NULL and leave a message in pParse. */ SQLITE_PRIVATE Vdbe *sqlite3GetVdbe(Parse *pParse){ Vdbe *v = pParse->pVdbe; if( v==0 ){ v = pParse->pVdbe = sqlite3VdbeCreate(pParse); if( v ) sqlite3VdbeAddOp0(v, OP_Init); if( pParse->pToplevel==0 && OptimizationEnabled(pParse->db,SQLITE_FactorOutConst) ){ pParse->okConstFactor = 1; } } return v; } /* ** Compute the iLimit and iOffset fields of the SELECT based on the ** pLimit and pOffset expressions. pLimit and pOffset hold the expressions ** that appear in the original SQL statement after the LIMIT and OFFSET ** keywords. Or NULL if those keywords are omitted. iLimit and iOffset ** are the integer memory register numbers for counters used to compute ** the limit and offset. If there is no limit and/or offset, then ** iLimit and iOffset are negative. ** ** This routine changes the values of iLimit and iOffset only if ** a limit or offset is defined by pLimit and pOffset. iLimit and ** iOffset should have been preset to appropriate default values (zero) ** prior to calling this routine. ** ** The iOffset register (if it exists) is initialized to the value ** of the OFFSET. The iLimit register is initialized to LIMIT. Register ** iOffset+1 is initialized to LIMIT+OFFSET. ** ** Only if pLimit!=0 or pOffset!=0 do the limit registers get ** redefined. The UNION ALL operator uses this property to force ** the reuse of the same limit and offset registers across multiple ** SELECT statements. */ static void computeLimitRegisters(Parse *pParse, Select *p, int iBreak){ Vdbe *v = 0; int iLimit = 0; int iOffset; int addr1, n; if( p->iLimit ) return; /* ** "LIMIT -1" always shows all rows. There is some ** controversy about what the correct behavior should be. ** The current implementation interprets "LIMIT 0" to mean ** no rows. 
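**
** For example, "SELECT x FROM t LIMIT -1" returns every row of t, while
** "SELECT x FROM t LIMIT 0" returns no rows at all.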
*/ sqlite3ExprCacheClear(pParse); assert( p->pOffset==0 || p->pLimit!=0 ); if( p->pLimit ){ p->iLimit = iLimit = ++pParse->nMem; v = sqlite3GetVdbe(pParse); assert( v!=0 ); if( sqlite3ExprIsInteger(p->pLimit, &n) ){ sqlite3VdbeAddOp2(v, OP_Integer, n, iLimit); VdbeComment((v, "LIMIT counter")); if( n==0 ){ sqlite3VdbeAddOp2(v, OP_Goto, 0, iBreak); }else if( n>=0 && p->nSelectRow>(u64)n ){ p->nSelectRow = n; } }else{ sqlite3ExprCode(pParse, p->pLimit, iLimit); sqlite3VdbeAddOp1(v, OP_MustBeInt, iLimit); VdbeCoverage(v); VdbeComment((v, "LIMIT counter")); sqlite3VdbeAddOp2(v, OP_IfNot, iLimit, iBreak); VdbeCoverage(v); } if( p->pOffset ){ p->iOffset = iOffset = ++pParse->nMem; pParse->nMem++; /* Allocate an extra register for limit+offset */ sqlite3ExprCode(pParse, p->pOffset, iOffset); sqlite3VdbeAddOp1(v, OP_MustBeInt, iOffset); VdbeCoverage(v); VdbeComment((v, "OFFSET counter")); addr1 = sqlite3VdbeAddOp1(v, OP_IfPos, iOffset); VdbeCoverage(v); sqlite3VdbeAddOp2(v, OP_Integer, 0, iOffset); sqlite3VdbeJumpHere(v, addr1); sqlite3VdbeAddOp3(v, OP_Add, iLimit, iOffset, iOffset+1); VdbeComment((v, "LIMIT+OFFSET")); addr1 = sqlite3VdbeAddOp1(v, OP_IfPos, iLimit); VdbeCoverage(v); sqlite3VdbeAddOp2(v, OP_Integer, -1, iOffset+1); sqlite3VdbeJumpHere(v, addr1); } } } #ifndef SQLITE_OMIT_COMPOUND_SELECT /* ** Return the appropriate collating sequence for the iCol-th column of ** the result set for the compound-select statement "p". Return NULL if ** the column has no default collating sequence. ** ** The collating sequence for the compound select is taken from the ** left-most term of the select that has a collating sequence. */ static CollSeq *multiSelectCollSeq(Parse *pParse, Select *p, int iCol){ CollSeq *pRet; if( p->pPrior ){ pRet = multiSelectCollSeq(pParse, p->pPrior, iCol); }else{ pRet = 0; } assert( iCol>=0 ); /* iCol must be less than p->pEList->nExpr. Otherwise an error would ** have been thrown during name resolution and we would not have gotten ** this far */ if( pRet==0 && ALWAYS(iColpEList->nExpr) ){ pRet = sqlite3ExprCollSeq(pParse, p->pEList->a[iCol].pExpr); } return pRet; } /* ** The select statement passed as the second parameter is a compound SELECT ** with an ORDER BY clause. This function allocates and returns a KeyInfo ** structure suitable for implementing the ORDER BY. ** ** Space to hold the KeyInfo structure is obtained from malloc. The calling ** function is responsible for ensuring that this structure is eventually ** freed. 
*/ static KeyInfo *multiSelectOrderByKeyInfo(Parse *pParse, Select *p, int nExtra){ ExprList *pOrderBy = p->pOrderBy; int nOrderBy = p->pOrderBy->nExpr; sqlite3 *db = pParse->db; KeyInfo *pRet = sqlite3KeyInfoAlloc(db, nOrderBy+nExtra, 1); if( pRet ){ int i; for(i=0; ia[i]; Expr *pTerm = pItem->pExpr; CollSeq *pColl; if( pTerm->flags & EP_Collate ){ pColl = sqlite3ExprCollSeq(pParse, pTerm); }else{ pColl = multiSelectCollSeq(pParse, p, pItem->u.x.iOrderByCol-1); if( pColl==0 ) pColl = db->pDfltColl; pOrderBy->a[i].pExpr = sqlite3ExprAddCollateString(pParse, pTerm, pColl->zName); } assert( sqlite3KeyInfoIsWriteable(pRet) ); pRet->aColl[i] = pColl; pRet->aSortOrder[i] = pOrderBy->a[i].sortOrder; } } return pRet; } #ifndef SQLITE_OMIT_CTE /* ** This routine generates VDBE code to compute the content of a WITH RECURSIVE ** query of the form: ** ** AS ( UNION [ALL] ) ** \___________/ \_______________/ ** p->pPrior p ** ** ** There is exactly one reference to the recursive-table in the FROM clause ** of recursive-query, marked with the SrcList->a[].isRecursive flag. ** ** The setup-query runs once to generate an initial set of rows that go ** into a Queue table. Rows are extracted from the Queue table one by ** one. Each row extracted from Queue is output to pDest. Then the single ** extracted row (now in the iCurrent table) becomes the content of the ** recursive-table for a recursive-query run. The output of the recursive-query ** is added back into the Queue table. Then another row is extracted from Queue ** and the iteration continues until the Queue table is empty. ** ** If the compound query operator is UNION then no duplicate rows are ever ** inserted into the Queue table. The iDistinct table keeps a copy of all rows ** that have ever been inserted into Queue and causes duplicates to be ** discarded. If the operator is UNION ALL, then duplicates are allowed. ** ** If the query has an ORDER BY, then entries in the Queue table are kept in ** ORDER BY order and the first entry is extracted for each cycle. Without ** an ORDER BY, the Queue table is just a FIFO. ** ** If a LIMIT clause is provided, then the iteration stops after LIMIT rows ** have been output to pDest. A LIMIT of zero means to output no rows and a ** negative LIMIT means to output all rows. If there is also an OFFSET clause ** with a positive value, then the first OFFSET outputs are discarded rather ** than being sent to pDest. The LIMIT count does not begin until after OFFSET ** rows have been skipped. 
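**
** An illustrative query of this shape (not taken from this file):
**
**     WITH RECURSIVE cnt(x) AS (
**       SELECT 1
**       UNION ALL
**       SELECT x+1 FROM cnt WHERE x<10
**     )
**     SELECT x FROM cnt;
**
** Here cnt is the recursive-table, "SELECT 1" is the setup-query, and
** "SELECT x+1 FROM cnt WHERE x<10" is the recursive-query.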
*/ static void generateWithRecursiveQuery( Parse *pParse, /* Parsing context */ Select *p, /* The recursive SELECT to be coded */ SelectDest *pDest /* What to do with query results */ ){ SrcList *pSrc = p->pSrc; /* The FROM clause of the recursive query */ int nCol = p->pEList->nExpr; /* Number of columns in the recursive table */ Vdbe *v = pParse->pVdbe; /* The prepared statement under construction */ Select *pSetup = p->pPrior; /* The setup query */ int addrTop; /* Top of the loop */ int addrCont, addrBreak; /* CONTINUE and BREAK addresses */ int iCurrent = 0; /* The Current table */ int regCurrent; /* Register holding Current table */ int iQueue; /* The Queue table */ int iDistinct = 0; /* To ensure unique results if UNION */ int eDest = SRT_Fifo; /* How to write to Queue */ SelectDest destQueue; /* SelectDest targetting the Queue table */ int i; /* Loop counter */ int rc; /* Result code */ ExprList *pOrderBy; /* The ORDER BY clause */ Expr *pLimit, *pOffset; /* Saved LIMIT and OFFSET */ int regLimit, regOffset; /* Registers used by LIMIT and OFFSET */ /* Obtain authorization to do a recursive query */ if( sqlite3AuthCheck(pParse, SQLITE_RECURSIVE, 0, 0, 0) ) return; /* Process the LIMIT and OFFSET clauses, if they exist */ addrBreak = sqlite3VdbeMakeLabel(v); computeLimitRegisters(pParse, p, addrBreak); pLimit = p->pLimit; pOffset = p->pOffset; regLimit = p->iLimit; regOffset = p->iOffset; p->pLimit = p->pOffset = 0; p->iLimit = p->iOffset = 0; pOrderBy = p->pOrderBy; /* Locate the cursor number of the Current table */ for(i=0; ALWAYS(inSrc); i++){ if( pSrc->a[i].isRecursive ){ iCurrent = pSrc->a[i].iCursor; break; } } /* Allocate cursors numbers for Queue and Distinct. The cursor number for ** the Distinct table must be exactly one greater than Queue in order ** for the SRT_DistFifo and SRT_DistQueue destinations to work. */ iQueue = pParse->nTab++; if( p->op==TK_UNION ){ eDest = pOrderBy ? SRT_DistQueue : SRT_DistFifo; iDistinct = pParse->nTab++; }else{ eDest = pOrderBy ? SRT_Queue : SRT_Fifo; } sqlite3SelectDestInit(&destQueue, eDest, iQueue); /* Allocate cursors for Current, Queue, and Distinct. */ regCurrent = ++pParse->nMem; sqlite3VdbeAddOp3(v, OP_OpenPseudo, iCurrent, regCurrent, nCol); if( pOrderBy ){ KeyInfo *pKeyInfo = multiSelectOrderByKeyInfo(pParse, p, 1); sqlite3VdbeAddOp4(v, OP_OpenEphemeral, iQueue, pOrderBy->nExpr+2, 0, (char*)pKeyInfo, P4_KEYINFO); destQueue.pOrderBy = pOrderBy; }else{ sqlite3VdbeAddOp2(v, OP_OpenEphemeral, iQueue, nCol); } VdbeComment((v, "Queue table")); if( iDistinct ){ p->addrOpenEphm[0] = sqlite3VdbeAddOp2(v, OP_OpenEphemeral, iDistinct, 0); p->selFlags |= SF_UsesEphemeral; } /* Detach the ORDER BY clause from the compound SELECT */ p->pOrderBy = 0; /* Store the results of the setup-query in Queue. 
*/ pSetup->pNext = 0; rc = sqlite3Select(pParse, pSetup, &destQueue); pSetup->pNext = p; if( rc ) goto end_of_recursive_query; /* Find the next row in the Queue and output that row */ addrTop = sqlite3VdbeAddOp2(v, OP_Rewind, iQueue, addrBreak); VdbeCoverage(v); /* Transfer the next row in Queue over to Current */ sqlite3VdbeAddOp1(v, OP_NullRow, iCurrent); /* To reset column cache */ if( pOrderBy ){ sqlite3VdbeAddOp3(v, OP_Column, iQueue, pOrderBy->nExpr+1, regCurrent); }else{ sqlite3VdbeAddOp2(v, OP_RowData, iQueue, regCurrent); } sqlite3VdbeAddOp1(v, OP_Delete, iQueue); /* Output the single row in Current */ addrCont = sqlite3VdbeMakeLabel(v); codeOffset(v, regOffset, addrCont); selectInnerLoop(pParse, p, p->pEList, iCurrent, 0, 0, pDest, addrCont, addrBreak); if( regLimit ){ sqlite3VdbeAddOp2(v, OP_DecrJumpZero, regLimit, addrBreak); VdbeCoverage(v); } sqlite3VdbeResolveLabel(v, addrCont); /* Execute the recursive SELECT taking the single row in Current as ** the value for the recursive-table. Store the results in the Queue. */ if( p->selFlags & SF_Aggregate ){ sqlite3ErrorMsg(pParse, "recursive aggregate queries not supported"); }else{ p->pPrior = 0; sqlite3Select(pParse, p, &destQueue); assert( p->pPrior==0 ); p->pPrior = pSetup; } /* Keep running the loop until the Queue is empty */ sqlite3VdbeAddOp2(v, OP_Goto, 0, addrTop); sqlite3VdbeResolveLabel(v, addrBreak); end_of_recursive_query: sqlite3ExprListDelete(pParse->db, p->pOrderBy); p->pOrderBy = pOrderBy; p->pLimit = pLimit; p->pOffset = pOffset; return; } #endif /* SQLITE_OMIT_CTE */ /* Forward references */ static int multiSelectOrderBy( Parse *pParse, /* Parsing context */ Select *p, /* The right-most of SELECTs to be coded */ SelectDest *pDest /* What to do with query results */ ); /* ** Handle the special case of a compound-select that originates from a ** VALUES clause. By handling this as a special case, we avoid deep ** recursion, and thus do not need to enforce the SQLITE_LIMIT_COMPOUND_SELECT ** on a VALUES clause. ** ** Because the Select object originates from a VALUES clause: ** (1) It has no LIMIT or OFFSET ** (2) All terms are UNION ALL ** (3) There is no ORDER BY clause */ static int multiSelectValues( Parse *pParse, /* Parsing context */ Select *p, /* The right-most of SELECTs to be coded */ SelectDest *pDest /* What to do with query results */ ){ Select *pPrior; int nRow = 1; int rc = 0; assert( p->selFlags & SF_MultiValue ); do{ assert( p->selFlags & SF_Values ); assert( p->op==TK_ALL || (p->op==TK_SELECT && p->pPrior==0) ); assert( p->pLimit==0 ); assert( p->pOffset==0 ); assert( p->pNext==0 || p->pEList->nExpr==p->pNext->pEList->nExpr ); if( p->pPrior==0 ) break; assert( p->pPrior->pNext==p ); p = p->pPrior; nRow++; }while(1); while( p ){ pPrior = p->pPrior; p->pPrior = 0; rc = sqlite3Select(pParse, p, pDest); p->pPrior = pPrior; if( rc ) break; p->nSelectRow = nRow; p = p->pNext; } return rc; } /* ** This routine is called to process a compound query form from ** two or more separate queries using UNION, UNION ALL, EXCEPT, or ** INTERSECT ** ** "p" points to the right-most of the two queries. the query on the ** left is p->pPrior. The left query could also be a compound query ** in which case this routine will be called recursively. ** ** The results of the total query are to be written into a destination ** of type eDest with parameter iParm. ** ** Example 1: Consider a three-way compound SQL statement. 
** ** SELECT a FROM t1 UNION SELECT b FROM t2 UNION SELECT c FROM t3 ** ** This statement is parsed up as follows: ** ** SELECT c FROM t3 ** | ** `-----> SELECT b FROM t2 ** | ** `------> SELECT a FROM t1 ** ** The arrows in the diagram above represent the Select.pPrior pointer. ** So if this routine is called with p equal to the t3 query, then ** pPrior will be the t2 query. p->op will be TK_UNION in this case. ** ** Notice that because of the way SQLite parses compound SELECTs, the ** individual selects always group from left to right. */ static int multiSelect( Parse *pParse, /* Parsing context */ Select *p, /* The right-most of SELECTs to be coded */ SelectDest *pDest /* What to do with query results */ ){ int rc = SQLITE_OK; /* Success code from a subroutine */ Select *pPrior; /* Another SELECT immediately to our left */ Vdbe *v; /* Generate code to this VDBE */ SelectDest dest; /* Alternative data destination */ Select *pDelete = 0; /* Chain of simple selects to delete */ sqlite3 *db; /* Database connection */ #ifndef SQLITE_OMIT_EXPLAIN int iSub1 = 0; /* EQP id of left-hand query */ int iSub2 = 0; /* EQP id of right-hand query */ #endif /* Make sure there is no ORDER BY or LIMIT clause on prior SELECTs. Only ** the last (right-most) SELECT in the series may have an ORDER BY or LIMIT. */ assert( p && p->pPrior ); /* Calling function guarantees this much */ assert( (p->selFlags & SF_Recursive)==0 || p->op==TK_ALL || p->op==TK_UNION ); db = pParse->db; pPrior = p->pPrior; dest = *pDest; if( pPrior->pOrderBy ){ sqlite3ErrorMsg(pParse,"ORDER BY clause should come after %s not before", selectOpName(p->op)); rc = 1; goto multi_select_end; } if( pPrior->pLimit ){ sqlite3ErrorMsg(pParse,"LIMIT clause should come after %s not before", selectOpName(p->op)); rc = 1; goto multi_select_end; } v = sqlite3GetVdbe(pParse); assert( v!=0 ); /* The VDBE already created by calling function */ /* Create the destination temporary table if necessary */ if( dest.eDest==SRT_EphemTab ){ assert( p->pEList ); sqlite3VdbeAddOp2(v, OP_OpenEphemeral, dest.iSDParm, p->pEList->nExpr); sqlite3VdbeChangeP5(v, BTREE_UNORDERED); dest.eDest = SRT_Table; } /* Special handling for a compound-select that originates as a VALUES clause. */ if( p->selFlags & SF_MultiValue ){ rc = multiSelectValues(pParse, p, &dest); goto multi_select_end; } /* Make sure all SELECTs in the statement have the same number of elements ** in their result sets. */ assert( p->pEList && pPrior->pEList ); assert( p->pEList->nExpr==pPrior->pEList->nExpr ); #ifndef SQLITE_OMIT_CTE if( p->selFlags & SF_Recursive ){ generateWithRecursiveQuery(pParse, p, &dest); }else #endif /* Compound SELECTs that have an ORDER BY clause are handled separately. */ if( p->pOrderBy ){ return multiSelectOrderBy(pParse, p, pDest); }else /* Generate code for the left and right SELECT statements. 
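** As a rough guide to the switch below (table names are arbitrary
** examples): "SELECT a FROM t1 UNION ALL SELECT b FROM t2" takes the
** TK_ALL branch and streams both halves straight to the destination;
** UNION and EXCEPT take the TK_UNION/TK_EXCEPT branch and route rows
** through a single ephemeral table so duplicates can be removed; and
** INTERSECT needs two ephemeral tables, one per half.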
*/ switch( p->op ){ case TK_ALL: { int addr = 0; int nLimit; assert( !pPrior->pLimit ); pPrior->iLimit = p->iLimit; pPrior->iOffset = p->iOffset; pPrior->pLimit = p->pLimit; pPrior->pOffset = p->pOffset; explainSetInteger(iSub1, pParse->iNextSelectId); rc = sqlite3Select(pParse, pPrior, &dest); p->pLimit = 0; p->pOffset = 0; if( rc ){ goto multi_select_end; } p->pPrior = 0; p->iLimit = pPrior->iLimit; p->iOffset = pPrior->iOffset; if( p->iLimit ){ addr = sqlite3VdbeAddOp1(v, OP_IfNot, p->iLimit); VdbeCoverage(v); VdbeComment((v, "Jump ahead if LIMIT reached")); } explainSetInteger(iSub2, pParse->iNextSelectId); rc = sqlite3Select(pParse, p, &dest); testcase( rc!=SQLITE_OK ); pDelete = p->pPrior; p->pPrior = pPrior; p->nSelectRow += pPrior->nSelectRow; if( pPrior->pLimit && sqlite3ExprIsInteger(pPrior->pLimit, &nLimit) && nLimit>0 && p->nSelectRow > (u64)nLimit ){ p->nSelectRow = nLimit; } if( addr ){ sqlite3VdbeJumpHere(v, addr); } break; } case TK_EXCEPT: case TK_UNION: { int unionTab; /* Cursor number of the temporary table holding result */ u8 op = 0; /* One of the SRT_ operations to apply to self */ int priorOp; /* The SRT_ operation to apply to prior selects */ Expr *pLimit, *pOffset; /* Saved values of p->nLimit and p->nOffset */ int addr; SelectDest uniondest; testcase( p->op==TK_EXCEPT ); testcase( p->op==TK_UNION ); priorOp = SRT_Union; if( dest.eDest==priorOp ){ /* We can reuse a temporary table generated by a SELECT to our ** right. */ assert( p->pLimit==0 ); /* Not allowed on leftward elements */ assert( p->pOffset==0 ); /* Not allowed on leftward elements */ unionTab = dest.iSDParm; }else{ /* We will need to create our own temporary table to hold the ** intermediate results. */ unionTab = pParse->nTab++; assert( p->pOrderBy==0 ); addr = sqlite3VdbeAddOp2(v, OP_OpenEphemeral, unionTab, 0); assert( p->addrOpenEphm[0] == -1 ); p->addrOpenEphm[0] = addr; findRightmost(p)->selFlags |= SF_UsesEphemeral; assert( p->pEList ); } /* Code the SELECT statements to our left */ assert( !pPrior->pOrderBy ); sqlite3SelectDestInit(&uniondest, priorOp, unionTab); explainSetInteger(iSub1, pParse->iNextSelectId); rc = sqlite3Select(pParse, pPrior, &uniondest); if( rc ){ goto multi_select_end; } /* Code the current SELECT statement */ if( p->op==TK_EXCEPT ){ op = SRT_Except; }else{ assert( p->op==TK_UNION ); op = SRT_Union; } p->pPrior = 0; pLimit = p->pLimit; p->pLimit = 0; pOffset = p->pOffset; p->pOffset = 0; uniondest.eDest = op; explainSetInteger(iSub2, pParse->iNextSelectId); rc = sqlite3Select(pParse, p, &uniondest); testcase( rc!=SQLITE_OK ); /* Query flattening in sqlite3Select() might refill p->pOrderBy. ** Be sure to delete p->pOrderBy, therefore, to avoid a memory leak. */ sqlite3ExprListDelete(db, p->pOrderBy); pDelete = p->pPrior; p->pPrior = pPrior; p->pOrderBy = 0; if( p->op==TK_UNION ) p->nSelectRow += pPrior->nSelectRow; sqlite3ExprDelete(db, p->pLimit); p->pLimit = pLimit; p->pOffset = pOffset; p->iLimit = 0; p->iOffset = 0; /* Convert the data in the temporary table into whatever form ** it is that we currently need. 
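** For example (an illustrative statement), when "SELECT a FROM t1 UNION
** SELECT b FROM t2" is the top-level query the destination is SRT_Output,
** so the rows accumulated in unionTab are read back by the scan coded
** below and emitted one at a time.  When a SELECT further to the right is
** already collecting into the same ephemeral table, this conversion step
** is skipped.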
*/ assert( unionTab==dest.iSDParm || dest.eDest!=priorOp ); if( dest.eDest!=priorOp ){ int iCont, iBreak, iStart; assert( p->pEList ); if( dest.eDest==SRT_Output ){ Select *pFirst = p; while( pFirst->pPrior ) pFirst = pFirst->pPrior; generateColumnNames(pParse, 0, pFirst->pEList); } iBreak = sqlite3VdbeMakeLabel(v); iCont = sqlite3VdbeMakeLabel(v); computeLimitRegisters(pParse, p, iBreak); sqlite3VdbeAddOp2(v, OP_Rewind, unionTab, iBreak); VdbeCoverage(v); iStart = sqlite3VdbeCurrentAddr(v); selectInnerLoop(pParse, p, p->pEList, unionTab, 0, 0, &dest, iCont, iBreak); sqlite3VdbeResolveLabel(v, iCont); sqlite3VdbeAddOp2(v, OP_Next, unionTab, iStart); VdbeCoverage(v); sqlite3VdbeResolveLabel(v, iBreak); sqlite3VdbeAddOp2(v, OP_Close, unionTab, 0); } break; } default: assert( p->op==TK_INTERSECT ); { int tab1, tab2; int iCont, iBreak, iStart; Expr *pLimit, *pOffset; int addr; SelectDest intersectdest; int r1; /* INTERSECT is different from the others since it requires ** two temporary tables. Hence it has its own case. Begin ** by allocating the tables we will need. */ tab1 = pParse->nTab++; tab2 = pParse->nTab++; assert( p->pOrderBy==0 ); addr = sqlite3VdbeAddOp2(v, OP_OpenEphemeral, tab1, 0); assert( p->addrOpenEphm[0] == -1 ); p->addrOpenEphm[0] = addr; findRightmost(p)->selFlags |= SF_UsesEphemeral; assert( p->pEList ); /* Code the SELECTs to our left into temporary table "tab1". */ sqlite3SelectDestInit(&intersectdest, SRT_Union, tab1); explainSetInteger(iSub1, pParse->iNextSelectId); rc = sqlite3Select(pParse, pPrior, &intersectdest); if( rc ){ goto multi_select_end; } /* Code the current SELECT into temporary table "tab2" */ addr = sqlite3VdbeAddOp2(v, OP_OpenEphemeral, tab2, 0); assert( p->addrOpenEphm[1] == -1 ); p->addrOpenEphm[1] = addr; p->pPrior = 0; pLimit = p->pLimit; p->pLimit = 0; pOffset = p->pOffset; p->pOffset = 0; intersectdest.iSDParm = tab2; explainSetInteger(iSub2, pParse->iNextSelectId); rc = sqlite3Select(pParse, p, &intersectdest); testcase( rc!=SQLITE_OK ); pDelete = p->pPrior; p->pPrior = pPrior; if( p->nSelectRow>pPrior->nSelectRow ) p->nSelectRow = pPrior->nSelectRow; sqlite3ExprDelete(db, p->pLimit); p->pLimit = pLimit; p->pOffset = pOffset; /* Generate code to take the intersection of the two temporary ** tables. */ assert( p->pEList ); if( dest.eDest==SRT_Output ){ Select *pFirst = p; while( pFirst->pPrior ) pFirst = pFirst->pPrior; generateColumnNames(pParse, 0, pFirst->pEList); } iBreak = sqlite3VdbeMakeLabel(v); iCont = sqlite3VdbeMakeLabel(v); computeLimitRegisters(pParse, p, iBreak); sqlite3VdbeAddOp2(v, OP_Rewind, tab1, iBreak); VdbeCoverage(v); r1 = sqlite3GetTempReg(pParse); iStart = sqlite3VdbeAddOp2(v, OP_RowKey, tab1, r1); sqlite3VdbeAddOp4Int(v, OP_NotFound, tab2, iCont, r1, 0); VdbeCoverage(v); sqlite3ReleaseTempReg(pParse, r1); selectInnerLoop(pParse, p, p->pEList, tab1, 0, 0, &dest, iCont, iBreak); sqlite3VdbeResolveLabel(v, iCont); sqlite3VdbeAddOp2(v, OP_Next, tab1, iStart); VdbeCoverage(v); sqlite3VdbeResolveLabel(v, iBreak); sqlite3VdbeAddOp2(v, OP_Close, tab2, 0); sqlite3VdbeAddOp2(v, OP_Close, tab1, 0); break; } } explainComposite(pParse, p->op, iSub1, iSub2, p->op!=TK_ALL); /* Compute collating sequences used by ** temporary tables needed to implement the compound select. ** Attach the KeyInfo structure to all temporary tables. ** ** This section is run by the right-most SELECT statement only. ** SELECT statements to the left always skip this part. 
The right-most ** SELECT might also skip this part if it has no ORDER BY clause and ** no temp tables are required. */ if( p->selFlags & SF_UsesEphemeral ){ int i; /* Loop counter */ KeyInfo *pKeyInfo; /* Collating sequence for the result set */ Select *pLoop; /* For looping through SELECT statements */ CollSeq **apColl; /* For looping through pKeyInfo->aColl[] */ int nCol; /* Number of columns in result set */ assert( p->pNext==0 ); nCol = p->pEList->nExpr; pKeyInfo = sqlite3KeyInfoAlloc(db, nCol, 1); if( !pKeyInfo ){ rc = SQLITE_NOMEM; goto multi_select_end; } for(i=0, apColl=pKeyInfo->aColl; i<nCol; i++, apColl++){ *apColl = multiSelectCollSeq(pParse, p, i); if( 0==*apColl ){ *apColl = db->pDfltColl; } } for(pLoop=p; pLoop; pLoop=pLoop->pPrior){ for(i=0; i<2; i++){ int addr = pLoop->addrOpenEphm[i]; if( addr<0 ){ /* If [0] is unused then [1] is also unused. So we can ** always safely abort as soon as the first unused slot is found */ assert( pLoop->addrOpenEphm[1]<0 ); break; } sqlite3VdbeChangeP2(v, addr, nCol); sqlite3VdbeChangeP4(v, addr, (char*)sqlite3KeyInfoRef(pKeyInfo), P4_KEYINFO); pLoop->addrOpenEphm[i] = -1; } } sqlite3KeyInfoUnref(pKeyInfo); } multi_select_end: pDest->iSdst = dest.iSdst; pDest->nSdst = dest.nSdst; sqlite3SelectDelete(db, pDelete); return rc; } #endif /* SQLITE_OMIT_COMPOUND_SELECT */ /* ** Error message for when two or more terms of a compound select have different ** size result sets. */ SQLITE_PRIVATE void sqlite3SelectWrongNumTermsError(Parse *pParse, Select *p){ if( p->selFlags & SF_Values ){ sqlite3ErrorMsg(pParse, "all VALUES must have the same number of terms"); }else{ sqlite3ErrorMsg(pParse, "SELECTs to the left and right of %s" " do not have the same number of result columns", selectOpName(p->op)); } } /* ** Code an output subroutine for a coroutine implementation of a ** SELECT statement. ** ** The data to be output is contained in pIn->iSdst. There are ** pIn->nSdst columns to be output. pDest is where the output should ** be sent. ** ** regReturn is the number of the register holding the subroutine ** return address. ** ** If regPrev>0 then it is the first register in a vector that ** records the previous output. mem[regPrev] is a flag that is false ** if there has been no previous output. If regPrev>0 then code is ** generated to suppress duplicates. pKeyInfo is used for comparing ** keys. ** ** If the LIMIT found in p->iLimit is reached, jump immediately to ** iBreak. */ static int generateOutputSubroutine( Parse *pParse, /* Parsing context */ Select *p, /* The SELECT statement */ SelectDest *pIn, /* Coroutine supplying data */ SelectDest *pDest, /* Where to send the data */ int regReturn, /* The return address register */ int regPrev, /* Previous result register.
No uniqueness if 0 */ KeyInfo *pKeyInfo, /* For comparing with previous entry */ int iBreak /* Jump here if we hit the LIMIT */ ){ Vdbe *v = pParse->pVdbe; int iContinue; int addr; addr = sqlite3VdbeCurrentAddr(v); iContinue = sqlite3VdbeMakeLabel(v); /* Suppress duplicates for UNION, EXCEPT, and INTERSECT */ if( regPrev ){ int j1, j2; j1 = sqlite3VdbeAddOp1(v, OP_IfNot, regPrev); VdbeCoverage(v); j2 = sqlite3VdbeAddOp4(v, OP_Compare, pIn->iSdst, regPrev+1, pIn->nSdst, (char*)sqlite3KeyInfoRef(pKeyInfo), P4_KEYINFO); sqlite3VdbeAddOp3(v, OP_Jump, j2+2, iContinue, j2+2); VdbeCoverage(v); sqlite3VdbeJumpHere(v, j1); sqlite3VdbeAddOp3(v, OP_Copy, pIn->iSdst, regPrev+1, pIn->nSdst-1); sqlite3VdbeAddOp2(v, OP_Integer, 1, regPrev); } if( pParse->db->mallocFailed ) return 0; /* Suppress the first OFFSET entries if there is an OFFSET clause */ codeOffset(v, p->iOffset, iContinue); assert( pDest->eDest!=SRT_Exists ); assert( pDest->eDest!=SRT_Table ); switch( pDest->eDest ){ /* Store the result as data using a unique key. */ case SRT_EphemTab: { int r1 = sqlite3GetTempReg(pParse); int r2 = sqlite3GetTempReg(pParse); sqlite3VdbeAddOp3(v, OP_MakeRecord, pIn->iSdst, pIn->nSdst, r1); sqlite3VdbeAddOp2(v, OP_NewRowid, pDest->iSDParm, r2); sqlite3VdbeAddOp3(v, OP_Insert, pDest->iSDParm, r1, r2); sqlite3VdbeChangeP5(v, OPFLAG_APPEND); sqlite3ReleaseTempReg(pParse, r2); sqlite3ReleaseTempReg(pParse, r1); break; } #ifndef SQLITE_OMIT_SUBQUERY /* If we are creating a set for an "expr IN (SELECT ...)" construct, ** then there should be a single item on the stack. Write this ** item into the set table with bogus data. */ case SRT_Set: { int r1; assert( pIn->nSdst==1 || pParse->nErr>0 ); pDest->affSdst = sqlite3CompareAffinity(p->pEList->a[0].pExpr, pDest->affSdst); r1 = sqlite3GetTempReg(pParse); sqlite3VdbeAddOp4(v, OP_MakeRecord, pIn->iSdst, 1, r1, &pDest->affSdst,1); sqlite3ExprCacheAffinityChange(pParse, pIn->iSdst, 1); sqlite3VdbeAddOp2(v, OP_IdxInsert, pDest->iSDParm, r1); sqlite3ReleaseTempReg(pParse, r1); break; } /* If this is a scalar select that is part of an expression, then ** store the results in the appropriate memory cell and break out ** of the scan loop. */ case SRT_Mem: { assert( pIn->nSdst==1 || pParse->nErr>0 ); testcase( pIn->nSdst!=1 ); sqlite3ExprCodeMove(pParse, pIn->iSdst, pDest->iSDParm, 1); /* The LIMIT clause will jump out of the loop for us */ break; } #endif /* #ifndef SQLITE_OMIT_SUBQUERY */ /* The results are stored in a sequence of registers ** starting at pDest->iSdst. Then the co-routine yields. */ case SRT_Coroutine: { if( pDest->iSdst==0 ){ pDest->iSdst = sqlite3GetTempRange(pParse, pIn->nSdst); pDest->nSdst = pIn->nSdst; } sqlite3ExprCodeMove(pParse, pIn->iSdst, pDest->iSdst, pIn->nSdst); sqlite3VdbeAddOp1(v, OP_Yield, pDest->iSDParm); break; } /* If none of the above, then the result destination must be ** SRT_Output. This routine is never called with any other ** destination other than the ones handled above or SRT_Output. ** ** For SRT_Output, results are stored in a sequence of registers. ** Then the OP_ResultRow opcode is used to cause sqlite3_step() to ** return the next row of result. */ default: { assert( pDest->eDest==SRT_Output ); sqlite3VdbeAddOp2(v, OP_ResultRow, pIn->iSdst, pIn->nSdst); sqlite3ExprCacheAffinityChange(pParse, pIn->iSdst, pIn->nSdst); break; } } /* Jump to the end of the loop if the LIMIT is reached. 
*/ if( p->iLimit ){ sqlite3VdbeAddOp2(v, OP_DecrJumpZero, p->iLimit, iBreak); VdbeCoverage(v); } /* Generate the subroutine return */ sqlite3VdbeResolveLabel(v, iContinue); sqlite3VdbeAddOp1(v, OP_Return, regReturn); return addr; } /* ** Alternative compound select code generator for cases when there ** is an ORDER BY clause. ** ** We assume a query of the following form: ** ** <selectA> <operator> <selectB> ORDER BY <orderbylist> ** ** <operator> is one of UNION ALL, UNION, EXCEPT, or INTERSECT. The idea ** is to code both <selectA> and <selectB> with the ORDER BY clause as ** co-routines. Then run the co-routines in parallel and merge the results ** into the output. In addition to the two coroutines (called selectA and ** selectB) there are 7 subroutines: ** ** outA: Move the output of the selectA coroutine into the output ** of the compound query. ** ** outB: Move the output of the selectB coroutine into the output ** of the compound query. (Only generated for UNION and ** UNION ALL. EXCEPT and INTERSECT never output a row that ** appears only in B.) ** ** AltB: Called when there is data from both coroutines and A<B. ** ** AeqB: Called when there is data from both coroutines and A==B. ** ** AgtB: Called when there is data from both coroutines and A>B. ** ** EofA: Called when data is exhausted from selectA. ** ** EofB: Called when data is exhausted from selectB. ** ** The implementation of the latter five subroutines depends on which ** <operator> is used: ** ** ** UNION ALL UNION EXCEPT INTERSECT ** ------------- ----------------- -------------- ----------------- ** AltB: outA, nextA outA, nextA outA, nextA nextA ** ** AeqB: outA, nextA nextA nextA outA, nextA ** ** AgtB: outB, nextB outB, nextB nextB nextB ** ** EofA: outB, nextB outB, nextB halt halt ** ** EofB: outA, nextA outA, nextA outA, nextA halt ** ** In the AltB, AeqB, and AgtB subroutines, an EOF on A following nextA ** causes an immediate jump to EofA and an EOF on B following nextB causes ** an immediate jump to EofB. Within EofA and EofB, an EOF on entry or ** following nextX causes a jump to the end of the select processing. ** ** Duplicate removal in the UNION, EXCEPT, and INTERSECT cases is handled ** within the output subroutine. The regPrev register set holds the previously ** output value. A comparison is made against this value and the output ** is skipped if the next results would be the same as the previous. ** ** The implementation plan is to implement the two coroutines and seven ** subroutines first, then put the control logic at the bottom. Like this: ** ** goto Init ** coA: coroutine for left query (A) ** coB: coroutine for right query (B) ** outA: output one row of A ** outB: output one row of B (UNION and UNION ALL only) ** EofA: ... ** EofB: ... ** AltB: ... ** AeqB: ... ** AgtB: ... ** Init: initialize coroutine registers ** yield coA ** if eof(A) goto EofA ** yield coB ** if eof(B) goto EofB ** Cmpr: Compare A, B ** Jump AltB, AeqB, AgtB ** End: ... ** ** We call AltB, AeqB, AgtB, EofA, and EofB "subroutines" but they are not ** actually called using Gosub and they do not Return. EofA and EofB loop ** until all data is exhausted then jump to the "end" label. AltB, AeqB, ** and AgtB jump to either Cmpr or to one of EofA or EofB.
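** As a small worked example (the values are purely illustrative): for a
** UNION in which coroutine A yields 1,2,4 and coroutine B yields 2,3 in
** ORDER BY order, control passes through AltB (output 1, advance A),
** AeqB (advance A past its 2 without output), AgtB (output B's 2, advance
** B), AgtB again (output 3, advance B, which hits EOF), and finally EofB
** drains A (output 4), producing 1,2,3,4 with the duplicate emitted once.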
*/ #ifndef SQLITE_OMIT_COMPOUND_SELECT static int multiSelectOrderBy( Parse *pParse, /* Parsing context */ Select *p, /* The right-most of SELECTs to be coded */ SelectDest *pDest /* What to do with query results */ ){ int i, j; /* Loop counters */ Select *pPrior; /* Another SELECT immediately to our left */ Vdbe *v; /* Generate code to this VDBE */ SelectDest destA; /* Destination for coroutine A */ SelectDest destB; /* Destination for coroutine B */ int regAddrA; /* Address register for select-A coroutine */ int regAddrB; /* Address register for select-B coroutine */ int addrSelectA; /* Address of the select-A coroutine */ int addrSelectB; /* Address of the select-B coroutine */ int regOutA; /* Address register for the output-A subroutine */ int regOutB; /* Address register for the output-B subroutine */ int addrOutA; /* Address of the output-A subroutine */ int addrOutB = 0; /* Address of the output-B subroutine */ int addrEofA; /* Address of the select-A-exhausted subroutine */ int addrEofA_noB; /* Alternate addrEofA if B is uninitialized */ int addrEofB; /* Address of the select-B-exhausted subroutine */ int addrAltB; /* Address of the A<B subroutine */ int addrAeqB; /* Address of the A==B subroutine */ int addrAgtB; /* Address of the A>B subroutine */ int regLimitA; /* Limit register for select-A */ int regLimitB; /* Limit register for select-B */ int regPrev; /* A range of registers to hold previous output */ int savedLimit; /* Saved value of p->iLimit */ int savedOffset; /* Saved value of p->iOffset */ int labelCmpr; /* Label for the start of the merge algorithm */ int labelEnd; /* Label for the end of the overall SELECT stmt */ int j1; /* Jump instructions that get retargetted */ int op; /* One of TK_ALL, TK_UNION, TK_EXCEPT, TK_INTERSECT */ KeyInfo *pKeyDup = 0; /* Comparison information for duplicate removal */ KeyInfo *pKeyMerge; /* Comparison information for merging rows */ sqlite3 *db; /* Database connection */ ExprList *pOrderBy; /* The ORDER BY clause */ int nOrderBy; /* Number of terms in the ORDER BY clause */ int *aPermute; /* Mapping from ORDER BY terms to result set columns */ #ifndef SQLITE_OMIT_EXPLAIN int iSub1; /* EQP id of left-hand query */ int iSub2; /* EQP id of right-hand query */ #endif assert( p->pOrderBy!=0 ); assert( pKeyDup==0 ); /* "Managed" code needs this. Ticket #3382. */ db = pParse->db; v = pParse->pVdbe; assert( v!=0 ); /* Already thrown the error if VDBE alloc failed */ labelEnd = sqlite3VdbeMakeLabel(v); labelCmpr = sqlite3VdbeMakeLabel(v); /* Patch up the ORDER BY clause */ op = p->op; pPrior = p->pPrior; assert( pPrior->pOrderBy==0 ); pOrderBy = p->pOrderBy; assert( pOrderBy ); nOrderBy = pOrderBy->nExpr; /* For operators other than UNION ALL we have to make sure that ** the ORDER BY clause covers every term of the result set. Add ** terms to the ORDER BY clause as necessary. */ if( op!=TK_ALL ){ for(i=1; db->mallocFailed==0 && i<=p->pEList->nExpr; i++){ struct ExprList_item *pItem; for(j=0, pItem=pOrderBy->a; j<nOrderBy; j++, pItem++){ assert( pItem->u.x.iOrderByCol>0 ); if( pItem->u.x.iOrderByCol==i ) break; } if( j==nOrderBy ){ Expr *pNew = sqlite3Expr(db, TK_INTEGER, 0); if( pNew==0 ) return SQLITE_NOMEM; pNew->flags |= EP_IntValue; pNew->u.iValue = i; pOrderBy = sqlite3ExprListAppend(pParse, pOrderBy, pNew); if( pOrderBy ) pOrderBy->a[nOrderBy++].u.x.iOrderByCol = (u16)i; } } } /* Compute the comparison permutation and keyinfo that is used with ** the permutation to determine if the next ** row of results comes from selectA or selectB.
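** For example (an illustrative query), in
**
**     SELECT a, b FROM t1 UNION SELECT x, y FROM t2 ORDER BY 2, 1
**
** the ORDER BY terms name result columns 2 and 1, so aPermute[] becomes
** {1, 0} and the OP_Compare coded below compares the second result column
** of the two coroutines first and the first column second.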
Also add explicit ** collations to the ORDER BY clause terms so that when the subqueries ** to the right and the left are evaluated, they use the correct ** collation. */ aPermute = sqlite3DbMallocRaw(db, sizeof(int)*nOrderBy); if( aPermute ){ struct ExprList_item *pItem; for(i=0, pItem=pOrderBy->a; i<nOrderBy; i++, pItem++){ assert( pItem->u.x.iOrderByCol>0 ); assert( pItem->u.x.iOrderByCol<=p->pEList->nExpr ); aPermute[i] = pItem->u.x.iOrderByCol - 1; } pKeyMerge = multiSelectOrderByKeyInfo(pParse, p, 1); }else{ pKeyMerge = 0; } /* Reattach the ORDER BY clause to the query. */ p->pOrderBy = pOrderBy; pPrior->pOrderBy = sqlite3ExprListDup(pParse->db, pOrderBy, 0); /* Allocate a range of temporary registers and the KeyInfo needed ** for the logic that removes duplicate result rows when the ** operator is UNION, EXCEPT, or INTERSECT (but not UNION ALL). */ if( op==TK_ALL ){ regPrev = 0; }else{ int nExpr = p->pEList->nExpr; assert( nOrderBy>=nExpr || db->mallocFailed ); regPrev = pParse->nMem+1; pParse->nMem += nExpr+1; sqlite3VdbeAddOp2(v, OP_Integer, 0, regPrev); pKeyDup = sqlite3KeyInfoAlloc(db, nExpr, 1); if( pKeyDup ){ assert( sqlite3KeyInfoIsWriteable(pKeyDup) ); for(i=0; i<nExpr; i++){ pKeyDup->aColl[i] = multiSelectCollSeq(pParse, p, i); pKeyDup->aSortOrder[i] = 0; } } } /* Separate the left and the right query from one another */ p->pPrior = 0; pPrior->pNext = 0; sqlite3ResolveOrderGroupBy(pParse, p, p->pOrderBy, "ORDER"); if( pPrior->pPrior==0 ){ sqlite3ResolveOrderGroupBy(pParse, pPrior, pPrior->pOrderBy, "ORDER"); } /* Compute the limit registers */ computeLimitRegisters(pParse, p, labelEnd); if( p->iLimit && op==TK_ALL ){ regLimitA = ++pParse->nMem; regLimitB = ++pParse->nMem; sqlite3VdbeAddOp2(v, OP_Copy, p->iOffset ? p->iOffset+1 : p->iLimit, regLimitA); sqlite3VdbeAddOp2(v, OP_Copy, regLimitA, regLimitB); }else{ regLimitA = regLimitB = 0; } sqlite3ExprDelete(db, p->pLimit); p->pLimit = 0; sqlite3ExprDelete(db, p->pOffset); p->pOffset = 0; regAddrA = ++pParse->nMem; regAddrB = ++pParse->nMem; regOutA = ++pParse->nMem; regOutB = ++pParse->nMem; sqlite3SelectDestInit(&destA, SRT_Coroutine, regAddrA); sqlite3SelectDestInit(&destB, SRT_Coroutine, regAddrB); /* Generate a coroutine to evaluate the SELECT statement to the ** left of the compound operator - the "A" select. */ addrSelectA = sqlite3VdbeCurrentAddr(v) + 1; j1 = sqlite3VdbeAddOp3(v, OP_InitCoroutine, regAddrA, 0, addrSelectA); VdbeComment((v, "left SELECT")); pPrior->iLimit = regLimitA; explainSetInteger(iSub1, pParse->iNextSelectId); sqlite3Select(pParse, pPrior, &destA); sqlite3VdbeAddOp1(v, OP_EndCoroutine, regAddrA); sqlite3VdbeJumpHere(v, j1); /* Generate a coroutine to evaluate the SELECT statement on ** the right - the "B" select */ addrSelectB = sqlite3VdbeCurrentAddr(v) + 1; j1 = sqlite3VdbeAddOp3(v, OP_InitCoroutine, regAddrB, 0, addrSelectB); VdbeComment((v, "right SELECT")); savedLimit = p->iLimit; savedOffset = p->iOffset; p->iLimit = regLimitB; p->iOffset = 0; explainSetInteger(iSub2, pParse->iNextSelectId); sqlite3Select(pParse, p, &destB); p->iLimit = savedLimit; p->iOffset = savedOffset; sqlite3VdbeAddOp1(v, OP_EndCoroutine, regAddrB); /* Generate a subroutine that outputs the current row of the A ** select as the next output row of the compound select. */ VdbeNoopComment((v, "Output routine for A")); addrOutA = generateOutputSubroutine(pParse, p, &destA, pDest, regOutA, regPrev, pKeyDup, labelEnd); /* Generate a subroutine that outputs the current row of the B ** select as the next output row of the compound select.
*/ if( op==TK_ALL || op==TK_UNION ){ VdbeNoopComment((v, "Output routine for B")); addrOutB = generateOutputSubroutine(pParse, p, &destB, pDest, regOutB, regPrev, pKeyDup, labelEnd); } sqlite3KeyInfoUnref(pKeyDup); /* Generate a subroutine to run when the results from select A ** are exhausted and only data in select B remains. */ if( op==TK_EXCEPT || op==TK_INTERSECT ){ addrEofA_noB = addrEofA = labelEnd; }else{ VdbeNoopComment((v, "eof-A subroutine")); addrEofA = sqlite3VdbeAddOp2(v, OP_Gosub, regOutB, addrOutB); addrEofA_noB = sqlite3VdbeAddOp2(v, OP_Yield, regAddrB, labelEnd); VdbeCoverage(v); sqlite3VdbeAddOp2(v, OP_Goto, 0, addrEofA); p->nSelectRow += pPrior->nSelectRow; } /* Generate a subroutine to run when the results from select B ** are exhausted and only data in select A remains. */ if( op==TK_INTERSECT ){ addrEofB = addrEofA; if( p->nSelectRow > pPrior->nSelectRow ) p->nSelectRow = pPrior->nSelectRow; }else{ VdbeNoopComment((v, "eof-B subroutine")); addrEofB = sqlite3VdbeAddOp2(v, OP_Gosub, regOutA, addrOutA); sqlite3VdbeAddOp2(v, OP_Yield, regAddrA, labelEnd); VdbeCoverage(v); sqlite3VdbeAddOp2(v, OP_Goto, 0, addrEofB); } /* Generate code to handle the case of A<B */ VdbeNoopComment((v, "A-lt-B subroutine")); addrAltB = sqlite3VdbeAddOp2(v, OP_Gosub, regOutA, addrOutA); sqlite3VdbeAddOp2(v, OP_Yield, regAddrA, addrEofA); VdbeCoverage(v); sqlite3VdbeAddOp2(v, OP_Goto, 0, labelCmpr); /* Generate code to handle the case of A==B */ if( op==TK_ALL ){ addrAeqB = addrAltB; }else if( op==TK_INTERSECT ){ addrAeqB = addrAltB; addrAltB++; }else{ VdbeNoopComment((v, "A-eq-B subroutine")); addrAeqB = sqlite3VdbeAddOp2(v, OP_Yield, regAddrA, addrEofA); VdbeCoverage(v); sqlite3VdbeAddOp2(v, OP_Goto, 0, labelCmpr); } /* Generate code to handle the case of A>B */ VdbeNoopComment((v, "A-gt-B subroutine")); addrAgtB = sqlite3VdbeCurrentAddr(v); if( op==TK_ALL || op==TK_UNION ){ sqlite3VdbeAddOp2(v, OP_Gosub, regOutB, addrOutB); } sqlite3VdbeAddOp2(v, OP_Yield, regAddrB, addrEofB); VdbeCoverage(v); sqlite3VdbeAddOp2(v, OP_Goto, 0, labelCmpr); /* This code runs once to initialize everything. */ sqlite3VdbeJumpHere(v, j1); sqlite3VdbeAddOp2(v, OP_Yield, regAddrA, addrEofA_noB); VdbeCoverage(v); sqlite3VdbeAddOp2(v, OP_Yield, regAddrB, addrEofB); VdbeCoverage(v); /* Implement the main merge loop */ sqlite3VdbeResolveLabel(v, labelCmpr); sqlite3VdbeAddOp4(v, OP_Permutation, 0, 0, 0, (char*)aPermute, P4_INTARRAY); sqlite3VdbeAddOp4(v, OP_Compare, destA.iSdst, destB.iSdst, nOrderBy, (char*)pKeyMerge, P4_KEYINFO); sqlite3VdbeChangeP5(v, OPFLAG_PERMUTE); sqlite3VdbeAddOp3(v, OP_Jump, addrAltB, addrAeqB, addrAgtB); VdbeCoverage(v); /* Jump to this point in order to terminate the query. */ sqlite3VdbeResolveLabel(v, labelEnd); /* Set the number of output columns */ if( pDest->eDest==SRT_Output ){ Select *pFirst = pPrior; while( pFirst->pPrior ) pFirst = pFirst->pPrior; generateColumnNames(pParse, 0, pFirst->pEList); } /* Reassemble the compound query so that it will be freed correctly ** by the calling function */ if( p->pPrior ){ sqlite3SelectDelete(db, p->pPrior); } p->pPrior = pPrior; pPrior->pNext = p; /*** TBD: Insert subroutine calls to close cursors on incomplete **** subqueries ****/ explainComposite(pParse, p->op, iSub1, iSub2, 0); return pParse->nErr!=0; } #endif #if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) /* Forward Declarations */ static void substExprList(sqlite3*, ExprList*, int, ExprList*); static void substSelect(sqlite3*, Select *, int, ExprList *); /* ** Scan through the expression pExpr. Replace every reference to ** a column in table number iTable with a copy of the iColumn-th ** entry in pEList. (But leave references to the ROWID column ** unchanged.) ** ** This routine is part of the flattening procedure. A subquery ** whose result set is defined by pEList appears as an entry in the ** FROM clause of a SELECT such that the VDBE cursor assigned to that ** FROM clause entry is iTable. This routine makes the necessary ** changes to pExpr so that it refers directly to the source table ** of the subquery rather than the result set of the subquery.
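** For instance (an illustrative query), when flattening
**
**     SELECT a+5 FROM (SELECT x*3 AS a FROM t1)
**
** every TK_COLUMN node that refers to column 0 of the subquery's cursor
** is replaced by a copy of the expression "x*3" from pEList, so the outer
** result expression effectively becomes "x*3+5".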
*/ static Expr *substExpr( sqlite3 *db, /* Report malloc errors to this connection */ Expr *pExpr, /* Expr in which substitution occurs */ int iTable, /* Table to be substituted */ ExprList *pEList /* Substitute expressions */ ){ if( pExpr==0 ) return 0; if( pExpr->op==TK_COLUMN && pExpr->iTable==iTable ){ if( pExpr->iColumn<0 ){ pExpr->op = TK_NULL; }else{ Expr *pNew; assert( pEList!=0 && pExpr->iColumn<pEList->nExpr ); assert( pExpr->pLeft==0 && pExpr->pRight==0 ); pNew = sqlite3ExprDup(db, pEList->a[pExpr->iColumn].pExpr, 0); sqlite3ExprDelete(db, pExpr); pExpr = pNew; } }else{ pExpr->pLeft = substExpr(db, pExpr->pLeft, iTable, pEList); pExpr->pRight = substExpr(db, pExpr->pRight, iTable, pEList); if( ExprHasProperty(pExpr, EP_xIsSelect) ){ substSelect(db, pExpr->x.pSelect, iTable, pEList); }else{ substExprList(db, pExpr->x.pList, iTable, pEList); } } return pExpr; } static void substExprList( sqlite3 *db, /* Report malloc errors here */ ExprList *pList, /* List to scan and in which to make substitutes */ int iTable, /* Table to be substituted */ ExprList *pEList /* Substitute values */ ){ int i; if( pList==0 ) return; for(i=0; i<pList->nExpr; i++){ pList->a[i].pExpr = substExpr(db, pList->a[i].pExpr, iTable, pEList); } } static void substSelect( sqlite3 *db, /* Report malloc errors here */ Select *p, /* SELECT statement in which to make substitutions */ int iTable, /* Table to be replaced */ ExprList *pEList /* Substitute values */ ){ SrcList *pSrc; struct SrcList_item *pItem; int i; if( !p ) return; substExprList(db, p->pEList, iTable, pEList); substExprList(db, p->pGroupBy, iTable, pEList); substExprList(db, p->pOrderBy, iTable, pEList); p->pHaving = substExpr(db, p->pHaving, iTable, pEList); p->pWhere = substExpr(db, p->pWhere, iTable, pEList); substSelect(db, p->pPrior, iTable, pEList); pSrc = p->pSrc; assert( pSrc ); /* Even for (SELECT 1) we have: pSrc!=0 but pSrc->nSrc==0 */ if( ALWAYS(pSrc) ){ for(i=pSrc->nSrc, pItem=pSrc->a; i>0; i--, pItem++){ substSelect(db, pItem->pSelect, iTable, pEList); } } } #endif /* !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) */ #if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) /* ** This routine attempts to flatten subqueries as a performance optimization. ** This routine returns 1 if it makes changes and 0 if no flattening occurs. ** ** To understand the concept of flattening, consider the following ** query: ** ** SELECT a FROM (SELECT x+y AS a FROM t1 WHERE z<100) WHERE a>5 ** ** The default way of implementing this query is to execute the ** subquery first and store the results in a temporary table, then ** run the outer query on that temporary table. This requires two ** passes over the data. Furthermore, because the temporary table ** has no indices, the WHERE clause on the outer query cannot be ** optimized. ** ** This routine attempts to rewrite queries such as the above into ** a single flat select, like this: ** ** SELECT x+y AS a FROM t1 WHERE z<100 AND a>5 ** ** The code generated for this simplification gives the same result ** but only has to scan the data once. And because indices might ** exist on the table t1, a complete scan of the data might be ** avoided. ** ** Flattening is only attempted if all of the following are true: ** ** (1) The subquery and the outer query do not both use aggregates. ** ** (2) The subquery is not an aggregate or (2a) the outer query is not a join ** and (2b) the outer query does not use subqueries other than the one ** FROM-clause subquery that is a candidate for flattening.
(2b is ** due to ticket [2f7170d73bf9abf80] from 2015-02-09.) ** ** (3) The subquery is not the right operand of a left outer join ** (Originally ticket #306. Strengthened by ticket #3300) ** ** (4) The subquery is not DISTINCT. ** ** (**) At one point restrictions (4) and (5) defined a subset of DISTINCT ** sub-queries that were excluded from this optimization. Restriction ** (4) has since been expanded to exclude all DISTINCT subqueries. ** ** (6) The subquery does not use aggregates or the outer query is not ** DISTINCT. ** ** (7) The subquery has a FROM clause. TODO: For subqueries without ** A FROM clause, consider adding a FROM close with the special ** table sqlite_once that consists of a single row containing a ** single NULL. ** ** (8) The subquery does not use LIMIT or the outer query is not a join. ** ** (9) The subquery does not use LIMIT or the outer query does not use ** aggregates. ** ** (**) Restriction (10) was removed from the code on 2005-02-05 but we ** accidently carried the comment forward until 2014-09-15. Original ** text: "The subquery does not use aggregates or the outer query ** does not use LIMIT." ** ** (11) The subquery and the outer query do not both have ORDER BY clauses. ** ** (**) Not implemented. Subsumed into restriction (3). Was previously ** a separate restriction deriving from ticket #350. ** ** (13) The subquery and outer query do not both use LIMIT. ** ** (14) The subquery does not use OFFSET. ** ** (15) The outer query is not part of a compound select or the ** subquery does not have a LIMIT clause. ** (See ticket #2339 and ticket [02a8e81d44]). ** ** (16) The outer query is not an aggregate or the subquery does ** not contain ORDER BY. (Ticket #2942) This used to not matter ** until we introduced the group_concat() function. ** ** (17) The sub-query is not a compound select, or it is a UNION ALL ** compound clause made up entirely of non-aggregate queries, and ** the parent query: ** ** * is not itself part of a compound select, ** * is not an aggregate or DISTINCT query, and ** * is not a join ** ** The parent and sub-query may contain WHERE clauses. Subject to ** rules (11), (13) and (14), they may also contain ORDER BY, ** LIMIT and OFFSET clauses. The subquery cannot use any compound ** operator other than UNION ALL because all the other compound ** operators have an implied DISTINCT which is disallowed by ** restriction (4). ** ** Also, each component of the sub-query must return the same number ** of result columns. This is actually a requirement for any compound ** SELECT statement, but all the code here does is make sure that no ** such (illegal) sub-query is flattened. The caller will detect the ** syntax error and return a detailed message. ** ** (18) If the sub-query is a compound select, then all terms of the ** ORDER by clause of the parent must be simple references to ** columns of the sub-query. ** ** (19) The subquery does not use LIMIT or the outer query does not ** have a WHERE clause. ** ** (20) If the sub-query is a compound select, then it must not use ** an ORDER BY clause. Ticket #3773. We could relax this constraint ** somewhat by saying that the terms of the ORDER BY clause must ** appear as unmodified result columns in the outer query. But we ** have other optimizations in mind to deal with that case. ** ** (21) The subquery does not use LIMIT or the outer query is not ** DISTINCT. (See ticket [752e1646fc]). ** ** (22) The subquery is not a recursive CTE. 
** ** (23) The parent is not a recursive CTE, or the sub-query is not a ** compound query. This restriction is because transforming the ** parent to a compound query confuses the code that handles ** recursive queries in multiSelect(). ** ** (24) The subquery is not an aggregate that uses the built-in min() or ** max() functions. (Without this restriction, a query like: ** "SELECT x FROM (SELECT max(y), x FROM t1)" would not necessarily ** return the value X for which Y was maximal.) ** ** ** In this routine, the "p" parameter is a pointer to the outer query. ** The subquery is p->pSrc->a[iFrom]. isAgg is true if the outer query ** uses aggregates and subqueryIsAgg is true if the subquery uses aggregates. ** ** If flattening is not attempted, this routine is a no-op and returns 0. ** If flattening is attempted this routine returns 1. ** ** All of the expression analysis must occur on both the outer query and ** the subquery before this routine runs. */ static int flattenSubquery( Parse *pParse, /* Parsing context */ Select *p, /* The parent or outer SELECT statement */ int iFrom, /* Index in p->pSrc->a[] of the inner subquery */ int isAgg, /* True if outer SELECT uses aggregate functions */ int subqueryIsAgg /* True if the subquery uses aggregate functions */ ){ const char *zSavedAuthContext = pParse->zAuthContext; Select *pParent; Select *pSub; /* The inner query or "subquery" */ Select *pSub1; /* Pointer to the rightmost select in sub-query */ SrcList *pSrc; /* The FROM clause of the outer query */ SrcList *pSubSrc; /* The FROM clause of the subquery */ ExprList *pList; /* The result set of the outer query */ int iParent; /* VDBE cursor number of the pSub result set temp table */ int i; /* Loop counter */ Expr *pWhere; /* The WHERE clause */ struct SrcList_item *pSubitem; /* The subquery */ sqlite3 *db = pParse->db; /* Check to see if flattening is permitted. Return 0 if not. */ assert( p!=0 ); assert( p->pPrior==0 ); /* Unable to flatten compound queries */ if( OptimizationDisabled(db, SQLITE_QueryFlattener) ) return 0; pSrc = p->pSrc; assert( pSrc && iFrom>=0 && iFrom<pSrc->nSrc ); pSubitem = &pSrc->a[iFrom]; iParent = pSubitem->iCursor; pSub = pSubitem->pSelect; assert( pSub!=0 ); if( subqueryIsAgg ){ if( isAgg ) return 0; /* Restriction (1) */ if( pSrc->nSrc>1 ) return 0; /* Restriction (2a) */ if( (p->pWhere && ExprHasProperty(p->pWhere,EP_Subquery)) || (sqlite3ExprListFlags(p->pEList) & EP_Subquery)!=0 || (sqlite3ExprListFlags(p->pOrderBy) & EP_Subquery)!=0 ){ return 0; /* Restriction (2b) */ } } pSubSrc = pSub->pSrc; assert( pSubSrc ); /* Prior to version 3.1.2, when LIMIT and OFFSET had to be simple constants, ** not arbitrary expressions, we allowed some combining of LIMIT and OFFSET ** because they could be computed at compile-time. But when LIMIT and OFFSET ** became arbitrary expressions, we were forced to add restrictions (13) ** and (14).
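** For example (illustrative queries only), neither
**
**     SELECT x FROM (SELECT x FROM t1 LIMIT 10) LIMIT 5
**     SELECT x FROM (SELECT x FROM t1 LIMIT 10 OFFSET 2)
**
** is flattened: the first runs into restriction (13) because both levels
** use LIMIT, and the second runs into restriction (14) because the
** subquery uses OFFSET.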
*/ if( pSub->pLimit && p->pLimit ) return 0; /* Restriction (13) */ if( pSub->pOffset ) return 0; /* Restriction (14) */ if( (p->selFlags & SF_Compound)!=0 && pSub->pLimit ){ return 0; /* Restriction (15) */ } if( pSubSrc->nSrc==0 ) return 0; /* Restriction (7) */ if( pSub->selFlags & SF_Distinct ) return 0; /* Restriction (5) */ if( pSub->pLimit && (pSrc->nSrc>1 || isAgg) ){ return 0; /* Restrictions (8)(9) */ } if( (p->selFlags & SF_Distinct)!=0 && subqueryIsAgg ){ return 0; /* Restriction (6) */ } if( p->pOrderBy && pSub->pOrderBy ){ return 0; /* Restriction (11) */ } if( isAgg && pSub->pOrderBy ) return 0; /* Restriction (16) */ if( pSub->pLimit && p->pWhere ) return 0; /* Restriction (19) */ if( pSub->pLimit && (p->selFlags & SF_Distinct)!=0 ){ return 0; /* Restriction (21) */ } testcase( pSub->selFlags & SF_Recursive ); testcase( pSub->selFlags & SF_MinMaxAgg ); if( pSub->selFlags & (SF_Recursive|SF_MinMaxAgg) ){ return 0; /* Restrictions (22) and (24) */ } if( (p->selFlags & SF_Recursive) && pSub->pPrior ){ return 0; /* Restriction (23) */ } /* OBSOLETE COMMENT 1: ** Restriction 3: If the subquery is a join, make sure the subquery is ** not used as the right operand of an outer join. Examples of why this ** is not allowed: ** ** t1 LEFT OUTER JOIN (t2 JOIN t3) ** ** If we flatten the above, we would get ** ** (t1 LEFT OUTER JOIN t2) JOIN t3 ** ** which is not at all the same thing. ** ** OBSOLETE COMMENT 2: ** Restriction 12: If the subquery is the right operand of a left outer ** join, make sure the subquery has no WHERE clause. ** An examples of why this is not allowed: ** ** t1 LEFT OUTER JOIN (SELECT * FROM t2 WHERE t2.x>0) ** ** If we flatten the above, we would get ** ** (t1 LEFT OUTER JOIN t2) WHERE t2.x>0 ** ** But the t2.x>0 test will always fail on a NULL row of t2, which ** effectively converts the OUTER JOIN into an INNER JOIN. ** ** THIS OVERRIDES OBSOLETE COMMENTS 1 AND 2 ABOVE: ** Ticket #3300 shows that flattening the right term of a LEFT JOIN ** is fraught with danger. Best to avoid the whole thing. If the ** subquery is the right term of a LEFT JOIN, then do not flatten. */ if( (pSubitem->jointype & JT_OUTER)!=0 ){ return 0; } /* Restriction 17: If the sub-query is a compound SELECT, then it must ** use only the UNION ALL operator. And none of the simple select queries ** that make up the compound SELECT are allowed to be aggregate or distinct ** queries. */ if( pSub->pPrior ){ if( pSub->pOrderBy ){ return 0; /* Restriction 20 */ } if( isAgg || (p->selFlags & SF_Distinct)!=0 || pSrc->nSrc!=1 ){ return 0; } for(pSub1=pSub; pSub1; pSub1=pSub1->pPrior){ testcase( (pSub1->selFlags & (SF_Distinct|SF_Aggregate))==SF_Distinct ); testcase( (pSub1->selFlags & (SF_Distinct|SF_Aggregate))==SF_Aggregate ); assert( pSub->pSrc!=0 ); assert( pSub->pEList->nExpr==pSub1->pEList->nExpr ); if( (pSub1->selFlags & (SF_Distinct|SF_Aggregate))!=0 || (pSub1->pPrior && pSub1->op!=TK_ALL) || pSub1->pSrc->nSrc<1 ){ return 0; } testcase( pSub1->pSrc->nSrc>1 ); } /* Restriction 18. */ if( p->pOrderBy ){ int ii; for(ii=0; iipOrderBy->nExpr; ii++){ if( p->pOrderBy->a[ii].u.x.iOrderByCol==0 ) return 0; } } } /***** If we reach this point, flattening is permitted. 
*****/ SELECTTRACE(1,pParse,p,("flatten %s.%p from term %d\n", pSub->zSelName, pSub, iFrom)); /* Authorize the subquery */ pParse->zAuthContext = pSubitem->zName; TESTONLY(i =) sqlite3AuthCheck(pParse, SQLITE_SELECT, 0, 0, 0); testcase( i==SQLITE_DENY ); pParse->zAuthContext = zSavedAuthContext; /* If the sub-query is a compound SELECT statement, then (by restrictions ** 17 and 18 above) it must be a UNION ALL and the parent query must ** be of the form: ** ** SELECT FROM () ** ** followed by any ORDER BY, LIMIT and/or OFFSET clauses. This block ** creates N-1 copies of the parent query without any ORDER BY, LIMIT or ** OFFSET clauses and joins them to the left-hand-side of the original ** using UNION ALL operators. In this case N is the number of simple ** select statements in the compound sub-query. ** ** Example: ** ** SELECT a+1 FROM ( ** SELECT x FROM tab ** UNION ALL ** SELECT y FROM tab ** UNION ALL ** SELECT abs(z*2) FROM tab2 ** ) WHERE a!=5 ORDER BY 1 ** ** Transformed into: ** ** SELECT x+1 FROM tab WHERE x+1!=5 ** UNION ALL ** SELECT y+1 FROM tab WHERE y+1!=5 ** UNION ALL ** SELECT abs(z*2)+1 FROM tab2 WHERE abs(z*2)+1!=5 ** ORDER BY 1 ** ** We call this the "compound-subquery flattening". */ for(pSub=pSub->pPrior; pSub; pSub=pSub->pPrior){ Select *pNew; ExprList *pOrderBy = p->pOrderBy; Expr *pLimit = p->pLimit; Expr *pOffset = p->pOffset; Select *pPrior = p->pPrior; p->pOrderBy = 0; p->pSrc = 0; p->pPrior = 0; p->pLimit = 0; p->pOffset = 0; pNew = sqlite3SelectDup(db, p, 0); sqlite3SelectSetName(pNew, pSub->zSelName); p->pOffset = pOffset; p->pLimit = pLimit; p->pOrderBy = pOrderBy; p->pSrc = pSrc; p->op = TK_ALL; if( pNew==0 ){ p->pPrior = pPrior; }else{ pNew->pPrior = pPrior; if( pPrior ) pPrior->pNext = pNew; pNew->pNext = p; p->pPrior = pNew; SELECTTRACE(2,pParse,p, ("compound-subquery flattener creates %s.%p as peer\n", pNew->zSelName, pNew)); } if( db->mallocFailed ) return 1; } /* Begin flattening the iFrom-th entry of the FROM clause ** in the outer query. */ pSub = pSub1 = pSubitem->pSelect; /* Delete the transient table structure associated with the ** subquery */ sqlite3DbFree(db, pSubitem->zDatabase); sqlite3DbFree(db, pSubitem->zName); sqlite3DbFree(db, pSubitem->zAlias); pSubitem->zDatabase = 0; pSubitem->zName = 0; pSubitem->zAlias = 0; pSubitem->pSelect = 0; /* Defer deleting the Table object associated with the ** subquery until code generation is ** complete, since there may still exist Expr.pTab entries that ** refer to the subquery even after flattening. Ticket #3346. ** ** pSubitem->pTab is always non-NULL by test restrictions and tests above. */ if( ALWAYS(pSubitem->pTab!=0) ){ Table *pTabToDel = pSubitem->pTab; if( pTabToDel->nRef==1 ){ Parse *pToplevel = sqlite3ParseToplevel(pParse); pTabToDel->pNextZombie = pToplevel->pZombieTab; pToplevel->pZombieTab = pTabToDel; }else{ pTabToDel->nRef--; } pSubitem->pTab = 0; } /* The following loop runs once for each term in a compound-subquery ** flattening (as described above). If we are doing a different kind ** of flattening - a flattening other than a compound-subquery flattening - ** then this loop only runs once. ** ** This loop moves all of the FROM elements of the subquery into the ** the FROM clause of the outer query. Before doing this, remember ** the cursor number for the original outer query FROM element in ** iParent. The iParent cursor will never be used. 
Subsequent code ** will scan expressions looking for iParent references and replace ** those references with expressions that resolve to the subquery FROM ** elements we are now copying in. */ for(pParent=p; pParent; pParent=pParent->pPrior, pSub=pSub->pPrior){ int nSubSrc; u8 jointype = 0; pSubSrc = pSub->pSrc; /* FROM clause of subquery */ nSubSrc = pSubSrc->nSrc; /* Number of terms in subquery FROM clause */ pSrc = pParent->pSrc; /* FROM clause of the outer query */ if( pSrc ){ assert( pParent==p ); /* First time through the loop */ jointype = pSubitem->jointype; }else{ assert( pParent!=p ); /* 2nd and subsequent times through the loop */ pSrc = pParent->pSrc = sqlite3SrcListAppend(db, 0, 0, 0); if( pSrc==0 ){ assert( db->mallocFailed ); break; } } /* The subquery uses a single slot of the FROM clause of the outer ** query. If the subquery has more than one element in its FROM clause, ** then expand the outer query to make space for it to hold all elements ** of the subquery. ** ** Example: ** ** SELECT * FROM tabA, (SELECT * FROM sub1, sub2), tabB; ** ** The outer query has 3 slots in its FROM clause. One slot of the ** outer query (the middle slot) is used by the subquery. The next ** block of code will expand the out query to 4 slots. The middle ** slot is expanded to two slots in order to make space for the ** two elements in the FROM clause of the subquery. */ if( nSubSrc>1 ){ pParent->pSrc = pSrc = sqlite3SrcListEnlarge(db, pSrc, nSubSrc-1,iFrom+1); if( db->mallocFailed ){ break; } } /* Transfer the FROM clause terms from the subquery into the ** outer query. */ for(i=0; ia[i+iFrom].pUsing); pSrc->a[i+iFrom] = pSubSrc->a[i]; memset(&pSubSrc->a[i], 0, sizeof(pSubSrc->a[i])); } pSrc->a[iFrom].jointype = jointype; /* Now begin substituting subquery result set expressions for ** references to the iParent in the outer query. ** ** Example: ** ** SELECT a+5, b*10 FROM (SELECT x*3 AS a, y+10 AS b FROM t1) WHERE a>b; ** \ \_____________ subquery __________/ / ** \_____________________ outer query ______________________________/ ** ** We look at every expression in the outer query and every place we see ** "a" we substitute "x*3" and every place we see "b" we substitute "y+10". */ pList = pParent->pEList; for(i=0; inExpr; i++){ if( pList->a[i].zName==0 ){ char *zName = sqlite3DbStrDup(db, pList->a[i].zSpan); sqlite3Dequote(zName); pList->a[i].zName = zName; } } substExprList(db, pParent->pEList, iParent, pSub->pEList); if( isAgg ){ substExprList(db, pParent->pGroupBy, iParent, pSub->pEList); pParent->pHaving = substExpr(db, pParent->pHaving, iParent, pSub->pEList); } if( pSub->pOrderBy ){ /* At this point, any non-zero iOrderByCol values indicate that the ** ORDER BY column expression is identical to the iOrderByCol'th ** expression returned by SELECT statement pSub. Since these values ** do not necessarily correspond to columns in SELECT statement pParent, ** zero them before transfering the ORDER BY clause. ** ** Not doing this may cause an error if a subsequent call to this ** function attempts to flatten a compound sub-query into pParent ** (the only way this can happen is if the compound sub-query is ** currently part of pSub->pSrc). See ticket [d11a6e908f]. 
*/ ExprList *pOrderBy = pSub->pOrderBy; for(i=0; inExpr; i++){ pOrderBy->a[i].u.x.iOrderByCol = 0; } assert( pParent->pOrderBy==0 ); assert( pSub->pPrior==0 ); pParent->pOrderBy = pOrderBy; pSub->pOrderBy = 0; }else if( pParent->pOrderBy ){ substExprList(db, pParent->pOrderBy, iParent, pSub->pEList); } if( pSub->pWhere ){ pWhere = sqlite3ExprDup(db, pSub->pWhere, 0); }else{ pWhere = 0; } if( subqueryIsAgg ){ assert( pParent->pHaving==0 ); pParent->pHaving = pParent->pWhere; pParent->pWhere = pWhere; pParent->pHaving = substExpr(db, pParent->pHaving, iParent, pSub->pEList); pParent->pHaving = sqlite3ExprAnd(db, pParent->pHaving, sqlite3ExprDup(db, pSub->pHaving, 0)); assert( pParent->pGroupBy==0 ); pParent->pGroupBy = sqlite3ExprListDup(db, pSub->pGroupBy, 0); }else{ pParent->pWhere = substExpr(db, pParent->pWhere, iParent, pSub->pEList); pParent->pWhere = sqlite3ExprAnd(db, pParent->pWhere, pWhere); } /* The flattened query is distinct if either the inner or the ** outer query is distinct. */ pParent->selFlags |= pSub->selFlags & SF_Distinct; /* ** SELECT ... FROM (SELECT ... LIMIT a OFFSET b) LIMIT x OFFSET y; ** ** One is tempted to try to add a and b to combine the limits. But this ** does not work if either limit is negative. */ if( pSub->pLimit ){ pParent->pLimit = pSub->pLimit; pSub->pLimit = 0; } } /* Finially, delete what is left of the subquery and return ** success. */ sqlite3SelectDelete(db, pSub1); #if SELECTTRACE_ENABLED if( sqlite3SelectTrace & 0x100 ){ SELECTTRACE(0x100,pParse,p,("After flattening:\n")); sqlite3TreeViewSelect(0, p, 0); } #endif return 1; } #endif /* !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) */ #if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) /* ** Make copies of relevant WHERE clause terms of the outer query into ** the WHERE clause of subquery. Example: ** ** SELECT * FROM (SELECT a AS x, c-d AS y FROM t1) WHERE x=5 AND y=10; ** ** Transformed into: ** ** SELECT * FROM (SELECT a AS x, c-d AS y FROM t1 WHERE a=5 AND c-d=10) ** WHERE x=5 AND y=10; ** ** The hope is that the terms added to the inner query will make it more ** efficient. ** ** Do not attempt this optimization if: ** ** (1) The inner query is an aggregate. (In that case, we'd really want ** to copy the outer WHERE-clause terms onto the HAVING clause of the ** inner query. But they probably won't help there so do not bother.) ** ** (2) The inner query is the recursive part of a common table expression. ** ** (3) The inner query has a LIMIT clause (since the changes to the WHERE ** close would change the meaning of the LIMIT). ** ** (4) The inner query is the right operand of a LEFT JOIN. (The caller ** enforces this restriction since this routine does not have enough ** information to know.) ** ** Return 0 if no changes are made and non-zero if one or more WHERE clause ** terms are duplicated into the subquery. 
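** When the subquery is a compound, the pushed-down term is added to every
** arm.  For example (an illustrative query):
**
**     SELECT * FROM (SELECT a AS x FROM t1 UNION ALL SELECT b FROM t2)
**      WHERE x>10;
**
** gains "a>10" on the t1 arm and "b>10" on the t2 arm, because the copied
** WHERE term is rewritten against each arm's own result-set expressions.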
*/ static int pushDownWhereTerms( sqlite3 *db, /* The database connection (for malloc()) */ Select *pSubq, /* The subquery whose WHERE clause is to be augmented */ Expr *pWhere, /* The WHERE clause of the outer query */ int iCursor /* Cursor number of the subquery */ ){ Expr *pNew; int nChng = 0; if( pWhere==0 ) return 0; if( (pSubq->selFlags & (SF_Aggregate|SF_Recursive))!=0 ){ return 0; /* restrictions (1) and (2) */ } if( pSubq->pLimit!=0 ){ return 0; /* restriction (3) */ } while( pWhere->op==TK_AND ){ nChng += pushDownWhereTerms(db, pSubq, pWhere->pRight, iCursor); pWhere = pWhere->pLeft; } if( sqlite3ExprIsTableConstant(pWhere, iCursor) ){ nChng++; while( pSubq ){ pNew = sqlite3ExprDup(db, pWhere, 0); pNew = substExpr(db, pNew, iCursor, pSubq->pEList); pSubq->pWhere = sqlite3ExprAnd(db, pSubq->pWhere, pNew); pSubq = pSubq->pPrior; } } return nChng; } #endif /* !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) */ /* ** Based on the contents of the AggInfo structure indicated by the first ** argument, this function checks if the following are true: ** ** * the query contains just a single aggregate function, ** * the aggregate function is either min() or max(), and ** * the argument to the aggregate function is a column value. ** ** If all of the above are true, then WHERE_ORDERBY_MIN or WHERE_ORDERBY_MAX ** is returned as appropriate. Also, *ppMinMax is set to point to the ** list of arguments passed to the aggregate before returning. ** ** Or, if the conditions above are not met, *ppMinMax is set to 0 and ** WHERE_ORDERBY_NORMAL is returned. */ static u8 minMaxQuery(AggInfo *pAggInfo, ExprList **ppMinMax){ int eRet = WHERE_ORDERBY_NORMAL; /* Return value */ *ppMinMax = 0; if( pAggInfo->nFunc==1 ){ Expr *pExpr = pAggInfo->aFunc[0].pExpr; /* Aggregate function */ ExprList *pEList = pExpr->x.pList; /* Arguments to agg function */ assert( pExpr->op==TK_AGG_FUNCTION ); if( pEList && pEList->nExpr==1 && pEList->a[0].pExpr->op==TK_AGG_COLUMN ){ const char *zFunc = pExpr->u.zToken; if( sqlite3StrICmp(zFunc, "min")==0 ){ eRet = WHERE_ORDERBY_MIN; *ppMinMax = pEList; }else if( sqlite3StrICmp(zFunc, "max")==0 ){ eRet = WHERE_ORDERBY_MAX; *ppMinMax = pEList; } } } assert( *ppMinMax==0 || (*ppMinMax)->nExpr==1 ); return eRet; } /* ** The select statement passed as the first argument is an aggregate query. ** The second argument is the associated aggregate-info object. This ** function tests if the SELECT is of the form: ** ** SELECT count(*) FROM ** ** where table is a database table, not a sub-select or view. If the query ** does match this pattern, then a pointer to the Table object representing ** is returned. Otherwise, 0 is returned. */ static Table *isSimpleCount(Select *p, AggInfo *pAggInfo){ Table *pTab; Expr *pExpr; assert( !p->pGroupBy ); if( p->pWhere || p->pEList->nExpr!=1 || p->pSrc->nSrc!=1 || p->pSrc->a[0].pSelect ){ return 0; } pTab = p->pSrc->a[0].pTab; pExpr = p->pEList->a[0].pExpr; assert( pTab && !pTab->pSelect && pExpr ); if( IsVirtual(pTab) ) return 0; if( pExpr->op!=TK_AGG_FUNCTION ) return 0; if( NEVER(pAggInfo->nFunc==0) ) return 0; if( (pAggInfo->aFunc[0].pFunc->funcFlags&SQLITE_FUNC_COUNT)==0 ) return 0; if( pExpr->flags&EP_Distinct ) return 0; return pTab; } /* ** If the source-list item passed as an argument was augmented with an ** INDEXED BY clause, then try to locate the specified index. If there ** was such a clause and the named index cannot be found, return ** SQLITE_ERROR and leave an error in pParse. 
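** For example (an illustrative statement), "SELECT * FROM t1 INDEXED BY i1"
** fails here with "no such index: i1" when table t1 has no index of that
** name.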
Otherwise, populate ** pFrom->pIndex and return SQLITE_OK. */ SQLITE_PRIVATE int sqlite3IndexedByLookup(Parse *pParse, struct SrcList_item *pFrom){ if( pFrom->pTab && pFrom->zIndexedBy ){ Table *pTab = pFrom->pTab; char *zIndexedBy = pFrom->zIndexedBy; Index *pIdx; for(pIdx=pTab->pIndex; pIdx && sqlite3StrICmp(pIdx->zName, zIndexedBy); pIdx=pIdx->pNext ); if( !pIdx ){ sqlite3ErrorMsg(pParse, "no such index: %s", zIndexedBy, 0); pParse->checkSchema = 1; return SQLITE_ERROR; } pFrom->pIndex = pIdx; } return SQLITE_OK; } /* ** Detect compound SELECT statements that use an ORDER BY clause with ** an alternative collating sequence. ** ** SELECT ... FROM t1 EXCEPT SELECT ... FROM t2 ORDER BY .. COLLATE ... ** ** These are rewritten as a subquery: ** ** SELECT * FROM (SELECT ... FROM t1 EXCEPT SELECT ... FROM t2) ** ORDER BY ... COLLATE ... ** ** This transformation is necessary because the multiSelectOrderBy() routine ** above that generates the code for a compound SELECT with an ORDER BY clause ** uses a merge algorithm that requires the same collating sequence on the ** result columns as on the ORDER BY clause. See ticket ** http://www.sqlite.org/src/info/6709574d2a ** ** This transformation is only needed for EXCEPT, INTERSECT, and UNION. ** The UNION ALL operator works fine with multiSelectOrderBy() even when ** there are COLLATE terms in the ORDER BY. */ static int convertCompoundSelectToSubquery(Walker *pWalker, Select *p){ int i; Select *pNew; Select *pX; sqlite3 *db; struct ExprList_item *a; SrcList *pNewSrc; Parse *pParse; Token dummy; if( p->pPrior==0 ) return WRC_Continue; if( p->pOrderBy==0 ) return WRC_Continue; for(pX=p; pX && (pX->op==TK_ALL || pX->op==TK_SELECT); pX=pX->pPrior){} if( pX==0 ) return WRC_Continue; a = p->pOrderBy->a; for(i=p->pOrderBy->nExpr-1; i>=0; i--){ if( a[i].pExpr->flags & EP_Collate ) break; } if( i<0 ) return WRC_Continue; /* If we reach this point, that means the transformation is required. */ pParse = pWalker->pParse; db = pParse->db; pNew = sqlite3DbMallocZero(db, sizeof(*pNew) ); if( pNew==0 ) return WRC_Abort; memset(&dummy, 0, sizeof(dummy)); pNewSrc = sqlite3SrcListAppendFromTerm(pParse,0,0,0,&dummy,pNew,0,0); if( pNewSrc==0 ) return WRC_Abort; *pNew = *p; p->pSrc = pNewSrc; p->pEList = sqlite3ExprListAppend(pParse, 0, sqlite3Expr(db, TK_ALL, 0)); p->op = TK_SELECT; p->pWhere = 0; pNew->pGroupBy = 0; pNew->pHaving = 0; pNew->pOrderBy = 0; p->pPrior = 0; p->pNext = 0; p->pWith = 0; p->selFlags &= ~SF_Compound; assert( (p->selFlags & SF_Converted)==0 ); p->selFlags |= SF_Converted; assert( pNew->pPrior!=0 ); pNew->pPrior->pNext = pNew; pNew->pLimit = 0; pNew->pOffset = 0; return WRC_Continue; } #ifndef SQLITE_OMIT_CTE /* ** Argument pWith (which may be NULL) points to a linked list of nested ** WITH contexts, from inner to outermost. If the table identified by ** FROM clause element pItem is really a common-table-expression (CTE) ** then return a pointer to the CTE definition for that table. Otherwise ** return NULL. ** ** If a non-NULL value is returned, set *ppContext to point to the With ** object that the returned CTE belongs to. 
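** The search proceeds from the innermost WITH clause outward, so in an
** illustrative statement such as
**
**     WITH t(x) AS (SELECT 1)
**     SELECT * FROM (WITH t(x) AS (SELECT 2) SELECT x FROM t);
**
** the reference to "t" inside the subquery resolves to the inner CTE and
** *ppContext is set to the With object of that inner clause.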
*/ static struct Cte *searchWith( With *pWith, /* Current outermost WITH clause */ struct SrcList_item *pItem, /* FROM clause element to resolve */ With **ppContext /* OUT: WITH clause return value belongs to */ ){ const char *zName; if( pItem->zDatabase==0 && (zName = pItem->zName)!=0 ){ With *p; for(p=pWith; p; p=p->pOuter){ int i; for(i=0; inCte; i++){ if( sqlite3StrICmp(zName, p->a[i].zName)==0 ){ *ppContext = p; return &p->a[i]; } } } } return 0; } /* The code generator maintains a stack of active WITH clauses ** with the inner-most WITH clause being at the top of the stack. ** ** This routine pushes the WITH clause passed as the second argument ** onto the top of the stack. If argument bFree is true, then this ** WITH clause will never be popped from the stack. In this case it ** should be freed along with the Parse object. In other cases, when ** bFree==0, the With object will be freed along with the SELECT ** statement with which it is associated. */ SQLITE_PRIVATE void sqlite3WithPush(Parse *pParse, With *pWith, u8 bFree){ assert( bFree==0 || pParse->pWith==0 ); if( pWith ){ pWith->pOuter = pParse->pWith; pParse->pWith = pWith; pParse->bFreeWith = bFree; } } /* ** This function checks if argument pFrom refers to a CTE declared by ** a WITH clause on the stack currently maintained by the parser. And, ** if currently processing a CTE expression, if it is a recursive ** reference to the current CTE. ** ** If pFrom falls into either of the two categories above, pFrom->pTab ** and other fields are populated accordingly. The caller should check ** (pFrom->pTab!=0) to determine whether or not a successful match ** was found. ** ** Whether or not a match is found, SQLITE_OK is returned if no error ** occurs. If an error does occur, an error message is stored in the ** parser and some error code other than SQLITE_OK returned. */ static int withExpand( Walker *pWalker, struct SrcList_item *pFrom ){ Parse *pParse = pWalker->pParse; sqlite3 *db = pParse->db; struct Cte *pCte; /* Matched CTE (or NULL if no match) */ With *pWith; /* WITH clause that pCte belongs to */ assert( pFrom->pTab==0 ); pCte = searchWith(pParse->pWith, pFrom, &pWith); if( pCte ){ Table *pTab; ExprList *pEList; Select *pSel; Select *pLeft; /* Left-most SELECT statement */ int bMayRecursive; /* True if compound joined by UNION [ALL] */ With *pSavedWith; /* Initial value of pParse->pWith */ /* If pCte->zErr is non-NULL at this point, then this is an illegal ** recursive reference to CTE pCte. Leave an error in pParse and return ** early. If pCte->zErr is NULL, then this is not a recursive reference. ** In this case, proceed. */ if( pCte->zErr ){ sqlite3ErrorMsg(pParse, pCte->zErr, pCte->zName); return SQLITE_ERROR; } assert( pFrom->pTab==0 ); pFrom->pTab = pTab = sqlite3DbMallocZero(db, sizeof(Table)); if( pTab==0 ) return WRC_Abort; pTab->nRef = 1; pTab->zName = sqlite3DbStrDup(db, pCte->zName); pTab->iPKey = -1; pTab->nRowLogEst = 200; assert( 200==sqlite3LogEst(1048576) ); pTab->tabFlags |= TF_Ephemeral | TF_NoVisibleRowid; pFrom->pSelect = sqlite3SelectDup(db, pCte->pSelect, 0); if( db->mallocFailed ) return SQLITE_NOMEM; assert( pFrom->pSelect ); /* Check if this is a recursive CTE. 
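** A CTE is treated as potentially recursive when its body is a compound
** SELECT joined by UNION or UNION ALL whose FROM clause refers back to the
** CTE by name.  Illustrative example (hypothetical names):
**
**     WITH RECURSIVE cnt(x) AS (
**       SELECT 1
**       UNION ALL
**       SELECT x+1 FROM cnt WHERE x<10
**     )
**     SELECT x FROM cnt;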
*/ pSel = pFrom->pSelect; bMayRecursive = ( pSel->op==TK_ALL || pSel->op==TK_UNION ); if( bMayRecursive ){ int i; SrcList *pSrc = pFrom->pSelect->pSrc; for(i=0; inSrc; i++){ struct SrcList_item *pItem = &pSrc->a[i]; if( pItem->zDatabase==0 && pItem->zName!=0 && 0==sqlite3StrICmp(pItem->zName, pCte->zName) ){ pItem->pTab = pTab; pItem->isRecursive = 1; pTab->nRef++; pSel->selFlags |= SF_Recursive; } } } /* Only one recursive reference is permitted. */ if( pTab->nRef>2 ){ sqlite3ErrorMsg( pParse, "multiple references to recursive table: %s", pCte->zName ); return SQLITE_ERROR; } assert( pTab->nRef==1 || ((pSel->selFlags&SF_Recursive) && pTab->nRef==2 )); pCte->zErr = "circular reference: %s"; pSavedWith = pParse->pWith; pParse->pWith = pWith; sqlite3WalkSelect(pWalker, bMayRecursive ? pSel->pPrior : pSel); for(pLeft=pSel; pLeft->pPrior; pLeft=pLeft->pPrior); pEList = pLeft->pEList; if( pCte->pCols ){ if( pEList && pEList->nExpr!=pCte->pCols->nExpr ){ sqlite3ErrorMsg(pParse, "table %s has %d values for %d columns", pCte->zName, pEList->nExpr, pCte->pCols->nExpr ); pParse->pWith = pSavedWith; return SQLITE_ERROR; } pEList = pCte->pCols; } selectColumnsFromExprList(pParse, pEList, &pTab->nCol, &pTab->aCol); if( bMayRecursive ){ if( pSel->selFlags & SF_Recursive ){ pCte->zErr = "multiple recursive references: %s"; }else{ pCte->zErr = "recursive reference in a subquery: %s"; } sqlite3WalkSelect(pWalker, pSel); } pCte->zErr = 0; pParse->pWith = pSavedWith; } return SQLITE_OK; } #endif #ifndef SQLITE_OMIT_CTE /* ** If the SELECT passed as the second argument has an associated WITH ** clause, pop it from the stack stored as part of the Parse object. ** ** This function is used as the xSelectCallback2() callback by ** sqlite3SelectExpand() when walking a SELECT tree to resolve table ** names and other FROM clause elements. */ static void selectPopWith(Walker *pWalker, Select *p){ Parse *pParse = pWalker->pParse; With *pWith = findRightmost(p)->pWith; if( pWith!=0 ){ assert( pParse->pWith==pWith ); pParse->pWith = pWith->pOuter; } } #else #define selectPopWith 0 #endif /* ** This routine is a Walker callback for "expanding" a SELECT statement. ** "Expanding" means to do the following: ** ** (1) Make sure VDBE cursor numbers have been assigned to every ** element of the FROM clause. ** ** (2) Fill in the pTabList->a[].pTab fields in the SrcList that ** defines FROM clause. When views appear in the FROM clause, ** fill pTabList->a[].pSelect with a copy of the SELECT statement ** that implements the view. A copy is made of the view's SELECT ** statement so that we can freely modify or delete that statement ** without worrying about messing up the persistent representation ** of the view. ** ** (3) Add terms to the WHERE clause to accommodate the NATURAL keyword ** on joins and the ON and USING clause of joins. ** ** (4) Scan the list of columns in the result set (pEList) looking ** for instances of the "*" operator or the TABLE.* operator. ** If found, expand each "*" to be every column in every table ** and TABLE.* to be every column in TABLE. 
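**
** Illustration of step (4), assuming a hypothetical schema t1(a,b), t2(c):
**
**     SELECT * FROM t1, t2;      becomes   SELECT t1.a, t1.b, t2.c FROM t1, t2;
**     SELECT t2.* FROM t1, t2;   becomes   SELECT t2.c FROM t1, t2;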
** */ static int selectExpander(Walker *pWalker, Select *p){ Parse *pParse = pWalker->pParse; int i, j, k; SrcList *pTabList; ExprList *pEList; struct SrcList_item *pFrom; sqlite3 *db = pParse->db; Expr *pE, *pRight, *pExpr; u16 selFlags = p->selFlags; p->selFlags |= SF_Expanded; if( db->mallocFailed ){ return WRC_Abort; } if( NEVER(p->pSrc==0) || (selFlags & SF_Expanded)!=0 ){ return WRC_Prune; } pTabList = p->pSrc; pEList = p->pEList; if( pWalker->xSelectCallback2==selectPopWith ){ sqlite3WithPush(pParse, findRightmost(p)->pWith, 0); } /* Make sure cursor numbers have been assigned to all entries in ** the FROM clause of the SELECT statement. */ sqlite3SrcListAssignCursors(pParse, pTabList); /* Look up every table named in the FROM clause of the select. If ** an entry of the FROM clause is a subquery instead of a table or view, ** then create a transient table structure to describe the subquery. */ for(i=0, pFrom=pTabList->a; inSrc; i++, pFrom++){ Table *pTab; assert( pFrom->isRecursive==0 || pFrom->pTab ); if( pFrom->isRecursive ) continue; if( pFrom->pTab!=0 ){ /* This statement has already been prepared. There is no need ** to go further. */ assert( i==0 ); #ifndef SQLITE_OMIT_CTE selectPopWith(pWalker, p); #endif return WRC_Prune; } #ifndef SQLITE_OMIT_CTE if( withExpand(pWalker, pFrom) ) return WRC_Abort; if( pFrom->pTab ) {} else #endif if( pFrom->zName==0 ){ #ifndef SQLITE_OMIT_SUBQUERY Select *pSel = pFrom->pSelect; /* A sub-query in the FROM clause of a SELECT */ assert( pSel!=0 ); assert( pFrom->pTab==0 ); if( sqlite3WalkSelect(pWalker, pSel) ) return WRC_Abort; pFrom->pTab = pTab = sqlite3DbMallocZero(db, sizeof(Table)); if( pTab==0 ) return WRC_Abort; pTab->nRef = 1; pTab->zName = sqlite3MPrintf(db, "sqlite_sq_%p", (void*)pTab); while( pSel->pPrior ){ pSel = pSel->pPrior; } selectColumnsFromExprList(pParse, pSel->pEList, &pTab->nCol, &pTab->aCol); pTab->iPKey = -1; pTab->nRowLogEst = 200; assert( 200==sqlite3LogEst(1048576) ); pTab->tabFlags |= TF_Ephemeral; #endif }else{ /* An ordinary table or view name in the FROM clause */ assert( pFrom->pTab==0 ); pFrom->pTab = pTab = sqlite3LocateTableItem(pParse, 0, pFrom); if( pTab==0 ) return WRC_Abort; if( pTab->nRef==0xffff ){ sqlite3ErrorMsg(pParse, "too many references to \"%s\": max 65535", pTab->zName); pFrom->pTab = 0; return WRC_Abort; } pTab->nRef++; #if !defined(SQLITE_OMIT_VIEW) || !defined (SQLITE_OMIT_VIRTUALTABLE) if( pTab->pSelect || IsVirtual(pTab) ){ /* We reach here if the named table is a really a view */ if( sqlite3ViewGetColumnNames(pParse, pTab) ) return WRC_Abort; assert( pFrom->pSelect==0 ); pFrom->pSelect = sqlite3SelectDup(db, pTab->pSelect, 0); sqlite3SelectSetName(pFrom->pSelect, pTab->zName); sqlite3WalkSelect(pWalker, pFrom->pSelect); } #endif } /* Locate the index named by the INDEXED BY clause, if any. */ if( sqlite3IndexedByLookup(pParse, pFrom) ){ return WRC_Abort; } } /* Process NATURAL keywords, and ON and USING clauses of joins. */ if( db->mallocFailed || sqliteProcessJoin(pParse, p) ){ return WRC_Abort; } /* For every "*" that occurs in the column list, insert the names of ** all columns in all tables. And for every TABLE.* insert the names ** of all columns in TABLE. The parser inserted a special expression ** with the TK_ALL operator for each "*" that it found in the column list. ** The following code just has to locate the TK_ALL expressions and expand ** each one to the list of all columns in all tables. 
** ** The first loop just checks to see if there are any "*" operators ** that need expanding. */ for(k=0; knExpr; k++){ pE = pEList->a[k].pExpr; if( pE->op==TK_ALL ) break; assert( pE->op!=TK_DOT || pE->pRight!=0 ); assert( pE->op!=TK_DOT || (pE->pLeft!=0 && pE->pLeft->op==TK_ID) ); if( pE->op==TK_DOT && pE->pRight->op==TK_ALL ) break; } if( knExpr ){ /* ** If we get here it means the result set contains one or more "*" ** operators that need to be expanded. Loop through each expression ** in the result set and expand them one by one. */ struct ExprList_item *a = pEList->a; ExprList *pNew = 0; int flags = pParse->db->flags; int longNames = (flags & SQLITE_FullColNames)!=0 && (flags & SQLITE_ShortColNames)==0; for(k=0; knExpr; k++){ pE = a[k].pExpr; pRight = pE->pRight; assert( pE->op!=TK_DOT || pRight!=0 ); if( pE->op!=TK_ALL && (pE->op!=TK_DOT || pRight->op!=TK_ALL) ){ /* This particular expression does not need to be expanded. */ pNew = sqlite3ExprListAppend(pParse, pNew, a[k].pExpr); if( pNew ){ pNew->a[pNew->nExpr-1].zName = a[k].zName; pNew->a[pNew->nExpr-1].zSpan = a[k].zSpan; a[k].zName = 0; a[k].zSpan = 0; } a[k].pExpr = 0; }else{ /* This expression is a "*" or a "TABLE.*" and needs to be ** expanded. */ int tableSeen = 0; /* Set to 1 when TABLE matches */ char *zTName = 0; /* text of name of TABLE */ if( pE->op==TK_DOT ){ assert( pE->pLeft!=0 ); assert( !ExprHasProperty(pE->pLeft, EP_IntValue) ); zTName = pE->pLeft->u.zToken; } for(i=0, pFrom=pTabList->a; inSrc; i++, pFrom++){ Table *pTab = pFrom->pTab; Select *pSub = pFrom->pSelect; char *zTabName = pFrom->zAlias; const char *zSchemaName = 0; int iDb; if( zTabName==0 ){ zTabName = pTab->zName; } if( db->mallocFailed ) break; if( pSub==0 || (pSub->selFlags & SF_NestedFrom)==0 ){ pSub = 0; if( zTName && sqlite3StrICmp(zTName, zTabName)!=0 ){ continue; } iDb = sqlite3SchemaToIndex(db, pTab->pSchema); zSchemaName = iDb>=0 ? db->aDb[iDb].zName : "*"; } for(j=0; jnCol; j++){ char *zName = pTab->aCol[j].zName; char *zColname; /* The computed column name */ char *zToFree; /* Malloced string that needs to be freed */ Token sColname; /* Computed column name as a token */ assert( zName ); if( zTName && pSub && sqlite3MatchSpanName(pSub->pEList->a[j].zSpan, 0, zTName, 0)==0 ){ continue; } /* If a column is marked as 'hidden' (currently only possible ** for virtual tables), do not include it in the expanded ** result-set list. */ if( IsHiddenColumn(&pTab->aCol[j]) ){ assert(IsVirtual(pTab)); continue; } tableSeen = 1; if( i>0 && zTName==0 ){ if( (pFrom->jointype & JT_NATURAL)!=0 && tableAndColumnIndex(pTabList, i, zName, 0, 0) ){ /* In a NATURAL join, omit the join columns from the ** table to the right of the join */ continue; } if( sqlite3IdListIndex(pFrom->pUsing, zName)>=0 ){ /* In a join with a USING clause, omit columns in the ** using clause from the table on the right. 
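** Illustrative example (hypothetical schema t1(a,b), t2(a,c)):
**
**     SELECT * FROM t1 JOIN t2 USING(a);
**
** expands to the columns a, b, c -- the duplicate t2.a is suppressed, just
** as the shared columns of a NATURAL join are emitted only once.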
*/ continue; } } pRight = sqlite3Expr(db, TK_ID, zName); zColname = zName; zToFree = 0; if( longNames || pTabList->nSrc>1 ){ Expr *pLeft; pLeft = sqlite3Expr(db, TK_ID, zTabName); pExpr = sqlite3PExpr(pParse, TK_DOT, pLeft, pRight, 0); if( zSchemaName ){ pLeft = sqlite3Expr(db, TK_ID, zSchemaName); pExpr = sqlite3PExpr(pParse, TK_DOT, pLeft, pExpr, 0); } if( longNames ){ zColname = sqlite3MPrintf(db, "%s.%s", zTabName, zName); zToFree = zColname; } }else{ pExpr = pRight; } pNew = sqlite3ExprListAppend(pParse, pNew, pExpr); sColname.z = zColname; sColname.n = sqlite3Strlen30(zColname); sqlite3ExprListSetName(pParse, pNew, &sColname, 0); if( pNew && (p->selFlags & SF_NestedFrom)!=0 ){ struct ExprList_item *pX = &pNew->a[pNew->nExpr-1]; if( pSub ){ pX->zSpan = sqlite3DbStrDup(db, pSub->pEList->a[j].zSpan); testcase( pX->zSpan==0 ); }else{ pX->zSpan = sqlite3MPrintf(db, "%s.%s.%s", zSchemaName, zTabName, zColname); testcase( pX->zSpan==0 ); } pX->bSpanIsTab = 1; } sqlite3DbFree(db, zToFree); } } if( !tableSeen ){ if( zTName ){ sqlite3ErrorMsg(pParse, "no such table: %s", zTName); }else{ sqlite3ErrorMsg(pParse, "no tables specified"); } } } } sqlite3ExprListDelete(db, pEList); p->pEList = pNew; } #if SQLITE_MAX_COLUMN if( p->pEList && p->pEList->nExpr>db->aLimit[SQLITE_LIMIT_COLUMN] ){ sqlite3ErrorMsg(pParse, "too many columns in result set"); } #endif return WRC_Continue; } /* ** No-op routine for the parse-tree walker. ** ** When this routine is the Walker.xExprCallback then expression trees ** are walked without any actions being taken at each node. Presumably, ** when this routine is used for Walker.xExprCallback then ** Walker.xSelectCallback is set to do something useful for every ** subquery in the parser tree. */ static int exprWalkNoop(Walker *NotUsed, Expr *NotUsed2){ UNUSED_PARAMETER2(NotUsed, NotUsed2); return WRC_Continue; } /* ** This routine "expands" a SELECT statement and all of its subqueries. ** For additional information on what it means to "expand" a SELECT ** statement, see the comment on the selectExpand worker callback above. ** ** Expanding a SELECT statement is the first step in processing a ** SELECT statement. The SELECT statement must be expanded before ** name resolution is performed. ** ** If anything goes wrong, an error message is written into pParse. ** The calling function can detect the problem by looking at pParse->nErr ** and/or pParse->db->mallocFailed. */ static void sqlite3SelectExpand(Parse *pParse, Select *pSelect){ Walker w; memset(&w, 0, sizeof(w)); w.xExprCallback = exprWalkNoop; w.pParse = pParse; if( pParse->hasCompound ){ w.xSelectCallback = convertCompoundSelectToSubquery; sqlite3WalkSelect(&w, pSelect); } w.xSelectCallback = selectExpander; if( (pSelect->selFlags & SF_MultiValue)==0 ){ w.xSelectCallback2 = selectPopWith; } sqlite3WalkSelect(&w, pSelect); } #ifndef SQLITE_OMIT_SUBQUERY /* ** This is a Walker.xSelectCallback callback for the sqlite3SelectTypeInfo() ** interface. ** ** For each FROM-clause subquery, add Column.zType and Column.zColl ** information to the Table structure that represents the result set ** of that subquery. ** ** The Table structure that represents the result set was constructed ** by selectExpander() but the type and collation information was omitted ** at that point because identifiers had not yet been resolved. This ** routine is called after identifier resolution. 
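** Illustrative example (hypothetical schema t1(a TEXT COLLATE NOCASE)): for
**
**     SELECT x FROM (SELECT a AS x FROM t1);
**
** this pass records the declared type TEXT and the NOCASE collation on the
** result column x of the ephemeral Table built for the subquery.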
*/ static void selectAddSubqueryTypeInfo(Walker *pWalker, Select *p){ Parse *pParse; int i; SrcList *pTabList; struct SrcList_item *pFrom; assert( p->selFlags & SF_Resolved ); if( (p->selFlags & SF_HasTypeInfo)==0 ){ p->selFlags |= SF_HasTypeInfo; pParse = pWalker->pParse; pTabList = p->pSrc; for(i=0, pFrom=pTabList->a; inSrc; i++, pFrom++){ Table *pTab = pFrom->pTab; if( ALWAYS(pTab!=0) && (pTab->tabFlags & TF_Ephemeral)!=0 ){ /* A sub-query in the FROM clause of a SELECT */ Select *pSel = pFrom->pSelect; if( pSel ){ while( pSel->pPrior ) pSel = pSel->pPrior; selectAddColumnTypeAndCollation(pParse, pTab, pSel); } } } } } #endif /* ** This routine adds datatype and collating sequence information to ** the Table structures of all FROM-clause subqueries in a ** SELECT statement. ** ** Use this routine after name resolution. */ static void sqlite3SelectAddTypeInfo(Parse *pParse, Select *pSelect){ #ifndef SQLITE_OMIT_SUBQUERY Walker w; memset(&w, 0, sizeof(w)); w.xSelectCallback2 = selectAddSubqueryTypeInfo; w.xExprCallback = exprWalkNoop; w.pParse = pParse; sqlite3WalkSelect(&w, pSelect); #endif } /* ** This routine sets up a SELECT statement for processing. The ** following is accomplished: ** ** * VDBE Cursor numbers are assigned to all FROM-clause terms. ** * Ephemeral Table objects are created for all FROM-clause subqueries. ** * ON and USING clauses are shifted into WHERE statements ** * Wildcards "*" and "TABLE.*" in result sets are expanded. ** * Identifiers in expression are matched to tables. ** ** This routine acts recursively on all subqueries within the SELECT. */ SQLITE_PRIVATE void sqlite3SelectPrep( Parse *pParse, /* The parser context */ Select *p, /* The SELECT statement being coded. */ NameContext *pOuterNC /* Name context for container */ ){ sqlite3 *db; if( NEVER(p==0) ) return; db = pParse->db; if( db->mallocFailed ) return; if( p->selFlags & SF_HasTypeInfo ) return; sqlite3SelectExpand(pParse, p); if( pParse->nErr || db->mallocFailed ) return; sqlite3ResolveSelectNames(pParse, p, pOuterNC); if( pParse->nErr || db->mallocFailed ) return; sqlite3SelectAddTypeInfo(pParse, p); } /* ** Reset the aggregate accumulator. ** ** The aggregate accumulator is a set of memory cells that hold ** intermediate results while calculating an aggregate. This ** routine generates code that stores NULLs in all of those memory ** cells. 
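** In VDBE terms this reduces to a single OP_Null spanning the register range
** AggInfo.mnReg..AggInfo.mxReg, plus one OP_OpenEphemeral per DISTINCT
** aggregate (for example, a query using count(DISTINCT a)) so that distinct
** argument values can be tracked while the aggregate is accumulated.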
*/ static void resetAccumulator(Parse *pParse, AggInfo *pAggInfo){ Vdbe *v = pParse->pVdbe; int i; struct AggInfo_func *pFunc; int nReg = pAggInfo->nFunc + pAggInfo->nColumn; if( nReg==0 ) return; #ifdef SQLITE_DEBUG /* Verify that all AggInfo registers are within the range specified by ** AggInfo.mnReg..AggInfo.mxReg */ assert( nReg==pAggInfo->mxReg-pAggInfo->mnReg+1 ); for(i=0; inColumn; i++){ assert( pAggInfo->aCol[i].iMem>=pAggInfo->mnReg && pAggInfo->aCol[i].iMem<=pAggInfo->mxReg ); } for(i=0; inFunc; i++){ assert( pAggInfo->aFunc[i].iMem>=pAggInfo->mnReg && pAggInfo->aFunc[i].iMem<=pAggInfo->mxReg ); } #endif sqlite3VdbeAddOp3(v, OP_Null, 0, pAggInfo->mnReg, pAggInfo->mxReg); for(pFunc=pAggInfo->aFunc, i=0; inFunc; i++, pFunc++){ if( pFunc->iDistinct>=0 ){ Expr *pE = pFunc->pExpr; assert( !ExprHasProperty(pE, EP_xIsSelect) ); if( pE->x.pList==0 || pE->x.pList->nExpr!=1 ){ sqlite3ErrorMsg(pParse, "DISTINCT aggregates must have exactly one " "argument"); pFunc->iDistinct = -1; }else{ KeyInfo *pKeyInfo = keyInfoFromExprList(pParse, pE->x.pList, 0, 0); sqlite3VdbeAddOp4(v, OP_OpenEphemeral, pFunc->iDistinct, 0, 0, (char*)pKeyInfo, P4_KEYINFO); } } } } /* ** Invoke the OP_AggFinalize opcode for every aggregate function ** in the AggInfo structure. */ static void finalizeAggFunctions(Parse *pParse, AggInfo *pAggInfo){ Vdbe *v = pParse->pVdbe; int i; struct AggInfo_func *pF; for(i=0, pF=pAggInfo->aFunc; inFunc; i++, pF++){ ExprList *pList = pF->pExpr->x.pList; assert( !ExprHasProperty(pF->pExpr, EP_xIsSelect) ); sqlite3VdbeAddOp4(v, OP_AggFinal, pF->iMem, pList ? pList->nExpr : 0, 0, (void*)pF->pFunc, P4_FUNCDEF); } } /* ** Update the accumulator memory cells for an aggregate based on ** the current cursor position. */ static void updateAccumulator(Parse *pParse, AggInfo *pAggInfo){ Vdbe *v = pParse->pVdbe; int i; int regHit = 0; int addrHitTest = 0; struct AggInfo_func *pF; struct AggInfo_col *pC; pAggInfo->directMode = 1; for(i=0, pF=pAggInfo->aFunc; inFunc; i++, pF++){ int nArg; int addrNext = 0; int regAgg; ExprList *pList = pF->pExpr->x.pList; assert( !ExprHasProperty(pF->pExpr, EP_xIsSelect) ); if( pList ){ nArg = pList->nExpr; regAgg = sqlite3GetTempRange(pParse, nArg); sqlite3ExprCodeExprList(pParse, pList, regAgg, SQLITE_ECEL_DUP); }else{ nArg = 0; regAgg = 0; } if( pF->iDistinct>=0 ){ addrNext = sqlite3VdbeMakeLabel(v); testcase( nArg==0 ); /* Error condition */ testcase( nArg>1 ); /* Also an error */ codeDistinct(pParse, pF->iDistinct, addrNext, 1, regAgg); } if( pF->pFunc->funcFlags & SQLITE_FUNC_NEEDCOLL ){ CollSeq *pColl = 0; struct ExprList_item *pItem; int j; assert( pList!=0 ); /* pList!=0 if pF->pFunc has NEEDCOLL */ for(j=0, pItem=pList->a; !pColl && jpExpr); } if( !pColl ){ pColl = pParse->db->pDfltColl; } if( regHit==0 && pAggInfo->nAccumulator ) regHit = ++pParse->nMem; sqlite3VdbeAddOp4(v, OP_CollSeq, regHit, 0, 0, (char *)pColl, P4_COLLSEQ); } sqlite3VdbeAddOp4(v, OP_AggStep0, 0, regAgg, pF->iMem, (void*)pF->pFunc, P4_FUNCDEF); sqlite3VdbeChangeP5(v, (u8)nArg); sqlite3ExprCacheAffinityChange(pParse, regAgg, nArg); sqlite3ReleaseTempRange(pParse, regAgg, nArg); if( addrNext ){ sqlite3VdbeResolveLabel(v, addrNext); sqlite3ExprCacheClear(pParse); } } /* Before populating the accumulator registers, clear the column cache. ** Otherwise, if any of the required column values are already present ** in registers, sqlite3ExprCode() may use OP_SCopy to copy the value ** to pC->iMem. 
But by the time the value is used, the original register ** may have been used, invalidating the underlying buffer holding the ** text or blob value. See ticket [883034dcb5]. ** ** Another solution would be to change the OP_SCopy used to copy cached ** values to an OP_Copy. */ if( regHit ){ addrHitTest = sqlite3VdbeAddOp1(v, OP_If, regHit); VdbeCoverage(v); } sqlite3ExprCacheClear(pParse); for(i=0, pC=pAggInfo->aCol; inAccumulator; i++, pC++){ sqlite3ExprCode(pParse, pC->pExpr, pC->iMem); } pAggInfo->directMode = 0; sqlite3ExprCacheClear(pParse); if( addrHitTest ){ sqlite3VdbeJumpHere(v, addrHitTest); } } /* ** Add a single OP_Explain instruction to the VDBE to explain a simple ** count(*) query ("SELECT count(*) FROM pTab"). */ #ifndef SQLITE_OMIT_EXPLAIN static void explainSimpleCount( Parse *pParse, /* Parse context */ Table *pTab, /* Table being queried */ Index *pIdx /* Index used to optimize scan, or NULL */ ){ if( pParse->explain==2 ){ int bCover = (pIdx!=0 && (HasRowid(pTab) || !IsPrimaryKeyIndex(pIdx))); char *zEqp = sqlite3MPrintf(pParse->db, "SCAN TABLE %s%s%s", pTab->zName, bCover ? " USING COVERING INDEX " : "", bCover ? pIdx->zName : "" ); sqlite3VdbeAddOp4( pParse->pVdbe, OP_Explain, pParse->iSelectId, 0, 0, zEqp, P4_DYNAMIC ); } } #else # define explainSimpleCount(a,b,c) #endif /* ** Generate code for the SELECT statement given in the p argument. ** ** The results are returned according to the SelectDest structure. ** See comments in sqliteInt.h for further information. ** ** This routine returns the number of errors. If any errors are ** encountered, then an appropriate error message is left in ** pParse->zErrMsg. ** ** This routine does NOT free the Select structure passed in. The ** calling function needs to do that. */ SQLITE_PRIVATE int sqlite3Select( Parse *pParse, /* The parser context */ Select *p, /* The SELECT statement being coded. */ SelectDest *pDest /* What to do with the query results */ ){ int i, j; /* Loop counters */ WhereInfo *pWInfo; /* Return from sqlite3WhereBegin() */ Vdbe *v; /* The virtual machine under construction */ int isAgg; /* True for select lists like "count(*)" */ ExprList *pEList = 0; /* List of columns to extract. */ SrcList *pTabList; /* List of tables to select from */ Expr *pWhere; /* The WHERE clause. May be NULL */ ExprList *pGroupBy; /* The GROUP BY clause. May be NULL */ Expr *pHaving; /* The HAVING clause. 
May be NULL */ int rc = 1; /* Value to return from this function */ DistinctCtx sDistinct; /* Info on how to code the DISTINCT keyword */ SortCtx sSort; /* Info on how to code the ORDER BY clause */ AggInfo sAggInfo; /* Information used by aggregate queries */ int iEnd; /* Address of the end of the query */ sqlite3 *db; /* The database connection */ #ifndef SQLITE_OMIT_EXPLAIN int iRestoreSelectId = pParse->iSelectId; pParse->iSelectId = pParse->iNextSelectId++; #endif db = pParse->db; if( p==0 || db->mallocFailed || pParse->nErr ){ return 1; } if( sqlite3AuthCheck(pParse, SQLITE_SELECT, 0, 0, 0) ) return 1; memset(&sAggInfo, 0, sizeof(sAggInfo)); #if SELECTTRACE_ENABLED pParse->nSelectIndent++; SELECTTRACE(1,pParse,p, ("begin processing:\n")); if( sqlite3SelectTrace & 0x100 ){ sqlite3TreeViewSelect(0, p, 0); } #endif assert( p->pOrderBy==0 || pDest->eDest!=SRT_DistFifo ); assert( p->pOrderBy==0 || pDest->eDest!=SRT_Fifo ); assert( p->pOrderBy==0 || pDest->eDest!=SRT_DistQueue ); assert( p->pOrderBy==0 || pDest->eDest!=SRT_Queue ); if( IgnorableOrderby(pDest) ){ assert(pDest->eDest==SRT_Exists || pDest->eDest==SRT_Union || pDest->eDest==SRT_Except || pDest->eDest==SRT_Discard || pDest->eDest==SRT_Queue || pDest->eDest==SRT_DistFifo || pDest->eDest==SRT_DistQueue || pDest->eDest==SRT_Fifo); /* If ORDER BY makes no difference in the output then neither does ** DISTINCT so it can be removed too. */ sqlite3ExprListDelete(db, p->pOrderBy); p->pOrderBy = 0; p->selFlags &= ~SF_Distinct; } sqlite3SelectPrep(pParse, p, 0); memset(&sSort, 0, sizeof(sSort)); sSort.pOrderBy = p->pOrderBy; pTabList = p->pSrc; if( pParse->nErr || db->mallocFailed ){ goto select_end; } assert( p->pEList!=0 ); isAgg = (p->selFlags & SF_Aggregate)!=0; #if SELECTTRACE_ENABLED if( sqlite3SelectTrace & 0x100 ){ SELECTTRACE(0x100,pParse,p, ("after name resolution:\n")); sqlite3TreeViewSelect(0, p, 0); } #endif /* If writing to memory or generating a set ** only a single column may be output. */ #ifndef SQLITE_OMIT_SUBQUERY if( checkForMultiColumnSelectError(pParse, pDest, p->pEList->nExpr) ){ goto select_end; } #endif /* Try to flatten subqueries in the FROM clause up into the main query */ #if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) for(i=0; !p->pPrior && inSrc; i++){ struct SrcList_item *pItem = &pTabList->a[i]; Select *pSub = pItem->pSelect; int isAggSub; if( pSub==0 ) continue; isAggSub = (pSub->selFlags & SF_Aggregate)!=0; if( flattenSubquery(pParse, p, i, isAgg, isAggSub) ){ /* This subquery can be absorbed into its parent. */ if( isAggSub ){ isAgg = 1; p->selFlags |= SF_Aggregate; } i = -1; } pTabList = p->pSrc; if( db->mallocFailed ) goto select_end; if( !IgnorableOrderby(pDest) ){ sSort.pOrderBy = p->pOrderBy; } } #endif /* Get a pointer the VDBE under construction, allocating a new VDBE if one ** does not already exist */ v = sqlite3GetVdbe(pParse); if( v==0 ) goto select_end; #ifndef SQLITE_OMIT_COMPOUND_SELECT /* Handle compound SELECT statements using the separate multiSelect() ** procedure. 
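** A compound SELECT is two or more simple SELECTs joined by UNION, UNION ALL,
** INTERSECT or EXCEPT, as in this illustrative statement (hypothetical
** tables):
**
**     SELECT a FROM t1 UNION SELECT b FROM t2 ORDER BY 1;
**
** When p->pPrior is set, code generation is handed off to multiSelect() and
** this routine returns as soon as that call completes.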
*/ if( p->pPrior ){ rc = multiSelect(pParse, p, pDest); explainSetInteger(pParse->iSelectId, iRestoreSelectId); #if SELECTTRACE_ENABLED SELECTTRACE(1,pParse,p,("end compound-select processing\n")); pParse->nSelectIndent--; #endif return rc; } #endif /* Generate code for all sub-queries in the FROM clause */ #if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) for(i=0; inSrc; i++){ struct SrcList_item *pItem = &pTabList->a[i]; SelectDest dest; Select *pSub = pItem->pSelect; if( pSub==0 ) continue; /* Sometimes the code for a subquery will be generated more than ** once, if the subquery is part of the WHERE clause in a LEFT JOIN, ** for example. In that case, do not regenerate the code to manifest ** a view or the co-routine to implement a view. The first instance ** is sufficient, though the subroutine to manifest the view does need ** to be invoked again. */ if( pItem->addrFillSub ){ if( pItem->viaCoroutine==0 ){ sqlite3VdbeAddOp2(v, OP_Gosub, pItem->regReturn, pItem->addrFillSub); } continue; } /* Increment Parse.nHeight by the height of the largest expression ** tree referred to by this, the parent select. The child select ** may contain expression trees of at most ** (SQLITE_MAX_EXPR_DEPTH-Parse.nHeight) height. This is a bit ** more conservative than necessary, but much easier than enforcing ** an exact limit. */ pParse->nHeight += sqlite3SelectExprHeight(p); /* Make copies of constant WHERE-clause terms in the outer query down ** inside the subquery. This can help the subquery to run more efficiently. */ if( (pItem->jointype & JT_OUTER)==0 && pushDownWhereTerms(db, pSub, p->pWhere, pItem->iCursor) ){ #if SELECTTRACE_ENABLED if( sqlite3SelectTrace & 0x100 ){ SELECTTRACE(0x100,pParse,p,("After WHERE-clause push-down:\n")); sqlite3TreeViewSelect(0, p, 0); } #endif } /* Generate code to implement the subquery */ if( pTabList->nSrc==1 && (p->selFlags & SF_All)==0 && OptimizationEnabled(db, SQLITE_SubqCoroutine) ){ /* Implement a co-routine that will return a single row of the result ** set on each invocation. */ int addrTop = sqlite3VdbeCurrentAddr(v)+1; pItem->regReturn = ++pParse->nMem; sqlite3VdbeAddOp3(v, OP_InitCoroutine, pItem->regReturn, 0, addrTop); VdbeComment((v, "%s", pItem->pTab->zName)); pItem->addrFillSub = addrTop; sqlite3SelectDestInit(&dest, SRT_Coroutine, pItem->regReturn); explainSetInteger(pItem->iSelectId, (u8)pParse->iNextSelectId); sqlite3Select(pParse, pSub, &dest); pItem->pTab->nRowLogEst = sqlite3LogEst(pSub->nSelectRow); pItem->viaCoroutine = 1; pItem->regResult = dest.iSdst; sqlite3VdbeAddOp1(v, OP_EndCoroutine, pItem->regReturn); sqlite3VdbeJumpHere(v, addrTop-1); sqlite3ClearTempRegCache(pParse); }else{ /* Generate a subroutine that will fill an ephemeral table with ** the content of this subquery. pItem->addrFillSub will point ** to the address of the generated subroutine. pItem->regReturn ** is a register allocated to hold the subroutine return address */ int topAddr; int onceAddr = 0; int retAddr; assert( pItem->addrFillSub==0 ); pItem->regReturn = ++pParse->nMem; topAddr = sqlite3VdbeAddOp2(v, OP_Integer, 0, pItem->regReturn); pItem->addrFillSub = topAddr+1; if( pItem->isCorrelated==0 ){ /* If the subquery is not correlated and if we are not inside of ** a trigger, then we only need to compute the value of the subquery ** once. 
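** Illustrative example (hypothetical tables): in
**
**     SELECT * FROM t1, (SELECT max(b) AS mx FROM t2) AS s;
**
** the subquery s does not refer to t1, so the subroutine that fills its
** ephemeral table is guarded by the OP_Once generated here and runs a single
** time instead of once per row of t1.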
*/ onceAddr = sqlite3CodeOnce(pParse); VdbeCoverage(v); VdbeComment((v, "materialize \"%s\"", pItem->pTab->zName)); }else{ VdbeNoopComment((v, "materialize \"%s\"", pItem->pTab->zName)); } sqlite3SelectDestInit(&dest, SRT_EphemTab, pItem->iCursor); explainSetInteger(pItem->iSelectId, (u8)pParse->iNextSelectId); sqlite3Select(pParse, pSub, &dest); pItem->pTab->nRowLogEst = sqlite3LogEst(pSub->nSelectRow); if( onceAddr ) sqlite3VdbeJumpHere(v, onceAddr); retAddr = sqlite3VdbeAddOp1(v, OP_Return, pItem->regReturn); VdbeComment((v, "end %s", pItem->pTab->zName)); sqlite3VdbeChangeP1(v, topAddr, retAddr); sqlite3ClearTempRegCache(pParse); } if( db->mallocFailed ) goto select_end; pParse->nHeight -= sqlite3SelectExprHeight(p); } #endif /* Various elements of the SELECT copied into local variables for ** convenience */ pEList = p->pEList; pWhere = p->pWhere; pGroupBy = p->pGroupBy; pHaving = p->pHaving; sDistinct.isTnct = (p->selFlags & SF_Distinct)!=0; #if SELECTTRACE_ENABLED if( sqlite3SelectTrace & 0x400 ){ SELECTTRACE(0x400,pParse,p,("After all FROM-clause analysis:\n")); sqlite3TreeViewSelect(0, p, 0); } #endif /* If the query is DISTINCT with an ORDER BY but is not an aggregate, and ** if the select-list is the same as the ORDER BY list, then this query ** can be rewritten as a GROUP BY. In other words, this: ** ** SELECT DISTINCT xyz FROM ... ORDER BY xyz ** ** is transformed to: ** ** SELECT xyz FROM ... GROUP BY xyz ORDER BY xyz ** ** The second form is preferred as a single index (or temp-table) may be ** used for both the ORDER BY and DISTINCT processing. As originally ** written the query must use a temp-table for at least one of the ORDER ** BY and DISTINCT, and an index or separate temp-table for the other. */ if( (p->selFlags & (SF_Distinct|SF_Aggregate))==SF_Distinct && sqlite3ExprListCompare(sSort.pOrderBy, pEList, -1)==0 ){ p->selFlags &= ~SF_Distinct; pGroupBy = p->pGroupBy = sqlite3ExprListDup(db, pEList, 0); /* Notice that even thought SF_Distinct has been cleared from p->selFlags, ** the sDistinct.isTnct is still set. Hence, isTnct represents the ** original setting of the SF_Distinct flag, not the current setting */ assert( sDistinct.isTnct ); } /* If there is an ORDER BY clause, then create an ephemeral index to ** do the sorting. But this sorting ephemeral index might end up ** being unused if the data can be extracted in pre-sorted order. ** If that is the case, then the OP_OpenEphemeral instruction will be ** changed to an OP_Noop once we figure out that the sorting index is ** not needed. The sSort.addrSortIndex variable is used to facilitate ** that change. */ if( sSort.pOrderBy ){ KeyInfo *pKeyInfo; pKeyInfo = keyInfoFromExprList(pParse, sSort.pOrderBy, 0, pEList->nExpr); sSort.iECursor = pParse->nTab++; sSort.addrSortIndex = sqlite3VdbeAddOp4(v, OP_OpenEphemeral, sSort.iECursor, sSort.pOrderBy->nExpr+1+pEList->nExpr, 0, (char*)pKeyInfo, P4_KEYINFO ); }else{ sSort.addrSortIndex = -1; } /* If the output is destined for a temporary table, open that table. */ if( pDest->eDest==SRT_EphemTab ){ sqlite3VdbeAddOp2(v, OP_OpenEphemeral, pDest->iSDParm, pEList->nExpr); } /* Set the limiter. */ iEnd = sqlite3VdbeMakeLabel(v); p->nSelectRow = LARGEST_INT64; computeLimitRegisters(pParse, p, iEnd); if( p->iLimit==0 && sSort.addrSortIndex>=0 ){ sqlite3VdbeGetOp(v, sSort.addrSortIndex)->opcode = OP_SorterOpen; sSort.sortFlags |= SORTFLAG_UseSorter; } /* Open an ephemeral index to use for the distinct set. 
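** The ephemeral table is keyed on the entire result row (the KeyInfo built
** from p->pEList below), so duplicate rows can be detected and skipped as
** they are produced -- e.g. for a query of the form
** SELECT DISTINCT a, b FROM t1.  If the WHERE-clause planner later proves
** the output is inherently distinct, this table may end up unused.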
*/ if( p->selFlags & SF_Distinct ){ sDistinct.tabTnct = pParse->nTab++; sDistinct.addrTnct = sqlite3VdbeAddOp4(v, OP_OpenEphemeral, sDistinct.tabTnct, 0, 0, (char*)keyInfoFromExprList(pParse, p->pEList,0,0), P4_KEYINFO); sqlite3VdbeChangeP5(v, BTREE_UNORDERED); sDistinct.eTnctType = WHERE_DISTINCT_UNORDERED; }else{ sDistinct.eTnctType = WHERE_DISTINCT_NOOP; } if( !isAgg && pGroupBy==0 ){ /* No aggregate functions and no GROUP BY clause */ u16 wctrlFlags = (sDistinct.isTnct ? WHERE_WANT_DISTINCT : 0); /* Begin the database scan. */ pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, sSort.pOrderBy, p->pEList, wctrlFlags, 0); if( pWInfo==0 ) goto select_end; if( sqlite3WhereOutputRowCount(pWInfo) < p->nSelectRow ){ p->nSelectRow = sqlite3WhereOutputRowCount(pWInfo); } if( sDistinct.isTnct && sqlite3WhereIsDistinct(pWInfo) ){ sDistinct.eTnctType = sqlite3WhereIsDistinct(pWInfo); } if( sSort.pOrderBy ){ sSort.nOBSat = sqlite3WhereIsOrdered(pWInfo); if( sSort.nOBSat==sSort.pOrderBy->nExpr ){ sSort.pOrderBy = 0; } } /* If sorting index that was created by a prior OP_OpenEphemeral ** instruction ended up not being needed, then change the OP_OpenEphemeral ** into an OP_Noop. */ if( sSort.addrSortIndex>=0 && sSort.pOrderBy==0 ){ sqlite3VdbeChangeToNoop(v, sSort.addrSortIndex); } /* Use the standard inner loop. */ selectInnerLoop(pParse, p, pEList, -1, &sSort, &sDistinct, pDest, sqlite3WhereContinueLabel(pWInfo), sqlite3WhereBreakLabel(pWInfo)); /* End the database scan loop. */ sqlite3WhereEnd(pWInfo); }else{ /* This case when there exist aggregate functions or a GROUP BY clause ** or both */ NameContext sNC; /* Name context for processing aggregate information */ int iAMem; /* First Mem address for storing current GROUP BY */ int iBMem; /* First Mem address for previous GROUP BY */ int iUseFlag; /* Mem address holding flag indicating that at least ** one row of the input to the aggregator has been ** processed */ int iAbortFlag; /* Mem address which causes query abort if positive */ int groupBySort; /* Rows come from source in GROUP BY order */ int addrEnd; /* End of processing for this SELECT */ int sortPTab = 0; /* Pseudotable used to decode sorting results */ int sortOut = 0; /* Output register from the sorter */ int orderByGrp = 0; /* True if the GROUP BY and ORDER BY are the same */ /* Remove any and all aliases between the result set and the ** GROUP BY clause. */ if( pGroupBy ){ int k; /* Loop counter */ struct ExprList_item *pItem; /* For looping over expression in a list */ for(k=p->pEList->nExpr, pItem=p->pEList->a; k>0; k--, pItem++){ pItem->u.x.iAlias = 0; } for(k=pGroupBy->nExpr, pItem=pGroupBy->a; k>0; k--, pItem++){ pItem->u.x.iAlias = 0; } if( p->nSelectRow>100 ) p->nSelectRow = 100; }else{ p->nSelectRow = 1; } /* If there is both a GROUP BY and an ORDER BY clause and they are ** identical, then it may be possible to disable the ORDER BY clause ** on the grounds that the GROUP BY will cause elements to come out ** in the correct order. It also may not - the GROUP BY might use a ** database index that causes rows to be grouped together as required ** but not actually sorted. Either way, record the fact that the ** ORDER BY and GROUP BY clauses are the same by setting the orderByGrp ** variable. 
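** Illustrative example (hypothetical table):
**
**     SELECT a, count(*) FROM t1 GROUP BY a ORDER BY a;
**
** The GROUP BY and ORDER BY lists match, so orderByGrp is set and the
** separate ORDER BY sorter can be cancelled later if the grouping loop is
** already producing rows in sorted order.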
*/ if( sqlite3ExprListCompare(pGroupBy, sSort.pOrderBy, -1)==0 ){ orderByGrp = 1; } /* Create a label to jump to when we want to abort the query */ addrEnd = sqlite3VdbeMakeLabel(v); /* Convert TK_COLUMN nodes into TK_AGG_COLUMN and make entries in ** sAggInfo for all TK_AGG_FUNCTION nodes in expressions of the ** SELECT statement. */ memset(&sNC, 0, sizeof(sNC)); sNC.pParse = pParse; sNC.pSrcList = pTabList; sNC.pAggInfo = &sAggInfo; sAggInfo.mnReg = pParse->nMem+1; sAggInfo.nSortingColumn = pGroupBy ? pGroupBy->nExpr : 0; sAggInfo.pGroupBy = pGroupBy; sqlite3ExprAnalyzeAggList(&sNC, pEList); sqlite3ExprAnalyzeAggList(&sNC, sSort.pOrderBy); if( pHaving ){ sqlite3ExprAnalyzeAggregates(&sNC, pHaving); } sAggInfo.nAccumulator = sAggInfo.nColumn; for(i=0; ix.pList); sNC.ncFlags &= ~NC_InAggFunc; } sAggInfo.mxReg = pParse->nMem; if( db->mallocFailed ) goto select_end; /* Processing for aggregates with GROUP BY is very different and ** much more complex than aggregates without a GROUP BY. */ if( pGroupBy ){ KeyInfo *pKeyInfo; /* Keying information for the group by clause */ int j1; /* A-vs-B comparision jump */ int addrOutputRow; /* Start of subroutine that outputs a result row */ int regOutputRow; /* Return address register for output subroutine */ int addrSetAbort; /* Set the abort flag and return */ int addrTopOfLoop; /* Top of the input loop */ int addrSortingIdx; /* The OP_OpenEphemeral for the sorting index */ int addrReset; /* Subroutine for resetting the accumulator */ int regReset; /* Return address register for reset subroutine */ /* If there is a GROUP BY clause we might need a sorting index to ** implement it. Allocate that sorting index now. If it turns out ** that we do not need it after all, the OP_SorterOpen instruction ** will be converted into a Noop. */ sAggInfo.sortingIdx = pParse->nTab++; pKeyInfo = keyInfoFromExprList(pParse, pGroupBy, 0, sAggInfo.nColumn); addrSortingIdx = sqlite3VdbeAddOp4(v, OP_SorterOpen, sAggInfo.sortingIdx, sAggInfo.nSortingColumn, 0, (char*)pKeyInfo, P4_KEYINFO); /* Initialize memory locations used by GROUP BY aggregate processing */ iUseFlag = ++pParse->nMem; iAbortFlag = ++pParse->nMem; regOutputRow = ++pParse->nMem; addrOutputRow = sqlite3VdbeMakeLabel(v); regReset = ++pParse->nMem; addrReset = sqlite3VdbeMakeLabel(v); iAMem = pParse->nMem + 1; pParse->nMem += pGroupBy->nExpr; iBMem = pParse->nMem + 1; pParse->nMem += pGroupBy->nExpr; sqlite3VdbeAddOp2(v, OP_Integer, 0, iAbortFlag); VdbeComment((v, "clear abort flag")); sqlite3VdbeAddOp2(v, OP_Integer, 0, iUseFlag); VdbeComment((v, "indicate accumulator empty")); sqlite3VdbeAddOp3(v, OP_Null, 0, iAMem, iAMem+pGroupBy->nExpr-1); /* Begin a loop that will extract all source rows in GROUP BY order. ** This might involve two separate loops with an OP_Sort in between, or ** it might be a single loop that uses an index to extract information ** in the right order to begin with. */ sqlite3VdbeAddOp2(v, OP_Gosub, regReset, addrReset); pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, pGroupBy, 0, WHERE_GROUPBY | (orderByGrp ? WHERE_SORTBYGROUP : 0), 0 ); if( pWInfo==0 ) goto select_end; if( sqlite3WhereIsOrdered(pWInfo)==pGroupBy->nExpr ){ /* The optimizer is able to deliver rows in group by order so ** we do not have to sort. The OP_OpenEphemeral table will be ** cancelled later because we still need to use the pKeyInfo */ groupBySort = 0; }else{ /* Rows are coming out in undetermined order. 
We have to push ** each row into a sorting index, terminate the first loop, ** then loop over the sorting index in order to get the output ** in sorted order */ int regBase; int regRecord; int nCol; int nGroupBy; explainTempTable(pParse, (sDistinct.isTnct && (p->selFlags&SF_Distinct)==0) ? "DISTINCT" : "GROUP BY"); groupBySort = 1; nGroupBy = pGroupBy->nExpr; nCol = nGroupBy; j = nGroupBy; for(i=0; i=j ){ nCol++; j++; } } regBase = sqlite3GetTempRange(pParse, nCol); sqlite3ExprCacheClear(pParse); sqlite3ExprCodeExprList(pParse, pGroupBy, regBase, 0); j = nGroupBy; for(i=0; iiSorterColumn>=j ){ int r1 = j + regBase; int r2; r2 = sqlite3ExprCodeGetColumn(pParse, pCol->pTab, pCol->iColumn, pCol->iTable, r1, 0); if( r1!=r2 ){ sqlite3VdbeAddOp2(v, OP_SCopy, r2, r1); } j++; } } regRecord = sqlite3GetTempReg(pParse); sqlite3VdbeAddOp3(v, OP_MakeRecord, regBase, nCol, regRecord); sqlite3VdbeAddOp2(v, OP_SorterInsert, sAggInfo.sortingIdx, regRecord); sqlite3ReleaseTempReg(pParse, regRecord); sqlite3ReleaseTempRange(pParse, regBase, nCol); sqlite3WhereEnd(pWInfo); sAggInfo.sortingIdxPTab = sortPTab = pParse->nTab++; sortOut = sqlite3GetTempReg(pParse); sqlite3VdbeAddOp3(v, OP_OpenPseudo, sortPTab, sortOut, nCol); sqlite3VdbeAddOp2(v, OP_SorterSort, sAggInfo.sortingIdx, addrEnd); VdbeComment((v, "GROUP BY sort")); VdbeCoverage(v); sAggInfo.useSortingIdx = 1; sqlite3ExprCacheClear(pParse); } /* If the index or temporary table used by the GROUP BY sort ** will naturally deliver rows in the order required by the ORDER BY ** clause, cancel the ephemeral table open coded earlier. ** ** This is an optimization - the correct answer should result regardless. ** Use the SQLITE_GroupByOrder flag with SQLITE_TESTCTRL_OPTIMIZER to ** disable this optimization for testing purposes. */ if( orderByGrp && OptimizationEnabled(db, SQLITE_GroupByOrder) && (groupBySort || sqlite3WhereIsSorted(pWInfo)) ){ sSort.pOrderBy = 0; sqlite3VdbeChangeToNoop(v, sSort.addrSortIndex); } /* Evaluate the current GROUP BY terms and store in b0, b1, b2... ** (b0 is memory location iBMem+0, b1 is iBMem+1, and so forth) ** Then compare the current GROUP BY terms against the GROUP BY terms ** from the previous row currently stored in a0, a1, a2... */ addrTopOfLoop = sqlite3VdbeCurrentAddr(v); sqlite3ExprCacheClear(pParse); if( groupBySort ){ sqlite3VdbeAddOp3(v, OP_SorterData, sAggInfo.sortingIdx, sortOut, sortPTab); } for(j=0; jnExpr; j++){ if( groupBySort ){ sqlite3VdbeAddOp3(v, OP_Column, sortPTab, j, iBMem+j); }else{ sAggInfo.directMode = 1; sqlite3ExprCode(pParse, pGroupBy->a[j].pExpr, iBMem+j); } } sqlite3VdbeAddOp4(v, OP_Compare, iAMem, iBMem, pGroupBy->nExpr, (char*)sqlite3KeyInfoRef(pKeyInfo), P4_KEYINFO); j1 = sqlite3VdbeCurrentAddr(v); sqlite3VdbeAddOp3(v, OP_Jump, j1+1, 0, j1+1); VdbeCoverage(v); /* Generate code that runs whenever the GROUP BY changes. ** Changes in the GROUP BY are detected by the previous code ** block. If there were no changes, this block is skipped. ** ** This code copies current group by terms in b0,b1,b2,... ** over to a0,a1,a2. It then calls the output subroutine ** and resets the aggregate accumulator registers in preparation ** for the next GROUP BY batch. 
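** Schematically, the per-row logic generated here is (illustrative outline,
** not a literal opcode listing):
**
**     compute b0,b1,... from the current input row
**     if (b0,b1,...) differs from (a0,a1,...):
**         copy b -> a; Gosub output-row; abort if requested; Gosub reset
**     update the aggregate accumulators from the current row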
*/ sqlite3ExprCodeMove(pParse, iBMem, iAMem, pGroupBy->nExpr); sqlite3VdbeAddOp2(v, OP_Gosub, regOutputRow, addrOutputRow); VdbeComment((v, "output one row")); sqlite3VdbeAddOp2(v, OP_IfPos, iAbortFlag, addrEnd); VdbeCoverage(v); VdbeComment((v, "check abort flag")); sqlite3VdbeAddOp2(v, OP_Gosub, regReset, addrReset); VdbeComment((v, "reset accumulator")); /* Update the aggregate accumulators based on the content of ** the current row */ sqlite3VdbeJumpHere(v, j1); updateAccumulator(pParse, &sAggInfo); sqlite3VdbeAddOp2(v, OP_Integer, 1, iUseFlag); VdbeComment((v, "indicate data in accumulator")); /* End of the loop */ if( groupBySort ){ sqlite3VdbeAddOp2(v, OP_SorterNext, sAggInfo.sortingIdx, addrTopOfLoop); VdbeCoverage(v); }else{ sqlite3WhereEnd(pWInfo); sqlite3VdbeChangeToNoop(v, addrSortingIdx); } /* Output the final row of result */ sqlite3VdbeAddOp2(v, OP_Gosub, regOutputRow, addrOutputRow); VdbeComment((v, "output final row")); /* Jump over the subroutines */ sqlite3VdbeAddOp2(v, OP_Goto, 0, addrEnd); /* Generate a subroutine that outputs a single row of the result ** set. This subroutine first looks at the iUseFlag. If iUseFlag ** is less than or equal to zero, the subroutine is a no-op. If ** the processing calls for the query to abort, this subroutine ** increments the iAbortFlag memory location before returning in ** order to signal the caller to abort. */ addrSetAbort = sqlite3VdbeCurrentAddr(v); sqlite3VdbeAddOp2(v, OP_Integer, 1, iAbortFlag); VdbeComment((v, "set abort flag")); sqlite3VdbeAddOp1(v, OP_Return, regOutputRow); sqlite3VdbeResolveLabel(v, addrOutputRow); addrOutputRow = sqlite3VdbeCurrentAddr(v); sqlite3VdbeAddOp2(v, OP_IfPos, iUseFlag, addrOutputRow+2); VdbeCoverage(v); VdbeComment((v, "Groupby result generator entry point")); sqlite3VdbeAddOp1(v, OP_Return, regOutputRow); finalizeAggFunctions(pParse, &sAggInfo); sqlite3ExprIfFalse(pParse, pHaving, addrOutputRow+1, SQLITE_JUMPIFNULL); selectInnerLoop(pParse, p, p->pEList, -1, &sSort, &sDistinct, pDest, addrOutputRow+1, addrSetAbort); sqlite3VdbeAddOp1(v, OP_Return, regOutputRow); VdbeComment((v, "end groupby result generator")); /* Generate a subroutine that will reset the group-by accumulator */ sqlite3VdbeResolveLabel(v, addrReset); resetAccumulator(pParse, &sAggInfo); sqlite3VdbeAddOp1(v, OP_Return, regReset); } /* endif pGroupBy. Begin aggregate queries without GROUP BY: */ else { ExprList *pDel = 0; #ifndef SQLITE_OMIT_BTREECOUNT Table *pTab; if( (pTab = isSimpleCount(p, &sAggInfo))!=0 ){ /* If isSimpleCount() returns a pointer to a Table structure, then ** the SQL statement is of the form: ** ** SELECT count(*) FROM ** ** where the Table structure returned represents table . ** ** This statement is so common that it is optimized specially. The ** OP_Count instruction is executed either on the intkey table that ** contains the data for table or on one of its indexes. It ** is better to execute the op on an index, as indexes are almost ** always spread across less pages than their corresponding tables. */ const int iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema); const int iCsr = pParse->nTab++; /* Cursor to scan b-tree */ Index *pIdx; /* Iterator variable */ KeyInfo *pKeyInfo = 0; /* Keyinfo for scanned index */ Index *pBest = 0; /* Best index found so far */ int iRoot = pTab->tnum; /* Root page of scanned b-tree */ sqlite3CodeVerifySchema(pParse, iDb); sqlite3TableLock(pParse, iDb, pTab->tnum, 0, pTab->zName); /* Search for the index that has the lowest scan cost. 
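** For an illustrative schema t1(a,b) with CREATE INDEX i1 ON t1(a), the query
** SELECT count(*) FROM t1 can be answered with a single OP_Count against the
** b-tree of i1, which normally spans fewer pages than the table itself.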
** ** (2011-04-15) Do not do a full scan of an unordered index. ** ** (2013-10-03) Do not count the entries in a partial index. ** ** In practice the KeyInfo structure will not be used. It is only ** passed to keep OP_OpenRead happy. */ if( !HasRowid(pTab) ) pBest = sqlite3PrimaryKeyIndex(pTab); for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ if( pIdx->bUnordered==0 && pIdx->szIdxRowszTabRow && pIdx->pPartIdxWhere==0 && (!pBest || pIdx->szIdxRowszIdxRow) ){ pBest = pIdx; } } if( pBest ){ iRoot = pBest->tnum; pKeyInfo = sqlite3KeyInfoOfIndex(pParse, pBest); } /* Open a read-only cursor, execute the OP_Count, close the cursor. */ sqlite3VdbeAddOp4Int(v, OP_OpenRead, iCsr, iRoot, iDb, 1); if( pKeyInfo ){ sqlite3VdbeChangeP4(v, -1, (char *)pKeyInfo, P4_KEYINFO); } sqlite3VdbeAddOp2(v, OP_Count, iCsr, sAggInfo.aFunc[0].iMem); sqlite3VdbeAddOp1(v, OP_Close, iCsr); explainSimpleCount(pParse, pTab, pBest); }else #endif /* SQLITE_OMIT_BTREECOUNT */ { /* Check if the query is of one of the following forms: ** ** SELECT min(x) FROM ... ** SELECT max(x) FROM ... ** ** If it is, then ask the code in where.c to attempt to sort results ** as if there was an "ORDER ON x" or "ORDER ON x DESC" clause. ** If where.c is able to produce results sorted in this order, then ** add vdbe code to break out of the processing loop after the ** first iteration (since the first iteration of the loop is ** guaranteed to operate on the row with the minimum or maximum ** value of x, the only row required). ** ** A special flag must be passed to sqlite3WhereBegin() to slightly ** modify behavior as follows: ** ** + If the query is a "SELECT min(x)", then the loop coded by ** where.c should not iterate over any values with a NULL value ** for x. ** ** + The optimizer code in where.c (the thing that decides which ** index or indices to use) should place a different priority on ** satisfying the 'ORDER BY' clause than it does in other cases. ** Refer to code and comments in where.c for details. */ ExprList *pMinMax = 0; u8 flag = WHERE_ORDERBY_NORMAL; assert( p->pGroupBy==0 ); assert( flag==0 ); if( p->pHaving==0 ){ flag = minMaxQuery(&sAggInfo, &pMinMax); } assert( flag==0 || (pMinMax!=0 && pMinMax->nExpr==1) ); if( flag ){ pMinMax = sqlite3ExprListDup(db, pMinMax, 0); pDel = pMinMax; if( pMinMax && !db->mallocFailed ){ pMinMax->a[0].sortOrder = flag!=WHERE_ORDERBY_MIN ?1:0; pMinMax->a[0].pExpr->op = TK_COLUMN; } } /* This case runs if the aggregate has no GROUP BY clause. The ** processing is much simpler since there is only a single row ** of output. */ resetAccumulator(pParse, &sAggInfo); pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, pMinMax,0,flag,0); if( pWInfo==0 ){ sqlite3ExprListDelete(db, pDel); goto select_end; } updateAccumulator(pParse, &sAggInfo); assert( pMinMax==0 || pMinMax->nExpr==1 ); if( sqlite3WhereIsOrdered(pWInfo)>0 ){ sqlite3VdbeAddOp2(v, OP_Goto, 0, sqlite3WhereBreakLabel(pWInfo)); VdbeComment((v, "%s() by index", (flag==WHERE_ORDERBY_MIN?"min":"max"))); } sqlite3WhereEnd(pWInfo); finalizeAggFunctions(pParse, &sAggInfo); } sSort.pOrderBy = 0; sqlite3ExprIfFalse(pParse, pHaving, addrEnd, SQLITE_JUMPIFNULL); selectInnerLoop(pParse, p, p->pEList, -1, 0, 0, pDest, addrEnd, addrEnd); sqlite3ExprListDelete(db, pDel); } sqlite3VdbeResolveLabel(v, addrEnd); } /* endif aggregate query */ if( sDistinct.eTnctType==WHERE_DISTINCT_UNORDERED ){ explainTempTable(pParse, "DISTINCT"); } /* If there is an ORDER BY clause, then we need to sort the results ** and send them to the callback one by one. 
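** The rows were accumulated earlier in the sorter or ephemeral index attached
** to sSort.iECursor; generateSortTail() below reads them back in key order
** and hands each one to the destination described by pDest.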
*/ if( sSort.pOrderBy ){ explainTempTable(pParse, sSort.nOBSat>0 ? "RIGHT PART OF ORDER BY":"ORDER BY"); generateSortTail(pParse, p, &sSort, pEList->nExpr, pDest); } /* Jump here to skip this query */ sqlite3VdbeResolveLabel(v, iEnd); /* The SELECT has been coded. If there is an error in the Parse structure, ** set the return code to 1. Otherwise 0. */ rc = (pParse->nErr>0); /* Control jumps to here if an error is encountered above, or upon ** successful coding of the SELECT. */ select_end: explainSetInteger(pParse->iSelectId, iRestoreSelectId); /* Identify column names if results of the SELECT are to be output. */ if( rc==SQLITE_OK && pDest->eDest==SRT_Output ){ generateColumnNames(pParse, pTabList, pEList); } sqlite3DbFree(db, sAggInfo.aCol); sqlite3DbFree(db, sAggInfo.aFunc); #if SELECTTRACE_ENABLED SELECTTRACE(1,pParse,p,("end processing\n")); pParse->nSelectIndent--; #endif return rc; } /************** End of select.c **********************************************/ /************** Begin file table.c *******************************************/ /* ** 2001 September 15 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** This file contains the sqlite3_get_table() and sqlite3_free_table() ** interface routines. These are just wrappers around the main ** interface routine of sqlite3_exec(). ** ** These routines are in a separate files so that they will not be linked ** if they are not used. */ /* #include "sqliteInt.h" */ /* #include */ /* #include */ #ifndef SQLITE_OMIT_GET_TABLE /* ** This structure is used to pass data from sqlite3_get_table() through ** to the callback function is uses to build the result. */ typedef struct TabResult { char **azResult; /* Accumulated output */ char *zErrMsg; /* Error message text, if an error occurs */ u32 nAlloc; /* Slots allocated for azResult[] */ u32 nRow; /* Number of rows in the result */ u32 nColumn; /* Number of columns in the result */ u32 nData; /* Slots used in azResult[]. (nRow+1)*nColumn */ int rc; /* Return code from sqlite3_exec() */ } TabResult; /* ** This routine is called once for each row in the result table. Its job ** is to fill in the TabResult structure appropriately, allocating new ** memory as necessary. */ static int sqlite3_get_table_cb(void *pArg, int nCol, char **argv, char **colv){ TabResult *p = (TabResult*)pArg; /* Result accumulator */ int need; /* Slots needed in p->azResult[] */ int i; /* Loop counter */ char *z; /* A single column of result */ /* Make sure there is enough space in p->azResult to hold everything ** we need to remember from this invocation of the callback. */ if( p->nRow==0 && argv!=0 ){ need = nCol*2; }else{ need = nCol; } if( p->nData + need > p->nAlloc ){ char **azNew; p->nAlloc = p->nAlloc*2 + need; azNew = sqlite3_realloc64( p->azResult, sizeof(char*)*p->nAlloc ); if( azNew==0 ) goto malloc_failed; p->azResult = azNew; } /* If this is the first row, then generate an extra row containing ** the names of all columns. 
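** The finished table handed back to the sqlite3_get_table() caller therefore
** starts with one entry per column name, followed by the row data.
** Illustrative layout for a 2-column, 2-row result:
**
**     azResult[0]="a", azResult[1]="b",    -- column names
**     azResult[2..3] = row 0,  azResult[4..5] = row 1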
*/ if( p->nRow==0 ){ p->nColumn = nCol; for(i=0; iazResult[p->nData++] = z; } }else if( (int)p->nColumn!=nCol ){ sqlite3_free(p->zErrMsg); p->zErrMsg = sqlite3_mprintf( "sqlite3_get_table() called with two or more incompatible queries" ); p->rc = SQLITE_ERROR; return 1; } /* Copy over the row data */ if( argv!=0 ){ for(i=0; iazResult[p->nData++] = z; } p->nRow++; } return 0; malloc_failed: p->rc = SQLITE_NOMEM; return 1; } /* ** Query the database. But instead of invoking a callback for each row, ** malloc() for space to hold the result and return the entire results ** at the conclusion of the call. ** ** The result that is written to ***pazResult is held in memory obtained ** from malloc(). But the caller cannot free this memory directly. ** Instead, the entire table should be passed to sqlite3_free_table() when ** the calling procedure is finished using it. */ SQLITE_API int SQLITE_STDCALL sqlite3_get_table( sqlite3 *db, /* The database on which the SQL executes */ const char *zSql, /* The SQL to be executed */ char ***pazResult, /* Write the result table here */ int *pnRow, /* Write the number of rows in the result here */ int *pnColumn, /* Write the number of columns of result here */ char **pzErrMsg /* Write error messages here */ ){ int rc; TabResult res; #ifdef SQLITE_ENABLE_API_ARMOR if( !sqlite3SafetyCheckOk(db) || pazResult==0 ) return SQLITE_MISUSE_BKPT; #endif *pazResult = 0; if( pnColumn ) *pnColumn = 0; if( pnRow ) *pnRow = 0; if( pzErrMsg ) *pzErrMsg = 0; res.zErrMsg = 0; res.nRow = 0; res.nColumn = 0; res.nData = 1; res.nAlloc = 20; res.rc = SQLITE_OK; res.azResult = sqlite3_malloc64(sizeof(char*)*res.nAlloc ); if( res.azResult==0 ){ db->errCode = SQLITE_NOMEM; return SQLITE_NOMEM; } res.azResult[0] = 0; rc = sqlite3_exec(db, zSql, sqlite3_get_table_cb, &res, pzErrMsg); assert( sizeof(res.azResult[0])>= sizeof(res.nData) ); res.azResult[0] = SQLITE_INT_TO_PTR(res.nData); if( (rc&0xff)==SQLITE_ABORT ){ sqlite3_free_table(&res.azResult[1]); if( res.zErrMsg ){ if( pzErrMsg ){ sqlite3_free(*pzErrMsg); *pzErrMsg = sqlite3_mprintf("%s",res.zErrMsg); } sqlite3_free(res.zErrMsg); } db->errCode = res.rc; /* Assume 32-bit assignment is atomic */ return res.rc; } sqlite3_free(res.zErrMsg); if( rc!=SQLITE_OK ){ sqlite3_free_table(&res.azResult[1]); return rc; } if( res.nAlloc>res.nData ){ char **azNew; azNew = sqlite3_realloc64( res.azResult, sizeof(char*)*res.nData ); if( azNew==0 ){ sqlite3_free_table(&res.azResult[1]); db->errCode = SQLITE_NOMEM; return SQLITE_NOMEM; } res.azResult = azNew; } *pazResult = &res.azResult[1]; if( pnColumn ) *pnColumn = res.nColumn; if( pnRow ) *pnRow = res.nRow; return rc; } /* ** This routine frees the space the sqlite3_get_table() malloced. */ SQLITE_API void SQLITE_STDCALL sqlite3_free_table( char **azResult /* Result returned from sqlite3_get_table() */ ){ if( azResult ){ int i, n; azResult--; assert( azResult!=0 ); n = SQLITE_PTR_TO_INT(azResult[0]); for(i=1; ipNext; sqlite3ExprDelete(db, pTmp->pWhere); sqlite3ExprListDelete(db, pTmp->pExprList); sqlite3SelectDelete(db, pTmp->pSelect); sqlite3IdListDelete(db, pTmp->pIdList); sqlite3DbFree(db, pTmp); } } /* ** Given table pTab, return a list of all the triggers attached to ** the table. The list is connected by Trigger.pNext pointers. ** ** All of the triggers on pTab that are in the same database as pTab ** are already attached to pTab->pTrigger. But there might be additional ** triggers on pTab in the TEMP schema. 
This routine prepends all ** TEMP triggers on pTab to the beginning of the pTab->pTrigger list ** and returns the combined list. ** ** To state it another way: This routine returns a list of all triggers ** that fire off of pTab. The list will include any TEMP triggers on ** pTab as well as the triggers lised in pTab->pTrigger. */ SQLITE_PRIVATE Trigger *sqlite3TriggerList(Parse *pParse, Table *pTab){ Schema * const pTmpSchema = pParse->db->aDb[1].pSchema; Trigger *pList = 0; /* List of triggers to return */ if( pParse->disableTriggers ){ return 0; } if( pTmpSchema!=pTab->pSchema ){ HashElem *p; assert( sqlite3SchemaMutexHeld(pParse->db, 0, pTmpSchema) ); for(p=sqliteHashFirst(&pTmpSchema->trigHash); p; p=sqliteHashNext(p)){ Trigger *pTrig = (Trigger *)sqliteHashData(p); if( pTrig->pTabSchema==pTab->pSchema && 0==sqlite3StrICmp(pTrig->table, pTab->zName) ){ pTrig->pNext = (pList ? pList : pTab->pTrigger); pList = pTrig; } } } return (pList ? pList : pTab->pTrigger); } /* ** This is called by the parser when it sees a CREATE TRIGGER statement ** up to the point of the BEGIN before the trigger actions. A Trigger ** structure is generated based on the information available and stored ** in pParse->pNewTrigger. After the trigger actions have been parsed, the ** sqlite3FinishTrigger() function is called to complete the trigger ** construction process. */ SQLITE_PRIVATE void sqlite3BeginTrigger( Parse *pParse, /* The parse context of the CREATE TRIGGER statement */ Token *pName1, /* The name of the trigger */ Token *pName2, /* The name of the trigger */ int tr_tm, /* One of TK_BEFORE, TK_AFTER, TK_INSTEAD */ int op, /* One of TK_INSERT, TK_UPDATE, TK_DELETE */ IdList *pColumns, /* column list if this is an UPDATE OF trigger */ SrcList *pTableName,/* The name of the table/view the trigger applies to */ Expr *pWhen, /* WHEN clause */ int isTemp, /* True if the TEMPORARY keyword is present */ int noErr /* Suppress errors if the trigger already exists */ ){ Trigger *pTrigger = 0; /* The new trigger */ Table *pTab; /* Table that the trigger fires off of */ char *zName = 0; /* Name of the trigger */ sqlite3 *db = pParse->db; /* The database connection */ int iDb; /* The database to store the trigger in */ Token *pName; /* The unqualified db name */ DbFixer sFix; /* State vector for the DB fixer */ int iTabDb; /* Index of the database holding pTab */ assert( pName1!=0 ); /* pName1->z might be NULL, but not pName1 itself */ assert( pName2!=0 ); assert( op==TK_INSERT || op==TK_UPDATE || op==TK_DELETE ); assert( op>0 && op<0xff ); if( isTemp ){ /* If TEMP was specified, then the trigger name may not be qualified. */ if( pName2->n>0 ){ sqlite3ErrorMsg(pParse, "temporary trigger may not have qualified name"); goto trigger_cleanup; } iDb = 1; pName = pName1; }else{ /* Figure out the db that the trigger will be created in */ iDb = sqlite3TwoPartName(pParse, pName1, pName2, &pName); if( iDb<0 ){ goto trigger_cleanup; } } if( !pTableName || db->mallocFailed ){ goto trigger_cleanup; } /* A long-standing parser bug is that this syntax was allowed: ** ** CREATE TRIGGER attached.demo AFTER INSERT ON attached.tab .... ** ^^^^^^^^ ** ** To maintain backwards compatibility, ignore the database ** name on pTableName if we are reparsing out of SQLITE_MASTER. */ if( db->init.busy && iDb!=1 ){ sqlite3DbFree(db, pTableName->a[0].zDatabase); pTableName->a[0].zDatabase = 0; } /* If the trigger name was unqualified, and the table is a temp table, ** then set iDb to 1 to create the trigger in the temporary database. 
** If sqlite3SrcListLookup() returns 0, indicating the table does not ** exist, the error is caught by the block below. */ pTab = sqlite3SrcListLookup(pParse, pTableName); if( db->init.busy==0 && pName2->n==0 && pTab && pTab->pSchema==db->aDb[1].pSchema ){ iDb = 1; } /* Ensure the table name matches database name and that the table exists */ if( db->mallocFailed ) goto trigger_cleanup; assert( pTableName->nSrc==1 ); sqlite3FixInit(&sFix, pParse, iDb, "trigger", pName); if( sqlite3FixSrcList(&sFix, pTableName) ){ goto trigger_cleanup; } pTab = sqlite3SrcListLookup(pParse, pTableName); if( !pTab ){ /* The table does not exist. */ if( db->init.iDb==1 ){ /* Ticket #3810. ** Normally, whenever a table is dropped, all associated triggers are ** dropped too. But if a TEMP trigger is created on a non-TEMP table ** and the table is dropped by a different database connection, the ** trigger is not visible to the database connection that does the ** drop so the trigger cannot be dropped. This results in an ** "orphaned trigger" - a trigger whose associated table is missing. */ db->init.orphanTrigger = 1; } goto trigger_cleanup; } if( IsVirtual(pTab) ){ sqlite3ErrorMsg(pParse, "cannot create triggers on virtual tables"); goto trigger_cleanup; } /* Check that the trigger name is not reserved and that no trigger of the ** specified name exists */ zName = sqlite3NameFromToken(db, pName); if( !zName || SQLITE_OK!=sqlite3CheckObjectName(pParse, zName) ){ goto trigger_cleanup; } assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); if( sqlite3HashFind(&(db->aDb[iDb].pSchema->trigHash),zName) ){ if( !noErr ){ sqlite3ErrorMsg(pParse, "trigger %T already exists", pName); }else{ assert( !db->init.busy ); sqlite3CodeVerifySchema(pParse, iDb); } goto trigger_cleanup; } /* Do not create a trigger on a system table */ if( sqlite3StrNICmp(pTab->zName, "sqlite_", 7)==0 ){ sqlite3ErrorMsg(pParse, "cannot create trigger on system table"); goto trigger_cleanup; } /* INSTEAD of triggers are only for views and views only support INSTEAD ** of triggers. */ if( pTab->pSelect && tr_tm!=TK_INSTEAD ){ sqlite3ErrorMsg(pParse, "cannot create %s trigger on view: %S", (tr_tm == TK_BEFORE)?"BEFORE":"AFTER", pTableName, 0); goto trigger_cleanup; } if( !pTab->pSelect && tr_tm==TK_INSTEAD ){ sqlite3ErrorMsg(pParse, "cannot create INSTEAD OF" " trigger on table: %S", pTableName, 0); goto trigger_cleanup; } iTabDb = sqlite3SchemaToIndex(db, pTab->pSchema); #ifndef SQLITE_OMIT_AUTHORIZATION { int code = SQLITE_CREATE_TRIGGER; const char *zDb = db->aDb[iTabDb].zName; const char *zDbTrig = isTemp ? db->aDb[1].zName : zDb; if( iTabDb==1 || isTemp ) code = SQLITE_CREATE_TEMP_TRIGGER; if( sqlite3AuthCheck(pParse, code, zName, pTab->zName, zDbTrig) ){ goto trigger_cleanup; } if( sqlite3AuthCheck(pParse, SQLITE_INSERT, SCHEMA_TABLE(iTabDb),0,zDb)){ goto trigger_cleanup; } } #endif /* INSTEAD OF triggers can only appear on views and BEFORE triggers ** cannot appear on views. So we might as well translate every ** INSTEAD OF trigger into a BEFORE trigger. It simplifies code ** elsewhere. */ if (tr_tm == TK_INSTEAD){ tr_tm = TK_BEFORE; } /* Build the Trigger object */ pTrigger = (Trigger*)sqlite3DbMallocZero(db, sizeof(Trigger)); if( pTrigger==0 ) goto trigger_cleanup; pTrigger->zName = zName; zName = 0; pTrigger->table = sqlite3DbStrDup(db, pTableName->a[0].zName); pTrigger->pSchema = db->aDb[iDb].pSchema; pTrigger->pTabSchema = pTab->pSchema; pTrigger->op = (u8)op; pTrigger->tr_tm = tr_tm==TK_BEFORE ? 
TRIGGER_BEFORE : TRIGGER_AFTER; pTrigger->pWhen = sqlite3ExprDup(db, pWhen, EXPRDUP_REDUCE); pTrigger->pColumns = sqlite3IdListDup(db, pColumns); assert( pParse->pNewTrigger==0 ); pParse->pNewTrigger = pTrigger; trigger_cleanup: sqlite3DbFree(db, zName); sqlite3SrcListDelete(db, pTableName); sqlite3IdListDelete(db, pColumns); sqlite3ExprDelete(db, pWhen); if( !pParse->pNewTrigger ){ sqlite3DeleteTrigger(db, pTrigger); }else{ assert( pParse->pNewTrigger==pTrigger ); } } /* ** This routine is called after all of the trigger actions have been parsed ** in order to complete the process of building the trigger. */ SQLITE_PRIVATE void sqlite3FinishTrigger( Parse *pParse, /* Parser context */ TriggerStep *pStepList, /* The triggered program */ Token *pAll /* Token that describes the complete CREATE TRIGGER */ ){ Trigger *pTrig = pParse->pNewTrigger; /* Trigger being finished */ char *zName; /* Name of trigger */ sqlite3 *db = pParse->db; /* The database */ DbFixer sFix; /* Fixer object */ int iDb; /* Database containing the trigger */ Token nameToken; /* Trigger name for error reporting */ pParse->pNewTrigger = 0; if( NEVER(pParse->nErr) || !pTrig ) goto triggerfinish_cleanup; zName = pTrig->zName; iDb = sqlite3SchemaToIndex(pParse->db, pTrig->pSchema); pTrig->step_list = pStepList; while( pStepList ){ pStepList->pTrig = pTrig; pStepList = pStepList->pNext; } nameToken.z = pTrig->zName; nameToken.n = sqlite3Strlen30(nameToken.z); sqlite3FixInit(&sFix, pParse, iDb, "trigger", &nameToken); if( sqlite3FixTriggerStep(&sFix, pTrig->step_list) || sqlite3FixExpr(&sFix, pTrig->pWhen) ){ goto triggerfinish_cleanup; } /* if we are not initializing, ** build the sqlite_master entry */ if( !db->init.busy ){ Vdbe *v; char *z; /* Make an entry in the sqlite_master table */ v = sqlite3GetVdbe(pParse); if( v==0 ) goto triggerfinish_cleanup; sqlite3BeginWriteOperation(pParse, 0, iDb); z = sqlite3DbStrNDup(db, (char*)pAll->z, pAll->n); sqlite3NestedParse(pParse, "INSERT INTO %Q.%s VALUES('trigger',%Q,%Q,0,'CREATE TRIGGER %q')", db->aDb[iDb].zName, SCHEMA_TABLE(iDb), zName, pTrig->table, z); sqlite3DbFree(db, z); sqlite3ChangeCookie(pParse, iDb); sqlite3VdbeAddParseSchemaOp(v, iDb, sqlite3MPrintf(db, "type='trigger' AND name='%q'", zName)); } if( db->init.busy ){ Trigger *pLink = pTrig; Hash *pHash = &db->aDb[iDb].pSchema->trigHash; assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); pTrig = sqlite3HashInsert(pHash, zName, pTrig); if( pTrig ){ db->mallocFailed = 1; }else if( pLink->pSchema==pLink->pTabSchema ){ Table *pTab; pTab = sqlite3HashFind(&pLink->pTabSchema->tblHash, pLink->table); assert( pTab!=0 ); pLink->pNext = pTab->pTrigger; pTab->pTrigger = pLink; } } triggerfinish_cleanup: sqlite3DeleteTrigger(db, pTrig); assert( !pParse->pNewTrigger ); sqlite3DeleteTriggerStep(db, pStepList); } /* ** Turn a SELECT statement (that the pSelect parameter points to) into ** a trigger step. Return a pointer to a TriggerStep structure. ** ** The parser calls this routine when it finds a SELECT statement in ** body of a TRIGGER. */ SQLITE_PRIVATE TriggerStep *sqlite3TriggerSelectStep(sqlite3 *db, Select *pSelect){ TriggerStep *pTriggerStep = sqlite3DbMallocZero(db, sizeof(TriggerStep)); if( pTriggerStep==0 ) { sqlite3SelectDelete(db, pSelect); return 0; } pTriggerStep->op = TK_SELECT; pTriggerStep->pSelect = pSelect; pTriggerStep->orconf = OE_Default; return pTriggerStep; } /* ** Allocate space to hold a new trigger step. The allocated space ** holds both the TriggerStep object and the TriggerStep.target.z string. 
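**
** triggerStepAllocate() below places the TriggerStep object and the
** dequoted copy of the target name in a single allocation, with the
** string stored immediately after the struct, so freeing the TriggerStep
** also frees the name.  A minimal sketch of the same idiom with a
** hypothetical struct and plain malloc() (not the real TriggerStep
** layout):
**
**     typedef struct NamedNode NamedNode;
**     struct NamedNode {
**       NamedNode *pNext;
**       char *zName;
**     };
**
**     static NamedNode *namedNodeAlloc(const char *zName){
**       size_t n = strlen(zName);
**       NamedNode *p = malloc(sizeof(NamedNode) + n + 1);
**       if( p ){
**         char *z = (char*)&p[1];
**         memcpy(z, zName, n+1);
**         p->pNext = 0;
**         p->zName = z;
**       }
**       return p;
**     }
**
** Here zName points into the same block as the struct, so a single
** free(p) releases both.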
** ** If an OOM error occurs, NULL is returned and db->mallocFailed is set. */ static TriggerStep *triggerStepAllocate( sqlite3 *db, /* Database connection */ u8 op, /* Trigger opcode */ Token *pName /* The target name */ ){ TriggerStep *pTriggerStep; pTriggerStep = sqlite3DbMallocZero(db, sizeof(TriggerStep) + pName->n + 1); if( pTriggerStep ){ char *z = (char*)&pTriggerStep[1]; memcpy(z, pName->z, pName->n); sqlite3Dequote(z); pTriggerStep->zTarget = z; pTriggerStep->op = op; } return pTriggerStep; } /* ** Build a trigger step out of an INSERT statement. Return a pointer ** to the new trigger step. ** ** The parser calls this routine when it sees an INSERT inside the ** body of a trigger. */ SQLITE_PRIVATE TriggerStep *sqlite3TriggerInsertStep( sqlite3 *db, /* The database connection */ Token *pTableName, /* Name of the table into which we insert */ IdList *pColumn, /* List of columns in pTableName to insert into */ Select *pSelect, /* A SELECT statement that supplies values */ u8 orconf /* The conflict algorithm (OE_Abort, OE_Replace, etc.) */ ){ TriggerStep *pTriggerStep; assert(pSelect != 0 || db->mallocFailed); pTriggerStep = triggerStepAllocate(db, TK_INSERT, pTableName); if( pTriggerStep ){ pTriggerStep->pSelect = sqlite3SelectDup(db, pSelect, EXPRDUP_REDUCE); pTriggerStep->pIdList = pColumn; pTriggerStep->orconf = orconf; }else{ sqlite3IdListDelete(db, pColumn); } sqlite3SelectDelete(db, pSelect); return pTriggerStep; } /* ** Construct a trigger step that implements an UPDATE statement and return ** a pointer to that trigger step. The parser calls this routine when it ** sees an UPDATE statement inside the body of a CREATE TRIGGER. */ SQLITE_PRIVATE TriggerStep *sqlite3TriggerUpdateStep( sqlite3 *db, /* The database connection */ Token *pTableName, /* Name of the table to be updated */ ExprList *pEList, /* The SET clause: list of column and new values */ Expr *pWhere, /* The WHERE clause */ u8 orconf /* The conflict algorithm. (OE_Abort, OE_Ignore, etc) */ ){ TriggerStep *pTriggerStep; pTriggerStep = triggerStepAllocate(db, TK_UPDATE, pTableName); if( pTriggerStep ){ pTriggerStep->pExprList = sqlite3ExprListDup(db, pEList, EXPRDUP_REDUCE); pTriggerStep->pWhere = sqlite3ExprDup(db, pWhere, EXPRDUP_REDUCE); pTriggerStep->orconf = orconf; } sqlite3ExprListDelete(db, pEList); sqlite3ExprDelete(db, pWhere); return pTriggerStep; } /* ** Construct a trigger step that implements a DELETE statement and return ** a pointer to that trigger step. The parser calls this routine when it ** sees a DELETE statement inside the body of a CREATE TRIGGER. */ SQLITE_PRIVATE TriggerStep *sqlite3TriggerDeleteStep( sqlite3 *db, /* Database connection */ Token *pTableName, /* The table from which rows are deleted */ Expr *pWhere /* The WHERE clause */ ){ TriggerStep *pTriggerStep; pTriggerStep = triggerStepAllocate(db, TK_DELETE, pTableName); if( pTriggerStep ){ pTriggerStep->pWhere = sqlite3ExprDup(db, pWhere, EXPRDUP_REDUCE); pTriggerStep->orconf = OE_Default; } sqlite3ExprDelete(db, pWhere); return pTriggerStep; } /* ** Recursively delete a Trigger structure */ SQLITE_PRIVATE void sqlite3DeleteTrigger(sqlite3 *db, Trigger *pTrigger){ if( pTrigger==0 ) return; sqlite3DeleteTriggerStep(db, pTrigger->step_list); sqlite3DbFree(db, pTrigger->zName); sqlite3DbFree(db, pTrigger->table); sqlite3ExprDelete(db, pTrigger->pWhen); sqlite3IdListDelete(db, pTrigger->pColumns); sqlite3DbFree(db, pTrigger); } /* ** This function is called to drop a trigger from the database schema. 
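**
** Note the ownership convention followed by the trigger-step constructors
** above (sqlite3TriggerInsertStep, sqlite3TriggerUpdateStep and
** sqlite3TriggerDeleteStep): each duplicates the expression arguments it
** keeps and then frees the originals unconditionally, so the parser does
** not leak them when an allocation fails.  A minimal sketch of the
** pattern with hypothetical names (Ctx, Node, ctxMallocZero, exprDup and
** exprDelete are illustrative only, not SQLite interfaces):
**
**     static Node *nodeFromExpr(Ctx *pCtx, Expr *pExpr){
**       Node *p = ctxMallocZero(pCtx, sizeof(Node));
**       if( p ){
**         p->pExpr = exprDup(pCtx, pExpr);
**       }
**       exprDelete(pCtx, pExpr);
**       return p;
**     }
**
** The caller hands pExpr over and never frees it itself, exactly as the
** parser does with the pWhere and pEList arguments above.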
** ** This may be called directly from the parser and therefore identifies ** the trigger by name. The sqlite3DropTriggerPtr() routine does the ** same job as this routine except it takes a pointer to the trigger ** instead of the trigger name. **/ SQLITE_PRIVATE void sqlite3DropTrigger(Parse *pParse, SrcList *pName, int noErr){ Trigger *pTrigger = 0; int i; const char *zDb; const char *zName; sqlite3 *db = pParse->db; if( db->mallocFailed ) goto drop_trigger_cleanup; if( SQLITE_OK!=sqlite3ReadSchema(pParse) ){ goto drop_trigger_cleanup; } assert( pName->nSrc==1 ); zDb = pName->a[0].zDatabase; zName = pName->a[0].zName; assert( zDb!=0 || sqlite3BtreeHoldsAllMutexes(db) ); for(i=OMIT_TEMPDB; inDb; i++){ int j = (i<2) ? i^1 : i; /* Search TEMP before MAIN */ if( zDb && sqlite3StrICmp(db->aDb[j].zName, zDb) ) continue; assert( sqlite3SchemaMutexHeld(db, j, 0) ); pTrigger = sqlite3HashFind(&(db->aDb[j].pSchema->trigHash), zName); if( pTrigger ) break; } if( !pTrigger ){ if( !noErr ){ sqlite3ErrorMsg(pParse, "no such trigger: %S", pName, 0); }else{ sqlite3CodeVerifyNamedSchema(pParse, zDb); } pParse->checkSchema = 1; goto drop_trigger_cleanup; } sqlite3DropTriggerPtr(pParse, pTrigger); drop_trigger_cleanup: sqlite3SrcListDelete(db, pName); } /* ** Return a pointer to the Table structure for the table that a trigger ** is set on. */ static Table *tableOfTrigger(Trigger *pTrigger){ return sqlite3HashFind(&pTrigger->pTabSchema->tblHash, pTrigger->table); } /* ** Drop a trigger given a pointer to that trigger. */ SQLITE_PRIVATE void sqlite3DropTriggerPtr(Parse *pParse, Trigger *pTrigger){ Table *pTable; Vdbe *v; sqlite3 *db = pParse->db; int iDb; iDb = sqlite3SchemaToIndex(pParse->db, pTrigger->pSchema); assert( iDb>=0 && iDbnDb ); pTable = tableOfTrigger(pTrigger); assert( pTable ); assert( pTable->pSchema==pTrigger->pSchema || iDb==1 ); #ifndef SQLITE_OMIT_AUTHORIZATION { int code = SQLITE_DROP_TRIGGER; const char *zDb = db->aDb[iDb].zName; const char *zTab = SCHEMA_TABLE(iDb); if( iDb==1 ) code = SQLITE_DROP_TEMP_TRIGGER; if( sqlite3AuthCheck(pParse, code, pTrigger->zName, pTable->zName, zDb) || sqlite3AuthCheck(pParse, SQLITE_DELETE, zTab, 0, zDb) ){ return; } } #endif /* Generate code to destroy the database record of the trigger. */ assert( pTable!=0 ); if( (v = sqlite3GetVdbe(pParse))!=0 ){ int base; static const int iLn = VDBE_OFFSET_LINENO(2); static const VdbeOpList dropTrigger[] = { { OP_Rewind, 0, ADDR(9), 0}, { OP_String8, 0, 1, 0}, /* 1 */ { OP_Column, 0, 1, 2}, { OP_Ne, 2, ADDR(8), 1}, { OP_String8, 0, 1, 0}, /* 4: "trigger" */ { OP_Column, 0, 0, 2}, { OP_Ne, 2, ADDR(8), 1}, { OP_Delete, 0, 0, 0}, { OP_Next, 0, ADDR(1), 0}, /* 8 */ }; sqlite3BeginWriteOperation(pParse, 0, iDb); sqlite3OpenMasterTable(pParse, iDb); base = sqlite3VdbeAddOpList(v, ArraySize(dropTrigger), dropTrigger, iLn); sqlite3VdbeChangeP4(v, base+1, pTrigger->zName, P4_TRANSIENT); sqlite3VdbeChangeP4(v, base+4, "trigger", P4_STATIC); sqlite3ChangeCookie(pParse, iDb); sqlite3VdbeAddOp2(v, OP_Close, 0, 0); sqlite3VdbeAddOp4(v, OP_DropTrigger, iDb, 0, 0, pTrigger->zName, 0); if( pParse->nMem<3 ){ pParse->nMem = 3; } } } /* ** Remove a trigger from the hash tables of the sqlite* pointer. 
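**
** The lookup loop in sqlite3DropTrigger() above resolves an unqualified
** trigger name against the TEMP schema (database index 1) before the MAIN
** schema (index 0) by remapping the loop counter with "(i<2) ? i^1 : i".
** A small illustration of the visit order that mapping produces for a
** hypothetical connection with four databases:
**
**     int i;
**     for(i=0; i<4; i++){
**       int j = (i<2) ? i^1 : i;
**       printf("%d ", j);
**     }
**
** prints "1 0 2 3", i.e. TEMP, MAIN, then any attached databases in order.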
*/ SQLITE_PRIVATE void sqlite3UnlinkAndDeleteTrigger(sqlite3 *db, int iDb, const char *zName){ Trigger *pTrigger; Hash *pHash; assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); pHash = &(db->aDb[iDb].pSchema->trigHash); pTrigger = sqlite3HashInsert(pHash, zName, 0); if( ALWAYS(pTrigger) ){ if( pTrigger->pSchema==pTrigger->pTabSchema ){ Table *pTab = tableOfTrigger(pTrigger); Trigger **pp; for(pp=&pTab->pTrigger; *pp!=pTrigger; pp=&((*pp)->pNext)); *pp = (*pp)->pNext; } sqlite3DeleteTrigger(db, pTrigger); db->flags |= SQLITE_InternChanges; } } /* ** pEList is the SET clause of an UPDATE statement. Each entry ** in pEList is of the format =. If any of the entries ** in pEList have an which matches an identifier in pIdList, ** then return TRUE. If pIdList==NULL, then it is considered a ** wildcard that matches anything. Likewise if pEList==NULL then ** it matches anything so always return true. Return false only ** if there is no match. */ static int checkColumnOverlap(IdList *pIdList, ExprList *pEList){ int e; if( pIdList==0 || NEVER(pEList==0) ) return 1; for(e=0; enExpr; e++){ if( sqlite3IdListIndex(pIdList, pEList->a[e].zName)>=0 ) return 1; } return 0; } /* ** Return a list of all triggers on table pTab if there exists at least ** one trigger that must be fired when an operation of type 'op' is ** performed on the table, and, if that operation is an UPDATE, if at ** least one of the columns in pChanges is being modified. */ SQLITE_PRIVATE Trigger *sqlite3TriggersExist( Parse *pParse, /* Parse context */ Table *pTab, /* The table the contains the triggers */ int op, /* one of TK_DELETE, TK_INSERT, TK_UPDATE */ ExprList *pChanges, /* Columns that change in an UPDATE statement */ int *pMask /* OUT: Mask of TRIGGER_BEFORE|TRIGGER_AFTER */ ){ int mask = 0; Trigger *pList = 0; Trigger *p; if( (pParse->db->flags & SQLITE_EnableTrigger)!=0 ){ pList = sqlite3TriggerList(pParse, pTab); } assert( pList==0 || IsVirtual(pTab)==0 ); for(p=pList; p; p=p->pNext){ if( p->op==op && checkColumnOverlap(p->pColumns, pChanges) ){ mask |= p->tr_tm; } } if( pMask ){ *pMask = mask; } return (mask ? pList : 0); } /* ** Convert the pStep->zTarget string into a SrcList and return a pointer ** to that SrcList. ** ** This routine adds a specific database name, if needed, to the target when ** forming the SrcList. This prevents a trigger in one database from ** referring to a target in another database. An exception is when the ** trigger is in TEMP in which case it can refer to any other database it ** wants. */ static SrcList *targetSrcList( Parse *pParse, /* The parsing context */ TriggerStep *pStep /* The trigger containing the target token */ ){ sqlite3 *db = pParse->db; int iDb; /* Index of the database to use */ SrcList *pSrc; /* SrcList to be returned */ pSrc = sqlite3SrcListAppend(db, 0, 0, 0); if( pSrc ){ assert( pSrc->nSrc>0 ); pSrc->a[pSrc->nSrc-1].zName = sqlite3DbStrDup(db, pStep->zTarget); iDb = sqlite3SchemaToIndex(db, pStep->pTrig->pSchema); if( iDb==0 || iDb>=2 ){ assert( iDbnDb ); pSrc->a[pSrc->nSrc-1].zDatabase = sqlite3DbStrDup(db, db->aDb[iDb].zName); } } return pSrc; } /* ** Generate VDBE code for the statements inside the body of a single ** trigger. */ static int codeTriggerProgram( Parse *pParse, /* The parser context */ TriggerStep *pStepList, /* List of statements inside the trigger body */ int orconf /* Conflict algorithm. 
(OE_Abort, etc) */ ){ TriggerStep *pStep; Vdbe *v = pParse->pVdbe; sqlite3 *db = pParse->db; assert( pParse->pTriggerTab && pParse->pToplevel ); assert( pStepList ); assert( v!=0 ); for(pStep=pStepList; pStep; pStep=pStep->pNext){ /* Figure out the ON CONFLICT policy that will be used for this step ** of the trigger program. If the statement that caused this trigger ** to fire had an explicit ON CONFLICT, then use it. Otherwise, use ** the ON CONFLICT policy that was specified as part of the trigger ** step statement. Example: ** ** CREATE TRIGGER AFTER INSERT ON t1 BEGIN; ** INSERT OR REPLACE INTO t2 VALUES(new.a, new.b); ** END; ** ** INSERT INTO t1 ... ; -- insert into t2 uses REPLACE policy ** INSERT OR IGNORE INTO t1 ... ; -- insert into t2 uses IGNORE policy */ pParse->eOrconf = (orconf==OE_Default)?pStep->orconf:(u8)orconf; assert( pParse->okConstFactor==0 ); switch( pStep->op ){ case TK_UPDATE: { sqlite3Update(pParse, targetSrcList(pParse, pStep), sqlite3ExprListDup(db, pStep->pExprList, 0), sqlite3ExprDup(db, pStep->pWhere, 0), pParse->eOrconf ); break; } case TK_INSERT: { sqlite3Insert(pParse, targetSrcList(pParse, pStep), sqlite3SelectDup(db, pStep->pSelect, 0), sqlite3IdListDup(db, pStep->pIdList), pParse->eOrconf ); break; } case TK_DELETE: { sqlite3DeleteFrom(pParse, targetSrcList(pParse, pStep), sqlite3ExprDup(db, pStep->pWhere, 0) ); break; } default: assert( pStep->op==TK_SELECT ); { SelectDest sDest; Select *pSelect = sqlite3SelectDup(db, pStep->pSelect, 0); sqlite3SelectDestInit(&sDest, SRT_Discard, 0); sqlite3Select(pParse, pSelect, &sDest); sqlite3SelectDelete(db, pSelect); break; } } if( pStep->op!=TK_SELECT ){ sqlite3VdbeAddOp0(v, OP_ResetCount); } } return 0; } #ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS /* ** This function is used to add VdbeComment() annotations to a VDBE ** program. It is not used in production code, only for debugging. */ static const char *onErrorText(int onError){ switch( onError ){ case OE_Abort: return "abort"; case OE_Rollback: return "rollback"; case OE_Fail: return "fail"; case OE_Replace: return "replace"; case OE_Ignore: return "ignore"; case OE_Default: return "default"; } return "n/a"; } #endif /* ** Parse context structure pFrom has just been used to create a sub-vdbe ** (trigger program). If an error has occurred, transfer error information ** from pFrom to pTo. */ static void transferParseError(Parse *pTo, Parse *pFrom){ assert( pFrom->zErrMsg==0 || pFrom->nErr ); assert( pTo->zErrMsg==0 || pTo->nErr ); if( pTo->nErr==0 ){ pTo->zErrMsg = pFrom->zErrMsg; pTo->nErr = pFrom->nErr; pTo->rc = pFrom->rc; }else{ sqlite3DbFree(pFrom->db, pFrom->zErrMsg); } } /* ** Create and populate a new TriggerPrg object with a sub-program ** implementing trigger pTrigger with ON CONFLICT policy orconf. 
*/ static TriggerPrg *codeRowTrigger( Parse *pParse, /* Current parse context */ Trigger *pTrigger, /* Trigger to code */ Table *pTab, /* The table pTrigger is attached to */ int orconf /* ON CONFLICT policy to code trigger program with */ ){ Parse *pTop = sqlite3ParseToplevel(pParse); sqlite3 *db = pParse->db; /* Database handle */ TriggerPrg *pPrg; /* Value to return */ Expr *pWhen = 0; /* Duplicate of trigger WHEN expression */ Vdbe *v; /* Temporary VM */ NameContext sNC; /* Name context for sub-vdbe */ SubProgram *pProgram = 0; /* Sub-vdbe for trigger program */ Parse *pSubParse; /* Parse context for sub-vdbe */ int iEndTrigger = 0; /* Label to jump to if WHEN is false */ assert( pTrigger->zName==0 || pTab==tableOfTrigger(pTrigger) ); assert( pTop->pVdbe ); /* Allocate the TriggerPrg and SubProgram objects. To ensure that they ** are freed if an error occurs, link them into the Parse.pTriggerPrg ** list of the top-level Parse object sooner rather than later. */ pPrg = sqlite3DbMallocZero(db, sizeof(TriggerPrg)); if( !pPrg ) return 0; pPrg->pNext = pTop->pTriggerPrg; pTop->pTriggerPrg = pPrg; pPrg->pProgram = pProgram = sqlite3DbMallocZero(db, sizeof(SubProgram)); if( !pProgram ) return 0; sqlite3VdbeLinkSubProgram(pTop->pVdbe, pProgram); pPrg->pTrigger = pTrigger; pPrg->orconf = orconf; pPrg->aColmask[0] = 0xffffffff; pPrg->aColmask[1] = 0xffffffff; /* Allocate and populate a new Parse context to use for coding the ** trigger sub-program. */ pSubParse = sqlite3StackAllocZero(db, sizeof(Parse)); if( !pSubParse ) return 0; memset(&sNC, 0, sizeof(sNC)); sNC.pParse = pSubParse; pSubParse->db = db; pSubParse->pTriggerTab = pTab; pSubParse->pToplevel = pTop; pSubParse->zAuthContext = pTrigger->zName; pSubParse->eTriggerOp = pTrigger->op; pSubParse->nQueryLoop = pParse->nQueryLoop; v = sqlite3GetVdbe(pSubParse); if( v ){ VdbeComment((v, "Start: %s.%s (%s %s%s%s ON %s)", pTrigger->zName, onErrorText(orconf), (pTrigger->tr_tm==TRIGGER_BEFORE ? "BEFORE" : "AFTER"), (pTrigger->op==TK_UPDATE ? "UPDATE" : ""), (pTrigger->op==TK_INSERT ? "INSERT" : ""), (pTrigger->op==TK_DELETE ? "DELETE" : ""), pTab->zName )); #ifndef SQLITE_OMIT_TRACE sqlite3VdbeChangeP4(v, -1, sqlite3MPrintf(db, "-- TRIGGER %s", pTrigger->zName), P4_DYNAMIC ); #endif /* If one was specified, code the WHEN clause. If it evaluates to false ** (or NULL) the sub-vdbe is immediately halted by jumping to the ** OP_Halt inserted at the end of the program. */ if( pTrigger->pWhen ){ pWhen = sqlite3ExprDup(db, pTrigger->pWhen, 0); if( SQLITE_OK==sqlite3ResolveExprNames(&sNC, pWhen) && db->mallocFailed==0 ){ iEndTrigger = sqlite3VdbeMakeLabel(v); sqlite3ExprIfFalse(pSubParse, pWhen, iEndTrigger, SQLITE_JUMPIFNULL); } sqlite3ExprDelete(db, pWhen); } /* Code the trigger program into the sub-vdbe. */ codeTriggerProgram(pSubParse, pTrigger->step_list, orconf); /* Insert an OP_Halt at the end of the sub-program. 
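**
** For example, a trigger created (hypothetically) as
**
**     CREATE TRIGGER audit AFTER UPDATE ON t1
**     WHEN new.status <> old.status
**     BEGIN
**       INSERT INTO log VALUES(new.id, new.status);
**     END;
**
** produces a sub-program that evaluates "new.status <> old.status" first
** and jumps straight to the closing OP_Halt when the comparison is false
** or NULL, so the INSERT step is skipped for rows whose status did not
** change.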
*/ if( iEndTrigger ){ sqlite3VdbeResolveLabel(v, iEndTrigger); } sqlite3VdbeAddOp0(v, OP_Halt); VdbeComment((v, "End: %s.%s", pTrigger->zName, onErrorText(orconf))); transferParseError(pParse, pSubParse); if( db->mallocFailed==0 ){ pProgram->aOp = sqlite3VdbeTakeOpArray(v, &pProgram->nOp, &pTop->nMaxArg); } pProgram->nMem = pSubParse->nMem; pProgram->nCsr = pSubParse->nTab; pProgram->nOnce = pSubParse->nOnce; pProgram->token = (void *)pTrigger; pPrg->aColmask[0] = pSubParse->oldmask; pPrg->aColmask[1] = pSubParse->newmask; sqlite3VdbeDelete(v); } assert( !pSubParse->pAinc && !pSubParse->pZombieTab ); assert( !pSubParse->pTriggerPrg && !pSubParse->nMaxArg ); sqlite3ParserReset(pSubParse); sqlite3StackFree(db, pSubParse); return pPrg; } /* ** Return a pointer to a TriggerPrg object containing the sub-program for ** trigger pTrigger with default ON CONFLICT algorithm orconf. If no such ** TriggerPrg object exists, a new object is allocated and populated before ** being returned. */ static TriggerPrg *getRowTrigger( Parse *pParse, /* Current parse context */ Trigger *pTrigger, /* Trigger to code */ Table *pTab, /* The table trigger pTrigger is attached to */ int orconf /* ON CONFLICT algorithm. */ ){ Parse *pRoot = sqlite3ParseToplevel(pParse); TriggerPrg *pPrg; assert( pTrigger->zName==0 || pTab==tableOfTrigger(pTrigger) ); /* It may be that this trigger has already been coded (or is in the ** process of being coded). If this is the case, then an entry with ** a matching TriggerPrg.pTrigger field will be present somewhere ** in the Parse.pTriggerPrg list. Search for such an entry. */ for(pPrg=pRoot->pTriggerPrg; pPrg && (pPrg->pTrigger!=pTrigger || pPrg->orconf!=orconf); pPrg=pPrg->pNext ); /* If an existing TriggerPrg could not be located, create a new one. */ if( !pPrg ){ pPrg = codeRowTrigger(pParse, pTrigger, pTab, orconf); } return pPrg; } /* ** Generate code for the trigger program associated with trigger p on ** table pTab. The reg, orconf and ignoreJump parameters passed to this ** function are the same as those described in the header function for ** sqlite3CodeRowTrigger() */ SQLITE_PRIVATE void sqlite3CodeRowTriggerDirect( Parse *pParse, /* Parse context */ Trigger *p, /* Trigger to code */ Table *pTab, /* The table to code triggers from */ int reg, /* Reg array containing OLD.* and NEW.* values */ int orconf, /* ON CONFLICT policy */ int ignoreJump /* Instruction to jump to for RAISE(IGNORE) */ ){ Vdbe *v = sqlite3GetVdbe(pParse); /* Main VM */ TriggerPrg *pPrg; pPrg = getRowTrigger(pParse, p, pTab, orconf); assert( pPrg || pParse->nErr || pParse->db->mallocFailed ); /* Code the OP_Program opcode in the parent VDBE. P4 of the OP_Program ** is a pointer to the sub-vdbe containing the trigger program. */ if( pPrg ){ int bRecursive = (p->zName && 0==(pParse->db->flags&SQLITE_RecTriggers)); sqlite3VdbeAddOp3(v, OP_Program, reg, ignoreJump, ++pParse->nMem); sqlite3VdbeChangeP4(v, -1, (const char *)pPrg->pProgram, P4_SUBPROGRAM); VdbeComment( (v, "Call: %s.%s", (p->zName?p->zName:"fkey"), onErrorText(orconf))); /* Set the P5 operand of the OP_Program instruction to non-zero if ** recursive invocation of this trigger program is disallowed. Recursive ** invocation is disallowed if (a) the sub-program is really a trigger, ** not a foreign key action, and (b) the flag to enable recursive triggers ** is clear. */ sqlite3VdbeChangeP5(v, (u8)bRecursive); } } /* ** This is called to code the required FOR EACH ROW triggers for an operation ** on table pTab. 
The operation to code triggers for (INSERT, UPDATE or DELETE) ** is given by the op parameter. The tr_tm parameter determines whether the ** BEFORE or AFTER triggers are coded. If the operation is an UPDATE, then ** parameter pChanges is passed the list of columns being modified. ** ** If there are no triggers that fire at the specified time for the specified ** operation on pTab, this function is a no-op. ** ** The reg argument is the address of the first in an array of registers ** that contain the values substituted for the new.* and old.* references ** in the trigger program. If N is the number of columns in table pTab ** (a copy of pTab->nCol), then registers are populated as follows: ** ** Register Contains ** ------------------------------------------------------ ** reg+0 OLD.rowid ** reg+1 OLD.* value of left-most column of pTab ** ... ... ** reg+N OLD.* value of right-most column of pTab ** reg+N+1 NEW.rowid ** reg+N+2 OLD.* value of left-most column of pTab ** ... ... ** reg+N+N+1 NEW.* value of right-most column of pTab ** ** For ON DELETE triggers, the registers containing the NEW.* values will ** never be accessed by the trigger program, so they are not allocated or ** populated by the caller (there is no data to populate them with anyway). ** Similarly, for ON INSERT triggers the values stored in the OLD.* registers ** are never accessed, and so are not allocated by the caller. So, for an ** ON INSERT trigger, the value passed to this function as parameter reg ** is not a readable register, although registers (reg+N) through ** (reg+N+N+1) are. ** ** Parameter orconf is the default conflict resolution algorithm for the ** trigger program to use (REPLACE, IGNORE etc.). Parameter ignoreJump ** is the instruction that control should jump to if a trigger program ** raises an IGNORE exception. */ SQLITE_PRIVATE void sqlite3CodeRowTrigger( Parse *pParse, /* Parse context */ Trigger *pTrigger, /* List of triggers on table pTab */ int op, /* One of TK_UPDATE, TK_INSERT, TK_DELETE */ ExprList *pChanges, /* Changes list for any UPDATE OF triggers */ int tr_tm, /* One of TRIGGER_BEFORE, TRIGGER_AFTER */ Table *pTab, /* The table to code triggers from */ int reg, /* The first in an array of registers (see above) */ int orconf, /* ON CONFLICT policy */ int ignoreJump /* Instruction to jump to for RAISE(IGNORE) */ ){ Trigger *p; /* Used to iterate through pTrigger list */ assert( op==TK_UPDATE || op==TK_INSERT || op==TK_DELETE ); assert( tr_tm==TRIGGER_BEFORE || tr_tm==TRIGGER_AFTER ); assert( (op==TK_UPDATE)==(pChanges!=0) ); for(p=pTrigger; p; p=p->pNext){ /* Sanity checking: The schema for the trigger and for the table are ** always defined. The trigger must be in the same schema as the table ** or else it must be a TEMP trigger. */ assert( p->pSchema!=0 ); assert( p->pTabSchema!=0 ); assert( p->pSchema==p->pTabSchema || p->pSchema==pParse->db->aDb[1].pSchema ); /* Determine whether we should code this trigger */ if( p->op==op && p->tr_tm==tr_tm && checkColumnOverlap(p->pColumns, pChanges) ){ sqlite3CodeRowTriggerDirect(pParse, p, pTab, reg, orconf, ignoreJump); } } } /* ** Triggers may access values stored in the old.* or new.* pseudo-table. ** This function returns a 32-bit bitmask indicating which columns of the ** old.* or new.* tables actually are used by triggers. This information ** may be used by the caller, for example, to avoid having to load the entire ** old.* record into memory when executing an UPDATE or DELETE command. 
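**
** As a concrete illustration of the register layout described above
** (where reg+N+2 holds the NEW.* value of the left-most column), a
** 3-column table (N==3) with reg==10 uses:
**
**     register 10         OLD.rowid
**     registers 11..13    OLD.* values of columns 0..2
**     register 14         NEW.rowid
**     registers 15..17    NEW.* values of columns 0..2
**
** or, expressed as index arithmetic (a sketch, not part of the VDBE
** interface):
**
**     #define REG_OLD_ROWID(reg)       (reg)
**     #define REG_OLD_COL(reg,i)       ((reg)+1+(i))
**     #define REG_NEW_ROWID(reg,N)     ((reg)+(N)+1)
**     #define REG_NEW_COL(reg,N,i)     ((reg)+(N)+2+(i))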
** ** Bit 0 of the returned mask is set if the left-most column of the ** table may be accessed using an [old|new].reference. Bit 1 is set if ** the second leftmost column value is required, and so on. If there ** are more than 32 columns in the table, and at least one of the columns ** with an index greater than 32 may be accessed, 0xffffffff is returned. ** ** It is not possible to determine if the old.rowid or new.rowid column is ** accessed by triggers. The caller must always assume that it is. ** ** Parameter isNew must be either 1 or 0. If it is 0, then the mask returned ** applies to the old.* table. If 1, the new.* table. ** ** Parameter tr_tm must be a mask with one or both of the TRIGGER_BEFORE ** and TRIGGER_AFTER bits set. Values accessed by BEFORE triggers are only ** included in the returned mask if the TRIGGER_BEFORE bit is set in the ** tr_tm parameter. Similarly, values accessed by AFTER triggers are only ** included in the returned mask if the TRIGGER_AFTER bit is set in tr_tm. */ SQLITE_PRIVATE u32 sqlite3TriggerColmask( Parse *pParse, /* Parse context */ Trigger *pTrigger, /* List of triggers on table pTab */ ExprList *pChanges, /* Changes list for any UPDATE OF triggers */ int isNew, /* 1 for new.* ref mask, 0 for old.* ref mask */ int tr_tm, /* Mask of TRIGGER_BEFORE|TRIGGER_AFTER */ Table *pTab, /* The table to code triggers from */ int orconf /* Default ON CONFLICT policy for trigger steps */ ){ const int op = pChanges ? TK_UPDATE : TK_DELETE; u32 mask = 0; Trigger *p; assert( isNew==1 || isNew==0 ); for(p=pTrigger; p; p=p->pNext){ if( p->op==op && (tr_tm&p->tr_tm) && checkColumnOverlap(p->pColumns,pChanges) ){ TriggerPrg *pPrg; pPrg = getRowTrigger(pParse, p, pTab, orconf); if( pPrg ){ mask |= pPrg->aColmask[isNew]; } } } return mask; } #endif /* !defined(SQLITE_OMIT_TRIGGER) */ /************** End of trigger.c *********************************************/ /************** Begin file update.c ******************************************/ /* ** 2001 September 15 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** This file contains C code routines that are called by the parser ** to handle UPDATE statements. */ /* #include "sqliteInt.h" */ #ifndef SQLITE_OMIT_VIRTUALTABLE /* Forward declaration */ static void updateVirtualTable( Parse *pParse, /* The parsing context */ SrcList *pSrc, /* The virtual table to be modified */ Table *pTab, /* The virtual table */ ExprList *pChanges, /* The columns to change in the UPDATE statement */ Expr *pRowidExpr, /* Expression used to recompute the rowid */ int *aXRef, /* Mapping from columns of pTab to entries in pChanges */ Expr *pWhere, /* WHERE clause of the UPDATE statement */ int onError /* ON CONFLICT strategy */ ); #endif /* SQLITE_OMIT_VIRTUALTABLE */ /* ** The most recently coded instruction was an OP_Column to retrieve the ** i-th column of table pTab. This routine sets the P4 parameter of the ** OP_Column to the default value, if any. ** ** The default value of a column is specified by a DEFAULT clause in the ** column definition. This was either supplied by the user when the table ** was created, or added later to the table definition by an ALTER TABLE ** command. 
If the latter, then the row-records in the table btree on disk ** may not contain a value for the column and the default value, taken ** from the P4 parameter of the OP_Column instruction, is returned instead. ** If the former, then all row-records are guaranteed to include a value ** for the column and the P4 value is not required. ** ** Column definitions created by an ALTER TABLE command may only have ** literal default values specified: a number, null or a string. (If a more ** complicated default expression value was provided, it is evaluated ** when the ALTER TABLE is executed and one of the literal values written ** into the sqlite_master table.) ** ** Therefore, the P4 parameter is only required if the default value for ** the column is a literal number, string or null. The sqlite3ValueFromExpr() ** function is capable of transforming these types of expressions into ** sqlite3_value objects. ** ** If parameter iReg is not negative, code an OP_RealAffinity instruction ** on register iReg. This is used when an equivalent integer value is ** stored in place of an 8-byte floating point value in order to save ** space. */ SQLITE_PRIVATE void sqlite3ColumnDefault(Vdbe *v, Table *pTab, int i, int iReg){ assert( pTab!=0 ); if( !pTab->pSelect ){ sqlite3_value *pValue = 0; u8 enc = ENC(sqlite3VdbeDb(v)); Column *pCol = &pTab->aCol[i]; VdbeComment((v, "%s.%s", pTab->zName, pCol->zName)); assert( inCol ); sqlite3ValueFromExpr(sqlite3VdbeDb(v), pCol->pDflt, enc, pCol->affinity, &pValue); if( pValue ){ sqlite3VdbeChangeP4(v, -1, (const char *)pValue, P4_MEM); } #ifndef SQLITE_OMIT_FLOATING_POINT if( pTab->aCol[i].affinity==SQLITE_AFF_REAL ){ sqlite3VdbeAddOp1(v, OP_RealAffinity, iReg); } #endif } } /* ** Process an UPDATE statement. ** ** UPDATE OR IGNORE table_wxyz SET a=b, c=d WHERE e<5 AND f NOT NULL; ** \_______/ \________/ \______/ \________________/ * onError pTabList pChanges pWhere */ SQLITE_PRIVATE void sqlite3Update( Parse *pParse, /* The parser context */ SrcList *pTabList, /* The table in which we should change things */ ExprList *pChanges, /* Things to be changed */ Expr *pWhere, /* The WHERE clause. May be null */ int onError /* How to handle constraint errors */ ){ int i, j; /* Loop counters */ Table *pTab; /* The table to be updated */ int addrTop = 0; /* VDBE instruction address of the start of the loop */ WhereInfo *pWInfo; /* Information about the WHERE clause */ Vdbe *v; /* The virtual database engine */ Index *pIdx; /* For looping over indices */ Index *pPk; /* The PRIMARY KEY index for WITHOUT ROWID tables */ int nIdx; /* Number of indices that need updating */ int iBaseCur; /* Base cursor number */ int iDataCur; /* Cursor for the canonical data btree */ int iIdxCur; /* Cursor for the first index */ sqlite3 *db; /* The database structure */ int *aRegIdx = 0; /* One register assigned to each index to be updated */ int *aXRef = 0; /* aXRef[i] is the index in pChanges->a[] of the ** an expression for the i-th column of the table. ** aXRef[i]==-1 if the i-th column is not changed. 
*/ u8 *aToOpen; /* 1 for tables and indices to be opened */ u8 chngPk; /* PRIMARY KEY changed in a WITHOUT ROWID table */ u8 chngRowid; /* Rowid changed in a normal table */ u8 chngKey; /* Either chngPk or chngRowid */ Expr *pRowidExpr = 0; /* Expression defining the new record number */ AuthContext sContext; /* The authorization context */ NameContext sNC; /* The name-context to resolve expressions in */ int iDb; /* Database containing the table being updated */ int okOnePass; /* True for one-pass algorithm without the FIFO */ int hasFK; /* True if foreign key processing is required */ int labelBreak; /* Jump here to break out of UPDATE loop */ int labelContinue; /* Jump here to continue next step of UPDATE loop */ #ifndef SQLITE_OMIT_TRIGGER int isView; /* True when updating a view (INSTEAD OF trigger) */ Trigger *pTrigger; /* List of triggers on pTab, if required */ int tmask; /* Mask of TRIGGER_BEFORE|TRIGGER_AFTER */ #endif int newmask; /* Mask of NEW.* columns accessed by BEFORE triggers */ int iEph = 0; /* Ephemeral table holding all primary key values */ int nKey = 0; /* Number of elements in regKey for WITHOUT ROWID */ int aiCurOnePass[2]; /* The write cursors opened by WHERE_ONEPASS */ /* Register Allocations */ int regRowCount = 0; /* A count of rows changed */ int regOldRowid; /* The old rowid */ int regNewRowid; /* The new rowid */ int regNew; /* Content of the NEW.* table in triggers */ int regOld = 0; /* Content of OLD.* table in triggers */ int regRowSet = 0; /* Rowset of rows to be updated */ int regKey = 0; /* composite PRIMARY KEY value */ memset(&sContext, 0, sizeof(sContext)); db = pParse->db; if( pParse->nErr || db->mallocFailed ){ goto update_cleanup; } assert( pTabList->nSrc==1 ); /* Locate the table which we want to update. */ pTab = sqlite3SrcListLookup(pParse, pTabList); if( pTab==0 ) goto update_cleanup; iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema); /* Figure out if we have any triggers and if the table being ** updated is a view. */ #ifndef SQLITE_OMIT_TRIGGER pTrigger = sqlite3TriggersExist(pParse, pTab, TK_UPDATE, pChanges, &tmask); isView = pTab->pSelect!=0; assert( pTrigger || tmask==0 ); #else # define pTrigger 0 # define isView 0 # define tmask 0 #endif #ifdef SQLITE_OMIT_VIEW # undef isView # define isView 0 #endif if( sqlite3ViewGetColumnNames(pParse, pTab) ){ goto update_cleanup; } if( sqlite3IsReadOnly(pParse, pTab, tmask) ){ goto update_cleanup; } /* Allocate a cursors for the main database table and for all indices. ** The index cursors might not be used, but if they are used they ** need to occur right after the database cursor. So go ahead and ** allocate enough space, just in case. */ pTabList->a[0].iCursor = iBaseCur = iDataCur = pParse->nTab++; iIdxCur = iDataCur+1; pPk = HasRowid(pTab) ? 0 : sqlite3PrimaryKeyIndex(pTab); for(nIdx=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, nIdx++){ if( IsPrimaryKeyIndex(pIdx) && pPk!=0 ){ iDataCur = pParse->nTab; pTabList->a[0].iCursor = iDataCur; } pParse->nTab++; } /* Allocate space for aXRef[], aRegIdx[], and aToOpen[]. ** Initialize aXRef[] and aToOpen[] to their default values. 
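**
** A single sqlite3DbMallocRaw() block is carved into the two int arrays
** and the trailing byte array just below, so the one
** sqlite3DbFree(db, aXRef) in the cleanup code releases all three.  A
** minimal sketch of the same carve-up with plain malloc() and
** hypothetical counts nCol and nIdx:
**
**     int *aXRef             = malloc( sizeof(int)*(nCol+nIdx) + nIdx+2 );
**     int *aRegIdx           = aXRef + nCol;
**     unsigned char *aToOpen = (unsigned char*)(aRegIdx + nIdx);
**     ...
**     free(aXRef);
**
** Placing the byte array after the int arrays keeps every pointer
** correctly aligned without any padding.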
*/ aXRef = sqlite3DbMallocRaw(db, sizeof(int) * (pTab->nCol+nIdx) + nIdx+2 ); if( aXRef==0 ) goto update_cleanup; aRegIdx = aXRef+pTab->nCol; aToOpen = (u8*)(aRegIdx+nIdx); memset(aToOpen, 1, nIdx+1); aToOpen[nIdx+1] = 0; for(i=0; inCol; i++) aXRef[i] = -1; /* Initialize the name-context */ memset(&sNC, 0, sizeof(sNC)); sNC.pParse = pParse; sNC.pSrcList = pTabList; /* Resolve the column names in all the expressions of the ** of the UPDATE statement. Also find the column index ** for each column to be updated in the pChanges array. For each ** column to be updated, make sure we have authorization to change ** that column. */ chngRowid = chngPk = 0; for(i=0; inExpr; i++){ if( sqlite3ResolveExprNames(&sNC, pChanges->a[i].pExpr) ){ goto update_cleanup; } for(j=0; jnCol; j++){ if( sqlite3StrICmp(pTab->aCol[j].zName, pChanges->a[i].zName)==0 ){ if( j==pTab->iPKey ){ chngRowid = 1; pRowidExpr = pChanges->a[i].pExpr; }else if( pPk && (pTab->aCol[j].colFlags & COLFLAG_PRIMKEY)!=0 ){ chngPk = 1; } aXRef[j] = i; break; } } if( j>=pTab->nCol ){ if( pPk==0 && sqlite3IsRowid(pChanges->a[i].zName) ){ j = -1; chngRowid = 1; pRowidExpr = pChanges->a[i].pExpr; }else{ sqlite3ErrorMsg(pParse, "no such column: %s", pChanges->a[i].zName); pParse->checkSchema = 1; goto update_cleanup; } } #ifndef SQLITE_OMIT_AUTHORIZATION { int rc; rc = sqlite3AuthCheck(pParse, SQLITE_UPDATE, pTab->zName, j<0 ? "ROWID" : pTab->aCol[j].zName, db->aDb[iDb].zName); if( rc==SQLITE_DENY ){ goto update_cleanup; }else if( rc==SQLITE_IGNORE ){ aXRef[j] = -1; } } #endif } assert( (chngRowid & chngPk)==0 ); assert( chngRowid==0 || chngRowid==1 ); assert( chngPk==0 || chngPk==1 ); chngKey = chngRowid + chngPk; /* The SET expressions are not actually used inside the WHERE loop. ** So reset the colUsed mask */ pTabList->a[0].colUsed = 0; hasFK = sqlite3FkRequired(pParse, pTab, aXRef, chngKey); /* There is one entry in the aRegIdx[] array for each index on the table ** being updated. Fill in aRegIdx[] with a register number that will hold ** the key for accessing each index. */ for(j=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, j++){ int reg; if( chngKey || hasFK || pIdx->pPartIdxWhere || pIdx==pPk ){ reg = ++pParse->nMem; }else{ reg = 0; for(i=0; inKeyCol; i++){ if( aXRef[pIdx->aiColumn[i]]>=0 ){ reg = ++pParse->nMem; break; } } } if( reg==0 ) aToOpen[j+1] = 0; aRegIdx[j] = reg; } /* Begin generating code. */ v = sqlite3GetVdbe(pParse); if( v==0 ) goto update_cleanup; if( pParse->nested==0 ) sqlite3VdbeCountChanges(v); sqlite3BeginWriteOperation(pParse, 1, iDb); #ifndef SQLITE_OMIT_VIRTUALTABLE /* Virtual tables must be handled separately */ if( IsVirtual(pTab) ){ updateVirtualTable(pParse, pTabList, pTab, pChanges, pRowidExpr, aXRef, pWhere, onError); pWhere = 0; pTabList = 0; goto update_cleanup; } #endif /* Allocate required registers. */ regRowSet = ++pParse->nMem; regOldRowid = regNewRowid = ++pParse->nMem; if( chngPk || pTrigger || hasFK ){ regOld = pParse->nMem + 1; pParse->nMem += pTab->nCol; } if( chngKey || pTrigger || hasFK ){ regNewRowid = ++pParse->nMem; } regNew = pParse->nMem + 1; pParse->nMem += pTab->nCol; /* Start the view context. */ if( isView ){ sqlite3AuthContextPush(pParse, &sContext, pTab->zName); } /* If we are trying to update a view, realize that view into ** an ephemeral table. */ #if !defined(SQLITE_OMIT_VIEW) && !defined(SQLITE_OMIT_TRIGGER) if( isView ){ sqlite3MaterializeView(pParse, pTab, pWhere, iDataCur); } #endif /* Resolve the column names in all the expressions in the ** WHERE clause. 
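**
** As a concrete example of the aXRef[] mapping built by the loop above:
** for a hypothetical table t1(a,b,c,d) and the statement
**
**     UPDATE t1 SET c = c+1, a = 0;
**
** pChanges holds two entries (entry 0 for "c", entry 1 for "a"), so the
** loop leaves aXRef[] as {1, -1, 0, -1}: column a takes its new value
** from pChanges->a[1].pExpr, column c from pChanges->a[0].pExpr, and
** columns b and d are unchanged.  Neither the rowid nor any PRIMARY KEY
** column is assigned in this example, so chngRowid and chngPk both
** remain 0.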
*/ if( sqlite3ResolveExprNames(&sNC, pWhere) ){ goto update_cleanup; } /* Begin the database scan */ if( HasRowid(pTab) ){ sqlite3VdbeAddOp3(v, OP_Null, 0, regRowSet, regOldRowid); pWInfo = sqlite3WhereBegin( pParse, pTabList, pWhere, 0, 0, WHERE_ONEPASS_DESIRED, iIdxCur ); if( pWInfo==0 ) goto update_cleanup; okOnePass = sqlite3WhereOkOnePass(pWInfo, aiCurOnePass); /* Remember the rowid of every item to be updated. */ sqlite3VdbeAddOp2(v, OP_Rowid, iDataCur, regOldRowid); if( !okOnePass ){ sqlite3VdbeAddOp2(v, OP_RowSetAdd, regRowSet, regOldRowid); } /* End the database scan loop. */ sqlite3WhereEnd(pWInfo); }else{ int iPk; /* First of nPk memory cells holding PRIMARY KEY value */ i16 nPk; /* Number of components of the PRIMARY KEY */ int addrOpen; /* Address of the OpenEphemeral instruction */ assert( pPk!=0 ); nPk = pPk->nKeyCol; iPk = pParse->nMem+1; pParse->nMem += nPk; regKey = ++pParse->nMem; iEph = pParse->nTab++; sqlite3VdbeAddOp2(v, OP_Null, 0, iPk); addrOpen = sqlite3VdbeAddOp2(v, OP_OpenEphemeral, iEph, nPk); sqlite3VdbeSetP4KeyInfo(pParse, pPk); pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, 0, 0, WHERE_ONEPASS_DESIRED, iIdxCur); if( pWInfo==0 ) goto update_cleanup; okOnePass = sqlite3WhereOkOnePass(pWInfo, aiCurOnePass); for(i=0; iaiColumn[i], iPk+i); } if( okOnePass ){ sqlite3VdbeChangeToNoop(v, addrOpen); nKey = nPk; regKey = iPk; }else{ sqlite3VdbeAddOp4(v, OP_MakeRecord, iPk, nPk, regKey, sqlite3IndexAffinityStr(v, pPk), nPk); sqlite3VdbeAddOp2(v, OP_IdxInsert, iEph, regKey); } sqlite3WhereEnd(pWInfo); } /* Initialize the count of updated rows */ if( (db->flags & SQLITE_CountRows) && !pParse->pTriggerTab ){ regRowCount = ++pParse->nMem; sqlite3VdbeAddOp2(v, OP_Integer, 0, regRowCount); } labelBreak = sqlite3VdbeMakeLabel(v); if( !isView ){ /* ** Open every index that needs updating. Note that if any ** index could potentially invoke a REPLACE conflict resolution ** action, then we need to open all indices because we might need ** to be deleting some records. */ if( onError==OE_Replace ){ memset(aToOpen, 1, nIdx+1); }else{ for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ if( pIdx->onError==OE_Replace ){ memset(aToOpen, 1, nIdx+1); break; } } } if( okOnePass ){ if( aiCurOnePass[0]>=0 ) aToOpen[aiCurOnePass[0]-iBaseCur] = 0; if( aiCurOnePass[1]>=0 ) aToOpen[aiCurOnePass[1]-iBaseCur] = 0; } sqlite3OpenTableAndIndices(pParse, pTab, OP_OpenWrite, iBaseCur, aToOpen, 0, 0); } /* Top of the update loop */ if( okOnePass ){ if( aToOpen[iDataCur-iBaseCur] && !isView ){ assert( pPk ); sqlite3VdbeAddOp4Int(v, OP_NotFound, iDataCur, labelBreak, regKey, nKey); VdbeCoverageNeverTaken(v); } labelContinue = labelBreak; sqlite3VdbeAddOp2(v, OP_IsNull, pPk ? regKey : regOldRowid, labelBreak); VdbeCoverageIf(v, pPk==0); VdbeCoverageIf(v, pPk!=0); }else if( pPk ){ labelContinue = sqlite3VdbeMakeLabel(v); sqlite3VdbeAddOp2(v, OP_Rewind, iEph, labelBreak); VdbeCoverage(v); addrTop = sqlite3VdbeAddOp2(v, OP_RowKey, iEph, regKey); sqlite3VdbeAddOp4Int(v, OP_NotFound, iDataCur, labelContinue, regKey, 0); VdbeCoverage(v); }else{ labelContinue = sqlite3VdbeAddOp3(v, OP_RowSetRead, regRowSet, labelBreak, regOldRowid); VdbeCoverage(v); sqlite3VdbeAddOp3(v, OP_NotExists, iDataCur, labelContinue, regOldRowid); VdbeCoverage(v); } /* If the record number will change, set register regNewRowid to ** contain the new value. If the record number is not being modified, ** then regNewRowid is the same register as regOldRowid, which is ** already populated. 
*/ assert( chngKey || pTrigger || hasFK || regOldRowid==regNewRowid ); if( chngRowid ){ sqlite3ExprCode(pParse, pRowidExpr, regNewRowid); sqlite3VdbeAddOp1(v, OP_MustBeInt, regNewRowid); VdbeCoverage(v); } /* Compute the old pre-UPDATE content of the row being changed, if that ** information is needed */ if( chngPk || hasFK || pTrigger ){ u32 oldmask = (hasFK ? sqlite3FkOldmask(pParse, pTab) : 0); oldmask |= sqlite3TriggerColmask(pParse, pTrigger, pChanges, 0, TRIGGER_BEFORE|TRIGGER_AFTER, pTab, onError ); for(i=0; inCol; i++){ if( oldmask==0xffffffff || (i<32 && (oldmask & MASKBIT32(i))!=0) || (pTab->aCol[i].colFlags & COLFLAG_PRIMKEY)!=0 ){ testcase( oldmask!=0xffffffff && i==31 ); sqlite3ExprCodeGetColumnOfTable(v, pTab, iDataCur, i, regOld+i); }else{ sqlite3VdbeAddOp2(v, OP_Null, 0, regOld+i); } } if( chngRowid==0 && pPk==0 ){ sqlite3VdbeAddOp2(v, OP_Copy, regOldRowid, regNewRowid); } } /* Populate the array of registers beginning at regNew with the new ** row data. This array is used to check constants, create the new ** table and index records, and as the values for any new.* references ** made by triggers. ** ** If there are one or more BEFORE triggers, then do not populate the ** registers associated with columns that are (a) not modified by ** this UPDATE statement and (b) not accessed by new.* references. The ** values for registers not modified by the UPDATE must be reloaded from ** the database after the BEFORE triggers are fired anyway (as the trigger ** may have modified them). So not loading those that are not going to ** be used eliminates some redundant opcodes. */ newmask = sqlite3TriggerColmask( pParse, pTrigger, pChanges, 1, TRIGGER_BEFORE, pTab, onError ); /*sqlite3VdbeAddOp3(v, OP_Null, 0, regNew, regNew+pTab->nCol-1);*/ for(i=0; inCol; i++){ if( i==pTab->iPKey ){ sqlite3VdbeAddOp2(v, OP_Null, 0, regNew+i); }else{ j = aXRef[i]; if( j>=0 ){ sqlite3ExprCode(pParse, pChanges->a[j].pExpr, regNew+i); }else if( 0==(tmask&TRIGGER_BEFORE) || i>31 || (newmask & MASKBIT32(i)) ){ /* This branch loads the value of a column that will not be changed ** into a register. This is done if there are no BEFORE triggers, or ** if there are one or more BEFORE triggers that use this value via ** a new.* reference in a trigger program. */ testcase( i==31 ); testcase( i==32 ); sqlite3ExprCodeGetColumnOfTable(v, pTab, iDataCur, i, regNew+i); }else{ sqlite3VdbeAddOp2(v, OP_Null, 0, regNew+i); } } } /* Fire any BEFORE UPDATE triggers. This happens before constraints are ** verified. One could argue that this is wrong. */ if( tmask&TRIGGER_BEFORE ){ sqlite3TableAffinity(v, pTab, regNew); sqlite3CodeRowTrigger(pParse, pTrigger, TK_UPDATE, pChanges, TRIGGER_BEFORE, pTab, regOldRowid, onError, labelContinue); /* The row-trigger may have deleted the row being updated. In this ** case, jump to the next row. No updates or AFTER triggers are ** required. This behavior - what happens when the row being updated ** is deleted or renamed by a BEFORE trigger - is left undefined in the ** documentation. */ if( pPk ){ sqlite3VdbeAddOp4Int(v, OP_NotFound, iDataCur, labelContinue,regKey,nKey); VdbeCoverage(v); }else{ sqlite3VdbeAddOp3(v, OP_NotExists, iDataCur, labelContinue, regOldRowid); VdbeCoverage(v); } /* If it did not delete it, the row-trigger may still have modified ** some of the columns of the row being updated. Load the values for ** all columns not modified by the update statement into their ** registers in case this has happened. 
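**
** Both masks computed above follow the sqlite3TriggerColmask() convention:
** bit i is set when column i (for i<32) may be referenced from a trigger
** program, and any relevant column past the 31st makes the whole mask
** 0xffffffff so that every column is treated as used.  A small sketch of
** the membership test (colUsed() is illustrative, not an SQLite
** function):
**
**     static int colUsed(u32 mask, int i){
**       return mask==0xffffffff || (i<32 && (mask & ((u32)1<<i))!=0);
**     }
**
** With mask==0x00000005, columns 0 and 2 test as used and column 1 does
** not; with mask==0xffffffff every column tests as used.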
*/ for(i=0; inCol; i++){ if( aXRef[i]<0 && i!=pTab->iPKey ){ sqlite3ExprCodeGetColumnOfTable(v, pTab, iDataCur, i, regNew+i); } } } if( !isView ){ int j1 = 0; /* Address of jump instruction */ int bReplace = 0; /* True if REPLACE conflict resolution might happen */ /* Do constraint checks. */ assert( regOldRowid>0 ); sqlite3GenerateConstraintChecks(pParse, pTab, aRegIdx, iDataCur, iIdxCur, regNewRowid, regOldRowid, chngKey, onError, labelContinue, &bReplace); /* Do FK constraint checks. */ if( hasFK ){ sqlite3FkCheck(pParse, pTab, regOldRowid, 0, aXRef, chngKey); } /* Delete the index entries associated with the current record. */ if( bReplace || chngKey ){ if( pPk ){ j1 = sqlite3VdbeAddOp4Int(v, OP_NotFound, iDataCur, 0, regKey, nKey); }else{ j1 = sqlite3VdbeAddOp3(v, OP_NotExists, iDataCur, 0, regOldRowid); } VdbeCoverageNeverTaken(v); } sqlite3GenerateRowIndexDelete(pParse, pTab, iDataCur, iIdxCur, aRegIdx); /* If changing the record number, delete the old record. */ if( hasFK || chngKey || pPk!=0 ){ sqlite3VdbeAddOp2(v, OP_Delete, iDataCur, 0); } if( bReplace || chngKey ){ sqlite3VdbeJumpHere(v, j1); } if( hasFK ){ sqlite3FkCheck(pParse, pTab, 0, regNewRowid, aXRef, chngKey); } /* Insert the new index entries and the new record. */ sqlite3CompleteInsertion(pParse, pTab, iDataCur, iIdxCur, regNewRowid, aRegIdx, 1, 0, 0); /* Do any ON CASCADE, SET NULL or SET DEFAULT operations required to ** handle rows (possibly in other tables) that refer via a foreign key ** to the row just updated. */ if( hasFK ){ sqlite3FkActions(pParse, pTab, pChanges, regOldRowid, aXRef, chngKey); } } /* Increment the row counter */ if( (db->flags & SQLITE_CountRows) && !pParse->pTriggerTab){ sqlite3VdbeAddOp2(v, OP_AddImm, regRowCount, 1); } sqlite3CodeRowTrigger(pParse, pTrigger, TK_UPDATE, pChanges, TRIGGER_AFTER, pTab, regOldRowid, onError, labelContinue); /* Repeat the above with the next record to be updated, until ** all record selected by the WHERE clause have been updated. */ if( okOnePass ){ /* Nothing to do at end-of-loop for a single-pass */ }else if( pPk ){ sqlite3VdbeResolveLabel(v, labelContinue); sqlite3VdbeAddOp2(v, OP_Next, iEph, addrTop); VdbeCoverage(v); }else{ sqlite3VdbeAddOp2(v, OP_Goto, 0, labelContinue); } sqlite3VdbeResolveLabel(v, labelBreak); /* Close all tables */ for(i=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, i++){ assert( aRegIdx ); if( aToOpen[i+1] ){ sqlite3VdbeAddOp2(v, OP_Close, iIdxCur+i, 0); } } if( iDataCurnested==0 && pParse->pTriggerTab==0 ){ sqlite3AutoincrementEnd(pParse); } /* ** Return the number of rows that were changed. If this routine is ** generating code because of a call to sqlite3NestedParse(), do not ** invoke the callback function. */ if( (db->flags&SQLITE_CountRows) && !pParse->pTriggerTab && !pParse->nested ){ sqlite3VdbeAddOp2(v, OP_ResultRow, regRowCount, 1); sqlite3VdbeSetNumCols(v, 1); sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "rows updated", SQLITE_STATIC); } update_cleanup: sqlite3AuthContextPop(&sContext); sqlite3DbFree(db, aXRef); /* Also frees aRegIdx[] and aToOpen[] */ sqlite3SrcListDelete(db, pTabList); sqlite3ExprListDelete(db, pChanges); sqlite3ExprDelete(db, pWhere); return; } /* Make sure "isView" and other macros defined above are undefined. Otherwise ** they may interfere with compilation of other functions in this file ** (or in another file, if this file becomes part of the amalgamation). 
*/ #ifdef isView #undef isView #endif #ifdef pTrigger #undef pTrigger #endif #ifndef SQLITE_OMIT_VIRTUALTABLE /* ** Generate code for an UPDATE of a virtual table. ** ** The strategy is that we create an ephemeral table that contains ** for each row to be changed: ** ** (A) The original rowid of that row. ** (B) The revised rowid for the row. (note1) ** (C) The content of every column in the row. ** ** Then we loop over this ephemeral table and for each row in ** the ephemeral table call VUpdate. ** ** When finished, drop the ephemeral table. ** ** (note1) Actually, if we know in advance that (A) is always the same ** as (B) we only store (A), then duplicate (A) when pulling ** it out of the ephemeral table before calling VUpdate. */ static void updateVirtualTable( Parse *pParse, /* The parsing context */ SrcList *pSrc, /* The virtual table to be modified */ Table *pTab, /* The virtual table */ ExprList *pChanges, /* The columns to change in the UPDATE statement */ Expr *pRowid, /* Expression used to recompute the rowid */ int *aXRef, /* Mapping from columns of pTab to entries in pChanges */ Expr *pWhere, /* WHERE clause of the UPDATE statement */ int onError /* ON CONFLICT strategy */ ){ Vdbe *v = pParse->pVdbe; /* Virtual machine under construction */ ExprList *pEList = 0; /* The result set of the SELECT statement */ Select *pSelect = 0; /* The SELECT statement */ Expr *pExpr; /* Temporary expression */ int ephemTab; /* Table holding the result of the SELECT */ int i; /* Loop counter */ int addr; /* Address of top of loop */ int iReg; /* First register in set passed to OP_VUpdate */ sqlite3 *db = pParse->db; /* Database connection */ const char *pVTab = (const char*)sqlite3GetVTable(db, pTab); SelectDest dest; /* Construct the SELECT statement that will find the new values for ** all updated rows. */ pEList = sqlite3ExprListAppend(pParse, 0, sqlite3Expr(db, TK_ID, "_rowid_")); if( pRowid ){ pEList = sqlite3ExprListAppend(pParse, pEList, sqlite3ExprDup(db, pRowid, 0)); } assert( pTab->iPKey<0 ); for(i=0; inCol; i++){ if( aXRef[i]>=0 ){ pExpr = sqlite3ExprDup(db, pChanges->a[aXRef[i]].pExpr, 0); }else{ pExpr = sqlite3Expr(db, TK_ID, pTab->aCol[i].zName); } pEList = sqlite3ExprListAppend(pParse, pEList, pExpr); } pSelect = sqlite3SelectNew(pParse, pEList, pSrc, pWhere, 0, 0, 0, 0, 0, 0); /* Create the ephemeral table into which the update results will ** be stored. */ assert( v ); ephemTab = pParse->nTab++; /* fill the ephemeral table */ sqlite3SelectDestInit(&dest, SRT_EphemTab, ephemTab); sqlite3Select(pParse, pSelect, &dest); /* Generate code to scan the ephemeral table and call VUpdate. */ iReg = ++pParse->nMem; pParse->nMem += pTab->nCol+1; addr = sqlite3VdbeAddOp2(v, OP_Rewind, ephemTab, 0); VdbeCoverage(v); sqlite3VdbeAddOp3(v, OP_Column, ephemTab, 0, iReg); sqlite3VdbeAddOp3(v, OP_Column, ephemTab, (pRowid?1:0), iReg+1); for(i=0; inCol; i++){ sqlite3VdbeAddOp3(v, OP_Column, ephemTab, i+1+(pRowid!=0), iReg+2+i); } sqlite3VtabMakeWritable(pParse, pTab); sqlite3VdbeAddOp4(v, OP_VUpdate, 0, pTab->nCol+2, iReg, pVTab, P4_VTAB); sqlite3VdbeChangeP5(v, onError==OE_Default ? 
OE_Abort : onError); sqlite3MayAbort(pParse); sqlite3VdbeAddOp2(v, OP_Next, ephemTab, addr+1); VdbeCoverage(v); sqlite3VdbeJumpHere(v, addr); sqlite3VdbeAddOp2(v, OP_Close, ephemTab, 0); /* Cleanup */ sqlite3SelectDelete(db, pSelect); } #endif /* SQLITE_OMIT_VIRTUALTABLE */ /************** End of update.c **********************************************/ /************** Begin file vacuum.c ******************************************/ /* ** 2003 April 6 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** This file contains code used to implement the VACUUM command. ** ** Most of the code in this file may be omitted by defining the ** SQLITE_OMIT_VACUUM macro. */ /* #include "sqliteInt.h" */ /* #include "vdbeInt.h" */ #if !defined(SQLITE_OMIT_VACUUM) && !defined(SQLITE_OMIT_ATTACH) /* ** Finalize a prepared statement. If there was an error, store the ** text of the error message in *pzErrMsg. Return the result code. */ static int vacuumFinalize(sqlite3 *db, sqlite3_stmt *pStmt, char **pzErrMsg){ int rc; rc = sqlite3VdbeFinalize((Vdbe*)pStmt); if( rc ){ sqlite3SetString(pzErrMsg, db, sqlite3_errmsg(db)); } return rc; } /* ** Execute zSql on database db. Return an error code. */ static int execSql(sqlite3 *db, char **pzErrMsg, const char *zSql){ sqlite3_stmt *pStmt; VVA_ONLY( int rc; ) if( !zSql ){ return SQLITE_NOMEM; } if( SQLITE_OK!=sqlite3_prepare(db, zSql, -1, &pStmt, 0) ){ sqlite3SetString(pzErrMsg, db, sqlite3_errmsg(db)); return sqlite3_errcode(db); } VVA_ONLY( rc = ) sqlite3_step(pStmt); assert( rc!=SQLITE_ROW || (db->flags&SQLITE_CountRows) ); return vacuumFinalize(db, pStmt, pzErrMsg); } /* ** Execute zSql on database db. The statement returns exactly ** one column. Execute this as SQL on the same database. */ static int execExecSql(sqlite3 *db, char **pzErrMsg, const char *zSql){ sqlite3_stmt *pStmt; int rc; rc = sqlite3_prepare(db, zSql, -1, &pStmt, 0); if( rc!=SQLITE_OK ) return rc; while( SQLITE_ROW==sqlite3_step(pStmt) ){ rc = execSql(db, pzErrMsg, (char*)sqlite3_column_text(pStmt, 0)); if( rc!=SQLITE_OK ){ vacuumFinalize(db, pStmt, pzErrMsg); return rc; } } return vacuumFinalize(db, pStmt, pzErrMsg); } /* ** The VACUUM command is used to clean up the database, ** collapse free space, etc. It is modelled after the VACUUM command ** in PostgreSQL. The VACUUM command works as follows: ** ** (1) Create a new transient database file ** (2) Copy all content from the database being vacuumed into ** the new transient database file ** (3) Copy content from the transient database back into the ** original database. ** ** The transient database requires temporary disk space approximately ** equal to the size of the original database. The copy operation of ** step (3) requires additional temporary disk space approximately equal ** to the size of the original database for the rollback journal. ** Hence, temporary disk space that is approximately 2x the size of the ** original database is required. Every page of the database is written ** approximately 3 times: Once for step (2) and twice for step (3). ** Two writes per page are required in step (3) because the original ** database content must be written into the rollback journal prior to ** overwriting the database with the vacuumed content. 
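**
** As a rough worked example of the figures above: vacuuming a 1 GB
** database requires about 1 GB for the transient database plus about
** 1 GB for the rollback journal (roughly 2 GB of temporary space), and
** writes roughly 3 GB in total: about 1 GB in step (2) and about 2 GB
** in step (3).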
** ** Only 1x temporary space and only 1x writes would be required if ** the copy of step (3) were replaced by deleting the original database ** and renaming the transient database as the original. But that will ** not work if other processes are attached to the original database. ** And a power loss in between deleting the original and renaming the ** transient would cause the database file to appear to be deleted ** following reboot. */ SQLITE_PRIVATE void sqlite3Vacuum(Parse *pParse){ Vdbe *v = sqlite3GetVdbe(pParse); if( v ){ sqlite3VdbeAddOp2(v, OP_Vacuum, 0, 0); sqlite3VdbeUsesBtree(v, 0); } return; } /* ** This routine implements the OP_Vacuum opcode of the VDBE. */ SQLITE_PRIVATE int sqlite3RunVacuum(char **pzErrMsg, sqlite3 *db){ int rc = SQLITE_OK; /* Return code from service routines */ Btree *pMain; /* The database being vacuumed */ Btree *pTemp; /* The temporary database we vacuum into */ char *zSql = 0; /* SQL statements */ int saved_flags; /* Saved value of the db->flags */ int saved_nChange; /* Saved value of db->nChange */ int saved_nTotalChange; /* Saved value of db->nTotalChange */ void (*saved_xTrace)(void*,const char*); /* Saved db->xTrace */ Db *pDb = 0; /* Database to detach at end of vacuum */ int isMemDb; /* True if vacuuming a :memory: database */ int nRes; /* Bytes of reserved space at the end of each page */ int nDb; /* Number of attached databases */ if( !db->autoCommit ){ sqlite3SetString(pzErrMsg, db, "cannot VACUUM from within a transaction"); return SQLITE_ERROR; } if( db->nVdbeActive>1 ){ sqlite3SetString(pzErrMsg, db,"cannot VACUUM - SQL statements in progress"); return SQLITE_ERROR; } /* Save the current value of the database flags so that it can be ** restored before returning. Then set the writable-schema flag, and ** disable CHECK and foreign key constraints. */ saved_flags = db->flags; saved_nChange = db->nChange; saved_nTotalChange = db->nTotalChange; saved_xTrace = db->xTrace; db->flags |= SQLITE_WriteSchema | SQLITE_IgnoreChecks | SQLITE_PreferBuiltin; db->flags &= ~(SQLITE_ForeignKeys | SQLITE_ReverseOrder); db->xTrace = 0; pMain = db->aDb[0].pBt; isMemDb = sqlite3PagerIsMemdb(sqlite3BtreePager(pMain)); /* Attach the temporary database as 'vacuum_db'. The synchronous pragma ** can be set to 'off' for this file, as it is not recovered if a crash ** occurs anyway. The integrity of the database is maintained by a ** (possibly synchronous) transaction opened on the main database before ** sqlite3BtreeCopyFile() is called. ** ** An optimisation would be to use a non-journaled pager. ** (Later:) I tried setting "PRAGMA vacuum_db.journal_mode=OFF" but ** that actually made the VACUUM run slower. Very little journalling ** actually occurs when doing a vacuum since the vacuum_db is initially ** empty. Only the journal header is written. Apparently it takes more ** time to parse and run the PRAGMA to turn journalling off than it does ** to write the journal header file. */ nDb = db->nDb; if( sqlite3TempInMemory(db) ){ zSql = "ATTACH ':memory:' AS vacuum_db;"; }else{ zSql = "ATTACH '' AS vacuum_db;"; } rc = execSql(db, pzErrMsg, zSql); if( db->nDb>nDb ){ pDb = &db->aDb[db->nDb-1]; assert( strcmp(pDb->zName,"vacuum_db")==0 ); } if( rc!=SQLITE_OK ) goto end_of_vacuum; pTemp = db->aDb[db->nDb-1].pBt; /* The call to execSql() to attach the temp database has left the file ** locked (as there was more than one active statement when the transaction ** to read the schema was concluded. 
Unlock it here so that this doesn't ** cause problems for the call to BtreeSetPageSize() below. */ sqlite3BtreeCommit(pTemp); nRes = sqlite3BtreeGetOptimalReserve(pMain); /* A VACUUM cannot change the pagesize of an encrypted database. */ #ifdef SQLITE_HAS_CODEC if( db->nextPagesize ){ extern void sqlite3CodecGetKey(sqlite3*, int, void**, int*); int nKey; char *zKey; sqlite3CodecGetKey(db, 0, (void**)&zKey, &nKey); if( nKey ) db->nextPagesize = 0; } #endif rc = execSql(db, pzErrMsg, "PRAGMA vacuum_db.synchronous=OFF"); if( rc!=SQLITE_OK ) goto end_of_vacuum; /* Begin a transaction and take an exclusive lock on the main database ** file. This is done before the sqlite3BtreeGetPageSize(pMain) call below, ** to ensure that we do not try to change the page-size on a WAL database. */ rc = execSql(db, pzErrMsg, "BEGIN;"); if( rc!=SQLITE_OK ) goto end_of_vacuum; rc = sqlite3BtreeBeginTrans(pMain, 2); if( rc!=SQLITE_OK ) goto end_of_vacuum; /* Do not attempt to change the page size for a WAL database */ if( sqlite3PagerGetJournalMode(sqlite3BtreePager(pMain)) ==PAGER_JOURNALMODE_WAL ){ db->nextPagesize = 0; } if( sqlite3BtreeSetPageSize(pTemp, sqlite3BtreeGetPageSize(pMain), nRes, 0) || (!isMemDb && sqlite3BtreeSetPageSize(pTemp, db->nextPagesize, nRes, 0)) || NEVER(db->mallocFailed) ){ rc = SQLITE_NOMEM; goto end_of_vacuum; } #ifndef SQLITE_OMIT_AUTOVACUUM sqlite3BtreeSetAutoVacuum(pTemp, db->nextAutovac>=0 ? db->nextAutovac : sqlite3BtreeGetAutoVacuum(pMain)); #endif /* Query the schema of the main database. Create a mirror schema ** in the temporary database. */ rc = execExecSql(db, pzErrMsg, "SELECT 'CREATE TABLE vacuum_db.' || substr(sql,14) " " FROM sqlite_master WHERE type='table' AND name!='sqlite_sequence'" " AND coalesce(rootpage,1)>0" ); if( rc!=SQLITE_OK ) goto end_of_vacuum; rc = execExecSql(db, pzErrMsg, "SELECT 'CREATE INDEX vacuum_db.' || substr(sql,14)" " FROM sqlite_master WHERE sql LIKE 'CREATE INDEX %' "); if( rc!=SQLITE_OK ) goto end_of_vacuum; rc = execExecSql(db, pzErrMsg, "SELECT 'CREATE UNIQUE INDEX vacuum_db.' || substr(sql,21) " " FROM sqlite_master WHERE sql LIKE 'CREATE UNIQUE INDEX %'"); if( rc!=SQLITE_OK ) goto end_of_vacuum; /* Loop through the tables in the main database. For each, do ** an "INSERT INTO vacuum_db.xxx SELECT * FROM main.xxx;" to copy ** the contents to the temporary database. */ assert( (db->flags & SQLITE_Vacuum)==0 ); db->flags |= SQLITE_Vacuum; rc = execExecSql(db, pzErrMsg, "SELECT 'INSERT INTO vacuum_db.' || quote(name) " "|| ' SELECT * FROM main.' || quote(name) || ';'" "FROM main.sqlite_master " "WHERE type = 'table' AND name!='sqlite_sequence' " " AND coalesce(rootpage,1)>0" ); assert( (db->flags & SQLITE_Vacuum)!=0 ); db->flags &= ~SQLITE_Vacuum; if( rc!=SQLITE_OK ) goto end_of_vacuum; /* Copy over the sequence table */ rc = execExecSql(db, pzErrMsg, "SELECT 'DELETE FROM vacuum_db.' || quote(name) || ';' " "FROM vacuum_db.sqlite_master WHERE name='sqlite_sequence' " ); if( rc!=SQLITE_OK ) goto end_of_vacuum; rc = execExecSql(db, pzErrMsg, "SELECT 'INSERT INTO vacuum_db.' || quote(name) " "|| ' SELECT * FROM main.' || quote(name) || ';' " "FROM vacuum_db.sqlite_master WHERE name=='sqlite_sequence';" ); if( rc!=SQLITE_OK ) goto end_of_vacuum; /* Copy the triggers, views, and virtual tables from the main database ** over to the temporary database. None of these objects has any ** associated storage, so all we have to do is copy their entries ** from the SQLITE_MASTER table. 
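  **
  ** To illustrate the schema-copy statements above: for a table created
  ** with "CREATE TABLE t1(a,b)", substr(sql,14) strips the 13-character
  ** prefix "CREATE TABLE ", so the query yields the statement
  ** "CREATE TABLE vacuum_db.t1(a,b)".  Views and triggers need no such
  ** rewriting because they have no storage; their sqlite_master rows are
  ** simply re-inserted below.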
  */
  rc = execSql(db, pzErrMsg,
      "INSERT INTO vacuum_db.sqlite_master "
      " SELECT type, name, tbl_name, rootpage, sql"
      " FROM main.sqlite_master"
      " WHERE type='view' OR type='trigger'"
      " OR (type='table' AND rootpage=0)"
  );
  if( rc ) goto end_of_vacuum;

  /* At this point, there is a write transaction open on both the
  ** vacuum database and the main database. Assuming no error occurs,
  ** both transactions are closed by this block - the main database
  ** transaction by sqlite3BtreeCopyFile() and the other by an explicit
  ** call to sqlite3BtreeCommit().
  */
  {
    u32 meta;
    int i;

    /* This array determines which meta values are preserved in the
    ** vacuum. Even entries are the meta value number and odd entries
    ** are an increment to apply to the meta value after the vacuum.
    ** The increment is used to increase the schema cookie so that other
    ** connections to the same database will know to reread the schema.
    */
    static const unsigned char aCopy[] = {
       BTREE_SCHEMA_VERSION,     1,  /* Add one to the old schema cookie */
       BTREE_DEFAULT_CACHE_SIZE, 0,  /* Preserve the default page cache size */
       BTREE_TEXT_ENCODING,      0,  /* Preserve the text encoding */
       BTREE_USER_VERSION,       0,  /* Preserve the user version */
       BTREE_APPLICATION_ID,     0,  /* Preserve the application id */
    };

    assert( 1==sqlite3BtreeIsInTrans(pTemp) );
    assert( 1==sqlite3BtreeIsInTrans(pMain) );

    /* Copy Btree meta values */
    for(i=0; i<ArraySize(aCopy); i+=2){
      /* GetMeta() and UpdateMeta() cannot fail in this context because
      ** we already have page 1 loaded into cache and marked dirty. */
      sqlite3BtreeGetMeta(pMain, aCopy[i], &meta);
      rc = sqlite3BtreeUpdateMeta(pTemp, aCopy[i], meta+aCopy[i+1]);
      if( NEVER(rc!=SQLITE_OK) ) goto end_of_vacuum;
    }

    rc = sqlite3BtreeCopyFile(pMain, pTemp);
    if( rc!=SQLITE_OK ) goto end_of_vacuum;
    rc = sqlite3BtreeCommit(pTemp);
    if( rc!=SQLITE_OK ) goto end_of_vacuum;
#ifndef SQLITE_OMIT_AUTOVACUUM
    sqlite3BtreeSetAutoVacuum( pMain, sqlite3BtreeGetAutoVacuum(pTemp));
#endif
  }

  assert( rc==SQLITE_OK );
  rc = sqlite3BtreeSetPageSize(pMain, sqlite3BtreeGetPageSize(pTemp), nRes, 1);

end_of_vacuum:
  /* Restore the original value of db->flags */
  db->flags = saved_flags;
  db->nChange = saved_nChange;
  db->nTotalChange = saved_nTotalChange;
  db->xTrace = saved_xTrace;
  sqlite3BtreeSetPageSize(pMain, -1, -1, 1);

  /* Currently there is an SQL level transaction open on the vacuum
  ** database. No locks are held on any other files (since the main file
  ** was committed at the btree level). So it is safe to end the transaction
  ** by manually setting the autoCommit flag to true and detaching the
  ** vacuum database. The vacuum_db journal file is deleted when the pager
  ** is closed by the DETACH.
  */
  db->autoCommit = 1;
  if( pDb ){
    sqlite3BtreeClose(pDb->pBt);
    pDb->pBt = 0;
    pDb->pSchema = 0;
  }

  /* This both clears the schemas and reduces the size of the db->aDb[]
  ** array. */
  sqlite3ResetAllSchemasOfConnection(db);

  return rc;
}

#endif  /* SQLITE_OMIT_VACUUM && SQLITE_OMIT_ATTACH */

/************** End of vacuum.c **********************************************/
/************** Begin file vtab.c ********************************************/
/*
** 2006 June 10
**
** The author disclaims copyright to this source code.  In place of
** a legal notice, here is a blessing:
**
**    May you do good and not evil.
**    May you find forgiveness for yourself and forgive others.
**    May you share freely, never taking more than you give.
**
*************************************************************************
** This file contains code used to help implement virtual tables.
*/
#ifndef SQLITE_OMIT_VIRTUALTABLE
/* #include "sqliteInt.h" */

/*
** Before a virtual table xCreate() or xConnect() method is invoked, the
** sqlite3.pVtabCtx member variable is set to point to an instance of
** this struct allocated on the stack. It is used by the implementation of
** the sqlite3_declare_vtab() and sqlite3_vtab_config() APIs, both of which
** are invoked only from within xCreate and xConnect methods.
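**
** For orientation, a minimal sketch of a hypothetical xCreate/xConnect
** (the names here are illustrative, not part of SQLite):
**
**     static int exCreate(sqlite3 *db, void *pAux, int argc,
**                         const char *const*argv,
**                         sqlite3_vtab **ppVtab, char **pzErr){
**       int rc = sqlite3_declare_vtab(db, "CREATE TABLE x(a,b)");
**       if( rc==SQLITE_OK ){
**         sqlite3_vtab_config(db, SQLITE_VTAB_CONSTRAINT_SUPPORT, 1);
**         *ppVtab = sqlite3_malloc(sizeof(**ppVtab));
**         if( *ppVtab==0 ) return SQLITE_NOMEM;
**         memset(*ppVtab, 0, sizeof(**ppVtab));
**       }
**       return rc;
**     }
**
** Both sqlite3_declare_vtab() and sqlite3_vtab_config() locate the table
** being constructed through the VtabCtx pushed onto db->pVtabCtx below.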
*/ struct VtabCtx { VTable *pVTable; /* The virtual table being constructed */ Table *pTab; /* The Table object to which the virtual table belongs */ VtabCtx *pPrior; /* Parent context (if any) */ int bDeclared; /* True after sqlite3_declare_vtab() is called */ }; /* ** The actual function that does the work of creating a new module. ** This function implements the sqlite3_create_module() and ** sqlite3_create_module_v2() interfaces. */ static int createModule( sqlite3 *db, /* Database in which module is registered */ const char *zName, /* Name assigned to this module */ const sqlite3_module *pModule, /* The definition of the module */ void *pAux, /* Context pointer for xCreate/xConnect */ void (*xDestroy)(void *) /* Module destructor function */ ){ int rc = SQLITE_OK; int nName; sqlite3_mutex_enter(db->mutex); nName = sqlite3Strlen30(zName); if( sqlite3HashFind(&db->aModule, zName) ){ rc = SQLITE_MISUSE_BKPT; }else{ Module *pMod; pMod = (Module *)sqlite3DbMallocRaw(db, sizeof(Module) + nName + 1); if( pMod ){ Module *pDel; char *zCopy = (char *)(&pMod[1]); memcpy(zCopy, zName, nName+1); pMod->zName = zCopy; pMod->pModule = pModule; pMod->pAux = pAux; pMod->xDestroy = xDestroy; pDel = (Module *)sqlite3HashInsert(&db->aModule,zCopy,(void*)pMod); assert( pDel==0 || pDel==pMod ); if( pDel ){ db->mallocFailed = 1; sqlite3DbFree(db, pDel); } } } rc = sqlite3ApiExit(db, rc); if( rc!=SQLITE_OK && xDestroy ) xDestroy(pAux); sqlite3_mutex_leave(db->mutex); return rc; } /* ** External API function used to create a new virtual-table module. */ SQLITE_API int SQLITE_STDCALL sqlite3_create_module( sqlite3 *db, /* Database in which module is registered */ const char *zName, /* Name assigned to this module */ const sqlite3_module *pModule, /* The definition of the module */ void *pAux /* Context pointer for xCreate/xConnect */ ){ #ifdef SQLITE_ENABLE_API_ARMOR if( !sqlite3SafetyCheckOk(db) || zName==0 ) return SQLITE_MISUSE_BKPT; #endif return createModule(db, zName, pModule, pAux, 0); } /* ** External API function used to create a new virtual-table module. */ SQLITE_API int SQLITE_STDCALL sqlite3_create_module_v2( sqlite3 *db, /* Database in which module is registered */ const char *zName, /* Name assigned to this module */ const sqlite3_module *pModule, /* The definition of the module */ void *pAux, /* Context pointer for xCreate/xConnect */ void (*xDestroy)(void *) /* Module destructor function */ ){ #ifdef SQLITE_ENABLE_API_ARMOR if( !sqlite3SafetyCheckOk(db) || zName==0 ) return SQLITE_MISUSE_BKPT; #endif return createModule(db, zName, pModule, pAux, xDestroy); } /* ** Lock the virtual table so that it cannot be disconnected. ** Locks nest. Every lock should have a corresponding unlock. ** If an unlock is omitted, resources leaks will occur. ** ** If a disconnect is attempted while a virtual table is locked, ** the disconnect is deferred until all locks have been removed. */ SQLITE_PRIVATE void sqlite3VtabLock(VTable *pVTab){ pVTab->nRef++; } /* ** pTab is a pointer to a Table structure representing a virtual-table. ** Return a pointer to the VTable object used by connection db to access ** this virtual-table, if one has been created, or NULL otherwise. */ SQLITE_PRIVATE VTable *sqlite3GetVTable(sqlite3 *db, Table *pTab){ VTable *pVtab; assert( IsVirtual(pTab) ); for(pVtab=pTab->pVTable; pVtab && pVtab->db!=db; pVtab=pVtab->pNext); return pVtab; } /* ** Decrement the ref-count on a virtual table object. When the ref-count ** reaches zero, call the xDisconnect() method to delete the object. 
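**
** For example, addToVTrans() below calls sqlite3VtabLock() when a virtual
** table is added to the sqlite3.aVTrans[] array, and the matching unlock
** is performed by callFinaliser() when the transaction is committed or
** rolled back.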
*/ SQLITE_PRIVATE void sqlite3VtabUnlock(VTable *pVTab){ sqlite3 *db = pVTab->db; assert( db ); assert( pVTab->nRef>0 ); assert( db->magic==SQLITE_MAGIC_OPEN || db->magic==SQLITE_MAGIC_ZOMBIE ); pVTab->nRef--; if( pVTab->nRef==0 ){ sqlite3_vtab *p = pVTab->pVtab; if( p ){ p->pModule->xDisconnect(p); } sqlite3DbFree(db, pVTab); } } /* ** Table p is a virtual table. This function moves all elements in the ** p->pVTable list to the sqlite3.pDisconnect lists of their associated ** database connections to be disconnected at the next opportunity. ** Except, if argument db is not NULL, then the entry associated with ** connection db is left in the p->pVTable list. */ static VTable *vtabDisconnectAll(sqlite3 *db, Table *p){ VTable *pRet = 0; VTable *pVTable = p->pVTable; p->pVTable = 0; /* Assert that the mutex (if any) associated with the BtShared database ** that contains table p is held by the caller. See header comments ** above function sqlite3VtabUnlockList() for an explanation of why ** this makes it safe to access the sqlite3.pDisconnect list of any ** database connection that may have an entry in the p->pVTable list. */ assert( db==0 || sqlite3SchemaMutexHeld(db, 0, p->pSchema) ); while( pVTable ){ sqlite3 *db2 = pVTable->db; VTable *pNext = pVTable->pNext; assert( db2 ); if( db2==db ){ pRet = pVTable; p->pVTable = pRet; pRet->pNext = 0; }else{ pVTable->pNext = db2->pDisconnect; db2->pDisconnect = pVTable; } pVTable = pNext; } assert( !db || pRet ); return pRet; } /* ** Table *p is a virtual table. This function removes the VTable object ** for table *p associated with database connection db from the linked ** list in p->pVTab. It also decrements the VTable ref count. This is ** used when closing database connection db to free all of its VTable ** objects without disturbing the rest of the Schema object (which may ** be being used by other shared-cache connections). */ SQLITE_PRIVATE void sqlite3VtabDisconnect(sqlite3 *db, Table *p){ VTable **ppVTab; assert( IsVirtual(p) ); assert( sqlite3BtreeHoldsAllMutexes(db) ); assert( sqlite3_mutex_held(db->mutex) ); for(ppVTab=&p->pVTable; *ppVTab; ppVTab=&(*ppVTab)->pNext){ if( (*ppVTab)->db==db ){ VTable *pVTab = *ppVTab; *ppVTab = pVTab->pNext; sqlite3VtabUnlock(pVTab); break; } } } /* ** Disconnect all the virtual table objects in the sqlite3.pDisconnect list. ** ** This function may only be called when the mutexes associated with all ** shared b-tree databases opened using connection db are held by the ** caller. This is done to protect the sqlite3.pDisconnect list. The ** sqlite3.pDisconnect list is accessed only as follows: ** ** 1) By this function. In this case, all BtShared mutexes and the mutex ** associated with the database handle itself must be held. ** ** 2) By function vtabDisconnectAll(), when it adds a VTable entry to ** the sqlite3.pDisconnect list. In this case either the BtShared mutex ** associated with the database the virtual table is stored in is held ** or, if the virtual table is stored in a non-sharable database, then ** the database handle mutex is held. ** ** As a result, a sqlite3.pDisconnect cannot be accessed simultaneously ** by multiple threads. It is thread-safe. 
*/ SQLITE_PRIVATE void sqlite3VtabUnlockList(sqlite3 *db){ VTable *p = db->pDisconnect; db->pDisconnect = 0; assert( sqlite3BtreeHoldsAllMutexes(db) ); assert( sqlite3_mutex_held(db->mutex) ); if( p ){ sqlite3ExpirePreparedStatements(db); do { VTable *pNext = p->pNext; sqlite3VtabUnlock(p); p = pNext; }while( p ); } } /* ** Clear any and all virtual-table information from the Table record. ** This routine is called, for example, just before deleting the Table ** record. ** ** Since it is a virtual-table, the Table structure contains a pointer ** to the head of a linked list of VTable structures. Each VTable ** structure is associated with a single sqlite3* user of the schema. ** The reference count of the VTable structure associated with database ** connection db is decremented immediately (which may lead to the ** structure being xDisconnected and free). Any other VTable structures ** in the list are moved to the sqlite3.pDisconnect list of the associated ** database connection. */ SQLITE_PRIVATE void sqlite3VtabClear(sqlite3 *db, Table *p){ if( !db || db->pnBytesFreed==0 ) vtabDisconnectAll(0, p); if( p->azModuleArg ){ int i; for(i=0; inModuleArg; i++){ if( i!=1 ) sqlite3DbFree(db, p->azModuleArg[i]); } sqlite3DbFree(db, p->azModuleArg); } } /* ** Add a new module argument to pTable->azModuleArg[]. ** The string is not copied - the pointer is stored. The ** string will be freed automatically when the table is ** deleted. */ static void addModuleArgument(sqlite3 *db, Table *pTable, char *zArg){ int i = pTable->nModuleArg++; int nBytes = sizeof(char *)*(1+pTable->nModuleArg); char **azModuleArg; azModuleArg = sqlite3DbRealloc(db, pTable->azModuleArg, nBytes); if( azModuleArg==0 ){ int j; for(j=0; jazModuleArg[j]); } sqlite3DbFree(db, zArg); sqlite3DbFree(db, pTable->azModuleArg); pTable->nModuleArg = 0; }else{ azModuleArg[i] = zArg; azModuleArg[i+1] = 0; } pTable->azModuleArg = azModuleArg; } /* ** The parser calls this routine when it first sees a CREATE VIRTUAL TABLE ** statement. The module name has been parsed, but the optional list ** of parameters that follow the module name are still pending. */ SQLITE_PRIVATE void sqlite3VtabBeginParse( Parse *pParse, /* Parsing context */ Token *pName1, /* Name of new table, or database name */ Token *pName2, /* Name of new table or NULL */ Token *pModuleName, /* Name of the module for the virtual table */ int ifNotExists /* No error if the table already exists */ ){ int iDb; /* The database the table is being created in */ Table *pTable; /* The new virtual table */ sqlite3 *db; /* Database connection */ sqlite3StartTable(pParse, pName1, pName2, 0, 0, 1, ifNotExists); pTable = pParse->pNewTable; if( pTable==0 ) return; assert( 0==pTable->pIndex ); db = pParse->db; iDb = sqlite3SchemaToIndex(db, pTable->pSchema); assert( iDb>=0 ); pTable->tabFlags |= TF_Virtual; pTable->nModuleArg = 0; addModuleArgument(db, pTable, sqlite3NameFromToken(db, pModuleName)); addModuleArgument(db, pTable, 0); addModuleArgument(db, pTable, sqlite3DbStrDup(db, pTable->zName)); assert( (pParse->sNameToken.z==pName2->z && pName2->z!=0) || (pParse->sNameToken.z==pName1->z && pName2->z==0) ); pParse->sNameToken.n = (int)( &pModuleName->z[pModuleName->n] - pParse->sNameToken.z ); #ifndef SQLITE_OMIT_AUTHORIZATION /* Creating a virtual table invokes the authorization callback twice. ** The first invocation, to obtain permission to INSERT a row into the ** sqlite_master table, has already been made by sqlite3StartTable(). 
** The second call, to obtain permission to create the table, is made now. */ if( pTable->azModuleArg ){ sqlite3AuthCheck(pParse, SQLITE_CREATE_VTABLE, pTable->zName, pTable->azModuleArg[0], pParse->db->aDb[iDb].zName); } #endif } /* ** This routine takes the module argument that has been accumulating ** in pParse->zArg[] and appends it to the list of arguments on the ** virtual table currently under construction in pParse->pTable. */ static void addArgumentToVtab(Parse *pParse){ if( pParse->sArg.z && pParse->pNewTable ){ const char *z = (const char*)pParse->sArg.z; int n = pParse->sArg.n; sqlite3 *db = pParse->db; addModuleArgument(db, pParse->pNewTable, sqlite3DbStrNDup(db, z, n)); } } /* ** The parser calls this routine after the CREATE VIRTUAL TABLE statement ** has been completely parsed. */ SQLITE_PRIVATE void sqlite3VtabFinishParse(Parse *pParse, Token *pEnd){ Table *pTab = pParse->pNewTable; /* The table being constructed */ sqlite3 *db = pParse->db; /* The database connection */ if( pTab==0 ) return; addArgumentToVtab(pParse); pParse->sArg.z = 0; if( pTab->nModuleArg<1 ) return; /* If the CREATE VIRTUAL TABLE statement is being entered for the ** first time (in other words if the virtual table is actually being ** created now instead of just being read out of sqlite_master) then ** do additional initialization work and store the statement text ** in the sqlite_master table. */ if( !db->init.busy ){ char *zStmt; char *zWhere; int iDb; int iReg; Vdbe *v; /* Compute the complete text of the CREATE VIRTUAL TABLE statement */ if( pEnd ){ pParse->sNameToken.n = (int)(pEnd->z - pParse->sNameToken.z) + pEnd->n; } zStmt = sqlite3MPrintf(db, "CREATE VIRTUAL TABLE %T", &pParse->sNameToken); /* A slot for the record has already been allocated in the ** SQLITE_MASTER table. We just need to update that slot with all ** the information we've collected. ** ** The VM register number pParse->regRowid holds the rowid of an ** entry in the sqlite_master table tht was created for this vtab ** by sqlite3StartTable(). */ iDb = sqlite3SchemaToIndex(db, pTab->pSchema); sqlite3NestedParse(pParse, "UPDATE %Q.%s " "SET type='table', name=%Q, tbl_name=%Q, rootpage=0, sql=%Q " "WHERE rowid=#%d", db->aDb[iDb].zName, SCHEMA_TABLE(iDb), pTab->zName, pTab->zName, zStmt, pParse->regRowid ); sqlite3DbFree(db, zStmt); v = sqlite3GetVdbe(pParse); sqlite3ChangeCookie(pParse, iDb); sqlite3VdbeAddOp2(v, OP_Expire, 0, 0); zWhere = sqlite3MPrintf(db, "name='%q' AND type='table'", pTab->zName); sqlite3VdbeAddParseSchemaOp(v, iDb, zWhere); iReg = ++pParse->nMem; sqlite3VdbeAddOp4(v, OP_String8, 0, iReg, 0, pTab->zName, 0); sqlite3VdbeAddOp2(v, OP_VCreate, iDb, iReg); } /* If we are rereading the sqlite_master table create the in-memory ** record of the table. The xConnect() method is not called until ** the first time the virtual table is used in an SQL statement. This ** allows a schema that contains virtual tables to be loaded before ** the required virtual table implementations are registered. */ else { Table *pOld; Schema *pSchema = pTab->pSchema; const char *zName = pTab->zName; assert( sqlite3SchemaMutexHeld(db, 0, pSchema) ); pOld = sqlite3HashInsert(&pSchema->tblHash, zName, pTab); if( pOld ){ db->mallocFailed = 1; assert( pTab==pOld ); /* Malloc must have failed inside HashInsert() */ return; } pParse->pNewTable = 0; } } /* ** The parser calls this routine when it sees the first token ** of an argument to the module name in a CREATE VIRTUAL TABLE statement. 
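**
** For example (illustrative module and arguments), in
**
**     CREATE VIRTUAL TABLE t1 USING example_mod(content TEXT, rank=5)
**
** the tokens of "content TEXT" and of "rank=5" are gathered here and in
** sqlite3VtabArgExtend() into pParse->sArg, then handed to the module
** verbatim by addArgumentToVtab(); the core does not interpret them.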
*/ SQLITE_PRIVATE void sqlite3VtabArgInit(Parse *pParse){ addArgumentToVtab(pParse); pParse->sArg.z = 0; pParse->sArg.n = 0; } /* ** The parser calls this routine for each token after the first token ** in an argument to the module name in a CREATE VIRTUAL TABLE statement. */ SQLITE_PRIVATE void sqlite3VtabArgExtend(Parse *pParse, Token *p){ Token *pArg = &pParse->sArg; if( pArg->z==0 ){ pArg->z = p->z; pArg->n = p->n; }else{ assert(pArg->z <= p->z); pArg->n = (int)(&p->z[p->n] - pArg->z); } } /* ** Invoke a virtual table constructor (either xCreate or xConnect). The ** pointer to the function to invoke is passed as the fourth parameter ** to this procedure. */ static int vtabCallConstructor( sqlite3 *db, Table *pTab, Module *pMod, int (*xConstruct)(sqlite3*,void*,int,const char*const*,sqlite3_vtab**,char**), char **pzErr ){ VtabCtx sCtx; VTable *pVTable; int rc; const char *const*azArg = (const char *const*)pTab->azModuleArg; int nArg = pTab->nModuleArg; char *zErr = 0; char *zModuleName; int iDb; VtabCtx *pCtx; /* Check that the virtual-table is not already being initialized */ for(pCtx=db->pVtabCtx; pCtx; pCtx=pCtx->pPrior){ if( pCtx->pTab==pTab ){ *pzErr = sqlite3MPrintf(db, "vtable constructor called recursively: %s", pTab->zName ); return SQLITE_LOCKED; } } zModuleName = sqlite3MPrintf(db, "%s", pTab->zName); if( !zModuleName ){ return SQLITE_NOMEM; } pVTable = sqlite3DbMallocZero(db, sizeof(VTable)); if( !pVTable ){ sqlite3DbFree(db, zModuleName); return SQLITE_NOMEM; } pVTable->db = db; pVTable->pMod = pMod; iDb = sqlite3SchemaToIndex(db, pTab->pSchema); pTab->azModuleArg[1] = db->aDb[iDb].zName; /* Invoke the virtual table constructor */ assert( &db->pVtabCtx ); assert( xConstruct ); sCtx.pTab = pTab; sCtx.pVTable = pVTable; sCtx.pPrior = db->pVtabCtx; sCtx.bDeclared = 0; db->pVtabCtx = &sCtx; rc = xConstruct(db, pMod->pAux, nArg, azArg, &pVTable->pVtab, &zErr); db->pVtabCtx = sCtx.pPrior; if( rc==SQLITE_NOMEM ) db->mallocFailed = 1; assert( sCtx.pTab==pTab ); if( SQLITE_OK!=rc ){ if( zErr==0 ){ *pzErr = sqlite3MPrintf(db, "vtable constructor failed: %s", zModuleName); }else { *pzErr = sqlite3MPrintf(db, "%s", zErr); sqlite3_free(zErr); } sqlite3DbFree(db, pVTable); }else if( ALWAYS(pVTable->pVtab) ){ /* Justification of ALWAYS(): A correct vtab constructor must allocate ** the sqlite3_vtab object if successful. */ memset(pVTable->pVtab, 0, sizeof(pVTable->pVtab[0])); pVTable->pVtab->pModule = pMod->pModule; pVTable->nRef = 1; if( sCtx.bDeclared==0 ){ const char *zFormat = "vtable constructor did not declare schema: %s"; *pzErr = sqlite3MPrintf(db, zFormat, pTab->zName); sqlite3VtabUnlock(pVTable); rc = SQLITE_ERROR; }else{ int iCol; u8 oooHidden = 0; /* If everything went according to plan, link the new VTable structure ** into the linked list headed by pTab->pVTable. Then loop through the ** columns of the table to see if any of them contain the token "hidden". ** If so, set the Column COLFLAG_HIDDEN flag and remove the token from ** the type string. 
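      **
      ** For instance (an illustrative declaration), a column declared by
      ** the module as
      **
      **     value HIDDEN INTEGER
      **
      ** ends up with type "INTEGER" and the COLFLAG_HIDDEN bit set, so it
      ** is excluded from "SELECT *" expansion but may still be named
      ** explicitly in queries.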
*/ pVTable->pNext = pTab->pVTable; pTab->pVTable = pVTable; for(iCol=0; iColnCol; iCol++){ char *zType = pTab->aCol[iCol].zType; int nType; int i = 0; if( !zType ){ pTab->tabFlags |= oooHidden; continue; } nType = sqlite3Strlen30(zType); if( sqlite3StrNICmp("hidden", zType, 6)||(zType[6] && zType[6]!=' ') ){ for(i=0; i0 ){ assert(zType[i-1]==' '); zType[i-1] = '\0'; } pTab->aCol[iCol].colFlags |= COLFLAG_HIDDEN; oooHidden = TF_OOOHidden; }else{ pTab->tabFlags |= oooHidden; } } } } sqlite3DbFree(db, zModuleName); return rc; } /* ** This function is invoked by the parser to call the xConnect() method ** of the virtual table pTab. If an error occurs, an error code is returned ** and an error left in pParse. ** ** This call is a no-op if table pTab is not a virtual table. */ SQLITE_PRIVATE int sqlite3VtabCallConnect(Parse *pParse, Table *pTab){ sqlite3 *db = pParse->db; const char *zMod; Module *pMod; int rc; assert( pTab ); if( (pTab->tabFlags & TF_Virtual)==0 || sqlite3GetVTable(db, pTab) ){ return SQLITE_OK; } /* Locate the required virtual table module */ zMod = pTab->azModuleArg[0]; pMod = (Module*)sqlite3HashFind(&db->aModule, zMod); if( !pMod ){ const char *zModule = pTab->azModuleArg[0]; sqlite3ErrorMsg(pParse, "no such module: %s", zModule); rc = SQLITE_ERROR; }else{ char *zErr = 0; rc = vtabCallConstructor(db, pTab, pMod, pMod->pModule->xConnect, &zErr); if( rc!=SQLITE_OK ){ sqlite3ErrorMsg(pParse, "%s", zErr); } sqlite3DbFree(db, zErr); } return rc; } /* ** Grow the db->aVTrans[] array so that there is room for at least one ** more v-table. Return SQLITE_NOMEM if a malloc fails, or SQLITE_OK otherwise. */ static int growVTrans(sqlite3 *db){ const int ARRAY_INCR = 5; /* Grow the sqlite3.aVTrans array if required */ if( (db->nVTrans%ARRAY_INCR)==0 ){ VTable **aVTrans; int nBytes = sizeof(sqlite3_vtab *) * (db->nVTrans + ARRAY_INCR); aVTrans = sqlite3DbRealloc(db, (void *)db->aVTrans, nBytes); if( !aVTrans ){ return SQLITE_NOMEM; } memset(&aVTrans[db->nVTrans], 0, sizeof(sqlite3_vtab *)*ARRAY_INCR); db->aVTrans = aVTrans; } return SQLITE_OK; } /* ** Add the virtual table pVTab to the array sqlite3.aVTrans[]. Space should ** have already been reserved using growVTrans(). */ static void addToVTrans(sqlite3 *db, VTable *pVTab){ /* Add pVtab to the end of sqlite3.aVTrans */ db->aVTrans[db->nVTrans++] = pVTab; sqlite3VtabLock(pVTab); } /* ** This function is invoked by the vdbe to call the xCreate method ** of the virtual table named zTab in database iDb. ** ** If an error occurs, *pzErr is set to point an an English language ** description of the error and an SQLITE_XXX error code is returned. ** In this case the caller must call sqlite3DbFree(db, ) on *pzErr. */ SQLITE_PRIVATE int sqlite3VtabCallCreate(sqlite3 *db, int iDb, const char *zTab, char **pzErr){ int rc = SQLITE_OK; Table *pTab; Module *pMod; const char *zMod; pTab = sqlite3FindTable(db, zTab, db->aDb[iDb].zName); assert( pTab && (pTab->tabFlags & TF_Virtual)!=0 && !pTab->pVTable ); /* Locate the required virtual table module */ zMod = pTab->azModuleArg[0]; pMod = (Module*)sqlite3HashFind(&db->aModule, zMod); /* If the module has been registered and includes a Create method, ** invoke it now. If the module has not been registered, return an ** error. Otherwise, do nothing. 
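  **
  ** For example, "CREATE VIRTUAL TABLE t1 USING nosuchmodule" reaches this
  ** point with pMod==0 and fails with the "no such module" error generated
  ** below.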
*/ if( !pMod ){ *pzErr = sqlite3MPrintf(db, "no such module: %s", zMod); rc = SQLITE_ERROR; }else{ rc = vtabCallConstructor(db, pTab, pMod, pMod->pModule->xCreate, pzErr); } /* Justification of ALWAYS(): The xConstructor method is required to ** create a valid sqlite3_vtab if it returns SQLITE_OK. */ if( rc==SQLITE_OK && ALWAYS(sqlite3GetVTable(db, pTab)) ){ rc = growVTrans(db); if( rc==SQLITE_OK ){ addToVTrans(db, sqlite3GetVTable(db, pTab)); } } return rc; } /* ** This function is used to set the schema of a virtual table. It is only ** valid to call this function from within the xCreate() or xConnect() of a ** virtual table module. */ SQLITE_API int SQLITE_STDCALL sqlite3_declare_vtab(sqlite3 *db, const char *zCreateTable){ VtabCtx *pCtx; Parse *pParse; int rc = SQLITE_OK; Table *pTab; char *zErr = 0; #ifdef SQLITE_ENABLE_API_ARMOR if( !sqlite3SafetyCheckOk(db) || zCreateTable==0 ){ return SQLITE_MISUSE_BKPT; } #endif sqlite3_mutex_enter(db->mutex); pCtx = db->pVtabCtx; if( !pCtx || pCtx->bDeclared ){ sqlite3Error(db, SQLITE_MISUSE); sqlite3_mutex_leave(db->mutex); return SQLITE_MISUSE_BKPT; } pTab = pCtx->pTab; assert( (pTab->tabFlags & TF_Virtual)!=0 ); pParse = sqlite3StackAllocZero(db, sizeof(*pParse)); if( pParse==0 ){ rc = SQLITE_NOMEM; }else{ pParse->declareVtab = 1; pParse->db = db; pParse->nQueryLoop = 1; if( SQLITE_OK==sqlite3RunParser(pParse, zCreateTable, &zErr) && pParse->pNewTable && !db->mallocFailed && !pParse->pNewTable->pSelect && (pParse->pNewTable->tabFlags & TF_Virtual)==0 ){ if( !pTab->aCol ){ pTab->aCol = pParse->pNewTable->aCol; pTab->nCol = pParse->pNewTable->nCol; pParse->pNewTable->nCol = 0; pParse->pNewTable->aCol = 0; } pCtx->bDeclared = 1; }else{ sqlite3ErrorWithMsg(db, SQLITE_ERROR, (zErr ? "%s" : 0), zErr); sqlite3DbFree(db, zErr); rc = SQLITE_ERROR; } pParse->declareVtab = 0; if( pParse->pVdbe ){ sqlite3VdbeFinalize(pParse->pVdbe); } sqlite3DeleteTable(db, pParse->pNewTable); sqlite3ParserReset(pParse); sqlite3StackFree(db, pParse); } assert( (rc&0xff)==rc ); rc = sqlite3ApiExit(db, rc); sqlite3_mutex_leave(db->mutex); return rc; } /* ** This function is invoked by the vdbe to call the xDestroy method ** of the virtual table named zTab in database iDb. This occurs ** when a DROP TABLE is mentioned. ** ** This call is a no-op if zTab is not a virtual table. */ SQLITE_PRIVATE int sqlite3VtabCallDestroy(sqlite3 *db, int iDb, const char *zTab){ int rc = SQLITE_OK; Table *pTab; pTab = sqlite3FindTable(db, zTab, db->aDb[iDb].zName); if( ALWAYS(pTab!=0 && pTab->pVTable!=0) ){ VTable *p; for(p=pTab->pVTable; p; p=p->pNext){ assert( p->pVtab ); if( p->pVtab->nRef>0 ){ return SQLITE_LOCKED; } } p = vtabDisconnectAll(db, pTab); rc = p->pMod->pModule->xDestroy(p->pVtab); /* Remove the sqlite3_vtab* from the aVTrans[] array, if applicable */ if( rc==SQLITE_OK ){ assert( pTab->pVTable==p && p->pNext==0 ); p->pVtab = 0; pTab->pVTable = 0; sqlite3VtabUnlock(p); } } return rc; } /* ** This function invokes either the xRollback or xCommit method ** of each of the virtual tables in the sqlite3.aVTrans array. The method ** called is identified by the second argument, "offset", which is ** the offset of the method to call in the sqlite3_module structure. ** ** The array is cleared after invoking the callbacks. 
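**
** In other words, sqlite3VtabCommit() and sqlite3VtabRollback() below both
** funnel into this helper with offsetof(sqlite3_module,xCommit) or
** offsetof(sqlite3_module,xRollback) respectively, and the function
** pointer is recovered by pointer arithmetic on the module object.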
*/ static void callFinaliser(sqlite3 *db, int offset){ int i; if( db->aVTrans ){ VTable **aVTrans = db->aVTrans; db->aVTrans = 0; for(i=0; inVTrans; i++){ VTable *pVTab = aVTrans[i]; sqlite3_vtab *p = pVTab->pVtab; if( p ){ int (*x)(sqlite3_vtab *); x = *(int (**)(sqlite3_vtab *))((char *)p->pModule + offset); if( x ) x(p); } pVTab->iSavepoint = 0; sqlite3VtabUnlock(pVTab); } sqlite3DbFree(db, aVTrans); db->nVTrans = 0; } } /* ** Invoke the xSync method of all virtual tables in the sqlite3.aVTrans ** array. Return the error code for the first error that occurs, or ** SQLITE_OK if all xSync operations are successful. ** ** If an error message is available, leave it in p->zErrMsg. */ SQLITE_PRIVATE int sqlite3VtabSync(sqlite3 *db, Vdbe *p){ int i; int rc = SQLITE_OK; VTable **aVTrans = db->aVTrans; db->aVTrans = 0; for(i=0; rc==SQLITE_OK && inVTrans; i++){ int (*x)(sqlite3_vtab *); sqlite3_vtab *pVtab = aVTrans[i]->pVtab; if( pVtab && (x = pVtab->pModule->xSync)!=0 ){ rc = x(pVtab); sqlite3VtabImportErrmsg(p, pVtab); } } db->aVTrans = aVTrans; return rc; } /* ** Invoke the xRollback method of all virtual tables in the ** sqlite3.aVTrans array. Then clear the array itself. */ SQLITE_PRIVATE int sqlite3VtabRollback(sqlite3 *db){ callFinaliser(db, offsetof(sqlite3_module,xRollback)); return SQLITE_OK; } /* ** Invoke the xCommit method of all virtual tables in the ** sqlite3.aVTrans array. Then clear the array itself. */ SQLITE_PRIVATE int sqlite3VtabCommit(sqlite3 *db){ callFinaliser(db, offsetof(sqlite3_module,xCommit)); return SQLITE_OK; } /* ** If the virtual table pVtab supports the transaction interface ** (xBegin/xRollback/xCommit and optionally xSync) and a transaction is ** not currently open, invoke the xBegin method now. ** ** If the xBegin call is successful, place the sqlite3_vtab pointer ** in the sqlite3.aVTrans array. */ SQLITE_PRIVATE int sqlite3VtabBegin(sqlite3 *db, VTable *pVTab){ int rc = SQLITE_OK; const sqlite3_module *pModule; /* Special case: If db->aVTrans is NULL and db->nVTrans is greater ** than zero, then this function is being called from within a ** virtual module xSync() callback. It is illegal to write to ** virtual module tables in this case, so return SQLITE_LOCKED. */ if( sqlite3VtabInSync(db) ){ return SQLITE_LOCKED; } if( !pVTab ){ return SQLITE_OK; } pModule = pVTab->pVtab->pModule; if( pModule->xBegin ){ int i; /* If pVtab is already in the aVTrans array, return early */ for(i=0; inVTrans; i++){ if( db->aVTrans[i]==pVTab ){ return SQLITE_OK; } } /* Invoke the xBegin method. If successful, add the vtab to the ** sqlite3.aVTrans[] array. */ rc = growVTrans(db); if( rc==SQLITE_OK ){ rc = pModule->xBegin(pVTab->pVtab); if( rc==SQLITE_OK ){ addToVTrans(db, pVTab); } } } return rc; } /* ** Invoke either the xSavepoint, xRollbackTo or xRelease method of all ** virtual tables that currently have an open transaction. Pass iSavepoint ** as the second argument to the virtual table method invoked. ** ** If op is SAVEPOINT_BEGIN, the xSavepoint method is invoked. If it is ** SAVEPOINT_ROLLBACK, the xRollbackTo method. Otherwise, if op is ** SAVEPOINT_RELEASE, then the xRelease method of each virtual table with ** an open transaction is invoked. ** ** If any virtual table method returns an error code other than SQLITE_OK, ** processing is abandoned and the error returned to the caller of this ** function immediately. If all calls to virtual table methods are successful, ** SQLITE_OK is returned. 
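**
** As an illustration: "SAVEPOINT s1" arrives here with op==SAVEPOINT_BEGIN
** and calls xSavepoint(pVtab, iSavepoint) on each virtual table holding an
** open transaction, while "ROLLBACK TO s1" and "RELEASE s1" map to
** xRollbackTo() and xRelease() with the same savepoint number.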
*/ SQLITE_PRIVATE int sqlite3VtabSavepoint(sqlite3 *db, int op, int iSavepoint){ int rc = SQLITE_OK; assert( op==SAVEPOINT_RELEASE||op==SAVEPOINT_ROLLBACK||op==SAVEPOINT_BEGIN ); assert( iSavepoint>=-1 ); if( db->aVTrans ){ int i; for(i=0; rc==SQLITE_OK && inVTrans; i++){ VTable *pVTab = db->aVTrans[i]; const sqlite3_module *pMod = pVTab->pMod->pModule; if( pVTab->pVtab && pMod->iVersion>=2 ){ int (*xMethod)(sqlite3_vtab *, int); switch( op ){ case SAVEPOINT_BEGIN: xMethod = pMod->xSavepoint; pVTab->iSavepoint = iSavepoint+1; break; case SAVEPOINT_ROLLBACK: xMethod = pMod->xRollbackTo; break; default: xMethod = pMod->xRelease; break; } if( xMethod && pVTab->iSavepoint>iSavepoint ){ rc = xMethod(pVTab->pVtab, iSavepoint); } } } } return rc; } /* ** The first parameter (pDef) is a function implementation. The ** second parameter (pExpr) is the first argument to this function. ** If pExpr is a column in a virtual table, then let the virtual ** table implementation have an opportunity to overload the function. ** ** This routine is used to allow virtual table implementations to ** overload MATCH, LIKE, GLOB, and REGEXP operators. ** ** Return either the pDef argument (indicating no change) or a ** new FuncDef structure that is marked as ephemeral using the ** SQLITE_FUNC_EPHEM flag. */ SQLITE_PRIVATE FuncDef *sqlite3VtabOverloadFunction( sqlite3 *db, /* Database connection for reporting malloc problems */ FuncDef *pDef, /* Function to possibly overload */ int nArg, /* Number of arguments to the function */ Expr *pExpr /* First argument to the function */ ){ Table *pTab; sqlite3_vtab *pVtab; sqlite3_module *pMod; void (*xFunc)(sqlite3_context*,int,sqlite3_value**) = 0; void *pArg = 0; FuncDef *pNew; int rc = 0; char *zLowerName; unsigned char *z; /* Check to see the left operand is a column in a virtual table */ if( NEVER(pExpr==0) ) return pDef; if( pExpr->op!=TK_COLUMN ) return pDef; pTab = pExpr->pTab; if( NEVER(pTab==0) ) return pDef; if( (pTab->tabFlags & TF_Virtual)==0 ) return pDef; pVtab = sqlite3GetVTable(db, pTab)->pVtab; assert( pVtab!=0 ); assert( pVtab->pModule!=0 ); pMod = (sqlite3_module *)pVtab->pModule; if( pMod->xFindFunction==0 ) return pDef; /* Call the xFindFunction method on the virtual table implementation ** to see if the implementation wants to overload this function */ zLowerName = sqlite3DbStrDup(db, pDef->zName); if( zLowerName ){ for(z=(unsigned char*)zLowerName; *z; z++){ *z = sqlite3UpperToLower[*z]; } rc = pMod->xFindFunction(pVtab, nArg, zLowerName, &xFunc, &pArg); sqlite3DbFree(db, zLowerName); } if( rc==0 ){ return pDef; } /* Create a new ephemeral function definition for the overloaded ** function */ pNew = sqlite3DbMallocZero(db, sizeof(*pNew) + sqlite3Strlen30(pDef->zName) + 1); if( pNew==0 ){ return pDef; } *pNew = *pDef; pNew->zName = (char *)&pNew[1]; memcpy(pNew->zName, pDef->zName, sqlite3Strlen30(pDef->zName)+1); pNew->xFunc = xFunc; pNew->pUserData = pArg; pNew->funcFlags |= SQLITE_FUNC_EPHEM; return pNew; } /* ** Make sure virtual table pTab is contained in the pParse->apVirtualLock[] ** array so that an OP_VBegin will get generated for it. Add pTab to the ** array if it is missing. If pTab is already in the array, this routine ** is a no-op. 
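**
** For example, updateVirtualTable() above calls this routine just before
** emitting OP_VUpdate, so that an OP_VBegin is generated for the table
** before it is written.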
*/ SQLITE_PRIVATE void sqlite3VtabMakeWritable(Parse *pParse, Table *pTab){ Parse *pToplevel = sqlite3ParseToplevel(pParse); int i, n; Table **apVtabLock; assert( IsVirtual(pTab) ); for(i=0; inVtabLock; i++){ if( pTab==pToplevel->apVtabLock[i] ) return; } n = (pToplevel->nVtabLock+1)*sizeof(pToplevel->apVtabLock[0]); apVtabLock = sqlite3_realloc64(pToplevel->apVtabLock, n); if( apVtabLock ){ pToplevel->apVtabLock = apVtabLock; pToplevel->apVtabLock[pToplevel->nVtabLock++] = pTab; }else{ pToplevel->db->mallocFailed = 1; } } /* ** Return the ON CONFLICT resolution mode in effect for the virtual ** table update operation currently in progress. ** ** The results of this routine are undefined unless it is called from ** within an xUpdate method. */ SQLITE_API int SQLITE_STDCALL sqlite3_vtab_on_conflict(sqlite3 *db){ static const unsigned char aMap[] = { SQLITE_ROLLBACK, SQLITE_ABORT, SQLITE_FAIL, SQLITE_IGNORE, SQLITE_REPLACE }; #ifdef SQLITE_ENABLE_API_ARMOR if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT; #endif assert( OE_Rollback==1 && OE_Abort==2 && OE_Fail==3 ); assert( OE_Ignore==4 && OE_Replace==5 ); assert( db->vtabOnConflict>=1 && db->vtabOnConflict<=5 ); return (int)aMap[db->vtabOnConflict-1]; } /* ** Call from within the xCreate() or xConnect() methods to provide ** the SQLite core with additional information about the behavior ** of the virtual table being implemented. */ SQLITE_API int SQLITE_CDECL sqlite3_vtab_config(sqlite3 *db, int op, ...){ va_list ap; int rc = SQLITE_OK; #ifdef SQLITE_ENABLE_API_ARMOR if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT; #endif sqlite3_mutex_enter(db->mutex); va_start(ap, op); switch( op ){ case SQLITE_VTAB_CONSTRAINT_SUPPORT: { VtabCtx *p = db->pVtabCtx; if( !p ){ rc = SQLITE_MISUSE_BKPT; }else{ assert( p->pTab==0 || (p->pTab->tabFlags & TF_Virtual)!=0 ); p->pVTable->bConstraint = (u8)va_arg(ap, int); } break; } default: rc = SQLITE_MISUSE_BKPT; break; } va_end(ap); if( rc!=SQLITE_OK ) sqlite3Error(db, rc); sqlite3_mutex_leave(db->mutex); return rc; } #endif /* SQLITE_OMIT_VIRTUALTABLE */ /************** End of vtab.c ************************************************/ /************** Begin file wherecode.c ***************************************/ /* ** 2015-06-06 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** This module contains C code that generates VDBE code used to process ** the WHERE clause of SQL statements. ** ** This file was split off from where.c on 2015-06-06 in order to reduce the ** size of where.c and make it easier to edit. This file contains the routines ** that actually generate the bulk of the WHERE loop code. The original where.c ** file retains the code that does query planning and analysis. */ /* #include "sqliteInt.h" */ /************** Include whereInt.h in the middle of wherecode.c **************/ /************** Begin file whereInt.h ****************************************/ /* ** 2013-11-12 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. 
** ************************************************************************* ** ** This file contains structure and macro definitions for the query ** planner logic in "where.c". These definitions are broken out into ** a separate source file for easier editing. */ /* ** Trace output macros */ #if defined(SQLITE_TEST) || defined(SQLITE_DEBUG) /***/ int sqlite3WhereTrace; #endif #if defined(SQLITE_DEBUG) \ && (defined(SQLITE_TEST) || defined(SQLITE_ENABLE_WHERETRACE)) # define WHERETRACE(K,X) if(sqlite3WhereTrace&(K)) sqlite3DebugPrintf X # define WHERETRACE_ENABLED 1 #else # define WHERETRACE(K,X) #endif /* Forward references */ typedef struct WhereClause WhereClause; typedef struct WhereMaskSet WhereMaskSet; typedef struct WhereOrInfo WhereOrInfo; typedef struct WhereAndInfo WhereAndInfo; typedef struct WhereLevel WhereLevel; typedef struct WhereLoop WhereLoop; typedef struct WherePath WherePath; typedef struct WhereTerm WhereTerm; typedef struct WhereLoopBuilder WhereLoopBuilder; typedef struct WhereScan WhereScan; typedef struct WhereOrCost WhereOrCost; typedef struct WhereOrSet WhereOrSet; /* ** This object contains information needed to implement a single nested ** loop in WHERE clause. ** ** Contrast this object with WhereLoop. This object describes the ** implementation of the loop. WhereLoop describes the algorithm. ** This object contains a pointer to the WhereLoop algorithm as one of ** its elements. ** ** The WhereInfo object contains a single instance of this object for ** each term in the FROM clause (which is to say, for each of the ** nested loops as implemented). The order of WhereLevel objects determines ** the loop nested order, with WhereInfo.a[0] being the outer loop and ** WhereInfo.a[WhereInfo.nLevel-1] being the inner loop. */ struct WhereLevel { int iLeftJoin; /* Memory cell used to implement LEFT OUTER JOIN */ int iTabCur; /* The VDBE cursor used to access the table */ int iIdxCur; /* The VDBE cursor used to access pIdx */ int addrBrk; /* Jump here to break out of the loop */ int addrNxt; /* Jump here to start the next IN combination */ int addrSkip; /* Jump here for next iteration of skip-scan */ int addrCont; /* Jump here to continue with the next loop cycle */ int addrFirst; /* First instruction of interior of the loop */ int addrBody; /* Beginning of the body of this loop */ int iLikeRepCntr; /* LIKE range processing counter register */ int addrLikeRep; /* LIKE range processing address */ u8 iFrom; /* Which entry in the FROM clause */ u8 op, p3, p5; /* Opcode, P3 & P5 of the opcode that ends the loop */ int p1, p2; /* Operands of the opcode used to ends the loop */ union { /* Information that depends on pWLoop->wsFlags */ struct { int nIn; /* Number of entries in aInLoop[] */ struct InLoop { int iCur; /* The VDBE cursor used by this IN operator */ int addrInTop; /* Top of the IN loop */ u8 eEndLoopOp; /* IN Loop terminator. OP_Next or OP_Prev */ } *aInLoop; /* Information about each nested IN operator */ } in; /* Used when pWLoop->wsFlags&WHERE_IN_ABLE */ Index *pCovidx; /* Possible covering index for WHERE_MULTI_OR */ } u; struct WhereLoop *pWLoop; /* The selected WhereLoop object */ Bitmask notReady; /* FROM entries not usable at this level */ #ifdef SQLITE_ENABLE_STMT_SCANSTATUS int addrVisit; /* Address at which row is visited */ #endif }; /* ** Each instance of this object represents an algorithm for evaluating one ** term of a join. 
Every term of the FROM clause will have at least ** one corresponding WhereLoop object (unless INDEXED BY constraints ** prevent a query solution - which is an error) and many terms of the ** FROM clause will have multiple WhereLoop objects, each describing a ** potential way of implementing that FROM-clause term, together with ** dependencies and cost estimates for using the chosen algorithm. ** ** Query planning consists of building up a collection of these WhereLoop ** objects, then computing a particular sequence of WhereLoop objects, with ** one WhereLoop object per FROM clause term, that satisfy all dependencies ** and that minimize the overall cost. */ struct WhereLoop { Bitmask prereq; /* Bitmask of other loops that must run first */ Bitmask maskSelf; /* Bitmask identifying table iTab */ #ifdef SQLITE_DEBUG char cId; /* Symbolic ID of this loop for debugging use */ #endif u8 iTab; /* Position in FROM clause of table for this loop */ u8 iSortIdx; /* Sorting index number. 0==None */ LogEst rSetup; /* One-time setup cost (ex: create transient index) */ LogEst rRun; /* Cost of running each loop */ LogEst nOut; /* Estimated number of output rows */ union { struct { /* Information for internal btree tables */ u16 nEq; /* Number of equality constraints */ Index *pIndex; /* Index used, or NULL */ } btree; struct { /* Information for virtual tables */ int idxNum; /* Index number */ u8 needFree; /* True if sqlite3_free(idxStr) is needed */ i8 isOrdered; /* True if satisfies ORDER BY */ u16 omitMask; /* Terms that may be omitted */ char *idxStr; /* Index identifier string */ } vtab; } u; u32 wsFlags; /* WHERE_* flags describing the plan */ u16 nLTerm; /* Number of entries in aLTerm[] */ u16 nSkip; /* Number of NULL aLTerm[] entries */ /**** whereLoopXfer() copies fields above ***********************/ # define WHERE_LOOP_XFER_SZ offsetof(WhereLoop,nLSlot) u16 nLSlot; /* Number of slots allocated for aLTerm[] */ WhereTerm **aLTerm; /* WhereTerms used */ WhereLoop *pNextLoop; /* Next WhereLoop object in the WhereClause */ WhereTerm *aLTermSpace[3]; /* Initial aLTerm[] space */ }; /* This object holds the prerequisites and the cost of running a ** subquery on one operand of an OR operator in the WHERE clause. ** See WhereOrSet for additional information */ struct WhereOrCost { Bitmask prereq; /* Prerequisites */ LogEst rRun; /* Cost of running this subquery */ LogEst nOut; /* Number of outputs for this subquery */ }; /* The WhereOrSet object holds a set of possible WhereOrCosts that ** correspond to the subquery(s) of OR-clause processing. Only the ** best N_OR_COST elements are retained. */ #define N_OR_COST 3 struct WhereOrSet { u16 n; /* Number of valid a[] entries */ WhereOrCost a[N_OR_COST]; /* Set of best costs */ }; /* ** Each instance of this object holds a sequence of WhereLoop objects ** that implement some or all of a query plan. ** ** Think of each WhereLoop object as a node in a graph with arcs ** showing dependencies and costs for travelling between nodes. (That is ** not a completely accurate description because WhereLoop costs are a ** vector, not a scalar, and because dependencies are many-to-one, not ** one-to-one as are graph nodes. But it is a useful visualization aid.) ** Then a WherePath object is a path through the graph that visits some ** or all of the WhereLoop objects once. ** ** The "solver" works by creating the N best WherePath objects of length ** 1. Then using those as a basis to compute the N best WherePath objects ** of length 2. 
And so forth until the length of WherePaths equals the ** number of nodes in the FROM clause. The best (lowest cost) WherePath ** at the end is the chosen query plan. */ struct WherePath { Bitmask maskLoop; /* Bitmask of all WhereLoop objects in this path */ Bitmask revLoop; /* aLoop[]s that should be reversed for ORDER BY */ LogEst nRow; /* Estimated number of rows generated by this path */ LogEst rCost; /* Total cost of this path */ LogEst rUnsorted; /* Total cost of this path ignoring sorting costs */ i8 isOrdered; /* No. of ORDER BY terms satisfied. -1 for unknown */ WhereLoop **aLoop; /* Array of WhereLoop objects implementing this path */ }; /* ** The query generator uses an array of instances of this structure to ** help it analyze the subexpressions of the WHERE clause. Each WHERE ** clause subexpression is separated from the others by AND operators, ** usually, or sometimes subexpressions separated by OR. ** ** All WhereTerms are collected into a single WhereClause structure. ** The following identity holds: ** ** WhereTerm.pWC->a[WhereTerm.idx] == WhereTerm ** ** When a term is of the form: ** ** X ** ** where X is a column name and is one of certain operators, ** then WhereTerm.leftCursor and WhereTerm.u.leftColumn record the ** cursor number and column number for X. WhereTerm.eOperator records ** the using a bitmask encoding defined by WO_xxx below. The ** use of a bitmask encoding for the operator allows us to search ** quickly for terms that match any of several different operators. ** ** A WhereTerm might also be two or more subterms connected by OR: ** ** (t1.X ) OR (t1.Y ) OR .... ** ** In this second case, wtFlag has the TERM_ORINFO bit set and eOperator==WO_OR ** and the WhereTerm.u.pOrInfo field points to auxiliary information that ** is collected about the OR clause. ** ** If a term in the WHERE clause does not match either of the two previous ** categories, then eOperator==0. The WhereTerm.pExpr field is still set ** to the original subexpression content and wtFlags is set up appropriately ** but no other fields in the WhereTerm object are meaningful. ** ** When eOperator!=0, prereqRight and prereqAll record sets of cursor numbers, ** but they do so indirectly. A single WhereMaskSet structure translates ** cursor number into bits and the translated bit is stored in the prereq ** fields. The translation is used in order to maximize the number of ** bits that will fit in a Bitmask. The VDBE cursor numbers might be ** spread out over the non-negative integers. For example, the cursor ** numbers might be 3, 8, 9, 10, 20, 23, 41, and 45. The WhereMaskSet ** translates these sparse cursor numbers into consecutive integers ** beginning with 0 in order to make the best possible use of the available ** bits in the Bitmask. So, in the example above, the cursor numbers ** would be mapped into integers 0 through 7. ** ** The number of terms in a join is limited by the number of bits ** in prereqRight and prereqAll. The default is 64 bits, hence SQLite ** is only able to process joins with 64 or fewer tables. 
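**
** As an illustrative sketch only (the real lookup is done by the
** sqlite3WhereGetMask() routine declared later in this file), the
** translation from a VDBE cursor number to its prerequisite bit
** amounts to:
**
**       int i;  Bitmask mask = 0;
**       for(i=0; i<pMaskSet->n; i++){
**         if( pMaskSet->ix[i]==iCursor ){ mask = ((Bitmask)1)<<i; break; }
**       }
**
** So for a term such as "t2.y=t1.x", prereqRight holds the single bit
** assigned to t1's cursor and prereqAll holds the OR of the bits
** assigned to the cursors of t1 and t2.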
*/ struct WhereTerm { Expr *pExpr; /* Pointer to the subexpression that is this term */ int iParent; /* Disable pWC->a[iParent] when this term disabled */ int leftCursor; /* Cursor number of X in "X " */ union { int leftColumn; /* Column number of X in "X " */ WhereOrInfo *pOrInfo; /* Extra information if (eOperator & WO_OR)!=0 */ WhereAndInfo *pAndInfo; /* Extra information if (eOperator& WO_AND)!=0 */ } u; LogEst truthProb; /* Probability of truth for this expression */ u16 eOperator; /* A WO_xx value describing */ u16 wtFlags; /* TERM_xxx bit flags. See below */ u8 nChild; /* Number of children that must disable us */ WhereClause *pWC; /* The clause this term is part of */ Bitmask prereqRight; /* Bitmask of tables used by pExpr->pRight */ Bitmask prereqAll; /* Bitmask of tables referenced by pExpr */ }; /* ** Allowed values of WhereTerm.wtFlags */ #define TERM_DYNAMIC 0x01 /* Need to call sqlite3ExprDelete(db, pExpr) */ #define TERM_VIRTUAL 0x02 /* Added by the optimizer. Do not code */ #define TERM_CODED 0x04 /* This term is already coded */ #define TERM_COPIED 0x08 /* Has a child */ #define TERM_ORINFO 0x10 /* Need to free the WhereTerm.u.pOrInfo object */ #define TERM_ANDINFO 0x20 /* Need to free the WhereTerm.u.pAndInfo obj */ #define TERM_OR_OK 0x40 /* Used during OR-clause processing */ #ifdef SQLITE_ENABLE_STAT3_OR_STAT4 # define TERM_VNULL 0x80 /* Manufactured x>NULL or x<=NULL term */ #else # define TERM_VNULL 0x00 /* Disabled if not using stat3 */ #endif #define TERM_LIKEOPT 0x100 /* Virtual terms from the LIKE optimization */ #define TERM_LIKECOND 0x200 /* Conditionally this LIKE operator term */ #define TERM_LIKE 0x400 /* The original LIKE operator */ #define TERM_IS 0x800 /* Term.pExpr is an IS operator */ /* ** An instance of the WhereScan object is used as an iterator for locating ** terms in the WHERE clause that are useful to the query planner. */ struct WhereScan { WhereClause *pOrigWC; /* Original, innermost WhereClause */ WhereClause *pWC; /* WhereClause currently being scanned */ char *zCollName; /* Required collating sequence, if not NULL */ char idxaff; /* Must match this affinity, if zCollName!=NULL */ unsigned char nEquiv; /* Number of entries in aEquiv[] */ unsigned char iEquiv; /* Next unused slot in aEquiv[] */ u32 opMask; /* Acceptable operators */ int k; /* Resume scanning at this->pWC->a[this->k] */ int aEquiv[22]; /* Cursor,Column pairs for equivalence classes */ }; /* ** An instance of the following structure holds all information about a ** WHERE clause. Mostly this is a container for one or more WhereTerms. ** ** Explanation of pOuter: For a WHERE clause of the form ** ** a AND ((b AND c) OR (d AND e)) AND f ** ** There are separate WhereClause objects for the whole clause and for ** the subclauses "(b AND c)" and "(d AND e)". The pOuter field of the ** subclauses points to the WhereClause object for the whole clause. */ struct WhereClause { WhereInfo *pWInfo; /* WHERE clause processing context */ WhereClause *pOuter; /* Outer conjunction */ u8 op; /* Split operator. TK_AND or TK_OR */ int nTerm; /* Number of terms */ int nSlot; /* Number of entries in a[] */ WhereTerm *a; /* Each a[] describes a term of the WHERE cluase */ #if defined(SQLITE_SMALL_STACK) WhereTerm aStatic[1]; /* Initial static space for a[] */ #else WhereTerm aStatic[8]; /* Initial static space for a[] */ #endif }; /* ** A WhereTerm with eOperator==WO_OR has its u.pOrInfo pointer set to ** a dynamically allocated instance of the following structure. 
*/
struct WhereOrInfo {
  WhereClause wc;          /* Decomposition into subterms */
  Bitmask indexable;       /* Bitmask of all indexable tables in the clause */
};

/*
** A WhereTerm with eOperator==WO_AND has its u.pAndInfo pointer set to
** a dynamically allocated instance of the following structure.
*/
struct WhereAndInfo {
  WhereClause wc;          /* The subexpression broken out */
};

/*
** An instance of the following structure keeps track of a mapping
** between VDBE cursor numbers and bits of the bitmasks in WhereTerm.
**
** The VDBE cursor numbers are small integers contained in
** SrcList_item.iCursor and Expr.iTable fields.  For any given WHERE
** clause, the cursor numbers might not begin with 0 and they might
** contain gaps in the numbering sequence.  But we want to make maximum
** use of the bits in our bitmasks.  This structure provides a mapping
** from the sparse cursor numbers into consecutive integers beginning
** with 0.
**
** If WhereMaskSet.ix[A]==B it means that the A-th bit of a Bitmask
** corresponds to VDBE cursor number B.  The A-th bit of a bitmask is 1<<A.
**
** For example, if the WHERE clause expression used these VDBE
** cursors:  4, 5, 8, 29, 57, 73.  Then the WhereMaskSet structure
** would map those cursor numbers into bits 0 through 5.
**
** Note that the mapping is not necessarily ordered.  In the example
** above, the mapping might go like this:  4->3, 5->1, 8->2, 29->0,
** 57->5, 73->4.  Or one of 719 other combinations might be used.  It
** does not really matter.  What is important is that sparse cursor
** numbers all get mapped into bit numbers that begin with 0 and contain
** no gaps.
*/
struct WhereMaskSet {
  int n;                        /* Number of assigned cursor values */
  int ix[BMS];                  /* Cursor assigned to each bit */
};

/*
** Initialize a WhereMaskSet object
*/
#define initMaskSet(P)  (P)->n=0

/*
** This object is a convenience wrapper holding all information needed
** to construct WhereLoop objects for a particular query.
*/
struct WhereLoopBuilder {
  WhereInfo *pWInfo;        /* Information about this WHERE */
  WhereClause *pWC;         /* WHERE clause terms */
  ExprList *pOrderBy;       /* ORDER BY clause */
  WhereLoop *pNew;          /* Template WhereLoop */
  WhereOrSet *pOrSet;       /* Record best loops here, if not NULL */
#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
  UnpackedRecord *pRec;     /* Probe for stat4 (if required) */
  int nRecValid;            /* Number of valid fields currently in pRec */
#endif
};

/*
** The WHERE clause processing routine has two halves.  The
** first part does the start of the WHERE loop and the second
** half does the tail of the WHERE loop.  An instance of
** this structure is returned by the first half and passed
** into the second half to give some continuity.
**
** An instance of this object holds the complete state of the query
** planner.
*/
struct WhereInfo {
  Parse *pParse;            /* Parsing and code generating context */
  SrcList *pTabList;        /* List of tables in the join */
  ExprList *pOrderBy;       /* The ORDER BY clause or NULL */
  ExprList *pResultSet;     /* Result set.
DISTINCT operates on these */ WhereLoop *pLoops; /* List of all WhereLoop objects */ Bitmask revMask; /* Mask of ORDER BY terms that need reversing */ LogEst nRowOut; /* Estimated number of output rows */ u16 wctrlFlags; /* Flags originally passed to sqlite3WhereBegin() */ i8 nOBSat; /* Number of ORDER BY terms satisfied by indices */ u8 sorted; /* True if really sorted (not just grouped) */ u8 okOnePass; /* Ok to use one-pass algorithm for UPDATE/DELETE */ u8 untestedTerms; /* Not all WHERE terms resolved by outer loop */ u8 eDistinct; /* One of the WHERE_DISTINCT_* values below */ u8 nLevel; /* Number of nested loop */ int iTop; /* The very beginning of the WHERE loop */ int iContinue; /* Jump here to continue with next record */ int iBreak; /* Jump here to break out of the loop */ int savedNQueryLoop; /* pParse->nQueryLoop outside the WHERE loop */ int aiCurOnePass[2]; /* OP_OpenWrite cursors for the ONEPASS opt */ WhereMaskSet sMaskSet; /* Map cursor numbers to bitmasks */ WhereClause sWC; /* Decomposition of the WHERE clause */ WhereLevel a[1]; /* Information about each nest loop in WHERE */ }; /* ** Private interfaces - callable only by other where.c routines. ** ** where.c: */ SQLITE_PRIVATE Bitmask sqlite3WhereGetMask(WhereMaskSet*,int); SQLITE_PRIVATE WhereTerm *sqlite3WhereFindTerm( WhereClause *pWC, /* The WHERE clause to be searched */ int iCur, /* Cursor number of LHS */ int iColumn, /* Column number of LHS */ Bitmask notReady, /* RHS must not overlap with this mask */ u32 op, /* Mask of WO_xx values describing operator */ Index *pIdx /* Must be compatible with this index, if not NULL */ ); /* wherecode.c: */ #ifndef SQLITE_OMIT_EXPLAIN SQLITE_PRIVATE int sqlite3WhereExplainOneScan( Parse *pParse, /* Parse context */ SrcList *pTabList, /* Table list this loop refers to */ WhereLevel *pLevel, /* Scan to write OP_Explain opcode for */ int iLevel, /* Value for "level" column of output */ int iFrom, /* Value for "from" column of output */ u16 wctrlFlags /* Flags passed to sqlite3WhereBegin() */ ); #else # define sqlite3WhereExplainOneScan(u,v,w,x,y,z) 0 #endif /* SQLITE_OMIT_EXPLAIN */ #ifdef SQLITE_ENABLE_STMT_SCANSTATUS SQLITE_PRIVATE void sqlite3WhereAddScanStatus( Vdbe *v, /* Vdbe to add scanstatus entry to */ SrcList *pSrclist, /* FROM clause pLvl reads data from */ WhereLevel *pLvl, /* Level to add scanstatus() entry for */ int addrExplain /* Address of OP_Explain (or 0) */ ); #else # define sqlite3WhereAddScanStatus(a, b, c, d) ((void)d) #endif SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( WhereInfo *pWInfo, /* Complete information about the WHERE clause */ int iLevel, /* Which level of pWInfo->a[] should be coded */ Bitmask notReady /* Which tables are currently available */ ); /* whereexpr.c: */ SQLITE_PRIVATE void sqlite3WhereClauseInit(WhereClause*,WhereInfo*); SQLITE_PRIVATE void sqlite3WhereClauseClear(WhereClause*); SQLITE_PRIVATE void sqlite3WhereSplit(WhereClause*,Expr*,u8); SQLITE_PRIVATE Bitmask sqlite3WhereExprUsage(WhereMaskSet*, Expr*); SQLITE_PRIVATE Bitmask sqlite3WhereExprListUsage(WhereMaskSet*, ExprList*); SQLITE_PRIVATE void sqlite3WhereExprAnalyze(SrcList*, WhereClause*); /* ** Bitmasks for the operators on WhereTerm objects. These are all ** operators that are of interest to the query planner. An ** OR-ed combination of these values can be used when searching for ** particular WhereTerms within a WhereClause. 
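**
** For example, a caller that will accept any equality or inequality
** operator on a column can probe for all of them in one call (an
** illustrative use of the sqlite3WhereFindTerm() interface declared
** earlier in this file):
**
**       pTerm = sqlite3WhereFindTerm(pWC, iCur, iColumn, notReady,
**                                    WO_EQ|WO_GT|WO_GE|WO_LT|WO_LE, pIdx);
**
** and then inspect pTerm->eOperator on the returned term to see which
** of the requested operators actually matched.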
*/ #define WO_IN 0x0001 #define WO_EQ 0x0002 #define WO_LT (WO_EQ<<(TK_LT-TK_EQ)) #define WO_LE (WO_EQ<<(TK_LE-TK_EQ)) #define WO_GT (WO_EQ<<(TK_GT-TK_EQ)) #define WO_GE (WO_EQ<<(TK_GE-TK_EQ)) #define WO_MATCH 0x0040 #define WO_IS 0x0080 #define WO_ISNULL 0x0100 #define WO_OR 0x0200 /* Two or more OR-connected terms */ #define WO_AND 0x0400 /* Two or more AND-connected terms */ #define WO_EQUIV 0x0800 /* Of the form A==B, both columns */ #define WO_NOOP 0x1000 /* This term does not restrict search space */ #define WO_ALL 0x1fff /* Mask of all possible WO_* values */ #define WO_SINGLE 0x01ff /* Mask of all non-compound WO_* values */ /* ** These are definitions of bits in the WhereLoop.wsFlags field. ** The particular combination of bits in each WhereLoop help to ** determine the algorithm that WhereLoop represents. */ #define WHERE_COLUMN_EQ 0x00000001 /* x=EXPR */ #define WHERE_COLUMN_RANGE 0x00000002 /* xEXPR */ #define WHERE_COLUMN_IN 0x00000004 /* x IN (...) */ #define WHERE_COLUMN_NULL 0x00000008 /* x IS NULL */ #define WHERE_CONSTRAINT 0x0000000f /* Any of the WHERE_COLUMN_xxx values */ #define WHERE_TOP_LIMIT 0x00000010 /* xEXPR or x>=EXPR constraint */ #define WHERE_BOTH_LIMIT 0x00000030 /* Both x>EXPR and x2; ** ** is run and there is an index on (a, b), then this function returns a ** string similar to: ** ** "a=? AND b>?" */ static void explainIndexRange(StrAccum *pStr, WhereLoop *pLoop, Table *pTab){ Index *pIndex = pLoop->u.btree.pIndex; u16 nEq = pLoop->u.btree.nEq; u16 nSkip = pLoop->nSkip; int i, j; Column *aCol = pTab->aCol; i16 *aiColumn = pIndex->aiColumn; if( nEq==0 && (pLoop->wsFlags&(WHERE_BTM_LIMIT|WHERE_TOP_LIMIT))==0 ) return; sqlite3StrAccumAppend(pStr, " (", 2); for(i=0; i=nSkip ){ explainAppendTerm(pStr, i, z, "="); }else{ if( i ) sqlite3StrAccumAppend(pStr, " AND ", 5); sqlite3XPrintf(pStr, 0, "ANY(%s)", z); } } j = i; if( pLoop->wsFlags&WHERE_BTM_LIMIT ){ char *z = aiColumn[j] < 0 ? "rowid" : aCol[aiColumn[j]].zName; explainAppendTerm(pStr, i++, z, ">"); } if( pLoop->wsFlags&WHERE_TOP_LIMIT ){ char *z = aiColumn[j] < 0 ? "rowid" : aCol[aiColumn[j]].zName; explainAppendTerm(pStr, i, z, "<"); } sqlite3StrAccumAppend(pStr, ")", 1); } /* ** This function is a no-op unless currently processing an EXPLAIN QUERY PLAN ** command, or if either SQLITE_DEBUG or SQLITE_ENABLE_STMT_SCANSTATUS was ** defined at compile-time. If it is not a no-op, a single OP_Explain opcode ** is added to the output to describe the table scan strategy in pLevel. ** ** If an OP_Explain opcode is added to the VM, its address is returned. ** Otherwise, if no OP_Explain is coded, zero is returned. */ SQLITE_PRIVATE int sqlite3WhereExplainOneScan( Parse *pParse, /* Parse context */ SrcList *pTabList, /* Table list this loop refers to */ WhereLevel *pLevel, /* Scan to write OP_Explain opcode for */ int iLevel, /* Value for "level" column of output */ int iFrom, /* Value for "from" column of output */ u16 wctrlFlags /* Flags passed to sqlite3WhereBegin() */ ){ int ret = 0; #if !defined(SQLITE_DEBUG) && !defined(SQLITE_ENABLE_STMT_SCANSTATUS) if( pParse->explain==2 ) #endif { struct SrcList_item *pItem = &pTabList->a[pLevel->iFrom]; Vdbe *v = pParse->pVdbe; /* VM being constructed */ sqlite3 *db = pParse->db; /* Database handle */ int iId = pParse->iSelectId; /* Select id (left-most output column) */ int isSearch; /* True for a SEARCH. False for SCAN. 
*/ WhereLoop *pLoop; /* The controlling WhereLoop object */ u32 flags; /* Flags that describe this loop */ char *zMsg; /* Text to add to EQP output */ StrAccum str; /* EQP output string */ char zBuf[100]; /* Initial space for EQP output string */ pLoop = pLevel->pWLoop; flags = pLoop->wsFlags; if( (flags&WHERE_MULTI_OR) || (wctrlFlags&WHERE_ONETABLE_ONLY) ) return 0; isSearch = (flags&(WHERE_BTM_LIMIT|WHERE_TOP_LIMIT))!=0 || ((flags&WHERE_VIRTUALTABLE)==0 && (pLoop->u.btree.nEq>0)) || (wctrlFlags&(WHERE_ORDERBY_MIN|WHERE_ORDERBY_MAX)); sqlite3StrAccumInit(&str, db, zBuf, sizeof(zBuf), SQLITE_MAX_LENGTH); sqlite3StrAccumAppendAll(&str, isSearch ? "SEARCH" : "SCAN"); if( pItem->pSelect ){ sqlite3XPrintf(&str, 0, " SUBQUERY %d", pItem->iSelectId); }else{ sqlite3XPrintf(&str, 0, " TABLE %s", pItem->zName); } if( pItem->zAlias ){ sqlite3XPrintf(&str, 0, " AS %s", pItem->zAlias); } if( (flags & (WHERE_IPK|WHERE_VIRTUALTABLE))==0 ){ const char *zFmt = 0; Index *pIdx; assert( pLoop->u.btree.pIndex!=0 ); pIdx = pLoop->u.btree.pIndex; assert( !(flags&WHERE_AUTO_INDEX) || (flags&WHERE_IDX_ONLY) ); if( !HasRowid(pItem->pTab) && IsPrimaryKeyIndex(pIdx) ){ if( isSearch ){ zFmt = "PRIMARY KEY"; } }else if( flags & WHERE_PARTIALIDX ){ zFmt = "AUTOMATIC PARTIAL COVERING INDEX"; }else if( flags & WHERE_AUTO_INDEX ){ zFmt = "AUTOMATIC COVERING INDEX"; }else if( flags & WHERE_IDX_ONLY ){ zFmt = "COVERING INDEX %s"; }else{ zFmt = "INDEX %s"; } if( zFmt ){ sqlite3StrAccumAppend(&str, " USING ", 7); sqlite3XPrintf(&str, 0, zFmt, pIdx->zName); explainIndexRange(&str, pLoop, pItem->pTab); } }else if( (flags & WHERE_IPK)!=0 && (flags & WHERE_CONSTRAINT)!=0 ){ const char *zRange; if( flags&(WHERE_COLUMN_EQ|WHERE_COLUMN_IN) ){ zRange = "(rowid=?)"; }else if( (flags&WHERE_BOTH_LIMIT)==WHERE_BOTH_LIMIT ){ zRange = "(rowid>? AND rowid?)"; }else{ assert( flags&WHERE_TOP_LIMIT); zRange = "(rowidu.vtab.idxNum, pLoop->u.vtab.idxStr); } #endif #ifdef SQLITE_EXPLAIN_ESTIMATED_ROWS if( pLoop->nOut>=10 ){ sqlite3XPrintf(&str, 0, " (~%llu rows)", sqlite3LogEstToInt(pLoop->nOut)); }else{ sqlite3StrAccumAppend(&str, " (~1 row)", 9); } #endif zMsg = sqlite3StrAccumFinish(&str); ret = sqlite3VdbeAddOp4(v, OP_Explain, iId, iLevel, iFrom, zMsg,P4_DYNAMIC); } return ret; } #endif /* SQLITE_OMIT_EXPLAIN */ #ifdef SQLITE_ENABLE_STMT_SCANSTATUS /* ** Configure the VM passed as the first argument with an ** sqlite3_stmt_scanstatus() entry corresponding to the scan used to ** implement level pLvl. Argument pSrclist is a pointer to the FROM ** clause that the scan reads data from. ** ** If argument addrExplain is not 0, it must be the address of an ** OP_Explain instruction that describes the same loop. */ SQLITE_PRIVATE void sqlite3WhereAddScanStatus( Vdbe *v, /* Vdbe to add scanstatus entry to */ SrcList *pSrclist, /* FROM clause pLvl reads data from */ WhereLevel *pLvl, /* Level to add scanstatus() entry for */ int addrExplain /* Address of OP_Explain (or 0) */ ){ const char *zObj = 0; WhereLoop *pLoop = pLvl->pWLoop; if( (pLoop->wsFlags & WHERE_VIRTUALTABLE)==0 && pLoop->u.btree.pIndex!=0 ){ zObj = pLoop->u.btree.pIndex->zName; }else{ zObj = pSrclist->a[pLvl->iFrom].zName; } sqlite3VdbeScanStatus( v, addrExplain, pLvl->addrBody, pLvl->addrVisit, pLoop->nOut, zObj ); } #endif /* ** Disable a term in the WHERE clause. Except, do not disable the term ** if it controls a LEFT OUTER JOIN and it did not originate in the ON ** or USING clause of that join. 
** ** Consider the term t2.z='ok' in the following queries: ** ** (1) SELECT * FROM t1 LEFT JOIN t2 ON t1.a=t2.x WHERE t2.z='ok' ** (2) SELECT * FROM t1 LEFT JOIN t2 ON t1.a=t2.x AND t2.z='ok' ** (3) SELECT * FROM t1, t2 WHERE t1.a=t2.x AND t2.z='ok' ** ** The t2.z='ok' is disabled in the in (2) because it originates ** in the ON clause. The term is disabled in (3) because it is not part ** of a LEFT OUTER JOIN. In (1), the term is not disabled. ** ** Disabling a term causes that term to not be tested in the inner loop ** of the join. Disabling is an optimization. When terms are satisfied ** by indices, we disable them to prevent redundant tests in the inner ** loop. We would get the correct results if nothing were ever disabled, ** but joins might run a little slower. The trick is to disable as much ** as we can without disabling too much. If we disabled in (1), we'd get ** the wrong answer. See ticket #813. ** ** If all the children of a term are disabled, then that term is also ** automatically disabled. In this way, terms get disabled if derived ** virtual terms are tested first. For example: ** ** x GLOB 'abc*' AND x>='abc' AND x<'acd' ** \___________/ \______/ \_____/ ** parent child1 child2 ** ** Only the parent term was in the original WHERE clause. The child1 ** and child2 terms were added by the LIKE optimization. If both of ** the virtual child terms are valid, then testing of the parent can be ** skipped. ** ** Usually the parent term is marked as TERM_CODED. But if the parent ** term was originally TERM_LIKE, then the parent gets TERM_LIKECOND instead. ** The TERM_LIKECOND marking indicates that the term should be coded inside ** a conditional such that is only evaluated on the second pass of a ** LIKE-optimization loop, when scanning BLOBs instead of strings. */ static void disableTerm(WhereLevel *pLevel, WhereTerm *pTerm){ int nLoop = 0; while( pTerm && (pTerm->wtFlags & TERM_CODED)==0 && (pLevel->iLeftJoin==0 || ExprHasProperty(pTerm->pExpr, EP_FromJoin)) && (pLevel->notReady & pTerm->prereqAll)==0 ){ if( nLoop && (pTerm->wtFlags & TERM_LIKE)!=0 ){ pTerm->wtFlags |= TERM_LIKECOND; }else{ pTerm->wtFlags |= TERM_CODED; } if( pTerm->iParent<0 ) break; pTerm = &pTerm->pWC->a[pTerm->iParent]; pTerm->nChild--; if( pTerm->nChild!=0 ) break; nLoop++; } } /* ** Code an OP_Affinity opcode to apply the column affinity string zAff ** to the n registers starting at base. ** ** As an optimization, SQLITE_AFF_BLOB entries (which are no-ops) at the ** beginning and end of zAff are ignored. If all entries in zAff are ** SQLITE_AFF_BLOB, then no code gets generated. ** ** This routine makes its own copy of zAff so that the caller is free ** to modify zAff after this routine returns. */ static void codeApplyAffinity(Parse *pParse, int base, int n, char *zAff){ Vdbe *v = pParse->pVdbe; if( zAff==0 ){ assert( pParse->db->mallocFailed ); return; } assert( v!=0 ); /* Adjust base and n to skip over SQLITE_AFF_BLOB entries at the beginning ** and end of the affinity string. */ while( n>0 && zAff[0]==SQLITE_AFF_BLOB ){ n--; base++; zAff++; } while( n>1 && zAff[n-1]==SQLITE_AFF_BLOB ){ n--; } /* Code the OP_Affinity opcode if there is anything left to do. */ if( n>0 ){ sqlite3VdbeAddOp2(v, OP_Affinity, base, n); sqlite3VdbeChangeP4(v, -1, zAff, n); sqlite3ExprCacheAffinityChange(pParse, base, n); } } /* ** Generate code for a single equality term of the WHERE clause. An equality ** term can be either X=expr or X IN (...). pTerm is the term to be ** coded. 
** ** The current value for the constraint is left in register iReg. ** ** For a constraint of the form X=expr, the expression is evaluated and its ** result is left on the stack. For constraints of the form X IN (...) ** this routine sets up a loop that will iterate over all values of X. */ static int codeEqualityTerm( Parse *pParse, /* The parsing context */ WhereTerm *pTerm, /* The term of the WHERE clause to be coded */ WhereLevel *pLevel, /* The level of the FROM clause we are working on */ int iEq, /* Index of the equality term within this level */ int bRev, /* True for reverse-order IN operations */ int iTarget /* Attempt to leave results in this register */ ){ Expr *pX = pTerm->pExpr; Vdbe *v = pParse->pVdbe; int iReg; /* Register holding results */ assert( iTarget>0 ); if( pX->op==TK_EQ || pX->op==TK_IS ){ iReg = sqlite3ExprCodeTarget(pParse, pX->pRight, iTarget); }else if( pX->op==TK_ISNULL ){ iReg = iTarget; sqlite3VdbeAddOp2(v, OP_Null, 0, iReg); #ifndef SQLITE_OMIT_SUBQUERY }else{ int eType; int iTab; struct InLoop *pIn; WhereLoop *pLoop = pLevel->pWLoop; if( (pLoop->wsFlags & WHERE_VIRTUALTABLE)==0 && pLoop->u.btree.pIndex!=0 && pLoop->u.btree.pIndex->aSortOrder[iEq] ){ testcase( iEq==0 ); testcase( bRev ); bRev = !bRev; } assert( pX->op==TK_IN ); iReg = iTarget; eType = sqlite3FindInIndex(pParse, pX, IN_INDEX_LOOP, 0); if( eType==IN_INDEX_INDEX_DESC ){ testcase( bRev ); bRev = !bRev; } iTab = pX->iTable; sqlite3VdbeAddOp2(v, bRev ? OP_Last : OP_Rewind, iTab, 0); VdbeCoverageIf(v, bRev); VdbeCoverageIf(v, !bRev); assert( (pLoop->wsFlags & WHERE_MULTI_OR)==0 ); pLoop->wsFlags |= WHERE_IN_ABLE; if( pLevel->u.in.nIn==0 ){ pLevel->addrNxt = sqlite3VdbeMakeLabel(v); } pLevel->u.in.nIn++; pLevel->u.in.aInLoop = sqlite3DbReallocOrFree(pParse->db, pLevel->u.in.aInLoop, sizeof(pLevel->u.in.aInLoop[0])*pLevel->u.in.nIn); pIn = pLevel->u.in.aInLoop; if( pIn ){ pIn += pLevel->u.in.nIn - 1; pIn->iCur = iTab; if( eType==IN_INDEX_ROWID ){ pIn->addrInTop = sqlite3VdbeAddOp2(v, OP_Rowid, iTab, iReg); }else{ pIn->addrInTop = sqlite3VdbeAddOp3(v, OP_Column, iTab, 0, iReg); } pIn->eEndLoopOp = bRev ? OP_PrevIfOpen : OP_NextIfOpen; sqlite3VdbeAddOp1(v, OP_IsNull, iReg); VdbeCoverage(v); }else{ pLevel->u.in.nIn = 0; } #endif } disableTerm(pLevel, pTerm); return iReg; } /* ** Generate code that will evaluate all == and IN constraints for an ** index scan. ** ** For example, consider table t1(a,b,c,d,e,f) with index i1(a,b,c). ** Suppose the WHERE clause is this: a==5 AND b IN (1,2,3) AND c>5 AND c<10 ** The index has as many as three equality constraints, but in this ** example, the third "c" value is an inequality. So only two ** constraints are coded. This routine will generate code to evaluate ** a==5 and b IN (1,2,3). The current values for a and b will be stored ** in consecutive registers and the index of the first register is returned. ** ** In the example above nEq==2. But this subroutine works for any value ** of nEq including 0. If nEq==0, this routine is nearly a no-op. ** The only thing it does is allocate the pLevel->iMem memory cell and ** compute the affinity string. ** ** The nExtraReg parameter is 0 or 1. It is 0 if all WHERE clause constraints ** are == or IN and are covered by the nEq. nExtraReg is 1 if there is ** an inequality constraint (such as the "c>=5 AND c<10" in the example) that ** occurs after the nEq quality constraints. ** ** This routine allocates a range of nEq+nExtraReg memory cells and returns ** the index of the first memory cell in that range. 
The code that ** calls this routine will use that memory range to store keys for ** start and termination conditions of the loop. ** key value of the loop. If one or more IN operators appear, then ** this routine allocates an additional nEq memory cells for internal ** use. ** ** Before returning, *pzAff is set to point to a buffer containing a ** copy of the column affinity string of the index allocated using ** sqlite3DbMalloc(). Except, entries in the copy of the string associated ** with equality constraints that use BLOB or NONE affinity are set to ** SQLITE_AFF_BLOB. This is to deal with SQL such as the following: ** ** CREATE TABLE t1(a TEXT PRIMARY KEY, b); ** SELECT ... FROM t1 AS t2, t1 WHERE t1.a = t2.b; ** ** In the example above, the index on t1(a) has TEXT affinity. But since ** the right hand side of the equality constraint (t2.b) has BLOB/NONE affinity, ** no conversion should be attempted before using a t2.b value as part of ** a key to search the index. Hence the first byte in the returned affinity ** string in this example would be set to SQLITE_AFF_BLOB. */ static int codeAllEqualityTerms( Parse *pParse, /* Parsing context */ WhereLevel *pLevel, /* Which nested loop of the FROM we are coding */ int bRev, /* Reverse the order of IN operators */ int nExtraReg, /* Number of extra registers to allocate */ char **pzAff /* OUT: Set to point to affinity string */ ){ u16 nEq; /* The number of == or IN constraints to code */ u16 nSkip; /* Number of left-most columns to skip */ Vdbe *v = pParse->pVdbe; /* The vm under construction */ Index *pIdx; /* The index being used for this loop */ WhereTerm *pTerm; /* A single constraint term */ WhereLoop *pLoop; /* The WhereLoop object */ int j; /* Loop counter */ int regBase; /* Base register */ int nReg; /* Number of registers to allocate */ char *zAff; /* Affinity string to return */ /* This module is only called on query plans that use an index. */ pLoop = pLevel->pWLoop; assert( (pLoop->wsFlags & WHERE_VIRTUALTABLE)==0 ); nEq = pLoop->u.btree.nEq; nSkip = pLoop->nSkip; pIdx = pLoop->u.btree.pIndex; assert( pIdx!=0 ); /* Figure out how many memory cells we will need then allocate them. */ regBase = pParse->nMem + 1; nReg = pLoop->u.btree.nEq + nExtraReg; pParse->nMem += nReg; zAff = sqlite3DbStrDup(pParse->db, sqlite3IndexAffinityStr(v, pIdx)); if( !zAff ){ pParse->db->mallocFailed = 1; } if( nSkip ){ int iIdxCur = pLevel->iIdxCur; sqlite3VdbeAddOp1(v, (bRev?OP_Last:OP_Rewind), iIdxCur); VdbeCoverageIf(v, bRev==0); VdbeCoverageIf(v, bRev!=0); VdbeComment((v, "begin skip-scan on %s", pIdx->zName)); j = sqlite3VdbeAddOp0(v, OP_Goto); pLevel->addrSkip = sqlite3VdbeAddOp4Int(v, (bRev?OP_SeekLT:OP_SeekGT), iIdxCur, 0, regBase, nSkip); VdbeCoverageIf(v, bRev==0); VdbeCoverageIf(v, bRev!=0); sqlite3VdbeJumpHere(v, j); for(j=0; jaiColumn[j]>=0 ); VdbeComment((v, "%s", pIdx->pTable->aCol[pIdx->aiColumn[j]].zName)); } } /* Evaluate the equality constraints */ assert( zAff==0 || (int)strlen(zAff)>=nEq ); for(j=nSkip; jaLTerm[j]; assert( pTerm!=0 ); /* The following testcase is true for indices with redundant columns. 
** Ex: CREATE INDEX i1 ON t1(a,b,a); SELECT * FROM t1 WHERE a=0 AND b=0; */ testcase( (pTerm->wtFlags & TERM_CODED)!=0 ); testcase( pTerm->wtFlags & TERM_VIRTUAL ); r1 = codeEqualityTerm(pParse, pTerm, pLevel, j, bRev, regBase+j); if( r1!=regBase+j ){ if( nReg==1 ){ sqlite3ReleaseTempReg(pParse, regBase); regBase = r1; }else{ sqlite3VdbeAddOp2(v, OP_SCopy, r1, regBase+j); } } testcase( pTerm->eOperator & WO_ISNULL ); testcase( pTerm->eOperator & WO_IN ); if( (pTerm->eOperator & (WO_ISNULL|WO_IN))==0 ){ Expr *pRight = pTerm->pExpr->pRight; if( (pTerm->wtFlags & TERM_IS)==0 && sqlite3ExprCanBeNull(pRight) ){ sqlite3VdbeAddOp2(v, OP_IsNull, regBase+j, pLevel->addrBrk); VdbeCoverage(v); } if( zAff ){ if( sqlite3CompareAffinity(pRight, zAff[j])==SQLITE_AFF_BLOB ){ zAff[j] = SQLITE_AFF_BLOB; } if( sqlite3ExprNeedsNoAffinityChange(pRight, zAff[j]) ){ zAff[j] = SQLITE_AFF_BLOB; } } } } *pzAff = zAff; return regBase; } /* ** If the most recently coded instruction is a constant range contraint ** that originated from the LIKE optimization, then change the P3 to be ** pLoop->iLikeRepCntr and set P5. ** ** The LIKE optimization trys to evaluate "x LIKE 'abc%'" as a range ** expression: "x>='ABC' AND x<'abd'". But this requires that the range ** scan loop run twice, once for strings and a second time for BLOBs. ** The OP_String opcodes on the second pass convert the upper and lower ** bound string contants to blobs. This routine makes the necessary changes ** to the OP_String opcodes for that to happen. */ static void whereLikeOptimizationStringFixup( Vdbe *v, /* prepared statement under construction */ WhereLevel *pLevel, /* The loop that contains the LIKE operator */ WhereTerm *pTerm /* The upper or lower bound just coded */ ){ if( pTerm->wtFlags & TERM_LIKEOPT ){ VdbeOp *pOp; assert( pLevel->iLikeRepCntr>0 ); pOp = sqlite3VdbeGetOp(v, -1); assert( pOp!=0 ); assert( pOp->opcode==OP_String8 || pTerm->pWC->pWInfo->pParse->db->mallocFailed ); pOp->p3 = pLevel->iLikeRepCntr; pOp->p5 = 1; } } /* ** Generate code for the start of the iLevel-th loop in the WHERE clause ** implementation described by pWInfo. 
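**
** As a rough illustration of the simplest case handled below (Case 2),
** a constraint such as "rowid=?1" against an ordinary rowid table is
** coded as little more than:
**
**       MustBeInt  r[x]   addrNxt
**       NotExists  iCur   addrNxt   r[x]
**
** with pLevel->op set to OP_Noop, so no looping opcode is emitted at
** all.  The register and address names here are schematic, not literal
** VDBE listings.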
*/ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( WhereInfo *pWInfo, /* Complete information about the WHERE clause */ int iLevel, /* Which level of pWInfo->a[] should be coded */ Bitmask notReady /* Which tables are currently available */ ){ int j, k; /* Loop counters */ int iCur; /* The VDBE cursor for the table */ int addrNxt; /* Where to jump to continue with the next IN case */ int omitTable; /* True if we use the index only */ int bRev; /* True if we need to scan in reverse order */ WhereLevel *pLevel; /* The where level to be coded */ WhereLoop *pLoop; /* The WhereLoop object being coded */ WhereClause *pWC; /* Decomposition of the entire WHERE clause */ WhereTerm *pTerm; /* A WHERE clause term */ Parse *pParse; /* Parsing context */ sqlite3 *db; /* Database connection */ Vdbe *v; /* The prepared stmt under constructions */ struct SrcList_item *pTabItem; /* FROM clause term being coded */ int addrBrk; /* Jump here to break out of the loop */ int addrCont; /* Jump here to continue with next cycle */ int iRowidReg = 0; /* Rowid is stored in this register, if not zero */ int iReleaseReg = 0; /* Temp register to free before returning */ pParse = pWInfo->pParse; v = pParse->pVdbe; pWC = &pWInfo->sWC; db = pParse->db; pLevel = &pWInfo->a[iLevel]; pLoop = pLevel->pWLoop; pTabItem = &pWInfo->pTabList->a[pLevel->iFrom]; iCur = pTabItem->iCursor; pLevel->notReady = notReady & ~sqlite3WhereGetMask(&pWInfo->sMaskSet, iCur); bRev = (pWInfo->revMask>>iLevel)&1; omitTable = (pLoop->wsFlags & WHERE_IDX_ONLY)!=0 && (pWInfo->wctrlFlags & WHERE_FORCE_TABLE)==0; VdbeModuleComment((v, "Begin WHERE-loop%d: %s",iLevel,pTabItem->pTab->zName)); /* Create labels for the "break" and "continue" instructions ** for the current loop. Jump to addrBrk to break out of a loop. ** Jump to cont to go immediately to the next iteration of the ** loop. ** ** When there is an IN operator, we also have a "addrNxt" label that ** means to continue with the next IN value combination. When ** there are no IN operators in the constraints, the "addrNxt" label ** is the same as "addrBrk". */ addrBrk = pLevel->addrBrk = pLevel->addrNxt = sqlite3VdbeMakeLabel(v); addrCont = pLevel->addrCont = sqlite3VdbeMakeLabel(v); /* If this is the right table of a LEFT OUTER JOIN, allocate and ** initialize a memory cell that records if this table matches any ** row of the left table of the join. */ if( pLevel->iFrom>0 && (pTabItem[0].jointype & JT_LEFT)!=0 ){ pLevel->iLeftJoin = ++pParse->nMem; sqlite3VdbeAddOp2(v, OP_Integer, 0, pLevel->iLeftJoin); VdbeComment((v, "init LEFT JOIN no-match flag")); } /* Special case of a FROM clause subquery implemented as a co-routine */ if( pTabItem->viaCoroutine ){ int regYield = pTabItem->regReturn; sqlite3VdbeAddOp3(v, OP_InitCoroutine, regYield, 0, pTabItem->addrFillSub); pLevel->p2 = sqlite3VdbeAddOp2(v, OP_Yield, regYield, addrBrk); VdbeCoverage(v); VdbeComment((v, "next row of \"%s\"", pTabItem->pTab->zName)); pLevel->op = OP_Goto; }else #ifndef SQLITE_OMIT_VIRTUALTABLE if( (pLoop->wsFlags & WHERE_VIRTUALTABLE)!=0 ){ /* Case 1: The table is a virtual-table. Use the VFilter and VNext ** to access the data. 
*/ int iReg; /* P3 Value for OP_VFilter */ int addrNotFound; int nConstraint = pLoop->nLTerm; sqlite3ExprCachePush(pParse); iReg = sqlite3GetTempRange(pParse, nConstraint+2); addrNotFound = pLevel->addrBrk; for(j=0; jaLTerm[j]; if( pTerm==0 ) continue; if( pTerm->eOperator & WO_IN ){ codeEqualityTerm(pParse, pTerm, pLevel, j, bRev, iTarget); addrNotFound = pLevel->addrNxt; }else{ sqlite3ExprCode(pParse, pTerm->pExpr->pRight, iTarget); } } sqlite3VdbeAddOp2(v, OP_Integer, pLoop->u.vtab.idxNum, iReg); sqlite3VdbeAddOp2(v, OP_Integer, nConstraint, iReg+1); sqlite3VdbeAddOp4(v, OP_VFilter, iCur, addrNotFound, iReg, pLoop->u.vtab.idxStr, pLoop->u.vtab.needFree ? P4_MPRINTF : P4_STATIC); VdbeCoverage(v); pLoop->u.vtab.needFree = 0; for(j=0; ju.vtab.omitMask>>j)&1 ){ disableTerm(pLevel, pLoop->aLTerm[j]); } } pLevel->op = OP_VNext; pLevel->p1 = iCur; pLevel->p2 = sqlite3VdbeCurrentAddr(v); sqlite3ReleaseTempRange(pParse, iReg, nConstraint+2); sqlite3ExprCachePop(pParse); }else #endif /* SQLITE_OMIT_VIRTUALTABLE */ if( (pLoop->wsFlags & WHERE_IPK)!=0 && (pLoop->wsFlags & (WHERE_COLUMN_IN|WHERE_COLUMN_EQ))!=0 ){ /* Case 2: We can directly reference a single row using an ** equality comparison against the ROWID field. Or ** we reference multiple rows using a "rowid IN (...)" ** construct. */ assert( pLoop->u.btree.nEq==1 ); pTerm = pLoop->aLTerm[0]; assert( pTerm!=0 ); assert( pTerm->pExpr!=0 ); assert( omitTable==0 ); testcase( pTerm->wtFlags & TERM_VIRTUAL ); iReleaseReg = ++pParse->nMem; iRowidReg = codeEqualityTerm(pParse, pTerm, pLevel, 0, bRev, iReleaseReg); if( iRowidReg!=iReleaseReg ) sqlite3ReleaseTempReg(pParse, iReleaseReg); addrNxt = pLevel->addrNxt; sqlite3VdbeAddOp2(v, OP_MustBeInt, iRowidReg, addrNxt); VdbeCoverage(v); sqlite3VdbeAddOp3(v, OP_NotExists, iCur, addrNxt, iRowidReg); VdbeCoverage(v); sqlite3ExprCacheAffinityChange(pParse, iRowidReg, 1); sqlite3ExprCacheStore(pParse, iCur, -1, iRowidReg); VdbeComment((v, "pk")); pLevel->op = OP_Noop; }else if( (pLoop->wsFlags & WHERE_IPK)!=0 && (pLoop->wsFlags & WHERE_COLUMN_RANGE)!=0 ){ /* Case 3: We have an inequality comparison against the ROWID field. */ int testOp = OP_Noop; int start; int memEndValue = 0; WhereTerm *pStart, *pEnd; assert( omitTable==0 ); j = 0; pStart = pEnd = 0; if( pLoop->wsFlags & WHERE_BTM_LIMIT ) pStart = pLoop->aLTerm[j++]; if( pLoop->wsFlags & WHERE_TOP_LIMIT ) pEnd = pLoop->aLTerm[j++]; assert( pStart!=0 || pEnd!=0 ); if( bRev ){ pTerm = pStart; pStart = pEnd; pEnd = pTerm; } if( pStart ){ Expr *pX; /* The expression that defines the start bound */ int r1, rTemp; /* Registers for holding the start boundary */ /* The following constant maps TK_xx codes into corresponding ** seek opcodes. It depends on a particular ordering of TK_xx */ const u8 aMoveOp[] = { /* TK_GT */ OP_SeekGT, /* TK_LE */ OP_SeekLE, /* TK_LT */ OP_SeekLT, /* TK_GE */ OP_SeekGE }; assert( TK_LE==TK_GT+1 ); /* Make sure the ordering.. */ assert( TK_LT==TK_GT+2 ); /* ... of the TK_xx values... */ assert( TK_GE==TK_GT+3 ); /* ... is correcct. 
*/ assert( (pStart->wtFlags & TERM_VNULL)==0 ); testcase( pStart->wtFlags & TERM_VIRTUAL ); pX = pStart->pExpr; assert( pX!=0 ); testcase( pStart->leftCursor!=iCur ); /* transitive constraints */ r1 = sqlite3ExprCodeTemp(pParse, pX->pRight, &rTemp); sqlite3VdbeAddOp3(v, aMoveOp[pX->op-TK_GT], iCur, addrBrk, r1); VdbeComment((v, "pk")); VdbeCoverageIf(v, pX->op==TK_GT); VdbeCoverageIf(v, pX->op==TK_LE); VdbeCoverageIf(v, pX->op==TK_LT); VdbeCoverageIf(v, pX->op==TK_GE); sqlite3ExprCacheAffinityChange(pParse, r1, 1); sqlite3ReleaseTempReg(pParse, rTemp); disableTerm(pLevel, pStart); }else{ sqlite3VdbeAddOp2(v, bRev ? OP_Last : OP_Rewind, iCur, addrBrk); VdbeCoverageIf(v, bRev==0); VdbeCoverageIf(v, bRev!=0); } if( pEnd ){ Expr *pX; pX = pEnd->pExpr; assert( pX!=0 ); assert( (pEnd->wtFlags & TERM_VNULL)==0 ); testcase( pEnd->leftCursor!=iCur ); /* Transitive constraints */ testcase( pEnd->wtFlags & TERM_VIRTUAL ); memEndValue = ++pParse->nMem; sqlite3ExprCode(pParse, pX->pRight, memEndValue); if( pX->op==TK_LT || pX->op==TK_GT ){ testOp = bRev ? OP_Le : OP_Ge; }else{ testOp = bRev ? OP_Lt : OP_Gt; } disableTerm(pLevel, pEnd); } start = sqlite3VdbeCurrentAddr(v); pLevel->op = bRev ? OP_Prev : OP_Next; pLevel->p1 = iCur; pLevel->p2 = start; assert( pLevel->p5==0 ); if( testOp!=OP_Noop ){ iRowidReg = ++pParse->nMem; sqlite3VdbeAddOp2(v, OP_Rowid, iCur, iRowidReg); sqlite3ExprCacheStore(pParse, iCur, -1, iRowidReg); sqlite3VdbeAddOp3(v, testOp, memEndValue, addrBrk, iRowidReg); VdbeCoverageIf(v, testOp==OP_Le); VdbeCoverageIf(v, testOp==OP_Lt); VdbeCoverageIf(v, testOp==OP_Ge); VdbeCoverageIf(v, testOp==OP_Gt); sqlite3VdbeChangeP5(v, SQLITE_AFF_NUMERIC | SQLITE_JUMPIFNULL); } }else if( pLoop->wsFlags & WHERE_INDEXED ){ /* Case 4: A scan using an index. ** ** The WHERE clause may contain zero or more equality ** terms ("==" or "IN" operators) that refer to the N ** left-most columns of the index. It may also contain ** inequality constraints (>, <, >= or <=) on the indexed ** column that immediately follows the N equalities. Only ** the right-most column can be an inequality - the rest must ** use the "==" and "IN" operators. For example, if the ** index is on (x,y,z), then the following clauses are all ** optimized: ** ** x=5 ** x=5 AND y=10 ** x=5 AND y<10 ** x=5 AND y>5 AND y<10 ** x=5 AND y=5 AND z<=10 ** ** The z<10 term of the following cannot be used, only ** the x=5 term: ** ** x=5 AND z<10 ** ** N may be zero if there are inequality constraints. ** If there are no inequality constraints, then N is at ** least one. ** ** This case is also used when there are no WHERE clause ** constraints but an index is selected anyway, in order ** to force the output order to conform to an ORDER BY. 
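**
** As a worked reading of the opcode tables that follow (illustration
** only): for "x=5 AND y>10" on index (x,y,z) scanned in the forward
** direction, nEq==1, the y>10 term supplies the start of the range,
** and startEq and bRev are both zero, so the initial seek opcode is
**
**       aStartOp[(1<<2) + (0<<1) + 0] == OP_SeekGT
**
** which positions the index cursor on the first entry whose key is
** greater than (5,10).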
*/ static const u8 aStartOp[] = { 0, 0, OP_Rewind, /* 2: (!start_constraints && startEq && !bRev) */ OP_Last, /* 3: (!start_constraints && startEq && bRev) */ OP_SeekGT, /* 4: (start_constraints && !startEq && !bRev) */ OP_SeekLT, /* 5: (start_constraints && !startEq && bRev) */ OP_SeekGE, /* 6: (start_constraints && startEq && !bRev) */ OP_SeekLE /* 7: (start_constraints && startEq && bRev) */ }; static const u8 aEndOp[] = { OP_IdxGE, /* 0: (end_constraints && !bRev && !endEq) */ OP_IdxGT, /* 1: (end_constraints && !bRev && endEq) */ OP_IdxLE, /* 2: (end_constraints && bRev && !endEq) */ OP_IdxLT, /* 3: (end_constraints && bRev && endEq) */ }; u16 nEq = pLoop->u.btree.nEq; /* Number of == or IN terms */ int regBase; /* Base register holding constraint values */ WhereTerm *pRangeStart = 0; /* Inequality constraint at range start */ WhereTerm *pRangeEnd = 0; /* Inequality constraint at range end */ int startEq; /* True if range start uses ==, >= or <= */ int endEq; /* True if range end uses ==, >= or <= */ int start_constraints; /* Start of range is constrained */ int nConstraint; /* Number of constraint terms */ Index *pIdx; /* The index we will be using */ int iIdxCur; /* The VDBE cursor for the index */ int nExtraReg = 0; /* Number of extra registers needed */ int op; /* Instruction opcode */ char *zStartAff; /* Affinity for start of range constraint */ char cEndAff = 0; /* Affinity for end of range constraint */ u8 bSeekPastNull = 0; /* True to seek past initial nulls */ u8 bStopAtNull = 0; /* Add condition to terminate at NULLs */ pIdx = pLoop->u.btree.pIndex; iIdxCur = pLevel->iIdxCur; assert( nEq>=pLoop->nSkip ); /* If this loop satisfies a sort order (pOrderBy) request that ** was passed to this function to implement a "SELECT min(x) ..." ** query, then the caller will only allow the loop to run for ** a single iteration. This means that the first row returned ** should not have a NULL value stored in 'x'. If column 'x' is ** the first one after the nEq equality constraints in the index, ** this requires some special handling. */ assert( pWInfo->pOrderBy==0 || pWInfo->pOrderBy->nExpr==1 || (pWInfo->wctrlFlags&WHERE_ORDERBY_MIN)==0 ); if( (pWInfo->wctrlFlags&WHERE_ORDERBY_MIN)!=0 && pWInfo->nOBSat>0 && (pIdx->nKeyCol>nEq) ){ assert( pLoop->nSkip==0 ); bSeekPastNull = 1; nExtraReg = 1; } /* Find any inequality constraint terms for the start and end ** of the range. 
*/ j = nEq; if( pLoop->wsFlags & WHERE_BTM_LIMIT ){ pRangeStart = pLoop->aLTerm[j++]; nExtraReg = 1; /* Like optimization range constraints always occur in pairs */ assert( (pRangeStart->wtFlags & TERM_LIKEOPT)==0 || (pLoop->wsFlags & WHERE_TOP_LIMIT)!=0 ); } if( pLoop->wsFlags & WHERE_TOP_LIMIT ){ pRangeEnd = pLoop->aLTerm[j++]; nExtraReg = 1; if( (pRangeEnd->wtFlags & TERM_LIKEOPT)!=0 ){ assert( pRangeStart!=0 ); /* LIKE opt constraints */ assert( pRangeStart->wtFlags & TERM_LIKEOPT ); /* occur in pairs */ pLevel->iLikeRepCntr = ++pParse->nMem; testcase( bRev ); testcase( pIdx->aSortOrder[nEq]==SQLITE_SO_DESC ); sqlite3VdbeAddOp2(v, OP_Integer, bRev ^ (pIdx->aSortOrder[nEq]==SQLITE_SO_DESC), pLevel->iLikeRepCntr); VdbeComment((v, "LIKE loop counter")); pLevel->addrLikeRep = sqlite3VdbeCurrentAddr(v); } if( pRangeStart==0 && (j = pIdx->aiColumn[nEq])>=0 && pIdx->pTable->aCol[j].notNull==0 ){ bSeekPastNull = 1; } } assert( pRangeEnd==0 || (pRangeEnd->wtFlags & TERM_VNULL)==0 ); /* Generate code to evaluate all constraint terms using == or IN ** and store the values of those terms in an array of registers ** starting at regBase. */ regBase = codeAllEqualityTerms(pParse,pLevel,bRev,nExtraReg,&zStartAff); assert( zStartAff==0 || sqlite3Strlen30(zStartAff)>=nEq ); if( zStartAff ) cEndAff = zStartAff[nEq]; addrNxt = pLevel->addrNxt; /* If we are doing a reverse order scan on an ascending index, or ** a forward order scan on a descending index, interchange the ** start and end terms (pRangeStart and pRangeEnd). */ if( (nEqnKeyCol && bRev==(pIdx->aSortOrder[nEq]==SQLITE_SO_ASC)) || (bRev && pIdx->nKeyCol==nEq) ){ SWAP(WhereTerm *, pRangeEnd, pRangeStart); SWAP(u8, bSeekPastNull, bStopAtNull); } testcase( pRangeStart && (pRangeStart->eOperator & WO_LE)!=0 ); testcase( pRangeStart && (pRangeStart->eOperator & WO_GE)!=0 ); testcase( pRangeEnd && (pRangeEnd->eOperator & WO_LE)!=0 ); testcase( pRangeEnd && (pRangeEnd->eOperator & WO_GE)!=0 ); startEq = !pRangeStart || pRangeStart->eOperator & (WO_LE|WO_GE); endEq = !pRangeEnd || pRangeEnd->eOperator & (WO_LE|WO_GE); start_constraints = pRangeStart || nEq>0; /* Seek the index cursor to the start of the range. */ nConstraint = nEq; if( pRangeStart ){ Expr *pRight = pRangeStart->pExpr->pRight; sqlite3ExprCode(pParse, pRight, regBase+nEq); whereLikeOptimizationStringFixup(v, pLevel, pRangeStart); if( (pRangeStart->wtFlags & TERM_VNULL)==0 && sqlite3ExprCanBeNull(pRight) ){ sqlite3VdbeAddOp2(v, OP_IsNull, regBase+nEq, addrNxt); VdbeCoverage(v); } if( zStartAff ){ if( sqlite3CompareAffinity(pRight, zStartAff[nEq])==SQLITE_AFF_BLOB){ /* Since the comparison is to be performed with no conversions ** applied to the operands, set the affinity to apply to pRight to ** SQLITE_AFF_BLOB. 
*/ zStartAff[nEq] = SQLITE_AFF_BLOB; } if( sqlite3ExprNeedsNoAffinityChange(pRight, zStartAff[nEq]) ){ zStartAff[nEq] = SQLITE_AFF_BLOB; } } nConstraint++; testcase( pRangeStart->wtFlags & TERM_VIRTUAL ); }else if( bSeekPastNull ){ sqlite3VdbeAddOp2(v, OP_Null, 0, regBase+nEq); nConstraint++; startEq = 0; start_constraints = 1; } codeApplyAffinity(pParse, regBase, nConstraint - bSeekPastNull, zStartAff); op = aStartOp[(start_constraints<<2) + (startEq<<1) + bRev]; assert( op!=0 ); sqlite3VdbeAddOp4Int(v, op, iIdxCur, addrNxt, regBase, nConstraint); VdbeCoverage(v); VdbeCoverageIf(v, op==OP_Rewind); testcase( op==OP_Rewind ); VdbeCoverageIf(v, op==OP_Last); testcase( op==OP_Last ); VdbeCoverageIf(v, op==OP_SeekGT); testcase( op==OP_SeekGT ); VdbeCoverageIf(v, op==OP_SeekGE); testcase( op==OP_SeekGE ); VdbeCoverageIf(v, op==OP_SeekLE); testcase( op==OP_SeekLE ); VdbeCoverageIf(v, op==OP_SeekLT); testcase( op==OP_SeekLT ); /* Load the value for the inequality constraint at the end of the ** range (if any). */ nConstraint = nEq; if( pRangeEnd ){ Expr *pRight = pRangeEnd->pExpr->pRight; sqlite3ExprCacheRemove(pParse, regBase+nEq, 1); sqlite3ExprCode(pParse, pRight, regBase+nEq); whereLikeOptimizationStringFixup(v, pLevel, pRangeEnd); if( (pRangeEnd->wtFlags & TERM_VNULL)==0 && sqlite3ExprCanBeNull(pRight) ){ sqlite3VdbeAddOp2(v, OP_IsNull, regBase+nEq, addrNxt); VdbeCoverage(v); } if( sqlite3CompareAffinity(pRight, cEndAff)!=SQLITE_AFF_BLOB && !sqlite3ExprNeedsNoAffinityChange(pRight, cEndAff) ){ codeApplyAffinity(pParse, regBase+nEq, 1, &cEndAff); } nConstraint++; testcase( pRangeEnd->wtFlags & TERM_VIRTUAL ); }else if( bStopAtNull ){ sqlite3VdbeAddOp2(v, OP_Null, 0, regBase+nEq); endEq = 0; nConstraint++; } sqlite3DbFree(db, zStartAff); /* Top of the loop body */ pLevel->p2 = sqlite3VdbeCurrentAddr(v); /* Check if the index cursor is past the end of the range. */ if( nConstraint ){ op = aEndOp[bRev*2 + endEq]; sqlite3VdbeAddOp4Int(v, op, iIdxCur, addrNxt, regBase, nConstraint); testcase( op==OP_IdxGT ); VdbeCoverageIf(v, op==OP_IdxGT ); testcase( op==OP_IdxGE ); VdbeCoverageIf(v, op==OP_IdxGE ); testcase( op==OP_IdxLT ); VdbeCoverageIf(v, op==OP_IdxLT ); testcase( op==OP_IdxLE ); VdbeCoverageIf(v, op==OP_IdxLE ); } /* Seek the table cursor, if required */ disableTerm(pLevel, pRangeStart); disableTerm(pLevel, pRangeEnd); if( omitTable ){ /* pIdx is a covering index. No need to access the main table. */ }else if( HasRowid(pIdx->pTable) ){ iRowidReg = ++pParse->nMem; sqlite3VdbeAddOp2(v, OP_IdxRowid, iIdxCur, iRowidReg); sqlite3ExprCacheStore(pParse, iCur, -1, iRowidReg); sqlite3VdbeAddOp2(v, OP_Seek, iCur, iRowidReg); /* Deferred seek */ }else if( iCur!=iIdxCur ){ Index *pPk = sqlite3PrimaryKeyIndex(pIdx->pTable); iRowidReg = sqlite3GetTempRange(pParse, pPk->nKeyCol); for(j=0; jnKeyCol; j++){ k = sqlite3ColumnOfIndex(pIdx, pPk->aiColumn[j]); sqlite3VdbeAddOp3(v, OP_Column, iIdxCur, k, iRowidReg+j); } sqlite3VdbeAddOp4Int(v, OP_NotFound, iCur, addrCont, iRowidReg, pPk->nKeyCol); VdbeCoverage(v); } /* Record the instruction used to terminate the loop. Disable ** WHERE clause terms made redundant by the index range scan. */ if( pLoop->wsFlags & WHERE_ONEROW ){ pLevel->op = OP_Noop; }else if( bRev ){ pLevel->op = OP_Prev; }else{ pLevel->op = OP_Next; } pLevel->p1 = iIdxCur; pLevel->p3 = (pLoop->wsFlags&WHERE_UNQ_WANTED)!=0 ? 
1:0; if( (pLoop->wsFlags & WHERE_CONSTRAINT)==0 ){ pLevel->p5 = SQLITE_STMTSTATUS_FULLSCAN_STEP; }else{ assert( pLevel->p5==0 ); } }else #ifndef SQLITE_OMIT_OR_OPTIMIZATION if( pLoop->wsFlags & WHERE_MULTI_OR ){ /* Case 5: Two or more separately indexed terms connected by OR ** ** Example: ** ** CREATE TABLE t1(a,b,c,d); ** CREATE INDEX i1 ON t1(a); ** CREATE INDEX i2 ON t1(b); ** CREATE INDEX i3 ON t1(c); ** ** SELECT * FROM t1 WHERE a=5 OR b=7 OR (c=11 AND d=13) ** ** In the example, there are three indexed terms connected by OR. ** The top of the loop looks like this: ** ** Null 1 # Zero the rowset in reg 1 ** ** Then, for each indexed term, the following. The arguments to ** RowSetTest are such that the rowid of the current row is inserted ** into the RowSet. If it is already present, control skips the ** Gosub opcode and jumps straight to the code generated by WhereEnd(). ** ** sqlite3WhereBegin() ** RowSetTest # Insert rowid into rowset ** Gosub 2 A ** sqlite3WhereEnd() ** ** Following the above, code to terminate the loop. Label A, the target ** of the Gosub above, jumps to the instruction right after the Goto. ** ** Null 1 # Zero the rowset in reg 1 ** Goto B # The loop is finished. ** ** A: # Return data, whatever. ** ** Return 2 # Jump back to the Gosub ** ** B: ** ** Added 2014-05-26: If the table is a WITHOUT ROWID table, then ** use an ephemeral index instead of a RowSet to record the primary ** keys of the rows we have already seen. ** */ WhereClause *pOrWc; /* The OR-clause broken out into subterms */ SrcList *pOrTab; /* Shortened table list or OR-clause generation */ Index *pCov = 0; /* Potential covering index (or NULL) */ int iCovCur = pParse->nTab++; /* Cursor used for index scans (if any) */ int regReturn = ++pParse->nMem; /* Register used with OP_Gosub */ int regRowset = 0; /* Register for RowSet object */ int regRowid = 0; /* Register holding rowid */ int iLoopBody = sqlite3VdbeMakeLabel(v); /* Start of loop body */ int iRetInit; /* Address of regReturn init */ int untestedTerms = 0; /* Some terms not completely tested */ int ii; /* Loop counter */ u16 wctrlFlags; /* Flags for sub-WHERE clause */ Expr *pAndExpr = 0; /* An ".. AND (...)" expression */ Table *pTab = pTabItem->pTab; pTerm = pLoop->aLTerm[0]; assert( pTerm!=0 ); assert( pTerm->eOperator & WO_OR ); assert( (pTerm->wtFlags & TERM_ORINFO)!=0 ); pOrWc = &pTerm->u.pOrInfo->wc; pLevel->op = OP_Return; pLevel->p1 = regReturn; /* Set up a new SrcList in pOrTab containing the table being scanned ** by this loop in the a[0] slot and all notReady tables in a[1..] slots. ** This becomes the SrcList in the recursive call to sqlite3WhereBegin(). */ if( pWInfo->nLevel>1 ){ int nNotReady; /* The number of notReady tables */ struct SrcList_item *origSrc; /* Original list of tables */ nNotReady = pWInfo->nLevel - iLevel - 1; pOrTab = sqlite3StackAllocRaw(db, sizeof(*pOrTab)+ nNotReady*sizeof(pOrTab->a[0])); if( pOrTab==0 ) return notReady; pOrTab->nAlloc = (u8)(nNotReady + 1); pOrTab->nSrc = pOrTab->nAlloc; memcpy(pOrTab->a, pTabItem, sizeof(*pTabItem)); origSrc = pWInfo->pTabList->a; for(k=1; k<=nNotReady; k++){ memcpy(&pOrTab->a[k], &origSrc[pLevel[k].iFrom], sizeof(pOrTab->a[k])); } }else{ pOrTab = pWInfo->pTabList; } /* Initialize the rowset register to contain NULL. An SQL NULL is ** equivalent to an empty rowset. Or, create an ephemeral index ** capable of holding primary keys in the case of a WITHOUT ROWID. 
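**
** Schematically (illustration only, operand names are symbolic), the
** two alternative initializations coded below are either
**
**       Null           0          regRowset
**
** for rowid tables, or
**
**       OpenEphemeral  regRowset  nPkCol
**
** keyed by the table's PRIMARY KEY, for WITHOUT ROWID tables.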
** ** Also initialize regReturn to contain the address of the instruction ** immediately following the OP_Return at the bottom of the loop. This ** is required in a few obscure LEFT JOIN cases where control jumps ** over the top of the loop into the body of it. In this case the ** correct response for the end-of-loop code (the OP_Return) is to ** fall through to the next instruction, just as an OP_Next does if ** called on an uninitialized cursor. */ if( (pWInfo->wctrlFlags & WHERE_DUPLICATES_OK)==0 ){ if( HasRowid(pTab) ){ regRowset = ++pParse->nMem; sqlite3VdbeAddOp2(v, OP_Null, 0, regRowset); }else{ Index *pPk = sqlite3PrimaryKeyIndex(pTab); regRowset = pParse->nTab++; sqlite3VdbeAddOp2(v, OP_OpenEphemeral, regRowset, pPk->nKeyCol); sqlite3VdbeSetP4KeyInfo(pParse, pPk); } regRowid = ++pParse->nMem; } iRetInit = sqlite3VdbeAddOp2(v, OP_Integer, 0, regReturn); /* If the original WHERE clause is z of the form: (x1 OR x2 OR ...) AND y ** Then for every term xN, evaluate as the subexpression: xN AND z ** That way, terms in y that are factored into the disjunction will ** be picked up by the recursive calls to sqlite3WhereBegin() below. ** ** Actually, each subexpression is converted to "xN AND w" where w is ** the "interesting" terms of z - terms that did not originate in the ** ON or USING clause of a LEFT JOIN, and terms that are usable as ** indices. ** ** This optimization also only applies if the (x1 OR x2 OR ...) term ** is not contained in the ON clause of a LEFT JOIN. ** See ticket http://www.sqlite.org/src/info/f2369304e4 */ if( pWC->nTerm>1 ){ int iTerm; for(iTerm=0; iTermnTerm; iTerm++){ Expr *pExpr = pWC->a[iTerm].pExpr; if( &pWC->a[iTerm] == pTerm ) continue; if( ExprHasProperty(pExpr, EP_FromJoin) ) continue; if( (pWC->a[iTerm].wtFlags & TERM_VIRTUAL)!=0 ) continue; if( (pWC->a[iTerm].eOperator & WO_ALL)==0 ) continue; testcase( pWC->a[iTerm].wtFlags & TERM_ORINFO ); pExpr = sqlite3ExprDup(db, pExpr, 0); pAndExpr = sqlite3ExprAnd(db, pAndExpr, pExpr); } if( pAndExpr ){ pAndExpr = sqlite3PExpr(pParse, TK_AND, 0, pAndExpr, 0); } } /* Run a separate WHERE clause for each term of the OR clause. After ** eliminating duplicates from other WHERE clauses, the action for each ** sub-WHERE clause is to to invoke the main loop body as a subroutine. */ wctrlFlags = WHERE_OMIT_OPEN_CLOSE | WHERE_FORCE_TABLE | WHERE_ONETABLE_ONLY | WHERE_NO_AUTOINDEX; for(ii=0; iinTerm; ii++){ WhereTerm *pOrTerm = &pOrWc->a[ii]; if( pOrTerm->leftCursor==iCur || (pOrTerm->eOperator & WO_AND)!=0 ){ WhereInfo *pSubWInfo; /* Info for single OR-term scan */ Expr *pOrExpr = pOrTerm->pExpr; /* Current OR clause term */ int j1 = 0; /* Address of jump operation */ if( pAndExpr && !ExprHasProperty(pOrExpr, EP_FromJoin) ){ pAndExpr->pLeft = pOrExpr; pOrExpr = pAndExpr; } /* Loop through table entries that match term pOrTerm. */ WHERETRACE(0xffff, ("Subplan for OR-clause:\n")); pSubWInfo = sqlite3WhereBegin(pParse, pOrTab, pOrExpr, 0, 0, wctrlFlags, iCovCur); assert( pSubWInfo || pParse->nErr || db->mallocFailed ); if( pSubWInfo ){ WhereLoop *pSubLoop; int addrExplain = sqlite3WhereExplainOneScan( pParse, pOrTab, &pSubWInfo->a[0], iLevel, pLevel->iFrom, 0 ); sqlite3WhereAddScanStatus(v, pOrTab, &pSubWInfo->a[0], addrExplain); /* This is the sub-WHERE clause body. First skip over ** duplicate rows from prior sub-WHERE clauses, and record the ** rowid (or PRIMARY KEY) for the current row so that the same ** row will be skipped in subsequent sub-WHERE clauses. 
*/ if( (pWInfo->wctrlFlags & WHERE_DUPLICATES_OK)==0 ){ int r; int iSet = ((ii==pOrWc->nTerm-1)?-1:ii); if( HasRowid(pTab) ){ r = sqlite3ExprCodeGetColumn(pParse, pTab, -1, iCur, regRowid, 0); j1 = sqlite3VdbeAddOp4Int(v, OP_RowSetTest, regRowset, 0, r,iSet); VdbeCoverage(v); }else{ Index *pPk = sqlite3PrimaryKeyIndex(pTab); int nPk = pPk->nKeyCol; int iPk; /* Read the PK into an array of temp registers. */ r = sqlite3GetTempRange(pParse, nPk); for(iPk=0; iPkaiColumn[iPk]; int rx; rx = sqlite3ExprCodeGetColumn(pParse, pTab, iCol, iCur,r+iPk,0); if( rx!=r+iPk ){ sqlite3VdbeAddOp2(v, OP_SCopy, rx, r+iPk); } } /* Check if the temp table already contains this key. If so, ** the row has already been included in the result set and ** can be ignored (by jumping past the Gosub below). Otherwise, ** insert the key into the temp table and proceed with processing ** the row. ** ** Use some of the same optimizations as OP_RowSetTest: If iSet ** is zero, assume that the key cannot already be present in ** the temp table. And if iSet is -1, assume that there is no ** need to insert the key into the temp table, as it will never ** be tested for. */ if( iSet ){ j1 = sqlite3VdbeAddOp4Int(v, OP_Found, regRowset, 0, r, nPk); VdbeCoverage(v); } if( iSet>=0 ){ sqlite3VdbeAddOp3(v, OP_MakeRecord, r, nPk, regRowid); sqlite3VdbeAddOp3(v, OP_IdxInsert, regRowset, regRowid, 0); if( iSet ) sqlite3VdbeChangeP5(v, OPFLAG_USESEEKRESULT); } /* Release the array of temp registers */ sqlite3ReleaseTempRange(pParse, r, nPk); } } /* Invoke the main loop body as a subroutine */ sqlite3VdbeAddOp2(v, OP_Gosub, regReturn, iLoopBody); /* Jump here (skipping the main loop body subroutine) if the ** current sub-WHERE row is a duplicate from prior sub-WHEREs. */ if( j1 ) sqlite3VdbeJumpHere(v, j1); /* The pSubWInfo->untestedTerms flag means that this OR term ** contained one or more AND term from a notReady table. The ** terms from the notReady table could not be tested and will ** need to be tested later. */ if( pSubWInfo->untestedTerms ) untestedTerms = 1; /* If all of the OR-connected terms are optimized using the same ** index, and the index is opened using the same cursor number ** by each call to sqlite3WhereBegin() made by this loop, it may ** be possible to use that index as a covering index. ** ** If the call to sqlite3WhereBegin() above resulted in a scan that ** uses an index, and this is either the first OR-connected term ** processed or the index is the same as that used by all previous ** terms, set pCov to the candidate covering index. Otherwise, set ** pCov to NULL to indicate that no candidate covering index will ** be available. */ pSubLoop = pSubWInfo->a[0].pWLoop; assert( (pSubLoop->wsFlags & WHERE_AUTO_INDEX)==0 ); if( (pSubLoop->wsFlags & WHERE_INDEXED)!=0 && (ii==0 || pSubLoop->u.btree.pIndex==pCov) && (HasRowid(pTab) || !IsPrimaryKeyIndex(pSubLoop->u.btree.pIndex)) ){ assert( pSubWInfo->a[0].iIdxCur==iCovCur ); pCov = pSubLoop->u.btree.pIndex; wctrlFlags |= WHERE_REOPEN_IDX; }else{ pCov = 0; } /* Finish the loop through table entries that match term pOrTerm. 
*/ sqlite3WhereEnd(pSubWInfo); } } } pLevel->u.pCovidx = pCov; if( pCov ) pLevel->iIdxCur = iCovCur; if( pAndExpr ){ pAndExpr->pLeft = 0; sqlite3ExprDelete(db, pAndExpr); } sqlite3VdbeChangeP1(v, iRetInit, sqlite3VdbeCurrentAddr(v)); sqlite3VdbeAddOp2(v, OP_Goto, 0, pLevel->addrBrk); sqlite3VdbeResolveLabel(v, iLoopBody); if( pWInfo->nLevel>1 ) sqlite3StackFree(db, pOrTab); if( !untestedTerms ) disableTerm(pLevel, pTerm); }else #endif /* SQLITE_OMIT_OR_OPTIMIZATION */ { /* Case 6: There is no usable index. We must do a complete ** scan of the entire table. */ static const u8 aStep[] = { OP_Next, OP_Prev }; static const u8 aStart[] = { OP_Rewind, OP_Last }; assert( bRev==0 || bRev==1 ); if( pTabItem->isRecursive ){ /* Tables marked isRecursive have only a single row that is stored in ** a pseudo-cursor. No need to Rewind or Next such cursors. */ pLevel->op = OP_Noop; }else{ pLevel->op = aStep[bRev]; pLevel->p1 = iCur; pLevel->p2 = 1 + sqlite3VdbeAddOp2(v, aStart[bRev], iCur, addrBrk); VdbeCoverageIf(v, bRev==0); VdbeCoverageIf(v, bRev!=0); pLevel->p5 = SQLITE_STMTSTATUS_FULLSCAN_STEP; } } #ifdef SQLITE_ENABLE_STMT_SCANSTATUS pLevel->addrVisit = sqlite3VdbeCurrentAddr(v); #endif /* Insert code to test every subexpression that can be completely ** computed using the current set of tables. */ for(pTerm=pWC->a, j=pWC->nTerm; j>0; j--, pTerm++){ Expr *pE; int skipLikeAddr = 0; testcase( pTerm->wtFlags & TERM_VIRTUAL ); testcase( pTerm->wtFlags & TERM_CODED ); if( pTerm->wtFlags & (TERM_VIRTUAL|TERM_CODED) ) continue; if( (pTerm->prereqAll & pLevel->notReady)!=0 ){ testcase( pWInfo->untestedTerms==0 && (pWInfo->wctrlFlags & WHERE_ONETABLE_ONLY)!=0 ); pWInfo->untestedTerms = 1; continue; } pE = pTerm->pExpr; assert( pE!=0 ); if( pLevel->iLeftJoin && !ExprHasProperty(pE, EP_FromJoin) ){ continue; } if( pTerm->wtFlags & TERM_LIKECOND ){ assert( pLevel->iLikeRepCntr>0 ); skipLikeAddr = sqlite3VdbeAddOp1(v, OP_IfNot, pLevel->iLikeRepCntr); VdbeCoverage(v); } sqlite3ExprIfFalse(pParse, pE, addrCont, SQLITE_JUMPIFNULL); if( skipLikeAddr ) sqlite3VdbeJumpHere(v, skipLikeAddr); pTerm->wtFlags |= TERM_CODED; } /* Insert code to test for implied constraints based on transitivity ** of the "==" operator. ** ** Example: If the WHERE clause contains "t1.a=t2.b" and "t2.b=123" ** and we are coding the t1 loop and the t2 loop has not yet coded, ** then we cannot use the "t1.a=t2.b" constraint, but we can code ** the implied "t1.a=123" constraint. 
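**
** Concretely, the loop below implements this by making a transient copy
** (pEAlt) of the equivalent constraint and substituting the current
** loop's column (t1.a in the example above) as its left-hand side before
** coding the test.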
*/ for(pTerm=pWC->a, j=pWC->nTerm; j>0; j--, pTerm++){ Expr *pE, *pEAlt; WhereTerm *pAlt; if( pTerm->wtFlags & (TERM_VIRTUAL|TERM_CODED) ) continue; if( (pTerm->eOperator & (WO_EQ|WO_IS))==0 ) continue; if( (pTerm->eOperator & WO_EQUIV)==0 ) continue; if( pTerm->leftCursor!=iCur ) continue; if( pLevel->iLeftJoin ) continue; pE = pTerm->pExpr; assert( !ExprHasProperty(pE, EP_FromJoin) ); assert( (pTerm->prereqRight & pLevel->notReady)!=0 ); pAlt = sqlite3WhereFindTerm(pWC, iCur, pTerm->u.leftColumn, notReady, WO_EQ|WO_IN|WO_IS, 0); if( pAlt==0 ) continue; if( pAlt->wtFlags & (TERM_CODED) ) continue; testcase( pAlt->eOperator & WO_EQ ); testcase( pAlt->eOperator & WO_IS ); testcase( pAlt->eOperator & WO_IN ); VdbeModuleComment((v, "begin transitive constraint")); pEAlt = sqlite3StackAllocRaw(db, sizeof(*pEAlt)); if( pEAlt ){ *pEAlt = *pAlt->pExpr; pEAlt->pLeft = pE->pLeft; sqlite3ExprIfFalse(pParse, pEAlt, addrCont, SQLITE_JUMPIFNULL); sqlite3StackFree(db, pEAlt); } } /* For a LEFT OUTER JOIN, generate code that will record the fact that ** at least one row of the right table has matched the left table. */ if( pLevel->iLeftJoin ){ pLevel->addrFirst = sqlite3VdbeCurrentAddr(v); sqlite3VdbeAddOp2(v, OP_Integer, 1, pLevel->iLeftJoin); VdbeComment((v, "record LEFT JOIN hit")); sqlite3ExprCacheClear(pParse); for(pTerm=pWC->a, j=0; jnTerm; j++, pTerm++){ testcase( pTerm->wtFlags & TERM_VIRTUAL ); testcase( pTerm->wtFlags & TERM_CODED ); if( pTerm->wtFlags & (TERM_VIRTUAL|TERM_CODED) ) continue; if( (pTerm->prereqAll & pLevel->notReady)!=0 ){ assert( pWInfo->untestedTerms ); continue; } assert( pTerm->pExpr ); sqlite3ExprIfFalse(pParse, pTerm->pExpr, addrCont, SQLITE_JUMPIFNULL); pTerm->wtFlags |= TERM_CODED; } } return pLevel->notReady; } /************** End of wherecode.c *******************************************/ /************** Begin file whereexpr.c ***************************************/ /* ** 2015-06-08 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** This module contains C code that generates VDBE code used to process ** the WHERE clause of SQL statements. ** ** This file was originally part of where.c but was split out to improve ** readability and editabiliity. This file contains utility routines for ** analyzing Expr objects in the WHERE clause. */ /* #include "sqliteInt.h" */ /* #include "whereInt.h" */ /* Forward declarations */ static void exprAnalyze(SrcList*, WhereClause*, int); /* ** Deallocate all memory associated with a WhereOrInfo object. */ static void whereOrInfoDelete(sqlite3 *db, WhereOrInfo *p){ sqlite3WhereClauseClear(&p->wc); sqlite3DbFree(db, p); } /* ** Deallocate all memory associated with a WhereAndInfo object. */ static void whereAndInfoDelete(sqlite3 *db, WhereAndInfo *p){ sqlite3WhereClauseClear(&p->wc); sqlite3DbFree(db, p); } /* ** Add a single new WhereTerm entry to the WhereClause object pWC. ** The new WhereTerm object is constructed from Expr p and with wtFlags. ** The index in pWC->a[] of the new WhereTerm is returned on success. ** 0 is returned if the new WhereTerm could not be added due to a memory ** allocation error. The memory allocation failure will be recorded in ** the db->mallocFailed flag so that higher-level functions can detect it. 
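**
** A typical caller pattern, as used elsewhere in this file (illustrative):
**
**   idxNew = whereClauseInsert(pWC, pNewExpr, TERM_VIRTUAL|TERM_DYNAMIC);
**   testcase( idxNew==0 );
**   exprAnalyze(pSrc, pWC, idxNew);
**   pTerm = &pWC->a[idxTerm];
**
** The final line reacquires the WhereTerm pointer because the insert may
** have reallocated the pWC->a[] array (see the WARNING below).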
** ** This routine will increase the size of the pWC->a[] array as necessary. ** ** If the wtFlags argument includes TERM_DYNAMIC, then responsibility ** for freeing the expression p is assumed by the WhereClause object pWC. ** This is true even if this routine fails to allocate a new WhereTerm. ** ** WARNING: This routine might reallocate the space used to store ** WhereTerms. All pointers to WhereTerms should be invalidated after ** calling this routine. Such pointers may be reinitialized by referencing ** the pWC->a[] array. */ static int whereClauseInsert(WhereClause *pWC, Expr *p, u16 wtFlags){ WhereTerm *pTerm; int idx; testcase( wtFlags & TERM_VIRTUAL ); if( pWC->nTerm>=pWC->nSlot ){ WhereTerm *pOld = pWC->a; sqlite3 *db = pWC->pWInfo->pParse->db; pWC->a = sqlite3DbMallocRaw(db, sizeof(pWC->a[0])*pWC->nSlot*2 ); if( pWC->a==0 ){ if( wtFlags & TERM_DYNAMIC ){ sqlite3ExprDelete(db, p); } pWC->a = pOld; return 0; } memcpy(pWC->a, pOld, sizeof(pWC->a[0])*pWC->nTerm); if( pOld!=pWC->aStatic ){ sqlite3DbFree(db, pOld); } pWC->nSlot = sqlite3DbMallocSize(db, pWC->a)/sizeof(pWC->a[0]); memset(&pWC->a[pWC->nTerm], 0, sizeof(pWC->a[0])*(pWC->nSlot-pWC->nTerm)); } pTerm = &pWC->a[idx = pWC->nTerm++]; if( p && ExprHasProperty(p, EP_Unlikely) ){ pTerm->truthProb = sqlite3LogEst(p->iTable) - 270; }else{ pTerm->truthProb = 1; } pTerm->pExpr = sqlite3ExprSkipCollate(p); pTerm->wtFlags = wtFlags; pTerm->pWC = pWC; pTerm->iParent = -1; return idx; } /* ** Return TRUE if the given operator is one of the operators that is ** allowed for an indexable WHERE clause term. The allowed operators are ** "=", "<", ">", "<=", ">=", "IN", and "IS NULL" */ static int allowedOp(int op){ assert( TK_GT>TK_EQ && TK_GTTK_EQ && TK_LTTK_EQ && TK_LE=TK_EQ && op<=TK_GE) || op==TK_ISNULL || op==TK_IS; } /* ** Commute a comparison operator. Expressions of the form "X op Y" ** are converted into "Y op X". ** ** If left/right precedence rules come into play when determining the ** collating sequence, then COLLATE operators are adjusted to ensure ** that the collating sequence does not change. For example: ** "Y collate NOCASE op X" becomes "X op Y" because any collation sequence on ** the left hand side of a comparison overrides any collation sequence ** attached to the right. For the same reason the EP_Collate flag ** is not commuted. */ static void exprCommute(Parse *pParse, Expr *pExpr){ u16 expRight = (pExpr->pRight->flags & EP_Collate); u16 expLeft = (pExpr->pLeft->flags & EP_Collate); assert( allowedOp(pExpr->op) && pExpr->op!=TK_IN ); if( expRight==expLeft ){ /* Either X and Y both have COLLATE operator or neither do */ if( expRight ){ /* Both X and Y have COLLATE operators. Make sure X is always ** used by clearing the EP_Collate flag from Y. */ pExpr->pRight->flags &= ~EP_Collate; }else if( sqlite3ExprCollSeq(pParse, pExpr->pLeft)!=0 ){ /* Neither X nor Y have COLLATE operators, but X has a non-default ** collating sequence. So add the EP_Collate marker on X to cause ** it to be searched first. */ pExpr->pLeft->flags |= EP_Collate; } } SWAP(Expr*,pExpr->pRight,pExpr->pLeft); if( pExpr->op>=TK_GT ){ assert( TK_LT==TK_GT+2 ); assert( TK_GE==TK_LE+2 ); assert( TK_GT>TK_EQ ); assert( TK_GTop>=TK_GT && pExpr->op<=TK_GE ); pExpr->op = ((pExpr->op-TK_GT)^2)+TK_GT; } } /* ** Translate from TK_xx operator to WO_xx bitmask. 
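**
** For example, based on the WO_EQ<<(op-TK_EQ) computation and the asserts
** in the function below: TK_EQ maps to WO_EQ, TK_LT to WO_LT, TK_LE to
** WO_LE, TK_GT to WO_GT and TK_GE to WO_GE, while TK_IN, TK_ISNULL and
** TK_IS map to WO_IN, WO_ISNULL and WO_IS respectively.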
*/ static u16 operatorMask(int op){ u16 c; assert( allowedOp(op) ); if( op==TK_IN ){ c = WO_IN; }else if( op==TK_ISNULL ){ c = WO_ISNULL; }else if( op==TK_IS ){ c = WO_IS; }else{ assert( (WO_EQ<<(op-TK_EQ)) < 0x7fff ); c = (u16)(WO_EQ<<(op-TK_EQ)); } assert( op!=TK_ISNULL || c==WO_ISNULL ); assert( op!=TK_IN || c==WO_IN ); assert( op!=TK_EQ || c==WO_EQ ); assert( op!=TK_LT || c==WO_LT ); assert( op!=TK_LE || c==WO_LE ); assert( op!=TK_GT || c==WO_GT ); assert( op!=TK_GE || c==WO_GE ); assert( op!=TK_IS || c==WO_IS ); return c; } #ifndef SQLITE_OMIT_LIKE_OPTIMIZATION /* ** Check to see if the given expression is a LIKE or GLOB operator that ** can be optimized using inequality constraints. Return TRUE if it is ** so and false if not. ** ** In order for the operator to be optimizible, the RHS must be a string ** literal that does not begin with a wildcard. The LHS must be a column ** that may only be NULL, a string, or a BLOB, never a number. (This means ** that virtual tables cannot participate in the LIKE optimization.) The ** collating sequence for the column on the LHS must be appropriate for ** the operator. */ static int isLikeOrGlob( Parse *pParse, /* Parsing and code generating context */ Expr *pExpr, /* Test this expression */ Expr **ppPrefix, /* Pointer to TK_STRING expression with pattern prefix */ int *pisComplete, /* True if the only wildcard is % in the last character */ int *pnoCase /* True if uppercase is equivalent to lowercase */ ){ const char *z = 0; /* String on RHS of LIKE operator */ Expr *pRight, *pLeft; /* Right and left size of LIKE operator */ ExprList *pList; /* List of operands to the LIKE operator */ int c; /* One character in z[] */ int cnt; /* Number of non-wildcard prefix characters */ char wc[3]; /* Wildcard characters */ sqlite3 *db = pParse->db; /* Database connection */ sqlite3_value *pVal = 0; int op; /* Opcode of pRight */ if( !sqlite3IsLikeFunction(db, pExpr, pnoCase, wc) ){ return 0; } #ifdef SQLITE_EBCDIC if( *pnoCase ) return 0; #endif pList = pExpr->x.pList; pLeft = pList->a[1].pExpr; if( pLeft->op!=TK_COLUMN || sqlite3ExprAffinity(pLeft)!=SQLITE_AFF_TEXT || IsVirtual(pLeft->pTab) /* Value might be numeric */ ){ /* IMP: R-02065-49465 The left-hand side of the LIKE or GLOB operator must ** be the name of an indexed column with TEXT affinity. */ return 0; } assert( pLeft->iColumn!=(-1) ); /* Because IPK never has AFF_TEXT */ pRight = sqlite3ExprSkipCollate(pList->a[0].pExpr); op = pRight->op; if( op==TK_VARIABLE ){ Vdbe *pReprepare = pParse->pReprepare; int iCol = pRight->iColumn; pVal = sqlite3VdbeGetBoundValue(pReprepare, iCol, SQLITE_AFF_BLOB); if( pVal && sqlite3_value_type(pVal)==SQLITE_TEXT ){ z = (char *)sqlite3_value_text(pVal); } sqlite3VdbeSetVarmask(pParse->pVdbe, iCol); assert( pRight->op==TK_VARIABLE || pRight->op==TK_REGISTER ); }else if( op==TK_STRING ){ z = pRight->u.zToken; } if( z ){ cnt = 0; while( (c=z[cnt])!=0 && c!=wc[0] && c!=wc[1] && c!=wc[2] ){ cnt++; } if( cnt!=0 && 255!=(u8)z[cnt-1] ){ Expr *pPrefix; *pisComplete = c==wc[0] && z[cnt+1]==0; pPrefix = sqlite3Expr(db, TK_STRING, z); if( pPrefix ) pPrefix->u.zToken[cnt] = 0; *ppPrefix = pPrefix; if( op==TK_VARIABLE ){ Vdbe *v = pParse->pVdbe; sqlite3VdbeSetVarmask(v, pRight->iColumn); if( *pisComplete && pRight->u.zToken[1] ){ /* If the rhs of the LIKE expression is a variable, and the current ** value of the variable means there is no need to invoke the LIKE ** function, then no OP_Variable will be added to the program. 
** This causes problems for the sqlite3_bind_parameter_name() ** API. To work around them, add a dummy OP_Variable here. */ int r1 = sqlite3GetTempReg(pParse); sqlite3ExprCodeTarget(pParse, pRight, r1); sqlite3VdbeChangeP3(v, sqlite3VdbeCurrentAddr(v)-1, 0); sqlite3ReleaseTempReg(pParse, r1); } } }else{ z = 0; } } sqlite3ValueFree(pVal); return (z!=0); } #endif /* SQLITE_OMIT_LIKE_OPTIMIZATION */ #ifndef SQLITE_OMIT_VIRTUALTABLE /* ** Check to see if the given expression is of the form ** ** column MATCH expr ** ** If it is then return TRUE. If not, return FALSE. */ static int isMatchOfColumn( Expr *pExpr /* Test this expression */ ){ ExprList *pList; if( pExpr->op!=TK_FUNCTION ){ return 0; } if( sqlite3StrICmp(pExpr->u.zToken,"match")!=0 ){ return 0; } pList = pExpr->x.pList; if( pList->nExpr!=2 ){ return 0; } if( pList->a[1].pExpr->op != TK_COLUMN ){ return 0; } return 1; } #endif /* SQLITE_OMIT_VIRTUALTABLE */ /* ** If the pBase expression originated in the ON or USING clause of ** a join, then transfer the appropriate markings over to derived. */ static void transferJoinMarkings(Expr *pDerived, Expr *pBase){ if( pDerived ){ pDerived->flags |= pBase->flags & EP_FromJoin; pDerived->iRightJoinTable = pBase->iRightJoinTable; } } /* ** Mark term iChild as being a child of term iParent */ static void markTermAsChild(WhereClause *pWC, int iChild, int iParent){ pWC->a[iChild].iParent = iParent; pWC->a[iChild].truthProb = pWC->a[iParent].truthProb; pWC->a[iParent].nChild++; } /* ** Return the N-th AND-connected subterm of pTerm. Or if pTerm is not ** a conjunction, then return just pTerm when N==0. If N is exceeds ** the number of available subterms, return NULL. */ static WhereTerm *whereNthSubterm(WhereTerm *pTerm, int N){ if( pTerm->eOperator!=WO_AND ){ return N==0 ? pTerm : 0; } if( Nu.pAndInfo->wc.nTerm ){ return &pTerm->u.pAndInfo->wc.a[N]; } return 0; } /* ** Subterms pOne and pTwo are contained within WHERE clause pWC. The ** two subterms are in disjunction - they are OR-ed together. ** ** If these two terms are both of the form: "A op B" with the same ** A and B values but different operators and if the operators are ** compatible (if one is = and the other is <, for example) then ** add a new virtual AND term to pWC that is the combination of the ** two. 
** ** Some examples: ** ** x x<=y ** x=y OR x=y --> x=y ** x<=y OR x x<=y ** ** The following is NOT generated: ** ** xy --> x!=y */ static void whereCombineDisjuncts( SrcList *pSrc, /* the FROM clause */ WhereClause *pWC, /* The complete WHERE clause */ WhereTerm *pOne, /* First disjunct */ WhereTerm *pTwo /* Second disjunct */ ){ u16 eOp = pOne->eOperator | pTwo->eOperator; sqlite3 *db; /* Database connection (for malloc) */ Expr *pNew; /* New virtual expression */ int op; /* Operator for the combined expression */ int idxNew; /* Index in pWC of the next virtual term */ if( (pOne->eOperator & (WO_EQ|WO_LT|WO_LE|WO_GT|WO_GE))==0 ) return; if( (pTwo->eOperator & (WO_EQ|WO_LT|WO_LE|WO_GT|WO_GE))==0 ) return; if( (eOp & (WO_EQ|WO_LT|WO_LE))!=eOp && (eOp & (WO_EQ|WO_GT|WO_GE))!=eOp ) return; assert( pOne->pExpr->pLeft!=0 && pOne->pExpr->pRight!=0 ); assert( pTwo->pExpr->pLeft!=0 && pTwo->pExpr->pRight!=0 ); if( sqlite3ExprCompare(pOne->pExpr->pLeft, pTwo->pExpr->pLeft, -1) ) return; if( sqlite3ExprCompare(pOne->pExpr->pRight, pTwo->pExpr->pRight, -1) )return; /* If we reach this point, it means the two subterms can be combined */ if( (eOp & (eOp-1))!=0 ){ if( eOp & (WO_LT|WO_LE) ){ eOp = WO_LE; }else{ assert( eOp & (WO_GT|WO_GE) ); eOp = WO_GE; } } db = pWC->pWInfo->pParse->db; pNew = sqlite3ExprDup(db, pOne->pExpr, 0); if( pNew==0 ) return; for(op=TK_EQ; eOp!=(WO_EQ<<(op-TK_EQ)); op++){ assert( opop = op; idxNew = whereClauseInsert(pWC, pNew, TERM_VIRTUAL|TERM_DYNAMIC); exprAnalyze(pSrc, pWC, idxNew); } #if !defined(SQLITE_OMIT_OR_OPTIMIZATION) && !defined(SQLITE_OMIT_SUBQUERY) /* ** Analyze a term that consists of two or more OR-connected ** subterms. So in: ** ** ... WHERE (a=5) AND (b=7 OR c=9 OR d=13) AND (d=13) ** ^^^^^^^^^^^^^^^^^^^^ ** ** This routine analyzes terms such as the middle term in the above example. ** A WhereOrTerm object is computed and attached to the term under ** analysis, regardless of the outcome of the analysis. Hence: ** ** WhereTerm.wtFlags |= TERM_ORINFO ** WhereTerm.u.pOrInfo = a dynamically allocated WhereOrTerm object ** ** The term being analyzed must have two or more of OR-connected subterms. ** A single subterm might be a set of AND-connected sub-subterms. ** Examples of terms under analysis: ** ** (A) t1.x=t2.y OR t1.x=t2.z OR t1.y=15 OR t1.z=t3.a+5 ** (B) x=expr1 OR expr2=x OR x=expr3 ** (C) t1.x=t2.y OR (t1.x=t2.z AND t1.y=15) ** (D) x=expr1 OR (y>11 AND y<22 AND z LIKE '*hello*') ** (E) (p.a=1 AND q.b=2 AND r.c=3) OR (p.x=4 AND q.y=5 AND r.z=6) ** (F) x>A OR (x=A AND y>=B) ** ** CASE 1: ** ** If all subterms are of the form T.C=expr for some single column of C and ** a single table T (as shown in example B above) then create a new virtual ** term that is an equivalent IN expression. In other words, if the term ** being analyzed is: ** ** x = expr1 OR expr2 = x OR x = expr3 ** ** then create a new virtual term like this: ** ** x IN (expr1,expr2,expr3) ** ** CASE 2: ** ** If there are exactly two disjuncts and one side has x>A and the other side ** has x=A (for the same x and A) then add a new virtual conjunct term to the ** WHERE clause of the form "x>=A". Example: ** ** x>A OR (x=A AND y>B) adds: x>=A ** ** The added conjunct can sometimes be helpful in query planning. 
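**
** For instance (illustrative), if there is an index on x, the added
** "x>=A" conjunct can be used as the lower bound of an index range scan,
** a constraint that the original two-branch OR does not expose directly.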
** ** CASE 3: ** ** If all subterms are indexable by a single table T, then set ** ** WhereTerm.eOperator = WO_OR ** WhereTerm.u.pOrInfo->indexable |= the cursor number for table T ** ** A subterm is "indexable" if it is of the form ** "T.C " where C is any column of table T and ** is one of "=", "<", "<=", ">", ">=", "IS NULL", or "IN". ** A subterm is also indexable if it is an AND of two or more ** subsubterms at least one of which is indexable. Indexable AND ** subterms have their eOperator set to WO_AND and they have ** u.pAndInfo set to a dynamically allocated WhereAndTerm object. ** ** From another point of view, "indexable" means that the subterm could ** potentially be used with an index if an appropriate index exists. ** This analysis does not consider whether or not the index exists; that ** is decided elsewhere. This analysis only looks at whether subterms ** appropriate for indexing exist. ** ** All examples A through E above satisfy case 3. But if a term ** also satisfies case 1 (such as B) we know that the optimizer will ** always prefer case 1, so in that case we pretend that case 3 is not ** satisfied. ** ** It might be the case that multiple tables are indexable. For example, ** (E) above is indexable on tables P, Q, and R. ** ** Terms that satisfy case 3 are candidates for lookup by using ** separate indices to find rowids for each subterm and composing ** the union of all rowids using a RowSet object. This is similar ** to "bitmap indices" in other database engines. ** ** OTHERWISE: ** ** If none of cases 1, 2, or 3 apply, then leave the eOperator set to ** zero. This term is not useful for search. */ static void exprAnalyzeOrTerm( SrcList *pSrc, /* the FROM clause */ WhereClause *pWC, /* the complete WHERE clause */ int idxTerm /* Index of the OR-term to be analyzed */ ){ WhereInfo *pWInfo = pWC->pWInfo; /* WHERE clause processing context */ Parse *pParse = pWInfo->pParse; /* Parser context */ sqlite3 *db = pParse->db; /* Database connection */ WhereTerm *pTerm = &pWC->a[idxTerm]; /* The term to be analyzed */ Expr *pExpr = pTerm->pExpr; /* The expression of the term */ int i; /* Loop counters */ WhereClause *pOrWc; /* Breakup of pTerm into subterms */ WhereTerm *pOrTerm; /* A Sub-term within the pOrWc */ WhereOrInfo *pOrInfo; /* Additional information associated with pTerm */ Bitmask chngToIN; /* Tables that might satisfy case 1 */ Bitmask indexable; /* Tables that are indexable, satisfying case 2 */ /* ** Break the OR clause into its separate subterms. The subterms are ** stored in a WhereClause structure containing within the WhereOrInfo ** object that is attached to the original OR clause term. */ assert( (pTerm->wtFlags & (TERM_DYNAMIC|TERM_ORINFO|TERM_ANDINFO))==0 ); assert( pExpr->op==TK_OR ); pTerm->u.pOrInfo = pOrInfo = sqlite3DbMallocZero(db, sizeof(*pOrInfo)); if( pOrInfo==0 ) return; pTerm->wtFlags |= TERM_ORINFO; pOrWc = &pOrInfo->wc; sqlite3WhereClauseInit(pOrWc, pWInfo); sqlite3WhereSplit(pOrWc, pExpr, TK_OR); sqlite3WhereExprAnalyze(pSrc, pOrWc); if( db->mallocFailed ) return; assert( pOrWc->nTerm>=2 ); /* ** Compute the set of tables that might satisfy cases 1 or 3. 
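**
** Worked example (illustrative): for the term
**
**     (t1.a=5 AND t2.b=7) OR t1.c=9
**
** the AND subterm contributes the masks of both t1 and t2 to "indexable"
** and forces chngToIN to zero (it is not a simple column==expr), while
** the t1.c=9 subterm contributes only the t1 mask.  After intersecting,
** "indexable" holds just the t1 bit, so only t1 can satisfy case 3 and
** case 1 is ruled out.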
*/ indexable = ~(Bitmask)0; chngToIN = ~(Bitmask)0; for(i=pOrWc->nTerm-1, pOrTerm=pOrWc->a; i>=0 && indexable; i--, pOrTerm++){ if( (pOrTerm->eOperator & WO_SINGLE)==0 ){ WhereAndInfo *pAndInfo; assert( (pOrTerm->wtFlags & (TERM_ANDINFO|TERM_ORINFO))==0 ); chngToIN = 0; pAndInfo = sqlite3DbMallocRaw(db, sizeof(*pAndInfo)); if( pAndInfo ){ WhereClause *pAndWC; WhereTerm *pAndTerm; int j; Bitmask b = 0; pOrTerm->u.pAndInfo = pAndInfo; pOrTerm->wtFlags |= TERM_ANDINFO; pOrTerm->eOperator = WO_AND; pAndWC = &pAndInfo->wc; sqlite3WhereClauseInit(pAndWC, pWC->pWInfo); sqlite3WhereSplit(pAndWC, pOrTerm->pExpr, TK_AND); sqlite3WhereExprAnalyze(pSrc, pAndWC); pAndWC->pOuter = pWC; testcase( db->mallocFailed ); if( !db->mallocFailed ){ for(j=0, pAndTerm=pAndWC->a; jnTerm; j++, pAndTerm++){ assert( pAndTerm->pExpr ); if( allowedOp(pAndTerm->pExpr->op) ){ b |= sqlite3WhereGetMask(&pWInfo->sMaskSet, pAndTerm->leftCursor); } } } indexable &= b; } }else if( pOrTerm->wtFlags & TERM_COPIED ){ /* Skip this term for now. We revisit it when we process the ** corresponding TERM_VIRTUAL term */ }else{ Bitmask b; b = sqlite3WhereGetMask(&pWInfo->sMaskSet, pOrTerm->leftCursor); if( pOrTerm->wtFlags & TERM_VIRTUAL ){ WhereTerm *pOther = &pOrWc->a[pOrTerm->iParent]; b |= sqlite3WhereGetMask(&pWInfo->sMaskSet, pOther->leftCursor); } indexable &= b; if( (pOrTerm->eOperator & WO_EQ)==0 ){ chngToIN = 0; }else{ chngToIN &= b; } } } /* ** Record the set of tables that satisfy case 3. The set might be ** empty. */ pOrInfo->indexable = indexable; pTerm->eOperator = indexable==0 ? 0 : WO_OR; /* For a two-way OR, attempt to implementation case 2. */ if( indexable && pOrWc->nTerm==2 ){ int iOne = 0; WhereTerm *pOne; while( (pOne = whereNthSubterm(&pOrWc->a[0],iOne++))!=0 ){ int iTwo = 0; WhereTerm *pTwo; while( (pTwo = whereNthSubterm(&pOrWc->a[1],iTwo++))!=0 ){ whereCombineDisjuncts(pSrc, pWC, pOne, pTwo); } } } /* ** chngToIN holds a set of tables that *might* satisfy case 1. But ** we have to do some additional checking to see if case 1 really ** is satisfied. ** ** chngToIN will hold either 0, 1, or 2 bits. The 0-bit case means ** that there is no possibility of transforming the OR clause into an ** IN operator because one or more terms in the OR clause contain ** something other than == on a column in the single table. The 1-bit ** case means that every term of the OR clause is of the form ** "table.column=expr" for some single table. The one bit that is set ** will correspond to the common table. We still need to check to make ** sure the same column is used on all terms. The 2-bit case is when ** the all terms are of the form "table1.column=table2.column". It ** might be possible to form an IN operator with either table1.column ** or table2.column as the LHS if either is common to every term of ** the OR clause. ** ** Note that terms of the form "table.column1=table.column2" (the ** same table on both sizes of the ==) cannot be optimized. */ if( chngToIN ){ int okToChngToIN = 0; /* True if the conversion to IN is valid */ int iColumn = -1; /* Column index on lhs of IN operator */ int iCursor = -1; /* Table cursor common to all terms */ int j = 0; /* Loop counter */ /* Search for a table and column that appears on one side or the ** other of the == operator in every subterm. That table and column ** will be recorded in iCursor and iColumn. There might not be any ** such table and column. Set okToChngToIN if an appropriate table ** and column is found but leave okToChngToIN false if not found. 
*/ for(j=0; j<2 && !okToChngToIN; j++){ pOrTerm = pOrWc->a; for(i=pOrWc->nTerm-1; i>=0; i--, pOrTerm++){ assert( pOrTerm->eOperator & WO_EQ ); pOrTerm->wtFlags &= ~TERM_OR_OK; if( pOrTerm->leftCursor==iCursor ){ /* This is the 2-bit case and we are on the second iteration and ** current term is from the first iteration. So skip this term. */ assert( j==1 ); continue; } if( (chngToIN & sqlite3WhereGetMask(&pWInfo->sMaskSet, pOrTerm->leftCursor))==0 ){ /* This term must be of the form t1.a==t2.b where t2 is in the ** chngToIN set but t1 is not. This term will be either preceded ** or follwed by an inverted copy (t2.b==t1.a). Skip this term ** and use its inversion. */ testcase( pOrTerm->wtFlags & TERM_COPIED ); testcase( pOrTerm->wtFlags & TERM_VIRTUAL ); assert( pOrTerm->wtFlags & (TERM_COPIED|TERM_VIRTUAL) ); continue; } iColumn = pOrTerm->u.leftColumn; iCursor = pOrTerm->leftCursor; break; } if( i<0 ){ /* No candidate table+column was found. This can only occur ** on the second iteration */ assert( j==1 ); assert( IsPowerOfTwo(chngToIN) ); assert( chngToIN==sqlite3WhereGetMask(&pWInfo->sMaskSet, iCursor) ); break; } testcase( j==1 ); /* We have found a candidate table and column. Check to see if that ** table and column is common to every term in the OR clause */ okToChngToIN = 1; for(; i>=0 && okToChngToIN; i--, pOrTerm++){ assert( pOrTerm->eOperator & WO_EQ ); if( pOrTerm->leftCursor!=iCursor ){ pOrTerm->wtFlags &= ~TERM_OR_OK; }else if( pOrTerm->u.leftColumn!=iColumn ){ okToChngToIN = 0; }else{ int affLeft, affRight; /* If the right-hand side is also a column, then the affinities ** of both right and left sides must be such that no type ** conversions are required on the right. (Ticket #2249) */ affRight = sqlite3ExprAffinity(pOrTerm->pExpr->pRight); affLeft = sqlite3ExprAffinity(pOrTerm->pExpr->pLeft); if( affRight!=0 && affRight!=affLeft ){ okToChngToIN = 0; }else{ pOrTerm->wtFlags |= TERM_OR_OK; } } } } /* At this point, okToChngToIN is true if original pTerm satisfies ** case 1. In that case, construct a new virtual term that is ** pTerm converted into an IN operator. */ if( okToChngToIN ){ Expr *pDup; /* A transient duplicate expression */ ExprList *pList = 0; /* The RHS of the IN operator */ Expr *pLeft = 0; /* The LHS of the IN operator */ Expr *pNew; /* The complete IN operator */ for(i=pOrWc->nTerm-1, pOrTerm=pOrWc->a; i>=0; i--, pOrTerm++){ if( (pOrTerm->wtFlags & TERM_OR_OK)==0 ) continue; assert( pOrTerm->eOperator & WO_EQ ); assert( pOrTerm->leftCursor==iCursor ); assert( pOrTerm->u.leftColumn==iColumn ); pDup = sqlite3ExprDup(db, pOrTerm->pExpr->pRight, 0); pList = sqlite3ExprListAppend(pWInfo->pParse, pList, pDup); pLeft = pOrTerm->pExpr->pLeft; } assert( pLeft!=0 ); pDup = sqlite3ExprDup(db, pLeft, 0); pNew = sqlite3PExpr(pParse, TK_IN, pDup, 0, 0); if( pNew ){ int idxNew; transferJoinMarkings(pNew, pExpr); assert( !ExprHasProperty(pNew, EP_xIsSelect) ); pNew->x.pList = pList; idxNew = whereClauseInsert(pWC, pNew, TERM_VIRTUAL|TERM_DYNAMIC); testcase( idxNew==0 ); exprAnalyze(pSrc, pWC, idxNew); pTerm = &pWC->a[idxTerm]; markTermAsChild(pWC, idxNew, idxTerm); }else{ sqlite3ExprListDelete(db, pList); } pTerm->eOperator = WO_NOOP; /* case 1 trumps case 3 */ } } } #endif /* !SQLITE_OMIT_OR_OPTIMIZATION && !SQLITE_OMIT_SUBQUERY */ /* ** We already know that pExpr is a binary operator where both operands are ** column references. This routine checks to see if pExpr is an equivalence ** relation: ** 1. The SQLITE_Transitive optimization must be enabled ** 2. 
Must be either an == or an IS operator ** 3. Not originating in the ON clause of an OUTER JOIN ** 4. The affinities of A and B must be compatible ** 5a. Both operands use the same collating sequence OR ** 5b. The overall collating sequence is BINARY ** If this routine returns TRUE, that means that the RHS can be substituted ** for the LHS anyplace else in the WHERE clause where the LHS column occurs. ** This is an optimization. No harm comes from returning 0. But if 1 is ** returned when it should not be, then incorrect answers might result. */ static int termIsEquivalence(Parse *pParse, Expr *pExpr){ char aff1, aff2; CollSeq *pColl; const char *zColl1, *zColl2; if( !OptimizationEnabled(pParse->db, SQLITE_Transitive) ) return 0; if( pExpr->op!=TK_EQ && pExpr->op!=TK_IS ) return 0; if( ExprHasProperty(pExpr, EP_FromJoin) ) return 0; aff1 = sqlite3ExprAffinity(pExpr->pLeft); aff2 = sqlite3ExprAffinity(pExpr->pRight); if( aff1!=aff2 && (!sqlite3IsNumericAffinity(aff1) || !sqlite3IsNumericAffinity(aff2)) ){ return 0; } pColl = sqlite3BinaryCompareCollSeq(pParse, pExpr->pLeft, pExpr->pRight); if( pColl==0 || sqlite3StrICmp(pColl->zName, "BINARY")==0 ) return 1; pColl = sqlite3ExprCollSeq(pParse, pExpr->pLeft); /* Since pLeft and pRight are both a column references, their collating ** sequence should always be defined. */ zColl1 = ALWAYS(pColl) ? pColl->zName : 0; pColl = sqlite3ExprCollSeq(pParse, pExpr->pRight); zColl2 = ALWAYS(pColl) ? pColl->zName : 0; return sqlite3StrICmp(zColl1, zColl2)==0; } /* ** Recursively walk the expressions of a SELECT statement and generate ** a bitmask indicating which tables are used in that expression ** tree. */ static Bitmask exprSelectUsage(WhereMaskSet *pMaskSet, Select *pS){ Bitmask mask = 0; while( pS ){ SrcList *pSrc = pS->pSrc; mask |= sqlite3WhereExprListUsage(pMaskSet, pS->pEList); mask |= sqlite3WhereExprListUsage(pMaskSet, pS->pGroupBy); mask |= sqlite3WhereExprListUsage(pMaskSet, pS->pOrderBy); mask |= sqlite3WhereExprUsage(pMaskSet, pS->pWhere); mask |= sqlite3WhereExprUsage(pMaskSet, pS->pHaving); if( ALWAYS(pSrc!=0) ){ int i; for(i=0; inSrc; i++){ mask |= exprSelectUsage(pMaskSet, pSrc->a[i].pSelect); mask |= sqlite3WhereExprUsage(pMaskSet, pSrc->a[i].pOn); } } pS = pS->pPrior; } return mask; } /* ** The input to this routine is an WhereTerm structure with only the ** "pExpr" field filled in. The job of this routine is to analyze the ** subexpression and populate all the other fields of the WhereTerm ** structure. ** ** If the expression is of the form " X" it gets commuted ** to the standard form of "X ". ** ** If the expression is of the form "X Y" where both X and Y are ** columns, then the original expression is unchanged and a new virtual ** term of the form "Y X" is added to the WHERE clause and ** analyzed separately. The original term is marked with TERM_COPIED ** and the new term is marked with TERM_DYNAMIC (because it's pExpr ** needs to be freed with the WhereClause) and TERM_VIRTUAL (because it ** is a commuted copy of a prior term.) The original term has nChild=1 ** and the copy has idxParent set to the index of the original term. 
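**
** For example (illustrative), analyzing the term "t1.a = t2.b" leaves the
** original term in place marked TERM_COPIED and appends a commuted
** virtual term equivalent to "t2.b = t1.a", so that an index on either
** column can potentially be used to drive the lookup.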
*/ static void exprAnalyze( SrcList *pSrc, /* the FROM clause */ WhereClause *pWC, /* the WHERE clause */ int idxTerm /* Index of the term to be analyzed */ ){ WhereInfo *pWInfo = pWC->pWInfo; /* WHERE clause processing context */ WhereTerm *pTerm; /* The term to be analyzed */ WhereMaskSet *pMaskSet; /* Set of table index masks */ Expr *pExpr; /* The expression to be analyzed */ Bitmask prereqLeft; /* Prerequesites of the pExpr->pLeft */ Bitmask prereqAll; /* Prerequesites of pExpr */ Bitmask extraRight = 0; /* Extra dependencies on LEFT JOIN */ Expr *pStr1 = 0; /* RHS of LIKE/GLOB operator */ int isComplete = 0; /* RHS of LIKE/GLOB ends with wildcard */ int noCase = 0; /* uppercase equivalent to lowercase */ int op; /* Top-level operator. pExpr->op */ Parse *pParse = pWInfo->pParse; /* Parsing context */ sqlite3 *db = pParse->db; /* Database connection */ if( db->mallocFailed ){ return; } pTerm = &pWC->a[idxTerm]; pMaskSet = &pWInfo->sMaskSet; pExpr = pTerm->pExpr; assert( pExpr->op!=TK_AS && pExpr->op!=TK_COLLATE ); prereqLeft = sqlite3WhereExprUsage(pMaskSet, pExpr->pLeft); op = pExpr->op; if( op==TK_IN ){ assert( pExpr->pRight==0 ); if( ExprHasProperty(pExpr, EP_xIsSelect) ){ pTerm->prereqRight = exprSelectUsage(pMaskSet, pExpr->x.pSelect); }else{ pTerm->prereqRight = sqlite3WhereExprListUsage(pMaskSet, pExpr->x.pList); } }else if( op==TK_ISNULL ){ pTerm->prereqRight = 0; }else{ pTerm->prereqRight = sqlite3WhereExprUsage(pMaskSet, pExpr->pRight); } prereqAll = sqlite3WhereExprUsage(pMaskSet, pExpr); if( ExprHasProperty(pExpr, EP_FromJoin) ){ Bitmask x = sqlite3WhereGetMask(pMaskSet, pExpr->iRightJoinTable); prereqAll |= x; extraRight = x-1; /* ON clause terms may not be used with an index ** on left table of a LEFT JOIN. Ticket #3015 */ } pTerm->prereqAll = prereqAll; pTerm->leftCursor = -1; pTerm->iParent = -1; pTerm->eOperator = 0; if( allowedOp(op) ){ Expr *pLeft = sqlite3ExprSkipCollate(pExpr->pLeft); Expr *pRight = sqlite3ExprSkipCollate(pExpr->pRight); u16 opMask = (pTerm->prereqRight & prereqLeft)==0 ? WO_ALL : WO_EQUIV; if( pLeft->op==TK_COLUMN ){ pTerm->leftCursor = pLeft->iTable; pTerm->u.leftColumn = pLeft->iColumn; pTerm->eOperator = operatorMask(op) & opMask; } if( op==TK_IS ) pTerm->wtFlags |= TERM_IS; if( pRight && pRight->op==TK_COLUMN ){ WhereTerm *pNew; Expr *pDup; u16 eExtraOp = 0; /* Extra bits for pNew->eOperator */ if( pTerm->leftCursor>=0 ){ int idxNew; pDup = sqlite3ExprDup(db, pExpr, 0); if( db->mallocFailed ){ sqlite3ExprDelete(db, pDup); return; } idxNew = whereClauseInsert(pWC, pDup, TERM_VIRTUAL|TERM_DYNAMIC); if( idxNew==0 ) return; pNew = &pWC->a[idxNew]; markTermAsChild(pWC, idxNew, idxTerm); if( op==TK_IS ) pNew->wtFlags |= TERM_IS; pTerm = &pWC->a[idxTerm]; pTerm->wtFlags |= TERM_COPIED; if( termIsEquivalence(pParse, pDup) ){ pTerm->eOperator |= WO_EQUIV; eExtraOp = WO_EQUIV; } }else{ pDup = pExpr; pNew = pTerm; } exprCommute(pParse, pDup); pLeft = sqlite3ExprSkipCollate(pDup->pLeft); pNew->leftCursor = pLeft->iTable; pNew->u.leftColumn = pLeft->iColumn; testcase( (prereqLeft | extraRight) != prereqLeft ); pNew->prereqRight = prereqLeft | extraRight; pNew->prereqAll = prereqAll; pNew->eOperator = (operatorMask(pDup->op) + eExtraOp) & opMask; } } #ifndef SQLITE_OMIT_BETWEEN_OPTIMIZATION /* If a term is the BETWEEN operator, create two new virtual terms ** that define the range that the BETWEEN implements. 
For example: ** ** a BETWEEN b AND c ** ** is converted into: ** ** (a BETWEEN b AND c) AND (a>=b) AND (a<=c) ** ** The two new terms are added onto the end of the WhereClause object. ** The new terms are "dynamic" and are children of the original BETWEEN ** term. That means that if the BETWEEN term is coded, the children are ** skipped. Or, if the children are satisfied by an index, the original ** BETWEEN term is skipped. */ else if( pExpr->op==TK_BETWEEN && pWC->op==TK_AND ){ ExprList *pList = pExpr->x.pList; int i; static const u8 ops[] = {TK_GE, TK_LE}; assert( pList!=0 ); assert( pList->nExpr==2 ); for(i=0; i<2; i++){ Expr *pNewExpr; int idxNew; pNewExpr = sqlite3PExpr(pParse, ops[i], sqlite3ExprDup(db, pExpr->pLeft, 0), sqlite3ExprDup(db, pList->a[i].pExpr, 0), 0); transferJoinMarkings(pNewExpr, pExpr); idxNew = whereClauseInsert(pWC, pNewExpr, TERM_VIRTUAL|TERM_DYNAMIC); testcase( idxNew==0 ); exprAnalyze(pSrc, pWC, idxNew); pTerm = &pWC->a[idxTerm]; markTermAsChild(pWC, idxNew, idxTerm); } } #endif /* SQLITE_OMIT_BETWEEN_OPTIMIZATION */ #if !defined(SQLITE_OMIT_OR_OPTIMIZATION) && !defined(SQLITE_OMIT_SUBQUERY) /* Analyze a term that is composed of two or more subterms connected by ** an OR operator. */ else if( pExpr->op==TK_OR ){ assert( pWC->op==TK_AND ); exprAnalyzeOrTerm(pSrc, pWC, idxTerm); pTerm = &pWC->a[idxTerm]; } #endif /* SQLITE_OMIT_OR_OPTIMIZATION */ #ifndef SQLITE_OMIT_LIKE_OPTIMIZATION /* Add constraints to reduce the search space on a LIKE or GLOB ** operator. ** ** A like pattern of the form "x LIKE 'aBc%'" is changed into constraints ** ** x>='ABC' AND x<'abd' AND x LIKE 'aBc%' ** ** The last character of the prefix "abc" is incremented to form the ** termination condition "abd". If case is not significant (the default ** for LIKE) then the lower-bound is made all uppercase and the upper- ** bound is made all lowercase so that the bounds also work when comparing ** BLOBs. */ if( pWC->op==TK_AND && isLikeOrGlob(pParse, pExpr, &pStr1, &isComplete, &noCase) ){ Expr *pLeft; /* LHS of LIKE/GLOB operator */ Expr *pStr2; /* Copy of pStr1 - RHS of LIKE/GLOB operator */ Expr *pNewExpr1; Expr *pNewExpr2; int idxNew1; int idxNew2; const char *zCollSeqName; /* Name of collating sequence */ const u16 wtFlags = TERM_LIKEOPT | TERM_VIRTUAL | TERM_DYNAMIC; pLeft = pExpr->x.pList->a[1].pExpr; pStr2 = sqlite3ExprDup(db, pStr1, 0); /* Convert the lower bound to upper-case and the upper bound to ** lower-case (upper-case is less than lower-case in ASCII) so that ** the range constraints also work for BLOBs */ if( noCase && !pParse->db->mallocFailed ){ int i; char c; pTerm->wtFlags |= TERM_LIKE; for(i=0; (c = pStr1->u.zToken[i])!=0; i++){ pStr1->u.zToken[i] = sqlite3Toupper(c); pStr2->u.zToken[i] = sqlite3Tolower(c); } } if( !db->mallocFailed ){ u8 c, *pC; /* Last character before the first wildcard */ pC = (u8*)&pStr2->u.zToken[sqlite3Strlen30(pStr2->u.zToken)-1]; c = *pC; if( noCase ){ /* The point is to increment the last character before the first ** wildcard. But if we increment '@', that will push it into the ** alphabetic range where case conversions will mess up the ** inequality. To avoid this, make sure to also run the full ** LIKE on all candidate expressions by clearing the isComplete flag */ if( c=='A'-1 ) isComplete = 0; c = sqlite3UpperToLower[c]; } *pC = c + 1; } zCollSeqName = noCase ? 
"NOCASE" : "BINARY"; pNewExpr1 = sqlite3ExprDup(db, pLeft, 0); pNewExpr1 = sqlite3PExpr(pParse, TK_GE, sqlite3ExprAddCollateString(pParse,pNewExpr1,zCollSeqName), pStr1, 0); transferJoinMarkings(pNewExpr1, pExpr); idxNew1 = whereClauseInsert(pWC, pNewExpr1, wtFlags); testcase( idxNew1==0 ); exprAnalyze(pSrc, pWC, idxNew1); pNewExpr2 = sqlite3ExprDup(db, pLeft, 0); pNewExpr2 = sqlite3PExpr(pParse, TK_LT, sqlite3ExprAddCollateString(pParse,pNewExpr2,zCollSeqName), pStr2, 0); transferJoinMarkings(pNewExpr2, pExpr); idxNew2 = whereClauseInsert(pWC, pNewExpr2, wtFlags); testcase( idxNew2==0 ); exprAnalyze(pSrc, pWC, idxNew2); pTerm = &pWC->a[idxTerm]; if( isComplete ){ markTermAsChild(pWC, idxNew1, idxTerm); markTermAsChild(pWC, idxNew2, idxTerm); } } #endif /* SQLITE_OMIT_LIKE_OPTIMIZATION */ #ifndef SQLITE_OMIT_VIRTUALTABLE /* Add a WO_MATCH auxiliary term to the constraint set if the ** current expression is of the form: column MATCH expr. ** This information is used by the xBestIndex methods of ** virtual tables. The native query optimizer does not attempt ** to do anything with MATCH functions. */ if( isMatchOfColumn(pExpr) ){ int idxNew; Expr *pRight, *pLeft; WhereTerm *pNewTerm; Bitmask prereqColumn, prereqExpr; pRight = pExpr->x.pList->a[0].pExpr; pLeft = pExpr->x.pList->a[1].pExpr; prereqExpr = sqlite3WhereExprUsage(pMaskSet, pRight); prereqColumn = sqlite3WhereExprUsage(pMaskSet, pLeft); if( (prereqExpr & prereqColumn)==0 ){ Expr *pNewExpr; pNewExpr = sqlite3PExpr(pParse, TK_MATCH, 0, sqlite3ExprDup(db, pRight, 0), 0); idxNew = whereClauseInsert(pWC, pNewExpr, TERM_VIRTUAL|TERM_DYNAMIC); testcase( idxNew==0 ); pNewTerm = &pWC->a[idxNew]; pNewTerm->prereqRight = prereqExpr; pNewTerm->leftCursor = pLeft->iTable; pNewTerm->u.leftColumn = pLeft->iColumn; pNewTerm->eOperator = WO_MATCH; markTermAsChild(pWC, idxNew, idxTerm); pTerm = &pWC->a[idxTerm]; pTerm->wtFlags |= TERM_COPIED; pNewTerm->prereqAll = pTerm->prereqAll; } } #endif /* SQLITE_OMIT_VIRTUALTABLE */ #ifdef SQLITE_ENABLE_STAT3_OR_STAT4 /* When sqlite_stat3 histogram data is available an operator of the ** form "x IS NOT NULL" can sometimes be evaluated more efficiently ** as "x>NULL" if x is not an INTEGER PRIMARY KEY. So construct a ** virtual term of that form. ** ** Note that the virtual term must be tagged with TERM_VNULL. */ if( pExpr->op==TK_NOTNULL && pExpr->pLeft->op==TK_COLUMN && pExpr->pLeft->iColumn>=0 && OptimizationEnabled(db, SQLITE_Stat34) ){ Expr *pNewExpr; Expr *pLeft = pExpr->pLeft; int idxNew; WhereTerm *pNewTerm; pNewExpr = sqlite3PExpr(pParse, TK_GT, sqlite3ExprDup(db, pLeft, 0), sqlite3PExpr(pParse, TK_NULL, 0, 0, 0), 0); idxNew = whereClauseInsert(pWC, pNewExpr, TERM_VIRTUAL|TERM_DYNAMIC|TERM_VNULL); if( idxNew ){ pNewTerm = &pWC->a[idxNew]; pNewTerm->prereqRight = 0; pNewTerm->leftCursor = pLeft->iTable; pNewTerm->u.leftColumn = pLeft->iColumn; pNewTerm->eOperator = WO_GT; markTermAsChild(pWC, idxNew, idxTerm); pTerm = &pWC->a[idxTerm]; pTerm->wtFlags |= TERM_COPIED; pNewTerm->prereqAll = pTerm->prereqAll; } } #endif /* SQLITE_ENABLE_STAT3_OR_STAT4 */ /* Prevent ON clause terms of a LEFT JOIN from being used to drive ** an index for tables to the left of the join. */ pTerm->prereqRight |= extraRight; } /*************************************************************************** ** Routines with file scope above. Interface to the rest of the where.c ** subsystem follows. 
***************************************************************************/ /* ** This routine identifies subexpressions in the WHERE clause where ** each subexpression is separated by the AND operator or some other ** operator specified in the op parameter. The WhereClause structure ** is filled with pointers to subexpressions. For example: ** ** WHERE a=='hello' AND coalesce(b,11)<10 AND (c+12!=d OR c==22) ** \________/ \_______________/ \________________/ ** slot[0] slot[1] slot[2] ** ** The original WHERE clause in pExpr is unaltered. All this routine ** does is make slot[] entries point to substructure within pExpr. ** ** In the previous sentence and in the diagram, "slot[]" refers to ** the WhereClause.a[] array. The slot[] array grows as needed to contain ** all terms of the WHERE clause. */ SQLITE_PRIVATE void sqlite3WhereSplit(WhereClause *pWC, Expr *pExpr, u8 op){ Expr *pE2 = sqlite3ExprSkipCollate(pExpr); pWC->op = op; if( pE2==0 ) return; if( pE2->op!=op ){ whereClauseInsert(pWC, pExpr, 0); }else{ sqlite3WhereSplit(pWC, pE2->pLeft, op); sqlite3WhereSplit(pWC, pE2->pRight, op); } } /* ** Initialize a preallocated WhereClause structure. */ SQLITE_PRIVATE void sqlite3WhereClauseInit( WhereClause *pWC, /* The WhereClause to be initialized */ WhereInfo *pWInfo /* The WHERE processing context */ ){ pWC->pWInfo = pWInfo; pWC->pOuter = 0; pWC->nTerm = 0; pWC->nSlot = ArraySize(pWC->aStatic); pWC->a = pWC->aStatic; } /* ** Deallocate a WhereClause structure. The WhereClause structure ** itself is not freed. This routine is the inverse of sqlite3WhereClauseInit(). */ SQLITE_PRIVATE void sqlite3WhereClauseClear(WhereClause *pWC){ int i; WhereTerm *a; sqlite3 *db = pWC->pWInfo->pParse->db; for(i=pWC->nTerm-1, a=pWC->a; i>=0; i--, a++){ if( a->wtFlags & TERM_DYNAMIC ){ sqlite3ExprDelete(db, a->pExpr); } if( a->wtFlags & TERM_ORINFO ){ whereOrInfoDelete(db, a->u.pOrInfo); }else if( a->wtFlags & TERM_ANDINFO ){ whereAndInfoDelete(db, a->u.pAndInfo); } } if( pWC->a!=pWC->aStatic ){ sqlite3DbFree(db, pWC->a); } } /* ** These routines walk (recursively) an expression tree and generate ** a bitmask indicating which tables are used in that expression ** tree. */ SQLITE_PRIVATE Bitmask sqlite3WhereExprUsage(WhereMaskSet *pMaskSet, Expr *p){ Bitmask mask = 0; if( p==0 ) return 0; if( p->op==TK_COLUMN ){ mask = sqlite3WhereGetMask(pMaskSet, p->iTable); return mask; } mask = sqlite3WhereExprUsage(pMaskSet, p->pRight); mask |= sqlite3WhereExprUsage(pMaskSet, p->pLeft); if( ExprHasProperty(p, EP_xIsSelect) ){ mask |= exprSelectUsage(pMaskSet, p->x.pSelect); }else{ mask |= sqlite3WhereExprListUsage(pMaskSet, p->x.pList); } return mask; } SQLITE_PRIVATE Bitmask sqlite3WhereExprListUsage(WhereMaskSet *pMaskSet, ExprList *pList){ int i; Bitmask mask = 0; if( pList ){ for(i=0; inExpr; i++){ mask |= sqlite3WhereExprUsage(pMaskSet, pList->a[i].pExpr); } } return mask; } /* ** Call exprAnalyze on all terms in a WHERE clause. ** ** Note that exprAnalyze() might add new virtual terms onto the ** end of the WHERE clause. We do not want to analyze these new ** virtual terms, so start analyzing at the end and work forward ** so that the added virtual terms are never processed. 
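**
** In other words, because new virtual terms are always appended at
** indexes greater than or equal to the original pWC->nTerm, a loop that
** runs from nTerm-1 down to 0 can never land on one of them.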
*/ SQLITE_PRIVATE void sqlite3WhereExprAnalyze( SrcList *pTabList, /* the FROM clause */ WhereClause *pWC /* the WHERE clause to be analyzed */ ){ int i; for(i=pWC->nTerm-1; i>=0; i--){ exprAnalyze(pTabList, pWC, i); } } /************** End of whereexpr.c *******************************************/ /************** Begin file where.c *******************************************/ /* ** 2001 September 15 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** This module contains C code that generates VDBE code used to process ** the WHERE clause of SQL statements. This module is responsible for ** generating the code that loops through a table looking for applicable ** rows. Indices are selected and used to speed the search when doing ** so is applicable. Because this module is responsible for selecting ** indices, you might also think of this module as the "query optimizer". */ /* #include "sqliteInt.h" */ /* #include "whereInt.h" */ /* Forward declaration of methods */ static int whereLoopResize(sqlite3*, WhereLoop*, int); /* Test variable that can be set to enable WHERE tracing */ #if defined(SQLITE_TEST) || defined(SQLITE_DEBUG) /***/ int sqlite3WhereTrace = 0; #endif /* ** Return the estimated number of output rows from a WHERE clause */ SQLITE_PRIVATE u64 sqlite3WhereOutputRowCount(WhereInfo *pWInfo){ return sqlite3LogEstToInt(pWInfo->nRowOut); } /* ** Return one of the WHERE_DISTINCT_xxxxx values to indicate how this ** WHERE clause returns outputs for DISTINCT processing. */ SQLITE_PRIVATE int sqlite3WhereIsDistinct(WhereInfo *pWInfo){ return pWInfo->eDistinct; } /* ** Return TRUE if the WHERE clause returns rows in ORDER BY order. ** Return FALSE if the output needs to be sorted. */ SQLITE_PRIVATE int sqlite3WhereIsOrdered(WhereInfo *pWInfo){ return pWInfo->nOBSat; } /* ** Return the VDBE address or label to jump to in order to continue ** immediately with the next row of a WHERE clause. */ SQLITE_PRIVATE int sqlite3WhereContinueLabel(WhereInfo *pWInfo){ assert( pWInfo->iContinue!=0 ); return pWInfo->iContinue; } /* ** Return the VDBE address or label to jump to in order to break ** out of a WHERE loop. */ SQLITE_PRIVATE int sqlite3WhereBreakLabel(WhereInfo *pWInfo){ return pWInfo->iBreak; } /* ** Return TRUE if an UPDATE or DELETE statement can operate directly on ** the rowids returned by a WHERE clause. Return FALSE if doing an ** UPDATE or DELETE might change subsequent WHERE clause results. ** ** If the ONEPASS optimization is used (if this routine returns true) ** then also write the indices of open cursors used by ONEPASS ** into aiCur[0] and aiCur[1]. iaCur[0] gets the cursor of the data ** table and iaCur[1] gets the cursor used by an auxiliary index. ** Either value may be -1, indicating that cursor is not used. ** Any cursors returned will have been opened for writing. ** ** aiCur[0] and aiCur[1] both get -1 if the where-clause logic is ** unable to use the ONEPASS optimization. 
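**
** A caller might use this routine roughly as follows (illustrative
** sketch only):
**
**   int aiCur[2];
**   if( sqlite3WhereOkOnePass(pWInfo, aiCur) ){
**     (operate directly on the cursors in aiCur[0] and aiCur[1])
**   }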
*/ SQLITE_PRIVATE int sqlite3WhereOkOnePass(WhereInfo *pWInfo, int *aiCur){ memcpy(aiCur, pWInfo->aiCurOnePass, sizeof(int)*2); return pWInfo->okOnePass; } /* ** Move the content of pSrc into pDest */ static void whereOrMove(WhereOrSet *pDest, WhereOrSet *pSrc){ pDest->n = pSrc->n; memcpy(pDest->a, pSrc->a, pDest->n*sizeof(pDest->a[0])); } /* ** Try to insert a new prerequisite/cost entry into the WhereOrSet pSet. ** ** The new entry might overwrite an existing entry, or it might be ** appended, or it might be discarded. Do whatever is the right thing ** so that pSet keeps the N_OR_COST best entries seen so far. */ static int whereOrInsert( WhereOrSet *pSet, /* The WhereOrSet to be updated */ Bitmask prereq, /* Prerequisites of the new entry */ LogEst rRun, /* Run-cost of the new entry */ LogEst nOut /* Number of outputs for the new entry */ ){ u16 i; WhereOrCost *p; for(i=pSet->n, p=pSet->a; i>0; i--, p++){ if( rRun<=p->rRun && (prereq & p->prereq)==prereq ){ goto whereOrInsert_done; } if( p->rRun<=rRun && (p->prereq & prereq)==p->prereq ){ return 0; } } if( pSet->na[pSet->n++]; p->nOut = nOut; }else{ p = pSet->a; for(i=1; in; i++){ if( p->rRun>pSet->a[i].rRun ) p = pSet->a + i; } if( p->rRun<=rRun ) return 0; } whereOrInsert_done: p->prereq = prereq; p->rRun = rRun; if( p->nOut>nOut ) p->nOut = nOut; return 1; } /* ** Return the bitmask for the given cursor number. Return 0 if ** iCursor is not in the set. */ SQLITE_PRIVATE Bitmask sqlite3WhereGetMask(WhereMaskSet *pMaskSet, int iCursor){ int i; assert( pMaskSet->n<=(int)sizeof(Bitmask)*8 ); for(i=0; in; i++){ if( pMaskSet->ix[i]==iCursor ){ return MASKBIT(i); } } return 0; } /* ** Create a new mask for cursor iCursor. ** ** There is one cursor per table in the FROM clause. The number of ** tables in the FROM clause is limited by a test early in the ** sqlite3WhereBegin() routine. So we know that the pMaskSet->ix[] ** array will never overflow. */ static void createMask(WhereMaskSet *pMaskSet, int iCursor){ assert( pMaskSet->n < ArraySize(pMaskSet->ix) ); pMaskSet->ix[pMaskSet->n++] = iCursor; } /* ** Advance to the next WhereTerm that matches according to the criteria ** established when the pScan object was initialized by whereScanInit(). ** Return NULL if there are no more matching WhereTerms. */ static WhereTerm *whereScanNext(WhereScan *pScan){ int iCur; /* The cursor on the LHS of the term */ int iColumn; /* The column on the LHS of the term. 
-1 for IPK */ Expr *pX; /* An expression being tested */ WhereClause *pWC; /* Shorthand for pScan->pWC */ WhereTerm *pTerm; /* The term being tested */ int k = pScan->k; /* Where to start scanning */ while( pScan->iEquiv<=pScan->nEquiv ){ iCur = pScan->aEquiv[pScan->iEquiv-2]; iColumn = pScan->aEquiv[pScan->iEquiv-1]; while( (pWC = pScan->pWC)!=0 ){ for(pTerm=pWC->a+k; knTerm; k++, pTerm++){ if( pTerm->leftCursor==iCur && pTerm->u.leftColumn==iColumn && (pScan->iEquiv<=2 || !ExprHasProperty(pTerm->pExpr, EP_FromJoin)) ){ if( (pTerm->eOperator & WO_EQUIV)!=0 && pScan->nEquivaEquiv) ){ int j; pX = sqlite3ExprSkipCollate(pTerm->pExpr->pRight); assert( pX->op==TK_COLUMN ); for(j=0; jnEquiv; j+=2){ if( pScan->aEquiv[j]==pX->iTable && pScan->aEquiv[j+1]==pX->iColumn ){ break; } } if( j==pScan->nEquiv ){ pScan->aEquiv[j] = pX->iTable; pScan->aEquiv[j+1] = pX->iColumn; pScan->nEquiv += 2; } } if( (pTerm->eOperator & pScan->opMask)!=0 ){ /* Verify the affinity and collating sequence match */ if( pScan->zCollName && (pTerm->eOperator & WO_ISNULL)==0 ){ CollSeq *pColl; Parse *pParse = pWC->pWInfo->pParse; pX = pTerm->pExpr; if( !sqlite3IndexAffinityOk(pX, pScan->idxaff) ){ continue; } assert(pX->pLeft); pColl = sqlite3BinaryCompareCollSeq(pParse, pX->pLeft, pX->pRight); if( pColl==0 ) pColl = pParse->db->pDfltColl; if( sqlite3StrICmp(pColl->zName, pScan->zCollName) ){ continue; } } if( (pTerm->eOperator & (WO_EQ|WO_IS))!=0 && (pX = pTerm->pExpr->pRight)->op==TK_COLUMN && pX->iTable==pScan->aEquiv[0] && pX->iColumn==pScan->aEquiv[1] ){ testcase( pTerm->eOperator & WO_IS ); continue; } pScan->k = k+1; return pTerm; } } } pScan->pWC = pScan->pWC->pOuter; k = 0; } pScan->pWC = pScan->pOrigWC; k = 0; pScan->iEquiv += 2; } return 0; } /* ** Initialize a WHERE clause scanner object. Return a pointer to the ** first match. Return NULL if there are no matches. ** ** The scanner will be searching the WHERE clause pWC. It will look ** for terms of the form "X " where X is column iColumn of table ** iCur. The must be one of the operators described by opMask. ** ** If the search is for X and the WHERE clause contains terms of the ** form X=Y then this routine might also return terms of the form ** "Y ". The number of levels of transitivity is limited, ** but is enough to handle most commonly occurring SQL statements. ** ** If X is not the INTEGER PRIMARY KEY then X must be compatible with ** index pIdx. */ static WhereTerm *whereScanInit( WhereScan *pScan, /* The WhereScan object being initialized */ WhereClause *pWC, /* The WHERE clause to be scanned */ int iCur, /* Cursor to scan for */ int iColumn, /* Column to scan for */ u32 opMask, /* Operator(s) to scan for */ Index *pIdx /* Must be compatible with this index */ ){ int j; /* memset(pScan, 0, sizeof(*pScan)); */ pScan->pOrigWC = pWC; pScan->pWC = pWC; if( pIdx && iColumn>=0 ){ pScan->idxaff = pIdx->pTable->aCol[iColumn].affinity; for(j=0; pIdx->aiColumn[j]!=iColumn; j++){ if( NEVER(j>pIdx->nColumn) ) return 0; } pScan->zCollName = pIdx->azColl[j]; }else{ pScan->idxaff = 0; pScan->zCollName = 0; } pScan->opMask = opMask; pScan->k = 0; pScan->aEquiv[0] = iCur; pScan->aEquiv[1] = iColumn; pScan->nEquiv = 2; pScan->iEquiv = 2; return whereScanNext(pScan); } /* ** Search for a term in the WHERE clause that is of the form "X " ** where X is a reference to the iColumn of table iCur and is one of ** the WO_xx operator codes specified by the op parameter. ** Return a pointer to the term. Return 0 if not found. 
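**
** For example (illustrative), a caller looking for an equality or IN
** constraint on column iCol of the table with cursor iCur might invoke:
**
**   pTerm = sqlite3WhereFindTerm(pWC, iCur, iCol, notReady,
**                                WO_EQ|WO_IN|WO_IS, pIdx);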
** ** The term returned might by Y= if there is another constraint in ** the WHERE clause that specifies that X=Y. Any such constraints will be ** identified by the WO_EQUIV bit in the pTerm->eOperator field. The ** aEquiv[] array holds X and all its equivalents, with each SQL variable ** taking up two slots in aEquiv[]. The first slot is for the cursor number ** and the second is for the column number. There are 22 slots in aEquiv[] ** so that means we can look for X plus up to 10 other equivalent values. ** Hence a search for X will return if X=A1 and A1=A2 and A2=A3 ** and ... and A9=A10 and A10=. ** ** If there are multiple terms in the WHERE clause of the form "X " ** then try for the one with no dependencies on - in other words where ** is a constant expression of some kind. Only return entries of ** the form "X Y" where Y is a column in another table if no terms of ** the form "X " exist. If no terms with a constant RHS ** exist, try to return a term that does not use WO_EQUIV. */ SQLITE_PRIVATE WhereTerm *sqlite3WhereFindTerm( WhereClause *pWC, /* The WHERE clause to be searched */ int iCur, /* Cursor number of LHS */ int iColumn, /* Column number of LHS */ Bitmask notReady, /* RHS must not overlap with this mask */ u32 op, /* Mask of WO_xx values describing operator */ Index *pIdx /* Must be compatible with this index, if not NULL */ ){ WhereTerm *pResult = 0; WhereTerm *p; WhereScan scan; p = whereScanInit(&scan, pWC, iCur, iColumn, op, pIdx); op &= WO_EQ|WO_IS; while( p ){ if( (p->prereqRight & notReady)==0 ){ if( p->prereqRight==0 && (p->eOperator&op)!=0 ){ testcase( p->eOperator & WO_IS ); return p; } if( pResult==0 ) pResult = p; } p = whereScanNext(&scan); } return pResult; } /* ** This function searches pList for an entry that matches the iCol-th column ** of index pIdx. ** ** If such an expression is found, its index in pList->a[] is returned. If ** no expression is found, -1 is returned. */ static int findIndexCol( Parse *pParse, /* Parse context */ ExprList *pList, /* Expression list to search */ int iBase, /* Cursor for table associated with pIdx */ Index *pIdx, /* Index to match column of */ int iCol /* Column of index to match */ ){ int i; const char *zColl = pIdx->azColl[iCol]; for(i=0; inExpr; i++){ Expr *p = sqlite3ExprSkipCollate(pList->a[i].pExpr); if( p->op==TK_COLUMN && p->iColumn==pIdx->aiColumn[iCol] && p->iTable==iBase ){ CollSeq *pColl = sqlite3ExprCollSeq(pParse, pList->a[i].pExpr); if( pColl && 0==sqlite3StrICmp(pColl->zName, zColl) ){ return i; } } } return -1; } /* ** Return true if the DISTINCT expression-list passed as the third argument ** is redundant. ** ** A DISTINCT list is redundant if any subset of the columns in the ** DISTINCT list are collectively unique and individually non-null. */ static int isDistinctRedundant( Parse *pParse, /* Parsing context */ SrcList *pTabList, /* The FROM clause */ WhereClause *pWC, /* The WHERE clause */ ExprList *pDistinct /* The result set that needs to be DISTINCT */ ){ Table *pTab; Index *pIdx; int i; int iBase; /* If there is more than one table or sub-select in the FROM clause of ** this query, then it will not be possible to show that the DISTINCT ** clause is redundant. */ if( pTabList->nSrc!=1 ) return 0; iBase = pTabList->a[0].iCursor; pTab = pTabList->a[0].pTab; /* If any of the expressions is an IPK column on table iBase, then return ** true. Note: The (p->iTable==iBase) part of this test may be false if the ** current SELECT is a correlated sub-query. 
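**
** For example (illustrative), in "SELECT DISTINCT rowid, name FROM t1"
** the DISTINCT keyword is redundant because the rowid alone is already
** unique for every row of t1.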
*/ for(i=0; inExpr; i++){ Expr *p = sqlite3ExprSkipCollate(pDistinct->a[i].pExpr); if( p->op==TK_COLUMN && p->iTable==iBase && p->iColumn<0 ) return 1; } /* Loop through all indices on the table, checking each to see if it makes ** the DISTINCT qualifier redundant. It does so if: ** ** 1. The index is itself UNIQUE, and ** ** 2. All of the columns in the index are either part of the pDistinct ** list, or else the WHERE clause contains a term of the form "col=X", ** where X is a constant value. The collation sequences of the ** comparison and select-list expressions must match those of the index. ** ** 3. All of those index columns for which the WHERE clause does not ** contain a "col=X" term are subject to a NOT NULL constraint. */ for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ if( !IsUniqueIndex(pIdx) ) continue; for(i=0; inKeyCol; i++){ i16 iCol = pIdx->aiColumn[i]; if( 0==sqlite3WhereFindTerm(pWC, iBase, iCol, ~(Bitmask)0, WO_EQ, pIdx) ){ int iIdxCol = findIndexCol(pParse, pDistinct, iBase, pIdx, i); if( iIdxCol<0 || pTab->aCol[iCol].notNull==0 ){ break; } } } if( i==pIdx->nKeyCol ){ /* This index implies that the DISTINCT qualifier is redundant. */ return 1; } } return 0; } /* ** Estimate the logarithm of the input value to base 2. */ static LogEst estLog(LogEst N){ return N<=10 ? 0 : sqlite3LogEst(N) - 33; } /* ** Convert OP_Column opcodes to OP_Copy in previously generated code. ** ** This routine runs over generated VDBE code and translates OP_Column ** opcodes into OP_Copy, and OP_Rowid into OP_Null, when the table is being ** accessed via co-routine instead of via table lookup. */ static void translateColumnToCopy( Vdbe *v, /* The VDBE containing code to translate */ int iStart, /* Translate from this opcode to the end */ int iTabCur, /* OP_Column/OP_Rowid references to this table */ int iRegister /* The first column is in this register */ ){ VdbeOp *pOp = sqlite3VdbeGetOp(v, iStart); int iEnd = sqlite3VdbeCurrentAddr(v); for(; iStartp1!=iTabCur ) continue; if( pOp->opcode==OP_Column ){ pOp->opcode = OP_Copy; pOp->p1 = pOp->p2 + iRegister; pOp->p2 = pOp->p3; pOp->p3 = 0; }else if( pOp->opcode==OP_Rowid ){ pOp->opcode = OP_Null; pOp->p1 = 0; pOp->p3 = 0; } } } /* ** Two routines for printing the content of an sqlite3_index_info ** structure. Used for testing and debugging only. If neither ** SQLITE_TEST or SQLITE_DEBUG are defined, then these routines ** are no-ops. 
*/ #if !defined(SQLITE_OMIT_VIRTUALTABLE) && defined(WHERETRACE_ENABLED) static void TRACE_IDX_INPUTS(sqlite3_index_info *p){ int i; if( !sqlite3WhereTrace ) return; for(i=0; inConstraint; i++){ sqlite3DebugPrintf(" constraint[%d]: col=%d termid=%d op=%d usabled=%d\n", i, p->aConstraint[i].iColumn, p->aConstraint[i].iTermOffset, p->aConstraint[i].op, p->aConstraint[i].usable); } for(i=0; inOrderBy; i++){ sqlite3DebugPrintf(" orderby[%d]: col=%d desc=%d\n", i, p->aOrderBy[i].iColumn, p->aOrderBy[i].desc); } } static void TRACE_IDX_OUTPUTS(sqlite3_index_info *p){ int i; if( !sqlite3WhereTrace ) return; for(i=0; inConstraint; i++){ sqlite3DebugPrintf(" usage[%d]: argvIdx=%d omit=%d\n", i, p->aConstraintUsage[i].argvIndex, p->aConstraintUsage[i].omit); } sqlite3DebugPrintf(" idxNum=%d\n", p->idxNum); sqlite3DebugPrintf(" idxStr=%s\n", p->idxStr); sqlite3DebugPrintf(" orderByConsumed=%d\n", p->orderByConsumed); sqlite3DebugPrintf(" estimatedCost=%g\n", p->estimatedCost); sqlite3DebugPrintf(" estimatedRows=%lld\n", p->estimatedRows); } #else #define TRACE_IDX_INPUTS(A) #define TRACE_IDX_OUTPUTS(A) #endif #ifndef SQLITE_OMIT_AUTOMATIC_INDEX /* ** Return TRUE if the WHERE clause term pTerm is of a form where it ** could be used with an index to access pSrc, assuming an appropriate ** index existed. */ static int termCanDriveIndex( WhereTerm *pTerm, /* WHERE clause term to check */ struct SrcList_item *pSrc, /* Table we are trying to access */ Bitmask notReady /* Tables in outer loops of the join */ ){ char aff; if( pTerm->leftCursor!=pSrc->iCursor ) return 0; if( (pTerm->eOperator & (WO_EQ|WO_IS))==0 ) return 0; if( (pTerm->prereqRight & notReady)!=0 ) return 0; if( pTerm->u.leftColumn<0 ) return 0; aff = pSrc->pTab->aCol[pTerm->u.leftColumn].affinity; if( !sqlite3IndexAffinityOk(pTerm->pExpr, aff) ) return 0; testcase( pTerm->pExpr->op==TK_IS ); return 1; } #endif #ifndef SQLITE_OMIT_AUTOMATIC_INDEX /* ** Generate code to construct the Index object for an automatic index ** and to set up the WhereLevel object pLevel so that the code generator ** makes use of the automatic index. 
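** An illustrative scenario: in a join such as
**
**     SELECT * FROM t1, t2 WHERE t1.x=t2.y
**
** where neither x nor y is indexed, the planner may build a transient index on
** the join column of the inner table for the duration of the statement. The
** sqlite3_log() call below reports such a decision as SQLITE_WARNING_AUTOINDEX.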
*/ static void constructAutomaticIndex( Parse *pParse, /* The parsing context */ WhereClause *pWC, /* The WHERE clause */ struct SrcList_item *pSrc, /* The FROM clause term to get the next index */ Bitmask notReady, /* Mask of cursors that are not available */ WhereLevel *pLevel /* Write new index here */ ){ int nKeyCol; /* Number of columns in the constructed index */ WhereTerm *pTerm; /* A single term of the WHERE clause */ WhereTerm *pWCEnd; /* End of pWC->a[] */ Index *pIdx; /* Object describing the transient index */ Vdbe *v; /* Prepared statement under construction */ int addrInit; /* Address of the initialization bypass jump */ Table *pTable; /* The table being indexed */ int addrTop; /* Top of the index fill loop */ int regRecord; /* Register holding an index record */ int n; /* Column counter */ int i; /* Loop counter */ int mxBitCol; /* Maximum column in pSrc->colUsed */ CollSeq *pColl; /* Collating sequence to on a column */ WhereLoop *pLoop; /* The Loop object */ char *zNotUsed; /* Extra space on the end of pIdx */ Bitmask idxCols; /* Bitmap of columns used for indexing */ Bitmask extraCols; /* Bitmap of additional columns */ u8 sentWarning = 0; /* True if a warnning has been issued */ Expr *pPartial = 0; /* Partial Index Expression */ int iContinue = 0; /* Jump here to skip excluded rows */ struct SrcList_item *pTabItem; /* FROM clause term being indexed */ /* Generate code to skip over the creation and initialization of the ** transient index on 2nd and subsequent iterations of the loop. */ v = pParse->pVdbe; assert( v!=0 ); addrInit = sqlite3CodeOnce(pParse); VdbeCoverage(v); /* Count the number of columns that will be added to the index ** and used to match WHERE clause constraints */ nKeyCol = 0; pTable = pSrc->pTab; pWCEnd = &pWC->a[pWC->nTerm]; pLoop = pLevel->pWLoop; idxCols = 0; for(pTerm=pWC->a; pTermpExpr; assert( !ExprHasProperty(pExpr, EP_FromJoin) /* prereq always non-zero */ || pExpr->iRightJoinTable!=pSrc->iCursor /* for the right-hand */ || pLoop->prereq!=0 ); /* table of a LEFT JOIN */ if( pLoop->prereq==0 && (pTerm->wtFlags & TERM_VIRTUAL)==0 && !ExprHasProperty(pExpr, EP_FromJoin) && sqlite3ExprIsTableConstant(pExpr, pSrc->iCursor) ){ pPartial = sqlite3ExprAnd(pParse->db, pPartial, sqlite3ExprDup(pParse->db, pExpr, 0)); } if( termCanDriveIndex(pTerm, pSrc, notReady) ){ int iCol = pTerm->u.leftColumn; Bitmask cMask = iCol>=BMS ? MASKBIT(BMS-1) : MASKBIT(iCol); testcase( iCol==BMS ); testcase( iCol==BMS-1 ); if( !sentWarning ){ sqlite3_log(SQLITE_WARNING_AUTOINDEX, "automatic index on %s(%s)", pTable->zName, pTable->aCol[iCol].zName); sentWarning = 1; } if( (idxCols & cMask)==0 ){ if( whereLoopResize(pParse->db, pLoop, nKeyCol+1) ){ goto end_auto_index_create; } pLoop->aLTerm[nKeyCol++] = pTerm; idxCols |= cMask; } } } assert( nKeyCol>0 ); pLoop->u.btree.nEq = pLoop->nLTerm = nKeyCol; pLoop->wsFlags = WHERE_COLUMN_EQ | WHERE_IDX_ONLY | WHERE_INDEXED | WHERE_AUTO_INDEX; /* Count the number of additional columns needed to create a ** covering index. A "covering index" is an index that contains all ** columns that are needed by the query. With a covering index, the ** original table never needs to be accessed. Automatic indices must ** be a covering index because the index will not be updated if the ** original table changes and the index and table cannot both be used ** if they go out of sync. 
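** For example, if the query also reads column b of the table even though only
** column a appears in usable constraints, then b is appended to the automatic
** index as an extra column so that the base table never has to be consulted.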
*/ extraCols = pSrc->colUsed & (~idxCols | MASKBIT(BMS-1)); mxBitCol = MIN(BMS-1,pTable->nCol); testcase( pTable->nCol==BMS-1 ); testcase( pTable->nCol==BMS-2 ); for(i=0; icolUsed & MASKBIT(BMS-1) ){ nKeyCol += pTable->nCol - BMS + 1; } /* Construct the Index object to describe this index */ pIdx = sqlite3AllocateIndexObject(pParse->db, nKeyCol+1, 0, &zNotUsed); if( pIdx==0 ) goto end_auto_index_create; pLoop->u.btree.pIndex = pIdx; pIdx->zName = "auto-index"; pIdx->pTable = pTable; n = 0; idxCols = 0; for(pTerm=pWC->a; pTermu.leftColumn; Bitmask cMask = iCol>=BMS ? MASKBIT(BMS-1) : MASKBIT(iCol); testcase( iCol==BMS-1 ); testcase( iCol==BMS ); if( (idxCols & cMask)==0 ){ Expr *pX = pTerm->pExpr; idxCols |= cMask; pIdx->aiColumn[n] = pTerm->u.leftColumn; pColl = sqlite3BinaryCompareCollSeq(pParse, pX->pLeft, pX->pRight); pIdx->azColl[n] = pColl ? pColl->zName : "BINARY"; n++; } } } assert( (u32)n==pLoop->u.btree.nEq ); /* Add additional columns needed to make the automatic index into ** a covering index */ for(i=0; iaiColumn[n] = i; pIdx->azColl[n] = "BINARY"; n++; } } if( pSrc->colUsed & MASKBIT(BMS-1) ){ for(i=BMS-1; inCol; i++){ pIdx->aiColumn[n] = i; pIdx->azColl[n] = "BINARY"; n++; } } assert( n==nKeyCol ); pIdx->aiColumn[n] = -1; pIdx->azColl[n] = "BINARY"; /* Create the automatic index */ assert( pLevel->iIdxCur>=0 ); pLevel->iIdxCur = pParse->nTab++; sqlite3VdbeAddOp2(v, OP_OpenAutoindex, pLevel->iIdxCur, nKeyCol+1); sqlite3VdbeSetP4KeyInfo(pParse, pIdx); VdbeComment((v, "for %s", pTable->zName)); /* Fill the automatic index with content */ sqlite3ExprCachePush(pParse); pTabItem = &pWC->pWInfo->pTabList->a[pLevel->iFrom]; if( pTabItem->viaCoroutine ){ int regYield = pTabItem->regReturn; sqlite3VdbeAddOp3(v, OP_InitCoroutine, regYield, 0, pTabItem->addrFillSub); addrTop = sqlite3VdbeAddOp1(v, OP_Yield, regYield); VdbeCoverage(v); VdbeComment((v, "next row of \"%s\"", pTabItem->pTab->zName)); }else{ addrTop = sqlite3VdbeAddOp1(v, OP_Rewind, pLevel->iTabCur); VdbeCoverage(v); } if( pPartial ){ iContinue = sqlite3VdbeMakeLabel(v); sqlite3ExprIfFalse(pParse, pPartial, iContinue, SQLITE_JUMPIFNULL); pLoop->wsFlags |= WHERE_PARTIALIDX; } regRecord = sqlite3GetTempReg(pParse); sqlite3GenerateIndexKey(pParse, pIdx, pLevel->iTabCur, regRecord, 0, 0, 0, 0); sqlite3VdbeAddOp2(v, OP_IdxInsert, pLevel->iIdxCur, regRecord); sqlite3VdbeChangeP5(v, OPFLAG_USESEEKRESULT); if( pPartial ) sqlite3VdbeResolveLabel(v, iContinue); if( pTabItem->viaCoroutine ){ translateColumnToCopy(v, addrTop, pLevel->iTabCur, pTabItem->regResult); sqlite3VdbeAddOp2(v, OP_Goto, 0, addrTop); pTabItem->viaCoroutine = 0; }else{ sqlite3VdbeAddOp2(v, OP_Next, pLevel->iTabCur, addrTop+1); VdbeCoverage(v); } sqlite3VdbeChangeP5(v, SQLITE_STMTSTATUS_AUTOINDEX); sqlite3VdbeJumpHere(v, addrTop); sqlite3ReleaseTempReg(pParse, regRecord); sqlite3ExprCachePop(pParse); /* Jump here when skipping the initialization */ sqlite3VdbeJumpHere(v, addrInit); end_auto_index_create: sqlite3ExprDelete(pParse->db, pPartial); } #endif /* SQLITE_OMIT_AUTOMATIC_INDEX */ #ifndef SQLITE_OMIT_VIRTUALTABLE /* ** Allocate and populate an sqlite3_index_info structure. It is the ** responsibility of the caller to eventually release the structure ** by passing the pointer returned by this function to sqlite3_free(). 
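** The aConstraint[], aOrderBy[] and aConstraintUsage[] arrays are carved out of
** the same allocation as the structure itself, which is why that single
** sqlite3_free() call is sufficient to release everything.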
*/ static sqlite3_index_info *allocateIndexInfo( Parse *pParse, WhereClause *pWC, Bitmask mUnusable, /* Ignore terms with these prereqs */ struct SrcList_item *pSrc, ExprList *pOrderBy ){ int i, j; int nTerm; struct sqlite3_index_constraint *pIdxCons; struct sqlite3_index_orderby *pIdxOrderBy; struct sqlite3_index_constraint_usage *pUsage; WhereTerm *pTerm; int nOrderBy; sqlite3_index_info *pIdxInfo; /* Count the number of possible WHERE clause constraints referring ** to this virtual table */ for(i=nTerm=0, pTerm=pWC->a; inTerm; i++, pTerm++){ if( pTerm->leftCursor != pSrc->iCursor ) continue; if( pTerm->prereqRight & mUnusable ) continue; assert( IsPowerOfTwo(pTerm->eOperator & ~WO_EQUIV) ); testcase( pTerm->eOperator & WO_IN ); testcase( pTerm->eOperator & WO_ISNULL ); testcase( pTerm->eOperator & WO_IS ); testcase( pTerm->eOperator & WO_ALL ); if( (pTerm->eOperator & ~(WO_ISNULL|WO_EQUIV|WO_IS))==0 ) continue; if( pTerm->wtFlags & TERM_VNULL ) continue; nTerm++; } /* If the ORDER BY clause contains only columns in the current ** virtual table then allocate space for the aOrderBy part of ** the sqlite3_index_info structure. */ nOrderBy = 0; if( pOrderBy ){ int n = pOrderBy->nExpr; for(i=0; ia[i].pExpr; if( pExpr->op!=TK_COLUMN || pExpr->iTable!=pSrc->iCursor ) break; } if( i==n){ nOrderBy = n; } } /* Allocate the sqlite3_index_info structure */ pIdxInfo = sqlite3DbMallocZero(pParse->db, sizeof(*pIdxInfo) + (sizeof(*pIdxCons) + sizeof(*pUsage))*nTerm + sizeof(*pIdxOrderBy)*nOrderBy ); if( pIdxInfo==0 ){ sqlite3ErrorMsg(pParse, "out of memory"); return 0; } /* Initialize the structure. The sqlite3_index_info structure contains ** many fields that are declared "const" to prevent xBestIndex from ** changing them. We have to do some funky casting in order to ** initialize those fields. */ pIdxCons = (struct sqlite3_index_constraint*)&pIdxInfo[1]; pIdxOrderBy = (struct sqlite3_index_orderby*)&pIdxCons[nTerm]; pUsage = (struct sqlite3_index_constraint_usage*)&pIdxOrderBy[nOrderBy]; *(int*)&pIdxInfo->nConstraint = nTerm; *(int*)&pIdxInfo->nOrderBy = nOrderBy; *(struct sqlite3_index_constraint**)&pIdxInfo->aConstraint = pIdxCons; *(struct sqlite3_index_orderby**)&pIdxInfo->aOrderBy = pIdxOrderBy; *(struct sqlite3_index_constraint_usage**)&pIdxInfo->aConstraintUsage = pUsage; for(i=j=0, pTerm=pWC->a; inTerm; i++, pTerm++){ u8 op; if( pTerm->leftCursor != pSrc->iCursor ) continue; if( pTerm->prereqRight & mUnusable ) continue; assert( IsPowerOfTwo(pTerm->eOperator & ~WO_EQUIV) ); testcase( pTerm->eOperator & WO_IN ); testcase( pTerm->eOperator & WO_IS ); testcase( pTerm->eOperator & WO_ISNULL ); testcase( pTerm->eOperator & WO_ALL ); if( (pTerm->eOperator & ~(WO_ISNULL|WO_EQUIV|WO_IS))==0 ) continue; if( pTerm->wtFlags & TERM_VNULL ) continue; pIdxCons[j].iColumn = pTerm->u.leftColumn; pIdxCons[j].iTermOffset = i; op = (u8)pTerm->eOperator & WO_ALL; if( op==WO_IN ) op = WO_EQ; pIdxCons[j].op = op; /* The direct assignment in the previous line is possible only because ** the WO_ and SQLITE_INDEX_CONSTRAINT_ codes are identical. The ** following asserts verify this fact. 
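** Note that an IN constraint is reported to xBestIndex as an equality above;
** the core then runs the virtual table scan once for each value on the
** right-hand side of the IN, so the module itself never needs to handle IN.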
*/ assert( WO_EQ==SQLITE_INDEX_CONSTRAINT_EQ ); assert( WO_LT==SQLITE_INDEX_CONSTRAINT_LT ); assert( WO_LE==SQLITE_INDEX_CONSTRAINT_LE ); assert( WO_GT==SQLITE_INDEX_CONSTRAINT_GT ); assert( WO_GE==SQLITE_INDEX_CONSTRAINT_GE ); assert( WO_MATCH==SQLITE_INDEX_CONSTRAINT_MATCH ); assert( pTerm->eOperator & (WO_IN|WO_EQ|WO_LT|WO_LE|WO_GT|WO_GE|WO_MATCH) ); j++; } for(i=0; ia[i].pExpr; pIdxOrderBy[i].iColumn = pExpr->iColumn; pIdxOrderBy[i].desc = pOrderBy->a[i].sortOrder; } return pIdxInfo; } /* ** The table object reference passed as the second argument to this function ** must represent a virtual table. This function invokes the xBestIndex() ** method of the virtual table with the sqlite3_index_info object that ** comes in as the 3rd argument to this function. ** ** If an error occurs, pParse is populated with an error message and a ** non-zero value is returned. Otherwise, 0 is returned and the output ** part of the sqlite3_index_info structure is left populated. ** ** Whether or not an error is returned, it is the responsibility of the ** caller to eventually free p->idxStr if p->needToFreeIdxStr indicates ** that this is required. */ static int vtabBestIndex(Parse *pParse, Table *pTab, sqlite3_index_info *p){ sqlite3_vtab *pVtab = sqlite3GetVTable(pParse->db, pTab)->pVtab; int i; int rc; TRACE_IDX_INPUTS(p); rc = pVtab->pModule->xBestIndex(pVtab, p); TRACE_IDX_OUTPUTS(p); if( rc!=SQLITE_OK ){ if( rc==SQLITE_NOMEM ){ pParse->db->mallocFailed = 1; }else if( !pVtab->zErrMsg ){ sqlite3ErrorMsg(pParse, "%s", sqlite3ErrStr(rc)); }else{ sqlite3ErrorMsg(pParse, "%s", pVtab->zErrMsg); } } sqlite3_free(pVtab->zErrMsg); pVtab->zErrMsg = 0; for(i=0; inConstraint; i++){ if( !p->aConstraint[i].usable && p->aConstraintUsage[i].argvIndex>0 ){ sqlite3ErrorMsg(pParse, "table %s: xBestIndex returned an invalid plan", pTab->zName); } } return pParse->nErr; } #endif /* !defined(SQLITE_OMIT_VIRTUALTABLE) */ #ifdef SQLITE_ENABLE_STAT3_OR_STAT4 /* ** Estimate the location of a particular key among all keys in an ** index. Store the results in aStat as follows: ** ** aStat[0] Est. number of rows less than pRec ** aStat[1] Est. number of rows equal to pRec ** ** Return the index of the sample that is the smallest sample that ** is greater than or equal to pRec. Note that this index is not an index ** into the aSample[] array - it is an index into a virtual set of samples ** based on the contents of aSample[] and the number of fields in record ** pRec. */ static int whereKeyStats( Parse *pParse, /* Database connection */ Index *pIdx, /* Index to consider domain of */ UnpackedRecord *pRec, /* Vector of values to consider */ int roundUp, /* Round up if true. Round down if false */ tRowcnt *aStat /* OUT: stats written here */ ){ IndexSample *aSample = pIdx->aSample; int iCol; /* Index of required stats in anEq[] etc. */ int i; /* Index of first sample >= pRec */ int iSample; /* Smallest sample larger than or equal to pRec */ int iMin = 0; /* Smallest sample not yet tested */ int iTest; /* Next sample to test */ int res; /* Result of comparison operation */ int nField; /* Number of fields in pRec */ tRowcnt iLower = 0; /* anLt[] + anEq[] of largest sample pRec is > */ #ifndef SQLITE_DEBUG UNUSED_PARAMETER( pParse ); #endif assert( pRec!=0 ); assert( pIdx->nSample>0 ); assert( pRec->nField>0 && pRec->nField<=pIdx->nSampleCol ); /* Do a binary search to find the first sample greater than or equal ** to pRec. If pRec contains a single field, the set of samples to search ** is simply the aSample[] array. 
If the samples in aSample[] contain more ** than one fields, all fields following the first are ignored. ** ** If pRec contains N fields, where N is more than one, then as well as the ** samples in aSample[] (truncated to N fields), the search also has to ** consider prefixes of those samples. For example, if the set of samples ** in aSample is: ** ** aSample[0] = (a, 5) ** aSample[1] = (a, 10) ** aSample[2] = (b, 5) ** aSample[3] = (c, 100) ** aSample[4] = (c, 105) ** ** Then the search space should ideally be the samples above and the ** unique prefixes [a], [b] and [c]. But since that is hard to organize, ** the code actually searches this set: ** ** 0: (a) ** 1: (a, 5) ** 2: (a, 10) ** 3: (a, 10) ** 4: (b) ** 5: (b, 5) ** 6: (c) ** 7: (c, 100) ** 8: (c, 105) ** 9: (c, 105) ** ** For each sample in the aSample[] array, N samples are present in the ** effective sample array. In the above, samples 0 and 1 are based on ** sample aSample[0]. Samples 2 and 3 on aSample[1] etc. ** ** Often, sample i of each block of N effective samples has (i+1) fields. ** Except, each sample may be extended to ensure that it is greater than or ** equal to the previous sample in the array. For example, in the above, ** sample 2 is the first sample of a block of N samples, so at first it ** appears that it should be 1 field in size. However, that would make it ** smaller than sample 1, so the binary search would not work. As a result, ** it is extended to two fields. The duplicates that this creates do not ** cause any problems. */ nField = pRec->nField; iCol = 0; iSample = pIdx->nSample * nField; do{ int iSamp; /* Index in aSample[] of test sample */ int n; /* Number of fields in test sample */ iTest = (iMin+iSample)/2; iSamp = iTest / nField; if( iSamp>0 ){ /* The proposed effective sample is a prefix of sample aSample[iSamp]. ** Specifically, the shortest prefix of at least (1 + iTest%nField) ** fields that is greater than the previous effective sample. */ for(n=(iTest % nField) + 1; nnField = n; res = sqlite3VdbeRecordCompare(aSample[iSamp].n, aSample[iSamp].p, pRec); if( res<0 ){ iLower = aSample[iSamp].anLt[n-1] + aSample[iSamp].anEq[n-1]; iMin = iTest+1; }else if( res==0 && ndb->mallocFailed==0 ){ if( res==0 ){ /* If (res==0) is true, then pRec must be equal to sample i. */ assert( inSample ); assert( iCol==nField-1 ); pRec->nField = nField; assert( 0==sqlite3VdbeRecordCompare(aSample[i].n, aSample[i].p, pRec) || pParse->db->mallocFailed ); }else{ /* Unless i==pIdx->nSample, indicating that pRec is larger than ** all samples in the aSample[] array, pRec must be smaller than the ** (iCol+1) field prefix of sample i. */ assert( i<=pIdx->nSample && i>=0 ); pRec->nField = iCol+1; assert( i==pIdx->nSample || sqlite3VdbeRecordCompare(aSample[i].n, aSample[i].p, pRec)>0 || pParse->db->mallocFailed ); /* if i==0 and iCol==0, then record pRec is smaller than all samples ** in the aSample[] array. Otherwise, if (iCol>0) then pRec must ** be greater than or equal to the (iCol) field prefix of sample i. ** If (i>0), then pRec must also be greater than sample (i-1). 
*/ if( iCol>0 ){ pRec->nField = iCol; assert( sqlite3VdbeRecordCompare(aSample[i].n, aSample[i].p, pRec)<=0 || pParse->db->mallocFailed ); } if( i>0 ){ pRec->nField = nField; assert( sqlite3VdbeRecordCompare(aSample[i-1].n, aSample[i-1].p, pRec)<0 || pParse->db->mallocFailed ); } } } #endif /* ifdef SQLITE_DEBUG */ if( res==0 ){ /* Record pRec is equal to sample i */ assert( iCol==nField-1 ); aStat[0] = aSample[i].anLt[iCol]; aStat[1] = aSample[i].anEq[iCol]; }else{ /* At this point, the (iCol+1) field prefix of aSample[i] is the first ** sample that is greater than pRec. Or, if i==pIdx->nSample then pRec ** is larger than all samples in the array. */ tRowcnt iUpper, iGap; if( i>=pIdx->nSample ){ iUpper = sqlite3LogEstToInt(pIdx->aiRowLogEst[0]); }else{ iUpper = aSample[i].anLt[iCol]; } if( iLower>=iUpper ){ iGap = 0; }else{ iGap = iUpper - iLower; } if( roundUp ){ iGap = (iGap*2)/3; }else{ iGap = iGap/3; } aStat[0] = iLower + iGap; aStat[1] = pIdx->aAvgEq[iCol]; } /* Restore the pRec->nField value before returning. */ pRec->nField = nField; return i; } #endif /* SQLITE_ENABLE_STAT3_OR_STAT4 */ /* ** If it is not NULL, pTerm is a term that provides an upper or lower ** bound on a range scan. Without considering pTerm, it is estimated ** that the scan will visit nNew rows. This function returns the number ** estimated to be visited after taking pTerm into account. ** ** If the user explicitly specified a likelihood() value for this term, ** then the return value is the likelihood multiplied by the number of ** input rows. Otherwise, this function assumes that an "IS NOT NULL" term ** has a likelihood of 0.50, and any other term a likelihood of 0.25. */ static LogEst whereRangeAdjust(WhereTerm *pTerm, LogEst nNew){ LogEst nRet = nNew; if( pTerm ){ if( pTerm->truthProb<=0 ){ nRet += pTerm->truthProb; }else if( (pTerm->wtFlags & TERM_VNULL)==0 ){ nRet -= 20; assert( 20==sqlite3LogEst(4) ); } } return nRet; } #ifdef SQLITE_ENABLE_STAT3_OR_STAT4 /* ** This function is called to estimate the number of rows visited by a ** range-scan on a skip-scan index. For example: ** ** CREATE INDEX i1 ON t1(a, b, c); ** SELECT * FROM t1 WHERE a=? AND c BETWEEN ? AND ?; ** ** Value pLoop->nOut is currently set to the estimated number of rows ** visited for scanning (a=? AND b=?). This function reduces that estimate ** by some factor to account for the (c BETWEEN ? AND ?) expression based ** on the stat4 data for the index. this scan will be peformed multiple ** times (once for each (a,b) combination that matches a=?) is dealt with ** by the caller. ** ** It does this by scanning through all stat4 samples, comparing values ** extracted from pLower and pUpper with the corresponding column in each ** sample. If L and U are the number of samples found to be less than or ** equal to the values extracted from pLower and pUpper respectively, and ** N is the total number of samples, the pLoop->nOut value is adjusted ** as follows: ** ** nOut = nOut * ( min(U - L, 1) / N ) ** ** If pLower is NULL, or a value cannot be extracted from the term, L is ** set to zero. If pUpper is NULL, or a value cannot be extracted from it, ** U is set to N. ** ** Normally, this function sets *pbDone to 1 before returning. However, ** if no value can be extracted from either pLower or pUpper (and so the ** estimate of the number of rows delivered remains unchanged), *pbDone ** is left as is. ** ** If an error occurs, an SQLite error code is returned. Otherwise, ** SQLITE_OK. 
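** For example (illustrative numbers): with N==24 samples, if the scan of the
** samples yields L==6 and U==18, then nDiff==12 and the adjustment applied to
** pLoop->nOut is sqlite3LogEst(24)-sqlite3LogEst(12) = 10, which halves the
** prior row estimate.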
*/ static int whereRangeSkipScanEst( Parse *pParse, /* Parsing & code generating context */ WhereTerm *pLower, /* Lower bound on the range. ex: "x>123" Might be NULL */ WhereTerm *pUpper, /* Upper bound on the range. ex: "x<455" Might be NULL */ WhereLoop *pLoop, /* Update the .nOut value of this loop */ int *pbDone /* Set to true if at least one expr. value extracted */ ){ Index *p = pLoop->u.btree.pIndex; int nEq = pLoop->u.btree.nEq; sqlite3 *db = pParse->db; int nLower = -1; int nUpper = p->nSample+1; int rc = SQLITE_OK; int iCol = p->aiColumn[nEq]; u8 aff = iCol>=0 ? p->pTable->aCol[iCol].affinity : SQLITE_AFF_INTEGER; CollSeq *pColl; sqlite3_value *p1 = 0; /* Value extracted from pLower */ sqlite3_value *p2 = 0; /* Value extracted from pUpper */ sqlite3_value *pVal = 0; /* Value extracted from record */ pColl = sqlite3LocateCollSeq(pParse, p->azColl[nEq]); if( pLower ){ rc = sqlite3Stat4ValueFromExpr(pParse, pLower->pExpr->pRight, aff, &p1); nLower = 0; } if( pUpper && rc==SQLITE_OK ){ rc = sqlite3Stat4ValueFromExpr(pParse, pUpper->pExpr->pRight, aff, &p2); nUpper = p2 ? 0 : p->nSample; } if( p1 || p2 ){ int i; int nDiff; for(i=0; rc==SQLITE_OK && inSample; i++){ rc = sqlite3Stat4Column(db, p->aSample[i].p, p->aSample[i].n, nEq, &pVal); if( rc==SQLITE_OK && p1 ){ int res = sqlite3MemCompare(p1, pVal, pColl); if( res>=0 ) nLower++; } if( rc==SQLITE_OK && p2 ){ int res = sqlite3MemCompare(p2, pVal, pColl); if( res>=0 ) nUpper++; } } nDiff = (nUpper - nLower); if( nDiff<=0 ) nDiff = 1; /* If there is both an upper and lower bound specified, and the ** comparisons indicate that they are close together, use the fallback ** method (assume that the scan visits 1/64 of the rows) for estimating ** the number of rows visited. Otherwise, estimate the number of rows ** using the method described in the header comment for this function. */ if( nDiff!=1 || pUpper==0 || pLower==0 ){ int nAdjust = (sqlite3LogEst(p->nSample) - sqlite3LogEst(nDiff)); pLoop->nOut -= nAdjust; *pbDone = 1; WHERETRACE(0x10, ("range skip-scan regions: %u..%u adjust=%d est=%d\n", nLower, nUpper, nAdjust*-1, pLoop->nOut)); } }else{ assert( *pbDone==0 ); } sqlite3ValueFree(p1); sqlite3ValueFree(p2); sqlite3ValueFree(pVal); return rc; } #endif /* SQLITE_ENABLE_STAT3_OR_STAT4 */ /* ** This function is used to estimate the number of rows that will be visited ** by scanning an index for a range of values. The range may have an upper ** bound, a lower bound, or both. The WHERE clause terms that set the upper ** and lower bounds are represented by pLower and pUpper respectively. For ** example, assuming that index p is on t1(a): ** ** ... FROM t1 WHERE a > ? AND a < ? ... ** |_____| |_____| ** | | ** pLower pUpper ** ** If either of the upper or lower bound is not present, then NULL is passed in ** place of the corresponding WhereTerm. ** ** The value in (pBuilder->pNew->u.btree.nEq) is the number of the index ** column subject to the range constraint. Or, equivalently, the number of ** equality constraints optimized by the proposed index scan. For example, ** assuming index p is on t1(a, b), and the SQL query is: ** ** ... FROM t1 WHERE a = ? AND b > ? AND b < ? ... ** ** then nEq is set to 1 (as the range restricted column, b, is the second ** left-most column of the index). Or, if the query is: ** ** ... FROM t1 WHERE a > ? AND a < ? ... ** ** then nEq is set to 0. 
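** (Looking ahead to the default, non-stat4 factors described below: a scan
** expected to visit 1000 rows is cut to roughly 250 rows by a single open-ended
** constraint such as "x>?", and to roughly 16 rows by a two-sided range
** "x>? AND x<?".)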
** ** When this function is called, *pnOut is set to the sqlite3LogEst() of the ** number of rows that the index scan is expected to visit without ** considering the range constraints. If nEq is 0, then *pnOut is the number of ** rows in the index. Assuming no error occurs, *pnOut is adjusted (reduced) ** to account for the range constraints pLower and pUpper. ** ** In the absence of sqlite_stat4 ANALYZE data, or if such data cannot be ** used, a single range inequality reduces the search space by a factor of 4. ** and a pair of constraints (x>? AND x123" Might be NULL */ WhereTerm *pUpper, /* Upper bound on the range. ex: "x<455" Might be NULL */ WhereLoop *pLoop /* Modify the .nOut and maybe .rRun fields */ ){ int rc = SQLITE_OK; int nOut = pLoop->nOut; LogEst nNew; #ifdef SQLITE_ENABLE_STAT3_OR_STAT4 Index *p = pLoop->u.btree.pIndex; int nEq = pLoop->u.btree.nEq; if( p->nSample>0 && nEqnSampleCol ){ if( nEq==pBuilder->nRecValid ){ UnpackedRecord *pRec = pBuilder->pRec; tRowcnt a[2]; u8 aff; /* Variable iLower will be set to the estimate of the number of rows in ** the index that are less than the lower bound of the range query. The ** lower bound being the concatenation of $P and $L, where $P is the ** key-prefix formed by the nEq values matched against the nEq left-most ** columns of the index, and $L is the value in pLower. ** ** Or, if pLower is NULL or $L cannot be extracted from it (because it ** is not a simple variable or literal value), the lower bound of the ** range is $P. Due to a quirk in the way whereKeyStats() works, even ** if $L is available, whereKeyStats() is called for both ($P) and ** ($P:$L) and the larger of the two returned values is used. ** ** Similarly, iUpper is to be set to the estimate of the number of rows ** less than the upper bound of the range query. Where the upper bound ** is either ($P) or ($P:$U). Again, even if $U is available, both values ** of iUpper are requested of whereKeyStats() and the smaller used. ** ** The number of rows between the two bounds is then just iUpper-iLower. */ tRowcnt iLower; /* Rows less than the lower bound */ tRowcnt iUpper; /* Rows less than the upper bound */ int iLwrIdx = -2; /* aSample[] for the lower bound */ int iUprIdx = -1; /* aSample[] for the upper bound */ if( pRec ){ testcase( pRec->nField!=pBuilder->nRecValid ); pRec->nField = pBuilder->nRecValid; } if( nEq==p->nKeyCol ){ aff = SQLITE_AFF_INTEGER; }else{ aff = p->pTable->aCol[p->aiColumn[nEq]].affinity; } /* Determine iLower and iUpper using ($P) only. */ if( nEq==0 ){ iLower = 0; iUpper = p->nRowEst0; }else{ /* Note: this call could be optimized away - since the same values must ** have been requested when testing key $P in whereEqualScanEst(). */ whereKeyStats(pParse, p, pRec, 0, a); iLower = a[0]; iUpper = a[0] + a[1]; } assert( pLower==0 || (pLower->eOperator & (WO_GT|WO_GE))!=0 ); assert( pUpper==0 || (pUpper->eOperator & (WO_LT|WO_LE))!=0 ); assert( p->aSortOrder!=0 ); if( p->aSortOrder[nEq] ){ /* The roles of pLower and pUpper are swapped for a DESC index */ SWAP(WhereTerm*, pLower, pUpper); } /* If possible, improve on the iLower estimate using ($P:$L). */ if( pLower ){ int bOk; /* True if value is extracted from pExpr */ Expr *pExpr = pLower->pExpr->pRight; rc = sqlite3Stat4ProbeSetValue(pParse, p, &pRec, pExpr, aff, nEq, &bOk); if( rc==SQLITE_OK && bOk ){ tRowcnt iNew; iLwrIdx = whereKeyStats(pParse, p, pRec, 0, a); iNew = a[0] + ((pLower->eOperator & (WO_GT|WO_LE)) ? 
a[1] : 0); if( iNew>iLower ) iLower = iNew; nOut--; pLower = 0; } } /* If possible, improve on the iUpper estimate using ($P:$U). */ if( pUpper ){ int bOk; /* True if value is extracted from pExpr */ Expr *pExpr = pUpper->pExpr->pRight; rc = sqlite3Stat4ProbeSetValue(pParse, p, &pRec, pExpr, aff, nEq, &bOk); if( rc==SQLITE_OK && bOk ){ tRowcnt iNew; iUprIdx = whereKeyStats(pParse, p, pRec, 1, a); iNew = a[0] + ((pUpper->eOperator & (WO_GT|WO_LE)) ? a[1] : 0); if( iNewpRec = pRec; if( rc==SQLITE_OK ){ if( iUpper>iLower ){ nNew = sqlite3LogEst(iUpper - iLower); /* TUNING: If both iUpper and iLower are derived from the same ** sample, then assume they are 4x more selective. This brings ** the estimated selectivity more in line with what it would be ** if estimated without the use of STAT3/4 tables. */ if( iLwrIdx==iUprIdx ) nNew -= 20; assert( 20==sqlite3LogEst(4) ); }else{ nNew = 10; assert( 10==sqlite3LogEst(2) ); } if( nNewwtFlags & TERM_VNULL)==0 ); nNew = whereRangeAdjust(pLower, nOut); nNew = whereRangeAdjust(pUpper, nNew); /* TUNING: If there is both an upper and lower limit and neither limit ** has an application-defined likelihood(), assume the range is ** reduced by an additional 75%. This means that, by default, an open-ended ** range query (e.g. col > ?) is assumed to match 1/4 of the rows in the ** index. While a closed range (e.g. col BETWEEN ? AND ?) is estimated to ** match 1/64 of the index. */ if( pLower && pLower->truthProb>0 && pUpper && pUpper->truthProb>0 ){ nNew -= 20; } nOut -= (pLower!=0) + (pUpper!=0); if( nNew<10 ) nNew = 10; if( nNewnOut>nOut ){ WHERETRACE(0x10,("Range scan lowers nOut from %d to %d\n", pLoop->nOut, nOut)); } #endif pLoop->nOut = (LogEst)nOut; return rc; } #ifdef SQLITE_ENABLE_STAT3_OR_STAT4 /* ** Estimate the number of rows that will be returned based on ** an equality constraint x=VALUE and where that VALUE occurs in ** the histogram data. This only works when x is the left-most ** column of an index and sqlite_stat3 histogram data is available ** for that index. When pExpr==NULL that means the constraint is ** "x IS NULL" instead of "x=VALUE". ** ** Write the estimated row count into *pnRow and return SQLITE_OK. ** If unable to make an estimate, leave *pnRow unchanged and return ** non-zero. ** ** This routine can fail if it is unable to load a collating sequence ** required for string comparison, or if unable to allocate memory ** for a UTF conversion required for comparison. The error is stored ** in the pParse structure. */ static int whereEqualScanEst( Parse *pParse, /* Parsing & code generating context */ WhereLoopBuilder *pBuilder, Expr *pExpr, /* Expression for VALUE in the x=VALUE constraint */ tRowcnt *pnRow /* Write the revised row estimate here */ ){ Index *p = pBuilder->pNew->u.btree.pIndex; int nEq = pBuilder->pNew->u.btree.nEq; UnpackedRecord *pRec = pBuilder->pRec; u8 aff; /* Column affinity */ int rc; /* Subfunction return code */ tRowcnt a[2]; /* Statistics */ int bOk; assert( nEq>=1 ); assert( nEq<=p->nColumn ); assert( p->aSample!=0 ); assert( p->nSample>0 ); assert( pBuilder->nRecValidnRecValid<(nEq-1) ){ return SQLITE_NOTFOUND; } /* This is an optimization only. The call to sqlite3Stat4ProbeSetValue() ** below would return the same value. 
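** (When the equality prefix already spans every column of the index, each key
** can match at most one entry, so the estimate below is simply one row.)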
*/ if( nEq>=p->nColumn ){ *pnRow = 1; return SQLITE_OK; } aff = p->pTable->aCol[p->aiColumn[nEq-1]].affinity; rc = sqlite3Stat4ProbeSetValue(pParse, p, &pRec, pExpr, aff, nEq-1, &bOk); pBuilder->pRec = pRec; if( rc!=SQLITE_OK ) return rc; if( bOk==0 ) return SQLITE_NOTFOUND; pBuilder->nRecValid = nEq; whereKeyStats(pParse, p, pRec, 0, a); WHERETRACE(0x10,("equality scan regions: %d\n", (int)a[1])); *pnRow = a[1]; return rc; } #endif /* SQLITE_ENABLE_STAT3_OR_STAT4 */ #ifdef SQLITE_ENABLE_STAT3_OR_STAT4 /* ** Estimate the number of rows that will be returned based on ** an IN constraint where the right-hand side of the IN operator ** is a list of values. Example: ** ** WHERE x IN (1,2,3,4) ** ** Write the estimated row count into *pnRow and return SQLITE_OK. ** If unable to make an estimate, leave *pnRow unchanged and return ** non-zero. ** ** This routine can fail if it is unable to load a collating sequence ** required for string comparison, or if unable to allocate memory ** for a UTF conversion required for comparison. The error is stored ** in the pParse structure. */ static int whereInScanEst( Parse *pParse, /* Parsing & code generating context */ WhereLoopBuilder *pBuilder, ExprList *pList, /* The value list on the RHS of "x IN (v1,v2,v3,...)" */ tRowcnt *pnRow /* Write the revised row estimate here */ ){ Index *p = pBuilder->pNew->u.btree.pIndex; i64 nRow0 = sqlite3LogEstToInt(p->aiRowLogEst[0]); int nRecValid = pBuilder->nRecValid; int rc = SQLITE_OK; /* Subfunction return code */ tRowcnt nEst; /* Number of rows for a single term */ tRowcnt nRowEst = 0; /* New estimate of the number of rows */ int i; /* Loop counter */ assert( p->aSample!=0 ); for(i=0; rc==SQLITE_OK && inExpr; i++){ nEst = nRow0; rc = whereEqualScanEst(pParse, pBuilder, pList->a[i].pExpr, &nEst); nRowEst += nEst; pBuilder->nRecValid = nRecValid; } if( rc==SQLITE_OK ){ if( nRowEst > nRow0 ) nRowEst = nRow0; *pnRow = nRowEst; WHERETRACE(0x10,("IN row estimate: est=%d\n", nRowEst)); } assert( pBuilder->nRecValid==nRecValid ); return rc; } #endif /* SQLITE_ENABLE_STAT3_OR_STAT4 */ #ifdef WHERETRACE_ENABLED /* ** Print the content of a WhereTerm object */ static void whereTermPrint(WhereTerm *pTerm, int iTerm){ if( pTerm==0 ){ sqlite3DebugPrintf("TERM-%-3d NULL\n", iTerm); }else{ char zType[4]; memcpy(zType, "...", 4); if( pTerm->wtFlags & TERM_VIRTUAL ) zType[0] = 'V'; if( pTerm->eOperator & WO_EQUIV ) zType[1] = 'E'; if( ExprHasProperty(pTerm->pExpr, EP_FromJoin) ) zType[2] = 'L'; sqlite3DebugPrintf( "TERM-%-3d %p %s cursor=%-3d prob=%-3d op=0x%03x wtFlags=0x%04x\n", iTerm, pTerm, zType, pTerm->leftCursor, pTerm->truthProb, pTerm->eOperator, pTerm->wtFlags); sqlite3TreeViewExpr(0, pTerm->pExpr, 0); } } #endif #ifdef WHERETRACE_ENABLED /* ** Print a WhereLoop object for debugging purposes */ static void whereLoopPrint(WhereLoop *p, WhereClause *pWC){ WhereInfo *pWInfo = pWC->pWInfo; int nb = 1+(pWInfo->pTabList->nSrc+7)/8; struct SrcList_item *pItem = pWInfo->pTabList->a + p->iTab; Table *pTab = pItem->pTab; sqlite3DebugPrintf("%c%2d.%0*llx.%0*llx", p->cId, p->iTab, nb, p->maskSelf, nb, p->prereq); sqlite3DebugPrintf(" %12s", pItem->zAlias ? 
pItem->zAlias : pTab->zName); if( (p->wsFlags & WHERE_VIRTUALTABLE)==0 ){ const char *zName; if( p->u.btree.pIndex && (zName = p->u.btree.pIndex->zName)!=0 ){ if( strncmp(zName, "sqlite_autoindex_", 17)==0 ){ int i = sqlite3Strlen30(zName) - 1; while( zName[i]!='_' ) i--; zName += i; } sqlite3DebugPrintf(".%-16s %2d", zName, p->u.btree.nEq); }else{ sqlite3DebugPrintf("%20s",""); } }else{ char *z; if( p->u.vtab.idxStr ){ z = sqlite3_mprintf("(%d,\"%s\",%x)", p->u.vtab.idxNum, p->u.vtab.idxStr, p->u.vtab.omitMask); }else{ z = sqlite3_mprintf("(%d,%x)", p->u.vtab.idxNum, p->u.vtab.omitMask); } sqlite3DebugPrintf(" %-19s", z); sqlite3_free(z); } if( p->wsFlags & WHERE_SKIPSCAN ){ sqlite3DebugPrintf(" f %05x %d-%d", p->wsFlags, p->nLTerm,p->nSkip); }else{ sqlite3DebugPrintf(" f %05x N %d", p->wsFlags, p->nLTerm); } sqlite3DebugPrintf(" cost %d,%d,%d\n", p->rSetup, p->rRun, p->nOut); if( p->nLTerm && (sqlite3WhereTrace & 0x100)!=0 ){ int i; for(i=0; inLTerm; i++){ whereTermPrint(p->aLTerm[i], i); } } } #endif /* ** Convert bulk memory into a valid WhereLoop that can be passed ** to whereLoopClear harmlessly. */ static void whereLoopInit(WhereLoop *p){ p->aLTerm = p->aLTermSpace; p->nLTerm = 0; p->nLSlot = ArraySize(p->aLTermSpace); p->wsFlags = 0; } /* ** Clear the WhereLoop.u union. Leave WhereLoop.pLTerm intact. */ static void whereLoopClearUnion(sqlite3 *db, WhereLoop *p){ if( p->wsFlags & (WHERE_VIRTUALTABLE|WHERE_AUTO_INDEX) ){ if( (p->wsFlags & WHERE_VIRTUALTABLE)!=0 && p->u.vtab.needFree ){ sqlite3_free(p->u.vtab.idxStr); p->u.vtab.needFree = 0; p->u.vtab.idxStr = 0; }else if( (p->wsFlags & WHERE_AUTO_INDEX)!=0 && p->u.btree.pIndex!=0 ){ sqlite3DbFree(db, p->u.btree.pIndex->zColAff); sqlite3DbFree(db, p->u.btree.pIndex); p->u.btree.pIndex = 0; } } } /* ** Deallocate internal memory used by a WhereLoop object */ static void whereLoopClear(sqlite3 *db, WhereLoop *p){ if( p->aLTerm!=p->aLTermSpace ) sqlite3DbFree(db, p->aLTerm); whereLoopClearUnion(db, p); whereLoopInit(p); } /* ** Increase the memory allocation for pLoop->aLTerm[] to be at least n. */ static int whereLoopResize(sqlite3 *db, WhereLoop *p, int n){ WhereTerm **paNew; if( p->nLSlot>=n ) return SQLITE_OK; n = (n+7)&~7; paNew = sqlite3DbMallocRaw(db, sizeof(p->aLTerm[0])*n); if( paNew==0 ) return SQLITE_NOMEM; memcpy(paNew, p->aLTerm, sizeof(p->aLTerm[0])*p->nLSlot); if( p->aLTerm!=p->aLTermSpace ) sqlite3DbFree(db, p->aLTerm); p->aLTerm = paNew; p->nLSlot = n; return SQLITE_OK; } /* ** Transfer content from the second pLoop into the first. 
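** Ownership of any virtual-table idxStr or automatic-index Index object moves
** with the transfer; the corresponding fields of the source loop are cleared
** below so that the same memory is never freed twice.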
*/ static int whereLoopXfer(sqlite3 *db, WhereLoop *pTo, WhereLoop *pFrom){ whereLoopClearUnion(db, pTo); if( whereLoopResize(db, pTo, pFrom->nLTerm) ){ memset(&pTo->u, 0, sizeof(pTo->u)); return SQLITE_NOMEM; } memcpy(pTo, pFrom, WHERE_LOOP_XFER_SZ); memcpy(pTo->aLTerm, pFrom->aLTerm, pTo->nLTerm*sizeof(pTo->aLTerm[0])); if( pFrom->wsFlags & WHERE_VIRTUALTABLE ){ pFrom->u.vtab.needFree = 0; }else if( (pFrom->wsFlags & WHERE_AUTO_INDEX)!=0 ){ pFrom->u.btree.pIndex = 0; } return SQLITE_OK; } /* ** Delete a WhereLoop object */ static void whereLoopDelete(sqlite3 *db, WhereLoop *p){ whereLoopClear(db, p); sqlite3DbFree(db, p); } /* ** Free a WhereInfo structure */ static void whereInfoFree(sqlite3 *db, WhereInfo *pWInfo){ if( ALWAYS(pWInfo) ){ int i; for(i=0; inLevel; i++){ WhereLevel *pLevel = &pWInfo->a[i]; if( pLevel->pWLoop && (pLevel->pWLoop->wsFlags & WHERE_IN_ABLE) ){ sqlite3DbFree(db, pLevel->u.in.aInLoop); } } sqlite3WhereClauseClear(&pWInfo->sWC); while( pWInfo->pLoops ){ WhereLoop *p = pWInfo->pLoops; pWInfo->pLoops = p->pNextLoop; whereLoopDelete(db, p); } sqlite3DbFree(db, pWInfo); } } /* ** Return TRUE if all of the following are true: ** ** (1) X has the same or lower cost that Y ** (2) X is a proper subset of Y ** (3) X skips at least as many columns as Y ** ** By "proper subset" we mean that X uses fewer WHERE clause terms ** than Y and that every WHERE clause term used by X is also used ** by Y. ** ** If X is a proper subset of Y then Y is a better choice and ought ** to have a lower cost. This routine returns TRUE when that cost ** relationship is inverted and needs to be adjusted. The third rule ** was added because if X uses skip-scan less than Y it still might ** deserve a lower cost even if it is a proper subset of Y. */ static int whereLoopCheaperProperSubset( const WhereLoop *pX, /* First WhereLoop to compare */ const WhereLoop *pY /* Compare against this WhereLoop */ ){ int i, j; if( pX->nLTerm-pX->nSkip >= pY->nLTerm-pY->nSkip ){ return 0; /* X is not a subset of Y */ } if( pY->nSkip > pX->nSkip ) return 0; if( pX->rRun >= pY->rRun ){ if( pX->rRun > pY->rRun ) return 0; /* X costs more than Y */ if( pX->nOut > pY->nOut ) return 0; /* X costs more than Y */ } for(i=pX->nLTerm-1; i>=0; i--){ if( pX->aLTerm[i]==0 ) continue; for(j=pY->nLTerm-1; j>=0; j--){ if( pY->aLTerm[j]==pX->aLTerm[i] ) break; } if( j<0 ) return 0; /* X not a subset of Y since term X[i] not used by Y */ } return 1; /* All conditions meet */ } /* ** Try to adjust the cost of WhereLoop pTemplate upwards or downwards so ** that: ** ** (1) pTemplate costs less than any other WhereLoops that are a proper ** subset of pTemplate ** ** (2) pTemplate costs more than any other WhereLoops for which pTemplate ** is a proper subset. ** ** To say "WhereLoop X is a proper subset of Y" means that X uses fewer ** WHERE clause terms than Y and that every WHERE clause term used by X is ** also used by Y. */ static void whereLoopAdjustCost(const WhereLoop *p, WhereLoop *pTemplate){ if( (pTemplate->wsFlags & WHERE_INDEXED)==0 ) return; for(; p; p=p->pNextLoop){ if( p->iTab!=pTemplate->iTab ) continue; if( (p->wsFlags & WHERE_INDEXED)==0 ) continue; if( whereLoopCheaperProperSubset(p, pTemplate) ){ /* Adjust pTemplate cost downward so that it is cheaper than its ** subset p. 
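** For example, if p uses only the term "a=?" while pTemplate uses both "a=?"
** and "b=?", then pTemplate filters at least as well as p and is given a cost
** just below p's here.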
*/ WHERETRACE(0x80,("subset cost adjustment %d,%d to %d,%d\n", pTemplate->rRun, pTemplate->nOut, p->rRun, p->nOut-1)); pTemplate->rRun = p->rRun; pTemplate->nOut = p->nOut - 1; }else if( whereLoopCheaperProperSubset(pTemplate, p) ){ /* Adjust pTemplate cost upward so that it is costlier than p since ** pTemplate is a proper subset of p */ WHERETRACE(0x80,("subset cost adjustment %d,%d to %d,%d\n", pTemplate->rRun, pTemplate->nOut, p->rRun, p->nOut+1)); pTemplate->rRun = p->rRun; pTemplate->nOut = p->nOut + 1; } } } /* ** Search the list of WhereLoops in *ppPrev looking for one that can be ** supplanted by pTemplate. ** ** Return NULL if the WhereLoop list contains an entry that can supplant ** pTemplate, in other words if pTemplate does not belong on the list. ** ** If pX is a WhereLoop that pTemplate can supplant, then return the ** link that points to pX. ** ** If pTemplate cannot supplant any existing element of the list but needs ** to be added to the list, then return a pointer to the tail of the list. */ static WhereLoop **whereLoopFindLesser( WhereLoop **ppPrev, const WhereLoop *pTemplate ){ WhereLoop *p; for(p=(*ppPrev); p; ppPrev=&p->pNextLoop, p=*ppPrev){ if( p->iTab!=pTemplate->iTab || p->iSortIdx!=pTemplate->iSortIdx ){ /* If either the iTab or iSortIdx values for two WhereLoop are different ** then those WhereLoops need to be considered separately. Neither is ** a candidate to replace the other. */ continue; } /* In the current implementation, the rSetup value is either zero ** or the cost of building an automatic index (NlogN) and the NlogN ** is the same for compatible WhereLoops. */ assert( p->rSetup==0 || pTemplate->rSetup==0 || p->rSetup==pTemplate->rSetup ); /* whereLoopAddBtree() always generates and inserts the automatic index ** case first. Hence compatible candidate WhereLoops never have a larger ** rSetup. Call this SETUP-INVARIANT */ assert( p->rSetup>=pTemplate->rSetup ); /* Any loop using an appliation-defined index (or PRIMARY KEY or ** UNIQUE constraint) with one or more == constraints is better ** than an automatic index. Unless it is a skip-scan. */ if( (p->wsFlags & WHERE_AUTO_INDEX)!=0 && (pTemplate->nSkip)==0 && (pTemplate->wsFlags & WHERE_INDEXED)!=0 && (pTemplate->wsFlags & WHERE_COLUMN_EQ)!=0 && (p->prereq & pTemplate->prereq)==pTemplate->prereq ){ break; } /* If existing WhereLoop p is better than pTemplate, pTemplate can be ** discarded. WhereLoop p is better if: ** (1) p has no more dependencies than pTemplate, and ** (2) p has an equal or lower cost than pTemplate */ if( (p->prereq & pTemplate->prereq)==p->prereq /* (1) */ && p->rSetup<=pTemplate->rSetup /* (2a) */ && p->rRun<=pTemplate->rRun /* (2b) */ && p->nOut<=pTemplate->nOut /* (2c) */ ){ return 0; /* Discard pTemplate */ } /* If pTemplate is always better than p, then cause p to be overwritten ** with pTemplate. pTemplate is better than p if: ** (1) pTemplate has no more dependences than p, and ** (2) pTemplate has an equal or lower cost than p. */ if( (p->prereq & pTemplate->prereq)==pTemplate->prereq /* (1) */ && p->rRun>=pTemplate->rRun /* (2a) */ && p->nOut>=pTemplate->nOut /* (2b) */ ){ assert( p->rSetup>=pTemplate->rSetup ); /* SETUP-INVARIANT above */ break; /* Cause p to be overwritten by pTemplate */ } } return ppPrev; } /* ** Insert or replace a WhereLoop entry using the template supplied. ** ** An existing WhereLoop entry might be overwritten if the new template ** is better and has fewer dependencies. 
Or the template will be ignored ** and no insert will occur if an existing WhereLoop is faster and has ** fewer dependencies than the template. Otherwise a new WhereLoop is ** added based on the template. ** ** If pBuilder->pOrSet is not NULL then we care about only the ** prerequisites and rRun and nOut costs of the N best loops. That ** information is gathered in the pBuilder->pOrSet object. This special ** processing mode is used only for OR clause processing. ** ** When accumulating multiple loops (when pBuilder->pOrSet is NULL) we ** still might overwrite similar loops with the new template if the ** new template is better. Loops may be overwritten if the following ** conditions are met: ** ** (1) They have the same iTab. ** (2) They have the same iSortIdx. ** (3) The template has same or fewer dependencies than the current loop ** (4) The template has the same or lower cost than the current loop */ static int whereLoopInsert(WhereLoopBuilder *pBuilder, WhereLoop *pTemplate){ WhereLoop **ppPrev, *p; WhereInfo *pWInfo = pBuilder->pWInfo; sqlite3 *db = pWInfo->pParse->db; /* If pBuilder->pOrSet is defined, then only keep track of the costs ** and prereqs. */ if( pBuilder->pOrSet!=0 ){ #if WHERETRACE_ENABLED u16 n = pBuilder->pOrSet->n; int x = #endif whereOrInsert(pBuilder->pOrSet, pTemplate->prereq, pTemplate->rRun, pTemplate->nOut); #if WHERETRACE_ENABLED /* 0x8 */ if( sqlite3WhereTrace & 0x8 ){ sqlite3DebugPrintf(x?" or-%d: ":" or-X: ", n); whereLoopPrint(pTemplate, pBuilder->pWC); } #endif return SQLITE_OK; } /* Look for an existing WhereLoop to replace with pTemplate */ whereLoopAdjustCost(pWInfo->pLoops, pTemplate); ppPrev = whereLoopFindLesser(&pWInfo->pLoops, pTemplate); if( ppPrev==0 ){ /* There already exists a WhereLoop on the list that is better ** than pTemplate, so just ignore pTemplate */ #if WHERETRACE_ENABLED /* 0x8 */ if( sqlite3WhereTrace & 0x8 ){ sqlite3DebugPrintf(" skip: "); whereLoopPrint(pTemplate, pBuilder->pWC); } #endif return SQLITE_OK; }else{ p = *ppPrev; } /* If we reach this point it means that either p[] should be overwritten ** with pTemplate[] if p[] exists, or if p==NULL then allocate a new ** WhereLoop and insert it. */ #if WHERETRACE_ENABLED /* 0x8 */ if( sqlite3WhereTrace & 0x8 ){ if( p!=0 ){ sqlite3DebugPrintf("replace: "); whereLoopPrint(p, pBuilder->pWC); } sqlite3DebugPrintf(" add: "); whereLoopPrint(pTemplate, pBuilder->pWC); } #endif if( p==0 ){ /* Allocate a new WhereLoop to add to the end of the list */ *ppPrev = p = sqlite3DbMallocRaw(db, sizeof(WhereLoop)); if( p==0 ) return SQLITE_NOMEM; whereLoopInit(p); p->pNextLoop = 0; }else{ /* We will be overwriting WhereLoop p[]. But before we do, first ** go through the rest of the list and delete any other entries besides ** p[] that are also supplated by pTemplate */ WhereLoop **ppTail = &p->pNextLoop; WhereLoop *pToDel; while( *ppTail ){ ppTail = whereLoopFindLesser(ppTail, pTemplate); if( ppTail==0 ) break; pToDel = *ppTail; if( pToDel==0 ) break; *ppTail = pToDel->pNextLoop; #if WHERETRACE_ENABLED /* 0x8 */ if( sqlite3WhereTrace & 0x8 ){ sqlite3DebugPrintf(" delete: "); whereLoopPrint(pToDel, pBuilder->pWC); } #endif whereLoopDelete(db, pToDel); } } whereLoopXfer(db, p, pTemplate); if( (p->wsFlags & WHERE_VIRTUALTABLE)==0 ){ Index *pIndex = p->u.btree.pIndex; if( pIndex && pIndex->tnum==0 ){ p->u.btree.pIndex = 0; } } return SQLITE_OK; } /* ** Adjust the WhereLoop.nOut value downward to account for terms of the ** WHERE clause that reference the loop but which are not used by an ** index. 
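** For example, using the heuristics described below, a loop estimated to return
** 100 rows that is referenced by two additional WHERE terms the index cannot use
** (neither carrying a likelihood() annotation) has its estimate nudged down by
** two LogEst units, to roughly 88 rows (93.75% applied twice).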
* ** For every WHERE clause term that is not used by the index ** and which has a truth probability assigned by one of the likelihood(), ** likely(), or unlikely() SQL functions, reduce the estimated number ** of output rows by the probability specified. ** ** TUNING: For every WHERE clause term that is not used by the index ** and which does not have an assigned truth probability, heuristics ** described below are used to try to estimate the truth probability. ** TODO --> Perhaps this is something that could be improved by better ** table statistics. ** ** Heuristic 1: Estimate the truth probability as 93.75%. The 93.75% ** value corresponds to -1 in LogEst notation, so this means decrement ** the WhereLoop.nOut field for every such WHERE clause term. ** ** Heuristic 2: If there exists one or more WHERE clause terms of the ** form "x==EXPR" and EXPR is not a constant 0 or 1, then make sure the ** final output row estimate is no greater than 1/4 of the total number ** of rows in the table. In other words, assume that x==EXPR will filter ** out at least 3 out of 4 rows. If EXPR is -1 or 0 or 1, then maybe the ** "x" column is boolean or else -1 or 0 or 1 is a common default value ** on the "x" column and so in that case only cap the output row estimate ** at 1/2 instead of 1/4. */ static void whereLoopOutputAdjust( WhereClause *pWC, /* The WHERE clause */ WhereLoop *pLoop, /* The loop to adjust downward */ LogEst nRow /* Number of rows in the entire table */ ){ WhereTerm *pTerm, *pX; Bitmask notAllowed = ~(pLoop->prereq|pLoop->maskSelf); int i, j, k; LogEst iReduce = 0; /* pLoop->nOut should not exceed nRow-iReduce */ assert( (pLoop->wsFlags & WHERE_AUTO_INDEX)==0 ); for(i=pWC->nTerm, pTerm=pWC->a; i>0; i--, pTerm++){ if( (pTerm->wtFlags & TERM_VIRTUAL)!=0 ) break; if( (pTerm->prereqAll & pLoop->maskSelf)==0 ) continue; if( (pTerm->prereqAll & notAllowed)!=0 ) continue; for(j=pLoop->nLTerm-1; j>=0; j--){ pX = pLoop->aLTerm[j]; if( pX==0 ) continue; if( pX==pTerm ) break; if( pX->iParent>=0 && (&pWC->a[pX->iParent])==pTerm ) break; } if( j<0 ){ if( pTerm->truthProb<=0 ){ /* If a truth probability is specified using the likelihood() hints, ** then use the probability provided by the application. */ pLoop->nOut += pTerm->truthProb; }else{ /* In the absence of explicit truth probabilities, use heuristics to ** guess a reasonable truth probability. */ pLoop->nOut--; if( pTerm->eOperator&(WO_EQ|WO_IS) ){ Expr *pRight = pTerm->pExpr->pRight; testcase( pTerm->pExpr->op==TK_IS ); if( sqlite3ExprIsInteger(pRight, &k) && k>=(-1) && k<=1 ){ k = 10; }else{ k = 20; } if( iReducenOut > nRow-iReduce ) pLoop->nOut = nRow - iReduce; } /* ** Adjust the cost C by the costMult facter T. This only occurs if ** compiled with -DSQLITE_ENABLE_COSTMULT */ #ifdef SQLITE_ENABLE_COSTMULT # define ApplyCostMultiplier(C,T) C += T #else # define ApplyCostMultiplier(C,T) #endif /* ** We have so far matched pBuilder->pNew->u.btree.nEq terms of the ** index pIndex. Try to match one more. ** ** When this function is called, pBuilder->pNew->nOut contains the ** number of rows expected to be visited by filtering using the nEq ** terms only. If it is modified, this value is restored before this ** function returns. ** ** If pProbe->tnum==0, that means pIndex is a fake index used for the ** INTEGER PRIMARY KEY. 
*/ static int whereLoopAddBtreeIndex( WhereLoopBuilder *pBuilder, /* The WhereLoop factory */ struct SrcList_item *pSrc, /* FROM clause term being analyzed */ Index *pProbe, /* An index on pSrc */ LogEst nInMul /* log(Number of iterations due to IN) */ ){ WhereInfo *pWInfo = pBuilder->pWInfo; /* WHERE analyse context */ Parse *pParse = pWInfo->pParse; /* Parsing context */ sqlite3 *db = pParse->db; /* Database connection malloc context */ WhereLoop *pNew; /* Template WhereLoop under construction */ WhereTerm *pTerm; /* A WhereTerm under consideration */ int opMask; /* Valid operators for constraints */ WhereScan scan; /* Iterator for WHERE terms */ Bitmask saved_prereq; /* Original value of pNew->prereq */ u16 saved_nLTerm; /* Original value of pNew->nLTerm */ u16 saved_nEq; /* Original value of pNew->u.btree.nEq */ u16 saved_nSkip; /* Original value of pNew->nSkip */ u32 saved_wsFlags; /* Original value of pNew->wsFlags */ LogEst saved_nOut; /* Original value of pNew->nOut */ int iCol; /* Index of the column in the table */ int rc = SQLITE_OK; /* Return code */ LogEst rSize; /* Number of rows in the table */ LogEst rLogSize; /* Logarithm of table size */ WhereTerm *pTop = 0, *pBtm = 0; /* Top and bottom range constraints */ pNew = pBuilder->pNew; if( db->mallocFailed ) return SQLITE_NOMEM; assert( (pNew->wsFlags & WHERE_VIRTUALTABLE)==0 ); assert( (pNew->wsFlags & WHERE_TOP_LIMIT)==0 ); if( pNew->wsFlags & WHERE_BTM_LIMIT ){ opMask = WO_LT|WO_LE; }else if( /*pProbe->tnum<=0 ||*/ (pSrc->jointype & JT_LEFT)!=0 ){ opMask = WO_EQ|WO_IN|WO_GT|WO_GE|WO_LT|WO_LE; }else{ opMask = WO_EQ|WO_IN|WO_GT|WO_GE|WO_LT|WO_LE|WO_ISNULL|WO_IS; } if( pProbe->bUnordered ) opMask &= ~(WO_GT|WO_GE|WO_LT|WO_LE); assert( pNew->u.btree.nEqnColumn ); iCol = pProbe->aiColumn[pNew->u.btree.nEq]; pTerm = whereScanInit(&scan, pBuilder->pWC, pSrc->iCursor, iCol, opMask, pProbe); saved_nEq = pNew->u.btree.nEq; saved_nSkip = pNew->nSkip; saved_nLTerm = pNew->nLTerm; saved_wsFlags = pNew->wsFlags; saved_prereq = pNew->prereq; saved_nOut = pNew->nOut; pNew->rSetup = 0; rSize = pProbe->aiRowLogEst[0]; rLogSize = estLog(rSize); for(; rc==SQLITE_OK && pTerm!=0; pTerm = whereScanNext(&scan)){ u16 eOp = pTerm->eOperator; /* Shorthand for pTerm->eOperator */ LogEst rCostIdx; LogEst nOutUnadjusted; /* nOut before IN() and WHERE adjustments */ int nIn = 0; #ifdef SQLITE_ENABLE_STAT3_OR_STAT4 int nRecValid = pBuilder->nRecValid; #endif if( (eOp==WO_ISNULL || (pTerm->wtFlags&TERM_VNULL)!=0) && (iCol<0 || pSrc->pTab->aCol[iCol].notNull) ){ continue; /* ignore IS [NOT] NULL constraints on NOT NULL columns */ } if( pTerm->prereqRight & pNew->maskSelf ) continue; /* Do not allow the upper bound of a LIKE optimization range constraint ** to mix with a lower range bound from some other source */ if( pTerm->wtFlags & TERM_LIKEOPT && pTerm->eOperator==WO_LT ) continue; pNew->wsFlags = saved_wsFlags; pNew->u.btree.nEq = saved_nEq; pNew->nLTerm = saved_nLTerm; if( whereLoopResize(db, pNew, pNew->nLTerm+1) ) break; /* OOM */ pNew->aLTerm[pNew->nLTerm++] = pTerm; pNew->prereq = (saved_prereq | pTerm->prereqRight) & ~pNew->maskSelf; assert( nInMul==0 || (pNew->wsFlags & WHERE_COLUMN_NULL)!=0 || (pNew->wsFlags & WHERE_COLUMN_IN)!=0 || (pNew->wsFlags & WHERE_SKIPSCAN)!=0 ); if( eOp & WO_IN ){ Expr *pExpr = pTerm->pExpr; pNew->wsFlags |= WHERE_COLUMN_IN; if( ExprHasProperty(pExpr, EP_xIsSelect) ){ /* "x IN (SELECT ...)": TUNING: the SELECT returns 25 rows */ nIn = 46; assert( 46==sqlite3LogEst(25) ); }else if( ALWAYS(pExpr->x.pList && 
pExpr->x.pList->nExpr) ){ /* "x IN (value, value, ...)" */ nIn = sqlite3LogEst(pExpr->x.pList->nExpr); } assert( nIn>0 ); /* RHS always has 2 or more terms... The parser ** changes "x IN (?)" into "x=?". */ }else if( eOp & (WO_EQ|WO_IS) ){ pNew->wsFlags |= WHERE_COLUMN_EQ; if( iCol<0 || (nInMul==0 && pNew->u.btree.nEq==pProbe->nKeyCol-1) ){ if( iCol>=0 && pProbe->uniqNotNull==0 ){ pNew->wsFlags |= WHERE_UNQ_WANTED; }else{ pNew->wsFlags |= WHERE_ONEROW; } } }else if( eOp & WO_ISNULL ){ pNew->wsFlags |= WHERE_COLUMN_NULL; }else if( eOp & (WO_GT|WO_GE) ){ testcase( eOp & WO_GT ); testcase( eOp & WO_GE ); pNew->wsFlags |= WHERE_COLUMN_RANGE|WHERE_BTM_LIMIT; pBtm = pTerm; pTop = 0; if( pTerm->wtFlags & TERM_LIKEOPT ){ /* Range contraints that come from the LIKE optimization are ** always used in pairs. */ pTop = &pTerm[1]; assert( (pTop-(pTerm->pWC->a))pWC->nTerm ); assert( pTop->wtFlags & TERM_LIKEOPT ); assert( pTop->eOperator==WO_LT ); if( whereLoopResize(db, pNew, pNew->nLTerm+1) ) break; /* OOM */ pNew->aLTerm[pNew->nLTerm++] = pTop; pNew->wsFlags |= WHERE_TOP_LIMIT; } }else{ assert( eOp & (WO_LT|WO_LE) ); testcase( eOp & WO_LT ); testcase( eOp & WO_LE ); pNew->wsFlags |= WHERE_COLUMN_RANGE|WHERE_TOP_LIMIT; pTop = pTerm; pBtm = (pNew->wsFlags & WHERE_BTM_LIMIT)!=0 ? pNew->aLTerm[pNew->nLTerm-2] : 0; } /* At this point pNew->nOut is set to the number of rows expected to ** be visited by the index scan before considering term pTerm, or the ** values of nIn and nInMul. In other words, assuming that all ** "x IN(...)" terms are replaced with "x = ?". This block updates ** the value of pNew->nOut to account for pTerm (but not nIn/nInMul). */ assert( pNew->nOut==saved_nOut ); if( pNew->wsFlags & WHERE_COLUMN_RANGE ){ /* Adjust nOut using stat3/stat4 data. Or, if there is no stat3/stat4 ** data, using some other estimate. */ whereRangeScanEst(pParse, pBuilder, pBtm, pTop, pNew); }else{ int nEq = ++pNew->u.btree.nEq; assert( eOp & (WO_ISNULL|WO_EQ|WO_IN|WO_IS) ); assert( pNew->nOut==saved_nOut ); if( pTerm->truthProb<=0 && iCol>=0 ){ assert( (eOp & WO_IN) || nIn==0 ); testcase( eOp & WO_IN ); pNew->nOut += pTerm->truthProb; pNew->nOut -= nIn; }else{ #ifdef SQLITE_ENABLE_STAT3_OR_STAT4 tRowcnt nOut = 0; if( nInMul==0 && pProbe->nSample && pNew->u.btree.nEq<=pProbe->nSampleCol && ((eOp & WO_IN)==0 || !ExprHasProperty(pTerm->pExpr, EP_xIsSelect)) ){ Expr *pExpr = pTerm->pExpr; if( (eOp & (WO_EQ|WO_ISNULL|WO_IS))!=0 ){ testcase( eOp & WO_EQ ); testcase( eOp & WO_IS ); testcase( eOp & WO_ISNULL ); rc = whereEqualScanEst(pParse, pBuilder, pExpr->pRight, &nOut); }else{ rc = whereInScanEst(pParse, pBuilder, pExpr->x.pList, &nOut); } if( rc==SQLITE_NOTFOUND ) rc = SQLITE_OK; if( rc!=SQLITE_OK ) break; /* Jump out of the pTerm loop */ if( nOut ){ pNew->nOut = sqlite3LogEst(nOut); if( pNew->nOut>saved_nOut ) pNew->nOut = saved_nOut; pNew->nOut -= nIn; } } if( nOut==0 ) #endif { pNew->nOut += (pProbe->aiRowLogEst[nEq] - pProbe->aiRowLogEst[nEq-1]); if( eOp & WO_ISNULL ){ /* TUNING: If there is no likelihood() value, assume that a ** "col IS NULL" expression matches twice as many rows ** as (col=?). */ pNew->nOut += 10; } } } } /* Set rCostIdx to the cost of visiting selected rows in index. Add ** it to pNew->rRun, which is currently set to the cost of the index ** seek only. Then, if this is a non-covering index, add the cost of ** visiting the rows in the main table. 
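**
** As a rough illustration (hypothetical record sizes): when the index
** rows are about the same size as the table rows, the expression below
** evaluates to nOut+16, i.e. roughly nOut*3.0 in LogEst terms; when the
** index rows are about one quarter the size of the table rows it comes
** to roughly nOut+4, i.e. about nOut*1.3.  This is the "K between 1.1
** and 3.0" factor described in the header comment of whereLoopAddBtree().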
*/ rCostIdx = pNew->nOut + 1 + (15*pProbe->szIdxRow)/pSrc->pTab->szTabRow; pNew->rRun = sqlite3LogEstAdd(rLogSize, rCostIdx); if( (pNew->wsFlags & (WHERE_IDX_ONLY|WHERE_IPK))==0 ){ pNew->rRun = sqlite3LogEstAdd(pNew->rRun, pNew->nOut + 16); } ApplyCostMultiplier(pNew->rRun, pProbe->pTable->costMult); nOutUnadjusted = pNew->nOut; pNew->rRun += nInMul + nIn; pNew->nOut += nInMul + nIn; whereLoopOutputAdjust(pBuilder->pWC, pNew, rSize); rc = whereLoopInsert(pBuilder, pNew); if( pNew->wsFlags & WHERE_COLUMN_RANGE ){ pNew->nOut = saved_nOut; }else{ pNew->nOut = nOutUnadjusted; } if( (pNew->wsFlags & WHERE_TOP_LIMIT)==0 && pNew->u.btree.nEqnColumn ){ whereLoopAddBtreeIndex(pBuilder, pSrc, pProbe, nInMul+nIn); } pNew->nOut = saved_nOut; #ifdef SQLITE_ENABLE_STAT3_OR_STAT4 pBuilder->nRecValid = nRecValid; #endif } pNew->prereq = saved_prereq; pNew->u.btree.nEq = saved_nEq; pNew->nSkip = saved_nSkip; pNew->wsFlags = saved_wsFlags; pNew->nOut = saved_nOut; pNew->nLTerm = saved_nLTerm; /* Consider using a skip-scan if there are no WHERE clause constraints ** available for the left-most terms of the index, and if the average ** number of repeats in the left-most terms is at least 18. ** ** The magic number 18 is selected on the basis that scanning 17 rows ** is almost always quicker than an index seek (even though if the index ** contains fewer than 2^17 rows we assume otherwise in other parts of ** the code). And, even if it is not, it should not be too much slower. ** On the other hand, the extra seeks could end up being significantly ** more expensive. */ assert( 42==sqlite3LogEst(18) ); if( saved_nEq==saved_nSkip && saved_nEq+1nKeyCol && pProbe->noSkipScan==0 && pProbe->aiRowLogEst[saved_nEq+1]>=42 /* TUNING: Minimum for skip-scan */ && (rc = whereLoopResize(db, pNew, pNew->nLTerm+1))==SQLITE_OK ){ LogEst nIter; pNew->u.btree.nEq++; pNew->nSkip++; pNew->aLTerm[pNew->nLTerm++] = 0; pNew->wsFlags |= WHERE_SKIPSCAN; nIter = pProbe->aiRowLogEst[saved_nEq] - pProbe->aiRowLogEst[saved_nEq+1]; pNew->nOut -= nIter; /* TUNING: Because uncertainties in the estimates for skip-scan queries, ** add a 1.375 fudge factor to make skip-scan slightly less likely. */ nIter += 5; whereLoopAddBtreeIndex(pBuilder, pSrc, pProbe, nIter + nInMul); pNew->nOut = saved_nOut; pNew->u.btree.nEq = saved_nEq; pNew->nSkip = saved_nSkip; pNew->wsFlags = saved_wsFlags; } return rc; } /* ** Return True if it is possible that pIndex might be useful in ** implementing the ORDER BY clause in pBuilder. ** ** Return False if pBuilder does not contain an ORDER BY clause or ** if there is no way for pIndex to be useful in implementing that ** ORDER BY clause. */ static int indexMightHelpWithOrderBy( WhereLoopBuilder *pBuilder, Index *pIndex, int iCursor ){ ExprList *pOB; int ii, jj; if( pIndex->bUnordered ) return 0; if( (pOB = pBuilder->pWInfo->pOrderBy)==0 ) return 0; for(ii=0; iinExpr; ii++){ Expr *pExpr = sqlite3ExprSkipCollate(pOB->a[ii].pExpr); if( pExpr->op!=TK_COLUMN ) return 0; if( pExpr->iTable==iCursor ){ if( pExpr->iColumn<0 ) return 1; for(jj=0; jjnKeyCol; jj++){ if( pExpr->iColumn==pIndex->aiColumn[jj] ) return 1; } } } return 0; } /* ** Return a bitmask where 1s indicate that the corresponding column of ** the table is used by an index. Only the first 63 columns are considered. 
*/ static Bitmask columnsInIndex(Index *pIdx){ Bitmask m = 0; int j; for(j=pIdx->nColumn-1; j>=0; j--){ int x = pIdx->aiColumn[j]; if( x>=0 ){ testcase( x==BMS-1 ); testcase( x==BMS-2 ); if( xa; inTerm; i++, pTerm++){ Expr *pExpr = pTerm->pExpr; if( sqlite3ExprImpliesExpr(pExpr, pWhere, iTab) && (!ExprHasProperty(pExpr, EP_FromJoin) || pExpr->iRightJoinTable==iTab) ){ return 1; } } return 0; } /* ** Add all WhereLoop objects for a single table of the join where the table ** is idenfied by pBuilder->pNew->iTab. That table is guaranteed to be ** a b-tree table, not a virtual table. ** ** The costs (WhereLoop.rRun) of the b-tree loops added by this function ** are calculated as follows: ** ** For a full scan, assuming the table (or index) contains nRow rows: ** ** cost = nRow * 3.0 // full-table scan ** cost = nRow * K // scan of covering index ** cost = nRow * (K+3.0) // scan of non-covering index ** ** where K is a value between 1.1 and 3.0 set based on the relative ** estimated average size of the index and table records. ** ** For an index scan, where nVisit is the number of index rows visited ** by the scan, and nSeek is the number of seek operations required on ** the index b-tree: ** ** cost = nSeek * (log(nRow) + K * nVisit) // covering index ** cost = nSeek * (log(nRow) + (K+3.0) * nVisit) // non-covering index ** ** Normally, nSeek is 1. nSeek values greater than 1 come about if the ** WHERE clause includes "x IN (....)" terms used in place of "x=?". Or when ** implicit "x IN (SELECT x FROM tbl)" terms are added for skip-scans. ** ** The estimated values (nRow, nVisit, nSeek) often contain a large amount ** of uncertainty. For this reason, scoring is designed to pick plans that ** "do the least harm" if the estimates are inaccurate. For example, a ** log(nRow) factor is omitted from a non-covering index scan in order to ** bias the scoring in favor of using an index, since the worst-case ** performance of using an index is far better than the worst-case performance ** of a full table scan. */ static int whereLoopAddBtree( WhereLoopBuilder *pBuilder, /* WHERE clause information */ Bitmask mExtra /* Extra prerequesites for using this table */ ){ WhereInfo *pWInfo; /* WHERE analysis context */ Index *pProbe; /* An index we are evaluating */ Index sPk; /* A fake index object for the primary key */ LogEst aiRowEstPk[2]; /* The aiRowLogEst[] value for the sPk index */ i16 aiColumnPk = -1; /* The aColumn[] value for the sPk index */ SrcList *pTabList; /* The FROM clause */ struct SrcList_item *pSrc; /* The FROM clause btree term to add */ WhereLoop *pNew; /* Template WhereLoop object */ int rc = SQLITE_OK; /* Return code */ int iSortIdx = 1; /* Index number */ int b; /* A boolean value */ LogEst rSize; /* number of rows in the table */ LogEst rLogSize; /* Logarithm of the number of rows in the table */ WhereClause *pWC; /* The parsed WHERE clause */ Table *pTab; /* Table being queried */ pNew = pBuilder->pNew; pWInfo = pBuilder->pWInfo; pTabList = pWInfo->pTabList; pSrc = pTabList->a + pNew->iTab; pTab = pSrc->pTab; pWC = pBuilder->pWC; assert( !IsVirtual(pSrc->pTab) ); if( pSrc->pIndex ){ /* An INDEXED BY clause specifies a particular index to use */ pProbe = pSrc->pIndex; }else if( !HasRowid(pTab) ){ pProbe = pTab->pIndex; }else{ /* There is no INDEXED BY clause. Create a fake Index object in local ** variable sPk to represent the rowid primary key index. 
Make this ** fake index the first in a chain of Index objects with all of the real ** indices to follow */ Index *pFirst; /* First of real indices on the table */ memset(&sPk, 0, sizeof(Index)); sPk.nKeyCol = 1; sPk.nColumn = 1; sPk.aiColumn = &aiColumnPk; sPk.aiRowLogEst = aiRowEstPk; sPk.onError = OE_Replace; sPk.pTable = pTab; sPk.szIdxRow = pTab->szTabRow; aiRowEstPk[0] = pTab->nRowLogEst; aiRowEstPk[1] = 0; pFirst = pSrc->pTab->pIndex; if( pSrc->notIndexed==0 ){ /* The real indices of the table are only considered if the ** NOT INDEXED qualifier is omitted from the FROM clause */ sPk.pNext = pFirst; } pProbe = &sPk; } rSize = pTab->nRowLogEst; rLogSize = estLog(rSize); #ifndef SQLITE_OMIT_AUTOMATIC_INDEX /* Automatic indexes */ if( !pBuilder->pOrSet /* Not part of an OR optimization */ && (pWInfo->wctrlFlags & WHERE_NO_AUTOINDEX)==0 && (pWInfo->pParse->db->flags & SQLITE_AutoIndex)!=0 && pSrc->pIndex==0 /* Has no INDEXED BY clause */ && !pSrc->notIndexed /* Has no NOT INDEXED clause */ && HasRowid(pTab) /* Is not a WITHOUT ROWID table. (FIXME: Why not?) */ && !pSrc->isCorrelated /* Not a correlated subquery */ && !pSrc->isRecursive /* Not a recursive common table expression. */ ){ /* Generate auto-index WhereLoops */ WhereTerm *pTerm; WhereTerm *pWCEnd = pWC->a + pWC->nTerm; for(pTerm=pWC->a; rc==SQLITE_OK && pTermprereqRight & pNew->maskSelf ) continue; if( termCanDriveIndex(pTerm, pSrc, 0) ){ pNew->u.btree.nEq = 1; pNew->nSkip = 0; pNew->u.btree.pIndex = 0; pNew->nLTerm = 1; pNew->aLTerm[0] = pTerm; /* TUNING: One-time cost for computing the automatic index is ** estimated to be X*N*log2(N) where N is the number of rows in ** the table being indexed and where X is 7 (LogEst=28) for normal ** tables or 1.375 (LogEst=4) for views and subqueries. The value ** of X is smaller for views and subqueries so that the query planner ** will be more aggressive about generating automatic indexes for ** those objects, since there is no opportunity to add schema ** indexes on subqueries and views. */ pNew->rSetup = rLogSize + rSize + 4; if( pTab->pSelect==0 && (pTab->tabFlags & TF_Ephemeral)==0 ){ pNew->rSetup += 24; } ApplyCostMultiplier(pNew->rSetup, pTab->costMult); /* TUNING: Each index lookup yields 20 rows in the table. This ** is more than the usual guess of 10 rows, since we have no way ** of knowing how selective the index will ultimately be. It would ** not be unreasonable to make this value much larger. 
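**
** (In LogEst units the 20-row guess is sqlite3LogEst(20)==43, since
** 10*log2(20) is approximately 43.2; that is the value stored into
** pNew->nOut just below.)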
*/ pNew->nOut = 43; assert( 43==sqlite3LogEst(20) ); pNew->rRun = sqlite3LogEstAdd(rLogSize,pNew->nOut); pNew->wsFlags = WHERE_AUTO_INDEX; pNew->prereq = mExtra | pTerm->prereqRight; rc = whereLoopInsert(pBuilder, pNew); } } } #endif /* SQLITE_OMIT_AUTOMATIC_INDEX */ /* Loop over all indices */ for(; rc==SQLITE_OK && pProbe; pProbe=pProbe->pNext, iSortIdx++){ if( pProbe->pPartIdxWhere!=0 && !whereUsablePartialIndex(pSrc->iCursor, pWC, pProbe->pPartIdxWhere) ){ testcase( pNew->iTab!=pSrc->iCursor ); /* See ticket [98d973b8f5] */ continue; /* Partial index inappropriate for this query */ } rSize = pProbe->aiRowLogEst[0]; pNew->u.btree.nEq = 0; pNew->nSkip = 0; pNew->nLTerm = 0; pNew->iSortIdx = 0; pNew->rSetup = 0; pNew->prereq = mExtra; pNew->nOut = rSize; pNew->u.btree.pIndex = pProbe; b = indexMightHelpWithOrderBy(pBuilder, pProbe, pSrc->iCursor); /* The ONEPASS_DESIRED flags never occurs together with ORDER BY */ assert( (pWInfo->wctrlFlags & WHERE_ONEPASS_DESIRED)==0 || b==0 ); if( pProbe->tnum<=0 ){ /* Integer primary key index */ pNew->wsFlags = WHERE_IPK; /* Full table scan */ pNew->iSortIdx = b ? iSortIdx : 0; /* TUNING: Cost of full table scan is (N*3.0). */ pNew->rRun = rSize + 16; ApplyCostMultiplier(pNew->rRun, pTab->costMult); whereLoopOutputAdjust(pWC, pNew, rSize); rc = whereLoopInsert(pBuilder, pNew); pNew->nOut = rSize; if( rc ) break; }else{ Bitmask m; if( pProbe->isCovering ){ pNew->wsFlags = WHERE_IDX_ONLY | WHERE_INDEXED; m = 0; }else{ m = pSrc->colUsed & ~columnsInIndex(pProbe); pNew->wsFlags = (m==0) ? (WHERE_IDX_ONLY|WHERE_INDEXED) : WHERE_INDEXED; } /* Full scan via index */ if( b || !HasRowid(pTab) || ( m==0 && pProbe->bUnordered==0 && (pProbe->szIdxRowszTabRow) && (pWInfo->wctrlFlags & WHERE_ONEPASS_DESIRED)==0 && sqlite3GlobalConfig.bUseCis && OptimizationEnabled(pWInfo->pParse->db, SQLITE_CoverIdxScan) ) ){ pNew->iSortIdx = b ? iSortIdx : 0; /* The cost of visiting the index rows is N*K, where K is ** between 1.1 and 3.0, depending on the relative sizes of the ** index and table rows. If this is a non-covering index scan, ** also add the cost of visiting table rows (N*3.0). */ pNew->rRun = rSize + 1 + (15*pProbe->szIdxRow)/pTab->szTabRow; if( m!=0 ){ pNew->rRun = sqlite3LogEstAdd(pNew->rRun, rSize+16); } ApplyCostMultiplier(pNew->rRun, pTab->costMult); whereLoopOutputAdjust(pWC, pNew, rSize); rc = whereLoopInsert(pBuilder, pNew); pNew->nOut = rSize; if( rc ) break; } } rc = whereLoopAddBtreeIndex(pBuilder, pSrc, pProbe, 0); #ifdef SQLITE_ENABLE_STAT3_OR_STAT4 sqlite3Stat4ProbeFree(pBuilder->pRec); pBuilder->nRecValid = 0; pBuilder->pRec = 0; #endif /* If there was an INDEXED BY clause, then only that one index is ** considered. */ if( pSrc->pIndex ) break; } return rc; } #ifndef SQLITE_OMIT_VIRTUALTABLE /* ** Add all WhereLoop objects for a table of the join identified by ** pBuilder->pNew->iTab. That table is guaranteed to be a virtual table. ** ** If there are no LEFT or CROSS JOIN joins in the query, both mExtra and ** mUnusable are set to 0. Otherwise, mExtra is a mask of all FROM clause ** entries that occur before the virtual table in the FROM clause and are ** separated from it by at least one LEFT or CROSS JOIN. Similarly, the ** mUnusable mask contains all FROM clause entries that occur after the ** virtual table and are separated from it by at least one LEFT or ** CROSS JOIN. ** ** For example, if the query were: ** ** ... FROM t1, t2 LEFT JOIN t3, t4, vt CROSS JOIN t5, t6; ** ** then mExtra corresponds to (t1, t2) and mUnusable to (t5, t6). 
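**
** As an illustration (hypothetical bit assignment, shown only to make
** the masks concrete): if the seven FROM clause terms t1, t2, t3, t4,
** vt, t5, t6 are assigned the cursor bits 0x01 through 0x40 from left
** to right, then mExtra would be 0x01|0x02 == 0x03 and mUnusable would
** be 0x20|0x40 == 0x60.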
** ** All the tables in mExtra must be scanned before the current virtual ** table. So any terms for which all prerequisites are satisfied by ** mExtra may be specified as "usable" in all calls to xBestIndex. ** Conversely, all tables in mUnusable must be scanned after the current ** virtual table, so any terms for which the prerequisites overlap with ** mUnusable should always be configured as "not-usable" for xBestIndex. */ static int whereLoopAddVirtual( WhereLoopBuilder *pBuilder, /* WHERE clause information */ Bitmask mExtra, /* Tables that must be scanned before this one */ Bitmask mUnusable /* Tables that must be scanned after this one */ ){ WhereInfo *pWInfo; /* WHERE analysis context */ Parse *pParse; /* The parsing context */ WhereClause *pWC; /* The WHERE clause */ struct SrcList_item *pSrc; /* The FROM clause term to search */ Table *pTab; sqlite3 *db; sqlite3_index_info *pIdxInfo; struct sqlite3_index_constraint *pIdxCons; struct sqlite3_index_constraint_usage *pUsage; WhereTerm *pTerm; int i, j; int iTerm, mxTerm; int nConstraint; int seenIn = 0; /* True if an IN operator is seen */ int seenVar = 0; /* True if a non-constant constraint is seen */ int iPhase; /* 0: const w/o IN, 1: const, 2: no IN, 2: IN */ WhereLoop *pNew; int rc = SQLITE_OK; assert( (mExtra & mUnusable)==0 ); pWInfo = pBuilder->pWInfo; pParse = pWInfo->pParse; db = pParse->db; pWC = pBuilder->pWC; pNew = pBuilder->pNew; pSrc = &pWInfo->pTabList->a[pNew->iTab]; pTab = pSrc->pTab; assert( IsVirtual(pTab) ); pIdxInfo = allocateIndexInfo(pParse, pWC, mUnusable, pSrc,pBuilder->pOrderBy); if( pIdxInfo==0 ) return SQLITE_NOMEM; pNew->prereq = 0; pNew->rSetup = 0; pNew->wsFlags = WHERE_VIRTUALTABLE; pNew->nLTerm = 0; pNew->u.vtab.needFree = 0; pUsage = pIdxInfo->aConstraintUsage; nConstraint = pIdxInfo->nConstraint; if( whereLoopResize(db, pNew, nConstraint) ){ sqlite3DbFree(db, pIdxInfo); return SQLITE_NOMEM; } for(iPhase=0; iPhase<=3; iPhase++){ if( !seenIn && (iPhase&1)!=0 ){ iPhase++; if( iPhase>3 ) break; } if( !seenVar && iPhase>1 ) break; pIdxCons = *(struct sqlite3_index_constraint**)&pIdxInfo->aConstraint; for(i=0; inConstraint; i++, pIdxCons++){ j = pIdxCons->iTermOffset; pTerm = &pWC->a[j]; switch( iPhase ){ case 0: /* Constants without IN operator */ pIdxCons->usable = 0; if( (pTerm->eOperator & WO_IN)!=0 ){ seenIn = 1; } if( (pTerm->prereqRight & ~mExtra)!=0 ){ seenVar = 1; }else if( (pTerm->eOperator & WO_IN)==0 ){ pIdxCons->usable = 1; } break; case 1: /* Constants with IN operators */ assert( seenIn ); pIdxCons->usable = (pTerm->prereqRight & ~mExtra)==0; break; case 2: /* Variables without IN */ assert( seenVar ); pIdxCons->usable = (pTerm->eOperator & WO_IN)==0; break; default: /* Variables with IN */ assert( seenVar && seenIn ); pIdxCons->usable = 1; break; } } memset(pUsage, 0, sizeof(pUsage[0])*pIdxInfo->nConstraint); if( pIdxInfo->needToFreeIdxStr ) sqlite3_free(pIdxInfo->idxStr); pIdxInfo->idxStr = 0; pIdxInfo->idxNum = 0; pIdxInfo->needToFreeIdxStr = 0; pIdxInfo->orderByConsumed = 0; pIdxInfo->estimatedCost = SQLITE_BIG_DBL / (double)2; pIdxInfo->estimatedRows = 25; rc = vtabBestIndex(pParse, pTab, pIdxInfo); if( rc ) goto whereLoopAddVtab_exit; pIdxCons = *(struct sqlite3_index_constraint**)&pIdxInfo->aConstraint; pNew->prereq = mExtra; mxTerm = -1; assert( pNew->nLSlot>=nConstraint ); for(i=0; iaLTerm[i] = 0; pNew->u.vtab.omitMask = 0; for(i=0; i=0 ){ j = pIdxCons->iTermOffset; if( iTerm>=nConstraint || j<0 || j>=pWC->nTerm || pNew->aLTerm[iTerm]!=0 ){ rc = SQLITE_ERROR; 
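/* The xBestIndex() method returned an argvIndex that is out of range,
        ** that points at a WHERE clause term that does not exist, or that
        ** reuses an output slot already assigned to another constraint.
        ** Treat any of these as a malfunction of the virtual table module. */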
sqlite3ErrorMsg(pParse, "%s.xBestIndex() malfunction", pTab->zName); goto whereLoopAddVtab_exit; } testcase( iTerm==nConstraint-1 ); testcase( j==0 ); testcase( j==pWC->nTerm-1 ); pTerm = &pWC->a[j]; pNew->prereq |= pTerm->prereqRight; assert( iTermnLSlot ); pNew->aLTerm[iTerm] = pTerm; if( iTerm>mxTerm ) mxTerm = iTerm; testcase( iTerm==15 ); testcase( iTerm==16 ); if( iTerm<16 && pUsage[i].omit ) pNew->u.vtab.omitMask |= 1<eOperator & WO_IN)!=0 ){ if( pUsage[i].omit==0 ){ /* Do not attempt to use an IN constraint if the virtual table ** says that the equivalent EQ constraint cannot be safely omitted. ** If we do attempt to use such a constraint, some rows might be ** repeated in the output. */ break; } /* A virtual table that is constrained by an IN clause may not ** consume the ORDER BY clause because (1) the order of IN terms ** is not necessarily related to the order of output terms and ** (2) Multiple outputs from a single IN value will not merge ** together. */ pIdxInfo->orderByConsumed = 0; } } } if( i>=nConstraint ){ pNew->nLTerm = mxTerm+1; assert( pNew->nLTerm<=pNew->nLSlot ); pNew->u.vtab.idxNum = pIdxInfo->idxNum; pNew->u.vtab.needFree = pIdxInfo->needToFreeIdxStr; pIdxInfo->needToFreeIdxStr = 0; pNew->u.vtab.idxStr = pIdxInfo->idxStr; pNew->u.vtab.isOrdered = (i8)(pIdxInfo->orderByConsumed ? pIdxInfo->nOrderBy : 0); pNew->rSetup = 0; pNew->rRun = sqlite3LogEstFromDouble(pIdxInfo->estimatedCost); pNew->nOut = sqlite3LogEst(pIdxInfo->estimatedRows); whereLoopInsert(pBuilder, pNew); if( pNew->u.vtab.needFree ){ sqlite3_free(pNew->u.vtab.idxStr); pNew->u.vtab.needFree = 0; } } } whereLoopAddVtab_exit: if( pIdxInfo->needToFreeIdxStr ) sqlite3_free(pIdxInfo->idxStr); sqlite3DbFree(db, pIdxInfo); return rc; } #endif /* SQLITE_OMIT_VIRTUALTABLE */ /* ** Add WhereLoop entries to handle OR terms. This works for either ** btrees or virtual tables. 
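**
** For example (illustrative schema): given indexes on t1(a) and t1(b),
** a clause such as "a=5 OR b=7" can be handled by probing each index
** separately and combining the matching rowids, instead of forcing a
** full scan of t1.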
*/ static int whereLoopAddOr( WhereLoopBuilder *pBuilder, Bitmask mExtra, Bitmask mUnusable ){ WhereInfo *pWInfo = pBuilder->pWInfo; WhereClause *pWC; WhereLoop *pNew; WhereTerm *pTerm, *pWCEnd; int rc = SQLITE_OK; int iCur; WhereClause tempWC; WhereLoopBuilder sSubBuild; WhereOrSet sSum, sCur; struct SrcList_item *pItem; pWC = pBuilder->pWC; pWCEnd = pWC->a + pWC->nTerm; pNew = pBuilder->pNew; memset(&sSum, 0, sizeof(sSum)); pItem = pWInfo->pTabList->a + pNew->iTab; iCur = pItem->iCursor; for(pTerm=pWC->a; pTermeOperator & WO_OR)!=0 && (pTerm->u.pOrInfo->indexable & pNew->maskSelf)!=0 ){ WhereClause * const pOrWC = &pTerm->u.pOrInfo->wc; WhereTerm * const pOrWCEnd = &pOrWC->a[pOrWC->nTerm]; WhereTerm *pOrTerm; int once = 1; int i, j; sSubBuild = *pBuilder; sSubBuild.pOrderBy = 0; sSubBuild.pOrSet = &sCur; WHERETRACE(0x200, ("Begin processing OR-clause %p\n", pTerm)); for(pOrTerm=pOrWC->a; pOrTermeOperator & WO_AND)!=0 ){ sSubBuild.pWC = &pOrTerm->u.pAndInfo->wc; }else if( pOrTerm->leftCursor==iCur ){ tempWC.pWInfo = pWC->pWInfo; tempWC.pOuter = pWC; tempWC.op = TK_AND; tempWC.nTerm = 1; tempWC.a = pOrTerm; sSubBuild.pWC = &tempWC; }else{ continue; } sCur.n = 0; #ifdef WHERETRACE_ENABLED WHERETRACE(0x200, ("OR-term %d of %p has %d subterms:\n", (int)(pOrTerm-pOrWC->a), pTerm, sSubBuild.pWC->nTerm)); if( sqlite3WhereTrace & 0x400 ){ for(i=0; inTerm; i++){ whereTermPrint(&sSubBuild.pWC->a[i], i); } } #endif #ifndef SQLITE_OMIT_VIRTUALTABLE if( IsVirtual(pItem->pTab) ){ rc = whereLoopAddVirtual(&sSubBuild, mExtra, mUnusable); }else #endif { rc = whereLoopAddBtree(&sSubBuild, mExtra); } if( rc==SQLITE_OK ){ rc = whereLoopAddOr(&sSubBuild, mExtra, mUnusable); } assert( rc==SQLITE_OK || sCur.n==0 ); if( sCur.n==0 ){ sSum.n = 0; break; }else if( once ){ whereOrMove(&sSum, &sCur); once = 0; }else{ WhereOrSet sPrev; whereOrMove(&sPrev, &sSum); sSum.n = 0; for(i=0; inLTerm = 1; pNew->aLTerm[0] = pTerm; pNew->wsFlags = WHERE_MULTI_OR; pNew->rSetup = 0; pNew->iSortIdx = 0; memset(&pNew->u, 0, sizeof(pNew->u)); for(i=0; rc==SQLITE_OK && irRun = sSum.a[i].rRun + 1; pNew->nOut = sSum.a[i].nOut; pNew->prereq = sSum.a[i].prereq; rc = whereLoopInsert(pBuilder, pNew); } WHERETRACE(0x200, ("End processing OR-clause %p\n", pTerm)); } } return rc; } /* ** Add all WhereLoop objects for all tables */ static int whereLoopAddAll(WhereLoopBuilder *pBuilder){ WhereInfo *pWInfo = pBuilder->pWInfo; Bitmask mExtra = 0; Bitmask mPrior = 0; int iTab; SrcList *pTabList = pWInfo->pTabList; struct SrcList_item *pItem; struct SrcList_item *pEnd = &pTabList->a[pWInfo->nLevel]; sqlite3 *db = pWInfo->pParse->db; int rc = SQLITE_OK; WhereLoop *pNew; u8 priorJointype = 0; /* Loop over the tables in the join, from left to right */ pNew = pBuilder->pNew; whereLoopInit(pNew); for(iTab=0, pItem=pTabList->a; pItemiTab = iTab; pNew->maskSelf = sqlite3WhereGetMask(&pWInfo->sMaskSet, pItem->iCursor); if( ((pItem->jointype|priorJointype) & (JT_LEFT|JT_CROSS))!=0 ){ /* This condition is true when pItem is the FROM clause term on the ** right-hand-side of a LEFT or CROSS JOIN. 
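**
** For example, in "... FROM t1 LEFT JOIN t2 ...", when pItem is t2 the
** bitmask of t1 is folded into mExtra, so every WhereLoop built for t2
** carries t1 as a prerequisite and can never be planned ahead of t1.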
*/ mExtra = mPrior; } priorJointype = pItem->jointype; if( IsVirtual(pItem->pTab) ){ struct SrcList_item *p; for(p=&pItem[1]; pjointype & (JT_LEFT|JT_CROSS)) ){ mUnusable |= sqlite3WhereGetMask(&pWInfo->sMaskSet, p->iCursor); } } rc = whereLoopAddVirtual(pBuilder, mExtra, mUnusable); }else{ rc = whereLoopAddBtree(pBuilder, mExtra); } if( rc==SQLITE_OK ){ rc = whereLoopAddOr(pBuilder, mExtra, mUnusable); } mPrior |= pNew->maskSelf; if( rc || db->mallocFailed ) break; } whereLoopClear(db, pNew); return rc; } /* ** Examine a WherePath (with the addition of the extra WhereLoop of the 5th ** parameters) to see if it outputs rows in the requested ORDER BY ** (or GROUP BY) without requiring a separate sort operation. Return N: ** ** N>0: N terms of the ORDER BY clause are satisfied ** N==0: No terms of the ORDER BY clause are satisfied ** N<0: Unknown yet how many terms of ORDER BY might be satisfied. ** ** Note that processing for WHERE_GROUPBY and WHERE_DISTINCTBY is not as ** strict. With GROUP BY and DISTINCT the only requirement is that ** equivalent rows appear immediately adjacent to one another. GROUP BY ** and DISTINCT do not require rows to appear in any particular order as long ** as equivalent rows are grouped together. Thus for GROUP BY and DISTINCT ** the pOrderBy terms can be matched in any order. With ORDER BY, the ** pOrderBy terms must be matched in strict left-to-right order. */ static i8 wherePathSatisfiesOrderBy( WhereInfo *pWInfo, /* The WHERE clause */ ExprList *pOrderBy, /* ORDER BY or GROUP BY or DISTINCT clause to check */ WherePath *pPath, /* The WherePath to check */ u16 wctrlFlags, /* Might contain WHERE_GROUPBY or WHERE_DISTINCTBY */ u16 nLoop, /* Number of entries in pPath->aLoop[] */ WhereLoop *pLast, /* Add this WhereLoop to the end of pPath->aLoop[] */ Bitmask *pRevMask /* OUT: Mask of WhereLoops to run in reverse order */ ){ u8 revSet; /* True if rev is known */ u8 rev; /* Composite sort order */ u8 revIdx; /* Index sort order */ u8 isOrderDistinct; /* All prior WhereLoops are order-distinct */ u8 distinctColumns; /* True if the loop has UNIQUE NOT NULL columns */ u8 isMatch; /* iColumn matches a term of the ORDER BY clause */ u16 nKeyCol; /* Number of key columns in pIndex */ u16 nColumn; /* Total number of ordered columns in the index */ u16 nOrderBy; /* Number terms in the ORDER BY clause */ int iLoop; /* Index of WhereLoop in pPath being processed */ int i, j; /* Loop counters */ int iCur; /* Cursor number for current WhereLoop */ int iColumn; /* A column number within table iCur */ WhereLoop *pLoop = 0; /* Current WhereLoop being processed. */ WhereTerm *pTerm; /* A single term of the WHERE clause */ Expr *pOBExpr; /* An expression from the ORDER BY clause */ CollSeq *pColl; /* COLLATE function from an ORDER BY clause term */ Index *pIndex; /* The index associated with pLoop */ sqlite3 *db = pWInfo->pParse->db; /* Database connection */ Bitmask obSat = 0; /* Mask of ORDER BY terms satisfied so far */ Bitmask obDone; /* Mask of all ORDER BY terms */ Bitmask orderDistinctMask; /* Mask of all well-ordered loops */ Bitmask ready; /* Mask of inner loops */ /* ** We say the WhereLoop is "one-row" if it generates no more than one ** row of output. A WhereLoop is one-row if all of the following are true: ** (a) All index columns match with WHERE_COLUMN_EQ. ** (b) The index is unique ** Any WhereLoop with an WHERE_COLUMN_EQ constraint on the rowid is one-row. ** Every one-row WhereLoop will have the WHERE_ONEROW bit set in wsFlags. 
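**
** For example (hypothetical schema), with "CREATE TABLE t(a INTEGER
** PRIMARY KEY, b)" a loop implementing "WHERE a=?" is one-row, as is a
** loop that uses a UNIQUE index with every indexed column constrained
** by ==.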
** ** We say the WhereLoop is "order-distinct" if the set of columns from ** that WhereLoop that are in the ORDER BY clause are different for every ** row of the WhereLoop. Every one-row WhereLoop is automatically ** order-distinct. A WhereLoop that has no columns in the ORDER BY clause ** is not order-distinct. To be order-distinct is not quite the same as being ** UNIQUE since a UNIQUE column or index can have multiple rows that ** are NULL and NULL values are equivalent for the purpose of order-distinct. ** To be order-distinct, the columns must be UNIQUE and NOT NULL. ** ** The rowid for a table is always UNIQUE and NOT NULL so whenever the ** rowid appears in the ORDER BY clause, the corresponding WhereLoop is ** automatically order-distinct. */ assert( pOrderBy!=0 ); if( nLoop && OptimizationDisabled(db, SQLITE_OrderByIdxJoin) ) return 0; nOrderBy = pOrderBy->nExpr; testcase( nOrderBy==BMS-1 ); if( nOrderBy>BMS-1 ) return 0; /* Cannot optimize overly large ORDER BYs */ isOrderDistinct = 1; obDone = MASKBIT(nOrderBy)-1; orderDistinctMask = 0; ready = 0; for(iLoop=0; isOrderDistinct && obSat0 ) ready |= pLoop->maskSelf; pLoop = iLoopaLoop[iLoop] : pLast; if( pLoop->wsFlags & WHERE_VIRTUALTABLE ){ if( pLoop->u.vtab.isOrdered ) obSat = obDone; break; } iCur = pWInfo->pTabList->a[pLoop->iTab].iCursor; /* Mark off any ORDER BY term X that is a column in the table of ** the current loop for which there is term in the WHERE ** clause of the form X IS NULL or X=? that reference only outer ** loops. */ for(i=0; ia[i].pExpr); if( pOBExpr->op!=TK_COLUMN ) continue; if( pOBExpr->iTable!=iCur ) continue; pTerm = sqlite3WhereFindTerm(&pWInfo->sWC, iCur, pOBExpr->iColumn, ~ready, WO_EQ|WO_ISNULL|WO_IS, 0); if( pTerm==0 ) continue; if( (pTerm->eOperator&(WO_EQ|WO_IS))!=0 && pOBExpr->iColumn>=0 ){ const char *z1, *z2; pColl = sqlite3ExprCollSeq(pWInfo->pParse, pOrderBy->a[i].pExpr); if( !pColl ) pColl = db->pDfltColl; z1 = pColl->zName; pColl = sqlite3ExprCollSeq(pWInfo->pParse, pTerm->pExpr); if( !pColl ) pColl = db->pDfltColl; z2 = pColl->zName; if( sqlite3StrICmp(z1, z2)!=0 ) continue; testcase( pTerm->pExpr->op==TK_IS ); } obSat |= MASKBIT(i); } if( (pLoop->wsFlags & WHERE_ONEROW)==0 ){ if( pLoop->wsFlags & WHERE_IPK ){ pIndex = 0; nKeyCol = 0; nColumn = 1; }else if( (pIndex = pLoop->u.btree.pIndex)==0 || pIndex->bUnordered ){ return 0; }else{ nKeyCol = pIndex->nKeyCol; nColumn = pIndex->nColumn; assert( nColumn==nKeyCol+1 || !HasRowid(pIndex->pTable) ); assert( pIndex->aiColumn[nColumn-1]==(-1) || !HasRowid(pIndex->pTable)); isOrderDistinct = IsUniqueIndex(pIndex); } /* Loop through all columns of the index and deal with the ones ** that are not constrained by == or IN. */ rev = revSet = 0; distinctColumns = 0; for(j=0; ju.btree.nEq && pLoop->nSkip==0 && ((i = pLoop->aLTerm[j]->eOperator) & (WO_EQ|WO_ISNULL|WO_IS))!=0 ){ if( i & WO_ISNULL ){ testcase( isOrderDistinct ); isOrderDistinct = 0; } continue; } /* Get the column number in the table (iColumn) and sort order ** (revIdx) for the j-th column of the index. 
*/ if( pIndex ){ iColumn = pIndex->aiColumn[j]; revIdx = pIndex->aSortOrder[j]; if( iColumn==pIndex->pTable->iPKey ) iColumn = -1; }else{ iColumn = -1; revIdx = 0; } /* An unconstrained column that might be NULL means that this ** WhereLoop is not well-ordered */ if( isOrderDistinct && iColumn>=0 && j>=pLoop->u.btree.nEq && pIndex->pTable->aCol[iColumn].notNull==0 ){ isOrderDistinct = 0; } /* Find the ORDER BY term that corresponds to the j-th column ** of the index and mark that ORDER BY term off */ bOnce = 1; isMatch = 0; for(i=0; bOnce && ia[i].pExpr); testcase( wctrlFlags & WHERE_GROUPBY ); testcase( wctrlFlags & WHERE_DISTINCTBY ); if( (wctrlFlags & (WHERE_GROUPBY|WHERE_DISTINCTBY))==0 ) bOnce = 0; if( pOBExpr->op!=TK_COLUMN ) continue; if( pOBExpr->iTable!=iCur ) continue; if( pOBExpr->iColumn!=iColumn ) continue; if( iColumn>=0 ){ pColl = sqlite3ExprCollSeq(pWInfo->pParse, pOrderBy->a[i].pExpr); if( !pColl ) pColl = db->pDfltColl; if( sqlite3StrICmp(pColl->zName, pIndex->azColl[j])!=0 ) continue; } isMatch = 1; break; } if( isMatch && (wctrlFlags & WHERE_GROUPBY)==0 ){ /* Make sure the sort order is compatible in an ORDER BY clause. ** Sort order is irrelevant for a GROUP BY clause. */ if( revSet ){ if( (rev ^ revIdx)!=pOrderBy->a[i].sortOrder ) isMatch = 0; }else{ rev = revIdx ^ pOrderBy->a[i].sortOrder; if( rev ) *pRevMask |= MASKBIT(iLoop); revSet = 1; } } if( isMatch ){ if( iColumn<0 ){ testcase( distinctColumns==0 ); distinctColumns = 1; } obSat |= MASKBIT(i); }else{ /* No match found */ if( j==0 || jmaskSelf; for(i=0; ia[i].pExpr; mTerm = sqlite3WhereExprUsage(&pWInfo->sMaskSet,p); if( mTerm==0 && !sqlite3ExprIsConstant(p) ) continue; if( (mTerm&~orderDistinctMask)==0 ){ obSat |= MASKBIT(i); } } } } /* End the loop over all WhereLoops from outer-most down to inner-most */ if( obSat==obDone ) return (i8)nOrderBy; if( !isOrderDistinct ){ for(i=nOrderBy-1; i>0; i--){ Bitmask m = MASKBIT(i) - 1; if( (obSat&m)==m ) return i; } return 0; } return -1; } /* ** If the WHERE_GROUPBY flag is set in the mask passed to sqlite3WhereBegin(), ** the planner assumes that the specified pOrderBy list is actually a GROUP ** BY clause - and so any order that groups rows as required satisfies the ** request. ** ** Normally, in this case it is not possible for the caller to determine ** whether or not the rows are really being delivered in sorted order, or ** just in some other order that provides the required grouping. However, ** if the WHERE_SORTBYGROUP flag is also passed to sqlite3WhereBegin(), then ** this function may be called on the returned WhereInfo object. It returns ** true if the rows really will be sorted in the specified order, or false ** otherwise. ** ** For example, assuming: ** ** CREATE INDEX i1 ON t1(x, Y); ** ** then ** ** SELECT * FROM t1 GROUP BY x,y ORDER BY x,y; -- IsSorted()==1 ** SELECT * FROM t1 GROUP BY y,x ORDER BY y,x; -- IsSorted()==0 */ SQLITE_PRIVATE int sqlite3WhereIsSorted(WhereInfo *pWInfo){ assert( pWInfo->wctrlFlags & WHERE_GROUPBY ); assert( pWInfo->wctrlFlags & WHERE_SORTBYGROUP ); return pWInfo->sorted; } #ifdef WHERETRACE_ENABLED /* For debugging use only: */ static const char *wherePathName(WherePath *pPath, int nLoop, WhereLoop *pLast){ static char zName[65]; int i; for(i=0; iaLoop[i]->cId; } if( pLast ) zName[i++] = pLast->cId; zName[i] = 0; return zName; } #endif /* ** Return the cost of sorting nRow rows, assuming that the keys have ** nOrderby columns and that the first nSorted columns are already in ** order. 
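**
** As a rough worked example (hypothetical values): for nOrderBy==4 with
** the first nSorted==2 columns already in order, the rScale term in the
** TUNING formula below is sqlite3LogEst(2*100/4)-66 == 56-66 == -10, so
** the block-sort is estimated at roughly half the cost of a full sort
** of the same rows.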
*/ static LogEst whereSortingCost( WhereInfo *pWInfo, LogEst nRow, int nOrderBy, int nSorted ){ /* TUNING: Estimated cost of a full external sort, where N is ** the number of rows to sort is: ** ** cost = (3.0 * N * log(N)). ** ** Or, if the order-by clause has X terms but only the last Y ** terms are out of order, then block-sorting will reduce the ** sorting cost to: ** ** cost = (3.0 * N * log(N)) * (Y/X) ** ** The (Y/X) term is implemented using stack variable rScale ** below. */ LogEst rScale, rSortCost; assert( nOrderBy>0 && 66==sqlite3LogEst(100) ); rScale = sqlite3LogEst((nOrderBy-nSorted)*100/nOrderBy) - 66; rSortCost = nRow + estLog(nRow) + rScale + 16; /* TUNING: The cost of implementing DISTINCT using a B-TREE is ** similar but with a larger constant of proportionality. ** Multiply by an additional factor of 3.0. */ if( pWInfo->wctrlFlags & WHERE_WANT_DISTINCT ){ rSortCost += 16; } return rSortCost; } /* ** Given the list of WhereLoop objects at pWInfo->pLoops, this routine ** attempts to find the lowest cost path that visits each WhereLoop ** once. This path is then loaded into the pWInfo->a[].pWLoop fields. ** ** Assume that the total number of output rows that will need to be sorted ** will be nRowEst (in the 10*log2 representation). Or, ignore sorting ** costs if nRowEst==0. ** ** Return SQLITE_OK on success or SQLITE_NOMEM of a memory allocation ** error occurs. */ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ int mxChoice; /* Maximum number of simultaneous paths tracked */ int nLoop; /* Number of terms in the join */ Parse *pParse; /* Parsing context */ sqlite3 *db; /* The database connection */ int iLoop; /* Loop counter over the terms of the join */ int ii, jj; /* Loop counters */ int mxI = 0; /* Index of next entry to replace */ int nOrderBy; /* Number of ORDER BY clause terms */ LogEst mxCost = 0; /* Maximum cost of a set of paths */ LogEst mxUnsorted = 0; /* Maximum unsorted cost of a set of path */ int nTo, nFrom; /* Number of valid entries in aTo[] and aFrom[] */ WherePath *aFrom; /* All nFrom paths at the previous level */ WherePath *aTo; /* The nTo best paths at the current level */ WherePath *pFrom; /* An element of aFrom[] that we are working on */ WherePath *pTo; /* An element of aTo[] that we are working on */ WhereLoop *pWLoop; /* One of the WhereLoop objects */ WhereLoop **pX; /* Used to divy up the pSpace memory */ LogEst *aSortCost = 0; /* Sorting and partial sorting costs */ char *pSpace; /* Temporary memory used by this routine */ int nSpace; /* Bytes of space allocated at pSpace */ pParse = pWInfo->pParse; db = pParse->db; nLoop = pWInfo->nLevel; /* TUNING: For simple queries, only the best path is tracked. ** For 2-way joins, the 5 best paths are followed. ** For joins of 3 or more tables, track the 10 best paths */ mxChoice = (nLoop<=1) ? 1 : (nLoop==2 ? 5 : 10); assert( nLoop<=pWInfo->pTabList->nSrc ); WHERETRACE(0x002, ("---- begin solver. (nRowEst=%d)\n", nRowEst)); /* If nRowEst is zero and there is an ORDER BY clause, ignore it. In this ** case the purpose of this call is to estimate the number of rows returned ** by the overall query. Once this estimate has been obtained, the caller ** will invoke this function a second time, passing the estimate as the ** nRowEst parameter. 
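**
** (The calling pattern, as it appears later in sqlite3WhereBegin(), is
** roughly:
**
**     wherePathSolver(pWInfo, 0);
**     if( pWInfo->pOrderBy ){
**       wherePathSolver(pWInfo, pWInfo->nRowOut+1);
**     }
** )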
*/ if( pWInfo->pOrderBy==0 || nRowEst==0 ){ nOrderBy = 0; }else{ nOrderBy = pWInfo->pOrderBy->nExpr; } /* Allocate and initialize space for aTo, aFrom and aSortCost[] */ nSpace = (sizeof(WherePath)+sizeof(WhereLoop*)*nLoop)*mxChoice*2; nSpace += sizeof(LogEst) * nOrderBy; pSpace = sqlite3DbMallocRaw(db, nSpace); if( pSpace==0 ) return SQLITE_NOMEM; aTo = (WherePath*)pSpace; aFrom = aTo+mxChoice; memset(aFrom, 0, sizeof(aFrom[0])); pX = (WhereLoop**)(aFrom+mxChoice); for(ii=mxChoice*2, pFrom=aTo; ii>0; ii--, pFrom++, pX += nLoop){ pFrom->aLoop = pX; } if( nOrderBy ){ /* If there is an ORDER BY clause and it is not being ignored, set up ** space for the aSortCost[] array. Each element of the aSortCost array ** is either zero - meaning it has not yet been initialized - or the ** cost of sorting nRowEst rows of data where the first X terms of ** the ORDER BY clause are already in order, where X is the array ** index. */ aSortCost = (LogEst*)pX; memset(aSortCost, 0, sizeof(LogEst) * nOrderBy); } assert( aSortCost==0 || &pSpace[nSpace]==(char*)&aSortCost[nOrderBy] ); assert( aSortCost!=0 || &pSpace[nSpace]==(char*)pX ); /* Seed the search with a single WherePath containing zero WhereLoops. ** ** TUNING: Do not let the number of iterations go above 28. If the cost ** of computing an automatic index is not paid back within the first 28 ** rows, then do not use the automatic index. */ aFrom[0].nRow = MIN(pParse->nQueryLoop, 48); assert( 48==sqlite3LogEst(28) ); nFrom = 1; assert( aFrom[0].isOrdered==0 ); if( nOrderBy ){ /* If nLoop is zero, then there are no FROM terms in the query. Since ** in this case the query may return a maximum of one row, the results ** are already in the requested order. Set isOrdered to nOrderBy to ** indicate this. Or, if nLoop is greater than zero, set isOrdered to ** -1, indicating that the result set may or may not be ordered, ** depending on the loops added to the current plan. */ aFrom[0].isOrdered = nLoop>0 ? -1 : nOrderBy; } /* Compute successively longer WherePaths using the previous generation ** of WherePaths as the basis for the next. Keep track of the mxChoice ** best paths at each generation */ for(iLoop=0; iLooppLoops; pWLoop; pWLoop=pWLoop->pNextLoop){ LogEst nOut; /* Rows visited by (pFrom+pWLoop) */ LogEst rCost; /* Cost of path (pFrom+pWLoop) */ LogEst rUnsorted; /* Unsorted cost of (pFrom+pWLoop) */ i8 isOrdered = pFrom->isOrdered; /* isOrdered for (pFrom+pWLoop) */ Bitmask maskNew; /* Mask of src visited by (..) */ Bitmask revMask = 0; /* Mask of rev-order loops for (..) */ if( (pWLoop->prereq & ~pFrom->maskLoop)!=0 ) continue; if( (pWLoop->maskSelf & pFrom->maskLoop)!=0 ) continue; /* At this point, pWLoop is a candidate to be the next loop. ** Compute its cost */ rUnsorted = sqlite3LogEstAdd(pWLoop->rSetup,pWLoop->rRun + pFrom->nRow); rUnsorted = sqlite3LogEstAdd(rUnsorted, pFrom->rUnsorted); nOut = pFrom->nRow + pWLoop->nOut; maskNew = pFrom->maskLoop | pWLoop->maskSelf; if( isOrdered<0 ){ isOrdered = wherePathSatisfiesOrderBy(pWInfo, pWInfo->pOrderBy, pFrom, pWInfo->wctrlFlags, iLoop, pWLoop, &revMask); }else{ revMask = pFrom->revLoop; } if( isOrdered>=0 && isOrderedisOrdered^isOrdered)&0x80)==0" is equivalent ** to (pTo->isOrdered==(-1))==(isOrdered==(-1))" for the range ** of legal values for isOrdered, -1..64. */ for(jj=0, pTo=aTo; jjmaskLoop==maskNew && ((pTo->isOrdered^isOrdered)&0x80)==0 ){ testcase( jj==nTo-1 ); break; } } if( jj>=nTo ){ /* None of the existing best-so-far paths match the candidate. 
*/ if( nTo>=mxChoice && (rCost>mxCost || (rCost==mxCost && rUnsorted>=mxUnsorted)) ){ /* The current candidate is no better than any of the mxChoice ** paths currently in the best-so-far buffer. So discard ** this candidate as not viable. */ #ifdef WHERETRACE_ENABLED /* 0x4 */ if( sqlite3WhereTrace&0x4 ){ sqlite3DebugPrintf("Skip %s cost=%-3d,%3d order=%c\n", wherePathName(pFrom, iLoop, pWLoop), rCost, nOut, isOrdered>=0 ? isOrdered+'0' : '?'); } #endif continue; } /* If we reach this points it means that the new candidate path ** needs to be added to the set of best-so-far paths. */ if( nTo=0 ? isOrdered+'0' : '?'); } #endif }else{ /* Control reaches here if best-so-far path pTo=aTo[jj] covers the ** same set of loops and has the sam isOrdered setting as the ** candidate path. Check to see if the candidate should replace ** pTo or if the candidate should be skipped */ if( pTo->rCostrCost==rCost && pTo->nRow<=nOut) ){ #ifdef WHERETRACE_ENABLED /* 0x4 */ if( sqlite3WhereTrace&0x4 ){ sqlite3DebugPrintf( "Skip %s cost=%-3d,%3d order=%c", wherePathName(pFrom, iLoop, pWLoop), rCost, nOut, isOrdered>=0 ? isOrdered+'0' : '?'); sqlite3DebugPrintf(" vs %s cost=%-3d,%d order=%c\n", wherePathName(pTo, iLoop+1, 0), pTo->rCost, pTo->nRow, pTo->isOrdered>=0 ? pTo->isOrdered+'0' : '?'); } #endif /* Discard the candidate path from further consideration */ testcase( pTo->rCost==rCost ); continue; } testcase( pTo->rCost==rCost+1 ); /* Control reaches here if the candidate path is better than the ** pTo path. Replace pTo with the candidate. */ #ifdef WHERETRACE_ENABLED /* 0x4 */ if( sqlite3WhereTrace&0x4 ){ sqlite3DebugPrintf( "Update %s cost=%-3d,%3d order=%c", wherePathName(pFrom, iLoop, pWLoop), rCost, nOut, isOrdered>=0 ? isOrdered+'0' : '?'); sqlite3DebugPrintf(" was %s cost=%-3d,%3d order=%c\n", wherePathName(pTo, iLoop+1, 0), pTo->rCost, pTo->nRow, pTo->isOrdered>=0 ? pTo->isOrdered+'0' : '?'); } #endif } /* pWLoop is a winner. Add it to the set of best so far */ pTo->maskLoop = pFrom->maskLoop | pWLoop->maskSelf; pTo->revLoop = revMask; pTo->nRow = nOut; pTo->rCost = rCost; pTo->rUnsorted = rUnsorted; pTo->isOrdered = isOrdered; memcpy(pTo->aLoop, pFrom->aLoop, sizeof(WhereLoop*)*iLoop); pTo->aLoop[iLoop] = pWLoop; if( nTo>=mxChoice ){ mxI = 0; mxCost = aTo[0].rCost; mxUnsorted = aTo[0].nRow; for(jj=1, pTo=&aTo[1]; jjrCost>mxCost || (pTo->rCost==mxCost && pTo->rUnsorted>mxUnsorted) ){ mxCost = pTo->rCost; mxUnsorted = pTo->rUnsorted; mxI = jj; } } } } } #ifdef WHERETRACE_ENABLED /* >=2 */ if( sqlite3WhereTrace & 0x02 ){ sqlite3DebugPrintf("---- after round %d ----\n", iLoop); for(ii=0, pTo=aTo; iirCost, pTo->nRow, pTo->isOrdered>=0 ? (pTo->isOrdered+'0') : '?'); if( pTo->isOrdered>0 ){ sqlite3DebugPrintf(" rev=0x%llx\n", pTo->revLoop); }else{ sqlite3DebugPrintf("\n"); } } } #endif /* Swap the roles of aFrom and aTo for the next generation */ pFrom = aTo; aTo = aFrom; aFrom = pFrom; nFrom = nTo; } if( nFrom==0 ){ sqlite3ErrorMsg(pParse, "no query solution"); sqlite3DbFree(db, pSpace); return SQLITE_ERROR; } /* Find the lowest cost path. 
pFrom will be left pointing to that path */ pFrom = aFrom; for(ii=1; iirCost>aFrom[ii].rCost ) pFrom = &aFrom[ii]; } assert( pWInfo->nLevel==nLoop ); /* Load the lowest cost path into pWInfo */ for(iLoop=0; iLoopa + iLoop; pLevel->pWLoop = pWLoop = pFrom->aLoop[iLoop]; pLevel->iFrom = pWLoop->iTab; pLevel->iTabCur = pWInfo->pTabList->a[pLevel->iFrom].iCursor; } if( (pWInfo->wctrlFlags & WHERE_WANT_DISTINCT)!=0 && (pWInfo->wctrlFlags & WHERE_DISTINCTBY)==0 && pWInfo->eDistinct==WHERE_DISTINCT_NOOP && nRowEst ){ Bitmask notUsed; int rc = wherePathSatisfiesOrderBy(pWInfo, pWInfo->pResultSet, pFrom, WHERE_DISTINCTBY, nLoop-1, pFrom->aLoop[nLoop-1], ¬Used); if( rc==pWInfo->pResultSet->nExpr ){ pWInfo->eDistinct = WHERE_DISTINCT_ORDERED; } } if( pWInfo->pOrderBy ){ if( pWInfo->wctrlFlags & WHERE_DISTINCTBY ){ if( pFrom->isOrdered==pWInfo->pOrderBy->nExpr ){ pWInfo->eDistinct = WHERE_DISTINCT_ORDERED; } }else{ pWInfo->nOBSat = pFrom->isOrdered; if( pWInfo->nOBSat<0 ) pWInfo->nOBSat = 0; pWInfo->revMask = pFrom->revLoop; } if( (pWInfo->wctrlFlags & WHERE_SORTBYGROUP) && pWInfo->nOBSat==pWInfo->pOrderBy->nExpr && nLoop>0 ){ Bitmask revMask = 0; int nOrder = wherePathSatisfiesOrderBy(pWInfo, pWInfo->pOrderBy, pFrom, 0, nLoop-1, pFrom->aLoop[nLoop-1], &revMask ); assert( pWInfo->sorted==0 ); if( nOrder==pWInfo->pOrderBy->nExpr ){ pWInfo->sorted = 1; pWInfo->revMask = revMask; } } } pWInfo->nRowOut = pFrom->nRow; /* Free temporary memory and return success */ sqlite3DbFree(db, pSpace); return SQLITE_OK; } /* ** Most queries use only a single table (they are not joins) and have ** simple == constraints against indexed fields. This routine attempts ** to plan those simple cases using much less ceremony than the ** general-purpose query planner, and thereby yield faster sqlite3_prepare() ** times for the common case. ** ** Return non-zero on success, if this query can be handled by this ** no-frills query planner. Return zero if this query needs the ** general-purpose query planner. */ static int whereShortCut(WhereLoopBuilder *pBuilder){ WhereInfo *pWInfo; struct SrcList_item *pItem; WhereClause *pWC; WhereTerm *pTerm; WhereLoop *pLoop; int iCur; int j; Table *pTab; Index *pIdx; pWInfo = pBuilder->pWInfo; if( pWInfo->wctrlFlags & WHERE_FORCE_TABLE ) return 0; assert( pWInfo->pTabList->nSrc>=1 ); pItem = pWInfo->pTabList->a; pTab = pItem->pTab; if( IsVirtual(pTab) ) return 0; if( pItem->zIndexedBy ) return 0; iCur = pItem->iCursor; pWC = &pWInfo->sWC; pLoop = pBuilder->pNew; pLoop->wsFlags = 0; pLoop->nSkip = 0; pTerm = sqlite3WhereFindTerm(pWC, iCur, -1, 0, WO_EQ|WO_IS, 0); if( pTerm ){ testcase( pTerm->eOperator & WO_IS ); pLoop->wsFlags = WHERE_COLUMN_EQ|WHERE_IPK|WHERE_ONEROW; pLoop->aLTerm[0] = pTerm; pLoop->nLTerm = 1; pLoop->u.btree.nEq = 1; /* TUNING: Cost of a rowid lookup is 10 */ pLoop->rRun = 33; /* 33==sqlite3LogEst(10) */ }else{ for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){ int opMask; assert( pLoop->aLTermSpace==pLoop->aLTerm ); if( !IsUniqueIndex(pIdx) || pIdx->pPartIdxWhere!=0 || pIdx->nKeyCol>ArraySize(pLoop->aLTermSpace) ) continue; opMask = pIdx->uniqNotNull ? 
(WO_EQ|WO_IS) : WO_EQ; for(j=0; jnKeyCol; j++){ pTerm = sqlite3WhereFindTerm(pWC, iCur, pIdx->aiColumn[j], 0, opMask, pIdx); if( pTerm==0 ) break; testcase( pTerm->eOperator & WO_IS ); pLoop->aLTerm[j] = pTerm; } if( j!=pIdx->nKeyCol ) continue; pLoop->wsFlags = WHERE_COLUMN_EQ|WHERE_ONEROW|WHERE_INDEXED; if( pIdx->isCovering || (pItem->colUsed & ~columnsInIndex(pIdx))==0 ){ pLoop->wsFlags |= WHERE_IDX_ONLY; } pLoop->nLTerm = j; pLoop->u.btree.nEq = j; pLoop->u.btree.pIndex = pIdx; /* TUNING: Cost of a unique index lookup is 15 */ pLoop->rRun = 39; /* 39==sqlite3LogEst(15) */ break; } } if( pLoop->wsFlags ){ pLoop->nOut = (LogEst)1; pWInfo->a[0].pWLoop = pLoop; pLoop->maskSelf = sqlite3WhereGetMask(&pWInfo->sMaskSet, iCur); pWInfo->a[0].iTabCur = iCur; pWInfo->nRowOut = 1; if( pWInfo->pOrderBy ) pWInfo->nOBSat = pWInfo->pOrderBy->nExpr; if( pWInfo->wctrlFlags & WHERE_WANT_DISTINCT ){ pWInfo->eDistinct = WHERE_DISTINCT_UNIQUE; } #ifdef SQLITE_DEBUG pLoop->cId = '0'; #endif return 1; } return 0; } /* ** Generate the beginning of the loop used for WHERE clause processing. ** The return value is a pointer to an opaque structure that contains ** information needed to terminate the loop. Later, the calling routine ** should invoke sqlite3WhereEnd() with the return value of this function ** in order to complete the WHERE clause processing. ** ** If an error occurs, this routine returns NULL. ** ** The basic idea is to do a nested loop, one loop for each table in ** the FROM clause of a select. (INSERT and UPDATE statements are the ** same as a SELECT with only a single table in the FROM clause.) For ** example, if the SQL is this: ** ** SELECT * FROM t1, t2, t3 WHERE ...; ** ** Then the code generated is conceptually like the following: ** ** foreach row1 in t1 do \ Code generated ** foreach row2 in t2 do |-- by sqlite3WhereBegin() ** foreach row3 in t3 do / ** ... ** end \ Code generated ** end |-- by sqlite3WhereEnd() ** end / ** ** Note that the loops might not be nested in the order in which they ** appear in the FROM clause if a different order is better able to make ** use of indices. Note also that when the IN operator appears in ** the WHERE clause, it might result in additional nested loops for ** scanning through all values on the right-hand side of the IN. ** ** There are Btree cursors associated with each table. t1 uses cursor ** number pTabList->a[0].iCursor. t2 uses the cursor pTabList->a[1].iCursor. ** And so forth. This routine generates code to open those VDBE cursors ** and sqlite3WhereEnd() generates the code to close them. ** ** The code that sqlite3WhereBegin() generates leaves the cursors named ** in pTabList pointing at their appropriate entries. The [...] code ** can use OP_Column and OP_Rowid opcodes on these cursors to extract ** data from the various tables of the loop. ** ** If the WHERE clause is empty, the foreach loops must each scan their ** entire tables. Thus a three-way join is an O(N^3) operation. But if ** the tables have indices and there are terms in the WHERE clause that ** refer to those indices, a complete table scan can be avoided and the ** code will run much faster. Most of the work of this routine is checking ** to see if there are indices that can be used to speed up the loop. ** ** Terms of the WHERE clause are also used to limit which rows actually ** make it to the "..." in the middle of the loop. 
After each "foreach", ** terms of the WHERE clause that use only terms in that loop and outer ** loops are evaluated and if false a jump is made around all subsequent ** inner loops (or around the "..." if the test occurs within the inner- ** most loop) ** ** OUTER JOINS ** ** An outer join of tables t1 and t2 is conceptally coded as follows: ** ** foreach row1 in t1 do ** flag = 0 ** foreach row2 in t2 do ** start: ** ... ** flag = 1 ** end ** if flag==0 then ** move the row2 cursor to a null row ** goto start ** fi ** end ** ** ORDER BY CLAUSE PROCESSING ** ** pOrderBy is a pointer to the ORDER BY clause (or the GROUP BY clause ** if the WHERE_GROUPBY flag is set in wctrlFlags) of a SELECT statement ** if there is one. If there is no ORDER BY clause or if this routine ** is called from an UPDATE or DELETE statement, then pOrderBy is NULL. ** ** The iIdxCur parameter is the cursor number of an index. If ** WHERE_ONETABLE_ONLY is set, iIdxCur is the cursor number of an index ** to use for OR clause processing. The WHERE clause should use this ** specific cursor. If WHERE_ONEPASS_DESIRED is set, then iIdxCur is ** the first cursor in an array of cursors for all indices. iIdxCur should ** be used to compute the appropriate cursor depending on which index is ** used. */ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( Parse *pParse, /* The parser context */ SrcList *pTabList, /* FROM clause: A list of all tables to be scanned */ Expr *pWhere, /* The WHERE clause */ ExprList *pOrderBy, /* An ORDER BY (or GROUP BY) clause, or NULL */ ExprList *pResultSet, /* Result set of the query */ u16 wctrlFlags, /* One of the WHERE_* flags defined in sqliteInt.h */ int iIdxCur /* If WHERE_ONETABLE_ONLY is set, index cursor number */ ){ int nByteWInfo; /* Num. bytes allocated for WhereInfo struct */ int nTabList; /* Number of elements in pTabList */ WhereInfo *pWInfo; /* Will become the return value of this function */ Vdbe *v = pParse->pVdbe; /* The virtual database engine */ Bitmask notReady; /* Cursors that are not yet positioned */ WhereLoopBuilder sWLB; /* The WhereLoop builder */ WhereMaskSet *pMaskSet; /* The expression mask set */ WhereLevel *pLevel; /* A single level in pWInfo->a[] */ WhereLoop *pLoop; /* Pointer to a single WhereLoop object */ int ii; /* Loop counter */ sqlite3 *db; /* Database connection */ int rc; /* Return code */ /* Variable initialization */ db = pParse->db; memset(&sWLB, 0, sizeof(sWLB)); /* An ORDER/GROUP BY clause of more than 63 terms cannot be optimized */ testcase( pOrderBy && pOrderBy->nExpr==BMS-1 ); if( pOrderBy && pOrderBy->nExpr>=BMS ) pOrderBy = 0; sWLB.pOrderBy = pOrderBy; /* Disable the DISTINCT optimization if SQLITE_DistinctOpt is set via ** sqlite3_test_ctrl(SQLITE_TESTCTRL_OPTIMIZATIONS,...) */ if( OptimizationDisabled(db, SQLITE_DistinctOpt) ){ wctrlFlags &= ~WHERE_WANT_DISTINCT; } /* The number of tables in the FROM clause is limited by the number of ** bits in a Bitmask */ testcase( pTabList->nSrc==BMS ); if( pTabList->nSrc>BMS ){ sqlite3ErrorMsg(pParse, "at most %d tables in a join", BMS); return 0; } /* This function normally generates a nested loop for all tables in ** pTabList. But if the WHERE_ONETABLE_ONLY flag is set, then we should ** only generate code for the first table in pTabList and assume that ** any cursors associated with subsequent tables are uninitialized. */ nTabList = (wctrlFlags & WHERE_ONETABLE_ONLY) ? 1 : pTabList->nSrc; /* Allocate and initialize the WhereInfo structure that will become the ** return value. 
A single allocation is used to store the WhereInfo ** struct, the contents of WhereInfo.a[], the WhereClause structure ** and the WhereMaskSet structure. Since WhereClause contains an 8-byte ** field (type Bitmask) it must be aligned on an 8-byte boundary on ** some architectures. Hence the ROUND8() below. */ nByteWInfo = ROUND8(sizeof(WhereInfo)+(nTabList-1)*sizeof(WhereLevel)); pWInfo = sqlite3DbMallocZero(db, nByteWInfo + sizeof(WhereLoop)); if( db->mallocFailed ){ sqlite3DbFree(db, pWInfo); pWInfo = 0; goto whereBeginError; } pWInfo->aiCurOnePass[0] = pWInfo->aiCurOnePass[1] = -1; pWInfo->nLevel = nTabList; pWInfo->pParse = pParse; pWInfo->pTabList = pTabList; pWInfo->pOrderBy = pOrderBy; pWInfo->pResultSet = pResultSet; pWInfo->iBreak = pWInfo->iContinue = sqlite3VdbeMakeLabel(v); pWInfo->wctrlFlags = wctrlFlags; pWInfo->savedNQueryLoop = pParse->nQueryLoop; pMaskSet = &pWInfo->sMaskSet; sWLB.pWInfo = pWInfo; sWLB.pWC = &pWInfo->sWC; sWLB.pNew = (WhereLoop*)(((char*)pWInfo)+nByteWInfo); assert( EIGHT_BYTE_ALIGNMENT(sWLB.pNew) ); whereLoopInit(sWLB.pNew); #ifdef SQLITE_DEBUG sWLB.pNew->cId = '*'; #endif /* Split the WHERE clause into separate subexpressions where each ** subexpression is separated by an AND operator. */ initMaskSet(pMaskSet); sqlite3WhereClauseInit(&pWInfo->sWC, pWInfo); sqlite3WhereSplit(&pWInfo->sWC, pWhere, TK_AND); /* Special case: a WHERE clause that is constant. Evaluate the ** expression and either jump over all of the code or fall thru. */ for(ii=0; iinTerm; ii++){ if( nTabList==0 || sqlite3ExprIsConstantNotJoin(sWLB.pWC->a[ii].pExpr) ){ sqlite3ExprIfFalse(pParse, sWLB.pWC->a[ii].pExpr, pWInfo->iBreak, SQLITE_JUMPIFNULL); sWLB.pWC->a[ii].wtFlags |= TERM_CODED; } } /* Special case: No FROM clause */ if( nTabList==0 ){ if( pOrderBy ) pWInfo->nOBSat = pOrderBy->nExpr; if( wctrlFlags & WHERE_WANT_DISTINCT ){ pWInfo->eDistinct = WHERE_DISTINCT_UNIQUE; } } /* Assign a bit from the bitmask to every term in the FROM clause. ** ** When assigning bitmask values to FROM clause cursors, it must be ** the case that if X is the bitmask for the N-th FROM clause term then ** the bitmask for all FROM clause terms to the left of the N-th term ** is (X-1). An expression from the ON clause of a LEFT JOIN can use ** its Expr.iRightJoinTable value to find the bitmask of the right table ** of the join. Subtracting one from the right table bitmask gives a ** bitmask for all tables to the left of the join. Knowing the bitmask ** for all tables to the left of a left join is important. Ticket #3015. ** ** Note that bitmasks are created for all pTabList->nSrc tables in ** pTabList, not just the first nTabList tables. nTabList is normally ** equal to pTabList->nSrc but might be shortened to 1 if the ** WHERE_ONETABLE_ONLY flag is set. */ for(ii=0; iinSrc; ii++){ createMask(pMaskSet, pTabList->a[ii].iCursor); } #ifndef NDEBUG { Bitmask toTheLeft = 0; for(ii=0; iinSrc; ii++){ Bitmask m = sqlite3WhereGetMask(pMaskSet, pTabList->a[ii].iCursor); assert( (m-1)==toTheLeft ); toTheLeft |= m; } } #endif /* Analyze all of the subexpressions. */ sqlite3WhereExprAnalyze(pTabList, &pWInfo->sWC); if( db->mallocFailed ) goto whereBeginError; if( wctrlFlags & WHERE_WANT_DISTINCT ){ if( isDistinctRedundant(pParse, pTabList, &pWInfo->sWC, pResultSet) ){ /* The DISTINCT marking is pointless. Ignore it. 
*/ pWInfo->eDistinct = WHERE_DISTINCT_UNIQUE; }else if( pOrderBy==0 ){ /* Try to ORDER BY the result set to make distinct processing easier */ pWInfo->wctrlFlags |= WHERE_DISTINCTBY; pWInfo->pOrderBy = pResultSet; } } /* Construct the WhereLoop objects */ WHERETRACE(0xffff,("*** Optimizer Start ***\n")); #if defined(WHERETRACE_ENABLED) if( sqlite3WhereTrace & 0x100 ){ /* Display all terms of the WHERE clause */ int i; for(i=0; inTerm; i++){ whereTermPrint(&sWLB.pWC->a[i], i); } } #endif if( nTabList!=1 || whereShortCut(&sWLB)==0 ){ rc = whereLoopAddAll(&sWLB); if( rc ) goto whereBeginError; #ifdef WHERETRACE_ENABLED if( sqlite3WhereTrace ){ /* Display all of the WhereLoop objects */ WhereLoop *p; int i; static const char zLabel[] = "0123456789abcdefghijklmnopqrstuvwyxz" "ABCDEFGHIJKLMNOPQRSTUVWYXZ"; for(p=pWInfo->pLoops, i=0; p; p=p->pNextLoop, i++){ p->cId = zLabel[i%sizeof(zLabel)]; whereLoopPrint(p, sWLB.pWC); } } #endif wherePathSolver(pWInfo, 0); if( db->mallocFailed ) goto whereBeginError; if( pWInfo->pOrderBy ){ wherePathSolver(pWInfo, pWInfo->nRowOut+1); if( db->mallocFailed ) goto whereBeginError; } } if( pWInfo->pOrderBy==0 && (db->flags & SQLITE_ReverseOrder)!=0 ){ pWInfo->revMask = (Bitmask)(-1); } if( pParse->nErr || NEVER(db->mallocFailed) ){ goto whereBeginError; } #ifdef WHERETRACE_ENABLED if( sqlite3WhereTrace ){ sqlite3DebugPrintf("---- Solution nRow=%d", pWInfo->nRowOut); if( pWInfo->nOBSat>0 ){ sqlite3DebugPrintf(" ORDERBY=%d,0x%llx", pWInfo->nOBSat, pWInfo->revMask); } switch( pWInfo->eDistinct ){ case WHERE_DISTINCT_UNIQUE: { sqlite3DebugPrintf(" DISTINCT=unique"); break; } case WHERE_DISTINCT_ORDERED: { sqlite3DebugPrintf(" DISTINCT=ordered"); break; } case WHERE_DISTINCT_UNORDERED: { sqlite3DebugPrintf(" DISTINCT=unordered"); break; } } sqlite3DebugPrintf("\n"); for(ii=0; iinLevel; ii++){ whereLoopPrint(pWInfo->a[ii].pWLoop, sWLB.pWC); } } #endif /* Attempt to omit tables from the join that do not effect the result */ if( pWInfo->nLevel>=2 && pResultSet!=0 && OptimizationEnabled(db, SQLITE_OmitNoopJoin) ){ Bitmask tabUsed = sqlite3WhereExprListUsage(pMaskSet, pResultSet); if( sWLB.pOrderBy ){ tabUsed |= sqlite3WhereExprListUsage(pMaskSet, sWLB.pOrderBy); } while( pWInfo->nLevel>=2 ){ WhereTerm *pTerm, *pEnd; pLoop = pWInfo->a[pWInfo->nLevel-1].pWLoop; if( (pWInfo->pTabList->a[pLoop->iTab].jointype & JT_LEFT)==0 ) break; if( (wctrlFlags & WHERE_WANT_DISTINCT)==0 && (pLoop->wsFlags & WHERE_ONEROW)==0 ){ break; } if( (tabUsed & pLoop->maskSelf)!=0 ) break; pEnd = sWLB.pWC->a + sWLB.pWC->nTerm; for(pTerm=sWLB.pWC->a; pTermprereqAll & pLoop->maskSelf)!=0 && !ExprHasProperty(pTerm->pExpr, EP_FromJoin) ){ break; } } if( pTerm drop loop %c not used\n", pLoop->cId)); pWInfo->nLevel--; nTabList--; } } WHERETRACE(0xffff,("*** Optimizer Finished ***\n")); pWInfo->pParse->nQueryLoop += pWInfo->nRowOut; /* If the caller is an UPDATE or DELETE statement that is requesting ** to use a one-pass algorithm, determine if this is appropriate. ** The one-pass algorithm only works if the WHERE clause constrains ** the statement to update or delete a single row. */ assert( (wctrlFlags & WHERE_ONEPASS_DESIRED)==0 || pWInfo->nLevel==1 ); if( (wctrlFlags & WHERE_ONEPASS_DESIRED)!=0 && (pWInfo->a[0].pWLoop->wsFlags & WHERE_ONEROW)!=0 ){ pWInfo->okOnePass = 1; if( HasRowid(pTabList->a[0].pTab) ){ pWInfo->a[0].pWLoop->wsFlags &= ~WHERE_IDX_ONLY; } } /* Open all tables in the pTabList and any indices selected for ** searching those tables. 
*/ for(ii=0, pLevel=pWInfo->a; iia[pLevel->iFrom]; pTab = pTabItem->pTab; iDb = sqlite3SchemaToIndex(db, pTab->pSchema); pLoop = pLevel->pWLoop; if( (pTab->tabFlags & TF_Ephemeral)!=0 || pTab->pSelect ){ /* Do nothing */ }else #ifndef SQLITE_OMIT_VIRTUALTABLE if( (pLoop->wsFlags & WHERE_VIRTUALTABLE)!=0 ){ const char *pVTab = (const char *)sqlite3GetVTable(db, pTab); int iCur = pTabItem->iCursor; sqlite3VdbeAddOp4(v, OP_VOpen, iCur, 0, 0, pVTab, P4_VTAB); }else if( IsVirtual(pTab) ){ /* noop */ }else #endif if( (pLoop->wsFlags & WHERE_IDX_ONLY)==0 && (wctrlFlags & WHERE_OMIT_OPEN_CLOSE)==0 ){ int op = OP_OpenRead; if( pWInfo->okOnePass ){ op = OP_OpenWrite; pWInfo->aiCurOnePass[0] = pTabItem->iCursor; }; sqlite3OpenTable(pParse, pTabItem->iCursor, iDb, pTab, op); assert( pTabItem->iCursor==pLevel->iTabCur ); testcase( !pWInfo->okOnePass && pTab->nCol==BMS-1 ); testcase( !pWInfo->okOnePass && pTab->nCol==BMS ); if( !pWInfo->okOnePass && pTab->nColcolUsed; int n = 0; for(; b; b=b>>1, n++){} sqlite3VdbeChangeP4(v, sqlite3VdbeCurrentAddr(v)-1, SQLITE_INT_TO_PTR(n), P4_INT32); assert( n<=pTab->nCol ); } #ifdef SQLITE_ENABLE_COLUMN_USED_MASK sqlite3VdbeAddOp4Dup8(v, OP_ColumnsUsed, pTabItem->iCursor, 0, 0, (const u8*)&pTabItem->colUsed, P4_INT64); #endif }else{ sqlite3TableLock(pParse, iDb, pTab->tnum, 0, pTab->zName); } if( pLoop->wsFlags & WHERE_INDEXED ){ Index *pIx = pLoop->u.btree.pIndex; int iIndexCur; int op = OP_OpenRead; /* iIdxCur is always set if to a positive value if ONEPASS is possible */ assert( iIdxCur!=0 || (pWInfo->wctrlFlags & WHERE_ONEPASS_DESIRED)==0 ); if( !HasRowid(pTab) && IsPrimaryKeyIndex(pIx) && (wctrlFlags & WHERE_ONETABLE_ONLY)!=0 ){ /* This is one term of an OR-optimization using the PRIMARY KEY of a ** WITHOUT ROWID table. No need for a separate index */ iIndexCur = pLevel->iTabCur; op = 0; }else if( pWInfo->okOnePass ){ Index *pJ = pTabItem->pTab->pIndex; iIndexCur = iIdxCur; assert( wctrlFlags & WHERE_ONEPASS_DESIRED ); while( ALWAYS(pJ) && pJ!=pIx ){ iIndexCur++; pJ = pJ->pNext; } op = OP_OpenWrite; pWInfo->aiCurOnePass[1] = iIndexCur; }else if( iIdxCur && (wctrlFlags & WHERE_ONETABLE_ONLY)!=0 ){ iIndexCur = iIdxCur; if( wctrlFlags & WHERE_REOPEN_IDX ) op = OP_ReopenIdx; }else{ iIndexCur = pParse->nTab++; } pLevel->iIdxCur = iIndexCur; assert( pIx->pSchema==pTab->pSchema ); assert( iIndexCur>=0 ); if( op ){ sqlite3VdbeAddOp3(v, op, iIndexCur, pIx->tnum, iDb); sqlite3VdbeSetP4KeyInfo(pParse, pIx); if( (pLoop->wsFlags & WHERE_CONSTRAINT)!=0 && (pLoop->wsFlags & (WHERE_COLUMN_RANGE|WHERE_SKIPSCAN))==0 && (pWInfo->wctrlFlags&WHERE_ORDERBY_MIN)==0 ){ sqlite3VdbeChangeP5(v, OPFLAG_SEEKEQ); /* Hint to COMDB2 */ } VdbeComment((v, "%s", pIx->zName)); #ifdef SQLITE_ENABLE_COLUMN_USED_MASK { u64 colUsed = 0; int ii, jj; for(ii=0; iinColumn; ii++){ jj = pIx->aiColumn[ii]; if( jj<0 ) continue; if( jj>63 ) jj = 63; if( (pTabItem->colUsed & MASKBIT(jj))==0 ) continue; colUsed |= ((u64)1)<<(ii<63 ? ii : 63); } sqlite3VdbeAddOp4Dup8(v, OP_ColumnsUsed, iIndexCur, 0, 0, (u8*)&colUsed, P4_INT64); } #endif /* SQLITE_ENABLE_COLUMN_USED_MASK */ } } if( iDb>=0 ) sqlite3CodeVerifySchema(pParse, iDb); } pWInfo->iTop = sqlite3VdbeCurrentAddr(v); if( db->mallocFailed ) goto whereBeginError; /* Generate the code to do the search. Each iteration of the for ** loop below generates code for a single nested loop of the VM ** program. 
*/ notReady = ~(Bitmask)0; for(ii=0; iia[ii]; wsFlags = pLevel->pWLoop->wsFlags; #ifndef SQLITE_OMIT_AUTOMATIC_INDEX if( (pLevel->pWLoop->wsFlags & WHERE_AUTO_INDEX)!=0 ){ constructAutomaticIndex(pParse, &pWInfo->sWC, &pTabList->a[pLevel->iFrom], notReady, pLevel); if( db->mallocFailed ) goto whereBeginError; } #endif addrExplain = sqlite3WhereExplainOneScan( pParse, pTabList, pLevel, ii, pLevel->iFrom, wctrlFlags ); pLevel->addrBody = sqlite3VdbeCurrentAddr(v); notReady = sqlite3WhereCodeOneLoopStart(pWInfo, ii, notReady); pWInfo->iContinue = pLevel->addrCont; if( (wsFlags&WHERE_MULTI_OR)==0 && (wctrlFlags&WHERE_ONETABLE_ONLY)==0 ){ sqlite3WhereAddScanStatus(v, pTabList, pLevel, addrExplain); } } /* Done. */ VdbeModuleComment((v, "Begin WHERE-core")); return pWInfo; /* Jump here if malloc fails */ whereBeginError: if( pWInfo ){ pParse->nQueryLoop = pWInfo->savedNQueryLoop; whereInfoFree(db, pWInfo); } return 0; } /* ** Generate the end of the WHERE loop. See comments on ** sqlite3WhereBegin() for additional information. */ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){ Parse *pParse = pWInfo->pParse; Vdbe *v = pParse->pVdbe; int i; WhereLevel *pLevel; WhereLoop *pLoop; SrcList *pTabList = pWInfo->pTabList; sqlite3 *db = pParse->db; /* Generate loop termination code. */ VdbeModuleComment((v, "End WHERE-core")); sqlite3ExprCacheClear(pParse); for(i=pWInfo->nLevel-1; i>=0; i--){ int addr; pLevel = &pWInfo->a[i]; pLoop = pLevel->pWLoop; sqlite3VdbeResolveLabel(v, pLevel->addrCont); if( pLevel->op!=OP_Noop ){ sqlite3VdbeAddOp3(v, pLevel->op, pLevel->p1, pLevel->p2, pLevel->p3); sqlite3VdbeChangeP5(v, pLevel->p5); VdbeCoverage(v); VdbeCoverageIf(v, pLevel->op==OP_Next); VdbeCoverageIf(v, pLevel->op==OP_Prev); VdbeCoverageIf(v, pLevel->op==OP_VNext); } if( pLoop->wsFlags & WHERE_IN_ABLE && pLevel->u.in.nIn>0 ){ struct InLoop *pIn; int j; sqlite3VdbeResolveLabel(v, pLevel->addrNxt); for(j=pLevel->u.in.nIn, pIn=&pLevel->u.in.aInLoop[j-1]; j>0; j--, pIn--){ sqlite3VdbeJumpHere(v, pIn->addrInTop+1); sqlite3VdbeAddOp2(v, pIn->eEndLoopOp, pIn->iCur, pIn->addrInTop); VdbeCoverage(v); VdbeCoverageIf(v, pIn->eEndLoopOp==OP_PrevIfOpen); VdbeCoverageIf(v, pIn->eEndLoopOp==OP_NextIfOpen); sqlite3VdbeJumpHere(v, pIn->addrInTop-1); } } sqlite3VdbeResolveLabel(v, pLevel->addrBrk); if( pLevel->addrSkip ){ sqlite3VdbeAddOp2(v, OP_Goto, 0, pLevel->addrSkip); VdbeComment((v, "next skip-scan on %s", pLoop->u.btree.pIndex->zName)); sqlite3VdbeJumpHere(v, pLevel->addrSkip); sqlite3VdbeJumpHere(v, pLevel->addrSkip-2); } if( pLevel->addrLikeRep ){ int op; if( sqlite3VdbeGetOp(v, pLevel->addrLikeRep-1)->p1 ){ op = OP_DecrJumpZero; }else{ op = OP_JumpZeroIncr; } sqlite3VdbeAddOp2(v, op, pLevel->iLikeRepCntr, pLevel->addrLikeRep); VdbeCoverage(v); } if( pLevel->iLeftJoin ){ addr = sqlite3VdbeAddOp1(v, OP_IfPos, pLevel->iLeftJoin); VdbeCoverage(v); assert( (pLoop->wsFlags & WHERE_IDX_ONLY)==0 || (pLoop->wsFlags & WHERE_INDEXED)!=0 ); if( (pLoop->wsFlags & WHERE_IDX_ONLY)==0 ){ sqlite3VdbeAddOp1(v, OP_NullRow, pTabList->a[i].iCursor); } if( pLoop->wsFlags & WHERE_INDEXED ){ sqlite3VdbeAddOp1(v, OP_NullRow, pLevel->iIdxCur); } if( pLevel->op==OP_Return ){ sqlite3VdbeAddOp2(v, OP_Gosub, pLevel->p1, pLevel->addrFirst); }else{ sqlite3VdbeAddOp2(v, OP_Goto, 0, pLevel->addrFirst); } sqlite3VdbeJumpHere(v, addr); } VdbeModuleComment((v, "End WHERE-loop%d: %s", i, pWInfo->pTabList->a[pLevel->iFrom].pTab->zName)); } /* The "break" point is here, just past the end of the outer loop. ** Set it. 
*/ sqlite3VdbeResolveLabel(v, pWInfo->iBreak); assert( pWInfo->nLevel<=pTabList->nSrc ); for(i=0, pLevel=pWInfo->a; inLevel; i++, pLevel++){ int k, last; VdbeOp *pOp; Index *pIdx = 0; struct SrcList_item *pTabItem = &pTabList->a[pLevel->iFrom]; Table *pTab = pTabItem->pTab; assert( pTab!=0 ); pLoop = pLevel->pWLoop; /* For a co-routine, change all OP_Column references to the table of ** the co-routine into OP_Copy of result contained in a register. ** OP_Rowid becomes OP_Null. */ if( pTabItem->viaCoroutine && !db->mallocFailed ){ translateColumnToCopy(v, pLevel->addrBody, pLevel->iTabCur, pTabItem->regResult); continue; } /* Close all of the cursors that were opened by sqlite3WhereBegin. ** Except, do not close cursors that will be reused by the OR optimization ** (WHERE_OMIT_OPEN_CLOSE). And do not close the OP_OpenWrite cursors ** created for the ONEPASS optimization. */ if( (pTab->tabFlags & TF_Ephemeral)==0 && pTab->pSelect==0 && (pWInfo->wctrlFlags & WHERE_OMIT_OPEN_CLOSE)==0 ){ int ws = pLoop->wsFlags; if( !pWInfo->okOnePass && (ws & WHERE_IDX_ONLY)==0 ){ sqlite3VdbeAddOp1(v, OP_Close, pTabItem->iCursor); } if( (ws & WHERE_INDEXED)!=0 && (ws & (WHERE_IPK|WHERE_AUTO_INDEX))==0 && pLevel->iIdxCur!=pWInfo->aiCurOnePass[1] ){ sqlite3VdbeAddOp1(v, OP_Close, pLevel->iIdxCur); } } /* If this scan uses an index, make VDBE code substitutions to read data ** from the index instead of from the table where possible. In some cases ** this optimization prevents the table from ever being read, which can ** yield a significant performance boost. ** ** Calls to the code generator in between sqlite3WhereBegin and ** sqlite3WhereEnd will have created code that references the table ** directly. This loop scans all that code looking for opcodes ** that reference the table and converts them into opcodes that ** reference the index. */ if( pLoop->wsFlags & (WHERE_INDEXED|WHERE_IDX_ONLY) ){ pIdx = pLoop->u.btree.pIndex; }else if( pLoop->wsFlags & WHERE_MULTI_OR ){ pIdx = pLevel->u.pCovidx; } if( pIdx && !db->mallocFailed ){ last = sqlite3VdbeCurrentAddr(v); k = pLevel->addrBody; pOp = sqlite3VdbeGetOp(v, k); for(; kp1!=pLevel->iTabCur ) continue; if( pOp->opcode==OP_Column ){ int x = pOp->p2; assert( pIdx->pTable==pTab ); if( !HasRowid(pTab) ){ Index *pPk = sqlite3PrimaryKeyIndex(pTab); x = pPk->aiColumn[x]; } x = sqlite3ColumnOfIndex(pIdx, x); if( x>=0 ){ pOp->p2 = x; pOp->p1 = pLevel->iIdxCur; } assert( (pLoop->wsFlags & WHERE_IDX_ONLY)==0 || x>=0 ); }else if( pOp->opcode==OP_Rowid ){ pOp->p1 = pLevel->iIdxCur; pOp->opcode = OP_IdxRowid; } } } } /* Final cleanup */ pParse->nQueryLoop = pWInfo->savedNQueryLoop; whereInfoFree(db, pWInfo); return; } /************** End of where.c ***********************************************/ /************** Begin file parse.c *******************************************/ /* Driver template for the LEMON parser generator. ** The author disclaims copyright to this source code. ** ** This version of "lempar.c" is modified, slightly, for use by SQLite. ** The only modifications are the addition of a couple of NEVER() ** macros to disable tests that are needed in the case of a general ** LALR(1) grammar but which are always false in the ** specific grammar used by SQLite. */ /* First off, code is included that follows the "include" declaration ** in the input grammar file. */ /* #include */ /* #include "sqliteInt.h" */ /* ** Disable all error recovery processing in the parser push-down ** automaton. 
*/ #define YYNOERRORRECOVERY 1 /* ** Make yytestcase() the same as testcase() */ #define yytestcase(X) testcase(X) /* ** An instance of this structure holds information about the ** LIMIT clause of a SELECT statement. */ struct LimitVal { Expr *pLimit; /* The LIMIT expression. NULL if there is no limit */ Expr *pOffset; /* The OFFSET expression. NULL if there is none */ }; /* ** An instance of this structure is used to store the LIKE, ** GLOB, NOT LIKE, and NOT GLOB operators. */ struct LikeOp { Token eOperator; /* "like" or "glob" or "regexp" */ int bNot; /* True if the NOT keyword is present */ }; /* ** An instance of the following structure describes the event of a ** TRIGGER. "a" is the event type, one of TK_UPDATE, TK_INSERT, ** TK_DELETE, or TK_INSTEAD. If the event is of the form ** ** UPDATE ON (a,b,c) ** ** Then the "b" IdList records the list "a,b,c". */ struct TrigEvent { int a; IdList * b; }; /* ** An instance of this structure holds the ATTACH key and the key type. */ struct AttachKey { int type; Token key; }; /* ** For a compound SELECT statement, make sure p->pPrior->pNext==p for ** all elements in the list. And make sure list length does not exceed ** SQLITE_LIMIT_COMPOUND_SELECT. */ static void parserDoubleLinkSelect(Parse *pParse, Select *p){ if( p->pPrior ){ Select *pNext = 0, *pLoop; int mxSelect, cnt = 0; for(pLoop=p; pLoop; pNext=pLoop, pLoop=pLoop->pPrior, cnt++){ pLoop->pNext = pNext; pLoop->selFlags |= SF_Compound; } if( (p->selFlags & SF_MultiValue)==0 && (mxSelect = pParse->db->aLimit[SQLITE_LIMIT_COMPOUND_SELECT])>0 && cnt>mxSelect ){ sqlite3ErrorMsg(pParse, "too many terms in compound SELECT"); } } } /* This is a utility routine used to set the ExprSpan.zStart and ** ExprSpan.zEnd values of pOut so that the span covers the complete ** range of text beginning with pStart and going to the end of pEnd. */ static void spanSet(ExprSpan *pOut, Token *pStart, Token *pEnd){ pOut->zStart = pStart->z; pOut->zEnd = &pEnd->z[pEnd->n]; } /* Construct a new Expr object from a single identifier. Use the ** new Expr to populate pOut. Set the span of pOut to be the identifier ** that created the expression. */ static void spanExpr(ExprSpan *pOut, Parse *pParse, int op, Token *pValue){ pOut->pExpr = sqlite3PExpr(pParse, op, 0, 0, pValue); pOut->zStart = pValue->z; pOut->zEnd = &pValue->z[pValue->n]; } /* This routine constructs a binary expression node out of two ExprSpan ** objects and uses the result to populate a new ExprSpan object. */ static void spanBinaryExpr( ExprSpan *pOut, /* Write the result here */ Parse *pParse, /* The parsing context. Errors accumulate here */ int op, /* The binary operation */ ExprSpan *pLeft, /* The left operand */ ExprSpan *pRight /* The right operand */ ){ pOut->pExpr = sqlite3PExpr(pParse, op, pLeft->pExpr, pRight->pExpr, 0); pOut->zStart = pLeft->zStart; pOut->zEnd = pRight->zEnd; } /* Construct an expression node for a unary postfix operator */ static void spanUnaryPostfix( ExprSpan *pOut, /* Write the new expression node here */ Parse *pParse, /* Parsing context to record errors */ int op, /* The operator */ ExprSpan *pOperand, /* The operand */ Token *pPostOp /* The operand token for setting the span */ ){ pOut->pExpr = sqlite3PExpr(pParse, op, pOperand->pExpr, 0, 0); pOut->zStart = pOperand->zStart; pOut->zEnd = &pPostOp->z[pPostOp->n]; } /* A routine to convert a binary TK_IS or TK_ISNOT expression into a ** unary TK_ISNULL or TK_NOTNULL expression. 
*/ static void binaryToUnaryIfNull(Parse *pParse, Expr *pY, Expr *pA, int op){ sqlite3 *db = pParse->db; if( pY && pA && pY->op==TK_NULL ){ pA->op = (u8)op; sqlite3ExprDelete(db, pA->pRight); pA->pRight = 0; } } /* Construct an expression node for a unary prefix operator */ static void spanUnaryPrefix( ExprSpan *pOut, /* Write the new expression node here */ Parse *pParse, /* Parsing context to record errors */ int op, /* The operator */ ExprSpan *pOperand, /* The operand */ Token *pPreOp /* The operand token for setting the span */ ){ pOut->pExpr = sqlite3PExpr(pParse, op, pOperand->pExpr, 0, 0); pOut->zStart = pPreOp->z; pOut->zEnd = pOperand->zEnd; } /* Next is all token values, in a form suitable for use by makeheaders. ** This section will be null unless lemon is run with the -m switch. */ /* ** These constants (all generated automatically by the parser generator) ** specify the various kinds of tokens (terminals) that the parser ** understands. ** ** Each symbol here is a terminal symbol in the grammar. */ /* Make sure the INTERFACE macro is defined. */ #ifndef INTERFACE # define INTERFACE 1 #endif /* The next thing included is series of defines which control ** various aspects of the generated parser. ** YYCODETYPE is the data type used for storing terminal ** and nonterminal numbers. "unsigned char" is ** used if there are fewer than 250 terminals ** and nonterminals. "int" is used otherwise. ** YYNOCODE is a number of type YYCODETYPE which corresponds ** to no legal terminal or nonterminal number. This ** number is used to fill in empty slots of the hash ** table. ** YYFALLBACK If defined, this indicates that one or more tokens ** have fall-back values which should be used if the ** original value of the token will not parse. ** YYACTIONTYPE is the data type used for storing terminal ** and nonterminal numbers. "unsigned char" is ** used if there are fewer than 250 rules and ** states combined. "int" is used otherwise. ** sqlite3ParserTOKENTYPE is the data type used for minor tokens given ** directly to the parser from the tokenizer. ** YYMINORTYPE is the data type used for all minor tokens. ** This is typically a union of many types, one of ** which is sqlite3ParserTOKENTYPE. The entry in the union ** for base tokens is called "yy0". ** YYSTACKDEPTH is the maximum depth of the parser's stack. If ** zero the stack is dynamically sized using realloc() ** sqlite3ParserARG_SDECL A static variable declaration for the %extra_argument ** sqlite3ParserARG_PDECL A parameter declaration for the %extra_argument ** sqlite3ParserARG_STORE Code to store %extra_argument into yypParser ** sqlite3ParserARG_FETCH Code to extract %extra_argument from yypParser ** YYNSTATE the combined number of states. ** YYNRULE the number of rules in the grammar ** YYERRORSYMBOL is the code number of the error symbol. If not ** defined, then do no error processing. 
*/ #define YYCODETYPE unsigned char #define YYNOCODE 254 #define YYACTIONTYPE unsigned short int #define YYWILDCARD 70 #define sqlite3ParserTOKENTYPE Token typedef union { int yyinit; sqlite3ParserTOKENTYPE yy0; Select* yy3; ExprList* yy14; With* yy59; SrcList* yy65; struct LikeOp yy96; Expr* yy132; u8 yy186; int yy328; ExprSpan yy346; struct TrigEvent yy378; u16 yy381; IdList* yy408; struct {int value; int mask;} yy429; TriggerStep* yy473; struct LimitVal yy476; } YYMINORTYPE; #ifndef YYSTACKDEPTH #define YYSTACKDEPTH 100 #endif #define sqlite3ParserARG_SDECL Parse *pParse; #define sqlite3ParserARG_PDECL ,Parse *pParse #define sqlite3ParserARG_FETCH Parse *pParse = yypParser->pParse #define sqlite3ParserARG_STORE yypParser->pParse = pParse #define YYNSTATE 642 #define YYNRULE 327 #define YYFALLBACK 1 #define YY_NO_ACTION (YYNSTATE+YYNRULE+2) #define YY_ACCEPT_ACTION (YYNSTATE+YYNRULE+1) #define YY_ERROR_ACTION (YYNSTATE+YYNRULE) /* The yyzerominor constant is used to initialize instances of ** YYMINORTYPE objects to zero. */ static const YYMINORTYPE yyzerominor = { 0 }; /* Define the yytestcase() macro to be a no-op if is not already defined ** otherwise. ** ** Applications can choose to define yytestcase() in the %include section ** to a macro that can assist in verifying code coverage. For production ** code the yytestcase() macro should be turned off. But it is useful ** for testing. */ #ifndef yytestcase # define yytestcase(X) #endif /* Next are the tables used to determine what action to take based on the ** current state and lookahead token. These tables are used to implement ** functions that take a state number and lookahead value and return an ** action integer. ** ** Suppose the action integer is N. Then the action is determined as ** follows ** ** 0 <= N < YYNSTATE Shift N. That is, push the lookahead ** token onto the stack and goto state N. ** ** YYNSTATE <= N < YYNSTATE+YYNRULE Reduce by rule N-YYNSTATE. ** ** N == YYNSTATE+YYNRULE A syntax error has occurred. ** ** N == YYNSTATE+YYNRULE+1 The parser accepts its input. ** ** N == YYNSTATE+YYNRULE+2 No such action. Denotes unused ** slots in the yy_action[] table. ** ** The action table is constructed as a single large table named yy_action[]. ** Given state S and lookahead X, the action is computed as ** ** yy_action[ yy_shift_ofst[S] + X ] ** ** If the index value yy_shift_ofst[S]+X is out of range or if the value ** yy_lookahead[yy_shift_ofst[S]+X] is not equal to X or if yy_shift_ofst[S] ** is equal to YY_SHIFT_USE_DFLT, it means that the action is not in the table ** and that yy_default[S] should be used instead. ** ** The formula above is for computing the action when the lookahead is ** a terminal symbol. If the lookahead is a non-terminal (as occurs after ** a reduce action) then the yy_reduce_ofst[] array is used in place of ** the yy_shift_ofst[] array and YY_REDUCE_USE_DFLT is used in place of ** YY_SHIFT_USE_DFLT. ** ** The following are the tables generated in this section: ** ** yy_action[] A single table containing all actions. ** yy_lookahead[] A table containing the lookahead for each entry in ** yy_action. Used to detect hash collisions. ** yy_shift_ofst[] For each state, the offset into yy_action for ** shifting terminals. ** yy_reduce_ofst[] For each state, the offset into yy_action for ** shifting non-terminals after a reduce. ** yy_default[] Default action for each state. 
*/ #define YY_ACTTAB_COUNT (1497) static const YYACTIONTYPE yy_action[] = { /* 0 */ 306, 212, 432, 955, 639, 191, 955, 295, 559, 88, /* 10 */ 88, 88, 88, 81, 86, 86, 86, 86, 85, 85, /* 20 */ 84, 84, 84, 83, 330, 185, 184, 183, 635, 635, /* 30 */ 292, 606, 606, 88, 88, 88, 88, 683, 86, 86, /* 40 */ 86, 86, 85, 85, 84, 84, 84, 83, 330, 16, /* 50 */ 436, 597, 89, 90, 80, 600, 599, 601, 601, 87, /* 60 */ 87, 88, 88, 88, 88, 684, 86, 86, 86, 86, /* 70 */ 85, 85, 84, 84, 84, 83, 330, 306, 559, 84, /* 80 */ 84, 84, 83, 330, 65, 86, 86, 86, 86, 85, /* 90 */ 85, 84, 84, 84, 83, 330, 635, 635, 634, 633, /* 100 */ 182, 682, 550, 379, 376, 375, 17, 322, 606, 606, /* 110 */ 371, 198, 479, 91, 374, 82, 79, 165, 85, 85, /* 120 */ 84, 84, 84, 83, 330, 598, 635, 635, 107, 89, /* 130 */ 90, 80, 600, 599, 601, 601, 87, 87, 88, 88, /* 140 */ 88, 88, 186, 86, 86, 86, 86, 85, 85, 84, /* 150 */ 84, 84, 83, 330, 306, 594, 594, 142, 328, 327, /* 160 */ 484, 249, 344, 238, 635, 635, 634, 633, 585, 448, /* 170 */ 526, 525, 229, 388, 1, 394, 450, 584, 449, 635, /* 180 */ 635, 635, 635, 319, 395, 606, 606, 199, 157, 273, /* 190 */ 382, 268, 381, 187, 635, 635, 634, 633, 311, 555, /* 200 */ 266, 593, 593, 266, 347, 588, 89, 90, 80, 600, /* 210 */ 599, 601, 601, 87, 87, 88, 88, 88, 88, 478, /* 220 */ 86, 86, 86, 86, 85, 85, 84, 84, 84, 83, /* 230 */ 330, 306, 272, 536, 634, 633, 146, 610, 197, 310, /* 240 */ 575, 182, 482, 271, 379, 376, 375, 506, 21, 634, /* 250 */ 633, 634, 633, 635, 635, 374, 611, 574, 548, 440, /* 260 */ 111, 563, 606, 606, 634, 633, 324, 479, 608, 608, /* 270 */ 608, 300, 435, 573, 119, 407, 210, 162, 562, 883, /* 280 */ 592, 592, 306, 89, 90, 80, 600, 599, 601, 601, /* 290 */ 87, 87, 88, 88, 88, 88, 506, 86, 86, 86, /* 300 */ 86, 85, 85, 84, 84, 84, 83, 330, 620, 111, /* 310 */ 635, 635, 361, 606, 606, 358, 249, 349, 248, 433, /* 320 */ 243, 479, 586, 634, 633, 195, 611, 93, 119, 221, /* 330 */ 575, 497, 534, 534, 89, 90, 80, 600, 599, 601, /* 340 */ 601, 87, 87, 88, 88, 88, 88, 574, 86, 86, /* 350 */ 86, 86, 85, 85, 84, 84, 84, 83, 330, 306, /* 360 */ 77, 429, 638, 573, 589, 530, 240, 230, 242, 105, /* 370 */ 249, 349, 248, 515, 588, 208, 460, 529, 564, 173, /* 380 */ 634, 633, 970, 144, 430, 2, 424, 228, 380, 557, /* 390 */ 606, 606, 190, 153, 159, 158, 514, 51, 632, 631, /* 400 */ 630, 71, 536, 432, 954, 196, 610, 954, 614, 45, /* 410 */ 18, 89, 90, 80, 600, 599, 601, 601, 87, 87, /* 420 */ 88, 88, 88, 88, 261, 86, 86, 86, 86, 85, /* 430 */ 85, 84, 84, 84, 83, 330, 306, 608, 608, 608, /* 440 */ 542, 424, 402, 385, 241, 506, 451, 320, 211, 543, /* 450 */ 164, 436, 386, 293, 451, 587, 108, 496, 111, 334, /* 460 */ 391, 591, 424, 614, 27, 452, 453, 606, 606, 72, /* 470 */ 257, 70, 259, 452, 339, 342, 564, 582, 68, 415, /* 480 */ 469, 328, 327, 62, 614, 45, 110, 393, 89, 90, /* 490 */ 80, 600, 599, 601, 601, 87, 87, 88, 88, 88, /* 500 */ 88, 152, 86, 86, 86, 86, 85, 85, 84, 84, /* 510 */ 84, 83, 330, 306, 110, 499, 520, 538, 402, 389, /* 520 */ 424, 110, 566, 500, 593, 593, 454, 82, 79, 165, /* 530 */ 424, 591, 384, 564, 340, 615, 188, 162, 424, 350, /* 540 */ 616, 424, 614, 44, 606, 606, 445, 582, 300, 434, /* 550 */ 151, 19, 614, 9, 568, 580, 348, 615, 469, 567, /* 560 */ 614, 26, 616, 614, 45, 89, 90, 80, 600, 599, /* 570 */ 601, 601, 87, 87, 88, 88, 88, 88, 411, 86, /* 580 */ 86, 86, 86, 85, 85, 84, 84, 84, 83, 330, /* 590 */ 306, 579, 110, 578, 521, 282, 433, 398, 400, 255, /* 600 */ 486, 82, 79, 165, 487, 164, 82, 79, 165, 488, /* 610 */ 488, 364, 387, 424, 544, 544, 509, 350, 362, 155, /* 
620 */ 191, 606, 606, 559, 642, 640, 333, 82, 79, 165, /* 630 */ 305, 564, 507, 312, 357, 614, 45, 329, 596, 595, /* 640 */ 194, 337, 89, 90, 80, 600, 599, 601, 601, 87, /* 650 */ 87, 88, 88, 88, 88, 424, 86, 86, 86, 86, /* 660 */ 85, 85, 84, 84, 84, 83, 330, 306, 20, 323, /* 670 */ 150, 263, 211, 543, 421, 596, 595, 614, 22, 424, /* 680 */ 193, 424, 284, 424, 391, 424, 509, 424, 577, 424, /* 690 */ 186, 335, 424, 559, 424, 313, 120, 546, 606, 606, /* 700 */ 67, 614, 47, 614, 50, 614, 48, 614, 100, 614, /* 710 */ 99, 614, 101, 576, 614, 102, 614, 109, 326, 89, /* 720 */ 90, 80, 600, 599, 601, 601, 87, 87, 88, 88, /* 730 */ 88, 88, 424, 86, 86, 86, 86, 85, 85, 84, /* 740 */ 84, 84, 83, 330, 306, 424, 311, 424, 585, 54, /* 750 */ 424, 516, 517, 590, 614, 112, 424, 584, 424, 572, /* 760 */ 424, 195, 424, 571, 424, 67, 424, 614, 94, 614, /* 770 */ 98, 424, 614, 97, 264, 606, 606, 195, 614, 46, /* 780 */ 614, 96, 614, 30, 614, 49, 614, 115, 614, 114, /* 790 */ 418, 229, 388, 614, 113, 306, 89, 90, 80, 600, /* 800 */ 599, 601, 601, 87, 87, 88, 88, 88, 88, 424, /* 810 */ 86, 86, 86, 86, 85, 85, 84, 84, 84, 83, /* 820 */ 330, 119, 424, 590, 110, 372, 606, 606, 195, 53, /* 830 */ 250, 614, 29, 195, 472, 438, 729, 190, 302, 498, /* 840 */ 14, 523, 641, 2, 614, 43, 306, 89, 90, 80, /* 850 */ 600, 599, 601, 601, 87, 87, 88, 88, 88, 88, /* 860 */ 424, 86, 86, 86, 86, 85, 85, 84, 84, 84, /* 870 */ 83, 330, 424, 613, 964, 964, 354, 606, 606, 420, /* 880 */ 312, 64, 614, 42, 391, 355, 283, 437, 301, 255, /* 890 */ 414, 410, 495, 492, 614, 28, 471, 306, 89, 90, /* 900 */ 80, 600, 599, 601, 601, 87, 87, 88, 88, 88, /* 910 */ 88, 424, 86, 86, 86, 86, 85, 85, 84, 84, /* 920 */ 84, 83, 330, 424, 110, 110, 110, 110, 606, 606, /* 930 */ 110, 254, 13, 614, 41, 532, 531, 283, 481, 531, /* 940 */ 457, 284, 119, 561, 356, 614, 40, 284, 306, 89, /* 950 */ 78, 80, 600, 599, 601, 601, 87, 87, 88, 88, /* 960 */ 88, 88, 424, 86, 86, 86, 86, 85, 85, 84, /* 970 */ 84, 84, 83, 330, 110, 424, 341, 220, 555, 606, /* 980 */ 606, 351, 555, 318, 614, 95, 413, 255, 83, 330, /* 990 */ 284, 284, 255, 640, 333, 356, 255, 614, 39, 306, /* 1000 */ 356, 90, 80, 600, 599, 601, 601, 87, 87, 88, /* 1010 */ 88, 88, 88, 424, 86, 86, 86, 86, 85, 85, /* 1020 */ 84, 84, 84, 83, 330, 424, 317, 316, 141, 465, /* 1030 */ 606, 606, 219, 619, 463, 614, 10, 417, 462, 255, /* 1040 */ 189, 510, 553, 351, 207, 363, 161, 614, 38, 315, /* 1050 */ 218, 255, 255, 80, 600, 599, 601, 601, 87, 87, /* 1060 */ 88, 88, 88, 88, 424, 86, 86, 86, 86, 85, /* 1070 */ 85, 84, 84, 84, 83, 330, 76, 419, 255, 3, /* 1080 */ 878, 461, 424, 247, 331, 331, 614, 37, 217, 76, /* 1090 */ 419, 390, 3, 216, 215, 422, 4, 331, 331, 424, /* 1100 */ 547, 12, 424, 545, 614, 36, 424, 541, 422, 424, /* 1110 */ 540, 424, 214, 424, 408, 424, 539, 403, 605, 605, /* 1120 */ 237, 614, 25, 119, 614, 24, 588, 408, 614, 45, /* 1130 */ 118, 614, 35, 614, 34, 614, 33, 614, 23, 588, /* 1140 */ 60, 223, 603, 602, 513, 378, 73, 74, 140, 139, /* 1150 */ 424, 110, 265, 75, 426, 425, 59, 424, 610, 73, /* 1160 */ 74, 549, 402, 404, 424, 373, 75, 426, 425, 604, /* 1170 */ 138, 610, 614, 11, 392, 76, 419, 181, 3, 614, /* 1180 */ 32, 271, 369, 331, 331, 493, 614, 31, 149, 608, /* 1190 */ 608, 608, 607, 15, 422, 365, 614, 8, 137, 489, /* 1200 */ 136, 190, 608, 608, 608, 607, 15, 485, 176, 135, /* 1210 */ 7, 252, 477, 408, 174, 133, 175, 474, 57, 56, /* 1220 */ 132, 130, 119, 76, 419, 588, 3, 468, 245, 464, /* 1230 */ 171, 331, 331, 125, 123, 456, 447, 122, 446, 104, /* 1240 */ 336, 231, 422, 166, 154, 73, 
74, 332, 116, 431, /* 1250 */ 121, 309, 75, 426, 425, 222, 106, 610, 308, 637, /* 1260 */ 204, 408, 629, 627, 628, 6, 200, 428, 427, 290, /* 1270 */ 203, 622, 201, 588, 62, 63, 289, 66, 419, 399, /* 1280 */ 3, 401, 288, 92, 143, 331, 331, 287, 608, 608, /* 1290 */ 608, 607, 15, 73, 74, 227, 422, 325, 69, 416, /* 1300 */ 75, 426, 425, 612, 412, 610, 192, 61, 569, 209, /* 1310 */ 396, 226, 278, 225, 383, 408, 527, 558, 276, 533, /* 1320 */ 552, 528, 321, 523, 370, 508, 180, 588, 494, 179, /* 1330 */ 366, 117, 253, 269, 522, 503, 608, 608, 608, 607, /* 1340 */ 15, 551, 502, 58, 274, 524, 178, 73, 74, 304, /* 1350 */ 501, 368, 303, 206, 75, 426, 425, 491, 360, 610, /* 1360 */ 213, 177, 483, 131, 345, 298, 297, 296, 202, 294, /* 1370 */ 480, 490, 466, 134, 172, 129, 444, 346, 470, 128, /* 1380 */ 314, 459, 103, 127, 126, 148, 124, 167, 443, 235, /* 1390 */ 608, 608, 608, 607, 15, 442, 439, 623, 234, 299, /* 1400 */ 145, 583, 291, 377, 581, 160, 119, 156, 270, 636, /* 1410 */ 971, 169, 279, 626, 520, 625, 473, 624, 170, 621, /* 1420 */ 618, 119, 168, 55, 409, 423, 537, 609, 286, 285, /* 1430 */ 405, 570, 560, 556, 5, 52, 458, 554, 147, 267, /* 1440 */ 519, 504, 518, 406, 262, 239, 260, 512, 343, 511, /* 1450 */ 258, 353, 565, 256, 224, 251, 359, 277, 275, 476, /* 1460 */ 475, 246, 352, 244, 467, 455, 236, 233, 232, 307, /* 1470 */ 441, 281, 205, 163, 397, 280, 535, 505, 330, 617, /* 1480 */ 971, 971, 971, 971, 367, 971, 971, 971, 971, 971, /* 1490 */ 971, 971, 971, 971, 971, 971, 338, }; static const YYCODETYPE yy_lookahead[] = { /* 0 */ 19, 22, 22, 23, 1, 24, 26, 15, 27, 80, /* 10 */ 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, /* 20 */ 91, 92, 93, 94, 95, 108, 109, 110, 27, 28, /* 30 */ 23, 50, 51, 80, 81, 82, 83, 122, 85, 86, /* 40 */ 87, 88, 89, 90, 91, 92, 93, 94, 95, 22, /* 50 */ 70, 23, 71, 72, 73, 74, 75, 76, 77, 78, /* 60 */ 79, 80, 81, 82, 83, 122, 85, 86, 87, 88, /* 70 */ 89, 90, 91, 92, 93, 94, 95, 19, 97, 91, /* 80 */ 92, 93, 94, 95, 26, 85, 86, 87, 88, 89, /* 90 */ 90, 91, 92, 93, 94, 95, 27, 28, 97, 98, /* 100 */ 99, 122, 211, 102, 103, 104, 79, 19, 50, 51, /* 110 */ 19, 122, 59, 55, 113, 224, 225, 226, 89, 90, /* 120 */ 91, 92, 93, 94, 95, 23, 27, 28, 26, 71, /* 130 */ 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, /* 140 */ 82, 83, 51, 85, 86, 87, 88, 89, 90, 91, /* 150 */ 92, 93, 94, 95, 19, 132, 133, 58, 89, 90, /* 160 */ 21, 108, 109, 110, 27, 28, 97, 98, 33, 100, /* 170 */ 7, 8, 119, 120, 22, 19, 107, 42, 109, 27, /* 180 */ 28, 27, 28, 95, 28, 50, 51, 99, 100, 101, /* 190 */ 102, 103, 104, 105, 27, 28, 97, 98, 107, 152, /* 200 */ 112, 132, 133, 112, 65, 69, 71, 72, 73, 74, /* 210 */ 75, 76, 77, 78, 79, 80, 81, 82, 83, 11, /* 220 */ 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, /* 230 */ 95, 19, 101, 97, 97, 98, 24, 101, 122, 157, /* 240 */ 12, 99, 103, 112, 102, 103, 104, 152, 22, 97, /* 250 */ 98, 97, 98, 27, 28, 113, 27, 29, 91, 164, /* 260 */ 165, 124, 50, 51, 97, 98, 219, 59, 132, 133, /* 270 */ 134, 22, 23, 45, 66, 47, 212, 213, 124, 140, /* 280 */ 132, 133, 19, 71, 72, 73, 74, 75, 76, 77, /* 290 */ 78, 79, 80, 81, 82, 83, 152, 85, 86, 87, /* 300 */ 88, 89, 90, 91, 92, 93, 94, 95, 164, 165, /* 310 */ 27, 28, 230, 50, 51, 233, 108, 109, 110, 70, /* 320 */ 16, 59, 23, 97, 98, 26, 97, 22, 66, 185, /* 330 */ 12, 187, 27, 28, 71, 72, 73, 74, 75, 76, /* 340 */ 77, 78, 79, 80, 81, 82, 83, 29, 85, 86, /* 350 */ 87, 88, 89, 90, 91, 92, 93, 94, 95, 19, /* 360 */ 22, 148, 149, 45, 23, 47, 62, 154, 64, 156, /* 370 */ 108, 109, 110, 37, 69, 23, 163, 59, 26, 26, /* 380 */ 97, 98, 144, 145, 146, 147, 
152, 200, 52, 23, /* 390 */ 50, 51, 26, 22, 89, 90, 60, 210, 7, 8, /* 400 */ 9, 138, 97, 22, 23, 26, 101, 26, 174, 175, /* 410 */ 197, 71, 72, 73, 74, 75, 76, 77, 78, 79, /* 420 */ 80, 81, 82, 83, 16, 85, 86, 87, 88, 89, /* 430 */ 90, 91, 92, 93, 94, 95, 19, 132, 133, 134, /* 440 */ 23, 152, 208, 209, 140, 152, 152, 111, 195, 196, /* 450 */ 98, 70, 163, 160, 152, 23, 22, 164, 165, 246, /* 460 */ 207, 27, 152, 174, 175, 171, 172, 50, 51, 137, /* 470 */ 62, 139, 64, 171, 172, 222, 124, 27, 138, 24, /* 480 */ 163, 89, 90, 130, 174, 175, 197, 163, 71, 72, /* 490 */ 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, /* 500 */ 83, 22, 85, 86, 87, 88, 89, 90, 91, 92, /* 510 */ 93, 94, 95, 19, 197, 181, 182, 23, 208, 209, /* 520 */ 152, 197, 26, 189, 132, 133, 232, 224, 225, 226, /* 530 */ 152, 97, 91, 26, 232, 116, 212, 213, 152, 222, /* 540 */ 121, 152, 174, 175, 50, 51, 243, 97, 22, 23, /* 550 */ 22, 234, 174, 175, 177, 23, 239, 116, 163, 177, /* 560 */ 174, 175, 121, 174, 175, 71, 72, 73, 74, 75, /* 570 */ 76, 77, 78, 79, 80, 81, 82, 83, 24, 85, /* 580 */ 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, /* 590 */ 19, 23, 197, 11, 23, 227, 70, 208, 220, 152, /* 600 */ 31, 224, 225, 226, 35, 98, 224, 225, 226, 108, /* 610 */ 109, 110, 115, 152, 117, 118, 27, 222, 49, 123, /* 620 */ 24, 50, 51, 27, 0, 1, 2, 224, 225, 226, /* 630 */ 166, 124, 168, 169, 239, 174, 175, 170, 171, 172, /* 640 */ 22, 194, 71, 72, 73, 74, 75, 76, 77, 78, /* 650 */ 79, 80, 81, 82, 83, 152, 85, 86, 87, 88, /* 660 */ 89, 90, 91, 92, 93, 94, 95, 19, 22, 208, /* 670 */ 24, 23, 195, 196, 170, 171, 172, 174, 175, 152, /* 680 */ 26, 152, 152, 152, 207, 152, 97, 152, 23, 152, /* 690 */ 51, 244, 152, 97, 152, 247, 248, 23, 50, 51, /* 700 */ 26, 174, 175, 174, 175, 174, 175, 174, 175, 174, /* 710 */ 175, 174, 175, 23, 174, 175, 174, 175, 188, 71, /* 720 */ 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, /* 730 */ 82, 83, 152, 85, 86, 87, 88, 89, 90, 91, /* 740 */ 92, 93, 94, 95, 19, 152, 107, 152, 33, 24, /* 750 */ 152, 100, 101, 27, 174, 175, 152, 42, 152, 23, /* 760 */ 152, 26, 152, 23, 152, 26, 152, 174, 175, 174, /* 770 */ 175, 152, 174, 175, 23, 50, 51, 26, 174, 175, /* 780 */ 174, 175, 174, 175, 174, 175, 174, 175, 174, 175, /* 790 */ 163, 119, 120, 174, 175, 19, 71, 72, 73, 74, /* 800 */ 75, 76, 77, 78, 79, 80, 81, 82, 83, 152, /* 810 */ 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, /* 820 */ 95, 66, 152, 97, 197, 23, 50, 51, 26, 53, /* 830 */ 23, 174, 175, 26, 23, 23, 23, 26, 26, 26, /* 840 */ 36, 106, 146, 147, 174, 175, 19, 71, 72, 73, /* 850 */ 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, /* 860 */ 152, 85, 86, 87, 88, 89, 90, 91, 92, 93, /* 870 */ 94, 95, 152, 196, 119, 120, 19, 50, 51, 168, /* 880 */ 169, 26, 174, 175, 207, 28, 152, 249, 250, 152, /* 890 */ 163, 163, 163, 163, 174, 175, 163, 19, 71, 72, /* 900 */ 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, /* 910 */ 83, 152, 85, 86, 87, 88, 89, 90, 91, 92, /* 920 */ 93, 94, 95, 152, 197, 197, 197, 197, 50, 51, /* 930 */ 197, 194, 36, 174, 175, 191, 192, 152, 191, 192, /* 940 */ 163, 152, 66, 124, 152, 174, 175, 152, 19, 71, /* 950 */ 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, /* 960 */ 82, 83, 152, 85, 86, 87, 88, 89, 90, 91, /* 970 */ 92, 93, 94, 95, 197, 152, 100, 188, 152, 50, /* 980 */ 51, 152, 152, 188, 174, 175, 252, 152, 94, 95, /* 990 */ 152, 152, 152, 1, 2, 152, 152, 174, 175, 19, /* 1000 */ 152, 72, 73, 74, 75, 76, 77, 78, 79, 80, /* 1010 */ 81, 82, 83, 152, 85, 86, 87, 88, 89, 90, /* 1020 */ 91, 92, 93, 94, 95, 152, 188, 188, 22, 194, /* 1030 */ 50, 51, 240, 173, 194, 174, 175, 252, 194, 152, /* 1040 
*/ 36, 181, 28, 152, 23, 219, 122, 174, 175, 219, /* 1050 */ 221, 152, 152, 73, 74, 75, 76, 77, 78, 79, /* 1060 */ 80, 81, 82, 83, 152, 85, 86, 87, 88, 89, /* 1070 */ 90, 91, 92, 93, 94, 95, 19, 20, 152, 22, /* 1080 */ 23, 194, 152, 240, 27, 28, 174, 175, 240, 19, /* 1090 */ 20, 26, 22, 194, 194, 38, 22, 27, 28, 152, /* 1100 */ 23, 22, 152, 116, 174, 175, 152, 23, 38, 152, /* 1110 */ 23, 152, 221, 152, 57, 152, 23, 163, 50, 51, /* 1120 */ 194, 174, 175, 66, 174, 175, 69, 57, 174, 175, /* 1130 */ 40, 174, 175, 174, 175, 174, 175, 174, 175, 69, /* 1140 */ 22, 53, 74, 75, 30, 53, 89, 90, 22, 22, /* 1150 */ 152, 197, 23, 96, 97, 98, 22, 152, 101, 89, /* 1160 */ 90, 91, 208, 209, 152, 53, 96, 97, 98, 101, /* 1170 */ 22, 101, 174, 175, 152, 19, 20, 105, 22, 174, /* 1180 */ 175, 112, 19, 27, 28, 20, 174, 175, 24, 132, /* 1190 */ 133, 134, 135, 136, 38, 44, 174, 175, 107, 61, /* 1200 */ 54, 26, 132, 133, 134, 135, 136, 54, 107, 22, /* 1210 */ 5, 140, 1, 57, 36, 111, 122, 28, 79, 79, /* 1220 */ 131, 123, 66, 19, 20, 69, 22, 1, 16, 20, /* 1230 */ 125, 27, 28, 123, 111, 120, 23, 131, 23, 16, /* 1240 */ 68, 142, 38, 15, 22, 89, 90, 3, 167, 4, /* 1250 */ 248, 251, 96, 97, 98, 180, 180, 101, 251, 151, /* 1260 */ 6, 57, 151, 13, 151, 26, 25, 151, 161, 202, /* 1270 */ 153, 162, 153, 69, 130, 128, 203, 19, 20, 127, /* 1280 */ 22, 126, 204, 129, 22, 27, 28, 205, 132, 133, /* 1290 */ 134, 135, 136, 89, 90, 231, 38, 95, 137, 179, /* 1300 */ 96, 97, 98, 206, 179, 101, 122, 107, 159, 159, /* 1310 */ 125, 231, 216, 228, 107, 57, 184, 217, 216, 176, /* 1320 */ 217, 176, 48, 106, 18, 184, 158, 69, 159, 158, /* 1330 */ 46, 71, 237, 176, 176, 176, 132, 133, 134, 135, /* 1340 */ 136, 217, 176, 137, 216, 178, 158, 89, 90, 179, /* 1350 */ 176, 159, 179, 159, 96, 97, 98, 159, 159, 101, /* 1360 */ 5, 158, 202, 22, 18, 10, 11, 12, 13, 14, /* 1370 */ 190, 238, 17, 190, 158, 193, 41, 159, 202, 193, /* 1380 */ 159, 202, 245, 193, 193, 223, 190, 32, 159, 34, /* 1390 */ 132, 133, 134, 135, 136, 159, 39, 155, 43, 150, /* 1400 */ 223, 177, 201, 178, 177, 186, 66, 199, 177, 152, /* 1410 */ 253, 56, 215, 152, 182, 152, 202, 152, 63, 152, /* 1420 */ 152, 66, 67, 242, 229, 152, 174, 152, 152, 152, /* 1430 */ 152, 152, 152, 152, 199, 242, 202, 152, 198, 152, /* 1440 */ 152, 152, 183, 192, 152, 215, 152, 183, 215, 183, /* 1450 */ 152, 241, 214, 152, 211, 152, 152, 211, 211, 152, /* 1460 */ 152, 241, 152, 152, 152, 152, 152, 152, 152, 114, /* 1470 */ 152, 152, 235, 152, 152, 152, 174, 187, 95, 174, /* 1480 */ 253, 253, 253, 253, 236, 253, 253, 253, 253, 253, /* 1490 */ 253, 253, 253, 253, 253, 253, 141, }; #define YY_SHIFT_USE_DFLT (-86) #define YY_SHIFT_COUNT (429) #define YY_SHIFT_MIN (-85) #define YY_SHIFT_MAX (1383) static const short yy_shift_ofst[] = { /* 0 */ 992, 1057, 1355, 1156, 1204, 1204, 1, 262, -19, 135, /* 10 */ 135, 776, 1204, 1204, 1204, 1204, 69, 69, 53, 208, /* 20 */ 283, 755, 58, 725, 648, 571, 494, 417, 340, 263, /* 30 */ 212, 827, 827, 827, 827, 827, 827, 827, 827, 827, /* 40 */ 827, 827, 827, 827, 827, 827, 878, 827, 929, 980, /* 50 */ 980, 1070, 1204, 1204, 1204, 1204, 1204, 1204, 1204, 1204, /* 60 */ 1204, 1204, 1204, 1204, 1204, 1204, 1204, 1204, 1204, 1204, /* 70 */ 1204, 1204, 1204, 1204, 1204, 1204, 1204, 1204, 1204, 1204, /* 80 */ 1258, 1204, 1204, 1204, 1204, 1204, 1204, 1204, 1204, 1204, /* 90 */ 1204, 1204, 1204, 1204, -71, -47, -47, -47, -47, -47, /* 100 */ 0, 29, -12, 283, 283, 139, 91, 392, 392, 894, /* 110 */ 672, 726, 1383, -86, -86, -86, 88, 318, 318, 99, /* 120 */ 381, -20, 283, 283, 283, 283, 
283, 283, 283, 283, /* 130 */ 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, /* 140 */ 283, 283, 283, 283, 624, 876, 726, 672, 1340, 1340, /* 150 */ 1340, 1340, 1340, 1340, -86, -86, -86, 305, 136, 136, /* 160 */ 142, 167, 226, 154, 137, 152, 283, 283, 283, 283, /* 170 */ 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, /* 180 */ 283, 283, 283, 336, 336, 336, 283, 283, 352, 283, /* 190 */ 283, 283, 283, 283, 228, 283, 283, 283, 283, 283, /* 200 */ 283, 283, 283, 283, 283, 501, 569, 596, 596, 596, /* 210 */ 507, 497, 441, 391, 353, 156, 156, 857, 353, 857, /* 220 */ 735, 813, 639, 715, 156, 332, 715, 715, 496, 419, /* 230 */ 646, 1357, 1184, 1184, 1335, 1335, 1184, 1341, 1260, 1144, /* 240 */ 1346, 1346, 1346, 1346, 1184, 1306, 1144, 1341, 1260, 1260, /* 250 */ 1144, 1184, 1306, 1206, 1284, 1184, 1184, 1306, 1184, 1306, /* 260 */ 1184, 1306, 1262, 1207, 1207, 1207, 1274, 1262, 1207, 1217, /* 270 */ 1207, 1274, 1207, 1207, 1185, 1200, 1185, 1200, 1185, 1200, /* 280 */ 1184, 1184, 1161, 1262, 1202, 1202, 1262, 1154, 1155, 1147, /* 290 */ 1152, 1144, 1241, 1239, 1250, 1250, 1254, 1254, 1254, 1254, /* 300 */ -86, -86, -86, -86, -86, -86, 1068, 304, 526, 249, /* 310 */ 408, -83, 434, 812, 27, 811, 807, 802, 751, 589, /* 320 */ 651, 163, 131, 674, 366, 450, 299, 148, 23, 102, /* 330 */ 229, -21, 1245, 1244, 1222, 1099, 1228, 1172, 1223, 1215, /* 340 */ 1213, 1115, 1106, 1123, 1110, 1209, 1105, 1212, 1226, 1098, /* 350 */ 1089, 1140, 1139, 1104, 1189, 1178, 1094, 1211, 1205, 1187, /* 360 */ 1101, 1071, 1153, 1175, 1146, 1138, 1151, 1091, 1164, 1165, /* 370 */ 1163, 1069, 1072, 1148, 1112, 1134, 1127, 1129, 1126, 1092, /* 380 */ 1114, 1118, 1088, 1090, 1093, 1087, 1084, 987, 1079, 1077, /* 390 */ 1074, 1065, 924, 1021, 1014, 1004, 1006, 819, 739, 896, /* 400 */ 855, 804, 739, 740, 736, 690, 654, 665, 618, 582, /* 410 */ 568, 528, 554, 379, 532, 479, 455, 379, 432, 371, /* 420 */ 341, 28, 338, 116, -11, -57, -85, 7, -8, 3, }; #define YY_REDUCE_USE_DFLT (-110) #define YY_REDUCE_COUNT (305) #define YY_REDUCE_MIN (-109) #define YY_REDUCE_MAX (1323) static const short yy_reduce_ofst[] = { /* 0 */ 238, 954, 213, 289, 310, 234, 144, 317, -109, 382, /* 10 */ 377, 303, 461, 389, 378, 368, 302, 294, 253, 395, /* 20 */ 293, 324, 403, 403, 403, 403, 403, 403, 403, 403, /* 30 */ 403, 403, 403, 403, 403, 403, 403, 403, 403, 403, /* 40 */ 403, 403, 403, 403, 403, 403, 403, 403, 403, 403, /* 50 */ 403, 1022, 1012, 1005, 998, 963, 961, 959, 957, 950, /* 60 */ 947, 930, 912, 873, 861, 823, 810, 771, 759, 720, /* 70 */ 708, 670, 657, 619, 614, 612, 610, 608, 606, 604, /* 80 */ 598, 595, 593, 580, 542, 540, 537, 535, 533, 531, /* 90 */ 529, 527, 503, 386, 403, 403, 403, 403, 403, 403, /* 100 */ 403, 403, 403, 95, 447, 82, 334, 504, 467, 403, /* 110 */ 477, 464, 403, 403, 403, 403, 860, 747, 744, 785, /* 120 */ 638, 638, 926, 891, 900, 899, 887, 844, 840, 835, /* 130 */ 848, 830, 843, 829, 792, 839, 826, 737, 838, 795, /* 140 */ 789, 47, 734, 530, 696, 777, 711, 677, 733, 730, /* 150 */ 729, 728, 727, 627, 448, 64, 187, 1305, 1302, 1252, /* 160 */ 1290, 1273, 1323, 1322, 1321, 1319, 1318, 1316, 1315, 1314, /* 170 */ 1313, 1312, 1311, 1310, 1308, 1307, 1304, 1303, 1301, 1298, /* 180 */ 1294, 1292, 1289, 1266, 1264, 1259, 1288, 1287, 1238, 1285, /* 190 */ 1281, 1280, 1279, 1278, 1251, 1277, 1276, 1275, 1273, 1268, /* 200 */ 1267, 1265, 1263, 1261, 1257, 1248, 1237, 1247, 1246, 1243, /* 210 */ 1238, 1240, 1235, 1249, 1234, 1233, 1230, 1220, 1214, 1210, /* 220 */ 1225, 1219, 1232, 1231, 1197, 1195, 1227, 1224, 1201, 
1208, /* 230 */ 1242, 1137, 1236, 1229, 1193, 1181, 1221, 1177, 1196, 1179, /* 240 */ 1191, 1190, 1186, 1182, 1218, 1216, 1176, 1162, 1183, 1180, /* 250 */ 1160, 1199, 1203, 1133, 1095, 1198, 1194, 1188, 1192, 1171, /* 260 */ 1169, 1168, 1173, 1174, 1166, 1159, 1141, 1170, 1158, 1167, /* 270 */ 1157, 1132, 1145, 1143, 1124, 1128, 1103, 1102, 1100, 1096, /* 280 */ 1150, 1149, 1085, 1125, 1080, 1064, 1120, 1097, 1082, 1078, /* 290 */ 1073, 1067, 1109, 1107, 1119, 1117, 1116, 1113, 1111, 1108, /* 300 */ 1007, 1000, 1002, 1076, 1075, 1081, }; static const YYACTIONTYPE yy_default[] = { /* 0 */ 647, 964, 964, 964, 878, 878, 969, 964, 774, 802, /* 10 */ 802, 938, 969, 969, 969, 876, 969, 969, 969, 964, /* 20 */ 969, 778, 808, 969, 969, 969, 969, 969, 969, 969, /* 30 */ 969, 937, 939, 816, 815, 918, 789, 813, 806, 810, /* 40 */ 879, 872, 873, 871, 875, 880, 969, 809, 841, 856, /* 50 */ 840, 969, 969, 969, 969, 969, 969, 969, 969, 969, /* 60 */ 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, /* 70 */ 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, /* 80 */ 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, /* 90 */ 969, 969, 969, 969, 850, 855, 862, 854, 851, 843, /* 100 */ 842, 844, 845, 969, 969, 673, 739, 969, 969, 846, /* 110 */ 969, 685, 847, 859, 858, 857, 680, 969, 969, 969, /* 120 */ 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, /* 130 */ 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, /* 140 */ 969, 969, 969, 969, 647, 964, 969, 969, 964, 964, /* 150 */ 964, 964, 964, 964, 956, 778, 768, 969, 969, 969, /* 160 */ 969, 969, 969, 969, 969, 969, 969, 944, 942, 969, /* 170 */ 891, 969, 969, 969, 969, 969, 969, 969, 969, 969, /* 180 */ 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, /* 190 */ 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, /* 200 */ 969, 969, 969, 969, 653, 969, 911, 774, 774, 774, /* 210 */ 776, 754, 766, 655, 812, 791, 791, 923, 812, 923, /* 220 */ 710, 733, 707, 802, 791, 874, 802, 802, 775, 766, /* 230 */ 969, 949, 782, 782, 941, 941, 782, 821, 743, 812, /* 240 */ 750, 750, 750, 750, 782, 670, 812, 821, 743, 743, /* 250 */ 812, 782, 670, 917, 915, 782, 782, 670, 782, 670, /* 260 */ 782, 670, 884, 741, 741, 741, 725, 884, 741, 710, /* 270 */ 741, 725, 741, 741, 795, 790, 795, 790, 795, 790, /* 280 */ 782, 782, 969, 884, 888, 888, 884, 807, 796, 805, /* 290 */ 803, 812, 676, 728, 663, 663, 652, 652, 652, 652, /* 300 */ 961, 961, 956, 712, 712, 695, 969, 969, 969, 969, /* 310 */ 969, 969, 687, 969, 893, 969, 969, 969, 969, 969, /* 320 */ 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, /* 330 */ 969, 828, 969, 648, 951, 969, 969, 948, 969, 969, /* 340 */ 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, /* 350 */ 969, 969, 969, 969, 969, 969, 921, 969, 969, 969, /* 360 */ 969, 969, 969, 914, 913, 969, 969, 969, 969, 969, /* 370 */ 969, 969, 969, 969, 969, 969, 969, 969, 969, 969, /* 380 */ 969, 969, 969, 969, 969, 969, 969, 757, 969, 969, /* 390 */ 969, 761, 969, 969, 969, 969, 969, 969, 804, 969, /* 400 */ 797, 969, 877, 969, 969, 969, 969, 969, 969, 969, /* 410 */ 969, 969, 969, 966, 969, 969, 969, 965, 969, 969, /* 420 */ 969, 969, 969, 830, 969, 829, 833, 969, 661, 969, /* 430 */ 644, 649, 960, 963, 962, 959, 958, 957, 952, 950, /* 440 */ 947, 946, 945, 943, 940, 936, 897, 895, 902, 901, /* 450 */ 900, 899, 898, 896, 894, 892, 818, 817, 814, 811, /* 460 */ 753, 935, 890, 752, 749, 748, 669, 953, 920, 929, /* 470 */ 928, 927, 822, 926, 925, 924, 922, 919, 906, 820, /* 480 */ 819, 744, 882, 881, 672, 910, 909, 908, 912, 916, /* 490 */ 907, 784, 751, 671, 668, 
675, 679, 731, 732, 740, /* 500 */ 738, 737, 736, 735, 734, 730, 681, 686, 724, 709, /* 510 */ 708, 717, 716, 722, 721, 720, 719, 718, 715, 714, /* 520 */ 713, 706, 705, 711, 704, 727, 726, 723, 703, 747, /* 530 */ 746, 745, 742, 702, 701, 700, 833, 699, 698, 838, /* 540 */ 837, 866, 826, 755, 759, 758, 762, 763, 771, 770, /* 550 */ 769, 780, 781, 793, 792, 824, 823, 794, 779, 773, /* 560 */ 772, 788, 787, 786, 785, 777, 767, 799, 798, 868, /* 570 */ 783, 867, 865, 934, 933, 932, 931, 930, 870, 967, /* 580 */ 968, 887, 889, 886, 801, 800, 885, 869, 839, 836, /* 590 */ 690, 691, 905, 904, 903, 693, 692, 689, 688, 863, /* 600 */ 860, 852, 864, 861, 853, 849, 848, 834, 832, 831, /* 610 */ 827, 835, 760, 756, 825, 765, 764, 697, 696, 694, /* 620 */ 678, 677, 674, 667, 665, 664, 666, 662, 660, 659, /* 630 */ 658, 657, 656, 684, 683, 682, 654, 651, 650, 646, /* 640 */ 645, 643, }; /* The next table maps tokens into fallback tokens. If a construct ** like the following: ** ** %fallback ID X Y Z. ** ** appears in the grammar, then ID becomes a fallback token for X, Y, ** and Z. Whenever one of the tokens X, Y, or Z is input to the parser ** but it does not parse, the type of the token is changed to ID and ** the parse is retried before an error is thrown. */ #ifdef YYFALLBACK static const YYCODETYPE yyFallback[] = { 0, /* $ => nothing */ 0, /* SEMI => nothing */ 27, /* EXPLAIN => ID */ 27, /* QUERY => ID */ 27, /* PLAN => ID */ 27, /* BEGIN => ID */ 0, /* TRANSACTION => nothing */ 27, /* DEFERRED => ID */ 27, /* IMMEDIATE => ID */ 27, /* EXCLUSIVE => ID */ 0, /* COMMIT => nothing */ 27, /* END => ID */ 27, /* ROLLBACK => ID */ 27, /* SAVEPOINT => ID */ 27, /* RELEASE => ID */ 0, /* TO => nothing */ 0, /* TABLE => nothing */ 0, /* CREATE => nothing */ 27, /* IF => ID */ 0, /* NOT => nothing */ 0, /* EXISTS => nothing */ 27, /* TEMP => ID */ 0, /* LP => nothing */ 0, /* RP => nothing */ 0, /* AS => nothing */ 27, /* WITHOUT => ID */ 0, /* COMMA => nothing */ 0, /* ID => nothing */ 0, /* INDEXED => nothing */ 27, /* ABORT => ID */ 27, /* ACTION => ID */ 27, /* AFTER => ID */ 27, /* ANALYZE => ID */ 27, /* ASC => ID */ 27, /* ATTACH => ID */ 27, /* BEFORE => ID */ 27, /* BY => ID */ 27, /* CASCADE => ID */ 27, /* CAST => ID */ 27, /* COLUMNKW => ID */ 27, /* CONFLICT => ID */ 27, /* DATABASE => ID */ 27, /* DESC => ID */ 27, /* DETACH => ID */ 27, /* EACH => ID */ 27, /* FAIL => ID */ 27, /* FOR => ID */ 27, /* IGNORE => ID */ 27, /* INITIALLY => ID */ 27, /* INSTEAD => ID */ 27, /* LIKE_KW => ID */ 27, /* MATCH => ID */ 27, /* NO => ID */ 27, /* KEY => ID */ 27, /* OF => ID */ 27, /* OFFSET => ID */ 27, /* PRAGMA => ID */ 27, /* RAISE => ID */ 27, /* RECURSIVE => ID */ 27, /* REPLACE => ID */ 27, /* RESTRICT => ID */ 27, /* ROW => ID */ 27, /* TRIGGER => ID */ 27, /* VACUUM => ID */ 27, /* VIEW => ID */ 27, /* VIRTUAL => ID */ 27, /* WITH => ID */ 27, /* REINDEX => ID */ 27, /* RENAME => ID */ 27, /* CTIME_KW => ID */ }; #endif /* YYFALLBACK */ /* The following structure represents a single element of the ** parser's stack. Information stored includes: ** ** + The state number for the parser at this level of the stack. ** ** + The value of the token stored at this level of the stack. ** (In other words, the "major" token.) ** ** + The semantic value stored at this level of the stack. This is ** the information used by the action routines in the grammar. ** It is sometimes called the "minor" token. 
*/ struct yyStackEntry { YYACTIONTYPE stateno; /* The state-number */ YYCODETYPE major; /* The major token value. This is the code ** number for the token at this stack level */ YYMINORTYPE minor; /* The user-supplied minor token value. This ** is the value of the token */ }; typedef struct yyStackEntry yyStackEntry; /* The state of the parser is completely contained in an instance of ** the following structure */ struct yyParser { int yyidx; /* Index of top element in stack */ #ifdef YYTRACKMAXSTACKDEPTH int yyidxMax; /* Maximum value of yyidx */ #endif int yyerrcnt; /* Shifts left before out of the error */ sqlite3ParserARG_SDECL /* A place to hold %extra_argument */ #if YYSTACKDEPTH<=0 int yystksz; /* Current side of the stack */ yyStackEntry *yystack; /* The parser's stack */ #else yyStackEntry yystack[YYSTACKDEPTH]; /* The parser's stack */ #endif }; typedef struct yyParser yyParser; #ifndef NDEBUG /* #include */ static FILE *yyTraceFILE = 0; static char *yyTracePrompt = 0; #endif /* NDEBUG */ #ifndef NDEBUG /* ** Turn parser tracing on by giving a stream to which to write the trace ** and a prompt to preface each trace message. Tracing is turned off ** by making either argument NULL ** ** Inputs: **
** <ul>
** <li> A FILE* to which trace output should be written.
**      If NULL, then tracing is turned off.
** <li> A prefix string written at the beginning of every
**      line of trace output.  If NULL, then tracing is
**      turned off.
** </ul>
    ** ** Outputs: ** None. */ SQLITE_PRIVATE void sqlite3ParserTrace(FILE *TraceFILE, char *zTracePrompt){ yyTraceFILE = TraceFILE; yyTracePrompt = zTracePrompt; if( yyTraceFILE==0 ) yyTracePrompt = 0; else if( yyTracePrompt==0 ) yyTraceFILE = 0; } #endif /* NDEBUG */ #ifndef NDEBUG /* For tracing shifts, the names of all terminals and nonterminals ** are required. The following table supplies these names */ static const char *const yyTokenName[] = { "$", "SEMI", "EXPLAIN", "QUERY", "PLAN", "BEGIN", "TRANSACTION", "DEFERRED", "IMMEDIATE", "EXCLUSIVE", "COMMIT", "END", "ROLLBACK", "SAVEPOINT", "RELEASE", "TO", "TABLE", "CREATE", "IF", "NOT", "EXISTS", "TEMP", "LP", "RP", "AS", "WITHOUT", "COMMA", "ID", "INDEXED", "ABORT", "ACTION", "AFTER", "ANALYZE", "ASC", "ATTACH", "BEFORE", "BY", "CASCADE", "CAST", "COLUMNKW", "CONFLICT", "DATABASE", "DESC", "DETACH", "EACH", "FAIL", "FOR", "IGNORE", "INITIALLY", "INSTEAD", "LIKE_KW", "MATCH", "NO", "KEY", "OF", "OFFSET", "PRAGMA", "RAISE", "RECURSIVE", "REPLACE", "RESTRICT", "ROW", "TRIGGER", "VACUUM", "VIEW", "VIRTUAL", "WITH", "REINDEX", "RENAME", "CTIME_KW", "ANY", "OR", "AND", "IS", "BETWEEN", "IN", "ISNULL", "NOTNULL", "NE", "EQ", "GT", "LE", "LT", "GE", "ESCAPE", "BITAND", "BITOR", "LSHIFT", "RSHIFT", "PLUS", "MINUS", "STAR", "SLASH", "REM", "CONCAT", "COLLATE", "BITNOT", "STRING", "JOIN_KW", "CONSTRAINT", "DEFAULT", "NULL", "PRIMARY", "UNIQUE", "CHECK", "REFERENCES", "AUTOINCR", "ON", "INSERT", "DELETE", "UPDATE", "SET", "DEFERRABLE", "FOREIGN", "DROP", "UNION", "ALL", "EXCEPT", "INTERSECT", "SELECT", "VALUES", "DISTINCT", "DOT", "FROM", "JOIN", "USING", "ORDER", "GROUP", "HAVING", "LIMIT", "WHERE", "INTO", "INTEGER", "FLOAT", "BLOB", "VARIABLE", "CASE", "WHEN", "THEN", "ELSE", "INDEX", "ALTER", "ADD", "error", "input", "cmdlist", "ecmd", "explain", "cmdx", "cmd", "transtype", "trans_opt", "nm", "savepoint_opt", "create_table", "create_table_args", "createkw", "temp", "ifnotexists", "dbnm", "columnlist", "conslist_opt", "table_options", "select", "column", "columnid", "type", "carglist", "typetoken", "typename", "signed", "plus_num", "minus_num", "ccons", "term", "expr", "onconf", "sortorder", "autoinc", "idxlist_opt", "refargs", "defer_subclause", "refarg", "refact", "init_deferred_pred_opt", "conslist", "tconscomma", "tcons", "idxlist", "defer_subclause_opt", "orconf", "resolvetype", "raisetype", "ifexists", "fullname", "selectnowith", "oneselect", "with", "multiselect_op", "distinct", "selcollist", "from", "where_opt", "groupby_opt", "having_opt", "orderby_opt", "limit_opt", "values", "nexprlist", "exprlist", "sclp", "as", "seltablist", "stl_prefix", "joinop", "indexed_opt", "on_opt", "using_opt", "joinop2", "idlist", "sortlist", "setlist", "insert_cmd", "inscollist_opt", "likeop", "between_op", "in_op", "case_operand", "case_exprlist", "case_else", "uniqueflag", "collate", "nmnum", "trigger_decl", "trigger_cmd_list", "trigger_time", "trigger_event", "foreach_clause", "when_clause", "trigger_cmd", "trnm", "tridxby", "database_kw_opt", "key_opt", "add_column_fullname", "kwcolumn_opt", "create_vtab", "vtabarglist", "vtabarg", "vtabargtoken", "lp", "anylist", "wqlist", }; #endif /* NDEBUG */ #ifndef NDEBUG /* For tracing reduce actions, the names of all rules are required. 
*/ static const char *const yyRuleName[] = { /* 0 */ "input ::= cmdlist", /* 1 */ "cmdlist ::= cmdlist ecmd", /* 2 */ "cmdlist ::= ecmd", /* 3 */ "ecmd ::= SEMI", /* 4 */ "ecmd ::= explain cmdx SEMI", /* 5 */ "explain ::=", /* 6 */ "explain ::= EXPLAIN", /* 7 */ "explain ::= EXPLAIN QUERY PLAN", /* 8 */ "cmdx ::= cmd", /* 9 */ "cmd ::= BEGIN transtype trans_opt", /* 10 */ "trans_opt ::=", /* 11 */ "trans_opt ::= TRANSACTION", /* 12 */ "trans_opt ::= TRANSACTION nm", /* 13 */ "transtype ::=", /* 14 */ "transtype ::= DEFERRED", /* 15 */ "transtype ::= IMMEDIATE", /* 16 */ "transtype ::= EXCLUSIVE", /* 17 */ "cmd ::= COMMIT trans_opt", /* 18 */ "cmd ::= END trans_opt", /* 19 */ "cmd ::= ROLLBACK trans_opt", /* 20 */ "savepoint_opt ::= SAVEPOINT", /* 21 */ "savepoint_opt ::=", /* 22 */ "cmd ::= SAVEPOINT nm", /* 23 */ "cmd ::= RELEASE savepoint_opt nm", /* 24 */ "cmd ::= ROLLBACK trans_opt TO savepoint_opt nm", /* 25 */ "cmd ::= create_table create_table_args", /* 26 */ "create_table ::= createkw temp TABLE ifnotexists nm dbnm", /* 27 */ "createkw ::= CREATE", /* 28 */ "ifnotexists ::=", /* 29 */ "ifnotexists ::= IF NOT EXISTS", /* 30 */ "temp ::= TEMP", /* 31 */ "temp ::=", /* 32 */ "create_table_args ::= LP columnlist conslist_opt RP table_options", /* 33 */ "create_table_args ::= AS select", /* 34 */ "table_options ::=", /* 35 */ "table_options ::= WITHOUT nm", /* 36 */ "columnlist ::= columnlist COMMA column", /* 37 */ "columnlist ::= column", /* 38 */ "column ::= columnid type carglist", /* 39 */ "columnid ::= nm", /* 40 */ "nm ::= ID|INDEXED", /* 41 */ "nm ::= STRING", /* 42 */ "nm ::= JOIN_KW", /* 43 */ "type ::=", /* 44 */ "type ::= typetoken", /* 45 */ "typetoken ::= typename", /* 46 */ "typetoken ::= typename LP signed RP", /* 47 */ "typetoken ::= typename LP signed COMMA signed RP", /* 48 */ "typename ::= ID|STRING", /* 49 */ "typename ::= typename ID|STRING", /* 50 */ "signed ::= plus_num", /* 51 */ "signed ::= minus_num", /* 52 */ "carglist ::= carglist ccons", /* 53 */ "carglist ::=", /* 54 */ "ccons ::= CONSTRAINT nm", /* 55 */ "ccons ::= DEFAULT term", /* 56 */ "ccons ::= DEFAULT LP expr RP", /* 57 */ "ccons ::= DEFAULT PLUS term", /* 58 */ "ccons ::= DEFAULT MINUS term", /* 59 */ "ccons ::= DEFAULT ID|INDEXED", /* 60 */ "ccons ::= NULL onconf", /* 61 */ "ccons ::= NOT NULL onconf", /* 62 */ "ccons ::= PRIMARY KEY sortorder onconf autoinc", /* 63 */ "ccons ::= UNIQUE onconf", /* 64 */ "ccons ::= CHECK LP expr RP", /* 65 */ "ccons ::= REFERENCES nm idxlist_opt refargs", /* 66 */ "ccons ::= defer_subclause", /* 67 */ "ccons ::= COLLATE ID|STRING", /* 68 */ "autoinc ::=", /* 69 */ "autoinc ::= AUTOINCR", /* 70 */ "refargs ::=", /* 71 */ "refargs ::= refargs refarg", /* 72 */ "refarg ::= MATCH nm", /* 73 */ "refarg ::= ON INSERT refact", /* 74 */ "refarg ::= ON DELETE refact", /* 75 */ "refarg ::= ON UPDATE refact", /* 76 */ "refact ::= SET NULL", /* 77 */ "refact ::= SET DEFAULT", /* 78 */ "refact ::= CASCADE", /* 79 */ "refact ::= RESTRICT", /* 80 */ "refact ::= NO ACTION", /* 81 */ "defer_subclause ::= NOT DEFERRABLE init_deferred_pred_opt", /* 82 */ "defer_subclause ::= DEFERRABLE init_deferred_pred_opt", /* 83 */ "init_deferred_pred_opt ::=", /* 84 */ "init_deferred_pred_opt ::= INITIALLY DEFERRED", /* 85 */ "init_deferred_pred_opt ::= INITIALLY IMMEDIATE", /* 86 */ "conslist_opt ::=", /* 87 */ "conslist_opt ::= COMMA conslist", /* 88 */ "conslist ::= conslist tconscomma tcons", /* 89 */ "conslist ::= tcons", /* 90 */ "tconscomma ::= COMMA", /* 91 */ "tconscomma ::=", /* 92 */ 
"tcons ::= CONSTRAINT nm", /* 93 */ "tcons ::= PRIMARY KEY LP idxlist autoinc RP onconf", /* 94 */ "tcons ::= UNIQUE LP idxlist RP onconf", /* 95 */ "tcons ::= CHECK LP expr RP onconf", /* 96 */ "tcons ::= FOREIGN KEY LP idxlist RP REFERENCES nm idxlist_opt refargs defer_subclause_opt", /* 97 */ "defer_subclause_opt ::=", /* 98 */ "defer_subclause_opt ::= defer_subclause", /* 99 */ "onconf ::=", /* 100 */ "onconf ::= ON CONFLICT resolvetype", /* 101 */ "orconf ::=", /* 102 */ "orconf ::= OR resolvetype", /* 103 */ "resolvetype ::= raisetype", /* 104 */ "resolvetype ::= IGNORE", /* 105 */ "resolvetype ::= REPLACE", /* 106 */ "cmd ::= DROP TABLE ifexists fullname", /* 107 */ "ifexists ::= IF EXISTS", /* 108 */ "ifexists ::=", /* 109 */ "cmd ::= createkw temp VIEW ifnotexists nm dbnm AS select", /* 110 */ "cmd ::= DROP VIEW ifexists fullname", /* 111 */ "cmd ::= select", /* 112 */ "select ::= with selectnowith", /* 113 */ "selectnowith ::= oneselect", /* 114 */ "selectnowith ::= selectnowith multiselect_op oneselect", /* 115 */ "multiselect_op ::= UNION", /* 116 */ "multiselect_op ::= UNION ALL", /* 117 */ "multiselect_op ::= EXCEPT|INTERSECT", /* 118 */ "oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt", /* 119 */ "oneselect ::= values", /* 120 */ "values ::= VALUES LP nexprlist RP", /* 121 */ "values ::= values COMMA LP exprlist RP", /* 122 */ "distinct ::= DISTINCT", /* 123 */ "distinct ::= ALL", /* 124 */ "distinct ::=", /* 125 */ "sclp ::= selcollist COMMA", /* 126 */ "sclp ::=", /* 127 */ "selcollist ::= sclp expr as", /* 128 */ "selcollist ::= sclp STAR", /* 129 */ "selcollist ::= sclp nm DOT STAR", /* 130 */ "as ::= AS nm", /* 131 */ "as ::= ID|STRING", /* 132 */ "as ::=", /* 133 */ "from ::=", /* 134 */ "from ::= FROM seltablist", /* 135 */ "stl_prefix ::= seltablist joinop", /* 136 */ "stl_prefix ::=", /* 137 */ "seltablist ::= stl_prefix nm dbnm as indexed_opt on_opt using_opt", /* 138 */ "seltablist ::= stl_prefix LP select RP as on_opt using_opt", /* 139 */ "seltablist ::= stl_prefix LP seltablist RP as on_opt using_opt", /* 140 */ "dbnm ::=", /* 141 */ "dbnm ::= DOT nm", /* 142 */ "fullname ::= nm dbnm", /* 143 */ "joinop ::= COMMA|JOIN", /* 144 */ "joinop ::= JOIN_KW JOIN", /* 145 */ "joinop ::= JOIN_KW nm JOIN", /* 146 */ "joinop ::= JOIN_KW nm nm JOIN", /* 147 */ "on_opt ::= ON expr", /* 148 */ "on_opt ::=", /* 149 */ "indexed_opt ::=", /* 150 */ "indexed_opt ::= INDEXED BY nm", /* 151 */ "indexed_opt ::= NOT INDEXED", /* 152 */ "using_opt ::= USING LP idlist RP", /* 153 */ "using_opt ::=", /* 154 */ "orderby_opt ::=", /* 155 */ "orderby_opt ::= ORDER BY sortlist", /* 156 */ "sortlist ::= sortlist COMMA expr sortorder", /* 157 */ "sortlist ::= expr sortorder", /* 158 */ "sortorder ::= ASC", /* 159 */ "sortorder ::= DESC", /* 160 */ "sortorder ::=", /* 161 */ "groupby_opt ::=", /* 162 */ "groupby_opt ::= GROUP BY nexprlist", /* 163 */ "having_opt ::=", /* 164 */ "having_opt ::= HAVING expr", /* 165 */ "limit_opt ::=", /* 166 */ "limit_opt ::= LIMIT expr", /* 167 */ "limit_opt ::= LIMIT expr OFFSET expr", /* 168 */ "limit_opt ::= LIMIT expr COMMA expr", /* 169 */ "cmd ::= with DELETE FROM fullname indexed_opt where_opt", /* 170 */ "where_opt ::=", /* 171 */ "where_opt ::= WHERE expr", /* 172 */ "cmd ::= with UPDATE orconf fullname indexed_opt SET setlist where_opt", /* 173 */ "setlist ::= setlist COMMA nm EQ expr", /* 174 */ "setlist ::= nm EQ expr", /* 175 */ "cmd ::= with insert_cmd INTO fullname inscollist_opt select", /* 
176 */ "cmd ::= with insert_cmd INTO fullname inscollist_opt DEFAULT VALUES", /* 177 */ "insert_cmd ::= INSERT orconf", /* 178 */ "insert_cmd ::= REPLACE", /* 179 */ "inscollist_opt ::=", /* 180 */ "inscollist_opt ::= LP idlist RP", /* 181 */ "idlist ::= idlist COMMA nm", /* 182 */ "idlist ::= nm", /* 183 */ "expr ::= term", /* 184 */ "expr ::= LP expr RP", /* 185 */ "term ::= NULL", /* 186 */ "expr ::= ID|INDEXED", /* 187 */ "expr ::= JOIN_KW", /* 188 */ "expr ::= nm DOT nm", /* 189 */ "expr ::= nm DOT nm DOT nm", /* 190 */ "term ::= INTEGER|FLOAT|BLOB", /* 191 */ "term ::= STRING", /* 192 */ "expr ::= VARIABLE", /* 193 */ "expr ::= expr COLLATE ID|STRING", /* 194 */ "expr ::= CAST LP expr AS typetoken RP", /* 195 */ "expr ::= ID|INDEXED LP distinct exprlist RP", /* 196 */ "expr ::= ID|INDEXED LP STAR RP", /* 197 */ "term ::= CTIME_KW", /* 198 */ "expr ::= expr AND expr", /* 199 */ "expr ::= expr OR expr", /* 200 */ "expr ::= expr LT|GT|GE|LE expr", /* 201 */ "expr ::= expr EQ|NE expr", /* 202 */ "expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr", /* 203 */ "expr ::= expr PLUS|MINUS expr", /* 204 */ "expr ::= expr STAR|SLASH|REM expr", /* 205 */ "expr ::= expr CONCAT expr", /* 206 */ "likeop ::= LIKE_KW|MATCH", /* 207 */ "likeop ::= NOT LIKE_KW|MATCH", /* 208 */ "expr ::= expr likeop expr", /* 209 */ "expr ::= expr likeop expr ESCAPE expr", /* 210 */ "expr ::= expr ISNULL|NOTNULL", /* 211 */ "expr ::= expr NOT NULL", /* 212 */ "expr ::= expr IS expr", /* 213 */ "expr ::= expr IS NOT expr", /* 214 */ "expr ::= NOT expr", /* 215 */ "expr ::= BITNOT expr", /* 216 */ "expr ::= MINUS expr", /* 217 */ "expr ::= PLUS expr", /* 218 */ "between_op ::= BETWEEN", /* 219 */ "between_op ::= NOT BETWEEN", /* 220 */ "expr ::= expr between_op expr AND expr", /* 221 */ "in_op ::= IN", /* 222 */ "in_op ::= NOT IN", /* 223 */ "expr ::= expr in_op LP exprlist RP", /* 224 */ "expr ::= LP select RP", /* 225 */ "expr ::= expr in_op LP select RP", /* 226 */ "expr ::= expr in_op nm dbnm", /* 227 */ "expr ::= EXISTS LP select RP", /* 228 */ "expr ::= CASE case_operand case_exprlist case_else END", /* 229 */ "case_exprlist ::= case_exprlist WHEN expr THEN expr", /* 230 */ "case_exprlist ::= WHEN expr THEN expr", /* 231 */ "case_else ::= ELSE expr", /* 232 */ "case_else ::=", /* 233 */ "case_operand ::= expr", /* 234 */ "case_operand ::=", /* 235 */ "exprlist ::= nexprlist", /* 236 */ "exprlist ::=", /* 237 */ "nexprlist ::= nexprlist COMMA expr", /* 238 */ "nexprlist ::= expr", /* 239 */ "cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP idxlist RP where_opt", /* 240 */ "uniqueflag ::= UNIQUE", /* 241 */ "uniqueflag ::=", /* 242 */ "idxlist_opt ::=", /* 243 */ "idxlist_opt ::= LP idxlist RP", /* 244 */ "idxlist ::= idxlist COMMA nm collate sortorder", /* 245 */ "idxlist ::= nm collate sortorder", /* 246 */ "collate ::=", /* 247 */ "collate ::= COLLATE ID|STRING", /* 248 */ "cmd ::= DROP INDEX ifexists fullname", /* 249 */ "cmd ::= VACUUM", /* 250 */ "cmd ::= VACUUM nm", /* 251 */ "cmd ::= PRAGMA nm dbnm", /* 252 */ "cmd ::= PRAGMA nm dbnm EQ nmnum", /* 253 */ "cmd ::= PRAGMA nm dbnm LP nmnum RP", /* 254 */ "cmd ::= PRAGMA nm dbnm EQ minus_num", /* 255 */ "cmd ::= PRAGMA nm dbnm LP minus_num RP", /* 256 */ "nmnum ::= plus_num", /* 257 */ "nmnum ::= nm", /* 258 */ "nmnum ::= ON", /* 259 */ "nmnum ::= DELETE", /* 260 */ "nmnum ::= DEFAULT", /* 261 */ "plus_num ::= PLUS INTEGER|FLOAT", /* 262 */ "plus_num ::= INTEGER|FLOAT", /* 263 */ "minus_num ::= MINUS INTEGER|FLOAT", /* 264 */ "cmd ::= createkw 
trigger_decl BEGIN trigger_cmd_list END", /* 265 */ "trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause", /* 266 */ "trigger_time ::= BEFORE", /* 267 */ "trigger_time ::= AFTER", /* 268 */ "trigger_time ::= INSTEAD OF", /* 269 */ "trigger_time ::=", /* 270 */ "trigger_event ::= DELETE|INSERT", /* 271 */ "trigger_event ::= UPDATE", /* 272 */ "trigger_event ::= UPDATE OF idlist", /* 273 */ "foreach_clause ::=", /* 274 */ "foreach_clause ::= FOR EACH ROW", /* 275 */ "when_clause ::=", /* 276 */ "when_clause ::= WHEN expr", /* 277 */ "trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI", /* 278 */ "trigger_cmd_list ::= trigger_cmd SEMI", /* 279 */ "trnm ::= nm", /* 280 */ "trnm ::= nm DOT nm", /* 281 */ "tridxby ::=", /* 282 */ "tridxby ::= INDEXED BY nm", /* 283 */ "tridxby ::= NOT INDEXED", /* 284 */ "trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist where_opt", /* 285 */ "trigger_cmd ::= insert_cmd INTO trnm inscollist_opt select", /* 286 */ "trigger_cmd ::= DELETE FROM trnm tridxby where_opt", /* 287 */ "trigger_cmd ::= select", /* 288 */ "expr ::= RAISE LP IGNORE RP", /* 289 */ "expr ::= RAISE LP raisetype COMMA nm RP", /* 290 */ "raisetype ::= ROLLBACK", /* 291 */ "raisetype ::= ABORT", /* 292 */ "raisetype ::= FAIL", /* 293 */ "cmd ::= DROP TRIGGER ifexists fullname", /* 294 */ "cmd ::= ATTACH database_kw_opt expr AS expr key_opt", /* 295 */ "cmd ::= DETACH database_kw_opt expr", /* 296 */ "key_opt ::=", /* 297 */ "key_opt ::= KEY expr", /* 298 */ "database_kw_opt ::= DATABASE", /* 299 */ "database_kw_opt ::=", /* 300 */ "cmd ::= REINDEX", /* 301 */ "cmd ::= REINDEX nm dbnm", /* 302 */ "cmd ::= ANALYZE", /* 303 */ "cmd ::= ANALYZE nm dbnm", /* 304 */ "cmd ::= ALTER TABLE fullname RENAME TO nm", /* 305 */ "cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt column", /* 306 */ "add_column_fullname ::= fullname", /* 307 */ "kwcolumn_opt ::=", /* 308 */ "kwcolumn_opt ::= COLUMNKW", /* 309 */ "cmd ::= create_vtab", /* 310 */ "cmd ::= create_vtab LP vtabarglist RP", /* 311 */ "create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm", /* 312 */ "vtabarglist ::= vtabarg", /* 313 */ "vtabarglist ::= vtabarglist COMMA vtabarg", /* 314 */ "vtabarg ::=", /* 315 */ "vtabarg ::= vtabarg vtabargtoken", /* 316 */ "vtabargtoken ::= ANY", /* 317 */ "vtabargtoken ::= lp anylist RP", /* 318 */ "lp ::= LP", /* 319 */ "anylist ::=", /* 320 */ "anylist ::= anylist LP anylist RP", /* 321 */ "anylist ::= anylist ANY", /* 322 */ "with ::=", /* 323 */ "with ::= WITH wqlist", /* 324 */ "with ::= WITH RECURSIVE wqlist", /* 325 */ "wqlist ::= nm idxlist_opt AS LP select RP", /* 326 */ "wqlist ::= wqlist COMMA nm idxlist_opt AS LP select RP", }; #endif /* NDEBUG */ #if YYSTACKDEPTH<=0 /* ** Try to increase the size of the parser stack. */ static void yyGrowStack(yyParser *p){ int newSize; yyStackEntry *pNew; newSize = p->yystksz*2 + 100; pNew = realloc(p->yystack, newSize*sizeof(pNew[0])); if( pNew ){ p->yystack = pNew; p->yystksz = newSize; #ifndef NDEBUG if( yyTraceFILE ){ fprintf(yyTraceFILE,"%sStack grows to %d entries!\n", yyTracePrompt, p->yystksz); } #endif } } #endif /* ** This function allocates a new parser. ** The only argument is a pointer to a function which works like ** malloc. ** ** Inputs: ** A pointer to the function used to allocate memory. ** ** Outputs: ** A pointer to a parser. This pointer is used in subsequent calls ** to sqlite3Parser and sqlite3ParserFree. 
*/ SQLITE_PRIVATE void *sqlite3ParserAlloc(void *(*mallocProc)(u64)){ yyParser *pParser; pParser = (yyParser*)(*mallocProc)( (u64)sizeof(yyParser) ); if( pParser ){ pParser->yyidx = -1; #ifdef YYTRACKMAXSTACKDEPTH pParser->yyidxMax = 0; #endif #if YYSTACKDEPTH<=0 pParser->yystack = NULL; pParser->yystksz = 0; yyGrowStack(pParser); #endif } return pParser; } /* The following function deletes the value associated with a ** symbol. The symbol can be either a terminal or nonterminal. ** "yymajor" is the symbol code, and "yypminor" is a pointer to ** the value. */ static void yy_destructor( yyParser *yypParser, /* The parser */ YYCODETYPE yymajor, /* Type code for object to destroy */ YYMINORTYPE *yypminor /* The object to be destroyed */ ){ sqlite3ParserARG_FETCH; switch( yymajor ){ /* Here is inserted the actions which take place when a ** terminal or non-terminal is destroyed. This can happen ** when the symbol is popped from the stack during a ** reduce or during error processing or when a parser is ** being destroyed before it is finished parsing. ** ** Note: during a reduce, the only symbols destroyed are those ** which appear on the RHS of the rule, but which are not used ** inside the C code. */ case 163: /* select */ case 195: /* selectnowith */ case 196: /* oneselect */ case 207: /* values */ { sqlite3SelectDelete(pParse->db, (yypminor->yy3)); } break; case 174: /* term */ case 175: /* expr */ { sqlite3ExprDelete(pParse->db, (yypminor->yy346).pExpr); } break; case 179: /* idxlist_opt */ case 188: /* idxlist */ case 200: /* selcollist */ case 203: /* groupby_opt */ case 205: /* orderby_opt */ case 208: /* nexprlist */ case 209: /* exprlist */ case 210: /* sclp */ case 220: /* sortlist */ case 221: /* setlist */ case 228: /* case_exprlist */ { sqlite3ExprListDelete(pParse->db, (yypminor->yy14)); } break; case 194: /* fullname */ case 201: /* from */ case 212: /* seltablist */ case 213: /* stl_prefix */ { sqlite3SrcListDelete(pParse->db, (yypminor->yy65)); } break; case 197: /* with */ case 252: /* wqlist */ { sqlite3WithDelete(pParse->db, (yypminor->yy59)); } break; case 202: /* where_opt */ case 204: /* having_opt */ case 216: /* on_opt */ case 227: /* case_operand */ case 229: /* case_else */ case 238: /* when_clause */ case 243: /* key_opt */ { sqlite3ExprDelete(pParse->db, (yypminor->yy132)); } break; case 217: /* using_opt */ case 219: /* idlist */ case 223: /* inscollist_opt */ { sqlite3IdListDelete(pParse->db, (yypminor->yy408)); } break; case 234: /* trigger_cmd_list */ case 239: /* trigger_cmd */ { sqlite3DeleteTriggerStep(pParse->db, (yypminor->yy473)); } break; case 236: /* trigger_event */ { sqlite3IdListDelete(pParse->db, (yypminor->yy378).b); } break; default: break; /* If no destructor action specified: do nothing */ } } /* ** Pop the parser's stack once. ** ** If there is a destructor routine associated with the token which ** is popped from the stack, then call it. ** ** Return the major token number for the symbol popped. */ static int yy_pop_parser_stack(yyParser *pParser){ YYCODETYPE yymajor; yyStackEntry *yytos = &pParser->yystack[pParser->yyidx]; /* There is no mechanism by which the parser stack can be popped below ** empty in SQLite. */ assert( pParser->yyidx>=0 ); #ifndef NDEBUG if( yyTraceFILE && pParser->yyidx>=0 ){ fprintf(yyTraceFILE,"%sPopping %s\n", yyTracePrompt, yyTokenName[yytos->major]); } #endif yymajor = yytos->major; yy_destructor(pParser, yymajor, &yytos->minor); pParser->yyidx--; return yymajor; } /* ** Deallocate and destroy a parser. 
Destructors are all called for ** all stack elements before shutting the parser down. ** ** Inputs: ** <ul>
** <li>  A pointer to the parser.  This should be a pointer ** obtained from sqlite3ParserAlloc.
** <li>  A pointer to a function used to reclaim memory obtained ** from malloc.
** </ul>
*/ SQLITE_PRIVATE void sqlite3ParserFree( void *p, /* The parser to be deleted */ void (*freeProc)(void*) /* Function used to reclaim memory */ ){ yyParser *pParser = (yyParser*)p; /* In SQLite, we never try to destroy a parser that was not successfully ** created in the first place. */ if( NEVER(pParser==0) ) return; while( pParser->yyidx>=0 ) yy_pop_parser_stack(pParser); #if YYSTACKDEPTH<=0 free(pParser->yystack); #endif (*freeProc)((void*)pParser); } /* ** Return the peak depth of the stack for a parser. */ #ifdef YYTRACKMAXSTACKDEPTH SQLITE_PRIVATE int sqlite3ParserStackPeak(void *p){ yyParser *pParser = (yyParser*)p; return pParser->yyidxMax; } #endif /* ** Find the appropriate action for a parser given the terminal ** look-ahead token iLookAhead. ** ** If the look-ahead token is YYNOCODE, then check to see if the action is ** independent of the look-ahead. If it is, return the action, otherwise ** return YY_NO_ACTION. */ static int yy_find_shift_action( yyParser *pParser, /* The parser */ YYCODETYPE iLookAhead /* The look-ahead token */ ){ int i; int stateno = pParser->yystack[pParser->yyidx].stateno; if( stateno>YY_SHIFT_COUNT || (i = yy_shift_ofst[stateno])==YY_SHIFT_USE_DFLT ){ return yy_default[stateno]; } assert( iLookAhead!=YYNOCODE ); i += iLookAhead; if( i<0 || i>=YY_ACTTAB_COUNT || yy_lookahead[i]!=iLookAhead ){ if( iLookAhead>0 ){ #ifdef YYFALLBACK YYCODETYPE iFallback; /* Fallback token */ if( iLookAhead<sizeof(yyFallback)/sizeof(yyFallback[0]) && (iFallback = yyFallback[iLookAhead])!=0 ){ #ifndef NDEBUG if( yyTraceFILE ){ fprintf(yyTraceFILE, "%sFALLBACK %s => %s\n", yyTracePrompt, yyTokenName[iLookAhead], yyTokenName[iFallback]); } #endif return yy_find_shift_action(pParser, iFallback); } #endif #ifdef YYWILDCARD { int j = i - iLookAhead + YYWILDCARD; if( #if YY_SHIFT_MIN+YYWILDCARD<0 j>=0 && #endif #if YY_SHIFT_MAX+YYWILDCARD>=YY_ACTTAB_COUNT j<YY_ACTTAB_COUNT && #endif yy_lookahead[j]==YYWILDCARD ){ #ifndef NDEBUG if( yyTraceFILE ){ fprintf(yyTraceFILE, "%sWILDCARD %s => %s\n", yyTracePrompt, yyTokenName[iLookAhead], yyTokenName[YYWILDCARD]); } #endif /* NDEBUG */ return yy_action[j]; } } #endif /* YYWILDCARD */ } return yy_default[stateno]; }else{ return yy_action[i]; } } /* ** Find the appropriate action for a parser given the non-terminal ** look-ahead token iLookAhead. ** ** If the look-ahead token is YYNOCODE, then check to see if the action is ** independent of the look-ahead. If it is, return the action, otherwise ** return YY_NO_ACTION. */ static int yy_find_reduce_action( int stateno, /* Current state number */ YYCODETYPE iLookAhead /* The look-ahead token */ ){ int i; #ifdef YYERRORSYMBOL if( stateno>YY_REDUCE_COUNT ){ return yy_default[stateno]; } #else assert( stateno<=YY_REDUCE_COUNT ); #endif i = yy_reduce_ofst[stateno]; assert( i!=YY_REDUCE_USE_DFLT ); assert( iLookAhead!=YYNOCODE ); i += iLookAhead; #ifdef YYERRORSYMBOL if( i<0 || i>=YY_ACTTAB_COUNT || yy_lookahead[i]!=iLookAhead ){ return yy_default[stateno]; } #else assert( i>=0 && i<YY_ACTTAB_COUNT ); assert( yy_lookahead[i]==iLookAhead ); #endif return yy_action[i]; } /* ** The following routine is called if the stack overflows. */ static void yyStackOverflow(yyParser *yypParser, YYMINORTYPE *yypMinor){ sqlite3ParserARG_FETCH; yypParser->yyidx--; #ifndef NDEBUG if( yyTraceFILE ){ fprintf(yyTraceFILE,"%sStack Overflow!\n",yyTracePrompt); } #endif while( yypParser->yyidx>=0 ) yy_pop_parser_stack(yypParser); /* Here code is inserted which will execute if the parser ** stack ever overflows */ UNUSED_PARAMETER(yypMinor); /* Silence some compiler warnings */ sqlite3ErrorMsg(pParse, "parser stack overflow"); sqlite3ParserARG_STORE; /* Suppress warning about unused %extra_argument var */ } /* ** Perform a shift action. 
*/ static void yy_shift( yyParser *yypParser, /* The parser to be shifted */ int yyNewState, /* The new state to shift in */ int yyMajor, /* The major token to shift in */ YYMINORTYPE *yypMinor /* Pointer to the minor token to shift in */ ){ yyStackEntry *yytos; yypParser->yyidx++; #ifdef YYTRACKMAXSTACKDEPTH if( yypParser->yyidx>yypParser->yyidxMax ){ yypParser->yyidxMax = yypParser->yyidx; } #endif #if YYSTACKDEPTH>0 if( yypParser->yyidx>=YYSTACKDEPTH ){ yyStackOverflow(yypParser, yypMinor); return; } #else if( yypParser->yyidx>=yypParser->yystksz ){ yyGrowStack(yypParser); if( yypParser->yyidx>=yypParser->yystksz ){ yyStackOverflow(yypParser, yypMinor); return; } } #endif yytos = &yypParser->yystack[yypParser->yyidx]; yytos->stateno = (YYACTIONTYPE)yyNewState; yytos->major = (YYCODETYPE)yyMajor; yytos->minor = *yypMinor; #ifndef NDEBUG if( yyTraceFILE && yypParser->yyidx>0 ){ int i; fprintf(yyTraceFILE,"%sShift %d\n",yyTracePrompt,yyNewState); fprintf(yyTraceFILE,"%sStack:",yyTracePrompt); for(i=1; i<=yypParser->yyidx; i++) fprintf(yyTraceFILE," %s",yyTokenName[yypParser->yystack[i].major]); fprintf(yyTraceFILE,"\n"); } #endif } /* The following table contains information about every rule that ** is used during the reduce. */ static const struct { YYCODETYPE lhs; /* Symbol on the left-hand side of the rule */ unsigned char nrhs; /* Number of right-hand side symbols in the rule */ } yyRuleInfo[] = { { 144, 1 }, { 145, 2 }, { 145, 1 }, { 146, 1 }, { 146, 3 }, { 147, 0 }, { 147, 1 }, { 147, 3 }, { 148, 1 }, { 149, 3 }, { 151, 0 }, { 151, 1 }, { 151, 2 }, { 150, 0 }, { 150, 1 }, { 150, 1 }, { 150, 1 }, { 149, 2 }, { 149, 2 }, { 149, 2 }, { 153, 1 }, { 153, 0 }, { 149, 2 }, { 149, 3 }, { 149, 5 }, { 149, 2 }, { 154, 6 }, { 156, 1 }, { 158, 0 }, { 158, 3 }, { 157, 1 }, { 157, 0 }, { 155, 5 }, { 155, 2 }, { 162, 0 }, { 162, 2 }, { 160, 3 }, { 160, 1 }, { 164, 3 }, { 165, 1 }, { 152, 1 }, { 152, 1 }, { 152, 1 }, { 166, 0 }, { 166, 1 }, { 168, 1 }, { 168, 4 }, { 168, 6 }, { 169, 1 }, { 169, 2 }, { 170, 1 }, { 170, 1 }, { 167, 2 }, { 167, 0 }, { 173, 2 }, { 173, 2 }, { 173, 4 }, { 173, 3 }, { 173, 3 }, { 173, 2 }, { 173, 2 }, { 173, 3 }, { 173, 5 }, { 173, 2 }, { 173, 4 }, { 173, 4 }, { 173, 1 }, { 173, 2 }, { 178, 0 }, { 178, 1 }, { 180, 0 }, { 180, 2 }, { 182, 2 }, { 182, 3 }, { 182, 3 }, { 182, 3 }, { 183, 2 }, { 183, 2 }, { 183, 1 }, { 183, 1 }, { 183, 2 }, { 181, 3 }, { 181, 2 }, { 184, 0 }, { 184, 2 }, { 184, 2 }, { 161, 0 }, { 161, 2 }, { 185, 3 }, { 185, 1 }, { 186, 1 }, { 186, 0 }, { 187, 2 }, { 187, 7 }, { 187, 5 }, { 187, 5 }, { 187, 10 }, { 189, 0 }, { 189, 1 }, { 176, 0 }, { 176, 3 }, { 190, 0 }, { 190, 2 }, { 191, 1 }, { 191, 1 }, { 191, 1 }, { 149, 4 }, { 193, 2 }, { 193, 0 }, { 149, 8 }, { 149, 4 }, { 149, 1 }, { 163, 2 }, { 195, 1 }, { 195, 3 }, { 198, 1 }, { 198, 2 }, { 198, 1 }, { 196, 9 }, { 196, 1 }, { 207, 4 }, { 207, 5 }, { 199, 1 }, { 199, 1 }, { 199, 0 }, { 210, 2 }, { 210, 0 }, { 200, 3 }, { 200, 2 }, { 200, 4 }, { 211, 2 }, { 211, 1 }, { 211, 0 }, { 201, 0 }, { 201, 2 }, { 213, 2 }, { 213, 0 }, { 212, 7 }, { 212, 7 }, { 212, 7 }, { 159, 0 }, { 159, 2 }, { 194, 2 }, { 214, 1 }, { 214, 2 }, { 214, 3 }, { 214, 4 }, { 216, 2 }, { 216, 0 }, { 215, 0 }, { 215, 3 }, { 215, 2 }, { 217, 4 }, { 217, 0 }, { 205, 0 }, { 205, 3 }, { 220, 4 }, { 220, 2 }, { 177, 1 }, { 177, 1 }, { 177, 0 }, { 203, 0 }, { 203, 3 }, { 204, 0 }, { 204, 2 }, { 206, 0 }, { 206, 2 }, { 206, 4 }, { 206, 4 }, { 149, 6 }, { 202, 0 }, { 202, 2 }, { 149, 8 }, { 221, 5 }, { 221, 3 }, { 149, 6 }, { 149, 7 }, { 
222, 2 }, { 222, 1 }, { 223, 0 }, { 223, 3 }, { 219, 3 }, { 219, 1 }, { 175, 1 }, { 175, 3 }, { 174, 1 }, { 175, 1 }, { 175, 1 }, { 175, 3 }, { 175, 5 }, { 174, 1 }, { 174, 1 }, { 175, 1 }, { 175, 3 }, { 175, 6 }, { 175, 5 }, { 175, 4 }, { 174, 1 }, { 175, 3 }, { 175, 3 }, { 175, 3 }, { 175, 3 }, { 175, 3 }, { 175, 3 }, { 175, 3 }, { 175, 3 }, { 224, 1 }, { 224, 2 }, { 175, 3 }, { 175, 5 }, { 175, 2 }, { 175, 3 }, { 175, 3 }, { 175, 4 }, { 175, 2 }, { 175, 2 }, { 175, 2 }, { 175, 2 }, { 225, 1 }, { 225, 2 }, { 175, 5 }, { 226, 1 }, { 226, 2 }, { 175, 5 }, { 175, 3 }, { 175, 5 }, { 175, 4 }, { 175, 4 }, { 175, 5 }, { 228, 5 }, { 228, 4 }, { 229, 2 }, { 229, 0 }, { 227, 1 }, { 227, 0 }, { 209, 1 }, { 209, 0 }, { 208, 3 }, { 208, 1 }, { 149, 12 }, { 230, 1 }, { 230, 0 }, { 179, 0 }, { 179, 3 }, { 188, 5 }, { 188, 3 }, { 231, 0 }, { 231, 2 }, { 149, 4 }, { 149, 1 }, { 149, 2 }, { 149, 3 }, { 149, 5 }, { 149, 6 }, { 149, 5 }, { 149, 6 }, { 232, 1 }, { 232, 1 }, { 232, 1 }, { 232, 1 }, { 232, 1 }, { 171, 2 }, { 171, 1 }, { 172, 2 }, { 149, 5 }, { 233, 11 }, { 235, 1 }, { 235, 1 }, { 235, 2 }, { 235, 0 }, { 236, 1 }, { 236, 1 }, { 236, 3 }, { 237, 0 }, { 237, 3 }, { 238, 0 }, { 238, 2 }, { 234, 3 }, { 234, 2 }, { 240, 1 }, { 240, 3 }, { 241, 0 }, { 241, 3 }, { 241, 2 }, { 239, 7 }, { 239, 5 }, { 239, 5 }, { 239, 1 }, { 175, 4 }, { 175, 6 }, { 192, 1 }, { 192, 1 }, { 192, 1 }, { 149, 4 }, { 149, 6 }, { 149, 3 }, { 243, 0 }, { 243, 2 }, { 242, 1 }, { 242, 0 }, { 149, 1 }, { 149, 3 }, { 149, 1 }, { 149, 3 }, { 149, 6 }, { 149, 6 }, { 244, 1 }, { 245, 0 }, { 245, 1 }, { 149, 1 }, { 149, 4 }, { 246, 8 }, { 247, 1 }, { 247, 3 }, { 248, 0 }, { 248, 2 }, { 249, 1 }, { 249, 3 }, { 250, 1 }, { 251, 0 }, { 251, 4 }, { 251, 2 }, { 197, 0 }, { 197, 2 }, { 197, 3 }, { 252, 6 }, { 252, 8 }, }; static void yy_accept(yyParser*); /* Forward Declaration */ /* ** Perform a reduce action and the shift that must immediately ** follow the reduce. */ static void yy_reduce( yyParser *yypParser, /* The parser */ int yyruleno /* Number of the rule by which to reduce */ ){ int yygoto; /* The next state */ int yyact; /* The next action */ YYMINORTYPE yygotominor; /* The LHS of the rule reduced */ yyStackEntry *yymsp; /* The top of the parser's stack */ int yysize; /* Amount to pop the stack */ sqlite3ParserARG_FETCH; yymsp = &yypParser->yystack[yypParser->yyidx]; #ifndef NDEBUG if( yyTraceFILE && yyruleno>=0 && yyruleno<(int)(sizeof(yyRuleName)/sizeof(yyRuleName[0])) ){ fprintf(yyTraceFILE, "%sReduce [%s].\n", yyTracePrompt, yyRuleName[yyruleno]); } #endif /* NDEBUG */ /* Silence complaints from purify about yygotominor being uninitialized ** in some cases when it is copied into the stack after the following ** switch. yygotominor is uninitialized when a rule reduces that does ** not set the value of its left-hand side nonterminal. Leaving the ** value of the nonterminal uninitialized is utterly harmless as long ** as the value is never used. So really the only thing this code ** accomplishes is to quieten purify. ** ** 2007-01-16: The wireshark project (www.wireshark.org) reports that ** without this code, their parser segfaults. I'm not sure what there ** parser is doing to make this happen. This is the second bug report ** from wireshark this week. Clearly they are stressing Lemon in ways ** that it has not been previously stressed... (SQLite ticket #2172) */ /*memset(&yygotominor, 0, sizeof(yygotominor));*/ yygotominor = yyzerominor; switch( yyruleno ){ /* Beginning here are the reduction cases. 
A typical example ** follows: ** case 0: ** #line ** { ... } // User supplied code ** #line ** break; */ case 5: /* explain ::= */ { sqlite3BeginParse(pParse, 0); } break; case 6: /* explain ::= EXPLAIN */ { sqlite3BeginParse(pParse, 1); } break; case 7: /* explain ::= EXPLAIN QUERY PLAN */ { sqlite3BeginParse(pParse, 2); } break; case 8: /* cmdx ::= cmd */ { sqlite3FinishCoding(pParse); } break; case 9: /* cmd ::= BEGIN transtype trans_opt */ {sqlite3BeginTransaction(pParse, yymsp[-1].minor.yy328);} break; case 13: /* transtype ::= */ {yygotominor.yy328 = TK_DEFERRED;} break; case 14: /* transtype ::= DEFERRED */ case 15: /* transtype ::= IMMEDIATE */ yytestcase(yyruleno==15); case 16: /* transtype ::= EXCLUSIVE */ yytestcase(yyruleno==16); case 115: /* multiselect_op ::= UNION */ yytestcase(yyruleno==115); case 117: /* multiselect_op ::= EXCEPT|INTERSECT */ yytestcase(yyruleno==117); {yygotominor.yy328 = yymsp[0].major;} break; case 17: /* cmd ::= COMMIT trans_opt */ case 18: /* cmd ::= END trans_opt */ yytestcase(yyruleno==18); {sqlite3CommitTransaction(pParse);} break; case 19: /* cmd ::= ROLLBACK trans_opt */ {sqlite3RollbackTransaction(pParse);} break; case 22: /* cmd ::= SAVEPOINT nm */ { sqlite3Savepoint(pParse, SAVEPOINT_BEGIN, &yymsp[0].minor.yy0); } break; case 23: /* cmd ::= RELEASE savepoint_opt nm */ { sqlite3Savepoint(pParse, SAVEPOINT_RELEASE, &yymsp[0].minor.yy0); } break; case 24: /* cmd ::= ROLLBACK trans_opt TO savepoint_opt nm */ { sqlite3Savepoint(pParse, SAVEPOINT_ROLLBACK, &yymsp[0].minor.yy0); } break; case 26: /* create_table ::= createkw temp TABLE ifnotexists nm dbnm */ { sqlite3StartTable(pParse,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0,yymsp[-4].minor.yy328,0,0,yymsp[-2].minor.yy328); } break; case 27: /* createkw ::= CREATE */ { pParse->db->lookaside.bEnabled = 0; yygotominor.yy0 = yymsp[0].minor.yy0; } break; case 28: /* ifnotexists ::= */ case 31: /* temp ::= */ yytestcase(yyruleno==31); case 68: /* autoinc ::= */ yytestcase(yyruleno==68); case 81: /* defer_subclause ::= NOT DEFERRABLE init_deferred_pred_opt */ yytestcase(yyruleno==81); case 83: /* init_deferred_pred_opt ::= */ yytestcase(yyruleno==83); case 85: /* init_deferred_pred_opt ::= INITIALLY IMMEDIATE */ yytestcase(yyruleno==85); case 97: /* defer_subclause_opt ::= */ yytestcase(yyruleno==97); case 108: /* ifexists ::= */ yytestcase(yyruleno==108); case 218: /* between_op ::= BETWEEN */ yytestcase(yyruleno==218); case 221: /* in_op ::= IN */ yytestcase(yyruleno==221); {yygotominor.yy328 = 0;} break; case 29: /* ifnotexists ::= IF NOT EXISTS */ case 30: /* temp ::= TEMP */ yytestcase(yyruleno==30); case 69: /* autoinc ::= AUTOINCR */ yytestcase(yyruleno==69); case 84: /* init_deferred_pred_opt ::= INITIALLY DEFERRED */ yytestcase(yyruleno==84); case 107: /* ifexists ::= IF EXISTS */ yytestcase(yyruleno==107); case 219: /* between_op ::= NOT BETWEEN */ yytestcase(yyruleno==219); case 222: /* in_op ::= NOT IN */ yytestcase(yyruleno==222); {yygotominor.yy328 = 1;} break; case 32: /* create_table_args ::= LP columnlist conslist_opt RP table_options */ { sqlite3EndTable(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,yymsp[0].minor.yy186,0); } break; case 33: /* create_table_args ::= AS select */ { sqlite3EndTable(pParse,0,0,0,yymsp[0].minor.yy3); sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy3); } break; case 34: /* table_options ::= */ {yygotominor.yy186 = 0;} break; case 35: /* table_options ::= WITHOUT nm */ { if( yymsp[0].minor.yy0.n==5 && sqlite3_strnicmp(yymsp[0].minor.yy0.z,"rowid",5)==0 ){ 
yygotominor.yy186 = TF_WithoutRowid | TF_NoVisibleRowid; }else{ yygotominor.yy186 = 0; sqlite3ErrorMsg(pParse, "unknown table option: %.*s", yymsp[0].minor.yy0.n, yymsp[0].minor.yy0.z); } } break; case 38: /* column ::= columnid type carglist */ { yygotominor.yy0.z = yymsp[-2].minor.yy0.z; yygotominor.yy0.n = (int)(pParse->sLastToken.z-yymsp[-2].minor.yy0.z) + pParse->sLastToken.n; } break; case 39: /* columnid ::= nm */ { sqlite3AddColumn(pParse,&yymsp[0].minor.yy0); yygotominor.yy0 = yymsp[0].minor.yy0; pParse->constraintName.n = 0; } break; case 40: /* nm ::= ID|INDEXED */ case 41: /* nm ::= STRING */ yytestcase(yyruleno==41); case 42: /* nm ::= JOIN_KW */ yytestcase(yyruleno==42); case 45: /* typetoken ::= typename */ yytestcase(yyruleno==45); case 48: /* typename ::= ID|STRING */ yytestcase(yyruleno==48); case 130: /* as ::= AS nm */ yytestcase(yyruleno==130); case 131: /* as ::= ID|STRING */ yytestcase(yyruleno==131); case 141: /* dbnm ::= DOT nm */ yytestcase(yyruleno==141); case 150: /* indexed_opt ::= INDEXED BY nm */ yytestcase(yyruleno==150); case 247: /* collate ::= COLLATE ID|STRING */ yytestcase(yyruleno==247); case 256: /* nmnum ::= plus_num */ yytestcase(yyruleno==256); case 257: /* nmnum ::= nm */ yytestcase(yyruleno==257); case 258: /* nmnum ::= ON */ yytestcase(yyruleno==258); case 259: /* nmnum ::= DELETE */ yytestcase(yyruleno==259); case 260: /* nmnum ::= DEFAULT */ yytestcase(yyruleno==260); case 261: /* plus_num ::= PLUS INTEGER|FLOAT */ yytestcase(yyruleno==261); case 262: /* plus_num ::= INTEGER|FLOAT */ yytestcase(yyruleno==262); case 263: /* minus_num ::= MINUS INTEGER|FLOAT */ yytestcase(yyruleno==263); case 279: /* trnm ::= nm */ yytestcase(yyruleno==279); {yygotominor.yy0 = yymsp[0].minor.yy0;} break; case 44: /* type ::= typetoken */ {sqlite3AddColumnType(pParse,&yymsp[0].minor.yy0);} break; case 46: /* typetoken ::= typename LP signed RP */ { yygotominor.yy0.z = yymsp[-3].minor.yy0.z; yygotominor.yy0.n = (int)(&yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n] - yymsp[-3].minor.yy0.z); } break; case 47: /* typetoken ::= typename LP signed COMMA signed RP */ { yygotominor.yy0.z = yymsp[-5].minor.yy0.z; yygotominor.yy0.n = (int)(&yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n] - yymsp[-5].minor.yy0.z); } break; case 49: /* typename ::= typename ID|STRING */ {yygotominor.yy0.z=yymsp[-1].minor.yy0.z; yygotominor.yy0.n=yymsp[0].minor.yy0.n+(int)(yymsp[0].minor.yy0.z-yymsp[-1].minor.yy0.z);} break; case 54: /* ccons ::= CONSTRAINT nm */ case 92: /* tcons ::= CONSTRAINT nm */ yytestcase(yyruleno==92); {pParse->constraintName = yymsp[0].minor.yy0;} break; case 55: /* ccons ::= DEFAULT term */ case 57: /* ccons ::= DEFAULT PLUS term */ yytestcase(yyruleno==57); {sqlite3AddDefaultValue(pParse,&yymsp[0].minor.yy346);} break; case 56: /* ccons ::= DEFAULT LP expr RP */ {sqlite3AddDefaultValue(pParse,&yymsp[-1].minor.yy346);} break; case 58: /* ccons ::= DEFAULT MINUS term */ { ExprSpan v; v.pExpr = sqlite3PExpr(pParse, TK_UMINUS, yymsp[0].minor.yy346.pExpr, 0, 0); v.zStart = yymsp[-1].minor.yy0.z; v.zEnd = yymsp[0].minor.yy346.zEnd; sqlite3AddDefaultValue(pParse,&v); } break; case 59: /* ccons ::= DEFAULT ID|INDEXED */ { ExprSpan v; spanExpr(&v, pParse, TK_STRING, &yymsp[0].minor.yy0); sqlite3AddDefaultValue(pParse,&v); } break; case 61: /* ccons ::= NOT NULL onconf */ {sqlite3AddNotNull(pParse, yymsp[0].minor.yy328);} break; case 62: /* ccons ::= PRIMARY KEY sortorder onconf autoinc */ 
{sqlite3AddPrimaryKey(pParse,0,yymsp[-1].minor.yy328,yymsp[0].minor.yy328,yymsp[-2].minor.yy328);} break; case 63: /* ccons ::= UNIQUE onconf */ {sqlite3CreateIndex(pParse,0,0,0,0,yymsp[0].minor.yy328,0,0,0,0);} break; case 64: /* ccons ::= CHECK LP expr RP */ {sqlite3AddCheckConstraint(pParse,yymsp[-1].minor.yy346.pExpr);} break; case 65: /* ccons ::= REFERENCES nm idxlist_opt refargs */ {sqlite3CreateForeignKey(pParse,0,&yymsp[-2].minor.yy0,yymsp[-1].minor.yy14,yymsp[0].minor.yy328);} break; case 66: /* ccons ::= defer_subclause */ {sqlite3DeferForeignKey(pParse,yymsp[0].minor.yy328);} break; case 67: /* ccons ::= COLLATE ID|STRING */ {sqlite3AddCollateType(pParse, &yymsp[0].minor.yy0);} break; case 70: /* refargs ::= */ { yygotominor.yy328 = OE_None*0x0101; /* EV: R-19803-45884 */} break; case 71: /* refargs ::= refargs refarg */ { yygotominor.yy328 = (yymsp[-1].minor.yy328 & ~yymsp[0].minor.yy429.mask) | yymsp[0].minor.yy429.value; } break; case 72: /* refarg ::= MATCH nm */ case 73: /* refarg ::= ON INSERT refact */ yytestcase(yyruleno==73); { yygotominor.yy429.value = 0; yygotominor.yy429.mask = 0x000000; } break; case 74: /* refarg ::= ON DELETE refact */ { yygotominor.yy429.value = yymsp[0].minor.yy328; yygotominor.yy429.mask = 0x0000ff; } break; case 75: /* refarg ::= ON UPDATE refact */ { yygotominor.yy429.value = yymsp[0].minor.yy328<<8; yygotominor.yy429.mask = 0x00ff00; } break; case 76: /* refact ::= SET NULL */ { yygotominor.yy328 = OE_SetNull; /* EV: R-33326-45252 */} break; case 77: /* refact ::= SET DEFAULT */ { yygotominor.yy328 = OE_SetDflt; /* EV: R-33326-45252 */} break; case 78: /* refact ::= CASCADE */ { yygotominor.yy328 = OE_Cascade; /* EV: R-33326-45252 */} break; case 79: /* refact ::= RESTRICT */ { yygotominor.yy328 = OE_Restrict; /* EV: R-33326-45252 */} break; case 80: /* refact ::= NO ACTION */ { yygotominor.yy328 = OE_None; /* EV: R-33326-45252 */} break; case 82: /* defer_subclause ::= DEFERRABLE init_deferred_pred_opt */ case 98: /* defer_subclause_opt ::= defer_subclause */ yytestcase(yyruleno==98); case 100: /* onconf ::= ON CONFLICT resolvetype */ yytestcase(yyruleno==100); case 103: /* resolvetype ::= raisetype */ yytestcase(yyruleno==103); {yygotominor.yy328 = yymsp[0].minor.yy328;} break; case 86: /* conslist_opt ::= */ {yygotominor.yy0.n = 0; yygotominor.yy0.z = 0;} break; case 87: /* conslist_opt ::= COMMA conslist */ {yygotominor.yy0 = yymsp[-1].minor.yy0;} break; case 90: /* tconscomma ::= COMMA */ {pParse->constraintName.n = 0;} break; case 93: /* tcons ::= PRIMARY KEY LP idxlist autoinc RP onconf */ {sqlite3AddPrimaryKey(pParse,yymsp[-3].minor.yy14,yymsp[0].minor.yy328,yymsp[-2].minor.yy328,0);} break; case 94: /* tcons ::= UNIQUE LP idxlist RP onconf */ {sqlite3CreateIndex(pParse,0,0,0,yymsp[-2].minor.yy14,yymsp[0].minor.yy328,0,0,0,0);} break; case 95: /* tcons ::= CHECK LP expr RP onconf */ {sqlite3AddCheckConstraint(pParse,yymsp[-2].minor.yy346.pExpr);} break; case 96: /* tcons ::= FOREIGN KEY LP idxlist RP REFERENCES nm idxlist_opt refargs defer_subclause_opt */ { sqlite3CreateForeignKey(pParse, yymsp[-6].minor.yy14, &yymsp[-3].minor.yy0, yymsp[-2].minor.yy14, yymsp[-1].minor.yy328); sqlite3DeferForeignKey(pParse, yymsp[0].minor.yy328); } break; case 99: /* onconf ::= */ {yygotominor.yy328 = OE_Default;} break; case 101: /* orconf ::= */ {yygotominor.yy186 = OE_Default;} break; case 102: /* orconf ::= OR resolvetype */ {yygotominor.yy186 = (u8)yymsp[0].minor.yy328;} break; case 104: /* resolvetype ::= IGNORE */ {yygotominor.yy328 = 
OE_Ignore;} break; case 105: /* resolvetype ::= REPLACE */ {yygotominor.yy328 = OE_Replace;} break; case 106: /* cmd ::= DROP TABLE ifexists fullname */ { sqlite3DropTable(pParse, yymsp[0].minor.yy65, 0, yymsp[-1].minor.yy328); } break; case 109: /* cmd ::= createkw temp VIEW ifnotexists nm dbnm AS select */ { sqlite3CreateView(pParse, &yymsp[-7].minor.yy0, &yymsp[-3].minor.yy0, &yymsp[-2].minor.yy0, yymsp[0].minor.yy3, yymsp[-6].minor.yy328, yymsp[-4].minor.yy328); } break; case 110: /* cmd ::= DROP VIEW ifexists fullname */ { sqlite3DropTable(pParse, yymsp[0].minor.yy65, 1, yymsp[-1].minor.yy328); } break; case 111: /* cmd ::= select */ { SelectDest dest = {SRT_Output, 0, 0, 0, 0, 0}; sqlite3Select(pParse, yymsp[0].minor.yy3, &dest); sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy3); } break; case 112: /* select ::= with selectnowith */ { Select *p = yymsp[0].minor.yy3; if( p ){ p->pWith = yymsp[-1].minor.yy59; parserDoubleLinkSelect(pParse, p); }else{ sqlite3WithDelete(pParse->db, yymsp[-1].minor.yy59); } yygotominor.yy3 = p; } break; case 113: /* selectnowith ::= oneselect */ case 119: /* oneselect ::= values */ yytestcase(yyruleno==119); {yygotominor.yy3 = yymsp[0].minor.yy3;} break; case 114: /* selectnowith ::= selectnowith multiselect_op oneselect */ { Select *pRhs = yymsp[0].minor.yy3; Select *pLhs = yymsp[-2].minor.yy3; if( pRhs && pRhs->pPrior ){ SrcList *pFrom; Token x; x.n = 0; parserDoubleLinkSelect(pParse, pRhs); pFrom = sqlite3SrcListAppendFromTerm(pParse,0,0,0,&x,pRhs,0,0); pRhs = sqlite3SelectNew(pParse,0,pFrom,0,0,0,0,0,0,0); } if( pRhs ){ pRhs->op = (u8)yymsp[-1].minor.yy328; pRhs->pPrior = pLhs; if( ALWAYS(pLhs) ) pLhs->selFlags &= ~SF_MultiValue; pRhs->selFlags &= ~SF_MultiValue; if( yymsp[-1].minor.yy328!=TK_ALL ) pParse->hasCompound = 1; }else{ sqlite3SelectDelete(pParse->db, pLhs); } yygotominor.yy3 = pRhs; } break; case 116: /* multiselect_op ::= UNION ALL */ {yygotominor.yy328 = TK_ALL;} break; case 118: /* oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt */ { yygotominor.yy3 = sqlite3SelectNew(pParse,yymsp[-6].minor.yy14,yymsp[-5].minor.yy65,yymsp[-4].minor.yy132,yymsp[-3].minor.yy14,yymsp[-2].minor.yy132,yymsp[-1].minor.yy14,yymsp[-7].minor.yy381,yymsp[0].minor.yy476.pLimit,yymsp[0].minor.yy476.pOffset); #if SELECTTRACE_ENABLED /* Populate the Select.zSelName[] string that is used to help with ** query planner debugging, to differentiate between multiple Select ** objects in a complex query. ** ** If the SELECT keyword is immediately followed by a C-style comment ** then extract the first few alphanumeric characters from within that ** comment to be the zSelName value. Otherwise, the label is #N where ** is an integer that is incremented with each SELECT statement seen. 
*/ if( yygotominor.yy3!=0 ){ const char *z = yymsp[-8].minor.yy0.z+6; int i; sqlite3_snprintf(sizeof(yygotominor.yy3->zSelName), yygotominor.yy3->zSelName, "#%d", ++pParse->nSelect); while( z[0]==' ' ) z++; if( z[0]=='/' && z[1]=='*' ){ z += 2; while( z[0]==' ' ) z++; for(i=0; sqlite3Isalnum(z[i]); i++){} sqlite3_snprintf(sizeof(yygotominor.yy3->zSelName), yygotominor.yy3->zSelName, "%.*s", i, z); } } #endif /* SELECTRACE_ENABLED */ } break; case 120: /* values ::= VALUES LP nexprlist RP */ { yygotominor.yy3 = sqlite3SelectNew(pParse,yymsp[-1].minor.yy14,0,0,0,0,0,SF_Values,0,0); } break; case 121: /* values ::= values COMMA LP exprlist RP */ { Select *pRight, *pLeft = yymsp[-4].minor.yy3; pRight = sqlite3SelectNew(pParse,yymsp[-1].minor.yy14,0,0,0,0,0,SF_Values|SF_MultiValue,0,0); if( ALWAYS(pLeft) ) pLeft->selFlags &= ~SF_MultiValue; if( pRight ){ pRight->op = TK_ALL; pLeft = yymsp[-4].minor.yy3; pRight->pPrior = pLeft; yygotominor.yy3 = pRight; }else{ yygotominor.yy3 = pLeft; } } break; case 122: /* distinct ::= DISTINCT */ {yygotominor.yy381 = SF_Distinct;} break; case 123: /* distinct ::= ALL */ {yygotominor.yy381 = SF_All;} break; case 124: /* distinct ::= */ {yygotominor.yy381 = 0;} break; case 125: /* sclp ::= selcollist COMMA */ case 243: /* idxlist_opt ::= LP idxlist RP */ yytestcase(yyruleno==243); {yygotominor.yy14 = yymsp[-1].minor.yy14;} break; case 126: /* sclp ::= */ case 154: /* orderby_opt ::= */ yytestcase(yyruleno==154); case 161: /* groupby_opt ::= */ yytestcase(yyruleno==161); case 236: /* exprlist ::= */ yytestcase(yyruleno==236); case 242: /* idxlist_opt ::= */ yytestcase(yyruleno==242); {yygotominor.yy14 = 0;} break; case 127: /* selcollist ::= sclp expr as */ { yygotominor.yy14 = sqlite3ExprListAppend(pParse, yymsp[-2].minor.yy14, yymsp[-1].minor.yy346.pExpr); if( yymsp[0].minor.yy0.n>0 ) sqlite3ExprListSetName(pParse, yygotominor.yy14, &yymsp[0].minor.yy0, 1); sqlite3ExprListSetSpan(pParse,yygotominor.yy14,&yymsp[-1].minor.yy346); } break; case 128: /* selcollist ::= sclp STAR */ { Expr *p = sqlite3Expr(pParse->db, TK_ALL, 0); yygotominor.yy14 = sqlite3ExprListAppend(pParse, yymsp[-1].minor.yy14, p); } break; case 129: /* selcollist ::= sclp nm DOT STAR */ { Expr *pRight = sqlite3PExpr(pParse, TK_ALL, 0, 0, &yymsp[0].minor.yy0); Expr *pLeft = sqlite3PExpr(pParse, TK_ID, 0, 0, &yymsp[-2].minor.yy0); Expr *pDot = sqlite3PExpr(pParse, TK_DOT, pLeft, pRight, 0); yygotominor.yy14 = sqlite3ExprListAppend(pParse,yymsp[-3].minor.yy14, pDot); } break; case 132: /* as ::= */ {yygotominor.yy0.n = 0;} break; case 133: /* from ::= */ {yygotominor.yy65 = sqlite3DbMallocZero(pParse->db, sizeof(*yygotominor.yy65));} break; case 134: /* from ::= FROM seltablist */ { yygotominor.yy65 = yymsp[0].minor.yy65; sqlite3SrcListShiftJoinType(yygotominor.yy65); } break; case 135: /* stl_prefix ::= seltablist joinop */ { yygotominor.yy65 = yymsp[-1].minor.yy65; if( ALWAYS(yygotominor.yy65 && yygotominor.yy65->nSrc>0) ) yygotominor.yy65->a[yygotominor.yy65->nSrc-1].jointype = (u8)yymsp[0].minor.yy328; } break; case 136: /* stl_prefix ::= */ {yygotominor.yy65 = 0;} break; case 137: /* seltablist ::= stl_prefix nm dbnm as indexed_opt on_opt using_opt */ { yygotominor.yy65 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy65,&yymsp[-5].minor.yy0,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,0,yymsp[-1].minor.yy132,yymsp[0].minor.yy408); sqlite3SrcListIndexedBy(pParse, yygotominor.yy65, &yymsp[-2].minor.yy0); } break; case 138: /* seltablist ::= stl_prefix LP select RP as on_opt using_opt */ { 
yygotominor.yy65 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy65,0,0,&yymsp[-2].minor.yy0,yymsp[-4].minor.yy3,yymsp[-1].minor.yy132,yymsp[0].minor.yy408); } break; case 139: /* seltablist ::= stl_prefix LP seltablist RP as on_opt using_opt */ { if( yymsp[-6].minor.yy65==0 && yymsp[-2].minor.yy0.n==0 && yymsp[-1].minor.yy132==0 && yymsp[0].minor.yy408==0 ){ yygotominor.yy65 = yymsp[-4].minor.yy65; }else if( yymsp[-4].minor.yy65->nSrc==1 ){ yygotominor.yy65 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy65,0,0,&yymsp[-2].minor.yy0,0,yymsp[-1].minor.yy132,yymsp[0].minor.yy408); if( yygotominor.yy65 ){ struct SrcList_item *pNew = &yygotominor.yy65->a[yygotominor.yy65->nSrc-1]; struct SrcList_item *pOld = yymsp[-4].minor.yy65->a; pNew->zName = pOld->zName; pNew->zDatabase = pOld->zDatabase; pNew->pSelect = pOld->pSelect; pOld->zName = pOld->zDatabase = 0; pOld->pSelect = 0; } sqlite3SrcListDelete(pParse->db, yymsp[-4].minor.yy65); }else{ Select *pSubquery; sqlite3SrcListShiftJoinType(yymsp[-4].minor.yy65); pSubquery = sqlite3SelectNew(pParse,0,yymsp[-4].minor.yy65,0,0,0,0,SF_NestedFrom,0,0); yygotominor.yy65 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy65,0,0,&yymsp[-2].minor.yy0,pSubquery,yymsp[-1].minor.yy132,yymsp[0].minor.yy408); } } break; case 140: /* dbnm ::= */ case 149: /* indexed_opt ::= */ yytestcase(yyruleno==149); {yygotominor.yy0.z=0; yygotominor.yy0.n=0;} break; case 142: /* fullname ::= nm dbnm */ {yygotominor.yy65 = sqlite3SrcListAppend(pParse->db,0,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0);} break; case 143: /* joinop ::= COMMA|JOIN */ { yygotominor.yy328 = JT_INNER; } break; case 144: /* joinop ::= JOIN_KW JOIN */ { yygotominor.yy328 = sqlite3JoinType(pParse,&yymsp[-1].minor.yy0,0,0); } break; case 145: /* joinop ::= JOIN_KW nm JOIN */ { yygotominor.yy328 = sqlite3JoinType(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,0); } break; case 146: /* joinop ::= JOIN_KW nm nm JOIN */ { yygotominor.yy328 = sqlite3JoinType(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0); } break; case 147: /* on_opt ::= ON expr */ case 164: /* having_opt ::= HAVING expr */ yytestcase(yyruleno==164); case 171: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==171); case 231: /* case_else ::= ELSE expr */ yytestcase(yyruleno==231); case 233: /* case_operand ::= expr */ yytestcase(yyruleno==233); {yygotominor.yy132 = yymsp[0].minor.yy346.pExpr;} break; case 148: /* on_opt ::= */ case 163: /* having_opt ::= */ yytestcase(yyruleno==163); case 170: /* where_opt ::= */ yytestcase(yyruleno==170); case 232: /* case_else ::= */ yytestcase(yyruleno==232); case 234: /* case_operand ::= */ yytestcase(yyruleno==234); {yygotominor.yy132 = 0;} break; case 151: /* indexed_opt ::= NOT INDEXED */ {yygotominor.yy0.z=0; yygotominor.yy0.n=1;} break; case 152: /* using_opt ::= USING LP idlist RP */ case 180: /* inscollist_opt ::= LP idlist RP */ yytestcase(yyruleno==180); {yygotominor.yy408 = yymsp[-1].minor.yy408;} break; case 153: /* using_opt ::= */ case 179: /* inscollist_opt ::= */ yytestcase(yyruleno==179); {yygotominor.yy408 = 0;} break; case 155: /* orderby_opt ::= ORDER BY sortlist */ case 162: /* groupby_opt ::= GROUP BY nexprlist */ yytestcase(yyruleno==162); case 235: /* exprlist ::= nexprlist */ yytestcase(yyruleno==235); {yygotominor.yy14 = yymsp[0].minor.yy14;} break; case 156: /* sortlist ::= sortlist COMMA expr sortorder */ { yygotominor.yy14 = sqlite3ExprListAppend(pParse,yymsp[-3].minor.yy14,yymsp[-1].minor.yy346.pExpr); if( yygotominor.yy14 ) 
yygotominor.yy14->a[yygotominor.yy14->nExpr-1].sortOrder = (u8)yymsp[0].minor.yy328; } break; case 157: /* sortlist ::= expr sortorder */ { yygotominor.yy14 = sqlite3ExprListAppend(pParse,0,yymsp[-1].minor.yy346.pExpr); if( yygotominor.yy14 && ALWAYS(yygotominor.yy14->a) ) yygotominor.yy14->a[0].sortOrder = (u8)yymsp[0].minor.yy328; } break; case 158: /* sortorder ::= ASC */ case 160: /* sortorder ::= */ yytestcase(yyruleno==160); {yygotominor.yy328 = SQLITE_SO_ASC;} break; case 159: /* sortorder ::= DESC */ {yygotominor.yy328 = SQLITE_SO_DESC;} break; case 165: /* limit_opt ::= */ {yygotominor.yy476.pLimit = 0; yygotominor.yy476.pOffset = 0;} break; case 166: /* limit_opt ::= LIMIT expr */ {yygotominor.yy476.pLimit = yymsp[0].minor.yy346.pExpr; yygotominor.yy476.pOffset = 0;} break; case 167: /* limit_opt ::= LIMIT expr OFFSET expr */ {yygotominor.yy476.pLimit = yymsp[-2].minor.yy346.pExpr; yygotominor.yy476.pOffset = yymsp[0].minor.yy346.pExpr;} break; case 168: /* limit_opt ::= LIMIT expr COMMA expr */ {yygotominor.yy476.pOffset = yymsp[-2].minor.yy346.pExpr; yygotominor.yy476.pLimit = yymsp[0].minor.yy346.pExpr;} break; case 169: /* cmd ::= with DELETE FROM fullname indexed_opt where_opt */ { sqlite3WithPush(pParse, yymsp[-5].minor.yy59, 1); sqlite3SrcListIndexedBy(pParse, yymsp[-2].minor.yy65, &yymsp[-1].minor.yy0); sqlite3DeleteFrom(pParse,yymsp[-2].minor.yy65,yymsp[0].minor.yy132); } break; case 172: /* cmd ::= with UPDATE orconf fullname indexed_opt SET setlist where_opt */ { sqlite3WithPush(pParse, yymsp[-7].minor.yy59, 1); sqlite3SrcListIndexedBy(pParse, yymsp[-4].minor.yy65, &yymsp[-3].minor.yy0); sqlite3ExprListCheckLength(pParse,yymsp[-1].minor.yy14,"set list"); sqlite3Update(pParse,yymsp[-4].minor.yy65,yymsp[-1].minor.yy14,yymsp[0].minor.yy132,yymsp[-5].minor.yy186); } break; case 173: /* setlist ::= setlist COMMA nm EQ expr */ { yygotominor.yy14 = sqlite3ExprListAppend(pParse, yymsp[-4].minor.yy14, yymsp[0].minor.yy346.pExpr); sqlite3ExprListSetName(pParse, yygotominor.yy14, &yymsp[-2].minor.yy0, 1); } break; case 174: /* setlist ::= nm EQ expr */ { yygotominor.yy14 = sqlite3ExprListAppend(pParse, 0, yymsp[0].minor.yy346.pExpr); sqlite3ExprListSetName(pParse, yygotominor.yy14, &yymsp[-2].minor.yy0, 1); } break; case 175: /* cmd ::= with insert_cmd INTO fullname inscollist_opt select */ { sqlite3WithPush(pParse, yymsp[-5].minor.yy59, 1); sqlite3Insert(pParse, yymsp[-2].minor.yy65, yymsp[0].minor.yy3, yymsp[-1].minor.yy408, yymsp[-4].minor.yy186); } break; case 176: /* cmd ::= with insert_cmd INTO fullname inscollist_opt DEFAULT VALUES */ { sqlite3WithPush(pParse, yymsp[-6].minor.yy59, 1); sqlite3Insert(pParse, yymsp[-3].minor.yy65, 0, yymsp[-2].minor.yy408, yymsp[-5].minor.yy186); } break; case 177: /* insert_cmd ::= INSERT orconf */ {yygotominor.yy186 = yymsp[0].minor.yy186;} break; case 178: /* insert_cmd ::= REPLACE */ {yygotominor.yy186 = OE_Replace;} break; case 181: /* idlist ::= idlist COMMA nm */ {yygotominor.yy408 = sqlite3IdListAppend(pParse->db,yymsp[-2].minor.yy408,&yymsp[0].minor.yy0);} break; case 182: /* idlist ::= nm */ {yygotominor.yy408 = sqlite3IdListAppend(pParse->db,0,&yymsp[0].minor.yy0);} break; case 183: /* expr ::= term */ {yygotominor.yy346 = yymsp[0].minor.yy346;} break; case 184: /* expr ::= LP expr RP */ {yygotominor.yy346.pExpr = yymsp[-1].minor.yy346.pExpr; spanSet(&yygotominor.yy346,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0);} break; case 185: /* term ::= NULL */ case 190: /* term ::= INTEGER|FLOAT|BLOB */ yytestcase(yyruleno==190); case 191: /* 
term ::= STRING */ yytestcase(yyruleno==191); {spanExpr(&yygotominor.yy346, pParse, yymsp[0].major, &yymsp[0].minor.yy0);} break; case 186: /* expr ::= ID|INDEXED */ case 187: /* expr ::= JOIN_KW */ yytestcase(yyruleno==187); {spanExpr(&yygotominor.yy346, pParse, TK_ID, &yymsp[0].minor.yy0);} break; case 188: /* expr ::= nm DOT nm */ { Expr *temp1 = sqlite3PExpr(pParse, TK_ID, 0, 0, &yymsp[-2].minor.yy0); Expr *temp2 = sqlite3PExpr(pParse, TK_ID, 0, 0, &yymsp[0].minor.yy0); yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_DOT, temp1, temp2, 0); spanSet(&yygotominor.yy346,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); } break; case 189: /* expr ::= nm DOT nm DOT nm */ { Expr *temp1 = sqlite3PExpr(pParse, TK_ID, 0, 0, &yymsp[-4].minor.yy0); Expr *temp2 = sqlite3PExpr(pParse, TK_ID, 0, 0, &yymsp[-2].minor.yy0); Expr *temp3 = sqlite3PExpr(pParse, TK_ID, 0, 0, &yymsp[0].minor.yy0); Expr *temp4 = sqlite3PExpr(pParse, TK_DOT, temp2, temp3, 0); yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_DOT, temp1, temp4, 0); spanSet(&yygotominor.yy346,&yymsp[-4].minor.yy0,&yymsp[0].minor.yy0); } break; case 192: /* expr ::= VARIABLE */ { if( yymsp[0].minor.yy0.n>=2 && yymsp[0].minor.yy0.z[0]=='#' && sqlite3Isdigit(yymsp[0].minor.yy0.z[1]) ){ /* When doing a nested parse, one can include terms in an expression ** that look like this: #1 #2 ... These terms refer to registers ** in the virtual machine. #N is the N-th register. */ if( pParse->nested==0 ){ sqlite3ErrorMsg(pParse, "near \"%T\": syntax error", &yymsp[0].minor.yy0); yygotominor.yy346.pExpr = 0; }else{ yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_REGISTER, 0, 0, &yymsp[0].minor.yy0); if( yygotominor.yy346.pExpr ) sqlite3GetInt32(&yymsp[0].minor.yy0.z[1], &yygotominor.yy346.pExpr->iTable); } }else{ spanExpr(&yygotominor.yy346, pParse, TK_VARIABLE, &yymsp[0].minor.yy0); sqlite3ExprAssignVarNumber(pParse, yygotominor.yy346.pExpr); } spanSet(&yygotominor.yy346, &yymsp[0].minor.yy0, &yymsp[0].minor.yy0); } break; case 193: /* expr ::= expr COLLATE ID|STRING */ { yygotominor.yy346.pExpr = sqlite3ExprAddCollateToken(pParse, yymsp[-2].minor.yy346.pExpr, &yymsp[0].minor.yy0, 1); yygotominor.yy346.zStart = yymsp[-2].minor.yy346.zStart; yygotominor.yy346.zEnd = &yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n]; } break; case 194: /* expr ::= CAST LP expr AS typetoken RP */ { yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_CAST, yymsp[-3].minor.yy346.pExpr, 0, &yymsp[-1].minor.yy0); spanSet(&yygotominor.yy346,&yymsp[-5].minor.yy0,&yymsp[0].minor.yy0); } break; case 195: /* expr ::= ID|INDEXED LP distinct exprlist RP */ { if( yymsp[-1].minor.yy14 && yymsp[-1].minor.yy14->nExpr>pParse->db->aLimit[SQLITE_LIMIT_FUNCTION_ARG] ){ sqlite3ErrorMsg(pParse, "too many arguments on function %T", &yymsp[-4].minor.yy0); } yygotominor.yy346.pExpr = sqlite3ExprFunction(pParse, yymsp[-1].minor.yy14, &yymsp[-4].minor.yy0); spanSet(&yygotominor.yy346,&yymsp[-4].minor.yy0,&yymsp[0].minor.yy0); if( yymsp[-2].minor.yy381==SF_Distinct && yygotominor.yy346.pExpr ){ yygotominor.yy346.pExpr->flags |= EP_Distinct; } } break; case 196: /* expr ::= ID|INDEXED LP STAR RP */ { yygotominor.yy346.pExpr = sqlite3ExprFunction(pParse, 0, &yymsp[-3].minor.yy0); spanSet(&yygotominor.yy346,&yymsp[-3].minor.yy0,&yymsp[0].minor.yy0); } break; case 197: /* term ::= CTIME_KW */ { yygotominor.yy346.pExpr = sqlite3ExprFunction(pParse, 0, &yymsp[0].minor.yy0); spanSet(&yygotominor.yy346, &yymsp[0].minor.yy0, &yymsp[0].minor.yy0); } break; case 198: /* expr ::= expr AND expr */ case 199: /* expr ::= expr OR 
expr */ yytestcase(yyruleno==199); case 200: /* expr ::= expr LT|GT|GE|LE expr */ yytestcase(yyruleno==200); case 201: /* expr ::= expr EQ|NE expr */ yytestcase(yyruleno==201); case 202: /* expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ yytestcase(yyruleno==202); case 203: /* expr ::= expr PLUS|MINUS expr */ yytestcase(yyruleno==203); case 204: /* expr ::= expr STAR|SLASH|REM expr */ yytestcase(yyruleno==204); case 205: /* expr ::= expr CONCAT expr */ yytestcase(yyruleno==205); {spanBinaryExpr(&yygotominor.yy346,pParse,yymsp[-1].major,&yymsp[-2].minor.yy346,&yymsp[0].minor.yy346);} break; case 206: /* likeop ::= LIKE_KW|MATCH */ {yygotominor.yy96.eOperator = yymsp[0].minor.yy0; yygotominor.yy96.bNot = 0;} break; case 207: /* likeop ::= NOT LIKE_KW|MATCH */ {yygotominor.yy96.eOperator = yymsp[0].minor.yy0; yygotominor.yy96.bNot = 1;} break; case 208: /* expr ::= expr likeop expr */ { ExprList *pList; pList = sqlite3ExprListAppend(pParse,0, yymsp[0].minor.yy346.pExpr); pList = sqlite3ExprListAppend(pParse,pList, yymsp[-2].minor.yy346.pExpr); yygotominor.yy346.pExpr = sqlite3ExprFunction(pParse, pList, &yymsp[-1].minor.yy96.eOperator); if( yymsp[-1].minor.yy96.bNot ) yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_NOT, yygotominor.yy346.pExpr, 0, 0); yygotominor.yy346.zStart = yymsp[-2].minor.yy346.zStart; yygotominor.yy346.zEnd = yymsp[0].minor.yy346.zEnd; if( yygotominor.yy346.pExpr ) yygotominor.yy346.pExpr->flags |= EP_InfixFunc; } break; case 209: /* expr ::= expr likeop expr ESCAPE expr */ { ExprList *pList; pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy346.pExpr); pList = sqlite3ExprListAppend(pParse,pList, yymsp[-4].minor.yy346.pExpr); pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy346.pExpr); yygotominor.yy346.pExpr = sqlite3ExprFunction(pParse, pList, &yymsp[-3].minor.yy96.eOperator); if( yymsp[-3].minor.yy96.bNot ) yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_NOT, yygotominor.yy346.pExpr, 0, 0); yygotominor.yy346.zStart = yymsp[-4].minor.yy346.zStart; yygotominor.yy346.zEnd = yymsp[0].minor.yy346.zEnd; if( yygotominor.yy346.pExpr ) yygotominor.yy346.pExpr->flags |= EP_InfixFunc; } break; case 210: /* expr ::= expr ISNULL|NOTNULL */ {spanUnaryPostfix(&yygotominor.yy346,pParse,yymsp[0].major,&yymsp[-1].minor.yy346,&yymsp[0].minor.yy0);} break; case 211: /* expr ::= expr NOT NULL */ {spanUnaryPostfix(&yygotominor.yy346,pParse,TK_NOTNULL,&yymsp[-2].minor.yy346,&yymsp[0].minor.yy0);} break; case 212: /* expr ::= expr IS expr */ { spanBinaryExpr(&yygotominor.yy346,pParse,TK_IS,&yymsp[-2].minor.yy346,&yymsp[0].minor.yy346); binaryToUnaryIfNull(pParse, yymsp[0].minor.yy346.pExpr, yygotominor.yy346.pExpr, TK_ISNULL); } break; case 213: /* expr ::= expr IS NOT expr */ { spanBinaryExpr(&yygotominor.yy346,pParse,TK_ISNOT,&yymsp[-3].minor.yy346,&yymsp[0].minor.yy346); binaryToUnaryIfNull(pParse, yymsp[0].minor.yy346.pExpr, yygotominor.yy346.pExpr, TK_NOTNULL); } break; case 214: /* expr ::= NOT expr */ case 215: /* expr ::= BITNOT expr */ yytestcase(yyruleno==215); {spanUnaryPrefix(&yygotominor.yy346,pParse,yymsp[-1].major,&yymsp[0].minor.yy346,&yymsp[-1].minor.yy0);} break; case 216: /* expr ::= MINUS expr */ {spanUnaryPrefix(&yygotominor.yy346,pParse,TK_UMINUS,&yymsp[0].minor.yy346,&yymsp[-1].minor.yy0);} break; case 217: /* expr ::= PLUS expr */ {spanUnaryPrefix(&yygotominor.yy346,pParse,TK_UPLUS,&yymsp[0].minor.yy346,&yymsp[-1].minor.yy0);} break; case 220: /* expr ::= expr between_op expr AND expr */ { ExprList *pList = sqlite3ExprListAppend(pParse,0, 
yymsp[-2].minor.yy346.pExpr); pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy346.pExpr); yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_BETWEEN, yymsp[-4].minor.yy346.pExpr, 0, 0); if( yygotominor.yy346.pExpr ){ yygotominor.yy346.pExpr->x.pList = pList; }else{ sqlite3ExprListDelete(pParse->db, pList); } if( yymsp[-3].minor.yy328 ) yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_NOT, yygotominor.yy346.pExpr, 0, 0); yygotominor.yy346.zStart = yymsp[-4].minor.yy346.zStart; yygotominor.yy346.zEnd = yymsp[0].minor.yy346.zEnd; } break; case 223: /* expr ::= expr in_op LP exprlist RP */ { if( yymsp[-1].minor.yy14==0 ){ /* Expressions of the form ** ** expr1 IN () ** expr1 NOT IN () ** ** simplify to constants 0 (false) and 1 (true), respectively, ** regardless of the value of expr1. */ yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_INTEGER, 0, 0, &sqlite3IntTokens[yymsp[-3].minor.yy328]); sqlite3ExprDelete(pParse->db, yymsp[-4].minor.yy346.pExpr); }else if( yymsp[-1].minor.yy14->nExpr==1 ){ /* Expressions of the form: ** ** expr1 IN (?1) ** expr1 NOT IN (?2) ** ** with exactly one value on the RHS can be simplified to something ** like this: ** ** expr1 == ?1 ** expr1 <> ?2 ** ** But, the RHS of the == or <> is marked with the EP_Generic flag ** so that it may not contribute to the computation of comparison ** affinity or the collating sequence to use for comparison. Otherwise, ** the semantics would be subtly different from IN or NOT IN. */ Expr *pRHS = yymsp[-1].minor.yy14->a[0].pExpr; yymsp[-1].minor.yy14->a[0].pExpr = 0; sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy14); /* pRHS cannot be NULL because a malloc error would have been detected ** before now and control would have never reached this point */ if( ALWAYS(pRHS) ){ pRHS->flags &= ~EP_Collate; pRHS->flags |= EP_Generic; } yygotominor.yy346.pExpr = sqlite3PExpr(pParse, yymsp[-3].minor.yy328 ? 
TK_NE : TK_EQ, yymsp[-4].minor.yy346.pExpr, pRHS, 0); }else{ yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy346.pExpr, 0, 0); if( yygotominor.yy346.pExpr ){ yygotominor.yy346.pExpr->x.pList = yymsp[-1].minor.yy14; sqlite3ExprSetHeightAndFlags(pParse, yygotominor.yy346.pExpr); }else{ sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy14); } if( yymsp[-3].minor.yy328 ) yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_NOT, yygotominor.yy346.pExpr, 0, 0); } yygotominor.yy346.zStart = yymsp[-4].minor.yy346.zStart; yygotominor.yy346.zEnd = &yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n]; } break; case 224: /* expr ::= LP select RP */ { yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_SELECT, 0, 0, 0); if( yygotominor.yy346.pExpr ){ yygotominor.yy346.pExpr->x.pSelect = yymsp[-1].minor.yy3; ExprSetProperty(yygotominor.yy346.pExpr, EP_xIsSelect|EP_Subquery); sqlite3ExprSetHeightAndFlags(pParse, yygotominor.yy346.pExpr); }else{ sqlite3SelectDelete(pParse->db, yymsp[-1].minor.yy3); } yygotominor.yy346.zStart = yymsp[-2].minor.yy0.z; yygotominor.yy346.zEnd = &yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n]; } break; case 225: /* expr ::= expr in_op LP select RP */ { yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy346.pExpr, 0, 0); if( yygotominor.yy346.pExpr ){ yygotominor.yy346.pExpr->x.pSelect = yymsp[-1].minor.yy3; ExprSetProperty(yygotominor.yy346.pExpr, EP_xIsSelect|EP_Subquery); sqlite3ExprSetHeightAndFlags(pParse, yygotominor.yy346.pExpr); }else{ sqlite3SelectDelete(pParse->db, yymsp[-1].minor.yy3); } if( yymsp[-3].minor.yy328 ) yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_NOT, yygotominor.yy346.pExpr, 0, 0); yygotominor.yy346.zStart = yymsp[-4].minor.yy346.zStart; yygotominor.yy346.zEnd = &yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n]; } break; case 226: /* expr ::= expr in_op nm dbnm */ { SrcList *pSrc = sqlite3SrcListAppend(pParse->db, 0,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0); yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_IN, yymsp[-3].minor.yy346.pExpr, 0, 0); if( yygotominor.yy346.pExpr ){ yygotominor.yy346.pExpr->x.pSelect = sqlite3SelectNew(pParse, 0,pSrc,0,0,0,0,0,0,0); ExprSetProperty(yygotominor.yy346.pExpr, EP_xIsSelect|EP_Subquery); sqlite3ExprSetHeightAndFlags(pParse, yygotominor.yy346.pExpr); }else{ sqlite3SrcListDelete(pParse->db, pSrc); } if( yymsp[-2].minor.yy328 ) yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_NOT, yygotominor.yy346.pExpr, 0, 0); yygotominor.yy346.zStart = yymsp[-3].minor.yy346.zStart; yygotominor.yy346.zEnd = yymsp[0].minor.yy0.z ? &yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n] : &yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n]; } break; case 227: /* expr ::= EXISTS LP select RP */ { Expr *p = yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_EXISTS, 0, 0, 0); if( p ){ p->x.pSelect = yymsp[-1].minor.yy3; ExprSetProperty(p, EP_xIsSelect|EP_Subquery); sqlite3ExprSetHeightAndFlags(pParse, p); }else{ sqlite3SelectDelete(pParse->db, yymsp[-1].minor.yy3); } yygotominor.yy346.zStart = yymsp[-3].minor.yy0.z; yygotominor.yy346.zEnd = &yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n]; } break; case 228: /* expr ::= CASE case_operand case_exprlist case_else END */ { yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_CASE, yymsp[-3].minor.yy132, 0, 0); if( yygotominor.yy346.pExpr ){ yygotominor.yy346.pExpr->x.pList = yymsp[-1].minor.yy132 ? 
sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy14,yymsp[-1].minor.yy132) : yymsp[-2].minor.yy14; sqlite3ExprSetHeightAndFlags(pParse, yygotominor.yy346.pExpr); }else{ sqlite3ExprListDelete(pParse->db, yymsp[-2].minor.yy14); sqlite3ExprDelete(pParse->db, yymsp[-1].minor.yy132); } yygotominor.yy346.zStart = yymsp[-4].minor.yy0.z; yygotominor.yy346.zEnd = &yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n]; } break; case 229: /* case_exprlist ::= case_exprlist WHEN expr THEN expr */ { yygotominor.yy14 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy14, yymsp[-2].minor.yy346.pExpr); yygotominor.yy14 = sqlite3ExprListAppend(pParse,yygotominor.yy14, yymsp[0].minor.yy346.pExpr); } break; case 230: /* case_exprlist ::= WHEN expr THEN expr */ { yygotominor.yy14 = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy346.pExpr); yygotominor.yy14 = sqlite3ExprListAppend(pParse,yygotominor.yy14, yymsp[0].minor.yy346.pExpr); } break; case 237: /* nexprlist ::= nexprlist COMMA expr */ {yygotominor.yy14 = sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy14,yymsp[0].minor.yy346.pExpr);} break; case 238: /* nexprlist ::= expr */ {yygotominor.yy14 = sqlite3ExprListAppend(pParse,0,yymsp[0].minor.yy346.pExpr);} break; case 239: /* cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP idxlist RP where_opt */ { sqlite3CreateIndex(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0, sqlite3SrcListAppend(pParse->db,0,&yymsp[-4].minor.yy0,0), yymsp[-2].minor.yy14, yymsp[-10].minor.yy328, &yymsp[-11].minor.yy0, yymsp[0].minor.yy132, SQLITE_SO_ASC, yymsp[-8].minor.yy328); } break; case 240: /* uniqueflag ::= UNIQUE */ case 291: /* raisetype ::= ABORT */ yytestcase(yyruleno==291); {yygotominor.yy328 = OE_Abort;} break; case 241: /* uniqueflag ::= */ {yygotominor.yy328 = OE_None;} break; case 244: /* idxlist ::= idxlist COMMA nm collate sortorder */ { Expr *p = sqlite3ExprAddCollateToken(pParse, 0, &yymsp[-1].minor.yy0, 1); yygotominor.yy14 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy14, p); sqlite3ExprListSetName(pParse,yygotominor.yy14,&yymsp[-2].minor.yy0,1); sqlite3ExprListCheckLength(pParse, yygotominor.yy14, "index"); if( yygotominor.yy14 ) yygotominor.yy14->a[yygotominor.yy14->nExpr-1].sortOrder = (u8)yymsp[0].minor.yy328; } break; case 245: /* idxlist ::= nm collate sortorder */ { Expr *p = sqlite3ExprAddCollateToken(pParse, 0, &yymsp[-1].minor.yy0, 1); yygotominor.yy14 = sqlite3ExprListAppend(pParse,0, p); sqlite3ExprListSetName(pParse, yygotominor.yy14, &yymsp[-2].minor.yy0, 1); sqlite3ExprListCheckLength(pParse, yygotominor.yy14, "index"); if( yygotominor.yy14 ) yygotominor.yy14->a[yygotominor.yy14->nExpr-1].sortOrder = (u8)yymsp[0].minor.yy328; } break; case 246: /* collate ::= */ {yygotominor.yy0.z = 0; yygotominor.yy0.n = 0;} break; case 248: /* cmd ::= DROP INDEX ifexists fullname */ {sqlite3DropIndex(pParse, yymsp[0].minor.yy65, yymsp[-1].minor.yy328);} break; case 249: /* cmd ::= VACUUM */ case 250: /* cmd ::= VACUUM nm */ yytestcase(yyruleno==250); {sqlite3Vacuum(pParse);} break; case 251: /* cmd ::= PRAGMA nm dbnm */ {sqlite3Pragma(pParse,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0,0,0);} break; case 252: /* cmd ::= PRAGMA nm dbnm EQ nmnum */ {sqlite3Pragma(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0,0);} break; case 253: /* cmd ::= PRAGMA nm dbnm LP nmnum RP */ {sqlite3Pragma(pParse,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,&yymsp[-1].minor.yy0,0);} break; case 254: /* cmd ::= PRAGMA nm dbnm EQ minus_num */ 
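/* Illustrative note, not part of the generated grammar comments: a
** statement such as "PRAGMA cache_size = -2000" reaches this minus_num
** variant of the rule, and the final argument of 1 passed to
** sqlite3Pragma() below records that the numeric value was written with
** a leading minus sign. */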
{sqlite3Pragma(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0,1);} break; case 255: /* cmd ::= PRAGMA nm dbnm LP minus_num RP */ {sqlite3Pragma(pParse,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,&yymsp[-1].minor.yy0,1);} break; case 264: /* cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */ { Token all; all.z = yymsp[-3].minor.yy0.z; all.n = (int)(yymsp[0].minor.yy0.z - yymsp[-3].minor.yy0.z) + yymsp[0].minor.yy0.n; sqlite3FinishTrigger(pParse, yymsp[-1].minor.yy473, &all); } break; case 265: /* trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */ { sqlite3BeginTrigger(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0, yymsp[-5].minor.yy328, yymsp[-4].minor.yy378.a, yymsp[-4].minor.yy378.b, yymsp[-2].minor.yy65, yymsp[0].minor.yy132, yymsp[-10].minor.yy328, yymsp[-8].minor.yy328); yygotominor.yy0 = (yymsp[-6].minor.yy0.n==0?yymsp[-7].minor.yy0:yymsp[-6].minor.yy0); } break; case 266: /* trigger_time ::= BEFORE */ case 269: /* trigger_time ::= */ yytestcase(yyruleno==269); { yygotominor.yy328 = TK_BEFORE; } break; case 267: /* trigger_time ::= AFTER */ { yygotominor.yy328 = TK_AFTER; } break; case 268: /* trigger_time ::= INSTEAD OF */ { yygotominor.yy328 = TK_INSTEAD;} break; case 270: /* trigger_event ::= DELETE|INSERT */ case 271: /* trigger_event ::= UPDATE */ yytestcase(yyruleno==271); {yygotominor.yy378.a = yymsp[0].major; yygotominor.yy378.b = 0;} break; case 272: /* trigger_event ::= UPDATE OF idlist */ {yygotominor.yy378.a = TK_UPDATE; yygotominor.yy378.b = yymsp[0].minor.yy408;} break; case 275: /* when_clause ::= */ case 296: /* key_opt ::= */ yytestcase(yyruleno==296); { yygotominor.yy132 = 0; } break; case 276: /* when_clause ::= WHEN expr */ case 297: /* key_opt ::= KEY expr */ yytestcase(yyruleno==297); { yygotominor.yy132 = yymsp[0].minor.yy346.pExpr; } break; case 277: /* trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */ { assert( yymsp[-2].minor.yy473!=0 ); yymsp[-2].minor.yy473->pLast->pNext = yymsp[-1].minor.yy473; yymsp[-2].minor.yy473->pLast = yymsp[-1].minor.yy473; yygotominor.yy473 = yymsp[-2].minor.yy473; } break; case 278: /* trigger_cmd_list ::= trigger_cmd SEMI */ { assert( yymsp[-1].minor.yy473!=0 ); yymsp[-1].minor.yy473->pLast = yymsp[-1].minor.yy473; yygotominor.yy473 = yymsp[-1].minor.yy473; } break; case 280: /* trnm ::= nm DOT nm */ { yygotominor.yy0 = yymsp[0].minor.yy0; sqlite3ErrorMsg(pParse, "qualified table names are not allowed on INSERT, UPDATE, and DELETE " "statements within triggers"); } break; case 282: /* tridxby ::= INDEXED BY nm */ { sqlite3ErrorMsg(pParse, "the INDEXED BY clause is not allowed on UPDATE or DELETE statements " "within triggers"); } break; case 283: /* tridxby ::= NOT INDEXED */ { sqlite3ErrorMsg(pParse, "the NOT INDEXED clause is not allowed on UPDATE or DELETE statements " "within triggers"); } break; case 284: /* trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist where_opt */ { yygotominor.yy473 = sqlite3TriggerUpdateStep(pParse->db, &yymsp[-4].minor.yy0, yymsp[-1].minor.yy14, yymsp[0].minor.yy132, yymsp[-5].minor.yy186); } break; case 285: /* trigger_cmd ::= insert_cmd INTO trnm inscollist_opt select */ {yygotominor.yy473 = sqlite3TriggerInsertStep(pParse->db, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy408, yymsp[0].minor.yy3, yymsp[-4].minor.yy186);} break; case 286: /* trigger_cmd ::= DELETE FROM trnm tridxby where_opt */ {yygotominor.yy473 = sqlite3TriggerDeleteStep(pParse->db, &yymsp[-2].minor.yy0, 
yymsp[0].minor.yy132);} break; case 287: /* trigger_cmd ::= select */ {yygotominor.yy473 = sqlite3TriggerSelectStep(pParse->db, yymsp[0].minor.yy3); } break; case 288: /* expr ::= RAISE LP IGNORE RP */ { yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_RAISE, 0, 0, 0); if( yygotominor.yy346.pExpr ){ yygotominor.yy346.pExpr->affinity = OE_Ignore; } yygotominor.yy346.zStart = yymsp[-3].minor.yy0.z; yygotominor.yy346.zEnd = &yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n]; } break; case 289: /* expr ::= RAISE LP raisetype COMMA nm RP */ { yygotominor.yy346.pExpr = sqlite3PExpr(pParse, TK_RAISE, 0, 0, &yymsp[-1].minor.yy0); if( yygotominor.yy346.pExpr ) { yygotominor.yy346.pExpr->affinity = (char)yymsp[-3].minor.yy328; } yygotominor.yy346.zStart = yymsp[-5].minor.yy0.z; yygotominor.yy346.zEnd = &yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n]; } break; case 290: /* raisetype ::= ROLLBACK */ {yygotominor.yy328 = OE_Rollback;} break; case 292: /* raisetype ::= FAIL */ {yygotominor.yy328 = OE_Fail;} break; case 293: /* cmd ::= DROP TRIGGER ifexists fullname */ { sqlite3DropTrigger(pParse,yymsp[0].minor.yy65,yymsp[-1].minor.yy328); } break; case 294: /* cmd ::= ATTACH database_kw_opt expr AS expr key_opt */ { sqlite3Attach(pParse, yymsp[-3].minor.yy346.pExpr, yymsp[-1].minor.yy346.pExpr, yymsp[0].minor.yy132); } break; case 295: /* cmd ::= DETACH database_kw_opt expr */ { sqlite3Detach(pParse, yymsp[0].minor.yy346.pExpr); } break; case 300: /* cmd ::= REINDEX */ {sqlite3Reindex(pParse, 0, 0);} break; case 301: /* cmd ::= REINDEX nm dbnm */ {sqlite3Reindex(pParse, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0);} break; case 302: /* cmd ::= ANALYZE */ {sqlite3Analyze(pParse, 0, 0);} break; case 303: /* cmd ::= ANALYZE nm dbnm */ {sqlite3Analyze(pParse, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0);} break; case 304: /* cmd ::= ALTER TABLE fullname RENAME TO nm */ { sqlite3AlterRenameTable(pParse,yymsp[-3].minor.yy65,&yymsp[0].minor.yy0); } break; case 305: /* cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt column */ { sqlite3AlterFinishAddColumn(pParse, &yymsp[0].minor.yy0); } break; case 306: /* add_column_fullname ::= fullname */ { pParse->db->lookaside.bEnabled = 0; sqlite3AlterBeginAddColumn(pParse, yymsp[0].minor.yy65); } break; case 309: /* cmd ::= create_vtab */ {sqlite3VtabFinishParse(pParse,0);} break; case 310: /* cmd ::= create_vtab LP vtabarglist RP */ {sqlite3VtabFinishParse(pParse,&yymsp[0].minor.yy0);} break; case 311: /* create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */ { sqlite3VtabBeginParse(pParse, &yymsp[-3].minor.yy0, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-4].minor.yy328); } break; case 314: /* vtabarg ::= */ {sqlite3VtabArgInit(pParse);} break; case 316: /* vtabargtoken ::= ANY */ case 317: /* vtabargtoken ::= lp anylist RP */ yytestcase(yyruleno==317); case 318: /* lp ::= LP */ yytestcase(yyruleno==318); {sqlite3VtabArgExtend(pParse,&yymsp[0].minor.yy0);} break; case 322: /* with ::= */ {yygotominor.yy59 = 0;} break; case 323: /* with ::= WITH wqlist */ case 324: /* with ::= WITH RECURSIVE wqlist */ yytestcase(yyruleno==324); { yygotominor.yy59 = yymsp[0].minor.yy59; } break; case 325: /* wqlist ::= nm idxlist_opt AS LP select RP */ { yygotominor.yy59 = sqlite3WithAdd(pParse, 0, &yymsp[-5].minor.yy0, yymsp[-4].minor.yy14, yymsp[-1].minor.yy3); } break; case 326: /* wqlist ::= wqlist COMMA nm idxlist_opt AS LP select RP */ { yygotominor.yy59 = sqlite3WithAdd(pParse, yymsp[-7].minor.yy59, &yymsp[-5].minor.yy0, yymsp[-4].minor.yy14, yymsp[-1].minor.yy3); 
} break; default: /* (0) input ::= cmdlist */ yytestcase(yyruleno==0); /* (1) cmdlist ::= cmdlist ecmd */ yytestcase(yyruleno==1); /* (2) cmdlist ::= ecmd */ yytestcase(yyruleno==2); /* (3) ecmd ::= SEMI */ yytestcase(yyruleno==3); /* (4) ecmd ::= explain cmdx SEMI */ yytestcase(yyruleno==4); /* (10) trans_opt ::= */ yytestcase(yyruleno==10); /* (11) trans_opt ::= TRANSACTION */ yytestcase(yyruleno==11); /* (12) trans_opt ::= TRANSACTION nm */ yytestcase(yyruleno==12); /* (20) savepoint_opt ::= SAVEPOINT */ yytestcase(yyruleno==20); /* (21) savepoint_opt ::= */ yytestcase(yyruleno==21); /* (25) cmd ::= create_table create_table_args */ yytestcase(yyruleno==25); /* (36) columnlist ::= columnlist COMMA column */ yytestcase(yyruleno==36); /* (37) columnlist ::= column */ yytestcase(yyruleno==37); /* (43) type ::= */ yytestcase(yyruleno==43); /* (50) signed ::= plus_num */ yytestcase(yyruleno==50); /* (51) signed ::= minus_num */ yytestcase(yyruleno==51); /* (52) carglist ::= carglist ccons */ yytestcase(yyruleno==52); /* (53) carglist ::= */ yytestcase(yyruleno==53); /* (60) ccons ::= NULL onconf */ yytestcase(yyruleno==60); /* (88) conslist ::= conslist tconscomma tcons */ yytestcase(yyruleno==88); /* (89) conslist ::= tcons */ yytestcase(yyruleno==89); /* (91) tconscomma ::= */ yytestcase(yyruleno==91); /* (273) foreach_clause ::= */ yytestcase(yyruleno==273); /* (274) foreach_clause ::= FOR EACH ROW */ yytestcase(yyruleno==274); /* (281) tridxby ::= */ yytestcase(yyruleno==281); /* (298) database_kw_opt ::= DATABASE */ yytestcase(yyruleno==298); /* (299) database_kw_opt ::= */ yytestcase(yyruleno==299); /* (307) kwcolumn_opt ::= */ yytestcase(yyruleno==307); /* (308) kwcolumn_opt ::= COLUMNKW */ yytestcase(yyruleno==308); /* (312) vtabarglist ::= vtabarg */ yytestcase(yyruleno==312); /* (313) vtabarglist ::= vtabarglist COMMA vtabarg */ yytestcase(yyruleno==313); /* (315) vtabarg ::= vtabarg vtabargtoken */ yytestcase(yyruleno==315); /* (319) anylist ::= */ yytestcase(yyruleno==319); /* (320) anylist ::= anylist LP anylist RP */ yytestcase(yyruleno==320); /* (321) anylist ::= anylist ANY */ yytestcase(yyruleno==321); break; }; assert( yyruleno>=0 && yyrulenoyyidx -= yysize; yyact = yy_find_reduce_action(yymsp[-yysize].stateno,(YYCODETYPE)yygoto); if( yyact < YYNSTATE ){ #ifdef NDEBUG /* If we are not debugging and the reduce action popped at least ** one element off the stack, then we can push the new element back ** onto the stack here, and skip the stack overflow test in yy_shift(). ** That gives a significant speed improvement. */ if( yysize ){ yypParser->yyidx++; yymsp -= yysize-1; yymsp->stateno = (YYACTIONTYPE)yyact; yymsp->major = (YYCODETYPE)yygoto; yymsp->minor = yygotominor; }else #endif { yy_shift(yypParser,yyact,yygoto,&yygotominor); } }else{ assert( yyact == YYNSTATE + YYNRULE + 1 ); yy_accept(yypParser); } } /* ** The following code executes when the parse fails */ #ifndef YYNOERRORRECOVERY static void yy_parse_failed( yyParser *yypParser /* The parser */ ){ sqlite3ParserARG_FETCH; #ifndef NDEBUG if( yyTraceFILE ){ fprintf(yyTraceFILE,"%sFail!\n",yyTracePrompt); } #endif while( yypParser->yyidx>=0 ) yy_pop_parser_stack(yypParser); /* Here code is inserted which will be executed whenever the ** parser fails */ sqlite3ParserARG_STORE; /* Suppress warning about unused %extra_argument variable */ } #endif /* YYNOERRORRECOVERY */ /* ** The following code executes when a syntax error first occurs. 
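** The handler below formats its message as "near \"%T\": syntax error",
** where %T expands to the text of the token that could not be shifted.
** As an illustrative example (not taken from this file), a statement
** that misspells the FROM keyword,
**
**     SELECT * FRO t1;
**
** is reported back to the application as:  near "FRO": syntax error.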
*/ static void yy_syntax_error( yyParser *yypParser, /* The parser */ int yymajor, /* The major type of the error token */ YYMINORTYPE yyminor /* The minor type of the error token */ ){ sqlite3ParserARG_FETCH; #define TOKEN (yyminor.yy0) UNUSED_PARAMETER(yymajor); /* Silence some compiler warnings */ assert( TOKEN.z[0] ); /* The tokenizer always gives us a token */ sqlite3ErrorMsg(pParse, "near \"%T\": syntax error", &TOKEN); sqlite3ParserARG_STORE; /* Suppress warning about unused %extra_argument variable */ } /* ** The following is executed when the parser accepts */ static void yy_accept( yyParser *yypParser /* The parser */ ){ sqlite3ParserARG_FETCH; #ifndef NDEBUG if( yyTraceFILE ){ fprintf(yyTraceFILE,"%sAccept!\n",yyTracePrompt); } #endif while( yypParser->yyidx>=0 ) yy_pop_parser_stack(yypParser); /* Here code is inserted which will be executed whenever the ** parser accepts */ sqlite3ParserARG_STORE; /* Suppress warning about unused %extra_argument variable */ } /* The main parser program. ** The first argument is a pointer to a structure obtained from ** "sqlite3ParserAlloc" which describes the current state of the parser. ** The second argument is the major token number. The third is ** the minor token. The fourth optional argument is whatever the ** user wants (and specified in the grammar) and is available for ** use by the action routines. ** ** Inputs: **
<ul>
** <li> A pointer to the parser (an opaque structure.)
** <li> The major token number.
** <li> The minor token number.
** <li> An option argument of a grammar-specified type.
** </ul>
    ** ** Outputs: ** None. */ SQLITE_PRIVATE void sqlite3Parser( void *yyp, /* The parser */ int yymajor, /* The major token code number */ sqlite3ParserTOKENTYPE yyminor /* The value for the token */ sqlite3ParserARG_PDECL /* Optional %extra_argument parameter */ ){ YYMINORTYPE yyminorunion; int yyact; /* The parser action. */ #if !defined(YYERRORSYMBOL) && !defined(YYNOERRORRECOVERY) int yyendofinput; /* True if we are at the end of input */ #endif #ifdef YYERRORSYMBOL int yyerrorhit = 0; /* True if yymajor has invoked an error */ #endif yyParser *yypParser; /* The parser */ /* (re)initialize the parser, if necessary */ yypParser = (yyParser*)yyp; if( yypParser->yyidx<0 ){ #if YYSTACKDEPTH<=0 if( yypParser->yystksz <=0 ){ /*memset(&yyminorunion, 0, sizeof(yyminorunion));*/ yyminorunion = yyzerominor; yyStackOverflow(yypParser, &yyminorunion); return; } #endif yypParser->yyidx = 0; yypParser->yyerrcnt = -1; yypParser->yystack[0].stateno = 0; yypParser->yystack[0].major = 0; } yyminorunion.yy0 = yyminor; #if !defined(YYERRORSYMBOL) && !defined(YYNOERRORRECOVERY) yyendofinput = (yymajor==0); #endif sqlite3ParserARG_STORE; #ifndef NDEBUG if( yyTraceFILE ){ fprintf(yyTraceFILE,"%sInput %s\n",yyTracePrompt,yyTokenName[yymajor]); } #endif do{ yyact = yy_find_shift_action(yypParser,(YYCODETYPE)yymajor); if( yyactyyerrcnt--; yymajor = YYNOCODE; }else if( yyact < YYNSTATE + YYNRULE ){ yy_reduce(yypParser,yyact-YYNSTATE); }else{ assert( yyact == YY_ERROR_ACTION ); #ifdef YYERRORSYMBOL int yymx; #endif #ifndef NDEBUG if( yyTraceFILE ){ fprintf(yyTraceFILE,"%sSyntax Error!\n",yyTracePrompt); } #endif #ifdef YYERRORSYMBOL /* A syntax error has occurred. ** The response to an error depends upon whether or not the ** grammar defines an error token "ERROR". ** ** This is what we do if the grammar does define ERROR: ** ** * Call the %syntax_error function. ** ** * Begin popping the stack until we enter a state where ** it is legal to shift the error symbol, then shift ** the error symbol. ** ** * Set the error count to three. ** ** * Begin accepting and shifting new tokens. No new error ** processing will occur until three tokens have been ** shifted successfully. ** */ if( yypParser->yyerrcnt<0 ){ yy_syntax_error(yypParser,yymajor,yyminorunion); } yymx = yypParser->yystack[yypParser->yyidx].major; if( yymx==YYERRORSYMBOL || yyerrorhit ){ #ifndef NDEBUG if( yyTraceFILE ){ fprintf(yyTraceFILE,"%sDiscard input token %s\n", yyTracePrompt,yyTokenName[yymajor]); } #endif yy_destructor(yypParser, (YYCODETYPE)yymajor,&yyminorunion); yymajor = YYNOCODE; }else{ while( yypParser->yyidx >= 0 && yymx != YYERRORSYMBOL && (yyact = yy_find_reduce_action( yypParser->yystack[yypParser->yyidx].stateno, YYERRORSYMBOL)) >= YYNSTATE ){ yy_pop_parser_stack(yypParser); } if( yypParser->yyidx < 0 || yymajor==0 ){ yy_destructor(yypParser,(YYCODETYPE)yymajor,&yyminorunion); yy_parse_failed(yypParser); yymajor = YYNOCODE; }else if( yymx!=YYERRORSYMBOL ){ YYMINORTYPE u2; u2.YYERRSYMDT = 0; yy_shift(yypParser,yyact,YYERRORSYMBOL,&u2); } } yypParser->yyerrcnt = 3; yyerrorhit = 1; #elif defined(YYNOERRORRECOVERY) /* If the YYNOERRORRECOVERY macro is defined, then do not attempt to ** do any kind of error recovery. Instead, simply invoke the syntax ** error routine and continue going as if nothing had happened. ** ** Applications can set this macro (for example inside %include) if ** they intend to abandon the parse upon the first syntax error seen. 
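** A minimal sketch of that approach, assuming a Lemon grammar file and
** shown here only for illustration:
**
**     %include {
**       #define YYNOERRORRECOVERY 1
**     }
**
** Code inside a %include block is copied to the top of the generated
** parser, so the macro is visible to this generated error-handling code.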
*/ yy_syntax_error(yypParser,yymajor,yyminorunion); yy_destructor(yypParser,(YYCODETYPE)yymajor,&yyminorunion); yymajor = YYNOCODE; #else /* YYERRORSYMBOL is not defined */ /* This is what we do if the grammar does not define ERROR: ** ** * Report an error message, and throw away the input token. ** ** * If the input token is $, then fail the parse. ** ** As before, subsequent error messages are suppressed until ** three input tokens have been successfully shifted. */ if( yypParser->yyerrcnt<=0 ){ yy_syntax_error(yypParser,yymajor,yyminorunion); } yypParser->yyerrcnt = 3; yy_destructor(yypParser,(YYCODETYPE)yymajor,&yyminorunion); if( yyendofinput ){ yy_parse_failed(yypParser); } yymajor = YYNOCODE; #endif } }while( yymajor!=YYNOCODE && yypParser->yyidx>=0 ); return; } /************** End of parse.c ***********************************************/ /************** Begin file tokenize.c ****************************************/ /* ** 2001 September 15 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** An tokenizer for SQL ** ** This file contains C code that splits an SQL input string up into ** individual tokens and sends those tokens one-by-one over to the ** parser for analysis. */ /* #include "sqliteInt.h" */ /* #include */ /* ** The charMap() macro maps alphabetic characters into their ** lower-case ASCII equivalent. On ASCII machines, this is just ** an upper-to-lower case map. On EBCDIC machines we also need ** to adjust the encoding. Only alphabetic characters and underscores ** need to be translated. */ #ifdef SQLITE_ASCII # define charMap(X) sqlite3UpperToLower[(unsigned char)X] #endif #ifdef SQLITE_EBCDIC # define charMap(X) ebcdicToAscii[(unsigned char)X] const unsigned char ebcdicToAscii[] = { /* 0 1 2 3 4 5 6 7 8 9 A B C D E F */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1x */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 2x */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 3x */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 4x */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 5x */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 95, 0, 0, /* 6x */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 7x */ 0, 97, 98, 99,100,101,102,103,104,105, 0, 0, 0, 0, 0, 0, /* 8x */ 0,106,107,108,109,110,111,112,113,114, 0, 0, 0, 0, 0, 0, /* 9x */ 0, 0,115,116,117,118,119,120,121,122, 0, 0, 0, 0, 0, 0, /* Ax */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* Bx */ 0, 97, 98, 99,100,101,102,103,104,105, 0, 0, 0, 0, 0, 0, /* Cx */ 0,106,107,108,109,110,111,112,113,114, 0, 0, 0, 0, 0, 0, /* Dx */ 0, 0,115,116,117,118,119,120,121,122, 0, 0, 0, 0, 0, 0, /* Ex */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* Fx */ }; #endif /* ** The sqlite3KeywordCode function looks up an identifier to determine if ** it is a keyword. If it is a keyword, the token code of that keyword is ** returned. If the input is not a keyword, TK_ID is returned. ** ** The implementation of this routine was generated by a program, ** mkkeywordhash.h, located in the tool subdirectory of the distribution. ** The output of the mkkeywordhash.c program is written into a file ** named keywordhash.h and then included into this source file by ** the #include below. 
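** As an illustrative example (not part of the generated header), the
** lookup is case-insensitive, so keywordCode("select",6) and
** keywordCode("SELECT",6) both return TK_SELECT, while an identifier
** such as keywordCode("selects",7) matches no keyword and falls back to
** TK_ID.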
*/ /************** Include keywordhash.h in the middle of tokenize.c ************/ /************** Begin file keywordhash.h *************************************/ /***** This file contains automatically generated code ****** ** ** The code in this file has been automatically generated by ** ** sqlite/tool/mkkeywordhash.c ** ** The code in this file implements a function that determines whether ** or not a given identifier is really an SQL keyword. The same thing ** might be implemented more directly using a hand-written hash table. ** But by using this automatically generated code, the size of the code ** is substantially reduced. This is important for embedded applications ** on platforms with limited memory. */ /* Hash score: 182 */ static int keywordCode(const char *z, int n){ /* zText[] encodes 834 bytes of keywords in 554 bytes */ /* REINDEXEDESCAPEACHECKEYBEFOREIGNOREGEXPLAINSTEADDATABASELECT */ /* ABLEFTHENDEFERRABLELSEXCEPTRANSACTIONATURALTERAISEXCLUSIVE */ /* XISTSAVEPOINTERSECTRIGGEREFERENCESCONSTRAINTOFFSETEMPORARY */ /* UNIQUERYWITHOUTERELEASEATTACHAVINGROUPDATEBEGINNERECURSIVE */ /* BETWEENOTNULLIKECASCADELETECASECOLLATECREATECURRENT_DATEDETACH */ /* IMMEDIATEJOINSERTMATCHPLANALYZEPRAGMABORTVALUESVIRTUALIMITWHEN */ /* WHERENAMEAFTEREPLACEANDEFAULTAUTOINCREMENTCASTCOLUMNCOMMIT */ /* CONFLICTCROSSCURRENT_TIMESTAMPRIMARYDEFERREDISTINCTDROPFAIL */ /* FROMFULLGLOBYIFISNULLORDERESTRICTRIGHTROLLBACKROWUNIONUSING */ /* VACUUMVIEWINITIALLY */ static const char zText[553] = { 'R','E','I','N','D','E','X','E','D','E','S','C','A','P','E','A','C','H', 'E','C','K','E','Y','B','E','F','O','R','E','I','G','N','O','R','E','G', 'E','X','P','L','A','I','N','S','T','E','A','D','D','A','T','A','B','A', 'S','E','L','E','C','T','A','B','L','E','F','T','H','E','N','D','E','F', 'E','R','R','A','B','L','E','L','S','E','X','C','E','P','T','R','A','N', 'S','A','C','T','I','O','N','A','T','U','R','A','L','T','E','R','A','I', 'S','E','X','C','L','U','S','I','V','E','X','I','S','T','S','A','V','E', 'P','O','I','N','T','E','R','S','E','C','T','R','I','G','G','E','R','E', 'F','E','R','E','N','C','E','S','C','O','N','S','T','R','A','I','N','T', 'O','F','F','S','E','T','E','M','P','O','R','A','R','Y','U','N','I','Q', 'U','E','R','Y','W','I','T','H','O','U','T','E','R','E','L','E','A','S', 'E','A','T','T','A','C','H','A','V','I','N','G','R','O','U','P','D','A', 'T','E','B','E','G','I','N','N','E','R','E','C','U','R','S','I','V','E', 'B','E','T','W','E','E','N','O','T','N','U','L','L','I','K','E','C','A', 'S','C','A','D','E','L','E','T','E','C','A','S','E','C','O','L','L','A', 'T','E','C','R','E','A','T','E','C','U','R','R','E','N','T','_','D','A', 'T','E','D','E','T','A','C','H','I','M','M','E','D','I','A','T','E','J', 'O','I','N','S','E','R','T','M','A','T','C','H','P','L','A','N','A','L', 'Y','Z','E','P','R','A','G','M','A','B','O','R','T','V','A','L','U','E', 'S','V','I','R','T','U','A','L','I','M','I','T','W','H','E','N','W','H', 'E','R','E','N','A','M','E','A','F','T','E','R','E','P','L','A','C','E', 'A','N','D','E','F','A','U','L','T','A','U','T','O','I','N','C','R','E', 'M','E','N','T','C','A','S','T','C','O','L','U','M','N','C','O','M','M', 'I','T','C','O','N','F','L','I','C','T','C','R','O','S','S','C','U','R', 'R','E','N','T','_','T','I','M','E','S','T','A','M','P','R','I','M','A', 'R','Y','D','E','F','E','R','R','E','D','I','S','T','I','N','C','T','D', 'R','O','P','F','A','I','L','F','R','O','M','F','U','L','L','G','L','O', 'B','Y','I','F','I','S','N','U','L','L','O','R','D','E','R','E','S','T', 
'R','I','C','T','R','I','G','H','T','R','O','L','L','B','A','C','K','R', 'O','W','U','N','I','O','N','U','S','I','N','G','V','A','C','U','U','M', 'V','I','E','W','I','N','I','T','I','A','L','L','Y', }; static const unsigned char aHash[127] = { 76, 105, 117, 74, 0, 45, 0, 0, 82, 0, 77, 0, 0, 42, 12, 78, 15, 0, 116, 85, 54, 112, 0, 19, 0, 0, 121, 0, 119, 115, 0, 22, 93, 0, 9, 0, 0, 70, 71, 0, 69, 6, 0, 48, 90, 102, 0, 118, 101, 0, 0, 44, 0, 103, 24, 0, 17, 0, 122, 53, 23, 0, 5, 110, 25, 96, 0, 0, 124, 106, 60, 123, 57, 28, 55, 0, 91, 0, 100, 26, 0, 99, 0, 0, 0, 95, 92, 97, 88, 109, 14, 39, 108, 0, 81, 0, 18, 89, 111, 32, 0, 120, 80, 113, 62, 46, 84, 0, 0, 94, 40, 59, 114, 0, 36, 0, 0, 29, 0, 86, 63, 64, 0, 20, 61, 0, 56, }; static const unsigned char aNext[124] = { 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 13, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 33, 0, 21, 0, 0, 0, 0, 0, 50, 0, 43, 3, 47, 0, 0, 0, 0, 30, 0, 58, 0, 38, 0, 0, 0, 1, 66, 0, 0, 67, 0, 41, 0, 0, 0, 0, 0, 0, 49, 65, 0, 0, 0, 0, 31, 52, 16, 34, 10, 0, 0, 0, 0, 0, 0, 0, 11, 72, 79, 0, 8, 0, 104, 98, 0, 107, 0, 87, 0, 75, 51, 0, 27, 37, 73, 83, 0, 35, 68, 0, 0, }; static const unsigned char aLen[124] = { 7, 7, 5, 4, 6, 4, 5, 3, 6, 7, 3, 6, 6, 7, 7, 3, 8, 2, 6, 5, 4, 4, 3, 10, 4, 6, 11, 6, 2, 7, 5, 5, 9, 6, 9, 9, 7, 10, 10, 4, 6, 2, 3, 9, 4, 2, 6, 5, 7, 4, 5, 7, 6, 6, 5, 6, 5, 5, 9, 7, 7, 3, 2, 4, 4, 7, 3, 6, 4, 7, 6, 12, 6, 9, 4, 6, 5, 4, 7, 6, 5, 6, 7, 5, 4, 5, 6, 5, 7, 3, 7, 13, 2, 2, 4, 6, 6, 8, 5, 17, 12, 7, 8, 8, 2, 4, 4, 4, 4, 4, 2, 2, 6, 5, 8, 5, 8, 3, 5, 5, 6, 4, 9, 3, }; static const unsigned short int aOffset[124] = { 0, 2, 2, 8, 9, 14, 16, 20, 23, 25, 25, 29, 33, 36, 41, 46, 48, 53, 54, 59, 62, 65, 67, 69, 78, 81, 86, 91, 95, 96, 101, 105, 109, 117, 122, 128, 136, 142, 152, 159, 162, 162, 165, 167, 167, 171, 176, 179, 184, 184, 188, 192, 199, 204, 209, 212, 218, 221, 225, 234, 240, 240, 240, 243, 246, 250, 251, 255, 261, 265, 272, 278, 290, 296, 305, 307, 313, 318, 320, 327, 332, 337, 343, 349, 354, 358, 361, 367, 371, 378, 380, 387, 389, 391, 400, 404, 410, 416, 424, 429, 429, 445, 452, 459, 460, 467, 471, 475, 479, 483, 486, 488, 490, 496, 500, 508, 513, 521, 524, 529, 534, 540, 544, 549, }; static const unsigned char aCode[124] = { TK_REINDEX, TK_INDEXED, TK_INDEX, TK_DESC, TK_ESCAPE, TK_EACH, TK_CHECK, TK_KEY, TK_BEFORE, TK_FOREIGN, TK_FOR, TK_IGNORE, TK_LIKE_KW, TK_EXPLAIN, TK_INSTEAD, TK_ADD, TK_DATABASE, TK_AS, TK_SELECT, TK_TABLE, TK_JOIN_KW, TK_THEN, TK_END, TK_DEFERRABLE, TK_ELSE, TK_EXCEPT, TK_TRANSACTION,TK_ACTION, TK_ON, TK_JOIN_KW, TK_ALTER, TK_RAISE, TK_EXCLUSIVE, TK_EXISTS, TK_SAVEPOINT, TK_INTERSECT, TK_TRIGGER, TK_REFERENCES, TK_CONSTRAINT, TK_INTO, TK_OFFSET, TK_OF, TK_SET, TK_TEMP, TK_TEMP, TK_OR, TK_UNIQUE, TK_QUERY, TK_WITHOUT, TK_WITH, TK_JOIN_KW, TK_RELEASE, TK_ATTACH, TK_HAVING, TK_GROUP, TK_UPDATE, TK_BEGIN, TK_JOIN_KW, TK_RECURSIVE, TK_BETWEEN, TK_NOTNULL, TK_NOT, TK_NO, TK_NULL, TK_LIKE_KW, TK_CASCADE, TK_ASC, TK_DELETE, TK_CASE, TK_COLLATE, TK_CREATE, TK_CTIME_KW, TK_DETACH, TK_IMMEDIATE, TK_JOIN, TK_INSERT, TK_MATCH, TK_PLAN, TK_ANALYZE, TK_PRAGMA, TK_ABORT, TK_VALUES, TK_VIRTUAL, TK_LIMIT, TK_WHEN, TK_WHERE, TK_RENAME, TK_AFTER, TK_REPLACE, TK_AND, TK_DEFAULT, TK_AUTOINCR, TK_TO, TK_IN, TK_CAST, TK_COLUMNKW, TK_COMMIT, TK_CONFLICT, TK_JOIN_KW, TK_CTIME_KW, TK_CTIME_KW, TK_PRIMARY, TK_DEFERRED, TK_DISTINCT, TK_IS, TK_DROP, TK_FAIL, TK_FROM, TK_JOIN_KW, TK_LIKE_KW, TK_BY, TK_IF, TK_ISNULL, TK_ORDER, TK_RESTRICT, TK_JOIN_KW, TK_ROLLBACK, TK_ROW, 
TK_UNION, TK_USING, TK_VACUUM, TK_VIEW, TK_INITIALLY, TK_ALL, }; int h, i; if( n<2 ) return TK_ID; h = ((charMap(z[0])*4) ^ (charMap(z[n-1])*3) ^ n) % 127; for(i=((int)aHash[h])-1; i>=0; i=((int)aNext[i])-1){ if( aLen[i]==n && sqlite3StrNICmp(&zText[aOffset[i]],z,n)==0 ){ testcase( i==0 ); /* REINDEX */ testcase( i==1 ); /* INDEXED */ testcase( i==2 ); /* INDEX */ testcase( i==3 ); /* DESC */ testcase( i==4 ); /* ESCAPE */ testcase( i==5 ); /* EACH */ testcase( i==6 ); /* CHECK */ testcase( i==7 ); /* KEY */ testcase( i==8 ); /* BEFORE */ testcase( i==9 ); /* FOREIGN */ testcase( i==10 ); /* FOR */ testcase( i==11 ); /* IGNORE */ testcase( i==12 ); /* REGEXP */ testcase( i==13 ); /* EXPLAIN */ testcase( i==14 ); /* INSTEAD */ testcase( i==15 ); /* ADD */ testcase( i==16 ); /* DATABASE */ testcase( i==17 ); /* AS */ testcase( i==18 ); /* SELECT */ testcase( i==19 ); /* TABLE */ testcase( i==20 ); /* LEFT */ testcase( i==21 ); /* THEN */ testcase( i==22 ); /* END */ testcase( i==23 ); /* DEFERRABLE */ testcase( i==24 ); /* ELSE */ testcase( i==25 ); /* EXCEPT */ testcase( i==26 ); /* TRANSACTION */ testcase( i==27 ); /* ACTION */ testcase( i==28 ); /* ON */ testcase( i==29 ); /* NATURAL */ testcase( i==30 ); /* ALTER */ testcase( i==31 ); /* RAISE */ testcase( i==32 ); /* EXCLUSIVE */ testcase( i==33 ); /* EXISTS */ testcase( i==34 ); /* SAVEPOINT */ testcase( i==35 ); /* INTERSECT */ testcase( i==36 ); /* TRIGGER */ testcase( i==37 ); /* REFERENCES */ testcase( i==38 ); /* CONSTRAINT */ testcase( i==39 ); /* INTO */ testcase( i==40 ); /* OFFSET */ testcase( i==41 ); /* OF */ testcase( i==42 ); /* SET */ testcase( i==43 ); /* TEMPORARY */ testcase( i==44 ); /* TEMP */ testcase( i==45 ); /* OR */ testcase( i==46 ); /* UNIQUE */ testcase( i==47 ); /* QUERY */ testcase( i==48 ); /* WITHOUT */ testcase( i==49 ); /* WITH */ testcase( i==50 ); /* OUTER */ testcase( i==51 ); /* RELEASE */ testcase( i==52 ); /* ATTACH */ testcase( i==53 ); /* HAVING */ testcase( i==54 ); /* GROUP */ testcase( i==55 ); /* UPDATE */ testcase( i==56 ); /* BEGIN */ testcase( i==57 ); /* INNER */ testcase( i==58 ); /* RECURSIVE */ testcase( i==59 ); /* BETWEEN */ testcase( i==60 ); /* NOTNULL */ testcase( i==61 ); /* NOT */ testcase( i==62 ); /* NO */ testcase( i==63 ); /* NULL */ testcase( i==64 ); /* LIKE */ testcase( i==65 ); /* CASCADE */ testcase( i==66 ); /* ASC */ testcase( i==67 ); /* DELETE */ testcase( i==68 ); /* CASE */ testcase( i==69 ); /* COLLATE */ testcase( i==70 ); /* CREATE */ testcase( i==71 ); /* CURRENT_DATE */ testcase( i==72 ); /* DETACH */ testcase( i==73 ); /* IMMEDIATE */ testcase( i==74 ); /* JOIN */ testcase( i==75 ); /* INSERT */ testcase( i==76 ); /* MATCH */ testcase( i==77 ); /* PLAN */ testcase( i==78 ); /* ANALYZE */ testcase( i==79 ); /* PRAGMA */ testcase( i==80 ); /* ABORT */ testcase( i==81 ); /* VALUES */ testcase( i==82 ); /* VIRTUAL */ testcase( i==83 ); /* LIMIT */ testcase( i==84 ); /* WHEN */ testcase( i==85 ); /* WHERE */ testcase( i==86 ); /* RENAME */ testcase( i==87 ); /* AFTER */ testcase( i==88 ); /* REPLACE */ testcase( i==89 ); /* AND */ testcase( i==90 ); /* DEFAULT */ testcase( i==91 ); /* AUTOINCREMENT */ testcase( i==92 ); /* TO */ testcase( i==93 ); /* IN */ testcase( i==94 ); /* CAST */ testcase( i==95 ); /* COLUMN */ testcase( i==96 ); /* COMMIT */ testcase( i==97 ); /* CONFLICT */ testcase( i==98 ); /* CROSS */ testcase( i==99 ); /* CURRENT_TIMESTAMP */ testcase( i==100 ); /* CURRENT_TIME */ testcase( i==101 ); /* PRIMARY */ testcase( i==102 ); /* DEFERRED */ 
testcase( i==103 ); /* DISTINCT */ testcase( i==104 ); /* IS */ testcase( i==105 ); /* DROP */ testcase( i==106 ); /* FAIL */ testcase( i==107 ); /* FROM */ testcase( i==108 ); /* FULL */ testcase( i==109 ); /* GLOB */ testcase( i==110 ); /* BY */ testcase( i==111 ); /* IF */ testcase( i==112 ); /* ISNULL */ testcase( i==113 ); /* ORDER */ testcase( i==114 ); /* RESTRICT */ testcase( i==115 ); /* RIGHT */ testcase( i==116 ); /* ROLLBACK */ testcase( i==117 ); /* ROW */ testcase( i==118 ); /* UNION */ testcase( i==119 ); /* USING */ testcase( i==120 ); /* VACUUM */ testcase( i==121 ); /* VIEW */ testcase( i==122 ); /* INITIALLY */ testcase( i==123 ); /* ALL */ return aCode[i]; } } return TK_ID; } SQLITE_PRIVATE int sqlite3KeywordCode(const unsigned char *z, int n){ return keywordCode((char*)z, n); } #define SQLITE_N_KEYWORD 124 /************** End of keywordhash.h *****************************************/ /************** Continuing where we left off in tokenize.c *******************/ /* ** If X is a character that can be used in an identifier then ** IdChar(X) will be true. Otherwise it is false. ** ** For ASCII, any character with the high-order bit set is ** allowed in an identifier. For 7-bit characters, ** sqlite3IsIdChar[X] must be 1. ** ** For EBCDIC, the rules are more complex but have the same ** end result. ** ** Ticket #1066. the SQL standard does not allow '$' in the ** middle of identifiers. But many SQL implementations do. ** SQLite will allow '$' in identifiers for compatibility. ** But the feature is undocumented. */ #ifdef SQLITE_ASCII #define IdChar(C) ((sqlite3CtypeMap[(unsigned char)C]&0x46)!=0) #endif #ifdef SQLITE_EBCDIC SQLITE_PRIVATE const char sqlite3IsEbcdicIdChar[] = { /* x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xA xB xC xD xE xF */ 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, /* 4x */ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, /* 5x */ 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, /* 6x */ 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, /* 7x */ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, /* 8x */ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, /* 9x */ 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, /* Ax */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* Bx */ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, /* Cx */ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, /* Dx */ 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, /* Ex */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, /* Fx */ }; #define IdChar(C) (((c=C)>=0x42 && sqlite3IsEbcdicIdChar[c-0x40])) #endif /* Make the IdChar function accessible from ctime.c */ #ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS SQLITE_PRIVATE int sqlite3IsIdChar(u8 c){ return IdChar(c); } #endif /* ** Return the length of the token that begins at z[0]. ** Store the token type in *tokenType before returning. 
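** As a worked illustration (not part of the original comment), scanning
** the text "WHERE x>=10" one call at a time yields:
**
**     "WHERE"   length 5,  *tokenType set to TK_WHERE
**     " "       length 1,  *tokenType set to TK_SPACE
**     "x"       length 1,  *tokenType set to TK_ID
**     ">="      length 2,  *tokenType set to TK_GE
**     "10"      length 2,  *tokenType set to TK_INTEGER
**
** A caller advances its input pointer by the returned length after each
** call, which is how sqlite3RunParser() further below drives this
** routine.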
*/ SQLITE_PRIVATE int sqlite3GetToken(const unsigned char *z, int *tokenType){ int i, c; switch( *z ){ case ' ': case '\t': case '\n': case '\f': case '\r': { testcase( z[0]==' ' ); testcase( z[0]=='\t' ); testcase( z[0]=='\n' ); testcase( z[0]=='\f' ); testcase( z[0]=='\r' ); for(i=1; sqlite3Isspace(z[i]); i++){} *tokenType = TK_SPACE; return i; } case '-': { if( z[1]=='-' ){ for(i=2; (c=z[i])!=0 && c!='\n'; i++){} *tokenType = TK_SPACE; /* IMP: R-22934-25134 */ return i; } *tokenType = TK_MINUS; return 1; } case '(': { *tokenType = TK_LP; return 1; } case ')': { *tokenType = TK_RP; return 1; } case ';': { *tokenType = TK_SEMI; return 1; } case '+': { *tokenType = TK_PLUS; return 1; } case '*': { *tokenType = TK_STAR; return 1; } case '/': { if( z[1]!='*' || z[2]==0 ){ *tokenType = TK_SLASH; return 1; } for(i=3, c=z[2]; (c!='*' || z[i]!='/') && (c=z[i])!=0; i++){} if( c ) i++; *tokenType = TK_SPACE; /* IMP: R-22934-25134 */ return i; } case '%': { *tokenType = TK_REM; return 1; } case '=': { *tokenType = TK_EQ; return 1 + (z[1]=='='); } case '<': { if( (c=z[1])=='=' ){ *tokenType = TK_LE; return 2; }else if( c=='>' ){ *tokenType = TK_NE; return 2; }else if( c=='<' ){ *tokenType = TK_LSHIFT; return 2; }else{ *tokenType = TK_LT; return 1; } } case '>': { if( (c=z[1])=='=' ){ *tokenType = TK_GE; return 2; }else if( c=='>' ){ *tokenType = TK_RSHIFT; return 2; }else{ *tokenType = TK_GT; return 1; } } case '!': { if( z[1]!='=' ){ *tokenType = TK_ILLEGAL; return 2; }else{ *tokenType = TK_NE; return 2; } } case '|': { if( z[1]!='|' ){ *tokenType = TK_BITOR; return 1; }else{ *tokenType = TK_CONCAT; return 2; } } case ',': { *tokenType = TK_COMMA; return 1; } case '&': { *tokenType = TK_BITAND; return 1; } case '~': { *tokenType = TK_BITNOT; return 1; } case '`': case '\'': case '"': { int delim = z[0]; testcase( delim=='`' ); testcase( delim=='\'' ); testcase( delim=='"' ); for(i=1; (c=z[i])!=0; i++){ if( c==delim ){ if( z[i+1]==delim ){ i++; }else{ break; } } } if( c=='\'' ){ *tokenType = TK_STRING; return i+1; }else if( c!=0 ){ *tokenType = TK_ID; return i+1; }else{ *tokenType = TK_ILLEGAL; return i; } } case '.': { #ifndef SQLITE_OMIT_FLOATING_POINT if( !sqlite3Isdigit(z[1]) ) #endif { *tokenType = TK_DOT; return 1; } /* If the next character is a digit, this is a floating point ** number that begins with ".". Fall thru into the next case */ } case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': { testcase( z[0]=='0' ); testcase( z[0]=='1' ); testcase( z[0]=='2' ); testcase( z[0]=='3' ); testcase( z[0]=='4' ); testcase( z[0]=='5' ); testcase( z[0]=='6' ); testcase( z[0]=='7' ); testcase( z[0]=='8' ); testcase( z[0]=='9' ); *tokenType = TK_INTEGER; #ifndef SQLITE_OMIT_HEX_INTEGER if( z[0]=='0' && (z[1]=='x' || z[1]=='X') && sqlite3Isxdigit(z[2]) ){ for(i=3; sqlite3Isxdigit(z[i]); i++){} return i; } #endif for(i=0; sqlite3Isdigit(z[i]); i++){} #ifndef SQLITE_OMIT_FLOATING_POINT if( z[i]=='.' ){ i++; while( sqlite3Isdigit(z[i]) ){ i++; } *tokenType = TK_FLOAT; } if( (z[i]=='e' || z[i]=='E') && ( sqlite3Isdigit(z[i+1]) || ((z[i+1]=='+' || z[i+1]=='-') && sqlite3Isdigit(z[i+2])) ) ){ i += 2; while( sqlite3Isdigit(z[i]) ){ i++; } *tokenType = TK_FLOAT; } #endif while( IdChar(z[i]) ){ *tokenType = TK_ILLEGAL; i++; } return i; } case '[': { for(i=1, c=z[0]; c!=']' && (c=z[i])!=0; i++){} *tokenType = c==']' ? 
TK_ID : TK_ILLEGAL; return i; } case '?': { *tokenType = TK_VARIABLE; for(i=1; sqlite3Isdigit(z[i]); i++){} return i; } #ifndef SQLITE_OMIT_TCL_VARIABLE case '$': #endif case '@': /* For compatibility with MS SQL Server */ case '#': case ':': { int n = 0; testcase( z[0]=='$' ); testcase( z[0]=='@' ); testcase( z[0]==':' ); testcase( z[0]=='#' ); *tokenType = TK_VARIABLE; for(i=1; (c=z[i])!=0; i++){ if( IdChar(c) ){ n++; #ifndef SQLITE_OMIT_TCL_VARIABLE }else if( c=='(' && n>0 ){ do{ i++; }while( (c=z[i])!=0 && !sqlite3Isspace(c) && c!=')' ); if( c==')' ){ i++; }else{ *tokenType = TK_ILLEGAL; } break; }else if( c==':' && z[i+1]==':' ){ i++; #endif }else{ break; } } if( n==0 ) *tokenType = TK_ILLEGAL; return i; } #ifndef SQLITE_OMIT_BLOB_LITERAL case 'x': case 'X': { testcase( z[0]=='x' ); testcase( z[0]=='X' ); if( z[1]=='\'' ){ *tokenType = TK_BLOB; for(i=2; sqlite3Isxdigit(z[i]); i++){} if( z[i]!='\'' || i%2 ){ *tokenType = TK_ILLEGAL; while( z[i] && z[i]!='\'' ){ i++; } } if( z[i] ) i++; return i; } /* Otherwise fall through to the next case */ } #endif default: { if( !IdChar(*z) ){ break; } for(i=1; IdChar(z[i]); i++){} *tokenType = keywordCode((char*)z, i); return i; } } *tokenType = TK_ILLEGAL; return 1; } /* ** Run the parser on the given SQL string. The parser structure is ** passed in. An SQLITE_ status code is returned. If an error occurs ** then an and attempt is made to write an error message into ** memory obtained from sqlite3_malloc() and to make *pzErrMsg point to that ** error message. */ SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql, char **pzErrMsg){ int nErr = 0; /* Number of errors encountered */ int i; /* Loop counter */ void *pEngine; /* The LEMON-generated LALR(1) parser */ int tokenType; /* type of the next token */ int lastTokenParsed = -1; /* type of the previous token */ u8 enableLookaside; /* Saved value of db->lookaside.bEnabled */ sqlite3 *db = pParse->db; /* The database connection */ int mxSqlLen; /* Max length of an SQL string */ assert( zSql!=0 ); mxSqlLen = db->aLimit[SQLITE_LIMIT_SQL_LENGTH]; if( db->nVdbeActive==0 ){ db->u1.isInterrupted = 0; } pParse->rc = SQLITE_OK; pParse->zTail = zSql; i = 0; assert( pzErrMsg!=0 ); pEngine = sqlite3ParserAlloc(sqlite3Malloc); if( pEngine==0 ){ db->mallocFailed = 1; return SQLITE_NOMEM; } assert( pParse->pNewTable==0 ); assert( pParse->pNewTrigger==0 ); assert( pParse->nVar==0 ); assert( pParse->nzVar==0 ); assert( pParse->azVar==0 ); enableLookaside = db->lookaside.bEnabled; if( db->lookaside.pStart ) db->lookaside.bEnabled = 1; while( !db->mallocFailed && zSql[i]!=0 ){ assert( i>=0 ); pParse->sLastToken.z = &zSql[i]; pParse->sLastToken.n = sqlite3GetToken((unsigned char*)&zSql[i],&tokenType); i += pParse->sLastToken.n; if( i>mxSqlLen ){ pParse->rc = SQLITE_TOOBIG; break; } switch( tokenType ){ case TK_SPACE: { if( db->u1.isInterrupted ){ sqlite3ErrorMsg(pParse, "interrupt"); pParse->rc = SQLITE_INTERRUPT; goto abort_parse; } break; } case TK_ILLEGAL: { sqlite3ErrorMsg(pParse, "unrecognized token: \"%T\"", &pParse->sLastToken); goto abort_parse; } case TK_SEMI: { pParse->zTail = &zSql[i]; /* Fall thru into the default case */ } default: { sqlite3Parser(pEngine, tokenType, pParse->sLastToken, pParse); lastTokenParsed = tokenType; if( pParse->rc!=SQLITE_OK ){ goto abort_parse; } break; } } } abort_parse: assert( nErr==0 ); if( pParse->rc==SQLITE_OK && db->mallocFailed==0 ){ assert( zSql[i]==0 ); if( lastTokenParsed!=TK_SEMI ){ sqlite3Parser(pEngine, TK_SEMI, pParse->sLastToken, pParse); 
pParse->zTail = &zSql[i]; } if( pParse->rc==SQLITE_OK && db->mallocFailed==0 ){ sqlite3Parser(pEngine, 0, pParse->sLastToken, pParse); } } #ifdef YYTRACKMAXSTACKDEPTH sqlite3_mutex_enter(sqlite3MallocMutex()); sqlite3StatusSet(SQLITE_STATUS_PARSER_STACK, sqlite3ParserStackPeak(pEngine) ); sqlite3_mutex_leave(sqlite3MallocMutex()); #endif /* YYDEBUG */ sqlite3ParserFree(pEngine, sqlite3_free); db->lookaside.bEnabled = enableLookaside; if( db->mallocFailed ){ pParse->rc = SQLITE_NOMEM; } if( pParse->rc!=SQLITE_OK && pParse->rc!=SQLITE_DONE && pParse->zErrMsg==0 ){ pParse->zErrMsg = sqlite3MPrintf(db, "%s", sqlite3ErrStr(pParse->rc)); } assert( pzErrMsg!=0 ); if( pParse->zErrMsg ){ *pzErrMsg = pParse->zErrMsg; sqlite3_log(pParse->rc, "%s", *pzErrMsg); pParse->zErrMsg = 0; nErr++; } if( pParse->pVdbe && pParse->nErr>0 && pParse->nested==0 ){ sqlite3VdbeDelete(pParse->pVdbe); pParse->pVdbe = 0; } #ifndef SQLITE_OMIT_SHARED_CACHE if( pParse->nested==0 ){ sqlite3DbFree(db, pParse->aTableLock); pParse->aTableLock = 0; pParse->nTableLock = 0; } #endif #ifndef SQLITE_OMIT_VIRTUALTABLE sqlite3_free(pParse->apVtabLock); #endif if( !IN_DECLARE_VTAB ){ /* If the pParse->declareVtab flag is set, do not delete any table ** structure built up in pParse->pNewTable. The calling code (see vtab.c) ** will take responsibility for freeing the Table structure. */ sqlite3DeleteTable(db, pParse->pNewTable); } if( pParse->bFreeWith ) sqlite3WithDelete(db, pParse->pWith); sqlite3DeleteTrigger(db, pParse->pNewTrigger); for(i=pParse->nzVar-1; i>=0; i--) sqlite3DbFree(db, pParse->azVar[i]); sqlite3DbFree(db, pParse->azVar); while( pParse->pAinc ){ AutoincInfo *p = pParse->pAinc; pParse->pAinc = p->pNext; sqlite3DbFree(db, p); } while( pParse->pZombieTab ){ Table *p = pParse->pZombieTab; pParse->pZombieTab = p->pNextZombie; sqlite3DeleteTable(db, p); } assert( nErr==0 || pParse->rc!=SQLITE_OK ); return nErr; } /************** End of tokenize.c ********************************************/ /************** Begin file complete.c ****************************************/ /* ** 2001 September 15 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** An tokenizer for SQL ** ** This file contains C code that implements the sqlite3_complete() API. ** This code used to be part of the tokenizer.c source file. But by ** separating it out, the code will be automatically omitted from ** static links that do not use it. */ /* #include "sqliteInt.h" */ #ifndef SQLITE_OMIT_COMPLETE /* ** This is defined in tokenize.c. We just have to import the definition. */ #ifndef SQLITE_AMALGAMATION #ifdef SQLITE_ASCII #define IdChar(C) ((sqlite3CtypeMap[(unsigned char)C]&0x46)!=0) #endif #ifdef SQLITE_EBCDIC SQLITE_PRIVATE const char sqlite3IsEbcdicIdChar[]; #define IdChar(C) (((c=C)>=0x42 && sqlite3IsEbcdicIdChar[c-0x40])) #endif #endif /* SQLITE_AMALGAMATION */ /* ** Token types used by the sqlite3_complete() routine. See the header ** comments on that procedure for additional information. */ #define tkSEMI 0 #define tkWS 1 #define tkOTHER 2 #ifndef SQLITE_OMIT_TRIGGER #define tkEXPLAIN 3 #define tkCREATE 4 #define tkTEMP 5 #define tkTRIGGER 6 #define tkEND 7 #endif /* ** Return TRUE if the given SQL string ends in a semicolon. 
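** (Illustrative usage, not part of the original comment: an interactive
** shell can keep appending input lines to an accumulated buffer, called
** zBuf here purely for illustration, and only pass it on for execution
** once sqlite3_complete(zBuf) returns non-zero, so that a multi-line
** statement is never executed in pieces.)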
** ** Special handling is require for CREATE TRIGGER statements. ** Whenever the CREATE TRIGGER keywords are seen, the statement ** must end with ";END;". ** ** This implementation uses a state machine with 8 states: ** ** (0) INVALID We have not yet seen a non-whitespace character. ** ** (1) START At the beginning or end of an SQL statement. This routine ** returns 1 if it ends in the START state and 0 if it ends ** in any other state. ** ** (2) NORMAL We are in the middle of statement which ends with a single ** semicolon. ** ** (3) EXPLAIN The keyword EXPLAIN has been seen at the beginning of ** a statement. ** ** (4) CREATE The keyword CREATE has been seen at the beginning of a ** statement, possibly preceded by EXPLAIN and/or followed by ** TEMP or TEMPORARY ** ** (5) TRIGGER We are in the middle of a trigger definition that must be ** ended by a semicolon, the keyword END, and another semicolon. ** ** (6) SEMI We've seen the first semicolon in the ";END;" that occurs at ** the end of a trigger definition. ** ** (7) END We've seen the ";END" of the ";END;" that occurs at the end ** of a trigger definition. ** ** Transitions between states above are determined by tokens extracted ** from the input. The following tokens are significant: ** ** (0) tkSEMI A semicolon. ** (1) tkWS Whitespace. ** (2) tkOTHER Any other SQL token. ** (3) tkEXPLAIN The "explain" keyword. ** (4) tkCREATE The "create" keyword. ** (5) tkTEMP The "temp" or "temporary" keyword. ** (6) tkTRIGGER The "trigger" keyword. ** (7) tkEND The "end" keyword. ** ** Whitespace never causes a state transition and is always ignored. ** This means that a SQL string of all whitespace is invalid. ** ** If we compile with SQLITE_OMIT_TRIGGER, all of the computation needed ** to recognize the end of a trigger can be omitted. All we have to do ** is look for a semicolon that is not part of an string or comment. */ SQLITE_API int SQLITE_STDCALL sqlite3_complete(const char *zSql){ u8 state = 0; /* Current state, using numbers defined in header comment */ u8 token; /* Value of the next token */ #ifndef SQLITE_OMIT_TRIGGER /* A complex statement machine used to detect the end of a CREATE TRIGGER ** statement. This is the normal case. 
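** As a worked example against the table below, the input
**
**     CREATE TRIGGER tr AFTER INSERT ON t BEGIN SELECT 1; END;
**
** moves through INVALID -> CREATE -> TRIGGER (the trigger body is
** consumed as ordinary tokens) -> SEMI -> END and finally back to START
** on the closing semicolon, so sqlite3_complete() returns 1.  Without
** the final semicolon the machine stops in the END state and the
** statement is reported as incomplete.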
*/ static const u8 trans[8][8] = { /* Token: */ /* State: ** SEMI WS OTHER EXPLAIN CREATE TEMP TRIGGER END */ /* 0 INVALID: */ { 1, 0, 2, 3, 4, 2, 2, 2, }, /* 1 START: */ { 1, 1, 2, 3, 4, 2, 2, 2, }, /* 2 NORMAL: */ { 1, 2, 2, 2, 2, 2, 2, 2, }, /* 3 EXPLAIN: */ { 1, 3, 3, 2, 4, 2, 2, 2, }, /* 4 CREATE: */ { 1, 4, 2, 2, 2, 4, 5, 2, }, /* 5 TRIGGER: */ { 6, 5, 5, 5, 5, 5, 5, 5, }, /* 6 SEMI: */ { 6, 6, 5, 5, 5, 5, 5, 7, }, /* 7 END: */ { 1, 7, 5, 5, 5, 5, 5, 5, }, }; #else /* If triggers are not supported by this compile then the statement machine ** used to detect the end of a statement is much simpler */ static const u8 trans[3][3] = { /* Token: */ /* State: ** SEMI WS OTHER */ /* 0 INVALID: */ { 1, 0, 2, }, /* 1 START: */ { 1, 1, 2, }, /* 2 NORMAL: */ { 1, 2, 2, }, }; #endif /* SQLITE_OMIT_TRIGGER */ #ifdef SQLITE_ENABLE_API_ARMOR if( zSql==0 ){ (void)SQLITE_MISUSE_BKPT; return 0; } #endif while( *zSql ){ switch( *zSql ){ case ';': { /* A semicolon */ token = tkSEMI; break; } case ' ': case '\r': case '\t': case '\n': case '\f': { /* White space is ignored */ token = tkWS; break; } case '/': { /* C-style comments */ if( zSql[1]!='*' ){ token = tkOTHER; break; } zSql += 2; while( zSql[0] && (zSql[0]!='*' || zSql[1]!='/') ){ zSql++; } if( zSql[0]==0 ) return 0; zSql++; token = tkWS; break; } case '-': { /* SQL-style comments from "--" to end of line */ if( zSql[1]!='-' ){ token = tkOTHER; break; } while( *zSql && *zSql!='\n' ){ zSql++; } if( *zSql==0 ) return state==1; token = tkWS; break; } case '[': { /* Microsoft-style identifiers in [...] */ zSql++; while( *zSql && *zSql!=']' ){ zSql++; } if( *zSql==0 ) return 0; token = tkOTHER; break; } case '`': /* Grave-accent quoted symbols used by MySQL */ case '"': /* single- and double-quoted strings */ case '\'': { int c = *zSql; zSql++; while( *zSql && *zSql!=c ){ zSql++; } if( *zSql==0 ) return 0; token = tkOTHER; break; } default: { #ifdef SQLITE_EBCDIC unsigned char c; #endif if( IdChar((u8)*zSql) ){ /* Keywords and unquoted identifiers */ int nId; for(nId=1; IdChar(zSql[nId]); nId++){} #ifdef SQLITE_OMIT_TRIGGER token = tkOTHER; #else switch( *zSql ){ case 'c': case 'C': { if( nId==6 && sqlite3StrNICmp(zSql, "create", 6)==0 ){ token = tkCREATE; }else{ token = tkOTHER; } break; } case 't': case 'T': { if( nId==7 && sqlite3StrNICmp(zSql, "trigger", 7)==0 ){ token = tkTRIGGER; }else if( nId==4 && sqlite3StrNICmp(zSql, "temp", 4)==0 ){ token = tkTEMP; }else if( nId==9 && sqlite3StrNICmp(zSql, "temporary", 9)==0 ){ token = tkTEMP; }else{ token = tkOTHER; } break; } case 'e': case 'E': { if( nId==3 && sqlite3StrNICmp(zSql, "end", 3)==0 ){ token = tkEND; }else #ifndef SQLITE_OMIT_EXPLAIN if( nId==7 && sqlite3StrNICmp(zSql, "explain", 7)==0 ){ token = tkEXPLAIN; }else #endif { token = tkOTHER; } break; } default: { token = tkOTHER; break; } } #endif /* SQLITE_OMIT_TRIGGER */ zSql += nId-1; }else{ /* Operators and special symbols */ token = tkOTHER; } break; } } state = trans[state][token]; zSql++; } return state==1; } #ifndef SQLITE_OMIT_UTF16 /* ** This routine is the same as the sqlite3_complete() routine described ** above, except that the parameter is required to be UTF-16 encoded, not ** UTF-8. 
*/ SQLITE_API int SQLITE_STDCALL sqlite3_complete16(const void *zSql){ sqlite3_value *pVal; char const *zSql8; int rc; #ifndef SQLITE_OMIT_AUTOINIT rc = sqlite3_initialize(); if( rc ) return rc; #endif pVal = sqlite3ValueNew(0); sqlite3ValueSetStr(pVal, -1, zSql, SQLITE_UTF16NATIVE, SQLITE_STATIC); zSql8 = sqlite3ValueText(pVal, SQLITE_UTF8); if( zSql8 ){ rc = sqlite3_complete(zSql8); }else{ rc = SQLITE_NOMEM; } sqlite3ValueFree(pVal); return rc & 0xff; } #endif /* SQLITE_OMIT_UTF16 */ #endif /* SQLITE_OMIT_COMPLETE */ /************** End of complete.c ********************************************/ /************** Begin file main.c ********************************************/ /* ** 2001 September 15 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** Main file for the SQLite library. The routines in this file ** implement the programmer interface to the library. Routines in ** other files are for internal use by SQLite and should not be ** accessed by users of the library. */ /* #include "sqliteInt.h" */ #ifdef SQLITE_ENABLE_FTS3 /************** Include fts3.h in the middle of main.c ***********************/ /************** Begin file fts3.h ********************************************/ /* ** 2006 Oct 10 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ****************************************************************************** ** ** This header file is used by programs that want to link against the ** FTS3 library. All it does is declare the sqlite3Fts3Init() interface. */ /* #include "sqlite3.h" */ #if 0 extern "C" { #endif /* __cplusplus */ SQLITE_PRIVATE int sqlite3Fts3Init(sqlite3 *db); #if 0 } /* extern "C" */ #endif /* __cplusplus */ /************** End of fts3.h ************************************************/ /************** Continuing where we left off in main.c ***********************/ #endif #ifdef SQLITE_ENABLE_RTREE /************** Include rtree.h in the middle of main.c **********************/ /************** Begin file rtree.h *******************************************/ /* ** 2008 May 26 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ****************************************************************************** ** ** This header file is used by programs that want to link against the ** RTREE library. All it does is declare the sqlite3RtreeInit() interface. 
*/ /* #include "sqlite3.h" */ #if 0 extern "C" { #endif /* __cplusplus */ SQLITE_PRIVATE int sqlite3RtreeInit(sqlite3 *db); #if 0 } /* extern "C" */ #endif /* __cplusplus */ /************** End of rtree.h ***********************************************/ /************** Continuing where we left off in main.c ***********************/ #endif #ifdef SQLITE_ENABLE_ICU /************** Include sqliteicu.h in the middle of main.c ******************/ /************** Begin file sqliteicu.h ***************************************/ /* ** 2008 May 26 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ****************************************************************************** ** ** This header file is used by programs that want to link against the ** ICU extension. All it does is declare the sqlite3IcuInit() interface. */ /* #include "sqlite3.h" */ #if 0 extern "C" { #endif /* __cplusplus */ SQLITE_PRIVATE int sqlite3IcuInit(sqlite3 *db); #if 0 } /* extern "C" */ #endif /* __cplusplus */ /************** End of sqliteicu.h *******************************************/ /************** Continuing where we left off in main.c ***********************/ #endif #ifndef SQLITE_AMALGAMATION /* IMPLEMENTATION-OF: R-46656-45156 The sqlite3_version[] string constant ** contains the text of SQLITE_VERSION macro. */ SQLITE_API const char sqlite3_version[] = SQLITE_VERSION; #endif /* IMPLEMENTATION-OF: R-53536-42575 The sqlite3_libversion() function returns ** a pointer to the to the sqlite3_version[] string constant. */ SQLITE_API const char *SQLITE_STDCALL sqlite3_libversion(void){ return sqlite3_version; } /* IMPLEMENTATION-OF: R-63124-39300 The sqlite3_sourceid() function returns a ** pointer to a string constant whose value is the same as the ** SQLITE_SOURCE_ID C preprocessor macro. */ SQLITE_API const char *SQLITE_STDCALL sqlite3_sourceid(void){ return SQLITE_SOURCE_ID; } /* IMPLEMENTATION-OF: R-35210-63508 The sqlite3_libversion_number() function ** returns an integer equal to SQLITE_VERSION_NUMBER. */ SQLITE_API int SQLITE_STDCALL sqlite3_libversion_number(void){ return SQLITE_VERSION_NUMBER; } /* IMPLEMENTATION-OF: R-20790-14025 The sqlite3_threadsafe() function returns ** zero if and only if SQLite was compiled with mutexing code omitted due to ** the SQLITE_THREADSAFE compile-time option being set to 0. */ SQLITE_API int SQLITE_STDCALL sqlite3_threadsafe(void){ return SQLITE_THREADSAFE; } /* ** When compiling the test fixture or with debugging enabled (on Win32), ** this variable being set to non-zero will cause OSTRACE macros to emit ** extra diagnostic information. */ #ifdef SQLITE_HAVE_OS_TRACE # ifndef SQLITE_DEBUG_OS_TRACE # define SQLITE_DEBUG_OS_TRACE 0 # endif int sqlite3OSTrace = SQLITE_DEBUG_OS_TRACE; #endif #if !defined(SQLITE_OMIT_TRACE) && defined(SQLITE_ENABLE_IOTRACE) /* ** If the following function pointer is not NULL and if ** SQLITE_ENABLE_IOTRACE is enabled, then messages describing ** I/O active are written using this function. These messages ** are intended for debugging activity only. */ SQLITE_API void (SQLITE_CDECL *sqlite3IoTrace)(const char*, ...) = 0; #endif /* ** If the following global variable points to a string which is the ** name of a directory, then that directory will be used to store ** temporary files. 
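**
** One illustrative way to set this variable (editor's sketch; the path is a
** placeholder, and the string comes from sqlite3_mprintf() so that it lives
** in memory the library can treat as having come from sqlite3_malloc()):
**
**      sqlite3_temp_directory = sqlite3_mprintf("%s", "/var/tmp");
**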
** ** See also the "PRAGMA temp_store_directory" SQL command. */ SQLITE_API char *sqlite3_temp_directory = 0; /* ** If the following global variable points to a string which is the ** name of a directory, then that directory will be used to store ** all database files specified with a relative pathname. ** ** See also the "PRAGMA data_store_directory" SQL command. */ SQLITE_API char *sqlite3_data_directory = 0; /* ** Initialize SQLite. ** ** This routine must be called to initialize the memory allocation, ** VFS, and mutex subsystems prior to doing any serious work with ** SQLite. But as long as you do not compile with SQLITE_OMIT_AUTOINIT ** this routine will be called automatically by key routines such as ** sqlite3_open(). ** ** This routine is a no-op except on its very first call for the process, ** or for the first call after a call to sqlite3_shutdown. ** ** The first thread to call this routine runs the initialization to ** completion. If subsequent threads call this routine before the first ** thread has finished the initialization process, then the subsequent ** threads must block until the first thread finishes with the initialization. ** ** The first thread might call this routine recursively. Recursive ** calls to this routine should not block, of course. Otherwise the ** initialization process would never complete. ** ** Let X be the first thread to enter this routine. Let Y be some other ** thread. Then while the initial invocation of this routine by X is ** incomplete, it is required that: ** ** * Calls to this routine from Y must block until the outer-most ** call by X completes. ** ** * Recursive calls to this routine from thread X return immediately ** without blocking. */ SQLITE_API int SQLITE_STDCALL sqlite3_initialize(void){ MUTEX_LOGIC( sqlite3_mutex *pMaster; ) /* The main static mutex */ int rc; /* Result code */ #ifdef SQLITE_EXTRA_INIT int bRunExtraInit = 0; /* Extra initialization needed */ #endif #ifdef SQLITE_OMIT_WSD rc = sqlite3_wsd_init(4096, 24); if( rc!=SQLITE_OK ){ return rc; } #endif /* If the following assert() fails on some obscure processor/compiler ** combination, the work-around is to set the correct pointer ** size at compile-time using -DSQLITE_PTRSIZE=n compile-time option */ assert( SQLITE_PTRSIZE==sizeof(char*) ); /* If SQLite is already completely initialized, then this call ** to sqlite3_initialize() should be a no-op. But the initialization ** must be complete. So isInit must not be set until the very end ** of this routine. */ if( sqlite3GlobalConfig.isInit ) return SQLITE_OK; /* Make sure the mutex subsystem is initialized. If unable to ** initialize the mutex subsystem, return early with the error. ** If the system is so sick that we are unable to allocate a mutex, ** there is not much SQLite is going to be able to do. ** ** The mutex subsystem must take care of serializing its own ** initialization. */ rc = sqlite3MutexInit(); if( rc ) return rc; /* Initialize the malloc() system and the recursive pInitMutex mutex. ** This operation is protected by the STATIC_MASTER mutex. Note that ** MutexAlloc() is called for a static mutex prior to initializing the ** malloc subsystem - this implies that the allocation of a static ** mutex must not require support from the malloc subsystem. 
*/ MUTEX_LOGIC( pMaster = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER); ) sqlite3_mutex_enter(pMaster); sqlite3GlobalConfig.isMutexInit = 1; if( !sqlite3GlobalConfig.isMallocInit ){ rc = sqlite3MallocInit(); } if( rc==SQLITE_OK ){ sqlite3GlobalConfig.isMallocInit = 1; if( !sqlite3GlobalConfig.pInitMutex ){ sqlite3GlobalConfig.pInitMutex = sqlite3MutexAlloc(SQLITE_MUTEX_RECURSIVE); if( sqlite3GlobalConfig.bCoreMutex && !sqlite3GlobalConfig.pInitMutex ){ rc = SQLITE_NOMEM; } } } if( rc==SQLITE_OK ){ sqlite3GlobalConfig.nRefInitMutex++; } sqlite3_mutex_leave(pMaster); /* If rc is not SQLITE_OK at this point, then either the malloc ** subsystem could not be initialized or the system failed to allocate ** the pInitMutex mutex. Return an error in either case. */ if( rc!=SQLITE_OK ){ return rc; } /* Do the rest of the initialization under the recursive mutex so ** that we will be able to handle recursive calls into ** sqlite3_initialize(). The recursive calls normally come through ** sqlite3_os_init() when it invokes sqlite3_vfs_register(), but other ** recursive calls might also be possible. ** ** IMPLEMENTATION-OF: R-00140-37445 SQLite automatically serializes calls ** to the xInit method, so the xInit method need not be threadsafe. ** ** The following mutex is what serializes access to the appdef pcache xInit ** methods. The sqlite3_pcache_methods.xInit() all is embedded in the ** call to sqlite3PcacheInitialize(). */ sqlite3_mutex_enter(sqlite3GlobalConfig.pInitMutex); if( sqlite3GlobalConfig.isInit==0 && sqlite3GlobalConfig.inProgress==0 ){ FuncDefHash *pHash = &GLOBAL(FuncDefHash, sqlite3GlobalFunctions); sqlite3GlobalConfig.inProgress = 1; memset(pHash, 0, sizeof(sqlite3GlobalFunctions)); sqlite3RegisterGlobalFunctions(); if( sqlite3GlobalConfig.isPCacheInit==0 ){ rc = sqlite3PcacheInitialize(); } if( rc==SQLITE_OK ){ sqlite3GlobalConfig.isPCacheInit = 1; rc = sqlite3OsInit(); } if( rc==SQLITE_OK ){ sqlite3PCacheBufferSetup( sqlite3GlobalConfig.pPage, sqlite3GlobalConfig.szPage, sqlite3GlobalConfig.nPage); sqlite3GlobalConfig.isInit = 1; #ifdef SQLITE_EXTRA_INIT bRunExtraInit = 1; #endif } sqlite3GlobalConfig.inProgress = 0; } sqlite3_mutex_leave(sqlite3GlobalConfig.pInitMutex); /* Go back under the static mutex and clean up the recursive ** mutex to prevent a resource leak. */ sqlite3_mutex_enter(pMaster); sqlite3GlobalConfig.nRefInitMutex--; if( sqlite3GlobalConfig.nRefInitMutex<=0 ){ assert( sqlite3GlobalConfig.nRefInitMutex==0 ); sqlite3_mutex_free(sqlite3GlobalConfig.pInitMutex); sqlite3GlobalConfig.pInitMutex = 0; } sqlite3_mutex_leave(pMaster); /* The following is just a sanity check to make sure SQLite has ** been compiled correctly. It is important to run this code, but ** we don't want to run it too often and soak up CPU cycles for no ** reason. So we run it once during initialization. */ #ifndef NDEBUG #ifndef SQLITE_OMIT_FLOATING_POINT /* This section of code's only "output" is via assert() statements. */ if ( rc==SQLITE_OK ){ u64 x = (((u64)1)<<63)-1; double y; assert(sizeof(x)==8); assert(sizeof(x)==sizeof(y)); memcpy(&y, &x, 8); assert( sqlite3IsNaN(y) ); } #endif #endif /* Do extra initialization steps requested by the SQLITE_EXTRA_INIT ** compile-time option. */ #ifdef SQLITE_EXTRA_INIT if( bRunExtraInit ){ int SQLITE_EXTRA_INIT(const char*); rc = SQLITE_EXTRA_INIT(0); } #endif return rc; } /* ** Undo the effects of sqlite3_initialize(). 
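** An illustrative pairing, added by this editor as a sketch (the caveats in
** the sentences that follow still apply):
**
**      sqlite3_initialize();
**        ... open, use, and close every database connection ...
**      sqlite3_shutdown();
**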
Must not be called while ** there are outstanding database connections or memory allocations or ** while any part of SQLite is otherwise in use in any thread. This ** routine is not threadsafe. But it is safe to invoke this routine ** on when SQLite is already shut down. If SQLite is already shut down ** when this routine is invoked, then this routine is a harmless no-op. */ SQLITE_API int SQLITE_STDCALL sqlite3_shutdown(void){ #ifdef SQLITE_OMIT_WSD int rc = sqlite3_wsd_init(4096, 24); if( rc!=SQLITE_OK ){ return rc; } #endif if( sqlite3GlobalConfig.isInit ){ #ifdef SQLITE_EXTRA_SHUTDOWN void SQLITE_EXTRA_SHUTDOWN(void); SQLITE_EXTRA_SHUTDOWN(); #endif sqlite3_os_end(); sqlite3_reset_auto_extension(); sqlite3GlobalConfig.isInit = 0; } if( sqlite3GlobalConfig.isPCacheInit ){ sqlite3PcacheShutdown(); sqlite3GlobalConfig.isPCacheInit = 0; } if( sqlite3GlobalConfig.isMallocInit ){ sqlite3MallocEnd(); sqlite3GlobalConfig.isMallocInit = 0; #ifndef SQLITE_OMIT_SHUTDOWN_DIRECTORIES /* The heap subsystem has now been shutdown and these values are supposed ** to be NULL or point to memory that was obtained from sqlite3_malloc(), ** which would rely on that heap subsystem; therefore, make sure these ** values cannot refer to heap memory that was just invalidated when the ** heap subsystem was shutdown. This is only done if the current call to ** this function resulted in the heap subsystem actually being shutdown. */ sqlite3_data_directory = 0; sqlite3_temp_directory = 0; #endif } if( sqlite3GlobalConfig.isMutexInit ){ sqlite3MutexEnd(); sqlite3GlobalConfig.isMutexInit = 0; } return SQLITE_OK; } /* ** This API allows applications to modify the global configuration of ** the SQLite library at run-time. ** ** This routine should only be called when there are no outstanding ** database connections or memory allocations. This routine is not ** threadsafe. Failure to heed these warnings can lead to unpredictable ** behavior. */ SQLITE_API int SQLITE_CDECL sqlite3_config(int op, ...){ va_list ap; int rc = SQLITE_OK; /* sqlite3_config() shall return SQLITE_MISUSE if it is invoked while ** the SQLite library is in use. */ if( sqlite3GlobalConfig.isInit ) return SQLITE_MISUSE_BKPT; va_start(ap, op); switch( op ){ /* Mutex configuration options are only available in a threadsafe ** compile. */ #if defined(SQLITE_THREADSAFE) && SQLITE_THREADSAFE>0 /* IMP: R-54466-46756 */ case SQLITE_CONFIG_SINGLETHREAD: { /* EVIDENCE-OF: R-02748-19096 This option sets the threading mode to ** Single-thread. */ sqlite3GlobalConfig.bCoreMutex = 0; /* Disable mutex on core */ sqlite3GlobalConfig.bFullMutex = 0; /* Disable mutex on connections */ break; } #endif #if defined(SQLITE_THREADSAFE) && SQLITE_THREADSAFE>0 /* IMP: R-20520-54086 */ case SQLITE_CONFIG_MULTITHREAD: { /* EVIDENCE-OF: R-14374-42468 This option sets the threading mode to ** Multi-thread. */ sqlite3GlobalConfig.bCoreMutex = 1; /* Enable mutex on core */ sqlite3GlobalConfig.bFullMutex = 0; /* Disable mutex on connections */ break; } #endif #if defined(SQLITE_THREADSAFE) && SQLITE_THREADSAFE>0 /* IMP: R-59593-21810 */ case SQLITE_CONFIG_SERIALIZED: { /* EVIDENCE-OF: R-41220-51800 This option sets the threading mode to ** Serialized. 
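**
** Illustrative start-time call (editor's sketch; sqlite3_config() may only
** be used before sqlite3_initialize() or after sqlite3_shutdown()):
**
**      sqlite3_config(SQLITE_CONFIG_SERIALIZED);
**      sqlite3_initialize();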
*/ sqlite3GlobalConfig.bCoreMutex = 1; /* Enable mutex on core */ sqlite3GlobalConfig.bFullMutex = 1; /* Enable mutex on connections */ break; } #endif #if defined(SQLITE_THREADSAFE) && SQLITE_THREADSAFE>0 /* IMP: R-63666-48755 */ case SQLITE_CONFIG_MUTEX: { /* Specify an alternative mutex implementation */ sqlite3GlobalConfig.mutex = *va_arg(ap, sqlite3_mutex_methods*); break; } #endif #if defined(SQLITE_THREADSAFE) && SQLITE_THREADSAFE>0 /* IMP: R-14450-37597 */ case SQLITE_CONFIG_GETMUTEX: { /* Retrieve the current mutex implementation */ *va_arg(ap, sqlite3_mutex_methods*) = sqlite3GlobalConfig.mutex; break; } #endif case SQLITE_CONFIG_MALLOC: { /* EVIDENCE-OF: R-55594-21030 The SQLITE_CONFIG_MALLOC option takes a ** single argument which is a pointer to an instance of the ** sqlite3_mem_methods structure. The argument specifies alternative ** low-level memory allocation routines to be used in place of the memory ** allocation routines built into SQLite. */ sqlite3GlobalConfig.m = *va_arg(ap, sqlite3_mem_methods*); break; } case SQLITE_CONFIG_GETMALLOC: { /* EVIDENCE-OF: R-51213-46414 The SQLITE_CONFIG_GETMALLOC option takes a ** single argument which is a pointer to an instance of the ** sqlite3_mem_methods structure. The sqlite3_mem_methods structure is ** filled with the currently defined memory allocation routines. */ if( sqlite3GlobalConfig.m.xMalloc==0 ) sqlite3MemSetDefault(); *va_arg(ap, sqlite3_mem_methods*) = sqlite3GlobalConfig.m; break; } case SQLITE_CONFIG_MEMSTATUS: { /* EVIDENCE-OF: R-61275-35157 The SQLITE_CONFIG_MEMSTATUS option takes ** single argument of type int, interpreted as a boolean, which enables ** or disables the collection of memory allocation statistics. */ sqlite3GlobalConfig.bMemstat = va_arg(ap, int); break; } case SQLITE_CONFIG_SCRATCH: { /* EVIDENCE-OF: R-08404-60887 There are three arguments to ** SQLITE_CONFIG_SCRATCH: A pointer an 8-byte aligned memory buffer from ** which the scratch allocations will be drawn, the size of each scratch ** allocation (sz), and the maximum number of scratch allocations (N). */ sqlite3GlobalConfig.pScratch = va_arg(ap, void*); sqlite3GlobalConfig.szScratch = va_arg(ap, int); sqlite3GlobalConfig.nScratch = va_arg(ap, int); break; } case SQLITE_CONFIG_PAGECACHE: { /* EVIDENCE-OF: R-31408-40510 There are three arguments to ** SQLITE_CONFIG_PAGECACHE: A pointer to 8-byte aligned memory, the size ** of each page buffer (sz), and the number of pages (N). */ sqlite3GlobalConfig.pPage = va_arg(ap, void*); sqlite3GlobalConfig.szPage = va_arg(ap, int); sqlite3GlobalConfig.nPage = va_arg(ap, int); break; } case SQLITE_CONFIG_PCACHE_HDRSZ: { /* EVIDENCE-OF: R-39100-27317 The SQLITE_CONFIG_PCACHE_HDRSZ option takes ** a single parameter which is a pointer to an integer and writes into ** that integer the number of extra bytes per page required for each page ** in SQLITE_CONFIG_PAGECACHE. */ *va_arg(ap, int*) = sqlite3HeaderSizeBtree() + sqlite3HeaderSizePcache() + sqlite3HeaderSizePcache1(); break; } case SQLITE_CONFIG_PCACHE: { /* no-op */ break; } case SQLITE_CONFIG_GETPCACHE: { /* now an error */ rc = SQLITE_ERROR; break; } case SQLITE_CONFIG_PCACHE2: { /* EVIDENCE-OF: R-63325-48378 The SQLITE_CONFIG_PCACHE2 option takes a ** single argument which is a pointer to an sqlite3_pcache_methods2 ** object. This object specifies the interface to a custom page cache ** implementation. 
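**
** Sketch added by this editor; "myPcacheMethods" stands for a hypothetical,
** fully populated sqlite3_pcache_methods2 object:
**
**      sqlite3_pcache_methods2 saved;
**      sqlite3_config(SQLITE_CONFIG_GETPCACHE2, &saved);          (save current)
**      sqlite3_config(SQLITE_CONFIG_PCACHE2, &myPcacheMethods);   (install custom)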
*/ sqlite3GlobalConfig.pcache2 = *va_arg(ap, sqlite3_pcache_methods2*); break; } case SQLITE_CONFIG_GETPCACHE2: { /* EVIDENCE-OF: R-22035-46182 The SQLITE_CONFIG_GETPCACHE2 option takes a ** single argument which is a pointer to an sqlite3_pcache_methods2 ** object. SQLite copies of the current page cache implementation into ** that object. */ if( sqlite3GlobalConfig.pcache2.xInit==0 ){ sqlite3PCacheSetDefault(); } *va_arg(ap, sqlite3_pcache_methods2*) = sqlite3GlobalConfig.pcache2; break; } /* EVIDENCE-OF: R-06626-12911 The SQLITE_CONFIG_HEAP option is only ** available if SQLite is compiled with either SQLITE_ENABLE_MEMSYS3 or ** SQLITE_ENABLE_MEMSYS5 and returns SQLITE_ERROR if invoked otherwise. */ #if defined(SQLITE_ENABLE_MEMSYS3) || defined(SQLITE_ENABLE_MEMSYS5) case SQLITE_CONFIG_HEAP: { /* EVIDENCE-OF: R-19854-42126 There are three arguments to ** SQLITE_CONFIG_HEAP: An 8-byte aligned pointer to the memory, the ** number of bytes in the memory buffer, and the minimum allocation size. */ sqlite3GlobalConfig.pHeap = va_arg(ap, void*); sqlite3GlobalConfig.nHeap = va_arg(ap, int); sqlite3GlobalConfig.mnReq = va_arg(ap, int); if( sqlite3GlobalConfig.mnReq<1 ){ sqlite3GlobalConfig.mnReq = 1; }else if( sqlite3GlobalConfig.mnReq>(1<<12) ){ /* cap min request size at 2^12 */ sqlite3GlobalConfig.mnReq = (1<<12); } if( sqlite3GlobalConfig.pHeap==0 ){ /* EVIDENCE-OF: R-49920-60189 If the first pointer (the memory pointer) ** is NULL, then SQLite reverts to using its default memory allocator ** (the system malloc() implementation), undoing any prior invocation of ** SQLITE_CONFIG_MALLOC. ** ** Setting sqlite3GlobalConfig.m to all zeros will cause malloc to ** revert to its default implementation when sqlite3_initialize() is run */ memset(&sqlite3GlobalConfig.m, 0, sizeof(sqlite3GlobalConfig.m)); }else{ /* EVIDENCE-OF: R-61006-08918 If the memory pointer is not NULL then the ** alternative memory allocator is engaged to handle all of SQLites ** memory allocation needs. */ #ifdef SQLITE_ENABLE_MEMSYS3 sqlite3GlobalConfig.m = *sqlite3MemGetMemsys3(); #endif #ifdef SQLITE_ENABLE_MEMSYS5 sqlite3GlobalConfig.m = *sqlite3MemGetMemsys5(); #endif } break; } #endif case SQLITE_CONFIG_LOOKASIDE: { sqlite3GlobalConfig.szLookaside = va_arg(ap, int); sqlite3GlobalConfig.nLookaside = va_arg(ap, int); break; } /* Record a pointer to the logger function and its first argument. ** The default is NULL. Logging is disabled if the function pointer is ** NULL. */ case SQLITE_CONFIG_LOG: { /* MSVC is picky about pulling func ptrs from va lists. ** http://support.microsoft.com/kb/47961 ** sqlite3GlobalConfig.xLog = va_arg(ap, void(*)(void*,int,const char*)); */ typedef void(*LOGFUNC_t)(void*,int,const char*); sqlite3GlobalConfig.xLog = va_arg(ap, LOGFUNC_t); sqlite3GlobalConfig.pLogArg = va_arg(ap, void*); break; } /* EVIDENCE-OF: R-55548-33817 The compile-time setting for URI filenames ** can be changed at start-time using the ** sqlite3_config(SQLITE_CONFIG_URI,1) or ** sqlite3_config(SQLITE_CONFIG_URI,0) configuration calls. */ case SQLITE_CONFIG_URI: { /* EVIDENCE-OF: R-25451-61125 The SQLITE_CONFIG_URI option takes a single ** argument of type int. If non-zero, then URI handling is globally ** enabled. If the parameter is zero, then URI handling is globally ** disabled. 
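**
** For example (editor's sketch), enabling URI filenames at start-time lets a
** later sqlite3_open() accept strings such as "file:data.db?mode=ro":
**
**      sqlite3_config(SQLITE_CONFIG_URI, 1);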
*/ sqlite3GlobalConfig.bOpenUri = va_arg(ap, int); break; } case SQLITE_CONFIG_COVERING_INDEX_SCAN: { /* EVIDENCE-OF: R-36592-02772 The SQLITE_CONFIG_COVERING_INDEX_SCAN ** option takes a single integer argument which is interpreted as a ** boolean in order to enable or disable the use of covering indices for ** full table scans in the query optimizer. */ sqlite3GlobalConfig.bUseCis = va_arg(ap, int); break; } #ifdef SQLITE_ENABLE_SQLLOG case SQLITE_CONFIG_SQLLOG: { typedef void(*SQLLOGFUNC_t)(void*, sqlite3*, const char*, int); sqlite3GlobalConfig.xSqllog = va_arg(ap, SQLLOGFUNC_t); sqlite3GlobalConfig.pSqllogArg = va_arg(ap, void *); break; } #endif case SQLITE_CONFIG_MMAP_SIZE: { /* EVIDENCE-OF: R-58063-38258 SQLITE_CONFIG_MMAP_SIZE takes two 64-bit ** integer (sqlite3_int64) values that are the default mmap size limit ** (the default setting for PRAGMA mmap_size) and the maximum allowed ** mmap size limit. */ sqlite3_int64 szMmap = va_arg(ap, sqlite3_int64); sqlite3_int64 mxMmap = va_arg(ap, sqlite3_int64); /* EVIDENCE-OF: R-53367-43190 If either argument to this option is ** negative, then that argument is changed to its compile-time default. ** ** EVIDENCE-OF: R-34993-45031 The maximum allowed mmap size will be ** silently truncated if necessary so that it does not exceed the ** compile-time maximum mmap size set by the SQLITE_MAX_MMAP_SIZE ** compile-time option. */ if( mxMmap<0 || mxMmap>SQLITE_MAX_MMAP_SIZE ){ mxMmap = SQLITE_MAX_MMAP_SIZE; } if( szMmap<0 ) szMmap = SQLITE_DEFAULT_MMAP_SIZE; if( szMmap>mxMmap) szMmap = mxMmap; sqlite3GlobalConfig.mxMmap = mxMmap; sqlite3GlobalConfig.szMmap = szMmap; break; } #if SQLITE_OS_WIN && defined(SQLITE_WIN32_MALLOC) /* IMP: R-04780-55815 */ case SQLITE_CONFIG_WIN32_HEAPSIZE: { /* EVIDENCE-OF: R-34926-03360 SQLITE_CONFIG_WIN32_HEAPSIZE takes a 32-bit ** unsigned integer value that specifies the maximum size of the created ** heap. */ sqlite3GlobalConfig.nHeap = va_arg(ap, int); break; } #endif case SQLITE_CONFIG_PMASZ: { sqlite3GlobalConfig.szPma = va_arg(ap, unsigned int); break; } default: { rc = SQLITE_ERROR; break; } } va_end(ap); return rc; } /* ** Set up the lookaside buffers for a database connection. ** Return SQLITE_OK on success. ** If lookaside is already active, return SQLITE_BUSY. ** ** The sz parameter is the number of bytes in each lookaside slot. ** The cnt parameter is the number of slots. If pStart is NULL the ** space for the lookaside memory is obtained from sqlite3_malloc(). ** If pStart is not NULL then it is sz*cnt bytes of memory to use for ** the lookaside memory. */ static int setupLookaside(sqlite3 *db, void *pBuf, int sz, int cnt){ #ifndef SQLITE_OMIT_LOOKASIDE void *pStart; if( db->lookaside.nOut ){ return SQLITE_BUSY; } /* Free any existing lookaside buffer for this handle before ** allocating a new one so we don't have to have space for ** both at the same time. */ if( db->lookaside.bMalloced ){ sqlite3_free(db->lookaside.pStart); } /* The size of a lookaside slot after ROUNDDOWN8 needs to be larger ** than a pointer to be useful. 
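**
** Editor's sketch of the public call that reaches this helper; 256-byte
** slots and 500 slots are example values only, and passing a NULL buffer
** lets the library allocate the space itself:
**
**      sqlite3_db_config(db, SQLITE_DBCONFIG_LOOKASIDE, NULL, 256, 500);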
*/ sz = ROUNDDOWN8(sz); /* IMP: R-33038-09382 */ if( sz<=(int)sizeof(LookasideSlot*) ) sz = 0; if( cnt<0 ) cnt = 0; if( sz==0 || cnt==0 ){ sz = 0; pStart = 0; }else if( pBuf==0 ){ sqlite3BeginBenignMalloc(); pStart = sqlite3Malloc( sz*cnt ); /* IMP: R-61949-35727 */ sqlite3EndBenignMalloc(); if( pStart ) cnt = sqlite3MallocSize(pStart)/sz; }else{ pStart = pBuf; } db->lookaside.pStart = pStart; db->lookaside.pFree = 0; db->lookaside.sz = (u16)sz; if( pStart ){ int i; LookasideSlot *p; assert( sz > (int)sizeof(LookasideSlot*) ); p = (LookasideSlot*)pStart; for(i=cnt-1; i>=0; i--){ p->pNext = db->lookaside.pFree; db->lookaside.pFree = p; p = (LookasideSlot*)&((u8*)p)[sz]; } db->lookaside.pEnd = p; db->lookaside.bEnabled = 1; db->lookaside.bMalloced = pBuf==0 ?1:0; }else{ db->lookaside.pStart = db; db->lookaside.pEnd = db; db->lookaside.bEnabled = 0; db->lookaside.bMalloced = 0; } #endif /* SQLITE_OMIT_LOOKASIDE */ return SQLITE_OK; } /* ** Return the mutex associated with a database connection. */ SQLITE_API sqlite3_mutex *SQLITE_STDCALL sqlite3_db_mutex(sqlite3 *db){ #ifdef SQLITE_ENABLE_API_ARMOR if( !sqlite3SafetyCheckOk(db) ){ (void)SQLITE_MISUSE_BKPT; return 0; } #endif return db->mutex; } /* ** Free up as much memory as we can from the given database ** connection. */ SQLITE_API int SQLITE_STDCALL sqlite3_db_release_memory(sqlite3 *db){ int i; #ifdef SQLITE_ENABLE_API_ARMOR if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT; #endif sqlite3_mutex_enter(db->mutex); sqlite3BtreeEnterAll(db); for(i=0; inDb; i++){ Btree *pBt = db->aDb[i].pBt; if( pBt ){ Pager *pPager = sqlite3BtreePager(pBt); sqlite3PagerShrink(pPager); } } sqlite3BtreeLeaveAll(db); sqlite3_mutex_leave(db->mutex); return SQLITE_OK; } /* ** Configuration settings for an individual database connection */ SQLITE_API int SQLITE_CDECL sqlite3_db_config(sqlite3 *db, int op, ...){ va_list ap; int rc; va_start(ap, op); switch( op ){ case SQLITE_DBCONFIG_LOOKASIDE: { void *pBuf = va_arg(ap, void*); /* IMP: R-26835-10964 */ int sz = va_arg(ap, int); /* IMP: R-47871-25994 */ int cnt = va_arg(ap, int); /* IMP: R-04460-53386 */ rc = setupLookaside(db, pBuf, sz, cnt); break; } default: { static const struct { int op; /* The opcode */ u32 mask; /* Mask of the bit in sqlite3.flags to set/clear */ } aFlagOp[] = { { SQLITE_DBCONFIG_ENABLE_FKEY, SQLITE_ForeignKeys }, { SQLITE_DBCONFIG_ENABLE_TRIGGER, SQLITE_EnableTrigger }, }; unsigned int i; rc = SQLITE_ERROR; /* IMP: R-42790-23372 */ for(i=0; iflags; if( onoff>0 ){ db->flags |= aFlagOp[i].mask; }else if( onoff==0 ){ db->flags &= ~aFlagOp[i].mask; } if( oldFlags!=db->flags ){ sqlite3ExpirePreparedStatements(db); } if( pRes ){ *pRes = (db->flags & aFlagOp[i].mask)!=0; } rc = SQLITE_OK; break; } } break; } } va_end(ap); return rc; } /* ** Return true if the buffer z[0..n-1] contains all spaces. */ static int allSpaces(const char *z, int n){ while( n>0 && z[n-1]==' ' ){ n--; } return n==0; } /* ** This is the default collating function named "BINARY" which is always ** available. ** ** If the padFlag argument is not NULL then space padding at the end ** of strings is ignored. This implements the RTRIM collation. */ static int binCollFunc( void *padFlag, int nKey1, const void *pKey1, int nKey2, const void *pKey2 ){ int rc, n; n = nKey1lastRowid; } /* ** Return the number of changes in the most recent call to sqlite3_exec(). 
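**
** Illustrative use (editor's sketch; "db" is an open handle and the UPDATE
** statement is a placeholder):
**
**      sqlite3_exec(db, "UPDATE t SET x=0;", 0, 0, 0);
**      printf("%d rows changed\n", sqlite3_changes(db));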
*/ SQLITE_API int SQLITE_STDCALL sqlite3_changes(sqlite3 *db){ #ifdef SQLITE_ENABLE_API_ARMOR if( !sqlite3SafetyCheckOk(db) ){ (void)SQLITE_MISUSE_BKPT; return 0; } #endif return db->nChange; } /* ** Return the number of changes since the database handle was opened. */ SQLITE_API int SQLITE_STDCALL sqlite3_total_changes(sqlite3 *db){ #ifdef SQLITE_ENABLE_API_ARMOR if( !sqlite3SafetyCheckOk(db) ){ (void)SQLITE_MISUSE_BKPT; return 0; } #endif return db->nTotalChange; } /* ** Close all open savepoints. This function only manipulates fields of the ** database handle object, it does not close any savepoints that may be open ** at the b-tree/pager level. */ SQLITE_PRIVATE void sqlite3CloseSavepoints(sqlite3 *db){ while( db->pSavepoint ){ Savepoint *pTmp = db->pSavepoint; db->pSavepoint = pTmp->pNext; sqlite3DbFree(db, pTmp); } db->nSavepoint = 0; db->nStatement = 0; db->isTransactionSavepoint = 0; } /* ** Invoke the destructor function associated with FuncDef p, if any. Except, ** if this is not the last copy of the function, do not invoke it. Multiple ** copies of a single function are created when create_function() is called ** with SQLITE_ANY as the encoding. */ static void functionDestroy(sqlite3 *db, FuncDef *p){ FuncDestructor *pDestructor = p->pDestructor; if( pDestructor ){ pDestructor->nRef--; if( pDestructor->nRef==0 ){ pDestructor->xDestroy(pDestructor->pUserData); sqlite3DbFree(db, pDestructor); } } } /* ** Disconnect all sqlite3_vtab objects that belong to database connection ** db. This is called when db is being closed. */ static void disconnectAllVtab(sqlite3 *db){ #ifndef SQLITE_OMIT_VIRTUALTABLE int i; sqlite3BtreeEnterAll(db); for(i=0; inDb; i++){ Schema *pSchema = db->aDb[i].pSchema; if( db->aDb[i].pSchema ){ HashElem *p; for(p=sqliteHashFirst(&pSchema->tblHash); p; p=sqliteHashNext(p)){ Table *pTab = (Table *)sqliteHashData(p); if( IsVirtual(pTab) ) sqlite3VtabDisconnect(db, pTab); } } } sqlite3VtabUnlockList(db); sqlite3BtreeLeaveAll(db); #else UNUSED_PARAMETER(db); #endif } /* ** Return TRUE if database connection db has unfinalized prepared ** statements or unfinished sqlite3_backup objects. */ static int connectionIsBusy(sqlite3 *db){ int j; assert( sqlite3_mutex_held(db->mutex) ); if( db->pVdbe ) return 1; for(j=0; jnDb; j++){ Btree *pBt = db->aDb[j].pBt; if( pBt && sqlite3BtreeIsInBackup(pBt) ) return 1; } return 0; } /* ** Close an existing SQLite database */ static int sqlite3Close(sqlite3 *db, int forceZombie){ if( !db ){ /* EVIDENCE-OF: R-63257-11740 Calling sqlite3_close() or ** sqlite3_close_v2() with a NULL pointer argument is a harmless no-op. */ return SQLITE_OK; } if( !sqlite3SafetyCheckSickOrOk(db) ){ return SQLITE_MISUSE_BKPT; } sqlite3_mutex_enter(db->mutex); /* Force xDisconnect calls on all virtual tables */ disconnectAllVtab(db); /* If a transaction is open, the disconnectAllVtab() call above ** will not have called the xDisconnect() method on any virtual ** tables in the db->aVTrans[] array. The following sqlite3VtabRollback() ** call will do so. We need to do this before the check for active ** SQL statements below, as the v-table implementation may be storing ** some prepared statements internally. */ sqlite3VtabRollback(db); /* Legacy behavior (sqlite3_close() behavior) is to return ** SQLITE_BUSY if the connection can not be closed immediately. 
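**
** Editor's sketch of the two public behaviours built on this helper:
**
**      rc = sqlite3_close(db);      SQLITE_BUSY while statements are pending
**      rc = sqlite3_close_v2(db);   returns SQLITE_OK; the handle is freed
**                                   once the last statement or backup closes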
*/ if( !forceZombie && connectionIsBusy(db) ){ sqlite3ErrorWithMsg(db, SQLITE_BUSY, "unable to close due to unfinalized " "statements or unfinished backups"); sqlite3_mutex_leave(db->mutex); return SQLITE_BUSY; } #ifdef SQLITE_ENABLE_SQLLOG if( sqlite3GlobalConfig.xSqllog ){ /* Closing the handle. Fourth parameter is passed the value 2. */ sqlite3GlobalConfig.xSqllog(sqlite3GlobalConfig.pSqllogArg, db, 0, 2); } #endif /* Convert the connection into a zombie and then close it. */ db->magic = SQLITE_MAGIC_ZOMBIE; sqlite3LeaveMutexAndCloseZombie(db); return SQLITE_OK; } /* ** Two variations on the public interface for closing a database ** connection. The sqlite3_close() version returns SQLITE_BUSY and ** leaves the connection option if there are unfinalized prepared ** statements or unfinished sqlite3_backups. The sqlite3_close_v2() ** version forces the connection to become a zombie if there are ** unclosed resources, and arranges for deallocation when the last ** prepare statement or sqlite3_backup closes. */ SQLITE_API int SQLITE_STDCALL sqlite3_close(sqlite3 *db){ return sqlite3Close(db,0); } SQLITE_API int SQLITE_STDCALL sqlite3_close_v2(sqlite3 *db){ return sqlite3Close(db,1); } /* ** Close the mutex on database connection db. ** ** Furthermore, if database connection db is a zombie (meaning that there ** has been a prior call to sqlite3_close(db) or sqlite3_close_v2(db)) and ** every sqlite3_stmt has now been finalized and every sqlite3_backup has ** finished, then free all resources. */ SQLITE_PRIVATE void sqlite3LeaveMutexAndCloseZombie(sqlite3 *db){ HashElem *i; /* Hash table iterator */ int j; /* If there are outstanding sqlite3_stmt or sqlite3_backup objects ** or if the connection has not yet been closed by sqlite3_close_v2(), ** then just leave the mutex and return. */ if( db->magic!=SQLITE_MAGIC_ZOMBIE || connectionIsBusy(db) ){ sqlite3_mutex_leave(db->mutex); return; } /* If we reach this point, it means that the database connection has ** closed all sqlite3_stmt and sqlite3_backup objects and has been ** passed to sqlite3_close (meaning that it is a zombie). Therefore, ** go ahead and free all resources. */ /* If a transaction is open, roll it back. This also ensures that if ** any database schemas have been modified by an uncommitted transaction ** they are reset. And that the required b-tree mutex is held to make ** the pager rollback and schema reset an atomic operation. */ sqlite3RollbackAll(db, SQLITE_OK); /* Free any outstanding Savepoint structures. */ sqlite3CloseSavepoints(db); /* Close all database connections */ for(j=0; jnDb; j++){ struct Db *pDb = &db->aDb[j]; if( pDb->pBt ){ sqlite3BtreeClose(pDb->pBt); pDb->pBt = 0; if( j!=1 ){ pDb->pSchema = 0; } } } /* Clear the TEMP schema separately and last */ if( db->aDb[1].pSchema ){ sqlite3SchemaClear(db->aDb[1].pSchema); } sqlite3VtabUnlockList(db); /* Free up the array of auxiliary databases */ sqlite3CollapseDatabaseArray(db); assert( db->nDb<=2 ); assert( db->aDb==db->aDbStatic ); /* Tell the code in notify.c that the connection no longer holds any ** locks and does not require any further unlock-notify callbacks. 
*/ sqlite3ConnectionClosed(db); for(j=0; jaFunc.a); j++){ FuncDef *pNext, *pHash, *p; for(p=db->aFunc.a[j]; p; p=pHash){ pHash = p->pHash; while( p ){ functionDestroy(db, p); pNext = p->pNext; sqlite3DbFree(db, p); p = pNext; } } } for(i=sqliteHashFirst(&db->aCollSeq); i; i=sqliteHashNext(i)){ CollSeq *pColl = (CollSeq *)sqliteHashData(i); /* Invoke any destructors registered for collation sequence user data. */ for(j=0; j<3; j++){ if( pColl[j].xDel ){ pColl[j].xDel(pColl[j].pUser); } } sqlite3DbFree(db, pColl); } sqlite3HashClear(&db->aCollSeq); #ifndef SQLITE_OMIT_VIRTUALTABLE for(i=sqliteHashFirst(&db->aModule); i; i=sqliteHashNext(i)){ Module *pMod = (Module *)sqliteHashData(i); if( pMod->xDestroy ){ pMod->xDestroy(pMod->pAux); } sqlite3DbFree(db, pMod); } sqlite3HashClear(&db->aModule); #endif sqlite3Error(db, SQLITE_OK); /* Deallocates any cached error strings. */ sqlite3ValueFree(db->pErr); sqlite3CloseExtensions(db); #if SQLITE_USER_AUTHENTICATION sqlite3_free(db->auth.zAuthUser); sqlite3_free(db->auth.zAuthPW); #endif db->magic = SQLITE_MAGIC_ERROR; /* The temp-database schema is allocated differently from the other schema ** objects (using sqliteMalloc() directly, instead of sqlite3BtreeSchema()). ** So it needs to be freed here. Todo: Why not roll the temp schema into ** the same sqliteMalloc() as the one that allocates the database ** structure? */ sqlite3DbFree(db, db->aDb[1].pSchema); sqlite3_mutex_leave(db->mutex); db->magic = SQLITE_MAGIC_CLOSED; sqlite3_mutex_free(db->mutex); assert( db->lookaside.nOut==0 ); /* Fails on a lookaside memory leak */ if( db->lookaside.bMalloced ){ sqlite3_free(db->lookaside.pStart); } sqlite3_free(db); } /* ** Rollback all database files. If tripCode is not SQLITE_OK, then ** any write cursors are invalidated ("tripped" - as in "tripping a circuit ** breaker") and made to return tripCode if there are any further ** attempts to use that cursor. Read cursors remain open and valid ** but are "saved" in case the table pages are moved around. */ SQLITE_PRIVATE void sqlite3RollbackAll(sqlite3 *db, int tripCode){ int i; int inTrans = 0; int schemaChange; assert( sqlite3_mutex_held(db->mutex) ); sqlite3BeginBenignMalloc(); /* Obtain all b-tree mutexes before making any calls to BtreeRollback(). ** This is important in case the transaction being rolled back has ** modified the database schema. If the b-tree mutexes are not taken ** here, then another shared-cache connection might sneak in between ** the database rollback and schema reset, which can cause false ** corruption reports in some cases. */ sqlite3BtreeEnterAll(db); schemaChange = (db->flags & SQLITE_InternChanges)!=0 && db->init.busy==0; for(i=0; inDb; i++){ Btree *p = db->aDb[i].pBt; if( p ){ if( sqlite3BtreeIsInTrans(p) ){ inTrans = 1; } sqlite3BtreeRollback(p, tripCode, !schemaChange); } } sqlite3VtabRollback(db); sqlite3EndBenignMalloc(); if( (db->flags&SQLITE_InternChanges)!=0 && db->init.busy==0 ){ sqlite3ExpirePreparedStatements(db); sqlite3ResetAllSchemasOfConnection(db); } sqlite3BtreeLeaveAll(db); /* Any deferred constraint violations have now been resolved. */ db->nDeferredCons = 0; db->nDeferredImmCons = 0; db->flags &= ~SQLITE_DeferFKs; /* If one has been configured, invoke the rollback-hook callback */ if( db->xRollbackCallback && (inTrans || !db->autoCommit) ){ db->xRollbackCallback(db->pRollbackArg); } } /* ** Return a static string containing the name corresponding to the error code ** specified in the argument. 
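**
** Worked example added by this editor: in builds that define
** SQLITE_NEED_ERR_NAME, sqlite3ErrName(SQLITE_BUSY) yields "SQLITE_BUSY",
** and an unrecognised code comes back formatted as "SQLITE_UNKNOWN(n)".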
*/ #if defined(SQLITE_NEED_ERR_NAME) SQLITE_PRIVATE const char *sqlite3ErrName(int rc){ const char *zName = 0; int i, origRc = rc; for(i=0; i<2 && zName==0; i++, rc &= 0xff){ switch( rc ){ case SQLITE_OK: zName = "SQLITE_OK"; break; case SQLITE_ERROR: zName = "SQLITE_ERROR"; break; case SQLITE_INTERNAL: zName = "SQLITE_INTERNAL"; break; case SQLITE_PERM: zName = "SQLITE_PERM"; break; case SQLITE_ABORT: zName = "SQLITE_ABORT"; break; case SQLITE_ABORT_ROLLBACK: zName = "SQLITE_ABORT_ROLLBACK"; break; case SQLITE_BUSY: zName = "SQLITE_BUSY"; break; case SQLITE_BUSY_RECOVERY: zName = "SQLITE_BUSY_RECOVERY"; break; case SQLITE_BUSY_SNAPSHOT: zName = "SQLITE_BUSY_SNAPSHOT"; break; case SQLITE_LOCKED: zName = "SQLITE_LOCKED"; break; case SQLITE_LOCKED_SHAREDCACHE: zName = "SQLITE_LOCKED_SHAREDCACHE";break; case SQLITE_NOMEM: zName = "SQLITE_NOMEM"; break; case SQLITE_READONLY: zName = "SQLITE_READONLY"; break; case SQLITE_READONLY_RECOVERY: zName = "SQLITE_READONLY_RECOVERY"; break; case SQLITE_READONLY_CANTLOCK: zName = "SQLITE_READONLY_CANTLOCK"; break; case SQLITE_READONLY_ROLLBACK: zName = "SQLITE_READONLY_ROLLBACK"; break; case SQLITE_READONLY_DBMOVED: zName = "SQLITE_READONLY_DBMOVED"; break; case SQLITE_INTERRUPT: zName = "SQLITE_INTERRUPT"; break; case SQLITE_IOERR: zName = "SQLITE_IOERR"; break; case SQLITE_IOERR_READ: zName = "SQLITE_IOERR_READ"; break; case SQLITE_IOERR_SHORT_READ: zName = "SQLITE_IOERR_SHORT_READ"; break; case SQLITE_IOERR_WRITE: zName = "SQLITE_IOERR_WRITE"; break; case SQLITE_IOERR_FSYNC: zName = "SQLITE_IOERR_FSYNC"; break; case SQLITE_IOERR_DIR_FSYNC: zName = "SQLITE_IOERR_DIR_FSYNC"; break; case SQLITE_IOERR_TRUNCATE: zName = "SQLITE_IOERR_TRUNCATE"; break; case SQLITE_IOERR_FSTAT: zName = "SQLITE_IOERR_FSTAT"; break; case SQLITE_IOERR_UNLOCK: zName = "SQLITE_IOERR_UNLOCK"; break; case SQLITE_IOERR_RDLOCK: zName = "SQLITE_IOERR_RDLOCK"; break; case SQLITE_IOERR_DELETE: zName = "SQLITE_IOERR_DELETE"; break; case SQLITE_IOERR_NOMEM: zName = "SQLITE_IOERR_NOMEM"; break; case SQLITE_IOERR_ACCESS: zName = "SQLITE_IOERR_ACCESS"; break; case SQLITE_IOERR_CHECKRESERVEDLOCK: zName = "SQLITE_IOERR_CHECKRESERVEDLOCK"; break; case SQLITE_IOERR_LOCK: zName = "SQLITE_IOERR_LOCK"; break; case SQLITE_IOERR_CLOSE: zName = "SQLITE_IOERR_CLOSE"; break; case SQLITE_IOERR_DIR_CLOSE: zName = "SQLITE_IOERR_DIR_CLOSE"; break; case SQLITE_IOERR_SHMOPEN: zName = "SQLITE_IOERR_SHMOPEN"; break; case SQLITE_IOERR_SHMSIZE: zName = "SQLITE_IOERR_SHMSIZE"; break; case SQLITE_IOERR_SHMLOCK: zName = "SQLITE_IOERR_SHMLOCK"; break; case SQLITE_IOERR_SHMMAP: zName = "SQLITE_IOERR_SHMMAP"; break; case SQLITE_IOERR_SEEK: zName = "SQLITE_IOERR_SEEK"; break; case SQLITE_IOERR_DELETE_NOENT: zName = "SQLITE_IOERR_DELETE_NOENT";break; case SQLITE_IOERR_MMAP: zName = "SQLITE_IOERR_MMAP"; break; case SQLITE_IOERR_GETTEMPPATH: zName = "SQLITE_IOERR_GETTEMPPATH"; break; case SQLITE_IOERR_CONVPATH: zName = "SQLITE_IOERR_CONVPATH"; break; case SQLITE_CORRUPT: zName = "SQLITE_CORRUPT"; break; case SQLITE_CORRUPT_VTAB: zName = "SQLITE_CORRUPT_VTAB"; break; case SQLITE_NOTFOUND: zName = "SQLITE_NOTFOUND"; break; case SQLITE_FULL: zName = "SQLITE_FULL"; break; case SQLITE_CANTOPEN: zName = "SQLITE_CANTOPEN"; break; case SQLITE_CANTOPEN_NOTEMPDIR: zName = "SQLITE_CANTOPEN_NOTEMPDIR";break; case SQLITE_CANTOPEN_ISDIR: zName = "SQLITE_CANTOPEN_ISDIR"; break; case SQLITE_CANTOPEN_FULLPATH: zName = "SQLITE_CANTOPEN_FULLPATH"; break; case SQLITE_CANTOPEN_CONVPATH: zName = "SQLITE_CANTOPEN_CONVPATH"; break; case 
SQLITE_PROTOCOL: zName = "SQLITE_PROTOCOL"; break; case SQLITE_EMPTY: zName = "SQLITE_EMPTY"; break; case SQLITE_SCHEMA: zName = "SQLITE_SCHEMA"; break; case SQLITE_TOOBIG: zName = "SQLITE_TOOBIG"; break; case SQLITE_CONSTRAINT: zName = "SQLITE_CONSTRAINT"; break; case SQLITE_CONSTRAINT_UNIQUE: zName = "SQLITE_CONSTRAINT_UNIQUE"; break; case SQLITE_CONSTRAINT_TRIGGER: zName = "SQLITE_CONSTRAINT_TRIGGER";break; case SQLITE_CONSTRAINT_FOREIGNKEY: zName = "SQLITE_CONSTRAINT_FOREIGNKEY"; break; case SQLITE_CONSTRAINT_CHECK: zName = "SQLITE_CONSTRAINT_CHECK"; break; case SQLITE_CONSTRAINT_PRIMARYKEY: zName = "SQLITE_CONSTRAINT_PRIMARYKEY"; break; case SQLITE_CONSTRAINT_NOTNULL: zName = "SQLITE_CONSTRAINT_NOTNULL";break; case SQLITE_CONSTRAINT_COMMITHOOK: zName = "SQLITE_CONSTRAINT_COMMITHOOK"; break; case SQLITE_CONSTRAINT_VTAB: zName = "SQLITE_CONSTRAINT_VTAB"; break; case SQLITE_CONSTRAINT_FUNCTION: zName = "SQLITE_CONSTRAINT_FUNCTION"; break; case SQLITE_CONSTRAINT_ROWID: zName = "SQLITE_CONSTRAINT_ROWID"; break; case SQLITE_MISMATCH: zName = "SQLITE_MISMATCH"; break; case SQLITE_MISUSE: zName = "SQLITE_MISUSE"; break; case SQLITE_NOLFS: zName = "SQLITE_NOLFS"; break; case SQLITE_AUTH: zName = "SQLITE_AUTH"; break; case SQLITE_FORMAT: zName = "SQLITE_FORMAT"; break; case SQLITE_RANGE: zName = "SQLITE_RANGE"; break; case SQLITE_NOTADB: zName = "SQLITE_NOTADB"; break; case SQLITE_ROW: zName = "SQLITE_ROW"; break; case SQLITE_NOTICE: zName = "SQLITE_NOTICE"; break; case SQLITE_NOTICE_RECOVER_WAL: zName = "SQLITE_NOTICE_RECOVER_WAL";break; case SQLITE_NOTICE_RECOVER_ROLLBACK: zName = "SQLITE_NOTICE_RECOVER_ROLLBACK"; break; case SQLITE_WARNING: zName = "SQLITE_WARNING"; break; case SQLITE_WARNING_AUTOINDEX: zName = "SQLITE_WARNING_AUTOINDEX"; break; case SQLITE_DONE: zName = "SQLITE_DONE"; break; } } if( zName==0 ){ static char zBuf[50]; sqlite3_snprintf(sizeof(zBuf), zBuf, "SQLITE_UNKNOWN(%d)", origRc); zName = zBuf; } return zName; } #endif /* ** Return a static string that describes the kind of error specified in the ** argument. 
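**
** Worked example (editor's note): sqlite3ErrStr(SQLITE_BUSY) returns the
** constant string "database is locked"; applications reach the same mapping
** through the public sqlite3_errstr() wrapper defined later in this file.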
*/ SQLITE_PRIVATE const char *sqlite3ErrStr(int rc){ static const char* const aMsg[] = { /* SQLITE_OK */ "not an error", /* SQLITE_ERROR */ "SQL logic error or missing database", /* SQLITE_INTERNAL */ 0, /* SQLITE_PERM */ "access permission denied", /* SQLITE_ABORT */ "callback requested query abort", /* SQLITE_BUSY */ "database is locked", /* SQLITE_LOCKED */ "database table is locked", /* SQLITE_NOMEM */ "out of memory", /* SQLITE_READONLY */ "attempt to write a readonly database", /* SQLITE_INTERRUPT */ "interrupted", /* SQLITE_IOERR */ "disk I/O error", /* SQLITE_CORRUPT */ "database disk image is malformed", /* SQLITE_NOTFOUND */ "unknown operation", /* SQLITE_FULL */ "database or disk is full", /* SQLITE_CANTOPEN */ "unable to open database file", /* SQLITE_PROTOCOL */ "locking protocol", /* SQLITE_EMPTY */ "table contains no data", /* SQLITE_SCHEMA */ "database schema has changed", /* SQLITE_TOOBIG */ "string or blob too big", /* SQLITE_CONSTRAINT */ "constraint failed", /* SQLITE_MISMATCH */ "datatype mismatch", /* SQLITE_MISUSE */ "library routine called out of sequence", /* SQLITE_NOLFS */ "large file support is disabled", /* SQLITE_AUTH */ "authorization denied", /* SQLITE_FORMAT */ "auxiliary database format error", /* SQLITE_RANGE */ "bind or column index out of range", /* SQLITE_NOTADB */ "file is encrypted or is not a database", }; const char *zErr = "unknown error"; switch( rc ){ case SQLITE_ABORT_ROLLBACK: { zErr = "abort due to ROLLBACK"; break; } default: { rc &= 0xff; if( ALWAYS(rc>=0) && rcbusyTimeout; int delay, prior; assert( count>=0 ); if( count < NDELAY ){ delay = delays[count]; prior = totals[count]; }else{ delay = delays[NDELAY-1]; prior = totals[NDELAY-1] + delay*(count-(NDELAY-1)); } if( prior + delay > timeout ){ delay = timeout - prior; if( delay<=0 ) return 0; } sqlite3OsSleep(db->pVfs, delay*1000); return 1; #else sqlite3 *db = (sqlite3 *)ptr; int timeout = ((sqlite3 *)ptr)->busyTimeout; if( (count+1)*1000 > timeout ){ return 0; } sqlite3OsSleep(db->pVfs, 1000000); return 1; #endif } /* ** Invoke the given busy handler. ** ** This routine is called when an operation failed with a lock. ** If this routine returns non-zero, the lock is retried. If it ** returns 0, the operation aborts with an SQLITE_BUSY error. */ SQLITE_PRIVATE int sqlite3InvokeBusyHandler(BusyHandler *p){ int rc; if( NEVER(p==0) || p->xFunc==0 || p->nBusy<0 ) return 0; rc = p->xFunc(p->pArg, p->nBusy); if( rc==0 ){ p->nBusy = -1; }else{ p->nBusy++; } return rc; } /* ** This routine sets the busy callback for an Sqlite database to the ** given callback function with the given argument. */ SQLITE_API int SQLITE_STDCALL sqlite3_busy_handler( sqlite3 *db, int (*xBusy)(void*,int), void *pArg ){ #ifdef SQLITE_ENABLE_API_ARMOR if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT; #endif sqlite3_mutex_enter(db->mutex); db->busyHandler.xFunc = xBusy; db->busyHandler.pArg = pArg; db->busyHandler.nBusy = 0; db->busyTimeout = 0; sqlite3_mutex_leave(db->mutex); return SQLITE_OK; } #ifndef SQLITE_OMIT_PROGRESS_CALLBACK /* ** This routine sets the progress callback for an Sqlite database to the ** given callback function with the given argument. The progress callback will ** be invoked every nOps opcodes. 
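**
** Illustrative registration (editor's sketch; "cancelCheck" is a hypothetical
** callback that returns non-zero to interrupt the running statement):
**
**      sqlite3_progress_handler(db, 1000, cancelCheck, pAppData);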
*/ SQLITE_API void SQLITE_STDCALL sqlite3_progress_handler( sqlite3 *db, int nOps, int (*xProgress)(void*), void *pArg ){ #ifdef SQLITE_ENABLE_API_ARMOR if( !sqlite3SafetyCheckOk(db) ){ (void)SQLITE_MISUSE_BKPT; return; } #endif sqlite3_mutex_enter(db->mutex); if( nOps>0 ){ db->xProgress = xProgress; db->nProgressOps = (unsigned)nOps; db->pProgressArg = pArg; }else{ db->xProgress = 0; db->nProgressOps = 0; db->pProgressArg = 0; } sqlite3_mutex_leave(db->mutex); } #endif /* ** This routine installs a default busy handler that waits for the ** specified number of milliseconds before returning 0. */ SQLITE_API int SQLITE_STDCALL sqlite3_busy_timeout(sqlite3 *db, int ms){ #ifdef SQLITE_ENABLE_API_ARMOR if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT; #endif if( ms>0 ){ sqlite3_busy_handler(db, sqliteDefaultBusyCallback, (void*)db); db->busyTimeout = ms; }else{ sqlite3_busy_handler(db, 0, 0); } return SQLITE_OK; } /* ** Cause any pending operation to stop at its earliest opportunity. */ SQLITE_API void SQLITE_STDCALL sqlite3_interrupt(sqlite3 *db){ #ifdef SQLITE_ENABLE_API_ARMOR if( !sqlite3SafetyCheckOk(db) ){ (void)SQLITE_MISUSE_BKPT; return; } #endif db->u1.isInterrupted = 1; } /* ** This function is exactly the same as sqlite3_create_function(), except ** that it is designed to be called by internal code. The difference is ** that if a malloc() fails in sqlite3_create_function(), an error code ** is returned and the mallocFailed flag cleared. */ SQLITE_PRIVATE int sqlite3CreateFunc( sqlite3 *db, const char *zFunctionName, int nArg, int enc, void *pUserData, void (*xFunc)(sqlite3_context*,int,sqlite3_value **), void (*xStep)(sqlite3_context*,int,sqlite3_value **), void (*xFinal)(sqlite3_context*), FuncDestructor *pDestructor ){ FuncDef *p; int nName; int extraFlags; assert( sqlite3_mutex_held(db->mutex) ); if( zFunctionName==0 || (xFunc && (xFinal || xStep)) || (!xFunc && (xFinal && !xStep)) || (!xFunc && (!xFinal && xStep)) || (nArg<-1 || nArg>SQLITE_MAX_FUNCTION_ARG) || (255<(nName = sqlite3Strlen30( zFunctionName))) ){ return SQLITE_MISUSE_BKPT; } assert( SQLITE_FUNC_CONSTANT==SQLITE_DETERMINISTIC ); extraFlags = enc & SQLITE_DETERMINISTIC; enc &= (SQLITE_FUNC_ENCMASK|SQLITE_ANY); #ifndef SQLITE_OMIT_UTF16 /* If SQLITE_UTF16 is specified as the encoding type, transform this ** to one of SQLITE_UTF16LE or SQLITE_UTF16BE using the ** SQLITE_UTF16NATIVE macro. SQLITE_UTF16 is not used internally. ** ** If SQLITE_ANY is specified, add three versions of the function ** to the hash table. */ if( enc==SQLITE_UTF16 ){ enc = SQLITE_UTF16NATIVE; }else if( enc==SQLITE_ANY ){ int rc; rc = sqlite3CreateFunc(db, zFunctionName, nArg, SQLITE_UTF8|extraFlags, pUserData, xFunc, xStep, xFinal, pDestructor); if( rc==SQLITE_OK ){ rc = sqlite3CreateFunc(db, zFunctionName, nArg, SQLITE_UTF16LE|extraFlags, pUserData, xFunc, xStep, xFinal, pDestructor); } if( rc!=SQLITE_OK ){ return rc; } enc = SQLITE_UTF16BE; } #else enc = SQLITE_UTF8; #endif /* Check if an existing function is being overridden or deleted. If so, ** and there are active VMs, then return SQLITE_BUSY. If a function ** is being overridden/deleted but there are no active VMs, allow the ** operation to continue but invalidate all precompiled statements. 
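**
** Editor's sketch of the public wrapper that reaches this code; "halfFunc"
** is a hypothetical one-argument scalar implementation:
**
**      sqlite3_create_function(db, "half", 1,
**                              SQLITE_UTF8 | SQLITE_DETERMINISTIC,
**                              0, halfFunc, 0, 0);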
*/ p = sqlite3FindFunction(db, zFunctionName, nName, nArg, (u8)enc, 0); if( p && (p->funcFlags & SQLITE_FUNC_ENCMASK)==enc && p->nArg==nArg ){ if( db->nVdbeActive ){ sqlite3ErrorWithMsg(db, SQLITE_BUSY, "unable to delete/modify user-function due to active statements"); assert( !db->mallocFailed ); return SQLITE_BUSY; }else{ sqlite3ExpirePreparedStatements(db); } } p = sqlite3FindFunction(db, zFunctionName, nName, nArg, (u8)enc, 1); assert(p || db->mallocFailed); if( !p ){ return SQLITE_NOMEM; } /* If an older version of the function with a configured destructor is ** being replaced invoke the destructor function here. */ functionDestroy(db, p); if( pDestructor ){ pDestructor->nRef++; } p->pDestructor = pDestructor; p->funcFlags = (p->funcFlags & SQLITE_FUNC_ENCMASK) | extraFlags; testcase( p->funcFlags & SQLITE_DETERMINISTIC ); p->xFunc = xFunc; p->xStep = xStep; p->xFinalize = xFinal; p->pUserData = pUserData; p->nArg = (u16)nArg; return SQLITE_OK; } /* ** Create new user functions. */ SQLITE_API int SQLITE_STDCALL sqlite3_create_function( sqlite3 *db, const char *zFunc, int nArg, int enc, void *p, void (*xFunc)(sqlite3_context*,int,sqlite3_value **), void (*xStep)(sqlite3_context*,int,sqlite3_value **), void (*xFinal)(sqlite3_context*) ){ return sqlite3_create_function_v2(db, zFunc, nArg, enc, p, xFunc, xStep, xFinal, 0); } SQLITE_API int SQLITE_STDCALL sqlite3_create_function_v2( sqlite3 *db, const char *zFunc, int nArg, int enc, void *p, void (*xFunc)(sqlite3_context*,int,sqlite3_value **), void (*xStep)(sqlite3_context*,int,sqlite3_value **), void (*xFinal)(sqlite3_context*), void (*xDestroy)(void *) ){ int rc = SQLITE_ERROR; FuncDestructor *pArg = 0; #ifdef SQLITE_ENABLE_API_ARMOR if( !sqlite3SafetyCheckOk(db) ){ return SQLITE_MISUSE_BKPT; } #endif sqlite3_mutex_enter(db->mutex); if( xDestroy ){ pArg = (FuncDestructor *)sqlite3DbMallocZero(db, sizeof(FuncDestructor)); if( !pArg ){ xDestroy(p); goto out; } pArg->xDestroy = xDestroy; pArg->pUserData = p; } rc = sqlite3CreateFunc(db, zFunc, nArg, enc, p, xFunc, xStep, xFinal, pArg); if( pArg && pArg->nRef==0 ){ assert( rc!=SQLITE_OK ); xDestroy(p); sqlite3DbFree(db, pArg); } out: rc = sqlite3ApiExit(db, rc); sqlite3_mutex_leave(db->mutex); return rc; } #ifndef SQLITE_OMIT_UTF16 SQLITE_API int SQLITE_STDCALL sqlite3_create_function16( sqlite3 *db, const void *zFunctionName, int nArg, int eTextRep, void *p, void (*xFunc)(sqlite3_context*,int,sqlite3_value**), void (*xStep)(sqlite3_context*,int,sqlite3_value**), void (*xFinal)(sqlite3_context*) ){ int rc; char *zFunc8; #ifdef SQLITE_ENABLE_API_ARMOR if( !sqlite3SafetyCheckOk(db) || zFunctionName==0 ) return SQLITE_MISUSE_BKPT; #endif sqlite3_mutex_enter(db->mutex); assert( !db->mallocFailed ); zFunc8 = sqlite3Utf16to8(db, zFunctionName, -1, SQLITE_UTF16NATIVE); rc = sqlite3CreateFunc(db, zFunc8, nArg, eTextRep, p, xFunc, xStep, xFinal,0); sqlite3DbFree(db, zFunc8); rc = sqlite3ApiExit(db, rc); sqlite3_mutex_leave(db->mutex); return rc; } #endif /* ** Declare that a function has been overloaded by a virtual table. ** ** If the function already exists as a regular global function, then ** this routine is a no-op. If the function does not exist, then create ** a new one that always throws a run-time error. ** ** When virtual tables intend to provide an overloaded function, they ** should call this routine to make sure the global function exists. ** A global function must exist in order for name resolution to work ** properly. 
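**
** For example (editor's sketch), a virtual table module that overloads a
** hypothetical two-argument "match_score" function would call:
**
**      sqlite3_overload_function(db, "match_score", 2);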
*/ SQLITE_API int SQLITE_STDCALL sqlite3_overload_function( sqlite3 *db, const char *zName, int nArg ){ int nName = sqlite3Strlen30(zName); int rc = SQLITE_OK; #ifdef SQLITE_ENABLE_API_ARMOR if( !sqlite3SafetyCheckOk(db) || zName==0 || nArg<-2 ){ return SQLITE_MISUSE_BKPT; } #endif sqlite3_mutex_enter(db->mutex); if( sqlite3FindFunction(db, zName, nName, nArg, SQLITE_UTF8, 0)==0 ){ rc = sqlite3CreateFunc(db, zName, nArg, SQLITE_UTF8, 0, sqlite3InvalidFunction, 0, 0, 0); } rc = sqlite3ApiExit(db, rc); sqlite3_mutex_leave(db->mutex); return rc; } #ifndef SQLITE_OMIT_TRACE /* ** Register a trace function. The pArg from the previously registered trace ** is returned. ** ** A NULL trace function means that no tracing is executes. A non-NULL ** trace is a pointer to a function that is invoked at the start of each ** SQL statement. */ SQLITE_API void *SQLITE_STDCALL sqlite3_trace(sqlite3 *db, void (*xTrace)(void*,const char*), void *pArg){ void *pOld; #ifdef SQLITE_ENABLE_API_ARMOR if( !sqlite3SafetyCheckOk(db) ){ (void)SQLITE_MISUSE_BKPT; return 0; } #endif sqlite3_mutex_enter(db->mutex); pOld = db->pTraceArg; db->xTrace = xTrace; db->pTraceArg = pArg; sqlite3_mutex_leave(db->mutex); return pOld; } /* ** Register a profile function. The pArg from the previously registered ** profile function is returned. ** ** A NULL profile function means that no profiling is executes. A non-NULL ** profile is a pointer to a function that is invoked at the conclusion of ** each SQL statement that is run. */ SQLITE_API void *SQLITE_STDCALL sqlite3_profile( sqlite3 *db, void (*xProfile)(void*,const char*,sqlite_uint64), void *pArg ){ void *pOld; #ifdef SQLITE_ENABLE_API_ARMOR if( !sqlite3SafetyCheckOk(db) ){ (void)SQLITE_MISUSE_BKPT; return 0; } #endif sqlite3_mutex_enter(db->mutex); pOld = db->pProfileArg; db->xProfile = xProfile; db->pProfileArg = pArg; sqlite3_mutex_leave(db->mutex); return pOld; } #endif /* SQLITE_OMIT_TRACE */ /* ** Register a function to be invoked when a transaction commits. ** If the invoked function returns non-zero, then the commit becomes a ** rollback. */ SQLITE_API void *SQLITE_STDCALL sqlite3_commit_hook( sqlite3 *db, /* Attach the hook to this database */ int (*xCallback)(void*), /* Function to invoke on each commit */ void *pArg /* Argument to the function */ ){ void *pOld; #ifdef SQLITE_ENABLE_API_ARMOR if( !sqlite3SafetyCheckOk(db) ){ (void)SQLITE_MISUSE_BKPT; return 0; } #endif sqlite3_mutex_enter(db->mutex); pOld = db->pCommitArg; db->xCommitCallback = xCallback; db->pCommitArg = pArg; sqlite3_mutex_leave(db->mutex); return pOld; } /* ** Register a callback to be invoked each time a row is updated, ** inserted or deleted using this database connection. */ SQLITE_API void *SQLITE_STDCALL sqlite3_update_hook( sqlite3 *db, /* Attach the hook to this database */ void (*xCallback)(void*,int,char const *,char const *,sqlite_int64), void *pArg /* Argument to the function */ ){ void *pRet; #ifdef SQLITE_ENABLE_API_ARMOR if( !sqlite3SafetyCheckOk(db) ){ (void)SQLITE_MISUSE_BKPT; return 0; } #endif sqlite3_mutex_enter(db->mutex); pRet = db->pUpdateArg; db->xUpdateCallback = xCallback; db->pUpdateArg = pArg; sqlite3_mutex_leave(db->mutex); return pRet; } /* ** Register a callback to be invoked each time a transaction is rolled ** back by this database connection. 
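**
** Illustrative registration (editor's sketch; "onRollback" is a hypothetical
** callback taking only the registered pointer argument):
**
**      sqlite3_rollback_hook(db, onRollback, pAppData);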
*/ SQLITE_API void *SQLITE_STDCALL sqlite3_rollback_hook( sqlite3 *db, /* Attach the hook to this database */ void (*xCallback)(void*), /* Callback function */ void *pArg /* Argument to the function */ ){ void *pRet; #ifdef SQLITE_ENABLE_API_ARMOR if( !sqlite3SafetyCheckOk(db) ){ (void)SQLITE_MISUSE_BKPT; return 0; } #endif sqlite3_mutex_enter(db->mutex); pRet = db->pRollbackArg; db->xRollbackCallback = xCallback; db->pRollbackArg = pArg; sqlite3_mutex_leave(db->mutex); return pRet; } #ifndef SQLITE_OMIT_WAL /* ** The sqlite3_wal_hook() callback registered by sqlite3_wal_autocheckpoint(). ** Invoke sqlite3_wal_checkpoint if the number of frames in the log file ** is greater than sqlite3.pWalArg cast to an integer (the value configured by ** wal_autocheckpoint()). */ SQLITE_PRIVATE int sqlite3WalDefaultHook( void *pClientData, /* Argument */ sqlite3 *db, /* Connection */ const char *zDb, /* Database */ int nFrame /* Size of WAL */ ){ if( nFrame>=SQLITE_PTR_TO_INT(pClientData) ){ sqlite3BeginBenignMalloc(); sqlite3_wal_checkpoint(db, zDb); sqlite3EndBenignMalloc(); } return SQLITE_OK; } #endif /* SQLITE_OMIT_WAL */ /* ** Configure an sqlite3_wal_hook() callback to automatically checkpoint ** a database after committing a transaction if there are nFrame or ** more frames in the log file. Passing zero or a negative value as the ** nFrame parameter disables automatic checkpoints entirely. ** ** The callback registered by this function replaces any existing callback ** registered using sqlite3_wal_hook(). Likewise, registering a callback ** using sqlite3_wal_hook() disables the automatic checkpoint mechanism ** configured by this function. */ SQLITE_API int SQLITE_STDCALL sqlite3_wal_autocheckpoint(sqlite3 *db, int nFrame){ #ifdef SQLITE_OMIT_WAL UNUSED_PARAMETER(db); UNUSED_PARAMETER(nFrame); #else #ifdef SQLITE_ENABLE_API_ARMOR if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT; #endif if( nFrame>0 ){ sqlite3_wal_hook(db, sqlite3WalDefaultHook, SQLITE_INT_TO_PTR(nFrame)); }else{ sqlite3_wal_hook(db, 0, 0); } #endif return SQLITE_OK; } /* ** Register a callback to be invoked each time a transaction is written ** into the write-ahead-log by this database connection. */ SQLITE_API void *SQLITE_STDCALL sqlite3_wal_hook( sqlite3 *db, /* Attach the hook to this db handle */ int(*xCallback)(void *, sqlite3*, const char*, int), void *pArg /* First argument passed to xCallback() */ ){ #ifndef SQLITE_OMIT_WAL void *pRet; #ifdef SQLITE_ENABLE_API_ARMOR if( !sqlite3SafetyCheckOk(db) ){ (void)SQLITE_MISUSE_BKPT; return 0; } #endif sqlite3_mutex_enter(db->mutex); pRet = db->pWalArg; db->xWalCallback = xCallback; db->pWalArg = pArg; sqlite3_mutex_leave(db->mutex); return pRet; #else return 0; #endif } /* ** Checkpoint database zDb. */ SQLITE_API int SQLITE_STDCALL sqlite3_wal_checkpoint_v2( sqlite3 *db, /* Database handle */ const char *zDb, /* Name of attached database (or NULL) */ int eMode, /* SQLITE_CHECKPOINT_* value */ int *pnLog, /* OUT: Size of WAL log in frames */ int *pnCkpt /* OUT: Total number of frames checkpointed */ ){ #ifdef SQLITE_OMIT_WAL return SQLITE_OK; #else int rc; /* Return code */ int iDb = SQLITE_MAX_ATTACHED; /* sqlite3.aDb[] index of db to checkpoint */ #ifdef SQLITE_ENABLE_API_ARMOR if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT; #endif /* Initialize the output variables to -1 in case an error occurs. 
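**
** Editor's sketch of a typical caller of this interface:
**
**      int nLog = 0, nCkpt = 0;
**      sqlite3_wal_checkpoint_v2(db, "main", SQLITE_CHECKPOINT_TRUNCATE,
**                                &nLog, &nCkpt);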
*/
  if( pnLog ) *pnLog = -1;
  if( pnCkpt ) *pnCkpt = -1;

  assert( SQLITE_CHECKPOINT_PASSIVE==0 );
  assert( SQLITE_CHECKPOINT_FULL==1 );
  assert( SQLITE_CHECKPOINT_RESTART==2 );
  assert( SQLITE_CHECKPOINT_TRUNCATE==3 );
  if( eMode<SQLITE_CHECKPOINT_PASSIVE || eMode>SQLITE_CHECKPOINT_TRUNCATE ){
    /* EVIDENCE-OF: R-03996-12088 The M parameter must be a valid checkpoint
    ** mode: */
    return SQLITE_MISUSE;
  }

  sqlite3_mutex_enter(db->mutex);
  if( zDb && zDb[0] ){
    iDb = sqlite3FindDbName(db, zDb);
  }
  if( iDb<0 ){
    rc = SQLITE_ERROR;
    sqlite3ErrorWithMsg(db, SQLITE_ERROR, "unknown database: %s", zDb);
  }else{
    db->busyHandler.nBusy = 0;
    rc = sqlite3Checkpoint(db, iDb, eMode, pnLog, pnCkpt);
    sqlite3Error(db, rc);
  }
  rc = sqlite3ApiExit(db, rc);
  sqlite3_mutex_leave(db->mutex);
  return rc;
#endif
}

/*
** Checkpoint database zDb. If zDb is NULL, or if the buffer zDb points
** to contains a zero-length string, all attached databases are
** checkpointed.
*/
SQLITE_API int SQLITE_STDCALL sqlite3_wal_checkpoint(sqlite3 *db, const char *zDb){
  /* EVIDENCE-OF: R-41613-20553 The sqlite3_wal_checkpoint(D,X) is equivalent to
  ** sqlite3_wal_checkpoint_v2(D,X,SQLITE_CHECKPOINT_PASSIVE,0,0). */
  return sqlite3_wal_checkpoint_v2(db,zDb,SQLITE_CHECKPOINT_PASSIVE,0,0);
}

#ifndef SQLITE_OMIT_WAL
/*
** Run a checkpoint on database iDb. This is a no-op if database iDb is
** not currently open in WAL mode.
**
** If a transaction is open on the database being checkpointed, this
** function returns SQLITE_LOCKED and a checkpoint is not attempted. If
** an error occurs while running the checkpoint, an SQLite error code is
** returned (i.e. SQLITE_IOERR). Otherwise, SQLITE_OK.
**
** The mutex on database handle db should be held by the caller. The mutex
** associated with the specific b-tree being checkpointed is taken by
** this function while the checkpoint is running.
**
** If iDb is passed SQLITE_MAX_ATTACHED, then all attached databases are
** checkpointed. If an error is encountered it is returned immediately -
** no attempt is made to checkpoint any remaining databases.
**
** Parameter eMode is one of SQLITE_CHECKPOINT_PASSIVE, FULL or RESTART.
*/
SQLITE_PRIVATE int sqlite3Checkpoint(sqlite3 *db, int iDb, int eMode, int *pnLog, int *pnCkpt){
  int rc = SQLITE_OK;             /* Return code */
  int i;                          /* Used to iterate through attached dbs */
  int bBusy = 0;                  /* True if SQLITE_BUSY has been encountered */

  assert( sqlite3_mutex_held(db->mutex) );
  assert( !pnLog || *pnLog==-1 );
  assert( !pnCkpt || *pnCkpt==-1 );

  for(i=0; i<db->nDb && rc==SQLITE_OK; i++){
    if( i==iDb || iDb==SQLITE_MAX_ATTACHED ){
      rc = sqlite3BtreeCheckpoint(db->aDb[i].pBt, eMode, pnLog, pnCkpt);
      pnLog = 0;
      pnCkpt = 0;
      if( rc==SQLITE_BUSY ){
        bBusy = 1;
        rc = SQLITE_OK;
      }
    }
  }

  return (rc==SQLITE_OK && bBusy) ? SQLITE_BUSY : rc;
}
#endif /* SQLITE_OMIT_WAL */

/*
** This function returns true if main-memory should be used instead of
** a temporary file for transient pager files and statement journals.
** The value returned depends on the value of db->temp_store (runtime
** parameter) and the compile time value of SQLITE_TEMP_STORE. The
** following table describes the relationship between these two values
** and this function's return value.
** ** SQLITE_TEMP_STORE db->temp_store Location of temporary database ** ----------------- -------------- ------------------------------ ** 0 any file (return 0) ** 1 1 file (return 0) ** 1 2 memory (return 1) ** 1 0 file (return 0) ** 2 1 file (return 0) ** 2 2 memory (return 1) ** 2 0 memory (return 1) ** 3 any memory (return 1) */ SQLITE_PRIVATE int sqlite3TempInMemory(const sqlite3 *db){ #if SQLITE_TEMP_STORE==1 return ( db->temp_store==2 ); #endif #if SQLITE_TEMP_STORE==2 return ( db->temp_store!=1 ); #endif #if SQLITE_TEMP_STORE==3 UNUSED_PARAMETER(db); return 1; #endif #if SQLITE_TEMP_STORE<1 || SQLITE_TEMP_STORE>3 UNUSED_PARAMETER(db); return 0; #endif } /* ** Return UTF-8 encoded English language explanation of the most recent ** error. */ SQLITE_API const char *SQLITE_STDCALL sqlite3_errmsg(sqlite3 *db){ const char *z; if( !db ){ return sqlite3ErrStr(SQLITE_NOMEM); } if( !sqlite3SafetyCheckSickOrOk(db) ){ return sqlite3ErrStr(SQLITE_MISUSE_BKPT); } sqlite3_mutex_enter(db->mutex); if( db->mallocFailed ){ z = sqlite3ErrStr(SQLITE_NOMEM); }else{ testcase( db->pErr==0 ); z = (char*)sqlite3_value_text(db->pErr); assert( !db->mallocFailed ); if( z==0 ){ z = sqlite3ErrStr(db->errCode); } } sqlite3_mutex_leave(db->mutex); return z; } #ifndef SQLITE_OMIT_UTF16 /* ** Return UTF-16 encoded English language explanation of the most recent ** error. */ SQLITE_API const void *SQLITE_STDCALL sqlite3_errmsg16(sqlite3 *db){ static const u16 outOfMem[] = { 'o', 'u', 't', ' ', 'o', 'f', ' ', 'm', 'e', 'm', 'o', 'r', 'y', 0 }; static const u16 misuse[] = { 'l', 'i', 'b', 'r', 'a', 'r', 'y', ' ', 'r', 'o', 'u', 't', 'i', 'n', 'e', ' ', 'c', 'a', 'l', 'l', 'e', 'd', ' ', 'o', 'u', 't', ' ', 'o', 'f', ' ', 's', 'e', 'q', 'u', 'e', 'n', 'c', 'e', 0 }; const void *z; if( !db ){ return (void *)outOfMem; } if( !sqlite3SafetyCheckSickOrOk(db) ){ return (void *)misuse; } sqlite3_mutex_enter(db->mutex); if( db->mallocFailed ){ z = (void *)outOfMem; }else{ z = sqlite3_value_text16(db->pErr); if( z==0 ){ sqlite3ErrorWithMsg(db, db->errCode, sqlite3ErrStr(db->errCode)); z = sqlite3_value_text16(db->pErr); } /* A malloc() may have failed within the call to sqlite3_value_text16() ** above. If this is the case, then the db->mallocFailed flag needs to ** be cleared before returning. Do this directly, instead of via ** sqlite3ApiExit(), to avoid setting the database handle error message. */ db->mallocFailed = 0; } sqlite3_mutex_leave(db->mutex); return z; } #endif /* SQLITE_OMIT_UTF16 */ /* ** Return the most recent error code generated by an SQLite routine. If NULL is ** passed to this function, we assume a malloc() failed during sqlite3_open(). */ SQLITE_API int SQLITE_STDCALL sqlite3_errcode(sqlite3 *db){ if( db && !sqlite3SafetyCheckSickOrOk(db) ){ return SQLITE_MISUSE_BKPT; } if( !db || db->mallocFailed ){ return SQLITE_NOMEM; } return db->errCode & db->errMask; } SQLITE_API int SQLITE_STDCALL sqlite3_extended_errcode(sqlite3 *db){ if( db && !sqlite3SafetyCheckSickOrOk(db) ){ return SQLITE_MISUSE_BKPT; } if( !db || db->mallocFailed ){ return SQLITE_NOMEM; } return db->errCode; } /* ** Return a string that describes the kind of error specified in the ** argument. For now, this simply calls the internal sqlite3ErrStr() ** function. */ SQLITE_API const char *SQLITE_STDCALL sqlite3_errstr(int rc){ return sqlite3ErrStr(rc); } /* ** Create a new collating function for database "db". The name is zName ** and the encoding is enc. 
*/
static int createCollation(
  sqlite3* db,
  const char *zName,
  u8 enc,
  void* pCtx,
  int(*xCompare)(void*,int,const void*,int,const void*),
  void(*xDel)(void*)
){
  CollSeq *pColl;
  int enc2;

  assert( sqlite3_mutex_held(db->mutex) );

  /* If SQLITE_UTF16 is specified as the encoding type, transform this
  ** to one of SQLITE_UTF16LE or SQLITE_UTF16BE using the
  ** SQLITE_UTF16NATIVE macro. SQLITE_UTF16 is not used internally.
  */
  enc2 = enc;
  testcase( enc2==SQLITE_UTF16 );
  testcase( enc2==SQLITE_UTF16_ALIGNED );
  if( enc2==SQLITE_UTF16 || enc2==SQLITE_UTF16_ALIGNED ){
    enc2 = SQLITE_UTF16NATIVE;
  }
  if( enc2<SQLITE_UTF8 || enc2>SQLITE_UTF16BE ){
    return SQLITE_MISUSE_BKPT;
  }

  /* Check if this call is removing or replacing an existing collation
  ** sequence. If so, and there are active VMs, return busy. If there
  ** are no active VMs, invalidate any pre-compiled statements.
  */
  pColl = sqlite3FindCollSeq(db, (u8)enc2, zName, 0);
  if( pColl && pColl->xCmp ){
    if( db->nVdbeActive ){
      sqlite3ErrorWithMsg(db, SQLITE_BUSY,
        "unable to delete/modify collation sequence due to active statements");
      return SQLITE_BUSY;
    }
    sqlite3ExpirePreparedStatements(db);

    /* If collation sequence pColl was created directly by a call to
    ** sqlite3_create_collation, and not generated by synthCollSeq(),
    ** then any copies made by synthCollSeq() need to be invalidated.
    ** Also, collation destructor - CollSeq.xDel() - function may need
    ** to be called.
    */
    if( (pColl->enc & ~SQLITE_UTF16_ALIGNED)==enc2 ){
      CollSeq *aColl = sqlite3HashFind(&db->aCollSeq, zName);
      int j;
      for(j=0; j<3; j++){
        CollSeq *p = &aColl[j];
        if( p->enc==pColl->enc ){
          if( p->xDel ){
            p->xDel(p->pUser);
          }
          p->xCmp = 0;
        }
      }
    }
  }

  pColl = sqlite3FindCollSeq(db, (u8)enc2, zName, 1);
  if( pColl==0 ) return SQLITE_NOMEM;
  pColl->xCmp = xCompare;
  pColl->pUser = pCtx;
  pColl->xDel = xDel;
  pColl->enc = (u8)(enc2 | (enc & SQLITE_UTF16_ALIGNED));
  sqlite3Error(db, SQLITE_OK);
  return SQLITE_OK;
}

/*
** This array defines hard upper bounds on limit values. The
** initializer must be kept in sync with the SQLITE_LIMIT_*
** #defines in sqlite3.h.
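**
** These values bound what a caller may request through sqlite3_limit().
** As a sketch only (the value 5 is arbitrary), an application that wants
** to cap the number of attached databases at run time might call:
**
**     int prev = sqlite3_limit(db, SQLITE_LIMIT_ATTACHED, 5);
**
** Requests that exceed the corresponding aHardLimit[] entry are silently
** truncated to the hard limit by sqlite3_limit() below.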
*/ static const int aHardLimit[] = { SQLITE_MAX_LENGTH, SQLITE_MAX_SQL_LENGTH, SQLITE_MAX_COLUMN, SQLITE_MAX_EXPR_DEPTH, SQLITE_MAX_COMPOUND_SELECT, SQLITE_MAX_VDBE_OP, SQLITE_MAX_FUNCTION_ARG, SQLITE_MAX_ATTACHED, SQLITE_MAX_LIKE_PATTERN_LENGTH, SQLITE_MAX_VARIABLE_NUMBER, /* IMP: R-38091-32352 */ SQLITE_MAX_TRIGGER_DEPTH, SQLITE_MAX_WORKER_THREADS, }; /* ** Make sure the hard limits are set to reasonable values */ #if SQLITE_MAX_LENGTH<100 # error SQLITE_MAX_LENGTH must be at least 100 #endif #if SQLITE_MAX_SQL_LENGTH<100 # error SQLITE_MAX_SQL_LENGTH must be at least 100 #endif #if SQLITE_MAX_SQL_LENGTH>SQLITE_MAX_LENGTH # error SQLITE_MAX_SQL_LENGTH must not be greater than SQLITE_MAX_LENGTH #endif #if SQLITE_MAX_COMPOUND_SELECT<2 # error SQLITE_MAX_COMPOUND_SELECT must be at least 2 #endif #if SQLITE_MAX_VDBE_OP<40 # error SQLITE_MAX_VDBE_OP must be at least 40 #endif #if SQLITE_MAX_FUNCTION_ARG<0 || SQLITE_MAX_FUNCTION_ARG>1000 # error SQLITE_MAX_FUNCTION_ARG must be between 0 and 1000 #endif #if SQLITE_MAX_ATTACHED<0 || SQLITE_MAX_ATTACHED>125 # error SQLITE_MAX_ATTACHED must be between 0 and 125 #endif #if SQLITE_MAX_LIKE_PATTERN_LENGTH<1 # error SQLITE_MAX_LIKE_PATTERN_LENGTH must be at least 1 #endif #if SQLITE_MAX_COLUMN>32767 # error SQLITE_MAX_COLUMN must not exceed 32767 #endif #if SQLITE_MAX_TRIGGER_DEPTH<1 # error SQLITE_MAX_TRIGGER_DEPTH must be at least 1 #endif #if SQLITE_MAX_WORKER_THREADS<0 || SQLITE_MAX_WORKER_THREADS>50 # error SQLITE_MAX_WORKER_THREADS must be between 0 and 50 #endif /* ** Change the value of a limit. Report the old value. ** If an invalid limit index is supplied, report -1. ** Make no changes but still report the old value if the ** new limit is negative. ** ** A new lower limit does not shrink existing constructs. ** It merely prevents new constructs that exceed the limit ** from forming. */ SQLITE_API int SQLITE_STDCALL sqlite3_limit(sqlite3 *db, int limitId, int newLimit){ int oldLimit; #ifdef SQLITE_ENABLE_API_ARMOR if( !sqlite3SafetyCheckOk(db) ){ (void)SQLITE_MISUSE_BKPT; return -1; } #endif /* EVIDENCE-OF: R-30189-54097 For each limit category SQLITE_LIMIT_NAME ** there is a hard upper bound set at compile-time by a C preprocessor ** macro called SQLITE_MAX_NAME. (The "_LIMIT_" in the name is changed to ** "_MAX_".) 
*/ assert( aHardLimit[SQLITE_LIMIT_LENGTH]==SQLITE_MAX_LENGTH ); assert( aHardLimit[SQLITE_LIMIT_SQL_LENGTH]==SQLITE_MAX_SQL_LENGTH ); assert( aHardLimit[SQLITE_LIMIT_COLUMN]==SQLITE_MAX_COLUMN ); assert( aHardLimit[SQLITE_LIMIT_EXPR_DEPTH]==SQLITE_MAX_EXPR_DEPTH ); assert( aHardLimit[SQLITE_LIMIT_COMPOUND_SELECT]==SQLITE_MAX_COMPOUND_SELECT); assert( aHardLimit[SQLITE_LIMIT_VDBE_OP]==SQLITE_MAX_VDBE_OP ); assert( aHardLimit[SQLITE_LIMIT_FUNCTION_ARG]==SQLITE_MAX_FUNCTION_ARG ); assert( aHardLimit[SQLITE_LIMIT_ATTACHED]==SQLITE_MAX_ATTACHED ); assert( aHardLimit[SQLITE_LIMIT_LIKE_PATTERN_LENGTH]== SQLITE_MAX_LIKE_PATTERN_LENGTH ); assert( aHardLimit[SQLITE_LIMIT_VARIABLE_NUMBER]==SQLITE_MAX_VARIABLE_NUMBER); assert( aHardLimit[SQLITE_LIMIT_TRIGGER_DEPTH]==SQLITE_MAX_TRIGGER_DEPTH ); assert( aHardLimit[SQLITE_LIMIT_WORKER_THREADS]==SQLITE_MAX_WORKER_THREADS ); assert( SQLITE_LIMIT_WORKER_THREADS==(SQLITE_N_LIMIT-1) ); if( limitId<0 || limitId>=SQLITE_N_LIMIT ){ return -1; } oldLimit = db->aLimit[limitId]; if( newLimit>=0 ){ /* IMP: R-52476-28732 */ if( newLimit>aHardLimit[limitId] ){ newLimit = aHardLimit[limitId]; /* IMP: R-51463-25634 */ } db->aLimit[limitId] = newLimit; } return oldLimit; /* IMP: R-53341-35419 */ } /* ** This function is used to parse both URIs and non-URI filenames passed by the ** user to API functions sqlite3_open() or sqlite3_open_v2(), and for database ** URIs specified as part of ATTACH statements. ** ** The first argument to this function is the name of the VFS to use (or ** a NULL to signify the default VFS) if the URI does not contain a "vfs=xxx" ** query parameter. The second argument contains the URI (or non-URI filename) ** itself. When this function is called the *pFlags variable should contain ** the default flags to open the database handle with. The value stored in ** *pFlags may be updated before returning if the URI filename contains ** "cache=xxx" or "mode=xxx" query parameters. ** ** If successful, SQLITE_OK is returned. In this case *ppVfs is set to point to ** the VFS that should be used to open the database file. *pzFile is set to ** point to a buffer containing the name of the file to open. It is the ** responsibility of the caller to eventually call sqlite3_free() to release ** this buffer. ** ** If an error occurs, then an SQLite error code is returned and *pzErrMsg ** may be set to point to a buffer containing an English language error ** message. It is the responsibility of the caller to eventually release ** this buffer by calling sqlite3_free(). 
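**
** As a hedged illustration (the filename below is made up), an application
** that calls
**
**     sqlite3_open_v2("file:/tmp/test.db?mode=ro&cache=shared&vfs=unix",
**                     &db, SQLITE_OPEN_READWRITE | SQLITE_OPEN_URI, 0);
**
** causes this routine to strip the query string so that *pzFile names just
** "/tmp/test.db", to replace the READWRITE flag in *pFlags with
** SQLITE_OPEN_READONLY, to add SQLITE_OPEN_SHAREDCACHE, and to set *ppVfs
** to the VFS registered under the name "unix" (if any).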
*/ SQLITE_PRIVATE int sqlite3ParseUri( const char *zDefaultVfs, /* VFS to use if no "vfs=xxx" query option */ const char *zUri, /* Nul-terminated URI to parse */ unsigned int *pFlags, /* IN/OUT: SQLITE_OPEN_XXX flags */ sqlite3_vfs **ppVfs, /* OUT: VFS to use */ char **pzFile, /* OUT: Filename component of URI */ char **pzErrMsg /* OUT: Error message (if rc!=SQLITE_OK) */ ){ int rc = SQLITE_OK; unsigned int flags = *pFlags; const char *zVfs = zDefaultVfs; char *zFile; char c; int nUri = sqlite3Strlen30(zUri); assert( *pzErrMsg==0 ); if( ((flags & SQLITE_OPEN_URI) /* IMP: R-48725-32206 */ || sqlite3GlobalConfig.bOpenUri) /* IMP: R-51689-46548 */ && nUri>=5 && memcmp(zUri, "file:", 5)==0 /* IMP: R-57884-37496 */ ){ char *zOpt; int eState; /* Parser state when parsing URI */ int iIn; /* Input character index */ int iOut = 0; /* Output character index */ u64 nByte = nUri+2; /* Bytes of space to allocate */ /* Make sure the SQLITE_OPEN_URI flag is set to indicate to the VFS xOpen ** method that there may be extra parameters following the file-name. */ flags |= SQLITE_OPEN_URI; for(iIn=0; iIn=0 && octet<256 ); if( octet==0 ){ /* This branch is taken when "%00" appears within the URI. In this ** case we ignore all text in the remainder of the path, name or ** value currently being parsed. So ignore the current character ** and skip to the next "?", "=" or "&", as appropriate. */ while( (c = zUri[iIn])!=0 && c!='#' && (eState!=0 || c!='?') && (eState!=1 || (c!='=' && c!='&')) && (eState!=2 || c!='&') ){ iIn++; } continue; } c = octet; }else if( eState==1 && (c=='&' || c=='=') ){ if( zFile[iOut-1]==0 ){ /* An empty option name. Ignore this option altogether. */ while( zUri[iIn] && zUri[iIn]!='#' && zUri[iIn-1]!='&' ) iIn++; continue; } if( c=='&' ){ zFile[iOut++] = '\0'; }else{ eState = 2; } c = 0; }else if( (eState==0 && c=='?') || (eState==2 && c=='&') ){ c = 0; eState = 1; } zFile[iOut++] = c; } if( eState==1 ) zFile[iOut++] = '\0'; zFile[iOut++] = '\0'; zFile[iOut++] = '\0'; /* Check if there were any options specified that should be interpreted ** here. Options that are interpreted here include "vfs" and those that ** correspond to flags that may be passed to the sqlite3_open_v2() ** method. 
*/ zOpt = &zFile[sqlite3Strlen30(zFile)+1]; while( zOpt[0] ){ int nOpt = sqlite3Strlen30(zOpt); char *zVal = &zOpt[nOpt+1]; int nVal = sqlite3Strlen30(zVal); if( nOpt==3 && memcmp("vfs", zOpt, 3)==0 ){ zVfs = zVal; }else{ struct OpenMode { const char *z; int mode; } *aMode = 0; char *zModeType = 0; int mask = 0; int limit = 0; if( nOpt==5 && memcmp("cache", zOpt, 5)==0 ){ static struct OpenMode aCacheMode[] = { { "shared", SQLITE_OPEN_SHAREDCACHE }, { "private", SQLITE_OPEN_PRIVATECACHE }, { 0, 0 } }; mask = SQLITE_OPEN_SHAREDCACHE|SQLITE_OPEN_PRIVATECACHE; aMode = aCacheMode; limit = mask; zModeType = "cache"; } if( nOpt==4 && memcmp("mode", zOpt, 4)==0 ){ static struct OpenMode aOpenMode[] = { { "ro", SQLITE_OPEN_READONLY }, { "rw", SQLITE_OPEN_READWRITE }, { "rwc", SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE }, { "memory", SQLITE_OPEN_MEMORY }, { 0, 0 } }; mask = SQLITE_OPEN_READONLY | SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | SQLITE_OPEN_MEMORY; aMode = aOpenMode; limit = mask & flags; zModeType = "access"; } if( aMode ){ int i; int mode = 0; for(i=0; aMode[i].z; i++){ const char *z = aMode[i].z; if( nVal==sqlite3Strlen30(z) && 0==memcmp(zVal, z, nVal) ){ mode = aMode[i].mode; break; } } if( mode==0 ){ *pzErrMsg = sqlite3_mprintf("no such %s mode: %s", zModeType, zVal); rc = SQLITE_ERROR; goto parse_uri_out; } if( (mode & ~SQLITE_OPEN_MEMORY)>limit ){ *pzErrMsg = sqlite3_mprintf("%s mode not allowed: %s", zModeType, zVal); rc = SQLITE_PERM; goto parse_uri_out; } flags = (flags & ~mask) | mode; } } zOpt = &zVal[nVal+1]; } }else{ zFile = sqlite3_malloc64(nUri+2); if( !zFile ) return SQLITE_NOMEM; memcpy(zFile, zUri, nUri); zFile[nUri] = '\0'; zFile[nUri+1] = '\0'; flags &= ~SQLITE_OPEN_URI; } *ppVfs = sqlite3_vfs_find(zVfs); if( *ppVfs==0 ){ *pzErrMsg = sqlite3_mprintf("no such vfs: %s", zVfs); rc = SQLITE_ERROR; } parse_uri_out: if( rc!=SQLITE_OK ){ sqlite3_free(zFile); zFile = 0; } *pFlags = flags; *pzFile = zFile; return rc; } /* ** This routine does the work of opening a database on behalf of ** sqlite3_open() and sqlite3_open16(). The database filename "zFilename" ** is UTF-8 encoded. */ static int openDatabase( const char *zFilename, /* Database filename UTF-8 encoded */ sqlite3 **ppDb, /* OUT: Returned database handle */ unsigned int flags, /* Operational flags */ const char *zVfs /* Name of the VFS to use */ ){ sqlite3 *db; /* Store allocated handle here */ int rc; /* Return code */ int isThreadsafe; /* True for threadsafe connections */ char *zOpen = 0; /* Filename argument to pass to BtreeOpen() */ char *zErrMsg = 0; /* Error message from sqlite3ParseUri() */ #ifdef SQLITE_ENABLE_API_ARMOR if( ppDb==0 ) return SQLITE_MISUSE_BKPT; #endif *ppDb = 0; #ifndef SQLITE_OMIT_AUTOINIT rc = sqlite3_initialize(); if( rc ) return rc; #endif /* Only allow sensible combinations of bits in the flags argument. ** Throw an error if any non-sense combination is used. If we ** do not block illegal combinations here, it could trigger ** assert() statements in deeper layers. 
Sensible combinations ** are: ** ** 1: SQLITE_OPEN_READONLY ** 2: SQLITE_OPEN_READWRITE ** 6: SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE */ assert( SQLITE_OPEN_READONLY == 0x01 ); assert( SQLITE_OPEN_READWRITE == 0x02 ); assert( SQLITE_OPEN_CREATE == 0x04 ); testcase( (1<<(flags&7))==0x02 ); /* READONLY */ testcase( (1<<(flags&7))==0x04 ); /* READWRITE */ testcase( (1<<(flags&7))==0x40 ); /* READWRITE | CREATE */ if( ((1<<(flags&7)) & 0x46)==0 ){ return SQLITE_MISUSE_BKPT; /* IMP: R-65497-44594 */ } if( sqlite3GlobalConfig.bCoreMutex==0 ){ isThreadsafe = 0; }else if( flags & SQLITE_OPEN_NOMUTEX ){ isThreadsafe = 0; }else if( flags & SQLITE_OPEN_FULLMUTEX ){ isThreadsafe = 1; }else{ isThreadsafe = sqlite3GlobalConfig.bFullMutex; } if( flags & SQLITE_OPEN_PRIVATECACHE ){ flags &= ~SQLITE_OPEN_SHAREDCACHE; }else if( sqlite3GlobalConfig.sharedCacheEnabled ){ flags |= SQLITE_OPEN_SHAREDCACHE; } /* Remove harmful bits from the flags parameter ** ** The SQLITE_OPEN_NOMUTEX and SQLITE_OPEN_FULLMUTEX flags were ** dealt with in the previous code block. Besides these, the only ** valid input flags for sqlite3_open_v2() are SQLITE_OPEN_READONLY, ** SQLITE_OPEN_READWRITE, SQLITE_OPEN_CREATE, SQLITE_OPEN_SHAREDCACHE, ** SQLITE_OPEN_PRIVATECACHE, and some reserved bits. Silently mask ** off all other flags. */ flags &= ~( SQLITE_OPEN_DELETEONCLOSE | SQLITE_OPEN_EXCLUSIVE | SQLITE_OPEN_MAIN_DB | SQLITE_OPEN_TEMP_DB | SQLITE_OPEN_TRANSIENT_DB | SQLITE_OPEN_MAIN_JOURNAL | SQLITE_OPEN_TEMP_JOURNAL | SQLITE_OPEN_SUBJOURNAL | SQLITE_OPEN_MASTER_JOURNAL | SQLITE_OPEN_NOMUTEX | SQLITE_OPEN_FULLMUTEX | SQLITE_OPEN_WAL ); /* Allocate the sqlite data structure */ db = sqlite3MallocZero( sizeof(sqlite3) ); if( db==0 ) goto opendb_out; if( isThreadsafe ){ db->mutex = sqlite3MutexAlloc(SQLITE_MUTEX_RECURSIVE); if( db->mutex==0 ){ sqlite3_free(db); db = 0; goto opendb_out; } } sqlite3_mutex_enter(db->mutex); db->errMask = 0xff; db->nDb = 2; db->magic = SQLITE_MAGIC_BUSY; db->aDb = db->aDbStatic; assert( sizeof(db->aLimit)==sizeof(aHardLimit) ); memcpy(db->aLimit, aHardLimit, sizeof(db->aLimit)); db->aLimit[SQLITE_LIMIT_WORKER_THREADS] = SQLITE_DEFAULT_WORKER_THREADS; db->autoCommit = 1; db->nextAutovac = -1; db->szMmap = sqlite3GlobalConfig.szMmap; db->nextPagesize = 0; db->nMaxSorterMmap = 0x7FFFFFFF; db->flags |= SQLITE_ShortColNames | SQLITE_EnableTrigger | SQLITE_CacheSpill #if !defined(SQLITE_DEFAULT_AUTOMATIC_INDEX) || SQLITE_DEFAULT_AUTOMATIC_INDEX | SQLITE_AutoIndex #endif #if SQLITE_DEFAULT_CKPTFULLFSYNC | SQLITE_CkptFullFSync #endif #if SQLITE_DEFAULT_FILE_FORMAT<4 | SQLITE_LegacyFileFmt #endif #ifdef SQLITE_ENABLE_LOAD_EXTENSION | SQLITE_LoadExtension #endif #if SQLITE_DEFAULT_RECURSIVE_TRIGGERS | SQLITE_RecTriggers #endif #if defined(SQLITE_DEFAULT_FOREIGN_KEYS) && SQLITE_DEFAULT_FOREIGN_KEYS | SQLITE_ForeignKeys #endif #if defined(SQLITE_REVERSE_UNORDERED_SELECTS) | SQLITE_ReverseOrder #endif #if defined(SQLITE_ENABLE_OVERSIZE_CELL_CHECK) | SQLITE_CellSizeCk #endif ; sqlite3HashInit(&db->aCollSeq); #ifndef SQLITE_OMIT_VIRTUALTABLE sqlite3HashInit(&db->aModule); #endif /* Add the default collation sequence BINARY. BINARY works for both UTF-8 ** and UTF-16, so add a version for each to avoid any unnecessary ** conversions. The only error that can occur here is a malloc() failure. 
** ** EVIDENCE-OF: R-52786-44878 SQLite defines three built-in collating ** functions: */ createCollation(db, "BINARY", SQLITE_UTF8, 0, binCollFunc, 0); createCollation(db, "BINARY", SQLITE_UTF16BE, 0, binCollFunc, 0); createCollation(db, "BINARY", SQLITE_UTF16LE, 0, binCollFunc, 0); createCollation(db, "NOCASE", SQLITE_UTF8, 0, nocaseCollatingFunc, 0); createCollation(db, "RTRIM", SQLITE_UTF8, (void*)1, binCollFunc, 0); if( db->mallocFailed ){ goto opendb_out; } /* EVIDENCE-OF: R-08308-17224 The default collating function for all ** strings is BINARY. */ db->pDfltColl = sqlite3FindCollSeq(db, SQLITE_UTF8, "BINARY", 0); assert( db->pDfltColl!=0 ); /* Parse the filename/URI argument. */ db->openFlags = flags; rc = sqlite3ParseUri(zVfs, zFilename, &flags, &db->pVfs, &zOpen, &zErrMsg); if( rc!=SQLITE_OK ){ if( rc==SQLITE_NOMEM ) db->mallocFailed = 1; sqlite3ErrorWithMsg(db, rc, zErrMsg ? "%s" : 0, zErrMsg); sqlite3_free(zErrMsg); goto opendb_out; } /* Open the backend database driver */ rc = sqlite3BtreeOpen(db->pVfs, zOpen, db, &db->aDb[0].pBt, 0, flags | SQLITE_OPEN_MAIN_DB); if( rc!=SQLITE_OK ){ if( rc==SQLITE_IOERR_NOMEM ){ rc = SQLITE_NOMEM; } sqlite3Error(db, rc); goto opendb_out; } sqlite3BtreeEnter(db->aDb[0].pBt); db->aDb[0].pSchema = sqlite3SchemaGet(db, db->aDb[0].pBt); if( !db->mallocFailed ) ENC(db) = SCHEMA_ENC(db); sqlite3BtreeLeave(db->aDb[0].pBt); db->aDb[1].pSchema = sqlite3SchemaGet(db, 0); /* The default safety_level for the main database is 'full'; for the temp ** database it is 'NONE'. This matches the pager layer defaults. */ db->aDb[0].zName = "main"; db->aDb[0].safety_level = 3; db->aDb[1].zName = "temp"; db->aDb[1].safety_level = 1; db->magic = SQLITE_MAGIC_OPEN; if( db->mallocFailed ){ goto opendb_out; } /* Register all built-in functions, but do not attempt to read the ** database schema yet. This is delayed until the first time the database ** is accessed. */ sqlite3Error(db, SQLITE_OK); sqlite3RegisterBuiltinFunctions(db); /* Load automatic extensions - extensions that have been registered ** using the sqlite3_automatic_extension() API. */ rc = sqlite3_errcode(db); if( rc==SQLITE_OK ){ sqlite3AutoLoadExtensions(db); rc = sqlite3_errcode(db); if( rc!=SQLITE_OK ){ goto opendb_out; } } #ifdef SQLITE_ENABLE_FTS1 if( !db->mallocFailed ){ extern int sqlite3Fts1Init(sqlite3*); rc = sqlite3Fts1Init(db); } #endif #ifdef SQLITE_ENABLE_FTS2 if( !db->mallocFailed && rc==SQLITE_OK ){ extern int sqlite3Fts2Init(sqlite3*); rc = sqlite3Fts2Init(db); } #endif #ifdef SQLITE_ENABLE_FTS3 if( !db->mallocFailed && rc==SQLITE_OK ){ rc = sqlite3Fts3Init(db); } #endif #ifdef SQLITE_ENABLE_ICU if( !db->mallocFailed && rc==SQLITE_OK ){ rc = sqlite3IcuInit(db); } #endif #ifdef SQLITE_ENABLE_RTREE if( !db->mallocFailed && rc==SQLITE_OK){ rc = sqlite3RtreeInit(db); } #endif #ifdef SQLITE_ENABLE_DBSTAT_VTAB if( !db->mallocFailed && rc==SQLITE_OK){ rc = sqlite3DbstatRegister(db); } #endif /* -DSQLITE_DEFAULT_LOCKING_MODE=1 makes EXCLUSIVE the default locking ** mode. -DSQLITE_DEFAULT_LOCKING_MODE=0 make NORMAL the default locking ** mode. Doing nothing at all also makes NORMAL the default. 
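**
** For instance, a build that wants EXCLUSIVE locking by default might be
** compiled with something along the lines of (sketch only):
**
**     cc -DSQLITE_DEFAULT_LOCKING_MODE=1 -c sqlite3.c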
*/ #ifdef SQLITE_DEFAULT_LOCKING_MODE db->dfltLockMode = SQLITE_DEFAULT_LOCKING_MODE; sqlite3PagerLockingMode(sqlite3BtreePager(db->aDb[0].pBt), SQLITE_DEFAULT_LOCKING_MODE); #endif if( rc ) sqlite3Error(db, rc); /* Enable the lookaside-malloc subsystem */ setupLookaside(db, 0, sqlite3GlobalConfig.szLookaside, sqlite3GlobalConfig.nLookaside); sqlite3_wal_autocheckpoint(db, SQLITE_DEFAULT_WAL_AUTOCHECKPOINT); opendb_out: sqlite3_free(zOpen); if( db ){ assert( db->mutex!=0 || isThreadsafe==0 || sqlite3GlobalConfig.bFullMutex==0 ); sqlite3_mutex_leave(db->mutex); } rc = sqlite3_errcode(db); assert( db!=0 || rc==SQLITE_NOMEM ); if( rc==SQLITE_NOMEM ){ sqlite3_close(db); db = 0; }else if( rc!=SQLITE_OK ){ db->magic = SQLITE_MAGIC_SICK; } *ppDb = db; #ifdef SQLITE_ENABLE_SQLLOG if( sqlite3GlobalConfig.xSqllog ){ /* Opening a db handle. Fourth parameter is passed 0. */ void *pArg = sqlite3GlobalConfig.pSqllogArg; sqlite3GlobalConfig.xSqllog(pArg, db, zFilename, 0); } #endif return rc & 0xff; } /* ** Open a new database handle. */ SQLITE_API int SQLITE_STDCALL sqlite3_open( const char *zFilename, sqlite3 **ppDb ){ return openDatabase(zFilename, ppDb, SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, 0); } SQLITE_API int SQLITE_STDCALL sqlite3_open_v2( const char *filename, /* Database filename (UTF-8) */ sqlite3 **ppDb, /* OUT: SQLite db handle */ int flags, /* Flags */ const char *zVfs /* Name of VFS module to use */ ){ return openDatabase(filename, ppDb, (unsigned int)flags, zVfs); } #ifndef SQLITE_OMIT_UTF16 /* ** Open a new database handle. */ SQLITE_API int SQLITE_STDCALL sqlite3_open16( const void *zFilename, sqlite3 **ppDb ){ char const *zFilename8; /* zFilename encoded in UTF-8 instead of UTF-16 */ sqlite3_value *pVal; int rc; #ifdef SQLITE_ENABLE_API_ARMOR if( ppDb==0 ) return SQLITE_MISUSE_BKPT; #endif *ppDb = 0; #ifndef SQLITE_OMIT_AUTOINIT rc = sqlite3_initialize(); if( rc ) return rc; #endif if( zFilename==0 ) zFilename = "\000\000"; pVal = sqlite3ValueNew(0); sqlite3ValueSetStr(pVal, -1, zFilename, SQLITE_UTF16NATIVE, SQLITE_STATIC); zFilename8 = sqlite3ValueText(pVal, SQLITE_UTF8); if( zFilename8 ){ rc = openDatabase(zFilename8, ppDb, SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, 0); assert( *ppDb || rc==SQLITE_NOMEM ); if( rc==SQLITE_OK && !DbHasProperty(*ppDb, 0, DB_SchemaLoaded) ){ SCHEMA_ENC(*ppDb) = ENC(*ppDb) = SQLITE_UTF16NATIVE; } }else{ rc = SQLITE_NOMEM; } sqlite3ValueFree(pVal); return rc & 0xff; } #endif /* SQLITE_OMIT_UTF16 */ /* ** Register a new collation sequence with the database handle db. */ SQLITE_API int SQLITE_STDCALL sqlite3_create_collation( sqlite3* db, const char *zName, int enc, void* pCtx, int(*xCompare)(void*,int,const void*,int,const void*) ){ return sqlite3_create_collation_v2(db, zName, enc, pCtx, xCompare, 0); } /* ** Register a new collation sequence with the database handle db. */ SQLITE_API int SQLITE_STDCALL sqlite3_create_collation_v2( sqlite3* db, const char *zName, int enc, void* pCtx, int(*xCompare)(void*,int,const void*,int,const void*), void(*xDel)(void*) ){ int rc; #ifdef SQLITE_ENABLE_API_ARMOR if( !sqlite3SafetyCheckOk(db) || zName==0 ) return SQLITE_MISUSE_BKPT; #endif sqlite3_mutex_enter(db->mutex); assert( !db->mallocFailed ); rc = createCollation(db, zName, (u8)enc, pCtx, xCompare, xDel); rc = sqlite3ApiExit(db, rc); sqlite3_mutex_leave(db->mutex); return rc; } #ifndef SQLITE_OMIT_UTF16 /* ** Register a new collation sequence with the database handle db. 
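**
** A minimal caller-side sketch of the UTF-8 interface defined above (the
** comparison callback cmpNoCase is hypothetical, not part of this file):
**
**     static int cmpNoCase(void *pUnused, int n1, const void *z1,
**                          int n2, const void *z2){
**       int n = n1<n2 ? n1 : n2;
**       int c = sqlite3_strnicmp((const char*)z1, (const char*)z2, n);
**       return c ? c : n1-n2;
**     }
**
**     sqlite3_create_collation(db, "NOCASE2", SQLITE_UTF8, 0, cmpNoCase);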
*/ SQLITE_API int SQLITE_STDCALL sqlite3_create_collation16( sqlite3* db, const void *zName, int enc, void* pCtx, int(*xCompare)(void*,int,const void*,int,const void*) ){ int rc = SQLITE_OK; char *zName8; #ifdef SQLITE_ENABLE_API_ARMOR if( !sqlite3SafetyCheckOk(db) || zName==0 ) return SQLITE_MISUSE_BKPT; #endif sqlite3_mutex_enter(db->mutex); assert( !db->mallocFailed ); zName8 = sqlite3Utf16to8(db, zName, -1, SQLITE_UTF16NATIVE); if( zName8 ){ rc = createCollation(db, zName8, (u8)enc, pCtx, xCompare, 0); sqlite3DbFree(db, zName8); } rc = sqlite3ApiExit(db, rc); sqlite3_mutex_leave(db->mutex); return rc; } #endif /* SQLITE_OMIT_UTF16 */ /* ** Register a collation sequence factory callback with the database handle ** db. Replace any previously installed collation sequence factory. */ SQLITE_API int SQLITE_STDCALL sqlite3_collation_needed( sqlite3 *db, void *pCollNeededArg, void(*xCollNeeded)(void*,sqlite3*,int eTextRep,const char*) ){ #ifdef SQLITE_ENABLE_API_ARMOR if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT; #endif sqlite3_mutex_enter(db->mutex); db->xCollNeeded = xCollNeeded; db->xCollNeeded16 = 0; db->pCollNeededArg = pCollNeededArg; sqlite3_mutex_leave(db->mutex); return SQLITE_OK; } #ifndef SQLITE_OMIT_UTF16 /* ** Register a collation sequence factory callback with the database handle ** db. Replace any previously installed collation sequence factory. */ SQLITE_API int SQLITE_STDCALL sqlite3_collation_needed16( sqlite3 *db, void *pCollNeededArg, void(*xCollNeeded16)(void*,sqlite3*,int eTextRep,const void*) ){ #ifdef SQLITE_ENABLE_API_ARMOR if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT; #endif sqlite3_mutex_enter(db->mutex); db->xCollNeeded = 0; db->xCollNeeded16 = xCollNeeded16; db->pCollNeededArg = pCollNeededArg; sqlite3_mutex_leave(db->mutex); return SQLITE_OK; } #endif /* SQLITE_OMIT_UTF16 */ #ifndef SQLITE_OMIT_DEPRECATED /* ** This function is now an anachronism. It used to be used to recover from a ** malloc() failure, but SQLite now does this automatically. */ SQLITE_API int SQLITE_STDCALL sqlite3_global_recover(void){ return SQLITE_OK; } #endif /* ** Test to see whether or not the database connection is in autocommit ** mode. Return TRUE if it is and FALSE if not. Autocommit mode is on ** by default. Autocommit is disabled by a BEGIN statement and reenabled ** by the next COMMIT or ROLLBACK. */ SQLITE_API int SQLITE_STDCALL sqlite3_get_autocommit(sqlite3 *db){ #ifdef SQLITE_ENABLE_API_ARMOR if( !sqlite3SafetyCheckOk(db) ){ (void)SQLITE_MISUSE_BKPT; return 0; } #endif return db->autoCommit; } /* ** The following routines are substitutes for constants SQLITE_CORRUPT, ** SQLITE_MISUSE, SQLITE_CANTOPEN, SQLITE_IOERR and possibly other error ** constants. They serve two purposes: ** ** 1. Serve as a convenient place to set a breakpoint in a debugger ** to detect when version error conditions occurs. ** ** 2. Invoke sqlite3_log() to provide the source code location where ** a low-level error is first detected. 
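**
** These routines are normally reached through wrapper macros defined
** earlier in this amalgamation, for example:
**
**     #define SQLITE_CORRUPT_BKPT sqlite3CorruptError(__LINE__)
**
** so that the log message records the line at which the condition was
** first detected.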
*/
SQLITE_PRIVATE int sqlite3CorruptError(int lineno){
  testcase( sqlite3GlobalConfig.xLog!=0 );
  sqlite3_log(SQLITE_CORRUPT,
              "database corruption at line %d of [%.10s]",
              lineno, 20+sqlite3_sourceid());
  return SQLITE_CORRUPT;
}
SQLITE_PRIVATE int sqlite3MisuseError(int lineno){
  testcase( sqlite3GlobalConfig.xLog!=0 );
  sqlite3_log(SQLITE_MISUSE,
              "misuse at line %d of [%.10s]",
              lineno, 20+sqlite3_sourceid());
  return SQLITE_MISUSE;
}
SQLITE_PRIVATE int sqlite3CantopenError(int lineno){
  testcase( sqlite3GlobalConfig.xLog!=0 );
  sqlite3_log(SQLITE_CANTOPEN,
              "cannot open file at line %d of [%.10s]",
              lineno, 20+sqlite3_sourceid());
  return SQLITE_CANTOPEN;
}

#ifndef SQLITE_OMIT_DEPRECATED
/*
** This is a convenience routine that makes sure that all thread-specific
** data for this thread has been deallocated.
**
** SQLite no longer uses thread-specific data so this routine is now a
** no-op. It is retained for historical compatibility.
*/
SQLITE_API void SQLITE_STDCALL sqlite3_thread_cleanup(void){
}
#endif

/*
** Return meta information about a specific column of a database table.
** See comment in sqlite3.h (sqlite.h.in) for details.
*/
SQLITE_API int SQLITE_STDCALL sqlite3_table_column_metadata(
  sqlite3 *db,                /* Connection handle */
  const char *zDbName,        /* Database name or NULL */
  const char *zTableName,     /* Table name */
  const char *zColumnName,    /* Column name */
  char const **pzDataType,    /* OUTPUT: Declared data type */
  char const **pzCollSeq,     /* OUTPUT: Collation sequence name */
  int *pNotNull,              /* OUTPUT: True if NOT NULL constraint exists */
  int *pPrimaryKey,           /* OUTPUT: True if column part of PK */
  int *pAutoinc               /* OUTPUT: True if column is auto-increment */
){
  int rc;
  char *zErrMsg = 0;
  Table *pTab = 0;
  Column *pCol = 0;
  int iCol = 0;
  char const *zDataType = 0;
  char const *zCollSeq = 0;
  int notnull = 0;
  int primarykey = 0;
  int autoinc = 0;

#ifdef SQLITE_ENABLE_API_ARMOR
  if( !sqlite3SafetyCheckOk(db) || zTableName==0 ){
    return SQLITE_MISUSE_BKPT;
  }
#endif

  /* Ensure the database schema has been loaded */
  sqlite3_mutex_enter(db->mutex);
  sqlite3BtreeEnterAll(db);
  rc = sqlite3Init(db, &zErrMsg);
  if( SQLITE_OK!=rc ){
    goto error_out;
  }

  /* Locate the table in question */
  pTab = sqlite3FindTable(db, zTableName, zDbName);
  if( !pTab || pTab->pSelect ){
    pTab = 0;
    goto error_out;
  }

  /* Find the column for which info is requested */
  if( zColumnName==0 ){
    /* Query for existence of table only */
  }else{
    for(iCol=0; iCol<pTab->nCol; iCol++){
      pCol = &pTab->aCol[iCol];
      if( 0==sqlite3StrICmp(pCol->zName, zColumnName) ){
        break;
      }
    }
    if( iCol==pTab->nCol ){
      if( HasRowid(pTab) && sqlite3IsRowid(zColumnName) ){
        iCol = pTab->iPKey;
        pCol = iCol>=0 ? &pTab->aCol[iCol] : 0;
      }else{
        pTab = 0;
        goto error_out;
      }
    }
  }

  /* The following block stores the meta information that will be returned
  ** to the caller in local variables zDataType, zCollSeq, notnull, primarykey
  ** and autoinc. At this point there are two possibilities:
  **
  **     1. The specified column name was "rowid", "oid" or "_rowid_"
  **        and there is no explicitly declared IPK column.
  **
  **     2. The table is not a view and the column name identified an
  **        explicitly declared column. Copy meta information from *pCol.
*/ if( pCol ){ zDataType = pCol->zType; zCollSeq = pCol->zColl; notnull = pCol->notNull!=0; primarykey = (pCol->colFlags & COLFLAG_PRIMKEY)!=0; autoinc = pTab->iPKey==iCol && (pTab->tabFlags & TF_Autoincrement)!=0; }else{ zDataType = "INTEGER"; primarykey = 1; } if( !zCollSeq ){ zCollSeq = "BINARY"; } error_out: sqlite3BtreeLeaveAll(db); /* Whether the function call succeeded or failed, set the output parameters ** to whatever their local counterparts contain. If an error did occur, ** this has the effect of zeroing all output parameters. */ if( pzDataType ) *pzDataType = zDataType; if( pzCollSeq ) *pzCollSeq = zCollSeq; if( pNotNull ) *pNotNull = notnull; if( pPrimaryKey ) *pPrimaryKey = primarykey; if( pAutoinc ) *pAutoinc = autoinc; if( SQLITE_OK==rc && !pTab ){ sqlite3DbFree(db, zErrMsg); zErrMsg = sqlite3MPrintf(db, "no such table column: %s.%s", zTableName, zColumnName); rc = SQLITE_ERROR; } sqlite3ErrorWithMsg(db, rc, (zErrMsg?"%s":0), zErrMsg); sqlite3DbFree(db, zErrMsg); rc = sqlite3ApiExit(db, rc); sqlite3_mutex_leave(db->mutex); return rc; } /* ** Sleep for a little while. Return the amount of time slept. */ SQLITE_API int SQLITE_STDCALL sqlite3_sleep(int ms){ sqlite3_vfs *pVfs; int rc; pVfs = sqlite3_vfs_find(0); if( pVfs==0 ) return 0; /* This function works in milliseconds, but the underlying OsSleep() ** API uses microseconds. Hence the 1000's. */ rc = (sqlite3OsSleep(pVfs, 1000*ms)/1000); return rc; } /* ** Enable or disable the extended result codes. */ SQLITE_API int SQLITE_STDCALL sqlite3_extended_result_codes(sqlite3 *db, int onoff){ #ifdef SQLITE_ENABLE_API_ARMOR if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT; #endif sqlite3_mutex_enter(db->mutex); db->errMask = onoff ? 0xffffffff : 0xff; sqlite3_mutex_leave(db->mutex); return SQLITE_OK; } /* ** Invoke the xFileControl method on a particular database. */ SQLITE_API int SQLITE_STDCALL sqlite3_file_control(sqlite3 *db, const char *zDbName, int op, void *pArg){ int rc = SQLITE_ERROR; Btree *pBtree; #ifdef SQLITE_ENABLE_API_ARMOR if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT; #endif sqlite3_mutex_enter(db->mutex); pBtree = sqlite3DbNameToBtree(db, zDbName); if( pBtree ){ Pager *pPager; sqlite3_file *fd; sqlite3BtreeEnter(pBtree); pPager = sqlite3BtreePager(pBtree); assert( pPager!=0 ); fd = sqlite3PagerFile(pPager); assert( fd!=0 ); if( op==SQLITE_FCNTL_FILE_POINTER ){ *(sqlite3_file**)pArg = fd; rc = SQLITE_OK; }else if( fd->pMethods ){ rc = sqlite3OsFileControl(fd, op, pArg); }else{ rc = SQLITE_NOTFOUND; } sqlite3BtreeLeave(pBtree); } sqlite3_mutex_leave(db->mutex); return rc; } /* ** Interface to the testing logic. */ SQLITE_API int SQLITE_CDECL sqlite3_test_control(int op, ...){ int rc = 0; #ifdef SQLITE_OMIT_BUILTIN_TEST UNUSED_PARAMETER(op); #else va_list ap; va_start(ap, op); switch( op ){ /* ** Save the current state of the PRNG. */ case SQLITE_TESTCTRL_PRNG_SAVE: { sqlite3PrngSaveState(); break; } /* ** Restore the state of the PRNG to the last state saved using ** PRNG_SAVE. If PRNG_SAVE has never before been called, then ** this verb acts like PRNG_RESET. */ case SQLITE_TESTCTRL_PRNG_RESTORE: { sqlite3PrngRestoreState(); break; } /* ** Reset the PRNG back to its uninitialized state. The next call ** to sqlite3_randomness() will reseed the PRNG using a single call ** to the xRandomness method of the default VFS. */ case SQLITE_TESTCTRL_PRNG_RESET: { sqlite3_randomness(0,0); break; } /* ** sqlite3_test_control(BITVEC_TEST, size, program) ** ** Run a test against a Bitvec object of size. 
The program argument ** is an array of integers that defines the test. Return -1 on a ** memory allocation error, 0 on success, or non-zero for an error. ** See the sqlite3BitvecBuiltinTest() for additional information. */ case SQLITE_TESTCTRL_BITVEC_TEST: { int sz = va_arg(ap, int); int *aProg = va_arg(ap, int*); rc = sqlite3BitvecBuiltinTest(sz, aProg); break; } /* ** sqlite3_test_control(FAULT_INSTALL, xCallback) ** ** Arrange to invoke xCallback() whenever sqlite3FaultSim() is called, ** if xCallback is not NULL. ** ** As a test of the fault simulator mechanism itself, sqlite3FaultSim(0) ** is called immediately after installing the new callback and the return ** value from sqlite3FaultSim(0) becomes the return from ** sqlite3_test_control(). */ case SQLITE_TESTCTRL_FAULT_INSTALL: { /* MSVC is picky about pulling func ptrs from va lists. ** http://support.microsoft.com/kb/47961 ** sqlite3GlobalConfig.xTestCallback = va_arg(ap, int(*)(int)); */ typedef int(*TESTCALLBACKFUNC_t)(int); sqlite3GlobalConfig.xTestCallback = va_arg(ap, TESTCALLBACKFUNC_t); rc = sqlite3FaultSim(0); break; } /* ** sqlite3_test_control(BENIGN_MALLOC_HOOKS, xBegin, xEnd) ** ** Register hooks to call to indicate which malloc() failures ** are benign. */ case SQLITE_TESTCTRL_BENIGN_MALLOC_HOOKS: { typedef void (*void_function)(void); void_function xBenignBegin; void_function xBenignEnd; xBenignBegin = va_arg(ap, void_function); xBenignEnd = va_arg(ap, void_function); sqlite3BenignMallocHooks(xBenignBegin, xBenignEnd); break; } /* ** sqlite3_test_control(SQLITE_TESTCTRL_PENDING_BYTE, unsigned int X) ** ** Set the PENDING byte to the value in the argument, if X>0. ** Make no changes if X==0. Return the value of the pending byte ** as it existing before this routine was called. ** ** IMPORTANT: Changing the PENDING byte from 0x40000000 results in ** an incompatible database file format. Changing the PENDING byte ** while any database connection is open results in undefined and ** deleterious behavior. */ case SQLITE_TESTCTRL_PENDING_BYTE: { rc = PENDING_BYTE; #ifndef SQLITE_OMIT_WSD { unsigned int newVal = va_arg(ap, unsigned int); if( newVal ) sqlite3PendingByte = newVal; } #endif break; } /* ** sqlite3_test_control(SQLITE_TESTCTRL_ASSERT, int X) ** ** This action provides a run-time test to see whether or not ** assert() was enabled at compile-time. If X is true and assert() ** is enabled, then the return value is true. If X is true and ** assert() is disabled, then the return value is zero. If X is ** false and assert() is enabled, then the assertion fires and the ** process aborts. If X is false and assert() is disabled, then the ** return value is zero. */ case SQLITE_TESTCTRL_ASSERT: { volatile int x = 0; assert( (x = va_arg(ap,int))!=0 ); rc = x; break; } /* ** sqlite3_test_control(SQLITE_TESTCTRL_ALWAYS, int X) ** ** This action provides a run-time test to see how the ALWAYS and ** NEVER macros were defined at compile-time. ** ** The return value is ALWAYS(X). ** ** The recommended test is X==2. If the return value is 2, that means ** ALWAYS() and NEVER() are both no-op pass-through macros, which is the ** default setting. If the return value is 1, then ALWAYS() is either ** hard-coded to true or else it asserts if its argument is false. ** The first behavior (hard-coded to true) is the case if ** SQLITE_TESTCTRL_ASSERT shows that assert() is disabled and the second ** behavior (assert if the argument to ALWAYS() is false) is the case if ** SQLITE_TESTCTRL_ASSERT shows that assert() is enabled. 
** ** The run-time test procedure might look something like this: ** ** if( sqlite3_test_control(SQLITE_TESTCTRL_ALWAYS, 2)==2 ){ ** // ALWAYS() and NEVER() are no-op pass-through macros ** }else if( sqlite3_test_control(SQLITE_TESTCTRL_ASSERT, 1) ){ ** // ALWAYS(x) asserts that x is true. NEVER(x) asserts x is false. ** }else{ ** // ALWAYS(x) is a constant 1. NEVER(x) is a constant 0. ** } */ case SQLITE_TESTCTRL_ALWAYS: { int x = va_arg(ap,int); rc = ALWAYS(x); break; } /* ** sqlite3_test_control(SQLITE_TESTCTRL_BYTEORDER); ** ** The integer returned reveals the byte-order of the computer on which ** SQLite is running: ** ** 1 big-endian, determined at run-time ** 10 little-endian, determined at run-time ** 432101 big-endian, determined at compile-time ** 123410 little-endian, determined at compile-time */ case SQLITE_TESTCTRL_BYTEORDER: { rc = SQLITE_BYTEORDER*100 + SQLITE_LITTLEENDIAN*10 + SQLITE_BIGENDIAN; break; } /* sqlite3_test_control(SQLITE_TESTCTRL_RESERVE, sqlite3 *db, int N) ** ** Set the nReserve size to N for the main database on the database ** connection db. */ case SQLITE_TESTCTRL_RESERVE: { sqlite3 *db = va_arg(ap, sqlite3*); int x = va_arg(ap,int); sqlite3_mutex_enter(db->mutex); sqlite3BtreeSetPageSize(db->aDb[0].pBt, 0, x, 0); sqlite3_mutex_leave(db->mutex); break; } /* sqlite3_test_control(SQLITE_TESTCTRL_OPTIMIZATIONS, sqlite3 *db, int N) ** ** Enable or disable various optimizations for testing purposes. The ** argument N is a bitmask of optimizations to be disabled. For normal ** operation N should be 0. The idea is that a test program (like the ** SQL Logic Test or SLT test module) can run the same SQL multiple times ** with various optimizations disabled to verify that the same answer ** is obtained in every case. */ case SQLITE_TESTCTRL_OPTIMIZATIONS: { sqlite3 *db = va_arg(ap, sqlite3*); db->dbOptFlags = (u16)(va_arg(ap, int) & 0xffff); break; } #ifdef SQLITE_N_KEYWORD /* sqlite3_test_control(SQLITE_TESTCTRL_ISKEYWORD, const char *zWord) ** ** If zWord is a keyword recognized by the parser, then return the ** number of keywords. Or if zWord is not a keyword, return 0. ** ** This test feature is only available in the amalgamation since ** the SQLITE_N_KEYWORD macro is not defined in this file if SQLite ** is built using separate source files. */ case SQLITE_TESTCTRL_ISKEYWORD: { const char *zWord = va_arg(ap, const char*); int n = sqlite3Strlen30(zWord); rc = (sqlite3KeywordCode((u8*)zWord, n)!=TK_ID) ? SQLITE_N_KEYWORD : 0; break; } #endif /* sqlite3_test_control(SQLITE_TESTCTRL_SCRATCHMALLOC, sz, &pNew, pFree); ** ** Pass pFree into sqlite3ScratchFree(). ** If sz>0 then allocate a scratch buffer into pNew. */ case SQLITE_TESTCTRL_SCRATCHMALLOC: { void *pFree, **ppNew; int sz; sz = va_arg(ap, int); ppNew = va_arg(ap, void**); pFree = va_arg(ap, void*); if( sz ) *ppNew = sqlite3ScratchMalloc(sz); sqlite3ScratchFree(pFree); break; } /* sqlite3_test_control(SQLITE_TESTCTRL_LOCALTIME_FAULT, int onoff); ** ** If parameter onoff is non-zero, configure the wrappers so that all ** subsequent calls to localtime() and variants fail. If onoff is zero, ** undo this setting. */ case SQLITE_TESTCTRL_LOCALTIME_FAULT: { sqlite3GlobalConfig.bLocaltimeFault = va_arg(ap, int); break; } /* sqlite3_test_control(SQLITE_TESTCTRL_NEVER_CORRUPT, int); ** ** Set or clear a flag that indicates that the database file is always well- ** formed and never corrupt. This flag is clear by default, indicating that ** database files might have arbitrary corruption. 
Setting the flag during ** testing causes certain assert() statements in the code to be activated ** that demonstrat invariants on well-formed database files. */ case SQLITE_TESTCTRL_NEVER_CORRUPT: { sqlite3GlobalConfig.neverCorrupt = va_arg(ap, int); break; } /* sqlite3_test_control(SQLITE_TESTCTRL_VDBE_COVERAGE, xCallback, ptr); ** ** Set the VDBE coverage callback function to xCallback with context ** pointer ptr. */ case SQLITE_TESTCTRL_VDBE_COVERAGE: { #ifdef SQLITE_VDBE_COVERAGE typedef void (*branch_callback)(void*,int,u8,u8); sqlite3GlobalConfig.xVdbeBranch = va_arg(ap,branch_callback); sqlite3GlobalConfig.pVdbeBranchArg = va_arg(ap,void*); #endif break; } /* sqlite3_test_control(SQLITE_TESTCTRL_SORTER_MMAP, db, nMax); */ case SQLITE_TESTCTRL_SORTER_MMAP: { sqlite3 *db = va_arg(ap, sqlite3*); db->nMaxSorterMmap = va_arg(ap, int); break; } /* sqlite3_test_control(SQLITE_TESTCTRL_ISINIT); ** ** Return SQLITE_OK if SQLite has been initialized and SQLITE_ERROR if ** not. */ case SQLITE_TESTCTRL_ISINIT: { if( sqlite3GlobalConfig.isInit==0 ) rc = SQLITE_ERROR; break; } /* sqlite3_test_control(SQLITE_TESTCTRL_IMPOSTER, db, dbName, onOff, tnum); ** ** This test control is used to create imposter tables. "db" is a pointer ** to the database connection. dbName is the database name (ex: "main" or ** "temp") which will receive the imposter. "onOff" turns imposter mode on ** or off. "tnum" is the root page of the b-tree to which the imposter ** table should connect. ** ** Enable imposter mode only when the schema has already been parsed. Then ** run a single CREATE TABLE statement to construct the imposter table in ** the parsed schema. Then turn imposter mode back off again. ** ** If onOff==0 and tnum>0 then reset the schema for all databases, causing ** the schema to be reparsed the next time it is needed. This has the ** effect of erasing all imposter tables. */ case SQLITE_TESTCTRL_IMPOSTER: { sqlite3 *db = va_arg(ap, sqlite3*); sqlite3_mutex_enter(db->mutex); db->init.iDb = sqlite3FindDbName(db, va_arg(ap,const char*)); db->init.busy = db->init.imposterTable = va_arg(ap,int); db->init.newTnum = va_arg(ap,int); if( db->init.busy==0 && db->init.newTnum>0 ){ sqlite3ResetAllSchemasOfConnection(db); } sqlite3_mutex_leave(db->mutex); break; } } va_end(ap); #endif /* SQLITE_OMIT_BUILTIN_TEST */ return rc; } /* ** This is a utility routine, useful to VFS implementations, that checks ** to see if a database file was a URI that contained a specific query ** parameter, and if so obtains the value of the query parameter. ** ** The zFilename argument is the filename pointer passed into the xOpen() ** method of a VFS implementation. The zParam argument is the name of the ** query parameter we seek. This routine returns the value of the zParam ** parameter if it exists. If the parameter does not exist, this routine ** returns a NULL pointer. */ SQLITE_API const char *SQLITE_STDCALL sqlite3_uri_parameter(const char *zFilename, const char *zParam){ if( zFilename==0 || zParam==0 ) return 0; zFilename += sqlite3Strlen30(zFilename) + 1; while( zFilename[0] ){ int x = strcmp(zFilename, zParam); zFilename += sqlite3Strlen30(zFilename) + 1; if( x==0 ) return zFilename; zFilename += sqlite3Strlen30(zFilename) + 1; } return 0; } /* ** Return a boolean value for a query parameter. */ SQLITE_API int SQLITE_STDCALL sqlite3_uri_boolean(const char *zFilename, const char *zParam, int bDflt){ const char *z = sqlite3_uri_parameter(zFilename, zParam); bDflt = bDflt!=0; return z ? 
         sqlite3GetBoolean(z, bDflt) : bDflt;
}

/*
** Return a 64-bit integer value for a query parameter.
*/
SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_uri_int64(
  const char *zFilename,    /* Filename as passed to xOpen */
  const char *zParam,       /* URI parameter sought */
  sqlite3_int64 bDflt       /* return if parameter is missing */
){
  const char *z = sqlite3_uri_parameter(zFilename, zParam);
  sqlite3_int64 v;
  if( z && sqlite3DecOrHexToI64(z, &v)==SQLITE_OK ){
    bDflt = v;
  }
  return bDflt;
}

/*
** Return the Btree pointer identified by zDbName. Return NULL if not found.
*/
SQLITE_PRIVATE Btree *sqlite3DbNameToBtree(sqlite3 *db, const char *zDbName){
  int i;
  for(i=0; i<db->nDb; i++){
    if( db->aDb[i].pBt
     && (zDbName==0 || sqlite3StrICmp(zDbName, db->aDb[i].zName)==0)
    ){
      return db->aDb[i].pBt;
    }
  }
  return 0;
}

/*
** Return the filename of the database associated with a database
** connection.
*/
SQLITE_API const char *SQLITE_STDCALL sqlite3_db_filename(sqlite3 *db, const char *zDbName){
  Btree *pBt;
#ifdef SQLITE_ENABLE_API_ARMOR
  if( !sqlite3SafetyCheckOk(db) ){
    (void)SQLITE_MISUSE_BKPT;
    return 0;
  }
#endif
  pBt = sqlite3DbNameToBtree(db, zDbName);
  return pBt ? sqlite3BtreeGetFilename(pBt) : 0;
}

/*
** Return 1 if database is read-only or 0 if read/write. Return -1 if
** no such database exists.
*/
SQLITE_API int SQLITE_STDCALL sqlite3_db_readonly(sqlite3 *db, const char *zDbName){
  Btree *pBt;
#ifdef SQLITE_ENABLE_API_ARMOR
  if( !sqlite3SafetyCheckOk(db) ){
    (void)SQLITE_MISUSE_BKPT;
    return -1;
  }
#endif
  pBt = sqlite3DbNameToBtree(db, zDbName);
  return pBt ? sqlite3BtreeIsReadonly(pBt) : -1;
}

/************** End of main.c ************************************************/
/************** Begin file notify.c ******************************************/
/*
** 2009 March 3
**
** The author disclaims copyright to this source code.  In place of
** a legal notice, here is a blessing:
**
**    May you do good and not evil.
**    May you find forgiveness for yourself and forgive others.
**    May you share freely, never taking more than you give.
**
*************************************************************************
**
** This file contains the implementation of the sqlite3_unlock_notify()
** API method and its associated functionality.
*/
/* #include "sqliteInt.h" */
/* #include "btreeInt.h" */

/* Omit this entire file if SQLITE_ENABLE_UNLOCK_NOTIFY is not defined. */
#ifdef SQLITE_ENABLE_UNLOCK_NOTIFY

/*
** Public interfaces:
**
**   sqlite3ConnectionBlocked()
**   sqlite3ConnectionUnlocked()
**   sqlite3ConnectionClosed()
**   sqlite3_unlock_notify()
*/

#define assertMutexHeld() \
  assert( sqlite3_mutex_held(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER)) )

/*
** Head of a linked list of all sqlite3 objects created by this process
** for which either sqlite3.pBlockingConnection or sqlite3.pUnlockConnection
** is not NULL. This variable may only be accessed while the STATIC_MASTER
** mutex is held.
*/
static sqlite3 *SQLITE_WSD sqlite3BlockedList = 0;

#ifndef NDEBUG
/*
** This function is a complex assert() that verifies the following
** properties of the blocked connections list:
**
**   1) Each entry in the list has a non-NULL value for either
**      pUnlockConnection or pBlockingConnection, or both.
**
**   2) All entries in the list that share a common value for
**      xUnlockNotify are grouped together.
**
**   3) If the argument db is not NULL, then none of the entries in the
**      blocked connections list have pUnlockConnection or pBlockingConnection
**      set to db. This is used when closing connection db.
*/ static void checkListProperties(sqlite3 *db){ sqlite3 *p; for(p=sqlite3BlockedList; p; p=p->pNextBlocked){ int seen = 0; sqlite3 *p2; /* Verify property (1) */ assert( p->pUnlockConnection || p->pBlockingConnection ); /* Verify property (2) */ for(p2=sqlite3BlockedList; p2!=p; p2=p2->pNextBlocked){ if( p2->xUnlockNotify==p->xUnlockNotify ) seen = 1; assert( p2->xUnlockNotify==p->xUnlockNotify || !seen ); assert( db==0 || p->pUnlockConnection!=db ); assert( db==0 || p->pBlockingConnection!=db ); } } } #else # define checkListProperties(x) #endif /* ** Remove connection db from the blocked connections list. If connection ** db is not currently a part of the list, this function is a no-op. */ static void removeFromBlockedList(sqlite3 *db){ sqlite3 **pp; assertMutexHeld(); for(pp=&sqlite3BlockedList; *pp; pp = &(*pp)->pNextBlocked){ if( *pp==db ){ *pp = (*pp)->pNextBlocked; break; } } } /* ** Add connection db to the blocked connections list. It is assumed ** that it is not already a part of the list. */ static void addToBlockedList(sqlite3 *db){ sqlite3 **pp; assertMutexHeld(); for( pp=&sqlite3BlockedList; *pp && (*pp)->xUnlockNotify!=db->xUnlockNotify; pp=&(*pp)->pNextBlocked ); db->pNextBlocked = *pp; *pp = db; } /* ** Obtain the STATIC_MASTER mutex. */ static void enterMutex(void){ sqlite3_mutex_enter(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER)); checkListProperties(0); } /* ** Release the STATIC_MASTER mutex. */ static void leaveMutex(void){ assertMutexHeld(); checkListProperties(0); sqlite3_mutex_leave(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER)); } /* ** Register an unlock-notify callback. ** ** This is called after connection "db" has attempted some operation ** but has received an SQLITE_LOCKED error because another connection ** (call it pOther) in the same process was busy using the same shared ** cache. pOther is found by looking at db->pBlockingConnection. ** ** If there is no blocking connection, the callback is invoked immediately, ** before this routine returns. ** ** If pOther is already blocked on db, then report SQLITE_LOCKED, to indicate ** a deadlock. ** ** Otherwise, make arrangements to invoke xNotify when pOther drops ** its locks. ** ** Each call to this routine overrides any prior callbacks registered ** on the same "db". If xNotify==0 then any prior callbacks are immediately ** cancelled. */ SQLITE_API int SQLITE_STDCALL sqlite3_unlock_notify( sqlite3 *db, void (*xNotify)(void **, int), void *pArg ){ int rc = SQLITE_OK; sqlite3_mutex_enter(db->mutex); enterMutex(); if( xNotify==0 ){ removeFromBlockedList(db); db->pBlockingConnection = 0; db->pUnlockConnection = 0; db->xUnlockNotify = 0; db->pUnlockArg = 0; }else if( 0==db->pBlockingConnection ){ /* The blocking transaction has been concluded. Or there never was a ** blocking transaction. In either case, invoke the notify callback ** immediately. */ xNotify(&pArg, 1); }else{ sqlite3 *p; for(p=db->pBlockingConnection; p && p!=db; p=p->pUnlockConnection){} if( p ){ rc = SQLITE_LOCKED; /* Deadlock detected. */ }else{ db->pUnlockConnection = db->pBlockingConnection; db->xUnlockNotify = xNotify; db->pUnlockArg = pArg; removeFromBlockedList(db); addToBlockedList(db); } } leaveMutex(); assert( !db->mallocFailed ); sqlite3ErrorWithMsg(db, rc, (rc?"database is deadlocked":0)); sqlite3_mutex_leave(db->mutex); return rc; } /* ** This function is called while stepping or preparing a statement ** associated with connection db. 
The operation will return SQLITE_LOCKED ** to the user because it requires a lock that will not be available ** until connection pBlocker concludes its current transaction. */ SQLITE_PRIVATE void sqlite3ConnectionBlocked(sqlite3 *db, sqlite3 *pBlocker){ enterMutex(); if( db->pBlockingConnection==0 && db->pUnlockConnection==0 ){ addToBlockedList(db); } db->pBlockingConnection = pBlocker; leaveMutex(); } /* ** This function is called when ** the transaction opened by database db has just finished. Locks held ** by database connection db have been released. ** ** This function loops through each entry in the blocked connections ** list and does the following: ** ** 1) If the sqlite3.pBlockingConnection member of a list entry is ** set to db, then set pBlockingConnection=0. ** ** 2) If the sqlite3.pUnlockConnection member of a list entry is ** set to db, then invoke the configured unlock-notify callback and ** set pUnlockConnection=0. ** ** 3) If the two steps above mean that pBlockingConnection==0 and ** pUnlockConnection==0, remove the entry from the blocked connections ** list. */ SQLITE_PRIVATE void sqlite3ConnectionUnlocked(sqlite3 *db){ void (*xUnlockNotify)(void **, int) = 0; /* Unlock-notify cb to invoke */ int nArg = 0; /* Number of entries in aArg[] */ sqlite3 **pp; /* Iterator variable */ void **aArg; /* Arguments to the unlock callback */ void **aDyn = 0; /* Dynamically allocated space for aArg[] */ void *aStatic[16]; /* Starter space for aArg[]. No malloc required */ aArg = aStatic; enterMutex(); /* Enter STATIC_MASTER mutex */ /* This loop runs once for each entry in the blocked-connections list. */ for(pp=&sqlite3BlockedList; *pp; /* no-op */ ){ sqlite3 *p = *pp; /* Step 1. */ if( p->pBlockingConnection==db ){ p->pBlockingConnection = 0; } /* Step 2. */ if( p->pUnlockConnection==db ){ assert( p->xUnlockNotify ); if( p->xUnlockNotify!=xUnlockNotify && nArg!=0 ){ xUnlockNotify(aArg, nArg); nArg = 0; } sqlite3BeginBenignMalloc(); assert( aArg==aDyn || (aDyn==0 && aArg==aStatic) ); assert( nArg<=(int)ArraySize(aStatic) || aArg==aDyn ); if( (!aDyn && nArg==(int)ArraySize(aStatic)) || (aDyn && nArg==(int)(sqlite3MallocSize(aDyn)/sizeof(void*))) ){ /* The aArg[] array needs to grow. */ void **pNew = (void **)sqlite3Malloc(nArg*sizeof(void *)*2); if( pNew ){ memcpy(pNew, aArg, nArg*sizeof(void *)); sqlite3_free(aDyn); aDyn = aArg = pNew; }else{ /* This occurs when the array of context pointers that need to ** be passed to the unlock-notify callback is larger than the ** aStatic[] array allocated on the stack and the attempt to ** allocate a larger array from the heap has failed. ** ** This is a difficult situation to handle. Returning an error ** code to the caller is insufficient, as even if an error code ** is returned the transaction on connection db will still be ** closed and the unlock-notify callbacks on blocked connections ** will go unissued. This might cause the application to wait ** indefinitely for an unlock-notify callback that will never ** arrive. ** ** Instead, invoke the unlock-notify callback with the context ** array already accumulated. We can then clear the array and ** begin accumulating any further context pointers without ** requiring any dynamic allocation. This is sub-optimal because ** it means that instead of one callback with a large array of ** context pointers the application will receive two or more ** callbacks with smaller arrays of context pointers, which will ** reduce the applications ability to prioritize multiple ** connections. 
But it is the best that can be done under the ** circumstances. */ xUnlockNotify(aArg, nArg); nArg = 0; } } sqlite3EndBenignMalloc(); aArg[nArg++] = p->pUnlockArg; xUnlockNotify = p->xUnlockNotify; p->pUnlockConnection = 0; p->xUnlockNotify = 0; p->pUnlockArg = 0; } /* Step 3. */ if( p->pBlockingConnection==0 && p->pUnlockConnection==0 ){ /* Remove connection p from the blocked connections list. */ *pp = p->pNextBlocked; p->pNextBlocked = 0; }else{ pp = &p->pNextBlocked; } } if( nArg!=0 ){ xUnlockNotify(aArg, nArg); } sqlite3_free(aDyn); leaveMutex(); /* Leave STATIC_MASTER mutex */ } /* ** This is called when the database connection passed as an argument is ** being closed. The connection is removed from the blocked list. */ SQLITE_PRIVATE void sqlite3ConnectionClosed(sqlite3 *db){ sqlite3ConnectionUnlocked(db); enterMutex(); removeFromBlockedList(db); checkListProperties(db); leaveMutex(); } #endif /************** End of notify.c **********************************************/ /************** Begin file fts3.c ********************************************/ /* ** 2006 Oct 10 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ****************************************************************************** ** ** This is an SQLite module implementing full-text search. */ /* ** The code in this file is only compiled if: ** ** * The FTS3 module is being built as an extension ** (in which case SQLITE_CORE is not defined), or ** ** * The FTS3 module is being built into the core of ** SQLite (in which case SQLITE_ENABLE_FTS3 is defined). */ /* The full-text index is stored in a series of b+tree (-like) ** structures called segments which map terms to doclists. The ** structures are like b+trees in layout, but are constructed from the ** bottom up in optimal fashion and are not updatable. Since trees ** are built from the bottom up, things will be described from the ** bottom up. ** ** **** Varints **** ** The basic unit of encoding is a variable-length integer called a ** varint. We encode variable-length integers in little-endian order ** using seven bits * per byte as follows: ** ** KEY: ** A = 0xxxxxxx 7 bits of data and one flag bit ** B = 1xxxxxxx 7 bits of data and one flag bit ** ** 7 bits - A ** 14 bits - BA ** 21 bits - BBA ** and so on. ** ** This is similar in concept to how sqlite encodes "varints" but ** the encoding is not the same. SQLite varints are big-endian ** are are limited to 9 bytes in length whereas FTS3 varints are ** little-endian and can be up to 10 bytes in length (in theory). ** ** Example encodings: ** ** 1: 0x01 ** 127: 0x7f ** 128: 0x81 0x00 ** ** **** Document lists **** ** A doclist (document list) holds a docid-sorted list of hits for a ** given term. Doclists hold docids and associated token positions. ** A docid is the unique integer identifier for a single document. ** A position is the index of a word within the document. The first ** word of the document has a position of 0. ** ** FTS3 used to optionally store character offsets using a compile-time ** option. But that functionality is no longer supported. 
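**
** As an illustrative sketch only (the production routines are
** sqlite3Fts3PutVarint() and sqlite3Fts3GetVarint() later in this file, and
** the name exampleGetVarint is an assumption made for the sketch), a
** straightforward decoder for this little-endian format looks like:
**
**   static int exampleGetVarint(const unsigned char *p, sqlite3_int64 *pVal){
**     sqlite3_uint64 v = 0;
**     int i = 0;
**     do{
**       v |= (sqlite3_uint64)(p[i] & 0x7f) << (7*i);
**     }while( p[i++] & 0x80 );
**     *pVal = (sqlite3_int64)v;
**     return i;
**   }
**
** The return value is the number of bytes consumed.  Note again that this
** differs from the big-endian varint encoding used by the SQLite core.
**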
** ** A doclist is stored like this: ** ** array { ** varint docid; (delta from previous doclist) ** array { (position list for column 0) ** varint position; (2 more than the delta from previous position) ** } ** array { ** varint POS_COLUMN; (marks start of position list for new column) ** varint column; (index of new column) ** array { ** varint position; (2 more than the delta from previous position) ** } ** } ** varint POS_END; (marks end of positions for this document. ** } ** ** Here, array { X } means zero or more occurrences of X, adjacent in ** memory. A "position" is an index of a token in the token stream ** generated by the tokenizer. Note that POS_END and POS_COLUMN occur ** in the same logical place as the position element, and act as sentinals ** ending a position list array. POS_END is 0. POS_COLUMN is 1. ** The positions numbers are not stored literally but rather as two more ** than the difference from the prior position, or the just the position plus ** 2 for the first position. Example: ** ** label: A B C D E F G H I J K ** value: 123 5 9 1 1 14 35 0 234 72 0 ** ** The 123 value is the first docid. For column zero in this document ** there are two matches at positions 3 and 10 (5-2 and 9-2+3). The 1 ** at D signals the start of a new column; the 1 at E indicates that the ** new column is column number 1. There are two positions at 12 and 45 ** (14-2 and 35-2+12). The 0 at H indicate the end-of-document. The ** 234 at I is the delta to next docid (357). It has one position 70 ** (72-2) and then terminates with the 0 at K. ** ** A "position-list" is the list of positions for multiple columns for ** a single docid. A "column-list" is the set of positions for a single ** column. Hence, a position-list consists of one or more column-lists, ** a document record consists of a docid followed by a position-list and ** a doclist consists of one or more document records. ** ** A bare doclist omits the position information, becoming an ** array of varint-encoded docids. ** **** Segment leaf nodes **** ** Segment leaf nodes store terms and doclists, ordered by term. Leaf ** nodes are written using LeafWriter, and read using LeafReader (to ** iterate through a single leaf node's data) and LeavesReader (to ** iterate through a segment's entire leaf layer). Leaf nodes have ** the format: ** ** varint iHeight; (height from leaf level, always 0) ** varint nTerm; (length of first term) ** char pTerm[nTerm]; (content of first term) ** varint nDoclist; (length of term's associated doclist) ** char pDoclist[nDoclist]; (content of doclist) ** array { ** (further terms are delta-encoded) ** varint nPrefix; (length of prefix shared with previous term) ** varint nSuffix; (length of unshared suffix) ** char pTermSuffix[nSuffix];(unshared suffix of next term) ** varint nDoclist; (length of term's associated doclist) ** char pDoclist[nDoclist]; (content of doclist) ** } ** ** Here, array { X } means zero or more occurrences of X, adjacent in ** memory. ** ** Leaf nodes are broken into blocks which are stored contiguously in ** the %_segments table in sorted order. This means that when the end ** of a node is reached, the next term is in the node with the next ** greater node id. ** ** New data is spilled to a new leaf node when the current node ** exceeds LEAF_MAX bytes (default 2048). New data which itself is ** larger than STANDALONE_MIN (default 1024) is placed in a standalone ** node (a leaf node with a single term and doclist). 
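**
** Returning to the doclist format above, the following is an illustrative
** sketch (not code from this file) of reading one document record that
** includes position data.  It reuses the hypothetical exampleGetVarint()
** decoder sketched earlier; recall that the values 0 and 1 are the POS_END
** and POS_COLUMN sentinels, and that each stored position is the delta from
** the previous position plus 2.
**
**   const unsigned char *p = ...;          (points at a docid varint)
**   sqlite3_int64 iVal, iDocid, iPrev = 0;
**   int iCol = 0;
**   p += exampleGetVarint(p, &iDocid);     (docid, delta-coded between docs)
**   for(;;){
**     p += exampleGetVarint(p, &iVal);
**     if( iVal==0 ) break;                 (POS_END: end of this document)
**     if( iVal==1 ){                       (POS_COLUMN: new column follows)
**       p += exampleGetVarint(p, &iVal);
**       iCol = (int)iVal;
**       iPrev = 0;
**       continue;
**     }
**     iPrev = iPrev + iVal - 2;            (a token occurs at position iPrev
**                                           of column iCol)
**   }
**
** Applied to the example above, this yields positions 3 and 10 in column 0
** and positions 12 and 45 in column 1 for the first document.
**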
The goal of ** these settings is to pack together groups of small doclists while ** making it efficient to directly access large doclists. The ** assumption is that large doclists represent terms which are more ** likely to be query targets. ** ** TODO(shess) It may be useful for blocking decisions to be more ** dynamic. For instance, it may make more sense to have a 2.5k leaf ** node rather than splitting into 2k and .5k nodes. My intuition is ** that this might extend through 2x or 4x the pagesize. ** ** **** Segment interior nodes **** ** Segment interior nodes store blockids for subtree nodes and terms ** to describe what data is stored by the each subtree. Interior ** nodes are written using InteriorWriter, and read using ** InteriorReader. InteriorWriters are created as needed when ** SegmentWriter creates new leaf nodes, or when an interior node ** itself grows too big and must be split. The format of interior ** nodes: ** ** varint iHeight; (height from leaf level, always >0) ** varint iBlockid; (block id of node's leftmost subtree) ** optional { ** varint nTerm; (length of first term) ** char pTerm[nTerm]; (content of first term) ** array { ** (further terms are delta-encoded) ** varint nPrefix; (length of shared prefix with previous term) ** varint nSuffix; (length of unshared suffix) ** char pTermSuffix[nSuffix]; (unshared suffix of next term) ** } ** } ** ** Here, optional { X } means an optional element, while array { X } ** means zero or more occurrences of X, adjacent in memory. ** ** An interior node encodes n terms separating n+1 subtrees. The ** subtree blocks are contiguous, so only the first subtree's blockid ** is encoded. The subtree at iBlockid will contain all terms less ** than the first term encoded (or all terms if no term is encoded). ** Otherwise, for terms greater than or equal to pTerm[i] but less ** than pTerm[i+1], the subtree for that term will be rooted at ** iBlockid+i. Interior nodes only store enough term data to ** distinguish adjacent children (if the rightmost term of the left ** child is "something", and the leftmost term of the right child is ** "wicked", only "w" is stored). ** ** New data is spilled to a new interior node at the same height when ** the current node exceeds INTERIOR_MAX bytes (default 2048). ** INTERIOR_MIN_TERMS (default 7) keeps large terms from monopolizing ** interior nodes and making the tree too skinny. The interior nodes ** at a given height are naturally tracked by interior nodes at ** height+1, and so on. ** ** **** Segment directory **** ** The segment directory in table %_segdir stores meta-information for ** merging and deleting segments, and also the root node of the ** segment's tree. ** ** The root node is the top node of the segment's tree after encoding ** the entire segment, restricted to ROOT_MAX bytes (default 1024). ** This could be either a leaf node or an interior node. If the top ** node requires more than ROOT_MAX bytes, it is flushed to %_segments ** and a new root interior node is generated (which should always fit ** within ROOT_MAX because it only needs space for 2 varints, the ** height and the blockid of the previous root). 
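**
** As a worked example of the term delta-encoding shared by leaf and interior
** nodes: if the previous term is "linux" and the next term is "linuxes", the
** next term is stored as nPrefix=5, nSuffix=2, pTermSuffix="es".  An
** illustrative reconstruction (a sketch; zTerm, nTerm and exampleGetVarint
** are assumptions, not names used in this file) is simply:
**
**   p += exampleGetVarint(p, &nPrefix);
**   p += exampleGetVarint(p, &nSuffix);
**   memcpy(&zTerm[nPrefix], p, nSuffix);   (zTerm still holds the prior term)
**   nTerm = (int)(nPrefix + nSuffix);
**   p += nSuffix;
**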
** ** The meta-information in the segment directory is: ** level - segment level (see below) ** idx - index within level ** - (level,idx uniquely identify a segment) ** start_block - first leaf node ** leaves_end_block - last leaf node ** end_block - last block (including interior nodes) ** root - contents of root node ** ** If the root node is a leaf node, then start_block, ** leaves_end_block, and end_block are all 0. ** ** **** Segment merging **** ** To amortize update costs, segments are grouped into levels and ** merged in batches. Each increase in level represents exponentially ** more documents. ** ** New documents (actually, document updates) are tokenized and ** written individually (using LeafWriter) to a level 0 segment, with ** incrementing idx. When idx reaches MERGE_COUNT (default 16), all ** level 0 segments are merged into a single level 1 segment. Level 1 ** is populated like level 0, and eventually MERGE_COUNT level 1 ** segments are merged to a single level 2 segment (representing ** MERGE_COUNT^2 updates), and so on. ** ** A segment merge traverses all segments at a given level in ** parallel, performing a straightforward sorted merge. Since segment ** leaf nodes are written in to the %_segments table in order, this ** merge traverses the underlying sqlite disk structures efficiently. ** After the merge, all segment blocks from the merged level are ** deleted. ** ** MERGE_COUNT controls how often we merge segments. 16 seems to be ** somewhat of a sweet spot for insertion performance. 32 and 64 show ** very similar performance numbers to 16 on insertion, though they're ** a tiny bit slower (perhaps due to more overhead in merge-time ** sorting). 8 is about 20% slower than 16, 4 about 50% slower than ** 16, 2 about 66% slower than 16. ** ** At query time, high MERGE_COUNT increases the number of segments ** which need to be scanned and merged. For instance, with 100k docs ** inserted: ** ** MERGE_COUNT segments ** 16 25 ** 8 12 ** 4 10 ** 2 6 ** ** This appears to have only a moderate impact on queries for very ** frequent terms (which are somewhat dominated by segment merge ** costs), and infrequent and non-existent terms still seem to be fast ** even with many segments. ** ** TODO(shess) That said, it would be nice to have a better query-side ** argument for MERGE_COUNT of 16. Also, it is possible/likely that ** optimizations to things like doclist merging will swing the sweet ** spot around. ** ** ** **** Handling of deletions and updates **** ** Since we're using a segmented structure, with no docid-oriented ** index into the term index, we clearly cannot simply update the term ** index when a document is deleted or updated. For deletions, we ** write an empty doclist (varint(docid) varint(POS_END)), for updates ** we simply write the new doclist. Segment merges overwrite older ** data for a particular docid with newer data, so deletes or updates ** will eventually overtake the earlier data and knock it out. The ** query logic likewise merges doclists so that newer data knocks out ** older data. */ /************** Include fts3Int.h in the middle of fts3.c ********************/ /************** Begin file fts3Int.h *****************************************/ /* ** 2009 Nov 12 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. 
** ****************************************************************************** ** */ #ifndef _FTSINT_H #define _FTSINT_H #if !defined(NDEBUG) && !defined(SQLITE_DEBUG) # define NDEBUG 1 #endif /* ** FTS4 is really an extension for FTS3. It is enabled using the ** SQLITE_ENABLE_FTS3 macro. But to avoid confusion we also all ** the SQLITE_ENABLE_FTS4 macro to serve as an alisse for SQLITE_ENABLE_FTS3. */ #if defined(SQLITE_ENABLE_FTS4) && !defined(SQLITE_ENABLE_FTS3) # define SQLITE_ENABLE_FTS3 #endif #if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) /* If not building as part of the core, include sqlite3ext.h. */ #ifndef SQLITE_CORE /* # include "sqlite3ext.h" */ SQLITE_EXTENSION_INIT3 #endif /* #include "sqlite3.h" */ /************** Include fts3_tokenizer.h in the middle of fts3Int.h **********/ /************** Begin file fts3_tokenizer.h **********************************/ /* ** 2006 July 10 ** ** The author disclaims copyright to this source code. ** ************************************************************************* ** Defines the interface to tokenizers used by fulltext-search. There ** are three basic components: ** ** sqlite3_tokenizer_module is a singleton defining the tokenizer ** interface functions. This is essentially the class structure for ** tokenizers. ** ** sqlite3_tokenizer is used to define a particular tokenizer, perhaps ** including customization information defined at creation time. ** ** sqlite3_tokenizer_cursor is generated by a tokenizer to generate ** tokens from a particular input. */ #ifndef _FTS3_TOKENIZER_H_ #define _FTS3_TOKENIZER_H_ /* TODO(shess) Only used for SQLITE_OK and SQLITE_DONE at this time. ** If tokenizers are to be allowed to call sqlite3_*() functions, then ** we will need a way to register the API consistently. */ /* #include "sqlite3.h" */ /* ** Structures used by the tokenizer interface. When a new tokenizer ** implementation is registered, the caller provides a pointer to ** an sqlite3_tokenizer_module containing pointers to the callback ** functions that make up an implementation. ** ** When an fts3 table is created, it passes any arguments passed to ** the tokenizer clause of the CREATE VIRTUAL TABLE statement to the ** sqlite3_tokenizer_module.xCreate() function of the requested tokenizer ** implementation. The xCreate() function in turn returns an ** sqlite3_tokenizer structure representing the specific tokenizer to ** be used for the fts3 table (customized by the tokenizer clause arguments). ** ** To tokenize an input buffer, the sqlite3_tokenizer_module.xOpen() ** method is called. It returns an sqlite3_tokenizer_cursor object ** that may be used to tokenize a specific input buffer based on ** the tokenization rules supplied by a specific sqlite3_tokenizer ** object. */ typedef struct sqlite3_tokenizer_module sqlite3_tokenizer_module; typedef struct sqlite3_tokenizer sqlite3_tokenizer; typedef struct sqlite3_tokenizer_cursor sqlite3_tokenizer_cursor; struct sqlite3_tokenizer_module { /* ** Structure version. Should always be set to 0 or 1. */ int iVersion; /* ** Create a new tokenizer. The values in the argv[] array are the ** arguments passed to the "tokenizer" clause of the CREATE VIRTUAL ** TABLE statement that created the fts3 table. For example, if ** the following SQL is executed: ** ** CREATE .. USING fts3( ... , tokenizer arg1 arg2) ** ** then argc is set to 2, and the argv[] array contains pointers ** to the strings "arg1" and "arg2". 
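**
** As an illustrative sketch only (not part of the required interface), an
** xCreate implementation for a hypothetical tokenizer might look like the
** following; the structure name exampleTokenizer and the option parsing are
** assumptions made for the example:
**
**   typedef struct exampleTokenizer {
**     sqlite3_tokenizer base;          (embedded base returned to the caller)
**     int bFoldCase;                   (example option read from argv[])
**   } exampleTokenizer;
**
**   static int exampleCreate(
**     int argc, const char *const*argv, sqlite3_tokenizer **ppTokenizer
**   ){
**     exampleTokenizer *p = (exampleTokenizer *)sqlite3_malloc(sizeof(*p));
**     if( p==0 ) return SQLITE_NOMEM;
**     memset(p, 0, sizeof(*p));
**     p->bFoldCase = (argc>0 && 0==strcmp(argv[0], "fold_case"));
**     *ppTokenizer = &p->base;
**     return SQLITE_OK;
**   }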
** ** This method should return either SQLITE_OK (0), or an SQLite error ** code. If SQLITE_OK is returned, then *ppTokenizer should be set ** to point at the newly created tokenizer structure. The generic ** sqlite3_tokenizer.pModule variable should not be initialized by ** this callback. The caller will do so. */ int (*xCreate)( int argc, /* Size of argv array */ const char *const*argv, /* Tokenizer argument strings */ sqlite3_tokenizer **ppTokenizer /* OUT: Created tokenizer */ ); /* ** Destroy an existing tokenizer. The fts3 module calls this method ** exactly once for each successful call to xCreate(). */ int (*xDestroy)(sqlite3_tokenizer *pTokenizer); /* ** Create a tokenizer cursor to tokenize an input buffer. The caller ** is responsible for ensuring that the input buffer remains valid ** until the cursor is closed (using the xClose() method). */ int (*xOpen)( sqlite3_tokenizer *pTokenizer, /* Tokenizer object */ const char *pInput, int nBytes, /* Input buffer */ sqlite3_tokenizer_cursor **ppCursor /* OUT: Created tokenizer cursor */ ); /* ** Destroy an existing tokenizer cursor. The fts3 module calls this ** method exactly once for each successful call to xOpen(). */ int (*xClose)(sqlite3_tokenizer_cursor *pCursor); /* ** Retrieve the next token from the tokenizer cursor pCursor. This ** method should either return SQLITE_OK and set the values of the ** "OUT" variables identified below, or SQLITE_DONE to indicate that ** the end of the buffer has been reached, or an SQLite error code. ** ** *ppToken should be set to point at a buffer containing the ** normalized version of the token (i.e. after any case-folding and/or ** stemming has been performed). *pnBytes should be set to the length ** of this buffer in bytes. The input text that generated the token is ** identified by the byte offsets returned in *piStartOffset and ** *piEndOffset. *piStartOffset should be set to the index of the first ** byte of the token in the input buffer. *piEndOffset should be set ** to the index of the first byte just past the end of the token in ** the input buffer. ** ** The buffer *ppToken is set to point at is managed by the tokenizer ** implementation. It is only required to be valid until the next call ** to xNext() or xClose(). */ /* TODO(shess) current implementation requires pInput to be ** nul-terminated. This should either be fixed, or pInput/nBytes ** should be converted to zInput. */ int (*xNext)( sqlite3_tokenizer_cursor *pCursor, /* Tokenizer cursor */ const char **ppToken, int *pnBytes, /* OUT: Normalized text for token */ int *piStartOffset, /* OUT: Byte offset of token in input buffer */ int *piEndOffset, /* OUT: Byte offset of end of token in input buffer */ int *piPosition /* OUT: Number of tokens returned before this one */ ); /*********************************************************************** ** Methods below this point are only available if iVersion>=1. */ /* ** Configure the language id of a tokenizer cursor. */ int (*xLanguageid)(sqlite3_tokenizer_cursor *pCsr, int iLangid); }; struct sqlite3_tokenizer { const sqlite3_tokenizer_module *pModule; /* The module for this tokenizer */ /* Tokenizer implementations will typically add additional fields */ }; struct sqlite3_tokenizer_cursor { sqlite3_tokenizer *pTokenizer; /* Tokenizer for this cursor. 
*/ /* Tokenizer implementations will typically add additional fields */ }; int fts3_global_term_cnt(int iTerm, int iCol); int fts3_term_cnt(int iTerm, int iCol); #endif /* _FTS3_TOKENIZER_H_ */ /************** End of fts3_tokenizer.h **************************************/ /************** Continuing where we left off in fts3Int.h ********************/ /************** Include fts3_hash.h in the middle of fts3Int.h ***************/ /************** Begin file fts3_hash.h ***************************************/ /* ** 2001 September 22 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** This is the header file for the generic hash-table implementation ** used in SQLite. We've modified it slightly to serve as a standalone ** hash table implementation for the full-text indexing module. ** */ #ifndef _FTS3_HASH_H_ #define _FTS3_HASH_H_ /* Forward declarations of structures. */ typedef struct Fts3Hash Fts3Hash; typedef struct Fts3HashElem Fts3HashElem; /* A complete hash table is an instance of the following structure. ** The internals of this structure are intended to be opaque -- client ** code should not attempt to access or modify the fields of this structure ** directly. Change this structure only by using the routines below. ** However, many of the "procedures" and "functions" for modifying and ** accessing this structure are really macros, so we can't really make ** this structure opaque. */ struct Fts3Hash { char keyClass; /* HASH_INT, _POINTER, _STRING, _BINARY */ char copyKey; /* True if copy of key made on insert */ int count; /* Number of entries in this table */ Fts3HashElem *first; /* The first element of the array */ int htsize; /* Number of buckets in the hash table */ struct _fts3ht { /* the hash table */ int count; /* Number of entries with this hash */ Fts3HashElem *chain; /* Pointer to first entry with this hash */ } *ht; }; /* Each element in the hash table is an instance of the following ** structure. All elements are stored on a single doubly-linked list. ** ** Again, this structure is intended to be opaque, but it can't really ** be opaque because it is used by macros. */ struct Fts3HashElem { Fts3HashElem *next, *prev; /* Next and previous elements in the table */ void *data; /* Data associated with this element */ void *pKey; int nKey; /* Key associated with this element */ }; /* ** There are 2 different modes of operation for a hash table: ** ** FTS3_HASH_STRING pKey points to a string that is nKey bytes long ** (including the null-terminator, if any). Case ** is respected in comparisons. ** ** FTS3_HASH_BINARY pKey points to binary data nKey bytes long. ** memcmp() is used to compare keys. ** ** A copy of the key is made if the copyKey parameter to fts3HashInit is 1. */ #define FTS3_HASH_STRING 1 #define FTS3_HASH_BINARY 2 /* ** Access routines. To delete, insert a NULL pointer. 
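**
** For illustration, typical usage with string keys, where the table keeps
** its own copy of each key, is sketched below (pData is assumed to be a
** payload pointer supplied by the caller; the declarations follow):
**
**   Fts3Hash h;
**   void *pFound;
**   sqlite3Fts3HashInit(&h, FTS3_HASH_STRING, 1);
**   sqlite3Fts3HashInsert(&h, "alpha", 6, pData);      (insert or replace)
**   pFound = sqlite3Fts3HashFind(&h, "alpha", 6);      (lookup; NULL if absent)
**   sqlite3Fts3HashInsert(&h, "alpha", 6, 0);          (delete the entry)
**   sqlite3Fts3HashClear(&h);                          (discard all entries)
**
** The key length 6 includes the nul-terminator, as described above for
** FTS3_HASH_STRING keys.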
*/ SQLITE_PRIVATE void sqlite3Fts3HashInit(Fts3Hash *pNew, char keyClass, char copyKey); SQLITE_PRIVATE void *sqlite3Fts3HashInsert(Fts3Hash*, const void *pKey, int nKey, void *pData); SQLITE_PRIVATE void *sqlite3Fts3HashFind(const Fts3Hash*, const void *pKey, int nKey); SQLITE_PRIVATE void sqlite3Fts3HashClear(Fts3Hash*); SQLITE_PRIVATE Fts3HashElem *sqlite3Fts3HashFindElem(const Fts3Hash *, const void *, int); /* ** Shorthand for the functions above */ #define fts3HashInit sqlite3Fts3HashInit #define fts3HashInsert sqlite3Fts3HashInsert #define fts3HashFind sqlite3Fts3HashFind #define fts3HashClear sqlite3Fts3HashClear #define fts3HashFindElem sqlite3Fts3HashFindElem /* ** Macros for looping over all elements of a hash table. The idiom is ** like this: ** ** Fts3Hash h; ** Fts3HashElem *p; ** ... ** for(p=fts3HashFirst(&h); p; p=fts3HashNext(p)){ ** SomeStructure *pData = fts3HashData(p); ** // do something with pData ** } */ #define fts3HashFirst(H) ((H)->first) #define fts3HashNext(E) ((E)->next) #define fts3HashData(E) ((E)->data) #define fts3HashKey(E) ((E)->pKey) #define fts3HashKeysize(E) ((E)->nKey) /* ** Number of entries in a hash table */ #define fts3HashCount(H) ((H)->count) #endif /* _FTS3_HASH_H_ */ /************** End of fts3_hash.h *******************************************/ /************** Continuing where we left off in fts3Int.h ********************/ /* ** This constant determines the maximum depth of an FTS expression tree ** that the library will create and use. FTS uses recursion to perform ** various operations on the query tree, so the disadvantage of a large ** limit is that it may allow very large queries to use large amounts ** of stack space (perhaps causing a stack overflow). */ #ifndef SQLITE_FTS3_MAX_EXPR_DEPTH # define SQLITE_FTS3_MAX_EXPR_DEPTH 12 #endif /* ** This constant controls how often segments are merged. Once there are ** FTS3_MERGE_COUNT segments of level N, they are merged into a single ** segment of level N+1. */ #define FTS3_MERGE_COUNT 16 /* ** This is the maximum amount of data (in bytes) to store in the ** Fts3Table.pendingTerms hash table. Normally, the hash table is ** populated as documents are inserted/updated/deleted in a transaction ** and used to create a new segment when the transaction is committed. ** However if this limit is reached midway through a transaction, a new ** segment is created and the hash table cleared immediately. */ #define FTS3_MAX_PENDING_DATA (1*1024*1024) /* ** Macro to return the number of elements in an array. SQLite has a ** similar macro called ArraySize(). Use a different name to avoid ** a collision when building an amalgamation with built-in FTS3. */ #define SizeofArray(X) ((int)(sizeof(X)/sizeof(X[0]))) #ifndef MIN # define MIN(x,y) ((x)<(y)?(x):(y)) #endif #ifndef MAX # define MAX(x,y) ((x)>(y)?(x):(y)) #endif /* ** Maximum length of a varint encoded integer. The varint format is different ** from that used by SQLite, so the maximum length is 10, not 9. */ #define FTS3_VARINT_MAX 10 /* ** FTS4 virtual tables may maintain multiple indexes - one index of all terms ** in the document set and zero or more prefix indexes. All indexes are stored ** as one or more b+-trees in the %_segments and %_segdir tables. ** ** It is possible to determine which index a b+-tree belongs to based on the ** value stored in the "%_segdir.level" column. Given this value L, the index ** that the b+-tree belongs to is (L<<10). 
In other words, all b+-trees with ** level values between 0 and 1023 (inclusive) belong to index 0, all levels ** between 1024 and 2047 to index 1, and so on. ** ** It is considered impossible for an index to use more than 1024 levels. In ** theory though this may happen, but only after at least ** (FTS3_MERGE_COUNT^1024) separate flushes of the pending-terms tables. */ #define FTS3_SEGDIR_MAXLEVEL 1024 #define FTS3_SEGDIR_MAXLEVEL_STR "1024" /* ** The testcase() macro is only used by the amalgamation. If undefined, ** make it a no-op. */ #ifndef testcase # define testcase(X) #endif /* ** Terminator values for position-lists and column-lists. */ #define POS_COLUMN (1) /* Column-list terminator */ #define POS_END (0) /* Position-list terminator */ /* ** This section provides definitions to allow the ** FTS3 extension to be compiled outside of the ** amalgamation. */ #ifndef SQLITE_AMALGAMATION /* ** Macros indicating that conditional expressions are always true or ** false. */ #ifdef SQLITE_COVERAGE_TEST # define ALWAYS(x) (1) # define NEVER(X) (0) #elif defined(SQLITE_DEBUG) # define ALWAYS(x) sqlite3Fts3Always((x)!=0) # define NEVER(x) sqlite3Fts3Never((x)!=0) SQLITE_PRIVATE int sqlite3Fts3Always(int b); SQLITE_PRIVATE int sqlite3Fts3Never(int b); #else # define ALWAYS(x) (x) # define NEVER(x) (x) #endif /* ** Internal types used by SQLite. */ typedef unsigned char u8; /* 1-byte (or larger) unsigned integer */ typedef short int i16; /* 2-byte (or larger) signed integer */ typedef unsigned int u32; /* 4-byte unsigned integer */ typedef sqlite3_uint64 u64; /* 8-byte unsigned integer */ typedef sqlite3_int64 i64; /* 8-byte signed integer */ /* ** Macro used to suppress compiler warnings for unused parameters. */ #define UNUSED_PARAMETER(x) (void)(x) /* ** Activate assert() only if SQLITE_TEST is enabled. */ #if !defined(NDEBUG) && !defined(SQLITE_DEBUG) # define NDEBUG 1 #endif /* ** The TESTONLY macro is used to enclose variable declarations or ** other bits of code that are needed to support the arguments ** within testcase() and assert() macros. */ #if defined(SQLITE_DEBUG) || defined(SQLITE_COVERAGE_TEST) # define TESTONLY(X) X #else # define TESTONLY(X) #endif #endif /* SQLITE_AMALGAMATION */ #ifdef SQLITE_DEBUG SQLITE_PRIVATE int sqlite3Fts3Corrupt(void); # define FTS_CORRUPT_VTAB sqlite3Fts3Corrupt() #else # define FTS_CORRUPT_VTAB SQLITE_CORRUPT_VTAB #endif typedef struct Fts3Table Fts3Table; typedef struct Fts3Cursor Fts3Cursor; typedef struct Fts3Expr Fts3Expr; typedef struct Fts3Phrase Fts3Phrase; typedef struct Fts3PhraseToken Fts3PhraseToken; typedef struct Fts3Doclist Fts3Doclist; typedef struct Fts3SegFilter Fts3SegFilter; typedef struct Fts3DeferredToken Fts3DeferredToken; typedef struct Fts3SegReader Fts3SegReader; typedef struct Fts3MultiSegReader Fts3MultiSegReader; typedef struct MatchinfoBuffer MatchinfoBuffer; /* ** A connection to a fulltext index is an instance of the following ** structure. The xCreate and xConnect methods create an instance ** of this structure and xDestroy and xDisconnect free that instance. ** All other methods receive a pointer to the structure as one of their ** arguments. */ struct Fts3Table { sqlite3_vtab base; /* Base class used by SQLite core */ sqlite3 *db; /* The database connection */ const char *zDb; /* logical database name */ const char *zName; /* virtual table name */ int nColumn; /* number of named columns in virtual table */ char **azColumn; /* column names. 
malloced */ u8 *abNotindexed; /* True for 'notindexed' columns */ sqlite3_tokenizer *pTokenizer; /* tokenizer for inserts and queries */ char *zContentTbl; /* content=xxx option, or NULL */ char *zLanguageid; /* languageid=xxx option, or NULL */ int nAutoincrmerge; /* Value configured by 'automerge' */ u32 nLeafAdd; /* Number of leaf blocks added this trans */ /* Precompiled statements used by the implementation. Each of these ** statements is run and reset within a single virtual table API call. */ sqlite3_stmt *aStmt[40]; char *zReadExprlist; char *zWriteExprlist; int nNodeSize; /* Soft limit for node size */ u8 bFts4; /* True for FTS4, false for FTS3 */ u8 bHasStat; /* True if %_stat table exists (2==unknown) */ u8 bHasDocsize; /* True if %_docsize table exists */ u8 bDescIdx; /* True if doclists are in reverse order */ u8 bIgnoreSavepoint; /* True to ignore xSavepoint invocations */ int nPgsz; /* Page size for host database */ char *zSegmentsTbl; /* Name of %_segments table */ sqlite3_blob *pSegments; /* Blob handle open on %_segments table */ /* ** The following array of hash tables is used to buffer pending index ** updates during transactions. All pending updates buffered at any one ** time must share a common language-id (see the FTS4 langid= feature). ** The current language id is stored in variable iPrevLangid. ** ** A single FTS4 table may have multiple full-text indexes. For each index ** there is an entry in the aIndex[] array. Index 0 is an index of all the ** terms that appear in the document set. Each subsequent index in aIndex[] ** is an index of prefixes of a specific length. ** ** Variable nPendingData contains an estimate the memory consumed by the ** pending data structures, including hash table overhead, but not including ** malloc overhead. When nPendingData exceeds nMaxPendingData, all hash ** tables are flushed to disk. Variable iPrevDocid is the docid of the most ** recently inserted record. */ int nIndex; /* Size of aIndex[] */ struct Fts3Index { int nPrefix; /* Prefix length (0 for main terms index) */ Fts3Hash hPending; /* Pending terms table for this index */ } *aIndex; int nMaxPendingData; /* Max pending data before flush to disk */ int nPendingData; /* Current bytes of pending data */ sqlite_int64 iPrevDocid; /* Docid of most recently inserted document */ int iPrevLangid; /* Langid of recently inserted document */ #if defined(SQLITE_DEBUG) || defined(SQLITE_COVERAGE_TEST) /* State variables used for validating that the transaction control ** methods of the virtual table are called at appropriate times. These ** values do not contribute to FTS functionality; they are used for ** verifying the operation of the SQLite core. */ int inTransaction; /* True after xBegin but before xCommit/xRollback */ int mxSavepoint; /* Largest valid xSavepoint integer */ #endif #ifdef SQLITE_TEST /* True to disable the incremental doclist optimization. This is controled ** by special insert command 'test-no-incr-doclist'. */ int bNoIncrDoclist; #endif }; /* ** When the core wants to read from the virtual table, it creates a ** virtual table cursor (an instance of the following structure) using ** the xOpen method. Cursors are destroyed using the xClose method. 
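**
** For orientation, the core's usual call sequence against an FTS3 cursor is
** sketched below.  This is the generic sqlite3_module pattern, shown with
** method names only; the exact arguments are defined by the virtual table
** interface and the calls made depend on the statement being run:
**
**   xOpen(pVTab, &pCsr)                    (create the cursor)
**   xFilter(pCsr, idxNum, idxStr, ...)     (start the scan; runs the query)
**   while( xEof(pCsr)==0 ){
**     xColumn(pCsr, pCtx, iCol) / xRowid(pCsr, &iRowid)
**     xNext(pCsr)                          (advance to the next matching row)
**   }
**   xClose(pCsr)                           (destroy the cursor)
**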
*/ struct Fts3Cursor { sqlite3_vtab_cursor base; /* Base class used by SQLite core */ i16 eSearch; /* Search strategy (see below) */ u8 isEof; /* True if at End Of Results */ u8 isRequireSeek; /* True if must seek pStmt to %_content row */ sqlite3_stmt *pStmt; /* Prepared statement in use by the cursor */ Fts3Expr *pExpr; /* Parsed MATCH query string */ int iLangid; /* Language being queried for */ int nPhrase; /* Number of matchable phrases in query */ Fts3DeferredToken *pDeferred; /* Deferred search tokens, if any */ sqlite3_int64 iPrevId; /* Previous id read from aDoclist */ char *pNextId; /* Pointer into the body of aDoclist */ char *aDoclist; /* List of docids for full-text queries */ int nDoclist; /* Size of buffer at aDoclist */ u8 bDesc; /* True to sort in descending order */ int eEvalmode; /* An FTS3_EVAL_XX constant */ int nRowAvg; /* Average size of database rows, in pages */ sqlite3_int64 nDoc; /* Documents in table */ i64 iMinDocid; /* Minimum docid to return */ i64 iMaxDocid; /* Maximum docid to return */ int isMatchinfoNeeded; /* True when aMatchinfo[] needs filling in */ MatchinfoBuffer *pMIBuffer; /* Buffer for matchinfo data */ }; #define FTS3_EVAL_FILTER 0 #define FTS3_EVAL_NEXT 1 #define FTS3_EVAL_MATCHINFO 2 /* ** The Fts3Cursor.eSearch member is always set to one of the following. ** Actualy, Fts3Cursor.eSearch can be greater than or equal to ** FTS3_FULLTEXT_SEARCH. If so, then Fts3Cursor.eSearch - 2 is the index ** of the column to be searched. For example, in ** ** CREATE VIRTUAL TABLE ex1 USING fts3(a,b,c,d); ** SELECT docid FROM ex1 WHERE b MATCH 'one two three'; ** ** Because the LHS of the MATCH operator is 2nd column "b", ** Fts3Cursor.eSearch will be set to FTS3_FULLTEXT_SEARCH+1. (+0 for a, ** +1 for b, +2 for c, +3 for d.) If the LHS of MATCH were "ex1" ** indicating that all columns should be searched, ** then eSearch would be set to FTS3_FULLTEXT_SEARCH+4. */ #define FTS3_FULLSCAN_SEARCH 0 /* Linear scan of %_content table */ #define FTS3_DOCID_SEARCH 1 /* Lookup by rowid on %_content table */ #define FTS3_FULLTEXT_SEARCH 2 /* Full-text index search */ /* ** The lower 16-bits of the sqlite3_index_info.idxNum value set by ** the xBestIndex() method contains the Fts3Cursor.eSearch value described ** above. The upper 16-bits contain a combination of the following ** bits, used to describe extra constraints on full-text searches. */ #define FTS3_HAVE_LANGID 0x00010000 /* languageid=? */ #define FTS3_HAVE_DOCID_GE 0x00020000 /* docid>=? */ #define FTS3_HAVE_DOCID_LE 0x00040000 /* docid<=? */ struct Fts3Doclist { char *aAll; /* Array containing doclist (or NULL) */ int nAll; /* Size of a[] in bytes */ char *pNextDocid; /* Pointer to next docid */ sqlite3_int64 iDocid; /* Current docid (if pList!=0) */ int bFreeList; /* True if pList should be sqlite3_free()d */ char *pList; /* Pointer to position list following iDocid */ int nList; /* Length of position list */ }; /* ** A "phrase" is a sequence of one or more tokens that must match in ** sequence. A single token is the base case and the most common case. ** For a sequence of tokens contained in double-quotes (i.e. "one two three") ** nToken will be the number of tokens in the string. */ struct Fts3PhraseToken { char *z; /* Text of the token */ int n; /* Number of bytes in buffer z */ int isPrefix; /* True if token ends with a "*" character */ int bFirst; /* True if token must appear at position 0 */ /* Variables above this point are populated when the expression is ** parsed (by code in fts3_expr.c). 
Below this point the variables are ** used when evaluating the expression. */ Fts3DeferredToken *pDeferred; /* Deferred token object for this token */ Fts3MultiSegReader *pSegcsr; /* Segment-reader for this token */ }; struct Fts3Phrase { /* Cache of doclist for this phrase. */ Fts3Doclist doclist; int bIncr; /* True if doclist is loaded incrementally */ int iDoclistToken; /* Used by sqlite3Fts3EvalPhrasePoslist() if this is a descendent of an ** OR condition. */ char *pOrPoslist; i64 iOrDocid; /* Variables below this point are populated by fts3_expr.c when parsing ** a MATCH expression. Everything above is part of the evaluation phase. */ int nToken; /* Number of tokens in the phrase */ int iColumn; /* Index of column this phrase must match */ Fts3PhraseToken aToken[1]; /* One entry for each token in the phrase */ }; /* ** A tree of these objects forms the RHS of a MATCH operator. ** ** If Fts3Expr.eType is FTSQUERY_PHRASE and isLoaded is true, then aDoclist ** points to a malloced buffer, size nDoclist bytes, containing the results ** of this phrase query in FTS3 doclist format. As usual, the initial ** "Length" field found in doclists stored on disk is omitted from this ** buffer. ** ** Variable aMI is used only for FTSQUERY_NEAR nodes to store the global ** matchinfo data. If it is not NULL, it points to an array of size nCol*3, ** where nCol is the number of columns in the queried FTS table. The array ** is populated as follows: ** ** aMI[iCol*3 + 0] = Undefined ** aMI[iCol*3 + 1] = Number of occurrences ** aMI[iCol*3 + 2] = Number of rows containing at least one instance ** ** The aMI array is allocated using sqlite3_malloc(). It should be freed ** when the expression node is. */ struct Fts3Expr { int eType; /* One of the FTSQUERY_XXX values defined below */ int nNear; /* Valid if eType==FTSQUERY_NEAR */ Fts3Expr *pParent; /* pParent->pLeft==this or pParent->pRight==this */ Fts3Expr *pLeft; /* Left operand */ Fts3Expr *pRight; /* Right operand */ Fts3Phrase *pPhrase; /* Valid if eType==FTSQUERY_PHRASE */ /* The following are used by the fts3_eval.c module. */ sqlite3_int64 iDocid; /* Current docid */ u8 bEof; /* True this expression is at EOF already */ u8 bStart; /* True if iDocid is valid */ u8 bDeferred; /* True if this expression is entirely deferred */ /* The following are used by the fts3_snippet.c module. */ int iPhrase; /* Index of this phrase in matchinfo() results */ u32 *aMI; /* See above */ }; /* ** Candidate values for Fts3Query.eType. Note that the order of the first ** four values is in order of precedence when parsing expressions. 
For ** example, the following: ** ** "a OR b AND c NOT d NEAR e" ** ** is equivalent to: ** ** "a OR (b AND (c NOT (d NEAR e)))" */ #define FTSQUERY_NEAR 1 #define FTSQUERY_NOT 2 #define FTSQUERY_AND 3 #define FTSQUERY_OR 4 #define FTSQUERY_PHRASE 5 /* fts3_write.c */ SQLITE_PRIVATE int sqlite3Fts3UpdateMethod(sqlite3_vtab*,int,sqlite3_value**,sqlite3_int64*); SQLITE_PRIVATE int sqlite3Fts3PendingTermsFlush(Fts3Table *); SQLITE_PRIVATE void sqlite3Fts3PendingTermsClear(Fts3Table *); SQLITE_PRIVATE int sqlite3Fts3Optimize(Fts3Table *); SQLITE_PRIVATE int sqlite3Fts3SegReaderNew(int, int, sqlite3_int64, sqlite3_int64, sqlite3_int64, const char *, int, Fts3SegReader**); SQLITE_PRIVATE int sqlite3Fts3SegReaderPending( Fts3Table*,int,const char*,int,int,Fts3SegReader**); SQLITE_PRIVATE void sqlite3Fts3SegReaderFree(Fts3SegReader *); SQLITE_PRIVATE int sqlite3Fts3AllSegdirs(Fts3Table*, int, int, int, sqlite3_stmt **); SQLITE_PRIVATE int sqlite3Fts3ReadBlock(Fts3Table*, sqlite3_int64, char **, int*, int*); SQLITE_PRIVATE int sqlite3Fts3SelectDoctotal(Fts3Table *, sqlite3_stmt **); SQLITE_PRIVATE int sqlite3Fts3SelectDocsize(Fts3Table *, sqlite3_int64, sqlite3_stmt **); #ifndef SQLITE_DISABLE_FTS4_DEFERRED SQLITE_PRIVATE void sqlite3Fts3FreeDeferredTokens(Fts3Cursor *); SQLITE_PRIVATE int sqlite3Fts3DeferToken(Fts3Cursor *, Fts3PhraseToken *, int); SQLITE_PRIVATE int sqlite3Fts3CacheDeferredDoclists(Fts3Cursor *); SQLITE_PRIVATE void sqlite3Fts3FreeDeferredDoclists(Fts3Cursor *); SQLITE_PRIVATE int sqlite3Fts3DeferredTokenList(Fts3DeferredToken *, char **, int *); #else # define sqlite3Fts3FreeDeferredTokens(x) # define sqlite3Fts3DeferToken(x,y,z) SQLITE_OK # define sqlite3Fts3CacheDeferredDoclists(x) SQLITE_OK # define sqlite3Fts3FreeDeferredDoclists(x) # define sqlite3Fts3DeferredTokenList(x,y,z) SQLITE_OK #endif SQLITE_PRIVATE void sqlite3Fts3SegmentsClose(Fts3Table *); SQLITE_PRIVATE int sqlite3Fts3MaxLevel(Fts3Table *, int *); /* Special values interpreted by sqlite3SegReaderCursor() */ #define FTS3_SEGCURSOR_PENDING -1 #define FTS3_SEGCURSOR_ALL -2 SQLITE_PRIVATE int sqlite3Fts3SegReaderStart(Fts3Table*, Fts3MultiSegReader*, Fts3SegFilter*); SQLITE_PRIVATE int sqlite3Fts3SegReaderStep(Fts3Table *, Fts3MultiSegReader *); SQLITE_PRIVATE void sqlite3Fts3SegReaderFinish(Fts3MultiSegReader *); SQLITE_PRIVATE int sqlite3Fts3SegReaderCursor(Fts3Table *, int, int, int, const char *, int, int, int, Fts3MultiSegReader *); /* Flags allowed as part of the 4th argument to SegmentReaderIterate() */ #define FTS3_SEGMENT_REQUIRE_POS 0x00000001 #define FTS3_SEGMENT_IGNORE_EMPTY 0x00000002 #define FTS3_SEGMENT_COLUMN_FILTER 0x00000004 #define FTS3_SEGMENT_PREFIX 0x00000008 #define FTS3_SEGMENT_SCAN 0x00000010 #define FTS3_SEGMENT_FIRST 0x00000020 /* Type passed as 4th argument to SegmentReaderIterate() */ struct Fts3SegFilter { const char *zTerm; int nTerm; int iCol; int flags; }; struct Fts3MultiSegReader { /* Used internally by sqlite3Fts3SegReaderXXX() calls */ Fts3SegReader **apSegment; /* Array of Fts3SegReader objects */ int nSegment; /* Size of apSegment array */ int nAdvance; /* How many seg-readers to advance */ Fts3SegFilter *pFilter; /* Pointer to filter object */ char *aBuffer; /* Buffer to merge doclists in */ int nBuffer; /* Allocated size of aBuffer[] in bytes */ int iColFilter; /* If >=0, filter for this column */ int bRestart; /* Used by fts3.c only. */ int nCost; /* Cost of running iterator */ int bLookup; /* True if a lookup of a single entry. */ /* Output values. 
Valid only after Fts3SegReaderStep() returns SQLITE_ROW. */ char *zTerm; /* Pointer to term buffer */ int nTerm; /* Size of zTerm in bytes */ char *aDoclist; /* Pointer to doclist buffer */ int nDoclist; /* Size of aDoclist[] in bytes */ }; SQLITE_PRIVATE int sqlite3Fts3Incrmerge(Fts3Table*,int,int); #define fts3GetVarint32(p, piVal) ( \ (*(u8*)(p)&0x80) ? sqlite3Fts3GetVarint32(p, piVal) : (*piVal=*(u8*)(p), 1) \ ) /* fts3.c */ SQLITE_PRIVATE void sqlite3Fts3ErrMsg(char**,const char*,...); SQLITE_PRIVATE int sqlite3Fts3PutVarint(char *, sqlite3_int64); SQLITE_PRIVATE int sqlite3Fts3GetVarint(const char *, sqlite_int64 *); SQLITE_PRIVATE int sqlite3Fts3GetVarint32(const char *, int *); SQLITE_PRIVATE int sqlite3Fts3VarintLen(sqlite3_uint64); SQLITE_PRIVATE void sqlite3Fts3Dequote(char *); SQLITE_PRIVATE void sqlite3Fts3DoclistPrev(int,char*,int,char**,sqlite3_int64*,int*,u8*); SQLITE_PRIVATE int sqlite3Fts3EvalPhraseStats(Fts3Cursor *, Fts3Expr *, u32 *); SQLITE_PRIVATE int sqlite3Fts3FirstFilter(sqlite3_int64, char *, int, char *); SQLITE_PRIVATE void sqlite3Fts3CreateStatTable(int*, Fts3Table*); SQLITE_PRIVATE int sqlite3Fts3EvalTestDeferred(Fts3Cursor *pCsr, int *pRc); /* fts3_tokenizer.c */ SQLITE_PRIVATE const char *sqlite3Fts3NextToken(const char *, int *); SQLITE_PRIVATE int sqlite3Fts3InitHashTable(sqlite3 *, Fts3Hash *, const char *); SQLITE_PRIVATE int sqlite3Fts3InitTokenizer(Fts3Hash *pHash, const char *, sqlite3_tokenizer **, char ** ); SQLITE_PRIVATE int sqlite3Fts3IsIdChar(char); /* fts3_snippet.c */ SQLITE_PRIVATE void sqlite3Fts3Offsets(sqlite3_context*, Fts3Cursor*); SQLITE_PRIVATE void sqlite3Fts3Snippet(sqlite3_context *, Fts3Cursor *, const char *, const char *, const char *, int, int ); SQLITE_PRIVATE void sqlite3Fts3Matchinfo(sqlite3_context *, Fts3Cursor *, const char *); SQLITE_PRIVATE void sqlite3Fts3MIBufferFree(MatchinfoBuffer *p); /* fts3_expr.c */ SQLITE_PRIVATE int sqlite3Fts3ExprParse(sqlite3_tokenizer *, int, char **, int, int, int, const char *, int, Fts3Expr **, char ** ); SQLITE_PRIVATE void sqlite3Fts3ExprFree(Fts3Expr *); #ifdef SQLITE_TEST SQLITE_PRIVATE int sqlite3Fts3ExprInitTestInterface(sqlite3 *db); SQLITE_PRIVATE int sqlite3Fts3InitTerm(sqlite3 *db); #endif SQLITE_PRIVATE int sqlite3Fts3OpenTokenizer(sqlite3_tokenizer *, int, const char *, int, sqlite3_tokenizer_cursor ** ); /* fts3_aux.c */ SQLITE_PRIVATE int sqlite3Fts3InitAux(sqlite3 *db); SQLITE_PRIVATE void sqlite3Fts3EvalPhraseCleanup(Fts3Phrase *); SQLITE_PRIVATE int sqlite3Fts3MsrIncrStart( Fts3Table*, Fts3MultiSegReader*, int, const char*, int); SQLITE_PRIVATE int sqlite3Fts3MsrIncrNext( Fts3Table *, Fts3MultiSegReader *, sqlite3_int64 *, char **, int *); SQLITE_PRIVATE int sqlite3Fts3EvalPhrasePoslist(Fts3Cursor *, Fts3Expr *, int iCol, char **); SQLITE_PRIVATE int sqlite3Fts3MsrOvfl(Fts3Cursor *, Fts3MultiSegReader *, int *); SQLITE_PRIVATE int sqlite3Fts3MsrIncrRestart(Fts3MultiSegReader *pCsr); /* fts3_tokenize_vtab.c */ SQLITE_PRIVATE int sqlite3Fts3InitTok(sqlite3*, Fts3Hash *); /* fts3_unicode2.c (functions generated by parsing unicode text files) */ #ifndef SQLITE_DISABLE_FTS3_UNICODE SQLITE_PRIVATE int sqlite3FtsUnicodeFold(int, int); SQLITE_PRIVATE int sqlite3FtsUnicodeIsalnum(int); SQLITE_PRIVATE int sqlite3FtsUnicodeIsdiacritic(int); #endif #endif /* !SQLITE_CORE || SQLITE_ENABLE_FTS3 */ #endif /* _FTSINT_H */ /************** End of fts3Int.h *********************************************/ /************** Continuing where we left off in fts3.c ***********************/ #if 
!defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) #if defined(SQLITE_ENABLE_FTS3) && !defined(SQLITE_CORE) # define SQLITE_CORE 1 #endif /* #include */ /* #include */ /* #include */ /* #include */ /* #include */ /* #include */ /* #include "fts3.h" */ #ifndef SQLITE_CORE /* # include "sqlite3ext.h" */ SQLITE_EXTENSION_INIT1 #endif static int fts3EvalNext(Fts3Cursor *pCsr); static int fts3EvalStart(Fts3Cursor *pCsr); static int fts3TermSegReaderCursor( Fts3Cursor *, const char *, int, int, Fts3MultiSegReader **); #ifndef SQLITE_AMALGAMATION # if defined(SQLITE_DEBUG) SQLITE_PRIVATE int sqlite3Fts3Always(int b) { assert( b ); return b; } SQLITE_PRIVATE int sqlite3Fts3Never(int b) { assert( !b ); return b; } # endif #endif /* ** Write a 64-bit variable-length integer to memory starting at p[0]. ** The length of data written will be between 1 and FTS3_VARINT_MAX bytes. ** The number of bytes written is returned. */ SQLITE_PRIVATE int sqlite3Fts3PutVarint(char *p, sqlite_int64 v){ unsigned char *q = (unsigned char *) p; sqlite_uint64 vu = v; do{ *q++ = (unsigned char) ((vu & 0x7f) | 0x80); vu >>= 7; }while( vu!=0 ); q[-1] &= 0x7f; /* turn off high bit in final byte */ assert( q - (unsigned char *)p <= FTS3_VARINT_MAX ); return (int) (q - (unsigned char *)p); } #define GETVARINT_STEP(v, ptr, shift, mask1, mask2, var, ret) \ v = (v & mask1) | ( (*ptr++) << shift ); \ if( (v & mask2)==0 ){ var = v; return ret; } #define GETVARINT_INIT(v, ptr, shift, mask1, mask2, var, ret) \ v = (*ptr++); \ if( (v & mask2)==0 ){ var = v; return ret; } /* ** Read a 64-bit variable-length integer from memory starting at p[0]. ** Return the number of bytes read, or 0 on error. ** The value is stored in *v. */ SQLITE_PRIVATE int sqlite3Fts3GetVarint(const char *p, sqlite_int64 *v){ const char *pStart = p; u32 a; u64 b; int shift; GETVARINT_INIT(a, p, 0, 0x00, 0x80, *v, 1); GETVARINT_STEP(a, p, 7, 0x7F, 0x4000, *v, 2); GETVARINT_STEP(a, p, 14, 0x3FFF, 0x200000, *v, 3); GETVARINT_STEP(a, p, 21, 0x1FFFFF, 0x10000000, *v, 4); b = (a & 0x0FFFFFFF ); for(shift=28; shift<=63; shift+=7){ u64 c = *p++; b += (c&0x7F) << shift; if( (c & 0x80)==0 ) break; } *v = b; return (int)(p - pStart); } /* ** Similar to sqlite3Fts3GetVarint(), except that the output is truncated to a ** 32-bit integer before it is returned. */ SQLITE_PRIVATE int sqlite3Fts3GetVarint32(const char *p, int *pi){ u32 a; #ifndef fts3GetVarint32 GETVARINT_INIT(a, p, 0, 0x00, 0x80, *pi, 1); #else a = (*p++); assert( a & 0x80 ); #endif GETVARINT_STEP(a, p, 7, 0x7F, 0x4000, *pi, 2); GETVARINT_STEP(a, p, 14, 0x3FFF, 0x200000, *pi, 3); GETVARINT_STEP(a, p, 21, 0x1FFFFF, 0x10000000, *pi, 4); a = (a & 0x0FFFFFFF ); *pi = (int)(a | ((u32)(*p & 0x0F) << 28)); return 5; } /* ** Return the number of bytes required to encode v as a varint */ SQLITE_PRIVATE int sqlite3Fts3VarintLen(sqlite3_uint64 v){ int i = 0; do{ i++; v >>= 7; }while( v!=0 ); return i; } /* ** Convert an SQL-style quoted string into a normal string by removing ** the quote characters. The conversion is done in-place. If the ** input does not begin with a quote character, then this routine ** is a no-op. 
** ** Examples: ** ** "abc" becomes abc ** 'xyz' becomes xyz ** [pqr] becomes pqr ** `mno` becomes mno ** */ SQLITE_PRIVATE void sqlite3Fts3Dequote(char *z){ char quote; /* Quote character (if any ) */ quote = z[0]; if( quote=='[' || quote=='\'' || quote=='"' || quote=='`' ){ int iIn = 1; /* Index of next byte to read from input */ int iOut = 0; /* Index of next byte to write to output */ /* If the first byte was a '[', then the close-quote character is a ']' */ if( quote=='[' ) quote = ']'; while( z[iIn] ){ if( z[iIn]==quote ){ if( z[iIn+1]!=quote ) break; z[iOut++] = quote; iIn += 2; }else{ z[iOut++] = z[iIn++]; } } z[iOut] = '\0'; } } /* ** Read a single varint from the doclist at *pp and advance *pp to point ** to the first byte past the end of the varint. Add the value of the varint ** to *pVal. */ static void fts3GetDeltaVarint(char **pp, sqlite3_int64 *pVal){ sqlite3_int64 iVal; *pp += sqlite3Fts3GetVarint(*pp, &iVal); *pVal += iVal; } /* ** When this function is called, *pp points to the first byte following a ** varint that is part of a doclist (or position-list, or any other list ** of varints). This function moves *pp to point to the start of that varint, ** and sets *pVal to the varint value. ** ** Argument pStart points to the first byte of the doclist that the ** varint is part of. */ static void fts3GetReverseVarint( char **pp, char *pStart, sqlite3_int64 *pVal ){ sqlite3_int64 iVal; char *p; /* Pointer p now points at the first byte past the varint we are ** interested in. So, unless the doclist is corrupt, the 0x80 bit is ** clear on character p[-1]. */ for(p = (*pp)-2; p>=pStart && *p&0x80; p--); p++; *pp = p; sqlite3Fts3GetVarint(p, &iVal); *pVal = iVal; } /* ** The xDisconnect() virtual table method. */ static int fts3DisconnectMethod(sqlite3_vtab *pVtab){ Fts3Table *p = (Fts3Table *)pVtab; int i; assert( p->nPendingData==0 ); assert( p->pSegments==0 ); /* Free any prepared statements held */ for(i=0; i<SizeofArray(p->aStmt); i++){ sqlite3_finalize(p->aStmt[i]); } sqlite3_free(p->zSegmentsTbl); sqlite3_free(p->zReadExprlist); sqlite3_free(p->zWriteExprlist); sqlite3_free(p->zContentTbl); sqlite3_free(p->zLanguageid); /* Invoke the tokenizer destructor to free the tokenizer. */ p->pTokenizer->pModule->xDestroy(p->pTokenizer); sqlite3_free(p); return SQLITE_OK; } /* ** Write an error message into *pzErr */ SQLITE_PRIVATE void sqlite3Fts3ErrMsg(char **pzErr, const char *zFormat, ...){ va_list ap; sqlite3_free(*pzErr); va_start(ap, zFormat); *pzErr = sqlite3_vmprintf(zFormat, ap); va_end(ap); } /* ** Construct one or more SQL statements from the format string given ** and then evaluate those statements. The success code is written ** into *pRc. ** ** If *pRc is initially non-zero then this routine is a no-op. */ static void fts3DbExec( int *pRc, /* Success code */ sqlite3 *db, /* Database in which to run SQL */ const char *zFormat, /* Format string for SQL */ ... /* Arguments to the format string */ ){ va_list ap; char *zSql; if( *pRc ) return; va_start(ap, zFormat); zSql = sqlite3_vmprintf(zFormat, ap); va_end(ap); if( zSql==0 ){ *pRc = SQLITE_NOMEM; }else{ *pRc = sqlite3_exec(db, zSql, 0, 0, 0); sqlite3_free(zSql); } } /* ** The xDestroy() virtual table method. */ static int fts3DestroyMethod(sqlite3_vtab *pVtab){ Fts3Table *p = (Fts3Table *)pVtab; int rc = SQLITE_OK; /* Return code */ const char *zDb = p->zDb; /* Name of database (e.g.
"main", "temp") */ sqlite3 *db = p->db; /* Database handle */ /* Drop the shadow tables */ if( p->zContentTbl==0 ){ fts3DbExec(&rc, db, "DROP TABLE IF EXISTS %Q.'%q_content'", zDb, p->zName); } fts3DbExec(&rc, db, "DROP TABLE IF EXISTS %Q.'%q_segments'", zDb,p->zName); fts3DbExec(&rc, db, "DROP TABLE IF EXISTS %Q.'%q_segdir'", zDb, p->zName); fts3DbExec(&rc, db, "DROP TABLE IF EXISTS %Q.'%q_docsize'", zDb, p->zName); fts3DbExec(&rc, db, "DROP TABLE IF EXISTS %Q.'%q_stat'", zDb, p->zName); /* If everything has worked, invoke fts3DisconnectMethod() to free the ** memory associated with the Fts3Table structure and return SQLITE_OK. ** Otherwise, return an SQLite error code. */ return (rc==SQLITE_OK ? fts3DisconnectMethod(pVtab) : rc); } /* ** Invoke sqlite3_declare_vtab() to declare the schema for the FTS3 table ** passed as the first argument. This is done as part of the xConnect() ** and xCreate() methods. ** ** If *pRc is non-zero when this function is called, it is a no-op. ** Otherwise, if an error occurs, an SQLite error code is stored in *pRc ** before returning. */ static void fts3DeclareVtab(int *pRc, Fts3Table *p){ if( *pRc==SQLITE_OK ){ int i; /* Iterator variable */ int rc; /* Return code */ char *zSql; /* SQL statement passed to declare_vtab() */ char *zCols; /* List of user defined columns */ const char *zLanguageid; zLanguageid = (p->zLanguageid ? p->zLanguageid : "__langid"); sqlite3_vtab_config(p->db, SQLITE_VTAB_CONSTRAINT_SUPPORT, 1); /* Create a list of user columns for the virtual table */ zCols = sqlite3_mprintf("%Q, ", p->azColumn[0]); for(i=1; zCols && inColumn; i++){ zCols = sqlite3_mprintf("%z%Q, ", zCols, p->azColumn[i]); } /* Create the whole "CREATE TABLE" statement to pass to SQLite */ zSql = sqlite3_mprintf( "CREATE TABLE x(%s %Q HIDDEN, docid HIDDEN, %Q HIDDEN)", zCols, p->zName, zLanguageid ); if( !zCols || !zSql ){ rc = SQLITE_NOMEM; }else{ rc = sqlite3_declare_vtab(p->db, zSql); } sqlite3_free(zSql); sqlite3_free(zCols); *pRc = rc; } } /* ** Create the %_stat table if it does not already exist. */ SQLITE_PRIVATE void sqlite3Fts3CreateStatTable(int *pRc, Fts3Table *p){ fts3DbExec(pRc, p->db, "CREATE TABLE IF NOT EXISTS %Q.'%q_stat'" "(id INTEGER PRIMARY KEY, value BLOB);", p->zDb, p->zName ); if( (*pRc)==SQLITE_OK ) p->bHasStat = 1; } /* ** Create the backing store tables (%_content, %_segments and %_segdir) ** required by the FTS3 table passed as the only argument. This is done ** as part of the vtab xCreate() method. ** ** If the p->bHasDocsize boolean is true (indicating that this is an ** FTS4 table, not an FTS3 table) then also create the %_docsize and ** %_stat tables required by FTS4. 
*/ static int fts3CreateTables(Fts3Table *p){ int rc = SQLITE_OK; /* Return code */ int i; /* Iterator variable */ sqlite3 *db = p->db; /* The database connection */ if( p->zContentTbl==0 ){ const char *zLanguageid = p->zLanguageid; char *zContentCols; /* Columns of %_content table */ /* Create a list of user columns for the content table */ zContentCols = sqlite3_mprintf("docid INTEGER PRIMARY KEY"); for(i=0; zContentCols && i<p->nColumn; i++){ char *z = p->azColumn[i]; zContentCols = sqlite3_mprintf("%z, 'c%d%q'", zContentCols, i, z); } if( zLanguageid && zContentCols ){ zContentCols = sqlite3_mprintf("%z, langid", zContentCols, zLanguageid); } if( zContentCols==0 ) rc = SQLITE_NOMEM; /* Create the content table */ fts3DbExec(&rc, db, "CREATE TABLE %Q.'%q_content'(%s)", p->zDb, p->zName, zContentCols ); sqlite3_free(zContentCols); } /* Create other tables */ fts3DbExec(&rc, db, "CREATE TABLE %Q.'%q_segments'(blockid INTEGER PRIMARY KEY, block BLOB);", p->zDb, p->zName ); fts3DbExec(&rc, db, "CREATE TABLE %Q.'%q_segdir'(" "level INTEGER," "idx INTEGER," "start_block INTEGER," "leaves_end_block INTEGER," "end_block INTEGER," "root BLOB," "PRIMARY KEY(level, idx)" ");", p->zDb, p->zName ); if( p->bHasDocsize ){ fts3DbExec(&rc, db, "CREATE TABLE %Q.'%q_docsize'(docid INTEGER PRIMARY KEY, size BLOB);", p->zDb, p->zName ); } assert( p->bHasStat==p->bFts4 ); if( p->bHasStat ){ sqlite3Fts3CreateStatTable(&rc, p); } return rc; } /* ** Store the current database page-size in bytes in p->nPgsz. ** ** If *pRc is non-zero when this function is called, it is a no-op. ** Otherwise, if an error occurs, an SQLite error code is stored in *pRc ** before returning. */ static void fts3DatabasePageSize(int *pRc, Fts3Table *p){ if( *pRc==SQLITE_OK ){ int rc; /* Return code */ char *zSql; /* SQL text "PRAGMA %Q.page_size" */ sqlite3_stmt *pStmt; /* Compiled "PRAGMA %Q.page_size" statement */ zSql = sqlite3_mprintf("PRAGMA %Q.page_size", p->zDb); if( !zSql ){ rc = SQLITE_NOMEM; }else{ rc = sqlite3_prepare(p->db, zSql, -1, &pStmt, 0); if( rc==SQLITE_OK ){ sqlite3_step(pStmt); p->nPgsz = sqlite3_column_int(pStmt, 0); rc = sqlite3_finalize(pStmt); }else if( rc==SQLITE_AUTH ){ p->nPgsz = 1024; rc = SQLITE_OK; } } assert( p->nPgsz>0 || rc!=SQLITE_OK ); sqlite3_free(zSql); *pRc = rc; } } /* ** "Special" FTS4 arguments are column specifications of the following form: ** ** <key> = <value> ** ** There may not be whitespace surrounding the "=" character. The <value> ** term may be quoted, but the <key> may not. */ static int fts3IsSpecialColumn( const char *z, int *pnKey, char **pzValue ){ char *zValue; const char *zCsr = z; while( *zCsr!='=' ){ if( *zCsr=='\0' ) return 0; zCsr++; } *pnKey = (int)(zCsr-z); zValue = sqlite3_mprintf("%s", &zCsr[1]); if( zValue ){ sqlite3Fts3Dequote(zValue); } *pzValue = zValue; return 1; } /* ** Append the output of a printf() style formatting to an existing string. */ static void fts3Appendf( int *pRc, /* IN/OUT: Error code */ char **pz, /* IN/OUT: Pointer to string buffer */ const char *zFormat, /* Printf format string to append */ ... /* Arguments for printf format string */ ){ if( *pRc==SQLITE_OK ){ va_list ap; char *z; va_start(ap, zFormat); z = sqlite3_vmprintf(zFormat, ap); va_end(ap); if( z && *pz ){ char *z2 = sqlite3_mprintf("%s%s", *pz, z); sqlite3_free(z); z = z2; } if( z==0 ) *pRc = SQLITE_NOMEM; sqlite3_free(*pz); *pz = z; } } /* ** Return a copy of input string zInput enclosed in double-quotes (") and ** with all double quote characters escaped.
For example: ** ** fts3QuoteId("un \"zip\"") -> "un \"\"zip\"\"" ** ** The pointer returned points to memory obtained from sqlite3_malloc(). It ** is the callers responsibility to call sqlite3_free() to release this ** memory. */ static char *fts3QuoteId(char const *zInput){ int nRet; char *zRet; nRet = 2 + (int)strlen(zInput)*2 + 1; zRet = sqlite3_malloc(nRet); if( zRet ){ int i; char *z = zRet; *(z++) = '"'; for(i=0; zInput[i]; i++){ if( zInput[i]=='"' ) *(z++) = '"'; *(z++) = zInput[i]; } *(z++) = '"'; *(z++) = '\0'; } return zRet; } /* ** Return a list of comma separated SQL expressions and a FROM clause that ** could be used in a SELECT statement such as the following: ** ** SELECT FROM %_content AS x ... ** ** to return the docid, followed by each column of text data in order ** from left to write. If parameter zFunc is not NULL, then instead of ** being returned directly each column of text data is passed to an SQL ** function named zFunc first. For example, if zFunc is "unzip" and the ** table has the three user-defined columns "a", "b", and "c", the following ** string is returned: ** ** "docid, unzip(x.'a'), unzip(x.'b'), unzip(x.'c') FROM %_content AS x" ** ** The pointer returned points to a buffer allocated by sqlite3_malloc(). It ** is the responsibility of the caller to eventually free it. ** ** If *pRc is not SQLITE_OK when this function is called, it is a no-op (and ** a NULL pointer is returned). Otherwise, if an OOM error is encountered ** by this function, NULL is returned and *pRc is set to SQLITE_NOMEM. If ** no error occurs, *pRc is left unmodified. */ static char *fts3ReadExprList(Fts3Table *p, const char *zFunc, int *pRc){ char *zRet = 0; char *zFree = 0; char *zFunction; int i; if( p->zContentTbl==0 ){ if( !zFunc ){ zFunction = ""; }else{ zFree = zFunction = fts3QuoteId(zFunc); } fts3Appendf(pRc, &zRet, "docid"); for(i=0; inColumn; i++){ fts3Appendf(pRc, &zRet, ",%s(x.'c%d%q')", zFunction, i, p->azColumn[i]); } if( p->zLanguageid ){ fts3Appendf(pRc, &zRet, ", x.%Q", "langid"); } sqlite3_free(zFree); }else{ fts3Appendf(pRc, &zRet, "rowid"); for(i=0; inColumn; i++){ fts3Appendf(pRc, &zRet, ", x.'%q'", p->azColumn[i]); } if( p->zLanguageid ){ fts3Appendf(pRc, &zRet, ", x.%Q", p->zLanguageid); } } fts3Appendf(pRc, &zRet, " FROM '%q'.'%q%s' AS x", p->zDb, (p->zContentTbl ? p->zContentTbl : p->zName), (p->zContentTbl ? "" : "_content") ); return zRet; } /* ** Return a list of N comma separated question marks, where N is the number ** of columns in the %_content table (one for the docid plus one for each ** user-defined text column). ** ** If argument zFunc is not NULL, then all but the first question mark ** is preceded by zFunc and an open bracket, and followed by a closed ** bracket. For example, if zFunc is "zip" and the FTS3 table has three ** user-defined text columns, the following string is returned: ** ** "?, zip(?), zip(?), zip(?)" ** ** The pointer returned points to a buffer allocated by sqlite3_malloc(). It ** is the responsibility of the caller to eventually free it. ** ** If *pRc is not SQLITE_OK when this function is called, it is a no-op (and ** a NULL pointer is returned). Otherwise, if an OOM error is encountered ** by this function, NULL is returned and *pRc is set to SQLITE_NOMEM. If ** no error occurs, *pRc is left unmodified. 
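**
** (A note on intended use, added here as an assumption rather than a fact
** established by this function itself: the callers in this module use the
** returned list as the VALUES(...) portion of the INSERT/REPLACE statements
** that populate the %_content shadow table, so the number of '?' tokens is
** expected to match the expression list built by fts3ReadExprList() above.)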
*/ static char *fts3WriteExprList(Fts3Table *p, const char *zFunc, int *pRc){ char *zRet = 0; char *zFree = 0; char *zFunction; int i; if( !zFunc ){ zFunction = ""; }else{ zFree = zFunction = fts3QuoteId(zFunc); } fts3Appendf(pRc, &zRet, "?"); for(i=0; inColumn; i++){ fts3Appendf(pRc, &zRet, ",%s(?)", zFunction); } if( p->zLanguageid ){ fts3Appendf(pRc, &zRet, ", ?"); } sqlite3_free(zFree); return zRet; } /* ** This function interprets the string at (*pp) as a non-negative integer ** value. It reads the integer and sets *pnOut to the value read, then ** sets *pp to point to the byte immediately following the last byte of ** the integer value. ** ** Only decimal digits ('0'..'9') may be part of an integer value. ** ** If *pp does not being with a decimal digit SQLITE_ERROR is returned and ** the output value undefined. Otherwise SQLITE_OK is returned. ** ** This function is used when parsing the "prefix=" FTS4 parameter. */ static int fts3GobbleInt(const char **pp, int *pnOut){ const int MAX_NPREFIX = 10000000; const char *p; /* Iterator pointer */ int nInt = 0; /* Output value */ for(p=*pp; p[0]>='0' && p[0]<='9'; p++){ nInt = nInt * 10 + (p[0] - '0'); if( nInt>MAX_NPREFIX ){ nInt = 0; break; } } if( p==*pp ) return SQLITE_ERROR; *pnOut = nInt; *pp = p; return SQLITE_OK; } /* ** This function is called to allocate an array of Fts3Index structures ** representing the indexes maintained by the current FTS table. FTS tables ** always maintain the main "terms" index, but may also maintain one or ** more "prefix" indexes, depending on the value of the "prefix=" parameter ** (if any) specified as part of the CREATE VIRTUAL TABLE statement. ** ** Argument zParam is passed the value of the "prefix=" option if one was ** specified, or NULL otherwise. ** ** If no error occurs, SQLITE_OK is returned and *apIndex set to point to ** the allocated array. *pnIndex is set to the number of elements in the ** array. If an error does occur, an SQLite error code is returned. ** ** Regardless of whether or not an error is returned, it is the responsibility ** of the caller to call sqlite3_free() on the output array to free it. */ static int fts3PrefixParameter( const char *zParam, /* ABC in prefix=ABC parameter to parse */ int *pnIndex, /* OUT: size of *apIndex[] array */ struct Fts3Index **apIndex /* OUT: Array of indexes for this table */ ){ struct Fts3Index *aIndex; /* Allocated array */ int nIndex = 1; /* Number of entries in array */ if( zParam && zParam[0] ){ const char *p; nIndex++; for(p=zParam; *p; p++){ if( *p==',' ) nIndex++; } } aIndex = sqlite3_malloc(sizeof(struct Fts3Index) * nIndex); *apIndex = aIndex; if( !aIndex ){ return SQLITE_NOMEM; } memset(aIndex, 0, sizeof(struct Fts3Index) * nIndex); if( zParam ){ const char *p = zParam; int i; for(i=1; i=0 ); if( nPrefix==0 ){ nIndex--; i--; }else{ aIndex[i].nPrefix = nPrefix; } p++; } } *pnIndex = nIndex; return SQLITE_OK; } /* ** This function is called when initializing an FTS4 table that uses the ** content=xxx option. It determines the number of and names of the columns ** of the new FTS4 table. ** ** The third argument passed to this function is the value passed to the ** config=xxx option (i.e. "xxx"). This function queries the database for ** a table of that name. If found, the output variables are populated ** as follows: ** ** *pnCol: Set to the number of columns table xxx has, ** ** *pnStr: Set to the total amount of space required to store a copy ** of each columns name, including the nul-terminator. 
** ** *pazCol: Set to point to an array of *pnCol strings. Each string is ** the name of the corresponding column in table xxx. The array ** and its contents are allocated using a single allocation. It ** is the responsibility of the caller to free this allocation ** by eventually passing the *pazCol value to sqlite3_free(). ** ** If the table cannot be found, an error code is returned and the output ** variables are undefined. Or, if an OOM is encountered, SQLITE_NOMEM is ** returned (and the output variables are undefined). */ static int fts3ContentColumns( sqlite3 *db, /* Database handle */ const char *zDb, /* Name of db (i.e. "main", "temp" etc.) */ const char *zTbl, /* Name of content table */ const char ***pazCol, /* OUT: Malloc'd array of column names */ int *pnCol, /* OUT: Size of array *pazCol */ int *pnStr, /* OUT: Bytes of string content */ char **pzErr /* OUT: error message */ ){ int rc = SQLITE_OK; /* Return code */ char *zSql; /* "SELECT *" statement on zTbl */ sqlite3_stmt *pStmt = 0; /* Compiled version of zSql */ zSql = sqlite3_mprintf("SELECT * FROM %Q.%Q", zDb, zTbl); if( !zSql ){ rc = SQLITE_NOMEM; }else{ rc = sqlite3_prepare(db, zSql, -1, &pStmt, 0); if( rc!=SQLITE_OK ){ sqlite3Fts3ErrMsg(pzErr, "%s", sqlite3_errmsg(db)); } } sqlite3_free(zSql); if( rc==SQLITE_OK ){ const char **azCol; /* Output array */ int nStr = 0; /* Size of all column names (incl. 0x00) */ int nCol; /* Number of table columns */ int i; /* Used to iterate through columns */ /* Loop through the returned columns. Set nStr to the number of bytes of ** space required to store a copy of each column name, including the ** nul-terminator byte. */ nCol = sqlite3_column_count(pStmt); for(i=0; i module name ("fts3" or "fts4") ** argv[1] -> database name ** argv[2] -> table name ** argv[...] -> "column name" and other module argument fields. */ static int fts3InitVtab( int isCreate, /* True for xCreate, false for xConnect */ sqlite3 *db, /* The SQLite database connection */ void *pAux, /* Hash table containing tokenizers */ int argc, /* Number of elements in argv array */ const char * const *argv, /* xCreate/xConnect argument array */ sqlite3_vtab **ppVTab, /* Write the resulting vtab structure here */ char **pzErr /* Write any error message here */ ){ Fts3Hash *pHash = (Fts3Hash *)pAux; Fts3Table *p = 0; /* Pointer to allocated vtab */ int rc = SQLITE_OK; /* Return code */ int i; /* Iterator variable */ int nByte; /* Size of allocation used for *p */ int iCol; /* Column index */ int nString = 0; /* Bytes required to hold all column names */ int nCol = 0; /* Number of columns in the FTS table */ char *zCsr; /* Space for holding column names */ int nDb; /* Bytes required to hold database name */ int nName; /* Bytes required to hold table name */ int isFts4 = (argv[0][3]=='4'); /* True for FTS4, false for FTS3 */ const char **aCol; /* Array of column names */ sqlite3_tokenizer *pTokenizer = 0; /* Tokenizer for this table */ int nIndex = 0; /* Size of aIndex[] array */ struct Fts3Index *aIndex = 0; /* Array of indexes for this table */ /* The results of parsing supported FTS4 key=value options: */ int bNoDocsize = 0; /* True to omit %_docsize table */ int bDescIdx = 0; /* True to store descending indexes */ char *zPrefix = 0; /* Prefix parameter value (or NULL) */ char *zCompress = 0; /* compress=? parameter (or NULL) */ char *zUncompress = 0; /* uncompress=? parameter (or NULL) */ char *zContent = 0; /* content=? parameter (or NULL) */ char *zLanguageid = 0; /* languageid=? 
parameter (or NULL) */ char **azNotindexed = 0; /* The set of notindexed= columns */ int nNotindexed = 0; /* Size of azNotindexed[] array */ assert( strlen(argv[0])==4 ); assert( (sqlite3_strnicmp(argv[0], "fts4", 4)==0 && isFts4) || (sqlite3_strnicmp(argv[0], "fts3", 4)==0 && !isFts4) ); nDb = (int)strlen(argv[1]) + 1; nName = (int)strlen(argv[2]) + 1; nByte = sizeof(const char *) * (argc-2); aCol = (const char **)sqlite3_malloc(nByte); if( aCol ){ memset((void*)aCol, 0, nByte); azNotindexed = (char **)sqlite3_malloc(nByte); } if( azNotindexed ){ memset(azNotindexed, 0, nByte); } if( !aCol || !azNotindexed ){ rc = SQLITE_NOMEM; goto fts3_init_out; } /* Loop through all of the arguments passed by the user to the FTS3/4 ** module (i.e. all the column names and special arguments). This loop ** does the following: ** ** + Figures out the number of columns the FTSX table will have, and ** the number of bytes of space that must be allocated to store copies ** of the column names. ** ** + If there is a tokenizer specification included in the arguments, ** initializes the tokenizer pTokenizer. */ for(i=3; rc==SQLITE_OK && i8 && 0==sqlite3_strnicmp(z, "tokenize", 8) && 0==sqlite3Fts3IsIdChar(z[8]) ){ rc = sqlite3Fts3InitTokenizer(pHash, &z[9], &pTokenizer, pzErr); } /* Check if it is an FTS4 special argument. */ else if( isFts4 && fts3IsSpecialColumn(z, &nKey, &zVal) ){ struct Fts4Option { const char *zOpt; int nOpt; } aFts4Opt[] = { { "matchinfo", 9 }, /* 0 -> MATCHINFO */ { "prefix", 6 }, /* 1 -> PREFIX */ { "compress", 8 }, /* 2 -> COMPRESS */ { "uncompress", 10 }, /* 3 -> UNCOMPRESS */ { "order", 5 }, /* 4 -> ORDER */ { "content", 7 }, /* 5 -> CONTENT */ { "languageid", 10 }, /* 6 -> LANGUAGEID */ { "notindexed", 10 } /* 7 -> NOTINDEXED */ }; int iOpt; if( !zVal ){ rc = SQLITE_NOMEM; }else{ for(iOpt=0; iOptnOpt && !sqlite3_strnicmp(z, pOp->zOpt, pOp->nOpt) ){ break; } } if( iOpt==SizeofArray(aFts4Opt) ){ sqlite3Fts3ErrMsg(pzErr, "unrecognized parameter: %s", z); rc = SQLITE_ERROR; }else{ switch( iOpt ){ case 0: /* MATCHINFO */ if( strlen(zVal)!=4 || sqlite3_strnicmp(zVal, "fts3", 4) ){ sqlite3Fts3ErrMsg(pzErr, "unrecognized matchinfo: %s", zVal); rc = SQLITE_ERROR; } bNoDocsize = 1; break; case 1: /* PREFIX */ sqlite3_free(zPrefix); zPrefix = zVal; zVal = 0; break; case 2: /* COMPRESS */ sqlite3_free(zCompress); zCompress = zVal; zVal = 0; break; case 3: /* UNCOMPRESS */ sqlite3_free(zUncompress); zUncompress = zVal; zVal = 0; break; case 4: /* ORDER */ if( (strlen(zVal)!=3 || sqlite3_strnicmp(zVal, "asc", 3)) && (strlen(zVal)!=4 || sqlite3_strnicmp(zVal, "desc", 4)) ){ sqlite3Fts3ErrMsg(pzErr, "unrecognized order: %s", zVal); rc = SQLITE_ERROR; } bDescIdx = (zVal[0]=='d' || zVal[0]=='D'); break; case 5: /* CONTENT */ sqlite3_free(zContent); zContent = zVal; zVal = 0; break; case 6: /* LANGUAGEID */ assert( iOpt==6 ); sqlite3_free(zLanguageid); zLanguageid = zVal; zVal = 0; break; case 7: /* NOTINDEXED */ azNotindexed[nNotindexed++] = zVal; zVal = 0; break; } } sqlite3_free(zVal); } } /* Otherwise, the argument is a column name. */ else { nString += (int)(strlen(z) + 1); aCol[nCol++] = z; } } /* If a content=xxx option was specified, the following: ** ** 1. Ignore any compress= and uncompress= options. ** ** 2. If no column names were specified as part of the CREATE VIRTUAL ** TABLE statement, use all columns from the content table. 
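**
** For example (an illustrative declaration - the table names "idx" and
** "tbl" are hypothetical):
**
**   CREATE VIRTUAL TABLE idx USING fts4(content="tbl");
**
** indexes every column of the existing table "tbl", and no idx_content
** shadow table is created to duplicate the document text.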
*/ if( rc==SQLITE_OK && zContent ){ sqlite3_free(zCompress); sqlite3_free(zUncompress); zCompress = 0; zUncompress = 0; if( nCol==0 ){ sqlite3_free((void*)aCol); aCol = 0; rc = fts3ContentColumns(db, argv[1], zContent,&aCol,&nCol,&nString,pzErr); /* If a languageid= option was specified, remove the language id ** column from the aCol[] array. */ if( rc==SQLITE_OK && zLanguageid ){ int j; for(j=0; jdb = db; p->nColumn = nCol; p->nPendingData = 0; p->azColumn = (char **)&p[1]; p->pTokenizer = pTokenizer; p->nMaxPendingData = FTS3_MAX_PENDING_DATA; p->bHasDocsize = (isFts4 && bNoDocsize==0); p->bHasStat = isFts4; p->bFts4 = isFts4; p->bDescIdx = bDescIdx; p->nAutoincrmerge = 0xff; /* 0xff means setting unknown */ p->zContentTbl = zContent; p->zLanguageid = zLanguageid; zContent = 0; zLanguageid = 0; TESTONLY( p->inTransaction = -1 ); TESTONLY( p->mxSavepoint = -1 ); p->aIndex = (struct Fts3Index *)&p->azColumn[nCol]; memcpy(p->aIndex, aIndex, sizeof(struct Fts3Index) * nIndex); p->nIndex = nIndex; for(i=0; iaIndex[i].hPending, FTS3_HASH_STRING, 1); } p->abNotindexed = (u8 *)&p->aIndex[nIndex]; /* Fill in the zName and zDb fields of the vtab structure. */ zCsr = (char *)&p->abNotindexed[nCol]; p->zName = zCsr; memcpy(zCsr, argv[2], nName); zCsr += nName; p->zDb = zCsr; memcpy(zCsr, argv[1], nDb); zCsr += nDb; /* Fill in the azColumn array */ for(iCol=0; iColazColumn[iCol] = zCsr; zCsr += n+1; assert( zCsr <= &((char *)p)[nByte] ); } /* Fill in the abNotindexed array */ for(iCol=0; iColazColumn[iCol]); for(i=0; iazColumn[iCol], zNot, n) ){ p->abNotindexed[iCol] = 1; sqlite3_free(zNot); azNotindexed[i] = 0; } } } for(i=0; izReadExprlist = fts3ReadExprList(p, zUncompress, &rc); p->zWriteExprlist = fts3WriteExprList(p, zCompress, &rc); if( rc!=SQLITE_OK ) goto fts3_init_out; /* If this is an xCreate call, create the underlying tables in the ** database. TODO: For xConnect(), it could verify that said tables exist. */ if( isCreate ){ rc = fts3CreateTables(p); } /* Check to see if a legacy fts3 table has been "upgraded" by the ** addition of a %_stat table so that it can use incremental merge. */ if( !isFts4 && !isCreate ){ p->bHasStat = 2; } /* Figure out the page-size for the database. This is required in order to ** estimate the cost of loading large doclists from the database. */ fts3DatabasePageSize(&rc, p); p->nNodeSize = p->nPgsz-35; /* Declare the table schema to SQLite. */ fts3DeclareVtab(&rc, p); fts3_init_out: sqlite3_free(zPrefix); sqlite3_free(aIndex); sqlite3_free(zCompress); sqlite3_free(zUncompress); sqlite3_free(zContent); sqlite3_free(zLanguageid); for(i=0; ipModule->xDestroy(pTokenizer); } }else{ assert( p->pSegments==0 ); *ppVTab = &p->base; } return rc; } /* ** The xConnect() and xCreate() methods for the virtual table. All the ** work is done in function fts3InitVtab(). 
*/ static int fts3ConnectMethod( sqlite3 *db, /* Database connection */ void *pAux, /* Pointer to tokenizer hash table */ int argc, /* Number of elements in argv array */ const char * const *argv, /* xCreate/xConnect argument array */ sqlite3_vtab **ppVtab, /* OUT: New sqlite3_vtab object */ char **pzErr /* OUT: sqlite3_malloc'd error message */ ){ return fts3InitVtab(0, db, pAux, argc, argv, ppVtab, pzErr); } static int fts3CreateMethod( sqlite3 *db, /* Database connection */ void *pAux, /* Pointer to tokenizer hash table */ int argc, /* Number of elements in argv array */ const char * const *argv, /* xCreate/xConnect argument array */ sqlite3_vtab **ppVtab, /* OUT: New sqlite3_vtab object */ char **pzErr /* OUT: sqlite3_malloc'd error message */ ){ return fts3InitVtab(1, db, pAux, argc, argv, ppVtab, pzErr); } /* ** Set the pIdxInfo->estimatedRows variable to nRow. Unless this ** extension is currently being used by a version of SQLite too old to ** support estimatedRows. In that case this function is a no-op. */ static void fts3SetEstimatedRows(sqlite3_index_info *pIdxInfo, i64 nRow){ #if SQLITE_VERSION_NUMBER>=3008002 if( sqlite3_libversion_number()>=3008002 ){ pIdxInfo->estimatedRows = nRow; } #endif } /* ** Implementation of the xBestIndex method for FTS3 tables. There ** are three possible strategies, in order of preference: ** ** 1. Direct lookup by rowid or docid. ** 2. Full-text search using a MATCH operator on a non-docid column. ** 3. Linear scan of %_content table. */ static int fts3BestIndexMethod(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){ Fts3Table *p = (Fts3Table *)pVTab; int i; /* Iterator variable */ int iCons = -1; /* Index of constraint to use */ int iLangidCons = -1; /* Index of langid=x constraint, if present */ int iDocidGe = -1; /* Index of docid>=x constraint, if present */ int iDocidLe = -1; /* Index of docid<=x constraint, if present */ int iIdx; /* By default use a full table scan. This is an expensive option, ** so search through the constraints to see if a more efficient ** strategy is possible. */ pInfo->idxNum = FTS3_FULLSCAN_SEARCH; pInfo->estimatedCost = 5000000; for(i=0; inConstraint; i++){ int bDocid; /* True if this constraint is on docid */ struct sqlite3_index_constraint *pCons = &pInfo->aConstraint[i]; if( pCons->usable==0 ){ if( pCons->op==SQLITE_INDEX_CONSTRAINT_MATCH ){ /* There exists an unusable MATCH constraint. This means that if ** the planner does elect to use the results of this call as part ** of the overall query plan the user will see an "unable to use ** function MATCH in the requested context" error. To discourage ** this, return a very high cost here. */ pInfo->idxNum = FTS3_FULLSCAN_SEARCH; pInfo->estimatedCost = 1e50; fts3SetEstimatedRows(pInfo, ((sqlite3_int64)1) << 50); return SQLITE_OK; } continue; } bDocid = (pCons->iColumn<0 || pCons->iColumn==p->nColumn+1); /* A direct lookup on the rowid or docid column. Assign a cost of 1.0. */ if( iCons<0 && pCons->op==SQLITE_INDEX_CONSTRAINT_EQ && bDocid ){ pInfo->idxNum = FTS3_DOCID_SEARCH; pInfo->estimatedCost = 1.0; iCons = i; } /* A MATCH constraint. Use a full-text search. ** ** If there is more than one MATCH constraint available, use the first ** one encountered. If there is both a MATCH constraint and a direct ** rowid/docid lookup, prefer the MATCH strategy. This is done even ** though the rowid/docid lookup is faster than a MATCH query, selecting ** it would lead to an "unable to use function MATCH in the requested ** context" error. 
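**
** For example (an illustrative query against a hypothetical fts4 table t1):
**
**   SELECT * FROM t1 WHERE t1 MATCH 'sqlite' AND docid = 10;
**
** Both constraints are usable here, but the MATCH strategy is the one
** reported to the planner, even though the docid lookup alone would be
** cheaper.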
*/ if( pCons->op==SQLITE_INDEX_CONSTRAINT_MATCH && pCons->iColumn>=0 && pCons->iColumn<=p->nColumn ){ pInfo->idxNum = FTS3_FULLTEXT_SEARCH + pCons->iColumn; pInfo->estimatedCost = 2.0; iCons = i; } /* Equality constraint on the langid column */ if( pCons->op==SQLITE_INDEX_CONSTRAINT_EQ && pCons->iColumn==p->nColumn + 2 ){ iLangidCons = i; } if( bDocid ){ switch( pCons->op ){ case SQLITE_INDEX_CONSTRAINT_GE: case SQLITE_INDEX_CONSTRAINT_GT: iDocidGe = i; break; case SQLITE_INDEX_CONSTRAINT_LE: case SQLITE_INDEX_CONSTRAINT_LT: iDocidLe = i; break; } } } iIdx = 1; if( iCons>=0 ){ pInfo->aConstraintUsage[iCons].argvIndex = iIdx++; pInfo->aConstraintUsage[iCons].omit = 1; } if( iLangidCons>=0 ){ pInfo->idxNum |= FTS3_HAVE_LANGID; pInfo->aConstraintUsage[iLangidCons].argvIndex = iIdx++; } if( iDocidGe>=0 ){ pInfo->idxNum |= FTS3_HAVE_DOCID_GE; pInfo->aConstraintUsage[iDocidGe].argvIndex = iIdx++; } if( iDocidLe>=0 ){ pInfo->idxNum |= FTS3_HAVE_DOCID_LE; pInfo->aConstraintUsage[iDocidLe].argvIndex = iIdx++; } /* Regardless of the strategy selected, FTS can deliver rows in rowid (or ** docid) order. Both ascending and descending are possible. */ if( pInfo->nOrderBy==1 ){ struct sqlite3_index_orderby *pOrder = &pInfo->aOrderBy[0]; if( pOrder->iColumn<0 || pOrder->iColumn==p->nColumn+1 ){ if( pOrder->desc ){ pInfo->idxStr = "DESC"; }else{ pInfo->idxStr = "ASC"; } pInfo->orderByConsumed = 1; } } assert( p->pSegments==0 ); return SQLITE_OK; } /* ** Implementation of xOpen method. */ static int fts3OpenMethod(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCsr){ sqlite3_vtab_cursor *pCsr; /* Allocated cursor */ UNUSED_PARAMETER(pVTab); /* Allocate a buffer large enough for an Fts3Cursor structure. If the ** allocation succeeds, zero it and return SQLITE_OK. Otherwise, ** if the allocation fails, return SQLITE_NOMEM. */ *ppCsr = pCsr = (sqlite3_vtab_cursor *)sqlite3_malloc(sizeof(Fts3Cursor)); if( !pCsr ){ return SQLITE_NOMEM; } memset(pCsr, 0, sizeof(Fts3Cursor)); return SQLITE_OK; } /* ** Close the cursor. For additional information see the documentation ** on the xClose method of the virtual table interface. */ static int fts3CloseMethod(sqlite3_vtab_cursor *pCursor){ Fts3Cursor *pCsr = (Fts3Cursor *)pCursor; assert( ((Fts3Table *)pCsr->base.pVtab)->pSegments==0 ); sqlite3_finalize(pCsr->pStmt); sqlite3Fts3ExprFree(pCsr->pExpr); sqlite3Fts3FreeDeferredTokens(pCsr); sqlite3_free(pCsr->aDoclist); sqlite3Fts3MIBufferFree(pCsr->pMIBuffer); assert( ((Fts3Table *)pCsr->base.pVtab)->pSegments==0 ); sqlite3_free(pCsr); return SQLITE_OK; } /* ** If pCsr->pStmt has not been prepared (i.e. if pCsr->pStmt==0), then ** compose and prepare an SQL statement of the form: ** ** "SELECT FROM %_content WHERE rowid = ?" ** ** (or the equivalent for a content=xxx table) and set pCsr->pStmt to ** it. If an error occurs, return an SQLite error code. ** ** Otherwise, set *ppStmt to point to pCsr->pStmt and return SQLITE_OK. */ static int fts3CursorSeekStmt(Fts3Cursor *pCsr, sqlite3_stmt **ppStmt){ int rc = SQLITE_OK; if( pCsr->pStmt==0 ){ Fts3Table *p = (Fts3Table *)pCsr->base.pVtab; char *zSql; zSql = sqlite3_mprintf("SELECT %s WHERE rowid = ?", p->zReadExprlist); if( !zSql ) return SQLITE_NOMEM; rc = sqlite3_prepare_v2(p->db, zSql, -1, &pCsr->pStmt, 0); sqlite3_free(zSql); } *ppStmt = pCsr->pStmt; return rc; } /* ** Position the pCsr->pStmt statement so that it is on the row ** of the %_content table that contains the last match. Return ** SQLITE_OK on success. 
*/ static int fts3CursorSeek(sqlite3_context *pContext, Fts3Cursor *pCsr){ int rc = SQLITE_OK; if( pCsr->isRequireSeek ){ sqlite3_stmt *pStmt = 0; rc = fts3CursorSeekStmt(pCsr, &pStmt); if( rc==SQLITE_OK ){ sqlite3_bind_int64(pCsr->pStmt, 1, pCsr->iPrevId); pCsr->isRequireSeek = 0; if( SQLITE_ROW==sqlite3_step(pCsr->pStmt) ){ return SQLITE_OK; }else{ rc = sqlite3_reset(pCsr->pStmt); if( rc==SQLITE_OK && ((Fts3Table *)pCsr->base.pVtab)->zContentTbl==0 ){ /* If no row was found and no error has occurred, then the %_content ** table is missing a row that is present in the full-text index. ** The data structures are corrupt. */ rc = FTS_CORRUPT_VTAB; pCsr->isEof = 1; } } } } if( rc!=SQLITE_OK && pContext ){ sqlite3_result_error_code(pContext, rc); } return rc; } /* ** This function is used to process a single interior node when searching ** a b-tree for a term or term prefix. The node data is passed to this ** function via the zNode/nNode parameters. The term to search for is ** passed in zTerm/nTerm. ** ** If piFirst is not NULL, then this function sets *piFirst to the blockid ** of the child node that heads the sub-tree that may contain the term. ** ** If piLast is not NULL, then *piLast is set to the right-most child node ** that heads a sub-tree that may contain a term for which zTerm/nTerm is ** a prefix. ** ** If an OOM error occurs, SQLITE_NOMEM is returned. Otherwise, SQLITE_OK. */ static int fts3ScanInteriorNode( const char *zTerm, /* Term to select leaves for */ int nTerm, /* Size of term zTerm in bytes */ const char *zNode, /* Buffer containing segment interior node */ int nNode, /* Size of buffer at zNode */ sqlite3_int64 *piFirst, /* OUT: Selected child node */ sqlite3_int64 *piLast /* OUT: Selected child node */ ){ int rc = SQLITE_OK; /* Return code */ const char *zCsr = zNode; /* Cursor to iterate through node */ const char *zEnd = &zCsr[nNode];/* End of interior node buffer */ char *zBuffer = 0; /* Buffer to load terms into */ int nAlloc = 0; /* Size of allocated buffer */ int isFirstTerm = 1; /* True when processing first term on page */ sqlite3_int64 iChild; /* Block id of child node to descend to */ /* Skip over the 'height' varint that occurs at the start of every ** interior node. Then load the blockid of the left-child of the b-tree ** node into variable iChild. ** ** Even if the data structure on disk is corrupted, this (reading two ** varints from the buffer) does not risk an overread. If zNode is a ** root node, then the buffer comes from a SELECT statement. SQLite does ** not make this guarantee explicitly, but in practice there are always ** either more than 20 bytes of allocated space following the nNode bytes of ** contents, or two zero bytes. Or, if the node is read from the %_segments ** table, then there are always 20 bytes of zeroed padding following the ** nNode bytes of content (see sqlite3Fts3ReadBlock() for details). */ zCsr += sqlite3Fts3GetVarint(zCsr, &iChild); zCsr += sqlite3Fts3GetVarint(zCsr, &iChild); if( zCsr>zEnd ){ return FTS_CORRUPT_VTAB; } while( zCsrzEnd ){ rc = FTS_CORRUPT_VTAB; goto finish_scan; } if( nPrefix+nSuffix>nAlloc ){ char *zNew; nAlloc = (nPrefix+nSuffix) * 2; zNew = (char *)sqlite3_realloc(zBuffer, nAlloc); if( !zNew ){ rc = SQLITE_NOMEM; goto finish_scan; } zBuffer = zNew; } assert( zBuffer ); memcpy(&zBuffer[nPrefix], zCsr, nSuffix); nBuffer = nPrefix + nSuffix; zCsr += nSuffix; /* Compare the term we are searching for with the term just loaded from ** the interior node. 
If the specified term is greater than or equal ** to the term from the interior node, then all terms on the sub-tree ** headed by node iChild are smaller than zTerm. No need to search ** iChild. ** ** If the interior node term is larger than the specified term, then ** the tree headed by iChild may contain the specified term. */ cmp = memcmp(zTerm, zBuffer, (nBuffer>nTerm ? nTerm : nBuffer)); if( piFirst && (cmp<0 || (cmp==0 && nBuffer>nTerm)) ){ *piFirst = iChild; piFirst = 0; } if( piLast && cmp<0 ){ *piLast = iChild; piLast = 0; } iChild++; }; if( piFirst ) *piFirst = iChild; if( piLast ) *piLast = iChild; finish_scan: sqlite3_free(zBuffer); return rc; } /* ** The buffer pointed to by argument zNode (size nNode bytes) contains an ** interior node of a b-tree segment. The zTerm buffer (size nTerm bytes) ** contains a term. This function searches the sub-tree headed by the zNode ** node for the range of leaf nodes that may contain the specified term ** or terms for which the specified term is a prefix. ** ** If piLeaf is not NULL, then *piLeaf is set to the blockid of the ** left-most leaf node in the tree that may contain the specified term. ** If piLeaf2 is not NULL, then *piLeaf2 is set to the blockid of the ** right-most leaf node that may contain a term for which the specified ** term is a prefix. ** ** It is possible that the range of returned leaf nodes does not contain ** the specified term or any terms for which it is a prefix. However, if the ** segment does contain any such terms, they are stored within the identified ** range. Because this function only inspects interior segment nodes (and ** never loads leaf nodes into memory), it is not possible to be sure. ** ** If an error occurs, an error code other than SQLITE_OK is returned. */ static int fts3SelectLeaf( Fts3Table *p, /* Virtual table handle */ const char *zTerm, /* Term to select leaves for */ int nTerm, /* Size of term zTerm in bytes */ const char *zNode, /* Buffer containing segment interior node */ int nNode, /* Size of buffer at zNode */ sqlite3_int64 *piLeaf, /* Selected leaf node */ sqlite3_int64 *piLeaf2 /* Selected leaf node */ ){ int rc = SQLITE_OK; /* Return code */ int iHeight; /* Height of this node in tree */ assert( piLeaf || piLeaf2 ); fts3GetVarint32(zNode, &iHeight); rc = fts3ScanInteriorNode(zTerm, nTerm, zNode, nNode, piLeaf, piLeaf2); assert( !piLeaf2 || !piLeaf || rc!=SQLITE_OK || (*piLeaf<=*piLeaf2) ); if( rc==SQLITE_OK && iHeight>1 ){ char *zBlob = 0; /* Blob read from %_segments table */ int nBlob = 0; /* Size of zBlob in bytes */ if( piLeaf && piLeaf2 && (*piLeaf!=*piLeaf2) ){ rc = sqlite3Fts3ReadBlock(p, *piLeaf, &zBlob, &nBlob, 0); if( rc==SQLITE_OK ){ rc = fts3SelectLeaf(p, zTerm, nTerm, zBlob, nBlob, piLeaf, 0); } sqlite3_free(zBlob); piLeaf = 0; zBlob = 0; } if( rc==SQLITE_OK ){ rc = sqlite3Fts3ReadBlock(p, piLeaf?*piLeaf:*piLeaf2, &zBlob, &nBlob, 0); } if( rc==SQLITE_OK ){ rc = fts3SelectLeaf(p, zTerm, nTerm, zBlob, nBlob, piLeaf, piLeaf2); } sqlite3_free(zBlob); } return rc; } /* ** This function is used to create delta-encoded serialized lists of FTS3 ** varints. Each call to this function appends a single varint to a list. 
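**
** For example (an illustrative sketch): appending the values 5, 9 and 12 to
** an initially empty list stores the varints 5, 4 and 3, since each value is
** encoded as the difference from the value written before it.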
*/ static void fts3PutDeltaVarint( char **pp, /* IN/OUT: Output pointer */ sqlite3_int64 *piPrev, /* IN/OUT: Previous value written to list */ sqlite3_int64 iVal /* Write this value to the list */ ){ assert( iVal-*piPrev > 0 || (*piPrev==0 && iVal==0) ); *pp += sqlite3Fts3PutVarint(*pp, iVal-*piPrev); *piPrev = iVal; } /* ** When this function is called, *ppPoslist is assumed to point to the ** start of a position-list. After it returns, *ppPoslist points to the ** first byte after the position-list. ** ** A position list is list of positions (delta encoded) and columns for ** a single document record of a doclist. So, in other words, this ** routine advances *ppPoslist so that it points to the next docid in ** the doclist, or to the first byte past the end of the doclist. ** ** If pp is not NULL, then the contents of the position list are copied ** to *pp. *pp is set to point to the first byte past the last byte copied ** before this function returns. */ static void fts3PoslistCopy(char **pp, char **ppPoslist){ char *pEnd = *ppPoslist; char c = 0; /* The end of a position list is marked by a zero encoded as an FTS3 ** varint. A single POS_END (0) byte. Except, if the 0 byte is preceded by ** a byte with the 0x80 bit set, then it is not a varint 0, but the tail ** of some other, multi-byte, value. ** ** The following while-loop moves pEnd to point to the first byte that is not ** immediately preceded by a byte with the 0x80 bit set. Then increments ** pEnd once more so that it points to the byte immediately following the ** last byte in the position-list. */ while( *pEnd | c ){ c = *pEnd++ & 0x80; testcase( c!=0 && (*pEnd)==0 ); } pEnd++; /* Advance past the POS_END terminator byte */ if( pp ){ int n = (int)(pEnd - *ppPoslist); char *p = *pp; memcpy(p, *ppPoslist, n); p += n; *pp = p; } *ppPoslist = pEnd; } /* ** When this function is called, *ppPoslist is assumed to point to the ** start of a column-list. After it returns, *ppPoslist points to the ** to the terminator (POS_COLUMN or POS_END) byte of the column-list. ** ** A column-list is list of delta-encoded positions for a single column ** within a single document within a doclist. ** ** The column-list is terminated either by a POS_COLUMN varint (1) or ** a POS_END varint (0). This routine leaves *ppPoslist pointing to ** the POS_COLUMN or POS_END that terminates the column-list. ** ** If pp is not NULL, then the contents of the column-list are copied ** to *pp. *pp is set to point to the first byte past the last byte copied ** before this function returns. The POS_COLUMN or POS_END terminator ** is not copied into *pp. */ static void fts3ColumnlistCopy(char **pp, char **ppPoslist){ char *pEnd = *ppPoslist; char c = 0; /* A column-list is terminated by either a 0x01 or 0x00 byte that is ** not part of a multi-byte varint. */ while( 0xFE & (*pEnd | c) ){ c = *pEnd++ & 0x80; testcase( c!=0 && ((*pEnd)&0xfe)==0 ); } if( pp ){ int n = (int)(pEnd - *ppPoslist); char *p = *pp; memcpy(p, *ppPoslist, n); p += n; *pp = p; } *ppPoslist = pEnd; } /* ** Value used to signify the end of an position-list. This is safe because ** it is not possible to have a document with 2^31 terms. */ #define POSITION_LIST_END 0x7fffffff /* ** This function is used to help parse position-lists. When this function is ** called, *pp may point to the start of the next varint in the position-list ** being parsed, or it may point to 1 byte past the end of the position-list ** (in which case **pp will be a terminator bytes POS_END (0) or ** (1)). 
** ** If *pp points past the end of the current position-list, set *pi to ** POSITION_LIST_END and return. Otherwise, read the next varint from *pp, ** increment the current value of *pi by the value read, and set *pp to ** point to the next value before returning. ** ** Before calling this routine *pi must be initialized to the value of ** the previous position, or zero if we are reading the first position ** in the position-list. Because positions are delta-encoded, the value ** of the previous position is needed in order to compute the value of ** the next position. */ static void fts3ReadNextPos( char **pp, /* IN/OUT: Pointer into position-list buffer */ sqlite3_int64 *pi /* IN/OUT: Value read from position-list */ ){ if( (**pp)&0xFE ){ fts3GetDeltaVarint(pp, pi); *pi -= 2; }else{ *pi = POSITION_LIST_END; } } /* ** If parameter iCol is not 0, write an POS_COLUMN (1) byte followed by ** the value of iCol encoded as a varint to *pp. This will start a new ** column list. ** ** Set *pp to point to the byte just after the last byte written before ** returning (do not modify it if iCol==0). Return the total number of bytes ** written (0 if iCol==0). */ static int fts3PutColNumber(char **pp, int iCol){ int n = 0; /* Number of bytes written */ if( iCol ){ char *p = *pp; /* Output pointer */ n = 1 + sqlite3Fts3PutVarint(&p[1], iCol); *p = 0x01; *pp = &p[n]; } return n; } /* ** Compute the union of two position lists. The output written ** into *pp contains all positions of both *pp1 and *pp2 in sorted ** order and with any duplicates removed. All pointers are ** updated appropriately. The caller is responsible for insuring ** that there is enough space in *pp to hold the complete output. */ static void fts3PoslistMerge( char **pp, /* Output buffer */ char **pp1, /* Left input list */ char **pp2 /* Right input list */ ){ char *p = *pp; char *p1 = *pp1; char *p2 = *pp2; while( *p1 || *p2 ){ int iCol1; /* The current column index in pp1 */ int iCol2; /* The current column index in pp2 */ if( *p1==POS_COLUMN ) fts3GetVarint32(&p1[1], &iCol1); else if( *p1==POS_END ) iCol1 = POSITION_LIST_END; else iCol1 = 0; if( *p2==POS_COLUMN ) fts3GetVarint32(&p2[1], &iCol2); else if( *p2==POS_END ) iCol2 = POSITION_LIST_END; else iCol2 = 0; if( iCol1==iCol2 ){ sqlite3_int64 i1 = 0; /* Last position from pp1 */ sqlite3_int64 i2 = 0; /* Last position from pp2 */ sqlite3_int64 iPrev = 0; int n = fts3PutColNumber(&p, iCol1); p1 += n; p2 += n; /* At this point, both p1 and p2 point to the start of column-lists ** for the same column (the column with index iCol1 and iCol2). ** A column-list is a list of non-negative delta-encoded varints, each ** incremented by 2 before being stored. Each list is terminated by a ** POS_END (0) or POS_COLUMN (1). The following block merges the two lists ** and writes the results to buffer p. p is left pointing to the byte ** after the list written. No terminator (POS_END or POS_COLUMN) is ** written to the output. */ fts3GetDeltaVarint(&p1, &i1); fts3GetDeltaVarint(&p2, &i2); do { fts3PutDeltaVarint(&p, &iPrev, (i1pos(*pp1) && pos(*pp2)-pos(*pp1)<=nToken). i.e. ** when the *pp1 token appears before the *pp2 token, but not more than nToken ** slots before it. ** ** e.g. nToken==1 searches for adjacent positions. 
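**
** A further illustrative case: with nToken==2 and isExact==0, a *pp1 token
** at position 10 matches *pp2 tokens at positions 11 or 12. With isExact
** non-zero, only position 12 would match.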
*/ static int fts3PoslistPhraseMerge( char **pp, /* IN/OUT: Preallocated output buffer */ int nToken, /* Maximum difference in token positions */ int isSaveLeft, /* Save the left position */ int isExact, /* If *pp1 is exactly nTokens before *pp2 */ char **pp1, /* IN/OUT: Left input list */ char **pp2 /* IN/OUT: Right input list */ ){ char *p = *pp; char *p1 = *pp1; char *p2 = *pp2; int iCol1 = 0; int iCol2 = 0; /* Never set both isSaveLeft and isExact for the same invocation. */ assert( isSaveLeft==0 || isExact==0 ); assert( p!=0 && *p1!=0 && *p2!=0 ); if( *p1==POS_COLUMN ){ p1++; p1 += fts3GetVarint32(p1, &iCol1); } if( *p2==POS_COLUMN ){ p2++; p2 += fts3GetVarint32(p2, &iCol2); } while( 1 ){ if( iCol1==iCol2 ){ char *pSave = p; sqlite3_int64 iPrev = 0; sqlite3_int64 iPos1 = 0; sqlite3_int64 iPos2 = 0; if( iCol1 ){ *p++ = POS_COLUMN; p += sqlite3Fts3PutVarint(p, iCol1); } assert( *p1!=POS_END && *p1!=POS_COLUMN ); assert( *p2!=POS_END && *p2!=POS_COLUMN ); fts3GetDeltaVarint(&p1, &iPos1); iPos1 -= 2; fts3GetDeltaVarint(&p2, &iPos2); iPos2 -= 2; while( 1 ){ if( iPos2==iPos1+nToken || (isExact==0 && iPos2>iPos1 && iPos2<=iPos1+nToken) ){ sqlite3_int64 iSave; iSave = isSaveLeft ? iPos1 : iPos2; fts3PutDeltaVarint(&p, &iPrev, iSave+2); iPrev -= 2; pSave = 0; assert( p ); } if( (!isSaveLeft && iPos2<=(iPos1+nToken)) || iPos2<=iPos1 ){ if( (*p2&0xFE)==0 ) break; fts3GetDeltaVarint(&p2, &iPos2); iPos2 -= 2; }else{ if( (*p1&0xFE)==0 ) break; fts3GetDeltaVarint(&p1, &iPos1); iPos1 -= 2; } } if( pSave ){ assert( pp && p ); p = pSave; } fts3ColumnlistCopy(0, &p1); fts3ColumnlistCopy(0, &p2); assert( (*p1&0xFE)==0 && (*p2&0xFE)==0 ); if( 0==*p1 || 0==*p2 ) break; p1++; p1 += fts3GetVarint32(p1, &iCol1); p2++; p2 += fts3GetVarint32(p2, &iCol2); } /* Advance pointer p1 or p2 (whichever corresponds to the smaller of ** iCol1 and iCol2) so that it points to either the 0x00 that marks the ** end of the position list, or the 0x01 that precedes the next ** column-number in the position list. */ else if( iCol1=pEnd ){ *pp = 0; }else{ sqlite3_int64 iVal; *pp += sqlite3Fts3GetVarint(*pp, &iVal); if( bDescIdx ){ *pVal -= iVal; }else{ *pVal += iVal; } } } /* ** This function is used to write a single varint to a buffer. The varint ** is written to *pp. Before returning, *pp is set to point 1 byte past the ** end of the value written. ** ** If *pbFirst is zero when this function is called, the value written to ** the buffer is that of parameter iVal. ** ** If *pbFirst is non-zero when this function is called, then the value ** written is either (iVal-*piPrev) (if bDescIdx is zero) or (*piPrev-iVal) ** (if bDescIdx is non-zero). ** ** Before returning, this function always sets *pbFirst to 1 and *piPrev ** to the value of parameter iVal. */ static void fts3PutDeltaVarint3( char **pp, /* IN/OUT: Output pointer */ int bDescIdx, /* True for descending docids */ sqlite3_int64 *piPrev, /* IN/OUT: Previous value written to list */ int *pbFirst, /* IN/OUT: True after first int written */ sqlite3_int64 iVal /* Write this value to the list */ ){ sqlite3_int64 iWrite; if( bDescIdx==0 || *pbFirst==0 ){ iWrite = iVal - *piPrev; }else{ iWrite = *piPrev - iVal; } assert( *pbFirst || *piPrev==0 ); assert( *pbFirst==0 || iWrite>0 ); *pp += sqlite3Fts3PutVarint(*pp, iWrite); *piPrev = iVal; *pbFirst = 1; } /* ** This macro is used by various functions that merge doclists. The two ** arguments are 64-bit docid values. If the value of the stack variable ** bDescDoclist is 0 when this macro is invoked, then it returns (i1-i2). 
** Otherwise, (i2-i1). ** ** Using this makes it easier to write code that can merge doclists that are ** sorted in either ascending or descending order. */ #define DOCID_CMP(i1, i2) ((bDescDoclist?-1:1) * (i1-i2)) /* ** This function does an "OR" merge of two doclists (output contains all ** positions contained in either argument doclist). If the docids in the ** input doclists are sorted in ascending order, parameter bDescDoclist ** should be false. If they are sorted in ascending order, it should be ** passed a non-zero value. ** ** If no error occurs, *paOut is set to point at an sqlite3_malloc'd buffer ** containing the output doclist and SQLITE_OK is returned. In this case ** *pnOut is set to the number of bytes in the output doclist. ** ** If an error occurs, an SQLite error code is returned. The output values ** are undefined in this case. */ static int fts3DoclistOrMerge( int bDescDoclist, /* True if arguments are desc */ char *a1, int n1, /* First doclist */ char *a2, int n2, /* Second doclist */ char **paOut, int *pnOut /* OUT: Malloc'd doclist */ ){ sqlite3_int64 i1 = 0; sqlite3_int64 i2 = 0; sqlite3_int64 iPrev = 0; char *pEnd1 = &a1[n1]; char *pEnd2 = &a2[n2]; char *p1 = a1; char *p2 = a2; char *p; char *aOut; int bFirstOut = 0; *paOut = 0; *pnOut = 0; /* Allocate space for the output. Both the input and output doclists ** are delta encoded. If they are in ascending order (bDescDoclist==0), ** then the first docid in each list is simply encoded as a varint. For ** each subsequent docid, the varint stored is the difference between the ** current and previous docid (a positive number - since the list is in ** ascending order). ** ** The first docid written to the output is therefore encoded using the ** same number of bytes as it is in whichever of the input lists it is ** read from. And each subsequent docid read from the same input list ** consumes either the same or less bytes as it did in the input (since ** the difference between it and the previous value in the output must ** be a positive value less than or equal to the delta value read from ** the input list). The same argument applies to all but the first docid ** read from the 'other' list. And to the contents of all position lists ** that will be copied and merged from the input to the output. ** ** However, if the first docid copied to the output is a negative number, ** then the encoding of the first docid from the 'other' input list may ** be larger in the output than it was in the input (since the delta value ** may be a larger positive integer than the actual docid). ** ** The space required to store the output is therefore the sum of the ** sizes of the two inputs, plus enough space for exactly one of the input ** docids to grow. ** ** A symetric argument may be made if the doclists are in descending ** order. 
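**
** (Hence the (n1 + n2 + FTS3_VARINT_MAX - 1) byte allocation immediately
** below: the combined size of the two inputs, plus room for a single docid
** delta to grow by up to FTS3_VARINT_MAX-1 bytes.)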
*/ aOut = sqlite3_malloc(n1+n2+FTS3_VARINT_MAX-1); if( !aOut ) return SQLITE_NOMEM; p = aOut; fts3GetDeltaVarint3(&p1, pEnd1, 0, &i1); fts3GetDeltaVarint3(&p2, pEnd2, 0, &i2); while( p1 || p2 ){ sqlite3_int64 iDiff = DOCID_CMP(i1, i2); if( p2 && p1 && iDiff==0 ){ fts3PutDeltaVarint3(&p, bDescDoclist, &iPrev, &bFirstOut, i1); fts3PoslistMerge(&p, &p1, &p2); fts3GetDeltaVarint3(&p1, pEnd1, bDescDoclist, &i1); fts3GetDeltaVarint3(&p2, pEnd2, bDescDoclist, &i2); }else if( !p2 || (p1 && iDiff<0) ){ fts3PutDeltaVarint3(&p, bDescDoclist, &iPrev, &bFirstOut, i1); fts3PoslistCopy(&p, &p1); fts3GetDeltaVarint3(&p1, pEnd1, bDescDoclist, &i1); }else{ fts3PutDeltaVarint3(&p, bDescDoclist, &iPrev, &bFirstOut, i2); fts3PoslistCopy(&p, &p2); fts3GetDeltaVarint3(&p2, pEnd2, bDescDoclist, &i2); } } *paOut = aOut; *pnOut = (int)(p-aOut); assert( *pnOut<=n1+n2+FTS3_VARINT_MAX-1 ); return SQLITE_OK; } /* ** This function does a "phrase" merge of two doclists. In a phrase merge, ** the output contains a copy of each position from the right-hand input ** doclist for which there is a position in the left-hand input doclist ** exactly nDist tokens before it. ** ** If the docids in the input doclists are sorted in ascending order, ** parameter bDescDoclist should be false. If they are sorted in ascending ** order, it should be passed a non-zero value. ** ** The right-hand input doclist is overwritten by this function. */ static int fts3DoclistPhraseMerge( int bDescDoclist, /* True if arguments are desc */ int nDist, /* Distance from left to right (1=adjacent) */ char *aLeft, int nLeft, /* Left doclist */ char **paRight, int *pnRight /* IN/OUT: Right/output doclist */ ){ sqlite3_int64 i1 = 0; sqlite3_int64 i2 = 0; sqlite3_int64 iPrev = 0; char *aRight = *paRight; char *pEnd1 = &aLeft[nLeft]; char *pEnd2 = &aRight[*pnRight]; char *p1 = aLeft; char *p2 = aRight; char *p; int bFirstOut = 0; char *aOut; assert( nDist>0 ); if( bDescDoclist ){ aOut = sqlite3_malloc(*pnRight + FTS3_VARINT_MAX); if( aOut==0 ) return SQLITE_NOMEM; }else{ aOut = aRight; } p = aOut; fts3GetDeltaVarint3(&p1, pEnd1, 0, &i1); fts3GetDeltaVarint3(&p2, pEnd2, 0, &i2); while( p1 && p2 ){ sqlite3_int64 iDiff = DOCID_CMP(i1, i2); if( iDiff==0 ){ char *pSave = p; sqlite3_int64 iPrevSave = iPrev; int bFirstOutSave = bFirstOut; fts3PutDeltaVarint3(&p, bDescDoclist, &iPrev, &bFirstOut, i1); if( 0==fts3PoslistPhraseMerge(&p, nDist, 0, 1, &p1, &p2) ){ p = pSave; iPrev = iPrevSave; bFirstOut = bFirstOutSave; } fts3GetDeltaVarint3(&p1, pEnd1, bDescDoclist, &i1); fts3GetDeltaVarint3(&p2, pEnd2, bDescDoclist, &i2); }else if( iDiff<0 ){ fts3PoslistCopy(0, &p1); fts3GetDeltaVarint3(&p1, pEnd1, bDescDoclist, &i1); }else{ fts3PoslistCopy(0, &p2); fts3GetDeltaVarint3(&p2, pEnd2, bDescDoclist, &i2); } } *pnRight = (int)(p - aOut); if( bDescDoclist ){ sqlite3_free(aRight); *paRight = aOut; } return SQLITE_OK; } /* ** Argument pList points to a position list nList bytes in size. This ** function checks to see if the position list contains any entries for ** a token in position 0 (of any column). If so, it writes argument iDelta ** to the output buffer pOut, followed by a position list consisting only ** of the entries from pList at position 0, and terminated by an 0x00 byte. ** The value returned is the number of bytes written to pOut (if any). 
*/ SQLITE_PRIVATE int sqlite3Fts3FirstFilter( sqlite3_int64 iDelta, /* Varint that may be written to pOut */ char *pList, /* Position list (no 0x00 term) */ int nList, /* Size of pList in bytes */ char *pOut /* Write output here */ ){ int nOut = 0; int bWritten = 0; /* True once iDelta has been written */ char *p = pList; char *pEnd = &pList[nList]; if( *p!=0x01 ){ if( *p==0x02 ){ nOut += sqlite3Fts3PutVarint(&pOut[nOut], iDelta); pOut[nOut++] = 0x02; bWritten = 1; } fts3ColumnlistCopy(0, &p); } while( paaOutput); i++){ if( pTS->aaOutput[i] ){ if( !aOut ){ aOut = pTS->aaOutput[i]; nOut = pTS->anOutput[i]; pTS->aaOutput[i] = 0; }else{ int nNew; char *aNew; int rc = fts3DoclistOrMerge(p->bDescIdx, pTS->aaOutput[i], pTS->anOutput[i], aOut, nOut, &aNew, &nNew ); if( rc!=SQLITE_OK ){ sqlite3_free(aOut); return rc; } sqlite3_free(pTS->aaOutput[i]); sqlite3_free(aOut); pTS->aaOutput[i] = 0; aOut = aNew; nOut = nNew; } } } pTS->aaOutput[0] = aOut; pTS->anOutput[0] = nOut; return SQLITE_OK; } /* ** Merge the doclist aDoclist/nDoclist into the TermSelect object passed ** as the first argument. The merge is an "OR" merge (see function ** fts3DoclistOrMerge() for details). ** ** This function is called with the doclist for each term that matches ** a queried prefix. It merges all these doclists into one, the doclist ** for the specified prefix. Since there can be a very large number of ** doclists to merge, the merging is done pair-wise using the TermSelect ** object. ** ** This function returns SQLITE_OK if the merge is successful, or an ** SQLite error code (SQLITE_NOMEM) if an error occurs. */ static int fts3TermSelectMerge( Fts3Table *p, /* FTS table handle */ TermSelect *pTS, /* TermSelect object to merge into */ char *aDoclist, /* Pointer to doclist */ int nDoclist /* Size of aDoclist in bytes */ ){ if( pTS->aaOutput[0]==0 ){ /* If this is the first term selected, copy the doclist to the output ** buffer using memcpy(). ** ** Add FTS3_VARINT_MAX bytes of unused space to the end of the ** allocation. This is so as to ensure that the buffer is big enough ** to hold the current doclist AND'd with any other doclist. If the ** doclists are stored in order=ASC order, this padding would not be ** required (since the size of [doclistA AND doclistB] is always less ** than or equal to the size of [doclistA] in that case). But this is ** not true for order=DESC. For example, a doclist containing (1, -1) ** may be smaller than (-1), as in the first example the -1 may be stored ** as a single-byte delta, whereas in the second it must be stored as a ** FTS3_VARINT_MAX byte varint. ** ** Similar padding is added in the fts3DoclistOrMerge() function. 
*/ pTS->aaOutput[0] = sqlite3_malloc(nDoclist + FTS3_VARINT_MAX + 1); pTS->anOutput[0] = nDoclist; if( pTS->aaOutput[0] ){ memcpy(pTS->aaOutput[0], aDoclist, nDoclist); }else{ return SQLITE_NOMEM; } }else{ char *aMerge = aDoclist; int nMerge = nDoclist; int iOut; for(iOut=0; iOutaaOutput); iOut++){ if( pTS->aaOutput[iOut]==0 ){ assert( iOut>0 ); pTS->aaOutput[iOut] = aMerge; pTS->anOutput[iOut] = nMerge; break; }else{ char *aNew; int nNew; int rc = fts3DoclistOrMerge(p->bDescIdx, aMerge, nMerge, pTS->aaOutput[iOut], pTS->anOutput[iOut], &aNew, &nNew ); if( rc!=SQLITE_OK ){ if( aMerge!=aDoclist ) sqlite3_free(aMerge); return rc; } if( aMerge!=aDoclist ) sqlite3_free(aMerge); sqlite3_free(pTS->aaOutput[iOut]); pTS->aaOutput[iOut] = 0; aMerge = aNew; nMerge = nNew; if( (iOut+1)==SizeofArray(pTS->aaOutput) ){ pTS->aaOutput[iOut] = aMerge; pTS->anOutput[iOut] = nMerge; } } } } return SQLITE_OK; } /* ** Append SegReader object pNew to the end of the pCsr->apSegment[] array. */ static int fts3SegReaderCursorAppend( Fts3MultiSegReader *pCsr, Fts3SegReader *pNew ){ if( (pCsr->nSegment%16)==0 ){ Fts3SegReader **apNew; int nByte = (pCsr->nSegment + 16)*sizeof(Fts3SegReader*); apNew = (Fts3SegReader **)sqlite3_realloc(pCsr->apSegment, nByte); if( !apNew ){ sqlite3Fts3SegReaderFree(pNew); return SQLITE_NOMEM; } pCsr->apSegment = apNew; } pCsr->apSegment[pCsr->nSegment++] = pNew; return SQLITE_OK; } /* ** Add seg-reader objects to the Fts3MultiSegReader object passed as the ** 8th argument. ** ** This function returns SQLITE_OK if successful, or an SQLite error code ** otherwise. */ static int fts3SegReaderCursor( Fts3Table *p, /* FTS3 table handle */ int iLangid, /* Language id */ int iIndex, /* Index to search (from 0 to p->nIndex-1) */ int iLevel, /* Level of segments to scan */ const char *zTerm, /* Term to query for */ int nTerm, /* Size of zTerm in bytes */ int isPrefix, /* True for a prefix search */ int isScan, /* True to scan from zTerm to EOF */ Fts3MultiSegReader *pCsr /* Cursor object to populate */ ){ int rc = SQLITE_OK; /* Error code */ sqlite3_stmt *pStmt = 0; /* Statement to iterate through segments */ int rc2; /* Result of sqlite3_reset() */ /* If iLevel is less than 0 and this is not a scan, include a seg-reader ** for the pending-terms. If this is a scan, then this call must be being ** made by an fts4aux module, not an FTS table. In this case calling ** Fts3SegReaderPending might segfault, as the data structures used by ** fts4aux are not completely populated. So it's easiest to filter these ** calls out here. */ if( iLevel<0 && p->aIndex ){ Fts3SegReader *pSeg = 0; rc = sqlite3Fts3SegReaderPending(p, iIndex, zTerm, nTerm, isPrefix||isScan, &pSeg); if( rc==SQLITE_OK && pSeg ){ rc = fts3SegReaderCursorAppend(pCsr, pSeg); } } if( iLevel!=FTS3_SEGCURSOR_PENDING ){ if( rc==SQLITE_OK ){ rc = sqlite3Fts3AllSegdirs(p, iLangid, iIndex, iLevel, &pStmt); } while( rc==SQLITE_OK && SQLITE_ROW==(rc = sqlite3_step(pStmt)) ){ Fts3SegReader *pSeg = 0; /* Read the values returned by the SELECT into local variables. */ sqlite3_int64 iStartBlock = sqlite3_column_int64(pStmt, 1); sqlite3_int64 iLeavesEndBlock = sqlite3_column_int64(pStmt, 2); sqlite3_int64 iEndBlock = sqlite3_column_int64(pStmt, 3); int nRoot = sqlite3_column_bytes(pStmt, 4); char const *zRoot = sqlite3_column_blob(pStmt, 4); /* If zTerm is not NULL, and this segment is not stored entirely on its ** root node, the range of leaves scanned can be reduced. Do this. */ if( iStartBlock && zTerm ){ sqlite3_int64 *pi = (isPrefix ? 
&iLeavesEndBlock : 0); rc = fts3SelectLeaf(p, zTerm, nTerm, zRoot, nRoot, &iStartBlock, pi); if( rc!=SQLITE_OK ) goto finished; if( isPrefix==0 && isScan==0 ) iLeavesEndBlock = iStartBlock; } rc = sqlite3Fts3SegReaderNew(pCsr->nSegment+1, (isPrefix==0 && isScan==0), iStartBlock, iLeavesEndBlock, iEndBlock, zRoot, nRoot, &pSeg ); if( rc!=SQLITE_OK ) goto finished; rc = fts3SegReaderCursorAppend(pCsr, pSeg); } } finished: rc2 = sqlite3_reset(pStmt); if( rc==SQLITE_DONE ) rc = rc2; return rc; } /* ** Set up a cursor object for iterating through a full-text index or a ** single level therein. */ SQLITE_PRIVATE int sqlite3Fts3SegReaderCursor( Fts3Table *p, /* FTS3 table handle */ int iLangid, /* Language-id to search */ int iIndex, /* Index to search (from 0 to p->nIndex-1) */ int iLevel, /* Level of segments to scan */ const char *zTerm, /* Term to query for */ int nTerm, /* Size of zTerm in bytes */ int isPrefix, /* True for a prefix search */ int isScan, /* True to scan from zTerm to EOF */ Fts3MultiSegReader *pCsr /* Cursor object to populate */ ){ assert( iIndex>=0 && iIndexnIndex ); assert( iLevel==FTS3_SEGCURSOR_ALL || iLevel==FTS3_SEGCURSOR_PENDING || iLevel>=0 ); assert( iLevelbase.pVtab; if( isPrefix ){ for(i=1; bFound==0 && inIndex; i++){ if( p->aIndex[i].nPrefix==nTerm ){ bFound = 1; rc = sqlite3Fts3SegReaderCursor(p, pCsr->iLangid, i, FTS3_SEGCURSOR_ALL, zTerm, nTerm, 0, 0, pSegcsr ); pSegcsr->bLookup = 1; } } for(i=1; bFound==0 && inIndex; i++){ if( p->aIndex[i].nPrefix==nTerm+1 ){ bFound = 1; rc = sqlite3Fts3SegReaderCursor(p, pCsr->iLangid, i, FTS3_SEGCURSOR_ALL, zTerm, nTerm, 1, 0, pSegcsr ); if( rc==SQLITE_OK ){ rc = fts3SegReaderCursorAddZero( p, pCsr->iLangid, zTerm, nTerm, pSegcsr ); } } } } if( bFound==0 ){ rc = sqlite3Fts3SegReaderCursor(p, pCsr->iLangid, 0, FTS3_SEGCURSOR_ALL, zTerm, nTerm, isPrefix, 0, pSegcsr ); pSegcsr->bLookup = !isPrefix; } } *ppSegcsr = pSegcsr; return rc; } /* ** Free an Fts3MultiSegReader allocated by fts3TermSegReaderCursor(). */ static void fts3SegReaderCursorFree(Fts3MultiSegReader *pSegcsr){ sqlite3Fts3SegReaderFinish(pSegcsr); sqlite3_free(pSegcsr); } /* ** This function retrieves the doclist for the specified term (or term ** prefix) from the database. */ static int fts3TermSelect( Fts3Table *p, /* Virtual table handle */ Fts3PhraseToken *pTok, /* Token to query for */ int iColumn, /* Column to query (or -ve for all columns) */ int *pnOut, /* OUT: Size of buffer at *ppOut */ char **ppOut /* OUT: Malloced result buffer */ ){ int rc; /* Return code */ Fts3MultiSegReader *pSegcsr; /* Seg-reader cursor for this term */ TermSelect tsc; /* Object for pair-wise doclist merging */ Fts3SegFilter filter; /* Segment term filter configuration */ pSegcsr = pTok->pSegcsr; memset(&tsc, 0, sizeof(TermSelect)); filter.flags = FTS3_SEGMENT_IGNORE_EMPTY | FTS3_SEGMENT_REQUIRE_POS | (pTok->isPrefix ? FTS3_SEGMENT_PREFIX : 0) | (pTok->bFirst ? FTS3_SEGMENT_FIRST : 0) | (iColumnnColumn ? 
FTS3_SEGMENT_COLUMN_FILTER : 0); filter.iCol = iColumn; filter.zTerm = pTok->z; filter.nTerm = pTok->n; rc = sqlite3Fts3SegReaderStart(p, pSegcsr, &filter); while( SQLITE_OK==rc && SQLITE_ROW==(rc = sqlite3Fts3SegReaderStep(p, pSegcsr)) ){ rc = fts3TermSelectMerge(p, &tsc, pSegcsr->aDoclist, pSegcsr->nDoclist); } if( rc==SQLITE_OK ){ rc = fts3TermSelectFinishMerge(p, &tsc); } if( rc==SQLITE_OK ){ *ppOut = tsc.aaOutput[0]; *pnOut = tsc.anOutput[0]; }else{ int i; for(i=0; ipSegcsr = 0; return rc; } /* ** This function counts the total number of docids in the doclist stored ** in buffer aList[], size nList bytes. ** ** If the isPoslist argument is true, then it is assumed that the doclist ** contains a position-list following each docid. Otherwise, it is assumed ** that the doclist is simply a list of docids stored as delta encoded ** varints. */ static int fts3DoclistCountDocids(char *aList, int nList){ int nDoc = 0; /* Return value */ if( aList ){ char *aEnd = &aList[nList]; /* Pointer to one byte after EOF */ char *p = aList; /* Cursor */ while( peSearch==FTS3_DOCID_SEARCH || pCsr->eSearch==FTS3_FULLSCAN_SEARCH ){ if( SQLITE_ROW!=sqlite3_step(pCsr->pStmt) ){ pCsr->isEof = 1; rc = sqlite3_reset(pCsr->pStmt); }else{ pCsr->iPrevId = sqlite3_column_int64(pCsr->pStmt, 0); rc = SQLITE_OK; } }else{ rc = fts3EvalNext((Fts3Cursor *)pCursor); } assert( ((Fts3Table *)pCsr->base.pVtab)->pSegments==0 ); return rc; } /* ** The following are copied from sqliteInt.h. ** ** Constants for the largest and smallest possible 64-bit signed integers. ** These macros are designed to work correctly on both 32-bit and 64-bit ** compilers. */ #ifndef SQLITE_AMALGAMATION # define LARGEST_INT64 (0xffffffff|(((sqlite3_int64)0x7fffffff)<<32)) # define SMALLEST_INT64 (((sqlite3_int64)-1) - LARGEST_INT64) #endif /* ** If the numeric type of argument pVal is "integer", then return it ** converted to a 64-bit signed integer. Otherwise, return a copy of ** the second parameter, iDefault. */ static sqlite3_int64 fts3DocidRange(sqlite3_value *pVal, i64 iDefault){ if( pVal ){ int eType = sqlite3_value_numeric_type(pVal); if( eType==SQLITE_INTEGER ){ return sqlite3_value_int64(pVal); } } return iDefault; } /* ** This is the xFilter interface for the virtual table. See ** the virtual table xFilter method documentation for additional ** information. ** ** If idxNum==FTS3_FULLSCAN_SEARCH then do a full table scan against ** the %_content table. ** ** If idxNum==FTS3_DOCID_SEARCH then do a docid lookup for a single entry ** in the %_content table. ** ** If idxNum>=FTS3_FULLTEXT_SEARCH then use the full text index. The ** column on the left-hand side of the MATCH operator is column ** number idxNum-FTS3_FULLTEXT_SEARCH, 0 indexed. argv[0] is the right-hand ** side of the MATCH operator. */ static int fts3FilterMethod( sqlite3_vtab_cursor *pCursor, /* The cursor used for this query */ int idxNum, /* Strategy index */ const char *idxStr, /* Unused */ int nVal, /* Number of elements in apVal */ sqlite3_value **apVal /* Arguments for the indexing scheme */ ){ int rc = SQLITE_OK; char *zSql; /* SQL statement used to access %_content */ int eSearch; Fts3Table *p = (Fts3Table *)pCursor->pVtab; Fts3Cursor *pCsr = (Fts3Cursor *)pCursor; sqlite3_value *pCons = 0; /* The MATCH or rowid constraint, if any */ sqlite3_value *pLangid = 0; /* The "langid = ?" constraint, if any */ sqlite3_value *pDocidGe = 0; /* The "docid >= ?" constraint, if any */ sqlite3_value *pDocidLe = 0; /* The "docid <= ?" 
constraint, if any */ int iIdx; UNUSED_PARAMETER(idxStr); UNUSED_PARAMETER(nVal); eSearch = (idxNum & 0x0000FFFF); assert( eSearch>=0 && eSearch<=(FTS3_FULLTEXT_SEARCH+p->nColumn) ); assert( p->pSegments==0 ); /* Collect arguments into local variables */ iIdx = 0; if( eSearch!=FTS3_FULLSCAN_SEARCH ) pCons = apVal[iIdx++]; if( idxNum & FTS3_HAVE_LANGID ) pLangid = apVal[iIdx++]; if( idxNum & FTS3_HAVE_DOCID_GE ) pDocidGe = apVal[iIdx++]; if( idxNum & FTS3_HAVE_DOCID_LE ) pDocidLe = apVal[iIdx++]; assert( iIdx==nVal ); /* In case the cursor has been used before, clear it now. */ sqlite3_finalize(pCsr->pStmt); sqlite3_free(pCsr->aDoclist); sqlite3Fts3MIBufferFree(pCsr->pMIBuffer); sqlite3Fts3ExprFree(pCsr->pExpr); memset(&pCursor[1], 0, sizeof(Fts3Cursor)-sizeof(sqlite3_vtab_cursor)); /* Set the lower and upper bounds on docids to return */ pCsr->iMinDocid = fts3DocidRange(pDocidGe, SMALLEST_INT64); pCsr->iMaxDocid = fts3DocidRange(pDocidLe, LARGEST_INT64); if( idxStr ){ pCsr->bDesc = (idxStr[0]=='D'); }else{ pCsr->bDesc = p->bDescIdx; } pCsr->eSearch = (i16)eSearch; if( eSearch!=FTS3_DOCID_SEARCH && eSearch!=FTS3_FULLSCAN_SEARCH ){ int iCol = eSearch-FTS3_FULLTEXT_SEARCH; const char *zQuery = (const char *)sqlite3_value_text(pCons); if( zQuery==0 && sqlite3_value_type(pCons)!=SQLITE_NULL ){ return SQLITE_NOMEM; } pCsr->iLangid = 0; if( pLangid ) pCsr->iLangid = sqlite3_value_int(pLangid); assert( p->base.zErrMsg==0 ); rc = sqlite3Fts3ExprParse(p->pTokenizer, pCsr->iLangid, p->azColumn, p->bFts4, p->nColumn, iCol, zQuery, -1, &pCsr->pExpr, &p->base.zErrMsg ); if( rc!=SQLITE_OK ){ return rc; } rc = fts3EvalStart(pCsr); sqlite3Fts3SegmentsClose(p); if( rc!=SQLITE_OK ) return rc; pCsr->pNextId = pCsr->aDoclist; pCsr->iPrevId = 0; } /* Compile a SELECT statement for this cursor. For a full-table-scan, the ** statement loops through all rows of the %_content table. For a ** full-text query or docid lookup, the statement retrieves a single ** row by docid. */ if( eSearch==FTS3_FULLSCAN_SEARCH ){ if( pDocidGe || pDocidLe ){ zSql = sqlite3_mprintf( "SELECT %s WHERE rowid BETWEEN %lld AND %lld ORDER BY rowid %s", p->zReadExprlist, pCsr->iMinDocid, pCsr->iMaxDocid, (pCsr->bDesc ? "DESC" : "ASC") ); }else{ zSql = sqlite3_mprintf("SELECT %s ORDER BY rowid %s", p->zReadExprlist, (pCsr->bDesc ? "DESC" : "ASC") ); } if( zSql ){ rc = sqlite3_prepare_v2(p->db, zSql, -1, &pCsr->pStmt, 0); sqlite3_free(zSql); }else{ rc = SQLITE_NOMEM; } }else if( eSearch==FTS3_DOCID_SEARCH ){ rc = fts3CursorSeekStmt(pCsr, &pCsr->pStmt); if( rc==SQLITE_OK ){ rc = sqlite3_bind_value(pCsr->pStmt, 1, pCons); } } if( rc!=SQLITE_OK ) return rc; return fts3NextMethod(pCursor); } /* ** This is the xEof method of the virtual table. SQLite calls this ** routine to find out if it has reached the end of a result set. */ static int fts3EofMethod(sqlite3_vtab_cursor *pCursor){ return ((Fts3Cursor *)pCursor)->isEof; } /* ** This is the xRowid method. The SQLite core calls this routine to ** retrieve the rowid for the current row of the result set. fts3 ** exposes %_content.docid as the rowid for the virtual table. The ** rowid should be written to *pRowid. */ static int fts3RowidMethod(sqlite3_vtab_cursor *pCursor, sqlite_int64 *pRowid){ Fts3Cursor *pCsr = (Fts3Cursor *) pCursor; *pRowid = pCsr->iPrevId; return SQLITE_OK; } /* ** This is the xColumn method, called by SQLite to request a value from ** the row that the supplied cursor currently points to. ** ** If: ** ** (iCol < p->nColumn) -> The value of the iCol'th user column. 
** (iCol == p->nColumn) -> Magic column with the same name as the table. ** (iCol == p->nColumn+1) -> Docid column ** (iCol == p->nColumn+2) -> Langid column */ static int fts3ColumnMethod( sqlite3_vtab_cursor *pCursor, /* Cursor to retrieve value from */ sqlite3_context *pCtx, /* Context for sqlite3_result_xxx() calls */ int iCol /* Index of column to read value from */ ){ int rc = SQLITE_OK; /* Return Code */ Fts3Cursor *pCsr = (Fts3Cursor *) pCursor; Fts3Table *p = (Fts3Table *)pCursor->pVtab; /* The column value supplied by SQLite must be in range. */ assert( iCol>=0 && iCol<=p->nColumn+2 ); if( iCol==p->nColumn+1 ){ /* This call is a request for the "docid" column. Since "docid" is an ** alias for "rowid", use the xRowid() method to obtain the value. */ sqlite3_result_int64(pCtx, pCsr->iPrevId); }else if( iCol==p->nColumn ){ /* The extra column whose name is the same as the table. ** Return a blob which is a pointer to the cursor. */ sqlite3_result_blob(pCtx, &pCsr, sizeof(pCsr), SQLITE_TRANSIENT); }else if( iCol==p->nColumn+2 && pCsr->pExpr ){ sqlite3_result_int64(pCtx, pCsr->iLangid); }else{ /* The requested column is either a user column (one that contains ** indexed data), or the language-id column. */ rc = fts3CursorSeek(0, pCsr); if( rc==SQLITE_OK ){ if( iCol==p->nColumn+2 ){ int iLangid = 0; if( p->zLanguageid ){ iLangid = sqlite3_column_int(pCsr->pStmt, p->nColumn+1); } sqlite3_result_int(pCtx, iLangid); }else if( sqlite3_data_count(pCsr->pStmt)>(iCol+1) ){ sqlite3_result_value(pCtx, sqlite3_column_value(pCsr->pStmt, iCol+1)); } } } assert( ((Fts3Table *)pCsr->base.pVtab)->pSegments==0 ); return rc; } /* ** This function is the implementation of the xUpdate callback used by ** FTS3 virtual tables. It is invoked by SQLite each time a row is to be ** inserted, updated or deleted. */ static int fts3UpdateMethod( sqlite3_vtab *pVtab, /* Virtual table handle */ int nArg, /* Size of argument array */ sqlite3_value **apVal, /* Array of arguments */ sqlite_int64 *pRowid /* OUT: The affected (or effected) rowid */ ){ return sqlite3Fts3UpdateMethod(pVtab, nArg, apVal, pRowid); } /* ** Implementation of xSync() method. Flush the contents of the pending-terms ** hash-table to the database. */ static int fts3SyncMethod(sqlite3_vtab *pVtab){ /* Following an incremental-merge operation, assuming that the input ** segments are not completely consumed (the usual case), they are updated ** in place to remove the entries that have already been merged. This ** involves updating the leaf block that contains the smallest unmerged ** entry and each block (if any) between the leaf and the root node. So ** if the height of the input segment b-trees is N, and input segments ** are merged eight at a time, updating the input segments at the end ** of an incremental-merge requires writing (8*(1+N)) blocks. N is usually ** small - often between 0 and 2. So the overhead of the incremental ** merge is somewhere between 8 and 24 blocks. To avoid this overhead ** dwarfing the actual productive work accomplished, the incremental merge ** is only attempted if it will write at least 64 leaf blocks. Hence ** nMinMerge. ** ** Of course, updating the input segments also involves deleting a bunch ** of blocks from the segments table. But this is not considered overhead ** as it would also be required by a crisis-merge that used the same input ** segments. 
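**
** As a rough worked example (added for illustration; not part of the
** original comment): with input b-trees of height N==2, fixing up the
** input segments costs about 8*(1+2) == 24 block writes, so requiring at
** least nMinMerge==64 new leaf blocks before attempting an incremental
** merge keeps that fixed overhead well below the productive work done.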
*/ const u32 nMinMerge = 64; /* Minimum amount of incr-merge work to do */ Fts3Table *p = (Fts3Table*)pVtab; int rc = sqlite3Fts3PendingTermsFlush(p); if( rc==SQLITE_OK && p->nLeafAdd>(nMinMerge/16) && p->nAutoincrmerge && p->nAutoincrmerge!=0xff ){ int mxLevel = 0; /* Maximum relative level value in db */ int A; /* Incr-merge parameter A */ rc = sqlite3Fts3MaxLevel(p, &mxLevel); assert( rc==SQLITE_OK || mxLevel==0 ); A = p->nLeafAdd * mxLevel; A += (A/2); if( A>(int)nMinMerge ) rc = sqlite3Fts3Incrmerge(p, A, p->nAutoincrmerge); } sqlite3Fts3SegmentsClose(p); return rc; } /* ** If it is currently unknown whether or not the FTS table has an %_stat ** table (if p->bHasStat==2), attempt to determine this (set p->bHasStat ** to 0 or 1). Return SQLITE_OK if successful, or an SQLite error code ** if an error occurs. */ static int fts3SetHasStat(Fts3Table *p){ int rc = SQLITE_OK; if( p->bHasStat==2 ){ const char *zFmt ="SELECT 1 FROM %Q.sqlite_master WHERE tbl_name='%q_stat'"; char *zSql = sqlite3_mprintf(zFmt, p->zDb, p->zName); if( zSql ){ sqlite3_stmt *pStmt = 0; rc = sqlite3_prepare_v2(p->db, zSql, -1, &pStmt, 0); if( rc==SQLITE_OK ){ int bHasStat = (sqlite3_step(pStmt)==SQLITE_ROW); rc = sqlite3_finalize(pStmt); if( rc==SQLITE_OK ) p->bHasStat = bHasStat; } sqlite3_free(zSql); }else{ rc = SQLITE_NOMEM; } } return rc; } /* ** Implementation of xBegin() method. */ static int fts3BeginMethod(sqlite3_vtab *pVtab){ Fts3Table *p = (Fts3Table*)pVtab; UNUSED_PARAMETER(pVtab); assert( p->pSegments==0 ); assert( p->nPendingData==0 ); assert( p->inTransaction!=1 ); TESTONLY( p->inTransaction = 1 ); TESTONLY( p->mxSavepoint = -1; ); p->nLeafAdd = 0; return fts3SetHasStat(p); } /* ** Implementation of xCommit() method. This is a no-op. The contents of ** the pending-terms hash-table have already been flushed into the database ** by fts3SyncMethod(). */ static int fts3CommitMethod(sqlite3_vtab *pVtab){ TESTONLY( Fts3Table *p = (Fts3Table*)pVtab ); UNUSED_PARAMETER(pVtab); assert( p->nPendingData==0 ); assert( p->inTransaction!=0 ); assert( p->pSegments==0 ); TESTONLY( p->inTransaction = 0 ); TESTONLY( p->mxSavepoint = -1; ); return SQLITE_OK; } /* ** Implementation of xRollback(). Discard the contents of the pending-terms ** hash-table. Any changes made to the database are reverted by SQLite. */ static int fts3RollbackMethod(sqlite3_vtab *pVtab){ Fts3Table *p = (Fts3Table*)pVtab; sqlite3Fts3PendingTermsClear(p); assert( p->inTransaction!=0 ); TESTONLY( p->inTransaction = 0 ); TESTONLY( p->mxSavepoint = -1; ); return SQLITE_OK; } /* ** When called, *ppPoslist must point to the byte immediately following the ** end of a position-list. i.e. ( (*ppPoslist)[-1]==POS_END ). This function ** moves *ppPoslist so that it instead points to the first byte of the ** same position list. */ static void fts3ReversePoslist(char *pStart, char **ppPoslist){ char *p = &(*ppPoslist)[-2]; char c = 0; /* Skip backwards passed any trailing 0x00 bytes added by NearTrim() */ while( p>pStart && (c=*p--)==0 ); /* Search backwards for a varint with value zero (the end of the previous ** poslist). This is an 0x00 byte preceded by some byte that does not ** have the 0x80 bit set. */ while( p>pStart && (*p & 0x80) | c ){ c = *p--; } assert( p==pStart || c==0 ); /* At this point p points to that preceding byte without the 0x80 bit ** set. So to find the start of the poslist, skip forward 2 bytes then ** over a varint. ** ** Normally. The other case is that p==pStart and the poslist to return ** is the first in the doclist. 
In this case do not skip forward 2 bytes. ** The second part of the if condition (c==0 && *ppPoslist>&p[2]) ** is required for cases where the first byte of a doclist and the ** doclist is empty. For example, if the first docid is 10, a doclist ** that begins with: ** ** 0x0A 0x00 */ if( p>pStart || (c==0 && *ppPoslist>&p[2]) ){ p = &p[2]; } while( *p++&0x80 ); *ppPoslist = p; } /* ** Helper function used by the implementation of the overloaded snippet(), ** offsets() and optimize() SQL functions. ** ** If the value passed as the third argument is a blob of size ** sizeof(Fts3Cursor*), then the blob contents are copied to the ** output variable *ppCsr and SQLITE_OK is returned. Otherwise, an error ** message is written to context pContext and SQLITE_ERROR returned. The ** string passed via zFunc is used as part of the error message. */ static int fts3FunctionArg( sqlite3_context *pContext, /* SQL function call context */ const char *zFunc, /* Function name */ sqlite3_value *pVal, /* argv[0] passed to function */ Fts3Cursor **ppCsr /* OUT: Store cursor handle here */ ){ Fts3Cursor *pRet; if( sqlite3_value_type(pVal)!=SQLITE_BLOB || sqlite3_value_bytes(pVal)!=sizeof(Fts3Cursor *) ){ char *zErr = sqlite3_mprintf("illegal first argument to %s", zFunc); sqlite3_result_error(pContext, zErr, -1); sqlite3_free(zErr); return SQLITE_ERROR; } memcpy(&pRet, sqlite3_value_blob(pVal), sizeof(Fts3Cursor *)); *ppCsr = pRet; return SQLITE_OK; } /* ** Implementation of the snippet() function for FTS3 */ static void fts3SnippetFunc( sqlite3_context *pContext, /* SQLite function call context */ int nVal, /* Size of apVal[] array */ sqlite3_value **apVal /* Array of arguments */ ){ Fts3Cursor *pCsr; /* Cursor handle passed through apVal[0] */ const char *zStart = ""; const char *zEnd = ""; const char *zEllipsis = "..."; int iCol = -1; int nToken = 15; /* Default number of tokens in snippet */ /* There must be at least one argument passed to this function (otherwise ** the non-overloaded version would have been called instead of this one). */ assert( nVal>=1 ); if( nVal>6 ){ sqlite3_result_error(pContext, "wrong number of arguments to function snippet()", -1); return; } if( fts3FunctionArg(pContext, "snippet", apVal[0], &pCsr) ) return; switch( nVal ){ case 6: nToken = sqlite3_value_int(apVal[5]); case 5: iCol = sqlite3_value_int(apVal[4]); case 4: zEllipsis = (const char*)sqlite3_value_text(apVal[3]); case 3: zEnd = (const char*)sqlite3_value_text(apVal[2]); case 2: zStart = (const char*)sqlite3_value_text(apVal[1]); } if( !zEllipsis || !zEnd || !zStart ){ sqlite3_result_error_nomem(pContext); }else if( nToken==0 ){ sqlite3_result_text(pContext, "", -1, SQLITE_STATIC); }else if( SQLITE_OK==fts3CursorSeek(pContext, pCsr) ){ sqlite3Fts3Snippet(pContext, pCsr, zStart, zEnd, zEllipsis, iCol, nToken); } } /* ** Implementation of the offsets() function for FTS3 */ static void fts3OffsetsFunc( sqlite3_context *pContext, /* SQLite function call context */ int nVal, /* Size of argument array */ sqlite3_value **apVal /* Array of arguments */ ){ Fts3Cursor *pCsr; /* Cursor handle passed through apVal[0] */ UNUSED_PARAMETER(nVal); assert( nVal==1 ); if( fts3FunctionArg(pContext, "offsets", apVal[0], &pCsr) ) return; assert( pCsr ); if( SQLITE_OK==fts3CursorSeek(pContext, pCsr) ){ sqlite3Fts3Offsets(pContext, pCsr); } } /* ** Implementation of the special optimize() function for FTS3. This ** function merges all segments in the database to a single segment. 
** Example usage is: ** ** SELECT optimize(t) FROM t LIMIT 1; ** ** where 't' is the name of an FTS3 table. */ static void fts3OptimizeFunc( sqlite3_context *pContext, /* SQLite function call context */ int nVal, /* Size of argument array */ sqlite3_value **apVal /* Array of arguments */ ){ int rc; /* Return code */ Fts3Table *p; /* Virtual table handle */ Fts3Cursor *pCursor; /* Cursor handle passed through apVal[0] */ UNUSED_PARAMETER(nVal); assert( nVal==1 ); if( fts3FunctionArg(pContext, "optimize", apVal[0], &pCursor) ) return; p = (Fts3Table *)pCursor->base.pVtab; assert( p ); rc = sqlite3Fts3Optimize(p); switch( rc ){ case SQLITE_OK: sqlite3_result_text(pContext, "Index optimized", -1, SQLITE_STATIC); break; case SQLITE_DONE: sqlite3_result_text(pContext, "Index already optimal", -1, SQLITE_STATIC); break; default: sqlite3_result_error_code(pContext, rc); break; } } /* ** Implementation of the matchinfo() function for FTS3 */ static void fts3MatchinfoFunc( sqlite3_context *pContext, /* SQLite function call context */ int nVal, /* Size of argument array */ sqlite3_value **apVal /* Array of arguments */ ){ Fts3Cursor *pCsr; /* Cursor handle passed through apVal[0] */ assert( nVal==1 || nVal==2 ); if( SQLITE_OK==fts3FunctionArg(pContext, "matchinfo", apVal[0], &pCsr) ){ const char *zArg = 0; if( nVal>1 ){ zArg = (const char *)sqlite3_value_text(apVal[1]); } sqlite3Fts3Matchinfo(pContext, pCsr, zArg); } } /* ** This routine implements the xFindFunction method for the FTS3 ** virtual table. */ static int fts3FindFunctionMethod( sqlite3_vtab *pVtab, /* Virtual table handle */ int nArg, /* Number of SQL function arguments */ const char *zName, /* Name of SQL function */ void (**pxFunc)(sqlite3_context*,int,sqlite3_value**), /* OUT: Result */ void **ppArg /* Unused */ ){ struct Overloaded { const char *zName; void (*xFunc)(sqlite3_context*,int,sqlite3_value**); } aOverload[] = { { "snippet", fts3SnippetFunc }, { "offsets", fts3OffsetsFunc }, { "optimize", fts3OptimizeFunc }, { "matchinfo", fts3MatchinfoFunc }, }; int i; /* Iterator variable */ UNUSED_PARAMETER(pVtab); UNUSED_PARAMETER(nArg); UNUSED_PARAMETER(ppArg); for(i=0; idb; /* Database connection */ int rc; /* Return Code */ /* At this point it must be known if the %_stat table exists or not. ** So bHasStat may not be 2. */ rc = fts3SetHasStat(p); /* As it happens, the pending terms table is always empty here. This is ** because an "ALTER TABLE RENAME TABLE" statement inside a transaction ** always opens a savepoint transaction. And the xSavepoint() method ** flushes the pending terms table. But leave the (no-op) call to ** PendingTermsFlush() in in case that changes. */ assert( p->nPendingData==0 ); if( rc==SQLITE_OK ){ rc = sqlite3Fts3PendingTermsFlush(p); } if( p->zContentTbl==0 ){ fts3DbExec(&rc, db, "ALTER TABLE %Q.'%q_content' RENAME TO '%q_content';", p->zDb, p->zName, zName ); } if( p->bHasDocsize ){ fts3DbExec(&rc, db, "ALTER TABLE %Q.'%q_docsize' RENAME TO '%q_docsize';", p->zDb, p->zName, zName ); } if( p->bHasStat ){ fts3DbExec(&rc, db, "ALTER TABLE %Q.'%q_stat' RENAME TO '%q_stat';", p->zDb, p->zName, zName ); } fts3DbExec(&rc, db, "ALTER TABLE %Q.'%q_segments' RENAME TO '%q_segments';", p->zDb, p->zName, zName ); fts3DbExec(&rc, db, "ALTER TABLE %Q.'%q_segdir' RENAME TO '%q_segdir';", p->zDb, p->zName, zName ); return rc; } /* ** The xSavepoint() method. ** ** Flush the contents of the pending-terms table to disk. 
*/ static int fts3SavepointMethod(sqlite3_vtab *pVtab, int iSavepoint){ int rc = SQLITE_OK; UNUSED_PARAMETER(iSavepoint); assert( ((Fts3Table *)pVtab)->inTransaction ); assert( ((Fts3Table *)pVtab)->mxSavepoint < iSavepoint ); TESTONLY( ((Fts3Table *)pVtab)->mxSavepoint = iSavepoint ); if( ((Fts3Table *)pVtab)->bIgnoreSavepoint==0 ){ rc = fts3SyncMethod(pVtab); } return rc; } /* ** The xRelease() method. ** ** This is a no-op. */ static int fts3ReleaseMethod(sqlite3_vtab *pVtab, int iSavepoint){ TESTONLY( Fts3Table *p = (Fts3Table*)pVtab ); UNUSED_PARAMETER(iSavepoint); UNUSED_PARAMETER(pVtab); assert( p->inTransaction ); assert( p->mxSavepoint >= iSavepoint ); TESTONLY( p->mxSavepoint = iSavepoint-1 ); return SQLITE_OK; } /* ** The xRollbackTo() method. ** ** Discard the contents of the pending terms table. */ static int fts3RollbackToMethod(sqlite3_vtab *pVtab, int iSavepoint){ Fts3Table *p = (Fts3Table*)pVtab; UNUSED_PARAMETER(iSavepoint); assert( p->inTransaction ); assert( p->mxSavepoint >= iSavepoint ); TESTONLY( p->mxSavepoint = iSavepoint ); sqlite3Fts3PendingTermsClear(p); return SQLITE_OK; } static const sqlite3_module fts3Module = { /* iVersion */ 2, /* xCreate */ fts3CreateMethod, /* xConnect */ fts3ConnectMethod, /* xBestIndex */ fts3BestIndexMethod, /* xDisconnect */ fts3DisconnectMethod, /* xDestroy */ fts3DestroyMethod, /* xOpen */ fts3OpenMethod, /* xClose */ fts3CloseMethod, /* xFilter */ fts3FilterMethod, /* xNext */ fts3NextMethod, /* xEof */ fts3EofMethod, /* xColumn */ fts3ColumnMethod, /* xRowid */ fts3RowidMethod, /* xUpdate */ fts3UpdateMethod, /* xBegin */ fts3BeginMethod, /* xSync */ fts3SyncMethod, /* xCommit */ fts3CommitMethod, /* xRollback */ fts3RollbackMethod, /* xFindFunction */ fts3FindFunctionMethod, /* xRename */ fts3RenameMethod, /* xSavepoint */ fts3SavepointMethod, /* xRelease */ fts3ReleaseMethod, /* xRollbackTo */ fts3RollbackToMethod, }; /* ** This function is registered as the module destructor (called when an ** FTS3 enabled database connection is closed). It frees the memory ** allocated for the tokenizer hash table. */ static void hashDestroy(void *p){ Fts3Hash *pHash = (Fts3Hash *)p; sqlite3Fts3HashClear(pHash); sqlite3_free(pHash); } /* ** The fts3 built-in tokenizers - "simple", "porter" and "icu"- are ** implemented in files fts3_tokenizer1.c, fts3_porter.c and fts3_icu.c ** respectively. The following three forward declarations are for functions ** declared in these files used to retrieve the respective implementations. ** ** Calling sqlite3Fts3SimpleTokenizerModule() sets the value pointed ** to by the argument to point to the "simple" tokenizer implementation. ** And so on. */ SQLITE_PRIVATE void sqlite3Fts3SimpleTokenizerModule(sqlite3_tokenizer_module const**ppModule); SQLITE_PRIVATE void sqlite3Fts3PorterTokenizerModule(sqlite3_tokenizer_module const**ppModule); #ifndef SQLITE_DISABLE_FTS3_UNICODE SQLITE_PRIVATE void sqlite3Fts3UnicodeTokenizer(sqlite3_tokenizer_module const**ppModule); #endif #ifdef SQLITE_ENABLE_ICU SQLITE_PRIVATE void sqlite3Fts3IcuTokenizerModule(sqlite3_tokenizer_module const**ppModule); #endif /* ** Initialize the fts3 extension. If this extension is built as part ** of the sqlite library, then this function is called directly by ** SQLite. If fts3 is built as a dynamically loadable extension, this ** function is called by the sqlite3_extension_init() entry point. 
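**
** For example (illustrative usage only, not part of the original
** comment), once this initialization has succeeded an application can
** create and query an FTS table in the usual way:
**
**   CREATE VIRTUAL TABLE pages USING fts4(title, body);
**   INSERT INTO pages(title, body) VALUES('intro', 'hello full-text search');
**   SELECT title FROM pages WHERE pages MATCH 'hello';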
*/ SQLITE_PRIVATE int sqlite3Fts3Init(sqlite3 *db){ int rc = SQLITE_OK; Fts3Hash *pHash = 0; const sqlite3_tokenizer_module *pSimple = 0; const sqlite3_tokenizer_module *pPorter = 0; #ifndef SQLITE_DISABLE_FTS3_UNICODE const sqlite3_tokenizer_module *pUnicode = 0; #endif #ifdef SQLITE_ENABLE_ICU const sqlite3_tokenizer_module *pIcu = 0; sqlite3Fts3IcuTokenizerModule(&pIcu); #endif #ifndef SQLITE_DISABLE_FTS3_UNICODE sqlite3Fts3UnicodeTokenizer(&pUnicode); #endif #ifdef SQLITE_TEST rc = sqlite3Fts3InitTerm(db); if( rc!=SQLITE_OK ) return rc; #endif rc = sqlite3Fts3InitAux(db); if( rc!=SQLITE_OK ) return rc; sqlite3Fts3SimpleTokenizerModule(&pSimple); sqlite3Fts3PorterTokenizerModule(&pPorter); /* Allocate and initialize the hash-table used to store tokenizers. */ pHash = sqlite3_malloc(sizeof(Fts3Hash)); if( !pHash ){ rc = SQLITE_NOMEM; }else{ sqlite3Fts3HashInit(pHash, FTS3_HASH_STRING, 1); } /* Load the built-in tokenizers into the hash table */ if( rc==SQLITE_OK ){ if( sqlite3Fts3HashInsert(pHash, "simple", 7, (void *)pSimple) || sqlite3Fts3HashInsert(pHash, "porter", 7, (void *)pPorter) #ifndef SQLITE_DISABLE_FTS3_UNICODE || sqlite3Fts3HashInsert(pHash, "unicode61", 10, (void *)pUnicode) #endif #ifdef SQLITE_ENABLE_ICU || (pIcu && sqlite3Fts3HashInsert(pHash, "icu", 4, (void *)pIcu)) #endif ){ rc = SQLITE_NOMEM; } } #ifdef SQLITE_TEST if( rc==SQLITE_OK ){ rc = sqlite3Fts3ExprInitTestInterface(db); } #endif /* Create the virtual table wrapper around the hash-table and overload ** the two scalar functions. If this is successful, register the ** module with sqlite. */ if( SQLITE_OK==rc && SQLITE_OK==(rc = sqlite3Fts3InitHashTable(db, pHash, "fts3_tokenizer")) && SQLITE_OK==(rc = sqlite3_overload_function(db, "snippet", -1)) && SQLITE_OK==(rc = sqlite3_overload_function(db, "offsets", 1)) && SQLITE_OK==(rc = sqlite3_overload_function(db, "matchinfo", 1)) && SQLITE_OK==(rc = sqlite3_overload_function(db, "matchinfo", 2)) && SQLITE_OK==(rc = sqlite3_overload_function(db, "optimize", 1)) ){ rc = sqlite3_create_module_v2( db, "fts3", &fts3Module, (void *)pHash, hashDestroy ); if( rc==SQLITE_OK ){ rc = sqlite3_create_module_v2( db, "fts4", &fts3Module, (void *)pHash, 0 ); } if( rc==SQLITE_OK ){ rc = sqlite3Fts3InitTok(db, (void *)pHash); } return rc; } /* An error has occurred. Delete the hash table and return the error code. */ assert( rc!=SQLITE_OK ); if( pHash ){ sqlite3Fts3HashClear(pHash); sqlite3_free(pHash); } return rc; } /* ** Allocate an Fts3MultiSegReader for each token in the expression headed ** by pExpr. ** ** An Fts3SegReader object is a cursor that can seek or scan a range of ** entries within a single segment b-tree. An Fts3MultiSegReader uses multiple ** Fts3SegReader objects internally to provide an interface to seek or scan ** within the union of all segments of a b-tree. Hence the name. ** ** If the allocated Fts3MultiSegReader just seeks to a single entry in a ** segment b-tree (if the term is not a prefix or it is a prefix for which ** there exists prefix b-tree of the right length) then it may be traversed ** and merged incrementally. Otherwise, it has to be merged into an in-memory ** doclist and then traversed. */ static void fts3EvalAllocateReaders( Fts3Cursor *pCsr, /* FTS cursor handle */ Fts3Expr *pExpr, /* Allocate readers for this expression */ int *pnToken, /* OUT: Total number of tokens in phrase. */ int *pnOr, /* OUT: Total number of OR nodes in expr. 
*/ int *pRc /* IN/OUT: Error code */ ){ if( pExpr && SQLITE_OK==*pRc ){ if( pExpr->eType==FTSQUERY_PHRASE ){ int i; int nToken = pExpr->pPhrase->nToken; *pnToken += nToken; for(i=0; ipPhrase->aToken[i]; int rc = fts3TermSegReaderCursor(pCsr, pToken->z, pToken->n, pToken->isPrefix, &pToken->pSegcsr ); if( rc!=SQLITE_OK ){ *pRc = rc; return; } } assert( pExpr->pPhrase->iDoclistToken==0 ); pExpr->pPhrase->iDoclistToken = -1; }else{ *pnOr += (pExpr->eType==FTSQUERY_OR); fts3EvalAllocateReaders(pCsr, pExpr->pLeft, pnToken, pnOr, pRc); fts3EvalAllocateReaders(pCsr, pExpr->pRight, pnToken, pnOr, pRc); } } } /* ** Arguments pList/nList contain the doclist for token iToken of phrase p. ** It is merged into the main doclist stored in p->doclist.aAll/nAll. ** ** This function assumes that pList points to a buffer allocated using ** sqlite3_malloc(). This function takes responsibility for eventually ** freeing the buffer. ** ** SQLITE_OK is returned if successful, or SQLITE_NOMEM if an error occurs. */ static int fts3EvalPhraseMergeToken( Fts3Table *pTab, /* FTS Table pointer */ Fts3Phrase *p, /* Phrase to merge pList/nList into */ int iToken, /* Token pList/nList corresponds to */ char *pList, /* Pointer to doclist */ int nList /* Number of bytes in pList */ ){ int rc = SQLITE_OK; assert( iToken!=p->iDoclistToken ); if( pList==0 ){ sqlite3_free(p->doclist.aAll); p->doclist.aAll = 0; p->doclist.nAll = 0; } else if( p->iDoclistToken<0 ){ p->doclist.aAll = pList; p->doclist.nAll = nList; } else if( p->doclist.aAll==0 ){ sqlite3_free(pList); } else { char *pLeft; char *pRight; int nLeft; int nRight; int nDiff; if( p->iDoclistTokendoclist.aAll; nLeft = p->doclist.nAll; pRight = pList; nRight = nList; nDiff = iToken - p->iDoclistToken; }else{ pRight = p->doclist.aAll; nRight = p->doclist.nAll; pLeft = pList; nLeft = nList; nDiff = p->iDoclistToken - iToken; } rc = fts3DoclistPhraseMerge( pTab->bDescIdx, nDiff, pLeft, nLeft, &pRight, &nRight ); sqlite3_free(pLeft); p->doclist.aAll = pRight; p->doclist.nAll = nRight; } if( iToken>p->iDoclistToken ) p->iDoclistToken = iToken; return rc; } /* ** Load the doclist for phrase p into p->doclist.aAll/nAll. The loaded doclist ** does not take deferred tokens into account. ** ** SQLITE_OK is returned if no error occurs, otherwise an SQLite error code. */ static int fts3EvalPhraseLoad( Fts3Cursor *pCsr, /* FTS Cursor handle */ Fts3Phrase *p /* Phrase object */ ){ Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab; int iToken; int rc = SQLITE_OK; for(iToken=0; rc==SQLITE_OK && iTokennToken; iToken++){ Fts3PhraseToken *pToken = &p->aToken[iToken]; assert( pToken->pDeferred==0 || pToken->pSegcsr==0 ); if( pToken->pSegcsr ){ int nThis = 0; char *pThis = 0; rc = fts3TermSelect(pTab, pToken, p->iColumn, &nThis, &pThis); if( rc==SQLITE_OK ){ rc = fts3EvalPhraseMergeToken(pTab, p, iToken, pThis, nThis); } } assert( pToken->pSegcsr==0 ); } return rc; } /* ** This function is called on each phrase after the position lists for ** any deferred tokens have been loaded into memory. It updates the phrases ** current position list to include only those positions that are really ** instances of the phrase (after considering deferred tokens). If this ** means that the phrase does not appear in the current row, doclist.pList ** and doclist.nList are both zeroed. ** ** SQLITE_OK is returned if no error occurs, otherwise an SQLite error code. 
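**
** For example (an illustration added here, not part of the original
** comment): for the two-token phrase "sqlite database", if the
** non-deferred token "sqlite" occurs at positions {4, 17} of the current
** row and the deferred token "database" occurs at positions {5, 40},
** only the occurrence with "sqlite" at position 4 immediately followed
** by "database" at position 5 is a true phrase instance, so the position
** list is trimmed down to that single occurrence.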
*/ static int fts3EvalDeferredPhrase(Fts3Cursor *pCsr, Fts3Phrase *pPhrase){ int iToken; /* Used to iterate through phrase tokens */ char *aPoslist = 0; /* Position list for deferred tokens */ int nPoslist = 0; /* Number of bytes in aPoslist */ int iPrev = -1; /* Token number of previous deferred token */ assert( pPhrase->doclist.bFreeList==0 ); for(iToken=0; iTokennToken; iToken++){ Fts3PhraseToken *pToken = &pPhrase->aToken[iToken]; Fts3DeferredToken *pDeferred = pToken->pDeferred; if( pDeferred ){ char *pList; int nList; int rc = sqlite3Fts3DeferredTokenList(pDeferred, &pList, &nList); if( rc!=SQLITE_OK ) return rc; if( pList==0 ){ sqlite3_free(aPoslist); pPhrase->doclist.pList = 0; pPhrase->doclist.nList = 0; return SQLITE_OK; }else if( aPoslist==0 ){ aPoslist = pList; nPoslist = nList; }else{ char *aOut = pList; char *p1 = aPoslist; char *p2 = aOut; assert( iPrev>=0 ); fts3PoslistPhraseMerge(&aOut, iToken-iPrev, 0, 1, &p1, &p2); sqlite3_free(aPoslist); aPoslist = pList; nPoslist = (int)(aOut - aPoslist); if( nPoslist==0 ){ sqlite3_free(aPoslist); pPhrase->doclist.pList = 0; pPhrase->doclist.nList = 0; return SQLITE_OK; } } iPrev = iToken; } } if( iPrev>=0 ){ int nMaxUndeferred = pPhrase->iDoclistToken; if( nMaxUndeferred<0 ){ pPhrase->doclist.pList = aPoslist; pPhrase->doclist.nList = nPoslist; pPhrase->doclist.iDocid = pCsr->iPrevId; pPhrase->doclist.bFreeList = 1; }else{ int nDistance; char *p1; char *p2; char *aOut; if( nMaxUndeferred>iPrev ){ p1 = aPoslist; p2 = pPhrase->doclist.pList; nDistance = nMaxUndeferred - iPrev; }else{ p1 = pPhrase->doclist.pList; p2 = aPoslist; nDistance = iPrev - nMaxUndeferred; } aOut = (char *)sqlite3_malloc(nPoslist+8); if( !aOut ){ sqlite3_free(aPoslist); return SQLITE_NOMEM; } pPhrase->doclist.pList = aOut; if( fts3PoslistPhraseMerge(&aOut, nDistance, 0, 1, &p1, &p2) ){ pPhrase->doclist.bFreeList = 1; pPhrase->doclist.nList = (int)(aOut - pPhrase->doclist.pList); }else{ sqlite3_free(aOut); pPhrase->doclist.pList = 0; pPhrase->doclist.nList = 0; } sqlite3_free(aPoslist); } } return SQLITE_OK; } /* ** Maximum number of tokens a phrase may have to be considered for the ** incremental doclists strategy. */ #define MAX_INCR_PHRASE_TOKENS 4 /* ** This function is called for each Fts3Phrase in a full-text query ** expression to initialize the mechanism for returning rows. Once this ** function has been called successfully on an Fts3Phrase, it may be ** used with fts3EvalPhraseNext() to iterate through the matching docids. ** ** If parameter bOptOk is true, then the phrase may (or may not) use the ** incremental loading strategy. Otherwise, the entire doclist is loaded into ** memory within this call. ** ** SQLITE_OK is returned if no error occurs, otherwise an SQLite error code. */ static int fts3EvalPhraseStart(Fts3Cursor *pCsr, int bOptOk, Fts3Phrase *p){ Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab; int rc = SQLITE_OK; /* Error code */ int i; /* Determine if doclists may be loaded from disk incrementally. This is ** possible if the bOptOk argument is true, the FTS doclists will be ** scanned in forward order, and the phrase consists of ** MAX_INCR_PHRASE_TOKENS or fewer tokens, none of which are are "^first" ** tokens or prefix tokens that cannot use a prefix-index. 
*/ int bHaveIncr = 0; int bIncrOk = (bOptOk && pCsr->bDesc==pTab->bDescIdx && p->nToken<=MAX_INCR_PHRASE_TOKENS && p->nToken>0 #ifdef SQLITE_TEST && pTab->bNoIncrDoclist==0 #endif ); for(i=0; bIncrOk==1 && inToken; i++){ Fts3PhraseToken *pToken = &p->aToken[i]; if( pToken->bFirst || (pToken->pSegcsr!=0 && !pToken->pSegcsr->bLookup) ){ bIncrOk = 0; } if( pToken->pSegcsr ) bHaveIncr = 1; } if( bIncrOk && bHaveIncr ){ /* Use the incremental approach. */ int iCol = (p->iColumn >= pTab->nColumn ? -1 : p->iColumn); for(i=0; rc==SQLITE_OK && inToken; i++){ Fts3PhraseToken *pToken = &p->aToken[i]; Fts3MultiSegReader *pSegcsr = pToken->pSegcsr; if( pSegcsr ){ rc = sqlite3Fts3MsrIncrStart(pTab, pSegcsr, iCol, pToken->z, pToken->n); } } p->bIncr = 1; }else{ /* Load the full doclist for the phrase into memory. */ rc = fts3EvalPhraseLoad(pCsr, p); p->bIncr = 0; } assert( rc!=SQLITE_OK || p->nToken<1 || p->aToken[0].pSegcsr==0 || p->bIncr ); return rc; } /* ** This function is used to iterate backwards (from the end to start) ** through doclists. It is used by this module to iterate through phrase ** doclists in reverse and by the fts3_write.c module to iterate through ** pending-terms lists when writing to databases with "order=desc". ** ** The doclist may be sorted in ascending (parameter bDescIdx==0) or ** descending (parameter bDescIdx==1) order of docid. Regardless, this ** function iterates from the end of the doclist to the beginning. */ SQLITE_PRIVATE void sqlite3Fts3DoclistPrev( int bDescIdx, /* True if the doclist is desc */ char *aDoclist, /* Pointer to entire doclist */ int nDoclist, /* Length of aDoclist in bytes */ char **ppIter, /* IN/OUT: Iterator pointer */ sqlite3_int64 *piDocid, /* IN/OUT: Docid pointer */ int *pnList, /* OUT: List length pointer */ u8 *pbEof /* OUT: End-of-file flag */ ){ char *p = *ppIter; assert( nDoclist>0 ); assert( *pbEof==0 ); assert( p || *piDocid==0 ); assert( !p || (p>aDoclist && p<&aDoclist[nDoclist]) ); if( p==0 ){ sqlite3_int64 iDocid = 0; char *pNext = 0; char *pDocid = aDoclist; char *pEnd = &aDoclist[nDoclist]; int iMul = 1; while( pDocid0 ); assert( *pbEof==0 ); assert( p || *piDocid==0 ); assert( !p || (p>=aDoclist && p<=&aDoclist[nDoclist]) ); if( p==0 ){ p = aDoclist; p += sqlite3Fts3GetVarint(p, piDocid); }else{ fts3PoslistCopy(0, &p); while( p<&aDoclist[nDoclist] && *p==0 ) p++; if( p>=&aDoclist[nDoclist] ){ *pbEof = 1; }else{ sqlite3_int64 iVar; p += sqlite3Fts3GetVarint(p, &iVar); *piDocid += ((bDescIdx ? -1 : 1) * iVar); } } *ppIter = p; } /* ** Advance the iterator pDL to the next entry in pDL->aAll/nAll. Set *pbEof ** to true if EOF is reached. */ static void fts3EvalDlPhraseNext( Fts3Table *pTab, Fts3Doclist *pDL, u8 *pbEof ){ char *pIter; /* Used to iterate through aAll */ char *pEnd = &pDL->aAll[pDL->nAll]; /* 1 byte past end of aAll */ if( pDL->pNextDocid ){ pIter = pDL->pNextDocid; }else{ pIter = pDL->aAll; } if( pIter>=pEnd ){ /* We have already reached the end of this doclist. EOF. */ *pbEof = 1; }else{ sqlite3_int64 iDelta; pIter += sqlite3Fts3GetVarint(pIter, &iDelta); if( pTab->bDescIdx==0 || pDL->pNextDocid==0 ){ pDL->iDocid += iDelta; }else{ pDL->iDocid -= iDelta; } pDL->pList = pIter; fts3PoslistCopy(0, &pIter); pDL->nList = (int)(pIter - pDL->pList); /* pIter now points just past the 0x00 that terminates the position- ** list for document pDL->iDocid. However, if this position-list was ** edited in place by fts3EvalNearTrim(), then pIter may not actually ** point to the start of the next docid value. 
The following line deals ** with this case by advancing pIter past the zero-padding added by ** fts3EvalNearTrim(). */ while( pIterpNextDocid = pIter; assert( pIter>=&pDL->aAll[pDL->nAll] || *pIter ); *pbEof = 0; } } /* ** Helper type used by fts3EvalIncrPhraseNext() and incrPhraseTokenNext(). */ typedef struct TokenDoclist TokenDoclist; struct TokenDoclist { int bIgnore; sqlite3_int64 iDocid; char *pList; int nList; }; /* ** Token pToken is an incrementally loaded token that is part of a ** multi-token phrase. Advance it to the next matching document in the ** database and populate output variable *p with the details of the new ** entry. Or, if the iterator has reached EOF, set *pbEof to true. ** ** If an error occurs, return an SQLite error code. Otherwise, return ** SQLITE_OK. */ static int incrPhraseTokenNext( Fts3Table *pTab, /* Virtual table handle */ Fts3Phrase *pPhrase, /* Phrase to advance token of */ int iToken, /* Specific token to advance */ TokenDoclist *p, /* OUT: Docid and doclist for new entry */ u8 *pbEof /* OUT: True if iterator is at EOF */ ){ int rc = SQLITE_OK; if( pPhrase->iDoclistToken==iToken ){ assert( p->bIgnore==0 ); assert( pPhrase->aToken[iToken].pSegcsr==0 ); fts3EvalDlPhraseNext(pTab, &pPhrase->doclist, pbEof); p->pList = pPhrase->doclist.pList; p->nList = pPhrase->doclist.nList; p->iDocid = pPhrase->doclist.iDocid; }else{ Fts3PhraseToken *pToken = &pPhrase->aToken[iToken]; assert( pToken->pDeferred==0 ); assert( pToken->pSegcsr || pPhrase->iDoclistToken>=0 ); if( pToken->pSegcsr ){ assert( p->bIgnore==0 ); rc = sqlite3Fts3MsrIncrNext( pTab, pToken->pSegcsr, &p->iDocid, &p->pList, &p->nList ); if( p->pList==0 ) *pbEof = 1; }else{ p->bIgnore = 1; } } return rc; } /* ** The phrase iterator passed as the second argument: ** ** * features at least one token that uses an incremental doclist, and ** ** * does not contain any deferred tokens. ** ** Advance it to the next matching documnent in the database and populate ** the Fts3Doclist.pList and nList fields. ** ** If there is no "next" entry and no error occurs, then *pbEof is set to ** 1 before returning. Otherwise, if no error occurs and the iterator is ** successfully advanced, *pbEof is set to 0. ** ** If an error occurs, return an SQLite error code. Otherwise, return ** SQLITE_OK. */ static int fts3EvalIncrPhraseNext( Fts3Cursor *pCsr, /* FTS Cursor handle */ Fts3Phrase *p, /* Phrase object to advance to next docid */ u8 *pbEof /* OUT: Set to 1 if EOF */ ){ int rc = SQLITE_OK; Fts3Doclist *pDL = &p->doclist; Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab; u8 bEof = 0; /* This is only called if it is guaranteed that the phrase has at least ** one incremental token. In which case the bIncr flag is set. 
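  ** For phrases of more than one token (a descriptive note added here):
  ** the code below repeatedly advances the individual token cursors until
  ** they all report the same docid, then merges their position lists to
  ** verify that the tokens really appear at consecutive positions in that
  ** row before the entry is returned.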
*/ assert( p->bIncr==1 ); if( p->nToken==1 && p->bIncr ){ rc = sqlite3Fts3MsrIncrNext(pTab, p->aToken[0].pSegcsr, &pDL->iDocid, &pDL->pList, &pDL->nList ); if( pDL->pList==0 ) bEof = 1; }else{ int bDescDoclist = pCsr->bDesc; struct TokenDoclist a[MAX_INCR_PHRASE_TOKENS]; memset(a, 0, sizeof(a)); assert( p->nToken<=MAX_INCR_PHRASE_TOKENS ); assert( p->iDoclistTokennToken && bEof==0; i++){ rc = incrPhraseTokenNext(pTab, p, i, &a[i], &bEof); if( a[i].bIgnore==0 && (bMaxSet==0 || DOCID_CMP(iMax, a[i].iDocid)<0) ){ iMax = a[i].iDocid; bMaxSet = 1; } } assert( rc!=SQLITE_OK || (p->nToken>=1 && a[p->nToken-1].bIgnore==0) ); assert( rc!=SQLITE_OK || bMaxSet ); /* Keep advancing iterators until they all point to the same document */ for(i=0; inToken; i++){ while( rc==SQLITE_OK && bEof==0 && a[i].bIgnore==0 && DOCID_CMP(a[i].iDocid, iMax)<0 ){ rc = incrPhraseTokenNext(pTab, p, i, &a[i], &bEof); if( DOCID_CMP(a[i].iDocid, iMax)>0 ){ iMax = a[i].iDocid; i = 0; } } } /* Check if the current entries really are a phrase match */ if( bEof==0 ){ int nList = 0; int nByte = a[p->nToken-1].nList; char *aDoclist = sqlite3_malloc(nByte+1); if( !aDoclist ) return SQLITE_NOMEM; memcpy(aDoclist, a[p->nToken-1].pList, nByte+1); for(i=0; i<(p->nToken-1); i++){ if( a[i].bIgnore==0 ){ char *pL = a[i].pList; char *pR = aDoclist; char *pOut = aDoclist; int nDist = p->nToken-1-i; int res = fts3PoslistPhraseMerge(&pOut, nDist, 0, 1, &pL, &pR); if( res==0 ) break; nList = (int)(pOut - aDoclist); } } if( i==(p->nToken-1) ){ pDL->iDocid = iMax; pDL->pList = aDoclist; pDL->nList = nList; pDL->bFreeList = 1; break; } sqlite3_free(aDoclist); } } } *pbEof = bEof; return rc; } /* ** Attempt to move the phrase iterator to point to the next matching docid. ** If an error occurs, return an SQLite error code. Otherwise, return ** SQLITE_OK. ** ** If there is no "next" entry and no error occurs, then *pbEof is set to ** 1 before returning. Otherwise, if no error occurs and the iterator is ** successfully advanced, *pbEof is set to 0. */ static int fts3EvalPhraseNext( Fts3Cursor *pCsr, /* FTS Cursor handle */ Fts3Phrase *p, /* Phrase object to advance to next docid */ u8 *pbEof /* OUT: Set to 1 if EOF */ ){ int rc = SQLITE_OK; Fts3Doclist *pDL = &p->doclist; Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab; if( p->bIncr ){ rc = fts3EvalIncrPhraseNext(pCsr, p, pbEof); }else if( pCsr->bDesc!=pTab->bDescIdx && pDL->nAll ){ sqlite3Fts3DoclistPrev(pTab->bDescIdx, pDL->aAll, pDL->nAll, &pDL->pNextDocid, &pDL->iDocid, &pDL->nList, pbEof ); pDL->pList = pDL->pNextDocid; }else{ fts3EvalDlPhraseNext(pTab, pDL, pbEof); } return rc; } /* ** ** If *pRc is not SQLITE_OK when this function is called, it is a no-op. ** Otherwise, fts3EvalPhraseStart() is called on all phrases within the ** expression. Also the Fts3Expr.bDeferred variable is set to true for any ** expressions for which all descendent tokens are deferred. ** ** If parameter bOptOk is zero, then it is guaranteed that the ** Fts3Phrase.doclist.aAll/nAll variables contain the entire doclist for ** each phrase in the expression (subject to deferred token processing). ** Or, if bOptOk is non-zero, then one or more tokens within the expression ** may be loaded incrementally, meaning doclist.aAll/nAll is not available. ** ** If an error occurs within this function, *pRc is set to an SQLite error ** code before returning. 
*/ static void fts3EvalStartReaders( Fts3Cursor *pCsr, /* FTS Cursor handle */ Fts3Expr *pExpr, /* Expression to initialize phrases in */ int *pRc /* IN/OUT: Error code */ ){ if( pExpr && SQLITE_OK==*pRc ){ if( pExpr->eType==FTSQUERY_PHRASE ){ int nToken = pExpr->pPhrase->nToken; if( nToken ){ int i; for(i=0; ipPhrase->aToken[i].pDeferred==0 ) break; } pExpr->bDeferred = (i==nToken); } *pRc = fts3EvalPhraseStart(pCsr, 1, pExpr->pPhrase); }else{ fts3EvalStartReaders(pCsr, pExpr->pLeft, pRc); fts3EvalStartReaders(pCsr, pExpr->pRight, pRc); pExpr->bDeferred = (pExpr->pLeft->bDeferred && pExpr->pRight->bDeferred); } } } /* ** An array of the following structures is assembled as part of the process ** of selecting tokens to defer before the query starts executing (as part ** of the xFilter() method). There is one element in the array for each ** token in the FTS expression. ** ** Tokens are divided into AND/NEAR clusters. All tokens in a cluster belong ** to phrases that are connected only by AND and NEAR operators (not OR or ** NOT). When determining tokens to defer, each AND/NEAR cluster is considered ** separately. The root of a tokens AND/NEAR cluster is stored in ** Fts3TokenAndCost.pRoot. */ typedef struct Fts3TokenAndCost Fts3TokenAndCost; struct Fts3TokenAndCost { Fts3Phrase *pPhrase; /* The phrase the token belongs to */ int iToken; /* Position of token in phrase */ Fts3PhraseToken *pToken; /* The token itself */ Fts3Expr *pRoot; /* Root of NEAR/AND cluster */ int nOvfl; /* Number of overflow pages to load doclist */ int iCol; /* The column the token must match */ }; /* ** This function is used to populate an allocated Fts3TokenAndCost array. ** ** If *pRc is not SQLITE_OK when this function is called, it is a no-op. ** Otherwise, if an error occurs during execution, *pRc is set to an ** SQLite error code. */ static void fts3EvalTokenCosts( Fts3Cursor *pCsr, /* FTS Cursor handle */ Fts3Expr *pRoot, /* Root of current AND/NEAR cluster */ Fts3Expr *pExpr, /* Expression to consider */ Fts3TokenAndCost **ppTC, /* Write new entries to *(*ppTC)++ */ Fts3Expr ***ppOr, /* Write new OR root to *(*ppOr)++ */ int *pRc /* IN/OUT: Error code */ ){ if( *pRc==SQLITE_OK ){ if( pExpr->eType==FTSQUERY_PHRASE ){ Fts3Phrase *pPhrase = pExpr->pPhrase; int i; for(i=0; *pRc==SQLITE_OK && inToken; i++){ Fts3TokenAndCost *pTC = (*ppTC)++; pTC->pPhrase = pPhrase; pTC->iToken = i; pTC->pRoot = pRoot; pTC->pToken = &pPhrase->aToken[i]; pTC->iCol = pPhrase->iColumn; *pRc = sqlite3Fts3MsrOvfl(pCsr, pTC->pToken->pSegcsr, &pTC->nOvfl); } }else if( pExpr->eType!=FTSQUERY_NOT ){ assert( pExpr->eType==FTSQUERY_OR || pExpr->eType==FTSQUERY_AND || pExpr->eType==FTSQUERY_NEAR ); assert( pExpr->pLeft && pExpr->pRight ); if( pExpr->eType==FTSQUERY_OR ){ pRoot = pExpr->pLeft; **ppOr = pRoot; (*ppOr)++; } fts3EvalTokenCosts(pCsr, pRoot, pExpr->pLeft, ppTC, ppOr, pRc); if( pExpr->eType==FTSQUERY_OR ){ pRoot = pExpr->pRight; **ppOr = pRoot; (*ppOr)++; } fts3EvalTokenCosts(pCsr, pRoot, pExpr->pRight, ppTC, ppOr, pRc); } } } /* ** Determine the average document (row) size in pages. If successful, ** write this value to *pnPage and return SQLITE_OK. Otherwise, return ** an SQLite error code. ** ** The average document size in pages is calculated by first calculating ** determining the average size in bytes, B. If B is less than the amount ** of data that will fit on a single leaf page of an intkey table in ** this database, then the average docsize is 1. 
Otherwise, it is 1 plus ** the number of overflow pages consumed by a record B bytes in size. */ static int fts3EvalAverageDocsize(Fts3Cursor *pCsr, int *pnPage){ if( pCsr->nRowAvg==0 ){ /* The average document size, which is required to calculate the cost ** of each doclist, has not yet been determined. Read the required ** data from the %_stat table to calculate it. ** ** Entry 0 of the %_stat table is a blob containing (nCol+1) FTS3 ** varints, where nCol is the number of columns in the FTS3 table. ** The first varint is the number of documents currently stored in ** the table. The following nCol varints contain the total amount of ** data stored in all rows of each column of the table, from left ** to right. */ int rc; Fts3Table *p = (Fts3Table*)pCsr->base.pVtab; sqlite3_stmt *pStmt; sqlite3_int64 nDoc = 0; sqlite3_int64 nByte = 0; const char *pEnd; const char *a; rc = sqlite3Fts3SelectDoctotal(p, &pStmt); if( rc!=SQLITE_OK ) return rc; a = sqlite3_column_blob(pStmt, 0); assert( a ); pEnd = &a[sqlite3_column_bytes(pStmt, 0)]; a += sqlite3Fts3GetVarint(a, &nDoc); while( anDoc = nDoc; pCsr->nRowAvg = (int)(((nByte / nDoc) + p->nPgsz) / p->nPgsz); assert( pCsr->nRowAvg>0 ); rc = sqlite3_reset(pStmt); if( rc!=SQLITE_OK ) return rc; } *pnPage = pCsr->nRowAvg; return SQLITE_OK; } /* ** This function is called to select the tokens (if any) that will be ** deferred. The array aTC[] has already been populated when this is ** called. ** ** This function is called once for each AND/NEAR cluster in the ** expression. Each invocation determines which tokens to defer within ** the cluster with root node pRoot. See comments above the definition ** of struct Fts3TokenAndCost for more details. ** ** If no error occurs, SQLITE_OK is returned and sqlite3Fts3DeferToken() ** called on each token to defer. Otherwise, an SQLite error code is ** returned. */ static int fts3EvalSelectDeferred( Fts3Cursor *pCsr, /* FTS Cursor handle */ Fts3Expr *pRoot, /* Consider tokens with this root node */ Fts3TokenAndCost *aTC, /* Array of expression tokens and costs */ int nTC /* Number of entries in aTC[] */ ){ Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab; int nDocSize = 0; /* Number of pages per doc loaded */ int rc = SQLITE_OK; /* Return code */ int ii; /* Iterator variable for various purposes */ int nOvfl = 0; /* Total overflow pages used by doclists */ int nToken = 0; /* Total number of tokens in cluster */ int nMinEst = 0; /* The minimum count for any phrase so far. */ int nLoad4 = 1; /* (Phrases that will be loaded)^4. */ /* Tokens are never deferred for FTS tables created using the content=xxx ** option. The reason being that it is not guaranteed that the content ** table actually contains the same data as the index. To prevent this from ** causing any problems, the deferred token optimization is completely ** disabled for content=xxx tables. */ if( pTab->zContentTbl ){ return SQLITE_OK; } /* Count the tokens in this AND/NEAR cluster. If none of the doclists ** associated with the tokens spill onto overflow pages, or if there is ** only 1 token, exit early. No tokens to defer in this case. */ for(ii=0; ii0 ); /* Iterate through all tokens in this AND/NEAR cluster, in ascending order ** of the number of overflow pages that will be loaded by the pager layer ** to retrieve the entire doclist for the token from the full-text index. ** Load the doclists for tokens that are either: ** ** a. The cheapest token in the entire query (i.e. the one visited by the ** first iteration of this loop), or ** ** b. 
Part of a multi-token phrase. ** ** After each token doclist is loaded, merge it with the others from the ** same phrase and count the number of documents that the merged doclist ** contains. Set variable "nMinEst" to the smallest number of documents in ** any phrase doclist for which 1 or more token doclists have been loaded. ** Let nOther be the number of other phrases for which it is certain that ** one or more tokens will not be deferred. ** ** Then, for each token, defer it if loading the doclist would result in ** loading N or more overflow pages into memory, where N is computed as: ** ** (nMinEst + 4^nOther - 1) / (4^nOther) */ for(ii=0; iinOvfl) ){ pTC = &aTC[iTC]; } } assert( pTC ); if( ii && pTC->nOvfl>=((nMinEst+(nLoad4/4)-1)/(nLoad4/4))*nDocSize ){ /* The number of overflow pages to load for this (and therefore all ** subsequent) tokens is greater than the estimated number of pages ** that will be loaded if all subsequent tokens are deferred. */ Fts3PhraseToken *pToken = pTC->pToken; rc = sqlite3Fts3DeferToken(pCsr, pToken, pTC->iCol); fts3SegReaderCursorFree(pToken->pSegcsr); pToken->pSegcsr = 0; }else{ /* Set nLoad4 to the value of (4^nOther) for the next iteration of the ** for-loop. Except, limit the value to 2^24 to prevent it from ** overflowing the 32-bit integer it is stored in. */ if( ii<12 ) nLoad4 = nLoad4*4; if( ii==0 || (pTC->pPhrase->nToken>1 && ii!=nToken-1) ){ /* Either this is the cheapest token in the entire query, or it is ** part of a multi-token phrase. Either way, the entire doclist will ** (eventually) be loaded into memory. It may as well be now. */ Fts3PhraseToken *pToken = pTC->pToken; int nList = 0; char *pList = 0; rc = fts3TermSelect(pTab, pToken, pTC->iCol, &nList, &pList); assert( rc==SQLITE_OK || pList==0 ); if( rc==SQLITE_OK ){ rc = fts3EvalPhraseMergeToken( pTab, pTC->pPhrase, pTC->iToken,pList,nList ); } if( rc==SQLITE_OK ){ int nCount; nCount = fts3DoclistCountDocids( pTC->pPhrase->doclist.aAll, pTC->pPhrase->doclist.nAll ); if( ii==0 || nCountpToken = 0; } return rc; } /* ** This function is called from within the xFilter method. It initializes ** the full-text query currently stored in pCsr->pExpr. To iterate through ** the results of a query, the caller does: ** ** fts3EvalStart(pCsr); ** while( 1 ){ ** fts3EvalNext(pCsr); ** if( pCsr->bEof ) break; ** ... return row pCsr->iPrevId to the caller ... ** } */ static int fts3EvalStart(Fts3Cursor *pCsr){ Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab; int rc = SQLITE_OK; int nToken = 0; int nOr = 0; /* Allocate a MultiSegReader for each token in the expression. */ fts3EvalAllocateReaders(pCsr, pCsr->pExpr, &nToken, &nOr, &rc); /* Determine which, if any, tokens in the expression should be deferred. */ #ifndef SQLITE_DISABLE_FTS4_DEFERRED if( rc==SQLITE_OK && nToken>1 && pTab->bFts4 ){ Fts3TokenAndCost *aTC; Fts3Expr **apOr; aTC = (Fts3TokenAndCost *)sqlite3_malloc( sizeof(Fts3TokenAndCost) * nToken + sizeof(Fts3Expr *) * nOr * 2 ); apOr = (Fts3Expr **)&aTC[nToken]; if( !aTC ){ rc = SQLITE_NOMEM; }else{ int ii; Fts3TokenAndCost *pTC = aTC; Fts3Expr **ppOr = apOr; fts3EvalTokenCosts(pCsr, 0, pCsr->pExpr, &pTC, &ppOr, &rc); nToken = (int)(pTC-aTC); nOr = (int)(ppOr-apOr); if( rc==SQLITE_OK ){ rc = fts3EvalSelectDeferred(pCsr, 0, aTC, nToken); for(ii=0; rc==SQLITE_OK && iipExpr, &rc); return rc; } /* ** Invalidate the current position list for phrase pPhrase. 
*/ static void fts3EvalInvalidatePoslist(Fts3Phrase *pPhrase){ if( pPhrase->doclist.bFreeList ){ sqlite3_free(pPhrase->doclist.pList); } pPhrase->doclist.pList = 0; pPhrase->doclist.nList = 0; pPhrase->doclist.bFreeList = 0; } /* ** This function is called to edit the position list associated with ** the phrase object passed as the fifth argument according to a NEAR ** condition. For example: ** ** abc NEAR/5 "def ghi" ** ** Parameter nNear is passed the NEAR distance of the expression (5 in ** the example above). When this function is called, *paPoslist points to ** the position list, and *pnToken is the number of phrase tokens in, the ** phrase on the other side of the NEAR operator to pPhrase. For example, ** if pPhrase refers to the "def ghi" phrase, then *paPoslist points to ** the position list associated with phrase "abc". ** ** All positions in the pPhrase position list that are not sufficiently ** close to a position in the *paPoslist position list are removed. If this ** leaves 0 positions, zero is returned. Otherwise, non-zero. ** ** Before returning, *paPoslist is set to point to the position lsit ** associated with pPhrase. And *pnToken is set to the number of tokens in ** pPhrase. */ static int fts3EvalNearTrim( int nNear, /* NEAR distance. As in "NEAR/nNear". */ char *aTmp, /* Temporary space to use */ char **paPoslist, /* IN/OUT: Position list */ int *pnToken, /* IN/OUT: Tokens in phrase of *paPoslist */ Fts3Phrase *pPhrase /* The phrase object to trim the doclist of */ ){ int nParam1 = nNear + pPhrase->nToken; int nParam2 = nNear + *pnToken; int nNew; char *p2; char *pOut; int res; assert( pPhrase->doclist.pList ); p2 = pOut = pPhrase->doclist.pList; res = fts3PoslistNearMerge( &pOut, aTmp, nParam1, nParam2, paPoslist, &p2 ); if( res ){ nNew = (int)(pOut - pPhrase->doclist.pList) - 1; assert( pPhrase->doclist.pList[nNew]=='\0' ); assert( nNew<=pPhrase->doclist.nList && nNew>0 ); memset(&pPhrase->doclist.pList[nNew], 0, pPhrase->doclist.nList - nNew); pPhrase->doclist.nList = nNew; *paPoslist = pPhrase->doclist.pList; *pnToken = pPhrase->nToken; } return res; } /* ** This function is a no-op if *pRc is other than SQLITE_OK when it is called. ** Otherwise, it advances the expression passed as the second argument to ** point to the next matching row in the database. Expressions iterate through ** matching rows in docid order. Ascending order if Fts3Cursor.bDesc is zero, ** or descending if it is non-zero. ** ** If an error occurs, *pRc is set to an SQLite error code. Otherwise, if ** successful, the following variables in pExpr are set: ** ** Fts3Expr.bEof (non-zero if EOF - there is no next row) ** Fts3Expr.iDocid (valid if bEof==0. The docid of the next row) ** ** If the expression is of type FTSQUERY_PHRASE, and the expression is not ** at EOF, then the following variables are populated with the position list ** for the phrase for the visited row: ** ** FTs3Expr.pPhrase->doclist.nList (length of pList in bytes) ** FTs3Expr.pPhrase->doclist.pList (pointer to position list) ** ** It says above that this function advances the expression to the next ** matching row. This is usually true, but there are the following exceptions: ** ** 1. Deferred tokens are not taken into account. If a phrase consists ** entirely of deferred tokens, it is assumed to match every row in ** the db. In this case the position-list is not populated at all. 
** ** Or, if a phrase contains one or more deferred tokens and one or ** more non-deferred tokens, then the expression is advanced to the ** next possible match, considering only non-deferred tokens. In other ** words, if the phrase is "A B C", and "B" is deferred, the expression ** is advanced to the next row that contains an instance of "A * C", ** where "*" may match any single token. The position list in this case ** is populated as for "A * C" before returning. ** ** 2. NEAR is treated as AND. If the expression is "x NEAR y", it is ** advanced to point to the next row that matches "x AND y". ** ** See sqlite3Fts3EvalTestDeferred() for details on testing if a row is ** really a match, taking into account deferred tokens and NEAR operators. */ static void fts3EvalNextRow( Fts3Cursor *pCsr, /* FTS Cursor handle */ Fts3Expr *pExpr, /* Expr. to advance to next matching row */ int *pRc /* IN/OUT: Error code */ ){ if( *pRc==SQLITE_OK ){ int bDescDoclist = pCsr->bDesc; /* Used by DOCID_CMP() macro */ assert( pExpr->bEof==0 ); pExpr->bStart = 1; switch( pExpr->eType ){ case FTSQUERY_NEAR: case FTSQUERY_AND: { Fts3Expr *pLeft = pExpr->pLeft; Fts3Expr *pRight = pExpr->pRight; assert( !pLeft->bDeferred || !pRight->bDeferred ); if( pLeft->bDeferred ){ /* LHS is entirely deferred. So we assume it matches every row. ** Advance the RHS iterator to find the next row visited. */ fts3EvalNextRow(pCsr, pRight, pRc); pExpr->iDocid = pRight->iDocid; pExpr->bEof = pRight->bEof; }else if( pRight->bDeferred ){ /* RHS is entirely deferred. So we assume it matches every row. ** Advance the LHS iterator to find the next row visited. */ fts3EvalNextRow(pCsr, pLeft, pRc); pExpr->iDocid = pLeft->iDocid; pExpr->bEof = pLeft->bEof; }else{ /* Neither the RHS or LHS are deferred. 
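          ** The code below first advances both sub-expressions, then
          ** repeatedly steps whichever side has the smaller docid until
          ** the two sides agree on a docid or one of them reaches EOF -
          ** an ordinary merge-intersection of the two doclists (note
          ** added for clarity; not part of the original comment).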
*/ fts3EvalNextRow(pCsr, pLeft, pRc); fts3EvalNextRow(pCsr, pRight, pRc); while( !pLeft->bEof && !pRight->bEof && *pRc==SQLITE_OK ){ sqlite3_int64 iDiff = DOCID_CMP(pLeft->iDocid, pRight->iDocid); if( iDiff==0 ) break; if( iDiff<0 ){ fts3EvalNextRow(pCsr, pLeft, pRc); }else{ fts3EvalNextRow(pCsr, pRight, pRc); } } pExpr->iDocid = pLeft->iDocid; pExpr->bEof = (pLeft->bEof || pRight->bEof); if( pExpr->eType==FTSQUERY_NEAR && pExpr->bEof ){ if( pRight->pPhrase && pRight->pPhrase->doclist.aAll ){ Fts3Doclist *pDl = &pRight->pPhrase->doclist; while( *pRc==SQLITE_OK && pRight->bEof==0 ){ memset(pDl->pList, 0, pDl->nList); fts3EvalNextRow(pCsr, pRight, pRc); } } if( pLeft->pPhrase && pLeft->pPhrase->doclist.aAll ){ Fts3Doclist *pDl = &pLeft->pPhrase->doclist; while( *pRc==SQLITE_OK && pLeft->bEof==0 ){ memset(pDl->pList, 0, pDl->nList); fts3EvalNextRow(pCsr, pLeft, pRc); } } } } break; } case FTSQUERY_OR: { Fts3Expr *pLeft = pExpr->pLeft; Fts3Expr *pRight = pExpr->pRight; sqlite3_int64 iCmp = DOCID_CMP(pLeft->iDocid, pRight->iDocid); assert( pLeft->bStart || pLeft->iDocid==pRight->iDocid ); assert( pRight->bStart || pLeft->iDocid==pRight->iDocid ); if( pRight->bEof || (pLeft->bEof==0 && iCmp<0) ){ fts3EvalNextRow(pCsr, pLeft, pRc); }else if( pLeft->bEof || (pRight->bEof==0 && iCmp>0) ){ fts3EvalNextRow(pCsr, pRight, pRc); }else{ fts3EvalNextRow(pCsr, pLeft, pRc); fts3EvalNextRow(pCsr, pRight, pRc); } pExpr->bEof = (pLeft->bEof && pRight->bEof); iCmp = DOCID_CMP(pLeft->iDocid, pRight->iDocid); if( pRight->bEof || (pLeft->bEof==0 && iCmp<0) ){ pExpr->iDocid = pLeft->iDocid; }else{ pExpr->iDocid = pRight->iDocid; } break; } case FTSQUERY_NOT: { Fts3Expr *pLeft = pExpr->pLeft; Fts3Expr *pRight = pExpr->pRight; if( pRight->bStart==0 ){ fts3EvalNextRow(pCsr, pRight, pRc); assert( *pRc!=SQLITE_OK || pRight->bStart ); } fts3EvalNextRow(pCsr, pLeft, pRc); if( pLeft->bEof==0 ){ while( !*pRc && !pRight->bEof && DOCID_CMP(pLeft->iDocid, pRight->iDocid)>0 ){ fts3EvalNextRow(pCsr, pRight, pRc); } } pExpr->iDocid = pLeft->iDocid; pExpr->bEof = pLeft->bEof; break; } default: { Fts3Phrase *pPhrase = pExpr->pPhrase; fts3EvalInvalidatePoslist(pPhrase); *pRc = fts3EvalPhraseNext(pCsr, pPhrase, &pExpr->bEof); pExpr->iDocid = pPhrase->doclist.iDocid; break; } } } } /* ** If *pRc is not SQLITE_OK, or if pExpr is not the root node of a NEAR ** cluster, then this function returns 1 immediately. ** ** Otherwise, it checks if the current row really does match the NEAR ** expression, using the data currently stored in the position lists ** (Fts3Expr->pPhrase.doclist.pList/nList) for each phrase in the expression. ** ** If the current row is a match, the position list associated with each ** phrase in the NEAR expression is edited in place to contain only those ** phrase instances sufficiently close to their peers to satisfy all NEAR ** constraints. In this case it returns 1. If the NEAR expression does not ** match the current row, 0 is returned. The position lists may or may not ** be edited if 0 is returned. */ static int fts3EvalNearTest(Fts3Expr *pExpr, int *pRc){ int res = 1; /* The following block runs if pExpr is the root of a NEAR query. ** For example, the query: ** ** "w" NEAR "x" NEAR "y" NEAR "z" ** ** which is represented in tree form as: ** ** | ** +--NEAR--+ <-- root of NEAR query ** | | ** +--NEAR--+ "z" ** | | ** +--NEAR--+ "y" ** | | ** "w" "x" ** ** The right-hand child of a NEAR node is always a phrase. The ** left-hand child may be either a phrase or a NEAR node. 
There are ** no exceptions to this - it's the way the parser in fts3_expr.c works. */ if( *pRc==SQLITE_OK && pExpr->eType==FTSQUERY_NEAR && pExpr->bEof==0 && (pExpr->pParent==0 || pExpr->pParent->eType!=FTSQUERY_NEAR) ){ Fts3Expr *p; int nTmp = 0; /* Bytes of temp space */ char *aTmp; /* Temp space for PoslistNearMerge() */ /* Allocate temporary working space. */ for(p=pExpr; p->pLeft; p=p->pLeft){ nTmp += p->pRight->pPhrase->doclist.nList; } nTmp += p->pPhrase->doclist.nList; if( nTmp==0 ){ res = 0; }else{ aTmp = sqlite3_malloc(nTmp*2); if( !aTmp ){ *pRc = SQLITE_NOMEM; res = 0; }else{ char *aPoslist = p->pPhrase->doclist.pList; int nToken = p->pPhrase->nToken; for(p=p->pParent;res && p && p->eType==FTSQUERY_NEAR; p=p->pParent){ Fts3Phrase *pPhrase = p->pRight->pPhrase; int nNear = p->nNear; res = fts3EvalNearTrim(nNear, aTmp, &aPoslist, &nToken, pPhrase); } aPoslist = pExpr->pRight->pPhrase->doclist.pList; nToken = pExpr->pRight->pPhrase->nToken; for(p=pExpr->pLeft; p && res; p=p->pLeft){ int nNear; Fts3Phrase *pPhrase; assert( p->pParent && p->pParent->pLeft==p ); nNear = p->pParent->nNear; pPhrase = ( p->eType==FTSQUERY_NEAR ? p->pRight->pPhrase : p->pPhrase ); res = fts3EvalNearTrim(nNear, aTmp, &aPoslist, &nToken, pPhrase); } } sqlite3_free(aTmp); } } return res; } /* ** This function is a helper function for sqlite3Fts3EvalTestDeferred(). ** Assuming no error occurs or has occurred, It returns non-zero if the ** expression passed as the second argument matches the row that pCsr ** currently points to, or zero if it does not. ** ** If *pRc is not SQLITE_OK when this function is called, it is a no-op. ** If an error occurs during execution of this function, *pRc is set to ** the appropriate SQLite error code. In this case the returned value is ** undefined. */ static int fts3EvalTestExpr( Fts3Cursor *pCsr, /* FTS cursor handle */ Fts3Expr *pExpr, /* Expr to test. May or may not be root. */ int *pRc /* IN/OUT: Error code */ ){ int bHit = 1; /* Return value */ if( *pRc==SQLITE_OK ){ switch( pExpr->eType ){ case FTSQUERY_NEAR: case FTSQUERY_AND: bHit = ( fts3EvalTestExpr(pCsr, pExpr->pLeft, pRc) && fts3EvalTestExpr(pCsr, pExpr->pRight, pRc) && fts3EvalNearTest(pExpr, pRc) ); /* If the NEAR expression does not match any rows, zero the doclist for ** all phrases involved in the NEAR. This is because the snippet(), ** offsets() and matchinfo() functions are not supposed to recognize ** any instances of phrases that are part of unmatched NEAR queries. ** For example if this expression: ** ** ... MATCH 'a OR (b NEAR c)' ** ** is matched against a row containing: ** ** 'a b d e' ** ** then any snippet() should ony highlight the "a" term, not the "b" ** (as "b" is part of a non-matching NEAR clause). 
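** As a purely illustrative example (hypothetical table, enhanced query
** syntax), given:
**
**   CREATE VIRTUAL TABLE t1 USING fts4(docs);
**   INSERT INTO t1(docs) VALUES('a b d e');
**   SELECT snippet(t1) FROM t1 WHERE t1 MATCH 'a OR (b NEAR c)';
**
** the row matches via the "a" branch only, so snippet() highlights "a"
** but not "b".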
*/ if( bHit==0 && pExpr->eType==FTSQUERY_NEAR && (pExpr->pParent==0 || pExpr->pParent->eType!=FTSQUERY_NEAR) ){ Fts3Expr *p; for(p=pExpr; p->pPhrase==0; p=p->pLeft){ if( p->pRight->iDocid==pCsr->iPrevId ){ fts3EvalInvalidatePoslist(p->pRight->pPhrase); } } if( p->iDocid==pCsr->iPrevId ){ fts3EvalInvalidatePoslist(p->pPhrase); } } break; case FTSQUERY_OR: { int bHit1 = fts3EvalTestExpr(pCsr, pExpr->pLeft, pRc); int bHit2 = fts3EvalTestExpr(pCsr, pExpr->pRight, pRc); bHit = bHit1 || bHit2; break; } case FTSQUERY_NOT: bHit = ( fts3EvalTestExpr(pCsr, pExpr->pLeft, pRc) && !fts3EvalTestExpr(pCsr, pExpr->pRight, pRc) ); break; default: { #ifndef SQLITE_DISABLE_FTS4_DEFERRED if( pCsr->pDeferred && (pExpr->iDocid==pCsr->iPrevId || pExpr->bDeferred) ){ Fts3Phrase *pPhrase = pExpr->pPhrase; assert( pExpr->bDeferred || pPhrase->doclist.bFreeList==0 ); if( pExpr->bDeferred ){ fts3EvalInvalidatePoslist(pPhrase); } *pRc = fts3EvalDeferredPhrase(pCsr, pPhrase); bHit = (pPhrase->doclist.pList!=0); pExpr->iDocid = pCsr->iPrevId; }else #endif { bHit = (pExpr->bEof==0 && pExpr->iDocid==pCsr->iPrevId); } break; } } } return bHit; } /* ** This function is called as the second part of each xNext operation when ** iterating through the results of a full-text query. At this point the ** cursor points to a row that matches the query expression, with the ** following caveats: ** ** * Up until this point, "NEAR" operators in the expression have been ** treated as "AND". ** ** * Deferred tokens have not yet been considered. ** ** If *pRc is not SQLITE_OK when this function is called, it immediately ** returns 0. Otherwise, it tests whether or not after considering NEAR ** operators and deferred tokens the current row is still a match for the ** expression. It returns 1 if both of the following are true: ** ** 1. *pRc is SQLITE_OK when this function returns, and ** ** 2. After scanning the current FTS table row for the deferred tokens, ** it is determined that the row does *not* match the query. ** ** Or, if no error occurs and it seems the current row does match the FTS ** query, return 0. */ SQLITE_PRIVATE int sqlite3Fts3EvalTestDeferred(Fts3Cursor *pCsr, int *pRc){ int rc = *pRc; int bMiss = 0; if( rc==SQLITE_OK ){ /* If there are one or more deferred tokens, load the current row into ** memory and scan it to determine the position list for each deferred ** token. Then, see if this row is really a match, considering deferred ** tokens and NEAR operators (neither of which were taken into account ** earlier, by fts3EvalNextRow()). */ if( pCsr->pDeferred ){ rc = fts3CursorSeek(0, pCsr); if( rc==SQLITE_OK ){ rc = sqlite3Fts3CacheDeferredDoclists(pCsr); } } bMiss = (0==fts3EvalTestExpr(pCsr, pCsr->pExpr, &rc)); /* Free the position-lists accumulated for each deferred token above. */ sqlite3Fts3FreeDeferredDoclists(pCsr); *pRc = rc; } return (rc==SQLITE_OK && bMiss); } /* ** Advance to the next document that matches the FTS expression in ** Fts3Cursor.pExpr. 
*/
static int fts3EvalNext(Fts3Cursor *pCsr){
  int rc = SQLITE_OK;             /* Return Code */
  Fts3Expr *pExpr = pCsr->pExpr;
  assert( pCsr->isEof==0 );
  if( pExpr==0 ){
    pCsr->isEof = 1;
  }else{
    do {
      if( pCsr->isRequireSeek==0 ){
        sqlite3_reset(pCsr->pStmt);
      }
      assert( sqlite3_data_count(pCsr->pStmt)==0 );
      fts3EvalNextRow(pCsr, pExpr, &rc);
      pCsr->isEof = pExpr->bEof;
      pCsr->isRequireSeek = 1;
      pCsr->isMatchinfoNeeded = 1;
      pCsr->iPrevId = pExpr->iDocid;
    }while( pCsr->isEof==0 && sqlite3Fts3EvalTestDeferred(pCsr, &rc) );
  }

  /* Check if the cursor is past the end of the docid range specified
  ** by Fts3Cursor.iMinDocid/iMaxDocid. If so, set the EOF flag.  */
  if( rc==SQLITE_OK && (
        (pCsr->bDesc==0 && pCsr->iPrevId>pCsr->iMaxDocid)
     || (pCsr->bDesc!=0 && pCsr->iPrevId<pCsr->iMinDocid)
  )){
    pCsr->isEof = 1;
  }

  return rc;
}

/*
** Restart iteration for expression pExpr so that the next call to
** fts3EvalNext() visits the first row. Do not allow incremental
** loading or merging of phrase doclists for this iteration.
**
** If *pRc is other than SQLITE_OK when this function is called, it is
** a no-op. If an error occurs within this function, *pRc is set to an
** SQLite error code before returning.
*/
static void fts3EvalRestart(
  Fts3Cursor *pCsr,
  Fts3Expr *pExpr,
  int *pRc
){
  if( pExpr && *pRc==SQLITE_OK ){
    Fts3Phrase *pPhrase = pExpr->pPhrase;

    if( pPhrase ){
      fts3EvalInvalidatePoslist(pPhrase);
      if( pPhrase->bIncr ){
        int i;
        for(i=0; i<pPhrase->nToken; i++){
          Fts3PhraseToken *pToken = &pPhrase->aToken[i];
          assert( pToken->pDeferred==0 );
          if( pToken->pSegcsr ){
            sqlite3Fts3MsrIncrRestart(pToken->pSegcsr);
          }
        }
        *pRc = fts3EvalPhraseStart(pCsr, 0, pPhrase);
      }
      pPhrase->doclist.pNextDocid = 0;
      pPhrase->doclist.iDocid = 0;
      pPhrase->pOrPoslist = 0;
    }

    pExpr->iDocid = 0;
    pExpr->bEof = 0;
    pExpr->bStart = 0;

    fts3EvalRestart(pCsr, pExpr->pLeft, pRc);
    fts3EvalRestart(pCsr, pExpr->pRight, pRc);
  }
}

/*
** After allocating the Fts3Expr.aMI[] array for each phrase in the
** expression rooted at pExpr, the cursor iterates through all rows matched
** by pExpr, calling this function for each row. This function increments
** the values in Fts3Expr.aMI[] according to the position-list currently
** found in Fts3Expr.pPhrase->doclist.pList for each of the phrase
** expression nodes.
*/
static void fts3EvalUpdateCounts(Fts3Expr *pExpr){
  if( pExpr ){
    Fts3Phrase *pPhrase = pExpr->pPhrase;
    if( pPhrase && pPhrase->doclist.pList ){
      int iCol = 0;
      char *p = pPhrase->doclist.pList;

      assert( *p );
      while( 1 ){
        u8 c = 0;
        int iCnt = 0;
        while( 0xFE & (*p | c) ){
          if( (c&0x80)==0 ) iCnt++;
          c = *p++ & 0x80;
        }

        /* aMI[iCol*3 + 1] = Number of occurrences
        ** aMI[iCol*3 + 2] = Number of rows containing at least one instance
        */
        pExpr->aMI[iCol*3 + 1] += iCnt;
        pExpr->aMI[iCol*3 + 2] += (iCnt>0);

        if( *p==0x00 ) break;
        p++;
        p += fts3GetVarint32(p, &iCol);
      }
    }

    fts3EvalUpdateCounts(pExpr->pLeft);
    fts3EvalUpdateCounts(pExpr->pRight);
  }
}

/*
** Expression pExpr must be of type FTSQUERY_PHRASE.
**
** If it is not already allocated and populated, this function allocates and
** populates the Fts3Expr.aMI[] array for expression pExpr. If pExpr is part
** of a NEAR expression, then it also allocates and populates the same array
** for all other phrases that are part of the NEAR expression.
**
** SQLITE_OK is returned if the aMI[] array is successfully allocated and
** populated. Otherwise, if an error occurs, an SQLite error code is returned.
*/
static int fts3EvalGatherStats(
  Fts3Cursor *pCsr,               /* Cursor object */
  Fts3Expr *pExpr                 /* FTSQUERY_PHRASE expression */
){
  int rc = SQLITE_OK;             /* Return code */

  assert( pExpr->eType==FTSQUERY_PHRASE );
  if( pExpr->aMI==0 ){
    Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab;
    Fts3Expr *pRoot;                /* Root of NEAR expression */
    Fts3Expr *p;                    /* Iterator used for several purposes */

    sqlite3_int64 iPrevId = pCsr->iPrevId;
    sqlite3_int64 iDocid;
    u8 bEof;

    /* Find the root of the NEAR expression */
    pRoot = pExpr;
    while( pRoot->pParent && pRoot->pParent->eType==FTSQUERY_NEAR ){
      pRoot = pRoot->pParent;
    }
    iDocid = pRoot->iDocid;
    bEof = pRoot->bEof;
    assert( pRoot->bStart );

    /* Allocate space for the aMI[] array of each FTSQUERY_PHRASE node */
    for(p=pRoot; p; p=p->pLeft){
      Fts3Expr *pE = (p->eType==FTSQUERY_PHRASE?p:p->pRight);
      assert( pE->aMI==0 );
      pE->aMI = (u32 *)sqlite3_malloc(pTab->nColumn * 3 * sizeof(u32));
      if( !pE->aMI ) return SQLITE_NOMEM;
      memset(pE->aMI, 0, pTab->nColumn * 3 * sizeof(u32));
    }

    fts3EvalRestart(pCsr, pRoot, &rc);

    while( pCsr->isEof==0 && rc==SQLITE_OK ){

      do {
        /* Ensure the %_content statement is reset. */
        if( pCsr->isRequireSeek==0 ) sqlite3_reset(pCsr->pStmt);
        assert( sqlite3_data_count(pCsr->pStmt)==0 );

        /* Advance to the next document */
        fts3EvalNextRow(pCsr, pRoot, &rc);
        pCsr->isEof = pRoot->bEof;
        pCsr->isRequireSeek = 1;
        pCsr->isMatchinfoNeeded = 1;
        pCsr->iPrevId = pRoot->iDocid;
      }while( pCsr->isEof==0
           && pRoot->eType==FTSQUERY_NEAR
           && sqlite3Fts3EvalTestDeferred(pCsr, &rc)
      );

      if( rc==SQLITE_OK && pCsr->isEof==0 ){
        fts3EvalUpdateCounts(pRoot);
      }
    }

    pCsr->isEof = 0;
    pCsr->iPrevId = iPrevId;

    if( bEof ){
      pRoot->bEof = bEof;
    }else{
      /* Caution: pRoot may iterate through docids in ascending or descending
      ** order. For this reason, even though it seems more defensive, the
      ** do loop can not be written:
      **
      **   do {...} while( pRoot->iDocid<iDocid && rc==SQLITE_OK );
      */
      fts3EvalRestart(pCsr, pRoot, &rc);
      do {
        fts3EvalNextRow(pCsr, pRoot, &rc);
        assert( pRoot->bEof==0 );
      }while( pRoot->iDocid!=iDocid && rc==SQLITE_OK );
    }
  }
  return rc;
}

/*
** This function is used by the matchinfo() module to query a phrase
** expression node for the following information:
**
**   1. The total number of occurrences of the phrase in each column of
**      the FTS table (considering all rows), and
**
**   2. For each column, the number of rows in the table for which the
**      column contains at least one instance of the phrase.
**
** If no error occurs, SQLITE_OK is returned and the values for each column
** written into the array aiOut as follows:
**
**   aiOut[iCol*3 + 1] = Number of occurrences
**   aiOut[iCol*3 + 2] = Number of rows containing at least one instance
**
** Caveats:
**
**   * If a phrase consists entirely of deferred tokens, then all output
**     values are set to the number of documents in the table. In other
**     words we assume that very common tokens occur exactly once in each
**     column of each row of the table.
**
**   * If a phrase contains some deferred tokens (and some non-deferred
**     tokens), count the potential occurrence identified by considering
**     the non-deferred tokens instead of actual phrase occurrences.
**
**   * If the phrase is part of a NEAR expression, then only phrase instances
**     that meet the NEAR constraint are included in the counts.
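**
** For example (a purely illustrative, hypothetical case), for a two-column
** fts4 table in which the phrase occurs three times in column 0 (spread
** over two rows) and once in column 1 (in a single row), the array would
** be populated as:
**
**   aiOut[1]==3   aiOut[2]==2      (column 0)
**   aiOut[4]==1   aiOut[5]==1      (column 1)
**
** Entries aiOut[0] and aiOut[3] are not written by this function.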
*/
SQLITE_PRIVATE int sqlite3Fts3EvalPhraseStats(
  Fts3Cursor *pCsr,               /* FTS cursor handle */
  Fts3Expr *pExpr,                /* Phrase expression */
  u32 *aiOut                      /* Array to write results into (see above) */
){
  Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab;
  int rc = SQLITE_OK;
  int iCol;

  if( pExpr->bDeferred && pExpr->pParent->eType!=FTSQUERY_NEAR ){
    assert( pCsr->nDoc>0 );
    for(iCol=0; iCol<pTab->nColumn; iCol++){
      aiOut[iCol*3 + 1] = (u32)pCsr->nDoc;
      aiOut[iCol*3 + 2] = (u32)pCsr->nDoc;
    }
  }else{
    rc = fts3EvalGatherStats(pCsr, pExpr);
    if( rc==SQLITE_OK ){
      assert( pExpr->aMI );
      for(iCol=0; iCol<pTab->nColumn; iCol++){
        aiOut[iCol*3 + 1] = pExpr->aMI[iCol*3 + 1];
        aiOut[iCol*3 + 2] = pExpr->aMI[iCol*3 + 2];
      }
    }
  }

  return rc;
}

/*
** The expression pExpr passed as the second argument to this function
** must be of type FTSQUERY_PHRASE.
**
** The returned value is either NULL or a pointer to a buffer containing
** a position-list indicating the occurrences of the phrase in column iCol
** of the current row.
**
** More specifically, the returned buffer contains 1 varint for each
** occurrence of the phrase in the column, stored using the normal (delta+2)
** compression and is terminated by either an 0x01 or 0x00 byte. For example,
** if the requested column contains "a b X c d X X" and the position-list
** for 'X' is requested, the buffer returned may contain:
**
**     0x04 0x05 0x03 0x01   or   0x04 0x05 0x03 0x00
**
** This function works regardless of whether or not the phrase is deferred,
** incremental, or neither.
*/
SQLITE_PRIVATE int sqlite3Fts3EvalPhrasePoslist(
  Fts3Cursor *pCsr,               /* FTS3 cursor object */
  Fts3Expr *pExpr,                /* Phrase to return doclist for */
  int iCol,                       /* Column to return position list for */
  char **ppOut                    /* OUT: Pointer to position list */
){
  Fts3Phrase *pPhrase = pExpr->pPhrase;
  Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab;
  char *pIter;
  int iThis;
  sqlite3_int64 iDocid;

  /* If this phrase applies specifically to some column other than
  ** column iCol, return a NULL pointer.  */
  *ppOut = 0;
  assert( iCol>=0 && iCol<pTab->nColumn );
  if( (pPhrase->iColumn<pTab->nColumn && pPhrase->iColumn!=iCol) ){
    return SQLITE_OK;
  }

  iDocid = pExpr->iDocid;
  pIter = pPhrase->doclist.pList;
  if( iDocid!=pCsr->iPrevId || pExpr->bEof ){
    int rc = SQLITE_OK;
    int bDescDoclist = pTab->bDescIdx;      /* For DOCID_CMP macro */
    int bOr = 0;
    u8 bTreeEof = 0;
    Fts3Expr *p;                  /* Used to iterate from pExpr to root */
    Fts3Expr *pNear;              /* Most senior NEAR ancestor (or pExpr) */
    int bMatch;

    /* Check if this phrase descends from an OR expression node. If not,
    ** return NULL. Otherwise, the entry that corresponds to docid
    ** pCsr->iPrevId may lie earlier in the doclist buffer. Or, if the
    ** tree that the node is part of has been marked as EOF, but the node
    ** itself is not EOF, then it may point to an earlier entry. */
    pNear = pExpr;
    for(p=pExpr->pParent; p; p=p->pParent){
      if( p->eType==FTSQUERY_OR ) bOr = 1;
      if( p->eType==FTSQUERY_NEAR ) pNear = p;
      if( p->bEof ) bTreeEof = 1;
    }
    if( bOr==0 ) return SQLITE_OK;

    /* This is the descendant of an OR node. In this case we cannot use
    ** an incremental phrase. Load the entire doclist for the phrase
    ** into memory in this case.
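**
** As a purely illustrative example (hypothetical table and terms), a query
** such as:
**
**   SELECT offsets(t1) FROM t1 WHERE t1 MATCH 'linux OR "kernel module"';
**
** requests the position list of the phrase "kernel module", which is a
** descendant of an OR node. If that phrase was being scanned incrementally,
** its full doclist is loaded here first.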
    */
    if( pPhrase->bIncr ){
      int bEofSave = pNear->bEof;
      fts3EvalRestart(pCsr, pNear, &rc);
      while( rc==SQLITE_OK && !pNear->bEof ){
        fts3EvalNextRow(pCsr, pNear, &rc);
        if( bEofSave==0 && pNear->iDocid==iDocid ) break;
      }
      assert( rc!=SQLITE_OK || pPhrase->bIncr==0 );
    }
    if( bTreeEof ){
      while( rc==SQLITE_OK && !pNear->bEof ){
        fts3EvalNextRow(pCsr, pNear, &rc);
      }
    }
    if( rc!=SQLITE_OK ) return rc;

    bMatch = 1;
    for(p=pNear; p; p=p->pLeft){
      u8 bEof = 0;
      Fts3Expr *pTest = p;
      Fts3Phrase *pPh;
      assert( pTest->eType==FTSQUERY_NEAR || pTest->eType==FTSQUERY_PHRASE );
      if( pTest->eType==FTSQUERY_NEAR ) pTest = pTest->pRight;
      assert( pTest->eType==FTSQUERY_PHRASE );
      pPh = pTest->pPhrase;

      pIter = pPh->pOrPoslist;
      iDocid = pPh->iOrDocid;
      if( pCsr->bDesc==bDescDoclist ){
        bEof = !pPh->doclist.nAll
            || (pIter >= (pPh->doclist.aAll + pPh->doclist.nAll));
        while( (pIter==0 || DOCID_CMP(iDocid, pCsr->iPrevId)<0 ) && bEof==0 ){
          sqlite3Fts3DoclistNext(
              bDescDoclist, pPh->doclist.aAll, pPh->doclist.nAll,
              &pIter, &iDocid, &bEof
          );
        }
      }else{
        bEof = !pPh->doclist.nAll || (pIter && pIter<=pPh->doclist.aAll);
        while( (pIter==0 || DOCID_CMP(iDocid, pCsr->iPrevId)>0 ) && bEof==0 ){
          int dummy;
          sqlite3Fts3DoclistPrev(
              bDescDoclist, pPh->doclist.aAll, pPh->doclist.nAll,
              &pIter, &iDocid, &dummy, &bEof
          );
        }
      }
      pPh->pOrPoslist = pIter;
      pPh->iOrDocid = iDocid;
      if( bEof || iDocid!=pCsr->iPrevId ) bMatch = 0;
    }

    if( bMatch ){
      pIter = pPhrase->pOrPoslist;
    }else{
      pIter = 0;
    }
  }
  if( pIter==0 ) return SQLITE_OK;

  if( *pIter==0x01 ){
    pIter++;
    pIter += fts3GetVarint32(pIter, &iThis);
  }else{
    iThis = 0;
  }
  while( iThis<iCol ){
    fts3ColumnlistCopy(0, &pIter);
    if( *pIter==0x00 ) return SQLITE_OK;
    pIter++;
    pIter += fts3GetVarint32(pIter, &iThis);
  }
  if( *pIter==0x00 ){
    pIter = 0;
  }

  *ppOut = ((iCol==iThis)?pIter:0);
  return SQLITE_OK;
}

/*
** Free all components of the Fts3Phrase structure that were allocated by
** the eval module. Specifically, this means to free:
**
**   * the contents of pPhrase->doclist, and
**   * any Fts3MultiSegReader objects held by phrase tokens.
*/
SQLITE_PRIVATE void sqlite3Fts3EvalPhraseCleanup(Fts3Phrase *pPhrase){
  if( pPhrase ){
    int i;
    sqlite3_free(pPhrase->doclist.aAll);
    fts3EvalInvalidatePoslist(pPhrase);
    memset(&pPhrase->doclist, 0, sizeof(Fts3Doclist));
    for(i=0; i<pPhrase->nToken; i++){
      fts3SegReaderCursorFree(pPhrase->aToken[i].pSegcsr);
      pPhrase->aToken[i].pSegcsr = 0;
    }
  }
}

/*
** Return SQLITE_CORRUPT_VTAB.
*/
#ifdef SQLITE_DEBUG
SQLITE_PRIVATE int sqlite3Fts3Corrupt(){
  return SQLITE_CORRUPT_VTAB;
}
#endif

#if !SQLITE_CORE
/*
** Initialize API pointer table, if required.
*/
#ifdef _WIN32
__declspec(dllexport)
#endif
SQLITE_API int SQLITE_STDCALL sqlite3_fts3_init(
  sqlite3 *db,
  char **pzErrMsg,
  const sqlite3_api_routines *pApi
){
  SQLITE_EXTENSION_INIT2(pApi)
  return sqlite3Fts3Init(db);
}
#endif

#endif

/************** End of fts3.c ************************************************/
/************** Begin file fts3_aux.c ****************************************/
/*
** 2011 Jan 27
**
** The author disclaims copyright to this source code.  In place of
** a legal notice, here is a blessing:
**
**    May you do good and not evil.
**    May you find forgiveness for yourself and forgive others.
**    May you share freely, never taking more than you give.
** ****************************************************************************** ** */ /* #include "fts3Int.h" */ #if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) /* #include */ /* #include */ typedef struct Fts3auxTable Fts3auxTable; typedef struct Fts3auxCursor Fts3auxCursor; struct Fts3auxTable { sqlite3_vtab base; /* Base class used by SQLite core */ Fts3Table *pFts3Tab; }; struct Fts3auxCursor { sqlite3_vtab_cursor base; /* Base class used by SQLite core */ Fts3MultiSegReader csr; /* Must be right after "base" */ Fts3SegFilter filter; char *zStop; int nStop; /* Byte-length of string zStop */ int iLangid; /* Language id to query */ int isEof; /* True if cursor is at EOF */ sqlite3_int64 iRowid; /* Current rowid */ int iCol; /* Current value of 'col' column */ int nStat; /* Size of aStat[] array */ struct Fts3auxColstats { sqlite3_int64 nDoc; /* 'documents' values for current csr row */ sqlite3_int64 nOcc; /* 'occurrences' values for current csr row */ } *aStat; }; /* ** Schema of the terms table. */ #define FTS3_AUX_SCHEMA \ "CREATE TABLE x(term, col, documents, occurrences, languageid HIDDEN)" /* ** This function does all the work for both the xConnect and xCreate methods. ** These tables have no persistent representation of their own, so xConnect ** and xCreate are identical operations. */ static int fts3auxConnectMethod( sqlite3 *db, /* Database connection */ void *pUnused, /* Unused */ int argc, /* Number of elements in argv array */ const char * const *argv, /* xCreate/xConnect argument array */ sqlite3_vtab **ppVtab, /* OUT: New sqlite3_vtab object */ char **pzErr /* OUT: sqlite3_malloc'd error message */ ){ char const *zDb; /* Name of database (e.g. "main") */ char const *zFts3; /* Name of fts3 table */ int nDb; /* Result of strlen(zDb) */ int nFts3; /* Result of strlen(zFts3) */ int nByte; /* Bytes of space to allocate here */ int rc; /* value returned by declare_vtab() */ Fts3auxTable *p; /* Virtual table object to return */ UNUSED_PARAMETER(pUnused); /* The user should invoke this in one of two forms: ** ** CREATE VIRTUAL TABLE xxx USING fts4aux(fts4-table); ** CREATE VIRTUAL TABLE xxx USING fts4aux(fts4-table-db, fts4-table); */ if( argc!=4 && argc!=5 ) goto bad_args; zDb = argv[1]; nDb = (int)strlen(zDb); if( argc==5 ){ if( nDb==4 && 0==sqlite3_strnicmp("temp", zDb, 4) ){ zDb = argv[3]; nDb = (int)strlen(zDb); zFts3 = argv[4]; }else{ goto bad_args; } }else{ zFts3 = argv[3]; } nFts3 = (int)strlen(zFts3); rc = sqlite3_declare_vtab(db, FTS3_AUX_SCHEMA); if( rc!=SQLITE_OK ) return rc; nByte = sizeof(Fts3auxTable) + sizeof(Fts3Table) + nDb + nFts3 + 2; p = (Fts3auxTable *)sqlite3_malloc(nByte); if( !p ) return SQLITE_NOMEM; memset(p, 0, nByte); p->pFts3Tab = (Fts3Table *)&p[1]; p->pFts3Tab->zDb = (char *)&p->pFts3Tab[1]; p->pFts3Tab->zName = &p->pFts3Tab->zDb[nDb+1]; p->pFts3Tab->db = db; p->pFts3Tab->nIndex = 1; memcpy((char *)p->pFts3Tab->zDb, zDb, nDb); memcpy((char *)p->pFts3Tab->zName, zFts3, nFts3); sqlite3Fts3Dequote((char *)p->pFts3Tab->zName); *ppVtab = (sqlite3_vtab *)p; return SQLITE_OK; bad_args: sqlite3Fts3ErrMsg(pzErr, "invalid arguments to fts4aux constructor"); return SQLITE_ERROR; } /* ** This function does the work for both the xDisconnect and xDestroy methods. ** These tables have no persistent representation of their own, so xDisconnect ** and xDestroy are identical operations. 
*/ static int fts3auxDisconnectMethod(sqlite3_vtab *pVtab){ Fts3auxTable *p = (Fts3auxTable *)pVtab; Fts3Table *pFts3 = p->pFts3Tab; int i; /* Free any prepared statements held */ for(i=0; iaStmt); i++){ sqlite3_finalize(pFts3->aStmt[i]); } sqlite3_free(pFts3->zSegmentsTbl); sqlite3_free(p); return SQLITE_OK; } #define FTS4AUX_EQ_CONSTRAINT 1 #define FTS4AUX_GE_CONSTRAINT 2 #define FTS4AUX_LE_CONSTRAINT 4 /* ** xBestIndex - Analyze a WHERE and ORDER BY clause. */ static int fts3auxBestIndexMethod( sqlite3_vtab *pVTab, sqlite3_index_info *pInfo ){ int i; int iEq = -1; int iGe = -1; int iLe = -1; int iLangid = -1; int iNext = 1; /* Next free argvIndex value */ UNUSED_PARAMETER(pVTab); /* This vtab delivers always results in "ORDER BY term ASC" order. */ if( pInfo->nOrderBy==1 && pInfo->aOrderBy[0].iColumn==0 && pInfo->aOrderBy[0].desc==0 ){ pInfo->orderByConsumed = 1; } /* Search for equality and range constraints on the "term" column. ** And equality constraints on the hidden "languageid" column. */ for(i=0; inConstraint; i++){ if( pInfo->aConstraint[i].usable ){ int op = pInfo->aConstraint[i].op; int iCol = pInfo->aConstraint[i].iColumn; if( iCol==0 ){ if( op==SQLITE_INDEX_CONSTRAINT_EQ ) iEq = i; if( op==SQLITE_INDEX_CONSTRAINT_LT ) iLe = i; if( op==SQLITE_INDEX_CONSTRAINT_LE ) iLe = i; if( op==SQLITE_INDEX_CONSTRAINT_GT ) iGe = i; if( op==SQLITE_INDEX_CONSTRAINT_GE ) iGe = i; } if( iCol==4 ){ if( op==SQLITE_INDEX_CONSTRAINT_EQ ) iLangid = i; } } } if( iEq>=0 ){ pInfo->idxNum = FTS4AUX_EQ_CONSTRAINT; pInfo->aConstraintUsage[iEq].argvIndex = iNext++; pInfo->estimatedCost = 5; }else{ pInfo->idxNum = 0; pInfo->estimatedCost = 20000; if( iGe>=0 ){ pInfo->idxNum += FTS4AUX_GE_CONSTRAINT; pInfo->aConstraintUsage[iGe].argvIndex = iNext++; pInfo->estimatedCost /= 2; } if( iLe>=0 ){ pInfo->idxNum += FTS4AUX_LE_CONSTRAINT; pInfo->aConstraintUsage[iLe].argvIndex = iNext++; pInfo->estimatedCost /= 2; } } if( iLangid>=0 ){ pInfo->aConstraintUsage[iLangid].argvIndex = iNext++; pInfo->estimatedCost--; } return SQLITE_OK; } /* ** xOpen - Open a cursor. */ static int fts3auxOpenMethod(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCsr){ Fts3auxCursor *pCsr; /* Pointer to cursor object to return */ UNUSED_PARAMETER(pVTab); pCsr = (Fts3auxCursor *)sqlite3_malloc(sizeof(Fts3auxCursor)); if( !pCsr ) return SQLITE_NOMEM; memset(pCsr, 0, sizeof(Fts3auxCursor)); *ppCsr = (sqlite3_vtab_cursor *)pCsr; return SQLITE_OK; } /* ** xClose - Close a cursor. */ static int fts3auxCloseMethod(sqlite3_vtab_cursor *pCursor){ Fts3Table *pFts3 = ((Fts3auxTable *)pCursor->pVtab)->pFts3Tab; Fts3auxCursor *pCsr = (Fts3auxCursor *)pCursor; sqlite3Fts3SegmentsClose(pFts3); sqlite3Fts3SegReaderFinish(&pCsr->csr); sqlite3_free((void *)pCsr->filter.zTerm); sqlite3_free(pCsr->zStop); sqlite3_free(pCsr->aStat); sqlite3_free(pCsr); return SQLITE_OK; } static int fts3auxGrowStatArray(Fts3auxCursor *pCsr, int nSize){ if( nSize>pCsr->nStat ){ struct Fts3auxColstats *aNew; aNew = (struct Fts3auxColstats *)sqlite3_realloc(pCsr->aStat, sizeof(struct Fts3auxColstats) * nSize ); if( aNew==0 ) return SQLITE_NOMEM; memset(&aNew[pCsr->nStat], 0, sizeof(struct Fts3auxColstats) * (nSize - pCsr->nStat) ); pCsr->aStat = aNew; pCsr->nStat = nSize; } return SQLITE_OK; } /* ** xNext - Advance the cursor to the next row, if any. */ static int fts3auxNextMethod(sqlite3_vtab_cursor *pCursor){ Fts3auxCursor *pCsr = (Fts3auxCursor *)pCursor; Fts3Table *pFts3 = ((Fts3auxTable *)pCursor->pVtab)->pFts3Tab; int rc; /* Increment our pretend rowid value. 
  */
  pCsr->iRowid++;

  for(pCsr->iCol++; pCsr->iCol<pCsr->nStat; pCsr->iCol++){
    if( pCsr->aStat[pCsr->iCol].nDoc>0 ) return SQLITE_OK;
  }

  rc = sqlite3Fts3SegReaderStep(pFts3, &pCsr->csr);
  if( rc==SQLITE_ROW ){
    int i = 0;
    int nDoclist = pCsr->csr.nDoclist;
    char *aDoclist = pCsr->csr.aDoclist;
    int iCol;
    int eState = 0;

    if( pCsr->zStop ){
      int n = (pCsr->nStop<pCsr->csr.nTerm) ? pCsr->nStop : pCsr->csr.nTerm;
      int mc = memcmp(pCsr->zStop, pCsr->csr.zTerm, n);
      if( mc<0 || (mc==0 && pCsr->csr.nTerm>pCsr->nStop) ){
        pCsr->isEof = 1;
        return SQLITE_OK;
      }
    }

    if( fts3auxGrowStatArray(pCsr, 2) ) return SQLITE_NOMEM;
    memset(pCsr->aStat, 0, sizeof(struct Fts3auxColstats) * pCsr->nStat);
    iCol = 0;

    while( i<nDoclist ){
      sqlite3_int64 v = 0;

      i += sqlite3Fts3GetVarint(&aDoclist[i], &v);
      switch( eState ){
        /* State 0. In this state the integer just read was a docid. */
        case 0:
          pCsr->aStat[0].nDoc++;
          eState = 1;
          iCol = 0;
          break;

        /* State 1. In this state we are expecting either a 1, indicating
        ** that the following integer will be a column number, or the
        ** start of a position list for column 0.
        **
        ** The only difference between state 1 and state 2 is that if the
        ** integer encountered in state 1 is not 0 or 1, then we need to
        ** increment the column 0 "nDoc" count for this term.
        */
        case 1:
          assert( iCol==0 );
          if( v>1 ){
            pCsr->aStat[1].nDoc++;
          }
          eState = 2;
          /* fall through */

        case 2:
          if( v==0 ){       /* 0x00. Next integer will be a docid. */
            eState = 0;
          }else if( v==1 ){ /* 0x01. Next integer will be a column number. */
            eState = 3;
          }else{            /* 2 or greater. A position. */
            pCsr->aStat[iCol+1].nOcc++;
            pCsr->aStat[0].nOcc++;
          }
          break;

        /* State 3. The integer just read is a column number. */
        default: assert( eState==3 );
          iCol = (int)v;
          if( fts3auxGrowStatArray(pCsr, iCol+2) ) return SQLITE_NOMEM;
          pCsr->aStat[iCol+1].nDoc++;
          eState = 2;
          break;
      }
    }

    pCsr->iCol = 0;
    rc = SQLITE_OK;
  }else{
    pCsr->isEof = 1;
  }
  return rc;
}

/*
** xFilter - Initialize a cursor to point at the start of its data.
*/
static int fts3auxFilterMethod(
  sqlite3_vtab_cursor *pCursor,   /* The cursor used for this query */
  int idxNum,                     /* Strategy index */
  const char *idxStr,             /* Unused */
  int nVal,                       /* Number of elements in apVal */
  sqlite3_value **apVal           /* Arguments for the indexing scheme */
){
  Fts3auxCursor *pCsr = (Fts3auxCursor *)pCursor;
  Fts3Table *pFts3 = ((Fts3auxTable *)pCursor->pVtab)->pFts3Tab;
  int rc;
  int isScan = 0;
  int iLangVal = 0;               /* Language id to query */

  int iEq = -1;                   /* Index of term=? value in apVal */
  int iGe = -1;                   /* Index of term>=? value in apVal */
  int iLe = -1;                   /* Index of term<=? value in apVal */
  int iLangid = -1;               /* Index of languageid=?
                                     value in apVal */
  int iNext = 0;

  UNUSED_PARAMETER(nVal);
  UNUSED_PARAMETER(idxStr);

  assert( idxStr==0 );
  assert( idxNum==FTS4AUX_EQ_CONSTRAINT || idxNum==0
       || idxNum==FTS4AUX_LE_CONSTRAINT || idxNum==FTS4AUX_GE_CONSTRAINT
       || idxNum==(FTS4AUX_LE_CONSTRAINT|FTS4AUX_GE_CONSTRAINT)
  );

  if( idxNum==FTS4AUX_EQ_CONSTRAINT ){
    iEq = iNext++;
  }else{
    isScan = 1;
    if( idxNum & FTS4AUX_GE_CONSTRAINT ){
      iGe = iNext++;
    }
    if( idxNum & FTS4AUX_LE_CONSTRAINT ){
      iLe = iNext++;
    }
  }
  if( iNext<nVal ){
    iLangid = iNext++;
  }

  /* In case this cursor is being reused, close and zero it. */
  testcase(pCsr->filter.zTerm);
  sqlite3Fts3SegReaderFinish(&pCsr->csr);
  sqlite3_free((void *)pCsr->filter.zTerm);
  sqlite3_free(pCsr->aStat);
  memset(&pCsr->csr, 0, ((u8*)&pCsr[1]) - (u8*)&pCsr->csr);

  pCsr->filter.flags = FTS3_SEGMENT_REQUIRE_POS|FTS3_SEGMENT_IGNORE_EMPTY;
  if( isScan ) pCsr->filter.flags |= FTS3_SEGMENT_SCAN;

  if( iEq>=0 || iGe>=0 ){
    const unsigned char *zStr = sqlite3_value_text(apVal[0]);
    assert( (iEq==0 && iGe==-1) || (iEq==-1 && iGe==0) );
    if( zStr ){
      pCsr->filter.zTerm = sqlite3_mprintf("%s", zStr);
      pCsr->filter.nTerm = sqlite3_value_bytes(apVal[0]);
      if( pCsr->filter.zTerm==0 ) return SQLITE_NOMEM;
    }
  }

  if( iLe>=0 ){
    pCsr->zStop = sqlite3_mprintf("%s", sqlite3_value_text(apVal[iLe]));
    pCsr->nStop = sqlite3_value_bytes(apVal[iLe]);
    if( pCsr->zStop==0 ) return SQLITE_NOMEM;
  }

  if( iLangid>=0 ){
    iLangVal = sqlite3_value_int(apVal[iLangid]);

    /* If the user specified a negative value for the languageid, use zero
    ** instead. This works, as the "languageid=?" constraint will also
    ** be tested by the VDBE layer. The test will always be false (since
    ** this module will not return a row with a negative languageid), and
    ** so the overall query will return zero rows.  */
    if( iLangVal<0 ) iLangVal = 0;
  }
  pCsr->iLangid = iLangVal;

  rc = sqlite3Fts3SegReaderCursor(pFts3, iLangVal, 0, FTS3_SEGCURSOR_ALL,
      pCsr->filter.zTerm, pCsr->filter.nTerm, 0, isScan, &pCsr->csr
  );
  if( rc==SQLITE_OK ){
    rc = sqlite3Fts3SegReaderStart(pFts3, &pCsr->csr, &pCsr->filter);
  }

  if( rc==SQLITE_OK ) rc = fts3auxNextMethod(pCursor);
  return rc;
}

/*
** xEof - Return true if the cursor is at EOF, or false otherwise.
*/
static int fts3auxEofMethod(sqlite3_vtab_cursor *pCursor){
  Fts3auxCursor *pCsr = (Fts3auxCursor *)pCursor;
  return pCsr->isEof;
}

/*
** xColumn - Return a column value.
*/
static int fts3auxColumnMethod(
  sqlite3_vtab_cursor *pCursor,   /* Cursor to retrieve value from */
  sqlite3_context *pCtx,          /* Context for sqlite3_result_xxx() calls */
  int iCol                        /* Index of column to read value from */
){
  Fts3auxCursor *p = (Fts3auxCursor *)pCursor;

  assert( p->isEof==0 );
  switch( iCol ){
    case 0: /* term */
      sqlite3_result_text(pCtx, p->csr.zTerm, p->csr.nTerm, SQLITE_TRANSIENT);
      break;

    case 1: /* col */
      if( p->iCol ){
        sqlite3_result_int(pCtx, p->iCol-1);
      }else{
        sqlite3_result_text(pCtx, "*", -1, SQLITE_STATIC);
      }
      break;

    case 2: /* documents */
      sqlite3_result_int64(pCtx, p->aStat[p->iCol].nDoc);
      break;

    case 3: /* occurrences */
      sqlite3_result_int64(pCtx, p->aStat[p->iCol].nOcc);
      break;

    default: /* languageid */
      assert( iCol==4 );
      sqlite3_result_int(pCtx, p->iLangid);
      break;
  }

  return SQLITE_OK;
}

/*
** xRowid - Return the current rowid for the cursor.
*/
static int fts3auxRowidMethod(
  sqlite3_vtab_cursor *pCursor,   /* Cursor to retrieve value from */
  sqlite_int64 *pRowid            /* OUT: Rowid value */
){
  Fts3auxCursor *pCsr = (Fts3auxCursor *)pCursor;
  *pRowid = pCsr->iRowid;
  return SQLITE_OK;
}

/*
** Register the fts3aux module with database connection db. Return SQLITE_OK
** if successful or an error code if sqlite3_create_module() fails.
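**
** Once registered, the module is available from SQL. A purely illustrative
** example (table names are hypothetical):
**
**   CREATE VIRTUAL TABLE ft USING fts4(body);
**   CREATE VIRTUAL TABLE ft_terms USING fts4aux(ft);
**   SELECT term, documents, occurrences FROM ft_terms WHERE col = '*';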
*/ SQLITE_PRIVATE int sqlite3Fts3InitAux(sqlite3 *db){ static const sqlite3_module fts3aux_module = { 0, /* iVersion */ fts3auxConnectMethod, /* xCreate */ fts3auxConnectMethod, /* xConnect */ fts3auxBestIndexMethod, /* xBestIndex */ fts3auxDisconnectMethod, /* xDisconnect */ fts3auxDisconnectMethod, /* xDestroy */ fts3auxOpenMethod, /* xOpen */ fts3auxCloseMethod, /* xClose */ fts3auxFilterMethod, /* xFilter */ fts3auxNextMethod, /* xNext */ fts3auxEofMethod, /* xEof */ fts3auxColumnMethod, /* xColumn */ fts3auxRowidMethod, /* xRowid */ 0, /* xUpdate */ 0, /* xBegin */ 0, /* xSync */ 0, /* xCommit */ 0, /* xRollback */ 0, /* xFindFunction */ 0, /* xRename */ 0, /* xSavepoint */ 0, /* xRelease */ 0 /* xRollbackTo */ }; int rc; /* Return code */ rc = sqlite3_create_module(db, "fts4aux", &fts3aux_module, 0); return rc; } #endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */ /************** End of fts3_aux.c ********************************************/ /************** Begin file fts3_expr.c ***************************************/ /* ** 2008 Nov 28 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ****************************************************************************** ** ** This module contains code that implements a parser for fts3 query strings ** (the right-hand argument to the MATCH operator). Because the supported ** syntax is relatively simple, the whole tokenizer/parser system is ** hand-coded. */ /* #include "fts3Int.h" */ #if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) /* ** By default, this module parses the legacy syntax that has been ** traditionally used by fts3. Or, if SQLITE_ENABLE_FTS3_PARENTHESIS ** is defined, then it uses the new syntax. The differences between ** the new and the old syntaxes are: ** ** a) The new syntax supports parenthesis. The old does not. ** ** b) The new syntax supports the AND and NOT operators. The old does not. ** ** c) The old syntax supports the "-" token qualifier. This is not ** supported by the new syntax (it is replaced by the NOT operator). ** ** d) When using the old syntax, the OR operator has a greater precedence ** than an implicit AND. When using the new, both implicity and explicit ** AND operators have a higher precedence than OR. ** ** If compiled with SQLITE_TEST defined, then this module exports the ** symbol "int sqlite3_fts3_enable_parentheses". Setting this variable ** to zero causes the module to use the old syntax. If it is set to ** non-zero the new syntax is activated. This is so both syntaxes can ** be tested using a single build of testfixture. ** ** The following describes the syntax supported by the fts3 MATCH ** operator in a similar format to that used by the lemon parser ** generator. This module does not use actually lemon, it uses a ** custom parser. ** ** query ::= andexpr (OR andexpr)*. ** ** andexpr ::= notexpr (AND? notexpr)*. ** ** notexpr ::= nearexpr (NOT nearexpr|-TOKEN)*. ** notexpr ::= LP query RP. ** ** nearexpr ::= phrase (NEAR distance_opt nearexpr)*. ** ** distance_opt ::= . ** distance_opt ::= / INTEGER. ** ** phrase ::= TOKEN. ** phrase ::= COLUMN:TOKEN. ** phrase ::= "TOKEN TOKEN TOKEN...". 
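**
** A few illustrative MATCH expressions (the column name "title" is
** hypothetical; parentheses and explicit AND require the new syntax):
**
**   sqlite database                  -- implicit AND of two terms
**   "full text" NEAR/5 index         -- a phrase within 5 tokens of a term
**   title:linux OR title:bsd         -- column-qualified terms joined by OR
**   (linux OR bsd) AND server        -- parentheses (new syntax only)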
*/ #ifdef SQLITE_TEST SQLITE_API int sqlite3_fts3_enable_parentheses = 0; #else # ifdef SQLITE_ENABLE_FTS3_PARENTHESIS # define sqlite3_fts3_enable_parentheses 1 # else # define sqlite3_fts3_enable_parentheses 0 # endif #endif /* ** Default span for NEAR operators. */ #define SQLITE_FTS3_DEFAULT_NEAR_PARAM 10 /* #include */ /* #include */ /* ** isNot: ** This variable is used by function getNextNode(). When getNextNode() is ** called, it sets ParseContext.isNot to true if the 'next node' is a ** FTSQUERY_PHRASE with a unary "-" attached to it. i.e. "mysql" in the ** FTS3 query "sqlite -mysql". Otherwise, ParseContext.isNot is set to ** zero. */ typedef struct ParseContext ParseContext; struct ParseContext { sqlite3_tokenizer *pTokenizer; /* Tokenizer module */ int iLangid; /* Language id used with tokenizer */ const char **azCol; /* Array of column names for fts3 table */ int bFts4; /* True to allow FTS4-only syntax */ int nCol; /* Number of entries in azCol[] */ int iDefaultCol; /* Default column to query */ int isNot; /* True if getNextNode() sees a unary - */ sqlite3_context *pCtx; /* Write error message here */ int nNest; /* Number of nested brackets */ }; /* ** This function is equivalent to the standard isspace() function. ** ** The standard isspace() can be awkward to use safely, because although it ** is defined to accept an argument of type int, its behavior when passed ** an integer that falls outside of the range of the unsigned char type ** is undefined (and sometimes, "undefined" means segfault). This wrapper ** is defined to accept an argument of type char, and always returns 0 for ** any values that fall outside of the range of the unsigned char type (i.e. ** negative values). */ static int fts3isspace(char c){ return c==' ' || c=='\t' || c=='\n' || c=='\r' || c=='\v' || c=='\f'; } /* ** Allocate nByte bytes of memory using sqlite3_malloc(). If successful, ** zero the memory before returning a pointer to it. If unsuccessful, ** return NULL. */ static void *fts3MallocZero(int nByte){ void *pRet = sqlite3_malloc(nByte); if( pRet ) memset(pRet, 0, nByte); return pRet; } SQLITE_PRIVATE int sqlite3Fts3OpenTokenizer( sqlite3_tokenizer *pTokenizer, int iLangid, const char *z, int n, sqlite3_tokenizer_cursor **ppCsr ){ sqlite3_tokenizer_module const *pModule = pTokenizer->pModule; sqlite3_tokenizer_cursor *pCsr = 0; int rc; rc = pModule->xOpen(pTokenizer, z, n, &pCsr); assert( rc==SQLITE_OK || pCsr==0 ); if( rc==SQLITE_OK ){ pCsr->pTokenizer = pTokenizer; if( pModule->iVersion>=1 ){ rc = pModule->xLanguageid(pCsr, iLangid); if( rc!=SQLITE_OK ){ pModule->xClose(pCsr); pCsr = 0; } } } *ppCsr = pCsr; return rc; } /* ** Function getNextNode(), which is called by fts3ExprParse(), may itself ** call fts3ExprParse(). So this forward declaration is required. */ static int fts3ExprParse(ParseContext *, const char *, int, Fts3Expr **, int *); /* ** Extract the next token from buffer z (length n) using the tokenizer ** and other information (column names etc.) in pParse. Create an Fts3Expr ** structure of type FTSQUERY_PHRASE containing a phrase consisting of this ** single token and set *ppExpr to point to it. If the end of the buffer is ** reached before a token is found, set *ppExpr to zero. It is the ** responsibility of the caller to eventually deallocate the allocated ** Fts3Expr structure (if any) by passing it to sqlite3_free(). ** ** Return SQLITE_OK if successful, or SQLITE_NOMEM if a memory allocation ** fails. 
*/
static int getNextToken(
  ParseContext *pParse,                   /* fts3 query parse context */
  int iCol,                               /* Value for Fts3Phrase.iColumn */
  const char *z, int n,                   /* Input string */
  Fts3Expr **ppExpr,                      /* OUT: expression */
  int *pnConsumed                         /* OUT: Number of bytes consumed */
){
  sqlite3_tokenizer *pTokenizer = pParse->pTokenizer;
  sqlite3_tokenizer_module const *pModule = pTokenizer->pModule;
  int rc;
  sqlite3_tokenizer_cursor *pCursor;
  Fts3Expr *pRet = 0;
  int i = 0;

  /* Set variable i to the maximum number of bytes of input to tokenize. */
  for(i=0; i<n; i++){
    if( sqlite3_fts3_enable_parentheses && (z[i]=='(' || z[i]==')') ) break;
  }

  rc = sqlite3Fts3OpenTokenizer(pTokenizer, pParse->iLangid, z, i, &pCursor);
  if( rc==SQLITE_OK ){
    const char *zToken;
    int nToken = 0, iStart = 0, iEnd = 0, iPosition = 0;
    int nByte;                              /* total space to allocate */

    rc = pModule->xNext(pCursor, &zToken, &nToken, &iStart, &iEnd, &iPosition);
    if( rc==SQLITE_OK ){
      nByte = sizeof(Fts3Expr) + sizeof(Fts3Phrase) + nToken;
      pRet = (Fts3Expr *)fts3MallocZero(nByte);
      if( !pRet ){
        rc = SQLITE_NOMEM;
      }else{
        pRet->eType = FTSQUERY_PHRASE;
        pRet->pPhrase = (Fts3Phrase *)&pRet[1];
        pRet->pPhrase->nToken = 1;
        pRet->pPhrase->iColumn = iCol;
        pRet->pPhrase->aToken[0].n = nToken;
        pRet->pPhrase->aToken[0].z = (char *)&pRet->pPhrase[1];
        memcpy(pRet->pPhrase->aToken[0].z, zToken, nToken);

        if( iEnd<n && z[iEnd]=='*' ){
          pRet->pPhrase->aToken[0].isPrefix = 1;
          iEnd++;
        }

        while( 1 ){
          if( !sqlite3_fts3_enable_parentheses
           && iStart>0 && z[iStart-1]=='-'
          ){
            pParse->isNot = 1;
            iStart--;
          }else if( pParse->bFts4 && iStart>0 && z[iStart-1]=='^' ){
            pRet->pPhrase->aToken[0].bFirst = 1;
            iStart--;
          }else{
            break;
          }
        }

      }
      *pnConsumed = iEnd;
    }else if( i && rc==SQLITE_DONE ){
      rc = SQLITE_OK;
    }

    pModule->xClose(pCursor);
  }

  *ppExpr = pRet;
  return rc;
}

/*
** Enlarge a memory allocation. If an out-of-memory allocation occurs,
** then free the old allocation.
*/
static void *fts3ReallocOrFree(void *pOrig, int nNew){
  void *pRet = sqlite3_realloc(pOrig, nNew);
  if( !pRet ){
    sqlite3_free(pOrig);
  }
  return pRet;
}

/*
** Buffer zInput, length nInput, contains the contents of a quoted string
** that appeared as part of an fts3 query expression. Neither quote character
** is included in the buffer. This function attempts to tokenize the entire
** input buffer and create an Fts3Expr structure of type FTSQUERY_PHRASE
** containing the results.
**
** If successful, SQLITE_OK is returned and *ppExpr set to point at the
** allocated Fts3Expr structure. Otherwise, either SQLITE_NOMEM (out of memory
** error) or SQLITE_ERROR (tokenization error) is returned and *ppExpr set
** to 0.
*/
static int getNextString(
  ParseContext *pParse,                   /* fts3 query parse context */
  const char *zInput, int nInput,         /* Input string */
  Fts3Expr **ppExpr                       /* OUT: expression */
){
  sqlite3_tokenizer *pTokenizer = pParse->pTokenizer;
  sqlite3_tokenizer_module const *pModule = pTokenizer->pModule;
  int rc;
  Fts3Expr *p = 0;
  sqlite3_tokenizer_cursor *pCursor = 0;
  char *zTemp = 0;
  int nTemp = 0;

  const int nSpace = sizeof(Fts3Expr) + sizeof(Fts3Phrase);
  int nToken = 0;

  /* The final Fts3Expr data structure, including the Fts3Phrase and
  ** Fts3PhraseToken structures and the token buffers, is stored as a single
  ** allocation so that the expression can be freed with a single call to
  ** sqlite3_free(). Setting this up requires a two pass approach.
  **
  ** The first pass, in the block below, uses a tokenizer cursor to iterate
  ** through the tokens in the expression. This pass uses fts3ReallocOrFree()
  ** to assemble data in two dynamic buffers:
  **
  **   Buffer p: Points to the Fts3Expr structure, followed by the Fts3Phrase
  **             structure, followed by the array of Fts3PhraseToken
  **             structures.
  **             This pass only populates the Fts3PhraseToken array.
  **
  **   Buffer zTemp: Contains copies of all tokens.
  **
  ** The second pass, in the block that begins "if( rc==SQLITE_DONE )" below,
  ** appends buffer zTemp to buffer p, and fills in the Fts3Expr and Fts3Phrase
  ** structures.
  */
  rc = sqlite3Fts3OpenTokenizer(
      pTokenizer, pParse->iLangid, zInput, nInput, &pCursor);
  if( rc==SQLITE_OK ){
    int ii;
    for(ii=0; rc==SQLITE_OK; ii++){
      const char *zByte;
      int nByte = 0, iBegin = 0, iEnd = 0, iPos = 0;
      rc = pModule->xNext(pCursor, &zByte, &nByte, &iBegin, &iEnd, &iPos);
      if( rc==SQLITE_OK ){
        Fts3PhraseToken *pToken;

        p = fts3ReallocOrFree(p, nSpace + ii*sizeof(Fts3PhraseToken));
        if( !p ) goto no_mem;

        zTemp = fts3ReallocOrFree(zTemp, nTemp + nByte);
        if( !zTemp ) goto no_mem;

        assert( nToken==ii );
        pToken = &((Fts3Phrase *)(&p[1]))->aToken[ii];
        memset(pToken, 0, sizeof(Fts3PhraseToken));

        memcpy(&zTemp[nTemp], zByte, nByte);
        nTemp += nByte;

        pToken->n = nByte;
        pToken->isPrefix = (iEnd<nInput && zInput[iEnd]=='*');
        pToken->bFirst = (iBegin>0 && zInput[iBegin-1]=='^');
        nToken = ii+1;
      }
    }

    pModule->xClose(pCursor);
    pCursor = 0;
  }

  if( rc==SQLITE_DONE ){
    int jj;
    char *zBuf = 0;

    p = fts3ReallocOrFree(p, nSpace + nToken*sizeof(Fts3PhraseToken) + nTemp);
    if( !p ) goto no_mem;
    memset(p, 0, (char *)&(((Fts3Phrase *)&p[1])->aToken[0])-(char *)p);
    p->eType = FTSQUERY_PHRASE;
    p->pPhrase = (Fts3Phrase *)&p[1];
    p->pPhrase->iColumn = pParse->iDefaultCol;
    p->pPhrase->nToken = nToken;

    zBuf = (char *)&p->pPhrase->aToken[nToken];
    if( zTemp ){
      memcpy(zBuf, zTemp, nTemp);
      sqlite3_free(zTemp);
    }else{
      assert( nTemp==0 );
    }

    for(jj=0; jj<p->pPhrase->nToken; jj++){
      p->pPhrase->aToken[jj].z = zBuf;
      zBuf += p->pPhrase->aToken[jj].n;
    }
    rc = SQLITE_OK;
  }

  *ppExpr = p;
  return rc;
no_mem:

  if( pCursor ){
    pModule->xClose(pCursor);
  }
  sqlite3_free(zTemp);
  sqlite3_free(p);
  *ppExpr = 0;
  return SQLITE_NOMEM;
}

/*
** The output variable *ppExpr is populated with an allocated Fts3Expr
** structure, or set to 0 if the end of the input buffer is reached.
**
** Returns an SQLite error code. SQLITE_OK if everything works, SQLITE_NOMEM
** if a malloc failure occurs, or SQLITE_ERROR if a parse error is encountered.
** If SQLITE_ERROR is returned, pContext is populated with an error message.
*/
static int getNextNode(
  ParseContext *pParse,                   /* fts3 query parse context */
  const char *z, int n,                   /* Input string */
  Fts3Expr **ppExpr,                      /* OUT: expression */
  int *pnConsumed                         /* OUT: Number of bytes consumed */
){
  static const struct Fts3Keyword {
    char *z;                              /* Keyword text */
    unsigned char n;                      /* Length of the keyword */
    unsigned char parenOnly;              /* Only valid in paren mode */
    unsigned char eType;                  /* Keyword code */
  } aKeyword[] = {
    { "OR" ,  2, 0, FTSQUERY_OR   },
    { "AND",  3, 1, FTSQUERY_AND  },
    { "NOT",  3, 1, FTSQUERY_NOT  },
    { "NEAR", 4, 0, FTSQUERY_NEAR }
  };
  int ii;
  int iCol;
  int iColLen;
  int rc;
  Fts3Expr *pRet = 0;

  const char *zInput = z;
  int nInput = n;

  pParse->isNot = 0;

  /* Skip over any whitespace before checking for a keyword, an open or
  ** close bracket, or a quoted string. */
  while( nInput>0 && fts3isspace(*zInput) ){
    nInput--;
    zInput++;
  }
  if( nInput==0 ){
    return SQLITE_DONE;
  }

  /* See if we are dealing with a keyword. */
  for(ii=0; ii<(int)(sizeof(aKeyword)/sizeof(struct Fts3Keyword)); ii++){
    const struct Fts3Keyword *pKey = &aKeyword[ii];

    if( (pKey->parenOnly & ~sqlite3_fts3_enable_parentheses)!=0 ){
      continue;
    }

    if( nInput>=pKey->n && 0==memcmp(zInput, pKey->z, pKey->n) ){
      int nNear = SQLITE_FTS3_DEFAULT_NEAR_PARAM;
      int nKey = pKey->n;
      char cNext;

      /* If this is a "NEAR" keyword, check for an explicit nearness.
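      ** For example, in "database NEAR/2 index" the "/2" sets an explicit
      ** nearness of 2 tokens, while plain "database NEAR index" falls back
      ** to the default SQLITE_FTS3_DEFAULT_NEAR_PARAM (10).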
      */
      if( pKey->eType==FTSQUERY_NEAR ){
        assert( nKey==4 );
        if( zInput[4]=='/' && zInput[5]>='0' && zInput[5]<='9' ){
          nNear = 0;
          for(nKey=5; zInput[nKey]>='0' && zInput[nKey]<='9'; nKey++){
            nNear = nNear * 10 + (zInput[nKey] - '0');
          }
        }
      }

      /* At this point this is probably a keyword. But for that to be true,
      ** the next byte must contain either whitespace, an open or close
      ** parenthesis, a quote character, or EOF.
      */
      cNext = zInput[nKey];
      if( fts3isspace(cNext)
       || cNext=='"' || cNext=='(' || cNext==')' || cNext==0
      ){
        pRet = (Fts3Expr *)fts3MallocZero(sizeof(Fts3Expr));
        if( !pRet ){
          return SQLITE_NOMEM;
        }
        pRet->eType = pKey->eType;
        pRet->nNear = nNear;
        *ppExpr = pRet;
        *pnConsumed = (int)((zInput - z) + nKey);
        return SQLITE_OK;
      }

      /* Turns out that wasn't a keyword after all. This happens if the
      ** user has supplied a token such as "ORacle". Continue.
      */
    }
  }

  /* See if we are dealing with a quoted phrase. If this is the case, then
  ** search for the closing quote and pass the whole string to getNextString()
  ** for processing. This is easy to do, as fts3 has no syntax for escaping
  ** a quote character embedded in a string.
  */
  if( *zInput=='"' ){
    for(ii=1; ii<nInput && zInput[ii]!='"'; ii++);
    *pnConsumed = (int)((zInput - z) + ii + 1);
    if( ii==nInput ){
      return SQLITE_ERROR;
    }
    return getNextString(pParse, &zInput[1], ii-1, ppExpr);
  }

  if( sqlite3_fts3_enable_parentheses ){
    if( *zInput=='(' ){
      int nConsumed = 0;
      pParse->nNest++;
      rc = fts3ExprParse(pParse, zInput+1, nInput-1, ppExpr, &nConsumed);
      if( rc==SQLITE_OK && !*ppExpr ){ rc = SQLITE_DONE; }
      *pnConsumed = (int)(zInput - z) + 1 + nConsumed;
      return rc;
    }else if( *zInput==')' ){
      pParse->nNest--;
      *pnConsumed = (int)((zInput - z) + 1);
      *ppExpr = 0;
      return SQLITE_DONE;
    }
  }

  /* If control flows to this point, this must be a regular token, or
  ** the end of the input. Read a regular token using the sqlite3_tokenizer
  ** interface. Before doing so, figure out if there is an explicit
  ** column specifier for the token.
  **
  ** TODO: Strangely, it is not possible to associate a column specifier
  ** with a quoted phrase, only with a single token. Not sure if this was
  ** an implementation artifact or an intentional decision when fts3 was
  ** first implemented. Whichever it was, this module duplicates the
  ** limitation.
  */
  iCol = pParse->iDefaultCol;
  iColLen = 0;
  for(ii=0; ii<pParse->nCol; ii++){
    const char *zStr = pParse->azCol[ii];
    int nStr = (int)strlen(zStr);
    if( nInput>nStr && zInput[nStr]==':'
     && sqlite3_strnicmp(zStr, zInput, nStr)==0
    ){
      iCol = ii;
      iColLen = (int)((zInput - z) + nStr + 1);
      break;
    }
  }
  rc = getNextToken(pParse, iCol, &z[iColLen], n-iColLen, ppExpr, pnConsumed);
  *pnConsumed += iColLen;
  return rc;
}

/*
** The argument is an Fts3Expr structure for a binary operator (any type
** except an FTSQUERY_PHRASE). Return an integer value representing the
** precedence of the operator. Lower values have a higher precedence (i.e.
** group more tightly). For example, in the C language, the == operator
** groups more tightly than ||, and would therefore have a higher precedence.
**
** When using the new fts3 query syntax (when SQLITE_ENABLE_FTS3_PARENTHESIS
** is defined), the order of the operators in precedence from highest to
** lowest is:
**
**   NEAR
**   NOT
**   AND (including implicit ANDs)
**   OR
**
** Note that when using the old query syntax, the OR operator has a higher
** precedence than the AND operator.
*/
static int opPrecedence(Fts3Expr *p){
  assert( p->eType!=FTSQUERY_PHRASE );
  if( sqlite3_fts3_enable_parentheses ){
    return p->eType;
  }else if( p->eType==FTSQUERY_NEAR ){
    return 1;
  }else if( p->eType==FTSQUERY_OR ){
    return 2;
  }
  assert( p->eType==FTSQUERY_AND );
  return 3;
}

/*
** Argument ppHead contains a pointer to the current head of a query
** expression tree being parsed.
pPrev is the expression node most recently ** inserted into the tree. This function adds pNew, which is always a binary ** operator node, into the expression tree based on the relative precedence ** of pNew and the existing nodes of the tree. This may result in the head ** of the tree changing, in which case *ppHead is set to the new root node. */ static void insertBinaryOperator( Fts3Expr **ppHead, /* Pointer to the root node of a tree */ Fts3Expr *pPrev, /* Node most recently inserted into the tree */ Fts3Expr *pNew /* New binary node to insert into expression tree */ ){ Fts3Expr *pSplit = pPrev; while( pSplit->pParent && opPrecedence(pSplit->pParent)<=opPrecedence(pNew) ){ pSplit = pSplit->pParent; } if( pSplit->pParent ){ assert( pSplit->pParent->pRight==pSplit ); pSplit->pParent->pRight = pNew; pNew->pParent = pSplit->pParent; }else{ *ppHead = pNew; } pNew->pLeft = pSplit; pSplit->pParent = pNew; } /* ** Parse the fts3 query expression found in buffer z, length n. This function ** returns either when the end of the buffer is reached or an unmatched ** closing bracket - ')' - is encountered. ** ** If successful, SQLITE_OK is returned, *ppExpr is set to point to the ** parsed form of the expression and *pnConsumed is set to the number of ** bytes read from buffer z. Otherwise, *ppExpr is set to 0 and SQLITE_NOMEM ** (out of memory error) or SQLITE_ERROR (parse error) is returned. */ static int fts3ExprParse( ParseContext *pParse, /* fts3 query parse context */ const char *z, int n, /* Text of MATCH query */ Fts3Expr **ppExpr, /* OUT: Parsed query structure */ int *pnConsumed /* OUT: Number of bytes consumed */ ){ Fts3Expr *pRet = 0; Fts3Expr *pPrev = 0; Fts3Expr *pNotBranch = 0; /* Only used in legacy parse mode */ int nIn = n; const char *zIn = z; int rc = SQLITE_OK; int isRequirePhrase = 1; while( rc==SQLITE_OK ){ Fts3Expr *p = 0; int nByte = 0; rc = getNextNode(pParse, zIn, nIn, &p, &nByte); assert( nByte>0 || (rc!=SQLITE_OK && p==0) ); if( rc==SQLITE_OK ){ if( p ){ int isPhrase; if( !sqlite3_fts3_enable_parentheses && p->eType==FTSQUERY_PHRASE && pParse->isNot ){ /* Create an implicit NOT operator. */ Fts3Expr *pNot = fts3MallocZero(sizeof(Fts3Expr)); if( !pNot ){ sqlite3Fts3ExprFree(p); rc = SQLITE_NOMEM; goto exprparse_out; } pNot->eType = FTSQUERY_NOT; pNot->pRight = p; p->pParent = pNot; if( pNotBranch ){ pNot->pLeft = pNotBranch; pNotBranch->pParent = pNot; } pNotBranch = pNot; p = pPrev; }else{ int eType = p->eType; isPhrase = (eType==FTSQUERY_PHRASE || p->pLeft); /* The isRequirePhrase variable is set to true if a phrase or ** an expression contained in parenthesis is required. If a ** binary operator (AND, OR, NOT or NEAR) is encounted when ** isRequirePhrase is set, this is a syntax error. */ if( !isPhrase && isRequirePhrase ){ sqlite3Fts3ExprFree(p); rc = SQLITE_ERROR; goto exprparse_out; } if( isPhrase && !isRequirePhrase ){ /* Insert an implicit AND operator. */ Fts3Expr *pAnd; assert( pRet && pPrev ); pAnd = fts3MallocZero(sizeof(Fts3Expr)); if( !pAnd ){ sqlite3Fts3ExprFree(p); rc = SQLITE_NOMEM; goto exprparse_out; } pAnd->eType = FTSQUERY_AND; insertBinaryOperator(&pRet, pPrev, pAnd); pPrev = pAnd; } /* This test catches attempts to make either operand of a NEAR ** operator something other than a phrase. For example, either of ** the following: ** ** (bracketed expression) NEAR phrase ** phrase NEAR (bracketed expression) ** ** Return an error in either case. 
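** For example (with the enhanced query syntax), a query such as
** "(apple OR pear) NEAR banana" is rejected with SQLITE_ERROR here, because
** the left-hand operand of the NEAR is a parenthesised sub-expression rather
** than a phrase.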
*/ if( pPrev && ( (eType==FTSQUERY_NEAR && !isPhrase && pPrev->eType!=FTSQUERY_PHRASE) || (eType!=FTSQUERY_PHRASE && isPhrase && pPrev->eType==FTSQUERY_NEAR) )){ sqlite3Fts3ExprFree(p); rc = SQLITE_ERROR; goto exprparse_out; } if( isPhrase ){ if( pRet ){ assert( pPrev && pPrev->pLeft && pPrev->pRight==0 ); pPrev->pRight = p; p->pParent = pPrev; }else{ pRet = p; } }else{ insertBinaryOperator(&pRet, pPrev, p); } isRequirePhrase = !isPhrase; } pPrev = p; } assert( nByte>0 ); } assert( rc!=SQLITE_OK || (nByte>0 && nByte<=nIn) ); nIn -= nByte; zIn += nByte; } if( rc==SQLITE_DONE && pRet && isRequirePhrase ){ rc = SQLITE_ERROR; } if( rc==SQLITE_DONE ){ rc = SQLITE_OK; if( !sqlite3_fts3_enable_parentheses && pNotBranch ){ if( !pRet ){ rc = SQLITE_ERROR; }else{ Fts3Expr *pIter = pNotBranch; while( pIter->pLeft ){ pIter = pIter->pLeft; } pIter->pLeft = pRet; pRet->pParent = pIter; pRet = pNotBranch; } } } *pnConsumed = n - nIn; exprparse_out: if( rc!=SQLITE_OK ){ sqlite3Fts3ExprFree(pRet); sqlite3Fts3ExprFree(pNotBranch); pRet = 0; } *ppExpr = pRet; return rc; } /* ** Return SQLITE_ERROR if the maximum depth of the expression tree passed ** as the only argument is more than nMaxDepth. */ static int fts3ExprCheckDepth(Fts3Expr *p, int nMaxDepth){ int rc = SQLITE_OK; if( p ){ if( nMaxDepth<0 ){ rc = SQLITE_TOOBIG; }else{ rc = fts3ExprCheckDepth(p->pLeft, nMaxDepth-1); if( rc==SQLITE_OK ){ rc = fts3ExprCheckDepth(p->pRight, nMaxDepth-1); } } } return rc; } /* ** This function attempts to transform the expression tree at (*pp) to ** an equivalent but more balanced form. The tree is modified in place. ** If successful, SQLITE_OK is returned and (*pp) set to point to the ** new root expression node. ** ** nMaxDepth is the maximum allowable depth of the balanced sub-tree. ** ** Otherwise, if an error occurs, an SQLite error code is returned and ** expression (*pp) freed. */ static int fts3ExprBalance(Fts3Expr **pp, int nMaxDepth){ int rc = SQLITE_OK; /* Return code */ Fts3Expr *pRoot = *pp; /* Initial root node */ Fts3Expr *pFree = 0; /* List of free nodes. Linked by pParent. */ int eType = pRoot->eType; /* Type of node in this tree */ if( nMaxDepth==0 ){ rc = SQLITE_ERROR; } if( rc==SQLITE_OK && (eType==FTSQUERY_AND || eType==FTSQUERY_OR) ){ Fts3Expr **apLeaf; apLeaf = (Fts3Expr **)sqlite3_malloc(sizeof(Fts3Expr *) * nMaxDepth); if( 0==apLeaf ){ rc = SQLITE_NOMEM; }else{ memset(apLeaf, 0, sizeof(Fts3Expr *) * nMaxDepth); } if( rc==SQLITE_OK ){ int i; Fts3Expr *p; /* Set $p to point to the left-most leaf in the tree of eType nodes. */ for(p=pRoot; p->eType==eType; p=p->pLeft){ assert( p->pParent==0 || p->pParent->pLeft==p ); assert( p->pLeft && p->pRight ); } /* This loop runs once for each leaf in the tree of eType nodes. */ while( 1 ){ int iLvl; Fts3Expr *pParent = p->pParent; /* Current parent of p */ assert( pParent==0 || pParent->pLeft==p ); p->pParent = 0; if( pParent ){ pParent->pLeft = 0; }else{ pRoot = 0; } rc = fts3ExprBalance(&p, nMaxDepth-1); if( rc!=SQLITE_OK ) break; for(iLvl=0; p && iLvlpLeft = apLeaf[iLvl]; pFree->pRight = p; pFree->pLeft->pParent = pFree; pFree->pRight->pParent = pFree; p = pFree; pFree = pFree->pParent; p->pParent = 0; apLeaf[iLvl] = 0; } } if( p ){ sqlite3Fts3ExprFree(p); rc = SQLITE_TOOBIG; break; } /* If that was the last leaf node, break out of the loop */ if( pParent==0 ) break; /* Set $p to point to the next leaf in the tree of eType nodes */ for(p=pParent->pRight; p->eType==eType; p=p->pLeft); /* Remove pParent from the original tree. 
        */
        assert( pParent->pParent==0 || pParent->pParent->pLeft==pParent );
        pParent->pRight->pParent = pParent->pParent;
        if( pParent->pParent ){
          pParent->pParent->pLeft = pParent->pRight;
        }else{
          assert( pParent==pRoot );
          pRoot = pParent->pRight;
        }

        /* Link pParent into the free node list. It will be used as an
        ** internal node of the new tree.  */
        pParent->pParent = pFree;
        pFree = pParent;
      }

      if( rc==SQLITE_OK ){
        p = 0;
        for(i=0; i<nMaxDepth; i++){
          if( apLeaf[i] ){
            if( p==0 ){
              p = apLeaf[i];
              p->pParent = 0;
            }else{
              assert( pFree!=0 );
              pFree->pRight = p;
              pFree->pLeft = apLeaf[i];
              pFree->pLeft->pParent = pFree;
              pFree->pRight->pParent = pFree;

              p = pFree;
              pFree = pFree->pParent;
              p->pParent = 0;
            }
          }
        }
        pRoot = p;
      }else{
        /* An error occurred. Delete the contents of the apLeaf[] array
        ** and pFree list. Everything else is cleaned up by the call to
        ** sqlite3Fts3ExprFree(pRoot) below.  */
        Fts3Expr *pDel;
        for(i=0; i<nMaxDepth; i++){
          sqlite3Fts3ExprFree(apLeaf[i]);
        }
        while( (pDel=pFree)!=0 ){
          pFree = pDel->pParent;
          sqlite3_free(pDel);
        }
      }

      assert( pFree==0 );
      sqlite3_free( apLeaf );
    }
  }

  if( rc!=SQLITE_OK ){
    sqlite3Fts3ExprFree(pRoot);
    pRoot = 0;
  }
  *pp = pRoot;
  return rc;
}

/*
** This function is similar to sqlite3Fts3ExprParse(), with the following
** differences:
**
**   1. It does not do expression rebalancing.
**   2. It does not check that the expression does not exceed the
**      maximum allowable depth.
**   3. Even if it fails, *ppExpr may still be set to point to an
**      expression tree. It should be deleted using sqlite3Fts3ExprFree()
**      in this case.
*/
static int fts3ExprParseUnbalanced(
  sqlite3_tokenizer *pTokenizer,      /* Tokenizer module */
  int iLangid,                        /* Language id for tokenizer */
  char **azCol,                       /* Array of column names for fts3 table */
  int bFts4,                          /* True to allow FTS4-only syntax */
  int nCol,                           /* Number of entries in azCol[] */
  int iDefaultCol,                    /* Default column to query */
  const char *z, int n,               /* Text of MATCH query */
  Fts3Expr **ppExpr                   /* OUT: Parsed query structure */
){
  int nParsed;
  int rc;
  ParseContext sParse;

  memset(&sParse, 0, sizeof(ParseContext));
  sParse.pTokenizer = pTokenizer;
  sParse.iLangid = iLangid;
  sParse.azCol = (const char **)azCol;
  sParse.nCol = nCol;
  sParse.iDefaultCol = iDefaultCol;
  sParse.bFts4 = bFts4;
  if( z==0 ){
    *ppExpr = 0;
    return SQLITE_OK;
  }
  if( n<0 ){
    n = (int)strlen(z);
  }
  rc = fts3ExprParse(&sParse, z, n, ppExpr, &nParsed);
  assert( rc==SQLITE_OK || *ppExpr==0 );

  /* Check for mismatched parenthesis */
  if( rc==SQLITE_OK && sParse.nNest ){
    rc = SQLITE_ERROR;
  }

  return rc;
}

/*
** Parameters z and n contain a pointer to and length of a buffer containing
** an fts3 query expression, respectively. This function attempts to parse the
** query expression and create a tree of Fts3Expr structures representing the
** parsed expression. If successful, *ppExpr is set to point to the head
** of the parsed expression tree and SQLITE_OK is returned. If an error
** occurs, either SQLITE_NOMEM (out-of-memory error) or SQLITE_ERROR (parse
** error) is returned and *ppExpr is set to 0.
**
** If parameter n is a negative number, then z is assumed to point to a
** nul-terminated string and the length is determined using strlen().
**
** The first parameter, pTokenizer, is passed the fts3 tokenizer module to
** use to normalize query tokens while parsing the expression. The azCol[]
** array, which is assumed to contain nCol entries, should contain the names
** of each column in the target fts3 table, in order from left to right.
** Column names must be nul-terminated strings.
** ** The iDefaultCol parameter should be passed the index of the table column ** that appears on the left-hand-side of the MATCH operator (the default ** column to match against for tokens for which a column name is not explicitly ** specified as part of the query string), or -1 if tokens may by default ** match any table column. */ SQLITE_PRIVATE int sqlite3Fts3ExprParse( sqlite3_tokenizer *pTokenizer, /* Tokenizer module */ int iLangid, /* Language id for tokenizer */ char **azCol, /* Array of column names for fts3 table */ int bFts4, /* True to allow FTS4-only syntax */ int nCol, /* Number of entries in azCol[] */ int iDefaultCol, /* Default column to query */ const char *z, int n, /* Text of MATCH query */ Fts3Expr **ppExpr, /* OUT: Parsed query structure */ char **pzErr /* OUT: Error message (sqlite3_malloc) */ ){ int rc = fts3ExprParseUnbalanced( pTokenizer, iLangid, azCol, bFts4, nCol, iDefaultCol, z, n, ppExpr ); /* Rebalance the expression. And check that its depth does not exceed ** SQLITE_FTS3_MAX_EXPR_DEPTH. */ if( rc==SQLITE_OK && *ppExpr ){ rc = fts3ExprBalance(ppExpr, SQLITE_FTS3_MAX_EXPR_DEPTH); if( rc==SQLITE_OK ){ rc = fts3ExprCheckDepth(*ppExpr, SQLITE_FTS3_MAX_EXPR_DEPTH); } } if( rc!=SQLITE_OK ){ sqlite3Fts3ExprFree(*ppExpr); *ppExpr = 0; if( rc==SQLITE_TOOBIG ){ sqlite3Fts3ErrMsg(pzErr, "FTS expression tree is too large (maximum depth %d)", SQLITE_FTS3_MAX_EXPR_DEPTH ); rc = SQLITE_ERROR; }else if( rc==SQLITE_ERROR ){ sqlite3Fts3ErrMsg(pzErr, "malformed MATCH expression: [%s]", z); } } return rc; } /* ** Free a single node of an expression tree. */ static void fts3FreeExprNode(Fts3Expr *p){ assert( p->eType==FTSQUERY_PHRASE || p->pPhrase==0 ); sqlite3Fts3EvalPhraseCleanup(p->pPhrase); sqlite3_free(p->aMI); sqlite3_free(p); } /* ** Free a parsed fts3 query expression allocated by sqlite3Fts3ExprParse(). ** ** This function would be simpler if it recursively called itself. But ** that would mean passing a sufficiently large expression to ExprParse() ** could cause a stack overflow. */ SQLITE_PRIVATE void sqlite3Fts3ExprFree(Fts3Expr *pDel){ Fts3Expr *p; assert( pDel==0 || pDel->pParent==0 ); for(p=pDel; p && (p->pLeft||p->pRight); p=(p->pLeft ? p->pLeft : p->pRight)){ assert( p->pParent==0 || p==p->pParent->pRight || p==p->pParent->pLeft ); } while( p ){ Fts3Expr *pParent = p->pParent; fts3FreeExprNode(p); if( pParent && p==pParent->pLeft && pParent->pRight ){ p = pParent->pRight; while( p && (p->pLeft || p->pRight) ){ assert( p==p->pParent->pRight || p==p->pParent->pLeft ); p = (p->pLeft ? p->pLeft : p->pRight); } }else{ p = pParent; } } } /**************************************************************************** ***************************************************************************** ** Everything after this point is just test code. */ #ifdef SQLITE_TEST /* #include */ /* ** Function to query the hash-table of tokenizers (see README.tokenizers). 
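** For example, once the built-in "simple" tokenizer has been registered
** with the database connection, a call such as
**
**   queryTestTokenizer(db, "simple", &pMod);
**
** leaves pMod pointing at the simple tokenizer's sqlite3_tokenizer_module
** structure.  (Sketch of the intended use only; fts3ExprTest() below drives
** this function in exactly that way.)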
*/ static int queryTestTokenizer( sqlite3 *db, const char *zName, const sqlite3_tokenizer_module **pp ){ int rc; sqlite3_stmt *pStmt; const char zSql[] = "SELECT fts3_tokenizer(?)"; *pp = 0; rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0); if( rc!=SQLITE_OK ){ return rc; } sqlite3_bind_text(pStmt, 1, zName, -1, SQLITE_STATIC); if( SQLITE_ROW==sqlite3_step(pStmt) ){ if( sqlite3_column_type(pStmt, 0)==SQLITE_BLOB ){ memcpy((void *)pp, sqlite3_column_blob(pStmt, 0), sizeof(*pp)); } } return sqlite3_finalize(pStmt); } /* ** Return a pointer to a buffer containing a text representation of the ** expression passed as the first argument. The buffer is obtained from ** sqlite3_malloc(). It is the responsibility of the caller to use ** sqlite3_free() to release the memory. If an OOM condition is encountered, ** NULL is returned. ** ** If the second argument is not NULL, then its contents are prepended to ** the returned expression text and then freed using sqlite3_free(). */ static char *exprToString(Fts3Expr *pExpr, char *zBuf){ if( pExpr==0 ){ return sqlite3_mprintf(""); } switch( pExpr->eType ){ case FTSQUERY_PHRASE: { Fts3Phrase *pPhrase = pExpr->pPhrase; int i; zBuf = sqlite3_mprintf( "%zPHRASE %d 0", zBuf, pPhrase->iColumn); for(i=0; zBuf && inToken; i++){ zBuf = sqlite3_mprintf("%z %.*s%s", zBuf, pPhrase->aToken[i].n, pPhrase->aToken[i].z, (pPhrase->aToken[i].isPrefix?"+":"") ); } return zBuf; } case FTSQUERY_NEAR: zBuf = sqlite3_mprintf("%zNEAR/%d ", zBuf, pExpr->nNear); break; case FTSQUERY_NOT: zBuf = sqlite3_mprintf("%zNOT ", zBuf); break; case FTSQUERY_AND: zBuf = sqlite3_mprintf("%zAND ", zBuf); break; case FTSQUERY_OR: zBuf = sqlite3_mprintf("%zOR ", zBuf); break; } if( zBuf ) zBuf = sqlite3_mprintf("%z{", zBuf); if( zBuf ) zBuf = exprToString(pExpr->pLeft, zBuf); if( zBuf ) zBuf = sqlite3_mprintf("%z} {", zBuf); if( zBuf ) zBuf = exprToString(pExpr->pRight, zBuf); if( zBuf ) zBuf = sqlite3_mprintf("%z}", zBuf); return zBuf; } /* ** This is the implementation of a scalar SQL function used to test the ** expression parser. It should be called as follows: ** ** fts3_exprtest(, , , ...); ** ** The first argument, , is the name of the fts3 tokenizer used ** to parse the query expression (see README.tokenizers). The second argument ** is the query expression to parse. Each subsequent argument is the name ** of a column of the fts3 table that the query expression may refer to. 
** For example: ** ** SELECT fts3_exprtest('simple', 'Bill col2:Bloggs', 'col1', 'col2'); */ static void fts3ExprTest( sqlite3_context *context, int argc, sqlite3_value **argv ){ sqlite3_tokenizer_module const *pModule = 0; sqlite3_tokenizer *pTokenizer = 0; int rc; char **azCol = 0; const char *zExpr; int nExpr; int nCol; int ii; Fts3Expr *pExpr; char *zBuf = 0; sqlite3 *db = sqlite3_context_db_handle(context); if( argc<3 ){ sqlite3_result_error(context, "Usage: fts3_exprtest(tokenizer, expr, col1, ...", -1 ); return; } rc = queryTestTokenizer(db, (const char *)sqlite3_value_text(argv[0]), &pModule); if( rc==SQLITE_NOMEM ){ sqlite3_result_error_nomem(context); goto exprtest_out; }else if( !pModule ){ sqlite3_result_error(context, "No such tokenizer module", -1); goto exprtest_out; } rc = pModule->xCreate(0, 0, &pTokenizer); assert( rc==SQLITE_NOMEM || rc==SQLITE_OK ); if( rc==SQLITE_NOMEM ){ sqlite3_result_error_nomem(context); goto exprtest_out; } pTokenizer->pModule = pModule; zExpr = (const char *)sqlite3_value_text(argv[1]); nExpr = sqlite3_value_bytes(argv[1]); nCol = argc-2; azCol = (char **)sqlite3_malloc(nCol*sizeof(char *)); if( !azCol ){ sqlite3_result_error_nomem(context); goto exprtest_out; } for(ii=0; iixDestroy(pTokenizer); } sqlite3_free(azCol); } /* ** Register the query expression parser test function fts3_exprtest() ** with database connection db. */ SQLITE_PRIVATE int sqlite3Fts3ExprInitTestInterface(sqlite3* db){ int rc = sqlite3_create_function( db, "fts3_exprtest", -1, SQLITE_UTF8, 0, fts3ExprTest, 0, 0 ); if( rc==SQLITE_OK ){ rc = sqlite3_create_function(db, "fts3_exprtest_rebalance", -1, SQLITE_UTF8, (void *)1, fts3ExprTest, 0, 0 ); } return rc; } #endif #endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */ /************** End of fts3_expr.c *******************************************/ /************** Begin file fts3_hash.c ***************************************/ /* ** 2001 September 22 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** This is the implementation of generic hash-tables used in SQLite. ** We've modified it slightly to serve as a standalone hash table ** implementation for the full-text indexing module. */ /* ** The code in this file is only compiled if: ** ** * The FTS3 module is being built as an extension ** (in which case SQLITE_CORE is not defined), or ** ** * The FTS3 module is being built into the core of ** SQLite (in which case SQLITE_ENABLE_FTS3 is defined). */ /* #include "fts3Int.h" */ #if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) /* #include */ /* #include */ /* #include */ /* #include "fts3_hash.h" */ /* ** Malloc and Free functions */ static void *fts3HashMalloc(int n){ void *p = sqlite3_malloc(n); if( p ){ memset(p, 0, n); } return p; } static void fts3HashFree(void *p){ sqlite3_free(p); } /* Turn bulk memory into a hash table object by initializing the ** fields of the Hash structure. ** ** "pNew" is a pointer to the hash table that is to be initialized. ** keyClass is one of the constants ** FTS3_HASH_BINARY or FTS3_HASH_STRING. The value of keyClass ** determines what kind of key the hash table will use. 
"copyKey" is ** true if the hash table should make its own private copy of keys and ** false if it should just use the supplied pointer. */ SQLITE_PRIVATE void sqlite3Fts3HashInit(Fts3Hash *pNew, char keyClass, char copyKey){ assert( pNew!=0 ); assert( keyClass>=FTS3_HASH_STRING && keyClass<=FTS3_HASH_BINARY ); pNew->keyClass = keyClass; pNew->copyKey = copyKey; pNew->first = 0; pNew->count = 0; pNew->htsize = 0; pNew->ht = 0; } /* Remove all entries from a hash table. Reclaim all memory. ** Call this routine to delete a hash table or to reset a hash table ** to the empty state. */ SQLITE_PRIVATE void sqlite3Fts3HashClear(Fts3Hash *pH){ Fts3HashElem *elem; /* For looping over all elements of the table */ assert( pH!=0 ); elem = pH->first; pH->first = 0; fts3HashFree(pH->ht); pH->ht = 0; pH->htsize = 0; while( elem ){ Fts3HashElem *next_elem = elem->next; if( pH->copyKey && elem->pKey ){ fts3HashFree(elem->pKey); } fts3HashFree(elem); elem = next_elem; } pH->count = 0; } /* ** Hash and comparison functions when the mode is FTS3_HASH_STRING */ static int fts3StrHash(const void *pKey, int nKey){ const char *z = (const char *)pKey; unsigned h = 0; if( nKey<=0 ) nKey = (int) strlen(z); while( nKey > 0 ){ h = (h<<3) ^ h ^ *z++; nKey--; } return (int)(h & 0x7fffffff); } static int fts3StrCompare(const void *pKey1, int n1, const void *pKey2, int n2){ if( n1!=n2 ) return 1; return strncmp((const char*)pKey1,(const char*)pKey2,n1); } /* ** Hash and comparison functions when the mode is FTS3_HASH_BINARY */ static int fts3BinHash(const void *pKey, int nKey){ int h = 0; const char *z = (const char *)pKey; while( nKey-- > 0 ){ h = (h<<3) ^ h ^ *(z++); } return h & 0x7fffffff; } static int fts3BinCompare(const void *pKey1, int n1, const void *pKey2, int n2){ if( n1!=n2 ) return 1; return memcmp(pKey1,pKey2,n1); } /* ** Return a pointer to the appropriate hash function given the key class. ** ** The C syntax in this function definition may be unfamilar to some ** programmers, so we provide the following additional explanation: ** ** The name of the function is "ftsHashFunction". The function takes a ** single parameter "keyClass". The return value of ftsHashFunction() ** is a pointer to another function. Specifically, the return value ** of ftsHashFunction() is a pointer to a function that takes two parameters ** with types "const void*" and "int" and returns an "int". */ static int (*ftsHashFunction(int keyClass))(const void*,int){ if( keyClass==FTS3_HASH_STRING ){ return &fts3StrHash; }else{ assert( keyClass==FTS3_HASH_BINARY ); return &fts3BinHash; } } /* ** Return a pointer to the appropriate hash function given the key class. ** ** For help in interpreted the obscure C code in the function definition, ** see the header comment on the previous function. 
*/ static int (*ftsCompareFunction(int keyClass))(const void*,int,const void*,int){ if( keyClass==FTS3_HASH_STRING ){ return &fts3StrCompare; }else{ assert( keyClass==FTS3_HASH_BINARY ); return &fts3BinCompare; } } /* Link an element into the hash table */ static void fts3HashInsertElement( Fts3Hash *pH, /* The complete hash table */ struct _fts3ht *pEntry, /* The entry into which pNew is inserted */ Fts3HashElem *pNew /* The element to be inserted */ ){ Fts3HashElem *pHead; /* First element already in pEntry */ pHead = pEntry->chain; if( pHead ){ pNew->next = pHead; pNew->prev = pHead->prev; if( pHead->prev ){ pHead->prev->next = pNew; } else { pH->first = pNew; } pHead->prev = pNew; }else{ pNew->next = pH->first; if( pH->first ){ pH->first->prev = pNew; } pNew->prev = 0; pH->first = pNew; } pEntry->count++; pEntry->chain = pNew; } /* Resize the hash table so that it cantains "new_size" buckets. ** "new_size" must be a power of 2. The hash table might fail ** to resize if sqliteMalloc() fails. ** ** Return non-zero if a memory allocation error occurs. */ static int fts3Rehash(Fts3Hash *pH, int new_size){ struct _fts3ht *new_ht; /* The new hash table */ Fts3HashElem *elem, *next_elem; /* For looping over existing elements */ int (*xHash)(const void*,int); /* The hash function */ assert( (new_size & (new_size-1))==0 ); new_ht = (struct _fts3ht *)fts3HashMalloc( new_size*sizeof(struct _fts3ht) ); if( new_ht==0 ) return 1; fts3HashFree(pH->ht); pH->ht = new_ht; pH->htsize = new_size; xHash = ftsHashFunction(pH->keyClass); for(elem=pH->first, pH->first=0; elem; elem = next_elem){ int h = (*xHash)(elem->pKey, elem->nKey) & (new_size-1); next_elem = elem->next; fts3HashInsertElement(pH, &new_ht[h], elem); } return 0; } /* This function (for internal use only) locates an element in an ** hash table that matches the given key. The hash for this key has ** already been computed and is passed as the 4th parameter. */ static Fts3HashElem *fts3FindElementByHash( const Fts3Hash *pH, /* The pH to be searched */ const void *pKey, /* The key we are searching for */ int nKey, int h /* The hash for this key. */ ){ Fts3HashElem *elem; /* Used to loop thru the element list */ int count; /* Number of elements left to test */ int (*xCompare)(const void*,int,const void*,int); /* comparison function */ if( pH->ht ){ struct _fts3ht *pEntry = &pH->ht[h]; elem = pEntry->chain; count = pEntry->count; xCompare = ftsCompareFunction(pH->keyClass); while( count-- && elem ){ if( (*xCompare)(elem->pKey,elem->nKey,pKey,nKey)==0 ){ return elem; } elem = elem->next; } } return 0; } /* Remove a single entry from the hash table given a pointer to that ** element and a hash on the element's key. 
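** Note that because the number of buckets is always a power of two, bucket
** indexes are computed with a mask rather than a modulo: for example, with
** htsize==8 a key whose hash value is 42 belongs in bucket (42 & 7)==2.
** The same masked value must be supplied as the h parameter here when that
** element is later removed.  (Illustrative arithmetic only.)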
*/ static void fts3RemoveElementByHash( Fts3Hash *pH, /* The pH containing "elem" */ Fts3HashElem* elem, /* The element to be removed from the pH */ int h /* Hash value for the element */ ){ struct _fts3ht *pEntry; if( elem->prev ){ elem->prev->next = elem->next; }else{ pH->first = elem->next; } if( elem->next ){ elem->next->prev = elem->prev; } pEntry = &pH->ht[h]; if( pEntry->chain==elem ){ pEntry->chain = elem->next; } pEntry->count--; if( pEntry->count<=0 ){ pEntry->chain = 0; } if( pH->copyKey && elem->pKey ){ fts3HashFree(elem->pKey); } fts3HashFree( elem ); pH->count--; if( pH->count<=0 ){ assert( pH->first==0 ); assert( pH->count==0 ); fts3HashClear(pH); } } SQLITE_PRIVATE Fts3HashElem *sqlite3Fts3HashFindElem( const Fts3Hash *pH, const void *pKey, int nKey ){ int h; /* A hash on key */ int (*xHash)(const void*,int); /* The hash function */ if( pH==0 || pH->ht==0 ) return 0; xHash = ftsHashFunction(pH->keyClass); assert( xHash!=0 ); h = (*xHash)(pKey,nKey); assert( (pH->htsize & (pH->htsize-1))==0 ); return fts3FindElementByHash(pH,pKey,nKey, h & (pH->htsize-1)); } /* ** Attempt to locate an element of the hash table pH with a key ** that matches pKey,nKey. Return the data for this element if it is ** found, or NULL if there is no match. */ SQLITE_PRIVATE void *sqlite3Fts3HashFind(const Fts3Hash *pH, const void *pKey, int nKey){ Fts3HashElem *pElem; /* The element that matches key (if any) */ pElem = sqlite3Fts3HashFindElem(pH, pKey, nKey); return pElem ? pElem->data : 0; } /* Insert an element into the hash table pH. The key is pKey,nKey ** and the data is "data". ** ** If no element exists with a matching key, then a new ** element is created. A copy of the key is made if the copyKey ** flag is set. NULL is returned. ** ** If another element already exists with the same key, then the ** new data replaces the old data and the old data is returned. ** The key is not copied in this instance. If a malloc fails, then ** the new data is returned and the hash table is unchanged. ** ** If the "data" parameter to this function is NULL, then the ** element corresponding to "key" is removed from the hash table. 
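** A sketch of typical usage, mirroring the way FTS3 stores tokenizer
** modules (string keys are stored together with their nul terminator,
** hence the strlen()+1 key lengths):
**
**   Fts3Hash h;
**   sqlite3Fts3HashInit(&h, FTS3_HASH_STRING, 1);
**   sqlite3Fts3HashInsert(&h, "porter", 7, (void *)pModule);
**   assert( sqlite3Fts3HashFind(&h, "porter", 7)==(void *)pModule );
**   sqlite3Fts3HashInsert(&h, "porter", 7, 0);
**   sqlite3Fts3HashClear(&h);
**
** The final insert, with a NULL data pointer, removes the "porter" entry
** again.  (pModule stands for any sqlite3_tokenizer_module pointer.)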
*/ SQLITE_PRIVATE void *sqlite3Fts3HashInsert( Fts3Hash *pH, /* The hash table to insert into */ const void *pKey, /* The key */ int nKey, /* Number of bytes in the key */ void *data /* The data */ ){ int hraw; /* Raw hash value of the key */ int h; /* the hash of the key modulo hash table size */ Fts3HashElem *elem; /* Used to loop thru the element list */ Fts3HashElem *new_elem; /* New element added to the pH */ int (*xHash)(const void*,int); /* The hash function */ assert( pH!=0 ); xHash = ftsHashFunction(pH->keyClass); assert( xHash!=0 ); hraw = (*xHash)(pKey, nKey); assert( (pH->htsize & (pH->htsize-1))==0 ); h = hraw & (pH->htsize-1); elem = fts3FindElementByHash(pH,pKey,nKey,h); if( elem ){ void *old_data = elem->data; if( data==0 ){ fts3RemoveElementByHash(pH,elem,h); }else{ elem->data = data; } return old_data; } if( data==0 ) return 0; if( (pH->htsize==0 && fts3Rehash(pH,8)) || (pH->count>=pH->htsize && fts3Rehash(pH, pH->htsize*2)) ){ pH->count = 0; return data; } assert( pH->htsize>0 ); new_elem = (Fts3HashElem*)fts3HashMalloc( sizeof(Fts3HashElem) ); if( new_elem==0 ) return data; if( pH->copyKey && pKey!=0 ){ new_elem->pKey = fts3HashMalloc( nKey ); if( new_elem->pKey==0 ){ fts3HashFree(new_elem); return data; } memcpy((void*)new_elem->pKey, pKey, nKey); }else{ new_elem->pKey = (void*)pKey; } new_elem->nKey = nKey; pH->count++; assert( pH->htsize>0 ); assert( (pH->htsize & (pH->htsize-1))==0 ); h = hraw & (pH->htsize-1); fts3HashInsertElement(pH, &pH->ht[h], new_elem); new_elem->data = data; return 0; } #endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */ /************** End of fts3_hash.c *******************************************/ /************** Begin file fts3_porter.c *************************************/ /* ** 2006 September 30 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** Implementation of the full-text-search tokenizer that implements ** a Porter stemmer. */ /* ** The code in this file is only compiled if: ** ** * The FTS3 module is being built as an extension ** (in which case SQLITE_CORE is not defined), or ** ** * The FTS3 module is being built into the core of ** SQLite (in which case SQLITE_ENABLE_FTS3 is defined). */ /* #include "fts3Int.h" */ #if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) /* #include */ /* #include */ /* #include */ /* #include */ /* #include "fts3_tokenizer.h" */ /* ** Class derived from sqlite3_tokenizer */ typedef struct porter_tokenizer { sqlite3_tokenizer base; /* Base class */ } porter_tokenizer; /* ** Class derived from sqlite3_tokenizer_cursor */ typedef struct porter_tokenizer_cursor { sqlite3_tokenizer_cursor base; const char *zInput; /* input we are tokenizing */ int nInput; /* size of the input */ int iOffset; /* current position in zInput */ int iToken; /* index of next token to be returned */ char *zToken; /* storage for current token */ int nAllocated; /* space allocated to zToken buffer */ } porter_tokenizer_cursor; /* ** Create a new tokenizer instance. 
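** (As with every fts3 tokenizer module, the expected calling pattern is:
** xCreate() once to build the tokenizer object, then, for each piece of
** text to be tokenized, xOpen() followed by repeated calls to xNext()
** until it returns SQLITE_DONE, then xClose(); finally xDestroy() releases
** the tokenizer.  This is a summary of how the interface is driven by the
** rest of FTS3, not a formal specification.)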
*/ static int porterCreate( int argc, const char * const *argv, sqlite3_tokenizer **ppTokenizer ){ porter_tokenizer *t; UNUSED_PARAMETER(argc); UNUSED_PARAMETER(argv); t = (porter_tokenizer *) sqlite3_malloc(sizeof(*t)); if( t==NULL ) return SQLITE_NOMEM; memset(t, 0, sizeof(*t)); *ppTokenizer = &t->base; return SQLITE_OK; } /* ** Destroy a tokenizer */ static int porterDestroy(sqlite3_tokenizer *pTokenizer){ sqlite3_free(pTokenizer); return SQLITE_OK; } /* ** Prepare to begin tokenizing a particular string. The input ** string to be tokenized is zInput[0..nInput-1]. A cursor ** used to incrementally tokenize this string is returned in ** *ppCursor. */ static int porterOpen( sqlite3_tokenizer *pTokenizer, /* The tokenizer */ const char *zInput, int nInput, /* String to be tokenized */ sqlite3_tokenizer_cursor **ppCursor /* OUT: Tokenization cursor */ ){ porter_tokenizer_cursor *c; UNUSED_PARAMETER(pTokenizer); c = (porter_tokenizer_cursor *) sqlite3_malloc(sizeof(*c)); if( c==NULL ) return SQLITE_NOMEM; c->zInput = zInput; if( zInput==0 ){ c->nInput = 0; }else if( nInput<0 ){ c->nInput = (int)strlen(zInput); }else{ c->nInput = nInput; } c->iOffset = 0; /* start tokenizing at the beginning */ c->iToken = 0; c->zToken = NULL; /* no space allocated, yet. */ c->nAllocated = 0; *ppCursor = &c->base; return SQLITE_OK; } /* ** Close a tokenization cursor previously opened by a call to ** porterOpen() above. */ static int porterClose(sqlite3_tokenizer_cursor *pCursor){ porter_tokenizer_cursor *c = (porter_tokenizer_cursor *) pCursor; sqlite3_free(c->zToken); sqlite3_free(c); return SQLITE_OK; } /* ** Vowel or consonant */ static const char cType[] = { 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 2, 1 }; /* ** isConsonant() and isVowel() determine if their first character in ** the string they point to is a consonant or a vowel, according ** to Porter ruls. ** ** A consonate is any letter other than 'a', 'e', 'i', 'o', or 'u'. ** 'Y' is a consonant unless it follows another consonant, ** in which case it is a vowel. ** ** In these routine, the letters are in reverse order. So the 'y' rule ** is that 'y' is a consonant unless it is followed by another ** consonent. */ static int isVowel(const char*); static int isConsonant(const char *z){ int j; char x = *z; if( x==0 ) return 0; assert( x>='a' && x<='z' ); j = cType[x-'a']; if( j<2 ) return j; return z[1]==0 || isVowel(z + 1); } static int isVowel(const char *z){ int j; char x = *z; if( x==0 ) return 0; assert( x>='a' && x<='z' ); j = cType[x-'a']; if( j<2 ) return 1-j; return isConsonant(z + 1); } /* ** Let any sequence of one or more vowels be represented by V and let ** C be sequence of one or more consonants. Then every word can be ** represented as: ** ** [C] (VC){m} [V] ** ** In prose: A word is an optional consonant followed by zero or ** vowel-consonant pairs followed by an optional vowel. "m" is the ** number of vowel consonant pairs. This routine computes the value ** of m for the first i bytes of a word. ** ** Return true if the m-value for z is 1 or more. In other words, ** return true if z contains at least one vowel that is followed ** by a consonant. ** ** In this routine z[] is in reverse order. So we are really looking ** for an instance of a consonant followed by a vowel. 
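** To illustrate the m-value itself (using the examples from Porter's
** original paper, with words shown in normal, un-reversed order): "tree"
** and "by" have m=0; "trouble", "oats" and "trees" have m=1; "troubles"
** and "private" have m=2.  m_gt_0() therefore returns false for the first
** group and true for the other two.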
*/ static int m_gt_0(const char *z){ while( isVowel(z) ){ z++; } if( *z==0 ) return 0; while( isConsonant(z) ){ z++; } return *z!=0; } /* Like mgt0 above except we are looking for a value of m which is ** exactly 1 */ static int m_eq_1(const char *z){ while( isVowel(z) ){ z++; } if( *z==0 ) return 0; while( isConsonant(z) ){ z++; } if( *z==0 ) return 0; while( isVowel(z) ){ z++; } if( *z==0 ) return 1; while( isConsonant(z) ){ z++; } return *z==0; } /* Like mgt0 above except we are looking for a value of m>1 instead ** or m>0 */ static int m_gt_1(const char *z){ while( isVowel(z) ){ z++; } if( *z==0 ) return 0; while( isConsonant(z) ){ z++; } if( *z==0 ) return 0; while( isVowel(z) ){ z++; } if( *z==0 ) return 0; while( isConsonant(z) ){ z++; } return *z!=0; } /* ** Return TRUE if there is a vowel anywhere within z[0..n-1] */ static int hasVowel(const char *z){ while( isConsonant(z) ){ z++; } return *z!=0; } /* ** Return TRUE if the word ends in a double consonant. ** ** The text is reversed here. So we are really looking at ** the first two characters of z[]. */ static int doubleConsonant(const char *z){ return isConsonant(z) && z[0]==z[1]; } /* ** Return TRUE if the word ends with three letters which ** are consonant-vowel-consonent and where the final consonant ** is not 'w', 'x', or 'y'. ** ** The word is reversed here. So we are really checking the ** first three letters and the first one cannot be in [wxy]. */ static int star_oh(const char *z){ return isConsonant(z) && z[0]!='w' && z[0]!='x' && z[0]!='y' && isVowel(z+1) && isConsonant(z+2); } /* ** If the word ends with zFrom and xCond() is true for the stem ** of the word that preceeds the zFrom ending, then change the ** ending to zTo. ** ** The input word *pz and zFrom are both in reverse order. zTo ** is in normal order. ** ** Return TRUE if zFrom matches. Return FALSE if zFrom does not ** match. Not that TRUE is returned even if xCond() fails and ** no substitution occurs. */ static int stem( char **pz, /* The word being stemmed (Reversed) */ const char *zFrom, /* If the ending matches this... (Reversed) */ const char *zTo, /* ... change the ending to this (not reversed) */ int (*xCond)(const char*) /* Condition that must be true */ ){ char *z = *pz; while( *zFrom && *zFrom==*z ){ z++; zFrom++; } if( *zFrom!=0 ) return 0; if( xCond && !xCond(z) ) return 1; while( *zTo ){ *(--z) = *(zTo++); } *pz = z; return 1; } /* ** This is the fallback stemmer used when the porter stemmer is ** inappropriate. The input word is copied into the output with ** US-ASCII case folding. If the input word is too long (more ** than 20 bytes if it contains no digits or more than 6 bytes if ** it contains digits) then word is truncated to 20 or 6 bytes ** by taking 10 or 3 bytes from the beginning and end. */ static void copy_stemmer(const char *zIn, int nIn, char *zOut, int *pnOut){ int i, mx, j; int hasDigit = 0; for(i=0; i='A' && c<='Z' ){ zOut[i] = c - 'A' + 'a'; }else{ if( c>='0' && c<='9' ) hasDigit = 1; zOut[i] = c; } } mx = hasDigit ? 3 : 10; if( nIn>mx*2 ){ for(j=mx, i=nIn-mx; i=(int)sizeof(zReverse)-7 ){ /* The word is too big or too small for the porter stemmer. 
** Fallback to the copy stemmer */ copy_stemmer(zIn, nIn, zOut, pnOut); return; } for(i=0, j=sizeof(zReverse)-6; i='A' && c<='Z' ){ zReverse[j] = c + 'a' - 'A'; }else if( c>='a' && c<='z' ){ zReverse[j] = c; }else{ /* The use of a character not in [a-zA-Z] means that we fallback ** to the copy stemmer */ copy_stemmer(zIn, nIn, zOut, pnOut); return; } } memset(&zReverse[sizeof(zReverse)-5], 0, 5); z = &zReverse[j+1]; /* Step 1a */ if( z[0]=='s' ){ if( !stem(&z, "sess", "ss", 0) && !stem(&z, "sei", "i", 0) && !stem(&z, "ss", "ss", 0) ){ z++; } } /* Step 1b */ z2 = z; if( stem(&z, "dee", "ee", m_gt_0) ){ /* Do nothing. The work was all in the test */ }else if( (stem(&z, "gni", "", hasVowel) || stem(&z, "de", "", hasVowel)) && z!=z2 ){ if( stem(&z, "ta", "ate", 0) || stem(&z, "lb", "ble", 0) || stem(&z, "zi", "ize", 0) ){ /* Do nothing. The work was all in the test */ }else if( doubleConsonant(z) && (*z!='l' && *z!='s' && *z!='z') ){ z++; }else if( m_eq_1(z) && star_oh(z) ){ *(--z) = 'e'; } } /* Step 1c */ if( z[0]=='y' && hasVowel(z+1) ){ z[0] = 'i'; } /* Step 2 */ switch( z[1] ){ case 'a': if( !stem(&z, "lanoita", "ate", m_gt_0) ){ stem(&z, "lanoit", "tion", m_gt_0); } break; case 'c': if( !stem(&z, "icne", "ence", m_gt_0) ){ stem(&z, "icna", "ance", m_gt_0); } break; case 'e': stem(&z, "rezi", "ize", m_gt_0); break; case 'g': stem(&z, "igol", "log", m_gt_0); break; case 'l': if( !stem(&z, "ilb", "ble", m_gt_0) && !stem(&z, "illa", "al", m_gt_0) && !stem(&z, "iltne", "ent", m_gt_0) && !stem(&z, "ile", "e", m_gt_0) ){ stem(&z, "ilsuo", "ous", m_gt_0); } break; case 'o': if( !stem(&z, "noitazi", "ize", m_gt_0) && !stem(&z, "noita", "ate", m_gt_0) ){ stem(&z, "rota", "ate", m_gt_0); } break; case 's': if( !stem(&z, "msila", "al", m_gt_0) && !stem(&z, "ssenevi", "ive", m_gt_0) && !stem(&z, "ssenluf", "ful", m_gt_0) ){ stem(&z, "ssensuo", "ous", m_gt_0); } break; case 't': if( !stem(&z, "itila", "al", m_gt_0) && !stem(&z, "itivi", "ive", m_gt_0) ){ stem(&z, "itilib", "ble", m_gt_0); } break; } /* Step 3 */ switch( z[0] ){ case 'e': if( !stem(&z, "etaci", "ic", m_gt_0) && !stem(&z, "evita", "", m_gt_0) ){ stem(&z, "ezila", "al", m_gt_0); } break; case 'i': stem(&z, "itici", "ic", m_gt_0); break; case 'l': if( !stem(&z, "laci", "ic", m_gt_0) ){ stem(&z, "luf", "", m_gt_0); } break; case 's': stem(&z, "ssen", "", m_gt_0); break; } /* Step 4 */ switch( z[1] ){ case 'a': if( z[0]=='l' && m_gt_1(z+2) ){ z += 2; } break; case 'c': if( z[0]=='e' && z[2]=='n' && (z[3]=='a' || z[3]=='e') && m_gt_1(z+4) ){ z += 4; } break; case 'e': if( z[0]=='r' && m_gt_1(z+2) ){ z += 2; } break; case 'i': if( z[0]=='c' && m_gt_1(z+2) ){ z += 2; } break; case 'l': if( z[0]=='e' && z[2]=='b' && (z[3]=='a' || z[3]=='i') && m_gt_1(z+4) ){ z += 4; } break; case 'n': if( z[0]=='t' ){ if( z[2]=='a' ){ if( m_gt_1(z+3) ){ z += 3; } }else if( z[2]=='e' ){ if( !stem(&z, "tneme", "", m_gt_1) && !stem(&z, "tnem", "", m_gt_1) ){ stem(&z, "tne", "", m_gt_1); } } } break; case 'o': if( z[0]=='u' ){ if( m_gt_1(z+2) ){ z += 2; } }else if( z[3]=='s' || z[3]=='t' ){ stem(&z, "noi", "", m_gt_1); } break; case 's': if( z[0]=='m' && z[2]=='i' && m_gt_1(z+3) ){ z += 3; } break; case 't': if( !stem(&z, "eta", "", m_gt_1) ){ stem(&z, "iti", "", m_gt_1); } break; case 'u': if( z[0]=='s' && z[2]=='o' && m_gt_1(z+3) ){ z += 3; } break; case 'v': case 'z': if( z[0]=='e' && z[2]=='i' && m_gt_1(z+3) ){ z += 3; } break; } /* Step 5a */ if( z[0]=='e' ){ if( m_gt_1(z+1) ){ z++; }else if( m_eq_1(z+1) && !star_oh(z+1) ){ z++; } } /* Step 5b */ if( m_gt_1(z) 
&& z[0]=='l' && z[1]=='l' ){ z++; } /* z[] is now the stemmed word in reverse order. Flip it back ** around into forward order and return. */ *pnOut = i = (int)strlen(z); zOut[i] = 0; while( *z ){ zOut[--i] = *(z++); } } /* ** Characters that can be part of a token. We assume any character ** whose value is greater than 0x80 (any UTF character) can be ** part of a token. In other words, delimiters all must have ** values of 0x7f or lower. */ static const char porterIdChar[] = { /* x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xA xB xC xD xE xF */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, /* 3x */ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 4x */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, /* 5x */ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 6x */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, /* 7x */ }; #define isDelim(C) (((ch=C)&0x80)==0 && (ch<0x30 || !porterIdChar[ch-0x30])) /* ** Extract the next token from a tokenization cursor. The cursor must ** have been opened by a prior call to porterOpen(). */ static int porterNext( sqlite3_tokenizer_cursor *pCursor, /* Cursor returned by porterOpen */ const char **pzToken, /* OUT: *pzToken is the token text */ int *pnBytes, /* OUT: Number of bytes in token */ int *piStartOffset, /* OUT: Starting offset of token */ int *piEndOffset, /* OUT: Ending offset of token */ int *piPosition /* OUT: Position integer of token */ ){ porter_tokenizer_cursor *c = (porter_tokenizer_cursor *) pCursor; const char *z = c->zInput; while( c->iOffsetnInput ){ int iStartOffset, ch; /* Scan past delimiter characters */ while( c->iOffsetnInput && isDelim(z[c->iOffset]) ){ c->iOffset++; } /* Count non-delimiter characters. */ iStartOffset = c->iOffset; while( c->iOffsetnInput && !isDelim(z[c->iOffset]) ){ c->iOffset++; } if( c->iOffset>iStartOffset ){ int n = c->iOffset-iStartOffset; if( n>c->nAllocated ){ char *pNew; c->nAllocated = n+20; pNew = sqlite3_realloc(c->zToken, c->nAllocated); if( !pNew ) return SQLITE_NOMEM; c->zToken = pNew; } porter_stemmer(&z[iStartOffset], n, c->zToken, pnBytes); *pzToken = c->zToken; *piStartOffset = iStartOffset; *piEndOffset = c->iOffset; *piPosition = c->iToken++; return SQLITE_OK; } } return SQLITE_DONE; } /* ** The set of routines that implement the porter-stemmer tokenizer */ static const sqlite3_tokenizer_module porterTokenizerModule = { 0, porterCreate, porterDestroy, porterOpen, porterClose, porterNext, 0 }; /* ** Allocate a new porter tokenizer. Return a pointer to the new ** tokenizer in *ppModule */ SQLITE_PRIVATE void sqlite3Fts3PorterTokenizerModule( sqlite3_tokenizer_module const**ppModule ){ *ppModule = &porterTokenizerModule; } #endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */ /************** End of fts3_porter.c *****************************************/ /************** Begin file fts3_tokenizer.c **********************************/ /* ** 2007 June 22 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ****************************************************************************** ** ** This is part of an SQLite module implementing full-text search. ** This particular file implements the generic tokenizer interface. 
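** A tokenizer is selected when an FTS3 table is created, for example:
**
**   CREATE VIRTUAL TABLE papers USING fts3(content, tokenize=porter);
**
** and additional tokenizers may be registered at runtime through the
** fts3_tokenizer() scalar function implemented below.  (Usage example
** only; see the FTS3 documentation for the complete syntax.)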
*/ /* ** The code in this file is only compiled if: ** ** * The FTS3 module is being built as an extension ** (in which case SQLITE_CORE is not defined), or ** ** * The FTS3 module is being built into the core of ** SQLite (in which case SQLITE_ENABLE_FTS3 is defined). */ /* #include "fts3Int.h" */ #if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) /* #include */ /* #include */ /* ** Implementation of the SQL scalar function for accessing the underlying ** hash table. This function may be called as follows: ** ** SELECT (); ** SELECT (, ); ** ** where is the name passed as the second argument ** to the sqlite3Fts3InitHashTable() function (e.g. 'fts3_tokenizer'). ** ** If the argument is specified, it must be a blob value ** containing a pointer to be stored as the hash data corresponding ** to the string . If is not specified, then ** the string must already exist in the has table. Otherwise, ** an error is returned. ** ** Whether or not the argument is specified, the value returned ** is a blob containing the pointer stored as the hash data corresponding ** to string (after the hash-table is updated, if applicable). */ static void scalarFunc( sqlite3_context *context, int argc, sqlite3_value **argv ){ Fts3Hash *pHash; void *pPtr = 0; const unsigned char *zName; int nName; assert( argc==1 || argc==2 ); pHash = (Fts3Hash *)sqlite3_user_data(context); zName = sqlite3_value_text(argv[0]); nName = sqlite3_value_bytes(argv[0])+1; if( argc==2 ){ void *pOld; int n = sqlite3_value_bytes(argv[1]); if( zName==0 || n!=sizeof(pPtr) ){ sqlite3_result_error(context, "argument type mismatch", -1); return; } pPtr = *(void **)sqlite3_value_blob(argv[1]); pOld = sqlite3Fts3HashInsert(pHash, (void *)zName, nName, pPtr); if( pOld==pPtr ){ sqlite3_result_error(context, "out of memory", -1); return; } }else{ if( zName ){ pPtr = sqlite3Fts3HashFind(pHash, zName, nName); } if( !pPtr ){ char *zErr = sqlite3_mprintf("unknown tokenizer: %s", zName); sqlite3_result_error(context, zErr, -1); sqlite3_free(zErr); return; } } sqlite3_result_blob(context, (void *)&pPtr, sizeof(pPtr), SQLITE_TRANSIENT); } SQLITE_PRIVATE int sqlite3Fts3IsIdChar(char c){ static const char isFtsIdChar[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0x */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1x */ 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 2x */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, /* 3x */ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 4x */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, /* 5x */ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 6x */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, /* 7x */ }; return (c&0x80 || isFtsIdChar[(int)(c)]); } SQLITE_PRIVATE const char *sqlite3Fts3NextToken(const char *zStr, int *pn){ const char *z1; const char *z2 = 0; /* Find the start of the next token. 
*/ z1 = zStr; while( z2==0 ){ char c = *z1; switch( c ){ case '\0': return 0; /* No more tokens here */ case '\'': case '"': case '`': { z2 = z1; while( *++z2 && (*z2!=c || *++z2==c) ); break; } case '[': z2 = &z1[1]; while( *z2 && z2[0]!=']' ) z2++; if( *z2 ) z2++; break; default: if( sqlite3Fts3IsIdChar(*z1) ){ z2 = &z1[1]; while( sqlite3Fts3IsIdChar(*z2) ) z2++; }else{ z1++; } } } *pn = (int)(z2-z1); return z1; } SQLITE_PRIVATE int sqlite3Fts3InitTokenizer( Fts3Hash *pHash, /* Tokenizer hash table */ const char *zArg, /* Tokenizer name */ sqlite3_tokenizer **ppTok, /* OUT: Tokenizer (if applicable) */ char **pzErr /* OUT: Set to malloced error message */ ){ int rc; char *z = (char *)zArg; int n = 0; char *zCopy; char *zEnd; /* Pointer to nul-term of zCopy */ sqlite3_tokenizer_module *m; zCopy = sqlite3_mprintf("%s", zArg); if( !zCopy ) return SQLITE_NOMEM; zEnd = &zCopy[strlen(zCopy)]; z = (char *)sqlite3Fts3NextToken(zCopy, &n); if( z==0 ){ assert( n==0 ); z = zCopy; } z[n] = '\0'; sqlite3Fts3Dequote(z); m = (sqlite3_tokenizer_module *)sqlite3Fts3HashFind(pHash,z,(int)strlen(z)+1); if( !m ){ sqlite3Fts3ErrMsg(pzErr, "unknown tokenizer: %s", z); rc = SQLITE_ERROR; }else{ char const **aArg = 0; int iArg = 0; z = &z[n+1]; while( zxCreate(iArg, aArg, ppTok); assert( rc!=SQLITE_OK || *ppTok ); if( rc!=SQLITE_OK ){ sqlite3Fts3ErrMsg(pzErr, "unknown tokenizer"); }else{ (*ppTok)->pModule = m; } sqlite3_free((void *)aArg); } sqlite3_free(zCopy); return rc; } #ifdef SQLITE_TEST #include /* #include */ /* ** Implementation of a special SQL scalar function for testing tokenizers ** designed to be used in concert with the Tcl testing framework. This ** function must be called with two or more arguments: ** ** SELECT (, ..., ); ** ** where is the name passed as the second argument ** to the sqlite3Fts3InitHashTable() function (e.g. 'fts3_tokenizer') ** concatenated with the string '_test' (e.g. 'fts3_tokenizer_test'). ** ** The return value is a string that may be interpreted as a Tcl ** list. For each token in the , three elements are ** added to the returned list. The first is the token position, the ** second is the token text (folded, stemmed, etc.) and the third is the ** substring of associated with the token. 
For example, ** using the built-in "simple" tokenizer: ** ** SELECT fts_tokenizer_test('simple', 'I don't see how'); ** ** will return the string: ** ** "{0 i I 1 dont don't 2 see see 3 how how}" ** */ static void testFunc( sqlite3_context *context, int argc, sqlite3_value **argv ){ Fts3Hash *pHash; sqlite3_tokenizer_module *p; sqlite3_tokenizer *pTokenizer = 0; sqlite3_tokenizer_cursor *pCsr = 0; const char *zErr = 0; const char *zName; int nName; const char *zInput; int nInput; const char *azArg[64]; const char *zToken; int nToken = 0; int iStart = 0; int iEnd = 0; int iPos = 0; int i; Tcl_Obj *pRet; if( argc<2 ){ sqlite3_result_error(context, "insufficient arguments", -1); return; } nName = sqlite3_value_bytes(argv[0]); zName = (const char *)sqlite3_value_text(argv[0]); nInput = sqlite3_value_bytes(argv[argc-1]); zInput = (const char *)sqlite3_value_text(argv[argc-1]); pHash = (Fts3Hash *)sqlite3_user_data(context); p = (sqlite3_tokenizer_module *)sqlite3Fts3HashFind(pHash, zName, nName+1); if( !p ){ char *zErr2 = sqlite3_mprintf("unknown tokenizer: %s", zName); sqlite3_result_error(context, zErr2, -1); sqlite3_free(zErr2); return; } pRet = Tcl_NewObj(); Tcl_IncrRefCount(pRet); for(i=1; ixCreate(argc-2, azArg, &pTokenizer) ){ zErr = "error in xCreate()"; goto finish; } pTokenizer->pModule = p; if( sqlite3Fts3OpenTokenizer(pTokenizer, 0, zInput, nInput, &pCsr) ){ zErr = "error in xOpen()"; goto finish; } while( SQLITE_OK==p->xNext(pCsr, &zToken, &nToken, &iStart, &iEnd, &iPos) ){ Tcl_ListObjAppendElement(0, pRet, Tcl_NewIntObj(iPos)); Tcl_ListObjAppendElement(0, pRet, Tcl_NewStringObj(zToken, nToken)); zToken = &zInput[iStart]; nToken = iEnd-iStart; Tcl_ListObjAppendElement(0, pRet, Tcl_NewStringObj(zToken, nToken)); } if( SQLITE_OK!=p->xClose(pCsr) ){ zErr = "error in xClose()"; goto finish; } if( SQLITE_OK!=p->xDestroy(pTokenizer) ){ zErr = "error in xDestroy()"; goto finish; } finish: if( zErr ){ sqlite3_result_error(context, zErr, -1); }else{ sqlite3_result_text(context, Tcl_GetString(pRet), -1, SQLITE_TRANSIENT); } Tcl_DecrRefCount(pRet); } static int registerTokenizer( sqlite3 *db, char *zName, const sqlite3_tokenizer_module *p ){ int rc; sqlite3_stmt *pStmt; const char zSql[] = "SELECT fts3_tokenizer(?, ?)"; rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0); if( rc!=SQLITE_OK ){ return rc; } sqlite3_bind_text(pStmt, 1, zName, -1, SQLITE_STATIC); sqlite3_bind_blob(pStmt, 2, &p, sizeof(p), SQLITE_STATIC); sqlite3_step(pStmt); return sqlite3_finalize(pStmt); } static int queryTokenizer( sqlite3 *db, char *zName, const sqlite3_tokenizer_module **pp ){ int rc; sqlite3_stmt *pStmt; const char zSql[] = "SELECT fts3_tokenizer(?)"; *pp = 0; rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0); if( rc!=SQLITE_OK ){ return rc; } sqlite3_bind_text(pStmt, 1, zName, -1, SQLITE_STATIC); if( SQLITE_ROW==sqlite3_step(pStmt) ){ if( sqlite3_column_type(pStmt, 0)==SQLITE_BLOB ){ memcpy((void *)pp, sqlite3_column_blob(pStmt, 0), sizeof(*pp)); } } return sqlite3_finalize(pStmt); } SQLITE_PRIVATE void sqlite3Fts3SimpleTokenizerModule(sqlite3_tokenizer_module const**ppModule); /* ** Implementation of the scalar function fts3_tokenizer_internal_test(). ** This function is used for testing only, it is not included in the ** build unless SQLITE_TEST is defined. ** ** The purpose of this is to test that the fts3_tokenizer() function ** can be used as designed by the C-code in the queryTokenizer and ** registerTokenizer() functions above. 
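** (In sketch form, the registration performed by registerTokenizer()
** amounts to preparing "SELECT fts3_tokenizer(?, ?)", binding the
** tokenizer name as text and the address of a sqlite3_tokenizer_module
** pointer as a blob of sizeof(void*) bytes, and stepping the statement
** once.)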
These two functions are repeated ** in the README.tokenizer file as an example, so it is important to ** test them. ** ** To run the tests, evaluate the fts3_tokenizer_internal_test() scalar ** function with no arguments. An assert() will fail if a problem is ** detected. i.e.: ** ** SELECT fts3_tokenizer_internal_test(); ** */ static void intTestFunc( sqlite3_context *context, int argc, sqlite3_value **argv ){ int rc; const sqlite3_tokenizer_module *p1; const sqlite3_tokenizer_module *p2; sqlite3 *db = (sqlite3 *)sqlite3_user_data(context); UNUSED_PARAMETER(argc); UNUSED_PARAMETER(argv); /* Test the query function */ sqlite3Fts3SimpleTokenizerModule(&p1); rc = queryTokenizer(db, "simple", &p2); assert( rc==SQLITE_OK ); assert( p1==p2 ); rc = queryTokenizer(db, "nosuchtokenizer", &p2); assert( rc==SQLITE_ERROR ); assert( p2==0 ); assert( 0==strcmp(sqlite3_errmsg(db), "unknown tokenizer: nosuchtokenizer") ); /* Test the storage function */ rc = registerTokenizer(db, "nosuchtokenizer", p1); assert( rc==SQLITE_OK ); rc = queryTokenizer(db, "nosuchtokenizer", &p2); assert( rc==SQLITE_OK ); assert( p2==p1 ); sqlite3_result_text(context, "ok", -1, SQLITE_STATIC); } #endif /* ** Set up SQL objects in database db used to access the contents of ** the hash table pointed to by argument pHash. The hash table must ** been initialized to use string keys, and to take a private copy ** of the key when a value is inserted. i.e. by a call similar to: ** ** sqlite3Fts3HashInit(pHash, FTS3_HASH_STRING, 1); ** ** This function adds a scalar function (see header comment above ** scalarFunc() in this file for details) and, if ENABLE_TABLE is ** defined at compilation time, a temporary virtual table (see header ** comment above struct HashTableVtab) to the database schema. Both ** provide read/write access to the contents of *pHash. ** ** The third argument to this function, zName, is used as the name ** of both the scalar and, if created, the virtual table. */ SQLITE_PRIVATE int sqlite3Fts3InitHashTable( sqlite3 *db, Fts3Hash *pHash, const char *zName ){ int rc = SQLITE_OK; void *p = (void *)pHash; const int any = SQLITE_ANY; #ifdef SQLITE_TEST char *zTest = 0; char *zTest2 = 0; void *pdb = (void *)db; zTest = sqlite3_mprintf("%s_test", zName); zTest2 = sqlite3_mprintf("%s_internal_test", zName); if( !zTest || !zTest2 ){ rc = SQLITE_NOMEM; } #endif if( SQLITE_OK==rc ){ rc = sqlite3_create_function(db, zName, 1, any, p, scalarFunc, 0, 0); } if( SQLITE_OK==rc ){ rc = sqlite3_create_function(db, zName, 2, any, p, scalarFunc, 0, 0); } #ifdef SQLITE_TEST if( SQLITE_OK==rc ){ rc = sqlite3_create_function(db, zTest, -1, any, p, testFunc, 0, 0); } if( SQLITE_OK==rc ){ rc = sqlite3_create_function(db, zTest2, 0, any, pdb, intTestFunc, 0, 0); } #endif #ifdef SQLITE_TEST sqlite3_free(zTest); sqlite3_free(zTest2); #endif return rc; } #endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */ /************** End of fts3_tokenizer.c **************************************/ /************** Begin file fts3_tokenizer1.c *********************************/ /* ** 2006 Oct 10 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ****************************************************************************** ** ** Implementation of the "simple" full-text-search tokenizer. 
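** The "simple" tokenizer is the default used when an FTS3 table is created
** without an explicit tokenize= option.  It folds the ASCII letters A-Z to
** lower case and treats every non-alphanumeric ASCII byte as a delimiter,
** so, for example, the text "Right now!" produces the two tokens "right"
** and "now".  (Illustrative summary of the behaviour implemented below.)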
*/ /* ** The code in this file is only compiled if: ** ** * The FTS3 module is being built as an extension ** (in which case SQLITE_CORE is not defined), or ** ** * The FTS3 module is being built into the core of ** SQLite (in which case SQLITE_ENABLE_FTS3 is defined). */ /* #include "fts3Int.h" */ #if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) /* #include */ /* #include */ /* #include */ /* #include */ /* #include "fts3_tokenizer.h" */ typedef struct simple_tokenizer { sqlite3_tokenizer base; char delim[128]; /* flag ASCII delimiters */ } simple_tokenizer; typedef struct simple_tokenizer_cursor { sqlite3_tokenizer_cursor base; const char *pInput; /* input we are tokenizing */ int nBytes; /* size of the input */ int iOffset; /* current position in pInput */ int iToken; /* index of next token to be returned */ char *pToken; /* storage for current token */ int nTokenAllocated; /* space allocated to zToken buffer */ } simple_tokenizer_cursor; static int simpleDelim(simple_tokenizer *t, unsigned char c){ return c<0x80 && t->delim[c]; } static int fts3_isalnum(int x){ return (x>='0' && x<='9') || (x>='A' && x<='Z') || (x>='a' && x<='z'); } /* ** Create a new tokenizer instance. */ static int simpleCreate( int argc, const char * const *argv, sqlite3_tokenizer **ppTokenizer ){ simple_tokenizer *t; t = (simple_tokenizer *) sqlite3_malloc(sizeof(*t)); if( t==NULL ) return SQLITE_NOMEM; memset(t, 0, sizeof(*t)); /* TODO(shess) Delimiters need to remain the same from run to run, ** else we need to reindex. One solution would be a meta-table to ** track such information in the database, then we'd only want this ** information on the initial create. */ if( argc>1 ){ int i, n = (int)strlen(argv[1]); for(i=0; i=0x80 ){ sqlite3_free(t); return SQLITE_ERROR; } t->delim[ch] = 1; } } else { /* Mark non-alphanumeric ASCII characters as delimiters */ int i; for(i=1; i<0x80; i++){ t->delim[i] = !fts3_isalnum(i) ? -1 : 0; } } *ppTokenizer = &t->base; return SQLITE_OK; } /* ** Destroy a tokenizer */ static int simpleDestroy(sqlite3_tokenizer *pTokenizer){ sqlite3_free(pTokenizer); return SQLITE_OK; } /* ** Prepare to begin tokenizing a particular string. The input ** string to be tokenized is pInput[0..nBytes-1]. A cursor ** used to incrementally tokenize this string is returned in ** *ppCursor. */ static int simpleOpen( sqlite3_tokenizer *pTokenizer, /* The tokenizer */ const char *pInput, int nBytes, /* String to be tokenized */ sqlite3_tokenizer_cursor **ppCursor /* OUT: Tokenization cursor */ ){ simple_tokenizer_cursor *c; UNUSED_PARAMETER(pTokenizer); c = (simple_tokenizer_cursor *) sqlite3_malloc(sizeof(*c)); if( c==NULL ) return SQLITE_NOMEM; c->pInput = pInput; if( pInput==0 ){ c->nBytes = 0; }else if( nBytes<0 ){ c->nBytes = (int)strlen(pInput); }else{ c->nBytes = nBytes; } c->iOffset = 0; /* start tokenizing at the beginning */ c->iToken = 0; c->pToken = NULL; /* no space allocated, yet. */ c->nTokenAllocated = 0; *ppCursor = &c->base; return SQLITE_OK; } /* ** Close a tokenization cursor previously opened by a call to ** simpleOpen() above. */ static int simpleClose(sqlite3_tokenizer_cursor *pCursor){ simple_tokenizer_cursor *c = (simple_tokenizer_cursor *) pCursor; sqlite3_free(c->pToken); sqlite3_free(c); return SQLITE_OK; } /* ** Extract the next token from a tokenization cursor. The cursor must ** have been opened by a prior call to simpleOpen(). 
*/ static int simpleNext( sqlite3_tokenizer_cursor *pCursor, /* Cursor returned by simpleOpen */ const char **ppToken, /* OUT: *ppToken is the token text */ int *pnBytes, /* OUT: Number of bytes in token */ int *piStartOffset, /* OUT: Starting offset of token */ int *piEndOffset, /* OUT: Ending offset of token */ int *piPosition /* OUT: Position integer of token */ ){ simple_tokenizer_cursor *c = (simple_tokenizer_cursor *) pCursor; simple_tokenizer *t = (simple_tokenizer *) pCursor->pTokenizer; unsigned char *p = (unsigned char *)c->pInput; while( c->iOffsetnBytes ){ int iStartOffset; /* Scan past delimiter characters */ while( c->iOffsetnBytes && simpleDelim(t, p[c->iOffset]) ){ c->iOffset++; } /* Count non-delimiter characters. */ iStartOffset = c->iOffset; while( c->iOffsetnBytes && !simpleDelim(t, p[c->iOffset]) ){ c->iOffset++; } if( c->iOffset>iStartOffset ){ int i, n = c->iOffset-iStartOffset; if( n>c->nTokenAllocated ){ char *pNew; c->nTokenAllocated = n+20; pNew = sqlite3_realloc(c->pToken, c->nTokenAllocated); if( !pNew ) return SQLITE_NOMEM; c->pToken = pNew; } for(i=0; ipToken[i] = (char)((ch>='A' && ch<='Z') ? ch-'A'+'a' : ch); } *ppToken = c->pToken; *pnBytes = n; *piStartOffset = iStartOffset; *piEndOffset = c->iOffset; *piPosition = c->iToken++; return SQLITE_OK; } } return SQLITE_DONE; } /* ** The set of routines that implement the simple tokenizer */ static const sqlite3_tokenizer_module simpleTokenizerModule = { 0, simpleCreate, simpleDestroy, simpleOpen, simpleClose, simpleNext, 0, }; /* ** Allocate a new simple tokenizer. Return a pointer to the new ** tokenizer in *ppModule */ SQLITE_PRIVATE void sqlite3Fts3SimpleTokenizerModule( sqlite3_tokenizer_module const**ppModule ){ *ppModule = &simpleTokenizerModule; } #endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */ /************** End of fts3_tokenizer1.c *************************************/ /************** Begin file fts3_tokenize_vtab.c ******************************/ /* ** 2013 Apr 22 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ****************************************************************************** ** ** This file contains code for the "fts3tokenize" virtual table module. ** An fts3tokenize virtual table is created as follows: ** ** CREATE VIRTUAL TABLE USING fts3tokenize( ** , , ... ** ); ** ** The table created has the following schema: ** ** CREATE TABLE (input, token, start, end, position) ** ** When queried, the query must include a WHERE clause of type: ** ** input = ** ** The virtual table module tokenizes this , using the FTS3 ** tokenizer specified by the arguments to the CREATE VIRTUAL TABLE ** statement and returns one row for each token in the result. With ** fields set as follows: ** ** input: Always set to a copy of ** token: A token from the input. ** start: Byte offset of the token within the input . ** end: Byte offset of the byte immediately following the end of the ** token within the input string. ** pos: Token offset of token within input. ** */ /* #include "fts3Int.h" */ #if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) /* #include */ /* #include */ typedef struct Fts3tokTable Fts3tokTable; typedef struct Fts3tokCursor Fts3tokCursor; /* ** Virtual table structure. 
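** (As a concrete example of the module described in the file header above,
** the following is valid usage:
**
**   CREATE VIRTUAL TABLE tok1 USING fts3tokenize(porter);
**   SELECT token, start, end, position FROM tok1
**    WHERE input = 'This is a test sentence.';
**
** which returns one row for each token that the porter tokenizer extracts
** from the input string.)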
*/ struct Fts3tokTable { sqlite3_vtab base; /* Base class used by SQLite core */ const sqlite3_tokenizer_module *pMod; sqlite3_tokenizer *pTok; }; /* ** Virtual table cursor structure. */ struct Fts3tokCursor { sqlite3_vtab_cursor base; /* Base class used by SQLite core */ char *zInput; /* Input string */ sqlite3_tokenizer_cursor *pCsr; /* Cursor to iterate through zInput */ int iRowid; /* Current 'rowid' value */ const char *zToken; /* Current 'token' value */ int nToken; /* Size of zToken in bytes */ int iStart; /* Current 'start' value */ int iEnd; /* Current 'end' value */ int iPos; /* Current 'pos' value */ }; /* ** Query FTS for the tokenizer implementation named zName. */ static int fts3tokQueryTokenizer( Fts3Hash *pHash, const char *zName, const sqlite3_tokenizer_module **pp, char **pzErr ){ sqlite3_tokenizer_module *p; int nName = (int)strlen(zName); p = (sqlite3_tokenizer_module *)sqlite3Fts3HashFind(pHash, zName, nName+1); if( !p ){ sqlite3Fts3ErrMsg(pzErr, "unknown tokenizer: %s", zName); return SQLITE_ERROR; } *pp = p; return SQLITE_OK; } /* ** The second argument, argv[], is an array of pointers to nul-terminated ** strings. This function makes a copy of the array and strings into a ** single block of memory. It then dequotes any of the strings that appear ** to be quoted. ** ** If successful, output parameter *pazDequote is set to point at the ** array of dequoted strings and SQLITE_OK is returned. The caller is ** responsible for eventually calling sqlite3_free() to free the array ** in this case. Or, if an error occurs, an SQLite error code is returned. ** The final value of *pazDequote is undefined in this case. */ static int fts3tokDequoteArray( int argc, /* Number of elements in argv[] */ const char * const *argv, /* Input array */ char ***pazDequote /* Output array */ ){ int rc = SQLITE_OK; /* Return code */ if( argc==0 ){ *pazDequote = 0; }else{ int i; int nByte = 0; char **azDequote; for(i=0; ixCreate((nDequote>1 ? nDequote-1 : 0), azArg, &pTok); } if( rc==SQLITE_OK ){ pTab = (Fts3tokTable *)sqlite3_malloc(sizeof(Fts3tokTable)); if( pTab==0 ){ rc = SQLITE_NOMEM; } } if( rc==SQLITE_OK ){ memset(pTab, 0, sizeof(Fts3tokTable)); pTab->pMod = pMod; pTab->pTok = pTok; *ppVtab = &pTab->base; }else{ if( pTok ){ pMod->xDestroy(pTok); } } sqlite3_free(azDequote); return rc; } /* ** This function does the work for both the xDisconnect and xDestroy methods. ** These tables have no persistent representation of their own, so xDisconnect ** and xDestroy are identical operations. */ static int fts3tokDisconnectMethod(sqlite3_vtab *pVtab){ Fts3tokTable *pTab = (Fts3tokTable *)pVtab; pTab->pMod->xDestroy(pTab->pTok); sqlite3_free(pTab); return SQLITE_OK; } /* ** xBestIndex - Analyze a WHERE and ORDER BY clause. */ static int fts3tokBestIndexMethod( sqlite3_vtab *pVTab, sqlite3_index_info *pInfo ){ int i; UNUSED_PARAMETER(pVTab); for(i=0; inConstraint; i++){ if( pInfo->aConstraint[i].usable && pInfo->aConstraint[i].iColumn==0 && pInfo->aConstraint[i].op==SQLITE_INDEX_CONSTRAINT_EQ ){ pInfo->idxNum = 1; pInfo->aConstraintUsage[i].argvIndex = 1; pInfo->aConstraintUsage[i].omit = 1; pInfo->estimatedCost = 1; return SQLITE_OK; } } pInfo->idxNum = 0; assert( pInfo->estimatedCost>1000000.0 ); return SQLITE_OK; } /* ** xOpen - Open a cursor. 
*/ static int fts3tokOpenMethod(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCsr){ Fts3tokCursor *pCsr; UNUSED_PARAMETER(pVTab); pCsr = (Fts3tokCursor *)sqlite3_malloc(sizeof(Fts3tokCursor)); if( pCsr==0 ){ return SQLITE_NOMEM; } memset(pCsr, 0, sizeof(Fts3tokCursor)); *ppCsr = (sqlite3_vtab_cursor *)pCsr; return SQLITE_OK; } /* ** Reset the tokenizer cursor passed as the only argument. As if it had ** just been returned by fts3tokOpenMethod(). */ static void fts3tokResetCursor(Fts3tokCursor *pCsr){ if( pCsr->pCsr ){ Fts3tokTable *pTab = (Fts3tokTable *)(pCsr->base.pVtab); pTab->pMod->xClose(pCsr->pCsr); pCsr->pCsr = 0; } sqlite3_free(pCsr->zInput); pCsr->zInput = 0; pCsr->zToken = 0; pCsr->nToken = 0; pCsr->iStart = 0; pCsr->iEnd = 0; pCsr->iPos = 0; pCsr->iRowid = 0; } /* ** xClose - Close a cursor. */ static int fts3tokCloseMethod(sqlite3_vtab_cursor *pCursor){ Fts3tokCursor *pCsr = (Fts3tokCursor *)pCursor; fts3tokResetCursor(pCsr); sqlite3_free(pCsr); return SQLITE_OK; } /* ** xNext - Advance the cursor to the next row, if any. */ static int fts3tokNextMethod(sqlite3_vtab_cursor *pCursor){ Fts3tokCursor *pCsr = (Fts3tokCursor *)pCursor; Fts3tokTable *pTab = (Fts3tokTable *)(pCursor->pVtab); int rc; /* Return code */ pCsr->iRowid++; rc = pTab->pMod->xNext(pCsr->pCsr, &pCsr->zToken, &pCsr->nToken, &pCsr->iStart, &pCsr->iEnd, &pCsr->iPos ); if( rc!=SQLITE_OK ){ fts3tokResetCursor(pCsr); if( rc==SQLITE_DONE ) rc = SQLITE_OK; } return rc; } /* ** xFilter - Initialize a cursor to point at the start of its data. */ static int fts3tokFilterMethod( sqlite3_vtab_cursor *pCursor, /* The cursor used for this query */ int idxNum, /* Strategy index */ const char *idxStr, /* Unused */ int nVal, /* Number of elements in apVal */ sqlite3_value **apVal /* Arguments for the indexing scheme */ ){ int rc = SQLITE_ERROR; Fts3tokCursor *pCsr = (Fts3tokCursor *)pCursor; Fts3tokTable *pTab = (Fts3tokTable *)(pCursor->pVtab); UNUSED_PARAMETER(idxStr); UNUSED_PARAMETER(nVal); fts3tokResetCursor(pCsr); if( idxNum==1 ){ const char *zByte = (const char *)sqlite3_value_text(apVal[0]); int nByte = sqlite3_value_bytes(apVal[0]); pCsr->zInput = sqlite3_malloc(nByte+1); if( pCsr->zInput==0 ){ rc = SQLITE_NOMEM; }else{ memcpy(pCsr->zInput, zByte, nByte); pCsr->zInput[nByte] = 0; rc = pTab->pMod->xOpen(pTab->pTok, pCsr->zInput, nByte, &pCsr->pCsr); if( rc==SQLITE_OK ){ pCsr->pCsr->pTokenizer = pTab->pTok; } } } if( rc!=SQLITE_OK ) return rc; return fts3tokNextMethod(pCursor); } /* ** xEof - Return true if the cursor is at EOF, or false otherwise. */ static int fts3tokEofMethod(sqlite3_vtab_cursor *pCursor){ Fts3tokCursor *pCsr = (Fts3tokCursor *)pCursor; return (pCsr->zToken==0); } /* ** xColumn - Return a column value. */ static int fts3tokColumnMethod( sqlite3_vtab_cursor *pCursor, /* Cursor to retrieve value from */ sqlite3_context *pCtx, /* Context for sqlite3_result_xxx() calls */ int iCol /* Index of column to read value from */ ){ Fts3tokCursor *pCsr = (Fts3tokCursor *)pCursor; /* CREATE TABLE x(input, token, start, end, position) */ switch( iCol ){ case 0: sqlite3_result_text(pCtx, pCsr->zInput, -1, SQLITE_TRANSIENT); break; case 1: sqlite3_result_text(pCtx, pCsr->zToken, pCsr->nToken, SQLITE_TRANSIENT); break; case 2: sqlite3_result_int(pCtx, pCsr->iStart); break; case 3: sqlite3_result_int(pCtx, pCsr->iEnd); break; default: assert( iCol==4 ); sqlite3_result_int(pCtx, pCsr->iPos); break; } return SQLITE_OK; } /* ** xRowid - Return the current rowid for the cursor. 
*/ static int fts3tokRowidMethod( sqlite3_vtab_cursor *pCursor, /* Cursor to retrieve value from */ sqlite_int64 *pRowid /* OUT: Rowid value */ ){ Fts3tokCursor *pCsr = (Fts3tokCursor *)pCursor; *pRowid = (sqlite3_int64)pCsr->iRowid; return SQLITE_OK; } /* ** Register the fts3tok module with database connection db. Return SQLITE_OK ** if successful or an error code if sqlite3_create_module() fails. */ SQLITE_PRIVATE int sqlite3Fts3InitTok(sqlite3 *db, Fts3Hash *pHash){ static const sqlite3_module fts3tok_module = { 0, /* iVersion */ fts3tokConnectMethod, /* xCreate */ fts3tokConnectMethod, /* xConnect */ fts3tokBestIndexMethod, /* xBestIndex */ fts3tokDisconnectMethod, /* xDisconnect */ fts3tokDisconnectMethod, /* xDestroy */ fts3tokOpenMethod, /* xOpen */ fts3tokCloseMethod, /* xClose */ fts3tokFilterMethod, /* xFilter */ fts3tokNextMethod, /* xNext */ fts3tokEofMethod, /* xEof */ fts3tokColumnMethod, /* xColumn */ fts3tokRowidMethod, /* xRowid */ 0, /* xUpdate */ 0, /* xBegin */ 0, /* xSync */ 0, /* xCommit */ 0, /* xRollback */ 0, /* xFindFunction */ 0, /* xRename */ 0, /* xSavepoint */ 0, /* xRelease */ 0 /* xRollbackTo */ }; int rc; /* Return code */ rc = sqlite3_create_module(db, "fts3tokenize", &fts3tok_module, (void*)pHash); return rc; } #endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */ /************** End of fts3_tokenize_vtab.c **********************************/ /************** Begin file fts3_write.c **************************************/ /* ** 2009 Oct 23 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ****************************************************************************** ** ** This file is part of the SQLite FTS3 extension module. Specifically, ** this file contains code to insert, update and delete rows from FTS3 ** tables. It also contains code to merge FTS3 b-tree segments. Some ** of the sub-routines used to merge segments are also used by the query ** code in fts3.c. */ /* #include "fts3Int.h" */ #if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) /* #include */ /* #include */ /* #include */ #define FTS_MAX_APPENDABLE_HEIGHT 16 /* ** When full-text index nodes are loaded from disk, the buffer that they ** are loaded into has the following number of bytes of padding at the end ** of it. i.e. if a full-text index node is 900 bytes in size, then a buffer ** of 920 bytes is allocated for it. ** ** This means that if we have a pointer into a buffer containing node data, ** it is always safe to read up to two varints from it without risking an ** overread, even if the node data is corrupted. */ #define FTS3_NODE_PADDING (FTS3_VARINT_MAX*2) /* ** Under certain circumstances, b-tree nodes (doclists) can be loaded into ** memory incrementally instead of all at once. This can be a big performance ** win (reduced IO and CPU) if SQLite stops calling the virtual table xNext() ** method before retrieving all query results (as may happen, for example, ** if a query has a LIMIT clause). ** ** Incremental loading is used for b-tree nodes FTS3_NODE_CHUNK_THRESHOLD ** bytes and larger. Nodes are loaded in chunks of FTS3_NODE_CHUNKSIZE bytes. ** The code is written so that the hard lower-limit for each of these values ** is 1. Clearly such small values would be inefficient, but can be useful ** for testing purposes. 
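**
** As a rough illustration using the default (non-SQLITE_TEST) values
** defined below: a 20KB node exceeds the 16KB FTS3_NODE_CHUNK_THRESHOLD,
** so it is loaded on demand in 4KB FTS3_NODE_CHUNKSIZE chunks, whereas an
** 8KB node falls below the threshold and is loaded in a single read.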
** ** If this module is built with SQLITE_TEST defined, these constants may ** be overridden at runtime for testing purposes. File fts3_test.c contains ** a Tcl interface to read and write the values. */ #ifdef SQLITE_TEST int test_fts3_node_chunksize = (4*1024); int test_fts3_node_chunk_threshold = (4*1024)*4; # define FTS3_NODE_CHUNKSIZE test_fts3_node_chunksize # define FTS3_NODE_CHUNK_THRESHOLD test_fts3_node_chunk_threshold #else # define FTS3_NODE_CHUNKSIZE (4*1024) # define FTS3_NODE_CHUNK_THRESHOLD (FTS3_NODE_CHUNKSIZE*4) #endif /* ** The two values that may be meaningfully bound to the :1 parameter in ** statements SQL_REPLACE_STAT and SQL_SELECT_STAT. */ #define FTS_STAT_DOCTOTAL 0 #define FTS_STAT_INCRMERGEHINT 1 #define FTS_STAT_AUTOINCRMERGE 2 /* ** If FTS_LOG_MERGES is defined, call sqlite3_log() to report each automatic ** and incremental merge operation that takes place. This is used for ** debugging FTS only, it should not usually be turned on in production ** systems. */ #ifdef FTS3_LOG_MERGES static void fts3LogMerge(int nMerge, sqlite3_int64 iAbsLevel){ sqlite3_log(SQLITE_OK, "%d-way merge from level %d", nMerge, (int)iAbsLevel); } #else #define fts3LogMerge(x, y) #endif typedef struct PendingList PendingList; typedef struct SegmentNode SegmentNode; typedef struct SegmentWriter SegmentWriter; /* ** An instance of the following data structure is used to build doclists ** incrementally. See function fts3PendingListAppend() for details. */ struct PendingList { int nData; char *aData; int nSpace; sqlite3_int64 iLastDocid; sqlite3_int64 iLastCol; sqlite3_int64 iLastPos; }; /* ** Each cursor has a (possibly empty) linked list of the following objects. */ struct Fts3DeferredToken { Fts3PhraseToken *pToken; /* Pointer to corresponding expr token */ int iCol; /* Column token must occur in */ Fts3DeferredToken *pNext; /* Next in list of deferred tokens */ PendingList *pList; /* Doclist is assembled here */ }; /* ** An instance of this structure is used to iterate through the terms on ** a contiguous set of segment b-tree leaf nodes. Although the details of ** this structure are only manipulated by code in this file, opaque handles ** of type Fts3SegReader* are also used by code in fts3.c to iterate through ** terms when querying the full-text index. See functions: ** ** sqlite3Fts3SegReaderNew() ** sqlite3Fts3SegReaderFree() ** sqlite3Fts3SegReaderIterate() ** ** Methods used to manipulate Fts3SegReader structures: ** ** fts3SegReaderNext() ** fts3SegReaderFirstDocid() ** fts3SegReaderNextDocid() */ struct Fts3SegReader { int iIdx; /* Index within level, or 0x7FFFFFFF for PT */ u8 bLookup; /* True for a lookup only */ u8 rootOnly; /* True for a root-only reader */ sqlite3_int64 iStartBlock; /* Rowid of first leaf block to traverse */ sqlite3_int64 iLeafEndBlock; /* Rowid of final leaf block to traverse */ sqlite3_int64 iEndBlock; /* Rowid of final block in segment (or 0) */ sqlite3_int64 iCurrentBlock; /* Current leaf block (or 0) */ char *aNode; /* Pointer to node data (or NULL) */ int nNode; /* Size of buffer at aNode (or 0) */ int nPopulate; /* If >0, bytes of buffer aNode[] loaded */ sqlite3_blob *pBlob; /* If not NULL, blob handle to read node */ Fts3HashElem **ppNextElem; /* Variables set by fts3SegReaderNext(). These may be read directly ** by the caller. They are valid from the time SegmentReaderNew() returns ** until SegmentReaderNext() returns something other than SQLITE_OK ** (i.e. SQLITE_DONE). 
*/ int nTerm; /* Number of bytes in current term */ char *zTerm; /* Pointer to current term */ int nTermAlloc; /* Allocated size of zTerm buffer */ char *aDoclist; /* Pointer to doclist of current entry */ int nDoclist; /* Size of doclist in current entry */ /* The following variables are used by fts3SegReaderNextDocid() to iterate ** through the current doclist (aDoclist/nDoclist). */ char *pOffsetList; int nOffsetList; /* For descending pending seg-readers only */ sqlite3_int64 iDocid; }; #define fts3SegReaderIsPending(p) ((p)->ppNextElem!=0) #define fts3SegReaderIsRootOnly(p) ((p)->rootOnly!=0) /* ** An instance of this structure is used to create a segment b-tree in the ** database. The internal details of this type are only accessed by the ** following functions: ** ** fts3SegWriterAdd() ** fts3SegWriterFlush() ** fts3SegWriterFree() */ struct SegmentWriter { SegmentNode *pTree; /* Pointer to interior tree structure */ sqlite3_int64 iFirst; /* First slot in %_segments written */ sqlite3_int64 iFree; /* Next free slot in %_segments */ char *zTerm; /* Pointer to previous term buffer */ int nTerm; /* Number of bytes in zTerm */ int nMalloc; /* Size of malloc'd buffer at zMalloc */ char *zMalloc; /* Malloc'd space (possibly) used for zTerm */ int nSize; /* Size of allocation at aData */ int nData; /* Bytes of data in aData */ char *aData; /* Pointer to block from malloc() */ i64 nLeafData; /* Number of bytes of leaf data written */ }; /* ** Type SegmentNode is used by the following three functions to create ** the interior part of the segment b+-tree structures (everything except ** the leaf nodes). These functions and type are only ever used by code ** within the fts3SegWriterXXX() family of functions described above. ** ** fts3NodeAddTerm() ** fts3NodeWrite() ** fts3NodeFree() ** ** When a b+tree is written to the database (either as a result of a merge ** or the pending-terms table being flushed), leaves are written into the ** database file as soon as they are completely populated. The interior of ** the tree is assembled in memory and written out only once all leaves have ** been populated and stored. This is Ok, as the b+-tree fanout is usually ** very large, meaning that the interior of the tree consumes relatively ** little memory. */ struct SegmentNode { SegmentNode *pParent; /* Parent node (or NULL for root node) */ SegmentNode *pRight; /* Pointer to right-sibling */ SegmentNode *pLeftmost; /* Pointer to left-most node of this depth */ int nEntry; /* Number of terms written to node so far */ char *zTerm; /* Pointer to previous term buffer */ int nTerm; /* Number of bytes in zTerm */ int nMalloc; /* Size of malloc'd buffer at zMalloc */ char *zMalloc; /* Malloc'd space (possibly) used for zTerm */ int nData; /* Bytes of valid data so far */ char *aData; /* Node data */ }; /* ** Valid values for the second argument to fts3SqlStmt(). 
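**
** Each SQL_XXX constant below is an index into the azSql[] array defined
** inside fts3SqlStmt(), so the numbering here must be kept in sync with
** the order of the statements in that array.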
*/ #define SQL_DELETE_CONTENT 0 #define SQL_IS_EMPTY 1 #define SQL_DELETE_ALL_CONTENT 2 #define SQL_DELETE_ALL_SEGMENTS 3 #define SQL_DELETE_ALL_SEGDIR 4 #define SQL_DELETE_ALL_DOCSIZE 5 #define SQL_DELETE_ALL_STAT 6 #define SQL_SELECT_CONTENT_BY_ROWID 7 #define SQL_NEXT_SEGMENT_INDEX 8 #define SQL_INSERT_SEGMENTS 9 #define SQL_NEXT_SEGMENTS_ID 10 #define SQL_INSERT_SEGDIR 11 #define SQL_SELECT_LEVEL 12 #define SQL_SELECT_LEVEL_RANGE 13 #define SQL_SELECT_LEVEL_COUNT 14 #define SQL_SELECT_SEGDIR_MAX_LEVEL 15 #define SQL_DELETE_SEGDIR_LEVEL 16 #define SQL_DELETE_SEGMENTS_RANGE 17 #define SQL_CONTENT_INSERT 18 #define SQL_DELETE_DOCSIZE 19 #define SQL_REPLACE_DOCSIZE 20 #define SQL_SELECT_DOCSIZE 21 #define SQL_SELECT_STAT 22 #define SQL_REPLACE_STAT 23 #define SQL_SELECT_ALL_PREFIX_LEVEL 24 #define SQL_DELETE_ALL_TERMS_SEGDIR 25 #define SQL_DELETE_SEGDIR_RANGE 26 #define SQL_SELECT_ALL_LANGID 27 #define SQL_FIND_MERGE_LEVEL 28 #define SQL_MAX_LEAF_NODE_ESTIMATE 29 #define SQL_DELETE_SEGDIR_ENTRY 30 #define SQL_SHIFT_SEGDIR_ENTRY 31 #define SQL_SELECT_SEGDIR 32 #define SQL_CHOMP_SEGDIR 33 #define SQL_SEGMENT_IS_APPENDABLE 34 #define SQL_SELECT_INDEXES 35 #define SQL_SELECT_MXLEVEL 36 #define SQL_SELECT_LEVEL_RANGE2 37 #define SQL_UPDATE_LEVEL_IDX 38 #define SQL_UPDATE_LEVEL 39 /* ** This function is used to obtain an SQLite prepared statement handle ** for the statement identified by the second argument. If successful, ** *pp is set to the requested statement handle and SQLITE_OK returned. ** Otherwise, an SQLite error code is returned and *pp is set to 0. ** ** If argument apVal is not NULL, then it must point to an array with ** at least as many entries as the requested statement has bound ** parameters. The values are bound to the statements parameters before ** returning. */ static int fts3SqlStmt( Fts3Table *p, /* Virtual table handle */ int eStmt, /* One of the SQL_XXX constants above */ sqlite3_stmt **pp, /* OUT: Statement handle */ sqlite3_value **apVal /* Values to bind to statement */ ){ const char *azSql[] = { /* 0 */ "DELETE FROM %Q.'%q_content' WHERE rowid = ?", /* 1 */ "SELECT NOT EXISTS(SELECT docid FROM %Q.'%q_content' WHERE rowid!=?)", /* 2 */ "DELETE FROM %Q.'%q_content'", /* 3 */ "DELETE FROM %Q.'%q_segments'", /* 4 */ "DELETE FROM %Q.'%q_segdir'", /* 5 */ "DELETE FROM %Q.'%q_docsize'", /* 6 */ "DELETE FROM %Q.'%q_stat'", /* 7 */ "SELECT %s WHERE rowid=?", /* 8 */ "SELECT (SELECT max(idx) FROM %Q.'%q_segdir' WHERE level = ?) + 1", /* 9 */ "REPLACE INTO %Q.'%q_segments'(blockid, block) VALUES(?, ?)", /* 10 */ "SELECT coalesce((SELECT max(blockid) FROM %Q.'%q_segments') + 1, 1)", /* 11 */ "REPLACE INTO %Q.'%q_segdir' VALUES(?,?,?,?,?,?)", /* Return segments in order from oldest to newest.*/ /* 12 */ "SELECT idx, start_block, leaves_end_block, end_block, root " "FROM %Q.'%q_segdir' WHERE level = ? ORDER BY idx ASC", /* 13 */ "SELECT idx, start_block, leaves_end_block, end_block, root " "FROM %Q.'%q_segdir' WHERE level BETWEEN ? AND ?" "ORDER BY level DESC, idx ASC", /* 14 */ "SELECT count(*) FROM %Q.'%q_segdir' WHERE level = ?", /* 15 */ "SELECT max(level) FROM %Q.'%q_segdir' WHERE level BETWEEN ? AND ?", /* 16 */ "DELETE FROM %Q.'%q_segdir' WHERE level = ?", /* 17 */ "DELETE FROM %Q.'%q_segments' WHERE blockid BETWEEN ? 
AND ?", /* 18 */ "INSERT INTO %Q.'%q_content' VALUES(%s)", /* 19 */ "DELETE FROM %Q.'%q_docsize' WHERE docid = ?", /* 20 */ "REPLACE INTO %Q.'%q_docsize' VALUES(?,?)", /* 21 */ "SELECT size FROM %Q.'%q_docsize' WHERE docid=?", /* 22 */ "SELECT value FROM %Q.'%q_stat' WHERE id=?", /* 23 */ "REPLACE INTO %Q.'%q_stat' VALUES(?,?)", /* 24 */ "", /* 25 */ "", /* 26 */ "DELETE FROM %Q.'%q_segdir' WHERE level BETWEEN ? AND ?", /* 27 */ "SELECT ? UNION SELECT level / (1024 * ?) FROM %Q.'%q_segdir'", /* This statement is used to determine which level to read the input from ** when performing an incremental merge. It returns the absolute level number ** of the oldest level in the db that contains at least ? segments. Or, ** if no level in the FTS index contains more than ? segments, the statement ** returns zero rows. */ /* 28 */ "SELECT level FROM %Q.'%q_segdir' GROUP BY level HAVING count(*)>=?" " ORDER BY (level %% 1024) ASC LIMIT 1", /* Estimate the upper limit on the number of leaf nodes in a new segment ** created by merging the oldest :2 segments from absolute level :1. See ** function sqlite3Fts3Incrmerge() for details. */ /* 29 */ "SELECT 2 * total(1 + leaves_end_block - start_block) " " FROM %Q.'%q_segdir' WHERE level = ? AND idx < ?", /* SQL_DELETE_SEGDIR_ENTRY ** Delete the %_segdir entry on absolute level :1 with index :2. */ /* 30 */ "DELETE FROM %Q.'%q_segdir' WHERE level = ? AND idx = ?", /* SQL_SHIFT_SEGDIR_ENTRY ** Modify the idx value for the segment with idx=:3 on absolute level :2 ** to :1. */ /* 31 */ "UPDATE %Q.'%q_segdir' SET idx = ? WHERE level=? AND idx=?", /* SQL_SELECT_SEGDIR ** Read a single entry from the %_segdir table. The entry from absolute ** level :1 with index value :2. */ /* 32 */ "SELECT idx, start_block, leaves_end_block, end_block, root " "FROM %Q.'%q_segdir' WHERE level = ? AND idx = ?", /* SQL_CHOMP_SEGDIR ** Update the start_block (:1) and root (:2) fields of the %_segdir ** entry located on absolute level :3 with index :4. */ /* 33 */ "UPDATE %Q.'%q_segdir' SET start_block = ?, root = ?" "WHERE level = ? AND idx = ?", /* SQL_SEGMENT_IS_APPENDABLE ** Return a single row if the segment with end_block=? is appendable. Or ** no rows otherwise. */ /* 34 */ "SELECT 1 FROM %Q.'%q_segments' WHERE blockid=? AND block IS NULL", /* SQL_SELECT_INDEXES ** Return the list of valid segment indexes for absolute level ? */ /* 35 */ "SELECT idx FROM %Q.'%q_segdir' WHERE level=? ORDER BY 1 ASC", /* SQL_SELECT_MXLEVEL ** Return the largest relative level in the FTS index or indexes. */ /* 36 */ "SELECT max( level %% 1024 ) FROM %Q.'%q_segdir'", /* Return segments in order from oldest to newest.*/ /* 37 */ "SELECT level, idx, end_block " "FROM %Q.'%q_segdir' WHERE level BETWEEN ? AND ? " "ORDER BY level DESC, idx ASC", /* Update statements used while promoting segments */ /* 38 */ "UPDATE OR FAIL %Q.'%q_segdir' SET level=-1,idx=? " "WHERE level=? AND idx=?", /* 39 */ "UPDATE OR FAIL %Q.'%q_segdir' SET level=? 
WHERE level=-1" }; int rc = SQLITE_OK; sqlite3_stmt *pStmt; assert( SizeofArray(azSql)==SizeofArray(p->aStmt) ); assert( eStmt=0 ); pStmt = p->aStmt[eStmt]; if( !pStmt ){ char *zSql; if( eStmt==SQL_CONTENT_INSERT ){ zSql = sqlite3_mprintf(azSql[eStmt], p->zDb, p->zName, p->zWriteExprlist); }else if( eStmt==SQL_SELECT_CONTENT_BY_ROWID ){ zSql = sqlite3_mprintf(azSql[eStmt], p->zReadExprlist); }else{ zSql = sqlite3_mprintf(azSql[eStmt], p->zDb, p->zName); } if( !zSql ){ rc = SQLITE_NOMEM; }else{ rc = sqlite3_prepare_v2(p->db, zSql, -1, &pStmt, NULL); sqlite3_free(zSql); assert( rc==SQLITE_OK || pStmt==0 ); p->aStmt[eStmt] = pStmt; } } if( apVal ){ int i; int nParam = sqlite3_bind_parameter_count(pStmt); for(i=0; rc==SQLITE_OK && inPendingData==0 ){ sqlite3_stmt *pStmt; rc = fts3SqlStmt(p, SQL_DELETE_SEGDIR_LEVEL, &pStmt, 0); if( rc==SQLITE_OK ){ sqlite3_bind_null(pStmt, 1); sqlite3_step(pStmt); rc = sqlite3_reset(pStmt); } } return rc; } /* ** FTS maintains a separate indexes for each language-id (a 32-bit integer). ** Within each language id, a separate index is maintained to store the ** document terms, and each configured prefix size (configured the FTS ** "prefix=" option). And each index consists of multiple levels ("relative ** levels"). ** ** All three of these values (the language id, the specific index and the ** level within the index) are encoded in 64-bit integer values stored ** in the %_segdir table on disk. This function is used to convert three ** separate component values into the single 64-bit integer value that ** can be used to query the %_segdir table. ** ** Specifically, each language-id/index combination is allocated 1024 ** 64-bit integer level values ("absolute levels"). The main terms index ** for language-id 0 is allocate values 0-1023. The first prefix index ** (if any) for language-id 0 is allocated values 1024-2047. And so on. ** Language 1 indexes are allocated immediately following language 0. ** ** So, for a system with nPrefix prefix indexes configured, the block of ** absolute levels that corresponds to language-id iLangid and index ** iIndex starts at absolute level ((iLangid * (nPrefix+1) + iIndex) * 1024). */ static sqlite3_int64 getAbsoluteLevel( Fts3Table *p, /* FTS3 table handle */ int iLangid, /* Language id */ int iIndex, /* Index in p->aIndex[] */ int iLevel /* Level of segments */ ){ sqlite3_int64 iBase; /* First absolute level for iLangid/iIndex */ assert( iLangid>=0 ); assert( p->nIndex>0 ); assert( iIndex>=0 && iIndexnIndex ); iBase = ((sqlite3_int64)iLangid * p->nIndex + iIndex) * FTS3_SEGDIR_MAXLEVEL; return iBase + iLevel; } /* ** Set *ppStmt to a statement handle that may be used to iterate through ** all rows in the %_segdir table, from oldest to newest. If successful, ** return SQLITE_OK. If an error occurs while preparing the statement, ** return an SQLite error code. ** ** There is only ever one instance of this SQL statement compiled for ** each FTS3 table. 
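**
** A worked example of the absolute-level range bound below (figures are
** hypothetical): with one prefix index configured, p->nIndex is 2, so
** language-id 1 with index 0 occupies absolute levels 2048..3071, since
** getAbsoluteLevel() computes (iLangid * p->nIndex + iIndex) * 1024 plus
** the relative level.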
** ** The statement returns the following columns from the %_segdir table: ** ** 0: idx ** 1: start_block ** 2: leaves_end_block ** 3: end_block ** 4: root */ SQLITE_PRIVATE int sqlite3Fts3AllSegdirs( Fts3Table *p, /* FTS3 table */ int iLangid, /* Language being queried */ int iIndex, /* Index for p->aIndex[] */ int iLevel, /* Level to select (relative level) */ sqlite3_stmt **ppStmt /* OUT: Compiled statement */ ){ int rc; sqlite3_stmt *pStmt = 0; assert( iLevel==FTS3_SEGCURSOR_ALL || iLevel>=0 ); assert( iLevel=0 && iIndexnIndex ); if( iLevel<0 ){ /* "SELECT * FROM %_segdir WHERE level BETWEEN ? AND ? ORDER BY ..." */ rc = fts3SqlStmt(p, SQL_SELECT_LEVEL_RANGE, &pStmt, 0); if( rc==SQLITE_OK ){ sqlite3_bind_int64(pStmt, 1, getAbsoluteLevel(p, iLangid, iIndex, 0)); sqlite3_bind_int64(pStmt, 2, getAbsoluteLevel(p, iLangid, iIndex, FTS3_SEGDIR_MAXLEVEL-1) ); } }else{ /* "SELECT * FROM %_segdir WHERE level = ? ORDER BY ..." */ rc = fts3SqlStmt(p, SQL_SELECT_LEVEL, &pStmt, 0); if( rc==SQLITE_OK ){ sqlite3_bind_int64(pStmt, 1, getAbsoluteLevel(p, iLangid, iIndex,iLevel)); } } *ppStmt = pStmt; return rc; } /* ** Append a single varint to a PendingList buffer. SQLITE_OK is returned ** if successful, or an SQLite error code otherwise. ** ** This function also serves to allocate the PendingList structure itself. ** For example, to create a new PendingList structure containing two ** varints: ** ** PendingList *p = 0; ** fts3PendingListAppendVarint(&p, 1); ** fts3PendingListAppendVarint(&p, 2); */ static int fts3PendingListAppendVarint( PendingList **pp, /* IN/OUT: Pointer to PendingList struct */ sqlite3_int64 i /* Value to append to data */ ){ PendingList *p = *pp; /* Allocate or grow the PendingList as required. */ if( !p ){ p = sqlite3_malloc(sizeof(*p) + 100); if( !p ){ return SQLITE_NOMEM; } p->nSpace = 100; p->aData = (char *)&p[1]; p->nData = 0; } else if( p->nData+FTS3_VARINT_MAX+1>p->nSpace ){ int nNew = p->nSpace * 2; p = sqlite3_realloc(p, sizeof(*p) + nNew); if( !p ){ sqlite3_free(*pp); *pp = 0; return SQLITE_NOMEM; } p->nSpace = nNew; p->aData = (char *)&p[1]; } /* Append the new serialized varint to the end of the list. */ p->nData += sqlite3Fts3PutVarint(&p->aData[p->nData], i); p->aData[p->nData] = '\0'; *pp = p; return SQLITE_OK; } /* ** Add a docid/column/position entry to a PendingList structure. Non-zero ** is returned if the structure is sqlite3_realloced as part of adding ** the entry. Otherwise, zero. ** ** If an OOM error occurs, *pRc is set to SQLITE_NOMEM before returning. ** Zero is always returned in this case. Otherwise, if no OOM error occurs, ** it is set to SQLITE_OK. */ static int fts3PendingListAppend( PendingList **pp, /* IN/OUT: PendingList structure */ sqlite3_int64 iDocid, /* Docid for entry to add */ sqlite3_int64 iCol, /* Column for entry to add */ sqlite3_int64 iPos, /* Position of term for entry to add */ int *pRc /* OUT: Return code */ ){ PendingList *p = *pp; int rc = SQLITE_OK; assert( !p || p->iLastDocid<=iDocid ); if( !p || p->iLastDocid!=iDocid ){ sqlite3_int64 iDelta = iDocid - (p ? 
p->iLastDocid : 0); if( p ){ assert( p->nDatanSpace ); assert( p->aData[p->nData]==0 ); p->nData++; } if( SQLITE_OK!=(rc = fts3PendingListAppendVarint(&p, iDelta)) ){ goto pendinglistappend_out; } p->iLastCol = -1; p->iLastPos = 0; p->iLastDocid = iDocid; } if( iCol>0 && p->iLastCol!=iCol ){ if( SQLITE_OK!=(rc = fts3PendingListAppendVarint(&p, 1)) || SQLITE_OK!=(rc = fts3PendingListAppendVarint(&p, iCol)) ){ goto pendinglistappend_out; } p->iLastCol = iCol; p->iLastPos = 0; } if( iCol>=0 ){ assert( iPos>p->iLastPos || (iPos==0 && p->iLastPos==0) ); rc = fts3PendingListAppendVarint(&p, 2+iPos-p->iLastPos); if( rc==SQLITE_OK ){ p->iLastPos = iPos; } } pendinglistappend_out: *pRc = rc; if( p!=*pp ){ *pp = p; return 1; } return 0; } /* ** Free a PendingList object allocated by fts3PendingListAppend(). */ static void fts3PendingListDelete(PendingList *pList){ sqlite3_free(pList); } /* ** Add an entry to one of the pending-terms hash tables. */ static int fts3PendingTermsAddOne( Fts3Table *p, int iCol, int iPos, Fts3Hash *pHash, /* Pending terms hash table to add entry to */ const char *zToken, int nToken ){ PendingList *pList; int rc = SQLITE_OK; pList = (PendingList *)fts3HashFind(pHash, zToken, nToken); if( pList ){ p->nPendingData -= (pList->nData + nToken + sizeof(Fts3HashElem)); } if( fts3PendingListAppend(&pList, p->iPrevDocid, iCol, iPos, &rc) ){ if( pList==fts3HashInsert(pHash, zToken, nToken, pList) ){ /* Malloc failed while inserting the new entry. This can only ** happen if there was no previous entry for this token. */ assert( 0==fts3HashFind(pHash, zToken, nToken) ); sqlite3_free(pList); rc = SQLITE_NOMEM; } } if( rc==SQLITE_OK ){ p->nPendingData += (pList->nData + nToken + sizeof(Fts3HashElem)); } return rc; } /* ** Tokenize the nul-terminated string zText and add all tokens to the ** pending-terms hash-table. The docid used is that currently stored in ** p->iPrevDocid, and the column is specified by argument iCol. ** ** If successful, SQLITE_OK is returned. Otherwise, an SQLite error code. */ static int fts3PendingTermsAdd( Fts3Table *p, /* Table into which text will be inserted */ int iLangid, /* Language id to use */ const char *zText, /* Text of document to be inserted */ int iCol, /* Column into which text is being inserted */ u32 *pnWord /* IN/OUT: Incr. by number tokens inserted */ ){ int rc; int iStart = 0; int iEnd = 0; int iPos = 0; int nWord = 0; char const *zToken; int nToken = 0; sqlite3_tokenizer *pTokenizer = p->pTokenizer; sqlite3_tokenizer_module const *pModule = pTokenizer->pModule; sqlite3_tokenizer_cursor *pCsr; int (*xNext)(sqlite3_tokenizer_cursor *pCursor, const char**,int*,int*,int*,int*); assert( pTokenizer && pModule ); /* If the user has inserted a NULL value, this function may be called with ** zText==0. In this case, add zero token entries to the hash table and ** return early. */ if( zText==0 ){ *pnWord = 0; return SQLITE_OK; } rc = sqlite3Fts3OpenTokenizer(pTokenizer, iLangid, zText, -1, &pCsr); if( rc!=SQLITE_OK ){ return rc; } xNext = pModule->xNext; while( SQLITE_OK==rc && SQLITE_OK==(rc = xNext(pCsr, &zToken, &nToken, &iStart, &iEnd, &iPos)) ){ int i; if( iPos>=nWord ) nWord = iPos+1; /* Positions cannot be negative; we use -1 as a terminator internally. ** Tokens must have a non-zero length. 
*/ if( iPos<0 || !zToken || nToken<=0 ){ rc = SQLITE_ERROR; break; } /* Add the term to the terms index */ rc = fts3PendingTermsAddOne( p, iCol, iPos, &p->aIndex[0].hPending, zToken, nToken ); /* Add the term to each of the prefix indexes that it is not too ** short for. */ for(i=1; rc==SQLITE_OK && inIndex; i++){ struct Fts3Index *pIndex = &p->aIndex[i]; if( nTokennPrefix ) continue; rc = fts3PendingTermsAddOne( p, iCol, iPos, &pIndex->hPending, zToken, pIndex->nPrefix ); } } pModule->xClose(pCsr); *pnWord += nWord; return (rc==SQLITE_DONE ? SQLITE_OK : rc); } /* ** Calling this function indicates that subsequent calls to ** fts3PendingTermsAdd() are to add term/position-list pairs for the ** contents of the document with docid iDocid. */ static int fts3PendingTermsDocid( Fts3Table *p, /* Full-text table handle */ int iLangid, /* Language id of row being written */ sqlite_int64 iDocid /* Docid of row being written */ ){ assert( iLangid>=0 ); /* TODO(shess) Explore whether partially flushing the buffer on ** forced-flush would provide better performance. I suspect that if ** we ordered the doclists by size and flushed the largest until the ** buffer was half empty, that would let the less frequent terms ** generate longer doclists. */ if( iDocid<=p->iPrevDocid || p->iPrevLangid!=iLangid || p->nPendingData>p->nMaxPendingData ){ int rc = sqlite3Fts3PendingTermsFlush(p); if( rc!=SQLITE_OK ) return rc; } p->iPrevDocid = iDocid; p->iPrevLangid = iLangid; return SQLITE_OK; } /* ** Discard the contents of the pending-terms hash tables. */ SQLITE_PRIVATE void sqlite3Fts3PendingTermsClear(Fts3Table *p){ int i; for(i=0; inIndex; i++){ Fts3HashElem *pElem; Fts3Hash *pHash = &p->aIndex[i].hPending; for(pElem=fts3HashFirst(pHash); pElem; pElem=fts3HashNext(pElem)){ PendingList *pList = (PendingList *)fts3HashData(pElem); fts3PendingListDelete(pList); } fts3HashClear(pHash); } p->nPendingData = 0; } /* ** This function is called by the xUpdate() method as part of an INSERT ** operation. It adds entries for each term in the new record to the ** pendingTerms hash table. ** ** Argument apVal is the same as the similarly named argument passed to ** fts3InsertData(). Parameter iDocid is the docid of the new row. */ static int fts3InsertTerms( Fts3Table *p, int iLangid, sqlite3_value **apVal, u32 *aSz ){ int i; /* Iterator variable */ for(i=2; inColumn+2; i++){ int iCol = i-2; if( p->abNotindexed[iCol]==0 ){ const char *zText = (const char *)sqlite3_value_text(apVal[i]); int rc = fts3PendingTermsAdd(p, iLangid, zText, iCol, &aSz[iCol]); if( rc!=SQLITE_OK ){ return rc; } aSz[p->nColumn] += sqlite3_value_bytes(apVal[i]); } } return SQLITE_OK; } /* ** This function is called by the xUpdate() method for an INSERT operation. ** The apVal parameter is passed a copy of the apVal argument passed by ** SQLite to the xUpdate() method. i.e: ** ** apVal[0] Not used for INSERT. ** apVal[1] rowid ** apVal[2] Left-most user-defined column ** ... ** apVal[p->nColumn+1] Right-most user-defined column ** apVal[p->nColumn+2] Hidden column with same name as table ** apVal[p->nColumn+3] Hidden "docid" column (alias for rowid) ** apVal[p->nColumn+4] Hidden languageid column */ static int fts3InsertData( Fts3Table *p, /* Full-text table */ sqlite3_value **apVal, /* Array of values to insert */ sqlite3_int64 *piDocid /* OUT: Docid for row just inserted */ ){ int rc; /* Return code */ sqlite3_stmt *pContentInsert; /* INSERT INTO %_content VALUES(...) 
*/ if( p->zContentTbl ){ sqlite3_value *pRowid = apVal[p->nColumn+3]; if( sqlite3_value_type(pRowid)==SQLITE_NULL ){ pRowid = apVal[1]; } if( sqlite3_value_type(pRowid)!=SQLITE_INTEGER ){ return SQLITE_CONSTRAINT; } *piDocid = sqlite3_value_int64(pRowid); return SQLITE_OK; } /* Locate the statement handle used to insert data into the %_content ** table. The SQL for this statement is: ** ** INSERT INTO %_content VALUES(?, ?, ?, ...) ** ** The statement features N '?' variables, where N is the number of user ** defined columns in the FTS3 table, plus one for the docid field. */ rc = fts3SqlStmt(p, SQL_CONTENT_INSERT, &pContentInsert, &apVal[1]); if( rc==SQLITE_OK && p->zLanguageid ){ rc = sqlite3_bind_int( pContentInsert, p->nColumn+2, sqlite3_value_int(apVal[p->nColumn+4]) ); } if( rc!=SQLITE_OK ) return rc; /* There is a quirk here. The users INSERT statement may have specified ** a value for the "rowid" field, for the "docid" field, or for both. ** Which is a problem, since "rowid" and "docid" are aliases for the ** same value. For example: ** ** INSERT INTO fts3tbl(rowid, docid) VALUES(1, 2); ** ** In FTS3, this is an error. It is an error to specify non-NULL values ** for both docid and some other rowid alias. */ if( SQLITE_NULL!=sqlite3_value_type(apVal[3+p->nColumn]) ){ if( SQLITE_NULL==sqlite3_value_type(apVal[0]) && SQLITE_NULL!=sqlite3_value_type(apVal[1]) ){ /* A rowid/docid conflict. */ return SQLITE_ERROR; } rc = sqlite3_bind_value(pContentInsert, 1, apVal[3+p->nColumn]); if( rc!=SQLITE_OK ) return rc; } /* Execute the statement to insert the record. Set *piDocid to the ** new docid value. */ sqlite3_step(pContentInsert); rc = sqlite3_reset(pContentInsert); *piDocid = sqlite3_last_insert_rowid(p->db); return rc; } /* ** Remove all data from the FTS3 table. Clear the hash table containing ** pending terms. */ static int fts3DeleteAll(Fts3Table *p, int bContent){ int rc = SQLITE_OK; /* Return code */ /* Discard the contents of the pending-terms hash table. */ sqlite3Fts3PendingTermsClear(p); /* Delete everything from the shadow tables. Except, leave %_content as ** is if bContent is false. */ assert( p->zContentTbl==0 || bContent==0 ); if( bContent ) fts3SqlExec(&rc, p, SQL_DELETE_ALL_CONTENT, 0); fts3SqlExec(&rc, p, SQL_DELETE_ALL_SEGMENTS, 0); fts3SqlExec(&rc, p, SQL_DELETE_ALL_SEGDIR, 0); if( p->bHasDocsize ){ fts3SqlExec(&rc, p, SQL_DELETE_ALL_DOCSIZE, 0); } if( p->bHasStat ){ fts3SqlExec(&rc, p, SQL_DELETE_ALL_STAT, 0); } return rc; } /* ** */ static int langidFromSelect(Fts3Table *p, sqlite3_stmt *pSelect){ int iLangid = 0; if( p->zLanguageid ) iLangid = sqlite3_column_int(pSelect, p->nColumn+1); return iLangid; } /* ** The first element in the apVal[] array is assumed to contain the docid ** (an integer) of a row about to be deleted. Remove all terms from the ** full-text index. 
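**
** A note on how the deletion is recorded (see the loop below): the stored
** text of the row is re-tokenized and each term is passed to
** fts3PendingTermsAdd() with a column value of -1. With a negative column,
** fts3PendingListAppend() appends only the docid delta, producing an empty
** position list that acts as a delete marker for that docid.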
*/ static void fts3DeleteTerms( int *pRC, /* Result code */ Fts3Table *p, /* The FTS table to delete from */ sqlite3_value *pRowid, /* The docid to be deleted */ u32 *aSz, /* Sizes of deleted document written here */ int *pbFound /* OUT: Set to true if row really does exist */ ){ int rc; sqlite3_stmt *pSelect; assert( *pbFound==0 ); if( *pRC ) return; rc = fts3SqlStmt(p, SQL_SELECT_CONTENT_BY_ROWID, &pSelect, &pRowid); if( rc==SQLITE_OK ){ if( SQLITE_ROW==sqlite3_step(pSelect) ){ int i; int iLangid = langidFromSelect(p, pSelect); rc = fts3PendingTermsDocid(p, iLangid, sqlite3_column_int64(pSelect, 0)); for(i=1; rc==SQLITE_OK && i<=p->nColumn; i++){ int iCol = i-1; if( p->abNotindexed[iCol]==0 ){ const char *zText = (const char *)sqlite3_column_text(pSelect, i); rc = fts3PendingTermsAdd(p, iLangid, zText, -1, &aSz[iCol]); aSz[p->nColumn] += sqlite3_column_bytes(pSelect, i); } } if( rc!=SQLITE_OK ){ sqlite3_reset(pSelect); *pRC = rc; return; } *pbFound = 1; } rc = sqlite3_reset(pSelect); }else{ sqlite3_reset(pSelect); } *pRC = rc; } /* ** Forward declaration to account for the circular dependency between ** functions fts3SegmentMerge() and fts3AllocateSegdirIdx(). */ static int fts3SegmentMerge(Fts3Table *, int, int, int); /* ** This function allocates a new level iLevel index in the segdir table. ** Usually, indexes are allocated within a level sequentially starting ** with 0, so the allocated index is one greater than the value returned ** by: ** ** SELECT max(idx) FROM %_segdir WHERE level = :iLevel ** ** However, if there are already FTS3_MERGE_COUNT indexes at the requested ** level, they are merged into a single level (iLevel+1) segment and the ** allocated index is 0. ** ** If successful, *piIdx is set to the allocated index slot and SQLITE_OK ** returned. Otherwise, an SQLite error code is returned. */ static int fts3AllocateSegdirIdx( Fts3Table *p, int iLangid, /* Language id */ int iIndex, /* Index for p->aIndex */ int iLevel, int *piIdx ){ int rc; /* Return Code */ sqlite3_stmt *pNextIdx; /* Query for next idx at level iLevel */ int iNext = 0; /* Result of query pNextIdx */ assert( iLangid>=0 ); assert( p->nIndex>=1 ); /* Set variable iNext to the next available segdir index at level iLevel. */ rc = fts3SqlStmt(p, SQL_NEXT_SEGMENT_INDEX, &pNextIdx, 0); if( rc==SQLITE_OK ){ sqlite3_bind_int64( pNextIdx, 1, getAbsoluteLevel(p, iLangid, iIndex, iLevel) ); if( SQLITE_ROW==sqlite3_step(pNextIdx) ){ iNext = sqlite3_column_int(pNextIdx, 0); } rc = sqlite3_reset(pNextIdx); } if( rc==SQLITE_OK ){ /* If iNext is FTS3_MERGE_COUNT, indicating that level iLevel is already ** full, merge all segments in level iLevel into a single iLevel+1 ** segment and allocate (newly freed) index 0 at level iLevel. Otherwise, ** if iNext is less than FTS3_MERGE_COUNT, allocate index iNext. */ if( iNext>=FTS3_MERGE_COUNT ){ fts3LogMerge(16, getAbsoluteLevel(p, iLangid, iIndex, iLevel)); rc = fts3SegmentMerge(p, iLangid, iIndex, iLevel); *piIdx = 0; }else{ *piIdx = iNext; } } return rc; } /* ** The %_segments table is declared as follows: ** ** CREATE TABLE %_segments(blockid INTEGER PRIMARY KEY, block BLOB) ** ** This function reads data from a single row of the %_segments table. The ** specific row is identified by the iBlockid parameter. If paBlob is not ** NULL, then a buffer is allocated using sqlite3_malloc() and populated ** with the contents of the blob stored in the "block" column of the ** identified table row is. 
Whether or not paBlob is NULL, *pnBlob is set ** to the size of the blob in bytes before returning. ** ** If an error occurs, or the table does not contain the specified row, ** an SQLite error code is returned. Otherwise, SQLITE_OK is returned. If ** paBlob is non-NULL, then it is the responsibility of the caller to ** eventually free the returned buffer. ** ** This function may leave an open sqlite3_blob* handle in the ** Fts3Table.pSegments variable. This handle is reused by subsequent calls ** to this function. The handle may be closed by calling the ** sqlite3Fts3SegmentsClose() function. Reusing a blob handle is a handy ** performance improvement, but the blob handle should always be closed ** before control is returned to the user (to prevent a lock being held ** on the database file for longer than necessary). Thus, any virtual table ** method (xFilter etc.) that may directly or indirectly call this function ** must call sqlite3Fts3SegmentsClose() before returning. */ SQLITE_PRIVATE int sqlite3Fts3ReadBlock( Fts3Table *p, /* FTS3 table handle */ sqlite3_int64 iBlockid, /* Access the row with blockid=$iBlockid */ char **paBlob, /* OUT: Blob data in malloc'd buffer */ int *pnBlob, /* OUT: Size of blob data */ int *pnLoad /* OUT: Bytes actually loaded */ ){ int rc; /* Return code */ /* pnBlob must be non-NULL. paBlob may be NULL or non-NULL. */ assert( pnBlob ); if( p->pSegments ){ rc = sqlite3_blob_reopen(p->pSegments, iBlockid); }else{ if( 0==p->zSegmentsTbl ){ p->zSegmentsTbl = sqlite3_mprintf("%s_segments", p->zName); if( 0==p->zSegmentsTbl ) return SQLITE_NOMEM; } rc = sqlite3_blob_open( p->db, p->zDb, p->zSegmentsTbl, "block", iBlockid, 0, &p->pSegments ); } if( rc==SQLITE_OK ){ int nByte = sqlite3_blob_bytes(p->pSegments); *pnBlob = nByte; if( paBlob ){ char *aByte = sqlite3_malloc(nByte + FTS3_NODE_PADDING); if( !aByte ){ rc = SQLITE_NOMEM; }else{ if( pnLoad && nByte>(FTS3_NODE_CHUNK_THRESHOLD) ){ nByte = FTS3_NODE_CHUNKSIZE; *pnLoad = nByte; } rc = sqlite3_blob_read(p->pSegments, aByte, nByte, 0); memset(&aByte[nByte], 0, FTS3_NODE_PADDING); if( rc!=SQLITE_OK ){ sqlite3_free(aByte); aByte = 0; } } *paBlob = aByte; } } return rc; } /* ** Close the blob handle at p->pSegments, if it is open. See comments above ** the sqlite3Fts3ReadBlock() function for details. */ SQLITE_PRIVATE void sqlite3Fts3SegmentsClose(Fts3Table *p){ sqlite3_blob_close(p->pSegments); p->pSegments = 0; } static int fts3SegReaderIncrRead(Fts3SegReader *pReader){ int nRead; /* Number of bytes to read */ int rc; /* Return code */ nRead = MIN(pReader->nNode - pReader->nPopulate, FTS3_NODE_CHUNKSIZE); rc = sqlite3_blob_read( pReader->pBlob, &pReader->aNode[pReader->nPopulate], nRead, pReader->nPopulate ); if( rc==SQLITE_OK ){ pReader->nPopulate += nRead; memset(&pReader->aNode[pReader->nPopulate], 0, FTS3_NODE_PADDING); if( pReader->nPopulate==pReader->nNode ){ sqlite3_blob_close(pReader->pBlob); pReader->pBlob = 0; pReader->nPopulate = 0; } } return rc; } static int fts3SegReaderRequire(Fts3SegReader *pReader, char *pFrom, int nByte){ int rc = SQLITE_OK; assert( !pReader->pBlob || (pFrom>=pReader->aNode && pFrom<&pReader->aNode[pReader->nNode]) ); while( pReader->pBlob && rc==SQLITE_OK && (pFrom - pReader->aNode + nByte)>pReader->nPopulate ){ rc = fts3SegReaderIncrRead(pReader); } return rc; } /* ** Set an Fts3SegReader cursor to point at EOF. 
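**
** Once this has been called, pSeg->aNode is NULL, which is the condition
** the comparison functions in this file (e.g. fts3SegReaderCmp()) use to
** treat a reader as being at EOF.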
*/ static void fts3SegReaderSetEof(Fts3SegReader *pSeg){ if( !fts3SegReaderIsRootOnly(pSeg) ){ sqlite3_free(pSeg->aNode); sqlite3_blob_close(pSeg->pBlob); pSeg->pBlob = 0; } pSeg->aNode = 0; } /* ** Move the iterator passed as the first argument to the next term in the ** segment. If successful, SQLITE_OK is returned. If there is no next term, ** SQLITE_DONE. Otherwise, an SQLite error code. */ static int fts3SegReaderNext( Fts3Table *p, Fts3SegReader *pReader, int bIncr ){ int rc; /* Return code of various sub-routines */ char *pNext; /* Cursor variable */ int nPrefix; /* Number of bytes in term prefix */ int nSuffix; /* Number of bytes in term suffix */ if( !pReader->aDoclist ){ pNext = pReader->aNode; }else{ pNext = &pReader->aDoclist[pReader->nDoclist]; } if( !pNext || pNext>=&pReader->aNode[pReader->nNode] ){ if( fts3SegReaderIsPending(pReader) ){ Fts3HashElem *pElem = *(pReader->ppNextElem); if( pElem==0 ){ pReader->aNode = 0; }else{ PendingList *pList = (PendingList *)fts3HashData(pElem); pReader->zTerm = (char *)fts3HashKey(pElem); pReader->nTerm = fts3HashKeysize(pElem); pReader->nNode = pReader->nDoclist = pList->nData + 1; pReader->aNode = pReader->aDoclist = pList->aData; pReader->ppNextElem++; assert( pReader->aNode ); } return SQLITE_OK; } fts3SegReaderSetEof(pReader); /* If iCurrentBlock>=iLeafEndBlock, this is an EOF condition. All leaf ** blocks have already been traversed. */ assert( pReader->iCurrentBlock<=pReader->iLeafEndBlock ); if( pReader->iCurrentBlock>=pReader->iLeafEndBlock ){ return SQLITE_OK; } rc = sqlite3Fts3ReadBlock( p, ++pReader->iCurrentBlock, &pReader->aNode, &pReader->nNode, (bIncr ? &pReader->nPopulate : 0) ); if( rc!=SQLITE_OK ) return rc; assert( pReader->pBlob==0 ); if( bIncr && pReader->nPopulatenNode ){ pReader->pBlob = p->pSegments; p->pSegments = 0; } pNext = pReader->aNode; } assert( !fts3SegReaderIsPending(pReader) ); rc = fts3SegReaderRequire(pReader, pNext, FTS3_VARINT_MAX*2); if( rc!=SQLITE_OK ) return rc; /* Because of the FTS3_NODE_PADDING bytes of padding, the following is ** safe (no risk of overread) even if the node data is corrupted. */ pNext += fts3GetVarint32(pNext, &nPrefix); pNext += fts3GetVarint32(pNext, &nSuffix); if( nPrefix<0 || nSuffix<=0 || &pNext[nSuffix]>&pReader->aNode[pReader->nNode] ){ return FTS_CORRUPT_VTAB; } if( nPrefix+nSuffix>pReader->nTermAlloc ){ int nNew = (nPrefix+nSuffix)*2; char *zNew = sqlite3_realloc(pReader->zTerm, nNew); if( !zNew ){ return SQLITE_NOMEM; } pReader->zTerm = zNew; pReader->nTermAlloc = nNew; } rc = fts3SegReaderRequire(pReader, pNext, nSuffix+FTS3_VARINT_MAX); if( rc!=SQLITE_OK ) return rc; memcpy(&pReader->zTerm[nPrefix], pNext, nSuffix); pReader->nTerm = nPrefix+nSuffix; pNext += nSuffix; pNext += fts3GetVarint32(pNext, &pReader->nDoclist); pReader->aDoclist = pNext; pReader->pOffsetList = 0; /* Check that the doclist does not appear to extend past the end of the ** b-tree node. And that the final byte of the doclist is 0x00. If either ** of these statements is untrue, then the data structure is corrupt. */ if( &pReader->aDoclist[pReader->nDoclist]>&pReader->aNode[pReader->nNode] || (pReader->nPopulate==0 && pReader->aDoclist[pReader->nDoclist-1]) ){ return FTS_CORRUPT_VTAB; } return SQLITE_OK; } /* ** Set the SegReader to point to the first docid in the doclist associated ** with the current term. 
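**
** A sketch of the doclist layout assumed by the non-descending branch
** below: the doclist begins with a varint holding the first docid, and the
** column/offset lists for that docid follow immediately, terminated by a
** 0x00 byte.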
*/ static int fts3SegReaderFirstDocid(Fts3Table *pTab, Fts3SegReader *pReader){ int rc = SQLITE_OK; assert( pReader->aDoclist ); assert( !pReader->pOffsetList ); if( pTab->bDescIdx && fts3SegReaderIsPending(pReader) ){ u8 bEof = 0; pReader->iDocid = 0; pReader->nOffsetList = 0; sqlite3Fts3DoclistPrev(0, pReader->aDoclist, pReader->nDoclist, &pReader->pOffsetList, &pReader->iDocid, &pReader->nOffsetList, &bEof ); }else{ rc = fts3SegReaderRequire(pReader, pReader->aDoclist, FTS3_VARINT_MAX); if( rc==SQLITE_OK ){ int n = sqlite3Fts3GetVarint(pReader->aDoclist, &pReader->iDocid); pReader->pOffsetList = &pReader->aDoclist[n]; } } return rc; } /* ** Advance the SegReader to point to the next docid in the doclist ** associated with the current term. ** ** If arguments ppOffsetList and pnOffsetList are not NULL, then ** *ppOffsetList is set to point to the first column-offset list ** in the doclist entry (i.e. immediately past the docid varint). ** *pnOffsetList is set to the length of the set of column-offset ** lists, not including the nul-terminator byte. For example: */ static int fts3SegReaderNextDocid( Fts3Table *pTab, Fts3SegReader *pReader, /* Reader to advance to next docid */ char **ppOffsetList, /* OUT: Pointer to current position-list */ int *pnOffsetList /* OUT: Length of *ppOffsetList in bytes */ ){ int rc = SQLITE_OK; char *p = pReader->pOffsetList; char c = 0; assert( p ); if( pTab->bDescIdx && fts3SegReaderIsPending(pReader) ){ /* A pending-terms seg-reader for an FTS4 table that uses order=desc. ** Pending-terms doclists are always built up in ascending order, so ** we have to iterate through them backwards here. */ u8 bEof = 0; if( ppOffsetList ){ *ppOffsetList = pReader->pOffsetList; *pnOffsetList = pReader->nOffsetList - 1; } sqlite3Fts3DoclistPrev(0, pReader->aDoclist, pReader->nDoclist, &p, &pReader->iDocid, &pReader->nOffsetList, &bEof ); if( bEof ){ pReader->pOffsetList = 0; }else{ pReader->pOffsetList = p; } }else{ char *pEnd = &pReader->aDoclist[pReader->nDoclist]; /* Pointer p currently points at the first byte of an offset list. The ** following block advances it to point one byte past the end of ** the same offset list. */ while( 1 ){ /* The following line of code (and the "p++" below the while() loop) is ** normally all that is required to move pointer p to the desired ** position. The exception is if this node is being loaded from disk ** incrementally and pointer "p" now points to the first byte past ** the populated part of pReader->aNode[]. */ while( *p | c ) c = *p++ & 0x80; assert( *p==0 ); if( pReader->pBlob==0 || p<&pReader->aNode[pReader->nPopulate] ) break; rc = fts3SegReaderIncrRead(pReader); if( rc!=SQLITE_OK ) return rc; } p++; /* If required, populate the output variables with a pointer to and the ** size of the previous offset-list. 
*/ if( ppOffsetList ){ *ppOffsetList = pReader->pOffsetList; *pnOffsetList = (int)(p - pReader->pOffsetList - 1); } /* List may have been edited in place by fts3EvalNearTrim() */ while( p=pEnd ){ pReader->pOffsetList = 0; }else{ rc = fts3SegReaderRequire(pReader, p, FTS3_VARINT_MAX); if( rc==SQLITE_OK ){ sqlite3_int64 iDelta; pReader->pOffsetList = p + sqlite3Fts3GetVarint(p, &iDelta); if( pTab->bDescIdx ){ pReader->iDocid -= iDelta; }else{ pReader->iDocid += iDelta; } } } } return SQLITE_OK; } SQLITE_PRIVATE int sqlite3Fts3MsrOvfl( Fts3Cursor *pCsr, Fts3MultiSegReader *pMsr, int *pnOvfl ){ Fts3Table *p = (Fts3Table*)pCsr->base.pVtab; int nOvfl = 0; int ii; int rc = SQLITE_OK; int pgsz = p->nPgsz; assert( p->bFts4 ); assert( pgsz>0 ); for(ii=0; rc==SQLITE_OK && iinSegment; ii++){ Fts3SegReader *pReader = pMsr->apSegment[ii]; if( !fts3SegReaderIsPending(pReader) && !fts3SegReaderIsRootOnly(pReader) ){ sqlite3_int64 jj; for(jj=pReader->iStartBlock; jj<=pReader->iLeafEndBlock; jj++){ int nBlob; rc = sqlite3Fts3ReadBlock(p, jj, 0, &nBlob, 0); if( rc!=SQLITE_OK ) break; if( (nBlob+35)>pgsz ){ nOvfl += (nBlob + 34)/pgsz; } } } } *pnOvfl = nOvfl; return rc; } /* ** Free all allocations associated with the iterator passed as the ** second argument. */ SQLITE_PRIVATE void sqlite3Fts3SegReaderFree(Fts3SegReader *pReader){ if( pReader && !fts3SegReaderIsPending(pReader) ){ sqlite3_free(pReader->zTerm); if( !fts3SegReaderIsRootOnly(pReader) ){ sqlite3_free(pReader->aNode); sqlite3_blob_close(pReader->pBlob); } } sqlite3_free(pReader); } /* ** Allocate a new SegReader object. */ SQLITE_PRIVATE int sqlite3Fts3SegReaderNew( int iAge, /* Segment "age". */ int bLookup, /* True for a lookup only */ sqlite3_int64 iStartLeaf, /* First leaf to traverse */ sqlite3_int64 iEndLeaf, /* Final leaf to traverse */ sqlite3_int64 iEndBlock, /* Final block of segment */ const char *zRoot, /* Buffer containing root node */ int nRoot, /* Size of buffer containing root node */ Fts3SegReader **ppReader /* OUT: Allocated Fts3SegReader */ ){ Fts3SegReader *pReader; /* Newly allocated SegReader object */ int nExtra = 0; /* Bytes to allocate segment root node */ assert( iStartLeaf<=iEndLeaf ); if( iStartLeaf==0 ){ nExtra = nRoot + FTS3_NODE_PADDING; } pReader = (Fts3SegReader *)sqlite3_malloc(sizeof(Fts3SegReader) + nExtra); if( !pReader ){ return SQLITE_NOMEM; } memset(pReader, 0, sizeof(Fts3SegReader)); pReader->iIdx = iAge; pReader->bLookup = bLookup!=0; pReader->iStartBlock = iStartLeaf; pReader->iLeafEndBlock = iEndLeaf; pReader->iEndBlock = iEndBlock; if( nExtra ){ /* The entire segment is stored in the root node. */ pReader->aNode = (char *)&pReader[1]; pReader->rootOnly = 1; pReader->nNode = nRoot; memcpy(pReader->aNode, zRoot, nRoot); memset(&pReader->aNode[nRoot], 0, FTS3_NODE_PADDING); }else{ pReader->iCurrentBlock = iStartLeaf-1; } *ppReader = pReader; return SQLITE_OK; } /* ** This is a comparison function used as a qsort() callback when sorting ** an array of pending terms by term. This occurs as part of flushing ** the contents of the pending-terms hash table to the database. 
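**
** A short illustration of the resulting order (hypothetical terms): the
** comparison is bytewise via memcmp(), with a shorter term that is a
** prefix of a longer one sorting first, so "ab" sorts before "abc", which
** in turn sorts before "abd".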
*/ static int SQLITE_CDECL fts3CompareElemByTerm( const void *lhs, const void *rhs ){ char *z1 = fts3HashKey(*(Fts3HashElem **)lhs); char *z2 = fts3HashKey(*(Fts3HashElem **)rhs); int n1 = fts3HashKeysize(*(Fts3HashElem **)lhs); int n2 = fts3HashKeysize(*(Fts3HashElem **)rhs); int n = (n1aIndex */ const char *zTerm, /* Term to search for */ int nTerm, /* Size of buffer zTerm */ int bPrefix, /* True for a prefix iterator */ Fts3SegReader **ppReader /* OUT: SegReader for pending-terms */ ){ Fts3SegReader *pReader = 0; /* Fts3SegReader object to return */ Fts3HashElem *pE; /* Iterator variable */ Fts3HashElem **aElem = 0; /* Array of term hash entries to scan */ int nElem = 0; /* Size of array at aElem */ int rc = SQLITE_OK; /* Return Code */ Fts3Hash *pHash; pHash = &p->aIndex[iIndex].hPending; if( bPrefix ){ int nAlloc = 0; /* Size of allocated array at aElem */ for(pE=fts3HashFirst(pHash); pE; pE=fts3HashNext(pE)){ char *zKey = (char *)fts3HashKey(pE); int nKey = fts3HashKeysize(pE); if( nTerm==0 || (nKey>=nTerm && 0==memcmp(zKey, zTerm, nTerm)) ){ if( nElem==nAlloc ){ Fts3HashElem **aElem2; nAlloc += 16; aElem2 = (Fts3HashElem **)sqlite3_realloc( aElem, nAlloc*sizeof(Fts3HashElem *) ); if( !aElem2 ){ rc = SQLITE_NOMEM; nElem = 0; break; } aElem = aElem2; } aElem[nElem++] = pE; } } /* If more than one term matches the prefix, sort the Fts3HashElem ** objects in term order using qsort(). This uses the same comparison ** callback as is used when flushing terms to disk. */ if( nElem>1 ){ qsort(aElem, nElem, sizeof(Fts3HashElem *), fts3CompareElemByTerm); } }else{ /* The query is a simple term lookup that matches at most one term in ** the index. All that is required is a straight hash-lookup. ** ** Because the stack address of pE may be accessed via the aElem pointer ** below, the "Fts3HashElem *pE" must be declared so that it is valid ** within this entire function, not just this "else{...}" block. */ pE = fts3HashFindElem(pHash, zTerm, nTerm); if( pE ){ aElem = &pE; nElem = 1; } } if( nElem>0 ){ int nByte = sizeof(Fts3SegReader) + (nElem+1)*sizeof(Fts3HashElem *); pReader = (Fts3SegReader *)sqlite3_malloc(nByte); if( !pReader ){ rc = SQLITE_NOMEM; }else{ memset(pReader, 0, nByte); pReader->iIdx = 0x7FFFFFFF; pReader->ppNextElem = (Fts3HashElem **)&pReader[1]; memcpy(pReader->ppNextElem, aElem, nElem*sizeof(Fts3HashElem *)); } } if( bPrefix ){ sqlite3_free(aElem); } *ppReader = pReader; return rc; } /* ** Compare the entries pointed to by two Fts3SegReader structures. ** Comparison is as follows: ** ** 1) EOF is greater than not EOF. ** ** 2) The current terms (if any) are compared using memcmp(). If one ** term is a prefix of another, the longer term is considered the ** larger. ** ** 3) By segment age. An older segment is considered larger. */ static int fts3SegReaderCmp(Fts3SegReader *pLhs, Fts3SegReader *pRhs){ int rc; if( pLhs->aNode && pRhs->aNode ){ int rc2 = pLhs->nTerm - pRhs->nTerm; if( rc2<0 ){ rc = memcmp(pLhs->zTerm, pRhs->zTerm, pLhs->nTerm); }else{ rc = memcmp(pLhs->zTerm, pRhs->zTerm, pRhs->nTerm); } if( rc==0 ){ rc = rc2; } }else{ rc = (pLhs->aNode==0) - (pRhs->aNode==0); } if( rc==0 ){ rc = pRhs->iIdx - pLhs->iIdx; } assert( rc!=0 ); return rc; } /* ** A different comparison function for SegReader structures. In this ** version, it is assumed that each SegReader points to an entry in ** a doclist for identical terms. Comparison is made as follows: ** ** 1) EOF (end of doclist in this case) is greater than not EOF. ** ** 2) By current docid. ** ** 3) By segment age. 
An older segment is considered larger. */ static int fts3SegReaderDoclistCmp(Fts3SegReader *pLhs, Fts3SegReader *pRhs){ int rc = (pLhs->pOffsetList==0)-(pRhs->pOffsetList==0); if( rc==0 ){ if( pLhs->iDocid==pRhs->iDocid ){ rc = pRhs->iIdx - pLhs->iIdx; }else{ rc = (pLhs->iDocid > pRhs->iDocid) ? 1 : -1; } } assert( pLhs->aNode && pRhs->aNode ); return rc; } static int fts3SegReaderDoclistCmpRev(Fts3SegReader *pLhs, Fts3SegReader *pRhs){ int rc = (pLhs->pOffsetList==0)-(pRhs->pOffsetList==0); if( rc==0 ){ if( pLhs->iDocid==pRhs->iDocid ){ rc = pRhs->iIdx - pLhs->iIdx; }else{ rc = (pLhs->iDocid < pRhs->iDocid) ? 1 : -1; } } assert( pLhs->aNode && pRhs->aNode ); return rc; } /* ** Compare the term that the Fts3SegReader object passed as the first argument ** points to with the term specified by arguments zTerm and nTerm. ** ** If the pSeg iterator is already at EOF, return 0. Otherwise, return ** -ve if the pSeg term is less than zTerm/nTerm, 0 if the two terms are ** equal, or +ve if the pSeg term is greater than zTerm/nTerm. */ static int fts3SegReaderTermCmp( Fts3SegReader *pSeg, /* Segment reader object */ const char *zTerm, /* Term to compare to */ int nTerm /* Size of term zTerm in bytes */ ){ int res = 0; if( pSeg->aNode ){ if( pSeg->nTerm>nTerm ){ res = memcmp(pSeg->zTerm, zTerm, nTerm); }else{ res = memcmp(pSeg->zTerm, zTerm, pSeg->nTerm); } if( res==0 ){ res = pSeg->nTerm-nTerm; } } return res; } /* ** Argument apSegment is an array of nSegment elements. It is known that ** the final (nSegment-nSuspect) members are already in sorted order ** (according to the comparison function provided). This function shuffles ** the array around until all entries are in sorted order. */ static void fts3SegReaderSort( Fts3SegReader **apSegment, /* Array to sort entries of */ int nSegment, /* Size of apSegment array */ int nSuspect, /* Unsorted entry count */ int (*xCmp)(Fts3SegReader *, Fts3SegReader *) /* Comparison function */ ){ int i; /* Iterator variable */ assert( nSuspect<=nSegment ); if( nSuspect==nSegment ) nSuspect--; for(i=nSuspect-1; i>=0; i--){ int j; for(j=i; j<(nSegment-1); j++){ Fts3SegReader *pTmp; if( xCmp(apSegment[j], apSegment[j+1])<0 ) break; pTmp = apSegment[j+1]; apSegment[j+1] = apSegment[j]; apSegment[j] = pTmp; } } #ifndef NDEBUG /* Check that the list really is sorted now. */ for(i=0; i<(nSuspect-1); i++){ assert( xCmp(apSegment[i], apSegment[i+1])<0 ); } #endif } /* ** Insert a record into the %_segments table. */ static int fts3WriteSegment( Fts3Table *p, /* Virtual table handle */ sqlite3_int64 iBlock, /* Block id for new block */ char *z, /* Pointer to buffer containing block data */ int n /* Size of buffer z in bytes */ ){ sqlite3_stmt *pStmt; int rc = fts3SqlStmt(p, SQL_INSERT_SEGMENTS, &pStmt, 0); if( rc==SQLITE_OK ){ sqlite3_bind_int64(pStmt, 1, iBlock); sqlite3_bind_blob(pStmt, 2, z, n, SQLITE_STATIC); sqlite3_step(pStmt); rc = sqlite3_reset(pStmt); } return rc; } /* ** Find the largest relative level number in the table. If successful, set ** *pnMax to this value and return SQLITE_OK. Otherwise, if an error occurs, ** set *pnMax to zero and return an SQLite error code. */ SQLITE_PRIVATE int sqlite3Fts3MaxLevel(Fts3Table *p, int *pnMax){ int rc; int mxLevel = 0; sqlite3_stmt *pStmt = 0; rc = fts3SqlStmt(p, SQL_SELECT_MXLEVEL, &pStmt, 0); if( rc==SQLITE_OK ){ if( SQLITE_ROW==sqlite3_step(pStmt) ){ mxLevel = sqlite3_column_int(pStmt, 0); } rc = sqlite3_reset(pStmt); } *pnMax = mxLevel; return rc; } /* ** Insert a record into the %_segdir table. 
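**
** A rough illustration of the statement executed below (values are
** hypothetical): for a segment written to absolute level 1024 with idx 0,
** the handle obtained via SQL_INSERT_SEGDIR is bound roughly as
**
**   REPLACE INTO %_segdir VALUES(1024, 0, :start, :leaves_end, :end, :root)
**
** Note that when nLeafData is non-zero, the end_block field is bound as a
** text value containing iEndBlock and nLeafData separated by a space,
** rather than as an integer.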
*/ static int fts3WriteSegdir( Fts3Table *p, /* Virtual table handle */ sqlite3_int64 iLevel, /* Value for "level" field (absolute level) */ int iIdx, /* Value for "idx" field */ sqlite3_int64 iStartBlock, /* Value for "start_block" field */ sqlite3_int64 iLeafEndBlock, /* Value for "leaves_end_block" field */ sqlite3_int64 iEndBlock, /* Value for "end_block" field */ sqlite3_int64 nLeafData, /* Bytes of leaf data in segment */ char *zRoot, /* Blob value for "root" field */ int nRoot /* Number of bytes in buffer zRoot */ ){ sqlite3_stmt *pStmt; int rc = fts3SqlStmt(p, SQL_INSERT_SEGDIR, &pStmt, 0); if( rc==SQLITE_OK ){ sqlite3_bind_int64(pStmt, 1, iLevel); sqlite3_bind_int(pStmt, 2, iIdx); sqlite3_bind_int64(pStmt, 3, iStartBlock); sqlite3_bind_int64(pStmt, 4, iLeafEndBlock); if( nLeafData==0 ){ sqlite3_bind_int64(pStmt, 5, iEndBlock); }else{ char *zEnd = sqlite3_mprintf("%lld %lld", iEndBlock, nLeafData); if( !zEnd ) return SQLITE_NOMEM; sqlite3_bind_text(pStmt, 5, zEnd, -1, sqlite3_free); } sqlite3_bind_blob(pStmt, 6, zRoot, nRoot, SQLITE_STATIC); sqlite3_step(pStmt); rc = sqlite3_reset(pStmt); } return rc; } /* ** Return the size of the common prefix (if any) shared by zPrev and ** zNext, in bytes. For example, ** ** fts3PrefixCompress("abc", 3, "abcdef", 6) // returns 3 ** fts3PrefixCompress("abX", 3, "abcdef", 6) // returns 2 ** fts3PrefixCompress("abX", 3, "Xbcdef", 6) // returns 0 */ static int fts3PrefixCompress( const char *zPrev, /* Buffer containing previous term */ int nPrev, /* Size of buffer zPrev in bytes */ const char *zNext, /* Buffer containing next term */ int nNext /* Size of buffer zNext in bytes */ ){ int n; UNUSED_PARAMETER(nNext); for(n=0; nnData; /* Current size of node in bytes */ int nReq = nData; /* Required space after adding zTerm */ int nPrefix; /* Number of bytes of prefix compression */ int nSuffix; /* Suffix length */ nPrefix = fts3PrefixCompress(pTree->zTerm, pTree->nTerm, zTerm, nTerm); nSuffix = nTerm-nPrefix; nReq += sqlite3Fts3VarintLen(nPrefix)+sqlite3Fts3VarintLen(nSuffix)+nSuffix; if( nReq<=p->nNodeSize || !pTree->zTerm ){ if( nReq>p->nNodeSize ){ /* An unusual case: this is the first term to be added to the node ** and the static node buffer (p->nNodeSize bytes) is not large ** enough. Use a separately malloced buffer instead This wastes ** p->nNodeSize bytes, but since this scenario only comes about when ** the database contain two terms that share a prefix of almost 2KB, ** this is not expected to be a serious problem. */ assert( pTree->aData==(char *)&pTree[1] ); pTree->aData = (char *)sqlite3_malloc(nReq); if( !pTree->aData ){ return SQLITE_NOMEM; } } if( pTree->zTerm ){ /* There is no prefix-length field for first term in a node */ nData += sqlite3Fts3PutVarint(&pTree->aData[nData], nPrefix); } nData += sqlite3Fts3PutVarint(&pTree->aData[nData], nSuffix); memcpy(&pTree->aData[nData], &zTerm[nPrefix], nSuffix); pTree->nData = nData + nSuffix; pTree->nEntry++; if( isCopyTerm ){ if( pTree->nMalloczMalloc, nTerm*2); if( !zNew ){ return SQLITE_NOMEM; } pTree->nMalloc = nTerm*2; pTree->zMalloc = zNew; } pTree->zTerm = pTree->zMalloc; memcpy(pTree->zTerm, zTerm, nTerm); pTree->nTerm = nTerm; }else{ pTree->zTerm = (char *)zTerm; pTree->nTerm = nTerm; } return SQLITE_OK; } } /* If control flows to here, it was not possible to append zTerm to the ** current node. Create a new node (a right-sibling of the current node). ** If this is the first node in the tree, the term is added to it. 
** ** Otherwise, the term is not added to the new node, it is left empty for ** now. Instead, the term is inserted into the parent of pTree. If pTree ** has no parent, one is created here. */ pNew = (SegmentNode *)sqlite3_malloc(sizeof(SegmentNode) + p->nNodeSize); if( !pNew ){ return SQLITE_NOMEM; } memset(pNew, 0, sizeof(SegmentNode)); pNew->nData = 1 + FTS3_VARINT_MAX; pNew->aData = (char *)&pNew[1]; if( pTree ){ SegmentNode *pParent = pTree->pParent; rc = fts3NodeAddTerm(p, &pParent, isCopyTerm, zTerm, nTerm); if( pTree->pParent==0 ){ pTree->pParent = pParent; } pTree->pRight = pNew; pNew->pLeftmost = pTree->pLeftmost; pNew->pParent = pParent; pNew->zMalloc = pTree->zMalloc; pNew->nMalloc = pTree->nMalloc; pTree->zMalloc = 0; }else{ pNew->pLeftmost = pNew; rc = fts3NodeAddTerm(p, &pNew, isCopyTerm, zTerm, nTerm); } *ppTree = pNew; return rc; } /* ** Helper function for fts3NodeWrite(). */ static int fts3TreeFinishNode( SegmentNode *pTree, int iHeight, sqlite3_int64 iLeftChild ){ int nStart; assert( iHeight>=1 && iHeight<128 ); nStart = FTS3_VARINT_MAX - sqlite3Fts3VarintLen(iLeftChild); pTree->aData[nStart] = (char)iHeight; sqlite3Fts3PutVarint(&pTree->aData[nStart+1], iLeftChild); return nStart; } /* ** Write the buffer for the segment node pTree and all of its peers to the ** database. Then call this function recursively to write the parent of ** pTree and its peers to the database. ** ** Except, if pTree is a root node, do not write it to the database. Instead, ** set output variables *paRoot and *pnRoot to contain the root node. ** ** If successful, SQLITE_OK is returned and output variable *piLast is ** set to the largest blockid written to the database (or zero if no ** blocks were written to the db). Otherwise, an SQLite error code is ** returned. */ static int fts3NodeWrite( Fts3Table *p, /* Virtual table handle */ SegmentNode *pTree, /* SegmentNode handle */ int iHeight, /* Height of this node in tree */ sqlite3_int64 iLeaf, /* Block id of first leaf node */ sqlite3_int64 iFree, /* Block id of next free slot in %_segments */ sqlite3_int64 *piLast, /* OUT: Block id of last entry written */ char **paRoot, /* OUT: Data for root node */ int *pnRoot /* OUT: Size of root node in bytes */ ){ int rc = SQLITE_OK; if( !pTree->pParent ){ /* Root node of the tree. */ int nStart = fts3TreeFinishNode(pTree, iHeight, iLeaf); *piLast = iFree-1; *pnRoot = pTree->nData - nStart; *paRoot = &pTree->aData[nStart]; }else{ SegmentNode *pIter; sqlite3_int64 iNextFree = iFree; sqlite3_int64 iNextLeaf = iLeaf; for(pIter=pTree->pLeftmost; pIter && rc==SQLITE_OK; pIter=pIter->pRight){ int nStart = fts3TreeFinishNode(pIter, iHeight, iNextLeaf); int nWrite = pIter->nData - nStart; rc = fts3WriteSegment(p, iNextFree, &pIter->aData[nStart], nWrite); iNextFree++; iNextLeaf += (pIter->nEntry+1); } if( rc==SQLITE_OK ){ assert( iNextLeaf==iFree ); rc = fts3NodeWrite( p, pTree->pParent, iHeight+1, iFree, iNextFree, piLast, paRoot, pnRoot ); } } return rc; } /* ** Free all memory allocations associated with the tree pTree. */ static void fts3NodeFree(SegmentNode *pTree){ if( pTree ){ SegmentNode *p = pTree->pLeftmost; fts3NodeFree(p->pParent); while( p ){ SegmentNode *pRight = p->pRight; if( p->aData!=(char *)&p[1] ){ sqlite3_free(p->aData); } assert( pRight==0 || p->zMalloc==0 ); sqlite3_free(p->zMalloc); sqlite3_free(p); p = pRight; } } } /* ** Add a term to the segment being constructed by the SegmentWriter object ** *ppWriter. When adding the first term to a segment, *ppWriter should ** be passed NULL. 
This function will allocate a new SegmentWriter object ** and return it via the input/output variable *ppWriter in this case. ** ** If successful, SQLITE_OK is returned. Otherwise, an SQLite error code. */ static int fts3SegWriterAdd( Fts3Table *p, /* Virtual table handle */ SegmentWriter **ppWriter, /* IN/OUT: SegmentWriter handle */ int isCopyTerm, /* True if buffer zTerm must be copied */ const char *zTerm, /* Pointer to buffer containing term */ int nTerm, /* Size of term in bytes */ const char *aDoclist, /* Pointer to buffer containing doclist */ int nDoclist /* Size of doclist in bytes */ ){ int nPrefix; /* Size of term prefix in bytes */ int nSuffix; /* Size of term suffix in bytes */ int nReq; /* Number of bytes required on leaf page */ int nData; SegmentWriter *pWriter = *ppWriter; if( !pWriter ){ int rc; sqlite3_stmt *pStmt; /* Allocate the SegmentWriter structure */ pWriter = (SegmentWriter *)sqlite3_malloc(sizeof(SegmentWriter)); if( !pWriter ) return SQLITE_NOMEM; memset(pWriter, 0, sizeof(SegmentWriter)); *ppWriter = pWriter; /* Allocate a buffer in which to accumulate data */ pWriter->aData = (char *)sqlite3_malloc(p->nNodeSize); if( !pWriter->aData ) return SQLITE_NOMEM; pWriter->nSize = p->nNodeSize; /* Find the next free blockid in the %_segments table */ rc = fts3SqlStmt(p, SQL_NEXT_SEGMENTS_ID, &pStmt, 0); if( rc!=SQLITE_OK ) return rc; if( SQLITE_ROW==sqlite3_step(pStmt) ){ pWriter->iFree = sqlite3_column_int64(pStmt, 0); pWriter->iFirst = pWriter->iFree; } rc = sqlite3_reset(pStmt); if( rc!=SQLITE_OK ) return rc; } nData = pWriter->nData; nPrefix = fts3PrefixCompress(pWriter->zTerm, pWriter->nTerm, zTerm, nTerm); nSuffix = nTerm-nPrefix; /* Figure out how many bytes are required by this new entry */ nReq = sqlite3Fts3VarintLen(nPrefix) + /* varint containing prefix size */ sqlite3Fts3VarintLen(nSuffix) + /* varint containing suffix size */ nSuffix + /* Term suffix */ sqlite3Fts3VarintLen(nDoclist) + /* Size of doclist */ nDoclist; /* Doclist data */ if( nData>0 && nData+nReq>p->nNodeSize ){ int rc; /* The current leaf node is full. Write it out to the database. */ rc = fts3WriteSegment(p, pWriter->iFree++, pWriter->aData, nData); if( rc!=SQLITE_OK ) return rc; p->nLeafAdd++; /* Add the current term to the interior node tree. The term added to ** the interior tree must: ** ** a) be greater than the largest term on the leaf node just written ** to the database (still available in pWriter->zTerm), and ** ** b) be less than or equal to the term about to be added to the new ** leaf node (zTerm/nTerm). ** ** In other words, it must be the prefix of zTerm 1 byte longer than ** the common prefix (if any) of zTerm and pWriter->zTerm. */ assert( nPrefixpTree, isCopyTerm, zTerm, nPrefix+1); if( rc!=SQLITE_OK ) return rc; nData = 0; pWriter->nTerm = 0; nPrefix = 0; nSuffix = nTerm; nReq = 1 + /* varint containing prefix size */ sqlite3Fts3VarintLen(nTerm) + /* varint containing suffix size */ nTerm + /* Term suffix */ sqlite3Fts3VarintLen(nDoclist) + /* Size of doclist */ nDoclist; /* Doclist data */ } /* Increase the total number of bytes written to account for the new entry. */ pWriter->nLeafData += nReq; /* If the buffer currently allocated is too small for this entry, realloc ** the buffer to make it large enough. */ if( nReq>pWriter->nSize ){ char *aNew = sqlite3_realloc(pWriter->aData, nReq); if( !aNew ) return SQLITE_NOMEM; pWriter->aData = aNew; pWriter->nSize = nReq; } assert( nData+nReq<=pWriter->nSize ); /* Append the prefix-compressed term and doclist to the buffer. 
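** For example (hypothetical values), if the previous term on this leaf was
** "linux" and the new term is "linuxes" with a 2 byte doclist, then the
** entry appended by the code below is:
**
**   varint(5) varint(2) "es" varint(2) <2 bytes of doclist>
**
** i.e. the prefix length, the suffix length, the term suffix itself, the
** doclist size and finally the doclist data.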
*/ nData += sqlite3Fts3PutVarint(&pWriter->aData[nData], nPrefix); nData += sqlite3Fts3PutVarint(&pWriter->aData[nData], nSuffix); memcpy(&pWriter->aData[nData], &zTerm[nPrefix], nSuffix); nData += nSuffix; nData += sqlite3Fts3PutVarint(&pWriter->aData[nData], nDoclist); memcpy(&pWriter->aData[nData], aDoclist, nDoclist); pWriter->nData = nData + nDoclist; /* Save the current term so that it can be used to prefix-compress the next. ** If the isCopyTerm parameter is true, then the buffer pointed to by ** zTerm is transient, so take a copy of the term data. Otherwise, just ** store a copy of the pointer. */ if( isCopyTerm ){ if( nTerm>pWriter->nMalloc ){ char *zNew = sqlite3_realloc(pWriter->zMalloc, nTerm*2); if( !zNew ){ return SQLITE_NOMEM; } pWriter->nMalloc = nTerm*2; pWriter->zMalloc = zNew; pWriter->zTerm = zNew; } assert( pWriter->zTerm==pWriter->zMalloc ); memcpy(pWriter->zTerm, zTerm, nTerm); }else{ pWriter->zTerm = (char *)zTerm; } pWriter->nTerm = nTerm; return SQLITE_OK; } /* ** Flush all data associated with the SegmentWriter object pWriter to the ** database. This function must be called after all terms have been added ** to the segment using fts3SegWriterAdd(). If successful, SQLITE_OK is ** returned. Otherwise, an SQLite error code. */ static int fts3SegWriterFlush( Fts3Table *p, /* Virtual table handle */ SegmentWriter *pWriter, /* SegmentWriter to flush to the db */ sqlite3_int64 iLevel, /* Value for 'level' column of %_segdir */ int iIdx /* Value for 'idx' column of %_segdir */ ){ int rc; /* Return code */ if( pWriter->pTree ){ sqlite3_int64 iLast = 0; /* Largest block id written to database */ sqlite3_int64 iLastLeaf; /* Largest leaf block id written to db */ char *zRoot = NULL; /* Pointer to buffer containing root node */ int nRoot = 0; /* Size of buffer zRoot */ iLastLeaf = pWriter->iFree; rc = fts3WriteSegment(p, pWriter->iFree++, pWriter->aData, pWriter->nData); if( rc==SQLITE_OK ){ rc = fts3NodeWrite(p, pWriter->pTree, 1, pWriter->iFirst, pWriter->iFree, &iLast, &zRoot, &nRoot); } if( rc==SQLITE_OK ){ rc = fts3WriteSegdir(p, iLevel, iIdx, pWriter->iFirst, iLastLeaf, iLast, pWriter->nLeafData, zRoot, nRoot); } }else{ /* The entire tree fits on the root node. Write it to the segdir table. */ rc = fts3WriteSegdir(p, iLevel, iIdx, 0, 0, 0, pWriter->nLeafData, pWriter->aData, pWriter->nData); } p->nLeafAdd++; return rc; } /* ** Release all memory held by the SegmentWriter object passed as the ** first argument. */ static void fts3SegWriterFree(SegmentWriter *pWriter){ if( pWriter ){ sqlite3_free(pWriter->aData); sqlite3_free(pWriter->zMalloc); fts3NodeFree(pWriter->pTree); sqlite3_free(pWriter); } } /* ** The first value in the apVal[] array is assumed to contain an integer. ** This function tests if there exist any documents with docid values that ** are different from that integer. i.e. if deleting the document with docid ** pRowid would mean the FTS3 table were empty. ** ** If successful, *pisEmpty is set to true if the table is empty except for ** document pRowid, or false otherwise, and SQLITE_OK is returned. If an ** error occurs, an SQLite error code is returned. 
*/ static int fts3IsEmpty(Fts3Table *p, sqlite3_value *pRowid, int *pisEmpty){ sqlite3_stmt *pStmt; int rc; if( p->zContentTbl ){ /* If using the content=xxx option, assume the table is never empty */ *pisEmpty = 0; rc = SQLITE_OK; }else{ rc = fts3SqlStmt(p, SQL_IS_EMPTY, &pStmt, &pRowid); if( rc==SQLITE_OK ){ if( SQLITE_ROW==sqlite3_step(pStmt) ){ *pisEmpty = sqlite3_column_int(pStmt, 0); } rc = sqlite3_reset(pStmt); } } return rc; } /* ** Set *pnMax to the largest segment level in the database for the index ** iIndex. ** ** Segment levels are stored in the 'level' column of the %_segdir table. ** ** Return SQLITE_OK if successful, or an SQLite error code if not. */ static int fts3SegmentMaxLevel( Fts3Table *p, int iLangid, int iIndex, sqlite3_int64 *pnMax ){ sqlite3_stmt *pStmt; int rc; assert( iIndex>=0 && iIndexnIndex ); /* Set pStmt to the compiled version of: ** ** SELECT max(level) FROM %Q.'%q_segdir' WHERE level BETWEEN ? AND ? ** ** (1024 is actually the value of macro FTS3_SEGDIR_PREFIXLEVEL_STR). */ rc = fts3SqlStmt(p, SQL_SELECT_SEGDIR_MAX_LEVEL, &pStmt, 0); if( rc!=SQLITE_OK ) return rc; sqlite3_bind_int64(pStmt, 1, getAbsoluteLevel(p, iLangid, iIndex, 0)); sqlite3_bind_int64(pStmt, 2, getAbsoluteLevel(p, iLangid, iIndex, FTS3_SEGDIR_MAXLEVEL-1) ); if( SQLITE_ROW==sqlite3_step(pStmt) ){ *pnMax = sqlite3_column_int64(pStmt, 0); } return sqlite3_reset(pStmt); } /* ** iAbsLevel is an absolute level that may be assumed to exist within ** the database. This function checks if it is the largest level number ** within its index. Assuming no error occurs, *pbMax is set to 1 if ** iAbsLevel is indeed the largest level, or 0 otherwise, and SQLITE_OK ** is returned. If an error occurs, an error code is returned and the ** final value of *pbMax is undefined. */ static int fts3SegmentIsMaxLevel(Fts3Table *p, i64 iAbsLevel, int *pbMax){ /* Set pStmt to the compiled version of: ** ** SELECT max(level) FROM %Q.'%q_segdir' WHERE level BETWEEN ? AND ? ** ** (1024 is actually the value of macro FTS3_SEGDIR_PREFIXLEVEL_STR). */ sqlite3_stmt *pStmt; int rc = fts3SqlStmt(p, SQL_SELECT_SEGDIR_MAX_LEVEL, &pStmt, 0); if( rc!=SQLITE_OK ) return rc; sqlite3_bind_int64(pStmt, 1, iAbsLevel+1); sqlite3_bind_int64(pStmt, 2, ((iAbsLevel/FTS3_SEGDIR_MAXLEVEL)+1) * FTS3_SEGDIR_MAXLEVEL ); *pbMax = 0; if( SQLITE_ROW==sqlite3_step(pStmt) ){ *pbMax = sqlite3_column_type(pStmt, 0)==SQLITE_NULL; } return sqlite3_reset(pStmt); } /* ** Delete all entries in the %_segments table associated with the segment ** opened with seg-reader pSeg. This function does not affect the contents ** of the %_segdir table. */ static int fts3DeleteSegment( Fts3Table *p, /* FTS table handle */ Fts3SegReader *pSeg /* Segment to delete */ ){ int rc = SQLITE_OK; /* Return code */ if( pSeg->iStartBlock ){ sqlite3_stmt *pDelete; /* SQL statement to delete rows */ rc = fts3SqlStmt(p, SQL_DELETE_SEGMENTS_RANGE, &pDelete, 0); if( rc==SQLITE_OK ){ sqlite3_bind_int64(pDelete, 1, pSeg->iStartBlock); sqlite3_bind_int64(pDelete, 2, pSeg->iEndBlock); sqlite3_step(pDelete); rc = sqlite3_reset(pDelete); } } return rc; } /* ** This function is used after merging multiple segments into a single large ** segment to delete the old, now redundant, segment b-trees. Specifically, ** it: ** ** 1) Deletes all %_segments entries for the segments associated with ** each of the SegReader objects in the array passed as the third ** argument, and ** ** 2) deletes all %_segdir entries with level iLevel, or all %_segdir ** entries regardless of level if (iLevel<0). 
** ** SQLITE_OK is returned if successful, otherwise an SQLite error code. */ static int fts3DeleteSegdir( Fts3Table *p, /* Virtual table handle */ int iLangid, /* Language id */ int iIndex, /* Index for p->aIndex */ int iLevel, /* Level of %_segdir entries to delete */ Fts3SegReader **apSegment, /* Array of SegReader objects */ int nReader /* Size of array apSegment */ ){ int rc = SQLITE_OK; /* Return Code */ int i; /* Iterator variable */ sqlite3_stmt *pDelete = 0; /* SQL statement to delete rows */ for(i=0; rc==SQLITE_OK && i=0 || iLevel==FTS3_SEGCURSOR_ALL ); if( iLevel==FTS3_SEGCURSOR_ALL ){ rc = fts3SqlStmt(p, SQL_DELETE_SEGDIR_RANGE, &pDelete, 0); if( rc==SQLITE_OK ){ sqlite3_bind_int64(pDelete, 1, getAbsoluteLevel(p, iLangid, iIndex, 0)); sqlite3_bind_int64(pDelete, 2, getAbsoluteLevel(p, iLangid, iIndex, FTS3_SEGDIR_MAXLEVEL-1) ); } }else{ rc = fts3SqlStmt(p, SQL_DELETE_SEGDIR_LEVEL, &pDelete, 0); if( rc==SQLITE_OK ){ sqlite3_bind_int64( pDelete, 1, getAbsoluteLevel(p, iLangid, iIndex, iLevel) ); } } if( rc==SQLITE_OK ){ sqlite3_step(pDelete); rc = sqlite3_reset(pDelete); } return rc; } /* ** When this function is called, buffer *ppList (size *pnList bytes) contains ** a position list that may (or may not) feature multiple columns. This ** function adjusts the pointer *ppList and the length *pnList so that they ** identify the subset of the position list that corresponds to column iCol. ** ** If there are no entries in the input position list for column iCol, then ** *pnList is set to zero before returning. ** ** If parameter bZero is non-zero, then any part of the input list following ** the end of the output list is zeroed before returning. */ static void fts3ColumnFilter( int iCol, /* Column to filter on */ int bZero, /* Zero out anything following *ppList */ char **ppList, /* IN/OUT: Pointer to position list */ int *pnList /* IN/OUT: Size of buffer *ppList in bytes */ ){ char *pList = *ppList; int nList = *pnList; char *pEnd = &pList[nList]; int iCurrent = 0; char *p = pList; assert( iCol>=0 ); while( 1 ){ char c = 0; while( ppMsr->nBuffer ){ char *pNew; pMsr->nBuffer = nList*2; pNew = (char *)sqlite3_realloc(pMsr->aBuffer, pMsr->nBuffer); if( !pNew ) return SQLITE_NOMEM; pMsr->aBuffer = pNew; } memcpy(pMsr->aBuffer, pList, nList); return SQLITE_OK; } SQLITE_PRIVATE int sqlite3Fts3MsrIncrNext( Fts3Table *p, /* Virtual table handle */ Fts3MultiSegReader *pMsr, /* Multi-segment-reader handle */ sqlite3_int64 *piDocid, /* OUT: Docid value */ char **paPoslist, /* OUT: Pointer to position list */ int *pnPoslist /* OUT: Size of position list in bytes */ ){ int nMerge = pMsr->nAdvance; Fts3SegReader **apSegment = pMsr->apSegment; int (*xCmp)(Fts3SegReader *, Fts3SegReader *) = ( p->bDescIdx ? 
fts3SegReaderDoclistCmpRev : fts3SegReaderDoclistCmp ); if( nMerge==0 ){ *paPoslist = 0; return SQLITE_OK; } while( 1 ){ Fts3SegReader *pSeg; pSeg = pMsr->apSegment[0]; if( pSeg->pOffsetList==0 ){ *paPoslist = 0; break; }else{ int rc; char *pList; int nList; int j; sqlite3_int64 iDocid = apSegment[0]->iDocid; rc = fts3SegReaderNextDocid(p, apSegment[0], &pList, &nList); j = 1; while( rc==SQLITE_OK && jpOffsetList && apSegment[j]->iDocid==iDocid ){ rc = fts3SegReaderNextDocid(p, apSegment[j], 0, 0); j++; } if( rc!=SQLITE_OK ) return rc; fts3SegReaderSort(pMsr->apSegment, nMerge, j, xCmp); if( nList>0 && fts3SegReaderIsPending(apSegment[0]) ){ rc = fts3MsrBufferData(pMsr, pList, nList+1); if( rc!=SQLITE_OK ) return rc; assert( (pMsr->aBuffer[nList] & 0xFE)==0x00 ); pList = pMsr->aBuffer; } if( pMsr->iColFilter>=0 ){ fts3ColumnFilter(pMsr->iColFilter, 1, &pList, &nList); } if( nList>0 ){ *paPoslist = pList; *piDocid = iDocid; *pnPoslist = nList; break; } } } return SQLITE_OK; } static int fts3SegReaderStart( Fts3Table *p, /* Virtual table handle */ Fts3MultiSegReader *pCsr, /* Cursor object */ const char *zTerm, /* Term searched for (or NULL) */ int nTerm /* Length of zTerm in bytes */ ){ int i; int nSeg = pCsr->nSegment; /* If the Fts3SegFilter defines a specific term (or term prefix) to search ** for, then advance each segment iterator until it points to a term of ** equal or greater value than the specified term. This prevents many ** unnecessary merge/sort operations for the case where single segment ** b-tree leaf nodes contain more than one term. */ for(i=0; pCsr->bRestart==0 && inSegment; i++){ int res = 0; Fts3SegReader *pSeg = pCsr->apSegment[i]; do { int rc = fts3SegReaderNext(p, pSeg, 0); if( rc!=SQLITE_OK ) return rc; }while( zTerm && (res = fts3SegReaderTermCmp(pSeg, zTerm, nTerm))<0 ); if( pSeg->bLookup && res!=0 ){ fts3SegReaderSetEof(pSeg); } } fts3SegReaderSort(pCsr->apSegment, nSeg, nSeg, fts3SegReaderCmp); return SQLITE_OK; } SQLITE_PRIVATE int sqlite3Fts3SegReaderStart( Fts3Table *p, /* Virtual table handle */ Fts3MultiSegReader *pCsr, /* Cursor object */ Fts3SegFilter *pFilter /* Restrictions on range of iteration */ ){ pCsr->pFilter = pFilter; return fts3SegReaderStart(p, pCsr, pFilter->zTerm, pFilter->nTerm); } SQLITE_PRIVATE int sqlite3Fts3MsrIncrStart( Fts3Table *p, /* Virtual table handle */ Fts3MultiSegReader *pCsr, /* Cursor object */ int iCol, /* Column to match on. */ const char *zTerm, /* Term to iterate through a doclist for */ int nTerm /* Number of bytes in zTerm */ ){ int i; int rc; int nSegment = pCsr->nSegment; int (*xCmp)(Fts3SegReader *, Fts3SegReader *) = ( p->bDescIdx ? fts3SegReaderDoclistCmpRev : fts3SegReaderDoclistCmp ); assert( pCsr->pFilter==0 ); assert( zTerm && nTerm>0 ); /* Advance each segment iterator until it points to the term zTerm/nTerm. */ rc = fts3SegReaderStart(p, pCsr, zTerm, nTerm); if( rc!=SQLITE_OK ) return rc; /* Determine how many of the segments actually point to zTerm/nTerm. */ for(i=0; iapSegment[i]; if( !pSeg->aNode || fts3SegReaderTermCmp(pSeg, zTerm, nTerm) ){ break; } } pCsr->nAdvance = i; /* Advance each of the segments to point to the first docid. */ for(i=0; inAdvance; i++){ rc = fts3SegReaderFirstDocid(p, pCsr->apSegment[i]); if( rc!=SQLITE_OK ) return rc; } fts3SegReaderSort(pCsr->apSegment, i, i, xCmp); assert( iCol<0 || iColnColumn ); pCsr->iColFilter = iCol; return SQLITE_OK; } /* ** This function is called on a MultiSegReader that has been started using ** sqlite3Fts3MsrIncrStart(). 
One or more calls to MsrIncrNext() may also ** have been made. Calling this function puts the MultiSegReader in such ** a state that if the next two calls are: ** ** sqlite3Fts3SegReaderStart() ** sqlite3Fts3SegReaderStep() ** ** then the entire doclist for the term is available in ** MultiSegReader.aDoclist/nDoclist. */ SQLITE_PRIVATE int sqlite3Fts3MsrIncrRestart(Fts3MultiSegReader *pCsr){ int i; /* Used to iterate through segment-readers */ assert( pCsr->zTerm==0 ); assert( pCsr->nTerm==0 ); assert( pCsr->aDoclist==0 ); assert( pCsr->nDoclist==0 ); pCsr->nAdvance = 0; pCsr->bRestart = 1; for(i=0; inSegment; i++){ pCsr->apSegment[i]->pOffsetList = 0; pCsr->apSegment[i]->nOffsetList = 0; pCsr->apSegment[i]->iDocid = 0; } return SQLITE_OK; } SQLITE_PRIVATE int sqlite3Fts3SegReaderStep( Fts3Table *p, /* Virtual table handle */ Fts3MultiSegReader *pCsr /* Cursor object */ ){ int rc = SQLITE_OK; int isIgnoreEmpty = (pCsr->pFilter->flags & FTS3_SEGMENT_IGNORE_EMPTY); int isRequirePos = (pCsr->pFilter->flags & FTS3_SEGMENT_REQUIRE_POS); int isColFilter = (pCsr->pFilter->flags & FTS3_SEGMENT_COLUMN_FILTER); int isPrefix = (pCsr->pFilter->flags & FTS3_SEGMENT_PREFIX); int isScan = (pCsr->pFilter->flags & FTS3_SEGMENT_SCAN); int isFirst = (pCsr->pFilter->flags & FTS3_SEGMENT_FIRST); Fts3SegReader **apSegment = pCsr->apSegment; int nSegment = pCsr->nSegment; Fts3SegFilter *pFilter = pCsr->pFilter; int (*xCmp)(Fts3SegReader *, Fts3SegReader *) = ( p->bDescIdx ? fts3SegReaderDoclistCmpRev : fts3SegReaderDoclistCmp ); if( pCsr->nSegment==0 ) return SQLITE_OK; do { int nMerge; int i; /* Advance the first pCsr->nAdvance entries in the apSegment[] array ** forward. Then sort the list in order of current term again. */ for(i=0; inAdvance; i++){ Fts3SegReader *pSeg = apSegment[i]; if( pSeg->bLookup ){ fts3SegReaderSetEof(pSeg); }else{ rc = fts3SegReaderNext(p, pSeg, 0); } if( rc!=SQLITE_OK ) return rc; } fts3SegReaderSort(apSegment, nSegment, pCsr->nAdvance, fts3SegReaderCmp); pCsr->nAdvance = 0; /* If all the seg-readers are at EOF, we're finished. return SQLITE_OK. */ assert( rc==SQLITE_OK ); if( apSegment[0]->aNode==0 ) break; pCsr->nTerm = apSegment[0]->nTerm; pCsr->zTerm = apSegment[0]->zTerm; /* If this is a prefix-search, and if the term that apSegment[0] points ** to does not share a suffix with pFilter->zTerm/nTerm, then all ** required callbacks have been made. In this case exit early. ** ** Similarly, if this is a search for an exact match, and the first term ** of segment apSegment[0] is not a match, exit early. */ if( pFilter->zTerm && !isScan ){ if( pCsr->nTermnTerm || (!isPrefix && pCsr->nTerm>pFilter->nTerm) || memcmp(pCsr->zTerm, pFilter->zTerm, pFilter->nTerm) ){ break; } } nMerge = 1; while( nMergeaNode && apSegment[nMerge]->nTerm==pCsr->nTerm && 0==memcmp(pCsr->zTerm, apSegment[nMerge]->zTerm, pCsr->nTerm) ){ nMerge++; } assert( isIgnoreEmpty || (isRequirePos && !isColFilter) ); if( nMerge==1 && !isIgnoreEmpty && !isFirst && (p->bDescIdx==0 || fts3SegReaderIsPending(apSegment[0])==0) ){ pCsr->nDoclist = apSegment[0]->nDoclist; if( fts3SegReaderIsPending(apSegment[0]) ){ rc = fts3MsrBufferData(pCsr, apSegment[0]->aDoclist, pCsr->nDoclist); pCsr->aDoclist = pCsr->aBuffer; }else{ pCsr->aDoclist = apSegment[0]->aDoclist; } if( rc==SQLITE_OK ) rc = SQLITE_ROW; }else{ int nDoclist = 0; /* Size of doclist */ sqlite3_int64 iPrev = 0; /* Previous docid stored in doclist */ /* The current term of the first nMerge entries in the array ** of Fts3SegReader objects is the same. 
The doclists must be merged ** and a single term returned with the merged doclist. */ for(i=0; ipOffsetList ){ int j; /* Number of segments that share a docid */ char *pList = 0; int nList = 0; int nByte; sqlite3_int64 iDocid = apSegment[0]->iDocid; fts3SegReaderNextDocid(p, apSegment[0], &pList, &nList); j = 1; while( jpOffsetList && apSegment[j]->iDocid==iDocid ){ fts3SegReaderNextDocid(p, apSegment[j], 0, 0); j++; } if( isColFilter ){ fts3ColumnFilter(pFilter->iCol, 0, &pList, &nList); } if( !isIgnoreEmpty || nList>0 ){ /* Calculate the 'docid' delta value to write into the merged ** doclist. */ sqlite3_int64 iDelta; if( p->bDescIdx && nDoclist>0 ){ iDelta = iPrev - iDocid; }else{ iDelta = iDocid - iPrev; } assert( iDelta>0 || (nDoclist==0 && iDelta==iDocid) ); assert( nDoclist>0 || iDelta==iDocid ); nByte = sqlite3Fts3VarintLen(iDelta) + (isRequirePos?nList+1:0); if( nDoclist+nByte>pCsr->nBuffer ){ char *aNew; pCsr->nBuffer = (nDoclist+nByte)*2; aNew = sqlite3_realloc(pCsr->aBuffer, pCsr->nBuffer); if( !aNew ){ return SQLITE_NOMEM; } pCsr->aBuffer = aNew; } if( isFirst ){ char *a = &pCsr->aBuffer[nDoclist]; int nWrite; nWrite = sqlite3Fts3FirstFilter(iDelta, pList, nList, a); if( nWrite ){ iPrev = iDocid; nDoclist += nWrite; } }else{ nDoclist += sqlite3Fts3PutVarint(&pCsr->aBuffer[nDoclist], iDelta); iPrev = iDocid; if( isRequirePos ){ memcpy(&pCsr->aBuffer[nDoclist], pList, nList); nDoclist += nList; pCsr->aBuffer[nDoclist++] = '\0'; } } } fts3SegReaderSort(apSegment, nMerge, j, xCmp); } if( nDoclist>0 ){ pCsr->aDoclist = pCsr->aBuffer; pCsr->nDoclist = nDoclist; rc = SQLITE_ROW; } } pCsr->nAdvance = nMerge; }while( rc==SQLITE_OK ); return rc; } SQLITE_PRIVATE void sqlite3Fts3SegReaderFinish( Fts3MultiSegReader *pCsr /* Cursor object */ ){ if( pCsr ){ int i; for(i=0; inSegment; i++){ sqlite3Fts3SegReaderFree(pCsr->apSegment[i]); } sqlite3_free(pCsr->apSegment); sqlite3_free(pCsr->aBuffer); pCsr->nSegment = 0; pCsr->apSegment = 0; pCsr->aBuffer = 0; } } /* ** Decode the "end_block" field, selected by column iCol of the SELECT ** statement passed as the first argument. ** ** The "end_block" field may contain either an integer, or a text field ** containing the text representation of two non-negative integers separated ** by one or more space (0x20) characters. In the first case, set *piEndBlock ** to the integer value and *pnByte to zero before returning. In the second, ** set *piEndBlock to the first value and *pnByte to the second. */ static void fts3ReadEndBlockField( sqlite3_stmt *pStmt, int iCol, i64 *piEndBlock, i64 *pnByte ){ const unsigned char *zText = sqlite3_column_text(pStmt, iCol); if( zText ){ int i; int iMul = 1; i64 iVal = 0; for(i=0; zText[i]>='0' && zText[i]<='9'; i++){ iVal = iVal*10 + (zText[i] - '0'); } *piEndBlock = iVal; while( zText[i]==' ' ) i++; iVal = 0; if( zText[i]=='-' ){ i++; iMul = -1; } for(/* no-op */; zText[i]>='0' && zText[i]<='9'; i++){ iVal = iVal*10 + (zText[i] - '0'); } *pnByte = (iVal * (i64)iMul); } } /* ** A segment of size nByte bytes has just been written to absolute level ** iAbsLevel. Promote any segments that should be promoted as a result. 
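**
** As a rough example: if a segment of 1000 bytes has just been written,
** nLimit below is 1500. If every segment stored on a higher level of the
** same index records a size of 1500 bytes or less, all of those segments
** are moved down to level iAbsLevel, with their "idx" values renumbered so
** that the oldest segment receives the smallest idx. If any such segment
** is larger, or has no recorded size (because it was written by an older
** FTS version), no promotion takes place.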
*/ static int fts3PromoteSegments( Fts3Table *p, /* FTS table handle */ sqlite3_int64 iAbsLevel, /* Absolute level just updated */ sqlite3_int64 nByte /* Size of new segment at iAbsLevel */ ){ int rc = SQLITE_OK; sqlite3_stmt *pRange; rc = fts3SqlStmt(p, SQL_SELECT_LEVEL_RANGE2, &pRange, 0); if( rc==SQLITE_OK ){ int bOk = 0; i64 iLast = (iAbsLevel/FTS3_SEGDIR_MAXLEVEL + 1) * FTS3_SEGDIR_MAXLEVEL - 1; i64 nLimit = (nByte*3)/2; /* Loop through all entries in the %_segdir table corresponding to ** segments in this index on levels greater than iAbsLevel. If there is ** at least one such segment, and it is possible to determine that all ** such segments are smaller than nLimit bytes in size, they will be ** promoted to level iAbsLevel. */ sqlite3_bind_int64(pRange, 1, iAbsLevel+1); sqlite3_bind_int64(pRange, 2, iLast); while( SQLITE_ROW==sqlite3_step(pRange) ){ i64 nSize = 0, dummy; fts3ReadEndBlockField(pRange, 2, &dummy, &nSize); if( nSize<=0 || nSize>nLimit ){ /* If nSize==0, then the %_segdir.end_block field does not not ** contain a size value. This happens if it was written by an ** old version of FTS. In this case it is not possible to determine ** the size of the segment, and so segment promotion does not ** take place. */ bOk = 0; break; } bOk = 1; } rc = sqlite3_reset(pRange); if( bOk ){ int iIdx = 0; sqlite3_stmt *pUpdate1 = 0; sqlite3_stmt *pUpdate2 = 0; if( rc==SQLITE_OK ){ rc = fts3SqlStmt(p, SQL_UPDATE_LEVEL_IDX, &pUpdate1, 0); } if( rc==SQLITE_OK ){ rc = fts3SqlStmt(p, SQL_UPDATE_LEVEL, &pUpdate2, 0); } if( rc==SQLITE_OK ){ /* Loop through all %_segdir entries for segments in this index with ** levels equal to or greater than iAbsLevel. As each entry is visited, ** updated it to set (level = -1) and (idx = N), where N is 0 for the ** oldest segment in the range, 1 for the next oldest, and so on. ** ** In other words, move all segments being promoted to level -1, ** setting the "idx" fields as appropriate to keep them in the same ** order. The contents of level -1 (which is never used, except ** transiently here), will be moved back to level iAbsLevel below. */ sqlite3_bind_int64(pRange, 1, iAbsLevel); while( SQLITE_ROW==sqlite3_step(pRange) ){ sqlite3_bind_int(pUpdate1, 1, iIdx++); sqlite3_bind_int(pUpdate1, 2, sqlite3_column_int(pRange, 0)); sqlite3_bind_int(pUpdate1, 3, sqlite3_column_int(pRange, 1)); sqlite3_step(pUpdate1); rc = sqlite3_reset(pUpdate1); if( rc!=SQLITE_OK ){ sqlite3_reset(pRange); break; } } } if( rc==SQLITE_OK ){ rc = sqlite3_reset(pRange); } /* Move level -1 to level iAbsLevel */ if( rc==SQLITE_OK ){ sqlite3_bind_int64(pUpdate2, 1, iAbsLevel); sqlite3_step(pUpdate2); rc = sqlite3_reset(pUpdate2); } } } return rc; } /* ** Merge all level iLevel segments in the database into a single ** iLevel+1 segment. Or, if iLevel<0, merge all segments into a ** single segment with a level equal to the numerically largest level ** currently present in the database. ** ** If this function is called with iLevel<0, but there is only one ** segment in the database, SQLITE_DONE is returned immediately. ** Otherwise, if successful, SQLITE_OK is returned. If an error occurs, ** an SQLite error code is returned. 
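**
** For example, calling this function with iLevel==0 merges all level-0
** segments for the given index and language id into a single new segment
** on level 1, with the "idx" of the new segment chosen by
** fts3AllocateSegdirIdx(). Calling it with iLevel==FTS3_SEGCURSOR_PENDING
** instead flushes the in-memory pending-terms data to a new level-0
** segment.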
*/ static int fts3SegmentMerge( Fts3Table *p, int iLangid, /* Language id to merge */ int iIndex, /* Index in p->aIndex[] to merge */ int iLevel /* Level to merge */ ){ int rc; /* Return code */ int iIdx = 0; /* Index of new segment */ sqlite3_int64 iNewLevel = 0; /* Level/index to create new segment at */ SegmentWriter *pWriter = 0; /* Used to write the new, merged, segment */ Fts3SegFilter filter; /* Segment term filter condition */ Fts3MultiSegReader csr; /* Cursor to iterate through level(s) */ int bIgnoreEmpty = 0; /* True to ignore empty segments */ i64 iMaxLevel = 0; /* Max level number for this index/langid */ assert( iLevel==FTS3_SEGCURSOR_ALL || iLevel==FTS3_SEGCURSOR_PENDING || iLevel>=0 ); assert( iLevel=0 && iIndexnIndex ); rc = sqlite3Fts3SegReaderCursor(p, iLangid, iIndex, iLevel, 0, 0, 1, 0, &csr); if( rc!=SQLITE_OK || csr.nSegment==0 ) goto finished; if( iLevel!=FTS3_SEGCURSOR_PENDING ){ rc = fts3SegmentMaxLevel(p, iLangid, iIndex, &iMaxLevel); if( rc!=SQLITE_OK ) goto finished; } if( iLevel==FTS3_SEGCURSOR_ALL ){ /* This call is to merge all segments in the database to a single ** segment. The level of the new segment is equal to the numerically ** greatest segment level currently present in the database for this ** index. The idx of the new segment is always 0. */ if( csr.nSegment==1 ){ rc = SQLITE_DONE; goto finished; } iNewLevel = iMaxLevel; bIgnoreEmpty = 1; }else{ /* This call is to merge all segments at level iLevel. find the next ** available segment index at level iLevel+1. The call to ** fts3AllocateSegdirIdx() will merge the segments at level iLevel+1 to ** a single iLevel+2 segment if necessary. */ assert( FTS3_SEGCURSOR_PENDING==-1 ); iNewLevel = getAbsoluteLevel(p, iLangid, iIndex, iLevel+1); rc = fts3AllocateSegdirIdx(p, iLangid, iIndex, iLevel+1, &iIdx); bIgnoreEmpty = (iLevel!=FTS3_SEGCURSOR_PENDING) && (iNewLevel>iMaxLevel); } if( rc!=SQLITE_OK ) goto finished; assert( csr.nSegment>0 ); assert( iNewLevel>=getAbsoluteLevel(p, iLangid, iIndex, 0) ); assert( iNewLevelnLeafData); } } } finished: fts3SegWriterFree(pWriter); sqlite3Fts3SegReaderFinish(&csr); return rc; } /* ** Flush the contents of pendingTerms to level 0 segments. */ SQLITE_PRIVATE int sqlite3Fts3PendingTermsFlush(Fts3Table *p){ int rc = SQLITE_OK; int i; for(i=0; rc==SQLITE_OK && inIndex; i++){ rc = fts3SegmentMerge(p, p->iPrevLangid, i, FTS3_SEGCURSOR_PENDING); if( rc==SQLITE_DONE ) rc = SQLITE_OK; } sqlite3Fts3PendingTermsClear(p); /* Determine the auto-incr-merge setting if unknown. If enabled, ** estimate the number of leaf blocks of content to be written */ if( rc==SQLITE_OK && p->bHasStat && p->nAutoincrmerge==0xff && p->nLeafAdd>0 ){ sqlite3_stmt *pStmt = 0; rc = fts3SqlStmt(p, SQL_SELECT_STAT, &pStmt, 0); if( rc==SQLITE_OK ){ sqlite3_bind_int(pStmt, 1, FTS_STAT_AUTOINCRMERGE); rc = sqlite3_step(pStmt); if( rc==SQLITE_ROW ){ p->nAutoincrmerge = sqlite3_column_int(pStmt, 0); if( p->nAutoincrmerge==1 ) p->nAutoincrmerge = 8; }else if( rc==SQLITE_DONE ){ p->nAutoincrmerge = 0; } rc = sqlite3_reset(pStmt); } } return rc; } /* ** Encode N integers as varints into a blob. */ static void fts3EncodeIntArray( int N, /* The number of integers to encode */ u32 *a, /* The integer values */ char *zBuf, /* Write the BLOB here */ int *pNBuf /* Write number of bytes if zBuf[] used here */ ){ int i, j; for(i=j=0; iiPrevDocid. The sizes are encoded as ** a blob of varints. 
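**
** For example (a hypothetical row), a document in a three column table
** whose columns contain 4, 0 and 7 tokens respectively is recorded in the
** %_docsize table as the three byte blob 0x04 0x00 0x07. Token counts of
** 128 or more occupy more than one byte each when encoded as varints.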
*/ static void fts3InsertDocsize( int *pRC, /* Result code */ Fts3Table *p, /* Table into which to insert */ u32 *aSz /* Sizes of each column, in tokens */ ){ char *pBlob; /* The BLOB encoding of the document size */ int nBlob; /* Number of bytes in the BLOB */ sqlite3_stmt *pStmt; /* Statement used to insert the encoding */ int rc; /* Result code from subfunctions */ if( *pRC ) return; pBlob = sqlite3_malloc( 10*p->nColumn ); if( pBlob==0 ){ *pRC = SQLITE_NOMEM; return; } fts3EncodeIntArray(p->nColumn, aSz, pBlob, &nBlob); rc = fts3SqlStmt(p, SQL_REPLACE_DOCSIZE, &pStmt, 0); if( rc ){ sqlite3_free(pBlob); *pRC = rc; return; } sqlite3_bind_int64(pStmt, 1, p->iPrevDocid); sqlite3_bind_blob(pStmt, 2, pBlob, nBlob, sqlite3_free); sqlite3_step(pStmt); *pRC = sqlite3_reset(pStmt); } /* ** Record 0 of the %_stat table contains a blob consisting of N varints, ** where N is the number of user defined columns in the fts3 table plus ** two. If nCol is the number of user defined columns, then values of the ** varints are set as follows: ** ** Varint 0: Total number of rows in the table. ** ** Varint 1..nCol: For each column, the total number of tokens stored in ** the column for all rows of the table. ** ** Varint 1+nCol: The total size, in bytes, of all text values in all ** columns of all rows of the table. ** */ static void fts3UpdateDocTotals( int *pRC, /* The result code */ Fts3Table *p, /* Table being updated */ u32 *aSzIns, /* Size increases */ u32 *aSzDel, /* Size decreases */ int nChng /* Change in the number of documents */ ){ char *pBlob; /* Storage for BLOB written into %_stat */ int nBlob; /* Size of BLOB written into %_stat */ u32 *a; /* Array of integers that becomes the BLOB */ sqlite3_stmt *pStmt; /* Statement for reading and writing */ int i; /* Loop counter */ int rc; /* Result code from subfunctions */ const int nStat = p->nColumn+2; if( *pRC ) return; a = sqlite3_malloc( (sizeof(u32)+10)*nStat ); if( a==0 ){ *pRC = SQLITE_NOMEM; return; } pBlob = (char*)&a[nStat]; rc = fts3SqlStmt(p, SQL_SELECT_STAT, &pStmt, 0); if( rc ){ sqlite3_free(a); *pRC = rc; return; } sqlite3_bind_int(pStmt, 1, FTS_STAT_DOCTOTAL); if( sqlite3_step(pStmt)==SQLITE_ROW ){ fts3DecodeIntArray(nStat, a, sqlite3_column_blob(pStmt, 0), sqlite3_column_bytes(pStmt, 0)); }else{ memset(a, 0, sizeof(u32)*(nStat) ); } rc = sqlite3_reset(pStmt); if( rc!=SQLITE_OK ){ sqlite3_free(a); *pRC = rc; return; } if( nChng<0 && a[0]<(u32)(-nChng) ){ a[0] = 0; }else{ a[0] += nChng; } for(i=0; inColumn+1; i++){ u32 x = a[i+1]; if( x+aSzIns[i] < aSzDel[i] ){ x = 0; }else{ x = x + aSzIns[i] - aSzDel[i]; } a[i+1] = x; } fts3EncodeIntArray(nStat, a, pBlob, &nBlob); rc = fts3SqlStmt(p, SQL_REPLACE_STAT, &pStmt, 0); if( rc ){ sqlite3_free(a); *pRC = rc; return; } sqlite3_bind_int(pStmt, 1, FTS_STAT_DOCTOTAL); sqlite3_bind_blob(pStmt, 2, pBlob, nBlob, SQLITE_STATIC); sqlite3_step(pStmt); *pRC = sqlite3_reset(pStmt); sqlite3_free(a); } /* ** Merge the entire database so that there is one segment for each ** iIndex/iLangid combination. 
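**
** This is the work performed for the user-level "optimize" command. For
** each language id present in the table, and for each of the p->nIndex
** term indexes, fts3SegmentMerge() is invoked with FTS3_SEGCURSOR_ALL, so
** that after a successful run each (iIndex, iLangid) combination is
** represented by at most one segment.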
*/ static int fts3DoOptimize(Fts3Table *p, int bReturnDone){ int bSeenDone = 0; int rc; sqlite3_stmt *pAllLangid = 0; rc = fts3SqlStmt(p, SQL_SELECT_ALL_LANGID, &pAllLangid, 0); if( rc==SQLITE_OK ){ int rc2; sqlite3_bind_int(pAllLangid, 1, p->iPrevLangid); sqlite3_bind_int(pAllLangid, 2, p->nIndex); while( sqlite3_step(pAllLangid)==SQLITE_ROW ){ int i; int iLangid = sqlite3_column_int(pAllLangid, 0); for(i=0; rc==SQLITE_OK && inIndex; i++){ rc = fts3SegmentMerge(p, iLangid, i, FTS3_SEGCURSOR_ALL); if( rc==SQLITE_DONE ){ bSeenDone = 1; rc = SQLITE_OK; } } } rc2 = sqlite3_reset(pAllLangid); if( rc==SQLITE_OK ) rc = rc2; } sqlite3Fts3SegmentsClose(p); sqlite3Fts3PendingTermsClear(p); return (rc==SQLITE_OK && bReturnDone && bSeenDone) ? SQLITE_DONE : rc; } /* ** This function is called when the user executes the following statement: ** ** INSERT INTO () VALUES('rebuild'); ** ** The entire FTS index is discarded and rebuilt. If the table is one ** created using the content=xxx option, then the new index is based on ** the current contents of the xxx table. Otherwise, it is rebuilt based ** on the contents of the %_content table. */ static int fts3DoRebuild(Fts3Table *p){ int rc; /* Return Code */ rc = fts3DeleteAll(p, 0); if( rc==SQLITE_OK ){ u32 *aSz = 0; u32 *aSzIns = 0; u32 *aSzDel = 0; sqlite3_stmt *pStmt = 0; int nEntry = 0; /* Compose and prepare an SQL statement to loop through the content table */ char *zSql = sqlite3_mprintf("SELECT %s" , p->zReadExprlist); if( !zSql ){ rc = SQLITE_NOMEM; }else{ rc = sqlite3_prepare_v2(p->db, zSql, -1, &pStmt, 0); sqlite3_free(zSql); } if( rc==SQLITE_OK ){ int nByte = sizeof(u32) * (p->nColumn+1)*3; aSz = (u32 *)sqlite3_malloc(nByte); if( aSz==0 ){ rc = SQLITE_NOMEM; }else{ memset(aSz, 0, nByte); aSzIns = &aSz[p->nColumn+1]; aSzDel = &aSzIns[p->nColumn+1]; } } while( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pStmt) ){ int iCol; int iLangid = langidFromSelect(p, pStmt); rc = fts3PendingTermsDocid(p, iLangid, sqlite3_column_int64(pStmt, 0)); memset(aSz, 0, sizeof(aSz[0]) * (p->nColumn+1)); for(iCol=0; rc==SQLITE_OK && iColnColumn; iCol++){ if( p->abNotindexed[iCol]==0 ){ const char *z = (const char *) sqlite3_column_text(pStmt, iCol+1); rc = fts3PendingTermsAdd(p, iLangid, z, iCol, &aSz[iCol]); aSz[p->nColumn] += sqlite3_column_bytes(pStmt, iCol+1); } } if( p->bHasDocsize ){ fts3InsertDocsize(&rc, p, aSz); } if( rc!=SQLITE_OK ){ sqlite3_finalize(pStmt); pStmt = 0; }else{ nEntry++; for(iCol=0; iCol<=p->nColumn; iCol++){ aSzIns[iCol] += aSz[iCol]; } } } if( p->bFts4 ){ fts3UpdateDocTotals(&rc, p, aSzIns, aSzDel, nEntry); } sqlite3_free(aSz); if( pStmt ){ int rc2 = sqlite3_finalize(pStmt); if( rc==SQLITE_OK ){ rc = rc2; } } } return rc; } /* ** This function opens a cursor used to read the input data for an ** incremental merge operation. Specifically, it opens a cursor to scan ** the oldest nSeg segments (idx=0 through idx=(nSeg-1)) in absolute ** level iAbsLevel. 
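**
** Incremental merges of this kind are normally requested by the user with
** a special insert of the form:
**
**   INSERT INTO <fts-table>(<fts-table>) VALUES('merge=X,Y');
**
** (the precise meaning of X and Y is described in the FTS4 documentation).
** This function only opens the read-side cursor for such an operation.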
*/ static int fts3IncrmergeCsr( Fts3Table *p, /* FTS3 table handle */ sqlite3_int64 iAbsLevel, /* Absolute level to open */ int nSeg, /* Number of segments to merge */ Fts3MultiSegReader *pCsr /* Cursor object to populate */ ){ int rc; /* Return Code */ sqlite3_stmt *pStmt = 0; /* Statement used to read %_segdir entry */ int nByte; /* Bytes allocated at pCsr->apSegment[] */ /* Allocate space for the Fts3MultiSegReader.aCsr[] array */ memset(pCsr, 0, sizeof(*pCsr)); nByte = sizeof(Fts3SegReader *) * nSeg; pCsr->apSegment = (Fts3SegReader **)sqlite3_malloc(nByte); if( pCsr->apSegment==0 ){ rc = SQLITE_NOMEM; }else{ memset(pCsr->apSegment, 0, nByte); rc = fts3SqlStmt(p, SQL_SELECT_LEVEL, &pStmt, 0); } if( rc==SQLITE_OK ){ int i; int rc2; sqlite3_bind_int64(pStmt, 1, iAbsLevel); assert( pCsr->nSegment==0 ); for(i=0; rc==SQLITE_OK && sqlite3_step(pStmt)==SQLITE_ROW && iapSegment[i] ); pCsr->nSegment++; } rc2 = sqlite3_reset(pStmt); if( rc==SQLITE_OK ) rc = rc2; } return rc; } typedef struct IncrmergeWriter IncrmergeWriter; typedef struct NodeWriter NodeWriter; typedef struct Blob Blob; typedef struct NodeReader NodeReader; /* ** An instance of the following structure is used as a dynamic buffer ** to build up nodes or other blobs of data in. ** ** The function blobGrowBuffer() is used to extend the allocation. */ struct Blob { char *a; /* Pointer to allocation */ int n; /* Number of valid bytes of data in a[] */ int nAlloc; /* Allocated size of a[] (nAlloc>=n) */ }; /* ** This structure is used to build up buffers containing segment b-tree ** nodes (blocks). */ struct NodeWriter { sqlite3_int64 iBlock; /* Current block id */ Blob key; /* Last key written to the current block */ Blob block; /* Current block image */ }; /* ** An object of this type contains the state required to create or append ** to an appendable b-tree segment. */ struct IncrmergeWriter { int nLeafEst; /* Space allocated for leaf blocks */ int nWork; /* Number of leaf pages flushed */ sqlite3_int64 iAbsLevel; /* Absolute level of input segments */ int iIdx; /* Index of *output* segment in iAbsLevel+1 */ sqlite3_int64 iStart; /* Block number of first allocated block */ sqlite3_int64 iEnd; /* Block number of last allocated block */ sqlite3_int64 nLeafData; /* Bytes of leaf page data so far */ u8 bNoLeafData; /* If true, store 0 for segment size */ NodeWriter aNodeWriter[FTS_MAX_APPENDABLE_HEIGHT]; }; /* ** An object of the following type is used to read data from a single ** FTS segment node. See the following functions: ** ** nodeReaderInit() ** nodeReaderNext() ** nodeReaderRelease() */ struct NodeReader { const char *aNode; int nNode; int iOff; /* Current offset within aNode[] */ /* Output variables. Containing the current node entry. */ sqlite3_int64 iChild; /* Pointer to child node */ Blob term; /* Current term */ const char *aDoclist; /* Pointer to doclist */ int nDoclist; /* Size of doclist in bytes */ }; /* ** If *pRc is not SQLITE_OK when this function is called, it is a no-op. ** Otherwise, if the allocation at pBlob->a is not already at least nMin ** bytes in size, extend (realloc) it to be so. ** ** If an OOM error occurs, set *pRc to SQLITE_NOMEM and leave pBlob->a ** unmodified. Otherwise, if the allocation succeeds, update pBlob->nAlloc ** to reflect the new size of the pBlob->a[] buffer. 
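**
** Typical usage follows the pattern used throughout this file:
**
**   blobGrowBuffer(&blob, nRequired, &rc);
**   if( rc==SQLITE_OK ){
**     memcpy(blob.a, aData, nRequired);
**     blob.n = nRequired;
**   }
**
** so that an earlier error, or an OOM within blobGrowBuffer() itself,
** simply causes the copy to be skipped.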
*/ static void blobGrowBuffer(Blob *pBlob, int nMin, int *pRc){ if( *pRc==SQLITE_OK && nMin>pBlob->nAlloc ){ int nAlloc = nMin; char *a = (char *)sqlite3_realloc(pBlob->a, nAlloc); if( a ){ pBlob->nAlloc = nAlloc; pBlob->a = a; }else{ *pRc = SQLITE_NOMEM; } } } /* ** Attempt to advance the node-reader object passed as the first argument to ** the next entry on the node. ** ** Return an error code if an error occurs (SQLITE_NOMEM is possible). ** Otherwise return SQLITE_OK. If there is no next entry on the node ** (e.g. because the current entry is the last) set NodeReader->aNode to ** NULL to indicate EOF. Otherwise, populate the NodeReader structure output ** variables for the new entry. */ static int nodeReaderNext(NodeReader *p){ int bFirst = (p->term.n==0); /* True for first term on the node */ int nPrefix = 0; /* Bytes to copy from previous term */ int nSuffix = 0; /* Bytes to append to the prefix */ int rc = SQLITE_OK; /* Return code */ assert( p->aNode ); if( p->iChild && bFirst==0 ) p->iChild++; if( p->iOff>=p->nNode ){ /* EOF */ p->aNode = 0; }else{ if( bFirst==0 ){ p->iOff += fts3GetVarint32(&p->aNode[p->iOff], &nPrefix); } p->iOff += fts3GetVarint32(&p->aNode[p->iOff], &nSuffix); blobGrowBuffer(&p->term, nPrefix+nSuffix, &rc); if( rc==SQLITE_OK ){ memcpy(&p->term.a[nPrefix], &p->aNode[p->iOff], nSuffix); p->term.n = nPrefix+nSuffix; p->iOff += nSuffix; if( p->iChild==0 ){ p->iOff += fts3GetVarint32(&p->aNode[p->iOff], &p->nDoclist); p->aDoclist = &p->aNode[p->iOff]; p->iOff += p->nDoclist; } } } assert( p->iOff<=p->nNode ); return rc; } /* ** Release all dynamic resources held by node-reader object *p. */ static void nodeReaderRelease(NodeReader *p){ sqlite3_free(p->term.a); } /* ** Initialize a node-reader object to read the node in buffer aNode/nNode. ** ** If successful, SQLITE_OK is returned and the NodeReader object set to ** point to the first entry on the node (if any). Otherwise, an SQLite ** error code is returned. */ static int nodeReaderInit(NodeReader *p, const char *aNode, int nNode){ memset(p, 0, sizeof(NodeReader)); p->aNode = aNode; p->nNode = nNode; /* Figure out if this is a leaf or an internal node. */ if( p->aNode[0] ){ /* An internal node. */ p->iOff = 1 + sqlite3Fts3GetVarint(&p->aNode[1], &p->iChild); }else{ p->iOff = 1; } return nodeReaderNext(p); } /* ** This function is called while writing an FTS segment each time a leaf o ** node is finished and written to disk. The key (zTerm/nTerm) is guaranteed ** to be greater than the largest key on the node just written, but smaller ** than or equal to the first key that will be written to the next leaf ** node. ** ** The block id of the leaf node just written to disk may be found in ** (pWriter->aNodeWriter[0].iBlock) when this function is called. */ static int fts3IncrmergePush( Fts3Table *p, /* Fts3 table handle */ IncrmergeWriter *pWriter, /* Writer object */ const char *zTerm, /* Term to write to internal node */ int nTerm /* Bytes at zTerm */ ){ sqlite3_int64 iPtr = pWriter->aNodeWriter[0].iBlock; int iLayer; assert( nTerm>0 ); for(iLayer=1; ALWAYS(iLayeraNodeWriter[iLayer]; int rc = SQLITE_OK; int nPrefix; int nSuffix; int nSpace; /* Figure out how much space the key will consume if it is written to ** the current node of layer iLayer. Due to the prefix compression, ** the space required changes depending on which node the key is to ** be added to. 
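** For example, if the last key written to the current node of this layer
** was "data" and the new key is "database", then nPrefix==4, nSuffix==4
** and the key consumes 1+1+4 == 6 bytes (one byte for each varint plus
** the four byte suffix "base"). If the node is still empty, no
** prefix-length varint is written at all and the entire key is stored as
** the suffix.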
*/ nPrefix = fts3PrefixCompress(pNode->key.a, pNode->key.n, zTerm, nTerm); nSuffix = nTerm - nPrefix; nSpace = sqlite3Fts3VarintLen(nPrefix); nSpace += sqlite3Fts3VarintLen(nSuffix) + nSuffix; if( pNode->key.n==0 || (pNode->block.n + nSpace)<=p->nNodeSize ){ /* If the current node of layer iLayer contains zero keys, or if adding ** the key to it will not cause it to grow to larger than nNodeSize ** bytes in size, write the key here. */ Blob *pBlk = &pNode->block; if( pBlk->n==0 ){ blobGrowBuffer(pBlk, p->nNodeSize, &rc); if( rc==SQLITE_OK ){ pBlk->a[0] = (char)iLayer; pBlk->n = 1 + sqlite3Fts3PutVarint(&pBlk->a[1], iPtr); } } blobGrowBuffer(pBlk, pBlk->n + nSpace, &rc); blobGrowBuffer(&pNode->key, nTerm, &rc); if( rc==SQLITE_OK ){ if( pNode->key.n ){ pBlk->n += sqlite3Fts3PutVarint(&pBlk->a[pBlk->n], nPrefix); } pBlk->n += sqlite3Fts3PutVarint(&pBlk->a[pBlk->n], nSuffix); memcpy(&pBlk->a[pBlk->n], &zTerm[nPrefix], nSuffix); pBlk->n += nSuffix; memcpy(pNode->key.a, zTerm, nTerm); pNode->key.n = nTerm; } }else{ /* Otherwise, flush the current node of layer iLayer to disk. ** Then allocate a new, empty sibling node. The key will be written ** into the parent of this node. */ rc = fts3WriteSegment(p, pNode->iBlock, pNode->block.a, pNode->block.n); assert( pNode->block.nAlloc>=p->nNodeSize ); pNode->block.a[0] = (char)iLayer; pNode->block.n = 1 + sqlite3Fts3PutVarint(&pNode->block.a[1], iPtr+1); iNextPtr = pNode->iBlock; pNode->iBlock++; pNode->key.n = 0; } if( rc!=SQLITE_OK || iNextPtr==0 ) return rc; iPtr = iNextPtr; } assert( 0 ); return 0; } /* ** Append a term and (optionally) doclist to the FTS segment node currently ** stored in blob *pNode. The node need not contain any terms, but the ** header must be written before this function is called. ** ** A node header is a single 0x00 byte for a leaf node, or a height varint ** followed by the left-hand-child varint for an internal node. ** ** The term to be appended is passed via arguments zTerm/nTerm. For a ** leaf node, the doclist is passed as aDoclist/nDoclist. For an internal ** node, both aDoclist and nDoclist must be passed 0. ** ** If the size of the value in blob pPrev is zero, then this is the first ** term written to the node. Otherwise, pPrev contains a copy of the ** previous term. Before this function returns, it is updated to contain a ** copy of zTerm/nTerm. ** ** It is assumed that the buffer associated with pNode is already large ** enough to accommodate the new entry. The buffer associated with pPrev ** is extended by this function if requrired. ** ** If an error (i.e. OOM condition) occurs, an SQLite error code is ** returned. Otherwise, SQLITE_OK. */ static int fts3AppendToNode( Blob *pNode, /* Current node image to append to */ Blob *pPrev, /* Buffer containing previous term written */ const char *zTerm, /* New term to write */ int nTerm, /* Size of zTerm in bytes */ const char *aDoclist, /* Doclist (or NULL) to write */ int nDoclist /* Size of aDoclist in bytes */ ){ int rc = SQLITE_OK; /* Return code */ int bFirst = (pPrev->n==0); /* True if this is the first term written */ int nPrefix; /* Size of term prefix in bytes */ int nSuffix; /* Size of term suffix in bytes */ /* Node must have already been started. There must be a doclist for a ** leaf node, and there must not be a doclist for an internal node. 
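** The entry appended to the node image by this function is therefore, for
** a leaf node:
**
**   varint(nPrefix)        (omitted for the first term on the node)
**   varint(nSuffix)
**   <nSuffix bytes of term suffix>
**   varint(nDoclist)
**   <nDoclist bytes of doclist>
**
** An internal node entry is the same, minus the two trailing doclist
** fields.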
*/ assert( pNode->n>0 ); assert( (pNode->a[0]=='\0')==(aDoclist!=0) ); blobGrowBuffer(pPrev, nTerm, &rc); if( rc!=SQLITE_OK ) return rc; nPrefix = fts3PrefixCompress(pPrev->a, pPrev->n, zTerm, nTerm); nSuffix = nTerm - nPrefix; memcpy(pPrev->a, zTerm, nTerm); pPrev->n = nTerm; if( bFirst==0 ){ pNode->n += sqlite3Fts3PutVarint(&pNode->a[pNode->n], nPrefix); } pNode->n += sqlite3Fts3PutVarint(&pNode->a[pNode->n], nSuffix); memcpy(&pNode->a[pNode->n], &zTerm[nPrefix], nSuffix); pNode->n += nSuffix; if( aDoclist ){ pNode->n += sqlite3Fts3PutVarint(&pNode->a[pNode->n], nDoclist); memcpy(&pNode->a[pNode->n], aDoclist, nDoclist); pNode->n += nDoclist; } assert( pNode->n<=pNode->nAlloc ); return SQLITE_OK; } /* ** Append the current term and doclist pointed to by cursor pCsr to the ** appendable b-tree segment opened for writing by pWriter. ** ** Return SQLITE_OK if successful, or an SQLite error code otherwise. */ static int fts3IncrmergeAppend( Fts3Table *p, /* Fts3 table handle */ IncrmergeWriter *pWriter, /* Writer object */ Fts3MultiSegReader *pCsr /* Cursor containing term and doclist */ ){ const char *zTerm = pCsr->zTerm; int nTerm = pCsr->nTerm; const char *aDoclist = pCsr->aDoclist; int nDoclist = pCsr->nDoclist; int rc = SQLITE_OK; /* Return code */ int nSpace; /* Total space in bytes required on leaf */ int nPrefix; /* Size of prefix shared with previous term */ int nSuffix; /* Size of suffix (nTerm - nPrefix) */ NodeWriter *pLeaf; /* Object used to write leaf nodes */ pLeaf = &pWriter->aNodeWriter[0]; nPrefix = fts3PrefixCompress(pLeaf->key.a, pLeaf->key.n, zTerm, nTerm); nSuffix = nTerm - nPrefix; nSpace = sqlite3Fts3VarintLen(nPrefix); nSpace += sqlite3Fts3VarintLen(nSuffix) + nSuffix; nSpace += sqlite3Fts3VarintLen(nDoclist) + nDoclist; /* If the current block is not empty, and if adding this term/doclist ** to the current block would make it larger than Fts3Table.nNodeSize ** bytes, write this block out to the database. */ if( pLeaf->block.n>0 && (pLeaf->block.n + nSpace)>p->nNodeSize ){ rc = fts3WriteSegment(p, pLeaf->iBlock, pLeaf->block.a, pLeaf->block.n); pWriter->nWork++; /* Add the current term to the parent node. The term added to the ** parent must: ** ** a) be greater than the largest term on the leaf node just written ** to the database (still available in pLeaf->key), and ** ** b) be less than or equal to the term about to be added to the new ** leaf node (zTerm/nTerm). ** ** In other words, it must be the prefix of zTerm 1 byte longer than ** the common prefix (if any) of zTerm and pWriter->zTerm. */ if( rc==SQLITE_OK ){ rc = fts3IncrmergePush(p, pWriter, zTerm, nPrefix+1); } /* Advance to the next output block */ pLeaf->iBlock++; pLeaf->key.n = 0; pLeaf->block.n = 0; nSuffix = nTerm; nSpace = 1; nSpace += sqlite3Fts3VarintLen(nSuffix) + nSuffix; nSpace += sqlite3Fts3VarintLen(nDoclist) + nDoclist; } pWriter->nLeafData += nSpace; blobGrowBuffer(&pLeaf->block, pLeaf->block.n + nSpace, &rc); if( rc==SQLITE_OK ){ if( pLeaf->block.n==0 ){ pLeaf->block.n = 1; pLeaf->block.a[0] = '\0'; } rc = fts3AppendToNode( &pLeaf->block, &pLeaf->key, zTerm, nTerm, aDoclist, nDoclist ); } return rc; } /* ** This function is called to release all dynamic resources held by the ** merge-writer object pWriter, and if no error has occurred, to flush ** all outstanding node buffers held by pWriter to disk. ** ** If *pRc is not SQLITE_OK when this function is called, then no attempt ** is made to write any data to disk. Instead, this function serves only ** to release outstanding resources. 
** ** Otherwise, if *pRc is initially SQLITE_OK and an error occurs while ** flushing buffers to disk, *pRc is set to an SQLite error code before ** returning. */ static void fts3IncrmergeRelease( Fts3Table *p, /* FTS3 table handle */ IncrmergeWriter *pWriter, /* Merge-writer object */ int *pRc /* IN/OUT: Error code */ ){ int i; /* Used to iterate through non-root layers */ int iRoot; /* Index of root in pWriter->aNodeWriter */ NodeWriter *pRoot; /* NodeWriter for root node */ int rc = *pRc; /* Error code */ /* Set iRoot to the index in pWriter->aNodeWriter[] of the output segment ** root node. If the segment fits entirely on a single leaf node, iRoot ** will be set to 0. If the root node is the parent of the leaves, iRoot ** will be 1. And so on. */ for(iRoot=FTS_MAX_APPENDABLE_HEIGHT-1; iRoot>=0; iRoot--){ NodeWriter *pNode = &pWriter->aNodeWriter[iRoot]; if( pNode->block.n>0 ) break; assert( *pRc || pNode->block.nAlloc==0 ); assert( *pRc || pNode->key.nAlloc==0 ); sqlite3_free(pNode->block.a); sqlite3_free(pNode->key.a); } /* Empty output segment. This is a no-op. */ if( iRoot<0 ) return; /* The entire output segment fits on a single node. Normally, this means ** the node would be stored as a blob in the "root" column of the %_segdir ** table. However, this is not permitted in this case. The problem is that ** space has already been reserved in the %_segments table, and so the ** start_block and end_block fields of the %_segdir table must be populated. ** And, by design or by accident, released versions of FTS cannot handle ** segments that fit entirely on the root node with start_block!=0. ** ** Instead, create a synthetic root node that contains nothing but a ** pointer to the single content node. So that the segment consists of a ** single leaf and a single interior (root) node. ** ** Todo: Better might be to defer allocating space in the %_segments ** table until we are sure it is needed. */ if( iRoot==0 ){ Blob *pBlock = &pWriter->aNodeWriter[1].block; blobGrowBuffer(pBlock, 1 + FTS3_VARINT_MAX, &rc); if( rc==SQLITE_OK ){ pBlock->a[0] = 0x01; pBlock->n = 1 + sqlite3Fts3PutVarint( &pBlock->a[1], pWriter->aNodeWriter[0].iBlock ); } iRoot = 1; } pRoot = &pWriter->aNodeWriter[iRoot]; /* Flush all currently outstanding nodes to disk. */ for(i=0; iaNodeWriter[i]; if( pNode->block.n>0 && rc==SQLITE_OK ){ rc = fts3WriteSegment(p, pNode->iBlock, pNode->block.a, pNode->block.n); } sqlite3_free(pNode->block.a); sqlite3_free(pNode->key.a); } /* Write the %_segdir record. */ if( rc==SQLITE_OK ){ rc = fts3WriteSegdir(p, pWriter->iAbsLevel+1, /* level */ pWriter->iIdx, /* idx */ pWriter->iStart, /* start_block */ pWriter->aNodeWriter[0].iBlock, /* leaves_end_block */ pWriter->iEnd, /* end_block */ (pWriter->bNoLeafData==0 ? pWriter->nLeafData : 0), /* end_block */ pRoot->block.a, pRoot->block.n /* root */ ); } sqlite3_free(pRoot->block.a); sqlite3_free(pRoot->key.a); *pRc = rc; } /* ** Compare the term in buffer zLhs (size in bytes nLhs) with that in ** zRhs (size in bytes nRhs) using memcmp. If one term is a prefix of ** the other, it is considered to be smaller than the other. ** ** Return -ve if zLhs is smaller than zRhs, 0 if it is equal, or +ve ** if it is greater. 
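**
** For example:
**
**   fts3TermCmp("abc", 3, "abc", 3)    // returns 0
**   fts3TermCmp("abc", 3, "abcd", 4)   // returns a -ve value
**   fts3TermCmp("abd", 3, "abcd", 4)   // returns a +ve value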
*/ static int fts3TermCmp( const char *zLhs, int nLhs, /* LHS of comparison */ const char *zRhs, int nRhs /* RHS of comparison */ ){ int nCmp = MIN(nLhs, nRhs); int res; res = memcmp(zLhs, zRhs, nCmp); if( res==0 ) res = nLhs - nRhs; return res; } /* ** Query to see if the entry in the %_segments table with blockid iEnd is ** NULL. If no error occurs and the entry is NULL, set *pbRes 1 before ** returning. Otherwise, set *pbRes to 0. ** ** Or, if an error occurs while querying the database, return an SQLite ** error code. The final value of *pbRes is undefined in this case. ** ** This is used to test if a segment is an "appendable" segment. If it ** is, then a NULL entry has been inserted into the %_segments table ** with blockid %_segdir.end_block. */ static int fts3IsAppendable(Fts3Table *p, sqlite3_int64 iEnd, int *pbRes){ int bRes = 0; /* Result to set *pbRes to */ sqlite3_stmt *pCheck = 0; /* Statement to query database with */ int rc; /* Return code */ rc = fts3SqlStmt(p, SQL_SEGMENT_IS_APPENDABLE, &pCheck, 0); if( rc==SQLITE_OK ){ sqlite3_bind_int64(pCheck, 1, iEnd); if( SQLITE_ROW==sqlite3_step(pCheck) ) bRes = 1; rc = sqlite3_reset(pCheck); } *pbRes = bRes; return rc; } /* ** This function is called when initializing an incremental-merge operation. ** It checks if the existing segment with index value iIdx at absolute level ** (iAbsLevel+1) can be appended to by the incremental merge. If it can, the ** merge-writer object *pWriter is initialized to write to it. ** ** An existing segment can be appended to by an incremental merge if: ** ** * It was initially created as an appendable segment (with all required ** space pre-allocated), and ** ** * The first key read from the input (arguments zKey and nKey) is ** greater than the largest key currently stored in the potential ** output segment. 
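**
** The first condition is tested by fts3IsAppendable() (a NULL row in the
** %_segments table at blockid %_segdir.end_block marks a pre-allocated,
** appendable segment). The second is tested by loading the leaf node at
** %_segdir.leaves_end_block and comparing zKey/nKey against the last term
** stored on it.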
*/ static int fts3IncrmergeLoad( Fts3Table *p, /* Fts3 table handle */ sqlite3_int64 iAbsLevel, /* Absolute level of input segments */ int iIdx, /* Index of candidate output segment */ const char *zKey, /* First key to write */ int nKey, /* Number of bytes in nKey */ IncrmergeWriter *pWriter /* Populate this object */ ){ int rc; /* Return code */ sqlite3_stmt *pSelect = 0; /* SELECT to read %_segdir entry */ rc = fts3SqlStmt(p, SQL_SELECT_SEGDIR, &pSelect, 0); if( rc==SQLITE_OK ){ sqlite3_int64 iStart = 0; /* Value of %_segdir.start_block */ sqlite3_int64 iLeafEnd = 0; /* Value of %_segdir.leaves_end_block */ sqlite3_int64 iEnd = 0; /* Value of %_segdir.end_block */ const char *aRoot = 0; /* Pointer to %_segdir.root buffer */ int nRoot = 0; /* Size of aRoot[] in bytes */ int rc2; /* Return code from sqlite3_reset() */ int bAppendable = 0; /* Set to true if segment is appendable */ /* Read the %_segdir entry for index iIdx absolute level (iAbsLevel+1) */ sqlite3_bind_int64(pSelect, 1, iAbsLevel+1); sqlite3_bind_int(pSelect, 2, iIdx); if( sqlite3_step(pSelect)==SQLITE_ROW ){ iStart = sqlite3_column_int64(pSelect, 1); iLeafEnd = sqlite3_column_int64(pSelect, 2); fts3ReadEndBlockField(pSelect, 3, &iEnd, &pWriter->nLeafData); if( pWriter->nLeafData<0 ){ pWriter->nLeafData = pWriter->nLeafData * -1; } pWriter->bNoLeafData = (pWriter->nLeafData==0); nRoot = sqlite3_column_bytes(pSelect, 4); aRoot = sqlite3_column_blob(pSelect, 4); }else{ return sqlite3_reset(pSelect); } /* Check for the zero-length marker in the %_segments table */ rc = fts3IsAppendable(p, iEnd, &bAppendable); /* Check that zKey/nKey is larger than the largest key the candidate */ if( rc==SQLITE_OK && bAppendable ){ char *aLeaf = 0; int nLeaf = 0; rc = sqlite3Fts3ReadBlock(p, iLeafEnd, &aLeaf, &nLeaf, 0); if( rc==SQLITE_OK ){ NodeReader reader; for(rc = nodeReaderInit(&reader, aLeaf, nLeaf); rc==SQLITE_OK && reader.aNode; rc = nodeReaderNext(&reader) ){ assert( reader.aNode ); } if( fts3TermCmp(zKey, nKey, reader.term.a, reader.term.n)<=0 ){ bAppendable = 0; } nodeReaderRelease(&reader); } sqlite3_free(aLeaf); } if( rc==SQLITE_OK && bAppendable ){ /* It is possible to append to this segment. Set up the IncrmergeWriter ** object to do so. 
*/ int i; int nHeight = (int)aRoot[0]; NodeWriter *pNode; pWriter->nLeafEst = (int)((iEnd - iStart) + 1)/FTS_MAX_APPENDABLE_HEIGHT; pWriter->iStart = iStart; pWriter->iEnd = iEnd; pWriter->iAbsLevel = iAbsLevel; pWriter->iIdx = iIdx; for(i=nHeight+1; iaNodeWriter[i].iBlock = pWriter->iStart + i*pWriter->nLeafEst; } pNode = &pWriter->aNodeWriter[nHeight]; pNode->iBlock = pWriter->iStart + pWriter->nLeafEst*nHeight; blobGrowBuffer(&pNode->block, MAX(nRoot, p->nNodeSize), &rc); if( rc==SQLITE_OK ){ memcpy(pNode->block.a, aRoot, nRoot); pNode->block.n = nRoot; } for(i=nHeight; i>=0 && rc==SQLITE_OK; i--){ NodeReader reader; pNode = &pWriter->aNodeWriter[i]; rc = nodeReaderInit(&reader, pNode->block.a, pNode->block.n); while( reader.aNode && rc==SQLITE_OK ) rc = nodeReaderNext(&reader); blobGrowBuffer(&pNode->key, reader.term.n, &rc); if( rc==SQLITE_OK ){ memcpy(pNode->key.a, reader.term.a, reader.term.n); pNode->key.n = reader.term.n; if( i>0 ){ char *aBlock = 0; int nBlock = 0; pNode = &pWriter->aNodeWriter[i-1]; pNode->iBlock = reader.iChild; rc = sqlite3Fts3ReadBlock(p, reader.iChild, &aBlock, &nBlock, 0); blobGrowBuffer(&pNode->block, MAX(nBlock, p->nNodeSize), &rc); if( rc==SQLITE_OK ){ memcpy(pNode->block.a, aBlock, nBlock); pNode->block.n = nBlock; } sqlite3_free(aBlock); } } nodeReaderRelease(&reader); } } rc2 = sqlite3_reset(pSelect); if( rc==SQLITE_OK ) rc = rc2; } return rc; } /* ** Determine the largest segment index value that exists within absolute ** level iAbsLevel+1. If no error occurs, set *piIdx to this value plus ** one before returning SQLITE_OK. Or, if there are no segments at all ** within level iAbsLevel, set *piIdx to zero. ** ** If an error occurs, return an SQLite error code. The final value of ** *piIdx is undefined in this case. */ static int fts3IncrmergeOutputIdx( Fts3Table *p, /* FTS Table handle */ sqlite3_int64 iAbsLevel, /* Absolute index of input segments */ int *piIdx /* OUT: Next free index at iAbsLevel+1 */ ){ int rc; sqlite3_stmt *pOutputIdx = 0; /* SQL used to find output index */ rc = fts3SqlStmt(p, SQL_NEXT_SEGMENT_INDEX, &pOutputIdx, 0); if( rc==SQLITE_OK ){ sqlite3_bind_int64(pOutputIdx, 1, iAbsLevel+1); sqlite3_step(pOutputIdx); *piIdx = sqlite3_column_int(pOutputIdx, 0); rc = sqlite3_reset(pOutputIdx); } return rc; } /* ** Allocate an appendable output segment on absolute level iAbsLevel+1 ** with idx value iIdx. ** ** In the %_segdir table, a segment is defined by the values in three ** columns: ** ** start_block ** leaves_end_block ** end_block ** ** When an appendable segment is allocated, it is estimated that the ** maximum number of leaf blocks that may be required is the sum of the ** number of leaf blocks consumed by the input segments, plus the number ** of input segments, multiplied by two. This value is stored in stack ** variable nLeafEst. ** ** A total of 16*nLeafEst blocks are allocated when an appendable segment ** is created ((1 + end_block - start_block)==16*nLeafEst). The contiguous ** array of leaf nodes starts at the first block allocated. The array ** of interior nodes that are parents of the leaf nodes start at block ** (start_block + (1 + end_block - start_block) / 16). And so on. ** ** In the actual code below, the value "16" is replaced with the ** pre-processor macro FTS_MAX_APPENDABLE_HEIGHT. 
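**
** Worked example (all numbers assumed for illustration): if the input
** segments contain a total of 6 leaf blocks spread over 2 segments, then
** nLeafEst = (6 + 2) * 2 = 16 and, with FTS_MAX_APPENDABLE_HEIGHT==16,
** 256 blocks are reserved. With start_block=1000 this gives
** end_block=1255; leaf nodes may occupy blocks 1000..1015, their parents
** begin at block 1016, the next layer up at block 1032, and so on for
** each of the 16 possible layers.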
*/ static int fts3IncrmergeWriter( Fts3Table *p, /* Fts3 table handle */ sqlite3_int64 iAbsLevel, /* Absolute level of input segments */ int iIdx, /* Index of new output segment */ Fts3MultiSegReader *pCsr, /* Cursor that data will be read from */ IncrmergeWriter *pWriter /* Populate this object */ ){ int rc; /* Return Code */ int i; /* Iterator variable */ int nLeafEst = 0; /* Blocks allocated for leaf nodes */ sqlite3_stmt *pLeafEst = 0; /* SQL used to determine nLeafEst */ sqlite3_stmt *pFirstBlock = 0; /* SQL used to determine first block */ /* Calculate nLeafEst. */ rc = fts3SqlStmt(p, SQL_MAX_LEAF_NODE_ESTIMATE, &pLeafEst, 0); if( rc==SQLITE_OK ){ sqlite3_bind_int64(pLeafEst, 1, iAbsLevel); sqlite3_bind_int64(pLeafEst, 2, pCsr->nSegment); if( SQLITE_ROW==sqlite3_step(pLeafEst) ){ nLeafEst = sqlite3_column_int(pLeafEst, 0); } rc = sqlite3_reset(pLeafEst); } if( rc!=SQLITE_OK ) return rc; /* Calculate the first block to use in the output segment */ rc = fts3SqlStmt(p, SQL_NEXT_SEGMENTS_ID, &pFirstBlock, 0); if( rc==SQLITE_OK ){ if( SQLITE_ROW==sqlite3_step(pFirstBlock) ){ pWriter->iStart = sqlite3_column_int64(pFirstBlock, 0); pWriter->iEnd = pWriter->iStart - 1; pWriter->iEnd += nLeafEst * FTS_MAX_APPENDABLE_HEIGHT; } rc = sqlite3_reset(pFirstBlock); } if( rc!=SQLITE_OK ) return rc; /* Insert the marker in the %_segments table to make sure nobody tries ** to steal the space just allocated. This is also used to identify ** appendable segments. */ rc = fts3WriteSegment(p, pWriter->iEnd, 0, 0); if( rc!=SQLITE_OK ) return rc; pWriter->iAbsLevel = iAbsLevel; pWriter->nLeafEst = nLeafEst; pWriter->iIdx = iIdx; /* Set up the array of NodeWriter objects */ for(i=0; iaNodeWriter[i].iBlock = pWriter->iStart + i*pWriter->nLeafEst; } return SQLITE_OK; } /* ** Remove an entry from the %_segdir table. This involves running the ** following two statements: ** ** DELETE FROM %_segdir WHERE level = :iAbsLevel AND idx = :iIdx ** UPDATE %_segdir SET idx = idx - 1 WHERE level = :iAbsLevel AND idx > :iIdx ** ** The DELETE statement removes the specific %_segdir level. The UPDATE ** statement ensures that the remaining segments have contiguously allocated ** idx values. */ static int fts3RemoveSegdirEntry( Fts3Table *p, /* FTS3 table handle */ sqlite3_int64 iAbsLevel, /* Absolute level to delete from */ int iIdx /* Index of %_segdir entry to delete */ ){ int rc; /* Return code */ sqlite3_stmt *pDelete = 0; /* DELETE statement */ rc = fts3SqlStmt(p, SQL_DELETE_SEGDIR_ENTRY, &pDelete, 0); if( rc==SQLITE_OK ){ sqlite3_bind_int64(pDelete, 1, iAbsLevel); sqlite3_bind_int(pDelete, 2, iIdx); sqlite3_step(pDelete); rc = sqlite3_reset(pDelete); } return rc; } /* ** One or more segments have just been removed from absolute level iAbsLevel. ** Update the 'idx' values of the remaining segments in the level so that ** the idx values are a contiguous sequence starting from 0. 
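**
** For example (hypothetical values): if the level originally contained
** segments with idx values 0, 1, 2, 3 and 4, and the segments with idx
** values 1 and 3 have just been removed, the surviving segments 0, 2 and
** 4 are renumbered to 0, 1 and 2 respectively.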
*/
static int fts3RepackSegdirLevel(
  Fts3Table *p,                   /* FTS3 table handle */
  sqlite3_int64 iAbsLevel         /* Absolute level to repack */
){
  int rc;                         /* Return code */
  int *aIdx = 0;                  /* Array of remaining idx values */
  int nIdx = 0;                   /* Valid entries in aIdx[] */
  int nAlloc = 0;                 /* Allocated size of aIdx[] */
  int i;                          /* Iterator variable */
  sqlite3_stmt *pSelect = 0;      /* Select statement to read idx values */
  sqlite3_stmt *pUpdate = 0;      /* Update statement to modify idx values */

  rc = fts3SqlStmt(p, SQL_SELECT_INDEXES, &pSelect, 0);
  if( rc==SQLITE_OK ){
    int rc2;
    sqlite3_bind_int64(pSelect, 1, iAbsLevel);
    while( SQLITE_ROW==sqlite3_step(pSelect) ){
      if( nIdx>=nAlloc ){
        int *aNew;
        nAlloc += 16;
        aNew = sqlite3_realloc(aIdx, nAlloc*sizeof(int));
        if( !aNew ){
          rc = SQLITE_NOMEM;
          break;
        }
        aIdx = aNew;
      }
      aIdx[nIdx++] = sqlite3_column_int(pSelect, 0);
    }
    rc2 = sqlite3_reset(pSelect);
    if( rc==SQLITE_OK ) rc = rc2;
  }

  if( rc==SQLITE_OK ){
    rc = fts3SqlStmt(p, SQL_SHIFT_SEGDIR_ENTRY, &pUpdate, 0);
  }
  if( rc==SQLITE_OK ){
    sqlite3_bind_int64(pUpdate, 2, iAbsLevel);
  }

  assert( p->bIgnoreSavepoint==0 );
  p->bIgnoreSavepoint = 1;
  for(i=0; rc==SQLITE_OK && i<nIdx; i++){
    if( aIdx[i]!=i ){
      sqlite3_bind_int(pUpdate, 3, aIdx[i]);
      sqlite3_bind_int(pUpdate, 1, i);
      sqlite3_step(pUpdate);
      rc = sqlite3_reset(pUpdate);
    }
  }
  p->bIgnoreSavepoint = 0;

  sqlite3_free(aIdx);
  return rc;
}

static void fts3StartNode(Blob *pNode, int iHeight, sqlite3_int64 iChild){
  pNode->a[0] = (char)iHeight;
  if( iChild ){
    assert( pNode->nAlloc>=1+sqlite3Fts3VarintLen(iChild) );
    pNode->n = 1 + sqlite3Fts3PutVarint(&pNode->a[1], iChild);
  }else{
    assert( pNode->nAlloc>=1 );
    pNode->n = 1;
  }
}

/*
** The first two arguments are a pointer to and the size of a segment b-tree
** node. The node may be a leaf or an internal node.
**
** This function creates a new node image in blob object *pNew by copying
** all terms that are greater than or equal to zTerm/nTerm (for leaf nodes)
** or greater than zTerm/nTerm (for internal nodes) from aNode/nNode.
*/
static int fts3TruncateNode(
  const char *aNode,              /* Current node image */
  int nNode,                      /* Size of aNode in bytes */
  Blob *pNew,                     /* OUT: Write new node image here */
  const char *zTerm,              /* Omit all terms smaller than this */
  int nTerm,                      /* Size of zTerm in bytes */
  sqlite3_int64 *piBlock          /* OUT: Block number in next layer down */
){
  NodeReader reader;              /* Reader object */
  Blob prev = {0, 0, 0};          /* Previous term written to new node */
  int rc = SQLITE_OK;             /* Return code */
  int bLeaf = aNode[0]=='\0';     /* True for a leaf node */

  /* Allocate required output space */
  blobGrowBuffer(pNew, nNode, &rc);
  if( rc!=SQLITE_OK ) return rc;
  pNew->n = 0;

  /* Populate new node buffer */
  for(rc = nodeReaderInit(&reader, aNode, nNode);
      rc==SQLITE_OK && reader.aNode;
      rc = nodeReaderNext(&reader)
  ){
    if( pNew->n==0 ){
      int res = fts3TermCmp(reader.term.a, reader.term.n, zTerm, nTerm);
      if( res<0 || (bLeaf==0 && res==0) ) continue;
      fts3StartNode(pNew, (int)aNode[0], reader.iChild);
      *piBlock = reader.iChild;
    }
    rc = fts3AppendToNode(
        pNew, &prev, reader.term.a, reader.term.n,
        reader.aDoclist, reader.nDoclist
    );
    if( rc!=SQLITE_OK ) break;
  }
  if( pNew->n==0 ){
    fts3StartNode(pNew, (int)aNode[0], reader.iChild);
    *piBlock = reader.iChild;
  }

  assert( pNew->n<=pNew->nAlloc );

  nodeReaderRelease(&reader);
  sqlite3_free(prev.a);
  return rc;
}

/*
** Remove all terms smaller than zTerm/nTerm from segment iIdx in absolute
** level iAbsLevel. This may involve deleting entries from the %_segments
** table, and modifying existing entries in both the %_segments and %_segdir
** tables.
**
** SQLITE_OK is returned if the segment is updated successfully. Or an
** SQLite error code otherwise.
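**
** For example (hypothetical terms): if zTerm/nTerm is "mike", any leaf
** nodes that contain only terms smaller than "mike" are deleted from the
** %_segments table, the segment's start_block value is advanced to the
** first surviving leaf, and the nodes on the left-hand edge of the b-tree
** (including the root image stored in %_segdir) are rewritten so that they
** no longer refer to the removed blocks.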
*/ static int fts3TruncateSegment( Fts3Table *p, /* FTS3 table handle */ sqlite3_int64 iAbsLevel, /* Absolute level of segment to modify */ int iIdx, /* Index within level of segment to modify */ const char *zTerm, /* Remove terms smaller than this */ int nTerm /* Number of bytes in buffer zTerm */ ){ int rc = SQLITE_OK; /* Return code */ Blob root = {0,0,0}; /* New root page image */ Blob block = {0,0,0}; /* Buffer used for any other block */ sqlite3_int64 iBlock = 0; /* Block id */ sqlite3_int64 iNewStart = 0; /* New value for iStartBlock */ sqlite3_int64 iOldStart = 0; /* Old value for iStartBlock */ sqlite3_stmt *pFetch = 0; /* Statement used to fetch segdir */ rc = fts3SqlStmt(p, SQL_SELECT_SEGDIR, &pFetch, 0); if( rc==SQLITE_OK ){ int rc2; /* sqlite3_reset() return code */ sqlite3_bind_int64(pFetch, 1, iAbsLevel); sqlite3_bind_int(pFetch, 2, iIdx); if( SQLITE_ROW==sqlite3_step(pFetch) ){ const char *aRoot = sqlite3_column_blob(pFetch, 4); int nRoot = sqlite3_column_bytes(pFetch, 4); iOldStart = sqlite3_column_int64(pFetch, 1); rc = fts3TruncateNode(aRoot, nRoot, &root, zTerm, nTerm, &iBlock); } rc2 = sqlite3_reset(pFetch); if( rc==SQLITE_OK ) rc = rc2; } while( rc==SQLITE_OK && iBlock ){ char *aBlock = 0; int nBlock = 0; iNewStart = iBlock; rc = sqlite3Fts3ReadBlock(p, iBlock, &aBlock, &nBlock, 0); if( rc==SQLITE_OK ){ rc = fts3TruncateNode(aBlock, nBlock, &block, zTerm, nTerm, &iBlock); } if( rc==SQLITE_OK ){ rc = fts3WriteSegment(p, iNewStart, block.a, block.n); } sqlite3_free(aBlock); } /* Variable iNewStart now contains the first valid leaf node. */ if( rc==SQLITE_OK && iNewStart ){ sqlite3_stmt *pDel = 0; rc = fts3SqlStmt(p, SQL_DELETE_SEGMENTS_RANGE, &pDel, 0); if( rc==SQLITE_OK ){ sqlite3_bind_int64(pDel, 1, iOldStart); sqlite3_bind_int64(pDel, 2, iNewStart-1); sqlite3_step(pDel); rc = sqlite3_reset(pDel); } } if( rc==SQLITE_OK ){ sqlite3_stmt *pChomp = 0; rc = fts3SqlStmt(p, SQL_CHOMP_SEGDIR, &pChomp, 0); if( rc==SQLITE_OK ){ sqlite3_bind_int64(pChomp, 1, iNewStart); sqlite3_bind_blob(pChomp, 2, root.a, root.n, SQLITE_STATIC); sqlite3_bind_int64(pChomp, 3, iAbsLevel); sqlite3_bind_int(pChomp, 4, iIdx); sqlite3_step(pChomp); rc = sqlite3_reset(pChomp); } } sqlite3_free(root.a); sqlite3_free(block.a); return rc; } /* ** This function is called after an incrmental-merge operation has run to ** merge (or partially merge) two or more segments from absolute level ** iAbsLevel. ** ** Each input segment is either removed from the db completely (if all of ** its data was copied to the output segment by the incrmerge operation) ** or modified in place so that it no longer contains those entries that ** have been duplicated in the output segment. */ static int fts3IncrmergeChomp( Fts3Table *p, /* FTS table handle */ sqlite3_int64 iAbsLevel, /* Absolute level containing segments */ Fts3MultiSegReader *pCsr, /* Chomp all segments opened by this cursor */ int *pnRem /* Number of segments not deleted */ ){ int i; int nRem = 0; int rc = SQLITE_OK; for(i=pCsr->nSegment-1; i>=0 && rc==SQLITE_OK; i--){ Fts3SegReader *pSeg = 0; int j; /* Find the Fts3SegReader object with Fts3SegReader.iIdx==i. It is hiding ** somewhere in the pCsr->apSegment[] array. */ for(j=0; ALWAYS(jnSegment); j++){ pSeg = pCsr->apSegment[j]; if( pSeg->iIdx==i ) break; } assert( jnSegment && pSeg->iIdx==i ); if( pSeg->aNode==0 ){ /* Seg-reader is at EOF. Remove the entire input segment. 
*/ rc = fts3DeleteSegment(p, pSeg); if( rc==SQLITE_OK ){ rc = fts3RemoveSegdirEntry(p, iAbsLevel, pSeg->iIdx); } *pnRem = 0; }else{ /* The incremental merge did not copy all the data from this ** segment to the upper level. The segment is modified in place ** so that it contains no keys smaller than zTerm/nTerm. */ const char *zTerm = pSeg->zTerm; int nTerm = pSeg->nTerm; rc = fts3TruncateSegment(p, iAbsLevel, pSeg->iIdx, zTerm, nTerm); nRem++; } } if( rc==SQLITE_OK && nRem!=pCsr->nSegment ){ rc = fts3RepackSegdirLevel(p, iAbsLevel); } *pnRem = nRem; return rc; } /* ** Store an incr-merge hint in the database. */ static int fts3IncrmergeHintStore(Fts3Table *p, Blob *pHint){ sqlite3_stmt *pReplace = 0; int rc; /* Return code */ rc = fts3SqlStmt(p, SQL_REPLACE_STAT, &pReplace, 0); if( rc==SQLITE_OK ){ sqlite3_bind_int(pReplace, 1, FTS_STAT_INCRMERGEHINT); sqlite3_bind_blob(pReplace, 2, pHint->a, pHint->n, SQLITE_STATIC); sqlite3_step(pReplace); rc = sqlite3_reset(pReplace); } return rc; } /* ** Load an incr-merge hint from the database. The incr-merge hint, if one ** exists, is stored in the rowid==1 row of the %_stat table. ** ** If successful, populate blob *pHint with the value read from the %_stat ** table and return SQLITE_OK. Otherwise, if an error occurs, return an ** SQLite error code. */ static int fts3IncrmergeHintLoad(Fts3Table *p, Blob *pHint){ sqlite3_stmt *pSelect = 0; int rc; pHint->n = 0; rc = fts3SqlStmt(p, SQL_SELECT_STAT, &pSelect, 0); if( rc==SQLITE_OK ){ int rc2; sqlite3_bind_int(pSelect, 1, FTS_STAT_INCRMERGEHINT); if( SQLITE_ROW==sqlite3_step(pSelect) ){ const char *aHint = sqlite3_column_blob(pSelect, 0); int nHint = sqlite3_column_bytes(pSelect, 0); if( aHint ){ blobGrowBuffer(pHint, nHint, &rc); if( rc==SQLITE_OK ){ memcpy(pHint->a, aHint, nHint); pHint->n = nHint; } } } rc2 = sqlite3_reset(pSelect); if( rc==SQLITE_OK ) rc = rc2; } return rc; } /* ** If *pRc is not SQLITE_OK when this function is called, it is a no-op. ** Otherwise, append an entry to the hint stored in blob *pHint. Each entry ** consists of two varints, the absolute level number of the input segments ** and the number of input segments. ** ** If successful, leave *pRc set to SQLITE_OK and return. If an error occurs, ** set *pRc to an SQLite error code before returning. */ static void fts3IncrmergeHintPush( Blob *pHint, /* Hint blob to append to */ i64 iAbsLevel, /* First varint to store in hint */ int nInput, /* Second varint to store in hint */ int *pRc /* IN/OUT: Error code */ ){ blobGrowBuffer(pHint, pHint->n + 2*FTS3_VARINT_MAX, pRc); if( *pRc==SQLITE_OK ){ pHint->n += sqlite3Fts3PutVarint(&pHint->a[pHint->n], iAbsLevel); pHint->n += sqlite3Fts3PutVarint(&pHint->a[pHint->n], (i64)nInput); } } /* ** Read the last entry (most recently pushed) from the hint blob *pHint ** and then remove the entry. Write the two values read to *piAbsLevel and ** *pnInput before returning. ** ** If no error occurs, return SQLITE_OK. If the hint blob in *pHint does ** not contain at least two valid varints, return SQLITE_CORRUPT_VTAB. */ static int fts3IncrmergeHintPop(Blob *pHint, i64 *piAbsLevel, int *pnInput){ const int nHint = pHint->n; int i; i = pHint->n-2; while( i>0 && (pHint->a[i-1] & 0x80) ) i--; while( i>0 && (pHint->a[i-1] & 0x80) ) i--; pHint->n = i; i += sqlite3Fts3GetVarint(&pHint->a[i], piAbsLevel); i += fts3GetVarint32(&pHint->a[i], pnInput); if( i!=nHint ) return FTS_CORRUPT_VTAB; return SQLITE_OK; } /* ** Attempt an incremental merge that writes nMerge leaf blocks. 
** ** Incremental merges happen nMin segments at a time. The segments ** to be merged are the nMin oldest segments (the ones with the smallest ** values for the _segdir.idx field) in the highest level that contains ** at least nMin segments. Multiple merges might occur in an attempt to ** write the quota of nMerge leaf blocks. */ SQLITE_PRIVATE int sqlite3Fts3Incrmerge(Fts3Table *p, int nMerge, int nMin){ int rc; /* Return code */ int nRem = nMerge; /* Number of leaf pages yet to be written */ Fts3MultiSegReader *pCsr; /* Cursor used to read input data */ Fts3SegFilter *pFilter; /* Filter used with cursor pCsr */ IncrmergeWriter *pWriter; /* Writer object */ int nSeg = 0; /* Number of input segments */ sqlite3_int64 iAbsLevel = 0; /* Absolute level number to work on */ Blob hint = {0, 0, 0}; /* Hint read from %_stat table */ int bDirtyHint = 0; /* True if blob 'hint' has been modified */ /* Allocate space for the cursor, filter and writer objects */ const int nAlloc = sizeof(*pCsr) + sizeof(*pFilter) + sizeof(*pWriter); pWriter = (IncrmergeWriter *)sqlite3_malloc(nAlloc); if( !pWriter ) return SQLITE_NOMEM; pFilter = (Fts3SegFilter *)&pWriter[1]; pCsr = (Fts3MultiSegReader *)&pFilter[1]; rc = fts3IncrmergeHintLoad(p, &hint); while( rc==SQLITE_OK && nRem>0 ){ const i64 nMod = FTS3_SEGDIR_MAXLEVEL * p->nIndex; sqlite3_stmt *pFindLevel = 0; /* SQL used to determine iAbsLevel */ int bUseHint = 0; /* True if attempting to append */ int iIdx = 0; /* Largest idx in level (iAbsLevel+1) */ /* Search the %_segdir table for the absolute level with the smallest ** relative level number that contains at least nMin segments, if any. ** If one is found, set iAbsLevel to the absolute level number and ** nSeg to nMin. If no level with at least nMin segments can be found, ** set nSeg to -1. */ rc = fts3SqlStmt(p, SQL_FIND_MERGE_LEVEL, &pFindLevel, 0); sqlite3_bind_int(pFindLevel, 1, nMin); if( sqlite3_step(pFindLevel)==SQLITE_ROW ){ iAbsLevel = sqlite3_column_int64(pFindLevel, 0); nSeg = nMin; }else{ nSeg = -1; } rc = sqlite3_reset(pFindLevel); /* If the hint read from the %_stat table is not empty, check if the ** last entry in it specifies a relative level smaller than or equal ** to the level identified by the block above (if any). If so, this ** iteration of the loop will work on merging at the hinted level. */ if( rc==SQLITE_OK && hint.n ){ int nHint = hint.n; sqlite3_int64 iHintAbsLevel = 0; /* Hint level */ int nHintSeg = 0; /* Hint number of segments */ rc = fts3IncrmergeHintPop(&hint, &iHintAbsLevel, &nHintSeg); if( nSeg<0 || (iAbsLevel % nMod) >= (iHintAbsLevel % nMod) ){ iAbsLevel = iHintAbsLevel; nSeg = nHintSeg; bUseHint = 1; bDirtyHint = 1; }else{ /* This undoes the effect of the HintPop() above - so that no entry ** is removed from the hint blob. */ hint.n = nHint; } } /* If nSeg is less that zero, then there is no level with at least ** nMin segments and no hint in the %_stat table. No work to do. ** Exit early in this case. */ if( nSeg<0 ) break; /* Open a cursor to iterate through the contents of the oldest nSeg ** indexes of absolute level iAbsLevel. If this cursor is opened using ** the 'hint' parameters, it is possible that there are less than nSeg ** segments available in level iAbsLevel. In this case, no work is ** done on iAbsLevel - fall through to the next iteration of the loop ** to start work on some other level. 
*/ memset(pWriter, 0, nAlloc); pFilter->flags = FTS3_SEGMENT_REQUIRE_POS; if( rc==SQLITE_OK ){ rc = fts3IncrmergeOutputIdx(p, iAbsLevel, &iIdx); assert( bUseHint==1 || bUseHint==0 ); if( iIdx==0 || (bUseHint && iIdx==1) ){ int bIgnore = 0; rc = fts3SegmentIsMaxLevel(p, iAbsLevel+1, &bIgnore); if( bIgnore ){ pFilter->flags |= FTS3_SEGMENT_IGNORE_EMPTY; } } } if( rc==SQLITE_OK ){ rc = fts3IncrmergeCsr(p, iAbsLevel, nSeg, pCsr); } if( SQLITE_OK==rc && pCsr->nSegment==nSeg && SQLITE_OK==(rc = sqlite3Fts3SegReaderStart(p, pCsr, pFilter)) && SQLITE_ROW==(rc = sqlite3Fts3SegReaderStep(p, pCsr)) ){ if( bUseHint && iIdx>0 ){ const char *zKey = pCsr->zTerm; int nKey = pCsr->nTerm; rc = fts3IncrmergeLoad(p, iAbsLevel, iIdx-1, zKey, nKey, pWriter); }else{ rc = fts3IncrmergeWriter(p, iAbsLevel, iIdx, pCsr, pWriter); } if( rc==SQLITE_OK && pWriter->nLeafEst ){ fts3LogMerge(nSeg, iAbsLevel); do { rc = fts3IncrmergeAppend(p, pWriter, pCsr); if( rc==SQLITE_OK ) rc = sqlite3Fts3SegReaderStep(p, pCsr); if( pWriter->nWork>=nRem && rc==SQLITE_ROW ) rc = SQLITE_OK; }while( rc==SQLITE_ROW ); /* Update or delete the input segments */ if( rc==SQLITE_OK ){ nRem -= (1 + pWriter->nWork); rc = fts3IncrmergeChomp(p, iAbsLevel, pCsr, &nSeg); if( nSeg!=0 ){ bDirtyHint = 1; fts3IncrmergeHintPush(&hint, iAbsLevel, nSeg, &rc); } } } if( nSeg!=0 ){ pWriter->nLeafData = pWriter->nLeafData * -1; } fts3IncrmergeRelease(p, pWriter, &rc); if( nSeg==0 && pWriter->bNoLeafData==0 ){ fts3PromoteSegments(p, iAbsLevel+1, pWriter->nLeafData); } } sqlite3Fts3SegReaderFinish(pCsr); } /* Write the hint values into the %_stat table for the next incr-merger */ if( bDirtyHint && rc==SQLITE_OK ){ rc = fts3IncrmergeHintStore(p, &hint); } sqlite3_free(pWriter); sqlite3_free(hint.a); return rc; } /* ** Convert the text beginning at *pz into an integer and return ** its value. Advance *pz to point to the first character past ** the integer. */ static int fts3Getint(const char **pz){ const char *z = *pz; int i = 0; while( (*z)>='0' && (*z)<='9' ) i = 10*i + *(z++) - '0'; *pz = z; return i; } /* ** Process statements of the form: ** ** INSERT INTO table(table) VALUES('merge=A,B'); ** ** A and B are integers that decode to be the number of leaf pages ** written for the merge, and the minimum number of segments on a level ** before it will be selected for a merge, respectively. */ static int fts3DoIncrmerge( Fts3Table *p, /* FTS3 table handle */ const char *zParam /* Nul-terminated string containing "A,B" */ ){ int rc; int nMin = (FTS3_MERGE_COUNT / 2); int nMerge = 0; const char *z = zParam; /* Read the first integer value */ nMerge = fts3Getint(&z); /* If the first integer value is followed by a ',', read the second ** integer value. */ if( z[0]==',' && z[1]!='\0' ){ z++; nMin = fts3Getint(&z); } if( z[0]!='\0' || nMin<2 ){ rc = SQLITE_ERROR; }else{ rc = SQLITE_OK; if( !p->bHasStat ){ assert( p->bFts4==0 ); sqlite3Fts3CreateStatTable(&rc, p); } if( rc==SQLITE_OK ){ rc = sqlite3Fts3Incrmerge(p, nMerge, nMin); } sqlite3Fts3SegmentsClose(p); } return rc; } /* ** Process statements of the form: ** ** INSERT INTO table(table) VALUES('automerge=X'); ** ** where X is an integer. X==0 means to turn automerge off. X!=0 means ** turn it on. The setting is persistent. 
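**
** For example, a statement of the form (the table name "t1" is purely
** illustrative):
**
**   INSERT INTO t1(t1) VALUES('automerge=8');
**
** enables automatic incremental merging, with merges involving roughly
** eight segments at a time. Note that, in the code below, a value of 1
** (or anything larger than FTS3_MERGE_COUNT) is replaced by the default
** of 8 before the setting is written to the %_stat table.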
*/ static int fts3DoAutoincrmerge( Fts3Table *p, /* FTS3 table handle */ const char *zParam /* Nul-terminated string containing boolean */ ){ int rc = SQLITE_OK; sqlite3_stmt *pStmt = 0; p->nAutoincrmerge = fts3Getint(&zParam); if( p->nAutoincrmerge==1 || p->nAutoincrmerge>FTS3_MERGE_COUNT ){ p->nAutoincrmerge = 8; } if( !p->bHasStat ){ assert( p->bFts4==0 ); sqlite3Fts3CreateStatTable(&rc, p); if( rc ) return rc; } rc = fts3SqlStmt(p, SQL_REPLACE_STAT, &pStmt, 0); if( rc ) return rc; sqlite3_bind_int(pStmt, 1, FTS_STAT_AUTOINCRMERGE); sqlite3_bind_int(pStmt, 2, p->nAutoincrmerge); sqlite3_step(pStmt); rc = sqlite3_reset(pStmt); return rc; } /* ** Return a 64-bit checksum for the FTS index entry specified by the ** arguments to this function. */ static u64 fts3ChecksumEntry( const char *zTerm, /* Pointer to buffer containing term */ int nTerm, /* Size of zTerm in bytes */ int iLangid, /* Language id for current row */ int iIndex, /* Index (0..Fts3Table.nIndex-1) */ i64 iDocid, /* Docid for current row. */ int iCol, /* Column number */ int iPos /* Position */ ){ int i; u64 ret = (u64)iDocid; ret += (ret<<3) + iLangid; ret += (ret<<3) + iIndex; ret += (ret<<3) + iCol; ret += (ret<<3) + iPos; for(i=0; inIndex-1) */ int *pRc /* OUT: Return code */ ){ Fts3SegFilter filter; Fts3MultiSegReader csr; int rc; u64 cksum = 0; assert( *pRc==SQLITE_OK ); memset(&filter, 0, sizeof(filter)); memset(&csr, 0, sizeof(csr)); filter.flags = FTS3_SEGMENT_REQUIRE_POS|FTS3_SEGMENT_IGNORE_EMPTY; filter.flags |= FTS3_SEGMENT_SCAN; rc = sqlite3Fts3SegReaderCursor( p, iLangid, iIndex, FTS3_SEGCURSOR_ALL, 0, 0, 0, 1,&csr ); if( rc==SQLITE_OK ){ rc = sqlite3Fts3SegReaderStart(p, &csr, &filter); } if( rc==SQLITE_OK ){ while( SQLITE_ROW==(rc = sqlite3Fts3SegReaderStep(p, &csr)) ){ char *pCsr = csr.aDoclist; char *pEnd = &pCsr[csr.nDoclist]; i64 iDocid = 0; i64 iCol = 0; i64 iPos = 0; pCsr += sqlite3Fts3GetVarint(pCsr, &iDocid); while( pCsriPrevLangid); sqlite3_bind_int(pAllLangid, 2, p->nIndex); while( rc==SQLITE_OK && sqlite3_step(pAllLangid)==SQLITE_ROW ){ int iLangid = sqlite3_column_int(pAllLangid, 0); int i; for(i=0; inIndex; i++){ cksum1 = cksum1 ^ fts3ChecksumIndex(p, iLangid, i, &rc); } } rc2 = sqlite3_reset(pAllLangid); if( rc==SQLITE_OK ) rc = rc2; } /* This block calculates the checksum according to the %_content table */ if( rc==SQLITE_OK ){ sqlite3_tokenizer_module const *pModule = p->pTokenizer->pModule; sqlite3_stmt *pStmt = 0; char *zSql; zSql = sqlite3_mprintf("SELECT %s" , p->zReadExprlist); if( !zSql ){ rc = SQLITE_NOMEM; }else{ rc = sqlite3_prepare_v2(p->db, zSql, -1, &pStmt, 0); sqlite3_free(zSql); } while( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pStmt) ){ i64 iDocid = sqlite3_column_int64(pStmt, 0); int iLang = langidFromSelect(p, pStmt); int iCol; for(iCol=0; rc==SQLITE_OK && iColnColumn; iCol++){ if( p->abNotindexed[iCol]==0 ){ const char *zText = (const char *)sqlite3_column_text(pStmt, iCol+1); int nText = sqlite3_column_bytes(pStmt, iCol+1); sqlite3_tokenizer_cursor *pT = 0; rc = sqlite3Fts3OpenTokenizer(p->pTokenizer, iLang, zText, nText,&pT); while( rc==SQLITE_OK ){ char const *zToken; /* Buffer containing token */ int nToken = 0; /* Number of bytes in token */ int iDum1 = 0, iDum2 = 0; /* Dummy variables */ int iPos = 0; /* Position of token in zText */ rc = pModule->xNext(pT, &zToken, &nToken, &iDum1, &iDum2, &iPos); if( rc==SQLITE_OK ){ int i; cksum2 = cksum2 ^ fts3ChecksumEntry( zToken, nToken, iLang, 0, iDocid, iCol, iPos ); for(i=1; inIndex; i++){ if( p->aIndex[i].nPrefix<=nToken ){ 
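/* This token is at least as long as the prefix size of prefix-index i.
            ** The same occurrence therefore also contributes an entry
            ** (consisting of just the first nPrefix bytes of the token) to
            ** that prefix index, so fold that entry into the checksum too.
            ** For example, for a hypothetical prefix index of length 4, the
            ** token "database" also produces an entry for "data".  */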
cksum2 = cksum2 ^ fts3ChecksumEntry( zToken, p->aIndex[i].nPrefix, iLang, i, iDocid, iCol, iPos ); } } } } if( pT ) pModule->xClose(pT); if( rc==SQLITE_DONE ) rc = SQLITE_OK; } } } sqlite3_finalize(pStmt); } *pbOk = (cksum1==cksum2); return rc; } /* ** Run the integrity-check. If no error occurs and the current contents of ** the FTS index are correct, return SQLITE_OK. Or, if the contents of the ** FTS index are incorrect, return SQLITE_CORRUPT_VTAB. ** ** Or, if an error (e.g. an OOM or IO error) occurs, return an SQLite ** error code. ** ** The integrity-check works as follows. For each token and indexed token ** prefix in the document set, a 64-bit checksum is calculated (by code ** in fts3ChecksumEntry()) based on the following: ** ** + The index number (0 for the main index, 1 for the first prefix ** index etc.), ** + The token (or token prefix) text itself, ** + The language-id of the row it appears in, ** + The docid of the row it appears in, ** + The column it appears in, and ** + The tokens position within that column. ** ** The checksums for all entries in the index are XORed together to create ** a single checksum for the entire index. ** ** The integrity-check code calculates the same checksum in two ways: ** ** 1. By scanning the contents of the FTS index, and ** 2. By scanning and tokenizing the content table. ** ** If the two checksums are identical, the integrity-check is deemed to have ** passed. */ static int fts3DoIntegrityCheck( Fts3Table *p /* FTS3 table handle */ ){ int rc; int bOk = 0; rc = fts3IntegrityCheck(p, &bOk); if( rc==SQLITE_OK && bOk==0 ) rc = FTS_CORRUPT_VTAB; return rc; } /* ** Handle a 'special' INSERT of the form: ** ** "INSERT INTO tbl(tbl) VALUES()" ** ** Argument pVal contains the result of . Currently the only ** meaningful value to insert is the text 'optimize'. */ static int fts3SpecialInsert(Fts3Table *p, sqlite3_value *pVal){ int rc; /* Return Code */ const char *zVal = (const char *)sqlite3_value_text(pVal); int nVal = sqlite3_value_bytes(pVal); if( !zVal ){ return SQLITE_NOMEM; }else if( nVal==8 && 0==sqlite3_strnicmp(zVal, "optimize", 8) ){ rc = fts3DoOptimize(p, 0); }else if( nVal==7 && 0==sqlite3_strnicmp(zVal, "rebuild", 7) ){ rc = fts3DoRebuild(p); }else if( nVal==15 && 0==sqlite3_strnicmp(zVal, "integrity-check", 15) ){ rc = fts3DoIntegrityCheck(p); }else if( nVal>6 && 0==sqlite3_strnicmp(zVal, "merge=", 6) ){ rc = fts3DoIncrmerge(p, &zVal[6]); }else if( nVal>10 && 0==sqlite3_strnicmp(zVal, "automerge=", 10) ){ rc = fts3DoAutoincrmerge(p, &zVal[10]); #ifdef SQLITE_TEST }else if( nVal>9 && 0==sqlite3_strnicmp(zVal, "nodesize=", 9) ){ p->nNodeSize = atoi(&zVal[9]); rc = SQLITE_OK; }else if( nVal>11 && 0==sqlite3_strnicmp(zVal, "maxpending=", 9) ){ p->nMaxPendingData = atoi(&zVal[11]); rc = SQLITE_OK; }else if( nVal>21 && 0==sqlite3_strnicmp(zVal, "test-no-incr-doclist=", 21) ){ p->bNoIncrDoclist = atoi(&zVal[21]); rc = SQLITE_OK; #endif }else{ rc = SQLITE_ERROR; } return rc; } #ifndef SQLITE_DISABLE_FTS4_DEFERRED /* ** Delete all cached deferred doclists. Deferred doclists are cached ** (allocated) by the sqlite3Fts3CacheDeferredDoclists() function. */ SQLITE_PRIVATE void sqlite3Fts3FreeDeferredDoclists(Fts3Cursor *pCsr){ Fts3DeferredToken *pDef; for(pDef=pCsr->pDeferred; pDef; pDef=pDef->pNext){ fts3PendingListDelete(pDef->pList); pDef->pList = 0; } } /* ** Free all entries in the pCsr->pDeffered list. Entries are added to ** this list using sqlite3Fts3DeferToken(). 
*/ SQLITE_PRIVATE void sqlite3Fts3FreeDeferredTokens(Fts3Cursor *pCsr){ Fts3DeferredToken *pDef; Fts3DeferredToken *pNext; for(pDef=pCsr->pDeferred; pDef; pDef=pNext){ pNext = pDef->pNext; fts3PendingListDelete(pDef->pList); sqlite3_free(pDef); } pCsr->pDeferred = 0; } /* ** Generate deferred-doclists for all tokens in the pCsr->pDeferred list ** based on the row that pCsr currently points to. ** ** A deferred-doclist is like any other doclist with position information ** included, except that it only contains entries for a single row of the ** table, not for all rows. */ SQLITE_PRIVATE int sqlite3Fts3CacheDeferredDoclists(Fts3Cursor *pCsr){ int rc = SQLITE_OK; /* Return code */ if( pCsr->pDeferred ){ int i; /* Used to iterate through table columns */ sqlite3_int64 iDocid; /* Docid of the row pCsr points to */ Fts3DeferredToken *pDef; /* Used to iterate through deferred tokens */ Fts3Table *p = (Fts3Table *)pCsr->base.pVtab; sqlite3_tokenizer *pT = p->pTokenizer; sqlite3_tokenizer_module const *pModule = pT->pModule; assert( pCsr->isRequireSeek==0 ); iDocid = sqlite3_column_int64(pCsr->pStmt, 0); for(i=0; inColumn && rc==SQLITE_OK; i++){ if( p->abNotindexed[i]==0 ){ const char *zText = (const char *)sqlite3_column_text(pCsr->pStmt, i+1); sqlite3_tokenizer_cursor *pTC = 0; rc = sqlite3Fts3OpenTokenizer(pT, pCsr->iLangid, zText, -1, &pTC); while( rc==SQLITE_OK ){ char const *zToken; /* Buffer containing token */ int nToken = 0; /* Number of bytes in token */ int iDum1 = 0, iDum2 = 0; /* Dummy variables */ int iPos = 0; /* Position of token in zText */ rc = pModule->xNext(pTC, &zToken, &nToken, &iDum1, &iDum2, &iPos); for(pDef=pCsr->pDeferred; pDef && rc==SQLITE_OK; pDef=pDef->pNext){ Fts3PhraseToken *pPT = pDef->pToken; if( (pDef->iCol>=p->nColumn || pDef->iCol==i) && (pPT->bFirst==0 || iPos==0) && (pPT->n==nToken || (pPT->isPrefix && pPT->nz, pPT->n)) ){ fts3PendingListAppend(&pDef->pList, iDocid, i, iPos, &rc); } } } if( pTC ) pModule->xClose(pTC); if( rc==SQLITE_DONE ) rc = SQLITE_OK; } } for(pDef=pCsr->pDeferred; pDef && rc==SQLITE_OK; pDef=pDef->pNext){ if( pDef->pList ){ rc = fts3PendingListAppendVarint(&pDef->pList, 0); } } } return rc; } SQLITE_PRIVATE int sqlite3Fts3DeferredTokenList( Fts3DeferredToken *p, char **ppData, int *pnData ){ char *pRet; int nSkip; sqlite3_int64 dummy; *ppData = 0; *pnData = 0; if( p->pList==0 ){ return SQLITE_OK; } pRet = (char *)sqlite3_malloc(p->pList->nData); if( !pRet ) return SQLITE_NOMEM; nSkip = sqlite3Fts3GetVarint(p->pList->aData, &dummy); *pnData = p->pList->nData - nSkip; *ppData = pRet; memcpy(pRet, &p->pList->aData[nSkip], *pnData); return SQLITE_OK; } /* ** Add an entry for token pToken to the pCsr->pDeferred list. */ SQLITE_PRIVATE int sqlite3Fts3DeferToken( Fts3Cursor *pCsr, /* Fts3 table cursor */ Fts3PhraseToken *pToken, /* Token to defer */ int iCol /* Column that token must appear in (or -1) */ ){ Fts3DeferredToken *pDeferred; pDeferred = sqlite3_malloc(sizeof(*pDeferred)); if( !pDeferred ){ return SQLITE_NOMEM; } memset(pDeferred, 0, sizeof(*pDeferred)); pDeferred->pToken = pToken; pDeferred->pNext = pCsr->pDeferred; pDeferred->iCol = iCol; pCsr->pDeferred = pDeferred; assert( pToken->pDeferred==0 ); pToken->pDeferred = pDeferred; return SQLITE_OK; } #endif /* ** SQLite value pRowid contains the rowid of a row that may or may not be ** present in the FTS3 table. If it is, delete it and adjust the contents ** of subsiduary data structures accordingly. 
*/ static int fts3DeleteByRowid( Fts3Table *p, sqlite3_value *pRowid, int *pnChng, /* IN/OUT: Decrement if row is deleted */ u32 *aSzDel ){ int rc = SQLITE_OK; /* Return code */ int bFound = 0; /* True if *pRowid really is in the table */ fts3DeleteTerms(&rc, p, pRowid, aSzDel, &bFound); if( bFound && rc==SQLITE_OK ){ int isEmpty = 0; /* Deleting *pRowid leaves the table empty */ rc = fts3IsEmpty(p, pRowid, &isEmpty); if( rc==SQLITE_OK ){ if( isEmpty ){ /* Deleting this row means the whole table is empty. In this case ** delete the contents of all three tables and throw away any ** data in the pendingTerms hash table. */ rc = fts3DeleteAll(p, 1); *pnChng = 0; memset(aSzDel, 0, sizeof(u32) * (p->nColumn+1) * 2); }else{ *pnChng = *pnChng - 1; if( p->zContentTbl==0 ){ fts3SqlExec(&rc, p, SQL_DELETE_CONTENT, &pRowid); } if( p->bHasDocsize ){ fts3SqlExec(&rc, p, SQL_DELETE_DOCSIZE, &pRowid); } } } } return rc; } /* ** This function does the work for the xUpdate method of FTS3 virtual ** tables. The schema of the virtual table being: ** ** CREATE TABLE
<table name>( ** <user columns>, ** <table name>
    HIDDEN, ** docid HIDDEN, ** HIDDEN ** ); ** ** */ SQLITE_PRIVATE int sqlite3Fts3UpdateMethod( sqlite3_vtab *pVtab, /* FTS3 vtab object */ int nArg, /* Size of argument array */ sqlite3_value **apVal, /* Array of arguments */ sqlite_int64 *pRowid /* OUT: The affected (or effected) rowid */ ){ Fts3Table *p = (Fts3Table *)pVtab; int rc = SQLITE_OK; /* Return Code */ int isRemove = 0; /* True for an UPDATE or DELETE */ u32 *aSzIns = 0; /* Sizes of inserted documents */ u32 *aSzDel = 0; /* Sizes of deleted documents */ int nChng = 0; /* Net change in number of documents */ int bInsertDone = 0; /* At this point it must be known if the %_stat table exists or not. ** So bHasStat may not be 2. */ assert( p->bHasStat==0 || p->bHasStat==1 ); assert( p->pSegments==0 ); assert( nArg==1 /* DELETE operations */ || nArg==(2 + p->nColumn + 3) /* INSERT or UPDATE operations */ ); /* Check for a "special" INSERT operation. One of the form: ** ** INSERT INTO xyz(xyz) VALUES('command'); */ if( nArg>1 && sqlite3_value_type(apVal[0])==SQLITE_NULL && sqlite3_value_type(apVal[p->nColumn+2])!=SQLITE_NULL ){ rc = fts3SpecialInsert(p, apVal[p->nColumn+2]); goto update_out; } if( nArg>1 && sqlite3_value_int(apVal[2 + p->nColumn + 2])<0 ){ rc = SQLITE_CONSTRAINT; goto update_out; } /* Allocate space to hold the change in document sizes */ aSzDel = sqlite3_malloc( sizeof(aSzDel[0])*(p->nColumn+1)*2 ); if( aSzDel==0 ){ rc = SQLITE_NOMEM; goto update_out; } aSzIns = &aSzDel[p->nColumn+1]; memset(aSzDel, 0, sizeof(aSzDel[0])*(p->nColumn+1)*2); rc = fts3Writelock(p); if( rc!=SQLITE_OK ) goto update_out; /* If this is an INSERT operation, or an UPDATE that modifies the rowid ** value, then this operation requires constraint handling. ** ** If the on-conflict mode is REPLACE, this means that the existing row ** should be deleted from the database before inserting the new row. Or, ** if the on-conflict mode is other than REPLACE, then this method must ** detect the conflict and return SQLITE_CONSTRAINT before beginning to ** modify the database file. */ if( nArg>1 && p->zContentTbl==0 ){ /* Find the value object that holds the new rowid value. */ sqlite3_value *pNewRowid = apVal[3+p->nColumn]; if( sqlite3_value_type(pNewRowid)==SQLITE_NULL ){ pNewRowid = apVal[1]; } if( sqlite3_value_type(pNewRowid)!=SQLITE_NULL && ( sqlite3_value_type(apVal[0])==SQLITE_NULL || sqlite3_value_int64(apVal[0])!=sqlite3_value_int64(pNewRowid) )){ /* The new rowid is not NULL (in this case the rowid will be ** automatically assigned and there is no chance of a conflict), and ** the statement is either an INSERT or an UPDATE that modifies the ** rowid column. So if the conflict mode is REPLACE, then delete any ** existing row with rowid=pNewRowid. ** ** Or, if the conflict mode is not REPLACE, insert the new record into ** the %_content table. If we hit the duplicate rowid constraint (or any ** other error) while doing so, return immediately. ** ** This branch may also run if pNewRowid contains a value that cannot ** be losslessly converted to an integer. In this case, the eventual ** call to fts3InsertData() (either just below or further on in this ** function) will return SQLITE_MISMATCH. If fts3DeleteByRowid is ** invoked, it will delete zero rows (since no row will have ** docid=$pNewRowid if $pNewRowid is not an integer value). 
*/ if( sqlite3_vtab_on_conflict(p->db)==SQLITE_REPLACE ){ rc = fts3DeleteByRowid(p, pNewRowid, &nChng, aSzDel); }else{ rc = fts3InsertData(p, apVal, pRowid); bInsertDone = 1; } } } if( rc!=SQLITE_OK ){ goto update_out; } /* If this is a DELETE or UPDATE operation, remove the old record. */ if( sqlite3_value_type(apVal[0])!=SQLITE_NULL ){ assert( sqlite3_value_type(apVal[0])==SQLITE_INTEGER ); rc = fts3DeleteByRowid(p, apVal[0], &nChng, aSzDel); isRemove = 1; } /* If this is an INSERT or UPDATE operation, insert the new record. */ if( nArg>1 && rc==SQLITE_OK ){ int iLangid = sqlite3_value_int(apVal[2 + p->nColumn + 2]); if( bInsertDone==0 ){ rc = fts3InsertData(p, apVal, pRowid); if( rc==SQLITE_CONSTRAINT && p->zContentTbl==0 ){ rc = FTS_CORRUPT_VTAB; } } if( rc==SQLITE_OK && (!isRemove || *pRowid!=p->iPrevDocid ) ){ rc = fts3PendingTermsDocid(p, iLangid, *pRowid); } if( rc==SQLITE_OK ){ assert( p->iPrevDocid==*pRowid ); rc = fts3InsertTerms(p, iLangid, apVal, aSzIns); } if( p->bHasDocsize ){ fts3InsertDocsize(&rc, p, aSzIns); } nChng++; } if( p->bFts4 ){ fts3UpdateDocTotals(&rc, p, aSzIns, aSzDel, nChng); } update_out: sqlite3_free(aSzDel); sqlite3Fts3SegmentsClose(p); return rc; } /* ** Flush any data in the pending-terms hash table to disk. If successful, ** merge all segments in the database (including the new segment, if ** there was any data to flush) into a single segment. */ SQLITE_PRIVATE int sqlite3Fts3Optimize(Fts3Table *p){ int rc; rc = sqlite3_exec(p->db, "SAVEPOINT fts3", 0, 0, 0); if( rc==SQLITE_OK ){ rc = fts3DoOptimize(p, 1); if( rc==SQLITE_OK || rc==SQLITE_DONE ){ int rc2 = sqlite3_exec(p->db, "RELEASE fts3", 0, 0, 0); if( rc2!=SQLITE_OK ) rc = rc2; }else{ sqlite3_exec(p->db, "ROLLBACK TO fts3", 0, 0, 0); sqlite3_exec(p->db, "RELEASE fts3", 0, 0, 0); } } sqlite3Fts3SegmentsClose(p); return rc; } #endif /************** End of fts3_write.c ******************************************/ /************** Begin file fts3_snippet.c ************************************/ /* ** 2009 Oct 23 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ****************************************************************************** */ /* #include "fts3Int.h" */ #if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) /* #include */ /* #include */ /* ** Characters that may appear in the second argument to matchinfo(). */ #define FTS3_MATCHINFO_NPHRASE 'p' /* 1 value */ #define FTS3_MATCHINFO_NCOL 'c' /* 1 value */ #define FTS3_MATCHINFO_NDOC 'n' /* 1 value */ #define FTS3_MATCHINFO_AVGLENGTH 'a' /* nCol values */ #define FTS3_MATCHINFO_LENGTH 'l' /* nCol values */ #define FTS3_MATCHINFO_LCS 's' /* nCol values */ #define FTS3_MATCHINFO_HITS 'x' /* 3*nCol*nPhrase values */ #define FTS3_MATCHINFO_LHITS 'y' /* nCol*nPhrase values */ #define FTS3_MATCHINFO_LHITS_BM 'b' /* nCol*nPhrase values */ /* ** The default value for the second argument to matchinfo(). */ #define FTS3_MATCHINFO_DEFAULT "pcx" /* ** Used as an fts3ExprIterate() context when loading phrase doclists to ** Fts3Expr.aDoclist[]/nDoclist. 
*/ typedef struct LoadDoclistCtx LoadDoclistCtx; struct LoadDoclistCtx { Fts3Cursor *pCsr; /* FTS3 Cursor */ int nPhrase; /* Number of phrases seen so far */ int nToken; /* Number of tokens seen so far */ }; /* ** The following types are used as part of the implementation of the ** fts3BestSnippet() routine. */ typedef struct SnippetIter SnippetIter; typedef struct SnippetPhrase SnippetPhrase; typedef struct SnippetFragment SnippetFragment; struct SnippetIter { Fts3Cursor *pCsr; /* Cursor snippet is being generated from */ int iCol; /* Extract snippet from this column */ int nSnippet; /* Requested snippet length (in tokens) */ int nPhrase; /* Number of phrases in query */ SnippetPhrase *aPhrase; /* Array of size nPhrase */ int iCurrent; /* First token of current snippet */ }; struct SnippetPhrase { int nToken; /* Number of tokens in phrase */ char *pList; /* Pointer to start of phrase position list */ int iHead; /* Next value in position list */ char *pHead; /* Position list data following iHead */ int iTail; /* Next value in trailing position list */ char *pTail; /* Position list data following iTail */ }; struct SnippetFragment { int iCol; /* Column snippet is extracted from */ int iPos; /* Index of first token in snippet */ u64 covered; /* Mask of query phrases covered */ u64 hlmask; /* Mask of snippet terms to highlight */ }; /* ** This type is used as an fts3ExprIterate() context object while ** accumulating the data returned by the matchinfo() function. */ typedef struct MatchInfo MatchInfo; struct MatchInfo { Fts3Cursor *pCursor; /* FTS3 Cursor */ int nCol; /* Number of columns in table */ int nPhrase; /* Number of matchable phrases in query */ sqlite3_int64 nDoc; /* Number of docs in database */ char flag; u32 *aMatchinfo; /* Pre-allocated buffer */ }; /* ** An instance of this structure is used to manage a pair of buffers, each ** (nElem * sizeof(u32)) bytes in size. See the MatchinfoBuffer code below ** for details. */ struct MatchinfoBuffer { u8 aRef[3]; int nElem; int bGlobal; /* Set if global data is loaded */ char *zMatchinfo; u32 aMatchinfo[1]; }; /* ** The snippet() and offsets() functions both return text values. An instance ** of the following structure is used to accumulate those values while the ** functions are running. See fts3StringAppend() for details. */ typedef struct StrBuffer StrBuffer; struct StrBuffer { char *z; /* Pointer to buffer containing string */ int n; /* Length of z in bytes (excl. nul-term) */ int nAlloc; /* Allocated size of buffer z in bytes */ }; /************************************************************************* ** Start of MatchinfoBuffer code. */ /* ** Allocate a two-slot MatchinfoBuffer object. 
*/ static MatchinfoBuffer *fts3MIBufferNew(int nElem, const char *zMatchinfo){ MatchinfoBuffer *pRet; int nByte = sizeof(u32) * (2*nElem + 1) + sizeof(MatchinfoBuffer); int nStr = (int)strlen(zMatchinfo); pRet = sqlite3_malloc(nByte + nStr+1); if( pRet ){ memset(pRet, 0, nByte); pRet->aMatchinfo[0] = (u8*)(&pRet->aMatchinfo[1]) - (u8*)pRet; pRet->aMatchinfo[1+nElem] = pRet->aMatchinfo[0] + sizeof(u32)*(nElem+1); pRet->nElem = nElem; pRet->zMatchinfo = ((char*)pRet) + nByte; memcpy(pRet->zMatchinfo, zMatchinfo, nStr+1); pRet->aRef[0] = 1; } return pRet; } static void fts3MIBufferFree(void *p){ MatchinfoBuffer *pBuf = (MatchinfoBuffer*)((u8*)p - ((u32*)p)[-1]); assert( (u32*)p==&pBuf->aMatchinfo[1] || (u32*)p==&pBuf->aMatchinfo[pBuf->nElem+2] ); if( (u32*)p==&pBuf->aMatchinfo[1] ){ pBuf->aRef[1] = 0; }else{ pBuf->aRef[2] = 0; } if( pBuf->aRef[0]==0 && pBuf->aRef[1]==0 && pBuf->aRef[2]==0 ){ sqlite3_free(pBuf); } } static void (*fts3MIBufferAlloc(MatchinfoBuffer *p, u32 **paOut))(void*){ void (*xRet)(void*) = 0; u32 *aOut = 0; if( p->aRef[1]==0 ){ p->aRef[1] = 1; aOut = &p->aMatchinfo[1]; xRet = fts3MIBufferFree; } else if( p->aRef[2]==0 ){ p->aRef[2] = 1; aOut = &p->aMatchinfo[p->nElem+2]; xRet = fts3MIBufferFree; }else{ aOut = (u32*)sqlite3_malloc(p->nElem * sizeof(u32)); if( aOut ){ xRet = sqlite3_free; if( p->bGlobal ) memcpy(aOut, &p->aMatchinfo[1], p->nElem*sizeof(u32)); } } *paOut = aOut; return xRet; } static void fts3MIBufferSetGlobal(MatchinfoBuffer *p){ p->bGlobal = 1; memcpy(&p->aMatchinfo[2+p->nElem], &p->aMatchinfo[1], p->nElem*sizeof(u32)); } /* ** Free a MatchinfoBuffer object allocated using fts3MIBufferNew() */ SQLITE_PRIVATE void sqlite3Fts3MIBufferFree(MatchinfoBuffer *p){ if( p ){ assert( p->aRef[0]==1 ); p->aRef[0] = 0; if( p->aRef[0]==0 && p->aRef[1]==0 && p->aRef[2]==0 ){ sqlite3_free(p); } } } /* ** End of MatchinfoBuffer code. *************************************************************************/ /* ** This function is used to help iterate through a position-list. A position ** list is a list of unique integers, sorted from smallest to largest. Each ** element of the list is represented by an FTS3 varint that takes the value ** of the difference between the current element and the previous one plus ** two. For example, to store the position-list: ** ** 4 9 113 ** ** the three varints: ** ** 6 7 106 ** ** are encoded. ** ** When this function is called, *pp points to the start of an element of ** the list. *piPos contains the value of the previous entry in the list. ** After it returns, *piPos contains the value of the next element of the ** list and *pp is advanced to the following varint. */ static void fts3GetDeltaPosition(char **pp, int *piPos){ int iVal; *pp += fts3GetVarint32(*pp, &iVal); *piPos += (iVal-2); } /* ** Helper function for fts3ExprIterate() (see below). 
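**
** The *piPhrase counter is incremented once for each phrase node visited,
** so each callback invocation receives a 0-based phrase number. As a
** hypothetical example, for the query expression 'a OR (b NOT c)' the
** callback is invoked for the phrases 'a' (iPhrase==0) and 'b'
** (iPhrase==1) but never for 'c', because 'c' is the right-hand side of a
** NOT operator.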
*/
static int fts3ExprIterate2(
  Fts3Expr *pExpr,                /* Expression to iterate phrases of */
  int *piPhrase,                  /* Pointer to phrase counter */
  int (*x)(Fts3Expr*,int,void*),  /* Callback function to invoke for phrases */
  void *pCtx                      /* Second argument to pass to callback */
){
  int rc;                         /* Return code */
  int eType = pExpr->eType;       /* Type of expression node pExpr */

  if( eType!=FTSQUERY_PHRASE ){
    assert( pExpr->pLeft && pExpr->pRight );
    rc = fts3ExprIterate2(pExpr->pLeft, piPhrase, x, pCtx);
    if( rc==SQLITE_OK && eType!=FTSQUERY_NOT ){
      rc = fts3ExprIterate2(pExpr->pRight, piPhrase, x, pCtx);
    }
  }else{
    rc = x(pExpr, *piPhrase, pCtx);
    (*piPhrase)++;
  }
  return rc;
}

/*
** Iterate through all phrase nodes in an FTS3 query, except those that
** are part of a sub-tree that is the right-hand-side of a NOT operator.
** For each phrase node found, the supplied callback function is invoked.
**
** If the callback function returns anything other than SQLITE_OK,
** the iteration is abandoned and the error code returned immediately.
** Otherwise, SQLITE_OK is returned after a callback has been made for
** all eligible phrase nodes.
*/
static int fts3ExprIterate(
  Fts3Expr *pExpr,                /* Expression to iterate phrases of */
  int (*x)(Fts3Expr*,int,void*),  /* Callback function to invoke for phrases */
  void *pCtx                      /* Second argument to pass to callback */
){
  int iPhrase = 0;                /* Variable used as the phrase counter */
  return fts3ExprIterate2(pExpr, &iPhrase, x, pCtx);
}

/*
** This is an fts3ExprIterate() callback used while loading the doclists
** for each phrase into Fts3Expr.aDoclist[]/nDoclist. See also
** fts3ExprLoadDoclists().
*/
static int fts3ExprLoadDoclistsCb(Fts3Expr *pExpr, int iPhrase, void *ctx){
  int rc = SQLITE_OK;
  Fts3Phrase *pPhrase = pExpr->pPhrase;
  LoadDoclistCtx *p = (LoadDoclistCtx *)ctx;

  UNUSED_PARAMETER(iPhrase);

  p->nPhrase++;
  p->nToken += pPhrase->nToken;

  return rc;
}

/*
** Load the doclists for each phrase in the query associated with FTS3 cursor
** pCsr.
**
** If pnPhrase is not NULL, then *pnPhrase is set to the number of matchable
** phrases in the expression (all phrases except those directly or
** indirectly descended from the right-hand-side of a NOT operator). If
** pnToken is not NULL, then it is set to the number of tokens in all
** matchable phrases of the expression.
*/
static int fts3ExprLoadDoclists(
  Fts3Cursor *pCsr,               /* Fts3 cursor for current query */
  int *pnPhrase,                  /* OUT: Number of phrases in query */
  int *pnToken                    /* OUT: Number of tokens in query */
){
  int rc;                         /* Return Code */
  LoadDoclistCtx sCtx = {0,0,0};  /* Context for fts3ExprIterate() */
  sCtx.pCsr = pCsr;
  rc = fts3ExprIterate(pCsr->pExpr, fts3ExprLoadDoclistsCb, (void *)&sCtx);
  if( pnPhrase ) *pnPhrase = sCtx.nPhrase;
  if( pnToken ) *pnToken = sCtx.nToken;
  return rc;
}

static int fts3ExprPhraseCountCb(Fts3Expr *pExpr, int iPhrase, void *ctx){
  (*(int *)ctx)++;
  pExpr->iPhrase = iPhrase;
  return SQLITE_OK;
}
static int fts3ExprPhraseCount(Fts3Expr *pExpr){
  int nPhrase = 0;
  (void)fts3ExprIterate(pExpr, fts3ExprPhraseCountCb, (void *)&nPhrase);
  return nPhrase;
}

/*
** Advance the position list iterator specified by the first two
** arguments so that it points to the first element with a value greater
** than or equal to parameter iNext.
*/
static void fts3SnippetAdvance(char **ppIter, int *piIter, int iNext){
  char *pIter = *ppIter;
  if( pIter ){
    int iIter = *piIter;

    while( iIter<iNext ){
      if( 0==(*pIter & 0xFE) ){
        iIter = -1;
        pIter = 0;
        break;
      }
      fts3GetDeltaPosition(&pIter, &iIter);
    }

    *piIter = iIter;
    *ppIter = pIter;
  }
}

/*
** Advance the snippet iterator to the next candidate snippet.
*/
static int fts3SnippetNextCandidate(SnippetIter *pIter){
  int i;                          /* Loop counter */

  if( pIter->iCurrent<0 ){
    /* The SnippetIter object has just been initialized.
The first snippet ** candidate always starts at offset 0 (even if this candidate has a ** score of 0.0). */ pIter->iCurrent = 0; /* Advance the 'head' iterator of each phrase to the first offset that ** is greater than or equal to (iNext+nSnippet). */ for(i=0; inPhrase; i++){ SnippetPhrase *pPhrase = &pIter->aPhrase[i]; fts3SnippetAdvance(&pPhrase->pHead, &pPhrase->iHead, pIter->nSnippet); } }else{ int iStart; int iEnd = 0x7FFFFFFF; for(i=0; inPhrase; i++){ SnippetPhrase *pPhrase = &pIter->aPhrase[i]; if( pPhrase->pHead && pPhrase->iHeadiHead; } } if( iEnd==0x7FFFFFFF ){ return 1; } pIter->iCurrent = iStart = iEnd - pIter->nSnippet + 1; for(i=0; inPhrase; i++){ SnippetPhrase *pPhrase = &pIter->aPhrase[i]; fts3SnippetAdvance(&pPhrase->pHead, &pPhrase->iHead, iEnd+1); fts3SnippetAdvance(&pPhrase->pTail, &pPhrase->iTail, iStart); } } return 0; } /* ** Retrieve information about the current candidate snippet of snippet ** iterator pIter. */ static void fts3SnippetDetails( SnippetIter *pIter, /* Snippet iterator */ u64 mCovered, /* Bitmask of phrases already covered */ int *piToken, /* OUT: First token of proposed snippet */ int *piScore, /* OUT: "Score" for this snippet */ u64 *pmCover, /* OUT: Bitmask of phrases covered */ u64 *pmHighlight /* OUT: Bitmask of terms to highlight */ ){ int iStart = pIter->iCurrent; /* First token of snippet */ int iScore = 0; /* Score of this snippet */ int i; /* Loop counter */ u64 mCover = 0; /* Mask of phrases covered by this snippet */ u64 mHighlight = 0; /* Mask of tokens to highlight in snippet */ for(i=0; inPhrase; i++){ SnippetPhrase *pPhrase = &pIter->aPhrase[i]; if( pPhrase->pTail ){ char *pCsr = pPhrase->pTail; int iCsr = pPhrase->iTail; while( iCsr<(iStart+pIter->nSnippet) ){ int j; u64 mPhrase = (u64)1 << i; u64 mPos = (u64)1 << (iCsr - iStart); assert( iCsr>=iStart ); if( (mCover|mCovered)&mPhrase ){ iScore++; }else{ iScore += 1000; } mCover |= mPhrase; for(j=0; jnToken; j++){ mHighlight |= (mPos>>j); } if( 0==(*pCsr & 0x0FE) ) break; fts3GetDeltaPosition(&pCsr, &iCsr); } } } /* Set the output variables before returning. */ *piToken = iStart; *piScore = iScore; *pmCover = mCover; *pmHighlight = mHighlight; } /* ** This function is an fts3ExprIterate() callback used by fts3BestSnippet(). ** Each invocation populates an element of the SnippetIter.aPhrase[] array. */ static int fts3SnippetFindPositions(Fts3Expr *pExpr, int iPhrase, void *ctx){ SnippetIter *p = (SnippetIter *)ctx; SnippetPhrase *pPhrase = &p->aPhrase[iPhrase]; char *pCsr; int rc; pPhrase->nToken = pExpr->pPhrase->nToken; rc = sqlite3Fts3EvalPhrasePoslist(p->pCsr, pExpr, p->iCol, &pCsr); assert( rc==SQLITE_OK || pCsr==0 ); if( pCsr ){ int iFirst = 0; pPhrase->pList = pCsr; fts3GetDeltaPosition(&pCsr, &iFirst); assert( iFirst>=0 ); pPhrase->pHead = pCsr; pPhrase->pTail = pCsr; pPhrase->iHead = iFirst; pPhrase->iTail = iFirst; }else{ assert( rc!=SQLITE_OK || ( pPhrase->pList==0 && pPhrase->pHead==0 && pPhrase->pTail==0 )); } return rc; } /* ** Select the fragment of text consisting of nFragment contiguous tokens ** from column iCol that represent the "best" snippet. The best snippet ** is the snippet with the highest score, where scores are calculated ** by adding: ** ** (a) +1 point for each occurrence of a matchable phrase in the snippet. ** ** (b) +1000 points for the first occurrence of each matchable phrase in ** the snippet for which the corresponding mCovered bit is not set. ** ** The selected snippet parameters are stored in structure *pFragment before ** returning. 
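** As a worked example (the phrase layout is hypothetical): with two
** matchable phrases P0 and P1, a candidate snippet containing one
** occurrence of each scores 2000 if neither mCovered bit is set
** (1000 + 1000), but only 1001 if P1 has already been covered by an
** earlier fragment. A second occurrence of P0 inside the same candidate
** adds just one more point.
**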
** The score of the selected snippet is stored in *piScore
** before returning.
*/
static int fts3BestSnippet(
  int nSnippet,                   /* Desired snippet length */
  Fts3Cursor *pCsr,               /* Cursor to create snippet for */
  int iCol,                       /* Index of column to create snippet from */
  u64 mCovered,                   /* Mask of phrases already covered */
  u64 *pmSeen,                    /* IN/OUT: Mask of phrases seen */
  SnippetFragment *pFragment,     /* OUT: Best snippet found */
  int *piScore                    /* OUT: Score of snippet pFragment */
){
  int rc;                         /* Return Code */
  int nList;                      /* Number of phrases in expression */
  SnippetIter sIter;              /* Iterates through snippet candidates */
  int nByte;                      /* Number of bytes of space to allocate */
  int iBestScore = -1;            /* Best snippet score found so far */
  int i;                          /* Loop counter */

  memset(&sIter, 0, sizeof(sIter));

  /* Iterate through the phrases in the expression to count them. The same
  ** callback makes sure the doclists are loaded for each phrase.
  */
  rc = fts3ExprLoadDoclists(pCsr, &nList, 0);
  if( rc!=SQLITE_OK ){
    return rc;
  }

  /* Now that it is known how many phrases there are, allocate and zero
  ** the required space using malloc().
  */
  nByte = sizeof(SnippetPhrase) * nList;
  sIter.aPhrase = (SnippetPhrase *)sqlite3_malloc(nByte);
  if( !sIter.aPhrase ){
    return SQLITE_NOMEM;
  }
  memset(sIter.aPhrase, 0, nByte);

  /* Initialize the contents of the SnippetIter object. Then iterate through
  ** the set of phrases in the expression to populate the aPhrase[] array.
  */
  sIter.pCsr = pCsr;
  sIter.iCol = iCol;
  sIter.nSnippet = nSnippet;
  sIter.nPhrase = nList;
  sIter.iCurrent = -1;
  rc = fts3ExprIterate(pCsr->pExpr, fts3SnippetFindPositions, (void*)&sIter);
  if( rc==SQLITE_OK ){

    /* Set the *pmSeen output variable. */
    for(i=0; i<nList; i++){
      if( sIter.aPhrase[i].pHead ){
        *pmSeen |= (u64)1 << i;
      }
    }

    /* Loop through all candidate snippets. Store the best snippet in
    ** *pFragment. Store its associated 'score' in iBestScore.
    */
    pFragment->iCol = iCol;
    while( !fts3SnippetNextCandidate(&sIter) ){
      int iPos;
      int iScore;
      u64 mCover;
      u64 mHighlite;
      fts3SnippetDetails(&sIter, mCovered, &iPos, &iScore, &mCover,&mHighlite);
      assert( iScore>=0 );
      if( iScore>iBestScore ){
        pFragment->iPos = iPos;
        pFragment->hlmask = mHighlite;
        pFragment->covered = mCover;
        iBestScore = iScore;
      }
    }

    *piScore = iBestScore;
  }
  sqlite3_free(sIter.aPhrase);
  return rc;
}

/*
** Append a string to the string-buffer passed as the first argument.
**
** If nAppend is negative, then the length of the string zAppend is
** determined using strlen().
*/
static int fts3StringAppend(
  StrBuffer *pStr,                /* Buffer to append to */
  const char *zAppend,            /* Pointer to data to append to buffer */
  int nAppend                     /* Size of zAppend in bytes (or -1) */
){
  if( nAppend<0 ){
    nAppend = (int)strlen(zAppend);
  }

  /* If there is insufficient space allocated at StrBuffer.z, use realloc()
  ** to grow the buffer until so that it is big enough to accomadate the
  ** appended data.
  */
  if( pStr->n+nAppend+1>=pStr->nAlloc ){
    int nAlloc = pStr->nAlloc+nAppend+100;
    char *zNew = sqlite3_realloc(pStr->z, nAlloc);
    if( !zNew ){
      return SQLITE_NOMEM;
    }
    pStr->z = zNew;
    pStr->nAlloc = nAlloc;
  }
  assert( pStr->z!=0 && (pStr->nAlloc >= pStr->n+nAppend+1) );

  /* Append the data to the string buffer. */
  memcpy(&pStr->z[pStr->n], zAppend, nAppend);
  pStr->n += nAppend;
  pStr->z[pStr->n] = '\0';

  return SQLITE_OK;
}

/*
** The fts3BestSnippet() function often selects snippets that end with a
** query term. That is, the final term of the snippet is always a term
** that requires highlighting. For example, if 'X' is a highlighted term
** and '.'
is a non-highlighted term, BestSnippet() may select: ** ** ........X.....X ** ** This function "shifts" the beginning of the snippet forward in the ** document so that there are approximately the same number of ** non-highlighted terms to the right of the final highlighted term as there ** are to the left of the first highlighted term. For example, to this: ** ** ....X.....X.... ** ** This is done as part of extracting the snippet text, not when selecting ** the snippet. Snippet selection is done based on doclists only, so there ** is no way for fts3BestSnippet() to know whether or not the document ** actually contains terms that follow the final highlighted term. */ static int fts3SnippetShift( Fts3Table *pTab, /* FTS3 table snippet comes from */ int iLangid, /* Language id to use in tokenizing */ int nSnippet, /* Number of tokens desired for snippet */ const char *zDoc, /* Document text to extract snippet from */ int nDoc, /* Size of buffer zDoc in bytes */ int *piPos, /* IN/OUT: First token of snippet */ u64 *pHlmask /* IN/OUT: Mask of tokens to highlight */ ){ u64 hlmask = *pHlmask; /* Local copy of initial highlight-mask */ if( hlmask ){ int nLeft; /* Tokens to the left of first highlight */ int nRight; /* Tokens to the right of last highlight */ int nDesired; /* Ideal number of tokens to shift forward */ for(nLeft=0; !(hlmask & ((u64)1 << nLeft)); nLeft++); for(nRight=0; !(hlmask & ((u64)1 << (nSnippet-1-nRight))); nRight++); nDesired = (nLeft-nRight)/2; /* Ideally, the start of the snippet should be pushed forward in the ** document nDesired tokens. This block checks if there are actually ** nDesired tokens to the right of the snippet. If so, *piPos and ** *pHlMask are updated to shift the snippet nDesired tokens to the ** right. Otherwise, the snippet is shifted by the number of tokens ** available. */ if( nDesired>0 ){ int nShift; /* Number of tokens to shift snippet by */ int iCurrent = 0; /* Token counter */ int rc; /* Return Code */ sqlite3_tokenizer_module *pMod; sqlite3_tokenizer_cursor *pC; pMod = (sqlite3_tokenizer_module *)pTab->pTokenizer->pModule; /* Open a cursor on zDoc/nDoc. Check if there are (nSnippet+nDesired) ** or more tokens in zDoc/nDoc. */ rc = sqlite3Fts3OpenTokenizer(pTab->pTokenizer, iLangid, zDoc, nDoc, &pC); if( rc!=SQLITE_OK ){ return rc; } while( rc==SQLITE_OK && iCurrent<(nSnippet+nDesired) ){ const char *ZDUMMY; int DUMMY1 = 0, DUMMY2 = 0, DUMMY3 = 0; rc = pMod->xNext(pC, &ZDUMMY, &DUMMY1, &DUMMY2, &DUMMY3, &iCurrent); } pMod->xClose(pC); if( rc!=SQLITE_OK && rc!=SQLITE_DONE ){ return rc; } nShift = (rc==SQLITE_DONE)+iCurrent-nSnippet; assert( nShift<=nDesired ); if( nShift>0 ){ *piPos += nShift; *pHlmask = hlmask >> nShift; } } } return SQLITE_OK; } /* ** Extract the snippet text for fragment pFragment from cursor pCsr and ** append it to string buffer pOut. 
*/ static int fts3SnippetText( Fts3Cursor *pCsr, /* FTS3 Cursor */ SnippetFragment *pFragment, /* Snippet to extract */ int iFragment, /* Fragment number */ int isLast, /* True for final fragment in snippet */ int nSnippet, /* Number of tokens in extracted snippet */ const char *zOpen, /* String inserted before highlighted term */ const char *zClose, /* String inserted after highlighted term */ const char *zEllipsis, /* String inserted between snippets */ StrBuffer *pOut /* Write output here */ ){ Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab; int rc; /* Return code */ const char *zDoc; /* Document text to extract snippet from */ int nDoc; /* Size of zDoc in bytes */ int iCurrent = 0; /* Current token number of document */ int iEnd = 0; /* Byte offset of end of current token */ int isShiftDone = 0; /* True after snippet is shifted */ int iPos = pFragment->iPos; /* First token of snippet */ u64 hlmask = pFragment->hlmask; /* Highlight-mask for snippet */ int iCol = pFragment->iCol+1; /* Query column to extract text from */ sqlite3_tokenizer_module *pMod; /* Tokenizer module methods object */ sqlite3_tokenizer_cursor *pC; /* Tokenizer cursor open on zDoc/nDoc */ zDoc = (const char *)sqlite3_column_text(pCsr->pStmt, iCol); if( zDoc==0 ){ if( sqlite3_column_type(pCsr->pStmt, iCol)!=SQLITE_NULL ){ return SQLITE_NOMEM; } return SQLITE_OK; } nDoc = sqlite3_column_bytes(pCsr->pStmt, iCol); /* Open a token cursor on the document. */ pMod = (sqlite3_tokenizer_module *)pTab->pTokenizer->pModule; rc = sqlite3Fts3OpenTokenizer(pTab->pTokenizer, pCsr->iLangid, zDoc,nDoc,&pC); if( rc!=SQLITE_OK ){ return rc; } while( rc==SQLITE_OK ){ const char *ZDUMMY; /* Dummy argument used with tokenizer */ int DUMMY1 = -1; /* Dummy argument used with tokenizer */ int iBegin = 0; /* Offset in zDoc of start of token */ int iFin = 0; /* Offset in zDoc of end of token */ int isHighlight = 0; /* True for highlighted terms */ /* Variable DUMMY1 is initialized to a negative value above. Elsewhere ** in the FTS code the variable that the third argument to xNext points to ** is initialized to zero before the first (*but not necessarily ** subsequent*) call to xNext(). This is done for a particular application ** that needs to know whether or not the tokenizer is being used for ** snippet generation or for some other purpose. ** ** Extreme care is required when writing code to depend on this ** initialization. It is not a documented part of the tokenizer interface. ** If a tokenizer is used directly by any code outside of FTS, this ** convention might not be respected. */ rc = pMod->xNext(pC, &ZDUMMY, &DUMMY1, &iBegin, &iFin, &iCurrent); if( rc!=SQLITE_OK ){ if( rc==SQLITE_DONE ){ /* Special case - the last token of the snippet is also the last token ** of the column. Append any punctuation that occurred between the end ** of the previous token and the end of the document to the output. ** Then break out of the loop. */ rc = fts3StringAppend(pOut, &zDoc[iEnd], -1); } break; } if( iCurrentiLangid, nSnippet, &zDoc[iBegin], n, &iPos, &hlmask ); isShiftDone = 1; /* Now that the shift has been done, check if the initial "..." are ** required. They are required if (a) this is not the first fragment, ** or (b) this fragment does not begin at position 0 of its column. 
*/ if( rc==SQLITE_OK ){ if( iPos>0 || iFragment>0 ){ rc = fts3StringAppend(pOut, zEllipsis, -1); }else if( iBegin ){ rc = fts3StringAppend(pOut, zDoc, iBegin); } } if( rc!=SQLITE_OK || iCurrent=(iPos+nSnippet) ){ if( isLast ){ rc = fts3StringAppend(pOut, zEllipsis, -1); } break; } /* Set isHighlight to true if this term should be highlighted. */ isHighlight = (hlmask & ((u64)1 << (iCurrent-iPos)))!=0; if( iCurrent>iPos ) rc = fts3StringAppend(pOut, &zDoc[iEnd], iBegin-iEnd); if( rc==SQLITE_OK && isHighlight ) rc = fts3StringAppend(pOut, zOpen, -1); if( rc==SQLITE_OK ) rc = fts3StringAppend(pOut, &zDoc[iBegin], iFin-iBegin); if( rc==SQLITE_OK && isHighlight ) rc = fts3StringAppend(pOut, zClose, -1); iEnd = iFin; } pMod->xClose(pC); return rc; } /* ** This function is used to count the entries in a column-list (a ** delta-encoded list of term offsets within a single column of a single ** row). When this function is called, *ppCollist should point to the ** beginning of the first varint in the column-list (the varint that ** contains the position of the first matching term in the column data). ** Before returning, *ppCollist is set to point to the first byte after ** the last varint in the column-list (either the 0x00 signifying the end ** of the position-list, or the 0x01 that precedes the column number of ** the next column in the position-list). ** ** The number of elements in the column-list is returned. */ static int fts3ColumnlistCount(char **ppCollist){ char *pEnd = *ppCollist; char c = 0; int nEntry = 0; /* A column-list is terminated by either a 0x01 or 0x00. */ while( 0xFE & (*pEnd | c) ){ c = *pEnd++ & 0x80; if( !c ) nEntry++; } *ppCollist = pEnd; return nEntry; } /* ** This function gathers 'y' or 'b' data for a single phrase. */ static void fts3ExprLHits( Fts3Expr *pExpr, /* Phrase expression node */ MatchInfo *p /* Matchinfo context */ ){ Fts3Table *pTab = (Fts3Table *)p->pCursor->base.pVtab; int iStart; Fts3Phrase *pPhrase = pExpr->pPhrase; char *pIter = pPhrase->doclist.pList; int iCol = 0; assert( p->flag==FTS3_MATCHINFO_LHITS_BM || p->flag==FTS3_MATCHINFO_LHITS ); if( p->flag==FTS3_MATCHINFO_LHITS ){ iStart = pExpr->iPhrase * p->nCol; }else{ iStart = pExpr->iPhrase * ((p->nCol + 31) / 32); } while( 1 ){ int nHit = fts3ColumnlistCount(&pIter); if( (pPhrase->iColumn>=pTab->nColumn || pPhrase->iColumn==iCol) ){ if( p->flag==FTS3_MATCHINFO_LHITS ){ p->aMatchinfo[iStart + iCol] = (u32)nHit; }else if( nHit ){ p->aMatchinfo[iStart + (iCol+1)/32] |= (1 << (iCol&0x1F)); } } assert( *pIter==0x00 || *pIter==0x01 ); if( *pIter!=0x01 ) break; pIter++; pIter += fts3GetVarint32(pIter, &iCol); } } /* ** Gather the results for matchinfo directives 'y' and 'b'. */ static void fts3ExprLHitGather( Fts3Expr *pExpr, MatchInfo *p ){ assert( (pExpr->pLeft==0)==(pExpr->pRight==0) ); if( pExpr->bEof==0 && pExpr->iDocid==p->pCursor->iPrevId ){ if( pExpr->pLeft ){ fts3ExprLHitGather(pExpr->pLeft, p); fts3ExprLHitGather(pExpr->pRight, p); }else{ fts3ExprLHits(pExpr, p); } } } /* ** fts3ExprIterate() callback used to collect the "global" matchinfo stats ** for a single query. ** ** fts3ExprIterate() callback to load the 'global' elements of a ** FTS3_MATCHINFO_HITS matchinfo array. The global stats are those elements ** of the matchinfo array that are constant for all rows returned by the ** current query. ** ** Argument pCtx is actually a pointer to a struct of type MatchInfo. 
This ** function populates Matchinfo.aMatchinfo[] as follows: ** ** for(iCol=0; iColpCursor, pExpr, &p->aMatchinfo[3*iPhrase*p->nCol] ); } /* ** fts3ExprIterate() callback used to collect the "local" part of the ** FTS3_MATCHINFO_HITS array. The local stats are those elements of the ** array that are different for each row returned by the query. */ static int fts3ExprLocalHitsCb( Fts3Expr *pExpr, /* Phrase expression node */ int iPhrase, /* Phrase number */ void *pCtx /* Pointer to MatchInfo structure */ ){ int rc = SQLITE_OK; MatchInfo *p = (MatchInfo *)pCtx; int iStart = iPhrase * p->nCol * 3; int i; for(i=0; inCol && rc==SQLITE_OK; i++){ char *pCsr; rc = sqlite3Fts3EvalPhrasePoslist(p->pCursor, pExpr, i, &pCsr); if( pCsr ){ p->aMatchinfo[iStart+i*3] = fts3ColumnlistCount(&pCsr); }else{ p->aMatchinfo[iStart+i*3] = 0; } } return rc; } static int fts3MatchinfoCheck( Fts3Table *pTab, char cArg, char **pzErr ){ if( (cArg==FTS3_MATCHINFO_NPHRASE) || (cArg==FTS3_MATCHINFO_NCOL) || (cArg==FTS3_MATCHINFO_NDOC && pTab->bFts4) || (cArg==FTS3_MATCHINFO_AVGLENGTH && pTab->bFts4) || (cArg==FTS3_MATCHINFO_LENGTH && pTab->bHasDocsize) || (cArg==FTS3_MATCHINFO_LCS) || (cArg==FTS3_MATCHINFO_HITS) || (cArg==FTS3_MATCHINFO_LHITS) || (cArg==FTS3_MATCHINFO_LHITS_BM) ){ return SQLITE_OK; } sqlite3Fts3ErrMsg(pzErr, "unrecognized matchinfo request: %c", cArg); return SQLITE_ERROR; } static int fts3MatchinfoSize(MatchInfo *pInfo, char cArg){ int nVal; /* Number of integers output by cArg */ switch( cArg ){ case FTS3_MATCHINFO_NDOC: case FTS3_MATCHINFO_NPHRASE: case FTS3_MATCHINFO_NCOL: nVal = 1; break; case FTS3_MATCHINFO_AVGLENGTH: case FTS3_MATCHINFO_LENGTH: case FTS3_MATCHINFO_LCS: nVal = pInfo->nCol; break; case FTS3_MATCHINFO_LHITS: nVal = pInfo->nCol * pInfo->nPhrase; break; case FTS3_MATCHINFO_LHITS_BM: nVal = pInfo->nPhrase * ((pInfo->nCol + 31) / 32); break; default: assert( cArg==FTS3_MATCHINFO_HITS ); nVal = pInfo->nCol * pInfo->nPhrase * 3; break; } return nVal; } static int fts3MatchinfoSelectDoctotal( Fts3Table *pTab, sqlite3_stmt **ppStmt, sqlite3_int64 *pnDoc, const char **paLen ){ sqlite3_stmt *pStmt; const char *a; sqlite3_int64 nDoc; if( !*ppStmt ){ int rc = sqlite3Fts3SelectDoctotal(pTab, ppStmt); if( rc!=SQLITE_OK ) return rc; } pStmt = *ppStmt; assert( sqlite3_data_count(pStmt)==1 ); a = sqlite3_column_blob(pStmt, 0); a += sqlite3Fts3GetVarint(a, &nDoc); if( nDoc==0 ) return FTS_CORRUPT_VTAB; *pnDoc = (u32)nDoc; if( paLen ) *paLen = a; return SQLITE_OK; } /* ** An instance of the following structure is used to store state while ** iterating through a multi-column position-list corresponding to the ** hits for a single phrase on a single row in order to calculate the ** values for a matchinfo() FTS3_MATCHINFO_LCS request. */ typedef struct LcsIterator LcsIterator; struct LcsIterator { Fts3Expr *pExpr; /* Pointer to phrase expression */ int iPosOffset; /* Tokens count up to end of this phrase */ char *pRead; /* Cursor used to iterate through aDoclist */ int iPos; /* Current position */ }; /* ** If LcsIterator.iCol is set to the following value, the iterator has ** finished iterating through all offsets for all columns. 
*/ #define LCS_ITERATOR_FINISHED 0x7FFFFFFF; static int fts3MatchinfoLcsCb( Fts3Expr *pExpr, /* Phrase expression node */ int iPhrase, /* Phrase number (numbered from zero) */ void *pCtx /* Pointer to MatchInfo structure */ ){ LcsIterator *aIter = (LcsIterator *)pCtx; aIter[iPhrase].pExpr = pExpr; return SQLITE_OK; } /* ** Advance the iterator passed as an argument to the next position. Return ** 1 if the iterator is at EOF or if it now points to the start of the ** position list for the next column. */ static int fts3LcsIteratorAdvance(LcsIterator *pIter){ char *pRead = pIter->pRead; sqlite3_int64 iRead; int rc = 0; pRead += sqlite3Fts3GetVarint(pRead, &iRead); if( iRead==0 || iRead==1 ){ pRead = 0; rc = 1; }else{ pIter->iPos += (int)(iRead-2); } pIter->pRead = pRead; return rc; } /* ** This function implements the FTS3_MATCHINFO_LCS matchinfo() flag. ** ** If the call is successful, the longest-common-substring lengths for each ** column are written into the first nCol elements of the pInfo->aMatchinfo[] ** array before returning. SQLITE_OK is returned in this case. ** ** Otherwise, if an error occurs, an SQLite error code is returned and the ** data written to the first nCol elements of pInfo->aMatchinfo[] is ** undefined. */ static int fts3MatchinfoLcs(Fts3Cursor *pCsr, MatchInfo *pInfo){ LcsIterator *aIter; int i; int iCol; int nToken = 0; /* Allocate and populate the array of LcsIterator objects. The array ** contains one element for each matchable phrase in the query. **/ aIter = sqlite3_malloc(sizeof(LcsIterator) * pCsr->nPhrase); if( !aIter ) return SQLITE_NOMEM; memset(aIter, 0, sizeof(LcsIterator) * pCsr->nPhrase); (void)fts3ExprIterate(pCsr->pExpr, fts3MatchinfoLcsCb, (void*)aIter); for(i=0; inPhrase; i++){ LcsIterator *pIter = &aIter[i]; nToken -= pIter->pExpr->pPhrase->nToken; pIter->iPosOffset = nToken; } for(iCol=0; iColnCol; iCol++){ int nLcs = 0; /* LCS value for this column */ int nLive = 0; /* Number of iterators in aIter not at EOF */ for(i=0; inPhrase; i++){ int rc; LcsIterator *pIt = &aIter[i]; rc = sqlite3Fts3EvalPhrasePoslist(pCsr, pIt->pExpr, iCol, &pIt->pRead); if( rc!=SQLITE_OK ) return rc; if( pIt->pRead ){ pIt->iPos = pIt->iPosOffset; fts3LcsIteratorAdvance(&aIter[i]); nLive++; } } while( nLive>0 ){ LcsIterator *pAdv = 0; /* The iterator to advance by one position */ int nThisLcs = 0; /* LCS for the current iterator positions */ for(i=0; inPhrase; i++){ LcsIterator *pIter = &aIter[i]; if( pIter->pRead==0 ){ /* This iterator is already at EOF for this column. */ nThisLcs = 0; }else{ if( pAdv==0 || pIter->iPosiPos ){ pAdv = pIter; } if( nThisLcs==0 || pIter->iPos==pIter[-1].iPos ){ nThisLcs++; }else{ nThisLcs = 1; } if( nThisLcs>nLcs ) nLcs = nThisLcs; } } if( fts3LcsIteratorAdvance(pAdv) ) nLive--; } pInfo->aMatchinfo[iCol] = nLcs; } sqlite3_free(aIter); return SQLITE_OK; } /* ** Populate the buffer pInfo->aMatchinfo[] with an array of integers to ** be returned by the matchinfo() function. Argument zArg contains the ** format string passed as the second argument to matchinfo (or the ** default value "pcx" if no second argument was specified). The format ** string has already been validated and the pInfo->aMatchinfo[] array ** is guaranteed to be large enough for the output. ** ** If bGlobal is true, then populate all fields of the matchinfo() output. ** If it is false, then assume that those fields that do not change between ** rows (i.e. FTS3_MATCHINFO_NPHRASE, NCOL, NDOC, AVGLENGTH and part of HITS) ** have already been populated. 
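** (For example, with the default format string "pcx" a query such as the
** following, against a hypothetical FTS table named "docs":
**
**   SELECT matchinfo(docs, 'pcx') FROM docs WHERE docs MATCH 'sqlite';
**
** returns a blob laid out as: one integer giving the number of matchable
** phrases ('p'), one giving the number of user columns ('c'), and then, for
** each phrase/column pair, three integers ('x'): the number of times the
** phrase appears in that column of the current row, the number of times it
** appears in that column across all rows, and the number of rows whose
** column contains the phrase at least once.)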
** ** Return SQLITE_OK if successful, or an SQLite error code if an error ** occurs. If a value other than SQLITE_OK is returned, the state the ** pInfo->aMatchinfo[] buffer is left in is undefined. */ static int fts3MatchinfoValues( Fts3Cursor *pCsr, /* FTS3 cursor object */ int bGlobal, /* True to grab the global stats */ MatchInfo *pInfo, /* Matchinfo context object */ const char *zArg /* Matchinfo format string */ ){ int rc = SQLITE_OK; int i; Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab; sqlite3_stmt *pSelect = 0; for(i=0; rc==SQLITE_OK && zArg[i]; i++){ pInfo->flag = zArg[i]; switch( zArg[i] ){ case FTS3_MATCHINFO_NPHRASE: if( bGlobal ) pInfo->aMatchinfo[0] = pInfo->nPhrase; break; case FTS3_MATCHINFO_NCOL: if( bGlobal ) pInfo->aMatchinfo[0] = pInfo->nCol; break; case FTS3_MATCHINFO_NDOC: if( bGlobal ){ sqlite3_int64 nDoc = 0; rc = fts3MatchinfoSelectDoctotal(pTab, &pSelect, &nDoc, 0); pInfo->aMatchinfo[0] = (u32)nDoc; } break; case FTS3_MATCHINFO_AVGLENGTH: if( bGlobal ){ sqlite3_int64 nDoc; /* Number of rows in table */ const char *a; /* Aggregate column length array */ rc = fts3MatchinfoSelectDoctotal(pTab, &pSelect, &nDoc, &a); if( rc==SQLITE_OK ){ int iCol; for(iCol=0; iColnCol; iCol++){ u32 iVal; sqlite3_int64 nToken; a += sqlite3Fts3GetVarint(a, &nToken); iVal = (u32)(((u32)(nToken&0xffffffff)+nDoc/2)/nDoc); pInfo->aMatchinfo[iCol] = iVal; } } } break; case FTS3_MATCHINFO_LENGTH: { sqlite3_stmt *pSelectDocsize = 0; rc = sqlite3Fts3SelectDocsize(pTab, pCsr->iPrevId, &pSelectDocsize); if( rc==SQLITE_OK ){ int iCol; const char *a = sqlite3_column_blob(pSelectDocsize, 0); for(iCol=0; iColnCol; iCol++){ sqlite3_int64 nToken; a += sqlite3Fts3GetVarint(a, &nToken); pInfo->aMatchinfo[iCol] = (u32)nToken; } } sqlite3_reset(pSelectDocsize); break; } case FTS3_MATCHINFO_LCS: rc = fts3ExprLoadDoclists(pCsr, 0, 0); if( rc==SQLITE_OK ){ rc = fts3MatchinfoLcs(pCsr, pInfo); } break; case FTS3_MATCHINFO_LHITS_BM: case FTS3_MATCHINFO_LHITS: { int nZero = fts3MatchinfoSize(pInfo, zArg[i]) * sizeof(u32); memset(pInfo->aMatchinfo, 0, nZero); fts3ExprLHitGather(pCsr->pExpr, pInfo); break; } default: { Fts3Expr *pExpr; assert( zArg[i]==FTS3_MATCHINFO_HITS ); pExpr = pCsr->pExpr; rc = fts3ExprLoadDoclists(pCsr, 0, 0); if( rc!=SQLITE_OK ) break; if( bGlobal ){ if( pCsr->pDeferred ){ rc = fts3MatchinfoSelectDoctotal(pTab, &pSelect, &pInfo->nDoc, 0); if( rc!=SQLITE_OK ) break; } rc = fts3ExprIterate(pExpr, fts3ExprGlobalHitsCb,(void*)pInfo); sqlite3Fts3EvalTestDeferred(pCsr, &rc); if( rc!=SQLITE_OK ) break; } (void)fts3ExprIterate(pExpr, fts3ExprLocalHitsCb,(void*)pInfo); break; } } pInfo->aMatchinfo += fts3MatchinfoSize(pInfo, zArg[i]); } sqlite3_reset(pSelect); return rc; } /* ** Populate pCsr->aMatchinfo[] with data for the current row. The ** 'matchinfo' data is an array of 32-bit unsigned integers (C type u32). */ static void fts3GetMatchinfo( sqlite3_context *pCtx, /* Return results here */ Fts3Cursor *pCsr, /* FTS3 Cursor object */ const char *zArg /* Second argument to matchinfo() function */ ){ MatchInfo sInfo; Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab; int rc = SQLITE_OK; int bGlobal = 0; /* Collect 'global' stats as well as local */ u32 *aOut = 0; void (*xDestroyOut)(void*) = 0; memset(&sInfo, 0, sizeof(MatchInfo)); sInfo.pCursor = pCsr; sInfo.nCol = pTab->nColumn; /* If there is cached matchinfo() data, but the format string for the ** cache does not match the format string for this request, discard ** the cached data. 
*/ if( pCsr->pMIBuffer && strcmp(pCsr->pMIBuffer->zMatchinfo, zArg) ){ sqlite3Fts3MIBufferFree(pCsr->pMIBuffer); pCsr->pMIBuffer = 0; } /* If Fts3Cursor.pMIBuffer is NULL, then this is the first time the ** matchinfo function has been called for this query. In this case ** allocate the array used to accumulate the matchinfo data and ** initialize those elements that are constant for every row. */ if( pCsr->pMIBuffer==0 ){ int nMatchinfo = 0; /* Number of u32 elements in match-info */ int i; /* Used to iterate through zArg */ /* Determine the number of phrases in the query */ pCsr->nPhrase = fts3ExprPhraseCount(pCsr->pExpr); sInfo.nPhrase = pCsr->nPhrase; /* Determine the number of integers in the buffer returned by this call. */ for(i=0; zArg[i]; i++){ char *zErr = 0; if( fts3MatchinfoCheck(pTab, zArg[i], &zErr) ){ sqlite3_result_error(pCtx, zErr, -1); sqlite3_free(zErr); return; } nMatchinfo += fts3MatchinfoSize(&sInfo, zArg[i]); } /* Allocate space for Fts3Cursor.aMatchinfo[] and Fts3Cursor.zMatchinfo. */ pCsr->pMIBuffer = fts3MIBufferNew(nMatchinfo, zArg); if( !pCsr->pMIBuffer ) rc = SQLITE_NOMEM; pCsr->isMatchinfoNeeded = 1; bGlobal = 1; } if( rc==SQLITE_OK ){ xDestroyOut = fts3MIBufferAlloc(pCsr->pMIBuffer, &aOut); if( xDestroyOut==0 ){ rc = SQLITE_NOMEM; } } if( rc==SQLITE_OK ){ sInfo.aMatchinfo = aOut; sInfo.nPhrase = pCsr->nPhrase; rc = fts3MatchinfoValues(pCsr, bGlobal, &sInfo, zArg); if( bGlobal ){ fts3MIBufferSetGlobal(pCsr->pMIBuffer); } } if( rc!=SQLITE_OK ){ sqlite3_result_error_code(pCtx, rc); if( xDestroyOut ) xDestroyOut(aOut); }else{ int n = pCsr->pMIBuffer->nElem * sizeof(u32); sqlite3_result_blob(pCtx, aOut, n, xDestroyOut); } } /* ** Implementation of snippet() function. */ SQLITE_PRIVATE void sqlite3Fts3Snippet( sqlite3_context *pCtx, /* SQLite function call context */ Fts3Cursor *pCsr, /* Cursor object */ const char *zStart, /* Snippet start text - "" */ const char *zEnd, /* Snippet end text - "" */ const char *zEllipsis, /* Snippet ellipsis text - "..." */ int iCol, /* Extract snippet from this column */ int nToken /* Approximate number of tokens in snippet */ ){ Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab; int rc = SQLITE_OK; int i; StrBuffer res = {0, 0, 0}; /* The returned text includes up to four fragments of text extracted from ** the data in the current row. The first iteration of the for(...) loop ** below attempts to locate a single fragment of text nToken tokens in ** size that contains at least one instance of all phrases in the query ** expression that appear in the current row. If such a fragment of text ** cannot be found, the second iteration of the loop attempts to locate ** a pair of fragments, and so on. */ int nSnippet = 0; /* Number of fragments in this snippet */ SnippetFragment aSnippet[4]; /* Maximum of 4 fragments per snippet */ int nFToken = -1; /* Number of tokens in each fragment */ if( !pCsr->pExpr ){ sqlite3_result_text(pCtx, "", 0, SQLITE_STATIC); return; } for(nSnippet=1; 1; nSnippet++){ int iSnip; /* Loop counter 0..nSnippet-1 */ u64 mCovered = 0; /* Bitmask of phrases covered by snippet */ u64 mSeen = 0; /* Bitmask of phrases seen by BestSnippet() */ if( nToken>=0 ){ nFToken = (nToken+nSnippet-1) / nSnippet; }else{ nFToken = -1 * nToken; } for(iSnip=0; iSnipnColumn; iRead++){ SnippetFragment sF = {0, 0, 0, 0}; int iS = 0; if( iCol>=0 && iRead!=iCol ) continue; /* Find the best snippet of nFToken tokens in column iRead. 
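** (nFToken was computed earlier in the enclosing loop from the requested
** snippet size: for example, a request for nToken==15 tokens split across
** nSnippet==2 fragments yields (15+2-1)/2 == 8 tokens per fragment, while a
** negative nToken requests exactly -nToken tokens per fragment.)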
*/ rc = fts3BestSnippet(nFToken, pCsr, iRead, mCovered, &mSeen, &sF, &iS); if( rc!=SQLITE_OK ){ goto snippet_out; } if( iS>iBestScore ){ *pFragment = sF; iBestScore = iS; } } mCovered |= pFragment->covered; } /* If all query phrases seen by fts3BestSnippet() are present in at least ** one of the nSnippet snippet fragments, break out of the loop. */ assert( (mCovered&mSeen)==mCovered ); if( mSeen==mCovered || nSnippet==SizeofArray(aSnippet) ) break; } assert( nFToken>0 ); for(i=0; ipCsr, pExpr, p->iCol, &pList); nTerm = pExpr->pPhrase->nToken; if( pList ){ fts3GetDeltaPosition(&pList, &iPos); assert( iPos>=0 ); } for(iTerm=0; iTermaTerm[p->iTerm++]; pT->iOff = nTerm-iTerm-1; pT->pList = pList; pT->iPos = iPos; } return rc; } /* ** Implementation of offsets() function. */ SQLITE_PRIVATE void sqlite3Fts3Offsets( sqlite3_context *pCtx, /* SQLite function call context */ Fts3Cursor *pCsr /* Cursor object */ ){ Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab; sqlite3_tokenizer_module const *pMod = pTab->pTokenizer->pModule; int rc; /* Return Code */ int nToken; /* Number of tokens in query */ int iCol; /* Column currently being processed */ StrBuffer res = {0, 0, 0}; /* Result string */ TermOffsetCtx sCtx; /* Context for fts3ExprTermOffsetInit() */ if( !pCsr->pExpr ){ sqlite3_result_text(pCtx, "", 0, SQLITE_STATIC); return; } memset(&sCtx, 0, sizeof(sCtx)); assert( pCsr->isRequireSeek==0 ); /* Count the number of terms in the query */ rc = fts3ExprLoadDoclists(pCsr, 0, &nToken); if( rc!=SQLITE_OK ) goto offsets_out; /* Allocate the array of TermOffset iterators. */ sCtx.aTerm = (TermOffset *)sqlite3_malloc(sizeof(TermOffset)*nToken); if( 0==sCtx.aTerm ){ rc = SQLITE_NOMEM; goto offsets_out; } sCtx.iDocid = pCsr->iPrevId; sCtx.pCsr = pCsr; /* Loop through the table columns, appending offset information to ** string-buffer res for each column. */ for(iCol=0; iColnColumn; iCol++){ sqlite3_tokenizer_cursor *pC; /* Tokenizer cursor */ const char *ZDUMMY; /* Dummy argument used with xNext() */ int NDUMMY = 0; /* Dummy argument used with xNext() */ int iStart = 0; int iEnd = 0; int iCurrent = 0; const char *zDoc; int nDoc; /* Initialize the contents of sCtx.aTerm[] for column iCol. There is ** no way that this operation can fail, so the return code from ** fts3ExprIterate() can be discarded. */ sCtx.iCol = iCol; sCtx.iTerm = 0; (void)fts3ExprIterate(pCsr->pExpr, fts3ExprTermOffsetInit, (void*)&sCtx); /* Retreive the text stored in column iCol. If an SQL NULL is stored ** in column iCol, jump immediately to the next iteration of the loop. ** If an OOM occurs while retrieving the data (this can happen if SQLite ** needs to transform the data from utf-16 to utf-8), return SQLITE_NOMEM ** to the caller. */ zDoc = (const char *)sqlite3_column_text(pCsr->pStmt, iCol+1); nDoc = sqlite3_column_bytes(pCsr->pStmt, iCol+1); if( zDoc==0 ){ if( sqlite3_column_type(pCsr->pStmt, iCol+1)==SQLITE_NULL ){ continue; } rc = SQLITE_NOMEM; goto offsets_out; } /* Initialize a tokenizer iterator to iterate through column iCol. 
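** The column text is re-tokenized here so that the token positions recorded
** in the full-text index can be mapped back to byte offsets; each match
** located below is appended to the result string as four space-separated
** integers: the column number, the index of the matching query term, the
** byte offset of the token within the column text, and its size in bytes.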
*/ rc = sqlite3Fts3OpenTokenizer(pTab->pTokenizer, pCsr->iLangid, zDoc, nDoc, &pC ); if( rc!=SQLITE_OK ) goto offsets_out; rc = pMod->xNext(pC, &ZDUMMY, &NDUMMY, &iStart, &iEnd, &iCurrent); while( rc==SQLITE_OK ){ int i; /* Used to loop through terms */ int iMinPos = 0x7FFFFFFF; /* Position of next token */ TermOffset *pTerm = 0; /* TermOffset associated with next token */ for(i=0; ipList && (pT->iPos-pT->iOff)iPos-pT->iOff; pTerm = pT; } } if( !pTerm ){ /* All offsets for this column have been gathered. */ rc = SQLITE_DONE; }else{ assert( iCurrent<=iMinPos ); if( 0==(0xFE&*pTerm->pList) ){ pTerm->pList = 0; }else{ fts3GetDeltaPosition(&pTerm->pList, &pTerm->iPos); } while( rc==SQLITE_OK && iCurrentxNext(pC, &ZDUMMY, &NDUMMY, &iStart, &iEnd, &iCurrent); } if( rc==SQLITE_OK ){ char aBuffer[64]; sqlite3_snprintf(sizeof(aBuffer), aBuffer, "%d %d %d %d ", iCol, pTerm-sCtx.aTerm, iStart, iEnd-iStart ); rc = fts3StringAppend(&res, aBuffer, -1); }else if( rc==SQLITE_DONE && pTab->zContentTbl==0 ){ rc = FTS_CORRUPT_VTAB; } } } if( rc==SQLITE_DONE ){ rc = SQLITE_OK; } pMod->xClose(pC); if( rc!=SQLITE_OK ) goto offsets_out; } offsets_out: sqlite3_free(sCtx.aTerm); assert( rc!=SQLITE_DONE ); sqlite3Fts3SegmentsClose(pTab); if( rc!=SQLITE_OK ){ sqlite3_result_error_code(pCtx, rc); sqlite3_free(res.z); }else{ sqlite3_result_text(pCtx, res.z, res.n-1, sqlite3_free); } return; } /* ** Implementation of matchinfo() function. */ SQLITE_PRIVATE void sqlite3Fts3Matchinfo( sqlite3_context *pContext, /* Function call context */ Fts3Cursor *pCsr, /* FTS3 table cursor */ const char *zArg /* Second arg to matchinfo() function */ ){ Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab; const char *zFormat; if( zArg ){ zFormat = zArg; }else{ zFormat = FTS3_MATCHINFO_DEFAULT; } if( !pCsr->pExpr ){ sqlite3_result_blob(pContext, "", 0, SQLITE_STATIC); return; }else{ /* Retrieve matchinfo() data. */ fts3GetMatchinfo(pContext, pCsr, zFormat); sqlite3Fts3SegmentsClose(pTab); } } #endif /************** End of fts3_snippet.c ****************************************/ /************** Begin file fts3_unicode.c ************************************/ /* ** 2012 May 24 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ****************************************************************************** ** ** Implementation of the "unicode" full-text-search tokenizer. */ #ifndef SQLITE_DISABLE_FTS3_UNICODE /* #include "fts3Int.h" */ #if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) /* #include */ /* #include */ /* #include */ /* #include */ /* #include "fts3_tokenizer.h" */ /* ** The following two macros - READ_UTF8 and WRITE_UTF8 - have been copied ** from the sqlite3 source file utf.c. If this file is compiled as part ** of the amalgamation, they are not required. 
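** (In brief: READ_UTF8 decodes the next codepoint from zIn, substituting the
** replacement character 0xFFFD for over-long encodings, surrogate values and
** the non-characters 0xFFFE/0xFFFF, while WRITE_UTF8 emits one byte for
** codepoints below 0x80, two below 0x800, three below 0x10000 and four
** otherwise.)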
*/ #ifndef SQLITE_AMALGAMATION static const unsigned char sqlite3Utf8Trans1[] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x01, 0x02, 0x03, 0x00, 0x01, 0x00, 0x00, }; #define READ_UTF8(zIn, zTerm, c) \ c = *(zIn++); \ if( c>=0xc0 ){ \ c = sqlite3Utf8Trans1[c-0xc0]; \ while( zIn!=zTerm && (*zIn & 0xc0)==0x80 ){ \ c = (c<<6) + (0x3f & *(zIn++)); \ } \ if( c<0x80 \ || (c&0xFFFFF800)==0xD800 \ || (c&0xFFFFFFFE)==0xFFFE ){ c = 0xFFFD; } \ } #define WRITE_UTF8(zOut, c) { \ if( c<0x00080 ){ \ *zOut++ = (u8)(c&0xFF); \ } \ else if( c<0x00800 ){ \ *zOut++ = 0xC0 + (u8)((c>>6)&0x1F); \ *zOut++ = 0x80 + (u8)(c & 0x3F); \ } \ else if( c<0x10000 ){ \ *zOut++ = 0xE0 + (u8)((c>>12)&0x0F); \ *zOut++ = 0x80 + (u8)((c>>6) & 0x3F); \ *zOut++ = 0x80 + (u8)(c & 0x3F); \ }else{ \ *zOut++ = 0xF0 + (u8)((c>>18) & 0x07); \ *zOut++ = 0x80 + (u8)((c>>12) & 0x3F); \ *zOut++ = 0x80 + (u8)((c>>6) & 0x3F); \ *zOut++ = 0x80 + (u8)(c & 0x3F); \ } \ } #endif /* ifndef SQLITE_AMALGAMATION */ typedef struct unicode_tokenizer unicode_tokenizer; typedef struct unicode_cursor unicode_cursor; struct unicode_tokenizer { sqlite3_tokenizer base; int bRemoveDiacritic; int nException; int *aiException; }; struct unicode_cursor { sqlite3_tokenizer_cursor base; const unsigned char *aInput; /* Input text being tokenized */ int nInput; /* Size of aInput[] in bytes */ int iOff; /* Current offset within aInput[] */ int iToken; /* Index of next token to be returned */ char *zToken; /* storage for current token */ int nAlloc; /* space allocated at zToken */ }; /* ** Destroy a tokenizer allocated by unicodeCreate(). */ static int unicodeDestroy(sqlite3_tokenizer *pTokenizer){ if( pTokenizer ){ unicode_tokenizer *p = (unicode_tokenizer *)pTokenizer; sqlite3_free(p->aiException); sqlite3_free(p); } return SQLITE_OK; } /* ** As part of a tokenchars= or separators= option, the CREATE VIRTUAL TABLE ** statement has specified that the tokenizer for this table shall consider ** all characters in string zIn/nIn to be separators (if bAlnum==0) or ** token characters (if bAlnum==1). ** ** For each codepoint in the zIn/nIn string, this function checks if the ** sqlite3FtsUnicodeIsalnum() function already returns the desired result. ** If so, no action is taken. Otherwise, the codepoint is added to the ** unicode_tokenizer.aiException[] array. For the purposes of tokenization, ** the return value of sqlite3FtsUnicodeIsalnum() is inverted for all ** codepoints in the aiException[] array. ** ** If a standalone diacritic mark (one that sqlite3FtsUnicodeIsdiacritic() ** identifies as a diacritic) occurs in the zIn/nIn string it is ignored. ** It is not possible to change the behavior of the tokenizer with respect ** to these codepoints. 
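** For example (illustrative; the tokenizer implemented by this file is
** registered with FTS3/FTS4 under the name "unicode61"):
**
**   CREATE VIRTUAL TABLE txt USING fts4(
**     tokenize=unicode61 "tokenchars=.=" "separators=X"
**   );
**
** makes '.' and '=' token characters and makes 'X' a separator, overriding
** the default classification returned by sqlite3FtsUnicodeIsalnum().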
*/ static int unicodeAddExceptions( unicode_tokenizer *p, /* Tokenizer to add exceptions to */ int bAlnum, /* Replace Isalnum() return value with this */ const char *zIn, /* Array of characters to make exceptions */ int nIn /* Length of z in bytes */ ){ const unsigned char *z = (const unsigned char *)zIn; const unsigned char *zTerm = &z[nIn]; int iCode; int nEntry = 0; assert( bAlnum==0 || bAlnum==1 ); while( zaiException, (p->nException+nEntry)*sizeof(int)); if( aNew==0 ) return SQLITE_NOMEM; nNew = p->nException; z = (const unsigned char *)zIn; while( zi; j--) aNew[j] = aNew[j-1]; aNew[i] = iCode; nNew++; } } p->aiException = aNew; p->nException = nNew; } return SQLITE_OK; } /* ** Return true if the p->aiException[] array contains the value iCode. */ static int unicodeIsException(unicode_tokenizer *p, int iCode){ if( p->nException>0 ){ int *a = p->aiException; int iLo = 0; int iHi = p->nException-1; while( iHi>=iLo ){ int iTest = (iHi + iLo) / 2; if( iCode==a[iTest] ){ return 1; }else if( iCode>a[iTest] ){ iLo = iTest+1; }else{ iHi = iTest-1; } } } return 0; } /* ** Return true if, for the purposes of tokenization, codepoint iCode is ** considered a token character (not a separator). */ static int unicodeIsAlnum(unicode_tokenizer *p, int iCode){ assert( (sqlite3FtsUnicodeIsalnum(iCode) & 0xFFFFFFFE)==0 ); return sqlite3FtsUnicodeIsalnum(iCode) ^ unicodeIsException(p, iCode); } /* ** Create a new tokenizer instance. */ static int unicodeCreate( int nArg, /* Size of array argv[] */ const char * const *azArg, /* Tokenizer creation arguments */ sqlite3_tokenizer **pp /* OUT: New tokenizer handle */ ){ unicode_tokenizer *pNew; /* New tokenizer object */ int i; int rc = SQLITE_OK; pNew = (unicode_tokenizer *) sqlite3_malloc(sizeof(unicode_tokenizer)); if( pNew==NULL ) return SQLITE_NOMEM; memset(pNew, 0, sizeof(unicode_tokenizer)); pNew->bRemoveDiacritic = 1; for(i=0; rc==SQLITE_OK && ibRemoveDiacritic = 1; } else if( n==19 && memcmp("remove_diacritics=0", z, 19)==0 ){ pNew->bRemoveDiacritic = 0; } else if( n>=11 && memcmp("tokenchars=", z, 11)==0 ){ rc = unicodeAddExceptions(pNew, 1, &z[11], n-11); } else if( n>=11 && memcmp("separators=", z, 11)==0 ){ rc = unicodeAddExceptions(pNew, 0, &z[11], n-11); } else{ /* Unrecognized argument */ rc = SQLITE_ERROR; } } if( rc!=SQLITE_OK ){ unicodeDestroy((sqlite3_tokenizer *)pNew); pNew = 0; } *pp = (sqlite3_tokenizer *)pNew; return rc; } /* ** Prepare to begin tokenizing a particular string. The input ** string to be tokenized is pInput[0..nBytes-1]. A cursor ** used to incrementally tokenize this string is returned in ** *ppCursor. */ static int unicodeOpen( sqlite3_tokenizer *p, /* The tokenizer */ const char *aInput, /* Input string */ int nInput, /* Size of string aInput in bytes */ sqlite3_tokenizer_cursor **pp /* OUT: New cursor object */ ){ unicode_cursor *pCsr; pCsr = (unicode_cursor *)sqlite3_malloc(sizeof(unicode_cursor)); if( pCsr==0 ){ return SQLITE_NOMEM; } memset(pCsr, 0, sizeof(unicode_cursor)); pCsr->aInput = (const unsigned char *)aInput; if( aInput==0 ){ pCsr->nInput = 0; }else if( nInput<0 ){ pCsr->nInput = (int)strlen(aInput); }else{ pCsr->nInput = nInput; } *pp = &pCsr->base; UNUSED_PARAMETER(p); return SQLITE_OK; } /* ** Close a tokenization cursor previously opened by a call to ** simpleOpen() above. 
*/ static int unicodeClose(sqlite3_tokenizer_cursor *pCursor){ unicode_cursor *pCsr = (unicode_cursor *) pCursor; sqlite3_free(pCsr->zToken); sqlite3_free(pCsr); return SQLITE_OK; } /* ** Extract the next token from a tokenization cursor. The cursor must ** have been opened by a prior call to simpleOpen(). */ static int unicodeNext( sqlite3_tokenizer_cursor *pC, /* Cursor returned by simpleOpen */ const char **paToken, /* OUT: Token text */ int *pnToken, /* OUT: Number of bytes at *paToken */ int *piStart, /* OUT: Starting offset of token */ int *piEnd, /* OUT: Ending offset of token */ int *piPos /* OUT: Position integer of token */ ){ unicode_cursor *pCsr = (unicode_cursor *)pC; unicode_tokenizer *p = ((unicode_tokenizer *)pCsr->base.pTokenizer); int iCode = 0; char *zOut; const unsigned char *z = &pCsr->aInput[pCsr->iOff]; const unsigned char *zStart = z; const unsigned char *zEnd; const unsigned char *zTerm = &pCsr->aInput[pCsr->nInput]; /* Scan past any delimiter characters before the start of the next token. ** Return SQLITE_DONE early if this takes us all the way to the end of ** the input. */ while( z=zTerm ) return SQLITE_DONE; zOut = pCsr->zToken; do { int iOut; /* Grow the output buffer if required. */ if( (zOut-pCsr->zToken)>=(pCsr->nAlloc-4) ){ char *zNew = sqlite3_realloc(pCsr->zToken, pCsr->nAlloc+64); if( !zNew ) return SQLITE_NOMEM; zOut = &zNew[zOut - pCsr->zToken]; pCsr->zToken = zNew; pCsr->nAlloc += 64; } /* Write the folded case of the last character read to the output */ zEnd = z; iOut = sqlite3FtsUnicodeFold(iCode, p->bRemoveDiacritic); if( iOut ){ WRITE_UTF8(zOut, iOut); } /* If the cursor is not at EOF, read the next character */ if( z>=zTerm ) break; READ_UTF8(z, zTerm, iCode); }while( unicodeIsAlnum(p, iCode) || sqlite3FtsUnicodeIsdiacritic(iCode) ); /* Set the output variables and return. */ pCsr->iOff = (int)(z - pCsr->aInput); *paToken = pCsr->zToken; *pnToken = (int)(zOut - pCsr->zToken); *piStart = (int)(zStart - pCsr->aInput); *piEnd = (int)(zEnd - pCsr->aInput); *piPos = pCsr->iToken++; return SQLITE_OK; } /* ** Set *ppModule to a pointer to the sqlite3_tokenizer_module ** structure for the unicode tokenizer. */ SQLITE_PRIVATE void sqlite3Fts3UnicodeTokenizer(sqlite3_tokenizer_module const **ppModule){ static const sqlite3_tokenizer_module module = { 0, unicodeCreate, unicodeDestroy, unicodeOpen, unicodeClose, unicodeNext, 0, }; *ppModule = &module; } #endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */ #endif /* ifndef SQLITE_DISABLE_FTS3_UNICODE */ /************** End of fts3_unicode.c ****************************************/ /************** Begin file fts3_unicode2.c ***********************************/ /* ** 2012 May 25 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ****************************************************************************** */ /* ** DO NOT EDIT THIS MACHINE GENERATED FILE. */ #ifndef SQLITE_DISABLE_FTS3_UNICODE #if defined(SQLITE_ENABLE_FTS3) || defined(SQLITE_ENABLE_FTS4) /* #include */ /* ** Return true if the argument corresponds to a unicode codepoint ** classified as either a letter or a number. Otherwise false. ** ** The results are undefined if the value passed to this function ** is less than zero. 
*/ SQLITE_PRIVATE int sqlite3FtsUnicodeIsalnum(int c){ /* Each unsigned integer in the following array corresponds to a contiguous ** range of unicode codepoints that are not either letters or numbers (i.e. ** codepoints for which this function should return 0). ** ** The most significant 22 bits in each 32-bit value contain the first ** codepoint in the range. The least significant 10 bits are used to store ** the size of the range (always at least 1). In other words, the value ** ((C<<22) + N) represents a range of N codepoints starting with codepoint ** C. It is not possible to represent a range larger than 1023 codepoints ** using this format. */ static const unsigned int aEntry[] = { 0x00000030, 0x0000E807, 0x00016C06, 0x0001EC2F, 0x0002AC07, 0x0002D001, 0x0002D803, 0x0002EC01, 0x0002FC01, 0x00035C01, 0x0003DC01, 0x000B0804, 0x000B480E, 0x000B9407, 0x000BB401, 0x000BBC81, 0x000DD401, 0x000DF801, 0x000E1002, 0x000E1C01, 0x000FD801, 0x00120808, 0x00156806, 0x00162402, 0x00163C01, 0x00164437, 0x0017CC02, 0x00180005, 0x00181816, 0x00187802, 0x00192C15, 0x0019A804, 0x0019C001, 0x001B5001, 0x001B580F, 0x001B9C07, 0x001BF402, 0x001C000E, 0x001C3C01, 0x001C4401, 0x001CC01B, 0x001E980B, 0x001FAC09, 0x001FD804, 0x00205804, 0x00206C09, 0x00209403, 0x0020A405, 0x0020C00F, 0x00216403, 0x00217801, 0x0023901B, 0x00240004, 0x0024E803, 0x0024F812, 0x00254407, 0x00258804, 0x0025C001, 0x00260403, 0x0026F001, 0x0026F807, 0x00271C02, 0x00272C03, 0x00275C01, 0x00278802, 0x0027C802, 0x0027E802, 0x00280403, 0x0028F001, 0x0028F805, 0x00291C02, 0x00292C03, 0x00294401, 0x0029C002, 0x0029D401, 0x002A0403, 0x002AF001, 0x002AF808, 0x002B1C03, 0x002B2C03, 0x002B8802, 0x002BC002, 0x002C0403, 0x002CF001, 0x002CF807, 0x002D1C02, 0x002D2C03, 0x002D5802, 0x002D8802, 0x002DC001, 0x002E0801, 0x002EF805, 0x002F1803, 0x002F2804, 0x002F5C01, 0x002FCC08, 0x00300403, 0x0030F807, 0x00311803, 0x00312804, 0x00315402, 0x00318802, 0x0031FC01, 0x00320802, 0x0032F001, 0x0032F807, 0x00331803, 0x00332804, 0x00335402, 0x00338802, 0x00340802, 0x0034F807, 0x00351803, 0x00352804, 0x00355C01, 0x00358802, 0x0035E401, 0x00360802, 0x00372801, 0x00373C06, 0x00375801, 0x00376008, 0x0037C803, 0x0038C401, 0x0038D007, 0x0038FC01, 0x00391C09, 0x00396802, 0x003AC401, 0x003AD006, 0x003AEC02, 0x003B2006, 0x003C041F, 0x003CD00C, 0x003DC417, 0x003E340B, 0x003E6424, 0x003EF80F, 0x003F380D, 0x0040AC14, 0x00412806, 0x00415804, 0x00417803, 0x00418803, 0x00419C07, 0x0041C404, 0x0042080C, 0x00423C01, 0x00426806, 0x0043EC01, 0x004D740C, 0x004E400A, 0x00500001, 0x0059B402, 0x005A0001, 0x005A6C02, 0x005BAC03, 0x005C4803, 0x005CC805, 0x005D4802, 0x005DC802, 0x005ED023, 0x005F6004, 0x005F7401, 0x0060000F, 0x0062A401, 0x0064800C, 0x0064C00C, 0x00650001, 0x00651002, 0x0066C011, 0x00672002, 0x00677822, 0x00685C05, 0x00687802, 0x0069540A, 0x0069801D, 0x0069FC01, 0x006A8007, 0x006AA006, 0x006C0005, 0x006CD011, 0x006D6823, 0x006E0003, 0x006E840D, 0x006F980E, 0x006FF004, 0x00709014, 0x0070EC05, 0x0071F802, 0x00730008, 0x00734019, 0x0073B401, 0x0073C803, 0x00770027, 0x0077F004, 0x007EF401, 0x007EFC03, 0x007F3403, 0x007F7403, 0x007FB403, 0x007FF402, 0x00800065, 0x0081A806, 0x0081E805, 0x00822805, 0x0082801A, 0x00834021, 0x00840002, 0x00840C04, 0x00842002, 0x00845001, 0x00845803, 0x00847806, 0x00849401, 0x00849C01, 0x0084A401, 0x0084B801, 0x0084E802, 0x00850005, 0x00852804, 0x00853C01, 0x00864264, 0x00900027, 0x0091000B, 0x0092704E, 0x00940200, 0x009C0475, 0x009E53B9, 0x00AD400A, 0x00B39406, 0x00B3BC03, 0x00B3E404, 0x00B3F802, 0x00B5C001, 0x00B5FC01, 0x00B7804F, 0x00B8C00C, 
0x00BA001A, 0x00BA6C59, 0x00BC00D6, 0x00BFC00C, 0x00C00005, 0x00C02019, 0x00C0A807, 0x00C0D802, 0x00C0F403, 0x00C26404, 0x00C28001, 0x00C3EC01, 0x00C64002, 0x00C6580A, 0x00C70024, 0x00C8001F, 0x00C8A81E, 0x00C94001, 0x00C98020, 0x00CA2827, 0x00CB003F, 0x00CC0100, 0x01370040, 0x02924037, 0x0293F802, 0x02983403, 0x0299BC10, 0x029A7C01, 0x029BC008, 0x029C0017, 0x029C8002, 0x029E2402, 0x02A00801, 0x02A01801, 0x02A02C01, 0x02A08C09, 0x02A0D804, 0x02A1D004, 0x02A20002, 0x02A2D011, 0x02A33802, 0x02A38012, 0x02A3E003, 0x02A4980A, 0x02A51C0D, 0x02A57C01, 0x02A60004, 0x02A6CC1B, 0x02A77802, 0x02A8A40E, 0x02A90C01, 0x02A93002, 0x02A97004, 0x02A9DC03, 0x02A9EC01, 0x02AAC001, 0x02AAC803, 0x02AADC02, 0x02AAF802, 0x02AB0401, 0x02AB7802, 0x02ABAC07, 0x02ABD402, 0x02AF8C0B, 0x03600001, 0x036DFC02, 0x036FFC02, 0x037FFC01, 0x03EC7801, 0x03ECA401, 0x03EEC810, 0x03F4F802, 0x03F7F002, 0x03F8001A, 0x03F88007, 0x03F8C023, 0x03F95013, 0x03F9A004, 0x03FBFC01, 0x03FC040F, 0x03FC6807, 0x03FCEC06, 0x03FD6C0B, 0x03FF8007, 0x03FFA007, 0x03FFE405, 0x04040003, 0x0404DC09, 0x0405E411, 0x0406400C, 0x0407402E, 0x040E7C01, 0x040F4001, 0x04215C01, 0x04247C01, 0x0424FC01, 0x04280403, 0x04281402, 0x04283004, 0x0428E003, 0x0428FC01, 0x04294009, 0x0429FC01, 0x042CE407, 0x04400003, 0x0440E016, 0x04420003, 0x0442C012, 0x04440003, 0x04449C0E, 0x04450004, 0x04460003, 0x0446CC0E, 0x04471404, 0x045AAC0D, 0x0491C004, 0x05BD442E, 0x05BE3C04, 0x074000F6, 0x07440027, 0x0744A4B5, 0x07480046, 0x074C0057, 0x075B0401, 0x075B6C01, 0x075BEC01, 0x075C5401, 0x075CD401, 0x075D3C01, 0x075DBC01, 0x075E2401, 0x075EA401, 0x075F0C01, 0x07BBC002, 0x07C0002C, 0x07C0C064, 0x07C2800F, 0x07C2C40E, 0x07C3040F, 0x07C3440F, 0x07C4401F, 0x07C4C03C, 0x07C5C02B, 0x07C7981D, 0x07C8402B, 0x07C90009, 0x07C94002, 0x07CC0021, 0x07CCC006, 0x07CCDC46, 0x07CE0014, 0x07CE8025, 0x07CF1805, 0x07CF8011, 0x07D0003F, 0x07D10001, 0x07D108B6, 0x07D3E404, 0x07D4003E, 0x07D50004, 0x07D54018, 0x07D7EC46, 0x07D9140B, 0x07DA0046, 0x07DC0074, 0x38000401, 0x38008060, 0x380400F0, }; static const unsigned int aAscii[4] = { 0xFFFFFFFF, 0xFC00FFFF, 0xF8000001, 0xF8000001, }; if( c<128 ){ return ( (aAscii[c >> 5] & (1 << (c & 0x001F)))==0 ); }else if( c<(1<<22) ){ unsigned int key = (((unsigned int)c)<<10) | 0x000003FF; int iRes = 0; int iHi = sizeof(aEntry)/sizeof(aEntry[0]) - 1; int iLo = 0; while( iHi>=iLo ){ int iTest = (iHi + iLo) / 2; if( key >= aEntry[iTest] ){ iRes = iTest; iLo = iTest+1; }else{ iHi = iTest-1; } } assert( aEntry[0]=aEntry[iRes] ); return (((unsigned int)c) >= ((aEntry[iRes]>>10) + (aEntry[iRes]&0x3FF))); } return 1; } /* ** If the argument is a codepoint corresponding to a lowercase letter ** in the ASCII range with a diacritic added, return the codepoint ** of the ASCII letter only. For example, if passed 235 - "LATIN ** SMALL LETTER E WITH DIAERESIS" - return 65 ("LATIN SMALL LETTER ** E"). The resuls of passing a codepoint that corresponds to an ** uppercase letter are undefined. 
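** (Worked example: for c==235 the search key is (235<<3)|7 == 1887; the
** largest aDia[] entry not exceeding it is 1859, at index 3, which encodes
** base codepoint 1859>>3 == 232 and range 1859&0x07 == 3, i.e. codepoints
** 232..235. Since 235 lies within that range, the function returns
** aChar[3] == 'e', codepoint 101.)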
*/ static int remove_diacritic(int c){ unsigned short aDia[] = { 0, 1797, 1848, 1859, 1891, 1928, 1940, 1995, 2024, 2040, 2060, 2110, 2168, 2206, 2264, 2286, 2344, 2383, 2472, 2488, 2516, 2596, 2668, 2732, 2782, 2842, 2894, 2954, 2984, 3000, 3028, 3336, 3456, 3696, 3712, 3728, 3744, 3896, 3912, 3928, 3968, 4008, 4040, 4106, 4138, 4170, 4202, 4234, 4266, 4296, 4312, 4344, 4408, 4424, 4472, 4504, 6148, 6198, 6264, 6280, 6360, 6429, 6505, 6529, 61448, 61468, 61534, 61592, 61642, 61688, 61704, 61726, 61784, 61800, 61836, 61880, 61914, 61948, 61998, 62122, 62154, 62200, 62218, 62302, 62364, 62442, 62478, 62536, 62554, 62584, 62604, 62640, 62648, 62656, 62664, 62730, 62924, 63050, 63082, 63274, 63390, }; char aChar[] = { '\0', 'a', 'c', 'e', 'i', 'n', 'o', 'u', 'y', 'y', 'a', 'c', 'd', 'e', 'e', 'g', 'h', 'i', 'j', 'k', 'l', 'n', 'o', 'r', 's', 't', 'u', 'u', 'w', 'y', 'z', 'o', 'u', 'a', 'i', 'o', 'u', 'g', 'k', 'o', 'j', 'g', 'n', 'a', 'e', 'i', 'o', 'r', 'u', 's', 't', 'h', 'a', 'e', 'o', 'y', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', 'a', 'b', 'd', 'd', 'e', 'f', 'g', 'h', 'h', 'i', 'k', 'l', 'l', 'm', 'n', 'p', 'r', 'r', 's', 't', 'u', 'v', 'w', 'w', 'x', 'y', 'z', 'h', 't', 'w', 'y', 'a', 'e', 'i', 'o', 'u', 'y', }; unsigned int key = (((unsigned int)c)<<3) | 0x00000007; int iRes = 0; int iHi = sizeof(aDia)/sizeof(aDia[0]) - 1; int iLo = 0; while( iHi>=iLo ){ int iTest = (iHi + iLo) / 2; if( key >= aDia[iTest] ){ iRes = iTest; iLo = iTest+1; }else{ iHi = iTest-1; } } assert( key>=aDia[iRes] ); return ((c > (aDia[iRes]>>3) + (aDia[iRes]&0x07)) ? c : (int)aChar[iRes]); } /* ** Return true if the argument interpreted as a unicode codepoint ** is a diacritical modifier character. */ SQLITE_PRIVATE int sqlite3FtsUnicodeIsdiacritic(int c){ unsigned int mask0 = 0x08029FDF; unsigned int mask1 = 0x000361F8; if( c<768 || c>817 ) return 0; return (c < 768+32) ? (mask0 & (1 << (c-768))) : (mask1 & (1 << (c-768-32))); } /* ** Interpret the argument as a unicode codepoint. If the codepoint ** is an upper case character that has a lower case equivalent, ** return the codepoint corresponding to the lower case version. ** Otherwise, return a copy of the argument. ** ** The results are undefined if the value passed to this function ** is less than zero. */ SQLITE_PRIVATE int sqlite3FtsUnicodeFold(int c, int bRemoveDiacritic){ /* Each entry in the following array defines a rule for folding a range ** of codepoints to lower case. The rule applies to a range of nRange ** codepoints starting at codepoint iCode. ** ** If the least significant bit in flags is clear, then the rule applies ** to all nRange codepoints (i.e. all nRange codepoints are upper case and ** need to be folded). Or, if it is set, then the rule only applies to ** every second codepoint in the range, starting with codepoint C. ** ** The 7 most significant bits in flags are an index into the aiOff[] ** array. If a specific codepoint C does require folding, then its lower ** case equivalent is ((C + aiOff[flags>>1]) & 0xFFFF). ** ** The contents of this array are generated by parsing the CaseFolding.txt ** file distributed as part of the "Unicode Character Database". See ** http://www.unicode.org for details. 
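** (Worked example: the entry {913, 14, 17} below covers codepoints 913..929,
** GREEK CAPITAL LETTER ALPHA through RHO. Bit 0 of its flags value is clear,
** so every codepoint in the range is folded, and aiOff[14>>1] == aiOff[7]
** == 32, so each maps to (c+32); e.g. 913 (capital alpha) folds to 945
** (small alpha).)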
*/ static const struct TableEntry { unsigned short iCode; unsigned char flags; unsigned char nRange; } aEntry[] = { {65, 14, 26}, {181, 64, 1}, {192, 14, 23}, {216, 14, 7}, {256, 1, 48}, {306, 1, 6}, {313, 1, 16}, {330, 1, 46}, {376, 116, 1}, {377, 1, 6}, {383, 104, 1}, {385, 50, 1}, {386, 1, 4}, {390, 44, 1}, {391, 0, 1}, {393, 42, 2}, {395, 0, 1}, {398, 32, 1}, {399, 38, 1}, {400, 40, 1}, {401, 0, 1}, {403, 42, 1}, {404, 46, 1}, {406, 52, 1}, {407, 48, 1}, {408, 0, 1}, {412, 52, 1}, {413, 54, 1}, {415, 56, 1}, {416, 1, 6}, {422, 60, 1}, {423, 0, 1}, {425, 60, 1}, {428, 0, 1}, {430, 60, 1}, {431, 0, 1}, {433, 58, 2}, {435, 1, 4}, {439, 62, 1}, {440, 0, 1}, {444, 0, 1}, {452, 2, 1}, {453, 0, 1}, {455, 2, 1}, {456, 0, 1}, {458, 2, 1}, {459, 1, 18}, {478, 1, 18}, {497, 2, 1}, {498, 1, 4}, {502, 122, 1}, {503, 134, 1}, {504, 1, 40}, {544, 110, 1}, {546, 1, 18}, {570, 70, 1}, {571, 0, 1}, {573, 108, 1}, {574, 68, 1}, {577, 0, 1}, {579, 106, 1}, {580, 28, 1}, {581, 30, 1}, {582, 1, 10}, {837, 36, 1}, {880, 1, 4}, {886, 0, 1}, {902, 18, 1}, {904, 16, 3}, {908, 26, 1}, {910, 24, 2}, {913, 14, 17}, {931, 14, 9}, {962, 0, 1}, {975, 4, 1}, {976, 140, 1}, {977, 142, 1}, {981, 146, 1}, {982, 144, 1}, {984, 1, 24}, {1008, 136, 1}, {1009, 138, 1}, {1012, 130, 1}, {1013, 128, 1}, {1015, 0, 1}, {1017, 152, 1}, {1018, 0, 1}, {1021, 110, 3}, {1024, 34, 16}, {1040, 14, 32}, {1120, 1, 34}, {1162, 1, 54}, {1216, 6, 1}, {1217, 1, 14}, {1232, 1, 88}, {1329, 22, 38}, {4256, 66, 38}, {4295, 66, 1}, {4301, 66, 1}, {7680, 1, 150}, {7835, 132, 1}, {7838, 96, 1}, {7840, 1, 96}, {7944, 150, 8}, {7960, 150, 6}, {7976, 150, 8}, {7992, 150, 8}, {8008, 150, 6}, {8025, 151, 8}, {8040, 150, 8}, {8072, 150, 8}, {8088, 150, 8}, {8104, 150, 8}, {8120, 150, 2}, {8122, 126, 2}, {8124, 148, 1}, {8126, 100, 1}, {8136, 124, 4}, {8140, 148, 1}, {8152, 150, 2}, {8154, 120, 2}, {8168, 150, 2}, {8170, 118, 2}, {8172, 152, 1}, {8184, 112, 2}, {8186, 114, 2}, {8188, 148, 1}, {8486, 98, 1}, {8490, 92, 1}, {8491, 94, 1}, {8498, 12, 1}, {8544, 8, 16}, {8579, 0, 1}, {9398, 10, 26}, {11264, 22, 47}, {11360, 0, 1}, {11362, 88, 1}, {11363, 102, 1}, {11364, 90, 1}, {11367, 1, 6}, {11373, 84, 1}, {11374, 86, 1}, {11375, 80, 1}, {11376, 82, 1}, {11378, 0, 1}, {11381, 0, 1}, {11390, 78, 2}, {11392, 1, 100}, {11499, 1, 4}, {11506, 0, 1}, {42560, 1, 46}, {42624, 1, 24}, {42786, 1, 14}, {42802, 1, 62}, {42873, 1, 4}, {42877, 76, 1}, {42878, 1, 10}, {42891, 0, 1}, {42893, 74, 1}, {42896, 1, 4}, {42912, 1, 10}, {42922, 72, 1}, {65313, 14, 26}, }; static const unsigned short aiOff[] = { 1, 2, 8, 15, 16, 26, 28, 32, 37, 38, 40, 48, 63, 64, 69, 71, 79, 80, 116, 202, 203, 205, 206, 207, 209, 210, 211, 213, 214, 217, 218, 219, 775, 7264, 10792, 10795, 23228, 23256, 30204, 54721, 54753, 54754, 54756, 54787, 54793, 54809, 57153, 57274, 57921, 58019, 58363, 61722, 65268, 65341, 65373, 65406, 65408, 65410, 65415, 65424, 65436, 65439, 65450, 65462, 65472, 65476, 65478, 65480, 65482, 65488, 65506, 65511, 65514, 65521, 65527, 65528, 65529, }; int ret = c; assert( c>=0 ); assert( sizeof(unsigned short)==2 && sizeof(unsigned char)==1 ); if( c<128 ){ if( c>='A' && c<='Z' ) ret = c + ('a' - 'A'); }else if( c<65536 ){ int iHi = sizeof(aEntry)/sizeof(aEntry[0]) - 1; int iLo = 0; int iRes = -1; while( iHi>=iLo ){ int iTest = (iHi + iLo) / 2; int cmp = (c - aEntry[iTest].iCode); if( cmp>=0 ){ iRes = iTest; iLo = iTest+1; }else{ iHi = iTest-1; } } assert( iRes<0 || c>=aEntry[iRes].iCode ); if( iRes>=0 ){ const struct TableEntry *p = &aEntry[iRes]; if( c<(p->iCode + 
p->nRange) && 0==(0x01 & p->flags & (p->iCode ^ c)) ){ ret = (c + (aiOff[p->flags>>1])) & 0x0000FFFF; assert( ret>0 ); } } if( bRemoveDiacritic ) ret = remove_diacritic(ret); } else if( c>=66560 && c<66600 ){ ret = c + 40; } return ret; } #endif /* defined(SQLITE_ENABLE_FTS3) || defined(SQLITE_ENABLE_FTS4) */ #endif /* !defined(SQLITE_DISABLE_FTS3_UNICODE) */ /************** End of fts3_unicode2.c ***************************************/ /************** Begin file rtree.c *******************************************/ /* ** 2001 September 15 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** This file contains code for implementations of the r-tree and r*-tree ** algorithms packaged as an SQLite virtual table module. */ /* ** Database Format of R-Tree Tables ** -------------------------------- ** ** The data structure for a single virtual r-tree table is stored in three ** native SQLite tables declared as follows. In each case, the '%' character ** in the table name is replaced with the user-supplied name of the r-tree ** table. ** ** CREATE TABLE %_node(nodeno INTEGER PRIMARY KEY, data BLOB) ** CREATE TABLE %_parent(nodeno INTEGER PRIMARY KEY, parentnode INTEGER) ** CREATE TABLE %_rowid(rowid INTEGER PRIMARY KEY, nodeno INTEGER) ** ** The data for each node of the r-tree structure is stored in the %_node ** table. For each node that is not the root node of the r-tree, there is ** an entry in the %_parent table associating the node with its parent. ** And for each row of data in the table, there is an entry in the %_rowid ** table that maps from the entries rowid to the id of the node that it ** is stored on. ** ** The root node of an r-tree always exists, even if the r-tree table is ** empty. The nodeno of the root node is always 1. All other nodes in the ** table must be the same size as the root node. The content of each node ** is formatted as follows: ** ** 1. If the node is the root node (node 1), then the first 2 bytes ** of the node contain the tree depth as a big-endian integer. ** For non-root nodes, the first 2 bytes are left unused. ** ** 2. The next 2 bytes contain the number of entries currently ** stored in the node. ** ** 3. The remainder of the node contains the node entries. Each entry ** consists of a single 8-byte integer followed by an even number ** of 4-byte coordinates. For leaf nodes the integer is the rowid ** of a record. For internal nodes it is the node number of a ** child page. */ #if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_RTREE) #ifndef SQLITE_CORE /* #include "sqlite3ext.h" */ SQLITE_EXTENSION_INIT1 #else /* #include "sqlite3.h" */ #endif /* #include */ /* #include */ /* #include */ #ifndef SQLITE_AMALGAMATION #include "sqlite3rtree.h" typedef sqlite3_int64 i64; typedef unsigned char u8; typedef unsigned short u16; typedef unsigned int u32; #endif /* The following macro is used to suppress compiler warnings. 
*/ #ifndef UNUSED_PARAMETER # define UNUSED_PARAMETER(x) (void)(x) #endif typedef struct Rtree Rtree; typedef struct RtreeCursor RtreeCursor; typedef struct RtreeNode RtreeNode; typedef struct RtreeCell RtreeCell; typedef struct RtreeConstraint RtreeConstraint; typedef struct RtreeMatchArg RtreeMatchArg; typedef struct RtreeGeomCallback RtreeGeomCallback; typedef union RtreeCoord RtreeCoord; typedef struct RtreeSearchPoint RtreeSearchPoint; /* The rtree may have between 1 and RTREE_MAX_DIMENSIONS dimensions. */ #define RTREE_MAX_DIMENSIONS 5 /* Size of hash table Rtree.aHash. This hash table is not expected to ** ever contain very many entries, so a fixed number of buckets is ** used. */ #define HASHSIZE 97 /* The xBestIndex method of this virtual table requires an estimate of ** the number of rows in the virtual table to calculate the costs of ** various strategies. If possible, this estimate is loaded from the ** sqlite_stat1 table (with RTREE_MIN_ROWEST as a hard-coded minimum). ** Otherwise, if no sqlite_stat1 entry is available, use ** RTREE_DEFAULT_ROWEST. */ #define RTREE_DEFAULT_ROWEST 1048576 #define RTREE_MIN_ROWEST 100 /* ** An rtree virtual-table object. */ struct Rtree { sqlite3_vtab base; /* Base class. Must be first */ sqlite3 *db; /* Host database connection */ int iNodeSize; /* Size in bytes of each node in the node table */ u8 nDim; /* Number of dimensions */ u8 eCoordType; /* RTREE_COORD_REAL32 or RTREE_COORD_INT32 */ u8 nBytesPerCell; /* Bytes consumed per cell */ int iDepth; /* Current depth of the r-tree structure */ char *zDb; /* Name of database containing r-tree table */ char *zName; /* Name of r-tree table */ int nBusy; /* Current number of users of this structure */ i64 nRowEst; /* Estimated number of rows in this table */ /* List of nodes removed during a CondenseTree operation. List is ** linked together via the pointer normally used for hash chains - ** RtreeNode.pNext. RtreeNode.iNode stores the depth of the sub-tree ** headed by the node (leaf nodes have RtreeNode.iNode==0). */ RtreeNode *pDeleted; int iReinsertHeight; /* Height of sub-trees Reinsert() has run on */ /* Statements to read/write/delete a record from xxx_node */ sqlite3_stmt *pReadNode; sqlite3_stmt *pWriteNode; sqlite3_stmt *pDeleteNode; /* Statements to read/write/delete a record from xxx_rowid */ sqlite3_stmt *pReadRowid; sqlite3_stmt *pWriteRowid; sqlite3_stmt *pDeleteRowid; /* Statements to read/write/delete a record from xxx_parent */ sqlite3_stmt *pReadParent; sqlite3_stmt *pWriteParent; sqlite3_stmt *pDeleteParent; RtreeNode *aHash[HASHSIZE]; /* Hash table of in-memory nodes. */ }; /* Possible values for Rtree.eCoordType: */ #define RTREE_COORD_REAL32 0 #define RTREE_COORD_INT32 1 /* ** If SQLITE_RTREE_INT_ONLY is defined, then this virtual table will ** only deal with integer coordinates. No floating point operations ** will be done. */ #ifdef SQLITE_RTREE_INT_ONLY typedef sqlite3_int64 RtreeDValue; /* High accuracy coordinate */ typedef int RtreeValue; /* Low accuracy coordinate */ # define RTREE_ZERO 0 #else typedef double RtreeDValue; /* High accuracy coordinate */ typedef float RtreeValue; /* Low accuracy coordinate */ # define RTREE_ZERO 0.0 #endif /* ** When doing a search of an r-tree, instances of the following structure ** record intermediate results from the tree walk. ** ** The id is always a node-id. For iLevel>=1 the id is the node-id of ** the node that the RtreeSearchPoint represents. 
When iLevel==0, however, ** the id is of the parent node and the cell that RtreeSearchPoint ** represents is the iCell-th entry in the parent node. */ struct RtreeSearchPoint { RtreeDValue rScore; /* The score for this node. Smallest goes first. */ sqlite3_int64 id; /* Node ID */ u8 iLevel; /* 0=entries. 1=leaf node. 2+ for higher */ u8 eWithin; /* PARTLY_WITHIN or FULLY_WITHIN */ u8 iCell; /* Cell index within the node */ }; /* ** The minimum number of cells allowed for a node is a third of the ** maximum. In Gutman's notation: ** ** m = M/3 ** ** If an R*-tree "Reinsert" operation is required, the same number of ** cells are removed from the overfull node and reinserted into the tree. */ #define RTREE_MINCELLS(p) ((((p)->iNodeSize-4)/(p)->nBytesPerCell)/3) #define RTREE_REINSERT(p) RTREE_MINCELLS(p) #define RTREE_MAXCELLS 51 /* ** The smallest possible node-size is (512-64)==448 bytes. And the largest ** supported cell size is 48 bytes (8 byte rowid + ten 4 byte coordinates). ** Therefore all non-root nodes must contain at least 3 entries. Since ** 2^40 is greater than 2^64, an r-tree structure always has a depth of ** 40 or less. */ #define RTREE_MAX_DEPTH 40 /* ** Number of entries in the cursor RtreeNode cache. The first entry is ** used to cache the RtreeNode for RtreeCursor.sPoint. The remaining ** entries cache the RtreeNode for the first elements of the priority queue. */ #define RTREE_CACHE_SZ 5 /* ** An rtree cursor object. */ struct RtreeCursor { sqlite3_vtab_cursor base; /* Base class. Must be first */ u8 atEOF; /* True if at end of search */ u8 bPoint; /* True if sPoint is valid */ int iStrategy; /* Copy of idxNum search parameter */ int nConstraint; /* Number of entries in aConstraint */ RtreeConstraint *aConstraint; /* Search constraints. */ int nPointAlloc; /* Number of slots allocated for aPoint[] */ int nPoint; /* Number of slots used in aPoint[] */ int mxLevel; /* iLevel value for root of the tree */ RtreeSearchPoint *aPoint; /* Priority queue for search points */ RtreeSearchPoint sPoint; /* Cached next search point */ RtreeNode *aNode[RTREE_CACHE_SZ]; /* Rtree node cache */ u32 anQueue[RTREE_MAX_DEPTH+1]; /* Number of queued entries by iLevel */ }; /* Return the Rtree of a RtreeCursor */ #define RTREE_OF_CURSOR(X) ((Rtree*)((X)->base.pVtab)) /* ** A coordinate can be either a floating point number or a integer. All ** coordinates within a single R-Tree are always of the same time. */ union RtreeCoord { RtreeValue f; /* Floating point value */ int i; /* Integer value */ u32 u; /* Unsigned for byte-order conversions */ }; /* ** The argument is an RtreeCoord. Return the value stored within the RtreeCoord ** formatted as a RtreeDValue (double or int64). This macro assumes that local ** variable pRtree points to the Rtree structure associated with the ** RtreeCoord. */ #ifdef SQLITE_RTREE_INT_ONLY # define DCOORD(coord) ((RtreeDValue)coord.i) #else # define DCOORD(coord) ( \ (pRtree->eCoordType==RTREE_COORD_REAL32) ? \ ((double)coord.f) : \ ((double)coord.i) \ ) #endif /* ** A search constraint. */ struct RtreeConstraint { int iCoord; /* Index of constrained coordinate */ int op; /* Constraining operation */ union { RtreeDValue rValue; /* Constraint value. 
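** For the plain comparison operators (RTREE_EQ, RTREE_LE, RTREE_LT,
** RTREE_GE, RTREE_GT) this member holds the right-hand side of the
** constraint, e.g. the 40.0 in a hypothetical "x1 <= 40.0" term. For
** RTREE_MATCH and RTREE_QUERY constraints the callback pointers below
** are used instead.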
*/ int (*xGeom)(sqlite3_rtree_geometry*,int,RtreeDValue*,int*); int (*xQueryFunc)(sqlite3_rtree_query_info*); } u; sqlite3_rtree_query_info *pInfo; /* xGeom and xQueryFunc argument */ }; /* Possible values for RtreeConstraint.op */ #define RTREE_EQ 0x41 /* A */ #define RTREE_LE 0x42 /* B */ #define RTREE_LT 0x43 /* C */ #define RTREE_GE 0x44 /* D */ #define RTREE_GT 0x45 /* E */ #define RTREE_MATCH 0x46 /* F: Old-style sqlite3_rtree_geometry_callback() */ #define RTREE_QUERY 0x47 /* G: New-style sqlite3_rtree_query_callback() */ /* ** An rtree structure node. */ struct RtreeNode { RtreeNode *pParent; /* Parent node */ i64 iNode; /* The node number */ int nRef; /* Number of references to this node */ int isDirty; /* True if the node needs to be written to disk */ u8 *zData; /* Content of the node, as should be on disk */ RtreeNode *pNext; /* Next node in this hash collision chain */ }; /* Return the number of cells in a node */ #define NCELL(pNode) readInt16(&(pNode)->zData[2]) /* ** A single cell from a node, deserialized */ struct RtreeCell { i64 iRowid; /* Node or entry ID */ RtreeCoord aCoord[RTREE_MAX_DIMENSIONS*2]; /* Bounding box coordinates */ }; /* ** This object becomes the sqlite3_user_data() for the SQL functions ** that are created by sqlite3_rtree_geometry_callback() and ** sqlite3_rtree_query_callback() and which appear on the right of MATCH ** operators in order to constrain a search. ** ** xGeom and xQueryFunc are the callback functions. Exactly one of ** xGeom and xQueryFunc fields is non-NULL, depending on whether the ** SQL function was created using sqlite3_rtree_geometry_callback() or ** sqlite3_rtree_query_callback(). ** ** This object is deleted automatically by the destructor mechanism in ** sqlite3_create_function_v2(). */ struct RtreeGeomCallback { int (*xGeom)(sqlite3_rtree_geometry*, int, RtreeDValue*, int*); int (*xQueryFunc)(sqlite3_rtree_query_info*); void (*xDestructor)(void*); void *pContext; }; /* ** Value for the first field of every RtreeMatchArg object. The MATCH ** operator tests that the first field of a blob operand matches this ** value to avoid operating on invalid blobs (which could cause a segfault). */ #define RTREE_GEOMETRY_MAGIC 0x891245AB /* ** An instance of this structure (in the form of a BLOB) is returned by ** the SQL functions that sqlite3_rtree_geometry_callback() and ** sqlite3_rtree_query_callback() create, and is read as the right-hand ** operand to the MATCH operator of an R-Tree. */ struct RtreeMatchArg { u32 magic; /* Always RTREE_GEOMETRY_MAGIC */ RtreeGeomCallback cb; /* Info about the callback functions */ int nParam; /* Number of parameters to the SQL function */ sqlite3_value **apSqlParam; /* Original SQL parameter values */ RtreeDValue aParam[1]; /* Values for parameters to the SQL function */ }; #ifndef MAX # define MAX(x,y) ((x) < (y) ? (y) : (x)) #endif #ifndef MIN # define MIN(x,y) ((x) > (y) ? (y) : (x)) #endif /* ** Functions to deserialize a 16 bit integer, 32 bit real number and ** 64 bit integer. The deserialized value is returned. 
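**
** All of these values are stored big-endian (most significant byte
** first). As a worked example, the byte pair { 0x01, 0x2C } deserializes
** to the 16-bit integer (0x01<<8) + 0x2C == 300.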
*/ static int readInt16(u8 *p){ return (p[0]<<8) + p[1]; } static void readCoord(u8 *p, RtreeCoord *pCoord){ pCoord->u = ( (((u32)p[0]) << 24) + (((u32)p[1]) << 16) + (((u32)p[2]) << 8) + (((u32)p[3]) << 0) ); } static i64 readInt64(u8 *p){ return ( (((i64)p[0]) << 56) + (((i64)p[1]) << 48) + (((i64)p[2]) << 40) + (((i64)p[3]) << 32) + (((i64)p[4]) << 24) + (((i64)p[5]) << 16) + (((i64)p[6]) << 8) + (((i64)p[7]) << 0) ); } /* ** Functions to serialize a 16 bit integer, 32 bit real number and ** 64 bit integer. The value returned is the number of bytes written ** to the argument buffer (always 2, 4 and 8 respectively). */ static int writeInt16(u8 *p, int i){ p[0] = (i>> 8)&0xFF; p[1] = (i>> 0)&0xFF; return 2; } static int writeCoord(u8 *p, RtreeCoord *pCoord){ u32 i; assert( sizeof(RtreeCoord)==4 ); assert( sizeof(u32)==4 ); i = pCoord->u; p[0] = (i>>24)&0xFF; p[1] = (i>>16)&0xFF; p[2] = (i>> 8)&0xFF; p[3] = (i>> 0)&0xFF; return 4; } static int writeInt64(u8 *p, i64 i){ p[0] = (i>>56)&0xFF; p[1] = (i>>48)&0xFF; p[2] = (i>>40)&0xFF; p[3] = (i>>32)&0xFF; p[4] = (i>>24)&0xFF; p[5] = (i>>16)&0xFF; p[6] = (i>> 8)&0xFF; p[7] = (i>> 0)&0xFF; return 8; } /* ** Increment the reference count of node p. */ static void nodeReference(RtreeNode *p){ if( p ){ p->nRef++; } } /* ** Clear the content of node p (set all bytes to 0x00). */ static void nodeZero(Rtree *pRtree, RtreeNode *p){ memset(&p->zData[2], 0, pRtree->iNodeSize-2); p->isDirty = 1; } /* ** Given a node number iNode, return the corresponding key to use ** in the Rtree.aHash table. */ static int nodeHash(i64 iNode){ return iNode % HASHSIZE; } /* ** Search the node hash table for node iNode. If found, return a pointer ** to it. Otherwise, return 0. */ static RtreeNode *nodeHashLookup(Rtree *pRtree, i64 iNode){ RtreeNode *p; for(p=pRtree->aHash[nodeHash(iNode)]; p && p->iNode!=iNode; p=p->pNext); return p; } /* ** Add node pNode to the node hash table. */ static void nodeHashInsert(Rtree *pRtree, RtreeNode *pNode){ int iHash; assert( pNode->pNext==0 ); iHash = nodeHash(pNode->iNode); pNode->pNext = pRtree->aHash[iHash]; pRtree->aHash[iHash] = pNode; } /* ** Remove node pNode from the node hash table. */ static void nodeHashDelete(Rtree *pRtree, RtreeNode *pNode){ RtreeNode **pp; if( pNode->iNode!=0 ){ pp = &pRtree->aHash[nodeHash(pNode->iNode)]; for( ; (*pp)!=pNode; pp = &(*pp)->pNext){ assert(*pp); } *pp = pNode->pNext; pNode->pNext = 0; } } /* ** Allocate and return new r-tree node. Initially, (RtreeNode.iNode==0), ** indicating that node has not yet been assigned a node number. It is ** assigned a node number when nodeWrite() is called to write the ** node contents out to the database. */ static RtreeNode *nodeNew(Rtree *pRtree, RtreeNode *pParent){ RtreeNode *pNode; pNode = (RtreeNode *)sqlite3_malloc(sizeof(RtreeNode) + pRtree->iNodeSize); if( pNode ){ memset(pNode, 0, sizeof(RtreeNode) + pRtree->iNodeSize); pNode->zData = (u8 *)&pNode[1]; pNode->nRef = 1; pNode->pParent = pParent; pNode->isDirty = 1; nodeReference(pParent); } return pNode; } /* ** Obtain a reference to an r-tree node. */ static int nodeAcquire( Rtree *pRtree, /* R-tree structure */ i64 iNode, /* Node number to load */ RtreeNode *pParent, /* Either the parent node or NULL */ RtreeNode **ppNode /* OUT: Acquired node */ ){ int rc; int rc2 = SQLITE_OK; RtreeNode *pNode; /* Check if the requested node is already in the hash table. If so, ** increase its reference count and return it. 
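** (The hash table in question is Rtree.aHash, a fixed array of HASHSIZE
** bucket chains keyed by nodeHash(iNode), i.e. iNode % HASHSIZE, so a
** node that is already in memory is not read from the %_node shadow
** table a second time.)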
*/ if( (pNode = nodeHashLookup(pRtree, iNode)) ){ assert( !pParent || !pNode->pParent || pNode->pParent==pParent ); if( pParent && !pNode->pParent ){ nodeReference(pParent); pNode->pParent = pParent; } pNode->nRef++; *ppNode = pNode; return SQLITE_OK; } sqlite3_bind_int64(pRtree->pReadNode, 1, iNode); rc = sqlite3_step(pRtree->pReadNode); if( rc==SQLITE_ROW ){ const u8 *zBlob = sqlite3_column_blob(pRtree->pReadNode, 0); if( pRtree->iNodeSize==sqlite3_column_bytes(pRtree->pReadNode, 0) ){ pNode = (RtreeNode *)sqlite3_malloc(sizeof(RtreeNode)+pRtree->iNodeSize); if( !pNode ){ rc2 = SQLITE_NOMEM; }else{ pNode->pParent = pParent; pNode->zData = (u8 *)&pNode[1]; pNode->nRef = 1; pNode->iNode = iNode; pNode->isDirty = 0; pNode->pNext = 0; memcpy(pNode->zData, zBlob, pRtree->iNodeSize); nodeReference(pParent); } } } rc = sqlite3_reset(pRtree->pReadNode); if( rc==SQLITE_OK ) rc = rc2; /* If the root node was just loaded, set pRtree->iDepth to the height ** of the r-tree structure. A height of zero means all data is stored on ** the root node. A height of one means the children of the root node ** are the leaves, and so on. If the depth as specified on the root node ** is greater than RTREE_MAX_DEPTH, the r-tree structure must be corrupt. */ if( pNode && iNode==1 ){ pRtree->iDepth = readInt16(pNode->zData); if( pRtree->iDepth>RTREE_MAX_DEPTH ){ rc = SQLITE_CORRUPT_VTAB; } } /* If no error has occurred so far, check if the "number of entries" ** field on the node is too large. If so, set the return code to ** SQLITE_CORRUPT_VTAB. */ if( pNode && rc==SQLITE_OK ){ if( NCELL(pNode)>((pRtree->iNodeSize-4)/pRtree->nBytesPerCell) ){ rc = SQLITE_CORRUPT_VTAB; } } if( rc==SQLITE_OK ){ if( pNode!=0 ){ nodeHashInsert(pRtree, pNode); }else{ rc = SQLITE_CORRUPT_VTAB; } *ppNode = pNode; }else{ sqlite3_free(pNode); *ppNode = 0; } return rc; } /* ** Overwrite cell iCell of node pNode with the contents of pCell. */ static void nodeOverwriteCell( Rtree *pRtree, /* The overall R-Tree */ RtreeNode *pNode, /* The node into which the cell is to be written */ RtreeCell *pCell, /* The cell to write */ int iCell /* Index into pNode into which pCell is written */ ){ int ii; u8 *p = &pNode->zData[4 + pRtree->nBytesPerCell*iCell]; p += writeInt64(p, pCell->iRowid); for(ii=0; ii<(pRtree->nDim*2); ii++){ p += writeCoord(p, &pCell->aCoord[ii]); } pNode->isDirty = 1; } /* ** Remove the cell with index iCell from node pNode. */ static void nodeDeleteCell(Rtree *pRtree, RtreeNode *pNode, int iCell){ u8 *pDst = &pNode->zData[4 + pRtree->nBytesPerCell*iCell]; u8 *pSrc = &pDst[pRtree->nBytesPerCell]; int nByte = (NCELL(pNode) - iCell - 1) * pRtree->nBytesPerCell; memmove(pDst, pSrc, nByte); writeInt16(&pNode->zData[2], NCELL(pNode)-1); pNode->isDirty = 1; } /* ** Insert the contents of cell pCell into node pNode. If the insert ** is successful, return SQLITE_OK. ** ** If there is not enough free space in pNode, return SQLITE_FULL. */ static int nodeInsertCell( Rtree *pRtree, /* The overall R-Tree */ RtreeNode *pNode, /* Write new cell into this node */ RtreeCell *pCell /* The cell to be inserted */ ){ int nCell; /* Current number of cells in pNode */ int nMaxCell; /* Maximum number of cells for pNode */ nMaxCell = (pRtree->iNodeSize-4)/pRtree->nBytesPerCell; nCell = NCELL(pNode); assert( nCell<=nMaxCell ); if( nCellzData[2], nCell+1); pNode->isDirty = 1; } return (nCell==nMaxCell); } /* ** If the node is dirty, write it out to the database. 
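** "Dirty" means the in-memory image in RtreeNode.zData has been modified
** since the node was read, or the node was newly created by nodeNew().
** The write is an INSERT OR REPLACE into the %_node shadow table via the
** prepared pWriteNode statement, and a node that does not yet have a
** node number (iNode==0) picks one up from sqlite3_last_insert_rowid()
** afterwards.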
*/ static int nodeWrite(Rtree *pRtree, RtreeNode *pNode){ int rc = SQLITE_OK; if( pNode->isDirty ){ sqlite3_stmt *p = pRtree->pWriteNode; if( pNode->iNode ){ sqlite3_bind_int64(p, 1, pNode->iNode); }else{ sqlite3_bind_null(p, 1); } sqlite3_bind_blob(p, 2, pNode->zData, pRtree->iNodeSize, SQLITE_STATIC); sqlite3_step(p); pNode->isDirty = 0; rc = sqlite3_reset(p); if( pNode->iNode==0 && rc==SQLITE_OK ){ pNode->iNode = sqlite3_last_insert_rowid(pRtree->db); nodeHashInsert(pRtree, pNode); } } return rc; } /* ** Release a reference to a node. If the node is dirty and the reference ** count drops to zero, the node data is written to the database. */ static int nodeRelease(Rtree *pRtree, RtreeNode *pNode){ int rc = SQLITE_OK; if( pNode ){ assert( pNode->nRef>0 ); pNode->nRef--; if( pNode->nRef==0 ){ if( pNode->iNode==1 ){ pRtree->iDepth = -1; } if( pNode->pParent ){ rc = nodeRelease(pRtree, pNode->pParent); } if( rc==SQLITE_OK ){ rc = nodeWrite(pRtree, pNode); } nodeHashDelete(pRtree, pNode); sqlite3_free(pNode); } } return rc; } /* ** Return the 64-bit integer value associated with cell iCell of ** node pNode. If pNode is a leaf node, this is a rowid. If it is ** an internal node, then the 64-bit integer is a child page number. */ static i64 nodeGetRowid( Rtree *pRtree, /* The overall R-Tree */ RtreeNode *pNode, /* The node from which to extract the ID */ int iCell /* The cell index from which to extract the ID */ ){ assert( iCellzData[4 + pRtree->nBytesPerCell*iCell]); } /* ** Return coordinate iCoord from cell iCell in node pNode. */ static void nodeGetCoord( Rtree *pRtree, /* The overall R-Tree */ RtreeNode *pNode, /* The node from which to extract a coordinate */ int iCell, /* The index of the cell within the node */ int iCoord, /* Which coordinate to extract */ RtreeCoord *pCoord /* OUT: Space to write result to */ ){ readCoord(&pNode->zData[12 + pRtree->nBytesPerCell*iCell + 4*iCoord], pCoord); } /* ** Deserialize cell iCell of node pNode. Populate the structure pointed ** to by pCell with the results. */ static void nodeGetCell( Rtree *pRtree, /* The overall R-Tree */ RtreeNode *pNode, /* The node containing the cell to be read */ int iCell, /* Index of the cell within the node */ RtreeCell *pCell /* OUT: Write the cell contents here */ ){ u8 *pData; RtreeCoord *pCoord; int ii; pCell->iRowid = nodeGetRowid(pRtree, pNode, iCell); pData = pNode->zData + (12 + pRtree->nBytesPerCell*iCell); pCoord = pCell->aCoord; for(ii=0; iinDim*2; ii++){ readCoord(&pData[ii*4], &pCoord[ii]); } } /* Forward declaration for the function that does the work of ** the virtual table module xCreate() and xConnect() methods. */ static int rtreeInit( sqlite3 *, void *, int, const char *const*, sqlite3_vtab **, char **, int ); /* ** Rtree virtual table module xCreate method. */ static int rtreeCreate( sqlite3 *db, void *pAux, int argc, const char *const*argv, sqlite3_vtab **ppVtab, char **pzErr ){ return rtreeInit(db, pAux, argc, argv, ppVtab, pzErr, 1); } /* ** Rtree virtual table module xConnect method. */ static int rtreeConnect( sqlite3 *db, void *pAux, int argc, const char *const*argv, sqlite3_vtab **ppVtab, char **pzErr ){ return rtreeInit(db, pAux, argc, argv, ppVtab, pzErr, 0); } /* ** Increment the r-tree reference count. */ static void rtreeReference(Rtree *pRtree){ pRtree->nBusy++; } /* ** Decrement the r-tree reference count. When the reference count reaches ** zero the structure is deleted. 
*/ static void rtreeRelease(Rtree *pRtree){ pRtree->nBusy--; if( pRtree->nBusy==0 ){ sqlite3_finalize(pRtree->pReadNode); sqlite3_finalize(pRtree->pWriteNode); sqlite3_finalize(pRtree->pDeleteNode); sqlite3_finalize(pRtree->pReadRowid); sqlite3_finalize(pRtree->pWriteRowid); sqlite3_finalize(pRtree->pDeleteRowid); sqlite3_finalize(pRtree->pReadParent); sqlite3_finalize(pRtree->pWriteParent); sqlite3_finalize(pRtree->pDeleteParent); sqlite3_free(pRtree); } } /* ** Rtree virtual table module xDisconnect method. */ static int rtreeDisconnect(sqlite3_vtab *pVtab){ rtreeRelease((Rtree *)pVtab); return SQLITE_OK; } /* ** Rtree virtual table module xDestroy method. */ static int rtreeDestroy(sqlite3_vtab *pVtab){ Rtree *pRtree = (Rtree *)pVtab; int rc; char *zCreate = sqlite3_mprintf( "DROP TABLE '%q'.'%q_node';" "DROP TABLE '%q'.'%q_rowid';" "DROP TABLE '%q'.'%q_parent';", pRtree->zDb, pRtree->zName, pRtree->zDb, pRtree->zName, pRtree->zDb, pRtree->zName ); if( !zCreate ){ rc = SQLITE_NOMEM; }else{ rc = sqlite3_exec(pRtree->db, zCreate, 0, 0, 0); sqlite3_free(zCreate); } if( rc==SQLITE_OK ){ rtreeRelease(pRtree); } return rc; } /* ** Rtree virtual table module xOpen method. */ static int rtreeOpen(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor){ int rc = SQLITE_NOMEM; RtreeCursor *pCsr; pCsr = (RtreeCursor *)sqlite3_malloc(sizeof(RtreeCursor)); if( pCsr ){ memset(pCsr, 0, sizeof(RtreeCursor)); pCsr->base.pVtab = pVTab; rc = SQLITE_OK; } *ppCursor = (sqlite3_vtab_cursor *)pCsr; return rc; } /* ** Free the RtreeCursor.aConstraint[] array and its contents. */ static void freeCursorConstraints(RtreeCursor *pCsr){ if( pCsr->aConstraint ){ int i; /* Used to iterate through constraint array */ for(i=0; inConstraint; i++){ sqlite3_rtree_query_info *pInfo = pCsr->aConstraint[i].pInfo; if( pInfo ){ if( pInfo->xDelUser ) pInfo->xDelUser(pInfo->pUser); sqlite3_free(pInfo); } } sqlite3_free(pCsr->aConstraint); pCsr->aConstraint = 0; } } /* ** Rtree virtual table module xClose method. */ static int rtreeClose(sqlite3_vtab_cursor *cur){ Rtree *pRtree = (Rtree *)(cur->pVtab); int ii; RtreeCursor *pCsr = (RtreeCursor *)cur; freeCursorConstraints(pCsr); sqlite3_free(pCsr->aPoint); for(ii=0; iiaNode[ii]); sqlite3_free(pCsr); return SQLITE_OK; } /* ** Rtree virtual table module xEof method. ** ** Return non-zero if the cursor does not currently point to a valid ** record (i.e if the scan has finished), or zero otherwise. */ static int rtreeEof(sqlite3_vtab_cursor *cur){ RtreeCursor *pCsr = (RtreeCursor *)cur; return pCsr->atEOF; } /* ** Convert raw bits from the on-disk RTree record into a coordinate value. ** The on-disk format is big-endian and needs to be converted for little- ** endian platforms. The on-disk record stores integer coordinates if ** eInt is true and it stores 32-bit floating point records if eInt is ** false. a[] is the four bytes of the on-disk record to be decoded. ** Store the results in "r". ** ** There are three versions of this macro, one each for little-endian and ** big-endian processors and a third generic implementation. The endian- ** specific implementations are much faster and are preferred if the ** processor endianness is known at compile-time. The SQLITE_BYTEORDER ** macro is part of sqliteInt.h and hence the endian-specific ** implementation will only be used if this module is compiled as part ** of the amalgamation. 
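**
** As a worked example of the little-endian case: the on-disk big-endian
** bytes { 0x00, 0x00, 0x01, 0x2C } are copied into c.u as 0x2C010000 on
** a little-endian host, and the shift-and-mask expression rearranges
** that back into 0x0000012C (decimal 300) before the bits are
** reinterpreted as an integer or a float.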
*/ #if defined(SQLITE_BYTEORDER) && SQLITE_BYTEORDER==1234 #define RTREE_DECODE_COORD(eInt, a, r) { \ RtreeCoord c; /* Coordinate decoded */ \ memcpy(&c.u,a,4); \ c.u = ((c.u>>24)&0xff)|((c.u>>8)&0xff00)| \ ((c.u&0xff)<<24)|((c.u&0xff00)<<8); \ r = eInt ? (sqlite3_rtree_dbl)c.i : (sqlite3_rtree_dbl)c.f; \ } #elif defined(SQLITE_BYTEORDER) && SQLITE_BYTEORDER==4321 #define RTREE_DECODE_COORD(eInt, a, r) { \ RtreeCoord c; /* Coordinate decoded */ \ memcpy(&c.u,a,4); \ r = eInt ? (sqlite3_rtree_dbl)c.i : (sqlite3_rtree_dbl)c.f; \ } #else #define RTREE_DECODE_COORD(eInt, a, r) { \ RtreeCoord c; /* Coordinate decoded */ \ c.u = ((u32)a[0]<<24) + ((u32)a[1]<<16) \ +((u32)a[2]<<8) + a[3]; \ r = eInt ? (sqlite3_rtree_dbl)c.i : (sqlite3_rtree_dbl)c.f; \ } #endif /* ** Check the RTree node or entry given by pCellData and p against the MATCH ** constraint pConstraint. */ static int rtreeCallbackConstraint( RtreeConstraint *pConstraint, /* The constraint to test */ int eInt, /* True if RTree holding integer coordinates */ u8 *pCellData, /* Raw cell content */ RtreeSearchPoint *pSearch, /* Container of this cell */ sqlite3_rtree_dbl *prScore, /* OUT: score for the cell */ int *peWithin /* OUT: visibility of the cell */ ){ int i; /* Loop counter */ sqlite3_rtree_query_info *pInfo = pConstraint->pInfo; /* Callback info */ int nCoord = pInfo->nCoord; /* No. of coordinates */ int rc; /* Callback return code */ sqlite3_rtree_dbl aCoord[RTREE_MAX_DIMENSIONS*2]; /* Decoded coordinates */ assert( pConstraint->op==RTREE_MATCH || pConstraint->op==RTREE_QUERY ); assert( nCoord==2 || nCoord==4 || nCoord==6 || nCoord==8 || nCoord==10 ); if( pConstraint->op==RTREE_QUERY && pSearch->iLevel==1 ){ pInfo->iRowid = readInt64(pCellData); } pCellData += 8; for(i=0; iop==RTREE_MATCH ){ rc = pConstraint->u.xGeom((sqlite3_rtree_geometry*)pInfo, nCoord, aCoord, &i); if( i==0 ) *peWithin = NOT_WITHIN; *prScore = RTREE_ZERO; }else{ pInfo->aCoord = aCoord; pInfo->iLevel = pSearch->iLevel - 1; pInfo->rScore = pInfo->rParentScore = pSearch->rScore; pInfo->eWithin = pInfo->eParentWithin = pSearch->eWithin; rc = pConstraint->u.xQueryFunc(pInfo); if( pInfo->eWithin<*peWithin ) *peWithin = pInfo->eWithin; if( pInfo->rScore<*prScore || *prScorerScore; } } return rc; } /* ** Check the internal RTree node given by pCellData against constraint p. ** If this constraint cannot be satisfied by any child within the node, ** set *peWithin to NOT_WITHIN. */ static void rtreeNonleafConstraint( RtreeConstraint *p, /* The constraint to test */ int eInt, /* True if RTree holds integer coordinates */ u8 *pCellData, /* Raw cell content as appears on disk */ int *peWithin /* Adjust downward, as appropriate */ ){ sqlite3_rtree_dbl val; /* Coordinate value convert to a double */ /* p->iCoord might point to either a lower or upper bound coordinate ** in a coordinate pair. But make pCellData point to the lower bound. 
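** (Masking the index with 0xfe clears its low bit, so an odd index that
** names an upper-bound coordinate is mapped to the even index of the
** matching lower bound; the upper bound is then read 4 bytes further on
** when needed.)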
*/ pCellData += 8 + 4*(p->iCoord&0xfe); assert(p->op==RTREE_LE || p->op==RTREE_LT || p->op==RTREE_GE || p->op==RTREE_GT || p->op==RTREE_EQ ); switch( p->op ){ case RTREE_LE: case RTREE_LT: case RTREE_EQ: RTREE_DECODE_COORD(eInt, pCellData, val); /* val now holds the lower bound of the coordinate pair */ if( p->u.rValue>=val ) return; if( p->op!=RTREE_EQ ) break; /* RTREE_LE and RTREE_LT end here */ /* Fall through for the RTREE_EQ case */ default: /* RTREE_GT or RTREE_GE, or fallthrough of RTREE_EQ */ pCellData += 4; RTREE_DECODE_COORD(eInt, pCellData, val); /* val now holds the upper bound of the coordinate pair */ if( p->u.rValue<=val ) return; } *peWithin = NOT_WITHIN; } /* ** Check the leaf RTree cell given by pCellData against constraint p. ** If this constraint is not satisfied, set *peWithin to NOT_WITHIN. ** If the constraint is satisfied, leave *peWithin unchanged. ** ** The constraint is of the form: xN op $val ** ** The op is given by p->op. The xN is p->iCoord-th coordinate in ** pCellData. $val is given by p->u.rValue. */ static void rtreeLeafConstraint( RtreeConstraint *p, /* The constraint to test */ int eInt, /* True if RTree holds integer coordinates */ u8 *pCellData, /* Raw cell content as appears on disk */ int *peWithin /* Adjust downward, as appropriate */ ){ RtreeDValue xN; /* Coordinate value converted to a double */ assert(p->op==RTREE_LE || p->op==RTREE_LT || p->op==RTREE_GE || p->op==RTREE_GT || p->op==RTREE_EQ ); pCellData += 8 + p->iCoord*4; RTREE_DECODE_COORD(eInt, pCellData, xN); switch( p->op ){ case RTREE_LE: if( xN <= p->u.rValue ) return; break; case RTREE_LT: if( xN < p->u.rValue ) return; break; case RTREE_GE: if( xN >= p->u.rValue ) return; break; case RTREE_GT: if( xN > p->u.rValue ) return; break; default: if( xN == p->u.rValue ) return; break; } *peWithin = NOT_WITHIN; } /* ** One of the cells in node pNode is guaranteed to have a 64-bit ** integer value equal to iRowid. Return the index of this cell. */ static int nodeRowidIndex( Rtree *pRtree, RtreeNode *pNode, i64 iRowid, int *piIndex ){ int ii; int nCell = NCELL(pNode); assert( nCell<200 ); for(ii=0; iipParent; if( pParent ){ return nodeRowidIndex(pRtree, pParent, pNode->iNode, piIndex); } *piIndex = -1; return SQLITE_OK; } /* ** Compare two search points. Return negative, zero, or positive if the first ** is less than, equal to, or greater than the second. ** ** The rScore is the primary key. Smaller rScore values come first. ** If the rScore is a tie, then use iLevel as the tie breaker with smaller ** iLevel values coming first. In this way, if rScore is the same for all ** SearchPoints, then iLevel becomes the deciding factor and the result ** is a depth-first search, which is the desired default behavior. */ static int rtreeSearchPointCompare( const RtreeSearchPoint *pA, const RtreeSearchPoint *pB ){ if( pA->rScorerScore ) return -1; if( pA->rScore>pB->rScore ) return +1; if( pA->iLeveliLevel ) return -1; if( pA->iLevel>pB->iLevel ) return +1; return 0; } /* ** Interchange to search points in a cursor. */ static void rtreeSearchPointSwap(RtreeCursor *p, int i, int j){ RtreeSearchPoint t = p->aPoint[i]; assert( iaPoint[i] = p->aPoint[j]; p->aPoint[j] = t; i++; j++; if( i=RTREE_CACHE_SZ ){ nodeRelease(RTREE_OF_CURSOR(p), p->aNode[i]); p->aNode[i] = 0; }else{ RtreeNode *pTemp = p->aNode[i]; p->aNode[i] = p->aNode[j]; p->aNode[j] = pTemp; } } } /* ** Return the search point with the lowest current score. */ static RtreeSearchPoint *rtreeSearchPointFirst(RtreeCursor *pCur){ return pCur->bPoint ? 
&pCur->sPoint : pCur->nPoint ? pCur->aPoint : 0; } /* ** Get the RtreeNode for the search point with the lowest score. */ static RtreeNode *rtreeNodeOfFirstSearchPoint(RtreeCursor *pCur, int *pRC){ sqlite3_int64 id; int ii = 1 - pCur->bPoint; assert( ii==0 || ii==1 ); assert( pCur->bPoint || pCur->nPoint ); if( pCur->aNode[ii]==0 ){ assert( pRC!=0 ); id = ii ? pCur->aPoint[0].id : pCur->sPoint.id; *pRC = nodeAcquire(RTREE_OF_CURSOR(pCur), id, 0, &pCur->aNode[ii]); } return pCur->aNode[ii]; } /* ** Push a new element onto the priority queue */ static RtreeSearchPoint *rtreeEnqueue( RtreeCursor *pCur, /* The cursor */ RtreeDValue rScore, /* Score for the new search point */ u8 iLevel /* Level for the new search point */ ){ int i, j; RtreeSearchPoint *pNew; if( pCur->nPoint>=pCur->nPointAlloc ){ int nNew = pCur->nPointAlloc*2 + 8; pNew = sqlite3_realloc(pCur->aPoint, nNew*sizeof(pCur->aPoint[0])); if( pNew==0 ) return 0; pCur->aPoint = pNew; pCur->nPointAlloc = nNew; } i = pCur->nPoint++; pNew = pCur->aPoint + i; pNew->rScore = rScore; pNew->iLevel = iLevel; assert( iLevel<=RTREE_MAX_DEPTH ); while( i>0 ){ RtreeSearchPoint *pParent; j = (i-1)/2; pParent = pCur->aPoint + j; if( rtreeSearchPointCompare(pNew, pParent)>=0 ) break; rtreeSearchPointSwap(pCur, j, i); i = j; pNew = pParent; } return pNew; } /* ** Allocate a new RtreeSearchPoint and return a pointer to it. Return ** NULL if malloc fails. */ static RtreeSearchPoint *rtreeSearchPointNew( RtreeCursor *pCur, /* The cursor */ RtreeDValue rScore, /* Score for the new search point */ u8 iLevel /* Level for the new search point */ ){ RtreeSearchPoint *pNew, *pFirst; pFirst = rtreeSearchPointFirst(pCur); pCur->anQueue[iLevel]++; if( pFirst==0 || pFirst->rScore>rScore || (pFirst->rScore==rScore && pFirst->iLevel>iLevel) ){ if( pCur->bPoint ){ int ii; pNew = rtreeEnqueue(pCur, rScore, iLevel); if( pNew==0 ) return 0; ii = (int)(pNew - pCur->aPoint) + 1; if( iiaNode[ii]==0 ); pCur->aNode[ii] = pCur->aNode[0]; }else{ nodeRelease(RTREE_OF_CURSOR(pCur), pCur->aNode[0]); } pCur->aNode[0] = 0; *pNew = pCur->sPoint; } pCur->sPoint.rScore = rScore; pCur->sPoint.iLevel = iLevel; pCur->bPoint = 1; return &pCur->sPoint; }else{ return rtreeEnqueue(pCur, rScore, iLevel); } } #if 0 /* Tracing routines for the RtreeSearchPoint queue */ static void tracePoint(RtreeSearchPoint *p, int idx, RtreeCursor *pCur){ if( idx<0 ){ printf(" s"); }else{ printf("%2d", idx); } printf(" %d.%05lld.%02d %g %d", p->iLevel, p->id, p->iCell, p->rScore, p->eWithin ); idx++; if( idxaNode[idx]); }else{ printf("\n"); } } static void traceQueue(RtreeCursor *pCur, const char *zPrefix){ int ii; printf("=== %9s ", zPrefix); if( pCur->bPoint ){ tracePoint(&pCur->sPoint, -1, pCur); } for(ii=0; iinPoint; ii++){ if( ii>0 || pCur->bPoint ) printf(" "); tracePoint(&pCur->aPoint[ii], ii, pCur); } } # define RTREE_QUEUE_TRACE(A,B) traceQueue(A,B) #else # define RTREE_QUEUE_TRACE(A,B) /* no-op */ #endif /* Remove the search point with the lowest current score. 
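** The aPoint[] array is maintained as a binary min-heap ordered by
** rtreeSearchPointCompare(): the parent of slot i lives in slot (i-1)/2
** and its children in slots 2*i+1 and 2*i+2. When the front of aPoint[]
** is popped, the last element is moved into slot 0 and sifted back down,
** the mirror image of the sift-up performed by rtreeEnqueue().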
*/ static void rtreeSearchPointPop(RtreeCursor *p){ int i, j, k, n; i = 1 - p->bPoint; assert( i==0 || i==1 ); if( p->aNode[i] ){ nodeRelease(RTREE_OF_CURSOR(p), p->aNode[i]); p->aNode[i] = 0; } if( p->bPoint ){ p->anQueue[p->sPoint.iLevel]--; p->bPoint = 0; }else if( p->nPoint ){ p->anQueue[p->aPoint[0].iLevel]--; n = --p->nPoint; p->aPoint[0] = p->aPoint[n]; if( naNode[1] = p->aNode[n+1]; p->aNode[n+1] = 0; } i = 0; while( (j = i*2+1)aPoint[k], &p->aPoint[j])<0 ){ if( rtreeSearchPointCompare(&p->aPoint[k], &p->aPoint[i])<0 ){ rtreeSearchPointSwap(p, i, k); i = k; }else{ break; } }else{ if( rtreeSearchPointCompare(&p->aPoint[j], &p->aPoint[i])<0 ){ rtreeSearchPointSwap(p, i, j); i = j; }else{ break; } } } } } /* ** Continue the search on cursor pCur until the front of the queue ** contains an entry suitable for returning as a result-set row, ** or until the RtreeSearchPoint queue is empty, indicating that the ** query has completed. */ static int rtreeStepToLeaf(RtreeCursor *pCur){ RtreeSearchPoint *p; Rtree *pRtree = RTREE_OF_CURSOR(pCur); RtreeNode *pNode; int eWithin; int rc = SQLITE_OK; int nCell; int nConstraint = pCur->nConstraint; int ii; int eInt; RtreeSearchPoint x; eInt = pRtree->eCoordType==RTREE_COORD_INT32; while( (p = rtreeSearchPointFirst(pCur))!=0 && p->iLevel>0 ){ pNode = rtreeNodeOfFirstSearchPoint(pCur, &rc); if( rc ) return rc; nCell = NCELL(pNode); assert( nCell<200 ); while( p->iCellzData + (4+pRtree->nBytesPerCell*p->iCell); eWithin = FULLY_WITHIN; for(ii=0; iiaConstraint + ii; if( pConstraint->op>=RTREE_MATCH ){ rc = rtreeCallbackConstraint(pConstraint, eInt, pCellData, p, &rScore, &eWithin); if( rc ) return rc; }else if( p->iLevel==1 ){ rtreeLeafConstraint(pConstraint, eInt, pCellData, &eWithin); }else{ rtreeNonleafConstraint(pConstraint, eInt, pCellData, &eWithin); } if( eWithin==NOT_WITHIN ) break; } p->iCell++; if( eWithin==NOT_WITHIN ) continue; x.iLevel = p->iLevel - 1; if( x.iLevel ){ x.id = readInt64(pCellData); x.iCell = 0; }else{ x.id = p->id; x.iCell = p->iCell - 1; } if( p->iCell>=nCell ){ RTREE_QUEUE_TRACE(pCur, "POP-S:"); rtreeSearchPointPop(pCur); } if( rScoreeWithin = eWithin; p->id = x.id; p->iCell = x.iCell; RTREE_QUEUE_TRACE(pCur, "PUSH-S:"); break; } if( p->iCell>=nCell ){ RTREE_QUEUE_TRACE(pCur, "POP-Se:"); rtreeSearchPointPop(pCur); } } pCur->atEOF = p==0; return SQLITE_OK; } /* ** Rtree virtual table module xNext method. */ static int rtreeNext(sqlite3_vtab_cursor *pVtabCursor){ RtreeCursor *pCsr = (RtreeCursor *)pVtabCursor; int rc = SQLITE_OK; /* Move to the next entry that matches the configured constraints. */ RTREE_QUEUE_TRACE(pCsr, "POP-Nx:"); rtreeSearchPointPop(pCsr); rc = rtreeStepToLeaf(pCsr); return rc; } /* ** Rtree virtual table module xRowid method. */ static int rtreeRowid(sqlite3_vtab_cursor *pVtabCursor, sqlite_int64 *pRowid){ RtreeCursor *pCsr = (RtreeCursor *)pVtabCursor; RtreeSearchPoint *p = rtreeSearchPointFirst(pCsr); int rc = SQLITE_OK; RtreeNode *pNode = rtreeNodeOfFirstSearchPoint(pCsr, &rc); if( rc==SQLITE_OK && p ){ *pRowid = nodeGetRowid(RTREE_OF_CURSOR(pCsr), pNode, p->iCell); } return rc; } /* ** Rtree virtual table module xColumn method. 
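** Column 0 of an rtree table is the rowid of the current entry. Columns
** 1 through nDim*2 are the stored coordinates, returned as REAL values
** for RTREE_COORD_REAL32 tables and as INTEGER values otherwise.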
*/ static int rtreeColumn(sqlite3_vtab_cursor *cur, sqlite3_context *ctx, int i){ Rtree *pRtree = (Rtree *)cur->pVtab; RtreeCursor *pCsr = (RtreeCursor *)cur; RtreeSearchPoint *p = rtreeSearchPointFirst(pCsr); RtreeCoord c; int rc = SQLITE_OK; RtreeNode *pNode = rtreeNodeOfFirstSearchPoint(pCsr, &rc); if( rc ) return rc; if( p==0 ) return SQLITE_OK; if( i==0 ){ sqlite3_result_int64(ctx, nodeGetRowid(pRtree, pNode, p->iCell)); }else{ if( rc ) return rc; nodeGetCoord(pRtree, pNode, p->iCell, i-1, &c); #ifndef SQLITE_RTREE_INT_ONLY if( pRtree->eCoordType==RTREE_COORD_REAL32 ){ sqlite3_result_double(ctx, c.f); }else #endif { assert( pRtree->eCoordType==RTREE_COORD_INT32 ); sqlite3_result_int(ctx, c.i); } } return SQLITE_OK; } /* ** Use nodeAcquire() to obtain the leaf node containing the record with ** rowid iRowid. If successful, set *ppLeaf to point to the node and ** return SQLITE_OK. If there is no such record in the table, set ** *ppLeaf to 0 and return SQLITE_OK. If an error occurs, set *ppLeaf ** to zero and return an SQLite error code. */ static int findLeafNode( Rtree *pRtree, /* RTree to search */ i64 iRowid, /* The rowid searching for */ RtreeNode **ppLeaf, /* Write the node here */ sqlite3_int64 *piNode /* Write the node-id here */ ){ int rc; *ppLeaf = 0; sqlite3_bind_int64(pRtree->pReadRowid, 1, iRowid); if( sqlite3_step(pRtree->pReadRowid)==SQLITE_ROW ){ i64 iNode = sqlite3_column_int64(pRtree->pReadRowid, 0); if( piNode ) *piNode = iNode; rc = nodeAcquire(pRtree, iNode, 0, ppLeaf); sqlite3_reset(pRtree->pReadRowid); }else{ rc = sqlite3_reset(pRtree->pReadRowid); } return rc; } /* ** This function is called to configure the RtreeConstraint object passed ** as the second argument for a MATCH constraint. The value passed as the ** first argument to this function is the right-hand operand to the MATCH ** operator. */ static int deserializeGeometry(sqlite3_value *pValue, RtreeConstraint *pCons){ RtreeMatchArg *pBlob; /* BLOB returned by geometry function */ sqlite3_rtree_query_info *pInfo; /* Callback information */ int nBlob; /* Size of the geometry function blob */ int nExpected; /* Expected size of the BLOB */ /* Check that value is actually a blob. */ if( sqlite3_value_type(pValue)!=SQLITE_BLOB ) return SQLITE_ERROR; /* Check that the blob is roughly the right size. */ nBlob = sqlite3_value_bytes(pValue); if( nBlob<(int)sizeof(RtreeMatchArg) ){ return SQLITE_ERROR; } pInfo = (sqlite3_rtree_query_info*)sqlite3_malloc( sizeof(*pInfo)+nBlob ); if( !pInfo ) return SQLITE_NOMEM; memset(pInfo, 0, sizeof(*pInfo)); pBlob = (RtreeMatchArg*)&pInfo[1]; memcpy(pBlob, sqlite3_value_blob(pValue), nBlob); nExpected = (int)(sizeof(RtreeMatchArg) + pBlob->nParam*sizeof(sqlite3_value*) + (pBlob->nParam-1)*sizeof(RtreeDValue)); if( pBlob->magic!=RTREE_GEOMETRY_MAGIC || nBlob!=nExpected ){ sqlite3_free(pInfo); return SQLITE_ERROR; } pInfo->pContext = pBlob->cb.pContext; pInfo->nParam = pBlob->nParam; pInfo->aParam = pBlob->aParam; pInfo->apSqlParam = pBlob->apSqlParam; if( pBlob->cb.xGeom ){ pCons->u.xGeom = pBlob->cb.xGeom; }else{ pCons->op = RTREE_QUERY; pCons->u.xQueryFunc = pBlob->cb.xQueryFunc; } pCons->pInfo = pInfo; return SQLITE_OK; } /* ** Rtree virtual table module xFilter method. 
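** Two scan strategies reach this point (see rtreeBestIndex() below):
** idxNum==1 performs a direct lookup of the single rowid supplied in
** argv[0], while idxNum==2 seeds the search-point queue with the root
** node and descends the tree, applying the constraints encoded in
** idxStr.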
*/ static int rtreeFilter( sqlite3_vtab_cursor *pVtabCursor, int idxNum, const char *idxStr, int argc, sqlite3_value **argv ){ Rtree *pRtree = (Rtree *)pVtabCursor->pVtab; RtreeCursor *pCsr = (RtreeCursor *)pVtabCursor; RtreeNode *pRoot = 0; int ii; int rc = SQLITE_OK; int iCell = 0; rtreeReference(pRtree); /* Reset the cursor to the same state as rtreeOpen() leaves it in. */ freeCursorConstraints(pCsr); sqlite3_free(pCsr->aPoint); memset(pCsr, 0, sizeof(RtreeCursor)); pCsr->base.pVtab = (sqlite3_vtab*)pRtree; pCsr->iStrategy = idxNum; if( idxNum==1 ){ /* Special case - lookup by rowid. */ RtreeNode *pLeaf; /* Leaf on which the required cell resides */ RtreeSearchPoint *p; /* Search point for the the leaf */ i64 iRowid = sqlite3_value_int64(argv[0]); i64 iNode = 0; rc = findLeafNode(pRtree, iRowid, &pLeaf, &iNode); if( rc==SQLITE_OK && pLeaf!=0 ){ p = rtreeSearchPointNew(pCsr, RTREE_ZERO, 0); assert( p!=0 ); /* Always returns pCsr->sPoint */ pCsr->aNode[0] = pLeaf; p->id = iNode; p->eWithin = PARTLY_WITHIN; rc = nodeRowidIndex(pRtree, pLeaf, iRowid, &iCell); p->iCell = iCell; RTREE_QUEUE_TRACE(pCsr, "PUSH-F1:"); }else{ pCsr->atEOF = 1; } }else{ /* Normal case - r-tree scan. Set up the RtreeCursor.aConstraint array ** with the configured constraints. */ rc = nodeAcquire(pRtree, 1, 0, &pRoot); if( rc==SQLITE_OK && argc>0 ){ pCsr->aConstraint = sqlite3_malloc(sizeof(RtreeConstraint)*argc); pCsr->nConstraint = argc; if( !pCsr->aConstraint ){ rc = SQLITE_NOMEM; }else{ memset(pCsr->aConstraint, 0, sizeof(RtreeConstraint)*argc); memset(pCsr->anQueue, 0, sizeof(u32)*(pRtree->iDepth + 1)); assert( (idxStr==0 && argc==0) || (idxStr && (int)strlen(idxStr)==argc*2) ); for(ii=0; iiaConstraint[ii]; p->op = idxStr[ii*2]; p->iCoord = idxStr[ii*2+1]-'0'; if( p->op>=RTREE_MATCH ){ /* A MATCH operator. The right-hand-side must be a blob that ** can be cast into an RtreeMatchArg object. One created using ** an sqlite3_rtree_geometry_callback() SQL user function. */ rc = deserializeGeometry(argv[ii], p); if( rc!=SQLITE_OK ){ break; } p->pInfo->nCoord = pRtree->nDim*2; p->pInfo->anQueue = pCsr->anQueue; p->pInfo->mxLevel = pRtree->iDepth + 1; }else{ #ifdef SQLITE_RTREE_INT_ONLY p->u.rValue = sqlite3_value_int64(argv[ii]); #else p->u.rValue = sqlite3_value_double(argv[ii]); #endif } } } } if( rc==SQLITE_OK ){ RtreeSearchPoint *pNew; pNew = rtreeSearchPointNew(pCsr, RTREE_ZERO, pRtree->iDepth+1); if( pNew==0 ) return SQLITE_NOMEM; pNew->id = 1; pNew->iCell = 0; pNew->eWithin = PARTLY_WITHIN; assert( pCsr->bPoint==1 ); pCsr->aNode[0] = pRoot; pRoot = 0; RTREE_QUEUE_TRACE(pCsr, "PUSH-Fm:"); rc = rtreeStepToLeaf(pCsr); } } nodeRelease(pRtree, pRoot); rtreeRelease(pRtree); return rc; } /* ** Set the pIdxInfo->estimatedRows variable to nRow. Unless this ** extension is currently being used by a version of SQLite too old to ** support estimatedRows. In that case this function is a no-op. */ static void setEstimatedRows(sqlite3_index_info *pIdxInfo, i64 nRow){ #if SQLITE_VERSION_NUMBER>=3008002 if( sqlite3_libversion_number()>=3008002 ){ pIdxInfo->estimatedRows = nRow; } #endif } /* ** Rtree virtual table module xBestIndex method. There are three ** table scan strategies to choose from (in order from most to ** least desirable): ** ** idxNum idxStr Strategy ** ------------------------------------------------ ** 1 Unused Direct lookup by rowid. ** 2 See below R-tree query or full-table scan. ** ------------------------------------------------ ** ** If strategy 1 is used, then idxStr is not meaningful. 
If strategy ** 2 is used, idxStr is formatted to contain 2 bytes for each ** constraint used. The first two bytes of idxStr correspond to ** the constraint in sqlite3_index_info.aConstraintUsage[] with ** (argvIndex==1) etc. ** ** The first of each pair of bytes in idxStr identifies the constraint ** operator as follows: ** ** Operator Byte Value ** ---------------------- ** = 0x41 ('A') ** <= 0x42 ('B') ** < 0x43 ('C') ** >= 0x44 ('D') ** > 0x45 ('E') ** MATCH 0x46 ('F') ** ---------------------- ** ** The second of each pair of bytes identifies the coordinate column ** to which the constraint applies. The leftmost coordinate column ** is 'a', the second from the left 'b' etc. */ static int rtreeBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){ Rtree *pRtree = (Rtree*)tab; int rc = SQLITE_OK; int ii; int bMatch = 0; /* True if there exists a MATCH constraint */ i64 nRow; /* Estimated rows returned by this scan */ int iIdx = 0; char zIdxStr[RTREE_MAX_DIMENSIONS*8+1]; memset(zIdxStr, 0, sizeof(zIdxStr)); /* Check if there exists a MATCH constraint - even an unusable one. If there ** is, do not consider the lookup-by-rowid plan as using such a plan would ** require the VDBE to evaluate the MATCH constraint, which is not currently ** possible. */ for(ii=0; iinConstraint; ii++){ if( pIdxInfo->aConstraint[ii].op==SQLITE_INDEX_CONSTRAINT_MATCH ){ bMatch = 1; } } assert( pIdxInfo->idxStr==0 ); for(ii=0; iinConstraint && iIdx<(int)(sizeof(zIdxStr)-1); ii++){ struct sqlite3_index_constraint *p = &pIdxInfo->aConstraint[ii]; if( bMatch==0 && p->usable && p->iColumn==0 && p->op==SQLITE_INDEX_CONSTRAINT_EQ ){ /* We have an equality constraint on the rowid. Use strategy 1. */ int jj; for(jj=0; jjaConstraintUsage[jj].argvIndex = 0; pIdxInfo->aConstraintUsage[jj].omit = 0; } pIdxInfo->idxNum = 1; pIdxInfo->aConstraintUsage[ii].argvIndex = 1; pIdxInfo->aConstraintUsage[jj].omit = 1; /* This strategy involves a two rowid lookups on an B-Tree structures ** and then a linear search of an R-Tree node. This should be ** considered almost as quick as a direct rowid lookup (for which ** sqlite uses an internal cost of 0.0). It is expected to return ** a single row. */ pIdxInfo->estimatedCost = 30.0; setEstimatedRows(pIdxInfo, 1); return SQLITE_OK; } if( p->usable && (p->iColumn>0 || p->op==SQLITE_INDEX_CONSTRAINT_MATCH) ){ u8 op; switch( p->op ){ case SQLITE_INDEX_CONSTRAINT_EQ: op = RTREE_EQ; break; case SQLITE_INDEX_CONSTRAINT_GT: op = RTREE_GT; break; case SQLITE_INDEX_CONSTRAINT_LE: op = RTREE_LE; break; case SQLITE_INDEX_CONSTRAINT_LT: op = RTREE_LT; break; case SQLITE_INDEX_CONSTRAINT_GE: op = RTREE_GE; break; default: assert( p->op==SQLITE_INDEX_CONSTRAINT_MATCH ); op = RTREE_MATCH; break; } zIdxStr[iIdx++] = op; zIdxStr[iIdx++] = p->iColumn - 1 + '0'; pIdxInfo->aConstraintUsage[ii].argvIndex = (iIdx/2); pIdxInfo->aConstraintUsage[ii].omit = 1; } } pIdxInfo->idxNum = 2; pIdxInfo->needToFreeIdxStr = 1; if( iIdx>0 && 0==(pIdxInfo->idxStr = sqlite3_mprintf("%s", zIdxStr)) ){ return SQLITE_NOMEM; } nRow = pRtree->nRowEst / (iIdx + 1); pIdxInfo->estimatedCost = (double)6.0 * (double)nRow; setEstimatedRows(pIdxInfo, nRow); return rc; } /* ** Return the N-dimensional volumn of the cell stored in *p. */ static RtreeDValue cellArea(Rtree *pRtree, RtreeCell *p){ RtreeDValue area = (RtreeDValue)1; int ii; for(ii=0; ii<(pRtree->nDim*2); ii+=2){ area = (area * (DCOORD(p->aCoord[ii+1]) - DCOORD(p->aCoord[ii]))); } return area; } /* ** Return the margin length of cell p. 
The margin length is the sum ** of the objects size in each dimension. */ static RtreeDValue cellMargin(Rtree *pRtree, RtreeCell *p){ RtreeDValue margin = (RtreeDValue)0; int ii; for(ii=0; ii<(pRtree->nDim*2); ii+=2){ margin += (DCOORD(p->aCoord[ii+1]) - DCOORD(p->aCoord[ii])); } return margin; } /* ** Store the union of cells p1 and p2 in p1. */ static void cellUnion(Rtree *pRtree, RtreeCell *p1, RtreeCell *p2){ int ii; if( pRtree->eCoordType==RTREE_COORD_REAL32 ){ for(ii=0; ii<(pRtree->nDim*2); ii+=2){ p1->aCoord[ii].f = MIN(p1->aCoord[ii].f, p2->aCoord[ii].f); p1->aCoord[ii+1].f = MAX(p1->aCoord[ii+1].f, p2->aCoord[ii+1].f); } }else{ for(ii=0; ii<(pRtree->nDim*2); ii+=2){ p1->aCoord[ii].i = MIN(p1->aCoord[ii].i, p2->aCoord[ii].i); p1->aCoord[ii+1].i = MAX(p1->aCoord[ii+1].i, p2->aCoord[ii+1].i); } } } /* ** Return true if the area covered by p2 is a subset of the area covered ** by p1. False otherwise. */ static int cellContains(Rtree *pRtree, RtreeCell *p1, RtreeCell *p2){ int ii; int isInt = (pRtree->eCoordType==RTREE_COORD_INT32); for(ii=0; ii<(pRtree->nDim*2); ii+=2){ RtreeCoord *a1 = &p1->aCoord[ii]; RtreeCoord *a2 = &p2->aCoord[ii]; if( (!isInt && (a2[0].fa1[1].f)) || ( isInt && (a2[0].ia1[1].i)) ){ return 0; } } return 1; } /* ** Return the amount cell p would grow by if it were unioned with pCell. */ static RtreeDValue cellGrowth(Rtree *pRtree, RtreeCell *p, RtreeCell *pCell){ RtreeDValue area; RtreeCell cell; memcpy(&cell, p, sizeof(RtreeCell)); area = cellArea(pRtree, &cell); cellUnion(pRtree, &cell, pCell); return (cellArea(pRtree, &cell)-area); } static RtreeDValue cellOverlap( Rtree *pRtree, RtreeCell *p, RtreeCell *aCell, int nCell ){ int ii; RtreeDValue overlap = RTREE_ZERO; for(ii=0; iinDim*2); jj+=2){ RtreeDValue x1, x2; x1 = MAX(DCOORD(p->aCoord[jj]), DCOORD(aCell[ii].aCoord[jj])); x2 = MIN(DCOORD(p->aCoord[jj+1]), DCOORD(aCell[ii].aCoord[jj+1])); if( x2iDepth-iHeight); ii++){ int iCell; sqlite3_int64 iBest = 0; RtreeDValue fMinGrowth = RTREE_ZERO; RtreeDValue fMinArea = RTREE_ZERO; int nCell = NCELL(pNode); RtreeCell cell; RtreeNode *pChild; RtreeCell *aCell = 0; /* Select the child node which will be enlarged the least if pCell ** is inserted into it. Resolve ties by choosing the entry with ** the smallest area. */ for(iCell=0; iCellpParent ){ RtreeNode *pParent = p->pParent; RtreeCell cell; int iCell; if( nodeParentIndex(pRtree, p, &iCell) ){ return SQLITE_CORRUPT_VTAB; } nodeGetCell(pRtree, pParent, iCell, &cell); if( !cellContains(pRtree, &cell, pCell) ){ cellUnion(pRtree, &cell, pCell); nodeOverwriteCell(pRtree, pParent, &cell, iCell); } p = pParent; } return SQLITE_OK; } /* ** Write mapping (iRowid->iNode) to the _rowid table. */ static int rowidWrite(Rtree *pRtree, sqlite3_int64 iRowid, sqlite3_int64 iNode){ sqlite3_bind_int64(pRtree->pWriteRowid, 1, iRowid); sqlite3_bind_int64(pRtree->pWriteRowid, 2, iNode); sqlite3_step(pRtree->pWriteRowid); return sqlite3_reset(pRtree->pWriteRowid); } /* ** Write mapping (iNode->iPar) to the _parent table. */ static int parentWrite(Rtree *pRtree, sqlite3_int64 iNode, sqlite3_int64 iPar){ sqlite3_bind_int64(pRtree->pWriteParent, 1, iNode); sqlite3_bind_int64(pRtree->pWriteParent, 2, iPar); sqlite3_step(pRtree->pWriteParent); return sqlite3_reset(pRtree->pWriteParent); } static int rtreeInsertCell(Rtree *, RtreeNode *, RtreeCell *, int); /* ** Arguments aIdx, aDistance and aSpare all point to arrays of size ** nIdx. The aIdx array contains the set of integers from 0 to ** (nIdx-1) in no particular order. 
This function sorts the values ** in aIdx according to the indexed values in aDistance. For ** example, assuming the inputs: ** ** aIdx = { 0, 1, 2, 3 } ** aDistance = { 5.0, 2.0, 7.0, 6.0 } ** ** this function sets the aIdx array to contain: ** ** aIdx = { 0, 1, 2, 3 } ** ** The aSpare array is used as temporary working space by the ** sorting algorithm. */ static void SortByDistance( int *aIdx, int nIdx, RtreeDValue *aDistance, int *aSpare ){ if( nIdx>1 ){ int iLeft = 0; int iRight = 0; int nLeft = nIdx/2; int nRight = nIdx-nLeft; int *aLeft = aIdx; int *aRight = &aIdx[nLeft]; SortByDistance(aLeft, nLeft, aDistance, aSpare); SortByDistance(aRight, nRight, aDistance, aSpare); memcpy(aSpare, aLeft, sizeof(int)*nLeft); aLeft = aSpare; while( iLeft1 ){ int iLeft = 0; int iRight = 0; int nLeft = nIdx/2; int nRight = nIdx-nLeft; int *aLeft = aIdx; int *aRight = &aIdx[nLeft]; SortByDimension(pRtree, aLeft, nLeft, iDim, aCell, aSpare); SortByDimension(pRtree, aRight, nRight, iDim, aCell, aSpare); memcpy(aSpare, aLeft, sizeof(int)*nLeft); aLeft = aSpare; while( iLeftnDim+1)*(sizeof(int*)+nCell*sizeof(int)); aaSorted = (int **)sqlite3_malloc(nByte); if( !aaSorted ){ return SQLITE_NOMEM; } aSpare = &((int *)&aaSorted[pRtree->nDim])[pRtree->nDim*nCell]; memset(aaSorted, 0, nByte); for(ii=0; iinDim; ii++){ int jj; aaSorted[ii] = &((int *)&aaSorted[pRtree->nDim])[ii*nCell]; for(jj=0; jjnDim; ii++){ RtreeDValue margin = RTREE_ZERO; RtreeDValue fBestOverlap = RTREE_ZERO; RtreeDValue fBestArea = RTREE_ZERO; int iBestLeft = 0; int nLeft; for( nLeft=RTREE_MINCELLS(pRtree); nLeft<=(nCell-RTREE_MINCELLS(pRtree)); nLeft++ ){ RtreeCell left; RtreeCell right; int kk; RtreeDValue overlap; RtreeDValue area; memcpy(&left, &aCell[aaSorted[ii][0]], sizeof(RtreeCell)); memcpy(&right, &aCell[aaSorted[ii][nCell-1]], sizeof(RtreeCell)); for(kk=1; kk<(nCell-1); kk++){ if( kk0 ){ RtreeNode *pChild = nodeHashLookup(pRtree, iRowid); if( pChild ){ nodeRelease(pRtree, pChild->pParent); nodeReference(pNode); pChild->pParent = pNode; } } return xSetMapping(pRtree, iRowid, pNode->iNode); } static int SplitNode( Rtree *pRtree, RtreeNode *pNode, RtreeCell *pCell, int iHeight ){ int i; int newCellIsRight = 0; int rc = SQLITE_OK; int nCell = NCELL(pNode); RtreeCell *aCell; int *aiUsed; RtreeNode *pLeft = 0; RtreeNode *pRight = 0; RtreeCell leftbbox; RtreeCell rightbbox; /* Allocate an array and populate it with a copy of pCell and ** all cells from node pLeft. Then zero the original node. */ aCell = sqlite3_malloc((sizeof(RtreeCell)+sizeof(int))*(nCell+1)); if( !aCell ){ rc = SQLITE_NOMEM; goto splitnode_out; } aiUsed = (int *)&aCell[nCell+1]; memset(aiUsed, 0, sizeof(int)*(nCell+1)); for(i=0; iiNode==1 ){ pRight = nodeNew(pRtree, pNode); pLeft = nodeNew(pRtree, pNode); pRtree->iDepth++; pNode->isDirty = 1; writeInt16(pNode->zData, pRtree->iDepth); }else{ pLeft = pNode; pRight = nodeNew(pRtree, pLeft->pParent); nodeReference(pLeft); } if( !pLeft || !pRight ){ rc = SQLITE_NOMEM; goto splitnode_out; } memset(pLeft->zData, 0, pRtree->iNodeSize); memset(pRight->zData, 0, pRtree->iNodeSize); rc = splitNodeStartree(pRtree, aCell, nCell, pLeft, pRight, &leftbbox, &rightbbox); if( rc!=SQLITE_OK ){ goto splitnode_out; } /* Ensure both child nodes have node numbers assigned to them by calling ** nodeWrite(). Node pRight always needs a node number, as it was created ** by nodeNew() above. But node pLeft sometimes already has a node number. ** In this case avoid the all to nodeWrite(). 
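** (When the node being split is not the root, pLeft is pNode itself,
** which will normally have been assigned its node number when it was
** first read from or written to the %_node shadow table.)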
*/ if( SQLITE_OK!=(rc = nodeWrite(pRtree, pRight)) || (0==pLeft->iNode && SQLITE_OK!=(rc = nodeWrite(pRtree, pLeft))) ){ goto splitnode_out; } rightbbox.iRowid = pRight->iNode; leftbbox.iRowid = pLeft->iNode; if( pNode->iNode==1 ){ rc = rtreeInsertCell(pRtree, pLeft->pParent, &leftbbox, iHeight+1); if( rc!=SQLITE_OK ){ goto splitnode_out; } }else{ RtreeNode *pParent = pLeft->pParent; int iCell; rc = nodeParentIndex(pRtree, pLeft, &iCell); if( rc==SQLITE_OK ){ nodeOverwriteCell(pRtree, pParent, &leftbbox, iCell); rc = AdjustTree(pRtree, pParent, &leftbbox); } if( rc!=SQLITE_OK ){ goto splitnode_out; } } if( (rc = rtreeInsertCell(pRtree, pRight->pParent, &rightbbox, iHeight+1)) ){ goto splitnode_out; } for(i=0; iiRowid ){ newCellIsRight = 1; } if( rc!=SQLITE_OK ){ goto splitnode_out; } } if( pNode->iNode==1 ){ for(i=0; iiRowid, pLeft, iHeight); } if( rc==SQLITE_OK ){ rc = nodeRelease(pRtree, pRight); pRight = 0; } if( rc==SQLITE_OK ){ rc = nodeRelease(pRtree, pLeft); pLeft = 0; } splitnode_out: nodeRelease(pRtree, pRight); nodeRelease(pRtree, pLeft); sqlite3_free(aCell); return rc; } /* ** If node pLeaf is not the root of the r-tree and its pParent pointer is ** still NULL, load all ancestor nodes of pLeaf into memory and populate ** the pLeaf->pParent chain all the way up to the root node. ** ** This operation is required when a row is deleted (or updated - an update ** is implemented as a delete followed by an insert). SQLite provides the ** rowid of the row to delete, which can be used to find the leaf on which ** the entry resides (argument pLeaf). Once the leaf is located, this ** function is called to determine its ancestry. */ static int fixLeafParent(Rtree *pRtree, RtreeNode *pLeaf){ int rc = SQLITE_OK; RtreeNode *pChild = pLeaf; while( rc==SQLITE_OK && pChild->iNode!=1 && pChild->pParent==0 ){ int rc2 = SQLITE_OK; /* sqlite3_reset() return code */ sqlite3_bind_int64(pRtree->pReadParent, 1, pChild->iNode); rc = sqlite3_step(pRtree->pReadParent); if( rc==SQLITE_ROW ){ RtreeNode *pTest; /* Used to test for reference loops */ i64 iNode; /* Node number of parent node */ /* Before setting pChild->pParent, test that we are not creating a ** loop of references (as we would if, say, pChild==pParent). We don't ** want to do this as it leads to a memory leak when trying to delete ** the referenced counted node structures. */ iNode = sqlite3_column_int64(pRtree->pReadParent, 0); for(pTest=pLeaf; pTest && pTest->iNode!=iNode; pTest=pTest->pParent); if( !pTest ){ rc2 = nodeAcquire(pRtree, iNode, 0, &pChild->pParent); } } rc = sqlite3_reset(pRtree->pReadParent); if( rc==SQLITE_OK ) rc = rc2; if( rc==SQLITE_OK && !pChild->pParent ) rc = SQLITE_CORRUPT_VTAB; pChild = pChild->pParent; } return rc; } static int deleteCell(Rtree *, RtreeNode *, int, int); static int removeNode(Rtree *pRtree, RtreeNode *pNode, int iHeight){ int rc; int rc2; RtreeNode *pParent = 0; int iCell; assert( pNode->nRef==1 ); /* Remove the entry in the parent cell. */ rc = nodeParentIndex(pRtree, pNode, &iCell); if( rc==SQLITE_OK ){ pParent = pNode->pParent; pNode->pParent = 0; rc = deleteCell(pRtree, pParent, iCell, iHeight+1); } rc2 = nodeRelease(pRtree, pParent); if( rc==SQLITE_OK ){ rc = rc2; } if( rc!=SQLITE_OK ){ return rc; } /* Remove the xxx_node entry. */ sqlite3_bind_int64(pRtree->pDeleteNode, 1, pNode->iNode); sqlite3_step(pRtree->pDeleteNode); if( SQLITE_OK!=(rc = sqlite3_reset(pRtree->pDeleteNode)) ){ return rc; } /* Remove the xxx_parent entry. 
*/ sqlite3_bind_int64(pRtree->pDeleteParent, 1, pNode->iNode); sqlite3_step(pRtree->pDeleteParent); if( SQLITE_OK!=(rc = sqlite3_reset(pRtree->pDeleteParent)) ){ return rc; } /* Remove the node from the in-memory hash table and link it into ** the Rtree.pDeleted list. Its contents will be re-inserted later on. */ nodeHashDelete(pRtree, pNode); pNode->iNode = iHeight; pNode->pNext = pRtree->pDeleted; pNode->nRef++; pRtree->pDeleted = pNode; return SQLITE_OK; } static int fixBoundingBox(Rtree *pRtree, RtreeNode *pNode){ RtreeNode *pParent = pNode->pParent; int rc = SQLITE_OK; if( pParent ){ int ii; int nCell = NCELL(pNode); RtreeCell box; /* Bounding box for pNode */ nodeGetCell(pRtree, pNode, 0, &box); for(ii=1; iiiNode; rc = nodeParentIndex(pRtree, pNode, &ii); if( rc==SQLITE_OK ){ nodeOverwriteCell(pRtree, pParent, &box, ii); rc = fixBoundingBox(pRtree, pParent); } } return rc; } /* ** Delete the cell at index iCell of node pNode. After removing the ** cell, adjust the r-tree data structure if required. */ static int deleteCell(Rtree *pRtree, RtreeNode *pNode, int iCell, int iHeight){ RtreeNode *pParent; int rc; if( SQLITE_OK!=(rc = fixLeafParent(pRtree, pNode)) ){ return rc; } /* Remove the cell from the node. This call just moves bytes around ** the in-memory node image, so it cannot fail. */ nodeDeleteCell(pRtree, pNode, iCell); /* If the node is not the tree root and now has less than the minimum ** number of cells, remove it from the tree. Otherwise, update the ** cell in the parent node so that it tightly contains the updated ** node. */ pParent = pNode->pParent; assert( pParent || pNode->iNode==1 ); if( pParent ){ if( NCELL(pNode)nDim; iDim++){ aCenterCoord[iDim] += DCOORD(aCell[ii].aCoord[iDim*2]); aCenterCoord[iDim] += DCOORD(aCell[ii].aCoord[iDim*2+1]); } } for(iDim=0; iDimnDim; iDim++){ aCenterCoord[iDim] = (aCenterCoord[iDim]/(nCell*(RtreeDValue)2)); } for(ii=0; iinDim; iDim++){ RtreeDValue coord = (DCOORD(aCell[ii].aCoord[iDim*2+1]) - DCOORD(aCell[ii].aCoord[iDim*2])); aDistance[ii] += (coord-aCenterCoord[iDim])*(coord-aCenterCoord[iDim]); } } SortByDistance(aOrder, nCell, aDistance, aSpare); nodeZero(pRtree, pNode); for(ii=0; rc==SQLITE_OK && ii<(nCell-(RTREE_MINCELLS(pRtree)+1)); ii++){ RtreeCell *p = &aCell[aOrder[ii]]; nodeInsertCell(pRtree, pNode, p); if( p->iRowid==pCell->iRowid ){ if( iHeight==0 ){ rc = rowidWrite(pRtree, p->iRowid, pNode->iNode); }else{ rc = parentWrite(pRtree, p->iRowid, pNode->iNode); } } } if( rc==SQLITE_OK ){ rc = fixBoundingBox(pRtree, pNode); } for(; rc==SQLITE_OK && iiiNode currently contains ** the height of the sub-tree headed by the cell. */ RtreeNode *pInsert; RtreeCell *p = &aCell[aOrder[ii]]; rc = ChooseLeaf(pRtree, p, iHeight, &pInsert); if( rc==SQLITE_OK ){ int rc2; rc = rtreeInsertCell(pRtree, pInsert, p, iHeight); rc2 = nodeRelease(pRtree, pInsert); if( rc==SQLITE_OK ){ rc = rc2; } } } sqlite3_free(aCell); return rc; } /* ** Insert cell pCell into node pNode. Node pNode is the head of a ** subtree iHeight high (leaf nodes have iHeight==0). 
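**
** If the cell fits, AdjustTree() enlarges the bounding boxes of the
** ancestor cells as needed and the %_rowid or %_parent mapping for the
** cell is refreshed. If the node is already full, it is either split by
** SplitNode() or, following the R*-tree heuristic, some of its cells are
** removed and re-inserted by Reinsert(); splitting is used for the root
** node and at or below any level where a forced reinsert has already
** been performed during the current insert (Rtree.iReinsertHeight).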
*/ static int rtreeInsertCell( Rtree *pRtree, RtreeNode *pNode, RtreeCell *pCell, int iHeight ){ int rc = SQLITE_OK; if( iHeight>0 ){ RtreeNode *pChild = nodeHashLookup(pRtree, pCell->iRowid); if( pChild ){ nodeRelease(pRtree, pChild->pParent); nodeReference(pNode); pChild->pParent = pNode; } } if( nodeInsertCell(pRtree, pNode, pCell) ){ if( iHeight<=pRtree->iReinsertHeight || pNode->iNode==1){ rc = SplitNode(pRtree, pNode, pCell, iHeight); }else{ pRtree->iReinsertHeight = iHeight; rc = Reinsert(pRtree, pNode, pCell, iHeight); } }else{ rc = AdjustTree(pRtree, pNode, pCell); if( rc==SQLITE_OK ){ if( iHeight==0 ){ rc = rowidWrite(pRtree, pCell->iRowid, pNode->iNode); }else{ rc = parentWrite(pRtree, pCell->iRowid, pNode->iNode); } } } return rc; } static int reinsertNodeContent(Rtree *pRtree, RtreeNode *pNode){ int ii; int rc = SQLITE_OK; int nCell = NCELL(pNode); for(ii=0; rc==SQLITE_OK && iiiNode currently contains ** the height of the sub-tree headed by the cell. */ rc = ChooseLeaf(pRtree, &cell, (int)pNode->iNode, &pInsert); if( rc==SQLITE_OK ){ int rc2; rc = rtreeInsertCell(pRtree, pInsert, &cell, (int)pNode->iNode); rc2 = nodeRelease(pRtree, pInsert); if( rc==SQLITE_OK ){ rc = rc2; } } } return rc; } /* ** Select a currently unused rowid for a new r-tree record. */ static int newRowid(Rtree *pRtree, i64 *piRowid){ int rc; sqlite3_bind_null(pRtree->pWriteRowid, 1); sqlite3_bind_null(pRtree->pWriteRowid, 2); sqlite3_step(pRtree->pWriteRowid); rc = sqlite3_reset(pRtree->pWriteRowid); *piRowid = sqlite3_last_insert_rowid(pRtree->db); return rc; } /* ** Remove the entry with rowid=iDelete from the r-tree structure. */ static int rtreeDeleteRowid(Rtree *pRtree, sqlite3_int64 iDelete){ int rc; /* Return code */ RtreeNode *pLeaf = 0; /* Leaf node containing record iDelete */ int iCell; /* Index of iDelete cell in pLeaf */ RtreeNode *pRoot; /* Root node of rtree structure */ /* Obtain a reference to the root node to initialize Rtree.iDepth */ rc = nodeAcquire(pRtree, 1, 0, &pRoot); /* Obtain a reference to the leaf node that contains the entry ** about to be deleted. */ if( rc==SQLITE_OK ){ rc = findLeafNode(pRtree, iDelete, &pLeaf, 0); } /* Delete the cell in question from the leaf node. */ if( rc==SQLITE_OK ){ int rc2; rc = nodeRowidIndex(pRtree, pLeaf, iDelete, &iCell); if( rc==SQLITE_OK ){ rc = deleteCell(pRtree, pLeaf, iCell, 0); } rc2 = nodeRelease(pRtree, pLeaf); if( rc==SQLITE_OK ){ rc = rc2; } } /* Delete the corresponding entry in the _rowid table. */ if( rc==SQLITE_OK ){ sqlite3_bind_int64(pRtree->pDeleteRowid, 1, iDelete); sqlite3_step(pRtree->pDeleteRowid); rc = sqlite3_reset(pRtree->pDeleteRowid); } /* Check if the root node now has exactly one child. If so, remove ** it, schedule the contents of the child for reinsertion and ** reduce the tree height by one. ** ** This is equivalent to copying the contents of the child into ** the root node (the operation that Gutman's paper says to perform ** in this scenario). */ if( rc==SQLITE_OK && pRtree->iDepth>0 && NCELL(pRoot)==1 ){ int rc2; RtreeNode *pChild; i64 iChild = nodeGetRowid(pRtree, pRoot, 0); rc = nodeAcquire(pRtree, iChild, pRoot, &pChild); if( rc==SQLITE_OK ){ rc = removeNode(pRtree, pChild, pRtree->iDepth-1); } rc2 = nodeRelease(pRtree, pChild); if( rc==SQLITE_OK ) rc = rc2; if( rc==SQLITE_OK ){ pRtree->iDepth--; writeInt16(pRoot->zData, pRtree->iDepth); pRoot->isDirty = 1; } } /* Re-insert the contents of any underfull nodes removed from the tree. 
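** (These are the nodes collected on the Rtree.pDeleted list by
** removeNode(). At this point RtreeNode.iNode holds the height of the
** sub-tree each node used to head, which reinsertNodeContent() passes
** to ChooseLeaf() so that the orphaned cells land back on the correct
** level of the tree.)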
*/ for(pLeaf=pRtree->pDeleted; pLeaf; pLeaf=pRtree->pDeleted){ if( rc==SQLITE_OK ){ rc = reinsertNodeContent(pRtree, pLeaf); } pRtree->pDeleted = pLeaf->pNext; sqlite3_free(pLeaf); } /* Release the reference to the root node. */ if( rc==SQLITE_OK ){ rc = nodeRelease(pRtree, pRoot); }else{ nodeRelease(pRtree, pRoot); } return rc; } /* ** Rounding constants for float->double conversion. */ #define RNDTOWARDS (1.0 - 1.0/8388608.0) /* Round towards zero */ #define RNDAWAY (1.0 + 1.0/8388608.0) /* Round away from zero */ #if !defined(SQLITE_RTREE_INT_ONLY) /* ** Convert an sqlite3_value into an RtreeValue (presumably a float) ** while taking care to round toward negative or positive, respectively. */ static RtreeValue rtreeValueDown(sqlite3_value *v){ double d = sqlite3_value_double(v); float f = (float)d; if( f>d ){ f = (float)(d*(d<0 ? RNDAWAY : RNDTOWARDS)); } return f; } static RtreeValue rtreeValueUp(sqlite3_value *v){ double d = sqlite3_value_double(v); float f = (float)d; if( f1 */ int bHaveRowid = 0; /* Set to 1 after new rowid is determined */ rtreeReference(pRtree); assert(nData>=1); cell.iRowid = 0; /* Used only to suppress a compiler warning */ /* Constraint handling. A write operation on an r-tree table may return ** SQLITE_CONSTRAINT for two reasons: ** ** 1. A duplicate rowid value, or ** 2. The supplied data violates the "x2>=x1" constraint. ** ** In the first case, if the conflict-handling mode is REPLACE, then ** the conflicting row can be removed before proceeding. In the second ** case, SQLITE_CONSTRAINT must be returned regardless of the ** conflict-handling mode specified by the user. */ if( nData>1 ){ int ii; /* Populate the cell.aCoord[] array. The first coordinate is azData[3]. ** ** NB: nData can only be less than nDim*2+3 if the rtree is mis-declared ** with "column" that are interpreted as table constraints. ** Example: CREATE VIRTUAL TABLE bad USING rtree(x,y,CHECK(y>5)); ** This problem was discovered after years of use, so we silently ignore ** these kinds of misdeclared tables to avoid breaking any legacy. */ assert( nData<=(pRtree->nDim*2 + 3) ); #ifndef SQLITE_RTREE_INT_ONLY if( pRtree->eCoordType==RTREE_COORD_REAL32 ){ for(ii=0; iicell.aCoord[ii+1].f ){ rc = SQLITE_CONSTRAINT; goto constraint; } } }else #endif { for(ii=0; iicell.aCoord[ii+1].i ){ rc = SQLITE_CONSTRAINT; goto constraint; } } } /* If a rowid value was supplied, check if it is already present in ** the table. If so, the constraint has failed. */ if( sqlite3_value_type(azData[2])!=SQLITE_NULL ){ cell.iRowid = sqlite3_value_int64(azData[2]); if( sqlite3_value_type(azData[0])==SQLITE_NULL || sqlite3_value_int64(azData[0])!=cell.iRowid ){ int steprc; sqlite3_bind_int64(pRtree->pReadRowid, 1, cell.iRowid); steprc = sqlite3_step(pRtree->pReadRowid); rc = sqlite3_reset(pRtree->pReadRowid); if( SQLITE_ROW==steprc ){ if( sqlite3_vtab_on_conflict(pRtree->db)==SQLITE_REPLACE ){ rc = rtreeDeleteRowid(pRtree, cell.iRowid); }else{ rc = SQLITE_CONSTRAINT; goto constraint; } } } bHaveRowid = 1; } } /* If azData[0] is not an SQL NULL value, it is the rowid of a ** record to delete from the r-tree table. The following block does ** just that. */ if( sqlite3_value_type(azData[0])!=SQLITE_NULL ){ rc = rtreeDeleteRowid(pRtree, sqlite3_value_int64(azData[0])); } /* If the azData[] array contains more than one element, elements ** (azData[2]..azData[argc-1]) contain a new record to insert into ** the r-tree structure. 
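**
** As an illustrative sketch (the table name is hypothetical): for a
** two-dimensional table created with
** "CREATE VIRTUAL TABLE rt USING rtree(id, x1, x2, y1, y2)", an INSERT
** arrives here with nData==7, azData[2] holding the value for "id" (or
** NULL if a new rowid should be allocated) and azData[3]..azData[6]
** holding x1, x2, y1 and y2.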
*/ if( rc==SQLITE_OK && nData>1 ){ /* Insert the new record into the r-tree */ RtreeNode *pLeaf = 0; /* Figure out the rowid of the new row. */ if( bHaveRowid==0 ){ rc = newRowid(pRtree, &cell.iRowid); } *pRowid = cell.iRowid; if( rc==SQLITE_OK ){ rc = ChooseLeaf(pRtree, &cell, 0, &pLeaf); } if( rc==SQLITE_OK ){ int rc2; pRtree->iReinsertHeight = -1; rc = rtreeInsertCell(pRtree, pLeaf, &cell, 0); rc2 = nodeRelease(pRtree, pLeaf); if( rc==SQLITE_OK ){ rc = rc2; } } } constraint: rtreeRelease(pRtree); return rc; } /* ** The xRename method for rtree module virtual tables. */ static int rtreeRename(sqlite3_vtab *pVtab, const char *zNewName){ Rtree *pRtree = (Rtree *)pVtab; int rc = SQLITE_NOMEM; char *zSql = sqlite3_mprintf( "ALTER TABLE %Q.'%q_node' RENAME TO \"%w_node\";" "ALTER TABLE %Q.'%q_parent' RENAME TO \"%w_parent\";" "ALTER TABLE %Q.'%q_rowid' RENAME TO \"%w_rowid\";" , pRtree->zDb, pRtree->zName, zNewName , pRtree->zDb, pRtree->zName, zNewName , pRtree->zDb, pRtree->zName, zNewName ); if( zSql ){ rc = sqlite3_exec(pRtree->db, zSql, 0, 0, 0); sqlite3_free(zSql); } return rc; } /* ** This function populates the pRtree->nRowEst variable with an estimate ** of the number of rows in the virtual table. If possible, this is based ** on sqlite_stat1 data. Otherwise, use RTREE_DEFAULT_ROWEST. */ static int rtreeQueryStat1(sqlite3 *db, Rtree *pRtree){ const char *zFmt = "SELECT stat FROM %Q.sqlite_stat1 WHERE tbl = '%q_rowid'"; char *zSql; sqlite3_stmt *p; int rc; i64 nRow = 0; zSql = sqlite3_mprintf(zFmt, pRtree->zDb, pRtree->zName); if( zSql==0 ){ rc = SQLITE_NOMEM; }else{ rc = sqlite3_prepare_v2(db, zSql, -1, &p, 0); if( rc==SQLITE_OK ){ if( sqlite3_step(p)==SQLITE_ROW ) nRow = sqlite3_column_int64(p, 0); rc = sqlite3_finalize(p); }else if( rc!=SQLITE_NOMEM ){ rc = SQLITE_OK; } if( rc==SQLITE_OK ){ if( nRow==0 ){ pRtree->nRowEst = RTREE_DEFAULT_ROWEST; }else{ pRtree->nRowEst = MAX(nRow, RTREE_MIN_ROWEST); } } sqlite3_free(zSql); } return rc; } static sqlite3_module rtreeModule = { 0, /* iVersion */ rtreeCreate, /* xCreate - create a table */ rtreeConnect, /* xConnect - connect to an existing table */ rtreeBestIndex, /* xBestIndex - Determine search strategy */ rtreeDisconnect, /* xDisconnect - Disconnect from a table */ rtreeDestroy, /* xDestroy - Drop a table */ rtreeOpen, /* xOpen - open a cursor */ rtreeClose, /* xClose - close a cursor */ rtreeFilter, /* xFilter - configure scan constraints */ rtreeNext, /* xNext - advance a cursor */ rtreeEof, /* xEof */ rtreeColumn, /* xColumn - read data */ rtreeRowid, /* xRowid - read data */ rtreeUpdate, /* xUpdate - write data */ 0, /* xBegin - begin transaction */ 0, /* xSync - sync transaction */ 0, /* xCommit - commit transaction */ 0, /* xRollback - rollback transaction */ 0, /* xFindFunction - function overloading */ rtreeRename, /* xRename - rename the table */ 0, /* xSavepoint */ 0, /* xRelease */ 0 /* xRollbackTo */ }; static int rtreeSqlInit( Rtree *pRtree, sqlite3 *db, const char *zDb, const char *zPrefix, int isCreate ){ int rc = SQLITE_OK; #define N_STATEMENT 9 static const char *azSql[N_STATEMENT] = { /* Read and write the xxx_node table */ "SELECT data FROM '%q'.'%q_node' WHERE nodeno = :1", "INSERT OR REPLACE INTO '%q'.'%q_node' VALUES(:1, :2)", "DELETE FROM '%q'.'%q_node' WHERE nodeno = :1", /* Read and write the xxx_rowid table */ "SELECT nodeno FROM '%q'.'%q_rowid' WHERE rowid = :1", "INSERT OR REPLACE INTO '%q'.'%q_rowid' VALUES(:1, :2)", "DELETE FROM '%q'.'%q_rowid' WHERE rowid = :1", /* Read and write the xxx_parent table */ 
"SELECT parentnode FROM '%q'.'%q_parent' WHERE nodeno = :1", "INSERT OR REPLACE INTO '%q'.'%q_parent' VALUES(:1, :2)", "DELETE FROM '%q'.'%q_parent' WHERE nodeno = :1" }; sqlite3_stmt **appStmt[N_STATEMENT]; int i; pRtree->db = db; if( isCreate ){ char *zCreate = sqlite3_mprintf( "CREATE TABLE \"%w\".\"%w_node\"(nodeno INTEGER PRIMARY KEY, data BLOB);" "CREATE TABLE \"%w\".\"%w_rowid\"(rowid INTEGER PRIMARY KEY, nodeno INTEGER);" "CREATE TABLE \"%w\".\"%w_parent\"(nodeno INTEGER PRIMARY KEY," " parentnode INTEGER);" "INSERT INTO '%q'.'%q_node' VALUES(1, zeroblob(%d))", zDb, zPrefix, zDb, zPrefix, zDb, zPrefix, zDb, zPrefix, pRtree->iNodeSize ); if( !zCreate ){ return SQLITE_NOMEM; } rc = sqlite3_exec(db, zCreate, 0, 0, 0); sqlite3_free(zCreate); if( rc!=SQLITE_OK ){ return rc; } } appStmt[0] = &pRtree->pReadNode; appStmt[1] = &pRtree->pWriteNode; appStmt[2] = &pRtree->pDeleteNode; appStmt[3] = &pRtree->pReadRowid; appStmt[4] = &pRtree->pWriteRowid; appStmt[5] = &pRtree->pDeleteRowid; appStmt[6] = &pRtree->pReadParent; appStmt[7] = &pRtree->pWriteParent; appStmt[8] = &pRtree->pDeleteParent; rc = rtreeQueryStat1(db, pRtree); for(i=0; iiNodeSize is populated and SQLITE_OK returned. ** Otherwise, an SQLite error code is returned. ** ** If this function is being called as part of an xConnect(), then the rtree ** table already exists. In this case the node-size is determined by inspecting ** the root node of the tree. ** ** Otherwise, for an xCreate(), use 64 bytes less than the database page-size. ** This ensures that each node is stored on a single database page. If the ** database page-size is so large that more than RTREE_MAXCELLS entries ** would fit in a single node, use a smaller node-size. */ static int getNodeSize( sqlite3 *db, /* Database handle */ Rtree *pRtree, /* Rtree handle */ int isCreate, /* True for xCreate, false for xConnect */ char **pzErr /* OUT: Error message, if any */ ){ int rc; char *zSql; if( isCreate ){ int iPageSize = 0; zSql = sqlite3_mprintf("PRAGMA %Q.page_size", pRtree->zDb); rc = getIntFromStmt(db, zSql, &iPageSize); if( rc==SQLITE_OK ){ pRtree->iNodeSize = iPageSize-64; if( (4+pRtree->nBytesPerCell*RTREE_MAXCELLS)iNodeSize ){ pRtree->iNodeSize = 4+pRtree->nBytesPerCell*RTREE_MAXCELLS; } }else{ *pzErr = sqlite3_mprintf("%s", sqlite3_errmsg(db)); } }else{ zSql = sqlite3_mprintf( "SELECT length(data) FROM '%q'.'%q_node' WHERE nodeno = 1", pRtree->zDb, pRtree->zName ); rc = getIntFromStmt(db, zSql, &pRtree->iNodeSize); if( rc!=SQLITE_OK ){ *pzErr = sqlite3_mprintf("%s", sqlite3_errmsg(db)); } } sqlite3_free(zSql); return rc; } /* ** This function is the implementation of both the xConnect and xCreate ** methods of the r-tree virtual table. ** ** argv[0] -> module name ** argv[1] -> database name ** argv[2] -> table name ** argv[...] -> column names... */ static int rtreeInit( sqlite3 *db, /* Database connection */ void *pAux, /* One of the RTREE_COORD_* constants */ int argc, const char *const*argv, /* Parameters to CREATE TABLE statement */ sqlite3_vtab **ppVtab, /* OUT: New virtual table */ char **pzErr, /* OUT: Error message, if any */ int isCreate /* True for xCreate, false for xConnect */ ){ int rc = SQLITE_OK; Rtree *pRtree; int nDb; /* Length of string argv[1] */ int nName; /* Length of string argv[2] */ int eCoordType = (pAux ? 
RTREE_COORD_INT32 : RTREE_COORD_REAL32); const char *aErrMsg[] = { 0, /* 0 */ "Wrong number of columns for an rtree table", /* 1 */ "Too few columns for an rtree table", /* 2 */ "Too many columns for an rtree table" /* 3 */ }; int iErr = (argc<6) ? 2 : argc>(RTREE_MAX_DIMENSIONS*2+4) ? 3 : argc%2; if( aErrMsg[iErr] ){ *pzErr = sqlite3_mprintf("%s", aErrMsg[iErr]); return SQLITE_ERROR; } sqlite3_vtab_config(db, SQLITE_VTAB_CONSTRAINT_SUPPORT, 1); /* Allocate the sqlite3_vtab structure */ nDb = (int)strlen(argv[1]); nName = (int)strlen(argv[2]); pRtree = (Rtree *)sqlite3_malloc(sizeof(Rtree)+nDb+nName+2); if( !pRtree ){ return SQLITE_NOMEM; } memset(pRtree, 0, sizeof(Rtree)+nDb+nName+2); pRtree->nBusy = 1; pRtree->base.pModule = &rtreeModule; pRtree->zDb = (char *)&pRtree[1]; pRtree->zName = &pRtree->zDb[nDb+1]; pRtree->nDim = (argc-4)/2; pRtree->nBytesPerCell = 8 + pRtree->nDim*4*2; pRtree->eCoordType = eCoordType; memcpy(pRtree->zDb, argv[1], nDb); memcpy(pRtree->zName, argv[2], nName); /* Figure out the node size to use. */ rc = getNodeSize(db, pRtree, isCreate, pzErr); /* Create/Connect to the underlying relational database schema. If ** that is successful, call sqlite3_declare_vtab() to configure ** the r-tree table schema. */ if( rc==SQLITE_OK ){ if( (rc = rtreeSqlInit(pRtree, db, argv[1], argv[2], isCreate)) ){ *pzErr = sqlite3_mprintf("%s", sqlite3_errmsg(db)); }else{ char *zSql = sqlite3_mprintf("CREATE TABLE x(%s", argv[3]); char *zTmp; int ii; for(ii=4; zSql && iinBusy==1 ); rtreeRelease(pRtree); } return rc; } /* ** Implementation of a scalar function that decodes r-tree nodes to ** human readable strings. This can be used for debugging and analysis. ** ** The scalar function takes two arguments: (1) the number of dimensions ** to the rtree (between 1 and 5, inclusive) and (2) a blob of data containing ** an r-tree node. For a two-dimensional r-tree structure called "rt", to ** deserialize all nodes, a statement like: ** ** SELECT rtreenode(2, data) FROM rt_node; ** ** The human readable string takes the form of a Tcl list with one ** entry for each cell in the r-tree node. Each entry is itself a ** list, containing the 8-byte rowid/pageno followed by the ** *2 coordinates. */ static void rtreenode(sqlite3_context *ctx, int nArg, sqlite3_value **apArg){ char *zText = 0; RtreeNode node; Rtree tree; int ii; UNUSED_PARAMETER(nArg); memset(&node, 0, sizeof(RtreeNode)); memset(&tree, 0, sizeof(Rtree)); tree.nDim = sqlite3_value_int(apArg[0]); tree.nBytesPerCell = 8 + 8 * tree.nDim; node.zData = (u8 *)sqlite3_value_blob(apArg[1]); for(ii=0; iixDestructor ) pInfo->xDestructor(pInfo->pContext); sqlite3_free(p); } /* ** This routine frees the BLOB that is returned by geomCallback(). */ static void rtreeMatchArgFree(void *pArg){ int i; RtreeMatchArg *p = (RtreeMatchArg*)pArg; for(i=0; inParam; i++){ sqlite3_value_free(p->apSqlParam[i]); } sqlite3_free(p); } /* ** Each call to sqlite3_rtree_geometry_callback() or ** sqlite3_rtree_query_callback() creates an ordinary SQLite ** scalar function that is implemented by this routine. ** ** All this function does is construct an RtreeMatchArg object that ** contains the geometry-checking callback routines and a list of ** parameters to this function, then return that RtreeMatchArg object ** as a BLOB. ** ** The R-Tree MATCH operator will read the returned BLOB, deserialize ** the RtreeMatchArg object, and use the RtreeMatchArg object to figure ** out which elements of the R-Tree should be returned by the query. 
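**
** For example (an illustrative sketch; the function name "circle", the
** callback circleGeomCallback and the table "rt" are hypothetical), after
** an application registers a callback with:
**
**     sqlite3_rtree_geometry_callback(db, "circle", circleGeomCallback, 0);
**
** a query such as:
**
**     SELECT id FROM rt WHERE id MATCH circle(45.3, 22.9, 5.0);
**
** causes geomCallback() to package the three arguments into a BLOB, which
** the MATCH operator then deserializes while scanning the r-tree.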
*/ static void geomCallback(sqlite3_context *ctx, int nArg, sqlite3_value **aArg){ RtreeGeomCallback *pGeomCtx = (RtreeGeomCallback *)sqlite3_user_data(ctx); RtreeMatchArg *pBlob; int nBlob; int memErr = 0; nBlob = sizeof(RtreeMatchArg) + (nArg-1)*sizeof(RtreeDValue) + nArg*sizeof(sqlite3_value*); pBlob = (RtreeMatchArg *)sqlite3_malloc(nBlob); if( !pBlob ){ sqlite3_result_error_nomem(ctx); }else{ int i; pBlob->magic = RTREE_GEOMETRY_MAGIC; pBlob->cb = pGeomCtx[0]; pBlob->apSqlParam = (sqlite3_value**)&pBlob->aParam[nArg]; pBlob->nParam = nArg; for(i=0; iapSqlParam[i] = sqlite3_value_dup(aArg[i]); if( pBlob->apSqlParam[i]==0 ) memErr = 1; #ifdef SQLITE_RTREE_INT_ONLY pBlob->aParam[i] = sqlite3_value_int64(aArg[i]); #else pBlob->aParam[i] = sqlite3_value_double(aArg[i]); #endif } if( memErr ){ sqlite3_result_error_nomem(ctx); rtreeMatchArgFree(pBlob); }else{ sqlite3_result_blob(ctx, pBlob, nBlob, rtreeMatchArgFree); } } } /* ** Register a new geometry function for use with the r-tree MATCH operator. */ SQLITE_API int SQLITE_STDCALL sqlite3_rtree_geometry_callback( sqlite3 *db, /* Register SQL function on this connection */ const char *zGeom, /* Name of the new SQL function */ int (*xGeom)(sqlite3_rtree_geometry*,int,RtreeDValue*,int*), /* Callback */ void *pContext /* Extra data associated with the callback */ ){ RtreeGeomCallback *pGeomCtx; /* Context object for new user-function */ /* Allocate and populate the context object. */ pGeomCtx = (RtreeGeomCallback *)sqlite3_malloc(sizeof(RtreeGeomCallback)); if( !pGeomCtx ) return SQLITE_NOMEM; pGeomCtx->xGeom = xGeom; pGeomCtx->xQueryFunc = 0; pGeomCtx->xDestructor = 0; pGeomCtx->pContext = pContext; return sqlite3_create_function_v2(db, zGeom, -1, SQLITE_ANY, (void *)pGeomCtx, geomCallback, 0, 0, rtreeFreeCallback ); } /* ** Register a new 2nd-generation geometry function for use with the ** r-tree MATCH operator. */ SQLITE_API int SQLITE_STDCALL sqlite3_rtree_query_callback( sqlite3 *db, /* Register SQL function on this connection */ const char *zQueryFunc, /* Name of new SQL function */ int (*xQueryFunc)(sqlite3_rtree_query_info*), /* Callback */ void *pContext, /* Extra data passed into the callback */ void (*xDestructor)(void*) /* Destructor for the extra data */ ){ RtreeGeomCallback *pGeomCtx; /* Context object for new user-function */ /* Allocate and populate the context object. */ pGeomCtx = (RtreeGeomCallback *)sqlite3_malloc(sizeof(RtreeGeomCallback)); if( !pGeomCtx ) return SQLITE_NOMEM; pGeomCtx->xGeom = 0; pGeomCtx->xQueryFunc = xQueryFunc; pGeomCtx->xDestructor = xDestructor; pGeomCtx->pContext = pContext; return sqlite3_create_function_v2(db, zQueryFunc, -1, SQLITE_ANY, (void *)pGeomCtx, geomCallback, 0, 0, rtreeFreeCallback ); } #if !SQLITE_CORE #ifdef _WIN32 __declspec(dllexport) #endif SQLITE_API int SQLITE_STDCALL sqlite3_rtree_init( sqlite3 *db, char **pzErrMsg, const sqlite3_api_routines *pApi ){ SQLITE_EXTENSION_INIT2(pApi) return sqlite3RtreeInit(db); } #endif #endif /************** End of rtree.c ***********************************************/ /************** Begin file icu.c *********************************************/ /* ** 2007 May 6 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. 
** ************************************************************************* ** $Id: icu.c,v 1.7 2007/12/13 21:54:11 drh Exp $ ** ** This file implements an integration between the ICU library ** ("International Components for Unicode", an open-source library ** for handling unicode data) and SQLite. The integration uses ** ICU to provide the following to SQLite: ** ** * An implementation of the SQL regexp() function (and hence REGEXP ** operator) using the ICU uregex_XX() APIs. ** ** * Implementations of the SQL scalar upper() and lower() functions ** for case mapping. ** ** * Integration of ICU and SQLite collation sequences. ** ** * An implementation of the LIKE operator that uses ICU to ** provide case-independent matching. */ #if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_ICU) /* Include ICU headers */ #include #include #include #include /* #include */ #ifndef SQLITE_CORE /* #include "sqlite3ext.h" */ SQLITE_EXTENSION_INIT1 #else /* #include "sqlite3.h" */ #endif /* ** Maximum length (in bytes) of the pattern in a LIKE or GLOB ** operator. */ #ifndef SQLITE_MAX_LIKE_PATTERN_LENGTH # define SQLITE_MAX_LIKE_PATTERN_LENGTH 50000 #endif /* ** Version of sqlite3_free() that is always a function, never a macro. */ static void xFree(void *p){ sqlite3_free(p); } /* ** Compare two UTF-8 strings for equality where the first string is ** a "LIKE" expression. Return true (1) if they are the same and ** false (0) if they are different. */ static int icuLikeCompare( const uint8_t *zPattern, /* LIKE pattern */ const uint8_t *zString, /* The UTF-8 string to compare against */ const UChar32 uEsc /* The escape character */ ){ static const int MATCH_ONE = (UChar32)'_'; static const int MATCH_ALL = (UChar32)'%'; int iPattern = 0; /* Current byte index in zPattern */ int iString = 0; /* Current byte index in zString */ int prevEscape = 0; /* True if the previous character was uEsc */ while( zPattern[iPattern]!=0 ){ /* Read (and consume) the next character from the input pattern. */ UChar32 uPattern; U8_NEXT_UNSAFE(zPattern, iPattern, uPattern); /* There are now 4 possibilities: ** ** 1. uPattern is an unescaped match-all character "%", ** 2. uPattern is an unescaped match-one character "_", ** 3. uPattern is an unescaped escape character, or ** 4. uPattern is to be handled as an ordinary character */ if( !prevEscape && uPattern==MATCH_ALL ){ /* Case 1. */ uint8_t c; /* Skip any MATCH_ALL or MATCH_ONE characters that follow a ** MATCH_ALL. For each MATCH_ONE, skip one character in the ** test string. */ while( (c=zPattern[iPattern]) == MATCH_ALL || c == MATCH_ONE ){ if( c==MATCH_ONE ){ if( zString[iString]==0 ) return 0; U8_FWD_1_UNSAFE(zString, iString); } iPattern++; } if( zPattern[iPattern]==0 ) return 1; while( zString[iString] ){ if( icuLikeCompare(&zPattern[iPattern], &zString[iString], uEsc) ){ return 1; } U8_FWD_1_UNSAFE(zString, iString); } return 0; }else if( !prevEscape && uPattern==MATCH_ONE ){ /* Case 2. */ if( zString[iString]==0 ) return 0; U8_FWD_1_UNSAFE(zString, iString); }else if( !prevEscape && uPattern==uEsc){ /* Case 3. */ prevEscape = 1; }else{ /* Case 4. */ UChar32 uString; U8_NEXT_UNSAFE(zString, iString, uString); uString = u_foldCase(uString, U_FOLD_CASE_DEFAULT); uPattern = u_foldCase(uPattern, U_FOLD_CASE_DEFAULT); if( uString!=uPattern ){ return 0; } prevEscape = 0; } } return zString[iString]==0; } /* ** Implementation of the like() SQL function. This function implements ** the build-in LIKE operator. 
The first argument to the function is the ** pattern and the second argument is the string. So, the SQL statements: ** ** A LIKE B ** ** is implemented as like(B, A). If there is an escape character E, ** ** A LIKE B ESCAPE E ** ** is mapped to like(B, A, E). */ static void icuLikeFunc( sqlite3_context *context, int argc, sqlite3_value **argv ){ const unsigned char *zA = sqlite3_value_text(argv[0]); const unsigned char *zB = sqlite3_value_text(argv[1]); UChar32 uEsc = 0; /* Limit the length of the LIKE or GLOB pattern to avoid problems ** of deep recursion and N*N behavior in patternCompare(). */ if( sqlite3_value_bytes(argv[0])>SQLITE_MAX_LIKE_PATTERN_LENGTH ){ sqlite3_result_error(context, "LIKE or GLOB pattern too complex", -1); return; } if( argc==3 ){ /* The escape character string must consist of a single UTF-8 character. ** Otherwise, return an error. */ int nE= sqlite3_value_bytes(argv[2]); const unsigned char *zE = sqlite3_value_text(argv[2]); int i = 0; if( zE==0 ) return; U8_NEXT(zE, i, nE, uEsc); if( i!=nE){ sqlite3_result_error(context, "ESCAPE expression must be a single character", -1); return; } } if( zA && zB ){ sqlite3_result_int(context, icuLikeCompare(zA, zB, uEsc)); } } /* ** This function is called when an ICU function called from within ** the implementation of an SQL scalar function returns an error. ** ** The scalar function context passed as the first argument is ** loaded with an error message based on the following two args. */ static void icuFunctionError( sqlite3_context *pCtx, /* SQLite scalar function context */ const char *zName, /* Name of ICU function that failed */ UErrorCode e /* Error code returned by ICU function */ ){ char zBuf[128]; sqlite3_snprintf(128, zBuf, "ICU error: %s(): %s", zName, u_errorName(e)); zBuf[127] = '\0'; sqlite3_result_error(pCtx, zBuf, -1); } /* ** Function to delete compiled regexp objects. Registered as ** a destructor function with sqlite3_set_auxdata(). */ static void icuRegexpDelete(void *p){ URegularExpression *pExpr = (URegularExpression *)p; uregex_close(pExpr); } /* ** Implementation of SQLite REGEXP operator. This scalar function takes ** two arguments. The first is a regular expression pattern to compile ** the second is a string to match against that pattern. If either ** argument is an SQL NULL, then NULL Is returned. Otherwise, the result ** is 1 if the string matches the pattern, or 0 otherwise. ** ** SQLite maps the regexp() function to the regexp() operator such ** that the following two are equivalent: ** ** zString REGEXP zPattern ** regexp(zPattern, zString) ** ** Uses the following ICU regexp APIs: ** ** uregex_open() ** uregex_matches() ** uregex_close() */ static void icuRegexpFunc(sqlite3_context *p, int nArg, sqlite3_value **apArg){ UErrorCode status = U_ZERO_ERROR; URegularExpression *pExpr; UBool res; const UChar *zString = sqlite3_value_text16(apArg[1]); (void)nArg; /* Unused parameter */ /* If the left hand side of the regexp operator is NULL, ** then the result is also NULL. */ if( !zString ){ return; } pExpr = sqlite3_get_auxdata(p, 0); if( !pExpr ){ const UChar *zPattern = sqlite3_value_text16(apArg[0]); if( !zPattern ){ return; } pExpr = uregex_open(zPattern, -1, 0, 0, &status); if( U_SUCCESS(status) ){ sqlite3_set_auxdata(p, 0, pExpr, icuRegexpDelete); }else{ assert(!pExpr); icuFunctionError(p, "uregex_open", status); return; } } /* Configure the text that the regular expression operates on. 
*/ uregex_setText(pExpr, zString, -1, &status); if( !U_SUCCESS(status) ){ icuFunctionError(p, "uregex_setText", status); return; } /* Attempt the match */ res = uregex_matches(pExpr, 0, &status); if( !U_SUCCESS(status) ){ icuFunctionError(p, "uregex_matches", status); return; } /* Set the text that the regular expression operates on to a NULL ** pointer. This is not really necessary, but it is tidier than ** leaving the regular expression object configured with an invalid ** pointer after this function returns. */ uregex_setText(pExpr, 0, 0, &status); /* Return 1 or 0. */ sqlite3_result_int(p, res ? 1 : 0); } /* ** Implementations of scalar functions for case mapping - upper() and ** lower(). Function upper() converts its input to upper-case (ABC). ** Function lower() converts to lower-case (abc). ** ** ICU provides two types of case mapping, "general" case mapping and ** "language specific". Refer to ICU documentation for the differences ** between the two. ** ** To utilise "general" case mapping, the upper() or lower() scalar ** functions are invoked with one argument: ** ** upper('ABC') -> 'abc' ** lower('abc') -> 'ABC' ** ** To access ICU "language specific" case mapping, upper() or lower() ** should be invoked with two arguments. The second argument is the name ** of the locale to use. Passing an empty string ("") or SQL NULL value ** as the second argument is the same as invoking the 1 argument version ** of upper() or lower(). ** ** lower('I', 'en_us') -> 'i' ** lower('I', 'tr_tr') -> 'ı' (small dotless i) ** ** http://www.icu-project.org/userguide/posix.html#case_mappings */ static void icuCaseFunc16(sqlite3_context *p, int nArg, sqlite3_value **apArg){ const UChar *zInput; UChar *zOutput; int nInput; int nOutput; UErrorCode status = U_ZERO_ERROR; const char *zLocale = 0; assert(nArg==1 || nArg==2); if( nArg==2 ){ zLocale = (const char *)sqlite3_value_text(apArg[1]); } zInput = sqlite3_value_text16(apArg[0]); if( !zInput ){ return; } nInput = sqlite3_value_bytes16(apArg[0]); nOutput = nInput * 2 + 2; zOutput = sqlite3_malloc(nOutput); if( !zOutput ){ return; } if( sqlite3_user_data(p) ){ u_strToUpper(zOutput, nOutput/2, zInput, nInput/2, zLocale, &status); }else{ u_strToLower(zOutput, nOutput/2, zInput, nInput/2, zLocale, &status); } if( !U_SUCCESS(status) ){ icuFunctionError(p, "u_strToLower()/u_strToUpper", status); return; } sqlite3_result_text16(p, zOutput, -1, xFree); } /* ** Collation sequence destructor function. The pCtx argument points to ** a UCollator structure previously allocated using ucol_open(). */ static void icuCollationDel(void *pCtx){ UCollator *p = (UCollator *)pCtx; ucol_close(p); } /* ** Collation sequence comparison function. The pCtx argument points to ** a UCollator structure previously allocated using ucol_open(). */ static int icuCollationColl( void *pCtx, int nLeft, const void *zLeft, int nRight, const void *zRight ){ UCollationResult res; UCollator *p = (UCollator *)pCtx; res = ucol_strcoll(p, (UChar *)zLeft, nLeft/2, (UChar *)zRight, nRight/2); switch( res ){ case UCOL_LESS: return -1; case UCOL_GREATER: return +1; case UCOL_EQUAL: return 0; } assert(!"Unexpected return value from ucol_strcoll()"); return 0; } /* ** Implementation of the scalar function icu_load_collation(). ** ** This scalar function is used to add ICU collation based collation ** types to an SQLite database connection. It is intended to be called ** as follows: ** ** SELECT icu_load_collation(, ); ** ** Where is a string containing an ICU locale identifier (i.e. 
** "en_AU", "tr_TR" etc.) and is the name of the ** collation sequence to create. */ static void icuLoadCollation( sqlite3_context *p, int nArg, sqlite3_value **apArg ){ sqlite3 *db = (sqlite3 *)sqlite3_user_data(p); UErrorCode status = U_ZERO_ERROR; const char *zLocale; /* Locale identifier - (eg. "jp_JP") */ const char *zName; /* SQL Collation sequence name (eg. "japanese") */ UCollator *pUCollator; /* ICU library collation object */ int rc; /* Return code from sqlite3_create_collation_x() */ assert(nArg==2); (void)nArg; /* Unused parameter */ zLocale = (const char *)sqlite3_value_text(apArg[0]); zName = (const char *)sqlite3_value_text(apArg[1]); if( !zLocale || !zName ){ return; } pUCollator = ucol_open(zLocale, &status); if( !U_SUCCESS(status) ){ icuFunctionError(p, "ucol_open", status); return; } assert(p); rc = sqlite3_create_collation_v2(db, zName, SQLITE_UTF16, (void *)pUCollator, icuCollationColl, icuCollationDel ); if( rc!=SQLITE_OK ){ ucol_close(pUCollator); sqlite3_result_error(p, "Error registering collation function", -1); } } /* ** Register the ICU extension functions with database db. */ SQLITE_PRIVATE int sqlite3IcuInit(sqlite3 *db){ struct IcuScalar { const char *zName; /* Function name */ int nArg; /* Number of arguments */ int enc; /* Optimal text encoding */ void *pContext; /* sqlite3_user_data() context */ void (*xFunc)(sqlite3_context*,int,sqlite3_value**); } scalars[] = { {"regexp", 2, SQLITE_ANY, 0, icuRegexpFunc}, {"lower", 1, SQLITE_UTF16, 0, icuCaseFunc16}, {"lower", 2, SQLITE_UTF16, 0, icuCaseFunc16}, {"upper", 1, SQLITE_UTF16, (void*)1, icuCaseFunc16}, {"upper", 2, SQLITE_UTF16, (void*)1, icuCaseFunc16}, {"lower", 1, SQLITE_UTF8, 0, icuCaseFunc16}, {"lower", 2, SQLITE_UTF8, 0, icuCaseFunc16}, {"upper", 1, SQLITE_UTF8, (void*)1, icuCaseFunc16}, {"upper", 2, SQLITE_UTF8, (void*)1, icuCaseFunc16}, {"like", 2, SQLITE_UTF8, 0, icuLikeFunc}, {"like", 3, SQLITE_UTF8, 0, icuLikeFunc}, {"icu_load_collation", 2, SQLITE_UTF8, (void*)db, icuLoadCollation}, }; int rc = SQLITE_OK; int i; for(i=0; rc==SQLITE_OK && i<(int)(sizeof(scalars)/sizeof(scalars[0])); i++){ struct IcuScalar *p = &scalars[i]; rc = sqlite3_create_function( db, p->zName, p->nArg, p->enc, p->pContext, p->xFunc, 0, 0 ); } return rc; } #if !SQLITE_CORE #ifdef _WIN32 __declspec(dllexport) #endif SQLITE_API int SQLITE_STDCALL sqlite3_icu_init( sqlite3 *db, char **pzErrMsg, const sqlite3_api_routines *pApi ){ SQLITE_EXTENSION_INIT2(pApi) return sqlite3IcuInit(db); } #endif #endif /************** End of icu.c *************************************************/ /************** Begin file fts3_icu.c ****************************************/ /* ** 2007 June 22 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** This file implements a tokenizer for fts3 based on the ICU library. 
*/ /* #include "fts3Int.h" */ #if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) #ifdef SQLITE_ENABLE_ICU /* #include */ /* #include */ /* #include "fts3_tokenizer.h" */ #include /* #include */ /* #include */ #include typedef struct IcuTokenizer IcuTokenizer; typedef struct IcuCursor IcuCursor; struct IcuTokenizer { sqlite3_tokenizer base; char *zLocale; }; struct IcuCursor { sqlite3_tokenizer_cursor base; UBreakIterator *pIter; /* ICU break-iterator object */ int nChar; /* Number of UChar elements in pInput */ UChar *aChar; /* Copy of input using utf-16 encoding */ int *aOffset; /* Offsets of each character in utf-8 input */ int nBuffer; char *zBuffer; int iToken; }; /* ** Create a new tokenizer instance. */ static int icuCreate( int argc, /* Number of entries in argv[] */ const char * const *argv, /* Tokenizer creation arguments */ sqlite3_tokenizer **ppTokenizer /* OUT: Created tokenizer */ ){ IcuTokenizer *p; int n = 0; if( argc>0 ){ n = strlen(argv[0])+1; } p = (IcuTokenizer *)sqlite3_malloc(sizeof(IcuTokenizer)+n); if( !p ){ return SQLITE_NOMEM; } memset(p, 0, sizeof(IcuTokenizer)); if( n ){ p->zLocale = (char *)&p[1]; memcpy(p->zLocale, argv[0], n); } *ppTokenizer = (sqlite3_tokenizer *)p; return SQLITE_OK; } /* ** Destroy a tokenizer */ static int icuDestroy(sqlite3_tokenizer *pTokenizer){ IcuTokenizer *p = (IcuTokenizer *)pTokenizer; sqlite3_free(p); return SQLITE_OK; } /* ** Prepare to begin tokenizing a particular string. The input ** string to be tokenized is pInput[0..nBytes-1]. A cursor ** used to incrementally tokenize this string is returned in ** *ppCursor. */ static int icuOpen( sqlite3_tokenizer *pTokenizer, /* The tokenizer */ const char *zInput, /* Input string */ int nInput, /* Length of zInput in bytes */ sqlite3_tokenizer_cursor **ppCursor /* OUT: Tokenization cursor */ ){ IcuTokenizer *p = (IcuTokenizer *)pTokenizer; IcuCursor *pCsr; const int32_t opt = U_FOLD_CASE_DEFAULT; UErrorCode status = U_ZERO_ERROR; int nChar; UChar32 c; int iInput = 0; int iOut = 0; *ppCursor = 0; if( zInput==0 ){ nInput = 0; zInput = ""; }else if( nInput<0 ){ nInput = strlen(zInput); } nChar = nInput+1; pCsr = (IcuCursor *)sqlite3_malloc( sizeof(IcuCursor) + /* IcuCursor */ ((nChar+3)&~3) * sizeof(UChar) + /* IcuCursor.aChar[] */ (nChar+1) * sizeof(int) /* IcuCursor.aOffset[] */ ); if( !pCsr ){ return SQLITE_NOMEM; } memset(pCsr, 0, sizeof(IcuCursor)); pCsr->aChar = (UChar *)&pCsr[1]; pCsr->aOffset = (int *)&pCsr->aChar[(nChar+3)&~3]; pCsr->aOffset[iOut] = iInput; U8_NEXT(zInput, iInput, nInput, c); while( c>0 ){ int isError = 0; c = u_foldCase(c, opt); U16_APPEND(pCsr->aChar, iOut, nChar, c, isError); if( isError ){ sqlite3_free(pCsr); return SQLITE_ERROR; } pCsr->aOffset[iOut] = iInput; if( iInputpIter = ubrk_open(UBRK_WORD, p->zLocale, pCsr->aChar, iOut, &status); if( !U_SUCCESS(status) ){ sqlite3_free(pCsr); return SQLITE_ERROR; } pCsr->nChar = iOut; ubrk_first(pCsr->pIter); *ppCursor = (sqlite3_tokenizer_cursor *)pCsr; return SQLITE_OK; } /* ** Close a tokenization cursor previously opened by a call to icuOpen(). */ static int icuClose(sqlite3_tokenizer_cursor *pCursor){ IcuCursor *pCsr = (IcuCursor *)pCursor; ubrk_close(pCsr->pIter); sqlite3_free(pCsr->zBuffer); sqlite3_free(pCsr); return SQLITE_OK; } /* ** Extract the next token from a tokenization cursor. 
*/ static int icuNext( sqlite3_tokenizer_cursor *pCursor, /* Cursor returned by simpleOpen */ const char **ppToken, /* OUT: *ppToken is the token text */ int *pnBytes, /* OUT: Number of bytes in token */ int *piStartOffset, /* OUT: Starting offset of token */ int *piEndOffset, /* OUT: Ending offset of token */ int *piPosition /* OUT: Position integer of token */ ){ IcuCursor *pCsr = (IcuCursor *)pCursor; int iStart = 0; int iEnd = 0; int nByte = 0; while( iStart==iEnd ){ UChar32 c; iStart = ubrk_current(pCsr->pIter); iEnd = ubrk_next(pCsr->pIter); if( iEnd==UBRK_DONE ){ return SQLITE_DONE; } while( iStartaChar, iWhite, pCsr->nChar, c); if( u_isspace(c) ){ iStart = iWhite; }else{ break; } } assert(iStart<=iEnd); } do { UErrorCode status = U_ZERO_ERROR; if( nByte ){ char *zNew = sqlite3_realloc(pCsr->zBuffer, nByte); if( !zNew ){ return SQLITE_NOMEM; } pCsr->zBuffer = zNew; pCsr->nBuffer = nByte; } u_strToUTF8( pCsr->zBuffer, pCsr->nBuffer, &nByte, /* Output vars */ &pCsr->aChar[iStart], iEnd-iStart, /* Input vars */ &status /* Output success/failure */ ); } while( nByte>pCsr->nBuffer ); *ppToken = pCsr->zBuffer; *pnBytes = nByte; *piStartOffset = pCsr->aOffset[iStart]; *piEndOffset = pCsr->aOffset[iEnd]; *piPosition = pCsr->iToken++; return SQLITE_OK; } /* ** The set of routines that implement the simple tokenizer */ static const sqlite3_tokenizer_module icuTokenizerModule = { 0, /* iVersion */ icuCreate, /* xCreate */ icuDestroy, /* xCreate */ icuOpen, /* xOpen */ icuClose, /* xClose */ icuNext, /* xNext */ 0, /* xLanguageid */ }; /* ** Set *ppModule to point at the implementation of the ICU tokenizer. */ SQLITE_PRIVATE void sqlite3Fts3IcuTokenizerModule( sqlite3_tokenizer_module const**ppModule ){ *ppModule = &icuTokenizerModule; } #endif /* defined(SQLITE_ENABLE_ICU) */ #endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS3) */ /************** End of fts3_icu.c ********************************************/ /************** Begin file sqlite3rbu.c **************************************/ /* ** 2014 August 30 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** ** ** OVERVIEW ** ** The RBU extension requires that the RBU update be packaged as an ** SQLite database. The tables it expects to find are described in ** sqlite3rbu.h. Essentially, for each table xyz in the target database ** that the user wishes to write to, a corresponding data_xyz table is ** created in the RBU database and populated with one row for each row to ** update, insert or delete from the target table. ** ** The update proceeds in three stages: ** ** 1) The database is updated. The modified database pages are written ** to a *-oal file. A *-oal file is just like a *-wal file, except ** that it is named "-oal" instead of "-wal". ** Because regular SQLite clients do not look for file named ** "-oal", they go on using the original database in ** rollback mode while the *-oal file is being generated. ** ** During this stage RBU does not update the database by writing ** directly to the target tables. Instead it creates "imposter" ** tables using the SQLITE_TESTCTRL_IMPOSTER interface that it uses ** to update each b-tree individually. 
All updates required by each ** b-tree are completed before moving on to the next, and all ** updates are done in sorted key order. ** ** 2) The "-oal" file is moved to the equivalent "-wal" ** location using a call to rename(2). Before doing this the RBU ** module takes an EXCLUSIVE lock on the database file, ensuring ** that there are no other active readers. ** ** Once the EXCLUSIVE lock is released, any other database readers ** detect the new *-wal file and read the database in wal mode. At ** this point they see the new version of the database - including ** the updates made as part of the RBU update. ** ** 3) The new *-wal file is checkpointed. This proceeds in the same way ** as a regular database checkpoint, except that a single frame is ** checkpointed each time sqlite3rbu_step() is called. If the RBU ** handle is closed before the entire *-wal file is checkpointed, ** the checkpoint progress is saved in the RBU database and the ** checkpoint can be resumed by another RBU client at some point in ** the future. ** ** POTENTIAL PROBLEMS ** ** The rename() call might not be portable. And RBU is not currently ** syncing the directory after renaming the file. ** ** When state is saved, any commit to the *-oal file and the commit to ** the RBU update database are not atomic. So if the power fails at the ** wrong moment they might get out of sync. As the main database will be ** committed before the RBU update database this will likely either just ** pass unnoticed, or result in SQLITE_CONSTRAINT errors (due to UNIQUE ** constraint violations). ** ** If some client does modify the target database mid RBU update, or some ** other error occurs, the RBU extension will keep throwing errors. It's ** not really clear how to get out of this state. The system could just ** by delete the RBU update database and *-oal file and have the device ** download the update again and start over. ** ** At present, for an UPDATE, both the new.* and old.* records are ** collected in the rbu_xyz table. And for both UPDATEs and DELETEs all ** fields are collected. This means we're probably writing a lot more ** data to disk when saving the state of an ongoing update to the RBU ** update database than is strictly necessary. ** */ /* #include */ /* #include */ /* #include */ #if !defined(_WIN32) /* # include */ #endif /* #include "sqlite3.h" */ #if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_RBU) /************** Include sqlite3rbu.h in the middle of sqlite3rbu.c ***********/ /************** Begin file sqlite3rbu.h **************************************/ /* ** 2014 August 30 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** ** This file contains the public interface for the RBU extension. */ /* ** SUMMARY ** ** Writing a transaction containing a large number of operations on ** b-tree indexes that are collectively larger than the available cache ** memory can be very inefficient. ** ** The problem is that in order to update a b-tree, the leaf page (at least) ** containing the entry being inserted or deleted must be modified. 
If the ** working set of leaves is larger than the available cache memory, then a ** single leaf that is modified more than once as part of the transaction ** may be loaded from or written to the persistent media multiple times. ** Additionally, because the index updates are likely to be applied in ** random order, access to pages within the database is also likely to be in ** random order, which is itself quite inefficient. ** ** One way to improve the situation is to sort the operations on each index ** by index key before applying them to the b-tree. This leads to an IO ** pattern that resembles a single linear scan through the index b-tree, ** and all but guarantees each modified leaf page is loaded and stored ** exactly once. SQLite uses this trick to improve the performance of ** CREATE INDEX commands. This extension allows it to be used to improve ** the performance of large transactions on existing databases. ** ** Additionally, this extension allows the work involved in writing the ** large transaction to be broken down into sub-transactions performed ** sequentially by separate processes. This is useful if the system cannot ** guarantee that a single update process will run for long enough to apply ** the entire update, for example because the update is being applied on a ** mobile device that is frequently rebooted. Even after the writer process ** has committed one or more sub-transactions, other database clients continue ** to read from the original database snapshot. In other words, partially ** applied transactions are not visible to other clients. ** ** "RBU" stands for "Resumable Bulk Update". As in a large database update ** transmitted via a wireless network to a mobile device. A transaction ** applied using this extension is hence refered to as an "RBU update". ** ** ** LIMITATIONS ** ** An "RBU update" transaction is subject to the following limitations: ** ** * The transaction must consist of INSERT, UPDATE and DELETE operations ** only. ** ** * INSERT statements may not use any default values. ** ** * UPDATE and DELETE statements must identify their target rows by ** non-NULL PRIMARY KEY values. Rows with NULL values stored in PRIMARY ** KEY fields may not be updated or deleted. If the table being written ** has no PRIMARY KEY, affected rows must be identified by rowid. ** ** * UPDATE statements may not modify PRIMARY KEY columns. ** ** * No triggers will be fired. ** ** * No foreign key violations are detected or reported. ** ** * CHECK constraints are not enforced. ** ** * No constraint handling mode except for "OR ROLLBACK" is supported. ** ** ** PREPARATION ** ** An "RBU update" is stored as a separate SQLite database. A database ** containing an RBU update is an "RBU database". For each table in the ** target database to be updated, the RBU database should contain a table ** named "data_" containing the same set of columns as the ** target table, and one more - "rbu_control". The data_% table should ** have no PRIMARY KEY or UNIQUE constraints, but each column should have ** the same type as the corresponding column in the target database. ** The "rbu_control" column should have no type at all. For example, if ** the target database contains: ** ** CREATE TABLE t1(a INTEGER PRIMARY KEY, b TEXT, c UNIQUE); ** ** Then the RBU database should contain: ** ** CREATE TABLE data_t1(a INTEGER, b TEXT, c, rbu_control); ** ** The order of the columns in the data_% table does not matter. 
** ** If the target database table is a virtual table or a table that has no ** PRIMARY KEY declaration, the data_% table must also contain a column ** named "rbu_rowid". This column is mapped to the tables implicit primary ** key column - "rowid". Virtual tables for which the "rowid" column does ** not function like a primary key value cannot be updated using RBU. For ** example, if the target db contains either of the following: ** ** CREATE VIRTUAL TABLE x1 USING fts3(a, b); ** CREATE TABLE x1(a, b) ** ** then the RBU database should contain: ** ** CREATE TABLE data_x1(a, b, rbu_rowid, rbu_control); ** ** All non-hidden columns (i.e. all columns matched by "SELECT *") of the ** target table must be present in the input table. For virtual tables, ** hidden columns are optional - they are updated by RBU if present in ** the input table, or not otherwise. For example, to write to an fts4 ** table with a hidden languageid column such as: ** ** CREATE VIRTUAL TABLE ft1 USING fts4(a, b, languageid='langid'); ** ** Either of the following input table schemas may be used: ** ** CREATE TABLE data_ft1(a, b, langid, rbu_rowid, rbu_control); ** CREATE TABLE data_ft1(a, b, rbu_rowid, rbu_control); ** ** For each row to INSERT into the target database as part of the RBU ** update, the corresponding data_% table should contain a single record ** with the "rbu_control" column set to contain integer value 0. The ** other columns should be set to the values that make up the new record ** to insert. ** ** If the target database table has an INTEGER PRIMARY KEY, it is not ** possible to insert a NULL value into the IPK column. Attempting to ** do so results in an SQLITE_MISMATCH error. ** ** For each row to DELETE from the target database as part of the RBU ** update, the corresponding data_% table should contain a single record ** with the "rbu_control" column set to contain integer value 1. The ** real primary key values of the row to delete should be stored in the ** corresponding columns of the data_% table. The values stored in the ** other columns are not used. ** ** For each row to UPDATE from the target database as part of the RBU ** update, the corresponding data_% table should contain a single record ** with the "rbu_control" column set to contain a value of type text. ** The real primary key values identifying the row to update should be ** stored in the corresponding columns of the data_% table row, as should ** the new values of all columns being update. The text value in the ** "rbu_control" column must contain the same number of characters as ** there are columns in the target database table, and must consist entirely ** of 'x' and '.' characters (or in some special cases 'd' - see below). For ** each column that is being updated, the corresponding character is set to ** 'x'. For those that remain as they are, the corresponding character of the ** rbu_control value should be set to '.'. For example, given the tables ** above, the update statement: ** ** UPDATE t1 SET c = 'usa' WHERE a = 4; ** ** is represented by the data_t1 row created by: ** ** INSERT INTO data_t1(a, b, c, rbu_control) VALUES(4, NULL, 'usa', '..x'); ** ** Instead of an 'x' character, characters of the rbu_control value specified ** for UPDATEs may also be set to 'd'. In this case, instead of updating the ** target table with the value stored in the corresponding data_% column, the ** user-defined SQL function "rbu_delta()" is invoked and the result stored in ** the target table column. 
rbu_delta() is invoked with two arguments - the ** original value currently stored in the target table column and the ** value specified in the data_xxx table. ** ** For example, this row: ** ** INSERT INTO data_t1(a, b, c, rbu_control) VALUES(4, NULL, 'usa', '..d'); ** ** is similar to an UPDATE statement such as: ** ** UPDATE t1 SET c = rbu_delta(c, 'usa') WHERE a = 4; ** ** If the target database table is a virtual table or a table with no PRIMARY ** KEY, the rbu_control value should not include a character corresponding ** to the rbu_rowid value. For example, this: ** ** INSERT INTO data_ft1(a, b, rbu_rowid, rbu_control) ** VALUES(NULL, 'usa', 12, '.x'); ** ** causes a result similar to: ** ** UPDATE ft1 SET b = 'usa' WHERE rowid = 12; ** ** The data_xxx tables themselves should have no PRIMARY KEY declarations. ** However, RBU is more efficient if reading the rows in from each data_xxx ** table in "rowid" order is roughly the same as reading them sorted by ** the PRIMARY KEY of the corresponding target database table. In other ** words, rows should be sorted using the destination table PRIMARY KEY ** fields before they are inserted into the data_xxx tables. ** ** USAGE ** ** The API declared below allows an application to apply an RBU update ** stored on disk to an existing target database. Essentially, the ** application: ** ** 1) Opens an RBU handle using the sqlite3rbu_open() function. ** ** 2) Registers any required virtual table modules with the database ** handle returned by sqlite3rbu_db(). Also, if required, register ** the rbu_delta() implementation. ** ** 3) Calls the sqlite3rbu_step() function one or more times on ** the new handle. Each call to sqlite3rbu_step() performs a single ** b-tree operation, so thousands of calls may be required to apply ** a complete update. ** ** 4) Calls sqlite3rbu_close() to close the RBU update handle. If ** sqlite3rbu_step() has been called enough times to completely ** apply the update to the target database, then the RBU database ** is marked as fully applied. Otherwise, the state of the RBU ** update application is saved in the RBU database for later ** resumption. ** ** See comments below for more detail on APIs. ** ** If an update is only partially applied to the target database by the ** time sqlite3rbu_close() is called, various state information is saved ** within the RBU database. This allows subsequent processes to automatically ** resume the RBU update from where it left off. ** ** To remove all RBU extension state information, returning an RBU database ** to its original contents, it is sufficient to drop all tables that begin ** with the prefix "rbu_" ** ** DATABASE LOCKING ** ** An RBU update may not be applied to a database in WAL mode. Attempting ** to do so is an error (SQLITE_ERROR). ** ** While an RBU handle is open, a SHARED lock may be held on the target ** database file. This means it is possible for other clients to read the ** database, but not to write it. ** ** If an RBU update is started and then suspended before it is completed, ** then an external client writes to the database, then attempting to resume ** the suspended RBU update is also an error (SQLITE_BUSY). */ #ifndef _SQLITE3RBU_H #define _SQLITE3RBU_H /* #include "sqlite3.h" ** Required for error code definitions ** */ typedef struct sqlite3rbu sqlite3rbu; /* ** Open an RBU handle. ** ** Argument zTarget is the path to the target database. Argument zRbu is ** the path to the RBU database. 
Each call to this function must be matched ** by a call to sqlite3rbu_close(). When opening the databases, RBU passes ** the SQLITE_CONFIG_URI flag to sqlite3_open_v2(). So if either zTarget ** or zRbu begin with "file:", it will be interpreted as an SQLite ** database URI, not a regular file name. ** ** If the zState argument is passed a NULL value, the RBU extension stores ** the current state of the update (how many rows have been updated, which ** indexes are yet to be updated etc.) within the RBU database itself. This ** can be convenient, as it means that the RBU application does not need to ** organize removing a separate state file after the update is concluded. ** Or, if zState is non-NULL, it must be a path to a database file in which ** the RBU extension can store the state of the update. ** ** When resuming an RBU update, the zState argument must be passed the same ** value as when the RBU update was started. ** ** Once the RBU update is finished, the RBU extension does not ** automatically remove any zState database file, even if it created it. ** ** By default, RBU uses the default VFS to access the files on disk. To ** use a VFS other than the default, an SQLite "file:" URI containing a ** "vfs=..." option may be passed as the zTarget option. ** ** IMPORTANT NOTE FOR ZIPVFS USERS: The RBU extension works with all of ** SQLite's built-in VFSs, including the multiplexor VFS. However it does ** not work out of the box with zipvfs. Refer to the comment describing ** the zipvfs_create_vfs() API below for details on using RBU with zipvfs. */ SQLITE_API sqlite3rbu *SQLITE_STDCALL sqlite3rbu_open( const char *zTarget, const char *zRbu, const char *zState ); /* ** Internally, each RBU connection uses a separate SQLite database ** connection to access the target and rbu update databases. This ** API allows the application direct access to these database handles. ** ** The first argument passed to this function must be a valid, open, RBU ** handle. The second argument should be passed zero to access the target ** database handle, or non-zero to access the rbu update database handle. ** Accessing the underlying database handles may be useful in the ** following scenarios: ** ** * If any target tables are virtual tables, it may be necessary to ** call sqlite3_create_module() on the target database handle to ** register the required virtual table implementations. ** ** * If the data_xxx tables in the RBU source database are virtual ** tables, the application may need to call sqlite3_create_module() on ** the rbu update db handle to any required virtual table ** implementations. ** ** * If the application uses the "rbu_delta()" feature described above, ** it must use sqlite3_create_function() or similar to register the ** rbu_delta() implementation with the target database handle. ** ** If an error has occurred, either while opening or stepping the RBU object, ** this function may return NULL. The error code and message may be collected ** when sqlite3rbu_close() is called. */ SQLITE_API sqlite3 *SQLITE_STDCALL sqlite3rbu_db(sqlite3rbu*, int bRbu); /* ** Do some work towards applying the RBU update to the target db. ** ** Return SQLITE_DONE if the update has been completely applied, or ** SQLITE_OK if no error occurs but there remains work to do to apply ** the RBU update. If an error does occur, some other error code is ** returned. 
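**
** A minimal driver loop might look like the following (an illustrative
** sketch only; the file names are hypothetical and error handling other
** than inspecting the final return code is omitted):
**
**     sqlite3rbu *p = sqlite3rbu_open("target.db", "update.rbu", 0);
**     while( sqlite3rbu_step(p)==SQLITE_OK ){ }
**     char *zErr = 0;
**     int rc = sqlite3rbu_close(p, &zErr);
**     sqlite3_free(zErr);
**
** Here rc is SQLITE_DONE if the update was completely applied, or an
** error code otherwise.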
** ** Once a call to sqlite3rbu_step() has returned a value other than ** SQLITE_OK, all subsequent calls on the same RBU handle are no-ops ** that immediately return the same value. */ SQLITE_API int SQLITE_STDCALL sqlite3rbu_step(sqlite3rbu *pRbu); /* ** Close an RBU handle. ** ** If the RBU update has been completely applied, mark the RBU database ** as fully applied. Otherwise, assuming no error has occurred, save the ** current state of the RBU update appliation to the RBU database. ** ** If an error has already occurred as part of an sqlite3rbu_step() ** or sqlite3rbu_open() call, or if one occurs within this function, an ** SQLite error code is returned. Additionally, *pzErrmsg may be set to ** point to a buffer containing a utf-8 formatted English language error ** message. It is the responsibility of the caller to eventually free any ** such buffer using sqlite3_free(). ** ** Otherwise, if no error occurs, this function returns SQLITE_OK if the ** update has been partially applied, or SQLITE_DONE if it has been ** completely applied. */ SQLITE_API int SQLITE_STDCALL sqlite3rbu_close(sqlite3rbu *pRbu, char **pzErrmsg); /* ** Return the total number of key-value operations (inserts, deletes or ** updates) that have been performed on the target database since the ** current RBU update was started. */ SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3rbu_progress(sqlite3rbu *pRbu); /* ** Create an RBU VFS named zName that accesses the underlying file-system ** via existing VFS zParent. Or, if the zParent parameter is passed NULL, ** then the new RBU VFS uses the default system VFS to access the file-system. ** The new object is registered as a non-default VFS with SQLite before ** returning. ** ** Part of the RBU implementation uses a custom VFS object. Usually, this ** object is created and deleted automatically by RBU. ** ** The exception is for applications that also use zipvfs. In this case, ** the custom VFS must be explicitly created by the user before the RBU ** handle is opened. The RBU VFS should be installed so that the zipvfs ** VFS uses the RBU VFS, which in turn uses any other VFS layers in use ** (for example multiplexor) to access the file-system. For example, ** to assemble an RBU enabled VFS stack that uses both zipvfs and ** multiplexor (error checking omitted): ** ** // Create a VFS named "multiplex" (not the default). ** sqlite3_multiplex_initialize(0, 0); ** ** // Create an rbu VFS named "rbu" that uses multiplexor. If the ** // second argument were replaced with NULL, the "rbu" VFS would ** // access the file-system via the system default VFS, bypassing the ** // multiplexor. ** sqlite3rbu_create_vfs("rbu", "multiplex"); ** ** // Create a zipvfs VFS named "zipvfs" that uses rbu. ** zipvfs_create_vfs_v3("zipvfs", "rbu", 0, xCompressorAlgorithmDetector); ** ** // Make zipvfs the default VFS. ** sqlite3_vfs_register(sqlite3_vfs_find("zipvfs"), 1); ** ** Because the default VFS created above includes a RBU functionality, it ** may be used by RBU clients. Attempting to use RBU with a zipvfs VFS stack ** that does not include the RBU layer results in an error. ** ** The overhead of adding the "rbu" VFS to the system is negligible for ** non-RBU users. There is no harm in an application accessing the ** file-system via "rbu" all the time, even if it only uses RBU functionality ** occasionally. 
*/ SQLITE_API int SQLITE_STDCALL sqlite3rbu_create_vfs(const char *zName, const char *zParent); /* ** Deregister and destroy an RBU vfs created by an earlier call to ** sqlite3rbu_create_vfs(). ** ** VFS objects are not reference counted. If a VFS object is destroyed ** before all database handles that use it have been closed, the results ** are undefined. */ SQLITE_API void SQLITE_STDCALL sqlite3rbu_destroy_vfs(const char *zName); #endif /* _SQLITE3RBU_H */ /************** End of sqlite3rbu.h ******************************************/ /************** Continuing where we left off in sqlite3rbu.c *****************/ /* Maximum number of prepared UPDATE statements held by this module */ #define SQLITE_RBU_UPDATE_CACHESIZE 16 /* ** Swap two objects of type TYPE. */ #if !defined(SQLITE_AMALGAMATION) # define SWAP(TYPE,A,B) {TYPE t=A; A=B; B=t;} #endif /* ** The rbu_state table is used to save the state of a partially applied ** update so that it can be resumed later. The table consists of integer ** keys mapped to values as follows: ** ** RBU_STATE_STAGE: ** May be set to integer values 1, 2, 4 or 5. As follows: ** 1: the *-rbu file is currently under construction. ** 2: the *-rbu file has been constructed, but not yet moved ** to the *-wal path. ** 4: the checkpoint is underway. ** 5: the rbu update has been checkpointed. ** ** RBU_STATE_TBL: ** Only valid if STAGE==1. The target database name of the table ** currently being written. ** ** RBU_STATE_IDX: ** Only valid if STAGE==1. The target database name of the index ** currently being written, or NULL if the main table is currently being ** updated. ** ** RBU_STATE_ROW: ** Only valid if STAGE==1. Number of rows already processed for the current ** table/index. ** ** RBU_STATE_PROGRESS: ** Trbul number of sqlite3rbu_step() calls made so far as part of this ** rbu update. ** ** RBU_STATE_CKPT: ** Valid if STAGE==4. The 64-bit checksum associated with the wal-index ** header created by recovering the *-wal file. This is used to detect ** cases when another client appends frames to the *-wal file in the ** middle of an incremental checkpoint (an incremental checkpoint cannot ** be continued if this happens). ** ** RBU_STATE_COOKIE: ** Valid if STAGE==1. The current change-counter cookie value in the ** target db file. ** ** RBU_STATE_OALSZ: ** Valid if STAGE==1. The size in bytes of the *-oal file. */ #define RBU_STATE_STAGE 1 #define RBU_STATE_TBL 2 #define RBU_STATE_IDX 3 #define RBU_STATE_ROW 4 #define RBU_STATE_PROGRESS 5 #define RBU_STATE_CKPT 6 #define RBU_STATE_COOKIE 7 #define RBU_STATE_OALSZ 8 #define RBU_STAGE_OAL 1 #define RBU_STAGE_MOVE 2 #define RBU_STAGE_CAPTURE 3 #define RBU_STAGE_CKPT 4 #define RBU_STAGE_DONE 5 #define RBU_CREATE_STATE \ "CREATE TABLE IF NOT EXISTS %s.rbu_state(k INTEGER PRIMARY KEY, v)" typedef struct RbuFrame RbuFrame; typedef struct RbuObjIter RbuObjIter; typedef struct RbuState RbuState; typedef struct rbu_vfs rbu_vfs; typedef struct rbu_file rbu_file; typedef struct RbuUpdateStmt RbuUpdateStmt; #if !defined(SQLITE_AMALGAMATION) typedef unsigned int u32; typedef unsigned char u8; typedef sqlite3_int64 i64; #endif /* ** These values must match the values defined in wal.c for the equivalent ** locks. These are not magic numbers as they are part of the SQLite file ** format. */ #define WAL_LOCK_WRITE 0 #define WAL_LOCK_CKPT 1 #define WAL_LOCK_READ0 3 /* ** A structure to store values read from the rbu_state table in memory. 
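**
** For example (illustrative only), while an update that uses a separate
** state database is suspended, the saved state can be inspected with:
**
**   SELECT k, v FROM stat.rbu_state;
**
** where the k values correspond to the RBU_STATE_* constants defined
** above ("stat" is the name under which a separate state database is
** attached; if no state database is used, the table lives in the main
** schema of the RBU database itself).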
*/ struct RbuState { int eStage; char *zTbl; char *zIdx; i64 iWalCksum; int nRow; i64 nProgress; u32 iCookie; i64 iOalSz; }; struct RbuUpdateStmt { char *zMask; /* Copy of update mask used with pUpdate */ sqlite3_stmt *pUpdate; /* Last update statement (or NULL) */ RbuUpdateStmt *pNext; }; /* ** An iterator of this type is used to iterate through all objects in ** the target database that require updating. For each such table, the ** iterator visits, in order: ** ** * the table itself, ** * each index of the table (zero or more points to visit), and ** * a special "cleanup table" state. ** ** abIndexed: ** If the table has no indexes on it, abIndexed is set to NULL. Otherwise, ** it points to an array of flags nTblCol elements in size. The flag is ** set for each column that is either a part of the PK or a part of an ** index. Or clear otherwise. ** */ struct RbuObjIter { sqlite3_stmt *pTblIter; /* Iterate through tables */ sqlite3_stmt *pIdxIter; /* Index iterator */ int nTblCol; /* Size of azTblCol[] array */ char **azTblCol; /* Array of unquoted target column names */ char **azTblType; /* Array of target column types */ int *aiSrcOrder; /* src table col -> target table col */ u8 *abTblPk; /* Array of flags, set on target PK columns */ u8 *abNotNull; /* Array of flags, set on NOT NULL columns */ u8 *abIndexed; /* Array of flags, set on indexed & PK cols */ int eType; /* Table type - an RBU_PK_XXX value */ /* Output variables. zTbl==0 implies EOF. */ int bCleanup; /* True in "cleanup" state */ const char *zTbl; /* Name of target db table */ const char *zIdx; /* Name of target db index (or null) */ int iTnum; /* Root page of current object */ int iPkTnum; /* If eType==EXTERNAL, root of PK index */ int bUnique; /* Current index is unique */ /* Statements created by rbuObjIterPrepareAll() */ int nCol; /* Number of columns in current object */ sqlite3_stmt *pSelect; /* Source data */ sqlite3_stmt *pInsert; /* Statement for INSERT operations */ sqlite3_stmt *pDelete; /* Statement for DELETE ops */ sqlite3_stmt *pTmpInsert; /* Insert into rbu_tmp_$zTbl */ /* Last UPDATE used (for PK b-tree updates only), or NULL. */ RbuUpdateStmt *pRbuUpdate; }; /* ** Values for RbuObjIter.eType ** ** 0: Table does not exist (error) ** 1: Table has an implicit rowid. ** 2: Table has an explicit IPK column. ** 3: Table has an external PK index. ** 4: Table is WITHOUT ROWID. ** 5: Table is a virtual table. */ #define RBU_PK_NOTABLE 0 #define RBU_PK_NONE 1 #define RBU_PK_IPK 2 #define RBU_PK_EXTERNAL 3 #define RBU_PK_WITHOUT_ROWID 4 #define RBU_PK_VTAB 5 /* ** Within the RBU_STAGE_OAL stage, each call to sqlite3rbu_step() performs ** one of the following operations. */ #define RBU_INSERT 1 /* Insert on a main table b-tree */ #define RBU_DELETE 2 /* Delete a row from a main table b-tree */ #define RBU_IDX_DELETE 3 /* Delete a row from an aux. index b-tree */ #define RBU_IDX_INSERT 4 /* Insert on an aux. index b-tree */ #define RBU_UPDATE 5 /* Update a row in a main table b-tree */ /* ** A single step of an incremental checkpoint - frame iWalFrame of the wal ** file should be copied to page iDbPage of the database file. */ struct RbuFrame { u32 iDbPage; u32 iWalFrame; }; /* ** RBU handle. 
*/ struct sqlite3rbu { int eStage; /* Value of RBU_STATE_STAGE field */ sqlite3 *dbMain; /* target database handle */ sqlite3 *dbRbu; /* rbu database handle */ char *zTarget; /* Path to target db */ char *zRbu; /* Path to rbu db */ char *zState; /* Path to state db (or NULL if zRbu) */ char zStateDb[5]; /* Db name for state ("stat" or "main") */ int rc; /* Value returned by last rbu_step() call */ char *zErrmsg; /* Error message if rc!=SQLITE_OK */ int nStep; /* Rows processed for current object */ int nProgress; /* Rows processed for all objects */ RbuObjIter objiter; /* Iterator for skipping through tbl/idx */ const char *zVfsName; /* Name of automatically created rbu vfs */ rbu_file *pTargetFd; /* File handle open on target db */ i64 iOalSz; /* The following state variables are used as part of the incremental ** checkpoint stage (eStage==RBU_STAGE_CKPT). See comments surrounding ** function rbuSetupCheckpoint() for details. */ u32 iMaxFrame; /* Largest iWalFrame value in aFrame[] */ u32 mLock; int nFrame; /* Entries in aFrame[] array */ int nFrameAlloc; /* Allocated size of aFrame[] array */ RbuFrame *aFrame; int pgsz; u8 *aBuf; i64 iWalCksum; }; /* ** An rbu VFS is implemented using an instance of this structure. */ struct rbu_vfs { sqlite3_vfs base; /* rbu VFS shim methods */ sqlite3_vfs *pRealVfs; /* Underlying VFS */ sqlite3_mutex *mutex; /* Mutex to protect pMain */ rbu_file *pMain; /* Linked list of main db files */ }; /* ** Each file opened by an rbu VFS is represented by an instance of ** the following structure. */ struct rbu_file { sqlite3_file base; /* sqlite3_file methods */ sqlite3_file *pReal; /* Underlying file handle */ rbu_vfs *pRbuVfs; /* Pointer to the rbu_vfs object */ sqlite3rbu *pRbu; /* Pointer to rbu object (rbu target only) */ int openFlags; /* Flags this file was opened with */ u32 iCookie; /* Cookie value for main db files */ u8 iWriteVer; /* "write-version" value for main db files */ int nShm; /* Number of entries in apShm[] array */ char **apShm; /* Array of mmap'd *-shm regions */ char *zDel; /* Delete this when closing file */ const char *zWal; /* Wal filename for this main db file */ rbu_file *pWalFd; /* Wal file descriptor for this main db */ rbu_file *pMainNext; /* Next MAIN_DB file */ }; /* ** Prepare the SQL statement in buffer zSql against database handle db. ** If successful, set *ppStmt to point to the new statement and return ** SQLITE_OK. ** ** Otherwise, if an error does occur, set *ppStmt to NULL and return ** an SQLite error code. Additionally, set output variable *pzErrmsg to ** point to a buffer containing an error message. It is the responsibility ** of the caller to (eventually) free this buffer using sqlite3_free(). */ static int prepareAndCollectError( sqlite3 *db, sqlite3_stmt **ppStmt, char **pzErrmsg, const char *zSql ){ int rc = sqlite3_prepare_v2(db, zSql, -1, ppStmt, 0); if( rc!=SQLITE_OK ){ *pzErrmsg = sqlite3_mprintf("%s", sqlite3_errmsg(db)); *ppStmt = 0; } return rc; } /* ** Reset the SQL statement passed as the first argument. Return a copy ** of the value returned by sqlite3_reset(). ** ** If an error has occurred, then set *pzErrmsg to point to a buffer ** containing an error message. It is the responsibility of the caller ** to eventually free this buffer using sqlite3_free(). 
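**
** A typical usage pattern within this module (an illustrative sketch
** only) is to step a statement and route any error through this helper:
**
**   rc = sqlite3_step(pStmt);
**   if( rc!=SQLITE_ROW ){
**     rc = resetAndCollectError(pStmt, &p->zErrmsg);
**   }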
*/ static int resetAndCollectError(sqlite3_stmt *pStmt, char **pzErrmsg){ int rc = sqlite3_reset(pStmt); if( rc!=SQLITE_OK ){ *pzErrmsg = sqlite3_mprintf("%s", sqlite3_errmsg(sqlite3_db_handle(pStmt))); } return rc; } /* ** Unless it is NULL, argument zSql points to a buffer allocated using ** sqlite3_malloc containing an SQL statement. This function prepares the SQL ** statement against database db and frees the buffer. If statement ** compilation is successful, *ppStmt is set to point to the new statement ** handle and SQLITE_OK is returned. ** ** Otherwise, if an error occurs, *ppStmt is set to NULL and an error code ** returned. In this case, *pzErrmsg may also be set to point to an error ** message. It is the responsibility of the caller to free this error message ** buffer using sqlite3_free(). ** ** If argument zSql is NULL, this function assumes that an OOM has occurred. ** In this case SQLITE_NOMEM is returned and *ppStmt set to NULL. */ static int prepareFreeAndCollectError( sqlite3 *db, sqlite3_stmt **ppStmt, char **pzErrmsg, char *zSql ){ int rc; assert( *pzErrmsg==0 ); if( zSql==0 ){ rc = SQLITE_NOMEM; *ppStmt = 0; }else{ rc = prepareAndCollectError(db, ppStmt, pzErrmsg, zSql); sqlite3_free(zSql); } return rc; } /* ** Free the RbuObjIter.azTblCol[] and RbuObjIter.abTblPk[] arrays allocated ** by an earlier call to rbuObjIterCacheTableInfo(). */ static void rbuObjIterFreeCols(RbuObjIter *pIter){ int i; for(i=0; inTblCol; i++){ sqlite3_free(pIter->azTblCol[i]); sqlite3_free(pIter->azTblType[i]); } sqlite3_free(pIter->azTblCol); pIter->azTblCol = 0; pIter->azTblType = 0; pIter->aiSrcOrder = 0; pIter->abTblPk = 0; pIter->abNotNull = 0; pIter->nTblCol = 0; pIter->eType = 0; /* Invalid value */ } /* ** Finalize all statements and free all allocations that are specific to ** the current object (table/index pair). */ static void rbuObjIterClearStatements(RbuObjIter *pIter){ RbuUpdateStmt *pUp; sqlite3_finalize(pIter->pSelect); sqlite3_finalize(pIter->pInsert); sqlite3_finalize(pIter->pDelete); sqlite3_finalize(pIter->pTmpInsert); pUp = pIter->pRbuUpdate; while( pUp ){ RbuUpdateStmt *pTmp = pUp->pNext; sqlite3_finalize(pUp->pUpdate); sqlite3_free(pUp); pUp = pTmp; } pIter->pSelect = 0; pIter->pInsert = 0; pIter->pDelete = 0; pIter->pRbuUpdate = 0; pIter->pTmpInsert = 0; pIter->nCol = 0; } /* ** Clean up any resources allocated as part of the iterator object passed ** as the only argument. */ static void rbuObjIterFinalize(RbuObjIter *pIter){ rbuObjIterClearStatements(pIter); sqlite3_finalize(pIter->pTblIter); sqlite3_finalize(pIter->pIdxIter); rbuObjIterFreeCols(pIter); memset(pIter, 0, sizeof(RbuObjIter)); } /* ** Advance the iterator to the next position. ** ** If no error occurs, SQLITE_OK is returned and the iterator is left ** pointing to the next entry. Otherwise, an error code and message is ** left in the RBU handle passed as the first argument. A copy of the ** error code is returned. 
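**
** An illustrative sketch of how the iterator is driven (the loop body is
** hypothetical and error handling is omitted):
**
**   RbuObjIter iter;
**   for(rbuObjIterFirst(p, &iter); iter.zTbl; rbuObjIterNext(p, &iter)){
**     // iter.zTbl names the current target table, iter.zIdx the current
**     // index (or NULL), and iter.bCleanup is true for the final
**     // "cleanup" visit made to each table.
**   }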
*/ static int rbuObjIterNext(sqlite3rbu *p, RbuObjIter *pIter){ int rc = p->rc; if( rc==SQLITE_OK ){ /* Free any SQLite statements used while processing the previous object */ rbuObjIterClearStatements(pIter); if( pIter->zIdx==0 ){ rc = sqlite3_exec(p->dbMain, "DROP TRIGGER IF EXISTS temp.rbu_insert_tr;" "DROP TRIGGER IF EXISTS temp.rbu_update1_tr;" "DROP TRIGGER IF EXISTS temp.rbu_update2_tr;" "DROP TRIGGER IF EXISTS temp.rbu_delete_tr;" , 0, 0, &p->zErrmsg ); } if( rc==SQLITE_OK ){ if( pIter->bCleanup ){ rbuObjIterFreeCols(pIter); pIter->bCleanup = 0; rc = sqlite3_step(pIter->pTblIter); if( rc!=SQLITE_ROW ){ rc = resetAndCollectError(pIter->pTblIter, &p->zErrmsg); pIter->zTbl = 0; }else{ pIter->zTbl = (const char*)sqlite3_column_text(pIter->pTblIter, 0); rc = pIter->zTbl ? SQLITE_OK : SQLITE_NOMEM; } }else{ if( pIter->zIdx==0 ){ sqlite3_stmt *pIdx = pIter->pIdxIter; rc = sqlite3_bind_text(pIdx, 1, pIter->zTbl, -1, SQLITE_STATIC); } if( rc==SQLITE_OK ){ rc = sqlite3_step(pIter->pIdxIter); if( rc!=SQLITE_ROW ){ rc = resetAndCollectError(pIter->pIdxIter, &p->zErrmsg); pIter->bCleanup = 1; pIter->zIdx = 0; }else{ pIter->zIdx = (const char*)sqlite3_column_text(pIter->pIdxIter, 0); pIter->iTnum = sqlite3_column_int(pIter->pIdxIter, 1); pIter->bUnique = sqlite3_column_int(pIter->pIdxIter, 2); rc = pIter->zIdx ? SQLITE_OK : SQLITE_NOMEM; } } } } } if( rc!=SQLITE_OK ){ rbuObjIterFinalize(pIter); p->rc = rc; } return rc; } /* ** Initialize the iterator structure passed as the second argument. ** ** If no error occurs, SQLITE_OK is returned and the iterator is left ** pointing to the first entry. Otherwise, an error code and message is ** left in the RBU handle passed as the first argument. A copy of the ** error code is returned. */ static int rbuObjIterFirst(sqlite3rbu *p, RbuObjIter *pIter){ int rc; memset(pIter, 0, sizeof(RbuObjIter)); rc = prepareAndCollectError(p->dbRbu, &pIter->pTblIter, &p->zErrmsg, "SELECT substr(name, 6) FROM sqlite_master " "WHERE type IN ('table', 'view') AND name LIKE 'data_%'" ); if( rc==SQLITE_OK ){ rc = prepareAndCollectError(p->dbMain, &pIter->pIdxIter, &p->zErrmsg, "SELECT name, rootpage, sql IS NULL OR substr(8, 6)=='UNIQUE' " " FROM main.sqlite_master " " WHERE type='index' AND tbl_name = ?" ); } pIter->bCleanup = 1; p->rc = rc; return rbuObjIterNext(p, pIter); } /* ** This is a wrapper around "sqlite3_mprintf(zFmt, ...)". If an OOM occurs, ** an error code is stored in the RBU handle passed as the first argument. ** ** If an error has already occurred (p->rc is already set to something other ** than SQLITE_OK), then this function returns NULL without modifying the ** stored error code. In this case it still calls sqlite3_free() on any ** printf() parameters associated with %z conversions. */ static char *rbuMPrintf(sqlite3rbu *p, const char *zFmt, ...){ char *zSql = 0; va_list ap; va_start(ap, zFmt); zSql = sqlite3_vmprintf(zFmt, ap); if( p->rc==SQLITE_OK ){ if( zSql==0 ) p->rc = SQLITE_NOMEM; }else{ sqlite3_free(zSql); zSql = 0; } va_end(ap); return zSql; } /* ** Argument zFmt is a sqlite3_mprintf() style format string. The trailing ** arguments are the usual subsitution values. This function performs ** the printf() style substitutions and executes the result as an SQL ** statement on the RBU handles database. ** ** If an error occurs, an error code and error message is stored in the ** RBU handle. If an error has already occurred when this function is ** called, it is a no-op. 
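**
** For example, a call such as:
**
**   rbuMPrintfExec(p, p->dbRbu, "ATTACH %Q AS stat", p->zState);
**
** formats the SQL with sqlite3_vmprintf(), executes it with
** sqlite3_exec(), frees the formatted string, and records any error in
** the RBU handle.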
*/ static int rbuMPrintfExec(sqlite3rbu *p, sqlite3 *db, const char *zFmt, ...){ va_list ap; char *zSql; va_start(ap, zFmt); zSql = sqlite3_vmprintf(zFmt, ap); if( p->rc==SQLITE_OK ){ if( zSql==0 ){ p->rc = SQLITE_NOMEM; }else{ p->rc = sqlite3_exec(db, zSql, 0, 0, &p->zErrmsg); } } sqlite3_free(zSql); va_end(ap); return p->rc; } /* ** Attempt to allocate and return a pointer to a zeroed block of nByte ** bytes. ** ** If an error (i.e. an OOM condition) occurs, return NULL and leave an ** error code in the rbu handle passed as the first argument. Or, if an ** error has already occurred when this function is called, return NULL ** immediately without attempting the allocation or modifying the stored ** error code. */ static void *rbuMalloc(sqlite3rbu *p, int nByte){ void *pRet = 0; if( p->rc==SQLITE_OK ){ assert( nByte>0 ); pRet = sqlite3_malloc(nByte); if( pRet==0 ){ p->rc = SQLITE_NOMEM; }else{ memset(pRet, 0, nByte); } } return pRet; } /* ** Allocate and zero the pIter->azTblCol[] and abTblPk[] arrays so that ** there is room for at least nCol elements. If an OOM occurs, store an ** error code in the RBU handle passed as the first argument. */ static void rbuAllocateIterArrays(sqlite3rbu *p, RbuObjIter *pIter, int nCol){ int nByte = (2*sizeof(char*) + sizeof(int) + 3*sizeof(u8)) * nCol; char **azNew; azNew = (char**)rbuMalloc(p, nByte); if( azNew ){ pIter->azTblCol = azNew; pIter->azTblType = &azNew[nCol]; pIter->aiSrcOrder = (int*)&pIter->azTblType[nCol]; pIter->abTblPk = (u8*)&pIter->aiSrcOrder[nCol]; pIter->abNotNull = (u8*)&pIter->abTblPk[nCol]; pIter->abIndexed = (u8*)&pIter->abNotNull[nCol]; } } /* ** The first argument must be a nul-terminated string. This function ** returns a copy of the string in memory obtained from sqlite3_malloc(). ** It is the responsibility of the caller to eventually free this memory ** using sqlite3_free(). ** ** If an OOM condition is encountered when attempting to allocate memory, ** output variable (*pRc) is set to SQLITE_NOMEM before returning. Otherwise, ** if the allocation succeeds, (*pRc) is left unchanged. */ static char *rbuStrndup(const char *zStr, int *pRc){ char *zRet = 0; assert( *pRc==SQLITE_OK ); if( zStr ){ int nCopy = strlen(zStr) + 1; zRet = (char*)sqlite3_malloc(nCopy); if( zRet ){ memcpy(zRet, zStr, nCopy); }else{ *pRc = SQLITE_NOMEM; } } return zRet; } /* ** Finalize the statement passed as the second argument. ** ** If the sqlite3_finalize() call indicates that an error occurs, and the ** rbu handle error code is not already set, set the error code and error ** message accordingly. */ static void rbuFinalize(sqlite3rbu *p, sqlite3_stmt *pStmt){ sqlite3 *db = sqlite3_db_handle(pStmt); int rc = sqlite3_finalize(pStmt); if( p->rc==SQLITE_OK && rc!=SQLITE_OK ){ p->rc = rc; p->zErrmsg = sqlite3_mprintf("%s", sqlite3_errmsg(db)); } } /* Determine the type of a table. ** ** peType is of type (int*), a pointer to an output parameter of type ** (int). This call sets the output parameter as follows, depending ** on the type of the table specified by parameters dbName and zTbl. ** ** RBU_PK_NOTABLE: No such table. ** RBU_PK_NONE: Table has an implicit rowid. ** RBU_PK_IPK: Table has an explicit IPK column. ** RBU_PK_EXTERNAL: Table has an external PK index. ** RBU_PK_WITHOUT_ROWID: Table is WITHOUT ROWID. ** RBU_PK_VTAB: Table is a virtual table. ** ** Argument *piPk is also of type (int*), and also points to an output ** parameter. Unless the table has an external primary key index ** (i.e. 
unless *peType is set to 3), then *piPk is set to zero. Or, ** if the table does have an external primary key index, then *piPk ** is set to the root page number of the primary key index before ** returning. ** ** ALGORITHM: ** ** if( no entry exists in sqlite_master ){ ** return RBU_PK_NOTABLE ** }else if( sql for the entry starts with "CREATE VIRTUAL" ){ ** return RBU_PK_VTAB ** }else if( "PRAGMA index_list()" for the table contains a "pk" index ){ ** if( the index that is the pk exists in sqlite_master ){ ** *piPK = rootpage of that index. ** return RBU_PK_EXTERNAL ** }else{ ** return RBU_PK_WITHOUT_ROWID ** } ** }else if( "PRAGMA table_info()" lists one or more "pk" columns ){ ** return RBU_PK_IPK ** }else{ ** return RBU_PK_NONE ** } */ static void rbuTableType( sqlite3rbu *p, const char *zTab, int *peType, int *piTnum, int *piPk ){ /* ** 0) SELECT count(*) FROM sqlite_master where name=%Q AND IsVirtual(%Q) ** 1) PRAGMA index_list = ? ** 2) SELECT count(*) FROM sqlite_master where name=%Q ** 3) PRAGMA table_info = ? */ sqlite3_stmt *aStmt[4] = {0, 0, 0, 0}; *peType = RBU_PK_NOTABLE; *piPk = 0; assert( p->rc==SQLITE_OK ); p->rc = prepareFreeAndCollectError(p->dbMain, &aStmt[0], &p->zErrmsg, sqlite3_mprintf( "SELECT (sql LIKE 'create virtual%%'), rootpage" " FROM sqlite_master" " WHERE name=%Q", zTab )); if( p->rc!=SQLITE_OK || sqlite3_step(aStmt[0])!=SQLITE_ROW ){ /* Either an error, or no such table. */ goto rbuTableType_end; } if( sqlite3_column_int(aStmt[0], 0) ){ *peType = RBU_PK_VTAB; /* virtual table */ goto rbuTableType_end; } *piTnum = sqlite3_column_int(aStmt[0], 1); p->rc = prepareFreeAndCollectError(p->dbMain, &aStmt[1], &p->zErrmsg, sqlite3_mprintf("PRAGMA index_list=%Q",zTab) ); if( p->rc ) goto rbuTableType_end; while( sqlite3_step(aStmt[1])==SQLITE_ROW ){ const u8 *zOrig = sqlite3_column_text(aStmt[1], 3); const u8 *zIdx = sqlite3_column_text(aStmt[1], 1); if( zOrig && zIdx && zOrig[0]=='p' ){ p->rc = prepareFreeAndCollectError(p->dbMain, &aStmt[2], &p->zErrmsg, sqlite3_mprintf( "SELECT rootpage FROM sqlite_master WHERE name = %Q", zIdx )); if( p->rc==SQLITE_OK ){ if( sqlite3_step(aStmt[2])==SQLITE_ROW ){ *piPk = sqlite3_column_int(aStmt[2], 0); *peType = RBU_PK_EXTERNAL; }else{ *peType = RBU_PK_WITHOUT_ROWID; } } goto rbuTableType_end; } } p->rc = prepareFreeAndCollectError(p->dbMain, &aStmt[3], &p->zErrmsg, sqlite3_mprintf("PRAGMA table_info=%Q",zTab) ); if( p->rc==SQLITE_OK ){ while( sqlite3_step(aStmt[3])==SQLITE_ROW ){ if( sqlite3_column_int(aStmt[3],5)>0 ){ *peType = RBU_PK_IPK; /* explicit IPK column */ goto rbuTableType_end; } } *peType = RBU_PK_NONE; } rbuTableType_end: { int i; for(i=0; iabIndexed[] array. 
*/ static void rbuObjIterCacheIndexedCols(sqlite3rbu *p, RbuObjIter *pIter){ sqlite3_stmt *pList = 0; int bIndex = 0; if( p->rc==SQLITE_OK ){ memcpy(pIter->abIndexed, pIter->abTblPk, sizeof(u8)*pIter->nTblCol); p->rc = prepareFreeAndCollectError(p->dbMain, &pList, &p->zErrmsg, sqlite3_mprintf("PRAGMA main.index_list = %Q", pIter->zTbl) ); } while( p->rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pList) ){ const char *zIdx = (const char*)sqlite3_column_text(pList, 1); sqlite3_stmt *pXInfo = 0; if( zIdx==0 ) break; p->rc = prepareFreeAndCollectError(p->dbMain, &pXInfo, &p->zErrmsg, sqlite3_mprintf("PRAGMA main.index_xinfo = %Q", zIdx) ); while( p->rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pXInfo) ){ int iCid = sqlite3_column_int(pXInfo, 1); if( iCid>=0 ) pIter->abIndexed[iCid] = 1; } rbuFinalize(p, pXInfo); bIndex = 1; } rbuFinalize(p, pList); if( bIndex==0 ) pIter->abIndexed = 0; } /* ** If they are not already populated, populate the pIter->azTblCol[], ** pIter->abTblPk[], pIter->nTblCol and pIter->bRowid variables according to ** the table (not index) that the iterator currently points to. ** ** Return SQLITE_OK if successful, or an SQLite error code otherwise. If ** an error does occur, an error code and error message are also left in ** the RBU handle. */ static int rbuObjIterCacheTableInfo(sqlite3rbu *p, RbuObjIter *pIter){ if( pIter->azTblCol==0 ){ sqlite3_stmt *pStmt = 0; int nCol = 0; int i; /* for() loop iterator variable */ int bRbuRowid = 0; /* If input table has column "rbu_rowid" */ int iOrder = 0; int iTnum = 0; /* Figure out the type of table this step will deal with. */ assert( pIter->eType==0 ); rbuTableType(p, pIter->zTbl, &pIter->eType, &iTnum, &pIter->iPkTnum); if( p->rc==SQLITE_OK && pIter->eType==RBU_PK_NOTABLE ){ p->rc = SQLITE_ERROR; p->zErrmsg = sqlite3_mprintf("no such table: %s", pIter->zTbl); } if( p->rc ) return p->rc; if( pIter->zIdx==0 ) pIter->iTnum = iTnum; assert( pIter->eType==RBU_PK_NONE || pIter->eType==RBU_PK_IPK || pIter->eType==RBU_PK_EXTERNAL || pIter->eType==RBU_PK_WITHOUT_ROWID || pIter->eType==RBU_PK_VTAB ); /* Populate the azTblCol[] and nTblCol variables based on the columns ** of the input table. Ignore any input table columns that begin with ** "rbu_". */ p->rc = prepareFreeAndCollectError(p->dbRbu, &pStmt, &p->zErrmsg, sqlite3_mprintf("SELECT * FROM 'data_%q'", pIter->zTbl) ); if( p->rc==SQLITE_OK ){ nCol = sqlite3_column_count(pStmt); rbuAllocateIterArrays(p, pIter, nCol); } for(i=0; p->rc==SQLITE_OK && irc); pIter->aiSrcOrder[pIter->nTblCol] = pIter->nTblCol; pIter->azTblCol[pIter->nTblCol++] = zCopy; } else if( 0==sqlite3_stricmp("rbu_rowid", zName) ){ bRbuRowid = 1; } } sqlite3_finalize(pStmt); pStmt = 0; if( p->rc==SQLITE_OK && bRbuRowid!=(pIter->eType==RBU_PK_VTAB || pIter->eType==RBU_PK_NONE) ){ p->rc = SQLITE_ERROR; p->zErrmsg = sqlite3_mprintf( "table data_%q %s rbu_rowid column", pIter->zTbl, (bRbuRowid ? "may not have" : "requires") ); } /* Check that all non-HIDDEN columns in the destination table are also ** present in the input table. Populate the abTblPk[], azTblType[] and ** aiTblOrder[] arrays at the same time. 
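  ** This is done by stepping a "PRAGMA table_info" query on the target
  ** database and, for each target column reported, locating the matching
  ** entry in azTblCol[]. Matching entries are swapped into target-table
  ** column order (the original data_xxx ordering is preserved via
  ** aiSrcOrder[]); a target column with no match in the data_xxx table
  ** is reported as an error.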
*/ if( p->rc==SQLITE_OK ){ p->rc = prepareFreeAndCollectError(p->dbMain, &pStmt, &p->zErrmsg, sqlite3_mprintf("PRAGMA table_info(%Q)", pIter->zTbl) ); } while( p->rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pStmt) ){ const char *zName = (const char*)sqlite3_column_text(pStmt, 1); if( zName==0 ) break; /* An OOM - finalize() below returns S_NOMEM */ for(i=iOrder; inTblCol; i++){ if( 0==strcmp(zName, pIter->azTblCol[i]) ) break; } if( i==pIter->nTblCol ){ p->rc = SQLITE_ERROR; p->zErrmsg = sqlite3_mprintf("column missing from data_%q: %s", pIter->zTbl, zName ); }else{ int iPk = sqlite3_column_int(pStmt, 5); int bNotNull = sqlite3_column_int(pStmt, 3); const char *zType = (const char*)sqlite3_column_text(pStmt, 2); if( i!=iOrder ){ SWAP(int, pIter->aiSrcOrder[i], pIter->aiSrcOrder[iOrder]); SWAP(char*, pIter->azTblCol[i], pIter->azTblCol[iOrder]); } pIter->azTblType[iOrder] = rbuStrndup(zType, &p->rc); pIter->abTblPk[iOrder] = (iPk!=0); pIter->abNotNull[iOrder] = (u8)bNotNull || (iPk!=0); iOrder++; } } rbuFinalize(p, pStmt); rbuObjIterCacheIndexedCols(p, pIter); assert( pIter->eType!=RBU_PK_VTAB || pIter->abIndexed==0 ); } return p->rc; } /* ** This function constructs and returns a pointer to a nul-terminated ** string containing some SQL clause or list based on one or more of the ** column names currently stored in the pIter->azTblCol[] array. */ static char *rbuObjIterGetCollist( sqlite3rbu *p, /* RBU object */ RbuObjIter *pIter /* Object iterator for column names */ ){ char *zList = 0; const char *zSep = ""; int i; for(i=0; inTblCol; i++){ const char *z = pIter->azTblCol[i]; zList = rbuMPrintf(p, "%z%s\"%w\"", zList, zSep, z); zSep = ", "; } return zList; } /* ** This function is used to create a SELECT list (the list of SQL ** expressions that follows a SELECT keyword) for a SELECT statement ** used to read from an data_xxx or rbu_tmp_xxx table while updating the ** index object currently indicated by the iterator object passed as the ** second argument. A "PRAGMA index_xinfo = " statement is used ** to obtain the required information. ** ** If the index is of the following form: ** ** CREATE INDEX i1 ON t1(c, b COLLATE nocase); ** ** and "t1" is a table with an explicit INTEGER PRIMARY KEY column ** "ipk", the returned string is: ** ** "`c` COLLATE 'BINARY', `b` COLLATE 'NOCASE', `ipk` COLLATE 'BINARY'" ** ** As well as the returned string, three other malloc'd strings are ** returned via output parameters. As follows: ** ** pzImposterCols: ... ** pzImposterPk: ... ** pzWhere: ... */ static char *rbuObjIterGetIndexCols( sqlite3rbu *p, /* RBU object */ RbuObjIter *pIter, /* Object iterator for column names */ char **pzImposterCols, /* OUT: Columns for imposter table */ char **pzImposterPk, /* OUT: Imposter PK clause */ char **pzWhere, /* OUT: WHERE clause */ int *pnBind /* OUT: Trbul number of columns */ ){ int rc = p->rc; /* Error code */ int rc2; /* sqlite3_finalize() return code */ char *zRet = 0; /* String to return */ char *zImpCols = 0; /* String to return via *pzImposterCols */ char *zImpPK = 0; /* String to return via *pzImposterPK */ char *zWhere = 0; /* String to return via *pzWhere */ int nBind = 0; /* Value to return via *pnBind */ const char *zCom = ""; /* Set to ", " later on */ const char *zAnd = ""; /* Set to " AND " later on */ sqlite3_stmt *pXInfo = 0; /* PRAGMA index_xinfo = ? 
*/ if( rc==SQLITE_OK ){ assert( p->zErrmsg==0 ); rc = prepareFreeAndCollectError(p->dbMain, &pXInfo, &p->zErrmsg, sqlite3_mprintf("PRAGMA main.index_xinfo = %Q", pIter->zIdx) ); } while( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pXInfo) ){ int iCid = sqlite3_column_int(pXInfo, 1); int bDesc = sqlite3_column_int(pXInfo, 3); const char *zCollate = (const char*)sqlite3_column_text(pXInfo, 4); const char *zCol; const char *zType; if( iCid<0 ){ /* An integer primary key. If the table has an explicit IPK, use ** its name. Otherwise, use "rbu_rowid". */ if( pIter->eType==RBU_PK_IPK ){ int i; for(i=0; pIter->abTblPk[i]==0; i++); assert( inTblCol ); zCol = pIter->azTblCol[i]; }else{ zCol = "rbu_rowid"; } zType = "INTEGER"; }else{ zCol = pIter->azTblCol[iCid]; zType = pIter->azTblType[iCid]; } zRet = sqlite3_mprintf("%z%s\"%w\" COLLATE %Q", zRet, zCom, zCol, zCollate); if( pIter->bUnique==0 || sqlite3_column_int(pXInfo, 5) ){ const char *zOrder = (bDesc ? " DESC" : ""); zImpPK = sqlite3_mprintf("%z%s\"rbu_imp_%d%w\"%s", zImpPK, zCom, nBind, zCol, zOrder ); } zImpCols = sqlite3_mprintf("%z%s\"rbu_imp_%d%w\" %s COLLATE %Q", zImpCols, zCom, nBind, zCol, zType, zCollate ); zWhere = sqlite3_mprintf( "%z%s\"rbu_imp_%d%w\" IS ?", zWhere, zAnd, nBind, zCol ); if( zRet==0 || zImpPK==0 || zImpCols==0 || zWhere==0 ) rc = SQLITE_NOMEM; zCom = ", "; zAnd = " AND "; nBind++; } rc2 = sqlite3_finalize(pXInfo); if( rc==SQLITE_OK ) rc = rc2; if( rc!=SQLITE_OK ){ sqlite3_free(zRet); sqlite3_free(zImpCols); sqlite3_free(zImpPK); sqlite3_free(zWhere); zRet = 0; zImpCols = 0; zImpPK = 0; zWhere = 0; p->rc = rc; } *pzImposterCols = zImpCols; *pzImposterPk = zImpPK; *pzWhere = zWhere; *pnBind = nBind; return zRet; } /* ** Assuming the current table columns are "a", "b" and "c", and the zObj ** paramter is passed "old", return a string of the form: ** ** "old.a, old.b, old.b" ** ** With the column names escaped. ** ** For tables with implicit rowids - RBU_PK_EXTERNAL and RBU_PK_NONE, append ** the text ", old._rowid_" to the returned value. */ static char *rbuObjIterGetOldlist( sqlite3rbu *p, RbuObjIter *pIter, const char *zObj ){ char *zList = 0; if( p->rc==SQLITE_OK && pIter->abIndexed ){ const char *zS = ""; int i; for(i=0; inTblCol; i++){ if( pIter->abIndexed[i] ){ const char *zCol = pIter->azTblCol[i]; zList = sqlite3_mprintf("%z%s%s.\"%w\"", zList, zS, zObj, zCol); }else{ zList = sqlite3_mprintf("%z%sNULL", zList, zS); } zS = ", "; if( zList==0 ){ p->rc = SQLITE_NOMEM; break; } } /* For a table with implicit rowids, append "old._rowid_" to the list. */ if( pIter->eType==RBU_PK_EXTERNAL || pIter->eType==RBU_PK_NONE ){ zList = rbuMPrintf(p, "%z, %s._rowid_", zList, zObj); } } return zList; } /* ** Return an expression that can be used in a WHERE clause to match the ** primary key of the current table. 
For example, if the table is: ** ** CREATE TABLE t1(a, b, c, PRIMARY KEY(b, c)); ** ** Return the string: ** ** "b = ?1 AND c = ?2" */ static char *rbuObjIterGetWhere( sqlite3rbu *p, RbuObjIter *pIter ){ char *zList = 0; if( pIter->eType==RBU_PK_VTAB || pIter->eType==RBU_PK_NONE ){ zList = rbuMPrintf(p, "_rowid_ = ?%d", pIter->nTblCol+1); }else if( pIter->eType==RBU_PK_EXTERNAL ){ const char *zSep = ""; int i; for(i=0; inTblCol; i++){ if( pIter->abTblPk[i] ){ zList = rbuMPrintf(p, "%z%sc%d=?%d", zList, zSep, i, i+1); zSep = " AND "; } } zList = rbuMPrintf(p, "_rowid_ = (SELECT id FROM rbu_imposter2 WHERE %z)", zList ); }else{ const char *zSep = ""; int i; for(i=0; inTblCol; i++){ if( pIter->abTblPk[i] ){ const char *zCol = pIter->azTblCol[i]; zList = rbuMPrintf(p, "%z%s\"%w\"=?%d", zList, zSep, zCol, i+1); zSep = " AND "; } } } return zList; } /* ** The SELECT statement iterating through the keys for the current object ** (p->objiter.pSelect) currently points to a valid row. However, there ** is something wrong with the rbu_control value in the rbu_control value ** stored in the (p->nCol+1)'th column. Set the error code and error message ** of the RBU handle to something reflecting this. */ static void rbuBadControlError(sqlite3rbu *p){ p->rc = SQLITE_ERROR; p->zErrmsg = sqlite3_mprintf("invalid rbu_control value"); } /* ** Return a nul-terminated string containing the comma separated list of ** assignments that should be included following the "SET" keyword of ** an UPDATE statement used to update the table object that the iterator ** passed as the second argument currently points to if the rbu_control ** column of the data_xxx table entry is set to zMask. ** ** The memory for the returned string is obtained from sqlite3_malloc(). ** It is the responsibility of the caller to eventually free it using ** sqlite3_free(). ** ** If an OOM error is encountered when allocating space for the new ** string, an error code is left in the rbu handle passed as the first ** argument and NULL is returned. Or, if an error has already occurred ** when this function is called, NULL is returned immediately, without ** attempting the allocation or modifying the stored error code. */ static char *rbuObjIterGetSetlist( sqlite3rbu *p, RbuObjIter *pIter, const char *zMask ){ char *zList = 0; if( p->rc==SQLITE_OK ){ int i; if( strlen(zMask)!=pIter->nTblCol ){ rbuBadControlError(p); }else{ const char *zSep = ""; for(i=0; inTblCol; i++){ char c = zMask[pIter->aiSrcOrder[i]]; if( c=='x' ){ zList = rbuMPrintf(p, "%z%s\"%w\"=?%d", zList, zSep, pIter->azTblCol[i], i+1 ); zSep = ", "; } if( c=='d' ){ zList = rbuMPrintf(p, "%z%s\"%w\"=rbu_delta(\"%w\", ?%d)", zList, zSep, pIter->azTblCol[i], pIter->azTblCol[i], i+1 ); zSep = ", "; } } } } return zList; } /* ** Return a nul-terminated string consisting of nByte comma separated ** "?" expressions. For example, if nByte is 3, return a pointer to ** a buffer containing the string "?,?,?". ** ** The memory for the returned string is obtained from sqlite3_malloc(). ** It is the responsibility of the caller to eventually free it using ** sqlite3_free(). ** ** If an OOM error is encountered when allocating space for the new ** string, an error code is left in the rbu handle passed as the first ** argument and NULL is returned. Or, if an error has already occurred ** when this function is called, NULL is returned immediately, without ** attempting the allocation or modifying the stored error code. 
*/ static char *rbuObjIterGetBindlist(sqlite3rbu *p, int nBind){ char *zRet = 0; int nByte = nBind*2 + 1; zRet = (char*)rbuMalloc(p, nByte); if( zRet ){ int i; for(i=0; izIdx==0 ); if( p->rc==SQLITE_OK ){ const char *zSep = "PRIMARY KEY("; sqlite3_stmt *pXList = 0; /* PRAGMA index_list = (pIter->zTbl) */ sqlite3_stmt *pXInfo = 0; /* PRAGMA index_xinfo = */ p->rc = prepareFreeAndCollectError(p->dbMain, &pXList, &p->zErrmsg, sqlite3_mprintf("PRAGMA main.index_list = %Q", pIter->zTbl) ); while( p->rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pXList) ){ const char *zOrig = (const char*)sqlite3_column_text(pXList,3); if( zOrig && strcmp(zOrig, "pk")==0 ){ const char *zIdx = (const char*)sqlite3_column_text(pXList,1); if( zIdx ){ p->rc = prepareFreeAndCollectError(p->dbMain, &pXInfo, &p->zErrmsg, sqlite3_mprintf("PRAGMA main.index_xinfo = %Q", zIdx) ); } break; } } rbuFinalize(p, pXList); while( p->rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pXInfo) ){ if( sqlite3_column_int(pXInfo, 5) ){ /* int iCid = sqlite3_column_int(pXInfo, 0); */ const char *zCol = (const char*)sqlite3_column_text(pXInfo, 2); const char *zDesc = sqlite3_column_int(pXInfo, 3) ? " DESC" : ""; z = rbuMPrintf(p, "%z%s\"%w\"%s", z, zSep, zCol, zDesc); zSep = ", "; } } z = rbuMPrintf(p, "%z)", z); rbuFinalize(p, pXInfo); } return z; } /* ** This function creates the second imposter table used when writing to ** a table b-tree where the table has an external primary key. If the ** iterator passed as the second argument does not currently point to ** a table (not index) with an external primary key, this function is a ** no-op. ** ** Assuming the iterator does point to a table with an external PK, this ** function creates a WITHOUT ROWID imposter table named "rbu_imposter2" ** used to access that PK index. For example, if the target table is ** declared as follows: ** ** CREATE TABLE t1(a, b TEXT, c REAL, PRIMARY KEY(b, c)); ** ** then the imposter table schema is: ** ** CREATE TABLE rbu_imposter2(c1 TEXT, c2 REAL, id INTEGER) WITHOUT ROWID; ** */ static void rbuCreateImposterTable2(sqlite3rbu *p, RbuObjIter *pIter){ if( p->rc==SQLITE_OK && pIter->eType==RBU_PK_EXTERNAL ){ int tnum = pIter->iPkTnum; /* Root page of PK index */ sqlite3_stmt *pQuery = 0; /* SELECT name ... WHERE rootpage = $tnum */ const char *zIdx = 0; /* Name of PK index */ sqlite3_stmt *pXInfo = 0; /* PRAGMA main.index_xinfo = $zIdx */ const char *zComma = ""; char *zCols = 0; /* Used to build up list of table cols */ char *zPk = 0; /* Used to build up table PK declaration */ /* Figure out the name of the primary key index for the current table. ** This is needed for the argument to "PRAGMA index_xinfo". Set ** zIdx to point to a nul-terminated string containing this name. */ p->rc = prepareAndCollectError(p->dbMain, &pQuery, &p->zErrmsg, "SELECT name FROM sqlite_master WHERE rootpage = ?" 
); if( p->rc==SQLITE_OK ){ sqlite3_bind_int(pQuery, 1, tnum); if( SQLITE_ROW==sqlite3_step(pQuery) ){ zIdx = (const char*)sqlite3_column_text(pQuery, 0); } } if( zIdx ){ p->rc = prepareFreeAndCollectError(p->dbMain, &pXInfo, &p->zErrmsg, sqlite3_mprintf("PRAGMA main.index_xinfo = %Q", zIdx) ); } rbuFinalize(p, pQuery); while( p->rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pXInfo) ){ int bKey = sqlite3_column_int(pXInfo, 5); if( bKey ){ int iCid = sqlite3_column_int(pXInfo, 1); int bDesc = sqlite3_column_int(pXInfo, 3); const char *zCollate = (const char*)sqlite3_column_text(pXInfo, 4); zCols = rbuMPrintf(p, "%z%sc%d %s COLLATE %s", zCols, zComma, iCid, pIter->azTblType[iCid], zCollate ); zPk = rbuMPrintf(p, "%z%sc%d%s", zPk, zComma, iCid, bDesc?" DESC":""); zComma = ", "; } } zCols = rbuMPrintf(p, "%z, id INTEGER", zCols); rbuFinalize(p, pXInfo); sqlite3_test_control(SQLITE_TESTCTRL_IMPOSTER, p->dbMain, "main", 1, tnum); rbuMPrintfExec(p, p->dbMain, "CREATE TABLE rbu_imposter2(%z, PRIMARY KEY(%z)) WITHOUT ROWID", zCols, zPk ); sqlite3_test_control(SQLITE_TESTCTRL_IMPOSTER, p->dbMain, "main", 0, 0); } } /* ** If an error has already occurred when this function is called, it ** immediately returns zero (without doing any work). Or, if an error ** occurs during the execution of this function, it sets the error code ** in the sqlite3rbu object indicated by the first argument and returns ** zero. ** ** The iterator passed as the second argument is guaranteed to point to ** a table (not an index) when this function is called. This function ** attempts to create any imposter table required to write to the main ** table b-tree of the table before returning. Non-zero is returned if ** an imposter table are created, or zero otherwise. ** ** An imposter table is required in all cases except RBU_PK_VTAB. Only ** virtual tables are written to directly. The imposter table has the ** same schema as the actual target table (less any UNIQUE constraints). ** More precisely, the "same schema" means the same columns, types, ** collation sequences. For tables that do not have an external PRIMARY ** KEY, it also means the same PRIMARY KEY declaration. */ static void rbuCreateImposterTable(sqlite3rbu *p, RbuObjIter *pIter){ if( p->rc==SQLITE_OK && pIter->eType!=RBU_PK_VTAB ){ int tnum = pIter->iTnum; const char *zComma = ""; char *zSql = 0; int iCol; sqlite3_test_control(SQLITE_TESTCTRL_IMPOSTER, p->dbMain, "main", 0, 1); for(iCol=0; p->rc==SQLITE_OK && iColnTblCol; iCol++){ const char *zPk = ""; const char *zCol = pIter->azTblCol[iCol]; const char *zColl = 0; p->rc = sqlite3_table_column_metadata( p->dbMain, "main", pIter->zTbl, zCol, 0, &zColl, 0, 0, 0 ); if( pIter->eType==RBU_PK_IPK && pIter->abTblPk[iCol] ){ /* If the target table column is an "INTEGER PRIMARY KEY", add ** "PRIMARY KEY" to the imposter table column declaration. */ zPk = "PRIMARY KEY "; } zSql = rbuMPrintf(p, "%z%s\"%w\" %s %sCOLLATE %s%s", zSql, zComma, zCol, pIter->azTblType[iCol], zPk, zColl, (pIter->abNotNull[iCol] ? " NOT NULL" : "") ); zComma = ", "; } if( pIter->eType==RBU_PK_WITHOUT_ROWID ){ char *zPk = rbuWithoutRowidPK(p, pIter); if( zPk ){ zSql = rbuMPrintf(p, "%z, %z", zSql, zPk); } } sqlite3_test_control(SQLITE_TESTCTRL_IMPOSTER, p->dbMain, "main", 1, tnum); rbuMPrintfExec(p, p->dbMain, "CREATE TABLE \"rbu_imp_%w\"(%z)%s", pIter->zTbl, zSql, (pIter->eType==RBU_PK_WITHOUT_ROWID ? 
" WITHOUT ROWID" : "") ); sqlite3_test_control(SQLITE_TESTCTRL_IMPOSTER, p->dbMain, "main", 0, 0); } } /* ** Prepare a statement used to insert rows into the "rbu_tmp_xxx" table. ** Specifically a statement of the form: ** ** INSERT INTO rbu_tmp_xxx VALUES(?, ?, ? ...); ** ** The number of bound variables is equal to the number of columns in ** the target table, plus one (for the rbu_control column), plus one more ** (for the rbu_rowid column) if the target table is an implicit IPK or ** virtual table. */ static void rbuObjIterPrepareTmpInsert( sqlite3rbu *p, RbuObjIter *pIter, const char *zCollist, const char *zRbuRowid ){ int bRbuRowid = (pIter->eType==RBU_PK_EXTERNAL || pIter->eType==RBU_PK_NONE); char *zBind = rbuObjIterGetBindlist(p, pIter->nTblCol + 1 + bRbuRowid); if( zBind ){ assert( pIter->pTmpInsert==0 ); p->rc = prepareFreeAndCollectError( p->dbRbu, &pIter->pTmpInsert, &p->zErrmsg, sqlite3_mprintf( "INSERT INTO %s.'rbu_tmp_%q'(rbu_control,%s%s) VALUES(%z)", p->zStateDb, pIter->zTbl, zCollist, zRbuRowid, zBind )); } } static void rbuTmpInsertFunc( sqlite3_context *pCtx, int nVal, sqlite3_value **apVal ){ sqlite3rbu *p = sqlite3_user_data(pCtx); int rc = SQLITE_OK; int i; for(i=0; rc==SQLITE_OK && iobjiter.pTmpInsert, i+1, apVal[i]); } if( rc==SQLITE_OK ){ sqlite3_step(p->objiter.pTmpInsert); rc = sqlite3_reset(p->objiter.pTmpInsert); } if( rc!=SQLITE_OK ){ sqlite3_result_error_code(pCtx, rc); } } /* ** Ensure that the SQLite statement handles required to update the ** target database object currently indicated by the iterator passed ** as the second argument are available. */ static int rbuObjIterPrepareAll( sqlite3rbu *p, RbuObjIter *pIter, int nOffset /* Add "LIMIT -1 OFFSET $nOffset" to SELECT */ ){ assert( pIter->bCleanup==0 ); if( pIter->pSelect==0 && rbuObjIterCacheTableInfo(p, pIter)==SQLITE_OK ){ const int tnum = pIter->iTnum; char *zCollist = 0; /* List of indexed columns */ char **pz = &p->zErrmsg; const char *zIdx = pIter->zIdx; char *zLimit = 0; if( nOffset ){ zLimit = sqlite3_mprintf(" LIMIT -1 OFFSET %d", nOffset); if( !zLimit ) p->rc = SQLITE_NOMEM; } if( zIdx ){ const char *zTbl = pIter->zTbl; char *zImposterCols = 0; /* Columns for imposter table */ char *zImposterPK = 0; /* Primary key declaration for imposter */ char *zWhere = 0; /* WHERE clause on PK columns */ char *zBind = 0; int nBind = 0; assert( pIter->eType!=RBU_PK_VTAB ); zCollist = rbuObjIterGetIndexCols( p, pIter, &zImposterCols, &zImposterPK, &zWhere, &nBind ); zBind = rbuObjIterGetBindlist(p, nBind); /* Create the imposter table used to write to this index. 
*/ sqlite3_test_control(SQLITE_TESTCTRL_IMPOSTER, p->dbMain, "main", 0, 1); sqlite3_test_control(SQLITE_TESTCTRL_IMPOSTER, p->dbMain, "main", 1,tnum); rbuMPrintfExec(p, p->dbMain, "CREATE TABLE \"rbu_imp_%w\"( %s, PRIMARY KEY( %s ) ) WITHOUT ROWID", zTbl, zImposterCols, zImposterPK ); sqlite3_test_control(SQLITE_TESTCTRL_IMPOSTER, p->dbMain, "main", 0, 0); /* Create the statement to insert index entries */ pIter->nCol = nBind; if( p->rc==SQLITE_OK ){ p->rc = prepareFreeAndCollectError( p->dbMain, &pIter->pInsert, &p->zErrmsg, sqlite3_mprintf("INSERT INTO \"rbu_imp_%w\" VALUES(%s)", zTbl, zBind) ); } /* And to delete index entries */ if( p->rc==SQLITE_OK ){ p->rc = prepareFreeAndCollectError( p->dbMain, &pIter->pDelete, &p->zErrmsg, sqlite3_mprintf("DELETE FROM \"rbu_imp_%w\" WHERE %s", zTbl, zWhere) ); } /* Create the SELECT statement to read keys in sorted order */ if( p->rc==SQLITE_OK ){ char *zSql; if( pIter->eType==RBU_PK_EXTERNAL || pIter->eType==RBU_PK_NONE ){ zSql = sqlite3_mprintf( "SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' ORDER BY %s%s", zCollist, p->zStateDb, pIter->zTbl, zCollist, zLimit ); }else{ zSql = sqlite3_mprintf( "SELECT %s, rbu_control FROM 'data_%q' " "WHERE typeof(rbu_control)='integer' AND rbu_control!=1 " "UNION ALL " "SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' " "ORDER BY %s%s", zCollist, pIter->zTbl, zCollist, p->zStateDb, pIter->zTbl, zCollist, zLimit ); } p->rc = prepareFreeAndCollectError(p->dbRbu, &pIter->pSelect, pz, zSql); } sqlite3_free(zImposterCols); sqlite3_free(zImposterPK); sqlite3_free(zWhere); sqlite3_free(zBind); }else{ int bRbuRowid = (pIter->eType==RBU_PK_VTAB || pIter->eType==RBU_PK_NONE); const char *zTbl = pIter->zTbl; /* Table this step applies to */ const char *zWrite; /* Imposter table name */ char *zBindings = rbuObjIterGetBindlist(p, pIter->nTblCol + bRbuRowid); char *zWhere = rbuObjIterGetWhere(p, pIter); char *zOldlist = rbuObjIterGetOldlist(p, pIter, "old"); char *zNewlist = rbuObjIterGetOldlist(p, pIter, "new"); zCollist = rbuObjIterGetCollist(p, pIter); pIter->nCol = pIter->nTblCol; /* Create the SELECT statement to read keys from data_xxx */ if( p->rc==SQLITE_OK ){ p->rc = prepareFreeAndCollectError(p->dbRbu, &pIter->pSelect, pz, sqlite3_mprintf( "SELECT %s, rbu_control%s FROM 'data_%q'%s", zCollist, (bRbuRowid ? ", rbu_rowid" : ""), zTbl, zLimit ) ); } /* Create the imposter table or tables (if required). */ rbuCreateImposterTable(p, pIter); rbuCreateImposterTable2(p, pIter); zWrite = (pIter->eType==RBU_PK_VTAB ? "" : "rbu_imp_"); /* Create the INSERT statement to write to the target PK b-tree */ if( p->rc==SQLITE_OK ){ p->rc = prepareFreeAndCollectError(p->dbMain, &pIter->pInsert, pz, sqlite3_mprintf( "INSERT INTO \"%s%w\"(%s%s) VALUES(%s)", zWrite, zTbl, zCollist, (bRbuRowid ? ", _rowid_" : ""), zBindings ) ); } /* Create the DELETE statement to write to the target PK b-tree */ if( p->rc==SQLITE_OK ){ p->rc = prepareFreeAndCollectError(p->dbMain, &pIter->pDelete, pz, sqlite3_mprintf( "DELETE FROM \"%s%w\" WHERE %s", zWrite, zTbl, zWhere ) ); } if( pIter->abIndexed ){ const char *zRbuRowid = ""; if( pIter->eType==RBU_PK_EXTERNAL || pIter->eType==RBU_PK_NONE ){ zRbuRowid = ", rbu_rowid"; } /* Create the rbu_tmp_xxx table and the triggers to populate it. */ rbuMPrintfExec(p, p->dbRbu, "CREATE TABLE IF NOT EXISTS %s.'rbu_tmp_%q' AS " "SELECT *%s FROM 'data_%q' WHERE 0;" , p->zStateDb , zTbl, (pIter->eType==RBU_PK_EXTERNAL ? 
", 0 AS rbu_rowid" : "") , zTbl ); rbuMPrintfExec(p, p->dbMain, "CREATE TEMP TRIGGER rbu_delete_tr BEFORE DELETE ON \"%s%w\" " "BEGIN " " SELECT rbu_tmp_insert(2, %s);" "END;" "CREATE TEMP TRIGGER rbu_update1_tr BEFORE UPDATE ON \"%s%w\" " "BEGIN " " SELECT rbu_tmp_insert(2, %s);" "END;" "CREATE TEMP TRIGGER rbu_update2_tr AFTER UPDATE ON \"%s%w\" " "BEGIN " " SELECT rbu_tmp_insert(3, %s);" "END;", zWrite, zTbl, zOldlist, zWrite, zTbl, zOldlist, zWrite, zTbl, zNewlist ); if( pIter->eType==RBU_PK_EXTERNAL || pIter->eType==RBU_PK_NONE ){ rbuMPrintfExec(p, p->dbMain, "CREATE TEMP TRIGGER rbu_insert_tr AFTER INSERT ON \"%s%w\" " "BEGIN " " SELECT rbu_tmp_insert(0, %s);" "END;", zWrite, zTbl, zNewlist ); } rbuObjIterPrepareTmpInsert(p, pIter, zCollist, zRbuRowid); } sqlite3_free(zWhere); sqlite3_free(zOldlist); sqlite3_free(zNewlist); sqlite3_free(zBindings); } sqlite3_free(zCollist); sqlite3_free(zLimit); } return p->rc; } /* ** Set output variable *ppStmt to point to an UPDATE statement that may ** be used to update the imposter table for the main table b-tree of the ** table object that pIter currently points to, assuming that the ** rbu_control column of the data_xyz table contains zMask. ** ** If the zMask string does not specify any columns to update, then this ** is not an error. Output variable *ppStmt is set to NULL in this case. */ static int rbuGetUpdateStmt( sqlite3rbu *p, /* RBU handle */ RbuObjIter *pIter, /* Object iterator */ const char *zMask, /* rbu_control value ('x.x.') */ sqlite3_stmt **ppStmt /* OUT: UPDATE statement handle */ ){ RbuUpdateStmt **pp; RbuUpdateStmt *pUp = 0; int nUp = 0; /* In case an error occurs */ *ppStmt = 0; /* Search for an existing statement. If one is found, shift it to the front ** of the LRU queue and return immediately. Otherwise, leave nUp pointing ** to the number of statements currently in the cache and pUp to the ** last object in the list. */ for(pp=&pIter->pRbuUpdate; *pp; pp=&((*pp)->pNext)){ pUp = *pp; if( strcmp(pUp->zMask, zMask)==0 ){ *pp = pUp->pNext; pUp->pNext = pIter->pRbuUpdate; pIter->pRbuUpdate = pUp; *ppStmt = pUp->pUpdate; return SQLITE_OK; } nUp++; } assert( pUp==0 || pUp->pNext==0 ); if( nUp>=SQLITE_RBU_UPDATE_CACHESIZE ){ for(pp=&pIter->pRbuUpdate; *pp!=pUp; pp=&((*pp)->pNext)); *pp = 0; sqlite3_finalize(pUp->pUpdate); pUp->pUpdate = 0; }else{ pUp = (RbuUpdateStmt*)rbuMalloc(p, sizeof(RbuUpdateStmt)+pIter->nTblCol+1); } if( pUp ){ char *zWhere = rbuObjIterGetWhere(p, pIter); char *zSet = rbuObjIterGetSetlist(p, pIter, zMask); char *zUpdate = 0; pUp->zMask = (char*)&pUp[1]; memcpy(pUp->zMask, zMask, pIter->nTblCol); pUp->pNext = pIter->pRbuUpdate; pIter->pRbuUpdate = pUp; if( zSet ){ const char *zPrefix = ""; if( pIter->eType!=RBU_PK_VTAB ) zPrefix = "rbu_imp_"; zUpdate = sqlite3_mprintf("UPDATE \"%s%w\" SET %s WHERE %s", zPrefix, pIter->zTbl, zSet, zWhere ); p->rc = prepareFreeAndCollectError( p->dbMain, &pUp->pUpdate, &p->zErrmsg, zUpdate ); *ppStmt = pUp->pUpdate; } sqlite3_free(zWhere); sqlite3_free(zSet); } return p->rc; } static sqlite3 *rbuOpenDbhandle(sqlite3rbu *p, const char *zName){ sqlite3 *db = 0; if( p->rc==SQLITE_OK ){ const int flags = SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE|SQLITE_OPEN_URI; p->rc = sqlite3_open_v2(zName, &db, flags, p->zVfsName); if( p->rc ){ p->zErrmsg = sqlite3_mprintf("%s", sqlite3_errmsg(db)); sqlite3_close(db); db = 0; } } return db; } /* ** Open the database handle and attach the RBU database as "rbu". If an ** error occurs, leave an error code and message in the RBU handle. 
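**
** (In fact two separate handles are opened below - one on the target
** database and one on the RBU database. If a separate state database
** was specified via zState, it is ATTACHed to the RBU handle under the
** name "stat"; otherwise the update state is stored in the "main"
** schema of the RBU database itself.)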
*/ static void rbuOpenDatabase(sqlite3rbu *p){ assert( p->rc==SQLITE_OK ); assert( p->dbMain==0 && p->dbRbu==0 ); p->eStage = 0; p->dbMain = rbuOpenDbhandle(p, p->zTarget); p->dbRbu = rbuOpenDbhandle(p, p->zRbu); /* If using separate RBU and state databases, attach the state database to ** the RBU db handle now. */ if( p->zState ){ rbuMPrintfExec(p, p->dbRbu, "ATTACH %Q AS stat", p->zState); memcpy(p->zStateDb, "stat", 4); }else{ memcpy(p->zStateDb, "main", 4); } if( p->rc==SQLITE_OK ){ p->rc = sqlite3_create_function(p->dbMain, "rbu_tmp_insert", -1, SQLITE_UTF8, (void*)p, rbuTmpInsertFunc, 0, 0 ); } if( p->rc==SQLITE_OK ){ p->rc = sqlite3_file_control(p->dbMain, "main", SQLITE_FCNTL_RBU, (void*)p); } rbuMPrintfExec(p, p->dbMain, "SELECT * FROM sqlite_master"); /* Mark the database file just opened as an RBU target database. If ** this call returns SQLITE_NOTFOUND, then the RBU vfs is not in use. ** This is an error. */ if( p->rc==SQLITE_OK ){ p->rc = sqlite3_file_control(p->dbMain, "main", SQLITE_FCNTL_RBU, (void*)p); } if( p->rc==SQLITE_NOTFOUND ){ p->rc = SQLITE_ERROR; p->zErrmsg = sqlite3_mprintf("rbu vfs not found"); } } /* ** This routine is a copy of the sqlite3FileSuffix3() routine from the core. ** It is a no-op unless SQLITE_ENABLE_8_3_NAMES is defined. ** ** If SQLITE_ENABLE_8_3_NAMES is set at compile-time and if the database ** filename in zBaseFilename is a URI with the "8_3_names=1" parameter and ** if filename in z[] has a suffix (a.k.a. "extension") that is longer than ** three characters, then shorten the suffix on z[] to be the last three ** characters of the original suffix. ** ** If SQLITE_ENABLE_8_3_NAMES is set to 2 at compile-time, then always ** do the suffix shortening regardless of URI parameter. ** ** Examples: ** ** test.db-journal => test.nal ** test.db-wal => test.wal ** test.db-shm => test.shm ** test.db-mj7f3319fa => test.9fa */ static void rbuFileSuffix3(const char *zBase, char *z){ #ifdef SQLITE_ENABLE_8_3_NAMES #if SQLITE_ENABLE_8_3_NAMES<2 if( sqlite3_uri_boolean(zBase, "8_3_names", 0) ) #endif { int i, sz; sz = sqlite3Strlen30(z); for(i=sz-1; i>0 && z[i]!='/' && z[i]!='.'; i--){} if( z[i]=='.' && ALWAYS(sz>i+4) ) memmove(&z[i+1], &z[sz-3], 4); } #endif } /* ** Return the current wal-index header checksum for the target database ** as a 64-bit integer. ** ** The checksum is store in the first page of xShmMap memory as an 8-byte ** blob starting at byte offset 40. */ static i64 rbuShmChecksum(sqlite3rbu *p){ i64 iRet = 0; if( p->rc==SQLITE_OK ){ sqlite3_file *pDb = p->pTargetFd->pReal; u32 volatile *ptr; p->rc = pDb->pMethods->xShmMap(pDb, 0, 32*1024, 0, (void volatile**)&ptr); if( p->rc==SQLITE_OK ){ iRet = ((i64)ptr[10] << 32) + ptr[11]; } } return iRet; } /* ** This function is called as part of initializing or reinitializing an ** incremental checkpoint. ** ** It populates the sqlite3rbu.aFrame[] array with the set of ** (wal frame -> db page) copy operations required to checkpoint the ** current wal file, and obtains the set of shm locks required to safely ** perform the copy operations directly on the file-system. ** ** If argument pState is not NULL, then the incremental checkpoint is ** being resumed. In this case, if the checksum of the wal-index-header ** following recovery is not the same as the checksum saved in the RbuState ** object, then the rbu handle is set to DONE state. This occurs if some ** other client appends a transaction to the wal file in the middle of ** an incremental checkpoint. 
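**
** (The saved checksum in RbuState.iWalCksum is compared against the
** value returned by rbuShmChecksum() above - the 64-bit value read from
** byte offset 40 of the recovered wal-index header. On a mismatch the
** handle's error code is set to SQLITE_DONE and its stage to
** RBU_STAGE_DONE, so no further checkpoint work is attempted.)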
*/ static void rbuSetupCheckpoint(sqlite3rbu *p, RbuState *pState){ /* If pState is NULL, then the wal file may not have been opened and ** recovered. Running a read-statement here to ensure that doing so ** does not interfere with the "capture" process below. */ if( pState==0 ){ p->eStage = 0; if( p->rc==SQLITE_OK ){ p->rc = sqlite3_exec(p->dbMain, "SELECT * FROM sqlite_master", 0, 0, 0); } } /* Assuming no error has occurred, run a "restart" checkpoint with the ** sqlite3rbu.eStage variable set to CAPTURE. This turns on the following ** special behaviour in the rbu VFS: ** ** * If the exclusive shm WRITER or READ0 lock cannot be obtained, ** the checkpoint fails with SQLITE_BUSY (normally SQLite would ** proceed with running a passive checkpoint instead of failing). ** ** * Attempts to read from the *-wal file or write to the database file ** do not perform any IO. Instead, the frame/page combinations that ** would be read/written are recorded in the sqlite3rbu.aFrame[] ** array. ** ** * Calls to xShmLock(UNLOCK) to release the exclusive shm WRITER, ** READ0 and CHECKPOINT locks taken as part of the checkpoint are ** no-ops. These locks will not be released until the connection ** is closed. ** ** * Attempting to xSync() the database file causes an SQLITE_INTERNAL ** error. ** ** As a result, unless an error (i.e. OOM or SQLITE_BUSY) occurs, the ** checkpoint below fails with SQLITE_INTERNAL, and leaves the aFrame[] ** array populated with a set of (frame -> page) mappings. Because the ** WRITER, CHECKPOINT and READ0 locks are still held, it is safe to copy ** data from the wal file into the database file according to the ** contents of aFrame[]. */ if( p->rc==SQLITE_OK ){ int rc2; p->eStage = RBU_STAGE_CAPTURE; rc2 = sqlite3_exec(p->dbMain, "PRAGMA main.wal_checkpoint=restart", 0, 0,0); if( rc2!=SQLITE_INTERNAL ) p->rc = rc2; } if( p->rc==SQLITE_OK ){ p->eStage = RBU_STAGE_CKPT; p->nStep = (pState ? pState->nRow : 0); p->aBuf = rbuMalloc(p, p->pgsz); p->iWalCksum = rbuShmChecksum(p); } if( p->rc==SQLITE_OK && pState && pState->iWalCksum!=p->iWalCksum ){ p->rc = SQLITE_DONE; p->eStage = RBU_STAGE_DONE; } } /* ** Called when iAmt bytes are read from offset iOff of the wal file while ** the rbu object is in capture mode. Record the frame number of the frame ** being read in the aFrame[] array. */ static int rbuCaptureWalRead(sqlite3rbu *pRbu, i64 iOff, int iAmt){ const u32 mReq = (1<mLock!=mReq ){ pRbu->rc = SQLITE_BUSY; return SQLITE_INTERNAL; } pRbu->pgsz = iAmt; if( pRbu->nFrame==pRbu->nFrameAlloc ){ int nNew = (pRbu->nFrameAlloc ? pRbu->nFrameAlloc : 64) * 2; RbuFrame *aNew; aNew = (RbuFrame*)sqlite3_realloc(pRbu->aFrame, nNew * sizeof(RbuFrame)); if( aNew==0 ) return SQLITE_NOMEM; pRbu->aFrame = aNew; pRbu->nFrameAlloc = nNew; } iFrame = (u32)((iOff-32) / (i64)(iAmt+24)) + 1; if( pRbu->iMaxFrame